From 981e6b4dd911a47ffe4c1f1b0c7ed3d3c7b75c6a Mon Sep 17 00:00:00 2001
From: David Turner
Date: Thu, 4 Jan 2024 15:20:44 -0500
Subject: [PATCH 001/410] Fix issue with seeding optuna samplers.

Figured out a better way to reseed an already instantiated Optuna sampler.
I was overwriting the RNG state in a hacky way before. I didn't realize
there was a reseed_rng() method on the base sampler class that can be
called when the seed is re-initialized.
---
 .../core/components/functions/nonstateful/fitfunctions.py | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/psyneulink/core/components/functions/nonstateful/fitfunctions.py b/psyneulink/core/components/functions/nonstateful/fitfunctions.py
index 86c5523d786..0f8b572e93c 100644
--- a/psyneulink/core/components/functions/nonstateful/fitfunctions.py
+++ b/psyneulink/core/components/functions/nonstateful/fitfunctions.py
@@ -798,9 +798,10 @@ def progress_callback(study, trial):
                 progress.update(opt_task, advance=1)
 
-            # We need to hook into Optuna's random number generator here so that we can allow PsyNeuLink's RNS to
-            # determine the seed for Optuna's RNG. Pretty hacky unfortunately.
-            opt_func._rng = np.random.RandomState(self.owner.initial_seed)
+            # We need to hook into Optuna's random number generator. We set the seed and make sure to call
+            # reseed_rng method.
+            opt_func.seed = self.owner.initial_seed
+            opt_func.reseed_rng()
 
             # Turn off optuna logging except for errors or warnings, it doesn't work well with our PNL progress bar
             optuna.logging.set_verbosity(optuna.logging.WARNING)
 

From 86b7a11cf98270b772179702944432843d5e7910 Mon Sep 17 00:00:00 2001
From: David Turner
Date: Thu, 4 Jan 2024 15:29:20 -0500
Subject: [PATCH 002/410] Switch to QMCSampler instead of CMA-ES

The test for the CMA-ES sampler was actually just falling back to
RandomSampler. I just want to test that things work with another sampler,
so I switched to QMCSampler because it also supports 1D search spaces.
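For context, a minimal standalone sketch (not code from these patches) of the Optuna seeding and sampler behaviour described in the two commit messages above, assuming a recent Optuna 3.x release; the objective function and the parameter name "x" are illustrative only:

import optuna

def objective(trial):
    # Hypothetical 1-D objective, used only to exercise the sampler.
    x = trial.suggest_float("x", -10.0, 10.0)
    return x ** 2

# CMA-ES cannot drive a 1-D search space, so CmaEsSampler ends up deferring to
# its independent (random) sampler, which is why the test switches to
# QMCSampler, a sampler that does support 1-D spaces.
sampler = optuna.samplers.QMCSampler(seed=0)
study = optuna.create_study(direction="minimize", sampler=sampler)
study.optimize(objective, n_trials=10)

# An already instantiated sampler can be re-seeded through the BaseSampler API
# (reseed_rng) rather than by overwriting its private _rng attribute, which is
# the change the first patch makes.
sampler.reseed_rng()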
--- .../stability_flexibility_pec_optimize.py | 2 +- tests/composition/test_parameterestimationcomposition.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Scripts/Debug/stability_flexibility/stability_flexibility_pec_optimize.py b/Scripts/Debug/stability_flexibility/stability_flexibility_pec_optimize.py index 46a6a523b2e..a968c5ab79e 100644 --- a/Scripts/Debug/stability_flexibility/stability_flexibility_pec_optimize.py +++ b/Scripts/Debug/stability_flexibility/stability_flexibility_pec_optimize.py @@ -114,7 +114,7 @@ def reward_rate(sim_data): responseGate.output_ports[0], ], objective_function=reward_rate, - optimization_function=pnl.PECOptimizationFunction(method=optuna.samplers.CmaEsSampler(), + optimization_function=pnl.PECOptimizationFunction(method=optuna.samplers.QMCSampler(), max_iterations=50, direction='minimize'), num_estimates=num_estimates, diff --git a/tests/composition/test_parameterestimationcomposition.py b/tests/composition/test_parameterestimationcomposition.py index 0dcde7a5562..19aab84e154 100644 --- a/tests/composition/test_parameterestimationcomposition.py +++ b/tests/composition/test_parameterestimationcomposition.py @@ -129,9 +129,9 @@ def test_pec_run_input_formats(inputs_dict, error_msg): [ ("differential_evolution", [0.010363518438648106]), (optuna.samplers.RandomSampler(), [0.01]), - (optuna.samplers.CmaEsSampler(), [0.01]), + (optuna.samplers.QMCSampler(), [0.01]), ], - ids=["differential_evolultion", "optuna_random_sampler", "optuna_cmaes_sampler"], + ids=["differential_evolultion", "optuna_random_sampler", "optuna_qmc_sampler"], ) def test_parameter_optimization_ddm(func_mode, opt_method, result): """Test parameter optimization of a DDM in integrator mode""" From 871f960e13419c213ac9f25d94bb5ce1e6364588 Mon Sep 17 00:00:00 2001 From: David Turner Date: Thu, 4 Jan 2024 15:30:04 -0500 Subject: [PATCH 003/410] Update optuna pin to latest. --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index d9e4a43687f..77c301434c6 100644 --- a/requirements.txt +++ b/requirements.txt @@ -11,7 +11,7 @@ matplotlib<3.7.3 modeci_mdf<0.5, >=0.4.3; (platform_machine == 'AMD64' or platform_machine == 'x86_64' or platform_machine == 'arm64' or platform_machine == 'aarch64') and platform_python_implementation == 'CPython' and implementation_name == 'cpython' networkx<3.3 numpy>=1.21.0, <1.24.5 -optuna<3.4.0 +optuna<3.6.0 packaging<24.0 pandas<2.1.5 pillow<10.3.0 From 848fe5004fc3bb8e998ba97591fc7b97919abe25 Mon Sep 17 00:00:00 2001 From: David Turner Date: Fri, 5 Jan 2024 15:14:35 -0500 Subject: [PATCH 004/410] Remove hack for setting optuna RNG. --- .../core/components/functions/nonstateful/fitfunctions.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/psyneulink/core/components/functions/nonstateful/fitfunctions.py b/psyneulink/core/components/functions/nonstateful/fitfunctions.py index 0f8b572e93c..397252cd613 100644 --- a/psyneulink/core/components/functions/nonstateful/fitfunctions.py +++ b/psyneulink/core/components/functions/nonstateful/fitfunctions.py @@ -798,11 +798,6 @@ def progress_callback(study, trial): progress.update(opt_task, advance=1) - # We need to hook into Optuna's random number generator. We set the seed and make sure to call - # reseed_rng method. 
- opt_func.seed = self.owner.initial_seed - opt_func.reseed_rng() - # Turn off optuna logging except for errors or warnings, it doesn't work well with our PNL progress bar optuna.logging.set_verbosity(optuna.logging.WARNING) From 54c3840ede5b31a38f37c7798873eccc065df8d8 Mon Sep 17 00:00:00 2001 From: David Turner Date: Tue, 9 Jan 2024 14:12:05 -0500 Subject: [PATCH 005/410] Change type to Type for Python 3.7 --- .../core/components/functions/nonstateful/fitfunctions.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/psyneulink/core/components/functions/nonstateful/fitfunctions.py b/psyneulink/core/components/functions/nonstateful/fitfunctions.py index 04f0106c6e8..2a7a187ff20 100644 --- a/psyneulink/core/components/functions/nonstateful/fitfunctions.py +++ b/psyneulink/core/components/functions/nonstateful/fitfunctions.py @@ -268,7 +268,7 @@ class PECOptimizationFunction(OptimizationFunction): - 'differential_evolution' : Differential evolution as implemented by scipy.optimize.differential_evolution - optuna.samplers: Pass any instance of an optuna sampler to use optuna for optimization. - - type[optuna.samplers.BaseSampler]: Pass a class of type optuna.samplers.BaseSampler to use optuna + - Type[optuna.samplers.BaseSampler]: Pass a class of type optuna.samplers.BaseSampler to use optuna for optimization. In this case, the random seed used for the sampler will be the same as the seed used as the intial_seed passed to PEC at contruction. Additonal desired keyword arguments can be passed to the sampler via the optuna_kwargs argument. @@ -302,7 +302,7 @@ class PECOptimizationFunction(OptimizationFunction): @beartype def __init__( self, - method: Union[Literal["differential_evolution"], optuna.samplers.BaseSampler, type[optuna.samplers.BaseSampler]], + method: Union[Literal["differential_evolution"], optuna.samplers.BaseSampler, Type[optuna.samplers.BaseSampler]], optuna_kwargs: Optional[Dict] = None, objective_function: Optional[Callable] = None, search_space=None, From 9932c718e6b8cec81aad90fe30c40e8684467a01 Mon Sep 17 00:00:00 2001 From: David Turner Date: Fri, 19 Jan 2024 14:19:28 -0500 Subject: [PATCH 006/410] Move pin back for optuna, will bump with dependabot. --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 77c301434c6..d9e4a43687f 100644 --- a/requirements.txt +++ b/requirements.txt @@ -11,7 +11,7 @@ matplotlib<3.7.3 modeci_mdf<0.5, >=0.4.3; (platform_machine == 'AMD64' or platform_machine == 'x86_64' or platform_machine == 'arm64' or platform_machine == 'aarch64') and platform_python_implementation == 'CPython' and implementation_name == 'cpython' networkx<3.3 numpy>=1.21.0, <1.24.5 -optuna<3.6.0 +optuna<3.4.0 packaging<24.0 pandas<2.1.5 pillow<10.3.0 From 12306bff68241be47aa7066e5fd22e03748e4a11 Mon Sep 17 00:00:00 2001 From: David Turner Date: Fri, 19 Jan 2024 14:35:36 -0500 Subject: [PATCH 007/410] Add tests to check for warnings. 
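For context, the warning checks this commit adds follow the standard pytest pattern of recording all warnings raised in a block and scanning their messages. A minimal sketch (not code from the patch) with a hypothetical stand-in for the PEC seed handling; only the warning text is taken from the test added below:

import warnings
import pytest

def configure_sampler_seed():
    # Hypothetical stand-in for the PEC/optuna seed handling exercised by the new tests.
    warnings.warn(
        "Overriding seed passed to optuna sampler with seed passed to PEC.",
        UserWarning,
    )

def test_override_seed_warning():
    with pytest.warns(UserWarning) as record:
        configure_sampler_seed()
    # pytest.warns collects every warning raised in the block, so scan the
    # records for the message of interest rather than asserting on a single one.
    assert any(
        "Overriding seed passed to optuna sampler" in str(w.message) for w in record
    )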
--- .../test_parameterestimationcomposition.py | 39 +++++++++++++++++-- 1 file changed, 36 insertions(+), 3 deletions(-) diff --git a/tests/composition/test_parameterestimationcomposition.py b/tests/composition/test_parameterestimationcomposition.py index f7114d172a1..947b453c146 100644 --- a/tests/composition/test_parameterestimationcomposition.py +++ b/tests/composition/test_parameterestimationcomposition.py @@ -131,8 +131,13 @@ def test_pec_run_input_formats(inputs_dict, error_msg): (optuna.samplers.RandomSampler(seed=0), None, [0.01]), (optuna.samplers.QMCSampler(seed=0), None, [0.01]), (optuna.samplers.RandomSampler, {'seed': 0}, [0.01]), + (optuna.samplers.RandomSampler(), None, None) ], - ids=["differential_evolution", "optuna_random_sampler", "optuna_qmc_sampler", "optuna_random_sampler_with_kwargs"], + ids=["differential_evolution", + "optuna_random_sampler", + "optuna_qmc_sampler", + "optuna_random_sampler_with_kwargs", + "optuna_random_sampler_no_seed"], ) def test_parameter_optimization_ddm(func_mode, opt_method, optuna_kwargs, result): """Test parameter optimization of a DDM in integrator mode""" @@ -211,9 +216,37 @@ def reward_rate(sim_data): inputs_dict = {decision: trial_inputs} - ret = pec.run(inputs={comp: trial_inputs}) + # If we are testing an instantiated optuna sampler, make sure the warning is generated about + # random seeds + if isinstance(opt_method, optuna.samplers.RandomSampler): + with pytest.warns(UserWarning) as record: + pec.run(inputs=inputs_dict) + + # Search through the warnings to make sure the one we are looking for is there + found_warning = False + for warning in record: + if "initial_seed on PEC is not None, but instantiated optuna sampler is being used." in str(warning.message): + found_warning = True + + if not found_warning: + raise AssertionError("Did not find warning about random seed") + elif isinstance(opt_method, type) and issubclass(opt_method, optuna.samplers.BaseSampler): + with pytest.warns(UserWarning) as record: + pec.run(inputs=inputs_dict) + + # Search through the warnings to make sure the one we are looking for is there + found_warning = False + for warning in record: + if "Overriding seed passed to optuna sampler with seed passed to PEC." in str(warning.message): + found_warning = True + + if not found_warning: + raise AssertionError("Did not find warning about overriding seed passed") + else: + pec.run(inputs={comp: trial_inputs}) - np.testing.assert_allclose(pec.optimized_parameter_values, result) + if result is not None: + np.testing.assert_allclose(pec.optimized_parameter_values, result) # func_mode is a hacky wa to get properly marked; Python, LLVM, and CUDA From e50fc51a84af861d7e30ac9191fa04c970382deb Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Sat, 3 Feb 2024 19:35:59 -0500 Subject: [PATCH 008/410] requirements: Bump pycuda to <2025 (#2902) Compiled GPU tests pass on a local machine. 
Signed-off-by: Jan Vesely --- cuda_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cuda_requirements.txt b/cuda_requirements.txt index 3a4f02b4cc9..c88d753fff0 100644 --- a/cuda_requirements.txt +++ b/cuda_requirements.txt @@ -1 +1 @@ -pycuda >2018, <2024 +pycuda >2018, <2025 From 89f26a83c86b7a5333436b74186520ba2abc2b34 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Thu, 8 Feb 2024 11:35:13 -0500 Subject: [PATCH 009/410] llvm/component: Explicitly check for numpy array functions (#2905) Numpy builtins change type in numpy-1.25+ which converted Python dispatcher function to C dispatcher in: 60a858a372b14b73547baacf4a472eccfade1073 ("ENH: Improve array function overhead by using vectorcall") Check for np.sum type to exclude parameters of this type from compiled structures. Signed-off-by: Jan Vesely --- psyneulink/core/components/component.py | 1 + 1 file changed, 1 insertion(+) diff --git a/psyneulink/core/components/component.py b/psyneulink/core/components/component.py index 224ac4d2aed..98b76150806 100644 --- a/psyneulink/core/components/component.py +++ b/psyneulink/core/components/component.py @@ -1482,6 +1482,7 @@ def _is_user_only_param(p): # Check if the value type is valid for compilation return not isinstance(val, (str, ComponentsMeta, type(max), + type(np.sum), type(_is_compilation_param), type(self._get_compilation_params))) return False From 36bc8340af4ec738bef8b15dcb299d6dcde68eee Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Sat, 10 Feb 2024 00:29:45 -0500 Subject: [PATCH 010/410] mdf: Encode numpy functions the same as Python functions (#2909) Numpy functions like np.sum are no longer of type FunctionType since Numpy moved dispatcher implementation to C in 1.25 [0,1] Add "type(np.sum)" in addition to types.FunctionType to use the same path for Numpy functions in numpy 1.25. [0] https://github.com/numpy/numpy/commit/60a858a372b14b73547baacf4a472eccfade1073 [1] https://github.com/numpy/numpy/issues/24019 Closes: https://github.com/PrincetonUniversity/PsyNeuLink/issues/2908 Signed-off-by: Jan Vesely --- psyneulink/core/components/component.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/psyneulink/core/components/component.py b/psyneulink/core/components/component.py index 98b76150806..14166794185 100644 --- a/psyneulink/core/components/component.py +++ b/psyneulink/core/components/component.py @@ -4011,7 +4011,12 @@ def parse_parameter_value(value, no_expand_components=False, functions_as_dill=F value = None else: value = value.__qualname__ - elif isinstance(value, types.FunctionType): + + # numpy functions are no longer "FunctionType" since numpy + # moved dispatch implementation from Python to C in + # https://github.com/numpy/numpy/commit/60a858a372b14b73547baacf4a472eccfade1073 + # Use np.sum as a representative of these functions + elif isinstance(value, (types.FunctionType, type(np.sum))): if functions_as_dill: value = base64.encodebytes(dill.dumps(value)).decode('utf-8') elif '.' in value.__qualname__: From 1f176d84fc3da98f1c84348475ea64b36303dafb Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Sat, 10 Feb 2024 01:59:17 -0500 Subject: [PATCH 011/410] requirements: update numpy requirement from <1.24.5,>=1.21.0 to >=1.21.0,<1.25.3 (#2906) Updates the requirements on [numpy](https://github.com/numpy/numpy) to permit the latest version. 
- [Release notes](https://github.com/numpy/numpy/releases) - [Changelog](https://github.com/numpy/numpy/blob/main/doc/RELEASE_WALKTHROUGH.rst) - [Commits](https://github.com/numpy/numpy/compare/v1.21.0...v1.25.2) --- updated-dependencies: - dependency-name: numpy dependency-type: direct:production ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index d4c5ad265a1..f0e8ffe98bb 100644 --- a/requirements.txt +++ b/requirements.txt @@ -10,7 +10,7 @@ llvmlite<0.42 matplotlib<3.7.3 modeci_mdf<0.5, >=0.4.3; (platform_machine == 'AMD64' or platform_machine == 'x86_64' or platform_machine == 'arm64' or platform_machine == 'aarch64') and platform_python_implementation == 'CPython' and implementation_name == 'cpython' networkx<3.3 -numpy>=1.21.0, <1.24.5 +numpy>=1.21.0, <1.25.3 optuna<3.4.0 packaging<24.0 pandas<2.2.1 From 10712297cb43c9f3f3ad2be09b553ce2bdee598a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 10 Feb 2024 16:16:23 +0000 Subject: [PATCH 012/410] requirements: update numpy requirement (#2904) --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index f0e8ffe98bb..0de3cd9a512 100644 --- a/requirements.txt +++ b/requirements.txt @@ -10,7 +10,7 @@ llvmlite<0.42 matplotlib<3.7.3 modeci_mdf<0.5, >=0.4.3; (platform_machine == 'AMD64' or platform_machine == 'x86_64' or platform_machine == 'arm64' or platform_machine == 'aarch64') and platform_python_implementation == 'CPython' and implementation_name == 'cpython' networkx<3.3 -numpy>=1.21.0, <1.25.3 +numpy>=1.21.0, <1.26.5 optuna<3.4.0 packaging<24.0 pandas<2.2.1 From 8952b406132c73d58ee8d3fafd690c24cc3d43f8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 13 Feb 2024 14:47:54 -0500 Subject: [PATCH 013/410] requirements: update llvmlite requirement from <0.42 to <0.43 (#2910) Updates the requirements on [llvmlite](https://github.com/numba/llvmlite) to permit the latest version. - [Release notes](https://github.com/numba/llvmlite/releases) - [Commits](https://github.com/numba/llvmlite/compare/v0.1.0...v0.42.0) --- updated-dependencies: - dependency-name: llvmlite dependency-type: direct:production ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 0de3cd9a512..fc7043938c2 100644 --- a/requirements.txt +++ b/requirements.txt @@ -6,7 +6,7 @@ graph-scheduler>=1.1.1, <1.3.0 graphviz<0.21.0 grpcio<1.61.0 leabra-psyneulink<0.3.3 -llvmlite<0.42 +llvmlite<0.43 matplotlib<3.7.3 modeci_mdf<0.5, >=0.4.3; (platform_machine == 'AMD64' or platform_machine == 'x86_64' or platform_machine == 'arm64' or platform_machine == 'aarch64') and platform_python_implementation == 'CPython' and implementation_name == 'cpython' networkx<3.3 From 60fb50b6077205bed2d955bd1e7e735a8f1e3967 Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Thu, 11 Jan 2024 22:52:12 +0000 Subject: [PATCH 014/410] tests: replace some assert == with numpy.testing.assert_array_equal --- tests/composition/test_composition.py | 50 ++++++++++++++++------ tests/composition/test_control.py | 2 +- tests/composition/test_runtime_params.py | 9 +++- tests/mechanisms/test_control_mechanism.py | 2 +- 4 files changed, 45 insertions(+), 18 deletions(-) diff --git a/tests/composition/test_composition.py b/tests/composition/test_composition.py index 29c1af6658d..6bf10bfb741 100644 --- a/tests/composition/test_composition.py +++ b/tests/composition/test_composition.py @@ -3418,9 +3418,14 @@ def test_function(trial_num): c.run(inputs=test_function, num_trials=10) - assert c.parameters.results.get(c) == [[np.array([0.])], [np.array([1.])], [np.array([2.])], [np.array([3.])], - [np.array([4.])], [np.array([5.])], [np.array([6.])], [np.array([7.])], - [np.array([8.])], [np.array([9.])]] + np.testing.assert_array_equal( + c.parameters.results.get(c), + [ + [np.array([0.])], [np.array([1.])], [np.array([2.])], [np.array([3.])], + [np.array([4.])], [np.array([5.])], [np.array([6.])], [np.array([7.])], + [np.array([8.])], [np.array([9.])] + ] + ) @pytest.mark.parametrize("mode", [pnl.ExecutionMode.Python, pytest.param(pnl.ExecutionMode.LLVMRun, marks=pytest.mark.llvm), @@ -3444,9 +3449,14 @@ def test_generator(): t_g = test_generator() c.run(inputs=t_g, execution_mode=mode) - assert c.parameters.results.get(c) == [[np.array([0.])], [np.array([1.])], [np.array([2.])], [np.array([3.])], - [np.array([4.])], [np.array([5.])], [np.array([6.])], [np.array([7.])], - [np.array([8.])], [np.array([9.])]] + np.testing.assert_array_equal( + c.parameters.results.get(c), + [ + [np.array([0.])], [np.array([1.])], [np.array([2.])], [np.array([3.])], + [np.array([4.])], [np.array([5.])], [np.array([6.])], [np.array([7.])], + [np.array([8.])], [np.array([9.])] + ] + ) @pytest.mark.parametrize("mode", [pnl.ExecutionMode.Python, pytest.param(pnl.ExecutionMode.LLVMRun, marks=pytest.mark.llvm), @@ -5177,7 +5187,7 @@ def test_three_level_deep_pathway_routing_two_mech(self): c1.add_projection(MappingProjection(), sender=p1, receiver=p3b) result = c1.run([5]) - assert result == [5, 5] + np.testing.assert_array_equal(result, [[5], [5]]) @pytest.mark.pathways def test_three_level_deep_modulation_routing_single_mech(self): @@ -5207,7 +5217,7 @@ def test_three_level_deep_modulation_routing_two_mech(self): c1 = Composition(name='c1', pathways=[[(c2, NodeRole.INPUT)], [ctrl1]]) result = c1.run({c2: [[2], [2]], ctrl1: [5]}) - assert result == [10, 10] + np.testing.assert_array_equal(result, [[10], [10]]) @pytest.mark.pathways @pytest.mark.state_features @@ -6134,9 +6144,15 @@ def 
test_function(trial_num): c.run(inputs=test_function, num_trials=10) - assert c.parameters.results.get(c) == [[np.array([0.])], [np.array([1.])], [np.array([2.])], [np.array([3.])], - [np.array([4.])], [np.array([5.])], [np.array([6.])], [np.array([7.])], - [np.array([8.])], [np.array([9.])]] + + np.testing.assert_array_equal( + c.parameters.results.get(c), + [ + [np.array([0.])], [np.array([1.])], [np.array([2.])], [np.array([3.])], + [np.array([4.])], [np.array([5.])], [np.array([6.])], [np.array([7.])], + [np.array([8.])], [np.array([9.])] + ] + ) def test_function_as_learning_input(self): num_epochs=2 @@ -6729,7 +6745,10 @@ def test_initialize_cycle_values(self): # Run 1 --> Execution 1: 1 + 2 = 3 | Execution 2: 3 + 2 = 5 | Execution 3: 5 + 3 = 8 # Run 2 --> Execution 1: 8 + 1 = 9 | Execution 2: 9 + 2 = 11 | Execution 3: 11 + 3 = 14 - assert abc_Composition.results == [[[3]], [[5]], [[8]], [[9]], [[11]], [[14]]] + np.testing.assert_array_equal( + abc_Composition.results, + [[[3]], [[5]], [[8]], [[9]], [[11]], [[14]]] + ) def test_initialize_cycle_values_warning(self): A = ProcessingMechanism(name='A') @@ -6764,7 +6783,10 @@ def test_initialize_cycles(self, context_specified): # Run 1 --> Execution 1: 1 + 2 = 3 | Execution 2: 3 + 2 = 5 | Execution 3: 5 + 3 = 8 # Run 2 --> Execution 1: 8 + 1 = 9 | Execution 2: 9 + 2 = 11 | Execution 3: 11 + 3 = 14 - assert abc_Composition.results == [[[3]], [[5]], [[8]], [[9]], [[11]], [[14]]] + np.testing.assert_array_equal( + abc_Composition.results, + [[[3]], [[5]], [[8]], [[9]], [[11]], [[14]]] + ) def test_initialize_cycles_excluding_unspecified_nodes(self): A = ProcessingMechanism(name='A') @@ -7141,7 +7163,7 @@ def test_input_labels_and_results_by_node_and_no_orphaning_of_nested_output_node assert input_format == "\nInputs to (nested) INPUT Nodes of OUTER COMP for 2 trials:\n\tMIDDLE COMP: \n\t\tX: [ [[0.0]], [[0.0]] ]\n\t\tINNER COMP: \n\t\t\tA: [ ['red'], ['green'] ]\n\tQ: [ ['red'], ['green'] \n\nFormat as follows for inputs to run():\n{\n\tMIDDLE COMP: [ [[0.0],[0.0]], [[0.0],[0.0]] ],\n\tQ: [ [[0.0]], [[0.0]] ]\n}" result = ocomp.run(inputs={mcomp:[[.2],['green']], Q:[4.6]}) - assert result == [[0.2], [1.],[4.6]] + np.testing.assert_array_equal(result, [[0.2], [1.], [4.6]]) results_by_node = ocomp.get_results_by_nodes() assert results_by_node[O] == [0.2] assert results_by_node[C] == [1.0] diff --git a/tests/composition/test_control.py b/tests/composition/test_control.py index 0533cd934c2..f72c77e8e1a 100644 --- a/tests/composition/test_control.py +++ b/tests/composition/test_control.py @@ -851,7 +851,7 @@ def test_state_input_ports_for_two_input_nodes(self): search_space=[[1],[1],[1]]) ocomp.add_controller(ocm) result = ocomp.run({oa: [[1]], ob: [[2]]}) - assert result == [[2.], [1.]] + np.testing.assert_array_equal(result, [[2.], [1.]]) assert len(ocomp.controller.state_input_ports) == 2 assert all([node in [input_port.shadow_inputs.owner for input_port in ocomp.controller.state_input_ports] for node in {oa, ob}]) diff --git a/tests/composition/test_runtime_params.py b/tests/composition/test_runtime_params.py index b35f0926df6..6bd784f7e48 100644 --- a/tests/composition/test_runtime_params.py +++ b/tests/composition/test_runtime_params.py @@ -195,8 +195,13 @@ def test_input_port_param_no_condition(self): assert T2.input_port.function.parameters.weights.get(C) is None C.run(inputs={T1: 2.0}, ) - assert C.results == [[[1201.5]], # (2*3*20*10)+1+0.5 - [[40.]]] # 2*5*4 + np.testing.assert_array_equal( + C.results, + [ + [[1201.5]], # 
(2*3*20*10)+1+0.5 + [[40.]] # 2*5*4 + ] + ) assert T1.function.slope.base == 5.0 assert T1.parameter_ports['slope'].parameters.value.get(C) == 5.0 assert T2.input_port.function.parameters.scale.get(C.default_execution_id) == 4.0 diff --git a/tests/mechanisms/test_control_mechanism.py b/tests/mechanisms/test_control_mechanism.py index dd5b90609bc..ef622863db3 100644 --- a/tests/mechanisms/test_control_mechanism.py +++ b/tests/mechanisms/test_control_mechanism.py @@ -131,7 +131,7 @@ def test_control_modulation(self): assert Tz.parameter_ports[pnl.SLOPE].mod_afferents[0].sender.owner == C assert C.parameters.control_allocation.get() == [1] result = comp.run(inputs={Tx:[1,1], Ty:[4,4]}) - assert comp.results == [[[4.], [4.]], [[4.], [4.]]] + np.testing.assert_array_equal(comp.results, [[[4.], [4.]], [[4.], [4.]]]) def test_identicalness_of_control_and_gating(self): From e6f27c360d3c302d25ac52ca6769c9e46cedf285 Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Wed, 19 Jul 2023 02:55:35 +0000 Subject: [PATCH 015/410] tests: correct assert shape mismatch --- tests/composition/test_composition.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/tests/composition/test_composition.py b/tests/composition/test_composition.py index 6bf10bfb741..171baeae2f8 100644 --- a/tests/composition/test_composition.py +++ b/tests/composition/test_composition.py @@ -6212,10 +6212,10 @@ def test_function(trial_num): @pytest.mark.control @pytest.mark.parametrize( "controllers, results",[ - ('none', [[-2], [1]]), - ('inner', [[-2], [10]]), - ('outer', [[-2], [10]]), - ('inner_and_outer', [[-2], [100]]), + ('none', [[[-2]], [[1]]]), + ('inner', [[[-2]], [[10]]]), + ('outer', [[[-2]], [[10]]]), + ('inner_and_outer', [[[-2]], [[100]]]), ] ) @pytest.mark.parametrize( @@ -6323,7 +6323,7 @@ def inputs_generator_function(): # run Composition with all three input types and assert that results are as expected. 
ocomp.run(inputs=inputs_source) - assert ocomp.results == results + np.testing.assert_array_equal(ocomp.results, results) expected_format_strings = \ [ From dc89fbc93c0195ebbad8f9ac17895269850ac4f7 Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Wed, 19 Jul 2023 03:45:43 +0000 Subject: [PATCH 016/410] tests: TestControlTimeScales: correct silent shape mismatch in results --- tests/composition/test_control.py | 16 ++++++++-------- tests/ports/test_modulatory_signals.py | 6 ++++-- 2 files changed, 12 insertions(+), 10 deletions(-) diff --git a/tests/composition/test_control.py b/tests/composition/test_control.py index f72c77e8e1a..601b5bbcc03 100644 --- a/tests/composition/test_control.py +++ b/tests/composition/test_control.py @@ -3914,7 +3914,7 @@ def test_time_step_before(self): # assert c.value == [4] assert c.execution_count == 4 - assert comp.results == [[2], [4]] + np.testing.assert_array_equal(comp.results, [[[2]], [[4]]]) def test_time_step_after(self): a = pnl.ProcessingMechanism() @@ -3942,7 +3942,7 @@ def test_time_step_after(self): # assert c.value == [4] assert c.execution_count == 4 - assert comp.results == [[1], [3]] + np.testing.assert_array_equal(comp.results, [[[1]], [[3]]]) def test_pass_before(self): a = pnl.ProcessingMechanism() @@ -3982,7 +3982,7 @@ def test_pass_before(self): # a b assert c.value == [6] assert c.execution_count == 6 - assert comp.results == [[3], [6]] + np.testing.assert_array_equal(comp.results, [[[3]], [[6]]]) def test_pass_after(self): a = pnl.ProcessingMechanism() @@ -4028,7 +4028,7 @@ def test_pass_after(self): # (C-6) assert c.value == [6] assert c.execution_count == 6 - assert comp.results == [[2], [5]] + np.testing.assert_array_equal(comp.results, [[[2]], [[5]]]) def test_trial_before(self): a = pnl.ProcessingMechanism() @@ -4056,7 +4056,7 @@ def test_trial_before(self): # assert c.value == [2] assert c.execution_count == 2 - assert comp.results == [[1], [2]] + np.testing.assert_array_equal(comp.results, [[[1]], [[2]]]) def test_trial_after(self): a = pnl.ProcessingMechanism() @@ -4086,7 +4086,7 @@ def test_trial_after(self): # assert c.value == [2] assert c.execution_count == 2 - assert comp.results == [[1], [1]] + np.testing.assert_array_equal(comp.results, [[[1]], [[1]]]) def test_run_before(self): a = pnl.ProcessingMechanism() @@ -4121,7 +4121,7 @@ def test_run_before(self): # a b assert c.value == [2] assert c.execution_count == 2 - assert comp.results == [[1], [1], [2], [2]] + np.testing.assert_array_equal(comp.results, [[[1]], [[1]], [[2]], [[2]]]) def test_run_after(self): a = pnl.ProcessingMechanism() @@ -4156,4 +4156,4 @@ def test_run_after(self): # a b assert c.value == [2] assert c.execution_count == 2 - assert comp.results == [[1], [1], [1], [1]] + np.testing.assert_allclose(comp.results, [[[1]], [[1]], [[1]], [[1]]]) diff --git a/tests/ports/test_modulatory_signals.py b/tests/ports/test_modulatory_signals.py index b1828ff10c7..30016784204 100644 --- a/tests/ports/test_modulatory_signals.py +++ b/tests/ports/test_modulatory_signals.py @@ -1,3 +1,5 @@ +import numpy as np + from psyneulink.core.compositions.composition import Composition from psyneulink.core.components.functions.nonstateful.transferfunctions import Exponential from psyneulink.core.components.mechanisms.processing.transfermechanism import TransferMechanism @@ -37,7 +39,7 @@ def test_alias_equivalence_for_modulates_and_projections(self): # comp2.add_nodes([tMech3, tMech4, cMech2]) comp2.add_linear_processing_pathway([cMech2, tMech3, tMech4]) 
comp2.run(inputs=inputs) - assert comp1.results == comp2.results + np.testing.assert_array_equal(comp1.results, comp2.results) class TestGatingSignals: def test_alias_equivalence_for_modulates_and_projections(self): @@ -58,4 +60,4 @@ def test_alias_equivalence_for_modulates_and_projections(self): comp2.add_linear_processing_pathway([Tx2, Ty2, G2, Tx2]) comp2.run(inputs={Tx2: inputs}) - assert comp1.results == comp2.results + np.testing.assert_array_equal(comp1.results, comp2.results) From ca09aa9360ecc531d97991dad4ef9b57f9c4da91 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Sat, 10 Feb 2024 18:19:53 -0500 Subject: [PATCH 017/410] treewide, numpy: Use np.prod instead of np.product They are identical and the latter is deprecated. Signed-off-by: Jan Vesely --- .../functions/nonstateful/combinationfunctions.py | 8 ++++---- .../core/components/functions/nonstateful/fitfunctions.py | 2 +- .../functions/nonstateful/optimizationfunctions.py | 4 ++-- psyneulink/core/components/ports/port.py | 2 +- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/psyneulink/core/components/functions/nonstateful/combinationfunctions.py b/psyneulink/core/components/functions/nonstateful/combinationfunctions.py index 564dcc6a73d..7b93330b209 100644 --- a/psyneulink/core/components/functions/nonstateful/combinationfunctions.py +++ b/psyneulink/core/components/functions/nonstateful/combinationfunctions.py @@ -911,7 +911,7 @@ def _function(self, # result = np.sum(np.atleast_2d(variable), axis=0) * scale + offset result = np.sum(np.atleast_2d(variable), axis=1) * scale + offset elif operation == PRODUCT: - result = np.product(np.atleast_2d(variable), axis=1) * scale + offset + result = np.prod(np.atleast_2d(variable), axis=1) * scale + offset else: raise FunctionError("Unrecognized operator ({0}) for Reduce function". format(self._get_current_parameter_value(OPERATION, context))) @@ -1438,7 +1438,7 @@ def _function(self, if operation == SUM: combination = np.sum(variable, axis=0) elif operation == PRODUCT: - combination = np.product(variable, axis=0) + combination = np.prod(variable, axis=0) elif operation == CROSS_ENTROPY: v1 = variable[0] v2 = variable[1] @@ -1452,7 +1452,7 @@ def _function(self, product = combination * scale else: # Hadamard scale - product = np.product([combination, scale], axis=0) + product = np.prod([combination, scale], axis=0) if isinstance(offset, numbers.Number): # scalar offset @@ -1997,7 +1997,7 @@ def _function(self, result = np.sum(means, axis=0) * scale + offset elif operation == PRODUCT: - result = np.product(means, axis=0) * scale + offset + result = np.prod(means, axis=0) * scale + offset else: raise FunctionError("Unrecognized operator ({0}) for CombineMeans function". diff --git a/psyneulink/core/components/functions/nonstateful/fitfunctions.py b/psyneulink/core/components/functions/nonstateful/fitfunctions.py index b54af944c10..0218e382051 100644 --- a/psyneulink/core/components/functions/nonstateful/fitfunctions.py +++ b/psyneulink/core/components/functions/nonstateful/fitfunctions.py @@ -406,7 +406,7 @@ def reset(self, search_space, context=None, **kwargs): f"{SampleIterator.__name__} must have a value for its 'num' attribute." 
) - self.num_iterations = np.product([i.num for i in search_space]) + self.num_iterations = np.prod([i.num for i in search_space]) def _run_simulations(self, *args, context=None): """ diff --git a/psyneulink/core/components/functions/nonstateful/optimizationfunctions.py b/psyneulink/core/components/functions/nonstateful/optimizationfunctions.py index 72a6b6a5723..66e4d8f1e06 100644 --- a/psyneulink/core/components/functions/nonstateful/optimizationfunctions.py +++ b/psyneulink/core/components/functions/nonstateful/optimizationfunctions.py @@ -1607,7 +1607,7 @@ def __init__(self, except TypeError: pass - self.num_iterations = 1 if search_space is None else np.product([i.num for i in search_space]) + self.num_iterations = 1 if search_space is None else np.prod([i.num for i in search_space]) # self.tolerance = tolerance super().__init__( @@ -1664,7 +1664,7 @@ def reset(self, search_space, context=None, **kwargs): raise OptimizationFunctionError(f"Invalid {repr(SEARCH_SPACE)} arg for {self.name}{owner_str}; each " f"{SampleIterator.__name__} must have a value for its 'num' attribute.") - self.num_iterations = np.product([i.num for i in sample_iterators]) + self.num_iterations = np.prod([i.num for i in sample_iterators]) def _get_optimized_controller(self): # self.objective_function may be a bound method of diff --git a/psyneulink/core/components/ports/port.py b/psyneulink/core/components/ports/port.py index 8f5b6db03fd..53c3f5c4d81 100644 --- a/psyneulink/core/components/ports/port.py +++ b/psyneulink/core/components/ports/port.py @@ -2192,7 +2192,7 @@ def _get_combined_mod_val(self, mod_param_name, values): aliases = getattr(self.function.parameters, mod_param_name).aliases if comb_fct==MULTIPLICATIVE or any(mod_spec in aliases for mod_spec in {MULTIPLICATIVE, MULTIPLICATIVE_PARAM}): - return np.product(np.array(values), axis=0) + return np.prod(np.array(values), axis=0) if comb_fct==ADDITIVE or any(mod_spec in aliases for mod_spec in {MULTIPLICATIVE, ADDITIVE_PARAM}): return np.sum(np.array(values), axis=0) elif isinstance(comb_fct, is_function_type): From 9e18cd9295936cc960dc5d04d4d405929bdb9241 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Sat, 10 Feb 2024 18:21:55 -0500 Subject: [PATCH 018/410] treewide: Use Python's all() instead of Numpy's alltrue The latter is deprecated. Signed-off-by: Jan Vesely --- .../core/components/functions/nonstateful/fitfunctions.py | 2 +- tests/functions/test_memory.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/psyneulink/core/components/functions/nonstateful/fitfunctions.py b/psyneulink/core/components/functions/nonstateful/fitfunctions.py index 0218e382051..abc945816bf 100644 --- a/psyneulink/core/components/functions/nonstateful/fitfunctions.py +++ b/psyneulink/core/components/functions/nonstateful/fitfunctions.py @@ -234,7 +234,7 @@ def simulation_likelihood( # Check to see if any of the trials have non-zero likelihood, if not, something is probably wrong # and we should warn the user. 
- if np.alltrue(kdes_eval == ZERO_PROB): + if all(kdes_eval == ZERO_PROB): warnings.warn( BadLikelihoodWarning( "Evaluating likelihood generated by simulation data resulted in zero values for all trials " diff --git a/tests/functions/test_memory.py b/tests/functions/test_memory.py index c4cc72707d5..b1b7bf64f13 100644 --- a/tests/functions/test_memory.py +++ b/tests/functions/test_memory.py @@ -624,7 +624,7 @@ def test_DictionaryMemory_unique_functions(self, param_name): def retrieve_label_helper(retrieved, stimuli): return [k for k,v in stimuli.items() - if all(np.alltrue(a) + if all(all(a) for a in np.equal(np.array(retrieved, dtype=object), np.array(v, dtype=object), dtype=object))] or [None] @@ -975,7 +975,7 @@ def test_ContentAddressableMemory_without_initializer_and_equal_field_sizes(self retrieved_label = retrieve_label_helper(retrieved, stimuli) assert retrieved_label == [None] expected = np.array([np.array([0,0,0]),np.array([0,0,0])]) - assert all(np.alltrue(x) for x in np.equal(expected,retrieved, dtype=object)) + assert all(all(x) for x in np.equal(expected,retrieved, dtype=object)) def test_ContentAddressableMemory_without_initializer_and_diff_field_sizes(self): From a967ba56605bef9e042d9b583295ca72e6b754db Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Sat, 10 Feb 2024 23:08:40 -0500 Subject: [PATCH 019/410] tests/IntegratorMechanism: Check shape of results as well as the values Use np.testing.assert_array_equal to check exact results. Avoid casting to single element arrays to scalars. Signed-off-by: Jan Vesely --- tests/mechanisms/test_integrator_mechanism.py | 89 +++++++++---------- 1 file changed, 43 insertions(+), 46 deletions(-) diff --git a/tests/mechanisms/test_integrator_mechanism.py b/tests/mechanisms/test_integrator_mechanism.py index 0e7f2519658..22c950bfdea 100644 --- a/tests/mechanisms/test_integrator_mechanism.py +++ b/tests/mechanisms/test_integrator_mechanism.py @@ -653,8 +653,8 @@ def test_integrator_input_float(self): ) ) # P = Process(pathway=[I]) - val = float(I.execute(10.0)) - assert val == 10.0 + val = I.execute(10.0) + np.testing.assert_array_equal(val, [[10.0]]) # input = list of length 1 @@ -667,8 +667,8 @@ def test_integrator_input_list(self): ) ) # P = Process(pathway=[I]) - val = float(I.execute([10.0])) - assert val == 10.0 + val = I.execute([10.0]) + np.testing.assert_array_equal(val, [[10.0]]) # input = list of length 5 @@ -764,8 +764,8 @@ def test_integrator_type_simple_rate_float(self): ) ) # P = Process(pathway=[I]) - val = float(I.execute(10.0)) - assert val == 50.0 + val = I.execute(10.0) + np.testing.assert_array_equal(val, [[50.0]]) # rate = float, increment = float, integration_type = accumulator @@ -780,9 +780,9 @@ def test_integrator_type_accumulator_rate_and_increment_float(self): ) ) # P = Process(pathway=[I]) - float(I.execute()) - val = float(I.execute()) - assert val == 9.0 + I.execute() + val = I.execute() + np.testing.assert_array_equal(val, [[9.0]]) # rate = float, integration_type = diffusion @@ -797,7 +797,7 @@ def test_integrator_type_diffusion_rate_float(self): ) # P = Process(pathway=[I]) val = I.execute(10.0) - np.testing.assert_allclose([[50.0], [1.0]], val) + np.testing.assert_array_equal([[50.0], [1.0]], val) # rate = list, integration_type = simple @@ -812,8 +812,8 @@ def test_integrator_type_simple_rate_list(self): ) ) # P = Process(pathway=[I]) - val = list(I.execute([10.0, 10.0, 10.0])[0]) - assert val == [50.0, 50.0, 50.0] + val = I.execute([10.0, 10.0, 10.0]) + np.testing.assert_array_equal(val, [[50.0, 50.0, 
50.0]]) # rate = float, increment = list, integration_type = accumulator @@ -829,9 +829,9 @@ def test_integrator_type_accumulator_rate_float_increment_list(self): ) ) # P = Process(pathway=[I]) - list(I.execute([10.0, 10.0, 10.0])[0]) - val = list(I.execute([10.0, 10.0, 10.0])[0]) - assert val == [12.0, 15.0, 18.0] + I.execute([10.0, 10.0, 10.0]) + val = I.execute([10.0, 10.0, 10.0]) + np.testing.assert_array_equal(val, [[12.0, 15.0, 18.0]]) # rate = float, increment = list, integration_type = accumulator @@ -847,9 +847,9 @@ def test_integrator_type_accumulator_rate_list_increment_float(self): ) ) # P = Process(pathway=[I]) - list(I.execute([10.0, 10.0, 10.0])[0]) - val = list(I.execute([10.0, 10.0, 10.0])[0]) - assert val == [15.0, 20.0, 25.0] + I.execute([10.0, 10.0, 10.0]) + val = I.execute([10.0, 10.0, 10.0]) + np.testing.assert_array_equal(val, [[15.0, 20.0, 25.0]]) # rate = list, increment = list, integration_type = accumulator @@ -865,9 +865,9 @@ def test_integrator_type_accumulator_rate_and_increment_list(self): ) ) # P = Process(pathway=[I]) - list(I.execute([10.0, 10.0, 10.0])[0]) - val = list(I.execute([10.0, 10.0, 10.0])[0]) - assert val == [8.0, 15.0, 24.0] + I.execute([10.0, 10.0, 10.0]) + val = I.execute([10.0, 10.0, 10.0]) + np.testing.assert_array_equal(val, [[8.0, 15.0, 24.0]]) # rate = list, integration_type = diffusion @@ -898,8 +898,8 @@ def test_integrator_type_adaptive_rate_list(self): ) ) # P = Process(pathway=[I]) - val = list(I.execute([10.0, 10.0, 10.0])[0]) - assert val == [5.0, 5.0, 5.0] + val = I.execute([10.0, 10.0, 10.0]) + np.testing.assert_array_equal(val, [[5.0, 5.0, 5.0]]) # rate = float, integration_type = modulatory @@ -914,8 +914,8 @@ def test_integrator_type_adaptive_rate_float_input_list(self): ) ) # P = Process(pathway=[I]) - val = list(I.execute([10.0, 10.0, 10.0])[0]) - assert val == [5.0, 5.0, 5.0] + val = I.execute([10.0, 10.0, 10.0]) + np.testing.assert_array_equal(val, [[5.0, 5.0, 5.0]]) # rate = float, integration_type = modulatory @@ -929,8 +929,8 @@ def test_integrator_type_adaptive_rate_float(self): ) ) # P = Process(pathway=[I]) - val = list(I.execute(10.0)) - assert val == [5.0] + val = I.execute(10.0) + np.testing.assert_array_equal(val, [[5.0]]) # INVALID RATE: @@ -1043,14 +1043,14 @@ def test_integrator_simple_noise_fn(self): ), ) - val = float(I.execute(10)) + val = I.execute(10) I.function.reset(5.0) - val2 = float(I.execute(0)) + val2 = I.execute(0) - np.testing.assert_allclose(val, 11.00018002983055) - np.testing.assert_allclose(val2, 7.549690404329112) + np.testing.assert_allclose(val, [[11.00018002983055]]) + np.testing.assert_allclose(val2, [[7.549690404329112]]) @pytest.mark.mechanism @pytest.mark.integrator_mechanism @@ -1061,9 +1061,9 @@ def test_integrator_simple_noise_fn_noise_list(self): noise=[NormalDist()] ), ) - val = float(I.execute(10)) + val = I.execute(10) - np.testing.assert_allclose(val, 10.302846) + np.testing.assert_allclose(val, [[10.302846]]) @pytest.mark.mechanism @pytest.mark.integrator_mechanism @@ -1105,9 +1105,8 @@ def test_integrator_simple_noise_fn_var_list(self): ), ) - val = I.execute([10, 10, 10, 10])[0] - - np.testing.assert_allclose(val, [11.10887925, 9.0840107, 10.30157835, 10.65375815]) + val = I.execute([10, 10, 10, 10]) + np.testing.assert_allclose(val, [[11.10887925, 9.0840107, 10.30157835, 10.65375815]]) @pytest.mark.mechanism @pytest.mark.integrator_mechanism @@ -1119,9 +1118,8 @@ def test_integrator_accumulator_noise_fn(self): ), ) - val = float(I.execute(10)) - - 
np.testing.assert_allclose(val, 1.00018) + val = I.execute(10) + np.testing.assert_allclose(val, [[1.00018]]) @pytest.mark.mechanism @pytest.mark.integrator_mechanism @@ -1134,8 +1132,8 @@ def test_integrator_accumulator_noise_fn_var_list(self): ), ) - val = I.execute([10, 10, 10, 10])[0] - np.testing.assert_allclose(val, [1.10887925, -0.9159893, 0.30157835, 0.65375815]) + val = I.execute([10, 10, 10, 10]) + np.testing.assert_allclose(val, [[1.10887925, -0.9159893, 0.30157835, 0.65375815]]) @pytest.mark.mechanism @pytest.mark.integrator_mechanism @@ -1147,9 +1145,9 @@ def test_integrator_adaptive_noise_fn(self): ), ) - val = float(I.execute(10)) + val = I.execute(10) - np.testing.assert_allclose(val, 11.00018002983055) + np.testing.assert_allclose(val, [[11.00018002983055]]) @pytest.mark.mechanism @pytest.mark.integrator_mechanism @@ -1162,9 +1160,8 @@ def test_integrator_adaptive_noise_fn_var_list(self): ), ) - val = I.execute([10, 10, 10, 10])[0] - - np.testing.assert_allclose(val, [11.10887925, 9.0840107, 10.30157835, 10.65375815]) + val = I.execute([10, 10, 10, 10]) + np.testing.assert_allclose(val, [[11.10887925, 9.0840107, 10.30157835, 10.65375815]]) @pytest.mark.mechanism @pytest.mark.integrator_mechanism From 9ba7b364b9f7b6c5a7b022be1f74a488d3afccb3 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Sun, 11 Feb 2024 13:02:12 -0500 Subject: [PATCH 020/410] tests/DDM: Check shape of results as well as the values Use np.testing.assert_array_equal to check exact results. Avoid casting to single element arrays to scalars. Signed-off-by: Jan Vesely --- tests/mechanisms/test_ddm_mechanism.py | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/tests/mechanisms/test_ddm_mechanism.py b/tests/mechanisms/test_ddm_mechanism.py index e3f306b0f81..c82c870d05b 100644 --- a/tests/mechanisms/test_ddm_mechanism.py +++ b/tests/mechanisms/test_ddm_mechanism.py @@ -266,8 +266,8 @@ def test_DDM_Integrator_Bogacz(benchmark, mech_mode, prng): ex = pytest.helpers.get_mech_execution(T, mech_mode) ex(stim) - val = benchmark(ex, stim)[0] - np.testing.assert_allclose(val, [1.0]) + val = benchmark(ex, stim) + np.testing.assert_allclose(val, [[1.0], [0.3]]) # ------------------------------------------------------------------------------------------------ # # TEST 3 @@ -284,8 +284,8 @@ def test_DDM_Integrator_Bogacz(benchmark, mech_mode, prng): # name='DDM', # function=NavarroAndFuss() # ) -# val = float(T.execute(stim)[0]) -# assert val == 10 +# val = T.execute(stim) +# np.testing.assert_array_equal(val, [[10]]) # ======================================= NOISE TESTS ============================================ @@ -339,7 +339,7 @@ def test_DDM_noise_invalid(noise): time_step_size=1.0 ), ) - float(T.execute(stim)[0]) + T.execute(stim) assert "DriftDiffusionIntegrator requires noise parameter to be a float" in str(error_text.value) # ======================================= INPUT TESTS ============================================ @@ -362,8 +362,8 @@ def test_DDM_input(stim): ), execute_until_finished=False, ) - val = float(T.execute(stim)[0]) - assert val == 10 + val = T.execute(stim) + np.testing.assert_array_equal(val, [[10], [1]]) # ------------------------------------------------------------------------------------------------ @@ -388,7 +388,7 @@ def test_DDM_input_list_len_2(): ), execute_until_finished=False, ) - float(T.execute(stim)[0]) + T.execute(stim) assert "single numeric item" in str(error_text.value) # 
------------------------------------------------------------------------------------------------ @@ -413,7 +413,7 @@ def test_DDM_input_fn(): ), execute_until_finished=False, ) - float(T.execute(stim)) + T.execute(stim) assert 'Input to \'DDM\' ([(NormalDist Normal Distribution Function' in str(error_text.value) assert 'is incompatible with its corresponding InputPort (DDM[InputPort-0]): ' \ '\'unsupported operand type(s) for *: \'NormalDist\' and \'float\'.\'' in str(error_text.value) @@ -445,8 +445,8 @@ def test_DDM_rate(benchmark, rate, expected, mech_mode): ex = pytest.helpers.get_mech_execution(T, mech_mode) ex(stim) - val = float(benchmark(ex, stim)[0][0]) - assert val == expected + val = benchmark(ex, stim) + np.testing.assert_array_equal(val, [[expected], [2]]) # ------------------------------------------------------------------------------------------------ # INVALID RATES: @@ -475,7 +475,7 @@ def test_DDM_rate_fn(): ), execute_until_finished=False, ) - float(T.execute(stim)[0]) + T.execute(stim) assert "incompatible value" in str(error_text.value) # ------------------------------------------------------------------------------------------------ From b9e42f43ff0bb79b0b628c51a4c6c99f644c9556 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Wed, 14 Feb 2024 16:47:16 -0500 Subject: [PATCH 021/410] broken_trans_deps: Block beartype==0.17.1 (#2914) https://github.com/beartype/beartype/issues/324 Signed-off-by: Jan Vesely --- broken_trans_deps.txt | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/broken_trans_deps.txt b/broken_trans_deps.txt index eb6372f61a2..f7b44a2bbe0 100644 --- a/broken_trans_deps.txt +++ b/broken_trans_deps.txt @@ -30,6 +30,10 @@ cattrs != 23.1.1; python_version < '3.8' # https://github.com/python-attrs/cattrs/issues/453 cattrs != 23.2.1, != 23.2.2 +# beartype 0.17.1 is broken on older releases of python3.9 +# https://github.com/beartype/beartype/issues/324 +beartype != 0.17.1; python_version == '3.9' + # The following need at least sphinx-5 without indicating it in dependencies: # * sphinxcontrib-applehelp >=1.0.8, # * sphinxcontrib-devhelp >=1.0.6, From 65afadb72c82bf57be0b505e9579fb3453dd8819 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 17 Feb 2024 12:06:06 -0500 Subject: [PATCH 022/410] requirements: update pytest requirement from <8.0.1 to <8.0.2 (#2917) Updates the requirements on [pytest](https://github.com/pytest-dev/pytest) to permit the latest version. - [Release notes](https://github.com/pytest-dev/pytest/releases/tag/8.0.1) - [Changelog](https://github.com/pytest-dev/pytest/blob/main/CHANGELOG.rst) - [Commits](https://github.com/pytest-dev/pytest/compare/8.0.0...8.0.1) --- updated-dependencies: - dependency-name: pytest dependency-type: direct:development ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- dev_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev_requirements.txt b/dev_requirements.txt index 3683bd99fa2..5166b6b409b 100644 --- a/dev_requirements.txt +++ b/dev_requirements.txt @@ -1,6 +1,6 @@ jupyter<1.0.1 packaging<24.0 -pytest<8.0.1 +pytest<8.0.2 pytest-benchmark<4.0.1 pytest-cov<4.1.1 pytest-forked<1.7.0 From 7295ef0ac35f7bf263f82da0b4473404b11828bd Mon Sep 17 00:00:00 2001 From: David Turner Date: Tue, 20 Feb 2024 14:48:09 -0500 Subject: [PATCH 023/410] WIP for support of conditional parameters --- .../stability_flexibility_cond.py | 158 ++++++++++++ .../functions/nonstateful/fitfunctions.py | 44 +--- .../parameterestimationcomposition.py | 52 +++- .../test_parameterestimationcomposition.py | 224 +++++++++--------- 4 files changed, 318 insertions(+), 160 deletions(-) create mode 100644 Scripts/Debug/stability_flexibility/stability_flexibility_cond.py diff --git a/Scripts/Debug/stability_flexibility/stability_flexibility_cond.py b/Scripts/Debug/stability_flexibility/stability_flexibility_cond.py new file mode 100644 index 00000000000..c2ffea7ce25 --- /dev/null +++ b/Scripts/Debug/stability_flexibility/stability_flexibility_cond.py @@ -0,0 +1,158 @@ +#%% +import sys +import numpy as np +import psyneulink as pnl +import pandas as pd + +import psyneulink.core.llvm as pnllvm +from psyneulink.core.globals.utilities import set_global_seed + +sys.path.append(".") + +from stability_flexibility import make_stab_flex, generate_trial_sequence + +# Let's make things reproducible +pnl_seed = 0 +set_global_seed(pnl_seed) +trial_seq_seed = 0 + +# High-level parameters the impact performance of the test +num_trials = 12 +time_step_size = 0.01 +num_estimates = 3 + +sf_params = dict( + gain=3.0, + leak=3.0, + competition=2.0, + lca_time_step_size=time_step_size, + non_decision_time=0.2, + automaticity=0.01, + starting_value=0.0, + threshold=0.1, + ddm_noise=0.1, + lca_noise=0.0, + scale=0.2, + ddm_time_step_size=time_step_size, +) + +# Generate some sample data to run the model on +taskTrain, stimulusTrain, cueTrain, correctResponse = generate_trial_sequence(240, 0.5, seed=trial_seq_seed) +taskTrain = taskTrain[0:num_trials] +stimulusTrain = stimulusTrain[0:num_trials] +cueTrain = cueTrain[0:num_trials] +correctResponse = correctResponse[0:num_trials] + +# CSI is in terms of time steps, we need to scale by ten because original code +# was set to run with timestep size of 0.001 +cueTrain = [c / 10.0 for c in cueTrain] + +# Make a stability flexibility composition +comp = make_stab_flex(**sf_params) + +# Let's run the model with some sample data +taskLayer = comp.nodes["Task Input [I1, I2]"] +stimulusInfo = comp.nodes["Stimulus Input [S1, S2]"] +cueInterval = comp.nodes["Cue-Stimulus Interval"] +correctInfo = comp.nodes["Correct Response Info"] + +inputs = { + taskLayer: [[np.array(taskTrain[i])] for i in range(num_trials)], + stimulusInfo: [[np.array(stimulusTrain[i])] for i in range(num_trials)], + cueInterval: [[np.array([cueTrain[i]])] for i in range(num_trials)], + correctInfo: [[np.array([correctResponse[i]])] for i in range(num_trials)] +} + +# comp.run(inputs, execution_mode=pnl.ExecutionMode.LLVMRun) +# pnllvm.cleanup() + +#%% +# Create a parameter estimation composition to fit the data we just generated and hopefully recover the +# parameters of the composition. 
+ +controlModule = comp.nodes["Task Activations [Act1, Act2]"] +congruenceWeighting = comp.nodes["Automaticity-weighted Stimulus Input [w*S1, w*S2]"] +decisionMaker = comp.nodes["DDM"] +decisionGate = comp.nodes["DECISION_GATE"] +responseGate = comp.nodes["RESPONSE_GATE"] + +fit_parameters = { + ("gain", controlModule): np.linspace(1.0, 10.0, 1000), # Gain + ("slope", congruenceWeighting): np.linspace(0.0, 0.1, 1000), # Automaticity + ("threshold", decisionMaker): np.linspace(0.01, 0.5, 1000), # Threshold + ("non_decision_time", decisionMaker): np.linspace(0.1, 0.4, 1000), # Threshold +} + +#%% +# For each parameter, we will add a control mechanism to the composition that overrides the parameter with a value +# from the input. +pec_mechs = {} +for (name, mech), values in fit_parameters.items(): + pec_mechs[(name, mech)] = pnl.ControlMechanism(name=f"{name}_control", + control_signals=[(name, mech)], + modulation=pnl.OVERRIDE) + comp.add_node(pec_mechs[(name, mech)]) + +#%% +print("Running inner composition to generate data to fit for parameter recovery test.") +comp.run(inputs, execution_mode=pnl.ExecutionMode.LLVMRun) +results = comp.results + +print("Setting up PEC") + +data_to_fit = pd.DataFrame( + np.squeeze(np.array(results))[:, 1:], columns=["decision", "response_time"] +) +data_to_fit["decision"] = data_to_fit["decision"].astype("category") + +#%% +inputs_with_params = {**inputs} +for (name, mech), con_mech in pec_mechs.items(): + if name == "slope": + value = sf_params['automaticity'] + elif name == "non_decision_time": + value = 5.0 + else: + value = sf_params[name] + + inputs_with_params[con_mech] = value + +comp.results.clear() +comp.run(inputs_with_params, execution_mode=pnl.ExecutionMode.LLVMRun) +results = comp.results + +#%% + +# pec = pnl.ParameterEstimationComposition( +# name="pec", +# nodes=comp, +# parameters=fit_parameters, +# outcome_variables=[ +# decisionGate.output_ports[0], +# responseGate.output_ports[0], +# ], +# data=data_to_fit, +# optimization_function='differential_evolution', +# num_estimates=num_estimates, +# ) +# +# # pec.controller.parameters.comp_execution_mode.set("LLVM") +# pec.controller.function.parameters.save_values.set(True) +# +# print("Running the PEC") +# ret = pec.run(inputs=inputs) +# optimal_parameters = pec.optimized_parameter_values +# +# # Print the recovered parameters. +# records = [] +# for (name, mech), recovered_param in zip(fit_parameters.keys(), optimal_parameters): +# +# if name == "slope": +# true_param = sf_params['automaticity'] +# else: +# true_param = sf_params[name] +# +# percent_error = 100.0 * (abs(true_param - recovered_param) / true_param) +# records.append((name, mech.name, true_param, recovered_param, percent_error)) +# df = pd.DataFrame(records, columns=['Parameter', 'Component', 'Value', 'Recovered Value', 'Percent Error']) +# print(df) diff --git a/psyneulink/core/components/functions/nonstateful/fitfunctions.py b/psyneulink/core/components/functions/nonstateful/fitfunctions.py index 2a7a187ff20..173c8ac3b71 100644 --- a/psyneulink/core/components/functions/nonstateful/fitfunctions.py +++ b/psyneulink/core/components/functions/nonstateful/fitfunctions.py @@ -1,4 +1,5 @@ import copy +import re import optuna.samplers from fastkde import fastKDE @@ -438,29 +439,6 @@ def _run_simulations(self, *args, context=None): # Set the search space to the control allocation. The only thing evaluate is actually "searching" over is the # randomization dimension, which should be the last sample iterator in the search space list. 
- search_space = self.parameters.search_space._get(context) - for i, arg in enumerate(args): - # Map the args in order of the fittable parameters - len_search_space = ( - len(search_space) - if self.owner.num_estimates is None - else len(search_space) - 1 - ) - if i < len_search_space: - assert search_space[i].num == 1, ( - "Search space for this dimension must be a single value, during search " - "we will change the value but not the shape." - ) - - # All of this code is required to set the value of the singleton search space without creating a new - # object. It seems cleaner to just use search_space[i] = SampleIterator([arg]) but this seems to cause - # problems for Jan in compilation. Need to confirm this, maybe its ok as long as size doesn't change. - # We can protect against this with the above assert. - search_space[i].specification = [arg] - search_space[i].generator = search_space[i].specification - search_space[i].start = arg - else: - raise ValueError("Too many arguments passed to run_simulations") # Reset the search grid self.reset_grid() @@ -859,11 +837,12 @@ def progress_callback(study, trial): def fit_param_names(self) -> List[str]: """Get a unique name for each parameter in the fit.""" if self.owner is not None: - return [ - cs.efferents[0].receiver.name - for i, cs in enumerate(self.owner.control_signals) - if i != self.randomization_dimension - ] + # Go through each parameter and create a unique name for it + # If the mechanism name has an invalid character (for a python identifiter), we need to replace + # it with an underscore. + names = [(param_name, re.sub(r"\W|^(?=\d)",'_', mech.name)) + for param_name, mech in self.owner.fit_parameters.keys()] + return [f"{mech_name}_{param_name}" for param_name, mech_name in names] else: return None @@ -879,16 +858,11 @@ def fit_param_bounds(self) -> Dict[str, Tuple[float, float, float]]: """ if self.owner is not None: - acs = [ - cs.specification - for i, cs in enumerate(self._full_search_space) - if i != self.randomization_dimension - ] - bounds = [(float(min(s)), float(max(s))) for s in acs] + bounds = [(float(min(s)), float(max(s))) for s in self.owner.fit_parameters.values()] # Get the step size for each parameter. 
- steps = [np.unique(np.diff(s).round(decimals=5)) for s in acs] + steps = [np.unique(np.diff(s).round(decimals=5)) for s in self.owner.fit_parameters.values()] # We also check if step size is constant, if not we raise an error for s in steps: diff --git a/psyneulink/core/compositions/parameterestimationcomposition.py b/psyneulink/core/compositions/parameterestimationcomposition.py index 1bb05150140..d9465afd23e 100644 --- a/psyneulink/core/compositions/parameterestimationcomposition.py +++ b/psyneulink/core/compositions/parameterestimationcomposition.py @@ -170,6 +170,7 @@ import psyneulink.core.llvm as pnllvm from psyneulink.core.components.shellclasses import Mechanism from psyneulink.core.compositions.composition import Composition, CompositionError +from psyneulink.core.components.mechanisms.modulatory.control.controlmechanism import ControlMechanism from psyneulink.core.components.mechanisms.modulatory.control.optimizationcontrolmechanism import ( OptimizationControlMechanism, ) @@ -557,6 +558,13 @@ def __init__( self.optimized_parameter_values = [] + pec_mechs = {} + for (pname, mech), values in parameters.items(): + pec_mechs[(pname, mech)] = ControlMechanism(name=f"{pname}_control", + control_signals=[(pname, mech)], + modulation=OVERRIDE) + self.model.add_node(pec_mechs[(pname, mech)]) + super().__init__( name=name, controller_mode=BEFORE, @@ -757,16 +765,6 @@ def _instantiate_ocm( same_seed_for_all_parameter_combinations, context=None, ): - # # Parse **parameters** into ControlSignals specs - control_signals = [] - for param, allocation in parameters.items(): - control_signals.append( - ControlSignal( - modulates=param, - modulation=OVERRIDE, - allocation_samples=allocation, - ) - ) # For the PEC, the objective mechanism is not needed because in context of optimization of data fitting # we require all trials (and number of estimates) to compute the scalar objective value. In data fitting @@ -821,9 +819,12 @@ def f(sim_data): # indices it needs from composition output. This needs to be passed down from the PEC. optimization_function.outcome_variable_indices = self._outcome_variable_indices + control_signals = None + outcome_variables = None ocm = PEC_OCM( agent_rep=agent_rep, monitor_for_control=outcome_variables, + fit_parameters=parameters, allow_probes=True, objective_mechanism=objective_mechanism, function=optimization_function, @@ -855,9 +856,17 @@ def run(self, *args, **kwargs): # Capture the input passed to run and pass it on to the OCM assert self.controller is not None - self.controller.set_pec_inputs_cache( - kwargs.get("inputs", None if not args else args[0]) - ) + + # Get the inputs + inputs = kwargs.get("inputs", None if not args else args[0]) + + # Since we are passing fitting\optimazation parameters as inputs we need add them to the inputs + if inputs: + params_input = [np.array([v[0]]) for v in self.fit_parameters.values()] + inputs = {self.model: [[trial] + params_input for trial in inputs[self.model]]} + + self.controller.set_pec_inputs_cache(inputs) + # We need to set the inputs for the composition during simulation, by assigning the inputs dict passed in # PEC run() to its controller's state_feature_values (this is in order to accomodate multi-trial inputs # without having the PEC provide them one-by-one to the simulated composition. 
This assumes that the inputs @@ -1013,8 +1022,25 @@ class Parameters(OptimizationControlMechanism.Parameters): def __init__(self, *args, **kwargs): self._pec_input_values = None + + if 'fit_parameters' in kwargs: + self.fit_parameters = kwargs['fit_parameters'] + del kwargs['fit_parameters'] + else: + raise ValueError("PEC_OCM requires that the PEC parameters be passed down to it.") + super().__init__(*args, **kwargs) + def _instantiate_output_ports(self, context=None): + """Assign CostFunctions.DEFAULTS as default for cost_option of ControlSignals. + """ + + # The only control signal that we need for the PEC is the randomization control signal. All other parameter + # values will be passed through the inputs. This allows for trial-wise conditional parameter values to be + # passed to the composition being fit or optimized. + self.parameters.output_ports._set([], context) + self._create_randomization_control_signal(context) + def set_pec_inputs_cache(self, inputs_dict: dict) -> dict: """Cache input values passed to the last call of run for the composition that this OCM controls. This method is used by the ParamterEstimationComposition in its run() method. diff --git a/tests/composition/test_parameterestimationcomposition.py b/tests/composition/test_parameterestimationcomposition.py index 947b453c146..5489b489562 100644 --- a/tests/composition/test_parameterestimationcomposition.py +++ b/tests/composition/test_parameterestimationcomposition.py @@ -10,118 +10,118 @@ ) -input_node_1 = pnl.ProcessingMechanism(size=1) -input_node_2 = pnl.ProcessingMechanism(size=2) -input_node_3 = pnl.ProcessingMechanism(size=3) -output_node = pnl.ProcessingMechanism(size=2) -model = pnl.Composition( - [{input_node_1, input_node_2, input_node_3}, output_node], name="model" -) -pec = pnl.ParameterEstimationComposition( - name="pec", - model=model, - parameters={("slope", output_node): np.linspace(1.0, 3.0, 3)}, - outcome_variables=output_node, - objective_function=lambda x: np.sum(x), - optimization_function=PECOptimizationFunction(method="differential_evolution"), -) -run_input_test_args = [ - pytest.param( - { - model: [ - [np.array([1.0]), np.array([2.0, 3.0, 4.0]), np.array([5.0, 6.0])], - [np.array([7.0]), np.array([8.0, 9.0, 10.0]), np.array([11.0, 12.0])], - [ - np.array([13.0]), - np.array([14.0, 15.0, 16.0]), - np.array([17.0, 18.0]), - ], - [ - np.array([19.0]), - np.array([20.0, 21.0, 22.0]), - np.array([23.0, 24.0]), - ], - ] - }, - None, - id="pec_good", - ), - pytest.param( - { - model: [ - [np.array([1.0]), np.array([2.0, 3.0, 4.0])], - [np.array([7.0]), np.array([8.0, 9.0, 10.0]), np.array([11.0, 12.0])], - [ - np.array([13.0]), - np.array([14.0, 15.0, 16.0]), - np.array([17.0, 18.0]), - ], - [ - np.array([19.0]), - np.array([20.0, 21.0, 22.0]), - np.array([23.0, 24.0]), - ], - ] - }, - f"The array in the dict specified for the 'inputs' arg of pec.run() is badly formatted: " - f"the length of each item in the outer dimension (a trial's worth of inputs) " - f"must be equal to the number of inputs to 'model' (3).", - id="pec_bad", - ), - pytest.param( - { - input_node_1: [ - [np.array([1.0])], - [np.array([7.0])], - [np.array([13.0])], - [np.array([19.0])], - ], - input_node_2: [ - [np.array([2.0, 3.0, 4])], - [np.array([8.0, 9.0, 10.0])], - [np.array([14.0, 15.0, 16.0])], - [np.array([20.0, 21.0, 22.0])], - ], - input_node_3: [ - [np.array([5.0, 6.0])], - [np.array([11.0, 12.0])], - [np.array([17.0, 18.0])], - [np.array([23.0, 24.0])], - ], - }, - None, - id="model_good", - ), - pytest.param( - { 
- input_node_1: [ - [np.array([1.0])], - [np.array([7.0])], - [np.array([13.0])], - [np.array([19.0])], - ], - input_node_2: [ - [np.array([2.0, 3.0, 4])], - [np.array([8.0, 9.0, 10.0])], - [np.array([14.0, 15.0, 16.0])], - [np.array([20.0, 21.0, 22.0])], - ], - }, - f"The dict specified in the `input` arg of pec.run() is badly formatted: " - f"the number of entries should equal the number of inputs to 'model' (3).", - id="model_bad", - ), -] - - -@pytest.mark.parametrize("inputs_dict, error_msg", run_input_test_args) -def test_pec_run_input_formats(inputs_dict, error_msg): - if error_msg: - with pytest.raises(pnl.ParameterEstimationCompositionError) as error: - pec.run(inputs=inputs_dict) - assert error.value.args[0] == error_msg - else: - pec.run(inputs=inputs_dict) +# input_node_1 = pnl.ProcessingMechanism(size=1) +# input_node_2 = pnl.ProcessingMechanism(size=2) +# input_node_3 = pnl.ProcessingMechanism(size=3) +# output_node = pnl.ProcessingMechanism(size=2) +# model = pnl.Composition( +# [{input_node_1, input_node_2, input_node_3}, output_node], name="model" +# ) +# pec = pnl.ParameterEstimationComposition( +# name="pec", +# model=model, +# parameters={("slope", output_node): np.linspace(1.0, 3.0, 3)}, +# outcome_variables=output_node, +# objective_function=lambda x: np.sum(x), +# optimization_function=PECOptimizationFunction(method="differential_evolution"), +# ) +# run_input_test_args = [ +# pytest.param( +# { +# model: [ +# [np.array([1.0]), np.array([2.0, 3.0, 4.0]), np.array([5.0, 6.0])], +# [np.array([7.0]), np.array([8.0, 9.0, 10.0]), np.array([11.0, 12.0])], +# [ +# np.array([13.0]), +# np.array([14.0, 15.0, 16.0]), +# np.array([17.0, 18.0]), +# ], +# [ +# np.array([19.0]), +# np.array([20.0, 21.0, 22.0]), +# np.array([23.0, 24.0]), +# ], +# ] +# }, +# None, +# id="pec_good", +# ), +# pytest.param( +# { +# model: [ +# [np.array([1.0]), np.array([2.0, 3.0, 4.0])], +# [np.array([7.0]), np.array([8.0, 9.0, 10.0]), np.array([11.0, 12.0])], +# [ +# np.array([13.0]), +# np.array([14.0, 15.0, 16.0]), +# np.array([17.0, 18.0]), +# ], +# [ +# np.array([19.0]), +# np.array([20.0, 21.0, 22.0]), +# np.array([23.0, 24.0]), +# ], +# ] +# }, +# f"The array in the dict specified for the 'inputs' arg of pec.run() is badly formatted: " +# f"the length of each item in the outer dimension (a trial's worth of inputs) " +# f"must be equal to the number of inputs to 'model' (3).", +# id="pec_bad", +# ), +# pytest.param( +# { +# input_node_1: [ +# [np.array([1.0])], +# [np.array([7.0])], +# [np.array([13.0])], +# [np.array([19.0])], +# ], +# input_node_2: [ +# [np.array([2.0, 3.0, 4])], +# [np.array([8.0, 9.0, 10.0])], +# [np.array([14.0, 15.0, 16.0])], +# [np.array([20.0, 21.0, 22.0])], +# ], +# input_node_3: [ +# [np.array([5.0, 6.0])], +# [np.array([11.0, 12.0])], +# [np.array([17.0, 18.0])], +# [np.array([23.0, 24.0])], +# ], +# }, +# None, +# id="model_good", +# ), +# pytest.param( +# { +# input_node_1: [ +# [np.array([1.0])], +# [np.array([7.0])], +# [np.array([13.0])], +# [np.array([19.0])], +# ], +# input_node_2: [ +# [np.array([2.0, 3.0, 4])], +# [np.array([8.0, 9.0, 10.0])], +# [np.array([14.0, 15.0, 16.0])], +# [np.array([20.0, 21.0, 22.0])], +# ], +# }, +# f"The dict specified in the `input` arg of pec.run() is badly formatted: " +# f"the number of entries should equal the number of inputs to 'model' (3).", +# id="model_bad", +# ), +# ] + + +# @pytest.mark.parametrize("inputs_dict, error_msg", run_input_test_args) +# def test_pec_run_input_formats(inputs_dict, error_msg): +# if 
error_msg: +# with pytest.raises(pnl.ParameterEstimationCompositionError) as error: +# pec.run(inputs=inputs_dict) +# assert error.value.args[0] == error_msg +# else: +# pec.run(inputs=inputs_dict) @pytest.mark.parametrize( From 4e59a27d55b830dd702f226cae7931a7723e4aa9 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 26 Feb 2024 13:41:19 -0500 Subject: [PATCH 024/410] requirements: update grpcio requirement from <1.61.0 to <1.63.0 (#2919) Updates the requirements on [grpcio](https://github.com/grpc/grpc) to permit the latest version. - [Release notes](https://github.com/grpc/grpc/releases) - [Changelog](https://github.com/grpc/grpc/blob/master/doc/grpc_release_schedule.md) - [Commits](https://github.com/grpc/grpc/compare/v0.15.0...v1.62.0) --- updated-dependencies: - dependency-name: grpcio dependency-type: direct:production ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index fc7043938c2..df1cfbfed4e 100644 --- a/requirements.txt +++ b/requirements.txt @@ -4,7 +4,7 @@ dill<0.3.9 fastkde>=1.0.24, <1.0.31 graph-scheduler>=1.1.1, <1.3.0 graphviz<0.21.0 -grpcio<1.61.0 +grpcio<1.63.0 leabra-psyneulink<0.3.3 llvmlite<0.43 matplotlib<3.7.3 From ae5067275aa95d9c3a652806be7bd75de19b50a8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 29 Feb 2024 08:26:09 -0500 Subject: [PATCH 025/410] requirements: update pandas requirement from <2.2.1 to <2.2.2 (#2920) Updates the requirements on [pandas](https://github.com/pandas-dev/pandas) to permit the latest version. - [Release notes](https://github.com/pandas-dev/pandas/releases) - [Commits](https://github.com/pandas-dev/pandas/compare/0.3.0...v2.2.1) --- updated-dependencies: - dependency-name: pandas dependency-type: direct:production ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index df1cfbfed4e..35c62318320 100644 --- a/requirements.txt +++ b/requirements.txt @@ -13,7 +13,7 @@ networkx<3.3 numpy>=1.21.0, <1.26.5 optuna<3.4.0 packaging<24.0 -pandas<2.2.1 +pandas<2.2.2 pillow<10.3.0 pint<0.22.0 protobuf<3.20.4 From faba09cd3bfadf4fd2b264b21241c4f3b7369e9a Mon Sep 17 00:00:00 2001 From: David Turner Date: Thu, 29 Feb 2024 16:31:23 -0500 Subject: [PATCH 026/410] WIP for support of conditional parameters --- .../functions/nonstateful/fitfunctions.py | 14 +++- .../parameterestimationcomposition.py | 70 ++++++++++++++++++- 2 files changed, 81 insertions(+), 3 deletions(-) diff --git a/psyneulink/core/components/functions/nonstateful/fitfunctions.py b/psyneulink/core/components/functions/nonstateful/fitfunctions.py index 173c8ac3b71..cd266bdb09c 100644 --- a/psyneulink/core/components/functions/nonstateful/fitfunctions.py +++ b/psyneulink/core/components/functions/nonstateful/fitfunctions.py @@ -437,8 +437,18 @@ def _run_simulations(self, *args, context=None): f"Expected {len(self.fit_param_names)} arguments, got {len(args)}" ) - # Set the search space to the control allocation. The only thing evaluate is actually "searching" over is the - # randomization dimension, which should be the last sample iterator in the search space list. 
+ # Parameter values are passed through the input data. + # Since we are passing fitting\optimization parameters as inputs we need add them to the inputs + # params_input = [np.array([v[0]]) for v in self.fit_parameters.values()] + # inputs = {self.model: [[trial] + params_input for trial in inputs[self.model]]} + # + # self.controller.set_pec_inputs_cache(inputs) + inputs_array = list(self.owner.composition.controller._pec_input_values.values())[0] + for trial in range(len(inputs_array)): + for i, name in enumerate(self.fit_param_names): + start_index = len(inputs_array[trial]) - len(self.fit_param_names) + inputs_array[trial][start_index+i] = np.array([args[i]]) + # Reset the search grid self.reset_grid() diff --git a/psyneulink/core/compositions/parameterestimationcomposition.py b/psyneulink/core/compositions/parameterestimationcomposition.py index d9465afd23e..7e1a20b287c 100644 --- a/psyneulink/core/compositions/parameterestimationcomposition.py +++ b/psyneulink/core/compositions/parameterestimationcomposition.py @@ -168,8 +168,10 @@ from psyneulink._typing import Optional, Union, Dict, List, Callable, Literal import psyneulink.core.llvm as pnllvm +from psyneulink.core.globals.utilities import ContentAddressableList from psyneulink.core.components.shellclasses import Mechanism from psyneulink.core.compositions.composition import Composition, CompositionError +from psyneulink.core.components.ports.port import Port_Base from psyneulink.core.components.mechanisms.modulatory.control.controlmechanism import ControlMechanism from psyneulink.core.components.mechanisms.modulatory.control.optimizationcontrolmechanism import ( OptimizationControlMechanism, @@ -189,6 +191,8 @@ from psyneulink.core.globals.keywords import BEFORE, OVERRIDE from psyneulink.core.globals.parameters import Parameter, check_user_specified from psyneulink.core.globals.utilities import convert_to_list +from psyneulink.core.globals.defaults import defaultControlAllocation + from psyneulink.core.scheduling.time import TimeScale from psyneulink.core.components.ports.outputport import OutputPort @@ -1038,7 +1042,8 @@ def _instantiate_output_ports(self, context=None): # The only control signal that we need for the PEC is the randomization control signal. All other parameter # values will be passed through the inputs. This allows for trial-wise conditional parameter values to be # passed to the composition being fit or optimized. - self.parameters.output_ports._set([], context) + output_ports = ContentAddressableList(component_type=Port_Base) + self.parameters.output_ports._set(output_ports, context) self._create_randomization_control_signal(context) def set_pec_inputs_cache(self, inputs_dict: dict) -> dict: @@ -1096,3 +1101,66 @@ def set_pec_inputs_cache(self, inputs_dict: dict) -> dict: inputs_dict = {model: input_values} self._pec_input_values = inputs_dict + + def _execute(self, variable=None, context=None, runtime_params=None)->np.ndarray: + """Return control_allocation that optimizes net_outcome of agent_rep.evaluate(). 
+ """ + + if self.is_initializing: + return [defaultControlAllocation] + + # Assign default control_allocation if it is not yet specified (presumably first trial) + control_allocation = self.parameters.control_allocation._get(context) + if control_allocation is None: + control_allocation = [c.defaults.variable for c in self.control_signals] + + # Give the agent_rep a chance to adapt based on last trial's state_feature_values and control_allocation + if hasattr(self.agent_rep, "adapt"): + # KAM 4/11/19 switched from a try/except to hasattr because in the case where we don't + # have an adapt method, we also don't need to call the net_outcome getter + net_outcome = self.parameters.net_outcome._get(context) + + self.agent_rep.adapt(self.parameters.state_feature_values._get(context), + control_allocation, + net_outcome, + context=context) + + # freeze the values of current context, because they can be changed in between simulations, + # and the simulations must start from the exact spot + frozen_context = self._get_frozen_context(context) + + alt_controller = None + if self.agent_rep.controller is None: + try: + alt_controller = context.composition.controller + except AttributeError: + pass + + self.agent_rep._initialize_as_agent_rep( + frozen_context, base_context=context, alt_controller=alt_controller + ) + + # Get control_allocation that optimizes net_outcome using OptimizationControlMechanism's function + # IMPLEMENTATION NOTE: skip ControlMechanism._execute since it is a stub method that returns input_values + optimal_control_allocation, optimal_net_outcome, saved_samples, saved_values = \ + super(ControlMechanism,self)._execute( + variable=control_allocation, + num_estimates=self.parameters.num_estimates._get(context), + context=context, + runtime_params=runtime_params + ) + + # clean up frozen values after execution + self.agent_rep._clean_up_as_agent_rep(frozen_context, alt_controller=alt_controller) + + if self.function.save_samples: + self.saved_samples = saved_samples + if self.function.save_values: + self.saved_values = saved_values + + self.optimal_control_allocation = optimal_control_allocation + self.optimal_net_outcome = optimal_net_outcome + + # Return optimal control_allocation formatted as 2d array + return [defaultControlAllocation] + From b309dd65aed25044fb2bdd654225269a17c0a82d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 29 Feb 2024 21:40:28 -0500 Subject: [PATCH 027/410] requirements: update pytest requirement from <8.0.2 to <8.0.3 (#2921) Updates the requirements on [pytest](https://github.com/pytest-dev/pytest) to permit the latest version. - [Release notes](https://github.com/pytest-dev/pytest/releases) - [Changelog](https://github.com/pytest-dev/pytest/blob/main/CHANGELOG.rst) - [Commits](https://github.com/pytest-dev/pytest/compare/8.0.1...8.0.2) --- updated-dependencies: - dependency-name: pytest dependency-type: direct:development ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- dev_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev_requirements.txt b/dev_requirements.txt index 5166b6b409b..183aa0f4b3f 100644 --- a/dev_requirements.txt +++ b/dev_requirements.txt @@ -1,6 +1,6 @@ jupyter<1.0.1 packaging<24.0 -pytest<8.0.2 +pytest<8.0.3 pytest-benchmark<4.0.1 pytest-cov<4.1.1 pytest-forked<1.7.0 From ff4f32bc242735d857a451f2ab1f1054fd56e28c Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Fri, 1 Mar 2024 15:07:08 -0500 Subject: [PATCH 028/410] tests/TransferMechanism: Use assert_array_equal to check array shape and values (#2922) Signed-off-by: Jan Vesely --- tests/mechanisms/test_transfer_mechanism.py | 36 ++++++++++++--------- 1 file changed, 21 insertions(+), 15 deletions(-) diff --git a/tests/mechanisms/test_transfer_mechanism.py b/tests/mechanisms/test_transfer_mechanism.py index 1873e5dcae7..a84dbedaca3 100644 --- a/tests/mechanisms/test_transfer_mechanism.py +++ b/tests/mechanisms/test_transfer_mechanism.py @@ -955,7 +955,7 @@ def test_transfer_mech_size_int_check_var(self): name='T', size=4 ) - assert len(T.defaults.variable) == 1 and (T.defaults.variable[0] == [0., 0., 0., 0.]).all() + np.testing.assert_array_equal(T.defaults.variable, [[0, 0, 0, 0]]) assert len(T.size) == 1 and T.size[0] == 4 and isinstance(T.size[0], np.integer) @@ -967,7 +967,7 @@ def test_transfer_mech_size_int_inputs_ints(self): size=4 ) val = T.execute([10, 10, 10, 10]) - np.testing.assert_allclose(val, [[10.0, 10.0, 10.0, 10.0]]) + np.testing.assert_array_equal(val, [[10.0, 10.0, 10.0, 10.0]]) # ------------------------------------------------------------------------------------------------ # TEST 3 @@ -981,7 +981,7 @@ def test_transfer_mech_size_int_inputs_floats(self): size=VECTOR_SIZE ) val = T.execute([10.0 for i in range(VECTOR_SIZE)]) - np.testing.assert_allclose(val, [[10.0 for i in range(VECTOR_SIZE)]]) + np.testing.assert_array_equal(val, [[10.0 for i in range(VECTOR_SIZE)]]) # ------------------------------------------------------------------------------------------------ # TEST 4 @@ -1009,7 +1009,7 @@ def test_transfer_mech_size_float_inputs_check_var(self): name='T', size=4.0, ) - assert len(T.defaults.variable) == 1 and (T.defaults.variable[0] == [0., 0., 0., 0.]).all() + np.testing.assert_array_equal(T.defaults.variable, [[0, 0, 0, 0]]) assert len(T.size == 1) and T.size[0] == 4.0 and isinstance(T.size[0], np.integer) # ------------------------------------------------------------------------------------------------ @@ -1024,7 +1024,7 @@ def test_transfer_mech_size_float_inputs_ints(self): size=4.0 ) val = T.execute([10, 10, 10, 10]) - np.testing.assert_allclose(val, [[10.0, 10.0, 10.0, 10.0]]) + np.testing.assert_array_equal(val, [[10.0, 10.0, 10.0, 10.0]]) # ------------------------------------------------------------------------------------------------ # TEST 7 @@ -1038,7 +1038,7 @@ def test_transfer_mech_size_float_inputs_floats(self): size=4.0 ) val = T.execute([10.0, 10.0, 10.0, 10.0]) - np.testing.assert_allclose(val, [[10.0, 10.0, 10.0, 10.0]]) + np.testing.assert_array_equal(val, [[10.0, 10.0, 10.0, 10.0]]) # ------------------------------------------------------------------------------------------------ # TEST 8 @@ -1092,7 +1092,9 @@ def test_transfer_mech_size_var_both_lists(self): size=[2., 3.], default_variable=[[1, 2], [3, 4, 5]] ) - assert len(T.defaults.variable) == 2 and (T.defaults.variable[0] == [1, 
2]).all() and (T.defaults.variable[1] == [3, 4, 5]).all() + assert len(T.defaults.variable) == 2 + np.testing.assert_array_equal(T.defaults.variable[0], [1, 2]) + np.testing.assert_array_equal(T.defaults.variable[1], [3, 4, 5]) # ------------------------------------------------------------------------------------------------ # TEST 12 @@ -1106,7 +1108,7 @@ def test_transfer_mech_size_scalar_var_2d(self): size=2, default_variable=[[1, 2], [3, 4]] ) - assert len(T.defaults.variable) == 2 and (T.defaults.variable[0] == [1, 2]).all() and (T.defaults.variable[1] == [3, 4]).all() + np.testing.assert_array_equal(T.defaults.variable, [[1, 2], [3, 4]]) assert len(T.size) == 2 and T.size[0] == 2 and T.size[1] == 2 # ------------------------------------------------------------------------------------------------ @@ -1120,7 +1122,7 @@ def test_transfer_mech_var_2d_array(self): name='T', default_variable=[[1, 2], [3, 4]] ) - assert len(T.defaults.variable) == 2 and (T.defaults.variable[0] == [1, 2]).all() and (T.defaults.variable[1] == [3, 4]).all() + np.testing.assert_array_equal(T.defaults.variable, [[1, 2], [3, 4]]) # ------------------------------------------------------------------------------------------------ # TEST 14 @@ -1134,9 +1136,9 @@ def test_transfer_mech_var_1D_size_wrong(self): default_variable=[1, 2, 3, 4], size=2 ) - assert len(T.defaults.variable) == 1 and (T.defaults.variable[0] == [1, 2, 3, 4]).all() + np.testing.assert_array_equal(T.defaults.variable, [[1, 2, 3, 4]]) val = T.execute([10.0, 10.0, 10.0, 10.0]) - np.testing.assert_allclose(val, [[10.0, 10.0, 10.0, 10.0]]) + np.testing.assert_array_equal(val, [[10.0, 10.0, 10.0, 10.0]]) # ------------------------------------------------------------------------------------------------ # TEST 15 @@ -1150,9 +1152,9 @@ def test_transfer_mech_var_1D_size_wrong_2(self): default_variable=[1, 2, 3, 4], size=[2, 3, 4] ) - assert len(T.defaults.variable) == 1 and (T.defaults.variable[0] == [1, 2, 3, 4]).all() + np.testing.assert_array_equal(T.defaults.variable, [[1, 2, 3, 4]]) val = T.execute([10.0, 10.0, 10.0, 10.0]) - np.testing.assert_allclose(val, [[10.0, 10.0, 10.0, 10.0]]) + np.testing.assert_array_equal(val, [[10.0, 10.0, 10.0, 10.0]]) # ------------------------------------------------------------------------------------------------ # TEST 16 @@ -1166,7 +1168,9 @@ def test_transfer_mech_size_var_incompatible1(self): size=2, default_variable=[[1, 2], [3, 4, 5]] ) - assert (T.defaults.variable[0] == [1, 2]).all() and (T.defaults.variable[1] == [3, 4, 5]).all() and len(T.defaults.variable) == 2 + assert len(T.defaults.variable) == 2 + np.testing.assert_array_equal(T.defaults.variable[0], [1, 2]) + np.testing.assert_array_equal(T.defaults.variable[1], [3, 4, 5]) # ------------------------------------------------------------------------------------------------ # TEST 17 @@ -1180,7 +1184,9 @@ def test_transfer_mech_size_var_incompatible2(self): size=[2, 2], default_variable=[[1, 2], [3, 4, 5]] ) - assert (T.defaults.variable[0] == [1, 2]).all() and (T.defaults.variable[1] == [3, 4, 5]).all() and len(T.defaults.variable) == 2 + assert len(T.defaults.variable) == 2 + np.testing.assert_array_equal(T.defaults.variable[0], [1, 2]) + np.testing.assert_array_equal(T.defaults.variable[1], [3, 4, 5]) # ------------------------------------------------------------------------------------------------ From 8c68f552846581f120b1180290e0d4c09c612c50 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Mon, 4 Mar 2024 09:39:38 -0500 Subject: [PATCH 029/410] 
llvm/thread_evaluate: Reuse type-cast parameters (#2923)

inputs and outputs need to be cast to their respective pointer types.
They are identical for all thread workers.

Signed-off-by: Jan Vesely
---
 psyneulink/core/llvm/execution.py | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)

diff --git a/psyneulink/core/llvm/execution.py b/psyneulink/core/llvm/execution.py
index e50967d32cb..f28e88f5736 100644
--- a/psyneulink/core/llvm/execution.py
+++ b/psyneulink/core/llvm/execution.py
@@ -815,13 +815,19 @@ def thread_evaluate(self, inputs, num_input_sets, num_evaluations, all_results:b

         parallel_start = time.time()
         with concurrent.futures.ThreadPoolExecutor(max_workers=jobs) as ex:
+
+            # Create input and result typed casts once, they are the same
+            # for every submitted job.
+            input_param = ctypes.cast(ctypes.byref(ct_inputs), self.__bin_func.c_func.argtypes[5])
+            results_param = ctypes.cast(ct_results, self.__bin_func.c_func.argtypes[4])
+
             # There are 7 arguments to evaluate_alloc_range:
             # comp_param, comp_state, from, to, results, input, comp_data
             results = [ex.submit(self.__bin_func, ct_param, ct_state,
                                  int(i * evals_per_job),
                                  min((i + 1) * evals_per_job, num_evaluations),
-                                 ctypes.cast(ct_results, self.__bin_func.c_func.argtypes[4]),
-                                 ctypes.cast(ctypes.byref(ct_inputs), self.__bin_func.c_func.argtypes[5]),
+                                 results_param,
+                                 input_param,
                                  ct_data,
                                  ct_num_inputs)
                        for i in range(jobs)]

From d803c17641d66ee3f73c3bb107cd18fba8747bfc Mon Sep 17 00:00:00 2001
From: David Turner
Date: Fri, 8 Mar 2024 14:51:57 -0500
Subject: [PATCH 030/410] Re-enable LLVM in stability_flexibility_pec_fit example.

Remove some unused imports.

---
 .../Debug/stability_flexibility/stability_flexibility.py | 3 ---
 .../stability_flexibility/stability_flexibility_pec_fit.py | 6 +++---
 2 files changed, 3 insertions(+), 6 deletions(-)

diff --git a/Scripts/Debug/stability_flexibility/stability_flexibility.py b/Scripts/Debug/stability_flexibility/stability_flexibility.py
index 9d609d52872..38420d88380 100644
--- a/Scripts/Debug/stability_flexibility/stability_flexibility.py
+++ b/Scripts/Debug/stability_flexibility/stability_flexibility.py
@@ -1,8 +1,5 @@
 import psyneulink as pnl
 import numpy as np
-import random
-import pytest
-import pandas as pd


 # Define function to generate a counterbalanced trial sequence with a specified switch trial frequency
diff --git a/Scripts/Debug/stability_flexibility/stability_flexibility_pec_fit.py b/Scripts/Debug/stability_flexibility/stability_flexibility_pec_fit.py
index 82c181ddceb..09eceb2dea5 100644
--- a/Scripts/Debug/stability_flexibility/stability_flexibility_pec_fit.py
+++ b/Scripts/Debug/stability_flexibility/stability_flexibility_pec_fit.py
@@ -16,9 +16,9 @@
 trial_seq_seed = 0

 # High-level parameters the impact performance of the test
-num_trials = 12
+num_trials = 50
 time_step_size = 0.01
-num_estimates = 3
+num_estimates = 10000

 sf_params = dict(
     gain=3.0,
@@ -104,7 +104,7 @@
     num_estimates=num_estimates,
 )

-# pec.controller.parameters.comp_execution_mode.set("LLVM")
+pec.controller.parameters.comp_execution_mode.set("LLVM")
 pec.controller.function.parameters.save_values.set(True)

 print("Running the PEC")

From 61889791a953facab01b1a00239e1fe6dd89cb41 Mon Sep 17 00:00:00 2001
From: David Turner
Date: Fri, 8 Mar 2024 14:54:27 -0500
Subject: [PATCH 031/410] Fix bug in PEC mapping outcome variables to data.

It turns out that the PEC code was assuming the inner composition's port map
dict order was always the same as the output ports list. It seems this was
non-deterministic.
Now we get the index of the outcome variable in the output ports of the inner
composition. To do this, we must use the inner composition's port map to get
the CIM output port that corresponds to the outcome variable.
---
 .../core/compositions/parameterestimationcomposition.py | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/psyneulink/core/compositions/parameterestimationcomposition.py b/psyneulink/core/compositions/parameterestimationcomposition.py
index 1bb05150140..9dcc8cbc9b0 100644
--- a/psyneulink/core/compositions/parameterestimationcomposition.py
+++ b/psyneulink/core/compositions/parameterestimationcomposition.py
@@ -592,13 +592,17 @@ def __init__(
         self._outcome_variable_indices = []
         in_comp = self.nodes[0]
-        in_comp_ports = list(in_comp.output_CIM.port_map.keys())
         for outcome_var in self.outcome_variables:
             try:
                 if not isinstance(outcome_var, OutputPort):
                     outcome_var = outcome_var.output_port
-                self._outcome_variable_indices.append(in_comp_ports.index(outcome_var))
+                # Get the index of the outcome variable in the output ports of inner composition. To do this,
+                # we must use the inner composition's portmap to get the CIM output port that corresponds to
+                # the outcome variable
+                index = in_comp.output_ports.index(in_comp.output_CIM.port_map[outcome_var][1])
+
+                self._outcome_variable_indices.append(index)
             except ValueError:
                 raise ValueError(
                     f"Could not find outcome variable {outcome_var.full_name} in the output ports of "
                     f"the composition being fitted to data ({self.nodes[0]}). A current limitation of the "
                     f"PEC data fitting API is that any output port of composition that should be fit to "

From e5ccabf19742cfe64677eff128f45db0976b0c62 Mon Sep 17 00:00:00 2001
From: Jan Vesely
Date: Fri, 8 Mar 2024 20:05:38 -0500
Subject: [PATCH 032/410] requirements: update matplotlib requirement from <3.7.3 to <3.7.6 (#2925)

Updates the requirements on [matplotlib](https://github.com/matplotlib/matplotlib) to permit the latest version.
- [Release notes](https://github.com/matplotlib/matplotlib/releases)
- [Commits](https://github.com/matplotlib/matplotlib/compare/v3.7.2...v3.7.5)

---
updated-dependencies:
- dependency-name: matplotlib
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
 requirements.txt          | 2 +-
 tutorial_requirements.txt | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/requirements.txt b/requirements.txt
index 35c62318320..5ec2a42556b 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -7,7 +7,7 @@ graphviz<0.21.0
 grpcio<1.63.0
 leabra-psyneulink<0.3.3
 llvmlite<0.43
-matplotlib<3.7.3
+matplotlib<3.7.6
 modeci_mdf<0.5, >=0.4.3; (platform_machine == 'AMD64' or platform_machine == 'x86_64' or platform_machine == 'arm64' or platform_machine == 'aarch64') and platform_python_implementation == 'CPython' and implementation_name == 'cpython'
 networkx<3.3
 numpy>=1.21.0, <1.26.5
diff --git a/tutorial_requirements.txt b/tutorial_requirements.txt
index 9efb1e3e58d..8f7bd2eaa14 100644
--- a/tutorial_requirements.txt
+++ b/tutorial_requirements.txt
@@ -1,3 +1,3 @@
 graphviz<0.21.0
 jupyter<1.0.1
-matplotlib<3.7.3
+matplotlib<3.7.6

From b8105565e256d480a2c5e83e272407a8f50626bc Mon Sep 17 00:00:00 2001
From: David Turner
Date: Mon, 11 Mar 2024 15:15:54 -0400
Subject: [PATCH 033/410] Fix exception (and test) for bad outcome var spec.
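The lookup introduced in the previous commit goes through the inner
composition's output_CIM port map, so a badly specified outcome variable now
surfaces as a KeyError from the dict access rather than a ValueError from
list.index(). A rough, self-contained sketch of the failure mode (illustrative
only, with hypothetical names; not the actual PEC code):

    output_ports = ["DECISION_GATE OutputPort", "RESPONSE_GATE OutputPort"]
    port_map = {"decision": ("input_port", "DECISION_GATE OutputPort")}

    try:
        index = output_ports.index(port_map["response_time"][1])
    except KeyError:
        # the dict access fails before .index() is reached, so KeyError
        # (not ValueError) is what has to be caught and tested for
        pass

The exception raised by the PEC and the expectation in
test_pec_bad_outcome_var_spec are updated accordingly.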
--- .../core/compositions/parameterestimationcomposition.py | 4 ++-- tests/composition/test_parameterestimationcomposition.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/psyneulink/core/compositions/parameterestimationcomposition.py b/psyneulink/core/compositions/parameterestimationcomposition.py index 9dcc8cbc9b0..0d7dc7590eb 100644 --- a/psyneulink/core/compositions/parameterestimationcomposition.py +++ b/psyneulink/core/compositions/parameterestimationcomposition.py @@ -603,8 +603,8 @@ def __init__( index = in_comp.output_ports.index(in_comp.output_CIM.port_map[outcome_var][1]) self._outcome_variable_indices.append(index) - except ValueError: - raise ValueError( + except KeyError: + raise KeyError( f"Could not find outcome variable {outcome_var.full_name} in the output ports of " f"the composition being fitted to data ({self.nodes[0]}). A current limitation of the " f"PEC data fitting API is that any output port of composition that should be fit to " diff --git a/tests/composition/test_parameterestimationcomposition.py b/tests/composition/test_parameterestimationcomposition.py index 0fd45cb4bc3..9bc73f00a31 100644 --- a/tests/composition/test_parameterestimationcomposition.py +++ b/tests/composition/test_parameterestimationcomposition.py @@ -359,7 +359,7 @@ def test_pec_bad_outcome_var_spec(): ("threshold", decision): np.linspace(0.5, 1.0, 1000), } - with pytest.raises(ValueError) as ex: + with pytest.raises(KeyError) as ex: pnl.ParameterEstimationComposition( name="pec", nodes=[comp], From 64e6de392e20ee2507732d049bff1ef4e0add50e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 13 Mar 2024 21:32:33 -0400 Subject: [PATCH 034/410] requirements: update packaging requirement from <24.0 to <25.0 (#2926) Updates the requirements on [packaging](https://github.com/pypa/packaging) to permit the latest version. - [Release notes](https://github.com/pypa/packaging/releases) - [Changelog](https://github.com/pypa/packaging/blob/main/CHANGELOG.rst) - [Commits](https://github.com/pypa/packaging/compare/23.0...24.0) --- updated-dependencies: - dependency-name: packaging dependency-type: direct:production ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- dev_requirements.txt | 2 +- requirements.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/dev_requirements.txt b/dev_requirements.txt index 183aa0f4b3f..fd3ba173841 100644 --- a/dev_requirements.txt +++ b/dev_requirements.txt @@ -1,5 +1,5 @@ jupyter<1.0.1 -packaging<24.0 +packaging<25.0 pytest<8.0.3 pytest-benchmark<4.0.1 pytest-cov<4.1.1 diff --git a/requirements.txt b/requirements.txt index 5ec2a42556b..34316e2c296 100644 --- a/requirements.txt +++ b/requirements.txt @@ -12,7 +12,7 @@ modeci_mdf<0.5, >=0.4.3; (platform_machine == 'AMD64' or platform_machine == 'x8 networkx<3.3 numpy>=1.21.0, <1.26.5 optuna<3.4.0 -packaging<24.0 +packaging<25.0 pandas<2.2.2 pillow<10.3.0 pint<0.22.0 From 3f1c830082054cc52c417df997449f9e496aa39b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 19 Mar 2024 00:15:27 -0400 Subject: [PATCH 035/410] requirements: update pytest requirement from <8.0.3 to <8.1.2 (#2928) Updates the requirements on [pytest](https://github.com/pytest-dev/pytest) to permit the latest version. 
- [Release notes](https://github.com/pytest-dev/pytest/releases) - [Changelog](https://github.com/pytest-dev/pytest/blob/main/CHANGELOG.rst) - [Commits](https://github.com/pytest-dev/pytest/compare/8.0.2...8.1.1) --- updated-dependencies: - dependency-name: pytest dependency-type: direct:development ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- dev_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev_requirements.txt b/dev_requirements.txt index fd3ba173841..875b65192f2 100644 --- a/dev_requirements.txt +++ b/dev_requirements.txt @@ -1,6 +1,6 @@ jupyter<1.0.1 packaging<25.0 -pytest<8.0.3 +pytest<8.1.2 pytest-benchmark<4.0.1 pytest-cov<4.1.1 pytest-forked<1.7.0 From a804a8d811c93d3d9776c1deb825d6d3bec24458 Mon Sep 17 00:00:00 2001 From: David Turner Date: Tue, 19 Mar 2024 14:01:55 -0400 Subject: [PATCH 036/410] Replace autograd in GradientOptimization Swap out calls to autograd in GradientOptimization for pytorch functional autograd. --- .../nonstateful/optimizationfunctions.py | 40 ++++++++++++++++--- psyneulink/core/globals/utilities.py | 38 +++++++++++++++++- .../library/compositions/regressioncfa.py | 8 ++-- 3 files changed, 75 insertions(+), 11 deletions(-) diff --git a/psyneulink/core/components/functions/nonstateful/optimizationfunctions.py b/psyneulink/core/components/functions/nonstateful/optimizationfunctions.py index 66e4d8f1e06..b17e0a1f815 100644 --- a/psyneulink/core/components/functions/nonstateful/optimizationfunctions.py +++ b/psyneulink/core/components/functions/nonstateful/optimizationfunctions.py @@ -31,6 +31,15 @@ from numbers import Number import numpy as np + +# Conditionally import torch +try: + import torch + from torch.func import grad +except ImportError: + torch = None + grad = None + from beartype import beartype from psyneulink._typing import Optional, Union, Callable, Literal @@ -1249,13 +1258,34 @@ def reset(self, default_variable=None, objective_function=None, context=None, ** # Differentiate objective_function using autograd.grad() if objective_function is not None and not self.gradient_function: + + if torch is None: + raise ValueError("PyTorch is not installed. Please install PyTorch to use GradientOptimization without " + "specifying a gradient_function.") + + if grad is None: + raise ValueError("PyTorch version is too old. Please upgrade PyTorch to use GradientOptimization without " + "specifying a gradient_function.") + try: - from autograd import grad - self.parameters.gradient_function._set(grad(self.objective_function), context) - except: - raise OptimizationFunctionError("Unable to use autograd with {} specified for {} Function: {}.". + # Need to wrap objective_function in a lambda to pass to grad because it needs to return a torch tensor + def func_wrapper(x, context): + return torch.tensor(self.objective_function(x, context)) + + # Get the gradient of the objective function with pytorch autograd + gradient_func = torch.func.grad(func_wrapper) + + # We need to wrap the gradient function in a lambda as well because we need to convert back to numpy + def gradient_func_wrapper(x, context): + return gradient_func(torch.from_numpy(x), context).detach().numpy() + + self.parameters.gradient_function._set(gradient_func_wrapper, context) + + except Exception as ex: + + raise OptimizationFunctionError("Unable to use PyTorch autograd with {} specified for {} Function: {}.". 
format(repr(OBJECTIVE_FUNCTION), self.__class__.__name__, - objective_function.__name__)) + objective_function.__name__)) from ex search_space = self.search_space bounds = None diff --git a/psyneulink/core/globals/utilities.py b/psyneulink/core/globals/utilities.py index f3b01aa765c..d2f306035cb 100644 --- a/psyneulink/core/globals/utilities.py +++ b/psyneulink/core/globals/utilities.py @@ -123,6 +123,12 @@ import numpy as np +# Conditionally import torch +try: + import torch +except ImportError: + torch = None + from psyneulink.core.globals.keywords import \ comparison_operators, DISTANCE_METRICS, EXPONENTIAL, GAUSSIAN, LINEAR, MATRIX_KEYWORD_VALUES, NAME, SINUSOID, VALUE @@ -1050,10 +1056,28 @@ def safe_create_np_array(value): else: raise + except RuntimeError as e: + + # If we get a RuntimeError, it is probably because we are trying to convert a torch tensor. + # We can't convert to a numpy array without breaking autograd, so we need to return the original value + if "call numpy() on Tensor" in str(e) and torch: + return value + else: + raise e + value = safe_create_np_array(value) if dimension == 1: - value = np.atleast_1d(value) + try: + value = np.atleast_1d(value) + except RuntimeError as e: + # If we get a RuntimeError, this is probably a BatchedTensorImpl from torch\vmap + # We can't convert to a numpy array and use np.atleast_2d, so we need to use + # torch's atleast_2d function instead + if torch: + value = torch.atleast_1d(value) + else: + raise e elif dimension == 2: # Array is made up of non-uniform elements, so treat as 2d array and pass if ( @@ -1063,7 +1087,17 @@ def safe_create_np_array(value): ): pass else: - value = np.atleast_2d(value) + try: + value = np.atleast_2d(value) + except RuntimeError as e: + # If we get a RuntimeError, this is probably a BatchedTensorImpl from torch\vmap + # We can't convert to a numpy array and use np.atleast_2d, so we need to use + # torch's atleast_2d function instead + if torch: + value = torch.atleast_2d(value) + else: + raise e + elif dimension is not None: raise UtilitiesError("dimension param ({0}) must be None, 1, or 2".format(dimension)) diff --git a/psyneulink/library/compositions/regressioncfa.py b/psyneulink/library/compositions/regressioncfa.py index ef878c4056c..03f44c1f3b4 100644 --- a/psyneulink/library/compositions/regressioncfa.py +++ b/psyneulink/library/compositions/regressioncfa.py @@ -633,23 +633,23 @@ def compute_terms(self, control_allocation, context=None): computed_terms[PV.F] = f = self.terms[PV.F.value] # Compute value of each control_signal from its variable - c = [None] * len(control_allocation) + c = np.zeros((len(control_allocation), )) for i, var in enumerate(control_allocation): c[i] = self.control_signal_functions[i](var, context=context) - computed_terms[PV.C] = c = np.array(c) + computed_terms[PV.C] = c # Compute costs for new control_signal values if PV.COST in terms: # computed_terms[PV.COST] = -(np.exp(0.25*c-3)) # computed_terms[PV.COST] = -(np.exp(0.25*c-3) + (np.exp(0.25*np.abs(c-self.control_signal_change)-3))) - costs = [None] * len(c) + costs = np.zeros((len(control_allocation),)) for i, val in enumerate(c): # MODIFIED 11/9/18 OLD: costs[i] = -(self._compute_costs[i](val, context=context)) # # MODIFIED 11/9/18 NEW: [JDC] # costs[i] = -(self._compute_costs[i](val, ref_variables[i])) # MODIFIED 11/9/18 END - computed_terms[PV.COST] = np.array(costs) + computed_terms[PV.COST] = costs # Compute terms interaction that are used if any(term in terms for term in [PV.FF, PV.FFC, PV.FFCC]): From 
cb5aef9993c5a6db034c0b68313016a95c17bc20 Mon Sep 17 00:00:00 2001 From: David Turner Date: Tue, 19 Mar 2024 14:05:34 -0400 Subject: [PATCH 037/410] Mark test_lvoc_features_function as pytorch --- tests/composition/test_control.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/composition/test_control.py b/tests/composition/test_control.py index 601b5bbcc03..2183e800bd2 100644 --- a/tests/composition/test_control.py +++ b/tests/composition/test_control.py @@ -1840,6 +1840,7 @@ def test_lvoc_both_predictors_specs(self): assert len(lvoc.input_ports) == 5 + @pytest.mark.pytorch def test_lvoc_features_function(self): m1 = pnl.TransferMechanism(input_ports=["InputPort A", "InputPort B"]) m2 = pnl.TransferMechanism() From bf05f9b18eaed7356416d21cf4241137cb07e430 Mon Sep 17 00:00:00 2001 From: David Turner Date: Tue, 19 Mar 2024 14:10:06 -0400 Subject: [PATCH 038/410] Remove check for autograd ArrayBox type. --- psyneulink/core/globals/utilities.py | 12 +----------- 1 file changed, 1 insertion(+), 11 deletions(-) diff --git a/psyneulink/core/globals/utilities.py b/psyneulink/core/globals/utilities.py index d2f306035cb..3c926148f47 100644 --- a/psyneulink/core/globals/utilities.py +++ b/psyneulink/core/globals/utilities.py @@ -625,17 +625,7 @@ def recursively_check_elements_for_numeric(value): else: return True else: - if not is_number(value): - try: - # True for autograd ArrayBox (and maybe other types?) - # if isinstance(value._value, Number): - from autograd.numpy.numpy_boxes import ArrayBox - if isinstance(value, ArrayBox): - return True - except: - return False - else: - return True + return is_number(value) # Test copy since may need to convert matrix to array (see above) if not recursively_check_elements_for_numeric(candidate.copy()): return False From d19ba9b8e8a72b4ab9f32a5af65728eb721f275e Mon Sep 17 00:00:00 2001 From: David Turner Date: Tue, 19 Mar 2024 14:10:27 -0400 Subject: [PATCH 039/410] Replace autograd.numpy with standard numpy. --- .../Bustamante_Stroop_XOR_LVOC_Model_VZ.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Scripts/Models (Under Development)/Bustamante_Stroop_XOR_LVOC_Model_VZ.py b/Scripts/Models (Under Development)/Bustamante_Stroop_XOR_LVOC_Model_VZ.py index da22452c576..4bc078ea5f5 100644 --- a/Scripts/Models (Under Development)/Bustamante_Stroop_XOR_LVOC_Model_VZ.py +++ b/Scripts/Models (Under Development)/Bustamante_Stroop_XOR_LVOC_Model_VZ.py @@ -18,7 +18,7 @@ `_ """ -import autograd.numpy as np +import numpy as np import psyneulink as pnl # from build_stimuli_VZ import xor_dict From b70c3777f7eadcee19a617d911c4e7634187c2f1 Mon Sep 17 00:00:00 2001 From: David Turner Date: Tue, 19 Mar 2024 14:26:11 -0400 Subject: [PATCH 040/410] Remove\replace references to autograd in docs\comments. 
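The documentation now points at PyTorch's functional autograd instead of the
autograd package. For reference, the pattern the updated docstrings describe is
roughly the following standalone sketch (assumes torch >= 2.0 for torch.func;
this is not PsyNeuLink code):

    import torch
    from torch.func import grad

    def objective(x):
        # scalar-valued objective; grad() differentiates w.r.t. the first argument
        return torch.sum(x ** 2)

    gradient_fn = grad(objective)                 # analogue of autograd.grad(objective)
    print(gradient_fn(torch.tensor([1.0, 2.0])))  # tensor([2., 4.])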
--- README.rst | 1 - docs/source/index.rst | 1 - docs/source/index_logo_with_text.rst | 1 - .../functions/nonstateful/optimizationfunctions.py | 7 ++++--- .../functions/nonstateful/transferfunctions.py | 9 --------- psyneulink/core/globals/utilities.py | 3 ++- psyneulink/library/compositions/gymforagercfa.py | 3 ++- psyneulink/library/compositions/regressioncfa.py | 4 +++- 8 files changed, 11 insertions(+), 18 deletions(-) diff --git a/README.rst b/README.rst index d04ca20b23c..a534c0043b8 100644 --- a/README.rst +++ b/README.rst @@ -146,7 +146,6 @@ Dependencies that are automatically installed (except those noted as optional) i * pillow * llvmlite * mpi4py (optional) - * autograd (optional) Lists of required packages for PsyNeuLink, developing PsyNeuLink, and running the PsyNeuLink tutorial are also stored in pip-style `requirements.txt`, `dev_requirements.txt`, and `tutorial_requirements.txt` in the source code. diff --git a/docs/source/index.rst b/docs/source/index.rst index 70e1e0ba79b..9bc213049ea 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -178,7 +178,6 @@ Dependencies that are automatically installed (except those noted as optional) i * pillow * llvmlite * mpi4py (optional) - * autograd (optional) Lists of required packages for PsyNeuLink, developing PsyNeuLink, and running the PsyNeuLink tutorial are also stored in pip-style `requirements.txt`, `dev_requirements.txt`, and `tutorial_requirements.txt` in the source code. diff --git a/docs/source/index_logo_with_text.rst b/docs/source/index_logo_with_text.rst index 2f34d1216b0..453db5e1732 100644 --- a/docs/source/index_logo_with_text.rst +++ b/docs/source/index_logo_with_text.rst @@ -163,7 +163,6 @@ Dependencies that are automatically installed (except those noted as optional) i * pillow * llvmlite * mpi4py (optional) - * autograd (optional) Lists of required packages for PsyNeuLink, developing PsyNeuLink, and running the PsyNeuLink tutorial are also stored in pip-style `requirements.txt`, `dev_requirements.txt`, and `tutorial_requirements.txt` in the source code. diff --git a/psyneulink/core/components/functions/nonstateful/optimizationfunctions.py b/psyneulink/core/components/functions/nonstateful/optimizationfunctions.py index b17e0a1f815..0a2447723ad 100644 --- a/psyneulink/core/components/functions/nonstateful/optimizationfunctions.py +++ b/psyneulink/core/components/functions/nonstateful/optimizationfunctions.py @@ -951,8 +951,9 @@ class GradientOptimization(OptimizationFunction): which should be the derivative of the `objective_function ` with respect to `variable ` at its current value: :math:`\\frac{d(objective\\_function(variable))}{d(variable)}`. If the **gradient_function* argument of the - constructor is not specified, then an attempt is made to use `Autograd's `_ `grad - ` method to generate `gradient_function `. If that fails, + constructor is not specified, then an attempt is made to use PyTorch functional + `autograd's `_ `grad ` + method to generate `gradient_function `. If that fails, an error occurs. The **search_space** argument can be used to specify lower and/or upper bounds for each dimension of the sample; if the gradient causes a value of the sample to exceed a bound along a dimenson, the value of the bound is used for that dimension, unless/until the gradient shifts and causes it to return back within the bound. 
@@ -972,7 +973,7 @@ class GradientOptimization(OptimizationFunction): gradient_function : function specifies function used to compute the gradient in each iteration of the `optimization process `; if it is not specified, an attempt is made to compute it using - `autograd.grad `_. + `PyTorch autograd's `_ `grad `. direction : ASCENT or DESCENT : default ASCENT specifies the direction of gradient optimization: if *ASCENT*, movement is attempted in the positive direction diff --git a/psyneulink/core/components/functions/nonstateful/transferfunctions.py b/psyneulink/core/components/functions/nonstateful/transferfunctions.py index 545049d55f9..fe39ee705f9 100644 --- a/psyneulink/core/components/functions/nonstateful/transferfunctions.py +++ b/psyneulink/core/components/functions/nonstateful/transferfunctions.py @@ -722,8 +722,6 @@ def _function(self, scale = self._get_current_parameter_value(SCALE, context) offset = self._get_current_parameter_value(OFFSET, context) - # The following doesn't work with autograd (https://github.com/HIPS/autograd/issues/416) - # result = scale * np.exp(rate * variable + bias) + offset result = scale * e**(rate * variable + bias) + offset return self.convert_output_type(result) @@ -1022,8 +1020,6 @@ def _function(self, offset = self._get_current_parameter_value(OFFSET, context) scale = self._get_current_parameter_value(SCALE, context) - # The following doesn't work with autograd (https://github.com/HIPS/autograd/issues/416) - # result = 1. / (1 + np.exp(-gain * (variable - bias) + offset)) result = scale * (1. / (1 + e**(-gain * (variable + bias - x_0) + offset))) return self.convert_output_type(result) @@ -1346,9 +1342,6 @@ def _function(self, offset = self._get_current_parameter_value(OFFSET, context) scale = self._get_current_parameter_value(SCALE, context) - # The following probably doesn't work with autograd (https://github.com/HIPS/autograd/issues/416) - # (since np.exp doesn't work) - # result = 1. / (1 + np.tanh(-gain * (variable - bias) + offset)) exponent = -2 * (gain * (variable + bias - x_0) + offset) result = scale * (1 - e**exponent)/ (1 + e**exponent) @@ -2437,7 +2430,6 @@ def _function(self, offset = self._get_current_parameter_value(OFFSET, context) random_state = self._get_current_parameter_value('random_state', context) - # The following doesn't work with autograd (https://github.com/HIPS/autograd/issues/416) result = scale * random_state.normal(variable + bias, variance) + offset return self.convert_output_type(result) @@ -2846,7 +2838,6 @@ def _function(self, result = variable else: - # ??Not sure whether the following works with autograd (https://github.com/HIPS/autograd/issues/416) p = p or self.defaults.p self.binomial_distort.parameters.p.set(p, context) result = self.binomial_distort(variable) * (1 / (1 - p)) diff --git a/psyneulink/core/globals/utilities.py b/psyneulink/core/globals/utilities.py index 3c926148f47..8e0298d770a 100644 --- a/psyneulink/core/globals/utilities.py +++ b/psyneulink/core/globals/utilities.py @@ -1049,7 +1049,8 @@ def safe_create_np_array(value): except RuntimeError as e: # If we get a RuntimeError, it is probably because we are trying to convert a torch tensor. 
- # We can't convert to a numpy array without breaking autograd, so we need to return the original value + # We can't convert to a numpy array without breaking pytorch autograd, so we need to return the + # original value if "call numpy() on Tensor" in str(e) and torch: return value else: diff --git a/psyneulink/library/compositions/gymforagercfa.py b/psyneulink/library/compositions/gymforagercfa.py index be7d546645a..7f5239e2c4e 100644 --- a/psyneulink/library/compositions/gymforagercfa.py +++ b/psyneulink/library/compositions/gymforagercfa.py @@ -177,7 +177,8 @@ def evaluate(self, feature_values, control_allocation, num_estimates, num_trials .. note:: If this method is assigned as the `objective_funtion of a `GradientOptimization` `Function`, - it is differentiated using `autograd `_\\.grad(). + it is differentiated using + `PyTorch autograd's `_ `grad `. """ predicted_outcome=0 diff --git a/psyneulink/library/compositions/regressioncfa.py b/psyneulink/library/compositions/regressioncfa.py index 03f44c1f3b4..f8924f62854 100644 --- a/psyneulink/library/compositions/regressioncfa.py +++ b/psyneulink/library/compositions/regressioncfa.py @@ -346,7 +346,9 @@ def evaluate(self, feature_values, control_allocation, num_estimates, num_trials .. note:: If this method is assigned as the `objective_funtion of a `GradientOptimization` `Function`, - it is differentiated using `autograd `_\\.grad(). + it is differentiated using + `PyTorch autograd's `_ + `grad `. """ predicted_outcome=0 From a89476b3cb72864f1f110c7497bd797e41ce13f0 Mon Sep 17 00:00:00 2001 From: David Turner Date: Tue, 19 Mar 2024 14:26:22 -0400 Subject: [PATCH 041/410] Remove autograd dependency. --- requirements.txt | 1 - 1 file changed, 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 34316e2c296..8a2de9cab23 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,3 @@ -autograd<1.7 beartype<0.18.0 dill<0.3.9 fastkde>=1.0.24, <1.0.31 From 4f41ea88c3b41c901f17bb29f76a2f358e7d4f81 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Sat, 23 Mar 2024 20:24:49 -0400 Subject: [PATCH 042/410] Component, llvm: Remove stale comment. Signed-off-by: Jan Vesely --- psyneulink/core/components/component.py | 1 - 1 file changed, 1 deletion(-) diff --git a/psyneulink/core/components/component.py b/psyneulink/core/components/component.py index 14166794185..e72aa2a3ac9 100644 --- a/psyneulink/core/components/component.py +++ b/psyneulink/core/components/component.py @@ -1362,7 +1362,6 @@ def llvm_state_ids(self): def _get_state_initializer(self, context): def _convert(p): - # FIXME: This should use defaults instead of 'p.get' x = p.get(context) if isinstance(x, np.random.RandomState): # Skip first element of random state (id string) From 64b0d31380d6fca5513f6d4dae19243f96e14f5a Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Sat, 23 Mar 2024 22:05:42 -0400 Subject: [PATCH 043/410] Component, llvm: Drop 'learning_function' from compiled params Do not check for ComponentMeta type. 
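For reference, the remaining type check only rejects values that have no
compiled representation. A simplified standalone sketch of the filter
(illustrative only; the real check lives in component.py):

    import numpy as np

    def value_is_compilable(val):
        # strings, builtins, functions, and methods cannot be lowered into
        # the compiled parameter struct, so parameters holding them are skipped
        rejected = (str, type(max), type(np.sum), type(value_is_compilable))
        return not isinstance(val, rejected)

    print(value_is_compilable("a string"))       # False -> excluded
    print(value_is_compilable(np.array([1.0])))  # True  -> kept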
Signed-off-by: Jan Vesely --- psyneulink/core/components/component.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/psyneulink/core/components/component.py b/psyneulink/core/components/component.py index e72aa2a3ac9..7ab02990dd8 100644 --- a/psyneulink/core/components/component.py +++ b/psyneulink/core/components/component.py @@ -1431,11 +1431,11 @@ def _get_compilation_params(self): "learning_results", "learning_signal", "learning_signals", "error_matrix", "error_signal", "activation_input", "activation_output", "error_sources", "covariates_sources", - "target", "sample", + "target", "sample", "learning_function" } # Mechanism's need few extra entries: # * matrix -- is never used directly, and is flatened below - # * integration rate -- shape mismatch with param port input + # * integration_rate -- shape mismatch with param port input # * initializer -- only present on DDM and never used # * search_space -- duplicated between OCM and its function if hasattr(self, 'ports'): @@ -1479,7 +1479,7 @@ def _is_user_only_param(p): # FIXME: this should use defaults val = p.get() # Check if the value type is valid for compilation - return not isinstance(val, (str, ComponentsMeta, + return not isinstance(val, (str, type(max), type(np.sum), type(_is_compilation_param), From 1d2e1ab7fddf338ed044e3e37ae5f80093217c29 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Sat, 23 Mar 2024 23:04:06 -0400 Subject: [PATCH 044/410] llvm, Component: Consolidate filter function for compilable parameter types Signed-off-by: Jan Vesely --- psyneulink/core/components/component.py | 59 ++++++++++++++----------- 1 file changed, 33 insertions(+), 26 deletions(-) diff --git a/psyneulink/core/components/component.py b/psyneulink/core/components/component.py index 7ab02990dd8..0e9fc456a97 100644 --- a/psyneulink/core/components/component.py +++ b/psyneulink/core/components/component.py @@ -1292,6 +1292,27 @@ def __deepcopy__(self, memo): # ------------------------------------------------------------------------------------------------------------------ # Compilation support # ------------------------------------------------------------------------------------------------------------------ + def _is_compilable_param(self, p): + + # User only parameters are not compiled. + if p.read_only and p.getter is not None: + return False + + # Shared and aliased parameters are for user conveniecne and not compiled. 
+ if isinstance(p, (ParameterAlias, SharedParameter)): + return False + + # TODO this should use default value + val = p.get() + + # Strings, builtins, functions, and methods are not compilable + return not isinstance(val, (str, + type(max), + type(np.sum), + type(make_parameter_property), + type(self._get_compilation_params))) + + def _get_compilation_state(self): # FIXME: MAGIC LIST, Use stateful tag for this whitelist = {"previous_time", "previous_value", "previous_v", @@ -1299,23 +1320,27 @@ def _get_compilation_state(self): "input_ports", "output_ports", "adjustment_cost", "intensity_cost", "duration_cost", "intensity"} + # Prune subcomponents (which are enabled by type rather than a list) # that should be omitted blacklist = { "objective_mechanism", "agent_rep", "projections", "shadow_inputs"} - # Only mechanisms use "value" state, can execute 'until finished', - # and need to track executions + # Mechanisms; + # * use "value" state + # * can execute 'until finished' + # * need to track number of executions if hasattr(self, 'ports'): whitelist.update({"value", "num_executions_before_finished", "num_executions", "is_finished_flag"}) - # If both the mechanism and its functoin use random_state it's DDM - # with integrator function. The mechanism's random_state is not used. + # If both the mechanism and its function use random_state. + # it's DDM with integrator function. + # The mechanism's random_state is not used. if hasattr(self.parameters, 'random_state') and hasattr(self.function.parameters, 'random_state'): whitelist.remove('random_state') - # Only mechanisms and compositions need 'num_executions' + # Compositions need to track number of executions if hasattr(self, 'nodes'): whitelist.add("num_executions") @@ -1344,8 +1369,8 @@ def _get_compilation_state(self): def _is_compilation_state(p): # FIXME: This should use defaults instead of 'p.get' return p.name not in blacklist and \ - not isinstance(p, (ParameterAlias, SharedParameter)) and \ - (p.name in whitelist or isinstance(p.get(), Component)) + (p.name in whitelist or isinstance(p.get(), Component)) and \ + self._is_compilable_param(p) return filter(_is_compilation_state, self.parameters) @@ -1466,25 +1491,7 @@ def _get_compilation_params(self): blacklist.add('duration_cost_fct') def _is_compilation_param(p): - def _is_user_only_param(p): - if p.read_only and p.getter is not None: - return True - if isinstance(p, (ParameterAlias, SharedParameter)): - return True - - return False - - - if p.name not in blacklist and not _is_user_only_param(p): - # FIXME: this should use defaults - val = p.get() - # Check if the value type is valid for compilation - return not isinstance(val, (str, - type(max), - type(np.sum), - type(_is_compilation_param), - type(self._get_compilation_params))) - return False + return p.name not in blacklist and self._is_compilable_param(p) return filter(_is_compilation_param, self.parameters) From 116d0612994f6a6f5d6a7cdb1873f88f1d61f863 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Sat, 23 Mar 2024 23:35:14 -0400 Subject: [PATCH 045/410] llvm: Move matrices of learnable projections to compiled state The values are modified by learning. 
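Compiled parameters are treated as read-only here, while the state struct is what changes across executions; since learning rewrites a learnable projection's matrix, the matrix has to live in state, stored in a flat 1-D layout. A rough sketch of the flatten/restore round trip implied by this patch (the helper names are hypothetical; the real code builds LLVM structs rather than tuples):

import numpy as np

def matrix_to_state(matrix):
    # Flatten the 2D connection matrix into the flat layout used by the state
    # struct (compare the tuple(np.asfarray(x).flatten()) call in the diff below).
    return tuple(np.asarray(matrix, dtype=float).flatten())

def state_to_matrix(flat_state, shape):
    # Writeback direction: restore the learned weights to their original shape
    # before handing them back to the Python-side parameter.
    return np.asarray(flat_state).reshape(shape)

w = np.array([[0.1, 0.2], [0.3, 0.4]])
flat = matrix_to_state(w)                  # (0.1, 0.2, 0.3, 0.4)
assert np.allclose(state_to_matrix(flat, w.shape), w)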
Signed-off-by: Jan Vesely --- psyneulink/core/components/component.py | 12 +++++++++++- .../functions/nonstateful/transferfunctions.py | 4 ++-- psyneulink/core/compositions/composition.py | 6 ++---- psyneulink/core/llvm/builder_context.py | 2 ++ 4 files changed, 17 insertions(+), 7 deletions(-) diff --git a/psyneulink/core/components/component.py b/psyneulink/core/components/component.py index 0e9fc456a97..56e616a6663 100644 --- a/psyneulink/core/components/component.py +++ b/psyneulink/core/components/component.py @@ -1366,6 +1366,10 @@ def _get_compilation_state(self): if hasattr(self.parameters, 'duplicate_keys'): blacklist.add("previous_value") + # Matrices of learnable projections are stateful + if getattr(self, 'owner', None) and getattr(self.owner, 'learnable', False): + whitelist.add('matrix') + def _is_compilation_state(p): # FIXME: This should use defaults instead of 'p.get' return p.name not in blacklist and \ @@ -1388,7 +1392,9 @@ def llvm_state_ids(self): def _get_state_initializer(self, context): def _convert(p): x = p.get(context) - if isinstance(x, np.random.RandomState): + if p.name == 'matrix': # Flatten matrix + val = tuple(np.asfarray(x).flatten()) + elif isinstance(x, np.random.RandomState): # Skip first element of random state (id string) val = pnlvm._tupleize((*x.get_state()[1:], x.used_seed[0])) elif isinstance(x, np.random.Generator): @@ -1490,6 +1496,10 @@ def _get_compilation_params(self): if cost_functions.DURATION not in cost_functions: blacklist.add('duration_cost_fct') + # Matrices of learnable projections are stateful + if getattr(self, 'owner', None) and getattr(self.owner, 'learnable', False): + blacklist.add('matrix') + def _is_compilation_param(p): return p.name not in blacklist and self._is_compilable_param(p) diff --git a/psyneulink/core/components/functions/nonstateful/transferfunctions.py b/psyneulink/core/components/functions/nonstateful/transferfunctions.py index 545049d55f9..0c4c81de7ad 100644 --- a/psyneulink/core/components/functions/nonstateful/transferfunctions.py +++ b/psyneulink/core/components/functions/nonstateful/transferfunctions.py @@ -3886,7 +3886,7 @@ def instantiate_matrix(self, specification, context=None): return np.array(specification) - def _gen_llvm_function_body(self, ctx, builder, params, _, arg_in, arg_out, *, tags:frozenset): + def _gen_llvm_function_body(self, ctx, builder, params, state, arg_in, arg_out, *, tags:frozenset): # Restrict to 1d arrays if self.defaults.variable.ndim != 1: warnings.warn("Shape mismatch: {} (in {}) got 2D input: {}".format( @@ -3899,7 +3899,7 @@ def _gen_llvm_function_body(self, ctx, builder, params, _, arg_in, arg_out, *, t pnlvm.PNLCompilerWarning) arg_out = builder.gep(arg_out, [ctx.int32_ty(0), ctx.int32_ty(0)]) - matrix = ctx.get_param_or_state_ptr(builder, self, MATRIX, param_struct_ptr=params) + matrix = ctx.get_param_or_state_ptr(builder, self, MATRIX, param_struct_ptr=params, state_struct_ptr=state) normalize = ctx.get_param_or_state_ptr(builder, self, NORMALIZE, param_struct_ptr=params) # Convert array pointer to pointer to the fist element diff --git a/psyneulink/core/compositions/composition.py b/psyneulink/core/compositions/composition.py index 853029544d0..19e4f929353 100644 --- a/psyneulink/core/compositions/composition.py +++ b/psyneulink/core/compositions/composition.py @@ -11251,10 +11251,8 @@ def run( self.parameters.results._set(results, context) if self._is_learning(context): - # copies back matrix to pnl from param struct (after learning) - 
_comp_ex.writeback_params_to_pnl(params=_comp_ex._param_struct, - ids="llvm_param_ids", - condition=lambda p: p.name == "matrix") + # copies back matrix to pnl from state struct after learning + _comp_ex.writeback_params_to_pnl(condition=lambda p: p.name == "matrix") self._propagate_most_recent_context(context) diff --git a/psyneulink/core/llvm/builder_context.py b/psyneulink/core/llvm/builder_context.py index 8ed80d26c09..f5d50f67d0f 100644 --- a/psyneulink/core/llvm/builder_context.py +++ b/psyneulink/core/llvm/builder_context.py @@ -479,6 +479,8 @@ def _state_struct(p): return self.get_state_struct_type(val) if isinstance(val, ContentAddressableList): return ir.LiteralStructType(self.get_state_struct_type(x) for x in val) + if p.name == 'matrix': # Flatten matrix + val = np.asfarray(val).flatten() struct = self.convert_python_struct_to_llvm_ir(val) return ir.ArrayType(struct, p.history_min_length + 1) From f58e2c5f48a56a29c5300b9c139f8f41b9eda0f7 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Sun, 24 Mar 2024 17:02:45 -0400 Subject: [PATCH 046/410] tests: Fix AVX512 detection on Numpy>=1.26 (#2932) The output format of show_config() changed, but the library now provides and option to return the values in a dictionary. Signed-off-by: Jan Vesely --- conftest.py | 22 ++++++++++++++++++---- 1 file changed, 18 insertions(+), 4 deletions(-) diff --git a/conftest.py b/conftest.py index 93e50e97a46..abeb11da5c9 100644 --- a/conftest.py +++ b/conftest.py @@ -235,11 +235,25 @@ def mech_wrapper(x): @pytest.helpers.register def numpy_uses_avx512(): - out = io.StringIO() - with contextlib.redirect_stdout(out): - np.show_config() - return re.search(' found = .*AVX512.*', out.getvalue()) is not None + try: + # numpy >= 1.26 can return config info in a dictionary + config = np.show_config(mode="dicts") + + except TypeError: + # Numpy >=1.21 < 1.26 doesn't support 'mode' argument and + # prints CPU extensions in one line per category: + # baseline = ... + # found = ... + # not found = ... 
+ out = io.StringIO() + + with contextlib.redirect_stdout(out): + np.show_config() + + return re.search(' found = .*AVX512.*', out.getvalue()) is not None + else: + return any(ext.startswith("AVX512") for ext in config['SIMD Extensions']['found']) @pytest.helpers.register def expand_np_ndarray(arr): From 69a339f4007e803da0abb3d84b090502ce57989e Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Sun, 24 Mar 2024 17:28:10 -0400 Subject: [PATCH 047/410] llvm/execution: Restrict writeback API to state structures Signed-off-by: Jan Vesely --- conftest.py | 4 ++-- psyneulink/core/compositions/composition.py | 2 +- psyneulink/core/llvm/execution.py | 15 ++++++--------- 3 files changed, 9 insertions(+), 12 deletions(-) diff --git a/conftest.py b/conftest.py index abeb11da5c9..99736e738e1 100644 --- a/conftest.py +++ b/conftest.py @@ -199,7 +199,7 @@ def get_func_execution(func, func_mode, *, writeback:bool=True): # with numpy instances that share memory with the binary # structure used by the compiled function if writeback: - ex.writeback_params_to_pnl() + ex.writeback_state_to_pnl() return ex.execute @@ -210,7 +210,7 @@ def get_func_execution(func, func_mode, *, writeback:bool=True): # with numpy instances that share memory with the binary # structure used by the compiled function if writeback: - ex.writeback_params_to_pnl() + ex.writeback_state_to_pnl() return ex.cuda_execute diff --git a/psyneulink/core/compositions/composition.py b/psyneulink/core/compositions/composition.py index 19e4f929353..e5d083eacb9 100644 --- a/psyneulink/core/compositions/composition.py +++ b/psyneulink/core/compositions/composition.py @@ -11252,7 +11252,7 @@ def run( if self._is_learning(context): # copies back matrix to pnl from state struct after learning - _comp_ex.writeback_params_to_pnl(condition=lambda p: p.name == "matrix") + _comp_ex.writeback_state_to_pnl(condition=lambda p: p.name == "matrix") self._propagate_most_recent_context(context) diff --git a/psyneulink/core/llvm/execution.py b/psyneulink/core/llvm/execution.py index f28e88f5736..55838dd845d 100644 --- a/psyneulink/core/llvm/execution.py +++ b/psyneulink/core/llvm/execution.py @@ -114,16 +114,13 @@ def _get_compilation_param(self, name, init_method, arg): return struct - def writeback_params_to_pnl(self, params=None, ids:Optional[str]=None, condition:Callable=lambda p: True): + def writeback_state_to_pnl(self, condition:Callable=lambda p: True): - assert (params is None) == (ids is None), "Either both 'params' and 'ids' have to be set or neither" - - if params is None: - # Default to stateful params - params = self._state_struct - ids = "llvm_state_ids" - - self._copy_params_to_pnl(self._execution_contexts[0], self._obj, params, ids, condition) + self._copy_params_to_pnl(self._execution_contexts[0], + self._obj, + self._state_struct, + "llvm_state_ids", + condition) def _copy_params_to_pnl(self, context, component, params, ids:str, condition:Callable): From d1fb9f5c198f47e18e79c3b394b513358b8ee21a Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Wed, 3 Apr 2024 20:25:08 -0400 Subject: [PATCH 048/410] llvm/execution: Skip "ring_memory" parameter. It's custom constructed in compiled mode and needs to be modified to match the PNL parameter. 
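Some entries in the compiled state exist only on the compiled side — the ring buffer here, and the random state handled the same way — so the writeback loop has to skip them rather than copy them onto a Parameter. A simplified, dict-based stand-in for that loop (the real one walks ctypes structures and PNL Parameter objects):

COMPILED_ONLY = {"random_state", "ring_memory"}   # names taken from these patches

def writeback_state(compiled_state, pnl_params, condition=lambda name: True):
    # Copy compiled-state entries back to Python-side parameters, skipping
    # attributes that have no faithful Python equivalent.
    for name, value in compiled_state.items():
        if name in COMPILED_ONLY:
            continue
        if condition(name):
            pnl_params[name] = value

state = {"matrix": [0.1, 0.2, 0.3], "ring_memory": object(), "value": [1.0]}
params = {}
writeback_state(state, params, condition=lambda n: n == "matrix")
print(params)   # {'matrix': [0.1, 0.2, 0.3]}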
Signed-off-by: Jan Vesely --- psyneulink/core/llvm/execution.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/psyneulink/core/llvm/execution.py b/psyneulink/core/llvm/execution.py index 55838dd845d..3d2a104b7de 100644 --- a/psyneulink/core/llvm/execution.py +++ b/psyneulink/core/llvm/execution.py @@ -158,6 +158,14 @@ def _copy_params_to_pnl(self, context, component, params, ids:str, condition:Cal ids=ids, condition=condition) else: + # TODO: Reconstruct Python RandomState + if attribute == "random_state": + continue + + # TODO: Reconstruct Python memory storage + if attribute == "ring_memory": + continue + # Handle PNL parameters pnl_param = getattr(component.parameters, attribute) pnl_value = pnl_param.get(context=context) @@ -183,10 +191,6 @@ def _copy_params_to_pnl(self, context, component, params, ids:str, condition:Cal # Writeback parameter value if the condition matches elif condition(pnl_param): - # TODO: Reconstruct Python RandomState - if attribute == "random_state": - continue - # Replace empty structures with None if ctypes.sizeof(compiled_attribute_param) == 0: value = None From d09790dbe66e23a4cefe569a81a364d96e29b739 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Wed, 3 Apr 2024 20:27:00 -0400 Subject: [PATCH 049/410] tests/MemoryFunctions: Allow writeback on compiled tests. Remove 'writeback' parameter from function execution. Signed-off-by: Jan Vesely --- conftest.py | 8 +++----- tests/functions/test_memory.py | 4 +--- 2 files changed, 4 insertions(+), 8 deletions(-) diff --git a/conftest.py b/conftest.py index 99736e738e1..7585dfd9d97 100644 --- a/conftest.py +++ b/conftest.py @@ -191,15 +191,14 @@ def cuda_param(val): return pytest.param(val, marks=[pytest.mark.llvm, pytest.mark.cuda]) @pytest.helpers.register -def get_func_execution(func, func_mode, *, writeback:bool=True): +def get_func_execution(func, func_mode): if func_mode == 'LLVM': ex = pnlvm.execution.FuncExecution(func) # Calling writeback here will replace parameter values # with numpy instances that share memory with the binary # structure used by the compiled function - if writeback: - ex.writeback_state_to_pnl() + ex.writeback_state_to_pnl() return ex.execute @@ -209,8 +208,7 @@ def get_func_execution(func, func_mode, *, writeback:bool=True): # Calling writeback here will replace parameter values # with numpy instances that share memory with the binary # structure used by the compiled function - if writeback: - ex.writeback_state_to_pnl() + ex.writeback_state_to_pnl() return ex.cuda_execute diff --git a/tests/functions/test_memory.py b/tests/functions/test_memory.py index b1b7bf64f13..7c1dbbbc19c 100644 --- a/tests/functions/test_memory.py +++ b/tests/functions/test_memory.py @@ -144,9 +144,7 @@ def test_basic(func, variable, params, expected, benchmark, func_mode): if variable is philox_var: f.parameters.random_state.set(_SeededPhilox([module_seed])) - # Do not allow writeback. "ring_memory" used by DictionaryMemory is a - # custom structure, not a PNL parameter - EX = pytest.helpers.get_func_execution(f, func_mode, writeback=False) + EX = pytest.helpers.get_func_execution(f, func_mode) EX(variable) From 0b82f41a7576ddf14d54eca7bc589a8d716d6243 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Thu, 4 Apr 2024 01:36:01 -0400 Subject: [PATCH 050/410] llvm/execution: Skip writeback of special RTM params RecurrentTransferMechanism needs special parameters to match Python semantics. These don't have equivalents in PNL. 
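The reason such helper parameters exist at all: compiled projections do not pull values from their source output ports, so a recurrent mechanism keeps a copy of its previous output ("old_val") for the recurrent projection to read on the next pass. A toy illustration of that pattern in plain NumPy (the tanh transfer step is arbitrary and not PsyNeuLink code):

import numpy as np

def recurrent_step(x, w_input, w_recurrent, old_val):
    # The recurrent term reads the previous step's output, which compiled code
    # keeps in a dedicated state slot instead of querying the output port.
    return np.tanh(w_input @ x + w_recurrent @ old_val)

x = np.array([0.5, -0.2, 0.1])
w_input = np.eye(3)
w_recurrent = 0.1 * np.eye(3)
old_val = np.zeros(3)            # plays the role of the compiled "old_val" slot
for _ in range(3):
    old_val = recurrent_step(x, w_input, w_recurrent, old_val)
print(old_val)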
Signed-off-by: Jan Vesely --- psyneulink/core/llvm/execution.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/psyneulink/core/llvm/execution.py b/psyneulink/core/llvm/execution.py index 3d2a104b7de..1fcb8ee1c25 100644 --- a/psyneulink/core/llvm/execution.py +++ b/psyneulink/core/llvm/execution.py @@ -166,6 +166,13 @@ def _copy_params_to_pnl(self, context, component, params, ids:str, condition:Cal if attribute == "ring_memory": continue + # "old_val" is a helper storage in compiled RecurrentTransferMechanism + # to workaround the fact that compiled projections do no pull values + # from their source output ports + # recurrent projection of RTM is not a PNL parameter. + if attribute in {"old_val", "recurrent_projection"}: + continue + # Handle PNL parameters pnl_param = getattr(component.parameters, attribute) pnl_value = pnl_param.get(context=context) From a63de1d7b9d77ceefb7fd27200a612f94b058b5a Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Thu, 4 Apr 2024 01:37:46 -0400 Subject: [PATCH 051/410] llvm, tests: Enable writeback of state for all compiled mechanism tests Signed-off-by: Jan Vesely --- conftest.py | 20 ++++++++++++++++++-- psyneulink/core/llvm/execution.py | 2 +- 2 files changed, 19 insertions(+), 3 deletions(-) diff --git a/conftest.py b/conftest.py index 7585dfd9d97..f6251fceba5 100644 --- a/conftest.py +++ b/conftest.py @@ -220,9 +220,25 @@ def get_func_execution(func, func_mode): @pytest.helpers.register def get_mech_execution(mech, mech_mode): if mech_mode == 'LLVM': - return pnlvm.execution.MechExecution(mech).execute + ex = pnlvm.execution.MechExecution(mech) + + # Calling writeback here will replace parameter values + # with numpy instances that share memory with the binary + # structure used by the compiled function + ex.writeback_state_to_pnl() + + return ex.execute + elif mech_mode == 'PTX': - return pnlvm.execution.MechExecution(mech).cuda_execute + ex = pnlvm.execution.MechExecution(mech) + + # Calling writeback here will replace parameter values + # with numpy instances that share memory with the binary + # structure used by the compiled function + ex.writeback_state_to_pnl() + + return ex.cuda_execute + elif mech_mode == 'Python': def mech_wrapper(x): mech.execute(x) diff --git a/psyneulink/core/llvm/execution.py b/psyneulink/core/llvm/execution.py index 1fcb8ee1c25..9af8f8e86d5 100644 --- a/psyneulink/core/llvm/execution.py +++ b/psyneulink/core/llvm/execution.py @@ -213,7 +213,7 @@ def _copy_params_to_pnl(self, context, component, params, ids:str, condition:Cal if hasattr(old_value, 'shape'): value = value.reshape(old_value.shape) - pnl_param.set(value, context=context) + pnl_param.set(value, context=context, override=True) class CUDAExecution(Execution): From 267fb8aefad3136e42b844a96a12410c9314a7c0 Mon Sep 17 00:00:00 2001 From: kmantel <1592123+kmantel@users.noreply.github.com> Date: Tue, 9 Apr 2024 19:17:11 -0400 Subject: [PATCH 052/410] RecurrentTransferMechanism: make recurrent_projection a Parameter (#2940) --- psyneulink/core/compositions/composition.py | 2 +- .../transfer/recurrenttransfermechanism.py | 17 +++++------------ 2 files changed, 6 insertions(+), 13 deletions(-) diff --git a/psyneulink/core/compositions/composition.py b/psyneulink/core/compositions/composition.py index e5d083eacb9..e935f112458 100644 --- a/psyneulink/core/compositions/composition.py +++ b/psyneulink/core/compositions/composition.py @@ -12212,7 +12212,7 @@ def execute( if clamp_input: if node in hard_clamp_inputs: # clamp = HARD_CLAMP --> "turn off" recurrent 
projection - if hasattr(node, "recurrent_projection"): + if node.recurrent_projection is not None: node.recurrent_projection.sender.parameters.value._set([0.0], context) elif node in no_clamp_inputs: for input_port in node.input_ports: diff --git a/psyneulink/library/components/mechanisms/processing/transfer/recurrenttransfermechanism.py b/psyneulink/library/components/mechanisms/processing/transfer/recurrenttransfermechanism.py index 4a3313c413d..50be6e75abc 100644 --- a/psyneulink/library/components/mechanisms/processing/transfer/recurrenttransfermechanism.py +++ b/psyneulink/library/components/mechanisms/processing/transfer/recurrenttransfermechanism.py @@ -181,7 +181,6 @@ """ import copy -import itertools import numbers import types import warnings @@ -275,7 +274,7 @@ def _recurrent_transfer_mechanism_matrix_setter(value, owning_component=None, co # the existing behavior. Unsure if this is actually correct though # KDM 8/7/18: removing the below because it has bad side effects for _instantiate_from_context, and it's not clear # that it's the correct behavior. Similar reason for removing/not implementing auto/hetero setters - # if hasattr(owning_component, "recurrent_projection"): + # if owning_component.recurrent_projection is not None: # owning_component.recurrent_projection.parameter_ports["matrix"].function.parameters.previous_value._set(value, base_execution_id) try: @@ -636,6 +635,7 @@ class Parameters(TransferMechanism.Parameters): read_only=True, structural=True, ) + recurrent_projection = Parameter(None, stateful=False, loggable=False, structural=True) standard_output_ports = TransferMechanism.standard_output_ports.copy() standard_output_ports.extend([{NAME:ENERGY_OUTPUT_PORT_NAME}, {NAME:ENTROPY_OUTPUT_PORT_NAME}]) @@ -1014,7 +1014,7 @@ def matrix(self, val): # simplified version of standard setter (in Component.py) # KDM 10/12/18: removing below because it doesn't seem to be correct, and also causes # unexpected values to be set to previous_value # KDM 7/1/19: reinstating below - if hasattr(self, "recurrent_projection"): + if self.recurrent_projection is not None: self.recurrent_projection.parameter_ports["matrix"].function.previous_value = val self.recurrent_projection.parameter_ports["matrix"].function.reset = val @@ -1028,7 +1028,7 @@ def auto(self): def auto(self, val): self.parameters.auto._set(val, self.most_recent_context) - if hasattr(self, "recurrent_projection") and 'hetero' in self._parameter_ports: + if self.recurrent_projection is not None and 'hetero' in self._parameter_ports: self.recurrent_projection.parameter_ports["matrix"].function.previous_value = self.matrix @property @@ -1039,7 +1039,7 @@ def hetero(self): def hetero(self, val): self.parameters.hetero._set(val, self.most_recent_context) - if hasattr(self, "recurrent_projection") and 'auto' in self._parameter_ports: + if self.recurrent_projection is not None and 'auto' in self._parameter_ports: self.recurrent_projection.parameter_ports["matrix"].function.previous_value = self.matrix_param @property @@ -1356,10 +1356,3 @@ def _gen_llvm_output_ports(self, ctx, builder, value, prev_val_ptr = ctx.get_param_or_state_ptr(builder, self, "old_val", state_struct_ptr=mech_state) builder.store(builder.load(mech_out), prev_val_ptr) return ret - - @property - def _dependent_components(self): - return list(itertools.chain( - super()._dependent_components, - [self.recurrent_projection], - )) From 87a5f76862cb18776e056ead86c12aedcf6d73a6 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Tue, 9 Apr 2024 22:12:26 -0400 
Subject: [PATCH 053/410] llvm, mechanism: Refactor generation of param/state structs Pure refactoring, no function change. Signed-off-by: Jan Vesely --- .../core/components/mechanisms/mechanism.py | 49 ++++++++++--------- 1 file changed, 25 insertions(+), 24 deletions(-) diff --git a/psyneulink/core/components/mechanisms/mechanism.py b/psyneulink/core/components/mechanisms/mechanism.py index 3a4bafca984..dc2d0e98a05 100644 --- a/psyneulink/core/components/mechanisms/mechanism.py +++ b/psyneulink/core/components/mechanisms/mechanism.py @@ -2836,34 +2836,50 @@ def move_item_specific_params_to_specific_sub_dict(outer_dict, def _get_param_ids(self): if len(self._parameter_ports) == 0: return super()._get_param_ids() + # FIXME: parameter ports should be part of generated params return ["_parameter_ports"] + super()._get_param_ids() def _get_param_struct_type(self, ctx): - ports_params = (ctx.get_param_struct_type(s) for s in self._parameter_ports) - ports_param_struct = pnlvm.ir.LiteralStructType(ports_params) mech_param_struct = ctx.get_param_struct_type(super()) if len(self._parameter_ports) == 0: return mech_param_struct - return pnlvm.ir.LiteralStructType((ports_param_struct, - *mech_param_struct)) + ports_params = (ctx.get_param_struct_type(s) for s in self._parameter_ports) + ports_param_struct = pnlvm.ir.LiteralStructType(ports_params) + return pnlvm.ir.LiteralStructType((ports_param_struct, *mech_param_struct)) + + def _get_param_initializer(self, context): + mech_param_init = super()._get_param_initializer(context) + if len(self._parameter_ports) == 0: + return mech_param_init + + port_param_init = tuple(s._get_param_initializer(context) for s in self._parameter_ports) + return (port_param_init, *mech_param_init) def _get_state_ids(self): if len(self._parameter_ports) == 0: return super()._get_state_ids() + # FIXME: parameter ports should be part of generated state return ["_parameter_ports"] + super()._get_state_ids() def _get_state_struct_type(self, ctx): - ports_state = (ctx.get_state_struct_type(s) for s in self._parameter_ports) - ports_state_struct = pnlvm.ir.LiteralStructType(ports_state) mech_state_struct = ctx.get_state_struct_type(super()) if len(self._parameter_ports) == 0: return mech_state_struct - return pnlvm.ir.LiteralStructType((ports_state_struct, - *mech_state_struct)) + ports_state = (ctx.get_state_struct_type(s) for s in self._parameter_ports) + ports_state_struct = pnlvm.ir.LiteralStructType(ports_state) + return pnlvm.ir.LiteralStructType((ports_state_struct, *mech_state_struct)) + + def _get_state_initializer(self, context): + mech_state_init = super()._get_state_initializer(context) + if len(self._parameter_ports) == 0: + return mech_state_init + + port_state_init = tuple(s._get_state_initializer(context) for s in self._parameter_ports) + return (port_state_init, *mech_state_init) def _get_output_struct_type(self, ctx): output_type_list = (ctx.get_output_struct_type(port) for port in self.output_ports) @@ -2889,22 +2905,6 @@ def _get_data_part_of_input_struct(p): return pnlvm.ir.LiteralStructType(input_type_list) - def _get_param_initializer(self, context): - port_param_init = tuple(s._get_param_initializer(context) for s in self._parameter_ports) - mech_param_init = super()._get_param_initializer(context) - if len(self._parameter_ports) == 0: - return mech_param_init - - return (port_param_init, *mech_param_init) - - def _get_state_initializer(self, context): - port_state_init = tuple(s._get_state_initializer(context) for s in self._parameter_ports) - 
mech_state_init = super()._get_state_initializer(context) - if len(self._parameter_ports) == 0: - return mech_state_init - - return (port_state_init, *mech_state_init) - def _gen_llvm_ports(self, ctx, builder, ports, group, get_output_ptr, get_input_data_ptr, mech_params, mech_state, mech_input): @@ -2931,6 +2931,7 @@ def _gen_llvm_ports(self, ctx, builder, ports, group, array_1d = pnlvm.ir.ArrayType(p_input_data.type.pointee, 1) assert array_1d == p_function.args[2].type.pointee, \ "{} vs. {}".format(p_function.args[2].type.pointee, p_input_data.type.pointee) + # restrict shape matching to casting 1d values to 2d arrays # for Control/Gating signals assert len(p_function.args[2].type.pointee) == 1 From 8da5d714eb7979158e2fb4d63664d73e76c5a40e Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Tue, 9 Apr 2024 20:05:39 -0400 Subject: [PATCH 054/410] llvm, RecurrentTransferMechanism: Remove explicit listing of recurrent_projection in compiled structures It's now listed in parameters. Allow writeback of the projection parameters. Signed-off-by: Jan Vesely --- psyneulink/core/llvm/execution.py | 3 +-- .../transfer/recurrenttransfermechanism.py | 21 +++---------------- 2 files changed, 4 insertions(+), 20 deletions(-) diff --git a/psyneulink/core/llvm/execution.py b/psyneulink/core/llvm/execution.py index 9af8f8e86d5..b3f65c8f4a5 100644 --- a/psyneulink/core/llvm/execution.py +++ b/psyneulink/core/llvm/execution.py @@ -169,8 +169,7 @@ def _copy_params_to_pnl(self, context, component, params, ids:str, condition:Cal # "old_val" is a helper storage in compiled RecurrentTransferMechanism # to workaround the fact that compiled projections do no pull values # from their source output ports - # recurrent projection of RTM is not a PNL parameter. - if attribute in {"old_val", "recurrent_projection"}: + if attribute == "old_val": continue # Handle PNL parameters diff --git a/psyneulink/library/components/mechanisms/processing/transfer/recurrenttransfermechanism.py b/psyneulink/library/components/mechanisms/processing/transfer/recurrenttransfermechanism.py index 50be6e75abc..54aa1167a32 100644 --- a/psyneulink/library/components/mechanisms/processing/transfer/recurrenttransfermechanism.py +++ b/psyneulink/library/components/mechanisms/processing/transfer/recurrenttransfermechanism.py @@ -1243,36 +1243,21 @@ def _learning_signal_source(self): """ return self.output_port - def _get_param_ids(self): - return super()._get_param_ids() + ["recurrent_projection"] - - def _get_param_struct_type(self, ctx): - transfer_t = ctx.get_param_struct_type(super()) - projection_t = ctx.get_param_struct_type(self.recurrent_projection) - return pnlvm.ir.LiteralStructType([*transfer_t.elements, projection_t]) - def _get_state_ids(self): - return super()._get_state_ids() + ["old_val", "recurrent_projection"] + return super()._get_state_ids() + ["old_val"] def _get_state_struct_type(self, ctx): transfer_t = ctx.get_state_struct_type(super()) - projection_t = ctx.get_state_struct_type(self.recurrent_projection) return_t = ctx.get_output_struct_type(self) - return pnlvm.ir.LiteralStructType([*transfer_t.elements, return_t, projection_t]) - - def _get_param_initializer(self, context): - transfer_params = super()._get_param_initializer(context) - projection_params = self.recurrent_projection._get_param_initializer(context) - return (*transfer_params, projection_params) + return pnlvm.ir.LiteralStructType([*transfer_t.elements, return_t]) def _get_state_initializer(self, context): transfer_init = super()._get_state_initializer(context) 
- projection_init = self.recurrent_projection._get_state_initializer(context) # Initialize to OutputPort defaults. # That is what the recurrent projection finds. retval_init = (tuple(op.parameters.value.get(context)) if not np.isscalar(op.parameters.value.get(context)) else op.parameters.value.get(context) for op in self.output_ports) - return (*transfer_init, tuple(retval_init), projection_init) + return (*transfer_init, tuple(retval_init)) def _gen_llvm_function_reset(self, ctx, builder, params, state, arg_in, arg_out, *, tags:frozenset): assert "reset" in tags From ea896ae73fd030bf9709e4454a3c6151814a5d94 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Wed, 10 Apr 2024 11:00:49 -0400 Subject: [PATCH 055/410] llvm, Composition: Do not include AutoAssociative Projections in composition projection list The are part of their respective mechanism's structures. This saves ~8B per projection in compiled composition param structure, and 8-72B+ per projection in compiled composition state structure. Signed-off-by: Jan Vesely --- psyneulink/core/compositions/composition.py | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) diff --git a/psyneulink/core/compositions/composition.py b/psyneulink/core/compositions/composition.py index e935f112458..f32125b6f9a 100644 --- a/psyneulink/core/compositions/composition.py +++ b/psyneulink/core/compositions/composition.py @@ -13052,12 +13052,21 @@ def _clean_up_as_agent_rep(self, context, alt_controller=None): @property def _inner_projections(self): - # PNL considers afferent projections to input_CIM to be part - # of the nested composition. Filter them out. + # Filter out projections not used in compiled variant of this composition: + # * afferent projections to input_CIM and parameter_CIM. + # These are included in node wrapper of the nested composition node, + # and included in outer composition + # * efferent projections from output_CIM. + # Same as above, they are considered part of the outer composition, + # and are executed in node wrappers of the receiving nodes + # * Autoassociative projections (RTM, LCA) + # These are executed as part of their respective mechanism and are + # included in the compiled structures of their respective mechanisms. return (p for p in self.projections - if p.receiver.owner is not self.input_CIM and - p.receiver.owner is not self.parameter_CIM and - p.sender.owner is not self.output_CIM) + if p.receiver.owner is not self.input_CIM and + p.receiver.owner is not self.parameter_CIM and + p.sender.owner is not self.output_CIM and + p.sender.owner is not p.receiver.owner) def _get_param_ids(self): return ["nodes", "projections"] + super()._get_param_ids() From dbafa35a8715684853c22423e22c1aaa3e193ab7 Mon Sep 17 00:00:00 2001 From: David Turner Date: Fri, 12 Apr 2024 14:27:57 -0400 Subject: [PATCH 056/410] Force skip of test_lvoc_features_function when pytorch not available. For some reason the pytest.mark.pytorch is not working for this test and its not being skipped. Lets try to force things. --- tests/composition/test_control.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/tests/composition/test_control.py b/tests/composition/test_control.py index 2183e800bd2..4d3ffb9516d 100644 --- a/tests/composition/test_control.py +++ b/tests/composition/test_control.py @@ -1842,6 +1842,13 @@ def test_lvoc_both_predictors_specs(self): @pytest.mark.pytorch def test_lvoc_features_function(self): + + # Skip if pytorch is not available. 
For some reason the pytest.mark.pytorch is not working as expected for this + # test, so we are checking for the availability of torch here and skipping the test if it is not available. + from psyneulink import torch_available + if not torch_available: + pytest.skip("Pytorch is not installed") + m1 = pnl.TransferMechanism(input_ports=["InputPort A", "InputPort B"]) m2 = pnl.TransferMechanism() c = pnl.Composition() From 013dcc5a9fffe2e8bb99d9f7406883cec2788ba3 Mon Sep 17 00:00:00 2001 From: David Turner Date: Fri, 12 Apr 2024 14:37:09 -0400 Subject: [PATCH 057/410] Cleanup checking for torch tensors. As per Jan's suggestion, using torch.is_tensor for checking for torch tensores rather than duck typing and catching exceptions. Makes the code cleaner and probably faster. --- psyneulink/core/globals/utilities.py | 40 ++++++++-------------------- 1 file changed, 11 insertions(+), 29 deletions(-) diff --git a/psyneulink/core/globals/utilities.py b/psyneulink/core/globals/utilities.py index 8e0298d770a..cd70a7b1760 100644 --- a/psyneulink/core/globals/utilities.py +++ b/psyneulink/core/globals/utilities.py @@ -1022,7 +1022,10 @@ def safe_create_np_array(value): # See https://numpy.org/neps/nep-0034-infer-dtype-is-object.html try: try: - return np.asarray(value) + if torch and torch.is_tensor(value): + return value + else: + return np.asarray(value) except np.VisibleDeprecationWarning: return np.asarray(value, dtype=object) except ValueError as e: @@ -1046,29 +1049,14 @@ def safe_create_np_array(value): else: raise - except RuntimeError as e: - - # If we get a RuntimeError, it is probably because we are trying to convert a torch tensor. - # We can't convert to a numpy array without breaking pytorch autograd, so we need to return the - # original value - if "call numpy() on Tensor" in str(e) and torch: - return value - else: - raise e - value = safe_create_np_array(value) if dimension == 1: - try: + if torch and torch.is_tensor(value): + value = torch.atleast_1d(value) + else: value = np.atleast_1d(value) - except RuntimeError as e: - # If we get a RuntimeError, this is probably a BatchedTensorImpl from torch\vmap - # We can't convert to a numpy array and use np.atleast_2d, so we need to use - # torch's atleast_2d function instead - if torch: - value = torch.atleast_1d(value) - else: - raise e + elif dimension == 2: # Array is made up of non-uniform elements, so treat as 2d array and pass if ( @@ -1078,16 +1066,10 @@ def safe_create_np_array(value): ): pass else: - try: + if torch and torch.is_tensor(value): + value = torch.atleast_2d(value) + else: value = np.atleast_2d(value) - except RuntimeError as e: - # If we get a RuntimeError, this is probably a BatchedTensorImpl from torch\vmap - # We can't convert to a numpy array and use np.atleast_2d, so we need to use - # torch's atleast_2d function instead - if torch: - value = torch.atleast_2d(value) - else: - raise e elif dimension is not None: raise UtilitiesError("dimension param ({0}) must be None, 1, or 2".format(dimension)) From 8172b5a1e2627c6d3249b977632e44ef55f7b36c Mon Sep 17 00:00:00 2001 From: David Turner Date: Fri, 12 Apr 2024 18:38:49 -0400 Subject: [PATCH 058/410] Fix issue with older pytorch. GradientOptimization doesn't work with PyTorch less than 2.0. Don't think we need to add lower pin since, raise a ValueError instead with a message. 
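The fix follows a guarded-import pattern: torch itself may be installed while torch.func.grad (added in PyTorch 2.0) is not, so the two imports are attempted separately and the failure is deferred until a gradient is actually needed. A condensed sketch of that pattern (the function name and error text below are illustrative, not the module's exact code):

try:
    import torch
except ImportError:
    torch = None

try:
    from torch.func import grad      # only available in PyTorch >= 2.0
except ImportError:
    grad = None

def build_gradient_function(objective):
    # objective is expected to return a scalar torch tensor.
    if grad is None:
        raise ValueError(
            "PyTorch >= 2.0 is required to compute gradients automatically; "
            "upgrade torch or pass an explicit gradient_function."
        )
    return grad(lambda x: objective(x))

Failing at use time rather than at import keeps torch an optional dependency and avoids adding a lower version pin to requirements.txt.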
--- .../nonstateful/optimizationfunctions.py | 12 ++++++---- tests/composition/test_control.py | 24 ++++++++++++++----- 2 files changed, 26 insertions(+), 10 deletions(-) diff --git a/psyneulink/core/components/functions/nonstateful/optimizationfunctions.py b/psyneulink/core/components/functions/nonstateful/optimizationfunctions.py index 0a2447723ad..ec4ddd9342d 100644 --- a/psyneulink/core/components/functions/nonstateful/optimizationfunctions.py +++ b/psyneulink/core/components/functions/nonstateful/optimizationfunctions.py @@ -32,12 +32,16 @@ import numpy as np -# Conditionally import torch try: import torch - from torch.func import grad except ImportError: torch = None + +# Conditionally import torch, this is a bit more strict than the usual conditional import of pytorch. This is because +# GradientOptimization needs torch.func.grad, which is not available in pytorch versions less than 2.0.0. +try: + from torch.func import grad +except ImportError: grad = None from beartype import beartype @@ -1265,8 +1269,8 @@ def reset(self, default_variable=None, objective_function=None, context=None, ** "specifying a gradient_function.") if grad is None: - raise ValueError("PyTorch version is too old. Please upgrade PyTorch to use GradientOptimization without " - "specifying a gradient_function.") + raise ValueError("PyTorch version is too old. Please upgrade PyTorch to >= 2.0 to use " + "GradientOptimization without specifying a gradient_function.") try: # Need to wrap objective_function in a lambda to pass to grad because it needs to return a torch tensor diff --git a/tests/composition/test_control.py b/tests/composition/test_control.py index 4d3ffb9516d..7fcc62d5b44 100644 --- a/tests/composition/test_control.py +++ b/tests/composition/test_control.py @@ -1843,11 +1843,8 @@ def test_lvoc_both_predictors_specs(self): @pytest.mark.pytorch def test_lvoc_features_function(self): - # Skip if pytorch is not available. For some reason the pytest.mark.pytorch is not working as expected for this - # test, so we are checking for the availability of torch here and skipping the test if it is not available. - from psyneulink import torch_available - if not torch_available: - pytest.skip("Pytorch is not installed") + + m1 = pnl.TransferMechanism(input_ports=["InputPort A", "InputPort B"]) m2 = pnl.TransferMechanism() @@ -1855,13 +1852,28 @@ def test_lvoc_features_function(self): c.add_node(m1, required_roles=pnl.NodeRole.INPUT) c.add_node(m2, required_roles=pnl.NodeRole.INPUT) c._analyze_graph() - lvoc = pnl.OptimizationControlMechanism(agent_rep=pnl.RegressionCFA, + + + ocm_kwargs = dict(agent_rep=pnl.RegressionCFA, state_features=[m1.input_ports[0], m1.input_ports[1], m2.input_port, m2], state_feature_function=pnl.LinearCombination(offset=10.0), objective_mechanism=pnl.ObjectiveMechanism( monitor=[m1, m2]), function=pnl.GradientOptimization(max_iterations=1), control_signals=[(pnl.SLOPE, m1), (pnl.SLOPE, m2)]) + + try: + from torch.func import grad + lvoc = pnl.OptimizationControlMechanism(**ocm_kwargs) + + # If pytorch is too old (< 2.0), GradientOptimization will raise an error to upgrade pytorch to have support for + # torch.func.grad. 
+ except ImportError: + with pytest.raises(ValueError): + lvoc = pnl.OptimizationControlMechanism(**ocm_kwargs) + + return + c.add_node(lvoc) input_dict = {m1: [[1], [1]], m2: [1]} From 0b121fceab357eb85479a26383e8719f2980a449 Mon Sep 17 00:00:00 2001 From: David Turner Date: Fri, 12 Apr 2024 18:41:53 -0400 Subject: [PATCH 059/410] Formatting --- tests/composition/test_control.py | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/tests/composition/test_control.py b/tests/composition/test_control.py index 7fcc62d5b44..3aafb52090f 100644 --- a/tests/composition/test_control.py +++ b/tests/composition/test_control.py @@ -1853,14 +1853,13 @@ def test_lvoc_features_function(self): c.add_node(m2, required_roles=pnl.NodeRole.INPUT) c._analyze_graph() - ocm_kwargs = dict(agent_rep=pnl.RegressionCFA, - state_features=[m1.input_ports[0], m1.input_ports[1], m2.input_port, m2], - state_feature_function=pnl.LinearCombination(offset=10.0), - objective_mechanism=pnl.ObjectiveMechanism( - monitor=[m1, m2]), - function=pnl.GradientOptimization(max_iterations=1), - control_signals=[(pnl.SLOPE, m1), (pnl.SLOPE, m2)]) + state_features=[m1.input_ports[0], m1.input_ports[1], m2.input_port, m2], + state_feature_function=pnl.LinearCombination(offset=10.0), + objective_mechanism=pnl.ObjectiveMechanism( + monitor=[m1, m2]), + function=pnl.GradientOptimization(max_iterations=1), + control_signals=[(pnl.SLOPE, m1), (pnl.SLOPE, m2)]) try: from torch.func import grad From d21cc0650d60757abf591ad6c50dca7c285902b1 Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Fri, 9 Feb 2024 03:45:43 +0000 Subject: [PATCH 060/410] Composition: inherit Parameters from parent class --- psyneulink/core/compositions/composition.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/psyneulink/core/compositions/composition.py b/psyneulink/core/compositions/composition.py index f32125b6f9a..0b5b721f47a 100644 --- a/psyneulink/core/compositions/composition.py +++ b/psyneulink/core/compositions/composition.py @@ -3918,8 +3918,7 @@ class Composition(Composition_Base, metaclass=ComponentsMeta): _model_spec_generic_type_name = 'graph' - - class Parameters(ParametersBase): + class Parameters(Composition_Base.Parameters): """ Attributes ---------- From e66d62430245d69e2efad6141ce0ea77a7ed3284 Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Tue, 25 Jul 2023 19:54:13 +0000 Subject: [PATCH 061/410] Parameter: fix arg comment --- psyneulink/core/globals/parameters.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/psyneulink/core/globals/parameters.py b/psyneulink/core/globals/parameters.py index 49d8fd9c11a..df82d726493 100644 --- a/psyneulink/core/globals/parameters.py +++ b/psyneulink/core/globals/parameters.py @@ -978,7 +978,7 @@ def __init__( reference=False, dependencies=None, initializer=None, - port=None, + port=None, # if modulated, set to the ParameterPort mdf_name=None, specify_none=False, _owner=None, @@ -988,7 +988,6 @@ def __init__( # attributes will be taken from _inherited_source=None, _user_specified=False, - # if modulated, set to the ParameterPort **kwargs ): if isinstance(aliases, str): From 593400a50a703daa97098eab75945de9697f340f Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Wed, 7 Jun 2023 03:26:45 +0000 Subject: [PATCH 062/410] Parameters: clean up getter dev docs --- psyneulink/core/globals/parameters.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/psyneulink/core/globals/parameters.py 
b/psyneulink/core/globals/parameters.py index df82d726493..9c9eec43c0f 100644 --- a/psyneulink/core/globals/parameters.py +++ b/psyneulink/core/globals/parameters.py @@ -260,12 +260,13 @@ def _recurrent_transfer_mechanism_matrix_setter(value, owning_component=None, co +------------------+---------------+--------------------------------------------+-----------------------------------------+ | getter | None |hook that allows overriding the retrieval of|kwargs self, owning_component, and | | | |values based on a supplied method |context will be passed in if your | -| | |(e.g. _output_port_variable_getter) |method uses them. self - the Parameter | -| | | |calling the setter; owning_component - | -| | | |the Component to which the Parameter | -| | | |belongs; context - the context | -| | | |the setter is called with; should return | -| | | |the value | +| | |(e.g. _output_port_variable_getter) |method uses them. | +| | | |self: the Parameter calling the setter | +| | | |owning_component: the Component to which | +| | | | the Parameter belongs | +| | | |context: the context the setter is called| +| | | | with | +| | | |Getters must return the resulting value | +------------------+---------------+--------------------------------------------+-----------------------------------------+ | setter | None |hook that allows overriding the setting of |should take a positional argument; kwargs| | | |values based on a supplied method (e.g. |self, owning_component, and context | From 52827ff1b9569b6d9de47940839dcfd151fc5ead Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Thu, 3 Aug 2023 02:55:44 +0000 Subject: [PATCH 063/410] Parameter: _set_default_value: add 'directly' to bypass parse/validate --- psyneulink/core/globals/parameters.py | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/psyneulink/core/globals/parameters.py b/psyneulink/core/globals/parameters.py index 9c9eec43c0f..2c62a4fae2a 100644 --- a/psyneulink/core/globals/parameters.py +++ b/psyneulink/core/globals/parameters.py @@ -1726,9 +1726,18 @@ def _initialize_from_context(self, context=None, base_context=Context(execution_ # KDM 7/30/18: the below is weird like this in order to use this like a property, but also include it # in the interface for user simplicity: that is, inheritable (by this Parameter's children or from its parent), # visible in a Parameter's repr, and easily settable by the user - def _set_default_value(self, value): - value = self._parse(value) - self._validate(value) + def _set_default_value(self, value, directly=False): + """ + Set default_value + + Args: + value: new default_value + directly (bool, optional): if False, passes **value** + through parse and validation steps. Defaults to False. 
+ """ + if not directly: + value = self._parse(value) + self._validate(value) super().__setattr__('default_value', value) From 65911368d95cc304cb93a04beaf3317517cdaeaf Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Tue, 25 Jul 2023 22:29:37 +0000 Subject: [PATCH 064/410] Parameter: bypass any setters when making uninherited --- psyneulink/core/globals/parameters.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/psyneulink/core/globals/parameters.py b/psyneulink/core/globals/parameters.py index 2c62a4fae2a..459089e7c2d 100644 --- a/psyneulink/core/globals/parameters.py +++ b/psyneulink/core/globals/parameters.py @@ -1246,7 +1246,7 @@ def _restore_inherited_attrs(self, exclusions=None): attr not in exclusions and getattr(self, attr) is getattr(self._parent, attr) ): - setattr(self, attr, self._inherited_attrs_cache[attr]) + super().__setattr__(attr, self._inherited_attrs_cache[attr]) @property def _parent(self): From 5706c83b908df2fce6a5696f39531ab5826606fa Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Thu, 27 Jul 2023 05:11:00 +0000 Subject: [PATCH 065/410] Parameter: __deepcopy__: always deepcopy non-inherited default_value avoid some unintential data-sharing among Parameters --- psyneulink/core/globals/parameters.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/psyneulink/core/globals/parameters.py b/psyneulink/core/globals/parameters.py index 459089e7c2d..5ae99c0a18a 100644 --- a/psyneulink/core/globals/parameters.py +++ b/psyneulink/core/globals/parameters.py @@ -1092,6 +1092,17 @@ def __deepcopy__(self, memo): _inherited=self._inherited, _user_specified=self._user_specified, ) + # TODO: this is a quick fix to make sure default values are + # always copied. should be integrated with future changes to + # deepcopy + # None indicates was not already deepcopied above + if shared_types is None and not self._inherited: + # use of memo here relies on the fact that + # copy_parameter_value does not currently add + # self.default_value. 
Otherwise it would reuse the shared + # value from above + result._set_default_value(copy.deepcopy(self.default_value, memo), directly=True) + memo[id(self)] = result return result From ca7d859215a75bfb1738cf0598733e03d9f288dd Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Thu, 27 Jul 2023 05:08:25 +0000 Subject: [PATCH 066/410] Component: only explicitly set new parameter defaults --- psyneulink/core/components/component.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/psyneulink/core/components/component.py b/psyneulink/core/components/component.py index 56e616a6663..848e6183663 100644 --- a/psyneulink/core/components/component.py +++ b/psyneulink/core/components/component.py @@ -2282,15 +2282,15 @@ def _initialize_parameters(self, context=None, **param_defaults): if value is not None or parameter_obj.specify_none: defaults[name] = value - for k in defaults: - if defaults[k] is None: - continue - defaults[k] = copy_parameter_value( - defaults[k], - shared_types=shared_types - ) - - self.defaults = Defaults(owner=self, **defaults) + self.defaults = Defaults(owner=self) + for k in sorted(defaults, key=self.parameters._dependency_order_key(names=True)): + if defaults[k] is not None: + defaults[k] = copy_parameter_value( + defaults[k], + shared_types=shared_types + ) + parameter_obj = getattr(self.parameters, k) + parameter_obj._set_default_value(defaults[k]) for p in filter(lambda x: not isinstance(x, (ParameterAlias, SharedParameter)), self.parameters._in_dependency_order): # copy spec so it is not overwritten later From e238ef56d8eeb7988368b9d5ee54f92ad8d99a4b Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Thu, 8 Feb 2024 22:45:41 +0000 Subject: [PATCH 067/410] ParametersTemplate: replace _children with WeakSet --- psyneulink/core/globals/parameters.py | 38 ++++++--------------------- 1 file changed, 8 insertions(+), 30 deletions(-) diff --git a/psyneulink/core/globals/parameters.py b/psyneulink/core/globals/parameters.py index 5ae99c0a18a..3f810b3aa76 100644 --- a/psyneulink/core/globals/parameters.py +++ b/psyneulink/core/globals/parameters.py @@ -498,8 +498,7 @@ def __init__(self, owner, parent=None): self._owner = owner self._parent = parent if isinstance(self._parent, ParametersTemplate): - # using weakref to allow garbage collection of unused children - self._parent._children.add(weakref.ref(self)) + self._parent._children.add(self) # create list of params currently existing self._params = set() @@ -512,7 +511,7 @@ def __init__(self, owner, parent=None): if self._is_parameter(k): self._params.add(k) - self._children = set() + self._children = weakref.WeakSet() def __repr__(self): return '{0} :\n{1}'.format(super().__repr__(), str(self)) @@ -531,12 +530,6 @@ def __deepcopy__(self, memo): memo[id(self)] = newone return newone - def __del__(self): - try: - self._parent._children.remove(weakref.ref(self)) - except (AttributeError, KeyError): - pass - def __contains__(self, item): return item in itertools.chain.from_iterable(self.values(show_all=True).items()) @@ -554,16 +547,9 @@ def _is_parameter(self, param_name): def _register_parameter(self, param_name): self._params.add(param_name) - to_remove = set() for child in self._children: - if child() is None: - to_remove.add(child) - else: - child()._register_parameter(param_name) - - for rem in to_remove: - self._children.remove(rem) + child._register_parameter(param_name) def values(self, show_all=False): """ @@ -1213,22 +1199,14 @@ def _inherited(self, value): else: # This is a rare 
operation, so we can just immediately # trickle down sources without performance issues. - # Children are stored as weakref.ref, so call to deref children = [*self._owner._children] while len(children) > 0: - next_child_ref = children.pop() - next_child = next_child_ref() - - if next_child is None: - # child must have been garbage collected, remove - # here optionally - pass - else: - next_child = getattr(next_child, self.name) + next_child = children.pop() + next_child = getattr(next_child, self.name) - if next_child._inherited: - next_child._inherit_from(self) - children.extend(next_child._owner._children) + if next_child._inherited: + next_child._inherit_from(self) + children.extend(next_child._owner._children) self._restore_inherited_attrs() From cc7505e08b4dd1f66277d7b44ca9d6c1a7be4fae Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Thu, 8 Feb 2024 23:28:04 +0000 Subject: [PATCH 068/410] ParametersBase: cache names of attrs that do not exist reduces wasted time repeating searches --- psyneulink/core/globals/parameters.py | 44 +++++++++++++++++++++------ tests/misc/test_parameters.py | 6 ++-- 2 files changed, 38 insertions(+), 12 deletions(-) diff --git a/psyneulink/core/globals/parameters.py b/psyneulink/core/globals/parameters.py index 3f810b3aa76..85ca1b8f5bf 100644 --- a/psyneulink/core/globals/parameters.py +++ b/psyneulink/core/globals/parameters.py @@ -547,10 +547,17 @@ def _is_parameter(self, param_name): def _register_parameter(self, param_name): self._params.add(param_name) + self._nonexistent_attr_cache.discard(param_name) for child in self._children: child._register_parameter(param_name) + def _invalidate_nonexistent_attr_cache(self, attr): + self._nonexistent_attr_cache.discard(attr) + + for child in self._children: + child._invalidate_nonexistent_attr_cache(attr) + def values(self, show_all=False): """ Arguments @@ -2087,6 +2094,7 @@ class ParametersBase(ParametersTemplate): def __init__(self, owner, parent=None): self._initializing = True + self._nonexistent_attr_cache = set() super().__init__(owner=owner, parent=parent) @@ -2154,16 +2162,22 @@ def throw_error(): f"No attribute '{attr}' exists in the parameter hierarchy{owner_string}." ) from None - # underscored attributes don't need special handling because - # they're not Parameter objects. 
This includes parsing and - # validation methods - if attr[0] == '_': + if ( + attr in self._nonexistent_attr_cache + # attr can't be in __dict__ or __getattr__ would not be called + or ( + self._parent is not None + and attr in self._parent._nonexistent_attr_cache + ) + ): + self._nonexistent_attr_cache.add(attr) + throw_error() + + try: + return getattr(self._parent, attr) + except AttributeError: + self._nonexistent_attr_cache.add(attr) throw_error() - else: - try: - return getattr(self._parent, attr) - except AttributeError: - throw_error() def __setattr__(self, attr, value): # handles parsing: Parameter or ParameterAlias housekeeping if assigned, or creation of a Parameter @@ -2260,6 +2274,18 @@ def __setattr__(self, attr, value): self._validate(attr, getattr(self, attr).default_value) self._register_parameter(attr) + if ( + ( + attr[0] != '_' + or attr.startswith(self._parsing_method_prefix) + or attr.startswith(self._validation_method_prefix) + ) + and not self._initializing + ): + # below does happen during deepcopy, but that should only + # happen on instances, which won't have _children + self._invalidate_nonexistent_attr_cache(attr) + def _reconcile_value_with_init_default(self, attr, value): constructor_default = get_init_signature_default_value(self._owner, attr) if constructor_default is not None and constructor_default is not inspect._empty: diff --git a/tests/misc/test_parameters.py b/tests/misc/test_parameters.py index 55b7b9f0ff3..cd77e5068b3 100644 --- a/tests/misc/test_parameters.py +++ b/tests/misc/test_parameters.py @@ -433,8 +433,8 @@ def test_conflict_no_warning( def test_conflict_no_warning_parser(self): # replace with different class/parameter if _parse_noise ever implemented - assert not hasattr(pnl.AdaptiveIntegrator.Parameters, '_parse_noise') - pnl.AdaptiveIntegrator.Parameters._parse_noise = lambda self, noise: 2 * noise + assert not hasattr(pnl.AdaptiveIntegrator.parameters, '_parse_noise') + pnl.AdaptiveIntegrator.parameters._parse_noise = lambda noise: 2 * noise # pytest doesn't support inverse warning assertion for specific # warning only @@ -449,7 +449,7 @@ def test_conflict_no_warning_parser(self): if re.match(shared_parameter_warning_regex('noise'), str(w)): raise - delattr(pnl.AdaptiveIntegrator.Parameters, '_parse_noise') + delattr(pnl.AdaptiveIntegrator.parameters, '_parse_noise') class TestSpecificationType: From 24efb9277ee8e9d4382f664c055c1811c2fc46e8 Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Tue, 13 Feb 2024 05:24:27 +0000 Subject: [PATCH 069/410] ParametersBase: break throw_attr into separate method --- psyneulink/core/globals/parameters.py | 38 +++++++++++++-------------- 1 file changed, 19 insertions(+), 19 deletions(-) diff --git a/psyneulink/core/globals/parameters.py b/psyneulink/core/globals/parameters.py index 85ca1b8f5bf..54e98ee2906 100644 --- a/psyneulink/core/globals/parameters.py +++ b/psyneulink/core/globals/parameters.py @@ -2142,26 +2142,26 @@ def __init__(self, owner, parent=None): self._initializing = False - def __getattr__(self, attr): - def throw_error(): - try: - param_owner = self._owner - if isinstance(param_owner, type): - owner_string = f' of {param_owner}' - else: - owner_string = f' of {param_owner.name}' + def _throw_attr_error(self, attr): + try: + param_owner = self._owner + if isinstance(param_owner, type): + owner_string = f' of {param_owner}' + else: + owner_string = f' of {param_owner.name}' - if hasattr(param_owner, 'owner') and param_owner.owner: - owner_string += f' for {param_owner.owner.name}' 
- if hasattr(param_owner.owner, 'owner') and param_owner.owner.owner: - owner_string += f' of {param_owner.owner.owner.name}' - except AttributeError: - owner_string = '' + if hasattr(param_owner, 'owner') and param_owner.owner: + owner_string += f' for {param_owner.owner.name}' + if hasattr(param_owner.owner, 'owner') and param_owner.owner.owner: + owner_string += f' of {param_owner.owner.owner.name}' + except AttributeError: + owner_string = '' - raise AttributeError( - f"No attribute '{attr}' exists in the parameter hierarchy{owner_string}." - ) from None + raise AttributeError( + f"No attribute '{attr}' exists in the parameter hierarchy{owner_string}." + ) from None + def __getattr__(self, attr): if ( attr in self._nonexistent_attr_cache # attr can't be in __dict__ or __getattr__ would not be called @@ -2171,13 +2171,13 @@ def throw_error(): ) ): self._nonexistent_attr_cache.add(attr) - throw_error() + self._throw_attr_error(attr) try: return getattr(self._parent, attr) except AttributeError: self._nonexistent_attr_cache.add(attr) - throw_error() + self._throw_attr_error(attr) def __setattr__(self, attr, value): # handles parsing: Parameter or ParameterAlias housekeeping if assigned, or creation of a Parameter From 30ab80c767915b29d04eba857b744e0792d5447a Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Fri, 9 Feb 2024 01:12:19 +0000 Subject: [PATCH 070/410] Parameter: store source and final_source as self faster universal check for both shared and unshared --- psyneulink/core/globals/parameters.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/psyneulink/core/globals/parameters.py b/psyneulink/core/globals/parameters.py index 54e98ee2906..894ebf87857 100644 --- a/psyneulink/core/globals/parameters.py +++ b/psyneulink/core/globals/parameters.py @@ -1765,6 +1765,14 @@ def _set_spec(self, value): value = self._parse(value) super().__setattr__('spec', value) + @property + def source(self): + return self + + @property + def final_source(self): + return self + class _ParameterAliasMeta(type): # these will not be taken from the source @@ -2007,7 +2015,7 @@ def source(self): @property def final_source(self): base_param = self - while hasattr(base_param, 'source'): + while isinstance(base_param, SharedParameter): base_param = base_param.source return base_param From 8a03408bbd1a25cdb53bbc0a93fc117d3ddd5a3a Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Fri, 9 Feb 2024 01:46:28 +0000 Subject: [PATCH 071/410] Parameter: faster __contains__ --- psyneulink/core/globals/parameters.py | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/psyneulink/core/globals/parameters.py b/psyneulink/core/globals/parameters.py index 894ebf87857..35a16f29e40 100644 --- a/psyneulink/core/globals/parameters.py +++ b/psyneulink/core/globals/parameters.py @@ -531,7 +531,19 @@ def __deepcopy__(self, memo): return newone def __contains__(self, item): - return item in itertools.chain.from_iterable(self.values(show_all=True).items()) + if item in self._params: + return True + + for p in self._params: + try: + p_attr = getattr(self, p) + except AttributeError: + return False + else: + if item is p_attr: + return True + + return False def __iter__(self): return iter([getattr(self, k) for k in self.values(show_all=True).keys()]) From 5f5fed3ea991000ccd15a496eb261f2db9d738c7 Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Fri, 9 Feb 2024 01:53:08 +0000 Subject: [PATCH 072/410] Component: check for existing parameter by name before getattr --- 
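In effect the change below swaps a try/except around getattr for a membership
test on the registered parameter names, so names that are not Parameters never
go through the Parameters __getattr__ fallback and its AttributeError handling.
A minimal sketch of the pattern, with illustrative names rather than the real
Parameters API:

    class Params:
        rate = 1.0
        _params = {'rate'}                  # registry of known parameter names

    def get_parameter(params, name):
        if name in params._params:          # cheap membership test first
            return getattr(params, name)    # attribute is known to exist
        return None                         # not a Parameter; caller just skips it

    get_parameter(Params(), 'rate')    # 1.0
    get_parameter(Params(), 'bogus')   # None, without raising AttributeError
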
psyneulink/core/components/component.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/psyneulink/core/components/component.py b/psyneulink/core/components/component.py index 848e6183663..186e76aa0d3 100644 --- a/psyneulink/core/components/component.py +++ b/psyneulink/core/components/component.py @@ -2250,9 +2250,9 @@ def _initialize_parameters(self, context=None, **param_defaults): if name in alias_names: continue - try: + if name in self.parameters._params: parameter_obj = getattr(self.parameters, name) - except AttributeError: + else: # name in param_defaults does not correspond to a Parameter continue From 08738c1b52c8a967bc37c6ccc178bc4bb8aa20e7 Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Fri, 9 Feb 2024 02:43:49 +0000 Subject: [PATCH 073/410] ParametersTemplate: add _params check in _is_parameter --- psyneulink/core/globals/parameters.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/psyneulink/core/globals/parameters.py b/psyneulink/core/globals/parameters.py index 35a16f29e40..87f2e19074d 100644 --- a/psyneulink/core/globals/parameters.py +++ b/psyneulink/core/globals/parameters.py @@ -551,6 +551,8 @@ def __iter__(self): def _is_parameter(self, param_name): if param_name[0] == '_': return False + elif param_name in self._params: + return True else: try: return not isinstance(getattr(self, param_name), (types.MethodType, types.BuiltinMethodType)) From ac7b14605f7287c536a6808b3157f9aeb9e0123e Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Fri, 9 Feb 2024 03:40:02 +0000 Subject: [PATCH 074/410] Parameters: check for alias using name --- psyneulink/core/globals/parameters.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/psyneulink/core/globals/parameters.py b/psyneulink/core/globals/parameters.py index 87f2e19074d..ef9b0ad6352 100644 --- a/psyneulink/core/globals/parameters.py +++ b/psyneulink/core/globals/parameters.py @@ -2229,11 +2229,9 @@ def __setattr__(self, attr, value): for alias in value.aliases: # there is a conflict if a non-ParameterAlias exists # with the same name as the planned alias - try: + if alias in self: if not isinstance(getattr(self, alias), ParameterAlias): conflicts.append(alias) - except AttributeError: - pass super().__setattr__(alias, ParameterAlias(source=getattr(self, attr), name=alias)) self._register_parameter(alias) From 7a978d503fd9cdfbcb29f894ee221152334df2b8 Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Thu, 15 Feb 2024 00:48:38 +0000 Subject: [PATCH 075/410] ParameterPortList: reduce excess slow str formats UtilitiesError being thrown by underlying ContentAddressableList caused many Parameter.__str__ calls, which can be expensive when containing large numpy arrays --- .../core/components/ports/parameterport.py | 42 +++++++++---------- 1 file changed, 19 insertions(+), 23 deletions(-) diff --git a/psyneulink/core/components/ports/parameterport.py b/psyneulink/core/components/ports/parameterport.py index 3b7d0010cc9..243b18b873a 100644 --- a/psyneulink/core/components/ports/parameterport.py +++ b/psyneulink/core/components/ports/parameterport.py @@ -384,12 +384,12 @@ CONTEXT, CONTROL_PROJECTION, CONTROL_SIGNAL, CONTROL_SIGNALS, FUNCTION, FUNCTION_PARAMS, \ LEARNING_SIGNAL, LEARNING_SIGNALS, MECHANISM, NAME, PARAMETER_PORT, PARAMETER_PORT_PARAMS, PATHWAY_PROJECTION, \ PROJECTION, PROJECTIONS, PROJECTION_TYPE, REFERENCE_VALUE, SENDER, VALUE -from psyneulink.core.globals.parameters import ParameterBase, ParameterAlias, SharedParameter, check_user_specified +from 
psyneulink.core.globals.parameters import ParameterAlias, SharedParameter, check_user_specified from psyneulink.core.globals.preferences.basepreferenceset import ValidPrefSet from psyneulink.core.globals.preferences.preferenceset import PreferenceLevel from psyneulink.core.globals.utilities \ import ContentAddressableList, ReadOnlyOrderedDict, is_iterable, is_numeric, is_value_spec, iscompatible, \ - is_instance_or_subclass, UtilitiesError, gen_friendly_comma_str + is_instance_or_subclass, gen_friendly_comma_str __all__ = [ 'ParameterPort', 'ParameterPortError', 'port_type_keywords', @@ -431,6 +431,23 @@ def __getitem__(self, key): except KeyError: pass + # is a Parameter + try: + final_source = key.final_source + except AttributeError: + pass + else: + try: + return self.parameter_mapping[final_source] + except KeyError as e: + try: + raise ParameterPortError( + f'No ParameterPort corresponds to {key._owner._owner}' + f'.parameters.{key.name}' + ) from None + except AttributeError: + raise e from None + try: return super().__getitem__(key) except TypeError as e: @@ -452,27 +469,6 @@ def __getitem__(self, key): f'Multiple ParameterPorts for {key} exist. Did you want' f' {gen_friendly_comma_str(sorted([p.name for p in possible_ports]))}?' ) from None - except UtilitiesError as e: - # ContentAddressableList throws UtilitiesError if key is not an int - # or string. handle only Parameter key here - if not isinstance(key, ParameterBase): - raise e from None - - try: - final_source = key.final_source - except AttributeError: - final_source = key - - try: - res = self.parameter_mapping[final_source] - except KeyError: - try: - raise ParameterPortError( - f'No ParameterPort corresponds to {key._owner._owner}' - f'.parameters.{key.name}' - ) from None - except AttributeError: - raise e from None if res is not None: self.parameter_mapping[key] = res From c9a1a136e878841675ca5c6ce5a4f3925df4d069 Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Thu, 15 Feb 2024 03:23:49 +0000 Subject: [PATCH 076/410] Parameter:__getattr__: call _parent fewer times --- psyneulink/core/globals/parameters.py | 20 +++++++++----------- 1 file changed, 9 insertions(+), 11 deletions(-) diff --git a/psyneulink/core/globals/parameters.py b/psyneulink/core/globals/parameters.py index ef9b0ad6352..b77005ba1ec 100644 --- a/psyneulink/core/globals/parameters.py +++ b/psyneulink/core/globals/parameters.py @@ -1129,13 +1129,10 @@ def __getattr__(self, attr): inherited_source = None if ( - self._parent is not None - and ( - inherited_source is None - # this condition indicates the cache was invalidated - # since it was set - or inherited_source._is_invalid_source - ) + inherited_source is None + # this condition indicates the cache was invalidated + # since it was set + or inherited_source._is_invalid_source ): next_parent = self._parent while next_parent is not None: @@ -1145,10 +1142,11 @@ def __getattr__(self, attr): break next_parent = next_parent._parent - try: - return getattr(inherited_source, attr) - except AttributeError: - raise AttributeError("Parameter '%s' has no attribute '%s'" % (self.name, attr)) from None + if inherited_source is None: + # will fail, use default behavior + return self.__getattribute__(attr) + else: + return inherited_source.__getattribute__(attr) def __setattr__(self, attr, value): if attr in self._additional_param_attr_properties: From 37b55f1a0dcfaf453b4d3b131ab1d0f0b0395176 Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Thu, 22 Feb 2024 03:36:37 +0000 Subject: [PATCH 077/410] 
ParametersBase: do not getattr on parent for missing parameter attrs If a ParametersBase object does not have a parameter attribute, it returned the corresponding parameter attribute on its parent, if present. This behavior was designed to allow fallback use of parse or validate methods, but can be confusing for Parameters. Aliases are not be present on the class before __init__ --- psyneulink/core/globals/parameters.py | 27 ++++++++++++++++++++------- 1 file changed, 20 insertions(+), 7 deletions(-) diff --git a/psyneulink/core/globals/parameters.py b/psyneulink/core/globals/parameters.py index b77005ba1ec..c17809da650 100644 --- a/psyneulink/core/globals/parameters.py +++ b/psyneulink/core/globals/parameters.py @@ -2118,8 +2118,13 @@ def __init__(self, owner, parent=None): super().__init__(owner=owner, parent=parent) - aliases_to_create = set() - for param_name, param_value in self.values(show_all=True).items(): + aliases_to_create = {} + for param_name in copy.copy(self._params): + try: + param_value = getattr(self, param_name) + except AttributeError: + param_value = NotImplemented + constructor_default = get_init_signature_default_value(self._owner, param_name) if ( @@ -2139,7 +2144,7 @@ def __init__(self, owner, parent=None): # the param that the alias is going to refer to may not have been created yet # (the alias then may refer to the parent Parameter instead of the Parameter associated with this # Parameters class) - aliases_to_create.add(param_name) + aliases_to_create[param_name] = parent_param.source.name else: new_param = copy.deepcopy(parent_param) new_param._owner = self @@ -2147,8 +2152,10 @@ def __init__(self, owner, parent=None): setattr(self, param_name, new_param) - for alias_name in aliases_to_create: - setattr(self, alias_name, ParameterAlias(name=alias_name, source=getattr(self, alias_name).source)) + for alias_name, source_name in aliases_to_create.items(): + # getattr here and not above, because the alias may be + # iterated over before the source + setattr(self, alias_name, ParameterAlias(name=alias_name, source=getattr(self, source_name))) values = self.values(show_all=True) for param, value in values.items(): @@ -2183,7 +2190,8 @@ def _throw_attr_error(self, attr): def __getattr__(self, attr): if ( - attr in self._nonexistent_attr_cache + attr in self._params + or attr in self._nonexistent_attr_cache # attr can't be in __dict__ or __getattr__ would not be called or ( self._parent is not None @@ -2228,7 +2236,12 @@ def __setattr__(self, attr, value): # there is a conflict if a non-ParameterAlias exists # with the same name as the planned alias if alias in self: - if not isinstance(getattr(self, alias), ParameterAlias): + try: + alias_param = getattr(self, alias) + except AttributeError: + continue + + if not isinstance(alias_param, ParameterAlias): conflicts.append(alias) super().__setattr__(alias, ParameterAlias(source=getattr(self, attr), name=alias)) From 3785f78382efede6144430984b3172da4ddda5bf Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Wed, 6 Mar 2024 01:57:22 +0000 Subject: [PATCH 078/410] Parameter: replace equivalent code with copy_parameter_value --- psyneulink/core/globals/parameters.py | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/psyneulink/core/globals/parameters.py b/psyneulink/core/globals/parameters.py index c17809da650..428b463bd13 100644 --- a/psyneulink/core/globals/parameters.py +++ b/psyneulink/core/globals/parameters.py @@ -1694,8 +1694,6 @@ def clear_history( pass def _initialize_from_context(self, 
context=None, base_context=Context(execution_id=None), override=True): - from psyneulink.core.components.component import Component, ComponentsMeta - try: try: cur_val = self.values[context.execution_id] @@ -1713,13 +1711,7 @@ def _initialize_from_context(self, context=None, base_context=Context(execution_ except KeyError: new_history = NotImplemented - shared_types = (Component, ComponentsMeta, types.MethodType, types.ModuleType) - - if isinstance(new_val, (dict, list)): - new_val = copy_iterable_with_shared(new_val, shared_types) - elif not isinstance(new_val, shared_types): - new_val = copy.deepcopy(new_val) - + new_val = copy_parameter_value(new_val) self.values[context.execution_id] = new_val if new_history is None: From 612e3d0fdc1bbc7a9f37ecf1aaefcc2377b2290f Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Tue, 2 Apr 2024 01:17:46 +0000 Subject: [PATCH 079/410] Parameter: _set, _set_value: reformat arg list --- psyneulink/core/globals/parameters.py | 27 ++++++++++++++++++++++++--- 1 file changed, 24 insertions(+), 3 deletions(-) diff --git a/psyneulink/core/globals/parameters.py b/psyneulink/core/globals/parameters.py index 428b463bd13..ec2abf5bd0e 100644 --- a/psyneulink/core/globals/parameters.py +++ b/psyneulink/core/globals/parameters.py @@ -1483,7 +1483,14 @@ def set(self, value, context=None, override=False, skip_history=False, skip_log= return value - def _set(self, value, context, skip_history=False, skip_log=False, **kwargs): + def _set( + self, + value, + context, + skip_history=False, + skip_log=False, + **kwargs, + ): if not self.stateful: execution_id = None else: @@ -1503,10 +1510,24 @@ def _set(self, value, context, skip_history=False, skip_log=False, **kwargs): } value = call_with_pruned_args(self.setter, value, context=context, **kwargs) - self._set_value(value, execution_id=execution_id, context=context, skip_history=skip_history, skip_log=skip_log) + self._set_value( + value, + execution_id=execution_id, + context=context, + skip_history=skip_history, + skip_log=skip_log, + ) return value - def _set_value(self, value, execution_id=None, context=None, skip_history=False, skip_log=False, skip_delivery=False): + def _set_value( + self, + value, + execution_id=None, + context=None, + skip_history=False, + skip_log=False, + skip_delivery=False, + ): # store history if not skip_history: if execution_id in self.values: From a5e0315e0acdd5d0c98e0e819d526e5f074b7976 Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Tue, 2 Apr 2024 01:17:46 +0000 Subject: [PATCH 080/410] Parameter: _set: make skip_delivery kwarg explicit expected by _set_value and does not need to be passed to setters --- psyneulink/core/globals/parameters.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/psyneulink/core/globals/parameters.py b/psyneulink/core/globals/parameters.py index ec2abf5bd0e..ca74b7e1071 100644 --- a/psyneulink/core/globals/parameters.py +++ b/psyneulink/core/globals/parameters.py @@ -1489,6 +1489,7 @@ def _set( context, skip_history=False, skip_log=False, + skip_delivery=False, **kwargs, ): if not self.stateful: @@ -1516,6 +1517,7 @@ def _set( context=context, skip_history=skip_history, skip_log=skip_log, + skip_delivery=skip_delivery, ) return value From e2f96f00426011c3699e9f2cf330545cad1156fd Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Tue, 16 Apr 2024 12:59:04 -0400 Subject: [PATCH 081/410] dependabot: Allow up to 25 open pull requests (#2944) Signed-off-by: Jan Vesely --- .github/dependabot.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/.github/dependabot.yml b/.github/dependabot.yml index 4d702e5d68a..90ce1596fb5 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -27,5 +27,5 @@ updates: prefix: "requirements" labels: - "deps" - open-pull-requests-limit: 15 + open-pull-requests-limit: 25 rebase-strategy: "disabled" From 8a4b7dd0eeeb1fe7354b97843eb6d16432bf6395 Mon Sep 17 00:00:00 2001 From: David Turner Date: Tue, 16 Apr 2024 13:56:36 -0400 Subject: [PATCH 082/410] Get rid of unused local --- tests/composition/test_control.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/composition/test_control.py b/tests/composition/test_control.py index 3aafb52090f..e89d1a4e39d 100644 --- a/tests/composition/test_control.py +++ b/tests/composition/test_control.py @@ -1869,7 +1869,7 @@ def test_lvoc_features_function(self): # torch.func.grad. except ImportError: with pytest.raises(ValueError): - lvoc = pnl.OptimizationControlMechanism(**ocm_kwargs) + pnl.OptimizationControlMechanism(**ocm_kwargs) return From 16153ae10f54f921ae83675dc4e5b7f54c8518a6 Mon Sep 17 00:00:00 2001 From: David Turner Date: Tue, 16 Apr 2024 14:05:00 -0400 Subject: [PATCH 083/410] Check for grad in torch.func with dir() --- .../nonstateful/optimizationfunctions.py | 17 ++++++----------- 1 file changed, 6 insertions(+), 11 deletions(-) diff --git a/psyneulink/core/components/functions/nonstateful/optimizationfunctions.py b/psyneulink/core/components/functions/nonstateful/optimizationfunctions.py index ec4ddd9342d..8302d141679 100644 --- a/psyneulink/core/components/functions/nonstateful/optimizationfunctions.py +++ b/psyneulink/core/components/functions/nonstateful/optimizationfunctions.py @@ -32,18 +32,12 @@ import numpy as np +# Conditionally import torch try: import torch except ImportError: torch = None -# Conditionally import torch, this is a bit more strict than the usual conditional import of pytorch. This is because -# GradientOptimization needs torch.func.grad, which is not available in pytorch versions less than 2.0.0. -try: - from torch.func import grad -except ImportError: - grad = None - from beartype import beartype from psyneulink._typing import Optional, Union, Callable, Literal @@ -1261,16 +1255,17 @@ def reset(self, default_variable=None, objective_function=None, context=None, ** **kwargs ) - # Differentiate objective_function using autograd.grad() + # Differentiate objective_function using torch.func.grad() if objective_function is not None and not self.gradient_function: if torch is None: raise ValueError("PyTorch is not installed. Please install PyTorch to use GradientOptimization without " "specifying a gradient_function.") - if grad is None: - raise ValueError("PyTorch version is too old. Please upgrade PyTorch to >= 2.0 to use " - "GradientOptimization without specifying a gradient_function.") + if 'grad' not in dir(torch.func): + raise ValueError("torch.func.grad not found. PyTorch version is probably too old. 
Please upgrade " + "PyTorch to >= 2.0 to use GradientOptimization without specifying a " + "gradient_function.") try: # Need to wrap objective_function in a lambda to pass to grad because it needs to return a torch tensor From b3d6a5518631449e89fd9129d40a9bb25f766de1 Mon Sep 17 00:00:00 2001 From: David Turner Date: Tue, 16 Apr 2024 14:07:32 -0400 Subject: [PATCH 084/410] Move torch tensor check out of try block --- psyneulink/core/globals/utilities.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/psyneulink/core/globals/utilities.py b/psyneulink/core/globals/utilities.py index cd70a7b1760..eeb9b69e5dc 100644 --- a/psyneulink/core/globals/utilities.py +++ b/psyneulink/core/globals/utilities.py @@ -1017,15 +1017,17 @@ def convert_to_np_array(value, dimension=None): """ def safe_create_np_array(value): with warnings.catch_warnings(): + + # If we have a torch tensor, allow it to pass through unchanged + if torch and torch.is_tensor(value): + return value + warnings.filterwarnings('error', category=np.VisibleDeprecationWarning) # NOTE: this will raise a ValueError in the future. # See https://numpy.org/neps/nep-0034-infer-dtype-is-object.html try: try: - if torch and torch.is_tensor(value): - return value - else: - return np.asarray(value) + return np.asarray(value) except np.VisibleDeprecationWarning: return np.asarray(value, dtype=object) except ValueError as e: From b29785e8b80854fafe7a8ee52355dfe3a0d79273 Mon Sep 17 00:00:00 2001 From: David Turner Date: Tue, 16 Apr 2024 14:12:00 -0400 Subject: [PATCH 085/410] Change test to check for grad with dir --- tests/composition/test_control.py | 29 +++++++++++------------------ 1 file changed, 11 insertions(+), 18 deletions(-) diff --git a/tests/composition/test_control.py b/tests/composition/test_control.py index e89d1a4e39d..035badc5776 100644 --- a/tests/composition/test_control.py +++ b/tests/composition/test_control.py @@ -1842,10 +1842,6 @@ def test_lvoc_both_predictors_specs(self): @pytest.mark.pytorch def test_lvoc_features_function(self): - - - - m1 = pnl.TransferMechanism(input_ports=["InputPort A", "InputPort B"]) m2 = pnl.TransferMechanism() c = pnl.Composition() @@ -1861,27 +1857,24 @@ def test_lvoc_features_function(self): function=pnl.GradientOptimization(max_iterations=1), control_signals=[(pnl.SLOPE, m1), (pnl.SLOPE, m2)]) - try: - from torch.func import grad + import torch + if 'grad' in dir(torch.func): lvoc = pnl.OptimizationControlMechanism(**ocm_kwargs) - # If pytorch is too old (< 2.0), GradientOptimization will raise an error to upgrade pytorch to have support for - # torch.func.grad. - except ImportError: - with pytest.raises(ValueError): - pnl.OptimizationControlMechanism(**ocm_kwargs) + c.add_node(lvoc) + input_dict = {m1: [[1], [1]], m2: [1]} - return + c.run(inputs=input_dict) - c.add_node(lvoc) - input_dict = {m1: [[1], [1]], m2: [1]} + assert len(lvoc.input_ports) == 5 - c.run(inputs=input_dict) + for i in range(1, 5): + assert lvoc.input_ports[i].function.offset == 10.0 - assert len(lvoc.input_ports) == 5 + else: + with pytest.raises(ValueError): + pnl.OptimizationControlMechanism(**ocm_kwargs) - for i in range(1,5): - assert lvoc.input_ports[i].function.offset == 10.0 @pytest.mark.control @pytest.mark.composition From 30275a682890afb0c18f267e88f74270ba60be9f Mon Sep 17 00:00:00 2001 From: David Turner Date: Tue, 16 Apr 2024 15:12:35 -0400 Subject: [PATCH 086/410] Add likelihood_include_mask to PEC. 
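In effect the new mask just selects which rows of the per-trial likelihood
vector enter the summed log-likelihood; a toy sketch with made-up numbers and
illustrative variable names:

    import numpy as np

    like = np.array([0.21, 0.48, 0.05, 0.33])        # per-trial likelihoods from the simulations
    include = np.array([True, True, False, True])    # likelihood_include_mask
    log_likelihood = np.sum(np.log(like[include]))   # the masked-out trial is ignored by the fit

A mask of all True, which is the default built from the data length, leaves the
objective unchanged.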
Add argument to PEC that allows users to mask off trials for inclusion in the likelihood calculations. --- .../parameterestimationcomposition.py | 19 ++++++++++++++++++- .../test_parameterestimationcomposition.py | 10 +++++++++- 2 files changed, 27 insertions(+), 2 deletions(-) diff --git a/psyneulink/core/compositions/parameterestimationcomposition.py b/psyneulink/core/compositions/parameterestimationcomposition.py index 0d7dc7590eb..27f824f5d89 100644 --- a/psyneulink/core/compositions/parameterestimationcomposition.py +++ b/psyneulink/core/compositions/parameterestimationcomposition.py @@ -504,6 +504,7 @@ def __init__( ], model: Optional[Composition] = None, data: Optional[pd.DataFrame] = None, + likelihood_include_mask: Optional[np.ndarray] = None, data_categorical_dims=None, objective_function: Optional[Callable] = None, num_estimates: int = 1, @@ -616,6 +617,22 @@ def __init__( if self.data is not None: self._validate_data() + if likelihood_include_mask is not None: + + # Make sure the length is correct + if len(likelihood_include_mask) != len(self.data): + raise ValueError( + "Likelihood include mask must be the same length as the number of rows in the data!") + + # If the include mask is 2D, make it 1D + if likelihood_include_mask.ndim == 2: + likelihood_include_mask = likelihood_include_mask.flatten() + + self.likelihood_include_mask = likelihood_include_mask + + else: + self.likelihood_include_mask = np.ones(len(self.data), dtype=bool) + # Store the parameters specified for fitting self.fit_parameters = parameters @@ -792,7 +809,7 @@ def f(sim_data): categorical_dims=self.data_categorical_dims, ) - return np.sum(np.log(like)) + return np.sum(np.log(like[self.likelihood_include_mask])) objective_function = f diff --git a/tests/composition/test_parameterestimationcomposition.py b/tests/composition/test_parameterestimationcomposition.py index 9bc73f00a31..8d22729fc08 100644 --- a/tests/composition/test_parameterestimationcomposition.py +++ b/tests/composition/test_parameterestimationcomposition.py @@ -215,8 +215,12 @@ def reward_rate(sim_data): np.testing.assert_allclose(pec.optimized_parameter_values, result) +@pytest.mark.parametrize('likelihood_include_mask', [ + pytest.param('include', id='likelihood_include_mask'), + pytest.param(None, id='no_likelihood_include_mask'),] +) # func_mode is a hacky wa to get properly marked; Python, LLVM, and CUDA -def test_parameter_estimation_ddm_mle(func_mode): +def test_parameter_estimation_ddm_mle(func_mode, likelihood_include_mask): """Test parameter estimation of a DDM in integrator mode with MLE.""" if func_mode == "Python": @@ -279,6 +283,9 @@ def test_parameter_estimation_ddm_mle(func_mode): ) data_to_fit["decision"] = data_to_fit["decision"].astype("category") + if likelihood_include_mask == 'include': + likelihood_include_mask = np.ones((len(data_to_fit),), dtype=bool) + # Create a parameter estimation composition to fit the data we just generated and hopefully recover the # parameters of the DDM. @@ -297,6 +304,7 @@ def test_parameter_estimation_ddm_mle(func_mode): decision.output_ports[pnl.RESPONSE_TIME], ], data=data_to_fit, + likelihood_include_mask=likelihood_include_mask, optimization_function=PECOptimizationFunction( method="differential_evolution", max_iterations=1 ), From c72725073c7ba0575ca6429ec306bb8ae9e58106 Mon Sep 17 00:00:00 2001 From: David Turner Date: Tue, 16 Apr 2024 15:16:35 -0400 Subject: [PATCH 087/410] Fix check for grad in torch. Needs to be a check for torch functional API instead. 
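The previous guard called dir(torch.func), which has to evaluate torch.func
first and therefore raises AttributeError on installs that predate the
functional API, so the friendly upgrade message never fires. A minimal sketch
of the check this patch moves to, assuming only that torch itself imports:

    import torch

    if 'func' in dir(torch):        # functional API present (roughly, torch >= 2.0)
        grad_fn = torch.func.grad   # safe to touch torch.func now
    else:
        raise ValueError("PyTorch >= 2.0 with torch.func is required")  # illustrative message

hasattr(torch, 'func') would behave the same; dir() just matches the style
already used in the surrounding code.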
--- .../components/functions/nonstateful/optimizationfunctions.py | 2 +- tests/composition/test_control.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/psyneulink/core/components/functions/nonstateful/optimizationfunctions.py b/psyneulink/core/components/functions/nonstateful/optimizationfunctions.py index 8302d141679..df9cc84cc1b 100644 --- a/psyneulink/core/components/functions/nonstateful/optimizationfunctions.py +++ b/psyneulink/core/components/functions/nonstateful/optimizationfunctions.py @@ -1262,7 +1262,7 @@ def reset(self, default_variable=None, objective_function=None, context=None, ** raise ValueError("PyTorch is not installed. Please install PyTorch to use GradientOptimization without " "specifying a gradient_function.") - if 'grad' not in dir(torch.func): + if 'func' not in dir(torch): raise ValueError("torch.func.grad not found. PyTorch version is probably too old. Please upgrade " "PyTorch to >= 2.0 to use GradientOptimization without specifying a " "gradient_function.") diff --git a/tests/composition/test_control.py b/tests/composition/test_control.py index 035badc5776..0e9701d9e5b 100644 --- a/tests/composition/test_control.py +++ b/tests/composition/test_control.py @@ -1858,7 +1858,7 @@ def test_lvoc_features_function(self): control_signals=[(pnl.SLOPE, m1), (pnl.SLOPE, m2)]) import torch - if 'grad' in dir(torch.func): + if 'func' in dir(torch): lvoc = pnl.OptimizationControlMechanism(**ocm_kwargs) c.add_node(lvoc) From 731197bd5aee02b621734943daa595714eb1d299 Mon Sep 17 00:00:00 2001 From: David Turner Date: Tue, 23 Apr 2024 09:56:31 -0400 Subject: [PATCH 088/410] WIP test for conditional pec --- .../test_parameterestimationcomposition.py | 99 +++++++++++++------ 1 file changed, 69 insertions(+), 30 deletions(-) diff --git a/tests/composition/test_parameterestimationcomposition.py b/tests/composition/test_parameterestimationcomposition.py index 437108a1418..889016221ea 100644 --- a/tests/composition/test_parameterestimationcomposition.py +++ b/tests/composition/test_parameterestimationcomposition.py @@ -9,6 +9,44 @@ PECOptimizationFunction, ) +def _run_ddm_with_params( + starting_value, + rate, + noise, + threshold, + non_decision_time, + time_step_size, + trial_inputs, +): + """Create a composition with DDM and run it with the given parameters.""" + + # Create a simple one mechanism composition containing a DDM in integrator mode. + decision = pnl.DDM( + function=pnl.DriftDiffusionIntegrator( + starting_value=starting_value, + rate=rate, + noise=noise, + threshold=threshold, + non_decision_time=non_decision_time, + time_step_size=time_step_size, + ), + output_ports=[pnl.DECISION_OUTCOME, pnl.RESPONSE_TIME], + name="DDM", + ) + + comp = pnl.Composition(pathways=decision) + + # Run the composition to generate some data to fit + comp.run(inputs={decision: trial_inputs}) + results = comp.results + + data_to_fit = pd.DataFrame( + np.squeeze(np.array(results)), columns=["decision", "response_time"] + ) + data_to_fit["decision"] = data_to_fit["decision"].astype("category") + + return comp, data_to_fit + # input_node_1 = pnl.ProcessingMechanism(size=1) # input_node_2 = pnl.ProcessingMechanism(size=2) @@ -249,6 +287,30 @@ def reward_rate(sim_data): np.testing.assert_allclose(pec.optimized_parameter_values, result) +def test_parameter_estimation_ddm_cond(func_mode): + if func_mode == "Python": + pytest.skip( + "Test not yet implemented for Python. Parameter estimate is too slow." 
+ ) + + # High-level parameters the impact performance of the test + num_trials = 50 + time_step_size = 0.01 + num_estimates = 1000 + + ddm_params = dict( + starting_value=0.0, + rate=0.3, + noise=1.0, + threshold=0.6, + non_decision_time=0.15, + time_step_size=time_step_size, + ) + + # We will generate a dataset that is comprised of two different conditions. Each condition will have a different + # drift rate and non_decision_time. + + @pytest.mark.parametrize('likelihood_include_mask', [ pytest.param('include', id='likelihood_include_mask'), pytest.param(None, id='no_likelihood_include_mask'),] @@ -276,15 +338,6 @@ def test_parameter_estimation_ddm_mle(func_mode, likelihood_include_mask): time_step_size=time_step_size, ) - # Create a simple one mechanism composition containing a DDM in integrator mode. - decision = pnl.DDM( - function=pnl.DriftDiffusionIntegrator(**ddm_params), - output_ports=[pnl.DECISION_OUTCOME, pnl.RESPONSE_TIME], - name="DDM", - ) - - comp = pnl.Composition(pathways=decision) - # Let's generate an "experimental" dataset to fit. This is a parameter recovery test # Lets make 10% of the trials have a positive stimulus drift rate, and the other 90% # have a negative stimulus drift rate. @@ -300,22 +353,8 @@ def test_parameter_estimation_ddm_mle(func_mode, likelihood_include_mask): trial_inputs[0] = np.abs(trial_inputs[0]) trial_inputs[-1] = np.abs(trial_inputs[-1]) - inputs_dict = {decision: trial_inputs} - - # Store the results of this "experiment" as a numpy array. This should be a - # 2D array of shape (len(input), 2). The first column being a discrete variable - # specifying whether the upper or lower decision boundary is reached and the second column is the - # reaction time. We will put the data into a pandas DataFrame, this makes it - # easier to specify which columns in the data are categorical or not. - - # Run the composition to generate some data to fit - comp.run(inputs=inputs_dict) - results = comp.results - - data_to_fit = pd.DataFrame( - np.squeeze(np.array(results)), columns=["decision", "response_time"] - ) - data_to_fit["decision"] = data_to_fit["decision"].astype("category") + # Creat and run the composition to generate some data to fit + comp, data_to_fit = _run_ddm_with_params(**ddm_params, trial_inputs=trial_inputs) if likelihood_include_mask == 'include': likelihood_include_mask = np.ones((len(data_to_fit),), dtype=bool) @@ -324,9 +363,9 @@ def test_parameter_estimation_ddm_mle(func_mode, likelihood_include_mask): # parameters of the DDM. 
fit_parameters = { - ("rate", decision): np.linspace(-0.5, 0.5, 1000), - ("threshold", decision): np.linspace(0.5, 1.0, 1000), - ("non_decision_time", decision): np.linspace(0.0, 1.0, 1000), + ("rate", comp.nodes['DDM']): np.linspace(-0.5, 0.5, 1000), + ("threshold", comp.nodes['DDM']): np.linspace(0.5, 1.0, 1000), + ("non_decision_time", comp.nodes['DDM']): np.linspace(0.0, 1.0, 1000), } pec = pnl.ParameterEstimationComposition( @@ -334,8 +373,8 @@ def test_parameter_estimation_ddm_mle(func_mode, likelihood_include_mask): nodes=[comp], parameters=fit_parameters, outcome_variables=[ - decision.output_ports[pnl.DECISION_OUTCOME], - decision.output_ports[pnl.RESPONSE_TIME], + comp.nodes['DDM'].output_ports[pnl.DECISION_OUTCOME], + comp.nodes['DDM'].output_ports[pnl.RESPONSE_TIME], ], data=data_to_fit, likelihood_include_mask=likelihood_include_mask, From 427808eac69e8e029799c8096ab52c7d3ed8e57d Mon Sep 17 00:00:00 2001 From: kmantel <1592123+kmantel@users.noreply.github.com> Date: Tue, 23 Apr 2024 23:31:18 -0400 Subject: [PATCH 089/410] ci: split windows x86 job (#2943) Windows x86 ci runs can fail because the container runs out of memory during testing. This is caused by memory leaks in psyneulink and limited available memory on the containers. As a workaround, running the full set of tests over multiple jobs reduces peak memory usage. Three jobs replace the original Windows x86 build, running pytest with: 1. -m llvm 2. -m "not llvm and composition" 3. -m "not llvm and not composition" Jobs with the same OS, python version, and architecture overwrite uploaded dist packages. --- .github/workflows/pnl-ci.yml | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/.github/workflows/pnl-ci.yml b/.github/workflows/pnl-ci.yml index 83834f73be4..f7ca26e93b7 100644 --- a/.github/workflows/pnl-ci.yml +++ b/.github/workflows/pnl-ci.yml @@ -68,9 +68,21 @@ jobs: extra-args: '--forked -m "not llvm"' # add 32-bit build on windows + # split by marks to reduce peak memory - python-version: '3.8' python-architecture: 'x86' os: windows + extra-args: '-m llvm' + + - python-version: '3.8' + python-architecture: 'x86' + os: windows + extra-args: '-m "not llvm and composition"' + + - python-version: '3.8' + python-architecture: 'x86' + os: windows + extra-args: '-m "not llvm and not composition"' # fp32 run on linux python 3.10 - python-version: '3.10' @@ -181,10 +193,16 @@ jobs: timeout-minutes: 180 run: pytest --junit-xml=tests_out.xml --verbosity=0 -n logical ${{ matrix.extra-args }} + # double quotes are disallowed in artifact names + - name: Get valid filename string from extra-args + id: extra_args_fname + run: echo extra_args="$(echo ${{ matrix.extra-args }} | tr -d '\"')" >> $GITHUB_OUTPUT + shell: bash + - name: Upload test results uses: actions/upload-artifact@v4 with: - name: test-results-${{ matrix.os }}-${{ matrix.python-version }}-${{ matrix.python-architecture }}-${{ matrix.version-restrict }} + name: test-results-${{ matrix.os }}-${{ matrix.python-version }}-${{ matrix.python-architecture }}-${{ matrix.version-restrict }}-${{ steps.extra_args_fname.outputs.extra_args }} path: tests_out.xml retention-days: 5 if: (success() || failure()) && ! 
contains(matrix.extra-args, 'forked') @@ -214,3 +232,4 @@ jobs: name: dist-${{ matrix.os }}-${{ matrix.python-version }}-${{ matrix.python-architecture }} path: dist/ retention-days: 2 + overwrite: true From 04a4ce036545eb9a57b760b3e1f054074ce680c8 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Wed, 24 Apr 2024 11:22:16 -0400 Subject: [PATCH 090/410] tests/DDM: Remove result shape workaround Format expected results in the expected shape instead. The numeric results are not rounded, use assert_array_equal. Signed-off-by: Jan Vesely --- tests/mechanisms/test_ddm_mechanism.py | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/tests/mechanisms/test_ddm_mechanism.py b/tests/mechanisms/test_ddm_mechanism.py index c82c870d05b..b390e2072d0 100644 --- a/tests/mechanisms/test_ddm_mechanism.py +++ b/tests/mechanisms/test_ddm_mechanism.py @@ -710,10 +710,10 @@ def test_DDM_threshold_modulation_integrator(comp_mode): @pytest.mark.composition @pytest.mark.parametrize(["noise", "threshold", "expected_results"],[ - (1.0, 0.0, (0.0, 1.0)), - (1.5, 2, (-2.0, 1.0)), - (10.0, 10.0, (10.0, 29.0)), - (100.0, 100.0, (100.0, 76.0)), + (1.0, 0.0, [[0.0], [1.0]]), + (1.5, 2, [[-2.0], [1.0]]), + (10.0, 10.0, [[10.0], [29.0]]), + (100.0, 100.0, [[100.0], [76.0]]), ]) def test_ddm_is_finished(comp_mode, noise, threshold, expected_results): @@ -732,8 +732,7 @@ def test_ddm_is_finished(comp_mode, noise, threshold, expected_results): results = comp.run([0], execution_mode=comp_mode) - results = [x for x in np.array(results).flatten()] #HACK: The result is an object dtype in Python comp_mode for some reason? - np.testing.assert_allclose(results, np.array(expected_results).flatten()) + np.testing.assert_array_equal(results, expected_results) def test_sequence_of_DDM_mechs_in_Composition_Pathway(): From 8589001a0a6a7252d1250520e43929849bd574de Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Wed, 24 Apr 2024 13:22:47 -0400 Subject: [PATCH 091/410] tests/DDM: Add test of dependency after DDM finished Test both execute_until_finished=True, and False. Signed-off-by: Jan Vesely --- tests/mechanisms/test_ddm_mechanism.py | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/tests/mechanisms/test_ddm_mechanism.py b/tests/mechanisms/test_ddm_mechanism.py index b390e2072d0..86ec9a0388c 100644 --- a/tests/mechanisms/test_ddm_mechanism.py +++ b/tests/mechanisms/test_ddm_mechanism.py @@ -734,6 +734,31 @@ def test_ddm_is_finished(comp_mode, noise, threshold, expected_results): np.testing.assert_array_equal(results, expected_results) +@pytest.mark.parametrize("until_finished", ["until_finished", "not_until_finished"]) +def test_ddm_is_finished_with_dependency(comp_mode, until_finished): + + # 3/5/2021 - DDM' default behaviour now requires resetting stateful + # functions after each trial. This is not supported in LLVM execution mode. + # See: https://github.com/PrincetonUniversity/PsyNeuLink/issues/1935 + # Moreover, evaluating scheduler conditions in Python is not supported + # for compiled execution + if comp_mode == pnl.ExecutionMode.LLVM: + pytest.xfail(reason="DDM' default behaviour now requires resetting stateful functions after each trial. " + "This is not supported in LLVM execution mode. 
" + "See: https://github.com/PrincetonUniversity/PsyNeuLink/issues/1935") + + comp = Composition() + ddm = DDM(function=DriftDiffusionIntegrator(), + # Use only the decision variable in this test + output_ports=[pnl.DECISION_VARIABLE], + execute_until_finished=until_finished == "until_finished") + dep = pnl.ProcessingMechanism() + comp.add_linear_processing_pathway([ddm, dep]) + comp.scheduler.add_condition(dep, pnl.WhenFinished(ddm)) + + results = comp.run([4], execution_mode=comp_mode) + + np.testing.assert_array_equal(results, [[100]]) def test_sequence_of_DDM_mechs_in_Composition_Pathway(): myMechanism = DDM( From efc07fc76e14fc62d0687a34c59c6691dc8f3b69 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Wed, 24 Apr 2024 13:39:45 -0400 Subject: [PATCH 092/410] llvm, node_wrapper: Allow modulatory projections for "is_finished" wrapper Otherwise the evaluation of "is_finished" is different in mechanism execution, and evaluation of scheduling conditions. Closes: https://github.com/PrincetonUniversity/PsyNeuLink/issues/2931 Signed-off-by: Jan Vesely --- psyneulink/core/llvm/codegen.py | 10 ++++------ tests/mechanisms/test_ddm_mechanism.py | 18 +++++++++++++++--- 2 files changed, 19 insertions(+), 9 deletions(-) diff --git a/psyneulink/core/llvm/codegen.py b/psyneulink/core/llvm/codegen.py index dd5a384d481..0d7d2207d50 100644 --- a/psyneulink/core/llvm/codegen.py +++ b/psyneulink/core/llvm/codegen.py @@ -633,12 +633,10 @@ def gen_node_wrapper(ctx, composition, node, *, tags:frozenset): # and the entire call should be optimized out. node_in = builder.alloca(node_function.args[2].type.pointee, name="mechanism_node_input") - incoming_projections = node.mod_afferents if "reset" in tags else node.afferents - - # Checking if node is finished doesn't need projections - # FIXME: Can the values used in the check be modulated? - if "is_finished" in tags: - incoming_projections = [] + if {"reset", "is_finished"}.intersection(tags): + incoming_projections = node.mod_afferents + else: + incoming_projections = node.afferents if "reset" in tags: proj_func_tags = func_tags.difference({"reset"}).union({"passthrough"}) diff --git a/tests/mechanisms/test_ddm_mechanism.py b/tests/mechanisms/test_ddm_mechanism.py index 86ec9a0388c..5cf1a04a66a 100644 --- a/tests/mechanisms/test_ddm_mechanism.py +++ b/tests/mechanisms/test_ddm_mechanism.py @@ -735,7 +735,8 @@ def test_ddm_is_finished(comp_mode, noise, threshold, expected_results): np.testing.assert_array_equal(results, expected_results) @pytest.mark.parametrize("until_finished", ["until_finished", "not_until_finished"]) -def test_ddm_is_finished_with_dependency(comp_mode, until_finished): +@pytest.mark.parametrize("threshold_mod", ["threshold_modulated", "threshold_not_modulated"]) +def test_ddm_is_finished_with_dependency(comp_mode, until_finished, threshold_mod): # 3/5/2021 - DDM' default behaviour now requires resetting stateful # functions after each trial. This is not supported in LLVM execution mode. 
@@ -756,9 +757,20 @@ def test_ddm_is_finished_with_dependency(comp_mode, until_finished): comp.add_linear_processing_pathway([ddm, dep]) comp.scheduler.add_condition(dep, pnl.WhenFinished(ddm)) - results = comp.run([4], execution_mode=comp_mode) + inputs = {ddm: [4]} + expected_results = [[100]] - np.testing.assert_array_equal(results, [[100]]) + if threshold_mod == "threshold_modulated": + control = pnl.ControlMechanism(control_signals=[(pnl.THRESHOLD, ddm)]) + comp.add_node(control) + + # reduce the threshold by half + inputs[control] = 0.5 + expected_results = [[50]] + + results = comp.run(inputs, execution_mode=comp_mode) + + np.testing.assert_array_equal(results, expected_results) def test_sequence_of_DDM_mechs_in_Composition_Pathway(): myMechanism = DDM( From afffa74d7a307271f1b7aa3dbc5280912d8b6f81 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 24 Apr 2024 22:47:21 +0000 Subject: [PATCH 093/410] requirements: update pytest-cov requirement from <4.1.1 to <5.0.1 (#2934) --- dev_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev_requirements.txt b/dev_requirements.txt index 875b65192f2..20047954f2d 100644 --- a/dev_requirements.txt +++ b/dev_requirements.txt @@ -2,7 +2,7 @@ jupyter<1.0.1 packaging<25.0 pytest<8.1.2 pytest-benchmark<4.0.1 -pytest-cov<4.1.1 +pytest-cov<5.0.1 pytest-forked<1.7.0 pytest-helpers-namespace<2021.12.30 pytest-profiling<1.7.1 From f057236619174d400ddf68339dbaf1bf5165ba8f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 24 Apr 2024 20:59:57 -0400 Subject: [PATCH 094/410] requirements: update beartype requirement from <0.18.0 to <0.19.0 (#2949) Updates the requirements on [beartype](https://github.com/beartype/beartype) to permit the latest version. - [Release notes](https://github.com/beartype/beartype/releases) - [Changelog](https://github.com/beartype/beartype/blob/main/doc/RELEASE.rst) - [Commits](https://github.com/beartype/beartype/compare/v0.18.0...v0.18.5) --- updated-dependencies: - dependency-name: beartype dependency-type: direct:production ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 8a2de9cab23..320d6a25c5f 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,4 @@ -beartype<0.18.0 +beartype<0.19.0 dill<0.3.9 fastkde>=1.0.24, <1.0.31 graph-scheduler>=1.1.1, <1.3.0 From 67e319210b23fa4cc0155565158fcb0448a0bfe2 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Thu, 25 Apr 2024 14:05:11 -0400 Subject: [PATCH 095/410] requirements: update torch requirement from >=1.8.0,<2.2.0 to >=1.8.0,<2.3.0 (#2950) Updates the requirements on [pytorch](https://github.com/pytorch/pytorch) to permit the latest version. 
- [Release notes](https://github.com/pytorch/pytorch/releases) - [Commits](https://github.com/pytorch/pytorch/compare/v2.2.0...v2.3.0) Signed-off-by: Jan Vesely --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 320d6a25c5f..1af6dbe24c7 100644 --- a/requirements.txt +++ b/requirements.txt @@ -19,4 +19,4 @@ protobuf<3.20.4 rich>=10.1, <10.13 scipy<1.12 toposort<1.11 -torch>=1.10.0, <2.2.0; (platform_machine == 'AMD64' or platform_machine == 'x86_64' or platform_machine == 'arm64' or platform_machine == 'aarch64') and platform_python_implementation == 'CPython' and implementation_name == 'cpython' +torch>=1.10.0, <2.3.0; (platform_machine == 'AMD64' or platform_machine == 'x86_64' or platform_machine == 'arm64' or platform_machine == 'aarch64') and platform_python_implementation == 'CPython' and implementation_name == 'cpython' From aaeb859641b754131d050cefcd13563b1294e702 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Thu, 25 Apr 2024 17:13:44 -0400 Subject: [PATCH 096/410] requirements: update torch requirement from >=1.8.0,<2.3.0 to >=1.8.0,<2.4.0 (#2951) Updates the requirements on [pytorch](https://github.com/pytorch/pytorch) to permit the latest version. - [Release notes](https://github.com/pytorch/pytorch/releases) - [Commits](https://github.com/pytorch/pytorch/compare/v2.3.0...v2.4.0) Signed-off-by: Jan Vesely --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 1af6dbe24c7..a1eec625355 100644 --- a/requirements.txt +++ b/requirements.txt @@ -19,4 +19,4 @@ protobuf<3.20.4 rich>=10.1, <10.13 scipy<1.12 toposort<1.11 -torch>=1.10.0, <2.3.0; (platform_machine == 'AMD64' or platform_machine == 'x86_64' or platform_machine == 'arm64' or platform_machine == 'aarch64') and platform_python_implementation == 'CPython' and implementation_name == 'cpython' +torch>=1.10.0, <2.4.0; (platform_machine == 'AMD64' or platform_machine == 'x86_64' or platform_machine == 'arm64' or platform_machine == 'aarch64') and platform_python_implementation == 'CPython' and implementation_name == 'cpython' From bac0b805106c2d3afbdaa0ed5ec22bbac221bf83 Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Tue, 19 Dec 2023 07:34:18 +0000 Subject: [PATCH 097/410] UserDefinedFunction: do not modify cust_fct_params attr on function call --- .../core/components/functions/userdefinedfunction.py | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/psyneulink/core/components/functions/userdefinedfunction.py b/psyneulink/core/components/functions/userdefinedfunction.py index 60d10013ec2..ef6f28832c7 100644 --- a/psyneulink/core/components/functions/userdefinedfunction.py +++ b/psyneulink/core/components/functions/userdefinedfunction.py @@ -617,20 +617,19 @@ def _instantiate_attributes_before_function(self, function=None, context=None): p._set(p.default_value, context, skip_history=True) def _function(self, variable, context=None, **kwargs): + call_params = self.cust_fct_params.copy() # Update value of parms in cust_fct_params - for param in self.cust_fct_params: + for param in call_params: # First check for value passed in params as runtime param: if PARAMS in kwargs and kwargs[PARAMS] is not None and param in kwargs[PARAMS]: - self.cust_fct_params[param] = kwargs[PARAMS][param] + call_params[param] = kwargs[PARAMS][param] elif param in kwargs: - self.cust_fct_params[param] = kwargs[param] + call_params[param] = kwargs[param] else: # Otherwise, 
get current value from ParameterPort (in case it is being modulated by ControlSignal(s) - self.cust_fct_params[param] = self._get_current_parameter_value(param, context) - - call_params = self.cust_fct_params.copy() + call_params[param] = self._get_current_parameter_value(param, context) # # MODIFIED 3/6/19 NEW: [JDC] # Add any of these that were included in the definition of the custom function: From 5a9eb9a2fd6819ab39f6787cc58f593b1596b30b Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Tue, 19 Dec 2023 07:02:38 +0000 Subject: [PATCH 098/410] Component: _initialize_parameters: allow creating Parameters dynamically --- psyneulink/core/components/component.py | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/psyneulink/core/components/component.py b/psyneulink/core/components/component.py index 186e76aa0d3..dd74544bafb 100644 --- a/psyneulink/core/components/component.py +++ b/psyneulink/core/components/component.py @@ -2217,6 +2217,12 @@ def _parse_arguments( return parameter_values, function_params def _initialize_parameters(self, context=None, **param_defaults): + """ + Args: + **param_defaults: maps Parameter names to their default + values. Sets instance-level Parameters dynamically for any + name that maps to a Parameter object. + """ from psyneulink.core.components.shellclasses import ( Composition_Base, Function, Mechanism, Port, Process_Base, Projection, System_Base @@ -2250,6 +2256,14 @@ def _initialize_parameters(self, context=None, **param_defaults): if name in alias_names: continue + if isinstance(value, Parameter): + setattr(self.parameters, name, value) + try: + value = copy.copy(value.default_value) + except TypeError: + value = value.default_value + param_defaults[name] = value + if name in self.parameters._params: parameter_obj = getattr(self.parameters, name) else: From c87671dcf2cb4c8325577757a5fce8bc2e973fbf Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Tue, 19 Dec 2023 06:30:09 +0000 Subject: [PATCH 099/410] UserDefinedFunction: rework creation of custom Parameters overriding _initialize_parameters lets UDF custom Parameters use all existing code for parsing/setting/etc. 
just like normal Parameters --- .../components/functions/userdefinedfunction.py | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/psyneulink/core/components/functions/userdefinedfunction.py b/psyneulink/core/components/functions/userdefinedfunction.py index ef6f28832c7..cf6adc77ce4 100644 --- a/psyneulink/core/components/functions/userdefinedfunction.py +++ b/psyneulink/core/components/functions/userdefinedfunction.py @@ -606,15 +606,14 @@ def _get_allowed_arguments(self): def _validate_params(self, request_set, target_set=None, context=None): pass - def _instantiate_attributes_before_function(self, function=None, context=None): - super()._instantiate_attributes_before_function(function=function, context=context) - # create transient Parameters objects for custom function params - # done here because they need to be present before _instantiate_value which calls self.function + def _initialize_parameters(self, context=None, **param_defaults): + # pass custom parameter values here so they can be created as + # Parameters in Component._initialize_parameters and + # automatically handled as if they were normal Parameters for param_name in self.cust_fct_params: - p = Parameter(self.cust_fct_params[param_name], modulable=True) - setattr(self.parameters, param_name, p) + param_defaults[param_name] = Parameter(self.cust_fct_params[param_name], modulable=True) - p._set(p.default_value, context, skip_history=True) + super()._initialize_parameters(context, **param_defaults) def _function(self, variable, context=None, **kwargs): call_params = self.cust_fct_params.copy() From 63cefb14b811f16e3ba72ddfbce88d0d5ef42422 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 26 Apr 2024 09:09:35 -0400 Subject: [PATCH 100/410] requirements: update pandas requirement from <2.2.2 to <2.2.3 (#2952) Updates the requirements on [pandas](https://github.com/pandas-dev/pandas) to permit the latest version. - [Release notes](https://github.com/pandas-dev/pandas/releases) - [Commits](https://github.com/pandas-dev/pandas/compare/0.3.0...v2.2.2) --- updated-dependencies: - dependency-name: pandas dependency-type: direct:production ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index a1eec625355..f911d42b326 100644 --- a/requirements.txt +++ b/requirements.txt @@ -12,7 +12,7 @@ networkx<3.3 numpy>=1.21.0, <1.26.5 optuna<3.4.0 packaging<25.0 -pandas<2.2.2 +pandas<2.2.3 pillow<10.3.0 pint<0.22.0 protobuf<3.20.4 From 84f0a7a2f467e47665c9f964a6d92c71a648742c Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Fri, 26 Apr 2024 11:19:55 -0400 Subject: [PATCH 101/410] ci/ga/docs: Use default python-architecture for each platform to build docs In practise it means x64 for linux and windows and arm64 for macos. Restrict python3.7 macos jobs to macos-13. 
x64 builds are broken on macos-14 images[0], but python3.7 does not provide arm64 image[1] [0] https://github.com/actions/setup-python/issues/855 [1] https://github.com/actions/setup-python/issues/856 Signed-off-by: Jan Vesely --- .github/workflows/pnl-ci-docs.yml | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/.github/workflows/pnl-ci-docs.yml b/.github/workflows/pnl-ci-docs.yml index 2e5f305ca51..b5ca80aee63 100644 --- a/.github/workflows/pnl-ci-docs.yml +++ b/.github/workflows/pnl-ci-docs.yml @@ -17,7 +17,12 @@ concurrency: jobs: docs-build: - runs-on: ${{ matrix.os }} + # Python 3.7 x64 on macos-14 (arm64) images is broken [0] + # and arm64 version is not available [1]. + # Restrict python 3.7 macos runs to macos-13 + # [0] https://github.com/actions/setup-python/issues/855 + # [1] https://github.com/actions/setup-python/issues/856 + runs-on: ${{ (matrix.os == 'macos-latest' && matrix.python-version == '3.7') && 'macos-13' || matrix.os }} strategy: fail-fast: false # Matrix setup is a hacky way to include 'base' build in pull requests @@ -25,7 +30,6 @@ jobs: # on event name and final configuration (ubuntu, python3.11). matrix: python-version: ['3.7', '3.8', '3.9', '3.10', '3.11'] - python-architecture: ['x64'] os: [ubuntu-latest, macos-latest, windows-latest] event: - ${{ github.event_name }} @@ -81,7 +85,6 @@ jobs: # Block python3.7.17 on macos. see: # https://github.com/actions/setup-python/issues/682 python-version: ${{ (matrix.os == 'macos-latest' && matrix.python-version == '3.7') && '3.7.16' || matrix.python-version }} - architecture: ${{ matrix.python-architecture }} - name: Get pip cache location shell: bash @@ -95,8 +98,8 @@ jobs: uses: actions/cache@v4 with: path: ${{ steps.pip_cache.outputs.pip_cache_dir }}/wheels - key: ${{ runner.os }}-python-${{ matrix.python-version }}-${{ matrix.python-architecture }}-pip-wheels-${{ hashFiles('requirements.txt', 'doc_requirements.txt') }}-${{ github.sha }} - restore-keys: ${{ runner.os }}-python-${{ matrix.python-version }}-${{ matrix.python-architecture }}-pip-wheels-${{ hashFiles('requirements.txt', 'doc_requirements.txt') }} + key: ${{ runner.os }}-python-${{ matrix.python-version }}-pip-wheels-${{ hashFiles('requirements.txt', 'doc_requirements.txt') }}-${{ github.sha }} + restore-keys: ${{ runner.os }}-python-${{ matrix.python-version }}-pip-wheels-${{ hashFiles('requirements.txt', 'doc_requirements.txt') }} # We need to install all PNL deps since docs config imports psyneulink module - name: Install local, editable PNL package @@ -126,7 +129,7 @@ jobs: - name: Upload Documentation uses: actions/upload-artifact@v4 with: - name: Documentation-${{matrix.pnl-version}}-${{ matrix.os }}-${{ matrix.python-version }}-${{ matrix.python-architecture }} + name: Documentation-${{matrix.pnl-version}}-${{ matrix.os }}-${{ matrix.python-version }} retention-days: 1 path: docs/build/html @@ -172,7 +175,7 @@ jobs: - name: Download branch docs uses: actions/download-artifact@v4 with: - name: Documentation-head-${{ matrix.os }}-${{ matrix.python-version }}-x64 + name: Documentation-head-${{ matrix.os }}-${{ matrix.python-version }} path: _built_docs/${{ github.ref }} if: github.ref == 'refs/heads/master' || github.ref == 'refs/heads/devel' || github.ref == 'refs/heads/docs' @@ -189,7 +192,7 @@ jobs: - name: Download main docs uses: actions/download-artifact@v4 with: - name: Documentation-head-${{ matrix.os }}-${{ matrix.python-version }}-x64 + name: Documentation-head-${{ matrix.os }}-${{ matrix.python-version 
}} # This overwrites files in current directory if: startsWith(github.ref, 'refs/tags/') && contains(needs.*.outputs.on_master, 'master') From aaef6aa98067779651012e8ef6ad50fe60201467 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Fri, 26 Apr 2024 11:54:07 -0400 Subject: [PATCH 102/410] ci/ga: Use native python architecture for macos python 3.8 job x64 python is broken on arm64 images [0] [0] https://github.com/actions/setup-python/issues/855 Signed-off-by: Jan Vesely --- .github/workflows/pnl-ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/pnl-ci.yml b/.github/workflows/pnl-ci.yml index f7ca26e93b7..b704b86e11d 100644 --- a/.github/workflows/pnl-ci.yml +++ b/.github/workflows/pnl-ci.yml @@ -107,8 +107,8 @@ jobs: # add python 3.8 build on macos since 3.7 is broken # https://github.com/actions/virtual-environments/issues/4230 + # use default python-architecture - python-version: '3.8' - python-architecture: 'x64' os: macos exclude: From 42d3327159176edb65db2d0535e88f7afacdb154 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 26 Apr 2024 21:36:00 -0400 Subject: [PATCH 103/410] requirements: update pillow requirement from <10.3.0 to <10.4.0 (#2954) Updates the requirements on [pillow](https://github.com/python-pillow/Pillow) to permit the latest version. - [Release notes](https://github.com/python-pillow/Pillow/releases) - [Changelog](https://github.com/python-pillow/Pillow/blob/main/CHANGES.rst) - [Commits](https://github.com/python-pillow/Pillow/compare/1.0...10.3.0) --- updated-dependencies: - dependency-name: pillow dependency-type: direct:production ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index f911d42b326..07c50be4461 100644 --- a/requirements.txt +++ b/requirements.txt @@ -13,7 +13,7 @@ numpy>=1.21.0, <1.26.5 optuna<3.4.0 packaging<25.0 pandas<2.2.3 -pillow<10.3.0 +pillow<10.4.0 pint<0.22.0 protobuf<3.20.4 rich>=10.1, <10.13 From 7ac01364143fe4799c2eb6e6912bb7a23f5be16e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 26 Apr 2024 22:58:01 -0400 Subject: [PATCH 104/410] requirements: update networkx requirement from <3.3 to <3.4 (#2955) Updates the requirements on [networkx](https://github.com/networkx/networkx) to permit the latest version. - [Release notes](https://github.com/networkx/networkx/releases) - [Commits](https://github.com/networkx/networkx/compare/networkx-0.23...networkx-3.3) --- updated-dependencies: - dependency-name: networkx dependency-type: direct:production ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 07c50be4461..4da5693a0a1 100644 --- a/requirements.txt +++ b/requirements.txt @@ -8,7 +8,7 @@ leabra-psyneulink<0.3.3 llvmlite<0.43 matplotlib<3.7.6 modeci_mdf<0.5, >=0.4.3; (platform_machine == 'AMD64' or platform_machine == 'x86_64' or platform_machine == 'arm64' or platform_machine == 'aarch64') and platform_python_implementation == 'CPython' and implementation_name == 'cpython' -networkx<3.3 +networkx<3.4 numpy>=1.21.0, <1.26.5 optuna<3.4.0 packaging<25.0 From 7b7f3efbe81732e1a68a0870395dd02674dcf30f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 27 Apr 2024 01:03:19 -0400 Subject: [PATCH 105/410] requirements: update pytest requirement from <8.1.2 to <8.1.3 (#2956) Updates the requirements on [pytest](https://github.com/pytest-dev/pytest) to permit the latest version. - [Release notes](https://github.com/pytest-dev/pytest/releases) - [Changelog](https://github.com/pytest-dev/pytest/blob/main/CHANGELOG.rst) - [Commits](https://github.com/pytest-dev/pytest/compare/1.0.0b3...8.1.2) --- updated-dependencies: - dependency-name: pytest dependency-type: direct:development ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- dev_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev_requirements.txt b/dev_requirements.txt index 20047954f2d..4990f62c900 100644 --- a/dev_requirements.txt +++ b/dev_requirements.txt @@ -1,6 +1,6 @@ jupyter<1.0.1 packaging<25.0 -pytest<8.1.2 +pytest<8.1.3 pytest-benchmark<4.0.1 pytest-cov<5.0.1 pytest-forked<1.7.0 From 3621054643da2955834bedef76c1f0144e2b3b69 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Fri, 26 Apr 2024 13:32:49 -0400 Subject: [PATCH 106/410] ci/ga/docs: Represent correct os version in matrix.os Signed-off-by: Jan Vesely --- .github/workflows/pnl-ci-docs.yml | 21 +++++++++++++++------ 1 file changed, 15 insertions(+), 6 deletions(-) diff --git a/.github/workflows/pnl-ci-docs.yml b/.github/workflows/pnl-ci-docs.yml index b5ca80aee63..3fe9847c0ba 100644 --- a/.github/workflows/pnl-ci-docs.yml +++ b/.github/workflows/pnl-ci-docs.yml @@ -17,12 +17,7 @@ concurrency: jobs: docs-build: - # Python 3.7 x64 on macos-14 (arm64) images is broken [0] - # and arm64 version is not available [1]. - # Restrict python 3.7 macos runs to macos-13 - # [0] https://github.com/actions/setup-python/issues/855 - # [1] https://github.com/actions/setup-python/issues/856 - runs-on: ${{ (matrix.os == 'macos-latest' && matrix.python-version == '3.7') && 'macos-13' || matrix.os }} + runs-on: ${{ matrix.os }} strategy: fail-fast: false # Matrix setup is a hacky way to include 'base' build in pull requests @@ -50,6 +45,20 @@ jobs: - python-version: '3.10' pnl-version: 'base' + # Python 3.7 x64 on macos-14 (arm64) images is broken [0] + # and arm64 version is not available [1]. 
+ # Restrict python 3.7 macos runs to macos-13 + # [0] https://github.com/actions/setup-python/issues/855 + # [1] https://github.com/actions/setup-python/issues/856 + - python-version: '3.7' + os: macos-latest + + include: + - python-version: '3.7' + os: macos-13 + pnl-version: 'head' + event: 'push' + outputs: on_master: ${{ steps.on_master.outputs.on-branch }} From 3dfe792b8a7b878ad2f884aceafdcddcbd22a617 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Fri, 26 Apr 2024 17:50:29 -0400 Subject: [PATCH 107/410] ci/ga/docs: Generate version matrix dimension based on GA event Signed-off-by: Jan Vesely --- .github/workflows/pnl-ci-docs.yml | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/.github/workflows/pnl-ci-docs.yml b/.github/workflows/pnl-ci-docs.yml index 3fe9847c0ba..8ba5879f605 100644 --- a/.github/workflows/pnl-ci-docs.yml +++ b/.github/workflows/pnl-ci-docs.yml @@ -20,18 +20,11 @@ jobs: runs-on: ${{ matrix.os }} strategy: fail-fast: false - # Matrix setup is a hacky way to include 'base' build in pull requests - # The entire matrix is set up and 'base' builds are pruned based - # on event name and final configuration (ubuntu, python3.11). matrix: python-version: ['3.7', '3.8', '3.9', '3.10', '3.11'] os: [ubuntu-latest, macos-latest, windows-latest] - event: - - ${{ github.event_name }} - pnl-version: ['head', 'base'] + pnl-version: ${{ (github.event_name == 'push') && fromJSON('["head"]') || fromJSON('["head", "base"]') }} exclude: - - event: 'push' - pnl-version: 'base' - os: macos-latest pnl-version: 'base' - os: windows-latest @@ -57,7 +50,6 @@ jobs: - python-version: '3.7' os: macos-13 pnl-version: 'head' - event: 'push' outputs: on_master: ${{ steps.on_master.outputs.on-branch }} From 3f0c43442d9b5fd75351d8fe7a83c78acfecc18e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 30 Apr 2024 13:16:12 -0400 Subject: [PATCH 108/410] requirements: update pytest requirement from <8.1.3 to <8.2.1 (#2960) Updates the requirements on [pytest](https://github.com/pytest-dev/pytest) to permit the latest version. - [Release notes](https://github.com/pytest-dev/pytest/releases) - [Changelog](https://github.com/pytest-dev/pytest/blob/main/CHANGELOG.rst) - [Commits](https://github.com/pytest-dev/pytest/compare/1.0.0b3...8.2.0) --- updated-dependencies: - dependency-name: pytest dependency-type: direct:development ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- dev_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev_requirements.txt b/dev_requirements.txt index 4990f62c900..e084a322586 100644 --- a/dev_requirements.txt +++ b/dev_requirements.txt @@ -1,6 +1,6 @@ jupyter<1.0.1 packaging<25.0 -pytest<8.1.3 +pytest<8.2.1 pytest-benchmark<4.0.1 pytest-cov<5.0.1 pytest-forked<1.7.0 From e59784760d0720116d532fa9dd465bccd84fdc95 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 30 Apr 2024 16:01:34 -0400 Subject: [PATCH 109/410] requirements: update pytest-xdist requirement (#2959) Updates the requirements on [pytest-xdist](https://github.com/pytest-dev/pytest-xdist) to permit the latest version. 
- [Release notes](https://github.com/pytest-dev/pytest-xdist/releases) - [Changelog](https://github.com/pytest-dev/pytest-xdist/blob/master/CHANGELOG.rst) - [Commits](https://github.com/pytest-dev/pytest-xdist/compare/v3.2.0...v3.6.1) --- updated-dependencies: - dependency-name: pytest-xdist dependency-type: direct:development ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- dev_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev_requirements.txt b/dev_requirements.txt index e084a322586..b823dafcd95 100644 --- a/dev_requirements.txt +++ b/dev_requirements.txt @@ -8,4 +8,4 @@ pytest-helpers-namespace<2021.12.30 pytest-profiling<1.7.1 pytest-pycodestyle<2.4.0 pytest-pydocstyle<2.4.0 -pytest-xdist>=3.2.0, <3.6.0 +pytest-xdist>=3.2.0, <3.7.0 From 924530adae4ea1ba28733e30cec872b2300a4440 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 30 Apr 2024 20:59:16 -0400 Subject: [PATCH 110/410] requirements: update grpcio requirement from <1.63.0 to <1.64.0 (#2961) Updates the requirements on [grpcio](https://github.com/grpc/grpc) to permit the latest version. - [Release notes](https://github.com/grpc/grpc/releases) - [Changelog](https://github.com/grpc/grpc/blob/master/doc/grpc_release_schedule.md) - [Commits](https://github.com/grpc/grpc/compare/v0.62.0...v1.63.0) --- updated-dependencies: - dependency-name: grpcio dependency-type: direct:production ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 4da5693a0a1..68bf04889c3 100644 --- a/requirements.txt +++ b/requirements.txt @@ -3,7 +3,7 @@ dill<0.3.9 fastkde>=1.0.24, <1.0.31 graph-scheduler>=1.1.1, <1.3.0 graphviz<0.21.0 -grpcio<1.63.0 +grpcio<1.64.0 leabra-psyneulink<0.3.3 llvmlite<0.43 matplotlib<3.7.6 From 418125eb8b3fc1c34b5ac427de712b44ffbd3d7c Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Fri, 26 Apr 2024 18:23:19 -0400 Subject: [PATCH 111/410] docs: Fix table formatting Fixes 3 instances of: WARNING: Malformed table. 
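The "Malformed table" warnings referenced here are emitted by docutils, the reStructuredText parser that Sphinx builds on, whenever a grid or simple table's border columns stop lining up. The snippet below is a hypothetical standalone check (it is not part of this repository or its CI) that reproduces the same class of message by parsing a deliberately misaligned grid table:

    # Hypothetical helper: parse an RST fragment with docutils and print any
    # parser messages, such as the "Malformed table." errors fixed in this patch.
    from docutils import nodes
    from docutils.core import publish_doctree

    BAD_RST = """\
    +-------+-------+
    | col A | col B |
    +-------+----+
    | 1     | 2  |
    +-------+----+
    """

    doctree = publish_doctree(BAD_RST)
    for msg in doctree.findall(nodes.system_message):  # findall needs docutils >= 0.18
        print(msg.astext())

docutils reports "Malformed table." for this input because the bottom border is narrower than the top one; realigning the border columns, as the table adjustments in this patch do, removes the warning.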
Signed-off-by: Jan Vesely --- psyneulink/core/components/ports/port.py | 6 ++--- psyneulink/core/compositions/composition.py | 26 +++++++++---------- .../mechanisms/processing/integrator/ddm.py | 2 +- 3 files changed, 17 insertions(+), 17 deletions(-) diff --git a/psyneulink/core/components/ports/port.py b/psyneulink/core/components/ports/port.py index 53c3f5c4d81..76e283a5974 100644 --- a/psyneulink/core/components/ports/port.py +++ b/psyneulink/core/components/ports/port.py @@ -329,9 +329,9 @@ ============================================ ============================================================ *Attribute* *Projection Type and Port(s)* ============================================ ============================================================ - `path_afferents ` `MappingProjections ` to `InputPort` - `mod_afferents ` `ModulatoryProjections ` to any Port - `efferents ` `MappingProjections ` from `OutputPort` + `path_afferents ` `MappingProjections ` to `InputPort` + `mod_afferents ` `ModulatoryProjections ` to any Port + `efferents ` `MappingProjections ` from `OutputPort` ============================================ ============================================================ In addition to these attributes, all of the Projections sent and received by a Port are listed in its `projections diff --git a/psyneulink/core/compositions/composition.py b/psyneulink/core/compositions/composition.py index 0b5b721f47a..69c8db3d3e4 100644 --- a/psyneulink/core/compositions/composition.py +++ b/psyneulink/core/compositions/composition.py @@ -1098,19 +1098,19 @@ .. table:: :widths: 5 34 33 33 - +--------------------+------------------------------------+--------------------------------------------------------+ - | |**Composition** |**AutodiffComposition** | - +--------------------+------------------------------------+--------------------------+-----------------------------+ - | |*Python* |`AutodiffComposition_LLVM`|`AutodiffComposition_PyTorch`| - | | |(*Direct Compilation*) | | - +====================+====================================+==========================+=============================+ - |execution_mode= |`ExecutionMode.Python` |`ExecutionMode.LLVMRun` |`ExecutionMode.PyTorch | - +--------------------+------------------------------------+--------------------------+-----------------------------+ - |`learn() | | | | - |`|Python interpreted |LLVM compiled |PyTorch compiled | - | | | | | - |`run() | | | | - |` |Python interpreted |LLVM compiled |Python interpreted | + +--------------------+------------------------------------+---------------------------------------------------------+ + | |**Composition** |**AutodiffComposition** | + +--------------------+------------------------------------+--------------------------+------------------------------+ + | |*Python* |`AutodiffComposition_LLVM`|`AutodiffComposition_PyTorch` | + | | |(*Direct Compilation*) | | + +====================+====================================+==========================+==============================+ + |execution_mode= |`ExecutionMode.Python` |`ExecutionMode.LLVMRun` |`ExecutionMode.PyTorch | + +--------------------+------------------------------------+--------------------------+------------------------------+ + |`learn() | | | | + |`|Python interpreted |LLVM compiled |PyTorch compiled | + | | | | | + |`run() | | | | + |` |Python interpreted |LLVM compiled |Python interpreted | +--------------------+------------------------------------+--------------------------+------------------------------+ |*Speed:* |slow |fastest |fast 
| +--------------------+------------------------------------+--------------------------+------------------------------+ diff --git a/psyneulink/library/components/mechanisms/processing/integrator/ddm.py b/psyneulink/library/components/mechanisms/processing/integrator/ddm.py index cd4d6597950..9ca3bb8876e 100644 --- a/psyneulink/library/components/mechanisms/processing/integrator/ddm.py +++ b/psyneulink/library/components/mechanisms/processing/integrator/ddm.py @@ -129,7 +129,7 @@ + +----------------------------+----------------------------+ | | `DriftDiffusionAnalytical` | `DriftDiffusionIntegrator` | | | (`analytic | (`path integration) | -| **OutputPorts:** | `) | `) | +| **OutputPorts:** | `) | `) | +------------------------------------+----------------------------+----------------------------+ | `DECISION_VARIABLE | | | | ` | X | X | From f3c10c2b156330eb6823b7aca5b61b3c9b032fcd Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Fri, 26 Apr 2024 18:36:22 -0400 Subject: [PATCH 112/410] docs: Extend short title underlines Fixes 13 instances of: WARNING: Title underline too short. Signed-off-by: Jan Vesely --- docs/source/Cohen_HustonModel.rst | 2 +- docs/source/ConventionsAndDefinitions.rst | 2 +- docs/source/MontagueModel.rst | 2 +- docs/source/PCTC_model.rst | 2 +- .../core/components/ports/modulatorysignals/learningsignal.py | 2 +- .../processing/integrator/episodicmemorymechanism.py | 2 +- psyneulink/library/compositions/emcomposition.py | 4 ++-- 7 files changed, 8 insertions(+), 8 deletions(-) diff --git a/docs/source/Cohen_HustonModel.rst b/docs/source/Cohen_HustonModel.rst index 58ceab953c1..f949fd55dd7 100644 --- a/docs/source/Cohen_HustonModel.rst +++ b/docs/source/Cohen_HustonModel.rst @@ -141,7 +141,7 @@ are turned on again and the system is run either for a certain amount of trials, stimulus is turned on. PLEASE NOTE: ------------ +------------ Note that this implementation is slightly different than what was originally reported. The integration rate was set to 0.1 instead of 0.01. Noise was turned of to better understand the core processes, and not having to deal with several runs, averaging these runs and plotting standard errors for these averages (which depend on the noise and amount of diff --git a/docs/source/ConventionsAndDefinitions.rst b/docs/source/ConventionsAndDefinitions.rst index 077c4b93ea3..ce34429a8f2 100644 --- a/docs/source/ConventionsAndDefinitions.rst +++ b/docs/source/ConventionsAndDefinitions.rst @@ -43,7 +43,7 @@ and Compositions (combinations of Components that implement a model). .. _Definitions_Components: `Components ` -~~~~~~~~~~~ +~~~~~~~~~~~~~~~~~~~~~~~~ Components are objects that perform a specific function. Every Component has a: diff --git a/docs/source/MontagueModel.rst b/docs/source/MontagueModel.rst index 2e8da2c0423..fd011bd5a70 100644 --- a/docs/source/MontagueModel.rst +++ b/docs/source/MontagueModel.rst @@ -1,5 +1,5 @@ Dopamine and Temporal Differences Learning (Montague, Dayan & Sejnowski, 1996) -================================================================== +============================================================================== `"A framework for mesencephalic dopamine systems based on predictive Hebbian learning." 
`_ Overview diff --git a/docs/source/PCTC_model.rst b/docs/source/PCTC_model.rst index 567c3a5b581..058bc81331f 100644 --- a/docs/source/PCTC_model.rst +++ b/docs/source/PCTC_model.rst @@ -1,5 +1,5 @@ Proactive Control & Task Control: A Stroop Model (Kalanthroff et al., 2018) -================================================================ +=========================================================================== `"Task Conflict and Proactive Control: A Computational Theory of the Stroop Task" `_ Overview diff --git a/psyneulink/core/components/ports/modulatorysignals/learningsignal.py b/psyneulink/core/components/ports/modulatorysignals/learningsignal.py index 70675475ee9..32b59586f5d 100644 --- a/psyneulink/core/components/ports/modulatorysignals/learningsignal.py +++ b/psyneulink/core/components/ports/modulatorysignals/learningsignal.py @@ -44,7 +44,7 @@ .. _LearningSignal_Creation: Creating a LearningSignal ------------------------- +------------------------- A LearningSignal is created automatically whenever a `MappingProjection` is `specified for learning ` and the Projection belongs to the same `Composition ` as the diff --git a/psyneulink/library/components/mechanisms/processing/integrator/episodicmemorymechanism.py b/psyneulink/library/components/mechanisms/processing/integrator/episodicmemorymechanism.py index 16676d16110..6d4279f5bf2 100644 --- a/psyneulink/library/components/mechanisms/processing/integrator/episodicmemorymechanism.py +++ b/psyneulink/library/components/mechanisms/processing/integrator/episodicmemorymechanism.py @@ -339,7 +339,7 @@ .. _EpisodicMemoryMechanism_Examples_Memory_Init_Function: *Initialize memory in function* -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The contents of `memory ` can also be initialized using the **initializer** argument in the constructor for the EpisodicMemoryMechanism's `function `:: diff --git a/psyneulink/library/compositions/emcomposition.py b/psyneulink/library/compositions/emcomposition.py index f2a60007cc2..bf39f441acd 100644 --- a/psyneulink/library/compositions/emcomposition.py +++ b/psyneulink/library/compositions/emcomposition.py @@ -507,7 +507,7 @@ .. _EMComposition_Memory: *Memory* -~~~~~~~ +~~~~~~~~ The `memory ` attribute contains a record of the entries in the EMComposition's memory. This is in the form of a 2d array, in which rows (axis 0) are entries and columns (axis 1) are fields. The number of fields @@ -534,7 +534,7 @@ .. _EMComposition_Output: *Output* -~~~~~~~ +~~~~~~~~ The outputs corresponding to retrieved value for each field are represented as `OUTPUT ` `Nodes ` of the EMComposition, listed in its `retrieved_nodes ` attribute. From 2550f9f8bc6316a9a083667277080b28201ca14d Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Fri, 26 Apr 2024 19:31:23 -0400 Subject: [PATCH 113/410] docs: Fix "strong" markup Fixes 31 instances of: WARNING: Inline strong start-string without end-string. 
There are 6 remaining instances that all come from: psyneulink.core.scheduling.condition imported from graph_scheduler Signed-off-by: Jan Vesely --- docs/source/NieuwenhuisModel.rst | 16 ++++++++-------- .../nonstateful/optimizationfunctions.py | 4 ++-- .../control/optimizationcontrolmechanism.py | 2 +- psyneulink/core/components/ports/outputport.py | 2 +- psyneulink/core/compositions/composition.py | 8 ++++---- psyneulink/core/compositions/showgraph.py | 2 +- .../transfer/contrastivehebbianmechanism.py | 2 +- .../library/compositions/autodiffcomposition.py | 2 +- psyneulink/library/compositions/emcomposition.py | 12 ++++++------ 9 files changed, 25 insertions(+), 25 deletions(-) diff --git a/docs/source/NieuwenhuisModel.rst b/docs/source/NieuwenhuisModel.rst index df48df75b8b..7c4c07d33f4 100644 --- a/docs/source/NieuwenhuisModel.rst +++ b/docs/source/NieuwenhuisModel.rst @@ -65,18 +65,18 @@ associated `ObjectiveMechanism`, as shown in the figure below: Behavioral Network Subsystem ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -**INPUT LAYER**: a `TransferMechanism` with **size**=3 (one element for the input to the T1, T2 and distractor units -of the *DECISION LAYER*, respectively), and assigned a `Linear` function with **slope**=1.0 and **intercept**=0.0. +**INPUT LAYER**: a `TransferMechanism` with **size**\ =3 (one element for the input to the T1, T2 and distractor units +of the *DECISION LAYER*, respectively), and assigned a `Linear` function with **slope**\ =1.0 and **intercept**\ =0.0. -**DECISION LAYER**: an `LCAMechanism` Mechanism of **size**=3 (one element each for the T1, T2 and distractor units), +**DECISION LAYER**: an `LCAMechanism` Mechanism of **size**\ =3 (one element each for the T1, T2 and distractor units), and assigned a `Logistic` Function with a slope=1.0 and intercept=0.0. Each element has a self-excitatory connection -with a weight specified by **self_excitation**=2.5, a **leak**=-1.0, and every element is connected to every other -element by mutually inhibitory connections with a weight specified by **competition**=1.0. An ordinary differential +with a weight specified by **self_excitation**\ =2.5, a **leak**\ =-1.0, and every element is connected to every other +element by mutually inhibitory connections with a weight specified by **competition** =1.0. An ordinary differential equation describes the change in state over time, implemented in the LCAMechanism mechanism by setting -**integrator_mode**=`True` and **time_step_size**=0.02. +**integrator_mode** = `True` and **time_step_size**\ =0.02. -**RESPONSE LAYER**: an `LCAMechanism` Mechanism of **size**=2, with one element each for the response to T1 and T2, -respectively, **self_excitation**=2.0, **leak**=-1.0, and no mutually inhibitory weights (**competition**=0). +**RESPONSE LAYER**: an `LCAMechanism` Mechanism of **size**\ =2, with one element each for the response to T1 and T2, +respectively, **self_excitation**\ =2.0, **leak**\ =-1.0, and no mutually inhibitory weights (**competition**\ =0). **PROJECTIONS**: The weights of the behavioral network are implemented as `MappingProjections `. 
The `matrix ` parameter for the one from the *INPUT_LAYER* to the *DECISION_LAYER* uses a diff --git a/psyneulink/core/components/functions/nonstateful/optimizationfunctions.py b/psyneulink/core/components/functions/nonstateful/optimizationfunctions.py index df9cc84cc1b..266c4cdf2ae 100644 --- a/psyneulink/core/components/functions/nonstateful/optimizationfunctions.py +++ b/psyneulink/core/components/functions/nonstateful/optimizationfunctions.py @@ -168,7 +168,7 @@ class OptimizationFunction(Function_Base): relevant argument(s) as `NotImplemented`. .. technical_note:: - - Constructors of subclasses should include **kwargs in their constructor method, to accommodate arguments + - Constructors of subclasses should include ``**kwargs`` in their constructor method, to accommodate arguments required by some subclasses but not others (e.g., search_space needed by `GridSearch` but not `GradientOptimization`) so that subclasses can be used interchangeably by OptimizationControlMechanism. @@ -907,7 +907,7 @@ class GradientOptimization(OptimizationFunction): Sample variable by following gradient with respect to the value of `objective_function ` it generates, and return the sample that generates either the - highest (**direction=*ASCENT*) or lowest (**direction=*DESCENT*) value. + highest (**direction** = *ASCENT*) or lowest (**direction** = *DESCENT*) value. .. _GradientOptimization_Procedure: diff --git a/psyneulink/core/components/mechanisms/modulatory/control/optimizationcontrolmechanism.py b/psyneulink/core/components/mechanisms/modulatory/control/optimizationcontrolmechanism.py index 3fd505d122d..53a0ef82b9e 100644 --- a/psyneulink/core/components/mechanisms/modulatory/control/optimizationcontrolmechanism.py +++ b/psyneulink/core/components/mechanisms/modulatory/control/optimizationcontrolmechanism.py @@ -3703,7 +3703,7 @@ def state_distal_sources_and_destinations_dict(self): @property def state_feature_sources(self): - """Dict with {InputPort: source} for all INPUT Nodes of agent_rep, and sources in **state_feature_specs. + """Dict with {InputPort: source} for all INPUT Nodes of agent_rep, and sources in **state_feature_specs**. Used by state_distal_sources_and_destinations_dict() """ state_dict = {} diff --git a/psyneulink/core/components/ports/outputport.py b/psyneulink/core/components/ports/outputport.py index a46173272d9..a8ec7bb3a92 100644 --- a/psyneulink/core/components/ports/outputport.py +++ b/psyneulink/core/components/ports/outputport.py @@ -57,7 +57,7 @@ if the Mechanism is a `TERMINAL` Mechanism for that Process. Other configurations can also easily be specified using a Mechanism's **output_ports** argument (see `OutputPort_Specification` below). If it is created using its constructor, and a Mechanism is specified in the **owner** argument, it is automatically assigned to that Mechanism. -If its **owner* is not specified, `initialization is deferred. +If its **owner** is not specified, `initialization is deferred. .. 
_OutputPort_Deferred_Initialization: diff --git a/psyneulink/core/compositions/composition.py b/psyneulink/core/compositions/composition.py index 69c8db3d3e4..907093691fe 100644 --- a/psyneulink/core/compositions/composition.py +++ b/psyneulink/core/compositions/composition.py @@ -8214,7 +8214,7 @@ def add_reinforcement_learning_pathway(self, learning_update: Union[bool, Literal['online', 'after']] = 'online', default_projection_matrix=None, name: Optional[str] = None): - """Convenience method that calls `add_linear_learning_pathway` with **learning_function**=`Reinforcement` + """Convenience method that calls `add_linear_learning_pathway` with **learning_function** = `Reinforcement` Arguments --------- @@ -8271,7 +8271,7 @@ def add_td_learning_pathway(self, learning_update: Union[bool, Literal['online', 'after']] = 'online', default_projection_matrix=None, name: Optional[str] = None): - """Convenience method that calls `add_linear_learning_pathway` with **learning_function**=`TDLearning` + """Convenience method that calls `add_linear_learning_pathway` with **learning_function** = `TDLearning` Arguments --------- @@ -8327,7 +8327,7 @@ def add_backpropagation_learning_pathway(self, learning_update: Optional[Union[bool, Literal['online', 'after']]] = 'after', default_projection_matrix=None, name: str = None): - """Convenience method that calls `add_linear_learning_pathway` with **learning_function**=`Backpropagation` + """Convenience method that calls `add_linear_learning_pathway` with **learning_function** = `Backpropagation` Arguments --------- @@ -12525,7 +12525,7 @@ def get_input_format(self, if True, shows labels instead of values for Mechanisms that have an `input_label_dict `. For **num_trials** = 1, a representative label is shown; for **num_trials** > 1, a different label is used for each trial shown, cycling - through the set if **num_trials** is greater than the number of labels. If **num_trials = *FULL*, + through the set if **num_trials** is greater than the number of labels. If **num_trials** = *FULL*, trials will be included. it is set to the number of labels in the largest list specified in any `input_label_dict diff --git a/psyneulink/core/compositions/showgraph.py b/psyneulink/core/compositions/showgraph.py index 1229a7e9baf..cc339787a29 100644 --- a/psyneulink/core/compositions/showgraph.py +++ b/psyneulink/core/compositions/showgraph.py @@ -664,7 +664,7 @@ def show_graph(self, ------- `pdf` or Graphviz graph object : - determined by **output_fmt: + determined by **output_fmt**: - ``pdf`` -- PDF: (placed in current directory); - ``gv`` or ``jupyter`` -- Graphviz graph object; - ``gif`` -- gif diff --git a/psyneulink/library/components/mechanisms/processing/transfer/contrastivehebbianmechanism.py b/psyneulink/library/components/mechanisms/processing/transfer/contrastivehebbianmechanism.py index d7cb6f903e1..b1adee8216d 100644 --- a/psyneulink/library/components/mechanisms/processing/transfer/contrastivehebbianmechanism.py +++ b/psyneulink/library/components/mechanisms/processing/transfer/contrastivehebbianmechanism.py @@ -138,7 +138,7 @@ * *RECURRENT:* receives the `value ` of the Mechanism's `recurrent_projection `; .. - * *TARGET:* only implemented if **target_size** is specified, **separated = `True` (default), and + * *TARGET:* only implemented if **target_size** is specified, **separated** = `True` (default), and mode is not `SIMPLE_HEBBIAN `; receives the `target ` specified in the `run ` method of any `Composition` to which the Mechanism belongs. 
diff --git a/psyneulink/library/compositions/autodiffcomposition.py b/psyneulink/library/compositions/autodiffcomposition.py index 0f76f73d7d8..54d608fdd30 100644 --- a/psyneulink/library/compositions/autodiffcomposition.py +++ b/psyneulink/library/compositions/autodiffcomposition.py @@ -166,7 +166,7 @@ *PyTorch mode* ~~~~~~~~~~~~~~ -This is the default for an AutodiffComposition, but, can be specified explicitly by setting **execution_mode = +This is the default for an AutodiffComposition, but, can be specified explicitly by setting **execution_mode** = `ExecutionMode.PyTorch` in the `learn ` method (see `example ` in `BasicsAndPrimer`). In this mode, the AutodiffComposition is automatically translated to a `PyTorch `_ model for learning. This is comparable in speed to `LLVM compilation diff --git a/psyneulink/library/compositions/emcomposition.py b/psyneulink/library/compositions/emcomposition.py index bf39f441acd..dceadafbee4 100644 --- a/psyneulink/library/compositions/emcomposition.py +++ b/psyneulink/library/compositions/emcomposition.py @@ -2110,20 +2110,20 @@ def _construct_storage_node(self, and from the value_input_node to the retrieved_node for values. The `function ` of the `EMSorageMechanism` that takes the following arguments: - - **variable* -- template for an `entry ` in `memory`; + - **variable** -- template for an `entry ` in `memory`; - - **fields* -- the `input_nodes ` for the corresponding `fields + - **fields** -- the `input_nodes ` for the corresponding `fields ` of an `entry ` in `memory `; - - **field_types* -- a list of the same length as ``fields``, containing 1's for key fields and 0's for + - **field_types** -- a list of the same length as ``fields``, containing 1's for key fields and 0's for value fields; - - **concatenate_keys_node* -- node used to concatenate keys (if `concatenate_keys + - **concatenate_keys_node** -- node used to concatenate keys (if `concatenate_keys ` is `True`) or None; - - **memory_matrix* -- `memory_template `); + - **memory_matrix** -- `memory_template `); - - **learning_signals* -- list of ` `MappingProjection`\\s (or their ParameterPort`\\s) that store each + - **learning_signals** -- list of ` `MappingProjection`\\s (or their ParameterPort`\\s) that store each `field ` of `memory `; - **decay_rate** -- rate at which entries in the `memory_matrix ` decay; From 41b6ed187b70c6c436f9b39dcf37c9dd869792ed Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Fri, 26 Apr 2024 21:21:19 -0400 Subject: [PATCH 114/410] docs: Fix literal markup Fixes one instance of: WARNING: Inline literal start-string without end-string. Signed-off-by: Jan Vesely --- psyneulink/core/compositions/pathway.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/psyneulink/core/compositions/pathway.py b/psyneulink/core/compositions/pathway.py index a71632eb8b3..9e85bfc0728 100644 --- a/psyneulink/core/compositions/pathway.py +++ b/psyneulink/core/compositions/pathway.py @@ -316,7 +316,7 @@ Pathway's `pathway ` attribute. * `composition ` - contains the `Composition` that created the Pathway and to which it belongs, - or None if it is a ``template ` (i.e., was constructed on its own). + or None if it is a `template ` (i.e., was constructed on its own). 
* `roles ` and `Node ` attributes - if the Pathway was created by a Composition, the `roles ` attribute `this lists the `PathwayRoles ` assigned to it by the Composition From 88861123a26c497bcf6475fedfcd005a6efd0bf1 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Fri, 26 Apr 2024 22:27:44 -0400 Subject: [PATCH 115/410] docs: Fix Inline emphasis markup Fixes two instances of: WARNING: Inline emphasis start-string without end-string. The remaining instances are imported from schedulin.condition. Signed-off-by: Jan Vesely --- .../components/functions/nonstateful/optimizationfunctions.py | 2 +- psyneulink/core/components/mechanisms/mechanism.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/psyneulink/core/components/functions/nonstateful/optimizationfunctions.py b/psyneulink/core/components/functions/nonstateful/optimizationfunctions.py index 266c4cdf2ae..e349a8edfe6 100644 --- a/psyneulink/core/components/functions/nonstateful/optimizationfunctions.py +++ b/psyneulink/core/components/functions/nonstateful/optimizationfunctions.py @@ -173,7 +173,7 @@ class OptimizationFunction(Function_Base): `GradientOptimization`) so that subclasses can be used interchangeably by OptimizationControlMechanism. - Subclasses with attributes that depend on one of the OptimizationFunction's parameters should implement the - `reset ` method, that calls super().reset(*args) and then + `reset ` method, that calls ``super().reset(*args)`` and then reassigns the values of the dependent attributes accordingly. If an argument is not needed for the subclass, `NotImplemented` should be passed as the argument's value in the call to super (i.e., the OptimizationFunction's diff --git a/psyneulink/core/components/mechanisms/mechanism.py b/psyneulink/core/components/mechanisms/mechanism.py index dc2d0e98a05..0d4c9247ec7 100644 --- a/psyneulink/core/components/mechanisms/mechanism.py +++ b/psyneulink/core/components/mechanisms/mechanism.py @@ -988,7 +988,7 @@ Projections will not be executed (see `Lazy Evaluation `), but its `function ` will be. - - If the `value ` of a Port is specified, *neither its `afferent Projections ` + - If the `value ` of a Port is specified, *neither* its `afferent Projections ` nor it `function ` will be executed. - If the `variable ` and/or `value ` is specified for *all* of the From 998fb0a0ec4b841c85b2baa606b7db9bd8bd6270 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Sat, 27 Apr 2024 00:50:16 -0400 Subject: [PATCH 116/410] docs: Fix external links Fixes 3 instances of: WARNING: Unknown target name: Signed-off-by: Jan Vesely --- psyneulink/core/components/component.py | 2 +- .../core/components/functions/stateful/integratorfunctions.py | 2 +- psyneulink/core/compositions/parameterestimationcomposition.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/psyneulink/core/components/component.py b/psyneulink/core/components/component.py index dd74544bafb..5dceda790b4 100644 --- a/psyneulink/core/components/component.py +++ b/psyneulink/core/components/component.py @@ -113,7 +113,7 @@ .. note:: The size attribute serves a role similar to - `shape in Numpy`_, with the difference that + `shape in Numpy `_, with the difference that size permits the specification of `ragged arrays `_ -- that is, ones that have elements of varying lengths, such as [[1,2],[3,4,5]]. 
diff --git a/psyneulink/core/components/functions/stateful/integratorfunctions.py b/psyneulink/core/components/functions/stateful/integratorfunctions.py index 35388b54405..a87d14084d0 100644 --- a/psyneulink/core/components/functions/stateful/integratorfunctions.py +++ b/psyneulink/core/components/functions/stateful/integratorfunctions.py @@ -3913,7 +3913,7 @@ class FitzHughNagumoIntegrator(IntegratorFunction): # ------------------------- .. _FitzHughNagumoIntegrator: `function ` returns one time step of integration of the `Fitzhugh-Nagumo model - https://en.wikipedia.org/wiki/FitzHugh–Nagumo_model>`_ of an excitable oscillator: + `_ of an excitable oscillator: .. math:: time\\_constant_v \\frac{dv}{dt} = a_v * v^3 + (1 + threshold) * b_v * v^2 + (- threshold) * c_v * v^2 + diff --git a/psyneulink/core/compositions/parameterestimationcomposition.py b/psyneulink/core/compositions/parameterestimationcomposition.py index 0d7dc7590eb..10dd5cc20e0 100644 --- a/psyneulink/core/compositions/parameterestimationcomposition.py +++ b/psyneulink/core/compositions/parameterestimationcomposition.py @@ -141,7 +141,7 @@ Supported Optimizers -------------------- -- `DifferentialEvolution `_ Structure --------- From 0da29ca702c095ec97a9ee4ad587681e22553444 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Sat, 27 Apr 2024 01:15:04 -0400 Subject: [PATCH 117/410] docs: Fix inline interpreted markup Fixes 31 instances of: WARNING: Inline interpreted text or phrase reference start-string without end-string. Signed-off-by: Jan Vesely --- .../functions/nonstateful/optimizationfunctions.py | 4 ++-- .../functions/stateful/memoryfunctions.py | 13 ++++++------- psyneulink/core/components/mechanisms/mechanism.py | 8 ++++---- .../modulatory/control/gating/gatingmechanism.py | 4 ++-- .../control/optimizationcontrolmechanism.py | 2 +- .../modulatory/learning/learningmechanism.py | 2 +- .../ports/modulatorysignals/controlsignal.py | 2 +- .../ports/modulatorysignals/modulatorysignal.py | 8 ++++---- psyneulink/core/components/ports/outputport.py | 4 ++-- psyneulink/core/components/ports/port.py | 2 +- psyneulink/core/compositions/composition.py | 8 ++++---- .../compositions/parameterestimationcomposition.py | 6 +++--- psyneulink/core/globals/parameters.py | 2 +- .../learning/autoassociativelearningmechanism.py | 6 +++--- .../integrator/episodicmemorymechanism.py | 2 +- .../transfer/contrastivehebbianmechanism.py | 3 +-- .../processing/transfer/kohonenmechanism.py | 4 ++-- 17 files changed, 39 insertions(+), 41 deletions(-) diff --git a/psyneulink/core/components/functions/nonstateful/optimizationfunctions.py b/psyneulink/core/components/functions/nonstateful/optimizationfunctions.py index e349a8edfe6..047033a548e 100644 --- a/psyneulink/core/components/functions/nonstateful/optimizationfunctions.py +++ b/psyneulink/core/components/functions/nonstateful/optimizationfunctions.py @@ -839,7 +839,7 @@ def _is_static(it:SampleIterator): return outcomes, num_evals def reset_grid(self): - """Reset iterators in `search_space """ + """Reset iterators in `search_space `""" for s in self.search_space: s.reset() self.grid = itertools.product(*[s for s in self.search_space]) @@ -1680,7 +1680,7 @@ def _validate_params(self, request_set, target_set=None, context=None): @handle_external_context(fallback_most_recent=True) def reset(self, search_space, context=None, **kwargs): - """Assign size of `search_space """ + """Assign size of `search_space `""" super(GridSearch, self).reset(search_space=search_space, context=context, **kwargs) 
sample_iterators = search_space owner_str = '' diff --git a/psyneulink/core/components/functions/stateful/memoryfunctions.py b/psyneulink/core/components/functions/stateful/memoryfunctions.py index c87886b5c52..4dbe571aba6 100644 --- a/psyneulink/core/components/functions/stateful/memoryfunctions.py +++ b/psyneulink/core/components/functions/stateful/memoryfunctions.py @@ -661,7 +661,7 @@ class ContentAddressableMemory(MemoryFunction): # ------------------------------ Since `memory ` was not intialized, the first call to the Function returns an array of zeros, formatted as specified in **defaul_variable**. However, the input in the call to the Function - (``[[1,2]]``) is stored as an entry in `memory `:: + (``[[1,2]]``) is stored as an entry in `memory `: >>> c.memory array([[[1., 2.]]]) @@ -833,7 +833,7 @@ class ContentAddressableMemory(MemoryFunction): # ------------------------------ noise : float, list, 2d array, or Function : default 0.0 specifies random value(s) added to `variable ` before storing in `memory `; if a list or 2d array, it must be the same shape as `variable - ContentAddressableMemory.variable>` (see `noise ` for details). + ContentAddressableMemory.variable>` (see `noise ` for details). initializer : 3d array or list : default None specifies an initial set of entries for `memory ` (see @@ -1028,8 +1028,7 @@ class ContentAddressableMemory(MemoryFunction): # ------------------------------ Returns ------- - entry from `memory ` that best matches `variable - ` : 2d array + entry from `memory ` that best matches `variable ` : 2d array if no retrieval occurs, an appropriately shaped zero-valued array is returned. """ @@ -1993,7 +1992,7 @@ class DictionaryMemory(MemoryFunction): # ------------------------------------- .. math:: variable[1] * rate + noise - If the number of entries exceeds `max_entries , the first (oldest) item in + If the number of entries exceeds `max_entries `, the first (oldest) item in memory is deleted. Arguments @@ -2012,11 +2011,11 @@ class DictionaryMemory(MemoryFunction): # ------------------------------------- rate : float, list, or array : default 1.0 specifies a value used to multiply key (first item of `variable `) before - storing in `memory ` (see `rate for details). + storing in `memory ` (see `rate ` for details). noise : float, list, array, or Function : default 0.0 specifies a random value added to key (first item of `variable `) before - storing in `memory ` (see `noise for details). + storing in `memory ` (see `noise ` for details). initializer : 3d array or list : default None specifies an initial set of entries for `memory `. It must be of the following diff --git a/psyneulink/core/components/mechanisms/mechanism.py b/psyneulink/core/components/mechanisms/mechanism.py index 0d4c9247ec7..5b00bcb10a8 100644 --- a/psyneulink/core/components/mechanisms/mechanism.py +++ b/psyneulink/core/components/mechanisms/mechanism.py @@ -258,7 +258,7 @@ `, that can be created using a constructor; a *function* is an attribute that contains a callable method belonging to a Function, and that is executed when the Component to which the Function belongs is executed. Functions are used to assign, store, and apply parameter values associated with their function (see `Function - for a more detailed explanation). + ` for a more detailed explanation). The parameters of a Mechanism's `function ` are attributes of its `function `, and can be accessed using standard "dot" notation for that object. 
For @@ -993,7 +993,7 @@ - If the `variable ` and/or `value ` is specified for *all* of the OutputPorts of a Mechanism, then it's function will not be executed, and the `value ` - will retain its previous value (again in accord with `Lazy Evaluation ), though its + will retain its previous value (again in accord with `Lazy Evaluation `), though its OutputPorts *will* be executed using the assigned values, and it's `execution_count ` and `num_executions ` attributes will be incremented (since the OutputPorts -- Components of the Mechanism -- executed). @@ -1012,7 +1012,7 @@ *MAPPING_PROJECTION_PARAMS*, *CONTROL_PROJECTION_PARAMS*, etc.). The sub-dictionary can contain specifications that apply to *all* Projections of that type and/or individual Projections. If the key of an entryis the name of a parameter of the Projection (or its `function `), the specified value applies to *all* Projections of that -type. Parameters for individual Projections are specified using the Projections or its `name +type. Parameters for individual Projections are specified using the Projections or its `name ` as the key, and a dictionary containing parameter specifications as its value. .. note:: @@ -2358,7 +2358,7 @@ def execute(self, .. technical_note:: Execution sequence: - * Handle initialization if `initialization_status is + * Handle initialization if `initialization_status ` is *ContextFlags.INITIALIZING* * Assign any `Port-specific runtime params <_Mechanism_Runtime_Port_and_Projection_Param_Specification>` to corresponding `runtime_params ` dict. diff --git a/psyneulink/core/components/mechanisms/modulatory/control/gating/gatingmechanism.py b/psyneulink/core/components/mechanisms/modulatory/control/gating/gatingmechanism.py index dc50a93ecc7..f76fbb90981 100644 --- a/psyneulink/core/components/mechanisms/modulatory/control/gating/gatingmechanism.py +++ b/psyneulink/core/components/mechanisms/modulatory/control/gating/gatingmechanism.py @@ -240,7 +240,7 @@ class GatingMechanism(ControlMechanism): modulation=MULTIPLICATIVE) Subclass of `ModulatoryMechanism ` that gates (modulates) the value(s) of one or more `Ports - `. See `Mechanism ` for additional arguments and attributes. + `. See `Mechanism ` for additional arguments and attributes. COMMENT: Description: @@ -284,7 +284,7 @@ class GatingMechanism(ControlMechanism): specifies the `InputPorts ` and/or `OutputPorts ` to be gated by the GatingMechanism; the number of items must equal the length of the **default_gating_allocation** argument; if a `Mechanism ` is specified, its `primary InputPort ` - is used (see `GatingMechanism_GatingSignals for details). + is used (see `GatingMechanism_GatingSignals` for details). modulation : str : MULTIPLICATIVE specifies the default form of modulation used by the GatingMechanism's `GatingSignals `, diff --git a/psyneulink/core/components/mechanisms/modulatory/control/optimizationcontrolmechanism.py b/psyneulink/core/components/mechanisms/modulatory/control/optimizationcontrolmechanism.py index 53a0ef82b9e..2fbdeed01d0 100644 --- a/psyneulink/core/components/mechanisms/modulatory/control/optimizationcontrolmechanism.py +++ b/psyneulink/core/components/mechanisms/modulatory/control/optimizationcontrolmechanism.py @@ -1940,7 +1940,7 @@ def _validate_params(self, request_set, target_set=None, context=None): def _instantiate_input_ports(self, context=None): """Instantiate InputPorts for state_features (with state_feature_function if specified). 
- This instantiates the OptimizationControlMechanism's `state_input_ports; + This instantiates the OptimizationControlMechanism's `state_input_ports`; these are used to provide input to the agent_rep when its evaluate method is called The OptimizationControlMechanism's outcome_input_ports are instantiated by ControlMechanism._instantiate_input_ports in the call to super(). diff --git a/psyneulink/core/components/mechanisms/modulatory/learning/learningmechanism.py b/psyneulink/core/components/mechanisms/modulatory/learning/learningmechanism.py index cbc36aef425..1b71f931247 100644 --- a/psyneulink/core/components/mechanisms/modulatory/learning/learningmechanism.py +++ b/psyneulink/core/components/mechanisms/modulatory/learning/learningmechanism.py @@ -843,7 +843,7 @@ class LearningMechanism(ModulatoryMechanism_Base): the values of the InputPorts to which the `covariates_sources ` project; passed to the LearningMechanism's `function ` as the *COVARIATES* item of its `variable `, and assigned as the `value ` of the LearningMechanism's - *COVARIATES* `InputPort `s. + *COVARIATES* `InputPorts `. error_sources : list[ComparatorMechanism or LearningMechanism] the Mechanism(s) that calculate the error signal(s) provided to the diff --git a/psyneulink/core/components/ports/modulatorysignals/controlsignal.py b/psyneulink/core/components/ports/modulatorysignals/controlsignal.py index 2c3f401707b..648c746fb08 100644 --- a/psyneulink/core/components/ports/modulatorysignals/controlsignal.py +++ b/psyneulink/core/components/ports/modulatorysignals/controlsignal.py @@ -21,7 +21,7 @@ - `ControlSignal_Modulation` - `ControlSignal_Allocation_and_Intensity` - `ControlSignal_Costs` - * `ControlSignal_Execution`d + * `ControlSignal_Execution` * `ControlSignal_Examples` * `ControlSignal_Class_Reference` diff --git a/psyneulink/core/components/ports/modulatorysignals/modulatorysignal.py b/psyneulink/core/components/ports/modulatorysignals/modulatorysignal.py index 99ba05a591b..bdfafa0b7bc 100644 --- a/psyneulink/core/components/ports/modulatorysignals/modulatorysignal.py +++ b/psyneulink/core/components/ports/modulatorysignals/modulatorysignal.py @@ -36,8 +36,8 @@ used to `modulate ` the `value ` of one or more `Ports ` by way of one or more `ModulatoryProjctions `. A ModulatorySignal modulates the value of a Port by modifying a parameter of that Port's `function `. There are three types of ModulatorySignals, each of which -is associated wth a particular type of `ModulatoryMechanism ` and `ModulatoryProjection -`, and modifies the value of different types of Ports, as summarized `below: +is associated with a particular type of `ModulatoryMechanism ` and `ModulatoryProjection +`, and modifies the value of different types of Ports, as summarized below: * `ControlSignal` takes the `allocation ` assigned to it by the `function ` @@ -47,8 +47,8 @@ Mechanism's `InputPorts ` or `OutputPorts ` (and thereby the `value ` of the corresponding Port). .. -* `GatingSignal` takes the `allocation ` assigned to it by the `function - ` of the `GatingMechanism` to which it belongs, and uses it to modulate the parameter +* `GatingSignal` takes the `allocation ` assigned to it by the + `function ` of the `GatingMechanism` to which it belongs, and uses it to modulate the parameter of the `function ` of an `InputPort` or `OutputPort` (and hence that Port's `value `). A GatingMechanism and GatingSignal can be thought of as implementing a form of control specialized for gating the input to and/or output of a Mechanism. 
diff --git a/psyneulink/core/components/ports/outputport.py b/psyneulink/core/components/ports/outputport.py index a8ec7bb3a92..97a492e0add 100644 --- a/psyneulink/core/components/ports/outputport.py +++ b/psyneulink/core/components/ports/outputport.py @@ -57,7 +57,7 @@ if the Mechanism is a `TERMINAL` Mechanism for that Process. Other configurations can also easily be specified using a Mechanism's **output_ports** argument (see `OutputPort_Specification` below). If it is created using its constructor, and a Mechanism is specified in the **owner** argument, it is automatically assigned to that Mechanism. -If its **owner** is not specified, `initialization is deferred. +If its **owner** is not specified, `initialization` is deferred. .. _OutputPort_Deferred_Initialization: @@ -1519,7 +1519,7 @@ class StandardOutputPorts(): ignoring any VARIABLE entries previously specified for individual OutputPorts; * list of ints -- assigns each int to an (OWNER_VALUE, int) entry of the corresponding OutputPort in - `output_port_dicts, ignoring any VARIABLE entries previously specified for individual OutputPorts; + `output_port_dicts`, ignoring any VARIABLE entries previously specified for individual OutputPorts; * None -- assigns `None` to VARIABLE entries for all OutputPorts for which it is not already specified. diff --git a/psyneulink/core/components/ports/port.py b/psyneulink/core/components/ports/port.py index 76e283a5974..162cdbe3e26 100644 --- a/psyneulink/core/components/ports/port.py +++ b/psyneulink/core/components/ports/port.py @@ -920,7 +920,7 @@ class Port_Base(Port): efferents : Optional[List[Projection]] list of outgoing Projections from the Port (i.e., for which is a `sender `; note: only `OutputPorts `, and members of its `ModulatoryProjection ` - subclass (`LearningProjection, ControlProjection and GatingProjection) have efferents; the list is empty for + subclass (`LearningProjection`, `ControlProjection` and `GatingProjection`) have efferents; the list is empty for InputPorts and ParameterPorts. function : TransferFunction : default determined by type diff --git a/psyneulink/core/compositions/composition.py b/psyneulink/core/compositions/composition.py index 907093691fe..ecd6c53bfcf 100644 --- a/psyneulink/core/compositions/composition.py +++ b/psyneulink/core/compositions/composition.py @@ -135,7 +135,7 @@ values as the **projections** argument of that method. In general, this is not neded -- default Projections are created for Pathways and/or Nodes added to the Composition using the methods described above; however it can be useful for custom configurations, including the implementation of specific Projection `matrices - `. + `. .. _Composition_Controller_Arg: @@ -1000,7 +1000,7 @@ Add explanation of how learning_rate applies to Unsupervised forms of learning COMMENT The rate at which learning occurs in a `learning pathway ` is determined by the -`learning_rate Parameter of the `LearningMechanism(s) ` in that +`learning_rate ` Parameter of the `LearningMechanism(s) ` in that Pathway. If it is not specified, then the `default value ` for the LearningMechanism's `function ` is used, which is determined by the kind of learning in that Pathway. However, the learning_rate can be specified in several other ways, both at construction and/or execution. 
At construction, it can @@ -1104,7 +1104,7 @@ | |*Python* |`AutodiffComposition_LLVM`|`AutodiffComposition_PyTorch` | | | |(*Direct Compilation*) | | +====================+====================================+==========================+==============================+ - |execution_mode= |`ExecutionMode.Python` |`ExecutionMode.LLVMRun` |`ExecutionMode.PyTorch | + |execution_mode= |`ExecutionMode.Python` |`ExecutionMode.LLVMRun` |`ExecutionMode.PyTorch` | +--------------------+------------------------------------+--------------------------+------------------------------+ |`learn() | | | | |`|Python interpreted |LLVM compiled |PyTorch compiled | @@ -12542,7 +12542,7 @@ def get_input_format(self, Returns ------- - Either a dict formatted appropriately for assignment as the **inputs** argument of the Composition's `run() + Either a dict formatted appropriately for assignment as the **inputs** argument of the Composition's `run()` method (form = *DICT*, the default), or string showing the format required by the **inputs** argument ` (form = *TEXT*). diff --git a/psyneulink/core/compositions/parameterestimationcomposition.py b/psyneulink/core/compositions/parameterestimationcomposition.py index 10dd5cc20e0..dc8c2ff8032 100644 --- a/psyneulink/core/compositions/parameterestimationcomposition.py +++ b/psyneulink/core/compositions/parameterestimationcomposition.py @@ -69,7 +69,7 @@ `, the `values ` of which are used to evaluate the fit of the different combinations of `parameter ` values sampled. An important limitation of the PEC is that the `outcome_variables ` - must be a subset of the output ports of the `model `'s `terminal Mechanism. + must be a subset of the output ports of the `model `'s terminal Mechanism. * **optimization_function** - specifies the function used to search over the combinations of `parameter ` values to be estimated. This must be either an instance of @@ -266,7 +266,7 @@ class ParameterEstimationComposition(Composition): `, the `values ` of which are used to evaluate the fit of the different combinations of `parameter ` values sampled. An important limitation of the PEC is that the `outcome_variables ` - must be a subset of the output ports of the `model `'s `terminal Mechanism. + must be a subset of the output ports of the `model `'s terminal `Mechanism`. model : specifies an external `Composition` for which parameters are to be `fit to data @@ -299,7 +299,7 @@ class ParameterEstimationComposition(Composition): optimization_function : OptimizationFunction, function or method : default or MaximumLikelihood or GridSearch specifies the function used to search over the combinations of `parameter ` values to be estimated. This must be either an instance of - `PECOptimizationFunction` or a string name of one of the supported optimizers. + `PECOptimizationFunction` or a string name of one of the supported optimizers. num_estimates : int : default 1 specifies the number of estimates made for a each combination of `parameter ` diff --git a/psyneulink/core/globals/parameters.py b/psyneulink/core/globals/parameters.py index ca74b7e1071..47606f8b0c3 100644 --- a/psyneulink/core/globals/parameters.py +++ b/psyneulink/core/globals/parameters.py @@ -24,7 +24,7 @@ ``t.defaults.noise.defaults.noise``) - class defaults are accessible by ``t.class_defaults`` or ``TransferMechanism.defaults`` (e.g., - ``t.class_defaults.noise`` or `TransferMechanism.defaults.noise) + ``t.class_defaults.noise`` or `TransferMechanism.defaults.noise`) .. 
note:: ``t.defaults.noise`` is shorthand for ``t.parameters.noise.default_value``, and they both refer to the default diff --git a/psyneulink/library/components/mechanisms/modulatory/learning/autoassociativelearningmechanism.py b/psyneulink/library/components/mechanisms/modulatory/learning/autoassociativelearningmechanism.py index 3d4b5d62962..24dc94d7403 100644 --- a/psyneulink/library/components/mechanisms/modulatory/learning/autoassociativelearningmechanism.py +++ b/psyneulink/library/components/mechanisms/modulatory/learning/autoassociativelearningmechanism.py @@ -76,9 +76,9 @@ --------- An AutoAssociativeLearningMechanism executes in the same manner as standard `LearningMechanism`, with two exceptions: -* 1) its execution can be enabled or disabled by setting the `learning_enabled - ` attribute of the `RecurrentTransferMechanism` with which it is - associated (identified in its `activity_source ` attribute). +* 1) its execution can be enabled or disabled by setting the `learning_enabled ` + attribute of the `RecurrentTransferMechanism` with which it is associated (identified in its + `activity_source ` attribute). * 2) it is executed during the `execution phase ` of the Composition's execution. Note that this is different from the behavior of supervised learning algorithms (such as `Reinforcement` and `BackPropagation`), that are executed during the `learning phase ` of a Composition's execution diff --git a/psyneulink/library/components/mechanisms/processing/integrator/episodicmemorymechanism.py b/psyneulink/library/components/mechanisms/processing/integrator/episodicmemorymechanism.py index 6d4279f5bf2..3f657fd86a0 100644 --- a/psyneulink/library/components/mechanisms/processing/integrator/episodicmemorymechanism.py +++ b/psyneulink/library/components/mechanisms/processing/integrator/episodicmemorymechanism.py @@ -87,7 +87,7 @@ .. hint:: Use **default_variable** rather than **memory** to specify the shape of memory but keep it empty until the first entry is stored; note, however, that since retrieval is executed before storage - (see `EpisodicMemoryMechanism_Execution ), the first execution will return an entry of zeros. + (see `EpisodicMemoryMechanism_Execution`), the first execution will return an entry of zeros. * **memory** -- specifies a set of entries to be stored in `memory `; it is passed to the constructor for the EpisodicMemoryMechanism's `function `) as its diff --git a/psyneulink/library/components/mechanisms/processing/transfer/contrastivehebbianmechanism.py b/psyneulink/library/components/mechanisms/processing/transfer/contrastivehebbianmechanism.py index b1adee8216d..e4f94caf2d5 100644 --- a/psyneulink/library/components/mechanisms/processing/transfer/contrastivehebbianmechanism.py +++ b/psyneulink/library/components/mechanisms/processing/transfer/contrastivehebbianmechanism.py @@ -634,8 +634,7 @@ class ContrastiveHebbianMechanism(RecurrentTransferMechanism): condition for that phase is specified as *CONVERGENCE*. Compares the value of `current_activity ` with the previous `value `; result is - assigned as the value of `delta - . + assigned as the value of `delta `. 
minus_phase_termination_condition : CONVERGENCE or COUNT: default CONVERGENCE determines the type of condition used to terminate the `minus_phase ` of diff --git a/psyneulink/library/components/mechanisms/processing/transfer/kohonenmechanism.py b/psyneulink/library/components/mechanisms/processing/transfer/kohonenmechanism.py index 2fd224f0425..cf7182b5ea3 100644 --- a/psyneulink/library/components/mechanisms/processing/transfer/kohonenmechanism.py +++ b/psyneulink/library/components/mechanisms/processing/transfer/kohonenmechanism.py @@ -144,7 +144,7 @@ class KohonenMechanism(TransferMechanism): learning_function : LearningFunction, function or method : default Kohonen(distance_function=GUASSIAN) specifies function used by `learning_mechanism ` to update `matrix - ` of `learned_projection . + ` of `learned_projection `. Attributes @@ -179,7 +179,7 @@ class KohonenMechanism(TransferMechanism): learning_function : LearningFunction, function or method function used by `learning_mechanism ` to update `matrix - ` of `learned_projection . + ` of `learned_projection `. learning_mechanism : LearningMechanism created automatically if `learning is specified `, and used to train the From f91fe9a16866b26f96ae2bce79bd32250b5d4b89 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Sat, 27 Apr 2024 01:57:52 -0400 Subject: [PATCH 118/410] docs: Fix table vs. substitution conflict Fixes one instance of: WARNING: Inline substitution_reference start-string without end-string. Signed-off-by: Jan Vesely --- .../core/components/functions/stateful/integratorfunctions.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/psyneulink/core/components/functions/stateful/integratorfunctions.py b/psyneulink/core/components/functions/stateful/integratorfunctions.py index a87d14084d0..a7dc2e4592f 100644 --- a/psyneulink/core/components/functions/stateful/integratorfunctions.py +++ b/psyneulink/core/components/functions/stateful/integratorfunctions.py @@ -4059,7 +4059,7 @@ class FitzHughNagumoIntegrator(IntegratorFunction): # ------------------------- +---------------------------------------+------------------------------------------------+----------------------------------------------+------------------------------------+---------------------------------------------------------------+ |**FitzHughNagumoIntegrator Parameter** |`threshold `|`variable `|`f_v `|`time_constant_v ` | +---------------------------------------+------------------------------------------------+----------------------------------------------+-------------------------+--------------------------------------------------------------------------+ - |**Gilzenrat Parameter** |a |:math:`f(X_1)` |:math:`w_{vX_1}` |:math:`T_{v}` | + |**Gilzenrat Parameter** |a | :math:`f(X_1)` | :math:`w_{vX_1}` | :math:`T_{v}` | +---------------------------------------+------------------------------------------------+----------------------------------------------+------------------------------------+---------------------------------------------------------------+ The following FitzHughNagumoIntegrator parameter values must be set in the equation for :math:`\\frac{dw}{dt}`: From c0f6407338b4f6687aa1179b793000dc9a13b5e6 Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Tue, 18 Jul 2023 04:12:09 +0000 Subject: [PATCH 119/410] Composition: fix bug storing results Results array is stateful but was taken from the parameters object before a default execution_id is assigned. This resulted in a shared list between the None execution_id and the default. 
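A minimal sketch of the hazard (illustrative names only, not the actual Composition/Parameter code): a stateful results list keyed by execution_id that is fetched under the None key before the default execution_id exists ends up shared with the default context, so appending under either key mutates both.

    # execution_id -> results list; setdefault stands in for stateful Parameter storage
    values = {}

    def get_results(execution_id):
        return values.setdefault(execution_id, [])

    results = get_results(None)                 # fetched before the default execution_id is assigned
    values["default-execution-id"] = results    # default context later reuses the same list object

    get_results("default-execution-id").append([0.5])
    print(get_results(None))                    # [[0.5]] -- results leaked across contexts

The change below avoids this by retrieving the results list only after the default execution_id has been assigned.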
--- psyneulink/core/compositions/composition.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/psyneulink/core/compositions/composition.py b/psyneulink/core/compositions/composition.py index ecd6c53bfcf..66fb4f7e7db 100644 --- a/psyneulink/core/compositions/composition.py +++ b/psyneulink/core/compositions/composition.py @@ -11128,10 +11128,6 @@ def run( self._set_up_animation(context) # SET UP EXECUTION ----------------------------------------------- - results = self.parameters.results._get(context) - if results is None: - results = [] - self.rich_diverted_reports = None self.recorded_reports = None @@ -11211,6 +11207,10 @@ def run( self._reset_stateful_functions_when_cache[node] = node.reset_stateful_function_when node.reset_stateful_function_when = reset_stateful_functions_when[node] + results = self.parameters.results._get(context) + if results is None: + results = [] + is_simulation = (context is not None and ContextFlags.SIMULATION_MODE in context.runmode) From 0a6da2d06c52b883cf05a175f4b296f997dcc7f0 Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Wed, 7 Jun 2023 00:48:26 +0000 Subject: [PATCH 120/410] MDF: Condition: handle np.number and np.array types as arguments --- psyneulink/core/scheduling/condition.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/psyneulink/core/scheduling/condition.py b/psyneulink/core/scheduling/condition.py index 06b203d065d..bc617f5fa01 100644 --- a/psyneulink/core/scheduling/condition.py +++ b/psyneulink/core/scheduling/condition.py @@ -126,6 +126,11 @@ def _parse_condition_arg(arg): return parse_valid_identifier(arg.name) elif isinstance(arg, Condition): return arg.as_mdf_model() + elif ( + isinstance(arg, np.number) + or (isinstance(arg, np.ndarray) and arg.ndim == 0) + ): + return arg.item() elif arg is None or isinstance(arg, numbers.Number): return arg else: From a54977a84488027c9d4e2690f0a6b7e6f0615350 Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Thu, 8 Jun 2023 04:06:46 +0000 Subject: [PATCH 121/410] MDF: handle parameters/args in list and dict format --- psyneulink/core/globals/mdf.py | 27 +++++++++++++++------------ 1 file changed, 15 insertions(+), 12 deletions(-) diff --git a/psyneulink/core/globals/mdf.py b/psyneulink/core/globals/mdf.py index a422db02620..94e12ae8443 100644 --- a/psyneulink/core/globals/mdf.py +++ b/psyneulink/core/globals/mdf.py @@ -227,6 +227,19 @@ def _get_mdf_object(obj, cls_): return None +def _get_parameters_from_mdf_base_object(model, pnl_type): + model_params = getattr(model, pnl_type._model_spec_id_parameters) + + if isinstance(model_params, list): + parameters = {p.id: p.value for p in model_params} + elif isinstance(model_params, dict): + parameters = dict(model_params) + else: + parameters = {} + + return parameters + + def _parse_component_type(model_obj): def get_pnl_component_type(s): from psyneulink.core.components.component import ComponentsMeta @@ -543,30 +556,20 @@ def _generate_component_string( is_user_defined_function = False try: - parameters = dict(getattr(component_model, component_type._model_spec_id_parameters)) + parameters = _get_parameters_from_mdf_base_object(component_model, component_type) except AttributeError: is_user_defined_function = True - except TypeError: - parameters = {} if is_user_defined_function or component_type is UserDefinedFunction: custom_func = component_type component_type = UserDefinedFunction - try: - parameters = dict(getattr(component_model, component_type._model_spec_id_parameters)) - except TypeError: - parameters = 
{} + parameters = _get_parameters_from_mdf_base_object(component_model, component_type) parameters['custom_function'] = f'{custom_func}' try: del component_model.metadata['custom_function'] except KeyError: pass - try: - parameters.update(getattr(component_model, component_type._model_spec_id_parameters)) - except TypeError: - pass - try: # args in function dict parameters.update(component_model.function[list(component_model.function.keys())[0]]) From c09c19c0e63d85a62f9721f14780a6253497e72f Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Wed, 7 Jun 2023 03:30:05 +0000 Subject: [PATCH 122/410] Component: _get_current_parameter_value: pass modulated=True to _get --- psyneulink/core/components/component.py | 2 +- psyneulink/core/globals/parameters.py | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/psyneulink/core/components/component.py b/psyneulink/core/components/component.py index 5dceda790b4..139833eee1c 100644 --- a/psyneulink/core/components/component.py +++ b/psyneulink/core/components/component.py @@ -3433,7 +3433,7 @@ def _get_current_parameter_value(self, parameter, context=None): if 'Multiple ParameterPorts' in str(e): raise - return parameter._get(context) + return parameter._get(context, modulated=True) def _reset_runtime_parameters(self, context): if context.execution_id in self._runtime_params_reset: diff --git a/psyneulink/core/globals/parameters.py b/psyneulink/core/globals/parameters.py index 47606f8b0c3..ed307c435a1 100644 --- a/psyneulink/core/globals/parameters.py +++ b/psyneulink/core/globals/parameters.py @@ -260,7 +260,9 @@ def _recurrent_transfer_mechanism_matrix_setter(value, owning_component=None, co +------------------+---------------+--------------------------------------------+-----------------------------------------+ | getter | None |hook that allows overriding the retrieval of|kwargs self, owning_component, and | | | |values based on a supplied method |context will be passed in if your | -| | |(e.g. _output_port_variable_getter) |method uses them. | +| | |(e.g. _output_port_variable_getter) |method uses them. modulated=True will be | +| | | |passed in when modulated values are | +| | | |requested. 
| | | | |self: the Parameter calling the setter | | | | |owning_component: the Component to which | | | | | the Parameter belongs | From 5fb02e8d0a269f61a71c70618faa3e48af364d65 Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Wed, 7 Jun 2023 03:34:19 +0000 Subject: [PATCH 123/410] Port: refactor is_modulated -> has_modulation this method actually wants the composition, so allow passing it directly instead of context --- psyneulink/core/components/component.py | 3 +-- psyneulink/core/components/functions/function.py | 10 ++++++---- psyneulink/core/components/ports/port.py | 8 +++++--- 3 files changed, 12 insertions(+), 9 deletions(-) diff --git a/psyneulink/core/components/component.py b/psyneulink/core/components/component.py index 139833eee1c..e0d620d1181 100644 --- a/psyneulink/core/components/component.py +++ b/psyneulink/core/components/component.py @@ -4394,8 +4394,7 @@ def __repr__(self): @property def modulated(self): - # TODO: consider making this - # self._parameter.port.is_modulated(self._owner.most_recent_context) + # TODO: consider using self._parameter.port.has_modulation # because the port existing doesn't necessarily mean modulation # is actually happening if self._parameter.port is not None: diff --git a/psyneulink/core/components/functions/function.py b/psyneulink/core/components/functions/function.py index 9455414bdc7..3587651b346 100644 --- a/psyneulink/core/components/functions/function.py +++ b/psyneulink/core/components/functions/function.py @@ -351,15 +351,17 @@ def _seed_setter(value, owning_component, context): return int(value) -def _random_state_getter(self, owning_component, context): +def _random_state_getter(self, owning_component, context, modulated=False): seed_param = owning_component.parameters.seed try: - is_modulated = seed_param.port.is_modulated(context) + has_modulation = seed_param.port.has_modulation(context.composition) except AttributeError: - is_modulated = False + has_modulation = False - if is_modulated: + # 'has_modulation' indicates that seed has an active modulatory projection + # 'modulated' indicates that the modulated value is requested + if has_modulation and modulated: seed_value = [int(owning_component._get_current_parameter_value(seed_param, context))] else: seed_value = [int(seed_param._get(context=context))] diff --git a/psyneulink/core/components/ports/port.py b/psyneulink/core/components/ports/port.py index 162cdbe3e26..504f4bdee01 100644 --- a/psyneulink/core/components/ports/port.py +++ b/psyneulink/core/components/ports/port.py @@ -2324,10 +2324,12 @@ def full_name(self): def _assign_default_port_Name(self): return False - @handle_external_context() - def is_modulated(self, context): + def has_modulation(self, composition) -> bool: + """Returns True if this Port has an active incoming modulatory + projection in **composition** or False if it does not. + """ for ma in self.mod_afferents: - if self.afferents_info[ma].is_active_in_composition(context.composition): + if self.afferents_info[ma].is_active_in_composition(composition): return True return False From 83bc070b8a732ef6697b0ff815ed8c97047053dc Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Thu, 8 Jun 2023 04:33:31 +0000 Subject: [PATCH 124/410] DriftOnASphereIntegrator: fix initializer assignment use of _parse_initializer ignored any user-specified value and always set it to random values of length dimension-1. This should only occur when initializer is not specified by the user, and otherwise, the user's input should be validated against dimension. 
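A minimal sketch of the intended behavior (a standalone helper with hypothetical names, not the Parameter hooks changed in the diff below): parse and validate a user-supplied initializer against dimension - 1, and fall back to random values only when none was given.

    import numpy as np

    def resolve_initializer(initializer, dimension, rng=None):
        rng = rng if rng is not None else np.random.default_rng()
        expected_len = dimension - 1
        if initializer is None:                   # not user-specified: random fill
            return rng.random(expected_len)
        initializer = np.asarray(initializer)     # user-specified: parse, then validate
        if initializer.ndim != 1 or len(initializer) != expected_len:
            raise ValueError(
                f"'initializer' must be a list or 1d array of length {expected_len} "
                "(the value of the 'dimension' parameter minus 1)"
            )
        return initializer

    resolve_initializer([0.1, 0.1, 0.1], dimension=4)   # kept as given
    resolve_initializer(None, dimension=4)              # random, length 3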
--- .../functions/stateful/integratorfunctions.py | 18 ++++++++---------- tests/functions/test_integrator.py | 4 ++-- 2 files changed, 10 insertions(+), 12 deletions(-) diff --git a/psyneulink/core/components/functions/stateful/integratorfunctions.py b/psyneulink/core/components/functions/stateful/integratorfunctions.py index a7dc2e4592f..9ed9deecd65 100644 --- a/psyneulink/core/components/functions/stateful/integratorfunctions.py +++ b/psyneulink/core/components/functions/stateful/integratorfunctions.py @@ -2985,6 +2985,11 @@ def _validate_dimension(self, dimension): if not isinstance(dimension, int) or dimension < 2: return 'dimension must be an integer >= 2' + def _parse_initializer(self, initializer): + if initializer is not None: + initializer = np.asarray(initializer) + return initializer + # FIX: THIS SEEMS DUPLICATIVE OF DriftOnASphereIntegrator._validate_params() (THOUGH THAT GETS CAUGHT EARLIER) def _validate_initializer(self, initializer): initializer_len = self.dimension.default_value - 1 @@ -2993,15 +2998,6 @@ def _validate_initializer(self, initializer): return f"'initializer' must be a list or 1d array of length {initializer_len} " \ f"(the value of the \'dimension\' parameter minus 1)" - def _parse_initializer(self, initializer): - """Assign initial value as array of random values of length dimension-1""" - initializer = np.array(initializer) - initializer_dim = self.dimension.default_value - 1 - if initializer.ndim != 1 or len(initializer) != initializer_dim: - initializer = np.random.random(initializer_dim) - self.initializer._set_default_value(initializer) - return initializer - def _parse_noise(self, noise): """Assign initial value as array of random values of length dimension-1""" if isinstance(noise, list): @@ -3085,7 +3081,9 @@ def _instantiate_attributes_before_function(self, function=None, context=None): """Need to override this to manage mismatch in dimensionality of initializer vs. variable""" if not self.parameters.initializer._user_specified: - self._initialize_previous_value(self.parameters.initializer.get(context), context) + expected_initializer_dim = self.parameters.dimension._get(context) - 1 + initializer = np.random.random(expected_initializer_dim) + self._initialize_previous_value(initializer, context) # Remove initializer from self.initializers to manage mismatch in dimensionality of initializer vs. 
variable initializers = list(self.initializers) diff --git a/tests/functions/test_integrator.py b/tests/functions/test_integrator.py index 9c12d407aaa..42381edb0de 100644 --- a/tests/functions/test_integrator.py +++ b/tests/functions/test_integrator.py @@ -238,8 +238,8 @@ def test_integrator_function_with_default_variable_and_params_of_different_lengt err_msg_noise = "must be a list or 1d array of length 3 (the value of the 'dimension' parameter minus 1)" test_vars = [ - ({'initializer': 0.1}, err_msg_initializer, FunctionError), - ({'initializer': [0.1,0.1]}, err_msg_initializer, FunctionError), + ({'initializer': 0.1}, err_msg_initializer, ParameterError), + ({'initializer': [0.1, 0.1]}, err_msg_initializer, ParameterError), ({'initializer': [0.1,0.1,0.1]}, None, None), ({'angle_function': Angle}, None, None), ({'angle_function': Angle()}, None, None), From 9e4863083b67ac8a8431e55eb5086a8d2fb1718a Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Sat, 10 Jun 2023 02:59:03 +0000 Subject: [PATCH 125/410] CombineMeans: avoid creating unnecessary object dtype for means Constructing the means array elementwise with None as initial shape results in object dtype even if all computed elements are numeric. This resulted in a 1d mechanism value due to bypass of convert_output_type --- .../functions/nonstateful/combinationfunctions.py | 6 ++---- tests/mechanisms/test_processing_mechanism.py | 2 +- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/psyneulink/core/components/functions/nonstateful/combinationfunctions.py b/psyneulink/core/components/functions/nonstateful/combinationfunctions.py index 7b93330b209..93decb4ecde 100644 --- a/psyneulink/core/components/functions/nonstateful/combinationfunctions.py +++ b/psyneulink/core/components/functions/nonstateful/combinationfunctions.py @@ -49,7 +49,7 @@ CROSS_ENTROPY, DEFAULT_VARIABLE, EXPONENTS, LINEAR_COMBINATION_FUNCTION, MULTIPLICATIVE_PARAM, OFFSET, OPERATION, \ PREDICTION_ERROR_DELTA_FUNCTION, PRODUCT, REARRANGE_FUNCTION, REDUCE_FUNCTION, SCALE, SUM, WEIGHTS, \ PREFERENCE_SET_NAME -from psyneulink.core.globals.utilities import convert_to_np_array, is_numeric, np_array_less_than_2d, ValidParamSpecType +from psyneulink.core.globals.utilities import convert_all_elements_to_np_array, convert_to_np_array, is_numeric, np_array_less_than_2d, ValidParamSpecType from psyneulink.core.globals.context import ContextFlags, handle_external_context from psyneulink.core.globals.parameters import Parameter, check_user_specified from psyneulink.core.globals.preferences.basepreferenceset import \ @@ -1972,9 +1972,7 @@ def _function(self, # if np_array_less_than_2d(variable): # return (variable * scale) + offset - means = np.array([[None]] * len(variable)) - for i, item in enumerate(variable): - means[i] = np.mean(item) + means = convert_all_elements_to_np_array([np.mean(item) for item in variable]) # FIX FOR EFFICIENCY: CHANGE THIS AND WEIGHTS TO TRY/EXCEPT // OR IS IT EVEN NECESSARY, GIVEN VALIDATION ABOVE??
# Apply exponents if they were specified diff --git a/tests/mechanisms/test_processing_mechanism.py b/tests/mechanisms/test_processing_mechanism.py index 8436fd3bf10..d1d154c33e8 100644 --- a/tests/mechanisms/test_processing_mechanism.py +++ b/tests/mechanisms/test_processing_mechanism.py @@ -63,7 +63,7 @@ def test_processing_mechanism_linear_function(self): "function,expected", [ (LinearCombination, [[1.]]), (Reduce, [[1.]]), - (CombineMeans, [1.0]), + (CombineMeans, [[1.0]]), (Exponential, [[2.71828183]]), (Logistic, [[0.73105858]]), (SoftMax, [[1, ]]), From 4901bce3c6df784938764878816f0fd29591b5a6 Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Fri, 2 Jun 2023 05:12:01 +0000 Subject: [PATCH 126/410] PECOptimizationFunction: make initial_seed shared and use context --- .../functions/nonstateful/fitfunctions.py | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/psyneulink/core/components/functions/nonstateful/fitfunctions.py b/psyneulink/core/components/functions/nonstateful/fitfunctions.py index abc945816bf..9a5ba78232f 100644 --- a/psyneulink/core/components/functions/nonstateful/fitfunctions.py +++ b/psyneulink/core/components/functions/nonstateful/fitfunctions.py @@ -7,13 +7,13 @@ from beartype import beartype from psyneulink.core.globals import SampleIterator -from psyneulink.core.globals.context import ContextFlags, handle_external_context +from psyneulink.core.globals.context import Context, ContextFlags, handle_external_context from psyneulink.core.components.functions.nonstateful.optimizationfunctions import ( OptimizationFunction, OptimizationFunctionError, SEARCH_SPACE, ) -from psyneulink.core.globals.parameters import check_user_specified +from psyneulink.core.globals.parameters import SharedParameter, check_user_specified from psyneulink._typing import ( Dict, @@ -288,6 +288,8 @@ class PECOptimizationFunction(OptimizationFunction): """ + class Parameters(OptimizationFunction.Parameters): + initial_seed = SharedParameter(attribute_name='owner') @check_user_specified @beartype @@ -506,7 +508,7 @@ def _function(self, variable=None, context=None, params=None, **kwargs): f = self._make_objective_func(context=context) # Run the MLE optimization - results = self._fit(obj_func=f) + results = self._fit(obj_func=f, context=context) # Get the optimal function value and sample optimal_value = results["optimal_value"] @@ -538,9 +540,10 @@ def _fit( self, obj_func: Callable, display_iter: bool = True, + context: Context = None, ): if self.method == "differential_evolution": - return self._fit_differential_evolution(obj_func, display_iter) + return self._fit_differential_evolution(obj_func, display_iter, context) elif isinstance(self.method, optuna.samplers.BaseSampler): return self._fit_optuna( obj_func=obj_func, opt_func=self.method, display_iter=display_iter @@ -631,6 +634,7 @@ def _fit_differential_evolution( self, obj_func: Callable, display_iter: bool = True, + context: Context = None, ): """ Implementation of search using scipy's differential_evolution algorithm. @@ -643,7 +647,7 @@ def _fit_differential_evolution( # Get a seed to pass to scipy for its search. 
Make this dependent on the seed of the # OCM - seed_for_scipy = self.owner.initial_seed + seed_for_scipy = self._get_current_parameter_value('initial_seed', context) direction = 1 if self.direction == "minimize" else -1 From 5e972ef455ac39de9e88606941a38a74d181bc01 Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Fri, 2 Jun 2023 05:15:36 +0000 Subject: [PATCH 127/410] ParameterEstimationComposition: make seed parameters shared --- .../parameterestimationcomposition.py | 50 ++----------------- 1 file changed, 3 insertions(+), 47 deletions(-) diff --git a/psyneulink/core/compositions/parameterestimationcomposition.py b/psyneulink/core/compositions/parameterestimationcomposition.py index dc8c2ff8032..976dd8c650b 100644 --- a/psyneulink/core/compositions/parameterestimationcomposition.py +++ b/psyneulink/core/compositions/parameterestimationcomposition.py @@ -186,7 +186,7 @@ handle_external_context, ) from psyneulink.core.globals.keywords import BEFORE, OVERRIDE -from psyneulink.core.globals.parameters import Parameter, check_user_specified +from psyneulink.core.globals.parameters import Parameter, SharedParameter, check_user_specified from psyneulink.core.globals.utilities import convert_to_list from psyneulink.core.scheduling.time import TimeScale from psyneulink.core.components.ports.outputport import OutputPort @@ -209,38 +209,6 @@ class ParameterEstimationCompositionError(CompositionError): pass -def _initial_seed_getter(owning_component, context=None): - try: - return owning_component.controller.parameters.initial_seed._get(context) - except AttributeError: - return None - - -def _initial_seed_setter(value, owning_component, context=None): - owning_component.controller.parameters.initial_seed.set(value, context) - return value - - -def _same_seed_for_all_parameter_combinations_getter(owning_component, context=None): - try: - return ( - owning_component.controller.parameters.same_seed_for_all_allocations._get( - context - ) - ) - except AttributeError: - return None - - -def _same_seed_for_all_parameter_combinations_setter( - value, owning_component, context=None -): - owning_component.controler.parameters.same_seed_for_all_allocations.set( - value, context - ) - return value - - class ParameterEstimationComposition(Composition): """ Subclass of `Composition` that estimates specified parameters either to fit the results of a Composition @@ -473,20 +441,8 @@ class Parameters(Composition.Parameters): """ # FIX: 11/32/21 CORRECT INITIAlIZATIONS? 
- initial_seed = Parameter( - None, - loggable=False, - pnl_internal=True, - getter=_initial_seed_getter, - setter=_initial_seed_setter, - ) - same_seed_for_all_parameter_combinations = Parameter( - False, - loggable=False, - pnl_internal=True, - getter=_same_seed_for_all_parameter_combinations_getter, - setter=_same_seed_for_all_parameter_combinations_setter, - ) + initial_seed = SharedParameter(attribute_name='controller') + same_seed_for_all_parameter_combinations = SharedParameter(attribute_name='controller') @handle_external_context() @check_user_specified From 5b95cf8bb49db00f66d3dabcc29f39c0b64aefdc Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Fri, 9 Jun 2023 02:29:32 +0000 Subject: [PATCH 128/410] DictionaryMemory: consolidate default entry creation --- .../functions/stateful/memoryfunctions.py | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/psyneulink/core/components/functions/stateful/memoryfunctions.py b/psyneulink/core/components/functions/stateful/memoryfunctions.py index 4dbe571aba6..877d5faa1b2 100644 --- a/psyneulink/core/components/functions/stateful/memoryfunctions.py +++ b/psyneulink/core/components/functions/stateful/memoryfunctions.py @@ -2563,6 +2563,11 @@ def _validate(self, context=None): raise FunctionError(f'Value returned by {repr(SELECTION_FUNCTION)} specified for {self.__class__} ' f'({result}) must return an array of the same length it receives') + def _get_default_entry(self, context): + key = [0] * self.parameters.key_size._get(context) + val = [0] * self.parameters.val_size._get(context) + return [key, val] + def _initialize_previous_value(self, initializer, context=None): """Ensure that initializer is appropriate for assignment as memory attribute and assign as previous_value @@ -2691,8 +2696,7 @@ def _function(self, # QUESTION: SHOULD IT RETURN 0's VECTOR OR NOT RETRIEVE AT ALL (LEAVING VALUE & OutputPort FROM LAST TRIAL)? 
# CURRENT PROBLEM WITH LATTER IS THAT IT CAUSES CRASH ON INIT, SINCE NOT OUTPUT_PORT # SO, WOULD HAVE TO RETURN ZEROS ON INIT AND THEN SUPPRESS AFTERWARDS, AS MOCKED UP BELOW - memory = [[0]* self.parameters.key_size._get(context), [0]* self.parameters.val_size._get(context)] - + memory = self._get_default_entry(context) # Store variable to dict: rate = self._get_current_parameter_value(RATE, context) if rate is not None: @@ -2759,11 +2763,7 @@ def get_memory(self, query_key:Union[list, np.ndarray], context=None): # if no memory, return the zero vector if len(_memory[KEYS]) == 0: - # zeros_key = [0] * self.parameters.key_size.get(context) - # zeros_val = [0] * self.parameters.val_size.get(context) - zeros_key = [0] * self.parameters.key_size.get(context) - zeros_val = [0] * self.parameters.val_size.get(context) - return [zeros_key, zeros_val] + return self._get_default_entry(context) # Get distances between query_key and all keys in memory distances = [self.distance_function([query_key, list(m)]) for m in _memory[KEYS]] @@ -2784,8 +2784,7 @@ def get_memory(self, query_key:Union[list, np.ndarray], context=None): for other in indices_of_selected_items[1:])): warnings.warn(f'More than one item matched key ({query_key}) in memory for {self.name} of ' f'{self.owner.name} even though {repr("duplicate_keys")} is False') - return [[0]* self.parameters.key_size._get(context), - [0]* self.parameters.val_size._get(context)] + return self._get_default_entry(context) if self.equidistant_keys_select == RANDOM: random_state = self._get_current_parameter_value('random_state', context) index_of_selected_item = random_state.choice(indices_of_selected_items) From bc5f12e8cf44a00fbe7f95ff0bc5b9f5374b22c5 Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Thu, 25 May 2023 03:47:09 +0000 Subject: [PATCH 129/410] DriftOnASphereIntegrator: correct default initializer --- .../core/components/functions/stateful/integratorfunctions.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/psyneulink/core/components/functions/stateful/integratorfunctions.py b/psyneulink/core/components/functions/stateful/integratorfunctions.py index 9ed9deecd65..9d1b2b52954 100644 --- a/psyneulink/core/components/functions/stateful/integratorfunctions.py +++ b/psyneulink/core/components/functions/stateful/integratorfunctions.py @@ -2969,7 +2969,7 @@ class Parameters(IntegratorFunction.Parameters): time_step_size = Parameter(1.0, modulable=True) previous_time = Parameter(0.0, initializer='starting_point', pnl_internal=True) dimension = Parameter(3, stateful=False, read_only=True) - initializer = Parameter([0], initalizer='variable', dependencies=dimension, stateful=True) + initializer = Parameter([0, 0], initalizer='variable', dependencies=dimension, stateful=True) angle_function = Parameter(None, stateful=False, loggable=False) random_state = Parameter(None, loggable=False, getter=_random_state_getter, dependencies='seed') seed = Parameter(DEFAULT_SEED, modulable=True, fallback_default=True, setter=_seed_setter) From b3ec692f79fcfb531649133ca5be728a00ff4c0d Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Thu, 25 May 2023 04:35:44 +0000 Subject: [PATCH 130/410] OptimizationControlMechanism: correct non-stateful use of num_estimates --- .../control/optimizationcontrolmechanism.py | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/psyneulink/core/components/mechanisms/modulatory/control/optimizationcontrolmechanism.py 
b/psyneulink/core/components/mechanisms/modulatory/control/optimizationcontrolmechanism.py index 2fbdeed01d0..0b1325b02c6 100644 --- a/psyneulink/core/components/mechanisms/modulatory/control/optimizationcontrolmechanism.py +++ b/psyneulink/core/components/mechanisms/modulatory/control/optimizationcontrolmechanism.py @@ -2953,9 +2953,11 @@ def _set_mechanism_value(self, context): def _create_randomization_control_signal(self, context): - if self.num_estimates: + num_estimates = self.parameters.num_estimates._get(context) + + if num_estimates: # must be SampleSpec in allocation_samples arg - randomization_seed_mod_values = SampleSpec(start=1, stop=self.num_estimates, step=1) + randomization_seed_mod_values = SampleSpec(start=1, stop=num_estimates, step=1) # FIX: 11/3/21 noise PARAM OF TransferMechanism IS MARKED AS SEED WHEN ASSIGNED A DISTRIBUTION FUNCTION, # BUT IT HAS NO PARAMETER PORT BECAUSE THAT PRESUMABLY IS FOR THE INTEGRATOR FUNCTION, @@ -2968,10 +2970,10 @@ def _create_randomization_control_signal(self, context): self.random_variables = self.agent_rep.random_variables if not self.random_variables: - warnings.warn(f"'{self.name}' has '{NUM_ESTIMATES} = {self.num_estimates}' specified, " + warnings.warn(f"'{self.name}' has '{NUM_ESTIMATES} = {num_estimates}' specified, " f"but its '{AGENT_REP}' ('{self.agent_rep.name}') has no random variables: " f"'{RANDOMIZATION_CONTROL_SIGNAL}' will not be created, and num_estimates set to None.") - self.num_estimates = None + self.parameters.num_estimates._set(None, context) return randomization_control_signal = ControlSignal(name=RANDOMIZATION_CONTROL_SIGNAL, @@ -2981,7 +2983,7 @@ def _create_randomization_control_signal(self, context): modulation=OVERRIDE, cost_options=CostFunctions.NONE, # FIXME: Hack that Jan found to prevent some LLVM runtime errors - default_allocation=[self.num_estimates]) + default_allocation=[num_estimates]) randomization_control_signal = self._instantiate_control_signal(randomization_control_signal, context) randomization_control_signal_index = len(self.output_ports) randomization_control_signal._variable_spec = (OWNER_VALUE, randomization_control_signal_index) @@ -2990,9 +2992,9 @@ def _create_randomization_control_signal(self, context): # Otherwise, assert that num_estimates and number of seeds generated by randomization_control_signal are equal num_seeds = self.control_signals[RANDOMIZATION_CONTROL_SIGNAL].parameters.allocation_samples._get(context).num - assert self.num_estimates == num_seeds, \ + assert num_estimates == num_seeds, \ f"PROGRAM ERROR: The value of the {NUM_ESTIMATES} Parameter of {self.name}" \ - f"({self.num_estimates}) is not equal to the number of estimates that will be generated by " \ + f"({num_estimates}) is not equal to the number of estimates that will be generated by " \ f"its {RANDOMIZATION_CONTROL_SIGNAL} ControlSignal ({num_seeds})." 
function_search_space = self.function.parameters.search_space._get(context) From fe52e2d5b9171ff733baebc3d20bd49a2636d40d Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Thu, 8 Jun 2023 02:06:56 +0000 Subject: [PATCH 131/410] Function_Base: don't catch and re-raise ValueError --- psyneulink/core/components/functions/function.py | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/psyneulink/core/components/functions/function.py b/psyneulink/core/components/functions/function.py index 3587651b346..04c1fba5f9c 100644 --- a/psyneulink/core/components/functions/function.py +++ b/psyneulink/core/components/functions/function.py @@ -706,14 +706,9 @@ def function(self, target_set=target_set, ) # Execute function - try: - value = self._function(variable=variable, - context=context, - params=params, - **kwargs) - except ValueError as err: - err_msg = f"Problem with '{self}' in '{self.owner.name if self.owner else self.__class__.__name__}': {err}" - raise FunctionError(err_msg) from err + value = self._function( + variable=variable, context=context, params=params, **kwargs + ) self.most_recent_context = context self.parameters.value._set(value, context=context) self._reset_runtime_parameters(context) From 627f3eb62f57a6b08701006b4bdc2d79f9b6ac2c Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Mon, 18 Dec 2023 23:33:17 +0000 Subject: [PATCH 132/410] EMStorageMechanism: add 'fields' as Parameter other Parameters reference it as a dependency, and it is needed for replicating the mechanism --- .../mechanisms/modulatory/learning/EMstoragemechanism.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/psyneulink/library/components/mechanisms/modulatory/learning/EMstoragemechanism.py b/psyneulink/library/components/mechanisms/modulatory/learning/EMstoragemechanism.py index a7f8107669f..87d046c8839 100644 --- a/psyneulink/library/components/mechanisms/modulatory/learning/EMstoragemechanism.py +++ b/psyneulink/library/components/mechanisms/modulatory/learning/EMstoragemechanism.py @@ -503,6 +503,9 @@ class Parameters(LearningMechanism.Parameters): parse_spec=True, constructor_argument='fields', ) + fields = Parameter( + [], stateful=False, loggable=False, read_only=True, structural=True + ) field_types = Parameter([],stateful=False, loggable=False, read_only=True, From d1d7eaf93ee6eba0d5d67ac85397f5b8fd763ca5 Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Wed, 10 Mar 2021 18:27:36 -0500 Subject: [PATCH 133/410] utilities: convert_all_elements_to_np_array: fix crash on arr containing 0 dim arr --- psyneulink/core/globals/utilities.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/psyneulink/core/globals/utilities.py b/psyneulink/core/globals/utilities.py index eeb9b69e5dc..b1cab8ff725 100644 --- a/psyneulink/core/globals/utilities.py +++ b/psyneulink/core/globals/utilities.py @@ -1621,10 +1621,10 @@ def convert_all_elements_to_np_array(arr, cast_from=None, cast_to=None): ------- a numpy array containing the converted **arr** """ - if isinstance(arr, np.ndarray) and arr.dtype != object: + if isinstance(arr, np.ndarray): if cast_from is not None and arr.dtype == cast_from: return np.asarray(arr, dtype=cast_to) - else: + elif arr.ndim == 0 or arr.dtype != object: return arr if cast_from is not None and isinstance(arr, cast_from): From e5a5e0f4e667fd3b7e54a33390a280a060fbdf68 Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Thu, 11 Mar 2021 23:17:03 -0500 Subject: [PATCH 134/410] utilities: convert_all_elements_to_np_array: create fewer 
unnecessary internal arrays --- psyneulink/core/globals/utilities.py | 53 ++++++++++++++++++---------- 1 file changed, 35 insertions(+), 18 deletions(-) diff --git a/psyneulink/core/globals/utilities.py b/psyneulink/core/globals/utilities.py index b1cab8ff725..7cb7c828ff2 100644 --- a/psyneulink/core/globals/utilities.py +++ b/psyneulink/core/globals/utilities.py @@ -1621,30 +1621,41 @@ def convert_all_elements_to_np_array(arr, cast_from=None, cast_to=None): ------- a numpy array containing the converted **arr** """ - if isinstance(arr, np.ndarray): - if cast_from is not None and arr.dtype == cast_from: - return np.asarray(arr, dtype=cast_to) - elif arr.ndim == 0 or arr.dtype != object: - return arr + def recurse(arr): + if isinstance(arr, np.ndarray): + if cast_from is not None and arr.dtype == cast_from: + return np.asarray(arr, dtype=cast_to) + elif arr.ndim == 0 or arr.dtype != object: + return arr - if cast_from is not None and isinstance(arr, cast_from): - return np.asarray(arr, dtype=cast_to) + if isinstance(arr, np.number): + return np.asarray(arr) - if not isinstance(arr, collections.abc.Iterable) or isinstance(arr, str): - return np.array(arr) + if cast_from is not None and isinstance(arr, cast_from): + return cast_to(arr) - if isinstance(arr, np.matrix): - if arr.dtype == object: - return np.asarray([convert_all_elements_to_np_array(arr.item(i), cast_from, cast_to) for i in range(arr.size)]) - else: + if not isinstance(arr, collections.abc.Iterable) or isinstance(arr, str): return arr - subarr = [convert_all_elements_to_np_array(x, cast_from, cast_to) for x in arr] + if isinstance(arr, np.matrix): + if arr.dtype == object: + return np.asarray([recurse(arr.item(i)) for i in range(arr.size)]) + else: + return arr + + subarr = [recurse(x) for x in arr] + + with warnings.catch_warnings(): + warnings.filterwarnings('error', message='.*ragged.*', category=np.VisibleDeprecationWarning) + try: + # the elements are all uniform in shape, so we can use numpy's standard behavior + return np.asarray(subarr) + except np.VisibleDeprecationWarning: + pass + except ValueError as e: + if 'The requested array has an inhomogeneous shape' not in str(e): + raise - if all([subarr[i].shape == subarr[0].shape for i in range(1, len(subarr))]): - # the elements are all uniform in shape, so we can use numpy's standard behavior - return np.asarray(subarr) - else: # the elements are nonuniform, so create an array that just wraps them individually # numpy cannot easily create arrays with subarrays of certain dimensions, workaround here # https://stackoverflow.com/q/26885508/3131666 @@ -1655,6 +1666,12 @@ def convert_all_elements_to_np_array(arr, cast_from=None, cast_to=None): return elementwise_subarr + if not isinstance(arr, collections.abc.Iterable) or isinstance(arr, str): + # only wrap a noniterable if it's the outermost value + return np.asarray(arr) + else: + return recurse(arr) + # Seeds and randomness class SeededRandomState(np.random.RandomState): From ac0a1dbe4600a10ba2357cd16f90793a167936d4 Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Mon, 15 Mar 2021 22:28:40 -0400 Subject: [PATCH 135/410] utilities: add helper functions using numpy array_is_scalar - returns True if an ndarray represents a single scalar value try_extract_0d_array_item - returns an ndarray's single scalar value --- psyneulink/core/globals/utilities.py | 32 +++++++++++++++++++++++++++- 1 file changed, 31 insertions(+), 1 deletion(-) diff --git a/psyneulink/core/globals/utilities.py b/psyneulink/core/globals/utilities.py 
index 7cb7c828ff2..333339e0b40 100644 --- a/psyneulink/core/globals/utilities.py +++ b/psyneulink/core/globals/utilities.py @@ -150,7 +150,7 @@ 'scalar_distance', 'sinusoid', 'tensor_power', 'TEST_CONDTION', 'type_match', 'underscore_to_camelCase', 'UtilitiesError', 'unproxy_weakproxy', 'create_union_set', 'merge_dictionaries', - 'contains_type' + 'contains_type', 'is_numeric_scalar', 'try_extract_0d_array_item', ] logger = logging.getLogger(__name__) @@ -2115,3 +2115,33 @@ def _generated_toposort_key(obj): return -1 return _generated_toposort_key + + +# np.isscalar returns true on non-numeric items +def is_numeric_scalar(obj) -> bool: + """ + Returns: + True if **obj** is a numbers.Number or a numpy ndarray + containing a single numeric value + False otherwise + """ + + try: + # getting .item() and checking type is significantly slower + return obj.ndim == 0 and obj.dtype.kind in {'i', 'f'} + except (AttributeError, ValueError): + return isinstance(obj, Number) + + +def try_extract_0d_array_item(arr: np.ndarray): + """ + Returns: + the single item in **arr** if **arr** is a 0-dimensional + numpy ndarray, otherwise **arr** + """ + try: + if arr.ndim == 0: + return arr.item() + except AttributeError: + pass + return arr From c64237b49261baae98176a66be96366f8d3c5a9d Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Thu, 25 Feb 2021 19:03:53 -0500 Subject: [PATCH 136/410] functions: return numpy array as output for multi-item functions --- .../functions/nonstateful/distributionfunctions.py | 10 ++++++---- .../functions/nonstateful/transferfunctions.py | 3 ++- .../functions/stateful/integratorfunctions.py | 2 +- 3 files changed, 9 insertions(+), 6 deletions(-) diff --git a/psyneulink/core/components/functions/nonstateful/distributionfunctions.py b/psyneulink/core/components/functions/nonstateful/distributionfunctions.py index 96ae2c45292..d6ad016071d 100644 --- a/psyneulink/core/components/functions/nonstateful/distributionfunctions.py +++ b/psyneulink/core/components/functions/nonstateful/distributionfunctions.py @@ -39,7 +39,7 @@ ADDITIVE_PARAM, DIST_FUNCTION_TYPE, BETA, DIST_MEAN, DIST_SHAPE, DRIFT_DIFFUSION_ANALYTICAL_FUNCTION, \ EXPONENTIAL_DIST_FUNCTION, GAMMA_DIST_FUNCTION, HIGH, LOW, MULTIPLICATIVE_PARAM, NOISE, NORMAL_DIST_FUNCTION, \ SCALE, STANDARD_DEVIATION, THRESHOLD, UNIFORM_DIST_FUNCTION, WALD_DIST_FUNCTION -from psyneulink.core.globals.utilities import convert_to_np_array, ValidParamSpecType +from psyneulink.core.globals.utilities import convert_all_elements_to_np_array, convert_to_np_array, ValidParamSpecType from psyneulink.core.globals.preferences.basepreferenceset import ValidPrefSet from psyneulink.core.globals.parameters import Parameter, check_user_specified @@ -1321,9 +1321,11 @@ def _function(self, # Compute moments (mean, variance, skew) of condiational response time distributions moments = DriftDiffusionAnalytical._compute_conditional_rt_moments(drift_rate, noise, threshold, bias, non_decision_time) - return rt, er, \ - moments['mean_rt_plus'], moments['var_rt_plus'], moments['skew_rt_plus'], \ - moments['mean_rt_minus'], moments['var_rt_minus'], moments['skew_rt_minus'] + return convert_all_elements_to_np_array([ + rt, er, + moments['mean_rt_plus'], moments['var_rt_plus'], moments['skew_rt_plus'], + moments['mean_rt_minus'], moments['var_rt_minus'], moments['skew_rt_minus'] + ]) @staticmethod def _compute_conditional_rt_moments(drift_rate, noise, threshold, starting_value, non_decision_time): diff --git 
a/psyneulink/core/components/functions/nonstateful/transferfunctions.py b/psyneulink/core/components/functions/nonstateful/transferfunctions.py index beead33cb8b..4a7c86ebe41 100644 --- a/psyneulink/core/components/functions/nonstateful/transferfunctions.py +++ b/psyneulink/core/components/functions/nonstateful/transferfunctions.py @@ -101,7 +101,7 @@ FunctionParameter, Parameter, get_validator_by_function, check_user_specified from psyneulink.core.globals.preferences.basepreferenceset import \ REPORT_OUTPUT_PREF, PreferenceEntry, PreferenceLevel, ValidPrefSet -from psyneulink.core.globals.utilities import ValidParamSpecType, safe_len, is_matrix_keyword +from psyneulink.core.globals.utilities import ValidParamSpecType, convert_all_elements_to_np_array, safe_len, is_matrix_keyword __all__ = ['Angle', 'BinomialDistort', 'Dropout', 'Exponential', 'Gaussian', 'GaussianDistort', 'Identity', 'Linear', 'LinearMatrix', 'Logistic', 'ReLU', 'SoftMax', 'Tanh', 'TransferFunction', 'TransferWithCosts' @@ -3159,6 +3159,7 @@ def _function(self, output = [] for item in variable: output.append(self.apply_softmax(item, gain, output_type)) + output = convert_all_elements_to_np_array(output) else: output = self.apply_softmax(variable, gain, output_type) diff --git a/psyneulink/core/components/functions/stateful/integratorfunctions.py b/psyneulink/core/components/functions/stateful/integratorfunctions.py index 9d1b2b52954..586a4261e4e 100644 --- a/psyneulink/core/components/functions/stateful/integratorfunctions.py +++ b/psyneulink/core/components/functions/stateful/integratorfunctions.py @@ -4859,7 +4859,7 @@ def _function(self, self.parameters.previous_w._set(previous_w, context) self.parameters.previous_time._set(previous_time, context) - return previous_v, previous_w, previous_time + return convert_all_elements_to_np_array([previous_v, previous_w, previous_time]) def reset(self, previous_v=None, previous_w=None, previous_time=None, context=None): return super().reset( From b41ad7f7b5ac7fbb75a0dd228756818a19342087 Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Thu, 25 Feb 2021 18:52:52 -0500 Subject: [PATCH 137/410] treewide: convert all external numeric parameter values into ndarray simple scalars are wrapped as 0-dimensional arrays internally, and returned as scalars via user-facing methods (like Parameter.get and dot-notation) xfail tests/composition/test_control.py::TestControlMechanisms::test_lvoc_features_function due to current incompatibility of operations between tensor and numpy array --- psyneulink/core/components/component.py | 40 ++++--- .../core/components/functions/function.py | 14 +-- .../nonstateful/combinationfunctions.py | 42 ++++---- .../functions/nonstateful/fitfunctions.py | 2 + .../nonstateful/learningfunctions.py | 24 ++--- .../nonstateful/objectivefunctions.py | 9 +- .../nonstateful/transferfunctions.py | 4 +- .../functions/stateful/integratorfunctions.py | 31 +++--- .../functions/stateful/memoryfunctions.py | 33 +++--- .../functions/stateful/statefulfunction.py | 17 +-- .../core/components/mechanisms/mechanism.py | 21 +++- .../control/optimizationcontrolmechanism.py | 13 +-- .../processing/transfermechanism.py | 41 +++---- psyneulink/core/components/ports/inputport.py | 6 +- .../ports/modulatorysignals/controlsignal.py | 7 +- psyneulink/core/components/ports/port.py | 4 +- psyneulink/core/compositions/composition.py | 6 +- psyneulink/core/globals/mdf.py | 5 +- psyneulink/core/globals/parameters.py | 53 +++++++-- psyneulink/core/globals/sampleiterator.py | 4 +- 
psyneulink/core/globals/utilities.py | 8 +- .../modulatory/learning/EMstoragemechanism.py | 4 +- .../integrator/episodicmemorymechanism.py | 4 +- .../processing/transfer/kwtamechanism.py | 23 ++-- .../transfer/recurrenttransfermechanism.py | 8 +- .../pathway/autoassociativeprojection.py | 7 +- .../pathway/maskedmappingprojection.py | 5 +- .../compositions/autodiffcomposition.py | 3 +- .../library/compositions/emcomposition.py | 13 ++- tests/composition/test_control.py | 24 +++-- tests/functions/test_buffer.py | 2 +- tests/functions/test_distance.py | 2 +- tests/functions/test_memory.py | 102 ++++++++++-------- tests/functions/test_stability.py | 2 +- tests/mechanisms/test_transfer_mechanism.py | 2 +- tests/misc/test_parameters.py | 3 +- 36 files changed, 344 insertions(+), 244 deletions(-) diff --git a/psyneulink/core/components/component.py b/psyneulink/core/components/component.py index e0d620d1181..e190c91313d 100644 --- a/psyneulink/core/components/component.py +++ b/psyneulink/core/components/component.py @@ -539,7 +539,7 @@ from psyneulink.core.globals.utilities import \ ContentAddressableList, convert_all_elements_to_np_array, convert_to_np_array, get_deepcopy_with_shared, \ is_instance_or_subclass, is_matrix, iscompatible, kwCompatibilityLength, \ - get_all_explicit_arguments, call_with_pruned_args, safe_equals, safe_len, parse_valid_identifier + get_all_explicit_arguments, is_numeric, call_with_pruned_args, safe_equals, safe_len, parse_valid_identifier, try_extract_0d_array_item from psyneulink.core.scheduling.condition import Never from psyneulink.core.scheduling.time import Time, TimeScale @@ -1161,7 +1161,7 @@ def __init__(self, # Validate the set passed in self._instantiate_defaults(variable=default_variable, - request_set=parameter_values, # requested set + request_set={k: v for (k, v) in self.defaults.values().items() if k in parameter_values}, # requested set assign_missing=True, # assign missing params from classPreferences to instanceDefaults target_set=self.defaults.values(), # destination set to which params are being assigned default_set=self.class_defaults.values(), # source set from which missing params are assigned @@ -1967,7 +1967,12 @@ def generate_error(param_name): self._runtime_params_reset[context.execution_id] = {} self._runtime_params_reset[context.execution_id][param_name] = getattr(self.parameters, param_name)._get(context) - self._set_parameter_value(param_name, runtime_params[param_name], context) + if is_numeric(runtime_params[param_name]): + runtime_value = convert_all_elements_to_np_array(runtime_params[param_name]) + else: + runtime_value = runtime_params[param_name] + + self._set_parameter_value(param_name, runtime_value, context) # Any remaining params should either belong to the Component's function # or, if the Component is a Function, to it or its owner elif ( # If Component is not a function, and its function doesn't have the parameter or @@ -2304,7 +2309,7 @@ def _initialize_parameters(self, context=None, **param_defaults): shared_types=shared_types ) parameter_obj = getattr(self.parameters, k) - parameter_obj._set_default_value(defaults[k]) + parameter_obj._set_default_value(defaults[k], check_scalar=parameter_obj._user_specified) for p in filter(lambda x: not isinstance(x, (ParameterAlias, SharedParameter)), self.parameters._in_dependency_order): # copy spec so it is not overwritten later @@ -3247,6 +3252,7 @@ def _instantiate_value(self, context=None): def _update_default_variable(self, new_default_variable, context=None): from 
psyneulink.core.components.shellclasses import Function + new_default_variable = convert_all_elements_to_np_array(new_default_variable) self.defaults.variable = copy.deepcopy(new_default_variable) # exclude value from validation because it isn't updated until @@ -3446,16 +3452,23 @@ def _reset_runtime_parameters(self, context): self._runtime_params_reset[context.execution_id] = {} def _try_execute_param(self, param, var, context=None): - def fill_recursively(arr, value, indices=()): - if arr.ndim == 0: + def execute_if_callable(value, context=None): + try: + return value(context=context) + except TypeError: try: - value = value(context=context) + return value() except TypeError: - try: - value = value() - except TypeError: - pass - return value + return value + + def fill_recursively(arr, value, indices=()): + try: + is_scalar = arr.ndim == 0 + except AttributeError: + is_scalar = True + + if is_scalar: + return execute_if_callable(value, context) try: len_value = len(value) @@ -3496,7 +3509,7 @@ def fill_recursively(arr, value, indices=()): (isinstance(param, list) and len(param) == 1) or (isinstance(param, np.ndarray) and param.shape == (1,)) ): - if isinstance(param[0], Component): + if isinstance(param[0], Component) or len(var) > 1: param = param[0] # Currently most noise functions do not return noise in the same @@ -3526,6 +3539,7 @@ def fill_recursively(arr, value, indices=()): # param not directly compatible with variable, continue elementwise pass + param = try_extract_0d_array_item(param) fill_recursively(var, param) return var diff --git a/psyneulink/core/components/functions/function.py b/psyneulink/core/components/functions/function.py index 04c1fba5f9c..310a208b161 100644 --- a/psyneulink/core/components/functions/function.py +++ b/psyneulink/core/components/functions/function.py @@ -171,8 +171,8 @@ from psyneulink.core.globals.preferences.preferenceset import PreferenceEntry, PreferenceLevel from psyneulink.core.globals.registry import register_category from psyneulink.core.globals.utilities import ( - convert_to_np_array, get_global_seed, is_instance_or_subclass, object_has_single_value, parameter_spec, parse_valid_identifier, safe_len, - SeededRandomState, contains_type, is_numeric, NumericCollections, + convert_all_elements_to_np_array, convert_to_np_array, get_global_seed, is_instance_or_subclass, object_has_single_value, parameter_spec, parse_valid_identifier, safe_len, + SeededRandomState, try_extract_0d_array_item, contains_type, is_numeric, NumericCollections, random_matrix ) @@ -184,7 +184,7 @@ EPSILON = np.finfo(float).eps # numeric to allow modulation, invalid to identify unseeded state -DEFAULT_SEED = -1 +DEFAULT_SEED = np.array(-1) FunctionRegistry = {} @@ -343,7 +343,8 @@ def _output_type_setter(value, owning_component): def _seed_setter(value, owning_component, context): - if value in {None, DEFAULT_SEED}: + value = try_extract_0d_array_item(value) + if value is None or value == DEFAULT_SEED: value = get_global_seed() # Remove any old PRNG state @@ -758,14 +759,13 @@ def get_previous_value(self, context=None): return value def convert_output_type(self, value, output_type=None): + value = convert_all_elements_to_np_array(value) if output_type is None: if not self.enable_output_type_conversion or self.output_type is None: return value else: output_type = self.output_type - value = convert_to_np_array(value) - # Type conversion (specified by output_type): # MODIFIED 6/21/19 NEW: [JDC] @@ -861,6 +861,8 @@ def handle_noise(noise): 
extra_noise_functions.append(noise_func_model) return noise_func_model.id elif isinstance(noise, (list, np.ndarray)): + if noise.ndim == 0: + return None return type(noise)(handle_noise(item) for item in noise) else: return None diff --git a/psyneulink/core/components/functions/nonstateful/combinationfunctions.py b/psyneulink/core/components/functions/nonstateful/combinationfunctions.py index 93decb4ecde..ccaf0ef35b8 100644 --- a/psyneulink/core/components/functions/nonstateful/combinationfunctions.py +++ b/psyneulink/core/components/functions/nonstateful/combinationfunctions.py @@ -49,7 +49,7 @@ CROSS_ENTROPY, DEFAULT_VARIABLE, EXPONENTS, LINEAR_COMBINATION_FUNCTION, MULTIPLICATIVE_PARAM, OFFSET, OPERATION, \ PREDICTION_ERROR_DELTA_FUNCTION, PRODUCT, REARRANGE_FUNCTION, REDUCE_FUNCTION, SCALE, SUM, WEIGHTS, \ PREFERENCE_SET_NAME -from psyneulink.core.globals.utilities import convert_all_elements_to_np_array, convert_to_np_array, is_numeric, np_array_less_than_2d, ValidParamSpecType +from psyneulink.core.globals.utilities import convert_all_elements_to_np_array, convert_to_np_array, is_numeric, is_numeric_scalar, np_array_less_than_2d, ValidParamSpecType from psyneulink.core.globals.context import ContextFlags, handle_external_context from psyneulink.core.globals.parameters import Parameter, check_user_specified from psyneulink.core.globals.preferences.basepreferenceset import \ @@ -254,12 +254,12 @@ def _validate_params(self, request_set, target_set=None, context=None): if SCALE in target_set and target_set[SCALE] is not None: scale = target_set[SCALE] - if not isinstance(scale, numbers.Number): + if not is_numeric_scalar(scale): raise FunctionError("{} param of {} ({}) must be a scalar".format(SCALE, self.name, scale)) if OFFSET in target_set and target_set[OFFSET] is not None: offset = target_set[OFFSET] - if not isinstance(offset, numbers.Number): + if not is_numeric_scalar(offset): raise FunctionError("{} param of {} ({}) must be a scalar".format(OFFSET, self.name, offset)) def _function(self, @@ -517,12 +517,12 @@ def _validate_params(self, request_set, target_set=None, context=None): # Check that SCALE and OFFSET are scalars. 
if SCALE in target_set and target_set[SCALE] is not None: scale = target_set[SCALE] - if not isinstance(scale, numbers.Number): + if not is_numeric_scalar(scale): raise FunctionError("{} param of {} ({}) must be a scalar".format(SCALE, self.name, scale)) if OFFSET in target_set and target_set[OFFSET] is not None: offset = target_set[OFFSET] - if not isinstance(offset, numbers.Number): + if not is_numeric_scalar(offset): raise FunctionError("{} param of {} ({}) must be a scalar".format(OFFSET, self.name, offset)) def _instantiate_attributes_before_function(self, function=None, context=None): @@ -760,11 +760,11 @@ class Parameters(CombinationFunction.Parameters): changes_shape = Parameter(True, stateful=False, loggable=False, pnl_internal=True) def _validate_scale(self, scale): - if scale is not None and not np.isscalar(scale): + if not is_numeric_scalar(scale): return "scale must be a scalar" def _validate_offset(self, offset): - if offset is not None and not np.isscalar(offset): + if not is_numeric_scalar(offset): return "vector offset is not supported" @@ -845,12 +845,12 @@ def _validate_params(self, request_set, target_set=None, context=None): if SCALE in target_set and target_set[SCALE] is not None: scale = target_set[SCALE] - if not isinstance(scale, numbers.Number): + if not is_numeric_scalar(scale): raise FunctionError("{} param of {} ({}) must be a scalar".format(SCALE, self.name, scale)) if OFFSET in target_set and target_set[OFFSET] is not None: offset = target_set[OFFSET] - if not isinstance(offset, numbers.Number): + if not is_numeric_scalar(offset): raise FunctionError("{} param of {} ({}) must be a scalar".format(OFFSET, self.name, offset)) def _function(self, @@ -1311,10 +1311,8 @@ def _validate_params(self, request_set, target_set=None, context=None): pass elif isinstance(scale, np.ndarray): target_set[SCALE] = np.array(scale) - scale_is_a_scalar = isinstance(scale, numbers.Number) or (len(scale) == 1) and isinstance(scale[0], - numbers.Number) if context.execution_phase & (ContextFlags.PROCESSING | ContextFlags.LEARNING): - if not scale_is_a_scalar: + if not is_numeric_scalar(scale): err_msg = "Scale is using Hadamard modulation but its shape and/or size (scale shape: {}, size:{})" \ " do not match the variable being modulated (variable shape: {}, size: {})". \ format(scale.shape, scale.size, self.defaults.variable.shape, @@ -1332,10 +1330,8 @@ def _validate_params(self, request_set, target_set=None, context=None): elif isinstance(offset, np.ndarray): target_set[OFFSET] = np.array(offset) - offset_is_a_scalar = isinstance(offset, numbers.Number) or (len(offset) == 1) and isinstance(offset[0], - numbers.Number) if context.execution_phase & (ContextFlags.PROCESSING | ContextFlags.LEARNING): - if not offset_is_a_scalar: + if not is_numeric_scalar(offset): err_msg = "Offset is using Hadamard modulation but its shape and/or size (offset shape: {}, size:{})" \ " do not match the variable being modulated (variable shape: {}, size: {})". \ format(offset.shape, offset.size, self.defaults.variable.shape, @@ -1427,12 +1423,16 @@ def _function(self, # CW 3/19/18: a total hack, e.g. to make scale=[4.] turn into scale=4. Used b/c the `scale` ParameterPort # changes scale's format: e.g. if you write c = pnl.LinearCombination(scale = 4), print(c.scale) returns [4.] 
- if isinstance(scale, (list, np.ndarray)): - if len(scale) == 1 and isinstance(scale[0], numbers.Number): - scale = scale[0] - if isinstance(offset, (list, np.ndarray)): - if len(offset) == 1 and isinstance(offset[0], numbers.Number): - offset = offset[0] + # Don't use try_extract_0d_array_item because that will only + # handle 0d arrays, not 1d. + try: + scale = scale.item() + except (AttributeError, ValueError): + pass + try: + offset = offset.item() + except (AttributeError, ValueError): + pass # CALCULATE RESULT USING RELEVANT COMBINATION OPERATION AND MODULATION if operation == SUM: diff --git a/psyneulink/core/components/functions/nonstateful/fitfunctions.py b/psyneulink/core/components/functions/nonstateful/fitfunctions.py index 9a5ba78232f..4d187a3ed57 100644 --- a/psyneulink/core/components/functions/nonstateful/fitfunctions.py +++ b/psyneulink/core/components/functions/nonstateful/fitfunctions.py @@ -14,6 +14,7 @@ SEARCH_SPACE, ) from psyneulink.core.globals.parameters import SharedParameter, check_user_specified +from psyneulink.core.globals.utilities import try_extract_0d_array_item from psyneulink._typing import ( Dict, @@ -648,6 +649,7 @@ def _fit_differential_evolution( # Get a seed to pass to scipy for its search. Make this dependent on the seed of the # OCM seed_for_scipy = self._get_current_parameter_value('initial_seed', context) + seed_for_scipy = try_extract_0d_array_item(seed_for_scipy) direction = 1 if self.direction == "minimize" else -1 diff --git a/psyneulink/core/components/functions/nonstateful/learningfunctions.py b/psyneulink/core/components/functions/nonstateful/learningfunctions.py index 0c0045a5c3d..4f748e3a552 100644 --- a/psyneulink/core/components/functions/nonstateful/learningfunctions.py +++ b/psyneulink/core/components/functions/nonstateful/learningfunctions.py @@ -47,7 +47,7 @@ MATRIX, Loss from psyneulink.core.globals.parameters import Parameter, check_user_specified from psyneulink.core.globals.preferences.basepreferenceset import ValidPrefSet -from psyneulink.core.globals.utilities import is_numeric, scalar_distance, convert_to_np_array, all_within_range +from psyneulink.core.globals.utilities import is_numeric, scalar_distance, convert_to_np_array, all_within_range, safe_len, is_numeric_scalar __all__ = ['LearningFunction', 'Kohonen', 'Hebbian', 'ContrastiveHebbian', 'Reinforcement', 'BayesGLM', 'BackPropagation', 'TDLearning', 'EMStorage', @@ -831,10 +831,10 @@ def _handle_default_variable(self, default_variable=None, size=None): # if both are specified, make sure they are the same size if (isinstance(self.mu_0, (list, np.ndarray)) and isinstance(self.sigma_0, (list, np.ndarray)) - and len(self.mu_0) != len(self.sigma_0)): + and safe_len(self.mu_0) != safe_len(self.sigma_0)): raise FunctionError("Length of {} ({}) does not match length of {} ({}) for {}". 
- format(repr('mu_0'), len(self.mu_0), - repr('sigma_0'), len(self.sigma_0), + format(repr('mu_0'), safe_len(self.mu_0), + repr('sigma_0'), safe_len(self.sigma_0), self.__class.__.__name__)) # allow their size to determine the size of variable if isinstance(self.mu_0, (list, np.ndarray)): @@ -855,22 +855,22 @@ def initialize_priors(self): if np.array(variable).dtype != object: variable = np.atleast_2d(variable) - n = len(variable[0]) + n = safe_len(variable[0]) - if isinstance(self.mu_0, (int, float)): + if is_numeric_scalar(self.mu_0): self.mu_prior = np.full((n, 1),self.mu_0) else: - if len(self.mu_0) != n: + if safe_len(self.mu_0) != n: raise FunctionError("Length of mu_0 ({}) does not match number of predictors ({})". - format(len(self.mu_0), n)) - self.mu_prior = np.array(self.mu_0).reshape(len(self._mu_0),1) + format(safe_len(self.mu_0), n)) + self.mu_prior = np.array(self.mu_0).reshape(safe_len(self._mu_0), 1) - if isinstance(self.sigma_0, (int, float)): + if is_numeric_scalar(self.sigma_0): Lambda_0 = (1 / (self.sigma_0 ** 2)) * np.eye(n) else: - if len(self.sigma_0) != n: + if safe_len(self.sigma_0) != n: raise FunctionError("Length of sigma_0 ({}) does not match number of predictors ({})". - format(len(self.sigma_0), n)) + format(safe_len(self.sigma_0), n)) Lambda_0 = (1 / (np.array(self.sigma_0) ** 2)) * np.eye(n) self.Lambda_prior = Lambda_0 diff --git a/psyneulink/core/components/functions/nonstateful/objectivefunctions.py b/psyneulink/core/components/functions/nonstateful/objectivefunctions.py index bdb5d072c18..8d011e67e99 100644 --- a/psyneulink/core/components/functions/nonstateful/objectivefunctions.py +++ b/psyneulink/core/components/functions/nonstateful/objectivefunctions.py @@ -36,7 +36,7 @@ NORMED_L0_SIMILARITY, OBJECTIVE_FUNCTION_TYPE, SIZE, STABILITY_FUNCTION from psyneulink.core.globals.parameters import FunctionParameter, Parameter, check_user_specified from psyneulink.core.globals.preferences.basepreferenceset import ValidPrefSet -from psyneulink.core.globals.utilities import DistanceMetricLiteral, safe_len, convert_to_np_array +from psyneulink.core.globals.utilities import DistanceMetricLiteral, safe_len, convert_to_np_array, convert_all_elements_to_np_array from psyneulink.core.globals.utilities import is_iterable @@ -379,11 +379,8 @@ def _update_default_variable(self, new_default_variable, context): # this mirrors the transformation in _function # it is a hack, and a general solution should be found - squeezed = np.array(new_default_variable) - if squeezed.ndim > 1: - squeezed = np.squeeze(squeezed) - - size = safe_len(squeezed) + new_default_variable = convert_all_elements_to_np_array(new_default_variable) + size = safe_len(np.squeeze(new_default_variable)) matrix = self.parameters.matrix._get(context) if isinstance(matrix, MappingProjection): diff --git a/psyneulink/core/components/functions/nonstateful/transferfunctions.py b/psyneulink/core/components/functions/nonstateful/transferfunctions.py index 4a7c86ebe41..5f1a93e7090 100644 --- a/psyneulink/core/components/functions/nonstateful/transferfunctions.py +++ b/psyneulink/core/components/functions/nonstateful/transferfunctions.py @@ -1116,8 +1116,8 @@ def as_mdf_model(self): model = super().as_mdf_model() # x_0 is included in bias in MDF logistic - self._set_mdf_arg(model, 'bias', model.args['bias'] - model.args['x_0']) - self._set_mdf_arg(model, 'x_0', 0) + self._set_mdf_arg(model, 'bias', np.array(model.args['bias'] - model.args['x_0'])) + self._set_mdf_arg(model, 'x_0', np.array(0)) if 
model.args['scale'] != 1.0: warnings.warn( diff --git a/psyneulink/core/components/functions/stateful/integratorfunctions.py b/psyneulink/core/components/functions/stateful/integratorfunctions.py index 586a4261e4e..47d49206168 100644 --- a/psyneulink/core/components/functions/stateful/integratorfunctions.py +++ b/psyneulink/core/components/functions/stateful/integratorfunctions.py @@ -53,8 +53,8 @@ PREVIOUS_VALUE from psyneulink.core.globals.parameters import Parameter, check_user_specified from psyneulink.core.globals.preferences.basepreferenceset import ValidPrefSet -from psyneulink.core.globals.utilities import ValidParamSpecType, all_within_range, \ - convert_all_elements_to_np_array, parse_valid_identifier, safe_len +from psyneulink.core.globals.utilities import ValidParamSpecType, all_within_range, is_numeric_scalar, \ + convert_all_elements_to_np_array, parse_valid_identifier, safe_len, try_extract_0d_array_item __all__ = ['SimpleIntegrator', 'AdaptiveIntegrator', 'DriftDiffusionIntegrator', 'DriftOnASphereIntegrator', 'OrnsteinUhlenbeckIntegrator', 'FitzHughNagumoIntegrator', 'AccumulatorIntegrator', @@ -1098,7 +1098,7 @@ def _validate_params(self, request_set, target_set=None, context=None): if RATE in request_set: rate = request_set[RATE] if isinstance(rate, (list, np.ndarray)): - if len(rate) != 1 and len(rate) != np.array(self.defaults.variable).size: + if safe_len(rate) != 1 and safe_len(rate) != np.array(self.defaults.variable).size: # If the variable was not specified, then reformat it to match rate specification # and assign class_defaults.variable accordingly # Note: this situation can arise when the rate is parametrized (e.g., as an array) in the @@ -1113,12 +1113,12 @@ def _validate_params(self, request_set, target_set=None, context=None): warnings.warn( "The length ({}) of the array specified for the {} parameter ({}) of {} " "must match the length ({}) of the default input ({}); " - "the default input has been updated to match". 
- format(len(rate), repr(RATE), rate, self.name, + "the default input has been updated to match".format( + safe_len(rate), repr(RATE), rate, self.name, np.array(self.defaults.variable).size, self.defaults.variable)) else: raise FunctionError( - f"The length ({len(rate)}) of the array specified for the rate parameter ({rate}) " + f"The length ({safe_len(rate)}) of the array specified for the rate parameter ({rate}) " f"of {self.name} must match the length ({np.array(self.defaults.variable).size}) " f"of the default input ({self.defaults.variable}).") @@ -1624,7 +1624,7 @@ def _validate_params(self, request_set, target_set=None, context=None): if RATE in request_set: rate = request_set[RATE] if isinstance(rate, (list, np.ndarray)): - if len(rate) != 1 and len(rate) != np.array(self.defaults.variable).size: + if safe_len(rate) != 1 and safe_len(rate) != np.array(self.defaults.variable).size: # If the variable was not specified, then reformat it to match rate specification # and assign class_defaults.variable accordingly # Note: this situation can arise when the rate is parametrized (e.g., as an array) in the @@ -1640,7 +1640,7 @@ def _validate_params(self, request_set, target_set=None, context=None): "The length ({}) of the array specified for the rate parameter ({}) of {} " "must match the length ({}) of the default input ({}); " "the default input has been updated to match".format( - len(rate), + safe_len(rate), rate, self.name, np.array(self.defaults.variable).size @@ -1651,7 +1651,7 @@ def _validate_params(self, request_set, target_set=None, context=None): raise FunctionError( "The length ({}) of the array specified for the rate parameter ({}) of {} " "must match the length ({}) of the default input ({})".format( - len(rate), + safe_len(rate), rate, self.name, np.array(self.defaults.variable).size, @@ -2425,7 +2425,6 @@ class Parameters(IntegratorFunction.Parameters): random_draw = Parameter() def _parse_initializer(self, initializer): - initializer = np.array(initializer) if initializer.ndim > 1: return np.atleast_1d(initializer.squeeze()) else: @@ -2982,6 +2981,7 @@ class Parameters(IntegratorFunction.Parameters): ) def _validate_dimension(self, dimension): + dimension = try_extract_0d_array_item(dimension) if not isinstance(dimension, int) or dimension < 2: return 'dimension must be an integer >= 2' @@ -2998,12 +2998,6 @@ def _validate_initializer(self, initializer): return f"'initializer' must be a list or 1d array of length {initializer_len} " \ f"(the value of the \'dimension\' parameter minus 1)" - def _parse_noise(self, noise): - """Assign initial value as array of random values of length dimension-1""" - if isinstance(noise, list): - noise = np.array(noise) - return noise - @check_user_specified @beartype def __init__(self, @@ -3065,7 +3059,7 @@ def _validate_noise(self, noise): f"DriftOnASphereIntegrator requires noise parameter to be a float or float array.") if isinstance(noise, np.ndarray): initializer_len = self.parameters.dimension.default_value - 1 - if noise.ndim !=1 or len(noise) != initializer_len: + if noise.ndim > 1 or (noise.ndim == 1 and len(noise) != initializer_len): owner_str = f"'of '{self.owner.name}" if self.owner else "" raise FunctionError(f"'noise' parameter for {self.name}{owner_str} must be a list or 1d array of " f"length {initializer_len} (the value of the \'dimension\' parameter minus 1)") @@ -3456,7 +3450,6 @@ class Parameters(IntegratorFunction.Parameters): ) def _parse_initializer(self, initializer): - initializer = np.array(initializer) if 
initializer.ndim > 1: return np.atleast_1d(initializer.squeeze()) else: @@ -3501,7 +3494,7 @@ def __init__( ) def _validate_noise(self, noise): - if noise is not None and not isinstance(noise, float): + if noise is not None and not is_numeric_scalar(noise): raise FunctionError( "Invalid noise parameter for {}. OrnsteinUhlenbeckIntegrator requires noise parameter to be a float. " "Noise parameter is used to construct the standard DDM noise distribution".format(self.name)) diff --git a/psyneulink/core/components/functions/stateful/memoryfunctions.py b/psyneulink/core/components/functions/stateful/memoryfunctions.py index 877d5faa1b2..4b8a69a3e36 100644 --- a/psyneulink/core/components/functions/stateful/memoryfunctions.py +++ b/psyneulink/core/components/functions/stateful/memoryfunctions.py @@ -53,7 +53,7 @@ from psyneulink.core.globals.parameters import Parameter, check_user_specified from psyneulink.core.globals.preferences.basepreferenceset import ValidPrefSet from psyneulink.core.globals.utilities import \ - all_within_range, convert_to_np_array, convert_to_list, convert_all_elements_to_np_array + all_within_range, convert_all_elements_to_np_array, convert_to_np_array, convert_to_list, is_numeric_scalar __all__ = ['MemoryFunction', 'Buffer', 'DictionaryMemory', 'ContentAddressableMemory', 'RETRIEVAL_PROB', 'STORAGE_PROB'] @@ -64,6 +64,7 @@ class MemoryFunction(StatefulFunction): # ------------------------------------- # TODO: refactor to avoid skip of direct super def _update_default_variable(self, new_default_variable, context=None): if not self.parameters.initializer._user_specified: + new_default_variable = convert_all_elements_to_np_array(new_default_variable) # use * 0 instead of zeros_like to deal with ragged arrays self._initialize_previous_value([new_default_variable * 0], context) @@ -351,9 +352,14 @@ def _function(self, if len(previous_value): # TODO: remove this shape hack when buffer shapes made consistent noise = np.reshape(noise, np.asarray(previous_value[0]).shape) + variable = np.reshape(variable, np.asarray(previous_value[0]).shape) previous_value = convert_to_np_array(previous_value) * rate + noise - previous_value = deque(previous_value, maxlen=self.parameters.history._get(context)) + maxlen = self.parameters.history._get(context) + previous_value = deque( + previous_value, + maxlen=maxlen.item() if maxlen is not None else None + ) previous_value.append(variable) @@ -1757,7 +1763,7 @@ def _get_distance(self, cue:Union[list, np.ndarray], if granularity == 'per_field': # Note: this is just used for reporting, and not determining storage or retrieval # Report None if any element of cue, candidate or field_weights is None or empty list: - distances_by_field = np.array([None] * num_fields) + distances_by_field = np.full(num_fields, None) # If field_weights is scalar, splay out as array of length num_fields so can iterate through all of them if len(field_weights)==1: field_weights = np.full(num_fields, field_weights[0]) @@ -2533,7 +2539,7 @@ def _validate(self, context=None): fct_msg = 'Function' try: distance_result = distance_function(test_var, context=context) - if not np.isscalar(distance_result): + if not is_numeric_scalar(distance_result): raise FunctionError("Value returned by {} specified for {} ({}) must return a scalar". 
format(repr(DISTANCE_FUNCTION), self.__name__.__class__, distance_result)) except: @@ -2564,9 +2570,9 @@ def _validate(self, context=None): f'({result}) must return an array of the same length it receives') def _get_default_entry(self, context): - key = [0] * self.parameters.key_size._get(context) - val = [0] * self.parameters.val_size._get(context) - return [key, val] + key = np.zeros((self.parameters.key_size._get(context),)) + val = np.zeros((self.parameters.val_size._get(context),)) + return convert_to_np_array([key, val]) def _initialize_previous_value(self, initializer, context=None): """Ensure that initializer is appropriate for assignment as memory attribute and assign as previous_value @@ -2686,8 +2692,8 @@ def _function(self, # Set key_size and val_size if this is the first entry if len(self.parameters.previous_value._get(context)[KEYS]) == 0: - self.parameters.key_size._set(len(key), context) - self.parameters.val_size._set(len(val), context) + self.parameters.key_size._set(np.array(len(key)), context) + self.parameters.val_size._set(np.array(len(val)), context) # Retrieve value from current dict with key that best matches key if retrieval_prob == 1.0 or (retrieval_prob > 0.0 and retrieval_prob > random_state.uniform()): @@ -2712,11 +2718,7 @@ def _function(self, self._store_memory([key, val], context) # Return 3d array with keys and vals as lists - # IMPLEMENTATION NOTE: if try to create np.ndarray directly, and keys and vals have same length - # end up with array of arrays, rather than array of lists - ret_val = convert_to_np_array([list(memory[0]),[]]) - ret_val[1] = list(memory[1]) - return ret_val + return memory @beartype def _validate_memory(self, memory: Union[list, np.ndarray], context): @@ -2798,8 +2800,7 @@ def get_memory(self, query_key:Union[list, np.ndarray], context=None): best_match_key = _memory[KEYS][index_of_selected_item] best_match_val = _memory[VALS][index_of_selected_item] - # Return as list of lists - return [list(best_match_key), list(best_match_val)] + return convert_all_elements_to_np_array([best_match_key, best_match_val]) @beartype def _store_memory(self, memory:Union[list, np.ndarray], context): diff --git a/psyneulink/core/components/functions/stateful/statefulfunction.py b/psyneulink/core/components/functions/stateful/statefulfunction.py index f76e5716df8..51044080400 100644 --- a/psyneulink/core/components/functions/stateful/statefulfunction.py +++ b/psyneulink/core/components/functions/stateful/statefulfunction.py @@ -34,7 +34,7 @@ from psyneulink.core.globals.keywords import STATEFUL_FUNCTION_TYPE, STATEFUL_FUNCTION, NOISE, RATE from psyneulink.core.globals.parameters import Parameter, check_user_specified from psyneulink.core.globals.preferences.basepreferenceset import ValidPrefSet -from psyneulink.core.globals.utilities import iscompatible, convert_to_np_array, contains_type +from psyneulink.core.globals.utilities import iscompatible, convert_to_np_array, contains_type, safe_len, convert_all_elements_to_np_array __all__ = ['StatefulFunction'] @@ -264,7 +264,7 @@ def _validate_params(self, request_set, target_set=None, context=None): rate = request_set[RATE] if isinstance(rate, (list, np.ndarray)) and not iscompatible(rate, self.defaults.variable): - if len(rate) != 1 and len(rate) != np.array(self.defaults.variable).size: + if safe_len(rate) != 1 and safe_len(rate) != np.array(self.defaults.variable).size: # If the variable was not specified, then reformat it to match rate specification # and assign class_defaults.variable accordingly # 
Note: this situation can arise when the rate is parametrized (e.g., as an array) in the @@ -276,13 +276,13 @@ def _validate_params(self, request_set, target_set=None, context=None): if self._variable_shape_flexibility is DefaultsFlexibility.FLEXIBLE: self._instantiate_defaults(variable=np.zeros_like(np.array(rate)), context=context) if self.verbosePref: - warnings.warn(f"The length ({len(rate)}) of the array specified for " + warnings.warn(f"The length ({safe_len(rate)}) of the array specified for " f"the rate parameter ({rate}) of {self.name} must match the length " f"({np.array(self.defaults.variable).size}) of the default input " f"({self.defaults.variable}); the default input has been updated to match.") else: raise FunctionError(f"The length of the array specified for the rate parameter of {self.name}" - f"({len(rate)}) must match the length of the default input " + f"({safe_len(rate)}) must match the length of the default input " f"({np.array(self.defaults.variable).size}).") super()._validate_params(request_set=request_set, @@ -331,11 +331,11 @@ def _validate_rate(self, rate): raise FunctionError(rate_type_msg.format(self.name, rate)) if isinstance(rate, np.ndarray) and not iscompatible(rate, self.defaults.variable): - if len(rate) != 1 and len(rate) != np.array(self.defaults.variable).size: + if safe_len(rate) != 1 and safe_len(rate) != np.array(self.defaults.variable).size: if self._variable_shape_flexibility is DefaultsFlexibility.FLEXIBLE: self.defaults.variable = np.zeros_like(np.array(rate)) if self.verbosePref: - warnings.warn(f"The length ({len(rate)}) of the array specified for the rate parameter " + warnings.warn(f"The length ({safe_len(rate)}) of the array specified for the rate parameter " f"({rate}) of {self.name} must match the length " f"({np.array(self.defaults.variable).size}) of the default input " f"({self.defaults.variable}); the default input has been updated to match.") @@ -343,7 +343,7 @@ def _validate_rate(self, rate): self._variable_shape_flexibility = DefaultsFlexibility.INCREASE_DIMENSION else: raise FunctionError(f"The length of the array specified for the rate parameter of " - f"{len(rate)} ({self.name}) must match the length of the default input " + f"{safe_len(rate)} ({self.name}) must match the length of the default input " f"({np.array(self.defaults.variable).size}).") # Ensure that the noise parameter makes sense with the input type and shape; flag any noise functions that will @@ -355,7 +355,7 @@ def _validate_noise(self, noise): noise = noise.execute if isinstance(noise, (np.ndarray, list)): - if len(noise) == 1: + if safe_len(noise) == 1: pass # Variable is a list/array elif (not iscompatible(np.atleast_2d(noise), self.defaults.variable) @@ -414,6 +414,7 @@ def _initialize_previous_value(self, initializer, context=None): @handle_external_context() def _update_default_variable(self, new_default_variable, context=None): if not self.parameters.initializer._user_specified: + new_default_variable = convert_all_elements_to_np_array(new_default_variable) self._initialize_previous_value(np.zeros_like(new_default_variable), context) super()._update_default_variable(new_default_variable, context=context) diff --git a/psyneulink/core/components/mechanisms/mechanism.py b/psyneulink/core/components/mechanisms/mechanism.py index 5b00bcb10a8..1fcabaeb5de 100644 --- a/psyneulink/core/components/mechanisms/mechanism.py +++ b/psyneulink/core/components/mechanisms/mechanism.py @@ -1116,7 +1116,7 @@ from psyneulink.core.globals.registry import register_category, 
remove_instance_from_registry from psyneulink.core.globals.utilities import \ ContentAddressableList, append_type_to_name, convert_all_elements_to_np_array, convert_to_np_array, \ - iscompatible, kwCompatibilityNumeric, convert_to_list, parse_valid_identifier + iscompatible, kwCompatibilityNumeric, convert_to_list, is_numeric, parse_valid_identifier from psyneulink.core.scheduling.condition import Condition from psyneulink.core.scheduling.time import TimeScale @@ -1139,7 +1139,7 @@ class MechParamsDict(UserDict): def _input_port_variables_getter(owning_component=None, context=None): try: - return [input_port.parameters.variable._get(context) for input_port in owning_component.input_ports] + return convert_all_elements_to_np_array([input_port.parameters.variable._get(context) for input_port in owning_component.input_ports]) except (AttributeError, TypeError): return None @@ -1617,7 +1617,10 @@ class Parameters(Mechanism.Parameters): def _parse_input_ports(self, input_ports): if input_ports is None: return input_ports - elif not isinstance(input_ports, list): + elif ( + not isinstance(input_ports, list) + and not (isinstance(input_ports, np.ndarray) and input_ports.ndim > 0) + ): input_ports = [input_ports] spec_list = [] @@ -1857,7 +1860,10 @@ def _handle_arg_input_ports(self, input_ports): default_variable_from_input_ports = [] input_port_variable_was_specified = None - if not isinstance(input_ports, list): + if ( + not isinstance(input_ports, list) + and not (isinstance(input_ports, np.ndarray) and input_ports.ndim > 0) + ): input_ports = [input_ports] for i, s in enumerate(input_ports): @@ -2470,7 +2476,7 @@ def execute(self, # EXECUTE MECHANISM if self.parameters.is_finished_flag._get(context) is True: - self.parameters.num_executions_before_finished._set(0, override=True, context=context) + self.parameters.num_executions_before_finished._set(np.array(0), override=True, context=context) while True: @@ -2794,6 +2800,11 @@ def move_item_specific_params_to_specific_sub_dict(outer_dict, continue # Move param specification dict for item to entry with same key in _SPECIFIC_PARAMS dict item_specific_dict = {key : outer_dict.pop(key)} + item_specific_dict = { + k: convert_all_elements_to_np_array(v) if is_numeric(v) else v + for (k, v) in item_specific_dict.items() + } + if specific_dict_name in dest_dict: dest_dict[specific_dict_name].update(item_specific_dict) else: diff --git a/psyneulink/core/components/mechanisms/modulatory/control/optimizationcontrolmechanism.py b/psyneulink/core/components/mechanisms/modulatory/control/optimizationcontrolmechanism.py index 0b1325b02c6..5c26d6c0c50 100644 --- a/psyneulink/core/components/mechanisms/modulatory/control/optimizationcontrolmechanism.py +++ b/psyneulink/core/components/mechanisms/modulatory/control/optimizationcontrolmechanism.py @@ -1111,7 +1111,7 @@ from psyneulink.core.globals.preferences.preferenceset import PreferenceLevel from psyneulink.core.globals.registry import rename_instance_in_registry from psyneulink.core.globals.sampleiterator import SampleIterator, SampleSpec -from psyneulink.core.globals.utilities import convert_to_list, ContentAddressableList, is_numeric +from psyneulink.core.globals.utilities import convert_to_list, ContentAddressableList, is_numeric, object_has_single_value, try_extract_0d_array_item from psyneulink.core.llvm.debug import debug_env __all__ = [ @@ -1966,7 +1966,7 @@ def _instantiate_input_ports(self, context=None): # FIX: 11/3/21 : # ADD CHECK IN _parse_state_feature_specs() THAT IF A NODE RATHER THAN 
InputPort IS SPECIFIED, # ITS PRIMARY IS USED (SEE SCRATCH PAD FOR EXAMPLES) - if not self.state_feature_specs: + if self.state_feature_specs is None: # If agent_rep is CompositionFunctionApproximator, warn if no state_features specified. # Note: if agent rep is Composition, state_input_ports and any state_feature_function specified # are assigned in _update_state_input_ports_for_controller. @@ -2380,7 +2380,7 @@ def _parse_specs(state_feature_specs, specified_input_ports=None, spec_type="lis # SINGLE ITEM spec, SO APPLY TO ALL agent_rep_input_ports if (user_specs is None or isinstance(user_specs, (str, tuple, InputPort, OutputPort, Mechanism, Composition)) - or (is_numeric(user_specs) and (np.array(user_specs).ndim < 2))): + or (is_numeric(user_specs) and object_has_single_value(user_specs))): specs = [user_specs] * len(agent_rep_input_ports) # OK to assign here (rather than in _parse_secs()) since spec is intended for *all* state_input_ports self.parameters.state_feature_specs.set(specs, override=True) @@ -2394,10 +2394,10 @@ def _parse_specs(state_feature_specs, specified_input_ports=None, spec_type="lis # - SHADOW_INPUTS dict (with list spec as its only entry): {SHADOW_INPUTS: {[spec, spec...]}} # Treat specs as sources of input to INPUT Nodes of agent_rep (in corresponding order): # Call _parse_specs to construct a regular dict using INPUT Nodes as keys and specs as values - elif isinstance(user_specs, list) or (isinstance(user_specs, dict) and SHADOW_INPUTS in user_specs): - if isinstance(user_specs, list): + elif isinstance(user_specs, (list, np.ndarray)) or (isinstance(user_specs, dict) and SHADOW_INPUTS in user_specs): + if isinstance(user_specs, (list, np.ndarray)): num_missing_specs = len(agent_rep_input_ports) - len(self.state_feature_specs) - specs = user_specs + [self.state_feature_default] * num_missing_specs + specs = list(user_specs) + [self.state_feature_default] * num_missing_specs spec_type = 'list' else: # SHADOW_INPUTS spec: @@ -2954,6 +2954,7 @@ def _set_mechanism_value(self, context): def _create_randomization_control_signal(self, context): num_estimates = self.parameters.num_estimates._get(context) + num_estimates = try_extract_0d_array_item(num_estimates) if num_estimates: # must be SampleSpec in allocation_samples arg diff --git a/psyneulink/core/components/mechanisms/processing/transfermechanism.py b/psyneulink/core/components/mechanisms/processing/transfermechanism.py index 221eac63850..e12864489ab 100644 --- a/psyneulink/core/components/mechanisms/processing/transfermechanism.py +++ b/psyneulink/core/components/mechanisms/processing/transfermechanism.py @@ -818,7 +818,6 @@ import copy import inspect import logging -import numbers import types import warnings from collections.abc import Iterable @@ -854,7 +853,7 @@ from psyneulink.core.globals.preferences.basepreferenceset import ValidPrefSet from psyneulink.core.globals.preferences.preferenceset import PreferenceLevel from psyneulink.core.globals.utilities import \ - all_within_range, append_type_to_name, iscompatible, convert_to_np_array, safe_equals, parse_valid_identifier + all_within_range, is_numeric_scalar, append_type_to_name, convert_all_elements_to_np_array, iscompatible, convert_to_np_array, safe_equals, parse_valid_identifier, safe_len, try_extract_0d_array_item from psyneulink.core.scheduling.time import TimeScale __all__ = [ @@ -1241,17 +1240,13 @@ def _validate_variable(self, variable): return 'may not contain non-numeric entries' def _validate_clip(self, clip): - if clip: - if (not 
(isinstance(clip, (list,tuple)) and len(clip)==2 - and all(isinstance(i, numbers.Number)) for i in clip)): + if clip is not None: + if (not (isinstance(clip, (list, tuple, np.ndarray)) and len(clip) == 2 + and all(is_numeric_scalar(i)) for i in clip)): return 'must be a tuple with two numbers.' if not clip[0] < clip[1]: return 'first item must be less than the second.' - def _parse_clip(self, clip): - if clip: - return tuple(clip) - def _validate_integrator_mode(self, integrator_mode): if not isinstance(integrator_mode, bool): return 'may only be True or False.' @@ -1271,8 +1266,14 @@ def _parse_termination_measure(self, termination_measure): return termination_measure def _validate_termination_threshold(self, termination_threshold): - if (termination_threshold is not None - and not isinstance(termination_threshold, (int, float))): + if ( + termination_threshold is not None + and not ( + isinstance(termination_threshold, np.ndarray) + and termination_threshold.ndim == 0 + and isinstance(termination_threshold.item(), (int, float)) + ) + ): return 'must be a float or int.' def _validate_termination_comparison_op(self, termination_comparison_op): @@ -1439,7 +1440,7 @@ def _validate_noise(self, noise): noise = noise.execute if isinstance(noise, (np.ndarray, list)): - if len(noise) == 1: + if safe_len(noise) == 1: pass # Variable is a list/array elif (not iscompatible(np.atleast_2d(noise), self.defaults.variable) @@ -1450,12 +1451,14 @@ def _validate_noise(self, noise): f"({np.shape(np.array(self.defaults.variable))}).") else: for i in range(len(noise)): - if isinstance(noise[i], DistributionFunction): - noise[i] = noise[i].execute - if (not np.isscalar(noise[i]) and not callable(noise[i]) - and not iscompatible(np.atleast_2d(noise[i]), self.defaults.variable[i]) - and not iscompatible(np.atleast_1d(noise[i]), self.defaults.variable[i])): - raise MechanismError(f"The element '{noise[i]}' specified in 'noise' for {self.name} " + elem = try_extract_0d_array_item(noise[i]) + + if isinstance(elem, DistributionFunction): + elem = elem.execute + if (not isinstance(elem, (float, int)) and not callable(elem) + and not iscompatible(np.atleast_2d(elem), self.defaults.variable[i]) + and not iscompatible(np.atleast_1d(elem), self.defaults.variable[i])): + raise MechanismError(f"The element '{elem}' specified in 'noise' for {self.name} " f"is not valid; noise must be list or array must be floats or functions.") elif _is_control_spec(noise): @@ -1800,6 +1803,8 @@ def is_finished(self, context=None): @handle_external_context() def _update_default_variable(self, new_default_variable, context=None): + new_default_variable = convert_all_elements_to_np_array(new_default_variable) + if not self.parameters.initial_value._user_specified: integrator_function_variable = self._get_parsed_variable( self.parameters.integrator_function, diff --git a/psyneulink/core/components/ports/inputport.py b/psyneulink/core/components/ports/inputport.py index 65569c0af0f..471d209036c 100644 --- a/psyneulink/core/components/ports/inputport.py +++ b/psyneulink/core/components/ports/inputport.py @@ -595,7 +595,7 @@ from psyneulink.core.globals.preferences.basepreferenceset import ValidPrefSet from psyneulink.core.globals.preferences.preferenceset import PreferenceLevel from psyneulink.core.globals.utilities import \ - append_type_to_name, convert_to_np_array, is_numeric, iscompatible, kwCompatibilityLength, convert_to_list, parse_valid_identifier + append_type_to_name, is_numeric_scalar, convert_to_np_array, is_numeric, iscompatible, 
kwCompatibilityLength, convert_to_list, parse_valid_identifier __all__ = [ 'InputPort', 'InputPortError', 'port_type_keywords', 'SHADOW_INPUTS', @@ -1004,12 +1004,12 @@ def _validate_params(self, request_set, target_set=None, context=None): f"({function}){owner_name}.") if WEIGHT in target_set and target_set[WEIGHT] is not None: - if not isinstance(target_set[WEIGHT], (int, float)): + if not is_numeric_scalar(target_set[WEIGHT]): raise InputPortError(f"'{WEIGHT}' parameter of {self.name} for {self.owner.name} " f"({target_set[WEIGHT]}) must be an int or float.") if EXPONENT in target_set and target_set[EXPONENT] is not None: - if not isinstance(target_set[EXPONENT], (int, float)): + if not is_numeric_scalar(target_set[EXPONENT]): raise InputPortError(f"'{EXPONENT}' parameter of {self.name} for {self.owner.name}" f"({ target_set[EXPONENT]}) must be an int or float.") diff --git a/psyneulink/core/components/ports/modulatorysignals/controlsignal.py b/psyneulink/core/components/ports/modulatorysignals/controlsignal.py index 648c746fb08..4880bfe91cf 100644 --- a/psyneulink/core/components/ports/modulatorysignals/controlsignal.py +++ b/psyneulink/core/components/ports/modulatorysignals/controlsignal.py @@ -1090,7 +1090,7 @@ def compute_costs(self, intensity, context=None): # COMPUTE COST(S) # Initialize as backups for cost function that are not enabled - intensity_cost = adjustment_cost = duration_cost = 0 + intensity_cost = adjustment_cost = duration_cost = np.zeros_like(self.defaults.value) if CostFunctions.INTENSITY & cost_options: intensity_cost = self.intensity_cost_function(intensity, context) @@ -1108,9 +1108,10 @@ def compute_costs(self, intensity, context=None): duration_cost = self.duration_cost_function(self.parameters.cost._get(context), context=context) self.parameters.duration_cost._set(duration_cost, context) - all_costs = [intensity_cost, adjustment_cost, duration_cost] + # add second dimension because Reduce function uses axis=1 + all_costs = [[intensity_cost, adjustment_cost, duration_cost]] # Combine the costs. 
Convert to a float because reRedcu - combined_cost = self.combine_costs_function(all_costs, context=context).astype(float) + combined_cost = float(self.combine_costs_function(all_costs, context=context)) return max(0.0, combined_cost) diff --git a/psyneulink/core/components/ports/port.py b/psyneulink/core/components/ports/port.py index 504f4bdee01..0750ea18847 100644 --- a/psyneulink/core/components/ports/port.py +++ b/psyneulink/core/components/ports/port.py @@ -807,7 +807,7 @@ def test_multiple_modulatory_projections_with_mech_and_port_Name_specs(self): from psyneulink.core.globals.socket import ConnectionInfo from psyneulink.core.globals.utilities import \ ContentAddressableList, convert_to_np_array, get_args, is_value_spec, iscompatible, \ - MODULATION_OVERRIDE, type_match + MODULATION_OVERRIDE, try_extract_0d_array_item, type_match __all__ = [ 'Port_Base', 'port_keywords', 'port_type_keywords', 'PortError', 'PortRegistry', 'PORT_SPEC' @@ -2968,7 +2968,7 @@ def _parse_port_spec(port_type=None, pass else: - port_specification = port_spec[PORT_SPEC_ARG] + port_specification = try_extract_0d_array_item(port_spec[PORT_SPEC_ARG]) # Delete the Port specification dictionary from port_spec del port_spec[PORT_SPEC_ARG] diff --git a/psyneulink/core/compositions/composition.py b/psyneulink/core/compositions/composition.py index 66fb4f7e7db..e18c61e479a 100644 --- a/psyneulink/core/compositions/composition.py +++ b/psyneulink/core/compositions/composition.py @@ -2955,7 +2955,7 @@ def input_function(env, result): from psyneulink.core.globals.preferences.basepreferenceset import BasePreferenceSet from psyneulink.core.globals.preferences.preferenceset import PreferenceLevel, _assign_prefs from psyneulink.core.globals.registry import register_category -from psyneulink.core.globals.utilities import ContentAddressableList, call_with_pruned_args, convert_to_list, \ +from psyneulink.core.globals.utilities import ContentAddressableList, call_with_pruned_args, convert_all_elements_to_np_array, convert_to_list, \ nesting_depth, convert_to_np_array, is_numeric, is_matrix, is_matrix_keyword, parse_valid_identifier from psyneulink.core.scheduling.condition import All, AllHaveRun, Always, Any, Condition, Never, AtNCalls, BeforeNCalls from psyneulink.core.scheduling.scheduler import Scheduler, SchedulingMode @@ -8007,7 +8007,7 @@ def add_linear_learning_pathway(self, pathway, learning_function: Union[Type[LearningFunction], LearningFunction, Callable] = None, loss_spec: Optional[Loss] = Loss.MSE, - learning_rate: Optional[Union[int, float]] = None, + learning_rate: Optional[Union[int, float, np.ndarray]] = None, error_function=LinearCombination, learning_update: Union[bool, Literal['online', 'after']] = 'after', default_projection_matrix=None, @@ -12957,6 +12957,8 @@ def validate_and_assign_default_condition(node, entry, param_key, param_value): entry, param_key, param_spec[entry]) + if is_numeric(param_spec): + param_spec = convert_all_elements_to_np_array(param_spec) return (param_spec, param_condition) if runtime_params is None: diff --git a/psyneulink/core/globals/mdf.py b/psyneulink/core/globals/mdf.py index 94e12ae8443..e0e72806ae3 100644 --- a/psyneulink/core/globals/mdf.py +++ b/psyneulink/core/globals/mdf.py @@ -174,7 +174,10 @@ def default(self, o): elif isinstance(o, SampleIterator): return f'{o.__class__.__name__}({repr(o.specification)})' elif isinstance(o, numpy.ndarray): - return list(o) + try: + return list(o) + except TypeError: + return o.item() elif isinstance(o, numpy.random.RandomState): 
return f'numpy.random.RandomState({o.seed})' elif isinstance(o, numpy.number): diff --git a/psyneulink/core/globals/parameters.py b/psyneulink/core/globals/parameters.py index ed307c435a1..b3051dce0af 100644 --- a/psyneulink/core/globals/parameters.py +++ b/psyneulink/core/globals/parameters.py @@ -321,8 +321,20 @@ def _recurrent_transfer_mechanism_matrix_setter(value, owning_component=None, co from psyneulink.core.globals.context import Context, ContextError, ContextFlags, _get_time, handle_external_context from psyneulink.core.globals.context import time as time_object from psyneulink.core.globals.log import LogCondition, LogEntry, LogError -from psyneulink.core.globals.utilities import call_with_pruned_args, copy_iterable_with_shared, \ - get_alias_property_getter, get_alias_property_setter, get_deepcopy_with_shared, unproxy_weakproxy, create_union_set, safe_equals, get_function_sig_default_value +from psyneulink.core.globals.utilities import ( + call_with_pruned_args, + convert_all_elements_to_np_array, + copy_iterable_with_shared, + create_union_set, + get_alias_property_getter, + get_alias_property_setter, + get_deepcopy_with_shared, + get_function_sig_default_value, + is_numeric, + safe_equals, + try_extract_0d_array_item, + unproxy_weakproxy, +) from psyneulink.core.rpc.graph_pb2 import Entry, ndArray __all__ = [ @@ -998,6 +1010,7 @@ def __init__( # attributes will be taken from _inherited_source=None, _user_specified=False, + _scalar_converted=False, **kwargs ): if isinstance(aliases, str): @@ -1059,6 +1072,7 @@ def __init__( _inherited_source=_inherited_source, _user_specified=_user_specified, _temp_uninherited=set(), + _scalar_converted=_scalar_converted, **kwargs ) @@ -1100,6 +1114,7 @@ def __deepcopy__(self, memo): _owner=self._owner, _inherited=self._inherited, _user_specified=self._user_specified, + _scalar_converted=self._scalar_converted, ) # TODO: this is a quick fix to make sure default values are # always copied. should be integrated with future changes to @@ -1191,6 +1206,7 @@ def reset(self): # no default specified, must be inherited or invalid if self._parent is not None: self._inherited = True + return else: raise ParameterError( 'Parameter {0} cannot be reset, as it does not have a default specification ' @@ -1268,7 +1284,13 @@ def _parent(self): def _validate(self, value): return self._owner._validate(self.name, value) - def _parse(self, value): + def _parse(self, value, check_scalar=False): + if is_numeric(value): + orig_value = value + value = convert_all_elements_to_np_array(value) + if check_scalar: + self._scalar_converted = orig_value is not value and value.ndim == 0 + return self._owner._parse(self.name, value) @property @@ -1305,7 +1327,10 @@ def get(self, context=None, **kwargs): kwargs any additional arguments to be passed to this `Parameter`'s `getter` if it exists """ - return self._get(context, **kwargs) + base_val = self._get(context, **kwargs) + if self._scalar_converted: + base_val = try_extract_0d_array_item(base_val) + return base_val def _get(self, context=None, **kwargs): if not self.stateful: @@ -1459,7 +1484,8 @@ def set(self, value, context=None, override=False, skip_history=False, skip_log= if not override and self.read_only: raise ParameterError('Parameter \'{0}\' is read-only. Set at your own risk. 
Pass override=True to force set.'.format(self.name)) - value = self._set(self._parse(value), context, skip_history, skip_log, **kwargs) + value = self._parse(value, check_scalar=True) + value = self._set(value, context, skip_history, skip_log, **kwargs) try: if isinstance(value.__self__, Component): @@ -1751,7 +1777,7 @@ def _initialize_from_context(self, context=None, base_context=Context(execution_ # KDM 7/30/18: the below is weird like this in order to use this like a property, but also include it # in the interface for user simplicity: that is, inheritable (by this Parameter's children or from its parent), # visible in a Parameter's repr, and easily settable by the user - def _set_default_value(self, value, directly=False): + def _set_default_value(self, value, directly=False, check_scalar=False): """ Set default_value @@ -1759,9 +1785,12 @@ def _set_default_value(self, value, directly=False): value: new default_value directly (bool, optional): if False, passes **value** through parse and validation steps. Defaults to False. + check_scalar (bool, optional): if True, sets + _scalar_converted attribute as appropriate for + **value**. Defaults to False. """ if not directly: - value = self._parse(value) + value = self._parse(value, check_scalar=check_scalar) self._validate(value) super().__setattr__('default_value', value) @@ -2231,8 +2260,11 @@ def __setattr__(self, attr, value): super().__setattr__(attr, value) else: if isinstance(value, Parameter): + is_new_parameter = False + if value._owner is None: value._owner = self + is_new_parameter = True elif value._owner is not self and self._initializing: # case where no Parameters class defined on subclass # but default value overridden in __init__ @@ -2243,7 +2275,8 @@ def __setattr__(self, attr, value): value.name = attr if self._initializing and not value._inherited: - value.default_value = self._reconcile_value_with_init_default(attr, value.default_value) + reconciled_value = self._reconcile_value_with_init_default(attr, value.default_value) + value._set_default_value(reconciled_value, check_scalar=is_new_parameter) super().__setattr__(attr, value) @@ -2309,15 +2342,15 @@ def __setattr__(self, attr, value): # set _inherited before default_value because it will # restore from cache new_param._inherited = False - new_param.default_value = value # the old/replaced Parameter should be discarded current_value._is_invalid_source = True else: - new_param = Parameter(name=attr, default_value=value, _owner=self) + new_param = Parameter(name=attr, _owner=self) super().__setattr__(attr, new_param) + new_param._set_default_value(value) self._validate(attr, getattr(self, attr).default_value) self._register_parameter(attr) diff --git a/psyneulink/core/globals/sampleiterator.py b/psyneulink/core/globals/sampleiterator.py index 13fb229e2b0..03d7082c9bc 100644 --- a/psyneulink/core/globals/sampleiterator.py +++ b/psyneulink/core/globals/sampleiterator.py @@ -18,7 +18,7 @@ from collections.abc import Iterator from decimal import Decimal, getcontext from inspect import isclass -from numbers import Number +from psyneulink.core.globals.utilities import is_numeric_scalar import numpy as np from beartype import beartype @@ -43,7 +43,7 @@ def _validate_function(source, function): if result is None: raise SampleIteratorError("Function specified for {} ({}) does not return a result)". 
format(source_name, repr(function))) - if not isinstance(result, Number): + if not is_numeric_scalar(result): raise SampleIteratorError("Function specified for {} ({}) does not return a number)". format(source_name, repr(function))) diff --git a/psyneulink/core/globals/utilities.py b/psyneulink/core/globals/utilities.py index 333339e0b40..def49453843 100644 --- a/psyneulink/core/globals/utilities.py +++ b/psyneulink/core/globals/utilities.py @@ -618,7 +618,13 @@ def recursively_check_elements_for_numeric(value): # Matrices can't be checked recursively, so convert to array if isinstance(value, np.matrix): value = value.A - if isinstance(value, (list, np.ndarray)): + if isinstance(value, (list, np.ndarray)) and not is_numeric_scalar(value): + try: + if value.ndim == 0: + return recursively_check_elements_for_numeric(value.item()) + except AttributeError: + pass + for item in value: if not recursively_check_elements_for_numeric(item): return False diff --git a/psyneulink/library/components/mechanisms/modulatory/learning/EMstoragemechanism.py b/psyneulink/library/components/mechanisms/modulatory/learning/EMstoragemechanism.py index 87d046c8839..341e5700a5f 100644 --- a/psyneulink/library/components/mechanisms/modulatory/learning/EMstoragemechanism.py +++ b/psyneulink/library/components/mechanisms/modulatory/learning/EMstoragemechanism.py @@ -590,8 +590,8 @@ def __init__(self, function: Optional[Callable] = EMStorage, learning_signals: Union[list, dict, ParameterPort, Projection, tuple] = None, modulation: Optional[Literal[OVERRIDE, ADDITIVE, MULTIPLICATIVE]] = OVERRIDE, - decay_rate: Optional[Union[int,float]] = 0.0, - storage_prob: Optional[Union[int, float]] = 1.0, + decay_rate: Optional[Union[int, float, np.ndarray]] = 0.0, + storage_prob: Optional[Union[int, float, np.ndarray]] = 1.0, params=None, name=None, prefs: Optional[ValidPrefSet] = None, diff --git a/psyneulink/library/components/mechanisms/processing/integrator/episodicmemorymechanism.py b/psyneulink/library/components/mechanisms/processing/integrator/episodicmemorymechanism.py index 3f657fd86a0..2feec8d56e6 100644 --- a/psyneulink/library/components/mechanisms/processing/integrator/episodicmemorymechanism.py +++ b/psyneulink/library/components/mechanisms/processing/integrator/episodicmemorymechanism.py @@ -424,7 +424,7 @@ from psyneulink.core.globals.keywords import EPISODIC_MEMORY_MECHANISM,MULTIPLICATIVE_PARAM, NAME, OWNER_VALUE, VARIABLE from psyneulink.core.globals.parameters import FunctionParameter, Parameter, check_user_specified from psyneulink.core.globals.preferences.basepreferenceset import ValidPrefSet -from psyneulink.core.globals.utilities import deprecation_warning, convert_to_np_array, convert_all_elements_to_np_array +from psyneulink.core.globals.utilities import deprecation_warning, convert_all_elements_to_np_array __all__ = ['EpisodicMemoryMechanism', 'KEY_INPUT', 'VALUE_INPUT', 'KEY_OUTPUT', 'VALUE_OUTPUT'] @@ -689,7 +689,7 @@ def _parse_function_variable(self, variable, context=None): if self._dictionary_memory: # If assoc has not been specified, add empty list to call to function (which expects two items in its variable) if len(variable) != 2: - return convert_to_np_array([variable[0],[]]) + return convert_all_elements_to_np_array([variable[0], []]) else: # Check that both are assigned inputs: missing_inputs = [self.input_ports.names[i] for i,t in enumerate([v for v in variable]) if t is None] diff --git a/psyneulink/library/components/mechanisms/processing/transfer/kwtamechanism.py 
b/psyneulink/library/components/mechanisms/processing/transfer/kwtamechanism.py index ee83bf4a15a..11a7e7a383c 100644 --- a/psyneulink/library/components/mechanisms/processing/transfer/kwtamechanism.py +++ b/psyneulink/library/components/mechanisms/processing/transfer/kwtamechanism.py @@ -191,7 +191,7 @@ from psyneulink.core.globals.keywords import KWTA_MECHANISM, K_VALUE, RATIO, RESULT, THRESHOLD from psyneulink.core.globals.parameters import Parameter, check_user_specified from psyneulink.core.globals.preferences.basepreferenceset import ValidPrefSet -from psyneulink.core.globals.utilities import NumericCollections +from psyneulink.core.globals.utilities import NumericCollections, is_numeric_scalar, safe_len from psyneulink.core.components.mechanisms.mechanism import MechanismError from psyneulink.library.components.mechanisms.processing.transfer.recurrenttransfermechanism import RecurrentTransferMechanism from psyneulink.library.components.projections.pathway.autoassociativeprojection import get_auto_matrix, get_hetero_matrix @@ -487,20 +487,23 @@ def _validate_params(self, request_set, target_set=None, context=None): if RATIO in target_set and target_set[RATIO] is not None: ratio_param = target_set[RATIO] - if not isinstance(ratio_param, numbers.Real): - if not (isinstance(ratio_param, (np.ndarray, list)) and len(ratio_param) == 1): - raise KWTAError("ratio parameter ({}) for {} must be a single number".format(ratio_param, self)) + if not is_numeric_scalar(ratio_param): + raise KWTAError("ratio parameter ({}) for {} must be a single number".format(ratio_param, self)) if ratio_param > 1 or ratio_param < 0: raise KWTAError("ratio parameter ({}) for {} must be between 0 and 1".format(ratio_param, self)) if K_VALUE in target_set and target_set[K_VALUE] is not None: k_param = target_set[K_VALUE] - if not isinstance(k_param, numbers.Real): - if not (isinstance(k_param, (np.ndarray, list)) and len(k_param) == 1): - raise KWTAError("k-value parameter ({}) for {} must be a single number".format(k_param, self)) - if (isinstance(k_param, (np.ndarray, list)) and len(k_param) == 1): - k_num = k_param[0] + + if not is_numeric_scalar(k_param): + raise KWTAError("k-value parameter ({}) for {} must be a single number".format(k_param, self)) + + if (isinstance(k_param, (np.ndarray, list)) and safe_len(k_param) == 1): + try: + k_num = k_param[0] + except IndexError: + k_num = k_param.item() else: k_num = k_param if not isinstance(k_num, int): @@ -517,7 +520,7 @@ def _validate_params(self, request_set, target_set=None, context=None): if THRESHOLD in target_set and target_set[THRESHOLD] is not None: threshold_param = target_set[THRESHOLD] if not isinstance(threshold_param, numbers.Real): - if not (isinstance(threshold_param, (np.ndarray, list)) and len(threshold_param) == 1): + if not (isinstance(threshold_param, (np.ndarray, list)) and safe_len(threshold_param) == 1): raise KWTAError("k-value parameter ({}) for {} must be a single number". 
format(threshold_param, self)) diff --git a/psyneulink/library/components/mechanisms/processing/transfer/recurrenttransfermechanism.py b/psyneulink/library/components/mechanisms/processing/transfer/recurrenttransfermechanism.py index 54aa1167a32..9ab50f0e097 100644 --- a/psyneulink/library/components/mechanisms/processing/transfer/recurrenttransfermechanism.py +++ b/psyneulink/library/components/mechanisms/processing/transfer/recurrenttransfermechanism.py @@ -215,7 +215,7 @@ from psyneulink.core.globals.preferences.basepreferenceset import ValidPrefSet from psyneulink.core.globals.registry import register_instance, remove_instance_from_registry from psyneulink.core.globals.socket import ConnectionInfo -from psyneulink.core.globals.utilities import NumericCollections, ValidParamSpecType +from psyneulink.core.globals.utilities import NumericCollections, ValidParamSpecType, safe_len from psyneulink.core.scheduling.condition import Condition, WhenFinished from psyneulink.core.scheduling.time import TimeScale from psyneulink.library.components.mechanisms.modulatory.learning.autoassociativelearningmechanism import \ @@ -779,7 +779,7 @@ def _validate_params(self, request_set, target_set=None, context=None): if (auto_param is not None) and not isinstance(auto_param, (np.ndarray, list, numbers.Number)): raise RecurrentTransferError("auto parameter ({}) of {} is of incompatible type: it should be a " "number, None, or a 1D numeric array".format(auto_param, self)) - if isinstance(auto_param, (np.ndarray, list)) and len(auto_param) != 1 and len(auto_param) != self.size[0]: + if isinstance(auto_param, (np.ndarray, list)) and safe_len(auto_param) != 1 and safe_len(auto_param) != self.size[0]: raise RecurrentTransferError("auto parameter ({0}) for {1} is of incompatible length with the size " "({2}) of its owner, {1}.".format(auto_param, self, self.size[0])) @@ -790,10 +790,10 @@ def _validate_params(self, request_set, target_set=None, context=None): "number, None, or a 2D numeric matrix or array".format(hetero_param, self)) hetero_shape = np.array(hetero_param).shape if hetero_shape != (1,) and hetero_shape != (1, 1): - if isinstance(hetero_param, (np.ndarray, list, np.matrix)) and hetero_shape[0] != self.size[0]: + if isinstance(hetero_param, (np.ndarray, list, np.matrix)) and (hetero_param.ndim > 0 and hetero_shape[0] != self.size[0]): raise RecurrentTransferError("hetero parameter ({0}) for {1} is of incompatible size with the size " "({2}) of its owner, {1}.".format(hetero_param, self, self.size[0])) - if isinstance(hetero_param, (np.ndarray, list, np.matrix)) and hetero_shape[0] != hetero_shape[1]: + if isinstance(hetero_param, (np.ndarray, list, np.matrix)) and (hetero_param.ndim > 0 and hetero_shape[0] != hetero_shape[1]): raise RecurrentTransferError("hetero parameter ({}) for {} must be square.".format(hetero_param, self)) # Validate DECAY diff --git a/psyneulink/library/components/projections/pathway/autoassociativeprojection.py b/psyneulink/library/components/projections/pathway/autoassociativeprojection.py index b68e2b65353..9caf6a7f718 100644 --- a/psyneulink/library/components/projections/pathway/autoassociativeprojection.py +++ b/psyneulink/library/components/projections/pathway/autoassociativeprojection.py @@ -99,8 +99,6 @@ --------------- """ -import numbers - import numpy as np from beartype import beartype @@ -117,6 +115,7 @@ from psyneulink.core.globals.parameters import SharedParameter, Parameter, check_user_specified from psyneulink.core.globals.preferences.basepreferenceset import 
ValidPrefSet from psyneulink.core.globals.preferences.preferenceset import PreferenceLevel +from psyneulink.core.globals.utilities import is_numeric_scalar __all__ = [ 'AutoAssociativeError', 'AutoAssociativeProjection', 'get_auto_matrix', 'get_hetero_matrix', @@ -366,7 +365,7 @@ def matrix(self, setting): # a helper function that takes a specification of `hetero` and returns a hollow matrix with the right values def get_hetero_matrix(raw_hetero, size): - if isinstance(raw_hetero, numbers.Number): + if is_numeric_scalar(raw_hetero): return get_matrix(HOLLOW_MATRIX, size, size) * raw_hetero elif ((isinstance(raw_hetero, np.ndarray) and raw_hetero.ndim == 1) or (isinstance(raw_hetero, list) and np.array(raw_hetero).ndim == 1)): @@ -384,7 +383,7 @@ def get_hetero_matrix(raw_hetero, size): # similar to get_hetero_matrix() above def get_auto_matrix(raw_auto, size): - if isinstance(raw_auto, numbers.Number): + if is_numeric_scalar(raw_auto): return np.diag(np.full(size, raw_auto, dtype=float)) elif ((isinstance(raw_auto, np.ndarray) and raw_auto.ndim == 1) or (isinstance(raw_auto, list) and np.array(raw_auto).ndim == 1)): diff --git a/psyneulink/library/components/projections/pathway/maskedmappingprojection.py b/psyneulink/library/components/projections/pathway/maskedmappingprojection.py index fa49aca87fb..d0973f13434 100644 --- a/psyneulink/library/components/projections/pathway/maskedmappingprojection.py +++ b/psyneulink/library/components/projections/pathway/maskedmappingprojection.py @@ -78,6 +78,7 @@ from psyneulink.core.globals.parameters import check_user_specified from psyneulink.core.globals.preferences.basepreferenceset import ValidPrefSet from psyneulink.core.globals.preferences.preferenceset import PreferenceLevel +from psyneulink.core.globals.utilities import is_numeric_scalar __all__ = [ 'MaskedMappingProjection', 'MaskedMappingProjectionError', @@ -206,9 +207,9 @@ def _validate_params(self, request_set, target_set=None, context=None): target_set=target_set, context=context) - if MASK in target_set and target_set[MASK]: + if MASK in target_set and target_set[MASK] is not None: mask = target_set[MASK] - if isinstance(mask, (int, float)): + if is_numeric_scalar(mask): return mask_shape = np.array(mask).shape matrix = get_matrix(self.defaults.matrix, diff --git a/psyneulink/library/compositions/autodiffcomposition.py b/psyneulink/library/compositions/autodiffcomposition.py index 54d608fdd30..0c313e45128 100644 --- a/psyneulink/library/compositions/autodiffcomposition.py +++ b/psyneulink/library/compositions/autodiffcomposition.py @@ -344,6 +344,7 @@ ReportDevices, EXECUTE_REPORT, LEARN_REPORT, PROGRESS_REPORT) from psyneulink.core.globals.context import Context, ContextFlags, handle_external_context, CONTEXT from psyneulink.core.globals.keywords import AUTODIFF_COMPOSITION, SOFT_CLAMP, Loss +from psyneulink.core.globals.utilities import is_numeric_scalar from psyneulink.core.scheduling.scheduler import Scheduler from psyneulink.core.globals.parameters import Parameter, check_user_specified from psyneulink.core.scheduling.time import TimeScale @@ -712,7 +713,7 @@ def _build_pytorch_representation(self, context=None, refresh=False): return self.parameters.pytorch_representation._get(context) def _make_optimizer(self, optimizer_type, learning_rate, weight_decay, context): - if not isinstance(learning_rate, (int, float)): + if not is_numeric_scalar(learning_rate): raise AutodiffCompositionError("Learning rate must be an integer or float value.") if optimizer_type not in ['sgd', 'adam']: 
raise AutodiffCompositionError("Invalid optimizer specified. Optimizer argument must be a string. " diff --git a/psyneulink/library/compositions/emcomposition.py b/psyneulink/library/compositions/emcomposition.py index dceadafbee4..5f2eff3b18f 100644 --- a/psyneulink/library/compositions/emcomposition.py +++ b/psyneulink/library/compositions/emcomposition.py @@ -896,7 +896,7 @@ from psyneulink.core.globals.keywords import \ (AUTO, CONTROL, DEFAULT_INPUT, DEFAULT_VARIABLE, EM_COMPOSITION, FULL_CONNECTIVITY_MATRIX, GAIN, IDENTITY_MATRIX, MULTIPLICATIVE_PARAM, NAME, PARAMS, PRODUCT, PROJECTIONS, RANDOM, SIZE, VARIABLE) -from psyneulink.core.globals.utilities import all_within_range +from psyneulink.core.globals.utilities import is_numeric_scalar from psyneulink.core.llvm import ExecutionMode @@ -1344,18 +1344,17 @@ def _validate_field_names(self, field_names): return f"must be a list of strings." def _validate_memory_decay_rate(self, memory_decay_rate): - if memory_decay_rate in {None, AUTO}: + if memory_decay_rate is None or memory_decay_rate == AUTO: return - if not (isinstance(memory_decay_rate, (float, int)) and all_within_range(memory_decay_rate, 0, 1)): + if not is_numeric_scalar(memory_decay_rate) and not (0 <= memory_decay_rate <= 1): return f"must be a float in the interval [0,1]." def _validate_softmax_gain(self, softmax_gain): - if softmax_gain != CONTROL and not isinstance(softmax_gain, (float, int)): + if softmax_gain != CONTROL and not is_numeric_scalar(softmax_gain): return f"must be a scalar or the keyword 'CONTROL'." def _validate_storage_prob(self, storage_prob): - storage_prob = float(storage_prob) - if not all_within_range(storage_prob, 0, 1): + if not is_numeric_scalar(storage_prob) and not (0 <= storage_prob <= 1): return f"must be a float in the interval [0,1]." 
@check_user_specified @@ -1574,7 +1573,7 @@ def _construct_entries(entry_template, num_entries, memory_fill=None)->np.ndarra # Fill with specified value elif isinstance(memory_fill, (list, float, int)): entry = [np.full(len(field), memory_fill).tolist() for field in entry_template] - entries = [np.array(entry, dtype=object)] * num_entries + entries = [np.array(entry, dtype=object) for _ in range(num_entries)] return np.array(np.array(entries,dtype=object), dtype=object) diff --git a/tests/composition/test_control.py b/tests/composition/test_control.py index 0e9701d9e5b..51dab5be35d 100644 --- a/tests/composition/test_control.py +++ b/tests/composition/test_control.py @@ -1312,9 +1312,13 @@ def test_ocm_state_feature_specs_and_warnings_and_errors(self, state_feature_arg if test_condition == 'full_list_spec': assert len(ocm.state_input_ports) == 3 assert ocm.state_input_ports.names == [shadowed_ia_node, oa_node, numeric_ob] - assert ocm.state_features == {'IA[InputPort-0]': 'IA[InputPort-0]', - 'OA[InputPort-0]': 'OA[OutputPort-0]', - 'OB[InputPort-0]': [3, 1, 2]} + np.testing.assert_equal( + ocm.state_features, { + 'IA[InputPort-0]': 'IA[InputPort-0]', + 'OA[InputPort-0]': 'OA[OutputPort-0]', + 'OB[InputPort-0]': [3, 1, 2] + } + ) assert {k:v.tolist() for k,v in ocm.state_feature_values.items()} == {ia.input_port: [0.0], oa.input_port: [0.0], ob.input_port: [3.0, 1.0, 2.0]} @@ -1322,9 +1326,13 @@ def test_ocm_state_feature_specs_and_warnings_and_errors(self, state_feature_arg if test_condition == 'list_spec_with_none': assert len(ocm.state_input_ports) == 2 assert ocm.state_input_ports.names == [shadowed_ia_node, numeric_ob] - assert ocm.state_features == {'IA[InputPort-0]': 'IA[InputPort-0]', - 'OA[InputPort-0]': None, - 'OB[InputPort-0]': [3, 1, 2]} + np.testing.assert_equal( + ocm.state_features, { + 'IA[InputPort-0]': 'IA[InputPort-0]', + 'OA[InputPort-0]': None, + 'OB[InputPort-0]': [3, 1, 2] + } + ) for expected, actual in zip( list(ocm.state_feature_values.values()), [[0.], [3, 1, 2]] ): @@ -1841,6 +1849,10 @@ def test_lvoc_both_predictors_specs(self): assert len(lvoc.input_ports) == 5 @pytest.mark.pytorch + @pytest.mark.xfail( + strict=False, + reason='operation incompatiblilty between torch tensor and numpy array', + ) def test_lvoc_features_function(self): m1 = pnl.TransferMechanism(input_ports=["InputPort A", "InputPort B"]) m2 = pnl.TransferMechanism() diff --git a/tests/functions/test_buffer.py b/tests/functions/test_buffer.py index 95212a0662b..1e39d8c8a31 100644 --- a/tests/functions/test_buffer.py +++ b/tests/functions/test_buffer.py @@ -100,7 +100,7 @@ def test_buffer_as_function_of_processing_mech(self, benchmark): val = benchmark(P.execute, 1.0) # NOTE: actual output is [0, [[1]]] - np.testing.assert_allclose(np.asfarray(val, dtype=object), [0., 1.]) + np.testing.assert_allclose(np.asfarray(val, dtype=object), [[0., 1.]]) # fails due to value and variable problems when Buffer is the function of a mechanism # P = ProcessingMechanism(function=Buffer(default_variable=[[0.0], [1.0], [2.0]], diff --git a/tests/functions/test_distance.py b/tests/functions/test_distance.py index 3ee17115719..e9af47d3444 100644 --- a/tests/functions/test_distance.py +++ b/tests/functions/test_distance.py @@ -55,4 +55,4 @@ def test_basic(variable, metric, normalize, expected, benchmark, func_mode): # LLVM calculations of most metrics using fp32 are not accurate. 
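
A quick illustrative sketch (not from the codebase) of why the assertion in this hunk gains the extra res.ndim == 0 clause now that single-number results can come back as 0d numpy arrays:

    import numpy as np

    np.isscalar(1.5)            # True  -- plain Python float
    np.isscalar(np.array(1.5))  # False -- a 0d array is not a Python scalar
    np.array(1.5).ndim          # 0     -- caught by the new ndim check
    # len(np.array(1.5)) raises TypeError ("len() of unsized object"),
    # so the len(res) == 1 clause only covers 1-element 1d results like np.array([1.5]).
    len(np.array([1.5]))        # 1
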
tol = {'rtol':1e-5, 'atol':1e-8} if metric == kw.COSINE or pytest.helpers.llvm_current_fp_precision() == 'fp32' else {} np.testing.assert_allclose(res, expected, **tol) - assert np.isscalar(res) or len(res) == 1 + assert np.isscalar(res) or res.ndim == 0 or len(res) == 1 diff --git a/tests/functions/test_memory.py b/tests/functions/test_memory.py index 7c1dbbbc19c..8654833f94b 100644 --- a/tests/functions/test_memory.py +++ b/tests/functions/test_memory.py @@ -6,7 +6,7 @@ import psyneulink.core.components.functions.stateful.memoryfunctions as Functions import psyneulink.core.llvm as pnlvm from psyneulink import * -from psyneulink.core.globals.utilities import _SeededPhilox +from psyneulink.core.globals.utilities import _SeededPhilox, convert_all_elements_to_np_array # ********************************************************************************************************************** # OMINBUS TEST ********************************************************************************************************* @@ -170,6 +170,24 @@ def test_basic(func, variable, params, expected, benchmark, func_mode): #region class TestDictionaryMemory: + + # standard numpy comparison methods don't work well with ragged arrays + @staticmethod + def _get_retrieved_key(stimuli, retrieved_value): + # assumes as in tests below that at most one stimulus key will match + for k, v in stimuli.items(): + v = convert_all_elements_to_np_array(v) + if len(v) != len(retrieved_value): + continue + + for i in range(len(v)): + if not np.array_equal(v[i], retrieved_value[i]): + break + else: + return [k] + + return [None] + # Test of DictionaryMemory without LLVM: def test_DictionaryMemory_with_initializer_and_key_size_same_as_val_size(self): @@ -193,8 +211,8 @@ def test_DictionaryMemory_with_initializer_and_key_size_same_as_val_size(self): retrieved_keys=[] for key in sorted(stimuli.keys()): - retrieved = [i for i in em.execute(stimuli[key])] - retrieved_key = [k for k,v in stimuli.items() if v == retrieved] or [None] + retrieved = em.execute(stimuli[key]) + retrieved_key = [k for k, v in stimuli.items() if np.array_equal(v, retrieved)] or [None] retrieved_keys.append(retrieved_key) assert retrieved_keys == [['F'], ['A'], ['A'], ['C'], ['B'], ['F']] @@ -202,20 +220,20 @@ def test_DictionaryMemory_with_initializer_and_key_size_same_as_val_size(self): em.function.reset(np.array([stimuli['A'], stimuli['F']], dtype=object)) retrieved_keys=[] for key in sorted(stimuli.keys()): - retrieved = [i for i in em.execute(stimuli[key])] - retrieved_key = [k for k,v in stimuli.items() if v == retrieved] or [None] + retrieved = em.execute(stimuli[key]) + retrieved_key = [k for k, v in stimuli.items() if np.array_equal(v, retrieved)] or [None] retrieved_keys.append(retrieved_key) assert retrieved_keys == [['A'], ['A'], ['A'], ['A'], ['B'], ['F']] stim = 'C' em.function.equidistant_keys_select = OLDEST - retrieved = [i for i in em.function.get_memory(stimuli[stim][0])] - retrieved_key = [k for k,v in stimuli.items() if v == retrieved] or [None] + retrieved = em.function.get_memory(stimuli[stim][0]) + retrieved_key = [k for k, v in stimuli.items() if np.array_equal(v, retrieved)] or [None] assert retrieved_key == ['A'] em.function.equidistant_keys_select = NEWEST - retrieved = [i for i in em.function.get_memory(stimuli[stim][0])] - retrieved_key = [k for k,v in stimuli.items() if v == retrieved] or [None] + retrieved = em.function.get_memory(stimuli[stim][0]) + retrieved_key = [k for k, v in stimuli.items() if np.array_equal(v, retrieved)] or 
[None] assert retrieved_key == ['D'] # Test that after allowing dups, warning is issued and memory with zeros is returned @@ -226,10 +244,10 @@ def test_DictionaryMemory_with_initializer_and_key_size_same_as_val_size(self): with pytest.warns(UserWarning, match=text): retrieved = em.execute(stimuli[stim]) - retrieved_key = [k for k,v in stimuli.items() if v==list(retrieved)] or [None] + retrieved_key = [k for k, v in stimuli.items() if np.array_equal(v, retrieved)] or [None] assert retrieved_key == [None] - assert retrieved[0] == [0, 0, 0] - assert retrieved[1] == [0, 0, 0] + np.testing.assert_array_equal(retrieved[0], [0, 0, 0]) + np.testing.assert_array_equal(retrieved[1], [0, 0, 0]) def test_DictionaryMemory_with_initializer_and_key_size_diff_from_val_size(self): @@ -255,20 +273,17 @@ def test_DictionaryMemory_with_initializer_and_key_size_diff_from_val_size(self) for key in sorted(stimuli.keys()): print(key) retrieved = [i for i in em.execute(stimuli[key])] - retrieved_key = [k for k,v in stimuli.items() if v == retrieved] or [None] - retrieved_keys.append(retrieved_key) + retrieved_keys.append(TestDictionaryMemory._get_retrieved_key(stimuli, retrieved)) assert retrieved_keys == [['F'], ['A'], ['A'], ['A'], ['B'], ['F']] stim = 'C' em.function.equidistant_keys_select = OLDEST retrieved = [i for i in em.function.get_memory(stimuli[stim][0])] - retrieved_key = [k for k,v in stimuli.items() if v == retrieved] or [None] - assert retrieved_key == ['A'] + assert TestDictionaryMemory._get_retrieved_key(stimuli, retrieved) == ['A'] em.function.equidistant_keys_select = NEWEST retrieved = [i for i in em.function.get_memory(stimuli[stim][0])] - retrieved_key = [k for k,v in stimuli.items() if v == retrieved] or [None] - assert retrieved_key == ['D'] + assert TestDictionaryMemory._get_retrieved_key(stimuli, retrieved) == ['D'] # Test that after allowing dups, warning is issued and memory with zeros is returned em.function.duplicate_keys = False @@ -278,10 +293,10 @@ def test_DictionaryMemory_with_initializer_and_key_size_diff_from_val_size(self) with pytest.warns(UserWarning, match=text): retrieved = em.execute(stimuli[stim]) - retrieved_key = [k for k,v in stimuli.items() if v==list(retrieved)] or [None] + retrieved_key = [k for k, v in stimuli.items() if np.array_equal(v, retrieved)] or [None] assert retrieved_key == [None] - assert retrieved[0] == [0, 0, 0] - assert retrieved[1] == [0, 0, 0, 0] + np.testing.assert_array_equal(retrieved[0], [0, 0, 0]) + np.testing.assert_array_equal(retrieved[1], [0, 0, 0, 0]) # def test_DictionaryMemory_without_initializer_in_composition(): # @@ -325,19 +340,19 @@ def test_DictionaryMemory_without_initializer_and_key_size_same_as_val_size(self retrieved_keys=[] for key in sorted(stimuli.keys()): retrieved = [i for i in em.execute(stimuli[key])] - retrieved_key = [k for k,v in stimuli.items() if v == retrieved] or [None] + retrieved_key = [k for k, v in stimuli.items() if np.array_equal(v, retrieved)] or [None] retrieved_keys.append(retrieved_key) assert retrieved_keys == [[None], ['A'], ['A'], ['C'], ['B'], ['D']] stim = 'C' em.function.equidistant_keys_select = OLDEST retrieved = [i for i in em.function.get_memory(stimuli[stim][0])] - retrieved_key = [k for k,v in stimuli.items() if v == retrieved] or [None] + retrieved_key = [k for k, v in stimuli.items() if np.array_equal(v, retrieved)] or [None] assert retrieved_key == ['A'] em.function.equidistant_keys_select = NEWEST retrieved = [i for i in em.function.get_memory(stimuli[stim][0])] - retrieved_key = [k 
for k,v in stimuli.items() if v == retrieved] or [None] + retrieved_key = [k for k, v in stimuli.items() if np.array_equal(v, retrieved)] or [None] assert retrieved_key == ['D'] # Test that after allowing dups, warning is issued and memory with zeros is returned @@ -348,10 +363,10 @@ def test_DictionaryMemory_without_initializer_and_key_size_same_as_val_size(self with pytest.warns(UserWarning, match=text): retrieved = em.execute(stimuli[stim]) - retrieved_key = [k for k,v in stimuli.items() if v==list(retrieved)] or [None] + retrieved_key = [k for k, v in stimuli.items() if np.array_equal(v, retrieved)] or [None] assert retrieved_key == [None] - assert retrieved[0] == [0, 0, 0] - assert retrieved[1] == [0, 0, 0] + np.testing.assert_array_equal(retrieved[0], [0, 0, 0]) + np.testing.assert_array_equal(retrieved[1], [0, 0, 0]) def test_DictionaryMemory_without_initializer_and_key_size_diff_from_val_size(self): @@ -375,20 +390,17 @@ def test_DictionaryMemory_without_initializer_and_key_size_diff_from_val_size(se retrieved_keys=[] for key in sorted(stimuli.keys()): retrieved = [i for i in em.execute(stimuli[key])] - retrieved_key = [k for k,v in stimuli.items() if v == retrieved] or [None] - retrieved_keys.append(retrieved_key) + retrieved_keys.append(TestDictionaryMemory._get_retrieved_key(stimuli, retrieved)) assert retrieved_keys == [[None], ['A'], ['A'], ['C'], ['B'], ['D']] stim = 'C' em.function.equidistant_keys_select = OLDEST retrieved = [i for i in em.function.get_memory(stimuli[stim][0])] - retrieved_key = [k for k,v in stimuli.items() if v == retrieved] or [None] - assert retrieved_key == ['A'] + assert TestDictionaryMemory._get_retrieved_key(stimuli, retrieved) == ['A'] em.function.equidistant_keys_select = NEWEST retrieved = [i for i in em.function.get_memory(stimuli[stim][0])] - retrieved_key = [k for k,v in stimuli.items() if v == retrieved] or [None] - assert retrieved_key == ['D'] + assert TestDictionaryMemory._get_retrieved_key(stimuli, retrieved) == ['D'] # Test that after allowing dups, warning is issued and memory with zeros is returned em.function.duplicate_keys = False @@ -398,10 +410,10 @@ def test_DictionaryMemory_without_initializer_and_key_size_diff_from_val_size(se with pytest.warns(UserWarning, match=text): retrieved = em.execute(stimuli[stim]) - retrieved_key = [k for k,v in stimuli.items() if v==list(retrieved)] or [None] + retrieved_key = [k for k, v in stimuli.items() if np.array_equal(v, retrieved)] or [None] assert retrieved_key == [None] - assert retrieved[0] == [0, 0, 0] - assert retrieved[1] == [0, 0, 0, 0] + np.testing.assert_array_equal(retrieved[0], [0, 0, 0]) + np.testing.assert_array_equal(retrieved[1], [0, 0, 0, 0]) def test_DictionaryMemory_without_assoc(self): @@ -428,14 +440,14 @@ def test_DictionaryMemory_without_assoc(self): for key in sorted(stimuli.keys()): print(f'\nCurrent memory: \n{em.memory}\n') retrieved = [i for i in em.execute(stimuli[key])] - retrieved_key = [k for k,v in stimuli.items() if v == retrieved] or [None] + retrieved_key = [k for k, v in stimuli.items() if np.array_equal(v, retrieved)] or [None] print(f'\nExecuted with stimulus {key}: {stimuli[key]};' f'\nRetrieved memory {retrieved_key[0]}: \n\t{retrieved}') retrieved_keys=[] for key in sorted(stimuli.keys()): retrieved = [i for i in em.execute(stimuli[key])] - retrieved_key = [k for k,v in stimuli.items() if v == retrieved] or [None] + retrieved_key = [k for k, v in stimuli.items() if np.array_equal(v, retrieved)] or [None] retrieved_keys.append(retrieved_key) assert 
retrieved_keys == [['A', 'C', 'D'], ['B'], ['A', 'C', 'D'], ['A', 'C', 'D'], ['E'], ['F']] @@ -694,41 +706,41 @@ def test_ContentAddressableMemory_simple_distances(self): c.distance_field_weights=[1,0] retrieved = c([[1, 2, 3], [4, 5, 10]]) np.testing.assert_equal(retrieved, [[1, 2, 3], [4, 5, 6]]) - assert c.distances_by_field == [0.0, 0.0] + np.testing.assert_equal(c.distances_by_field, [0.0, 0.0]) # Test with 0 as the other field weight c.distance_field_weights=[0,1] retrieved = c([[1, 2, 3], [4, 5, 10]]) np.testing.assert_equal(retrieved, [[1, 2, 10], [4, 5, 10]]) - assert c.distances_by_field == [0.0, 0.0] + np.testing.assert_equal(c.distances_by_field, [0.0, 0.0]) # Test with 0 as both field weights (equvialent to setting retrieval_prob=0, so should return 0's) c.distance_field_weights=[0,0] retrieved = c([[1, 2, 3], [4, 5, 10]]) np.testing.assert_equal(retrieved, [[0, 0, 0], [0, 0, 0]]) - assert c.distances_by_field == [0.0, 0.0] + np.testing.assert_equal(c.distances_by_field, [0.0, 0.0]) # Test with None as field weight c.distance_field_weights=[None,1] retrieved = c([[1, 2, 3], [4, 5, 10]]) np.testing.assert_equal(retrieved, [[1, 2, 10], [4, 5, 10]]) - assert c.distances_by_field == [None, 0.0] + np.testing.assert_equal(c.distances_by_field, [None, 0.0]) c.distance_field_weights=[1, None] retrieved = c([[1, 2, 3], [4, 5, 10]]) np.testing.assert_equal(retrieved, [[1, 2, 3], [4, 5, 6]]) - assert c.distances_by_field == [0.0, None] + np.testing.assert_equal(c.distances_by_field, [0.0, None]) # Test with [] as field weight c.distance_field_weights=[[],1] retrieved = c([[1, 2, 3], [4, 5, 10]]) np.testing.assert_equal(retrieved, [[1, 2, 10], [4, 5, 10]]) - assert c.distances_by_field == [None, 0.0] + np.testing.assert_equal(c.distances_by_field, [None, 0.0]) c.distance_field_weights=[1, []] retrieved = c([[1, 2, 3], [4, 5, 10]]) np.testing.assert_equal(retrieved, [[1, 2, 3], [4, 5, 6]]) - assert c.distances_by_field == [0.0, None] + np.testing.assert_equal(c.distances_by_field, [0.0, None]) # FIX: COULD CONDENSE THESE TESTS BY PARAMETERIZING FIELD-WEIGHTS AND ALSO INCLUDE DISTANCE METRIC AS A PARAM def test_ContentAddressableMemory_parametric_distances(self): diff --git a/tests/functions/test_stability.py b/tests/functions/test_stability.py index 5ab7b5e4b2c..0f38e738d38 100644 --- a/tests/functions/test_stability.py +++ b/tests/functions/test_stability.py @@ -41,7 +41,7 @@ def test_basic(variable, metric, normalize, expected, benchmark, func_mode): benchmark.group = "DistanceFunction " + metric + ("-normalized" if normalize else "") res = benchmark(EX, variable) np.testing.assert_allclose(res, expected) - assert np.isscalar(res) or len(res) == 1 + assert np.isscalar(res) or res.ndim == 0 or len(res) == 1 def test_Stability_squeezes_variable(): diff --git a/tests/mechanisms/test_transfer_mechanism.py b/tests/mechanisms/test_transfer_mechanism.py index a84dbedaca3..2578f122ee8 100644 --- a/tests/mechanisms/test_transfer_mechanism.py +++ b/tests/mechanisms/test_transfer_mechanism.py @@ -793,7 +793,7 @@ def test_transfer_mech_array_assignments_wrong_size_mech_noise(self): noise=[i / 10 for i in range(VECTOR_SIZE + 1)] ) assert ( - "Noise parameter ([0.0, 0.1, 0.2, 0.3, 0.4])" in str(error_text.value) and + "Noise parameter ([0. 0.1 0.2 0.3 0.4])" in str(error_text.value) and "does not match default variable ([[0 0 0 0]]);" in str(error_text.value) and "must be specified as a float, a function, or an array of the appropriate shape ((1, 4))." 
in str(error_text.value) diff --git a/tests/misc/test_parameters.py b/tests/misc/test_parameters.py index cd77e5068b3..13369d991d2 100644 --- a/tests/misc/test_parameters.py +++ b/tests/misc/test_parameters.py @@ -345,8 +345,9 @@ def test_values(self, obj, parameter_name, source): for eid in eids: obj.execute(np.array([eid, eid]), context=eid) + context = pnl.Context(execution_id=eid) assert all([ - obj_param.get(eid) is source.get(eid) + obj_param._get(context) is source._get(context) for eid in eids ]) From 1b35c1b4bccef7b9c049ebdc3592d1bf8b0c4c2b Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Fri, 5 Jan 2024 02:16:12 +0000 Subject: [PATCH 138/410] UserDefinedFunction: use external Parameter.set for custom parameters --- psyneulink/core/components/functions/userdefinedfunction.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/psyneulink/core/components/functions/userdefinedfunction.py b/psyneulink/core/components/functions/userdefinedfunction.py index cf6adc77ce4..383a0380988 100644 --- a/psyneulink/core/components/functions/userdefinedfunction.py +++ b/psyneulink/core/components/functions/userdefinedfunction.py @@ -653,7 +653,8 @@ def _function(self, variable, context=None, **kwargs): value = eval(self.custom_function, kwargs) if self.stateful_parameter is not None and not self.is_initializing: - getattr(self.parameters, self.stateful_parameter)._set(value, context) + # use external set here because we don't control custom_function + getattr(self.parameters, self.stateful_parameter).set(value, context) return self.convert_output_type(value) From 8274179393b1693f4f2685ffddb3c4be35e5fe0e Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Sat, 15 Jul 2023 03:40:54 +0000 Subject: [PATCH 139/410] sampleiterator: store numeric attrs as numpy arrays for SampleSpec, SampleIterator: - add _numeric_attrs class attribute to store names of numeric attributes that will be stored as numpy arrays - add properties using function make_array_property and metaclass SampleMeta for each item in _numeric_attrs - interface remains unchanged - retrieves original value but numpy arrays are stored in _ --- psyneulink/core/globals/sampleiterator.py | 33 ++++++++++++++++++++--- 1 file changed, 30 insertions(+), 3 deletions(-) diff --git a/psyneulink/core/globals/sampleiterator.py b/psyneulink/core/globals/sampleiterator.py index 03d7082c9bc..f059401e27e 100644 --- a/psyneulink/core/globals/sampleiterator.py +++ b/psyneulink/core/globals/sampleiterator.py @@ -15,10 +15,11 @@ """ +from abc import ABCMeta from collections.abc import Iterator from decimal import Decimal, getcontext from inspect import isclass -from psyneulink.core.globals.utilities import is_numeric_scalar +from psyneulink.core.globals.utilities import is_numeric_scalar, try_extract_0d_array_item import numpy as np from beartype import beartype @@ -53,7 +54,29 @@ def __init__(self, error_value): self.error_value = error_value -class SampleSpec: +def make_array_property(name): + private_name = f'_{name}' + + def getter(self): + return try_extract_0d_array_item(getattr(self, private_name)) + + def setter(self, value): + if value is not None: + value = np.asarray(value) + setattr(self, private_name, value) + + return property(getter).setter(setter) + + +class SampleMeta(ABCMeta): + def __init__(cls, *args, **kwargs): + for n in cls._numeric_attrs: + setattr(cls, n, make_array_property(n)) + + super().__init__(*args, **kwargs) + + +class SampleSpec(metaclass=SampleMeta): """ SampleSpec( \ start=None, \ @@ -150,6 +173,8 @@ 
class SampleSpec: """ + _numeric_attrs = ['start', 'stop', 'step', 'num', '_precision'] + @beartype def __init__(self, start: Optional[Union[int, float]] = None, @@ -233,7 +258,7 @@ def is_sample_spec(spec): return False -class SampleIterator(Iterator): +class SampleIterator(Iterator, metaclass=SampleMeta): """ SampleIterator( \ specification \ @@ -271,6 +296,8 @@ class SampleIterator(Iterator): """ + _numeric_attrs = ['start', 'stop', 'step', 'current_step', 'num', 'head'] + def __init__(self, specification): """ From 6017b8fa2fd2461ba7b278b74ab93413ca738b5a Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Sat, 13 Jan 2024 03:42:21 +0000 Subject: [PATCH 140/410] tests: control: split result assertions --- tests/composition/test_control.py | 44 ++++++++++++++++++++++--------- 1 file changed, 32 insertions(+), 12 deletions(-) diff --git a/tests/composition/test_control.py b/tests/composition/test_control.py index 51dab5be35d..d390a7274f9 100644 --- a/tests/composition/test_control.py +++ b/tests/composition/test_control.py @@ -521,9 +521,15 @@ def test_deferred_init(self, control_spec, state_features_arg): [[15.], [15.0], [0.0], [3.84279648], [0.81637827]]] for simulation in range(len(expected_sim_results_array)): - np.testing.assert_allclose(expected_sim_results_array[simulation], - # Note: Skip decision variable OutputPort - comp.simulation_results[simulation][0:3] + comp.simulation_results[simulation][4:6]) + # Note: Skip decision variable OutputPort + np.testing.assert_allclose( + expected_sim_results_array[simulation][:3], + comp.simulation_results[simulation][:3] + ) + np.testing.assert_allclose( + expected_sim_results_array[simulation][3:], + comp.simulation_results[simulation][4:] + ) expected_results_array = [ [[20.0], [20.0], [0.0], [1.0], [2.378055160151634], [0.9820137900379085]], @@ -2857,9 +2863,15 @@ def test_evc(self): ] for simulation in range(len(expected_sim_results_array)): - np.testing.assert_allclose(expected_sim_results_array[simulation], - # Note: Skip decision variable OutputPort - comp.simulation_results[simulation][0:3] + comp.simulation_results[simulation][4:6]) + # Note: Skip decision variable OutputPort + np.testing.assert_allclose( + expected_sim_results_array[simulation][:3], + comp.simulation_results[simulation][:3] + ) + np.testing.assert_allclose( + expected_sim_results_array[simulation][3:], + comp.simulation_results[simulation][4:] + ) expected_results_array = [ [[20.0], [20.0], [0.0], [1.0], [2.378055160151634], [0.9820137900379085]], @@ -3146,10 +3158,14 @@ def test_laming_validation_specify_control_signals(self): ] for simulation in range(len(expected_sim_results_array)): + # Note: Skip decision variable OutputPort np.testing.assert_allclose( - expected_sim_results_array[simulation], - # Note: Skip decision variable OutputPort - comp.simulation_results[simulation][0:3] + comp.simulation_results[simulation][4:6] + expected_sim_results_array[simulation][:3], + comp.simulation_results[simulation][:3] + ) + np.testing.assert_allclose( + expected_sim_results_array[simulation][3:], + comp.simulation_results[simulation][4:] ) expected_results_array = [ @@ -3283,10 +3299,14 @@ def test_stateful_mechanism_in_simulation(self): ] for simulation in range(len(expected_sim_results_array)): + # Note: Skip decision variable OutputPort + np.testing.assert_allclose( + expected_sim_results_array[simulation][:3], + comp.simulation_results[simulation][:3] + ) np.testing.assert_allclose( - expected_sim_results_array[simulation], - # Note: Skip decision variable 
OutputPort - comp.simulation_results[simulation][0:3] + comp.simulation_results[simulation][4:6] + expected_sim_results_array[simulation][3:], + comp.simulation_results[simulation][4:] ) expected_results_array = [ From 03ff5c37605c2233cb0dbd76135b4b6814df74b0 Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Thu, 21 Dec 2023 03:02:17 +0000 Subject: [PATCH 141/410] EMStorageMechanism: do not set value in _execute --- .../mechanisms/modulatory/learning/EMstoragemechanism.py | 1 - 1 file changed, 1 deletion(-) diff --git a/psyneulink/library/components/mechanisms/modulatory/learning/EMstoragemechanism.py b/psyneulink/library/components/mechanisms/modulatory/learning/EMstoragemechanism.py index 341e5700a5f..15f94ed1109 100644 --- a/psyneulink/library/components/mechanisms/modulatory/learning/EMstoragemechanism.py +++ b/psyneulink/library/components/mechanisms/modulatory/learning/EMstoragemechanism.py @@ -805,5 +805,4 @@ def _execute(self, decay_rate=decay_rate, context=context, runtime_params=runtime_params)) - self.parameters.value._set(value, context) return value From ce1e5a8762292c113a6021d301c4517faf16782c Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Thu, 21 Dec 2023 03:30:15 +0000 Subject: [PATCH 142/410] EMStorageMechanism: fix dot-notation access --- .../mechanisms/modulatory/learning/EMstoragemechanism.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/psyneulink/library/components/mechanisms/modulatory/learning/EMstoragemechanism.py b/psyneulink/library/components/mechanisms/modulatory/learning/EMstoragemechanism.py index 15f94ed1109..a3f8ed3f830 100644 --- a/psyneulink/library/components/mechanisms/modulatory/learning/EMstoragemechanism.py +++ b/psyneulink/library/components/mechanisms/modulatory/learning/EMstoragemechanism.py @@ -785,7 +785,7 @@ def _execute(self, # get entry to store from variable of Projection matrix (memory_field) # to match_node in which memory will be stored (this is to accomodate concatenation_node) axis = 0 - entry_to_store = field_projection.variable + entry_to_store = field_projection.parameters.variable._get(context) if concatenation_node is None: assert np.all(entry_to_store == variable[i]),\ f"PROGRAM ERROR: misalignment between inputs and fields for storing them" From a5ec4d427b05dcf1556dcbdefdc47403a544c5c8 Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Fri, 5 Jan 2024 00:05:27 +0000 Subject: [PATCH 143/410] function: replace FunctionOutputType.RAW_NUMBER with NP_0D_ARRAY 0d array is more appropriate when now assuming universal numpy array for numeric values --- psyneulink/core/components/functions/function.py | 14 +++++++------- .../projections/modulatory/gatingprojection.py | 2 +- tests/functions/test_functions.py | 14 +++++++------- 3 files changed, 15 insertions(+), 15 deletions(-) diff --git a/psyneulink/core/components/functions/function.py b/psyneulink/core/components/functions/function.py index 310a208b161..c0212f0cc97 100644 --- a/psyneulink/core/components/functions/function.py +++ b/psyneulink/core/components/functions/function.py @@ -93,7 +93,7 @@ If the `function ` returns a single numeric value, and the Function's class implements FunctionOutputTypeConversion, then the type of value returned by its `function ` can be specified using the `output_type` attribute, by assigning it one of the following `FunctionOutputType` values: - * FunctionOutputType.RAW_NUMBER: return "exposed" number; + * FunctionOutputType.NP_0D_ARRAY: return 0d np.array * FunctionOutputType.NP_1D_ARRAY: return 1d np.array * 
FunctionOutputType.NP_2D_ARRAY: return 2d np.array. @@ -196,7 +196,7 @@ class FunctionError(ComponentError): class FunctionOutputType(IntEnum): - RAW_NUMBER = 0 + NP_0D_ARRAY = 0 NP_1D_ARRAY = 1 NP_2D_ARRAY = 2 DEFAULT = 3 @@ -312,7 +312,7 @@ def _output_type_setter(value, owning_component): if ( owning_component.defaults.variable is not None and safe_len(owning_component.defaults.variable) > 1 - and owning_component.output_type is FunctionOutputType.RAW_NUMBER + and owning_component.output_type is FunctionOutputType.NP_0D_ARRAY ): raise FunctionError( f"{owning_component.__class__.__name__} can't be set to return a " @@ -327,7 +327,7 @@ def _output_type_setter(value, owning_component): if ( isinstance(owning_component.owner, Mechanism) and ( - value == FunctionOutputType.RAW_NUMBER + value == FunctionOutputType.NP_0D_ARRAY or value == FunctionOutputType.NP_1D_ARRAY ) ): @@ -457,7 +457,7 @@ class Function_Base(Function): The output_type can be used to specify type conversion for single-item return values: - it can only be used for numbers or a single-number list; other values will generate an exception - if self.output_type is set to: - FunctionOutputType.RAW_NUMBER, return value is "exposed" as a number + FunctionOutputType.NP_0D_ARRAY, return value is "exposed" as a number FunctionOutputType.NP_1D_ARRAY, return value is 1d np.array FunctionOutputType.NP_2D_ARRAY, return value is 2d np.array - it must be enabled for a subclass by setting params[FUNCTION_OUTPUT_TYPE_CONVERSION] = True @@ -810,9 +810,9 @@ def convert_output_type(self, value, output_type=None): # Convert to raw number, irrespective of value type: # Note: if 2D or 1D array has more than two items, generate exception - elif output_type is FunctionOutputType.RAW_NUMBER: + elif output_type is FunctionOutputType.NP_0D_ARRAY: if object_has_single_value(value): - value = float(value) + value = np.array(float(value)) else: raise FunctionError(f"Can't convert value ({value}) with more than a single number to a raw number.") diff --git a/psyneulink/core/components/projections/modulatory/gatingprojection.py b/psyneulink/core/components/projections/modulatory/gatingprojection.py index 1a0619c176a..746f861bee4 100644 --- a/psyneulink/core/components/projections/modulatory/gatingprojection.py +++ b/psyneulink/core/components/projections/modulatory/gatingprojection.py @@ -222,7 +222,7 @@ class Parameters(ModulatoryProjection_Base.Parameters): :type: :read only: True """ - function = Parameter(Linear(params={FUNCTION_OUTPUT_TYPE: FunctionOutputType.RAW_NUMBER}), stateful=False, loggable=False) + function = Parameter(Linear(params={FUNCTION_OUTPUT_TYPE: FunctionOutputType.NP_0D_ARRAY}), stateful=False, loggable=False) gating_signal = Parameter(None, read_only=True, getter=_gating_signal_getter, setter=_gating_signal_setter, pnl_internal=True) gating_signal_params = Parameter( diff --git a/tests/functions/test_functions.py b/tests/functions/test_functions.py index 040f05a0de2..fdab23d40bb 100644 --- a/tests/functions/test_functions.py +++ b/tests/functions/test_functions.py @@ -8,10 +8,10 @@ @pytest.mark.parametrize( 'output_type, variable, expected_output', [ - (pnl.FunctionOutputType.RAW_NUMBER, 1, 1.), - (pnl.FunctionOutputType.RAW_NUMBER, [1], 1.), - (pnl.FunctionOutputType.RAW_NUMBER, [[1]], 1.), - (pnl.FunctionOutputType.RAW_NUMBER, [[[1]]], 1.), + (pnl.FunctionOutputType.NP_0D_ARRAY, 1, 1.), + (pnl.FunctionOutputType.NP_0D_ARRAY, [1], 1.), + (pnl.FunctionOutputType.NP_0D_ARRAY, [[1]], 1.), + (pnl.FunctionOutputType.NP_0D_ARRAY, 
[[[1]]], 1.), (pnl.FunctionOutputType.NP_1D_ARRAY, 1, np.array([1.])), (pnl.FunctionOutputType.NP_1D_ARRAY, [1], np.array([1.])), (pnl.FunctionOutputType.NP_1D_ARRAY, [[1]], np.array([1.])), @@ -33,9 +33,9 @@ def test_output_type_conversion(output_type, variable, expected_output): @pytest.mark.parametrize( 'output_type, variable', [ - (pnl.FunctionOutputType.RAW_NUMBER, [1, 1]), - (pnl.FunctionOutputType.RAW_NUMBER, [[1, 1]]), - (pnl.FunctionOutputType.RAW_NUMBER, [[[1], [1, 1]]]), + (pnl.FunctionOutputType.NP_0D_ARRAY, [1, 1]), + (pnl.FunctionOutputType.NP_0D_ARRAY, [[1, 1]]), + (pnl.FunctionOutputType.NP_0D_ARRAY, [[[1], [1, 1]]]), (pnl.FunctionOutputType.NP_1D_ARRAY, [[1, 1], [1, 1]]), ] ) From 46df360540b5955f5de3a35cbc3f64e0daf876ae Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Wed, 17 Jan 2024 22:16:50 +0000 Subject: [PATCH 144/410] ParameterEstimationComposition: empty results on new run, not clear --- psyneulink/core/compositions/parameterestimationcomposition.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/psyneulink/core/compositions/parameterestimationcomposition.py b/psyneulink/core/compositions/parameterestimationcomposition.py index 976dd8c650b..3783c2fcdc5 100644 --- a/psyneulink/core/compositions/parameterestimationcomposition.py +++ b/psyneulink/core/compositions/parameterestimationcomposition.py @@ -802,7 +802,7 @@ def f(sim_data): def run(self, *args, **kwargs): # Clear any old results from the composition if self.results is not None: - self.results.clear() + self.results = [] context = kwargs.get("context", None) self._assign_execution_ids(context) From 5682b82620702b6410f5db6ae643e9555c4d65d8 Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Fri, 23 Feb 2024 02:11:57 +0000 Subject: [PATCH 145/410] DriftDiffusionAnalytical: specify dependence of bias on starting_value, threshold bias getter uses starting_value and threshold --- .../functions/nonstateful/distributionfunctions.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/psyneulink/core/components/functions/nonstateful/distributionfunctions.py b/psyneulink/core/components/functions/nonstateful/distributionfunctions.py index d6ad016071d..4878ad667d5 100644 --- a/psyneulink/core/components/functions/nonstateful/distributionfunctions.py +++ b/psyneulink/core/components/functions/nonstateful/distributionfunctions.py @@ -1113,7 +1113,12 @@ class Parameters(DistributionFunction.Parameters): threshold = Parameter(1.0, modulable=True) noise = Parameter(0.5, modulable=True, setter=_noise_setter) non_decision_time = Parameter(.200, modulable=True) - bias = Parameter(0.5, read_only=True, getter=_DriftDiffusionAnalytical_bias_getter) + bias = Parameter( + 0.5, + read_only=True, + getter=_DriftDiffusionAnalytical_bias_getter, + dependencies=['starting_value', 'threshold'] + ) # this is read only because conversion is disabled for this function # this occurs in other places as well enable_output_type_conversion = Parameter( From 468e58134fd24a652f63f01584d1e13f0fd0cbae Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Sat, 13 Jan 2024 03:08:59 +0000 Subject: [PATCH 146/410] utilities: add extended numpy array comparison function - extended_array_equal - like numpy.array_equal supporting nested object-dtype arrays --- psyneulink/core/globals/utilities.py | 88 +++++++++++++++++++++++++++- tests/misc/test_utilities.py | 54 ++++++++++++++++- 2 files changed, 139 insertions(+), 3 deletions(-) diff --git a/psyneulink/core/globals/utilities.py 
b/psyneulink/core/globals/utilities.py index def49453843..dfec7aa3f61 100644 --- a/psyneulink/core/globals/utilities.py +++ b/psyneulink/core/globals/utilities.py @@ -100,6 +100,7 @@ import collections import copy +import functools import inspect import itertools import logging @@ -114,7 +115,7 @@ from beartype import beartype from numbers import Number -from psyneulink._typing import Optional, Union, Literal, Type, List, Tuple +from psyneulink._typing import Any, Callable, Optional, Union, Literal, Type, List, Tuple from enum import Enum, EnumMeta, IntEnum from collections.abc import Mapping @@ -134,7 +135,7 @@ __all__ = [ 'append_type_to_name', 'AutoNumber', 'ContentAddressableList', 'convert_to_list', 'convert_to_np_array', - 'convert_all_elements_to_np_array', 'copy_iterable_with_shared', 'get_class_attributes', 'flatten_list', + 'convert_all_elements_to_np_array', 'copy_iterable_with_shared', 'get_class_attributes', 'extended_array_equal', 'flatten_list', 'get_all_explicit_arguments', 'get_modulationOperation_name', 'get_value_from_array', 'insert_list', 'is_matrix_keyword', 'all_within_range', 'is_comparison_operator', 'iscompatible', 'is_component', 'is_distance_metric', 'is_iterable', 'is_matrix', @@ -2151,3 +2152,86 @@ def try_extract_0d_array_item(arr: np.ndarray): except AttributeError: pass return arr + + +def _extended_array_compare(a, b, comparison_fct: Callable[[Any, Any], bool]) -> bool: + """ + Recursively determine equality of **a** and **b** using + **comparison_fct** as an equality function. Shape and size of nested + arrays must be the same for equality. + + Args: + a (np.ndarray-like) + b (np.ndarray-like) + comparison_fct (Callable[[Any, Any], bool]): a comparison + function to be called on **a** and **b**. For example, + numpy.array_equal + + Returns: + bool: result of comparison_fct(**a**, **b**) + """ + try: + a_ndim = a.ndim + except AttributeError: + a_ndim = None + + try: + b_ndim = b.ndim + except AttributeError: + b_ndim = None + + # a or b is not a numpy array + if a_ndim is None or b_ndim is None: + return comparison_fct(a, b) + + if a_ndim != b_ndim: + return False + + # b_ndim is also 0 + if a_ndim == 0: + return comparison_fct(a, b) + + if len(a) != len(b): + return False + + if a.dtype != b.dtype: + return False + + # safe to use standard numpy comparison here because not ragged + if a.dtype != object: + return comparison_fct(a, b) + + for i in range(len(a)): + if not _extended_array_compare(a[i], b[i], comparison_fct): + return False + + return True + + +def extended_array_equal(a, b, equal_nan: bool = False) -> bool: + """ + Tests equality like numpy.array_equal, while recursively checking + object-dtype arrays. + + Args: + a (np.ndarray-like) + b (np.ndarray-like) + equal_nan (bool, optional): Whether to consider NaN as equal. + See numpy.array_equal. Defaults to False. + + Returns: + bool: **a** and **b** are equal. 
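
As a usage sketch complementing the docstring example below (illustrative only, not part of the patch), the equal_nan flag behaves as it does for numpy.array_equal, applied at each leaf of the ragged structure:

    import numpy as np
    from psyneulink.core.globals.utilities import extended_array_equal

    a = [np.array([1.0, np.nan]), np.array([2.0])]   # ragged; becomes an object-dtype array
    b = [np.array([1.0, np.nan]), np.array([2.0])]

    print(extended_array_equal(a, b))                  # False: NaN != NaN by default
    print(extended_array_equal(a, b, equal_nan=True))  # True:  NaNs treated as equal
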
+ + Example: + `X = np.array([np.array([0]), np.array([0, 0])], dtype=object)` + + | a | b | np.array_equal | extended_array_equal | + |---|---|----------------|----------------------| + | X | X | False | True | + """ + a = convert_all_elements_to_np_array(a) + b = convert_all_elements_to_np_array(b) + + return _extended_array_compare( + a, b, functools.partial(np.array_equal, equal_nan=equal_nan) + ) diff --git a/tests/misc/test_utilities.py b/tests/misc/test_utilities.py index 9be89ce4109..2f313cb741d 100644 --- a/tests/misc/test_utilities.py +++ b/tests/misc/test_utilities.py @@ -2,7 +2,9 @@ import numpy as np import pytest -from psyneulink.core.globals.utilities import convert_all_elements_to_np_array, prune_unused_args +from psyneulink.core.globals.utilities import ( + convert_all_elements_to_np_array, extended_array_equal, prune_unused_args, +) @pytest.mark.parametrize( @@ -67,3 +69,53 @@ def test_prune_unused_args(func, args, kwargs, expected_pruned_args, expected_pr assert pruned_args == expected_pruned_args assert pruned_kwargs == expected_pruned_kwargs + + +regular_np_array_parametrization = [ + [], + 0, + np.array([]), + [0], + [[[[[0]]]]], + [[[[[1]]]]], + [[1], [0]], + [[[[[0]]], [[[1]]]]], +] + + +@pytest.mark.parametrize('a', regular_np_array_parametrization) +@pytest.mark.parametrize('b', regular_np_array_parametrization) +def test_extended_array_equal_regular(a, b): + assert extended_array_equal(a, b) == np.array_equal(a, b) + + +irregular_np_array_parametrization = [ + ([0, [1, 0]], [0, [1, 0]], True), + ([1, [1, 0], [[[1]]]], [1, [1, 0], [[[1]]]], True), + ([np.array([0, 0]), np.array([0])], [np.array([0, 0]), np.array([0])], True), + ([1, [], [[[]]]], [1, [], [[[]]]], True), + ([['ab'], None], [['ab'], None], True), + ([[0, None, 'ab'], [1, 0]], [[0, None, 'ab'], [1, 0]], True), + + ([0, [0, 0]], [0, [1, 0]], False), + ([1, [1, 0], [[[1]]]], [], False), + ([1, [], [[[]]]], [], False), + ([['ab'], None], [['ab'], 0], False), + ([[0, None, 'a'], [1, 0]], [[0, None, 'ab'], [1, 0]], False), +] + + +@pytest.mark.parametrize( + 'a, b, equal', irregular_np_array_parametrization +) +def test_extended_array_equal_irregular(a, b, equal): + assert extended_array_equal(a, b) == equal + + +@pytest.mark.parametrize( + 'a', + [x[0] for x in irregular_np_array_parametrization] + + [x[1] for x in irregular_np_array_parametrization] +) +def test_extended_array_equal_irregular_identical(a): + assert extended_array_equal(a, a) From ea6e19002b2f8ba58cf912e9b7099d8fa6947afe Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Sat, 15 Jul 2023 04:54:41 +0000 Subject: [PATCH 147/410] Mechanism: use np ones instead of list multiplication --- psyneulink/core/components/mechanisms/mechanism.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/psyneulink/core/components/mechanisms/mechanism.py b/psyneulink/core/components/mechanisms/mechanism.py index 1fcabaeb5de..26838496bb5 100644 --- a/psyneulink/core/components/mechanisms/mechanism.py +++ b/psyneulink/core/components/mechanisms/mechanism.py @@ -2142,7 +2142,7 @@ def _instantiate_function(self, function, function_params=None, context=None): except AttributeError: default_weights = None if default_weights is None: - default_weights = default_weights or [1.0] * len(self.input_ports) + default_weights = default_weights or np.ones(len(self.input_ports)) # Assign any weights specified in input_port spec weights = [[input_port.defaults.weight if input_port.defaults.weight is not None else default_weight] @@ -2165,7 +2165,7 @@ def 
_instantiate_function(self, function, function_params=None, context=None): except AttributeError: default_exponents = None if default_exponents is None: - default_exponents = default_exponents or [1.0] * len(self.input_ports) + default_exponents = default_exponents or np.ones(len(self.input_ports)) # Assign any exponents specified in input_port spec exponents = [ From 76a73e1367e894b31d84d6285bb4a755d5f8a9a5 Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Fri, 14 Jul 2023 05:19:05 +0000 Subject: [PATCH 148/410] treewide: change all internal non-torch numeric values to numpy array --- psyneulink/core/components/component.py | 2 + .../core/components/functions/function.py | 2 +- .../nonstateful/distributionfunctions.py | 2 +- .../nonstateful/learningfunctions.py | 8 ++-- .../nonstateful/optimizationfunctions.py | 24 +++++----- .../functions/stateful/memoryfunctions.py | 10 +++-- .../core/components/mechanisms/mechanism.py | 30 +++++++++---- .../modulatory/control/controlmechanism.py | 16 +++---- .../control/optimizationcontrolmechanism.py | 8 ++-- .../modulatory/learning/learningmechanism.py | 8 ++-- .../processing/objectivemechanism.py | 11 ++++- .../processing/transfermechanism.py | 9 ++-- psyneulink/core/components/ports/inputport.py | 7 +-- .../ports/modulatorysignals/controlsignal.py | 2 +- .../core/components/ports/outputport.py | 16 ++++--- psyneulink/core/components/ports/port.py | 42 ++++++++++++------ psyneulink/core/compositions/composition.py | 44 +++++++++++++------ .../parameterestimationcomposition.py | 3 +- psyneulink/core/globals/utilities.py | 2 + .../modulatory/learning/EMstoragemechanism.py | 16 ++++--- .../integrator/episodicmemorymechanism.py | 6 +-- .../mechanisms/processing/leabramechanism.py | 9 +++- .../transfer/contrastivehebbianmechanism.py | 6 +-- .../transfer/recurrenttransfermechanism.py | 6 +-- .../compositions/autodiffcomposition.py | 6 +-- .../library/compositions/emcomposition.py | 12 ++--- tests/composition/test_composition.py | 2 +- tests/misc/test_parameters.py | 8 ++-- 28 files changed, 200 insertions(+), 117 deletions(-) diff --git a/psyneulink/core/components/component.py b/psyneulink/core/components/component.py index e190c91313d..868a1d4bd28 100644 --- a/psyneulink/core/components/component.py +++ b/psyneulink/core/components/component.py @@ -3326,6 +3326,8 @@ def execute(self, variable=None, context=None, runtime_params=None): if context.source is ContextFlags.COMMAND_LINE: self._initialize_from_context(context, override=False) + if is_numeric(variable): + variable = convert_all_elements_to_np_array(variable) value = self._execute(variable=variable, context=context, runtime_params=runtime_params) self.parameters.value._set(value, context=context) diff --git a/psyneulink/core/components/functions/function.py b/psyneulink/core/components/functions/function.py index c0212f0cc97..22a14b59f98 100644 --- a/psyneulink/core/components/functions/function.py +++ b/psyneulink/core/components/functions/function.py @@ -349,7 +349,7 @@ def _seed_setter(value, owning_component, context): # Remove any old PRNG state owning_component.parameters.random_state.set(None, context=context) - return int(value) + return np.asarray(value) def _random_state_getter(self, owning_component, context, modulated=False): diff --git a/psyneulink/core/components/functions/nonstateful/distributionfunctions.py b/psyneulink/core/components/functions/nonstateful/distributionfunctions.py index 4878ad667d5..0021b9946a3 100644 --- 
a/psyneulink/core/components/functions/nonstateful/distributionfunctions.py +++ b/psyneulink/core/components/functions/nonstateful/distributionfunctions.py @@ -936,7 +936,7 @@ def _DriftDiffusionAnalytical_bias_getter(owning_component=None, context=None): starting_value = owning_component.parameters.starting_value._get(context) threshold = owning_component.parameters.threshold._get(context) try: - return (starting_value + threshold) / (2 * threshold) + return np.asarray((starting_value + threshold) / (2 * threshold)) except TypeError: return None diff --git a/psyneulink/core/components/functions/nonstateful/learningfunctions.py b/psyneulink/core/components/functions/nonstateful/learningfunctions.py index 4f748e3a552..de5970e9242 100644 --- a/psyneulink/core/components/functions/nonstateful/learningfunctions.py +++ b/psyneulink/core/components/functions/nonstateful/learningfunctions.py @@ -47,7 +47,7 @@ MATRIX, Loss from psyneulink.core.globals.parameters import Parameter, check_user_specified from psyneulink.core.globals.preferences.basepreferenceset import ValidPrefSet -from psyneulink.core.globals.utilities import is_numeric, scalar_distance, convert_to_np_array, all_within_range, safe_len, is_numeric_scalar +from psyneulink.core.globals.utilities import convert_all_elements_to_np_array, is_numeric, scalar_distance, convert_to_np_array, all_within_range, safe_len, is_numeric_scalar __all__ = ['LearningFunction', 'Kohonen', 'Hebbian', 'ContrastiveHebbian', 'Reinforcement', 'BayesGLM', 'BackPropagation', 'TDLearning', 'EMStorage', @@ -956,7 +956,7 @@ def _function( # online update rules as per the given reference Lambda_n = (predictors.T @ predictors) + Lambda_prior mu_n = np.linalg.inv(Lambda_n) @ ((predictors.T @ dependent_vars) + (Lambda_prior @ mu_prior)) - gamma_shape_n = gamma_shape_prior + dependent_vars.shape[1] + gamma_shape_n = np.array(gamma_shape_prior + dependent_vars.shape[1]) gamma_size_n = gamma_size_prior + (dependent_vars.T @ dependent_vars) \ + (mu_prior.T @ Lambda_prior @ mu_prior) \ - (mu_n.T @ Lambda_n @ mu_n) @@ -1973,7 +1973,7 @@ def _function(self, # Construct weight change matrix with error term in proper element weight_change_matrix = np.diag(error_array) - return [error_array, error_array] + return convert_all_elements_to_np_array([error_array, error_array]) class TDLearning(Reinforcement): @@ -2556,4 +2556,4 @@ def _function(self, # Weight changes = delta rule (learning rate * activity * error) weight_change_matrix = learning_rate * activation_input * dE_dW - return [weight_change_matrix, dE_dW] + return convert_all_elements_to_np_array([weight_change_matrix, dE_dW]) diff --git a/psyneulink/core/components/functions/nonstateful/optimizationfunctions.py b/psyneulink/core/components/functions/nonstateful/optimizationfunctions.py index 047033a548e..0fc795ff931 100644 --- a/psyneulink/core/components/functions/nonstateful/optimizationfunctions.py +++ b/psyneulink/core/components/functions/nonstateful/optimizationfunctions.py @@ -54,7 +54,7 @@ OPTIMIZATION_FUNCTION_TYPE, OWNER, VALUE from psyneulink.core.globals.parameters import Parameter, check_user_specified from psyneulink.core.globals.sampleiterator import SampleIterator -from psyneulink.core.globals.utilities import call_with_pruned_args +from psyneulink.core.globals.utilities import call_with_pruned_args, convert_to_np_array __all__ = ['OptimizationFunction', 'GradientOptimization', 'GridSearch', 'GaussianProcess', 'ASCENT', 'DESCENT', 'DIRECTION', 'MAXIMIZE', 'MINIMIZE', 'OBJECTIVE_FUNCTION', 
'SEARCH_FUNCTION', @@ -76,9 +76,11 @@ class OptimizationFunctionError(FunctionError): def _num_estimates_getter(owning_component, context): if owning_component.parameters.randomization_dimension._get(context) is None: - return 1 + return np.array(1) else: - return owning_component.parameters.search_space._get(context)[owning_component.randomization_dimension].num + return np.array( + owning_component.parameters.search_space._get(context)[owning_component.randomization_dimension].num + ) class OptimizationFunction(Function_Base): @@ -576,7 +578,7 @@ def reset( if SEARCH_SPACE in self._unspecified_args: del self._unspecified_args[self._unspecified_args.index(SEARCH_SPACE)] if randomization_dimension is not None: - self.parameters.randomization_dimension._set(randomization_dimension, context) + self.parameters.randomization_dimension._set(np.asarray(randomization_dimension), context) def _function(self, variable=None, @@ -677,7 +679,7 @@ def _get_builtin_dtype(dtype): try: initial_value = self.owner.objective_mechanism.parameters.value._get(context) except AttributeError: - initial_value = 0 + initial_value = np.array(0) last_sample, last_value, all_samples, all_values = self._sequential_evaluate(initial_sample, initial_value, @@ -691,7 +693,7 @@ def _get_builtin_dtype(dtype): self.parameters.num_estimates._get(context) is not None: # Reshape all_values so that aggregation can be performed over randomization dimension - num_estimates = int(self.parameters.num_estimates._get(context)) + num_estimates = np.array(int(self.parameters.num_estimates._get(context))) num_evals = np.prod([d.num for d in self.search_space]) num_param_combs = num_evals // num_estimates @@ -824,7 +826,7 @@ def _is_static(it:SampleIterator): state_features = ocm.parameters.state_feature_values._get(context) inputs, num_inputs_sets = ocm.agent_rep._parse_run_inputs(state_features, context) - num_evals = np.prod([d.num for d in self.search_space]) + num_evals = np.prod([d._num for d in self.search_space]) # Map allocations to values comp_exec = pnlvm.execution.CompExecution(ocm.agent_rep, [context.execution_id]) @@ -849,9 +851,9 @@ def _traverse_grid(self, variable, sample_num, context=None): This is assigned as the `search_function ` of the `OptimizationFunction`. """ if self.is_initializing: - return [signal.start for signal in self.search_space] + return convert_to_np_array([signal._start for signal in self.search_space]) try: - sample = next(self.grid) + sample = np.asarray(next(self.grid)) except StopIteration: raise OptimizationFunctionError("Expired grid in {} run from {} " "(execution_count: {}; num_iterations: {})". 
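
A schematic sketch of the num_estimates / num_evals bookkeeping in the hunks above, with made-up sizes (the actual reshape and aggregation order inside PsyNeuLink may differ): two control-signal dimensions of 3 and 4 samples plus a randomization dimension of 5 estimates give 60 evaluations, which group into 12 control allocations of 5 estimates each before aggregation.

    import numpy as np

    search_space_nums = [3, 4, 5]         # hypothetical; 5 = num_estimates (randomization dimension)
    num_estimates = search_space_nums[-1]

    num_evals = np.prod(search_space_nums)        # 60 evaluations over the full grid
    num_param_combs = num_evals // num_estimates  # 12 distinct control allocations

    # One net-outcome value per evaluation; assuming (for this sketch) that the
    # estimates for a given allocation are contiguous, grouping them lets the
    # randomization dimension be aggregated (e.g. averaged) per allocation.
    all_values = np.arange(num_evals, dtype=float)
    per_allocation = all_values.reshape(num_param_combs, num_estimates)
    print(per_allocation.mean(axis=1).shape)      # (12,) -- one aggregated value per allocation
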
@@ -1180,9 +1182,9 @@ class Parameters(OptimizationFunction.Parameters): def _parse_direction(self, direction): if direction == ASCENT: - return 1 + return np.array(1) else: - return -1 + return np.array(-1) @check_user_specified @beartype diff --git a/psyneulink/core/components/functions/stateful/memoryfunctions.py b/psyneulink/core/components/functions/stateful/memoryfunctions.py index 4b8a69a3e36..2fb8ae9411b 100644 --- a/psyneulink/core/components/functions/stateful/memoryfunctions.py +++ b/psyneulink/core/components/functions/stateful/memoryfunctions.py @@ -663,7 +663,7 @@ class ContentAddressableMemory(MemoryFunction): # ------------------------------ >>> c = ContentAddressableMemory(default_variable=[[0,0],[0,0,0]]) >>> c([[1,2]]) - [array([0, 0])] + array([[0, 0]]) Since `memory ` was not intialized, the first call to the Function returns an array of zeros, formatted as specified in **defaul_variable**. However, the input in the call to the Function @@ -1536,7 +1536,9 @@ def _validate_entry(self, entry:Union[list, np.ndarray], context) -> None: f"for memory of '{self.name}{owner_name}'; should be: {field_shapes[i]}.") def uniform_entry(self, value:Union[int, float], context) -> np.ndarray: - return [np.full(i,value) for i in self.parameters.memory_field_shapes._get(context)] + return convert_all_elements_to_np_array( + [np.full(i, value) for i in self.parameters.memory_field_shapes._get(context)] + ) @handle_external_context() def get_memory(self, cue:Union[list, np.ndarray], field_weights=None, context=None) -> np.ndarray: @@ -1771,7 +1773,7 @@ def _get_distance(self, cue:Union[list, np.ndarray], if not any([item is None or np.asarray(item).size == 0 for item in [cue[i], candidate[i], field_weights[i]]]): distances_by_field[i] = distance_fct([cue[i], candidate[i]]) * field_weights[i] - return list(distances_by_field) + return distances_by_field elif granularity == 'full_entry': # Use first element as scalar if it is a homogenous array (i.e., all elements are the same) @@ -2856,6 +2858,8 @@ def _store_memory(self, memory:Union[list, np.ndarray], context): if len(d[KEYS]) > self.max_entries: d = np.delete(d, [KEYS], axis=1) + d = convert_all_elements_to_np_array(d) + self.parameters.previous_value._set(d,context) self._memory = d diff --git a/psyneulink/core/components/mechanisms/mechanism.py b/psyneulink/core/components/mechanisms/mechanism.py index 26838496bb5..8cf07ccb02c 100644 --- a/psyneulink/core/components/mechanisms/mechanism.py +++ b/psyneulink/core/components/mechanisms/mechanism.py @@ -1669,13 +1669,23 @@ def _parse_input_ports(self, input_ports): else: spec_list.append(port) + if is_numeric(spec_list): + spec_list = convert_all_elements_to_np_array(spec_list) + return spec_list def _parse_output_ports(self, output_ports): - if output_ports is not None and not isinstance(output_ports, list): - return [output_ports] - else: - return output_ports + if ( + output_ports is not None + and not isinstance(output_ports, list) + and not (isinstance(output_ports, np.ndarray) and output_ports.ndim > 0) + ): + output_ports = [output_ports] + + if is_numeric(output_ports): + output_ports = convert_all_elements_to_np_array(output_ports) + + return output_ports # def __new__(cls, *args, **kwargs): # def __new__(cls, name=NotImplemented, params=NotImplemented, context=None): @@ -2145,8 +2155,10 @@ def _instantiate_function(self, function, function_params=None, context=None): default_weights = default_weights or np.ones(len(self.input_ports)) # Assign any weights specified in input_port 
spec - weights = [[input_port.defaults.weight if input_port.defaults.weight is not None else default_weight] - for input_port, default_weight in zip(self.input_ports, default_weights)] + weights = convert_to_np_array([ + [input_port.defaults.weight if input_port.defaults.weight is not None else default_weight] + for input_port, default_weight in zip(self.input_ports, default_weights) + ]) self.function.parameters.weights._set(weights, context) if ( @@ -2168,14 +2180,14 @@ def _instantiate_function(self, function, function_params=None, context=None): default_exponents = default_exponents or np.ones(len(self.input_ports)) # Assign any exponents specified in input_port spec - exponents = [ + exponents = convert_to_np_array([ [ input_port.parameters.exponent._get(context) if input_port.parameters.exponent._get(context) is not None else default_exponent ] for input_port, default_exponent in zip(self.input_ports, default_exponents) - ] + ]) self.function.parameters.exponents._set(exponents, context) # this may be removed when the restriction making all Mechanism values 2D np arrays is lifted @@ -2560,7 +2572,7 @@ def execute(self, # MANAGE MAX_EXECUTIONS_BEFORE_FINISHED AND DETERMINE WHETHER TO BREAK max_executions = self.parameters.max_executions_before_finished._get(context) - num_executions = self.parameters.num_executions_before_finished._get(context) + 1 + num_executions = np.asarray(self.parameters.num_executions_before_finished._get(context) + 1) self.parameters.num_executions_before_finished._set(num_executions, override=True, context=context) diff --git a/psyneulink/core/components/mechanisms/modulatory/control/controlmechanism.py b/psyneulink/core/components/mechanisms/modulatory/control/controlmechanism.py index a72e8dfc6f2..4f77c962ef0 100644 --- a/psyneulink/core/components/mechanisms/modulatory/control/controlmechanism.py +++ b/psyneulink/core/components/mechanisms/modulatory/control/controlmechanism.py @@ -713,13 +713,11 @@ def validate_monitored_port_spec(owner, spec_list): def _control_mechanism_costs_getter(owning_component=None, context=None): # NOTE: In cases where there is a reconfiguration_cost, that cost is not returned by this method try: - costs = [ - convert_to_np_array( - c.compute_costs(c.parameters.value._get(context), context=context) - ) + costs = convert_all_elements_to_np_array([ + c.compute_costs(c.parameters.value._get(context), context=context) for c in owning_component.control_signals if hasattr(c, 'compute_costs') - ] # GatingSignals don't have cost fcts + ]) # GatingSignals don't have cost fcts return costs except TypeError: @@ -742,11 +740,13 @@ def _net_outcome_getter(owning_component=None, context=None): c.combine_costs() ) except TypeError: - return [0] + return np.array([0]) def _control_allocation_getter(owning_component=None, context=None): try: - return [v.parameters.variable._get(context) for v in owning_component.control_signals] + return convert_to_np_array([ + v.parameters.variable._get(context) for v in owning_component.control_signals + ]) except (TypeError, AttributeError): return owning_component.defaults.control_allocation @@ -1603,7 +1603,7 @@ def _instantiate_input_ports(self, input_ports=None, context=None): other_input_port_value_sizes = self._handle_arg_input_ports(other_input_ports)[0] # Construct full list of InputPort specifications and sizes input_ports = self.input_ports + other_input_ports - input_port_value_sizes = [[0]] + other_input_port_value_sizes + input_port_value_sizes = convert_all_elements_to_np_array([[0]] + 
other_input_port_value_sizes) super()._instantiate_input_ports(context=context, input_ports=input_ports, reference_value=input_port_value_sizes) diff --git a/psyneulink/core/components/mechanisms/modulatory/control/optimizationcontrolmechanism.py b/psyneulink/core/components/mechanisms/modulatory/control/optimizationcontrolmechanism.py index 5c26d6c0c50..7ad0ea9c412 100644 --- a/psyneulink/core/components/mechanisms/modulatory/control/optimizationcontrolmechanism.py +++ b/psyneulink/core/components/mechanisms/modulatory/control/optimizationcontrolmechanism.py @@ -1111,7 +1111,7 @@ from psyneulink.core.globals.preferences.preferenceset import PreferenceLevel from psyneulink.core.globals.registry import rename_instance_in_registry from psyneulink.core.globals.sampleiterator import SampleIterator, SampleSpec -from psyneulink.core.globals.utilities import convert_to_list, ContentAddressableList, is_numeric, object_has_single_value, try_extract_0d_array_item +from psyneulink.core.globals.utilities import convert_to_list, convert_to_np_array, ContentAddressableList, is_numeric, object_has_single_value, try_extract_0d_array_item from psyneulink.core.llvm.debug import debug_env __all__ = [ @@ -2984,7 +2984,7 @@ def _create_randomization_control_signal(self, context): modulation=OVERRIDE, cost_options=CostFunctions.NONE, # FIXME: Hack that Jan found to prevent some LLVM runtime errors - default_allocation=[num_estimates]) + default_allocation=np.array([num_estimates])) randomization_control_signal = self._instantiate_control_signal(randomization_control_signal, context) randomization_control_signal_index = len(self.output_ports) randomization_control_signal._variable_spec = (OWNER_VALUE, randomization_control_signal_index) @@ -3076,12 +3076,12 @@ def _execute(self, variable=None, context=None, runtime_params=None)->np.ndarray """ if self.is_initializing: - return [defaultControlAllocation] + return np.asarray([defaultControlAllocation]) # Assign default control_allocation if it is not yet specified (presumably first trial) control_allocation = self.parameters.control_allocation._get(context) if control_allocation is None: - control_allocation = [c.defaults.variable for c in self.control_signals] + control_allocation = convert_to_np_array([c.defaults.variable for c in self.control_signals]) # Give the agent_rep a chance to adapt based on last trial's state_feature_values and control_allocation if hasattr(self.agent_rep, "adapt"): diff --git a/psyneulink/core/components/mechanisms/modulatory/learning/learningmechanism.py b/psyneulink/core/components/mechanisms/modulatory/learning/learningmechanism.py index 1b71f931247..630e1963635 100644 --- a/psyneulink/core/components/mechanisms/modulatory/learning/learningmechanism.py +++ b/psyneulink/core/components/mechanisms/modulatory/learning/learningmechanism.py @@ -592,7 +592,7 @@ from psyneulink.core.globals.parameters import FunctionParameter, Parameter, check_user_specified from psyneulink.core.globals.preferences.basepreferenceset import ValidPrefSet from psyneulink.core.globals.preferences.preferenceset import PreferenceLevel -from psyneulink.core.globals.utilities import ContentAddressableList, convert_to_np_array, is_numeric, ValidParamSpecType, \ +from psyneulink.core.globals.utilities import ContentAddressableList, convert_all_elements_to_np_array, convert_to_np_array, is_numeric, ValidParamSpecType, \ convert_to_list __all__ = [ @@ -1489,9 +1489,11 @@ def _execute( if (self.in_composition and isinstance(self.function, BackPropagation) and 
self.initialization_status == ContextFlags.INITIALIZING): - return [0 * summed_learning_signal, 0 * summed_error_signal] + return convert_all_elements_to_np_array( + [np.zeros(summed_learning_signal.shape), np.zeros(summed_error_signal.shape)] + ) - return [summed_learning_signal, summed_error_signal] + return convert_all_elements_to_np_array([summed_learning_signal, summed_error_signal]) @property def input_source(self): diff --git a/psyneulink/core/components/mechanisms/processing/objectivemechanism.py b/psyneulink/core/components/mechanisms/processing/objectivemechanism.py index 7ee1c7e1f3a..1599b540756 100644 --- a/psyneulink/core/components/mechanisms/processing/objectivemechanism.py +++ b/psyneulink/core/components/mechanisms/processing/objectivemechanism.py @@ -367,6 +367,7 @@ from collections import namedtuple from collections.abc import Iterable +import numpy as np from beartype import beartype from psyneulink._typing import Optional, Union @@ -766,13 +767,19 @@ def _instantiate_function_weights_and_exponents(self, context=None): if WEIGHTS in self.function.parameters: if any(weight is not None for weight in weights): self.function.parameters.weights._set( - [[weight or DEFAULT_WEIGHT] for weight in weights], + np.asarray([ + [weight if weight is not None else DEFAULT_WEIGHT] + for weight in weights + ]), context ) if EXPONENTS in self.function.parameters: if any(exponent is not None for exponent in exponents): self.function.parameters.exponents._set( - [[exponent or DEFAULT_EXPONENT] for exponent in exponents], + np.asarray([ + [exponent if exponent is not None else DEFAULT_EXPONENT] + for exponent in exponents + ]), context ) assert True diff --git a/psyneulink/core/components/mechanisms/processing/transfermechanism.py b/psyneulink/core/components/mechanisms/processing/transfermechanism.py index e12864489ab..5ecbc2826d9 100644 --- a/psyneulink/core/components/mechanisms/processing/transfermechanism.py +++ b/psyneulink/core/components/mechanisms/processing/transfermechanism.py @@ -670,7 +670,7 @@ >>> my_mech.execute([0.5, 1]) array([[0.46875, 0.9375 ]]) >>> my_mech.num_executions_before_finished - 4 + array(4) Here, ``my_mech`` continued to execute for ``5`` times, until the element of the Mechanism's `value ` with the greatest value exceeded ``0.9``. 
Note that GREATER_THAN_EQUAL is a keyword for @@ -694,7 +694,7 @@ >>> my_mech.execute([0.5, 1]) array([[0.375, 0.75 ]]) >>> my_mech.num_executions_before_finished - 2 + array(2) As noted `above `, it will continue to execute if it is called again, but only once per call:: @@ -702,11 +702,11 @@ >>> my_mech.execute([0.5, 1]) array([[0.4375, 0.875 ]]) >>> my_mech.num_executions_before_finished - 1 + array(1) >>> my_mech.execute([0.5, 1]) array([[0.46875, 0.9375 ]]) >>> my_mech.num_executions_before_finished - 1 + array(1) In the following example, this behavior is exploited to allow a recurrent form of TransferMechanism (``attention``) to integrate for a fixed number of steps (e.g., to simulate the time taken to encode an instruction regarding the @@ -1791,6 +1791,7 @@ def is_finished(self, context=None): previous_value = self.parameters.value.get_previous(context) status = measure([value, previous_value]) + status = convert_all_elements_to_np_array(status) self.parameters.termination_measure_value._set(status, context=context, override=True) # comparator = self.parameters.termination_comparison_op._get(context) diff --git a/psyneulink/core/components/ports/inputport.py b/psyneulink/core/components/ports/inputport.py index 471d209036c..07b90a33684 100644 --- a/psyneulink/core/components/ports/inputport.py +++ b/psyneulink/core/components/ports/inputport.py @@ -1330,7 +1330,7 @@ def _parse_port_specific_specs(self, owner, port_dict, port_specific_spec, conte else: raise AttributeError(DEFER_VARIABLE_SPEC_TO_MECH_MSG) else: - port_dict[VARIABLE] = variable + port_dict[VARIABLE] = np.asarray(variable) except InputPortError: raise InputPortError(f"Tuple specification in {InputPort.__name__} specification dictionary for " @@ -1508,7 +1508,7 @@ def _get_port_function_value(owner, function, variable): ) and isinstance(variable, np.matrix) ): - variable = [variable] + variable = np.asarray([variable]) # if function is None, use Port's default function function = function or InputPort.defaults.function @@ -1554,7 +1554,8 @@ def _instantiate_input_ports(owner, input_ports=None, reference_value=None, cont # This allows method to be called by Mechanism.add_input_ports() with set of user-specified input_ports, # while calls from init_methods continue to use owner.input_ports (i.e., InputPort specifications # assigned in the **input_ports** argument of the Mechanism's constructor) - input_ports = input_ports or owner.input_ports + if input_ports is None or len(input_ports) == 0: + input_ports = owner.input_ports # Parse any SHADOW_INPUTS specs into actual InputPorts to be shadowed if input_ports is not None: diff --git a/psyneulink/core/components/ports/modulatorysignals/controlsignal.py b/psyneulink/core/components/ports/modulatorysignals/controlsignal.py index 4880bfe91cf..6a6513a9901 100644 --- a/psyneulink/core/components/ports/modulatorysignals/controlsignal.py +++ b/psyneulink/core/components/ports/modulatorysignals/controlsignal.py @@ -382,7 +382,7 @@ as shown below:: >>> comp.run(inputs={mech:[3]}, num_trials=2) - [array([3.])] + array([[3.]]) >>> ctl_mech_A.control_signals[0].intensity_cost array([8103.08392758]) diff --git a/psyneulink/core/components/ports/outputport.py b/psyneulink/core/components/ports/outputport.py index 97a492e0add..1ff84d2c9dc 100644 --- a/psyneulink/core/components/ports/outputport.py +++ b/psyneulink/core/components/ports/outputport.py @@ -639,7 +639,7 @@ from psyneulink.core.globals.preferences.basepreferenceset import ValidPrefSet from 
psyneulink.core.globals.preferences.preferenceset import PreferenceLevel from psyneulink.core.globals.utilities import \ - convert_to_np_array, is_numeric, iscompatible, make_readonly_property, recursive_update, parse_valid_identifier + convert_all_elements_to_np_array, convert_to_np_array, is_numeric, iscompatible, make_readonly_property, recursive_update, parse_valid_identifier __all__ = [ 'OutputPort', 'OutputPortError', 'PRIMARY', 'SEQUENTIAL', 'StandardOutputPorts', 'StandardOutputPortsError', @@ -743,11 +743,15 @@ def parse_variable_spec(spec): variable = [variable] if len(variable)== 1: - return parse_variable_spec(variable[0]) + fct_variable = parse_variable_spec(variable[0]) + else: + fct_variable = [] + for spec in variable: + fct_variable.append(parse_variable_spec(spec)) + + if fct_variable is not None: + fct_variable = convert_all_elements_to_np_array(fct_variable) - fct_variable = [] - for spec in variable: - fct_variable.append(parse_variable_spec(spec)) return fct_variable @@ -1205,6 +1209,8 @@ def _get_port_function_value(owner, function, variable): fct_variable = owner.function(owner.defaults.variable)[0] except AttributeError: fct_variable = None + else: + fct_variable = np.asarray(fct_variable) elif type(fct_variable) is str: is_PARAMS_DICT = fct_variable == PARAMS_DICT diff --git a/psyneulink/core/components/ports/port.py b/psyneulink/core/components/ports/port.py index 0750ea18847..fe34fe2c9f5 100644 --- a/psyneulink/core/components/ports/port.py +++ b/psyneulink/core/components/ports/port.py @@ -806,7 +806,7 @@ def test_multiple_modulatory_projections_with_mech_and_port_Name_specs(self): from psyneulink.core.globals.registry import register_category from psyneulink.core.globals.socket import ConnectionInfo from psyneulink.core.globals.utilities import \ - ContentAddressableList, convert_to_np_array, get_args, is_value_spec, iscompatible, \ + ContentAddressableList, convert_all_elements_to_np_array, convert_to_np_array, get_args, is_numeric, is_value_spec, iscompatible, \ MODULATION_OVERRIDE, try_extract_0d_array_item, type_match __all__ = [ @@ -845,6 +845,17 @@ def _is_port_class(spec): return False +def match_modulation_to_value(modulatory_value, reference_value): + modulatory_value = type_match(modulatory_value, type(reference_value)) + + # mimics cast to float when default is a float, which previously + # happened in type_match + if isinstance(reference_value, np.ndarray) and reference_value.ndim == 0: + modulatory_value = modulatory_value.reshape(reference_value.shape) + + return modulatory_value + + # Note: This is created only for assignment of default projection types for each Port subclass (see .__init__.py) # Individual portRegistries (used for naming) are created for each Mechanism PortRegistry = {} @@ -1464,7 +1475,7 @@ def _instantiate_projections_to_port(self, projections, context=None): elif isinstance(projection, ModulatoryProjection_Base): mod_spec, mod_param_name, mod_param_value = self._get_modulated_param(projection, context=context) # Match the projection's value with the value of the function parameter - mod_proj_spec_value = type_match(projection.defaults.value, type(mod_param_value)) + mod_proj_spec_value = match_modulation_to_value(projection.defaults.value, mod_param_value) if (mod_param_value is not None and not iscompatible(mod_param_value, mod_proj_spec_value)): raise PortError(f"Output of function for {projection.name} ({projection.defaults.value}) " @@ -1768,7 +1779,7 @@ def _get_receiver_port(spec): # Match the projection's value with 
the value of the function parameter # should be defaults.value? try: - mod_proj_spec_value = type_match(projection.value, type(mod_param_value)) + mod_proj_spec_value = match_modulation_to_value(projection.value, mod_param_value) except TypeError as error: raise PortError(f"The value for {self.name} of {self.owner.name} ({projection.value}) does " f"not match the format ({mod_param_value}) of the Parameter it modulates " @@ -2091,12 +2102,12 @@ def set_projection_value(projection, value, context): # Otherwise, for efficiency, assign first OVERRIDE value encountered and return else: # FIX 5/8/20 [JDC]: SHOULD THIS USE set_projection_value()?? - self.parameters.value._set(type_match(projection_value, type(self.defaults.value)), context) + self.parameters.value._set(match_modulation_to_value(projection_value, self.defaults.value), context) return OVERRIDE else: try: - mod_value = type_match(projection_value, type(mod_param_value)) - except TypeError: + mod_value = match_modulation_to_value(projection_value, mod_param_value) + except ValueError: # if type_match fails, assume that the computation is # valid further down the line. This was implicitly true # before adding this catch block by manually setting the @@ -2114,7 +2125,7 @@ def set_projection_value(projection, value, context): # KDM 6/20/18: consider defining exactly when and how type_match occurs, now it seems # a bit handwavy just to make stuff work # FIX 5/8/20 [JDC]: SHOULD THIS USE set_projection_value()?? - self.parameters.value._set(type_match(modulatory_override[1], type(self.defaults.value)), context) + self.parameters.value._set(match_modulation_to_value(modulatory_override[1], self.defaults.value), context) return OVERRIDE # AGGREGATE ModulatoryProjection VALUES ----------------------------------------------------------------------- @@ -2170,7 +2181,7 @@ def _get_modulated_param(self, mod_proj, receiver=None, context=None): if mod_spec in {OVERRIDE, DISABLE}: mod_param_name = mod_proj.receiver.name - mod_param_value = mod_proj.sender.parameters.value.get(context) + mod_param_value = mod_proj.sender.parameters.value._get(context) else: mod_param = getattr(receiver.function.parameters, mod_spec) try: @@ -2179,7 +2190,7 @@ def _get_modulated_param(self, mod_proj, receiver=None, context=None): mod_param_name = mod_param.name # Get the value of the modulated parameter - mod_param_value = getattr(receiver.function.parameters, mod_spec).get(context) + mod_param_value = getattr(receiver.function.parameters, mod_spec)._get(context) return mod_spec, mod_param_name, mod_param_value @@ -2192,15 +2203,17 @@ def _get_combined_mod_val(self, mod_param_name, values): aliases = getattr(self.function.parameters, mod_param_name).aliases if comb_fct==MULTIPLICATIVE or any(mod_spec in aliases for mod_spec in {MULTIPLICATIVE, MULTIPLICATIVE_PARAM}): - return np.prod(np.array(values), axis=0) - if comb_fct==ADDITIVE or any(mod_spec in aliases for mod_spec in {MULTIPLICATIVE, ADDITIVE_PARAM}): - return np.sum(np.array(values), axis=0) + res = np.prod(np.array(values), axis=0) + elif comb_fct == ADDITIVE or any(mod_spec in aliases for mod_spec in {MULTIPLICATIVE, ADDITIVE_PARAM}): + res = np.sum(np.array(values), axis=0) elif isinstance(comb_fct, is_function_type): - return comb_fct(values) + res = comb_fct(values) else: assert False, f'PROGRAM ERROR: modulation_combination_function not properly specified ' \ f'for {mod_param_name} {Parameter.__name__} of {self.name}' + return convert_all_elements_to_np_array(res) + @abc.abstractmethod def 
_get_variable_from_projections(self, context=None): """ @@ -3362,6 +3375,9 @@ def _parse_port_spec(port_type=None, else: port_dict[VARIABLE] = port_dict[REFERENCE_VALUE] + if is_numeric(port_dict[VARIABLE]): + port_dict[VARIABLE] = convert_all_elements_to_np_array(port_dict[VARIABLE]) + # get the Port's value from the spec function if it exists, # otherwise we can assume there is a default function that does not # affect the shape, so it matches variable diff --git a/psyneulink/core/compositions/composition.py b/psyneulink/core/compositions/composition.py index e18c61e479a..dd55d9af6f3 100644 --- a/psyneulink/core/compositions/composition.py +++ b/psyneulink/core/compositions/composition.py @@ -2956,7 +2956,7 @@ def input_function(env, result): from psyneulink.core.globals.preferences.preferenceset import PreferenceLevel, _assign_prefs from psyneulink.core.globals.registry import register_category from psyneulink.core.globals.utilities import ContentAddressableList, call_with_pruned_args, convert_all_elements_to_np_array, convert_to_list, \ - nesting_depth, convert_to_np_array, is_numeric, is_matrix, is_matrix_keyword, parse_valid_identifier + nesting_depth, convert_to_np_array, is_numeric, is_matrix, is_matrix_keyword, parse_valid_identifier, extended_array_equal from psyneulink.core.scheduling.condition import All, AllHaveRun, Always, Any, Condition, Never, AtNCalls, BeforeNCalls from psyneulink.core.scheduling.scheduler import Scheduler, SchedulingMode from psyneulink.core.scheduling.time import Time, TimeScale @@ -9732,7 +9732,9 @@ def get_controller(comp): ) # Get control signal costs - other_costs = controller.parameters.costs._get(context) or [] + other_costs = controller.parameters.costs._get(context) + if other_costs is None: + other_costs = [] all_costs = convert_to_np_array(other_costs + [reconfiguration_cost]) # Compute a total for the candidate control signal(s) total_cost = controller.combine_costs(all_costs) @@ -9842,14 +9844,16 @@ def evaluate( if buffer_animate_state: self._animate = buffer_animate_state - assert result == self.get_output_values(context) + assert extended_array_equal(result, self.get_output_values(context)) # Store simulation results on "base" composition if self.initialization_status != ContextFlags.INITIALIZING: try: self.parameters.simulation_results._get(base_context).append(result) except AttributeError: - self.parameters.simulation_results._set([result], base_context) + self.parameters.simulation_results._set( + convert_to_np_array([result]), base_context + ) # COMPUTE net_outcome and aggregate in net_outcomes @@ -11080,10 +11084,14 @@ def run( node.parameters.num_executions._get(context)._set_by_time_scale(TimeScale.RUN, 0) if ContextFlags.SIMULATION_MODE not in context.runmode: + if is_numeric(inputs): + _input_spec = convert_all_elements_to_np_array(inputs) + else: + _input_spec = inputs try: - self.parameters.input_specification._set(copy(inputs), context) + self.parameters.input_specification._set(copy(_input_spec), context) except: - self.parameters.input_specification._set(inputs, context) + self.parameters.input_specification._set(_input_spec, context) # May be used by controller for specifying num_trials_per_simulation self.num_trials = num_trials @@ -11210,6 +11218,8 @@ def run( results = self.parameters.results._get(context) if results is None: results = [] + else: + results = list(results) is_simulation = (context is not None and ContextFlags.SIMULATION_MODE in context.runmode) @@ -11247,7 +11257,7 @@ def run( assert False, "Unknown 
execution mode: {}".format(execution_mode) # Update the parameter for results - self.parameters.results._set(results, context) + self.parameters.results._set(convert_to_np_array(results), context) if self._is_learning(context): # copies back matrix to pnl from state struct after learning @@ -11354,7 +11364,7 @@ def run( result_copy = trial_output results.append(result_copy) - self.parameters.results._set(results, context) + self.parameters.results._set(convert_to_np_array(results), context) if not self.parameters.retain_old_simulation_data._get(): if self.controller is not None: @@ -12288,6 +12298,7 @@ def execute( # Set current Python values to LLVM results data = _comp_ex.extract_frozen_node_output(data_loc) for op, v in zip(srnode.output_ports, data): + v = convert_all_elements_to_np_array(v) op.parameters.value._set( v, context, skip_history=True, skip_log=True) @@ -12476,9 +12487,9 @@ def __call__(self, *args, **kwargs): This allows Composition, after it has been constructed, to be run simply by calling it directly. """ if not args and not kwargs: - if self.results: + try: return self.results[-1] - else: + except IndexError: return None elif (args and isinstance(args[0],dict)) or INPUTS in kwargs: from psyneulink.core.compositions.pathway import PathwayRole @@ -12728,7 +12739,10 @@ def get_results_by_nodes(self, # Get labels for corresponding values values = [node.labeled_output_values for node in output_nodes] else: - values = self.results[-1] or self.output_values + if len(self.results) > 0 and len(self.results[-1]) > 0: + values = self.results[-1] + else: + values = self.output_values full_output_set = zip(output_nodes, values) @@ -13271,9 +13285,11 @@ def output_values(self): return self.get_output_values(self.most_recent_context) def get_output_values(self, context=None): - return [output_port.parameters.value.get(context) - for output_port in self.output_CIM.output_ports - if (not self.output_CIM._sender_is_probe(output_port) or self.include_probes_in_output)] + return convert_all_elements_to_np_array([ + output_port.parameters.value.get(context) + for output_port in self.output_CIM.output_ports + if (not self.output_CIM._sender_is_probe(output_port) or self.include_probes_in_output) + ]) @property def shadowing_dict(self): diff --git a/psyneulink/core/compositions/parameterestimationcomposition.py b/psyneulink/core/compositions/parameterestimationcomposition.py index 3783c2fcdc5..0616bb8c900 100644 --- a/psyneulink/core/compositions/parameterestimationcomposition.py +++ b/psyneulink/core/compositions/parameterestimationcomposition.py @@ -187,7 +187,7 @@ ) from psyneulink.core.globals.keywords import BEFORE, OVERRIDE from psyneulink.core.globals.parameters import Parameter, SharedParameter, check_user_specified -from psyneulink.core.globals.utilities import convert_to_list +from psyneulink.core.globals.utilities import convert_all_elements_to_np_array, convert_to_list from psyneulink.core.scheduling.time import TimeScale from psyneulink.core.components.ports.outputport import OutputPort @@ -831,6 +831,7 @@ def run(self, *args, **kwargs): for state_input_port, value in zip( self.controller.state_input_ports, inputs_dict.values() ): + value = convert_all_elements_to_np_array(value) state_input_port.parameters.value._set(value, context) kwargs.pop("inputs", None) diff --git a/psyneulink/core/globals/utilities.py b/psyneulink/core/globals/utilities.py index dfec7aa3f61..9566a040e2a 100644 --- a/psyneulink/core/globals/utilities.py +++ b/psyneulink/core/globals/utilities.py @@ -752,6 
+752,8 @@ def convert_to_list(l): return list(l) elif isinstance(l, set): return list(l) + elif isinstance(l, np.ndarray) and l.ndim > 0: + return list(l) else: return [l] diff --git a/psyneulink/library/components/mechanisms/modulatory/learning/EMstoragemechanism.py b/psyneulink/library/components/mechanisms/modulatory/learning/EMstoragemechanism.py index a3f8ed3f830..3bfdf7d580e 100644 --- a/psyneulink/library/components/mechanisms/modulatory/learning/EMstoragemechanism.py +++ b/psyneulink/library/components/mechanisms/modulatory/learning/EMstoragemechanism.py @@ -179,7 +179,7 @@ from psyneulink.core.globals.parameters import Parameter, check_user_specified, FunctionParameter from psyneulink.core.globals.preferences.basepreferenceset import ValidPrefSet from psyneulink.core.globals.preferences.preferenceset import PreferenceLevel -from psyneulink.core.globals.utilities import is_numeric, ValidParamSpecType, all_within_range +from psyneulink.core.globals.utilities import convert_all_elements_to_np_array, is_numeric, all_within_range __all__ = [ 'EMStorageMechanism', 'EMStorageMechanismError', @@ -223,8 +223,10 @@ def _memory_matrix_getter(owning_component=None, context=None)->list: memory_capacity = len(memory[0]) # Reorganize memory so that each row is an entry and each column is a field - return [[memory[j][i] for j in range(num_fields)] - for i in range(memory_capacity)] + return convert_all_elements_to_np_array([ + [memory[j][i] for j in range(num_fields)] + for i in range(memory_capacity) + ]) class EMStorageMechanismError(LearningMechanismError): @@ -757,8 +759,10 @@ def _execute(self, if memory is None or self.is_initializing: if self.is_initializing: # Return existing matrices for field_memories # FIX: THE FOLLOWING DOESN'T TEST FUNCTION: - return [learning_signal.receiver.path_afferents[0].parameters.matrix.get() - for learning_signal in self.learning_signals] + return convert_all_elements_to_np_array([ + learning_signal.receiver.path_afferents[0].parameters.matrix.get() + for learning_signal in self.learning_signals + ]) # Raise exception if not initializing and memory is not specified else: owner_string = "" @@ -805,4 +809,4 @@ def _execute(self, decay_rate=decay_rate, context=context, runtime_params=runtime_params)) - return value + return convert_all_elements_to_np_array(value) diff --git a/psyneulink/library/components/mechanisms/processing/integrator/episodicmemorymechanism.py b/psyneulink/library/components/mechanisms/processing/integrator/episodicmemorymechanism.py index 2feec8d56e6..ab53a4aff54 100644 --- a/psyneulink/library/components/mechanisms/processing/integrator/episodicmemorymechanism.py +++ b/psyneulink/library/components/mechanisms/processing/integrator/episodicmemorymechanism.py @@ -260,7 +260,7 @@ >>> my_em = EpisodicMemoryMechanism() >>> my_em.execute([[1,2]]) - [array([0, 0])] + array([[0, 0]]) >>> my_em.execute([[2,5]]) array([[1., 2.]]) @@ -281,7 +281,7 @@ >>> my_em = EpisodicMemoryMechanism(default_variable=[[0,0],[0,0,0]]) >>> my_em.execute([[1,2],[3,4,5]]) - [array([0, 0]), array([0, 0, 0])] + array([array([0, 0]), array([0, 0, 0])], dtype=object) As in the previous example, the first execution returns zeros since `memory ` as not been initialized; however, notice that in this case they are formated as specified in **default_variable**. 
Note @@ -297,7 +297,7 @@ >>> my_em = EpisodicMemoryMechanism(size=[2,3]) >>> my_em.execute([[1,2],[3,4,5]]) - [array([0, 0]), array([0, 0, 0])] + array([array([0, 0]), array([0, 0, 0])], dtype=object) Note that each element of **size** specifies the length of a field (see `EpisodicMemoryMechanism_Creation_Default_Variable_and_Size` for additional details). diff --git a/psyneulink/library/components/mechanisms/processing/leabramechanism.py b/psyneulink/library/components/mechanisms/processing/leabramechanism.py index a81fdf7109d..84c8576d96a 100644 --- a/psyneulink/library/components/mechanisms/processing/leabramechanism.py +++ b/psyneulink/library/components/mechanisms/processing/leabramechanism.py @@ -110,6 +110,7 @@ from psyneulink.core.globals.parameters import FunctionParameter, Parameter, check_user_specified from psyneulink.core.globals.preferences.basepreferenceset import REPORT_OUTPUT_PREF from psyneulink.core.globals.preferences.preferenceset import PreferenceEntry, PreferenceLevel +from psyneulink.core.globals.utilities import convert_all_elements_to_np_array from psyneulink.core.scheduling.time import TimeScale __all__ = [ @@ -614,7 +615,9 @@ def run_leabra_network(network, input_pattern): network.set_inputs({'input_layer': input_pattern}) network.set_outputs({}) # clear network._outputs network.trial() - return [unit.act_m for unit in network.layers[-1].units] + return convert_all_elements_to_np_array( + [unit.act_m for unit in network.layers[-1].units] + ) def train_leabra_network(network, input_pattern, output_pattern): @@ -635,7 +638,9 @@ def train_leabra_network(network, input_pattern, output_pattern): network.set_outputs({'output_layer': output_pattern}) network.trial() - return [unit.act_m for unit in network.layers[-1].units] + return convert_all_elements_to_np_array( + [unit.act_m for unit in network.layers[-1].units] + ) # infer whether the network is using the None or 'leabra' training rule diff --git a/psyneulink/library/components/mechanisms/processing/transfer/contrastivehebbianmechanism.py b/psyneulink/library/components/mechanisms/processing/transfer/contrastivehebbianmechanism.py index e4f94caf2d5..a08a6185f18 100644 --- a/psyneulink/library/components/mechanisms/processing/transfer/contrastivehebbianmechanism.py +++ b/psyneulink/library/components/mechanisms/processing/transfer/contrastivehebbianmechanism.py @@ -1179,7 +1179,7 @@ def _execute(self, self.parameters.current_termination_threshold._set( self.parameters.minus_phase_termination_threshold._get(context), context) self.parameters.current_termination_condition._set(self.minus_phase_termination_condition, context) - self.parameters.phase_execution_count._set(0, context) + self.parameters.phase_execution_count._set(np.asarray(0), context) if self.parameters.is_finished_flag._get(context): # if self.parameters.is_finished_._get(context): @@ -1197,7 +1197,7 @@ def _execute(self, runtime_params=runtime_params, ) - self.parameters.phase_execution_count._set(self.parameters.phase_execution_count._get(context) + 1, context) + self.parameters.phase_execution_count._set(np.asarray(self.parameters.phase_execution_count._get(context) + 1), context) current_activity = np.squeeze(current_activity) # Set value of primary OutputPort to current activity @@ -1254,7 +1254,7 @@ def _execute(self, # Switch execution_phase self.parameters.execution_phase._set(not self.parameters.execution_phase._get(context), context) - self.parameters.phase_execution_count._set(0, context) + 
self.parameters.phase_execution_count._set(np.asarray(0), context) return current_activity # return self.current_activity diff --git a/psyneulink/library/components/mechanisms/processing/transfer/recurrenttransfermechanism.py b/psyneulink/library/components/mechanisms/processing/transfer/recurrenttransfermechanism.py index 9ab50f0e097..bd8480db643 100644 --- a/psyneulink/library/components/mechanisms/processing/transfer/recurrenttransfermechanism.py +++ b/psyneulink/library/components/mechanisms/processing/transfer/recurrenttransfermechanism.py @@ -1018,7 +1018,7 @@ def matrix(self, val): # simplified version of standard setter (in Component.py) self.recurrent_projection.parameter_ports["matrix"].function.previous_value = val self.recurrent_projection.parameter_ports["matrix"].function.reset = val - self.parameters.matrix._set(val, self.most_recent_context) + self.parameters.matrix.set(val, self.most_recent_context) @property def auto(self): @@ -1026,7 +1026,7 @@ def auto(self): @auto.setter def auto(self, val): - self.parameters.auto._set(val, self.most_recent_context) + self.parameters.auto.set(val, self.most_recent_context) if self.recurrent_projection is not None and 'hetero' in self._parameter_ports: self.recurrent_projection.parameter_ports["matrix"].function.previous_value = self.matrix @@ -1037,7 +1037,7 @@ def hetero(self): @hetero.setter def hetero(self, val): - self.parameters.hetero._set(val, self.most_recent_context) + self.parameters.hetero.set(val, self.most_recent_context) if self.recurrent_projection is not None and 'auto' in self._parameter_ports: self.recurrent_projection.parameter_ports["matrix"].function.previous_value = self.matrix_param diff --git a/psyneulink/library/compositions/autodiffcomposition.py b/psyneulink/library/compositions/autodiffcomposition.py index 0c313e45128..8ab1b109b37 100644 --- a/psyneulink/library/compositions/autodiffcomposition.py +++ b/psyneulink/library/compositions/autodiffcomposition.py @@ -800,7 +800,7 @@ def autodiff_training(self, inputs, targets, context=None, scheduler=None): idx = component.output_ports.index(port) outputs += [curr_tensor_outputs[component][idx].detach().cpu().numpy().copy().tolist()] - self.parameters.tracked_loss_count._set(self.parameters.tracked_loss_count._get(context=context) + 1, + self.parameters.tracked_loss_count._set(np.array(self.parameters.tracked_loss_count._get(context=context) + 1), context=context, skip_history=True, skip_log=True) @@ -818,11 +818,11 @@ def _update_learning_parameters(self, context): optimizer = self.parameters.optimizer._get(context=context) optimizer.zero_grad() - tracked_loss = self.parameters.tracked_loss._get(context=context) / self.parameters.tracked_loss_count._get(context=context) + tracked_loss = self.parameters.tracked_loss._get(context=context) / int(self.parameters.tracked_loss_count._get(context=context)) tracked_loss.backward(retain_graph=not self.force_no_retain_graph) self.parameters.losses._get(context=context).append(tracked_loss.detach().cpu().numpy()[0]) self.parameters.tracked_loss._set(torch.zeros(1, device=self.device).double(), context=context, skip_history=True, skip_log=True) - self.parameters.tracked_loss_count._set(0, context=context, skip_history=True, skip_log=True) + self.parameters.tracked_loss_count._set(np.array(0), context=context, skip_history=True, skip_log=True) optimizer.step() self.parameters.pytorch_representation._get(context=context).detach_all() self.parameters.pytorch_representation._get(context).copy_weights_to_psyneulink(context) diff 
--git a/psyneulink/library/compositions/emcomposition.py b/psyneulink/library/compositions/emcomposition.py index 5f2eff3b18f..3a6353005b9 100644 --- a/psyneulink/library/compositions/emcomposition.py +++ b/psyneulink/library/compositions/emcomposition.py @@ -896,7 +896,7 @@ from psyneulink.core.globals.keywords import \ (AUTO, CONTROL, DEFAULT_INPUT, DEFAULT_VARIABLE, EM_COMPOSITION, FULL_CONNECTIVITY_MATRIX, GAIN, IDENTITY_MATRIX, MULTIPLICATIVE_PARAM, NAME, PARAMS, PRODUCT, PROJECTIONS, RANDOM, SIZE, VARIABLE) -from psyneulink.core.globals.utilities import is_numeric_scalar +from psyneulink.core.globals.utilities import convert_all_elements_to_np_array, is_numeric_scalar from psyneulink.core.llvm import ExecutionMode @@ -923,8 +923,10 @@ def _memory_getter(owning_component=None, context=None)->list: for retrieved_node in owning_component.retrieved_nodes] # Reorganize memory so that each row is an entry and each column is a field memory_capacity = owning_component.memory_capacity or owning_component.defaults.memory_capacity - return [[memory[j][i] for j in range(owning_component.num_fields)] - for i in range(memory_capacity)] + return convert_all_elements_to_np_array([ + [memory[j][i] for j in range(owning_component.num_fields)] + for i in range(memory_capacity) + ]) def get_softmax_gain(v, scale=1, base=1, entropy_weighting=.1)->float: """Compute the softmax gain (inverse temperature) based on the entropy of the distribution of values. @@ -2000,7 +2002,7 @@ def _construct_field_weight_nodes(self, field_weights, concatenate_keys, use_gat if not concatenate_keys and self.num_keys > 1: if use_gating_for_weighting: - field_weight_nodes = [GatingMechanism(input_ports={VARIABLE: field_weights[i], + field_weight_nodes = [GatingMechanism(input_ports={VARIABLE: np.array(field_weights[i]), PARAMS:{DEFAULT_INPUT: DEFAULT_VARIABLE}, NAME: 'OUTCOME'}, gate=[key_match_pair[1].output_ports[0]], @@ -2009,7 +2011,7 @@ def _construct_field_weight_nodes(self, field_weights, concatenate_keys, use_gat for i, key_match_pair in enumerate(zip(self.query_input_nodes, self.softmax_nodes))] else: - field_weight_nodes = [ProcessingMechanism(input_ports={VARIABLE: field_weights[i], + field_weight_nodes = [ProcessingMechanism(input_ports={VARIABLE: np.array(field_weights[i]), PARAMS:{DEFAULT_INPUT: DEFAULT_VARIABLE}, NAME: 'FIELD_WEIGHT'}, name= 'WEIGHT' if self.num_keys == 1 diff --git a/tests/composition/test_composition.py b/tests/composition/test_composition.py index 171baeae2f8..7769fb751dc 100644 --- a/tests/composition/test_composition.py +++ b/tests/composition/test_composition.py @@ -4214,7 +4214,7 @@ def test_manual_context(self): assert comp.results == [[[1]]] context = pnl.Context() - t.function.parameters.slope._set(2, context) + t.function.parameters.slope._set(np.array(2), context) comp.run({t: [1]}, context=context) assert comp.results == [[[2]]] diff --git a/tests/misc/test_parameters.py b/tests/misc/test_parameters.py index 13369d991d2..214f3fc7f08 100644 --- a/tests/misc/test_parameters.py +++ b/tests/misc/test_parameters.py @@ -128,14 +128,14 @@ def test_aliases_set_alias(obj, param_name, alias_name): def test_parameter_getter(): f = pnl.Linear() - f.parameters.slope.getter = lambda x: x ** 2 + f.parameters.slope.getter = lambda x: np.asarray(x ** 2) assert f.parameters.slope.get(x=3) == 9 def test_parameter_setter(): f = pnl.Linear() - f.parameters.slope.setter = lambda x: x ** 2 + f.parameters.slope.setter = lambda x: np.asarray(x ** 2) f.parameters.slope.set(3) @@ -435,7 +435,7 @@ def 
test_conflict_no_warning( def test_conflict_no_warning_parser(self): # replace with different class/parameter if _parse_noise ever implemented assert not hasattr(pnl.AdaptiveIntegrator.parameters, '_parse_noise') - pnl.AdaptiveIntegrator.parameters._parse_noise = lambda noise: 2 * noise + pnl.AdaptiveIntegrator.parameters._parse_noise = lambda noise: np.array(2 * noise) # pytest doesn't support inverse warning assertion for specific # warning only @@ -660,7 +660,7 @@ def __init__(self, **kwargs): return super().__init__(0, {}, **kwargs) def _function(self, variable=None, context=None, params=None): - return 0 + return np.array(0) pnl.ProcessingMechanism(function=NewF(a=2, b=3, c=4, d=5)) From 564adabbde104f43f05252fb6215b7ba4e8393c1 Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Fri, 14 Jul 2023 04:57:54 +0000 Subject: [PATCH 149/410] tests: auto test all numeric values wrapped in np array --- conftest.py | 18 +++++++++++++++++- tests/components/test_general.py | 27 +++++++++++++++++++++++++++ 2 files changed, 44 insertions(+), 1 deletion(-) diff --git a/conftest.py b/conftest.py index f6251fceba5..7abdcd8ce4c 100644 --- a/conftest.py +++ b/conftest.py @@ -10,7 +10,7 @@ import psyneulink from psyneulink import clear_registry, primary_registries, torch_available from psyneulink.core import llvm as pnlvm -from psyneulink.core.globals.utilities import set_global_seed +from psyneulink.core.globals.utilities import is_numeric, set_global_seed try: import torch @@ -301,7 +301,23 @@ def power_set(s): return (c for l in range(len(vals) + 1) for c in itertools.combinations(vals, l)) +def patch_parameter_set_value_numeric_check(): + orig_parameter_set_value = psyneulink.core.globals.parameters.Parameter._set_value + + def check_numeric_set_value(self, value, **kwargs): + assert isinstance(value, np.ndarray) or not is_numeric(value), ( + f'{self._owner._owner}.{self.name} is being set to a numeric value.' 
+ f' It must first be wrapped in a numpy array:\n\t{value}\n\t{type(value)}' + ) + + return orig_parameter_set_value(self, value, **kwargs) + + psyneulink.core.globals.parameters.Parameter._set_value = check_numeric_set_value + + # flag when run from pytest # https://docs.pytest.org/en/stable/example/simple.html#detect-if-running-from-within-a-pytest-run def pytest_configure(config): psyneulink._called_from_pytest = True + + patch_parameter_set_value_numeric_check() diff --git a/tests/components/test_general.py b/tests/components/test_general.py index 83aa0168f9f..8a12454f517 100644 --- a/tests/components/test_general.py +++ b/tests/components/test_general.py @@ -1,5 +1,6 @@ import inspect import psyneulink as pnl +import numpy as np import pytest import re @@ -63,6 +64,32 @@ def test_constructors_have_check_user_specified(class_): ) +def _numeric_parameter_value_check(class_, param_name, value, descriptor): + descriptor = f'{class_}.parameters.{param_name}{descriptor}' + assert isinstance(value, np.ndarray) or not pnl.is_numeric(value), ( + f'{descriptor} is a numeric value but is not wrapped in a' + f' numpy array:\n\t{value}\n\t{type(value)}' + ) + + +# could parametrize over each parameter for each class instead of +# looping, but this would greatly increase the overall number of tests +# for minimal benefit +@pytest.mark.parametrize('class_', component_classes) +def test_numeric_parameter_values_are_numpy_defaults(class_): + for parameter in class_.parameters: + _numeric_parameter_value_check( + class_, parameter.name, parameter.default_value, ' default_value' + ) + + +@pytest.mark.parametrize('class_', component_classes) +def test_numeric_parameter_values_are_numpy_values(class_): + for parameter in class_.parameters: + for eid, v in parameter.values.items(): + _numeric_parameter_value_check(class_, parameter.name, v, f'.values[{eid}]') + + @pytest.fixture(scope='module') def nested_compositions(): comp = pnl.Composition(name='comp') From 84b55209f91bf1365e120504469e72c1fc9ee51b Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Fri, 26 Apr 2024 00:00:51 +0000 Subject: [PATCH 150/410] Component: always shallow copy owner --- psyneulink/core/components/component.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/psyneulink/core/components/component.py b/psyneulink/core/components/component.py index 868a1d4bd28..19fa865e2ec 100644 --- a/psyneulink/core/components/component.py +++ b/psyneulink/core/components/component.py @@ -1089,7 +1089,7 @@ def _parse_modulable(self, param_name, param_value): # insuring that assignment by one instance will not affect the value of others. name = None - _deepcopy_shared_keys = frozenset([]) + _deepcopy_shared_keys = frozenset(['owner']) @check_user_specified def __init__(self, From 5ea61c01d518e5be02e14221110a7e164057e436 Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Fri, 26 Apr 2024 00:32:41 +0000 Subject: [PATCH 151/410] Component: handle hybrid copy in __deepcopy__ Use SHARED_COMPONENT_TYPES keyword in memo dict to specify object types to be shallow copied. During deepcopy of Components, we still shallow copy most attributes that are Components to treat them like pointers instead of objects. Only do instance checks within Component.__deepcopy__, because this reduces time checking the type of non-Component objects (which we do not need to hybrid copy), will work outside of calls to copy_parameter_value, and avoids the need to manually construct hybrid copies of (possibly nested) iterables. 
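[Editor's note] The memo-based "hybrid" copy described in this commit message can be illustrated with a minimal, self-contained Python sketch. This is only an analogue of the idea, not the PsyNeuLink implementation (that is the Component.__deepcopy__ / copy_parameter_value change in the diff below); the names SHARED_TYPES, Node, and Container are hypothetical. Types listed under a dedicated key in the memo dict are shared by reference, while everything else is deep-copied as usual.

    import copy

    SHARED_TYPES = 'shared_types'  # hypothetical analogue of SHARED_COMPONENT_TYPES

    class Node:
        def __deepcopy__(self, memo):
            # Read (or install) the tuple of types that should be shallow copied.
            shared = memo.setdefault(SHARED_TYPES, (Node,))
            if shared and isinstance(self, shared):
                return self  # treat shared objects as pointers: do not copy
            cls = self.__class__
            new = cls.__new__(cls)
            memo[id(self)] = new  # register before recursing so cycles resolve
            for k, v in self.__dict__.items():
                setattr(new, k, copy.deepcopy(v, memo))
            return new

    class Container:
        def __init__(self, node):
            self.node = node          # a "shared" object
            self.values = [1, 2, 3]   # ordinary data, deep-copied as usual

    original = Container(Node())
    hybrid = copy.deepcopy(original)                    # Node instances are shared
    assert hybrid.node is original.node
    assert hybrid.values is not original.values

    full = copy.deepcopy(original, {SHARED_TYPES: ()})  # empty tuple: share nothing
    assert full.node is not original.node

Seeding the memo dict this way lets callers (and nested __deepcopy__ calls) opt into or out of sharing without a separate keyword argument, which is the design choice the patch below makes with SHARED_COMPONENT_TYPES.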
--- psyneulink/core/components/component.py | 19 ++++----- .../core/components/functions/function.py | 20 +++++----- psyneulink/core/globals/keywords.py | 4 +- psyneulink/core/globals/parameters.py | 40 ++++++++++--------- psyneulink/core/globals/utilities.py | 14 ++----- .../library/compositions/pytorchwrappers.py | 3 +- 6 files changed, 50 insertions(+), 50 deletions(-) diff --git a/psyneulink/core/components/component.py b/psyneulink/core/components/component.py index 19fa865e2ec..50fb9f54274 100644 --- a/psyneulink/core/components/component.py +++ b/psyneulink/core/components/component.py @@ -527,7 +527,7 @@ MODEL_SPEC_ID_INPUT_PORTS, MODEL_SPEC_ID_OUTPUT_PORTS, \ MODEL_SPEC_ID_MDF_VARIABLE, \ MODULATORY_SPEC_KEYWORDS, NAME, OUTPUT_PORTS, OWNER, PARAMS, PREFS_ARG, \ - RESET_STATEFUL_FUNCTION_WHEN, SIZE, VALUE, VARIABLE + RESET_STATEFUL_FUNCTION_WHEN, SIZE, VALUE, VARIABLE, SHARED_COMPONENT_TYPES from psyneulink.core.globals.log import LogCondition from psyneulink.core.globals.parameters import \ Defaults, SharedParameter, Parameter, ParameterAlias, ParameterError, ParametersBase, check_user_specified, copy_parameter_value @@ -1264,15 +1264,16 @@ def __lt__(self, other): return self.name < other.name def __deepcopy__(self, memo): - if 'no_shared' in memo and memo['no_shared']: - shared_types = tuple() - else: - shared_types = (Component, ComponentsMeta) + if SHARED_COMPONENT_TYPES in memo: + if ( + memo[SHARED_COMPONENT_TYPES] + and isinstance(self, memo[SHARED_COMPONENT_TYPES]) + ): + return self + elif 'no_shared' not in memo or not memo['no_shared']: + memo[SHARED_COMPONENT_TYPES] = (Component,) - fun = get_deepcopy_with_shared( - self._deepcopy_shared_keys, - shared_types - ) + fun = get_deepcopy_with_shared(self._deepcopy_shared_keys) newone = fun(self, memo) if newone.parameters is not newone.class_parameters: diff --git a/psyneulink/core/components/functions/function.py b/psyneulink/core/components/functions/function.py index 22a14b59f98..0a33f5a0aa6 100644 --- a/psyneulink/core/components/functions/function.py +++ b/psyneulink/core/components/functions/function.py @@ -669,15 +669,17 @@ def __call__(self, *args, **kwargs): def __deepcopy__(self, memo): new = super().__deepcopy__(memo) - # ensure copy does not have identical name - register_category(new, Function_Base, new.name, FunctionRegistry) - if "random_state" in new.parameters: - # HACK: Make sure any copies are re-seeded to avoid dependent RNG. - # functions with "random_state" param must have "seed" parameter - for ctx in new.parameters.seed.values: - new.parameters.seed.set( - DEFAULT_SEED, ctx, skip_log=True, skip_history=True - ) + + if self is not new: + # ensure copy does not have identical name + register_category(new, Function_Base, new.name, FunctionRegistry) + if "random_state" in new.parameters: + # HACK: Make sure any copies are re-seeded to avoid dependent RNG. 
+ # functions with "random_state" param must have "seed" parameter + for ctx in new.parameters.seed.values: + new.parameters.seed.set( + DEFAULT_SEED, ctx, skip_log=True, skip_history=True + ) return new diff --git a/psyneulink/core/globals/keywords.py b/psyneulink/core/globals/keywords.py index cc69ffecf79..cca991b3591 100644 --- a/psyneulink/core/globals/keywords.py +++ b/psyneulink/core/globals/keywords.py @@ -121,7 +121,7 @@ 'TRIAL', 'TRIALS_DIM', 'UNCHANGED', 'UNIFORM_DIST_FUNCTION', 'USER_DEFINED_FUNCTION', 'USER_DEFINED_FUNCTION_TYPE', 'VALUES', 'VALIDATE', 'VALIDATION', 'VALUE', 'VALUE_ASSIGNMENT', 'VALUE_FUNCTION', 'VARIABLE', 'VARIANCE', - 'VECTOR', 'WALD_DIST_FUNCTION', 'WEIGHT', 'WEIGHTS', 'X_0', 'ZEROS_MATRIX' + 'VECTOR', 'WALD_DIST_FUNCTION', 'WEIGHT', 'WEIGHTS', 'X_0', 'ZEROS_MATRIX', 'SHARED_COMPONENT_TYPES', ] # ********************************************************************************************************************** @@ -1092,3 +1092,5 @@ class Loss(Enum): MODEL_SPEC_ID_SHAPE = 'shape' MODEL_SPEC_ID_INPUT_PORT_COMBINATION_FUNCTION = 'input_combination_function' + +SHARED_COMPONENT_TYPES = 'shared_component_types' diff --git a/psyneulink/core/globals/parameters.py b/psyneulink/core/globals/parameters.py index b3051dce0af..22487fa8b5a 100644 --- a/psyneulink/core/globals/parameters.py +++ b/psyneulink/core/globals/parameters.py @@ -320,11 +320,11 @@ def _recurrent_transfer_mechanism_matrix_setter(value, owning_component=None, co from psyneulink.core.globals.context import Context, ContextError, ContextFlags, _get_time, handle_external_context from psyneulink.core.globals.context import time as time_object +from psyneulink.core.globals.keywords import SHARED_COMPONENT_TYPES from psyneulink.core.globals.log import LogCondition, LogEntry, LogError from psyneulink.core.globals.utilities import ( call_with_pruned_args, convert_all_elements_to_np_array, - copy_iterable_with_shared, create_union_set, get_alias_property_getter, get_alias_property_setter, @@ -403,27 +403,31 @@ def copy_parameter_value(value, shared_types=None, memo=None): e.g. in spec attribute or Parameter `Mechanism.input_ports` """ - from psyneulink.core.components.component import Component, ComponentsMeta + if memo is None: + memo = {} - if shared_types is None: - shared_types = (Component, ComponentsMeta, types.MethodType, types.ModuleType) - else: - shared_types = tuple(shared_types) + if SHARED_COMPONENT_TYPES not in memo: + from psyneulink.core.components.component import Component, ComponentsMeta + if shared_types is None: + shared_types = (Component, ComponentsMeta) + else: + shared_types = tuple(shared_types) + memo[SHARED_COMPONENT_TYPES] = shared_types + + # trying to deepcopy a bound method of a Component will deepcopy the + # Component, but we treat these situations like references. 
+ # ex: GridSearch.search_function = GridSearch._traverse_grid + method_owner = getattr(value, '__self__', None) + if method_owner: + memo[id(method_owner)] = method_owner try: - return copy_iterable_with_shared( - value, - shared_types=shared_types, - memo=memo - ) - except TypeError: - # this will attempt to copy the current object if it - # is referenced in a parameter, such as - # ComparatorMechanism, which does this for input_ports - if not isinstance(value, shared_types): - return copy.deepcopy(value, memo) - else: + return copy.deepcopy(value, memo) + except TypeError as e: + if 'pickle' in str(e): return value + else: + raise def get_init_signature_default_value(obj, parameter): diff --git a/psyneulink/core/globals/utilities.py b/psyneulink/core/globals/utilities.py index 9566a040e2a..5a7526f0a08 100644 --- a/psyneulink/core/globals/utilities.py +++ b/psyneulink/core/globals/utilities.py @@ -824,22 +824,17 @@ def multi_getattr(obj, attr, default = None): # based off the answer here https://stackoverflow.com/a/15774013/3131666 -def get_deepcopy_with_shared(shared_keys=frozenset(), shared_types=()): +def get_deepcopy_with_shared(shared_keys=frozenset()): """ Arguments --------- shared_keys an Iterable containing strings that should be shallow copied - shared_types - an Iterable containing types that when objects of that type are encountered - will be shallow copied - Returns ------- a __deepcopy__ function """ - shared_types = tuple(shared_types) shared_keys = frozenset(shared_keys) def __deepcopy__(self, memo): @@ -855,13 +850,10 @@ def __deepcopy__(self, memo): for k in ordered_dict_keys: v = self.__dict__[k] - if k in shared_keys or isinstance(v, shared_types): + if k in shared_keys: res_val = v else: - try: - res_val = copy_iterable_with_shared(v, shared_types, memo) - except TypeError: - res_val = copy.deepcopy(v, memo) + res_val = copy.deepcopy(v, memo) setattr(result, k, res_val) return result diff --git a/psyneulink/library/compositions/pytorchwrappers.py b/psyneulink/library/compositions/pytorchwrappers.py index f94b017e6dd..8797dd25ac6 100644 --- a/psyneulink/library/compositions/pytorchwrappers.py +++ b/psyneulink/library/compositions/pytorchwrappers.py @@ -13,7 +13,6 @@ import torch import torch.nn as nn -from psyneulink.core.components.component import Component, ComponentsMeta from psyneulink.core.components.functions.nonstateful.combinationfunctions import LinearCombination, PRODUCT, SUM from psyneulink.core.compositions.composition import NodeRole, CompositionInterfaceMechanism from psyneulink.library.compositions.pytorchllvmhelper import * @@ -238,7 +237,7 @@ def _assign_input_nodes(nodes): self._regenerate_paramlist() - __deepcopy__ = get_deepcopy_with_shared(shared_types=(Component, ComponentsMeta)) + __deepcopy__ = get_deepcopy_with_shared() def _regenerate_paramlist(self): """Add Projection matrices to Pytorch Module's parameter list""" From 40c1cc05493494e5239f90b74cb158e162c5997f Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Wed, 8 May 2024 00:32:02 +0000 Subject: [PATCH 152/410] Component: __deepcopy__ remove no_shared in memo --- psyneulink/core/components/component.py | 2 +- .../core/components/ports/outputport.py | 4 +-- psyneulink/core/globals/parameters.py | 28 ++++++++----------- 3 files changed, 15 insertions(+), 19 deletions(-) diff --git a/psyneulink/core/components/component.py b/psyneulink/core/components/component.py index 50fb9f54274..8b2e72458f7 100644 --- a/psyneulink/core/components/component.py +++ 
b/psyneulink/core/components/component.py @@ -1270,7 +1270,7 @@ def __deepcopy__(self, memo): and isinstance(self, memo[SHARED_COMPONENT_TYPES]) ): return self - elif 'no_shared' not in memo or not memo['no_shared']: + else: memo[SHARED_COMPONENT_TYPES] = (Component,) fun = get_deepcopy_with_shared(self._deepcopy_shared_keys) diff --git a/psyneulink/core/components/ports/outputport.py b/psyneulink/core/components/ports/outputport.py index 1ff84d2c9dc..9b3ff0d3a19 100644 --- a/psyneulink/core/components/ports/outputport.py +++ b/psyneulink/core/components/ports/outputport.py @@ -633,7 +633,7 @@ MAPPING_PROJECTION, MECHANISM_VALUE, NAME, OUTPUT_PORT, OUTPUT_PORTS, OUTPUT_PORT_PARAMS, \ OWNER_VALUE, PARAMS, PARAMS_DICT, PROJECTION, PROJECTIONS, RECEIVER, REFERENCE_VALUE, STANDARD_OUTPUT_PORTS, PORT, \ VALUE, VARIABLE, \ - output_port_spec_to_parameter_name, INPUT_PORT_VARIABLES + output_port_spec_to_parameter_name, INPUT_PORT_VARIABLES, SHARED_COMPONENT_TYPES from psyneulink.core.globals.parameters import Parameter, check_user_specified from psyneulink.core.globals.context import Context from psyneulink.core.globals.preferences.basepreferenceset import ValidPrefSet @@ -1427,7 +1427,7 @@ def _instantiate_output_ports(owner, output_ports=None, context=None): if isinstance(std_output_port[FUNCTION], Function): # we should not reuse standard_output_port Function # instances across multiple ports - std_output_port[FUNCTION] = copy.deepcopy(std_output_port[FUNCTION], memo={'no_shared': True}) + std_output_port[FUNCTION] = copy.deepcopy(std_output_port[FUNCTION], memo={SHARED_COMPONENT_TYPES: None}) except KeyError: pass diff --git a/psyneulink/core/globals/parameters.py b/psyneulink/core/globals/parameters.py index 22487fa8b5a..f3106048098 100644 --- a/psyneulink/core/globals/parameters.py +++ b/psyneulink/core/globals/parameters.py @@ -1105,14 +1105,9 @@ def __str__(self): return super().__str__() def __deepcopy__(self, memo): - if 'no_shared' in memo and memo['no_shared']: - shared_types = tuple() - else: - shared_types = None - result = type(self)( **{ - k: copy_parameter_value(getattr(self, k), memo=memo, shared_types=shared_types) + k: copy_parameter_value(getattr(self, k), memo=memo) for k in self._param_attrs }, _owner=self._owner, @@ -1120,16 +1115,17 @@ def __deepcopy__(self, memo): _user_specified=self._user_specified, _scalar_converted=self._scalar_converted, ) - # TODO: this is a quick fix to make sure default values are - # always copied. should be integrated with future changes to - # deepcopy - # None indicates was not already deepcopied above - if shared_types is None and not self._inherited: - # use of memo here relies on the fact that - # copy_parameter_value does not currently add - # self.default_value. 
Otherwise it would reuse the shared - # value from above - result._set_default_value(copy.deepcopy(self.default_value, memo), directly=True) + + # make sure default values are always deepcopied + if ( + not self._inherited + and id(self.default_value) in memo + and memo[id(self.default_value)] is self.default_value + ): + del memo[id(self.default_value)] + result._set_default_value( + copy_parameter_value(self.default_value, memo), directly=True + ) memo[id(self)] = result From 9490264868517324dec82969eb70bb49b51428ce Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Wed, 8 May 2024 21:59:05 +0000 Subject: [PATCH 153/410] Component: __deepcopy__ add copy to memo --- psyneulink/core/components/component.py | 1 + 1 file changed, 1 insertion(+) diff --git a/psyneulink/core/components/component.py b/psyneulink/core/components/component.py index 8b2e72458f7..d50b61f2530 100644 --- a/psyneulink/core/components/component.py +++ b/psyneulink/core/components/component.py @@ -1275,6 +1275,7 @@ def __deepcopy__(self, memo): fun = get_deepcopy_with_shared(self._deepcopy_shared_keys) newone = fun(self, memo) + memo[id(self)] = newone if newone.parameters is not newone.class_parameters: # may be in DEFERRED INIT, so parameters/defaults belongs to class From 0b4c401bd9ff124693d17f10af759d3b3778ae6c Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Fri, 10 May 2024 19:53:09 -0400 Subject: [PATCH 154/410] ci/ga: Use single variable to check for self-hosted runners availability (#2965) Use vars object in "runs-on" directly. Drop the select-runner preamble job. Signed-off-by: Jan Vesely --- .github/workflows/pnl-ci.yml | 33 ++++++++------------------------- 1 file changed, 8 insertions(+), 25 deletions(-) diff --git a/.github/workflows/pnl-ci.yml b/.github/workflows/pnl-ci.yml index b704b86e11d..89765843568 100644 --- a/.github/workflows/pnl-ci.yml +++ b/.github/workflows/pnl-ci.yml @@ -13,11 +13,6 @@ on: - 'v**' pull_request: -env: - SELF_HOSTED_MACOS: ${{ secrets.SELF_HOSTED_MACOS }} - SELF_HOSTED_LINUX: ${{ secrets.SELF_HOSTED_LINUX }} - SELF_HOSTED_WINDOWS: ${{ secrets.SELF_HOSTED_WINDOWS }} - # run only the latest instance of this workflow job for the current branch/PR # cancel older runs # fall back to run id if not available (run id is unique -> no cancellations) @@ -26,27 +21,15 @@ concurrency: cancel-in-progress: true jobs: - # A job to select self-hosted runner if requested by an env var - select-runner: - runs-on: ubuntu-latest - - outputs: - self_hosted_macos: ${{ steps.is_self_hosted.outputs.macos && 'macos' || '' }} - self_hosted_linux: ${{ steps.is_self_hosted.outputs.linux && 'linux' || '' }} - self_hosted_windows: ${{ steps.is_self_hosted.outputs.windows && 'windows' || '' }} - - steps: - - name: Add macos - id: is_self_hosted - run: | - echo "macos=$SELF_HOSTED_MACOS" | tee -a $GITHUB_OUTPUT - echo "linux=$SELF_HOSTED_LINUX" | tee -a $GITHUB_OUTPUT - echo "windows=$SELF_HOSTED_WINDOWS" | tee -a $GITHUB_OUTPUT - - # the main build job + # The main test job build: - needs: select-runner - runs-on: ${{ (contains(needs.select-runner.outputs.*, matrix.os) && fromJSON(format('[ "self-hosted","{0}", "X64" ]', matrix.os))) || format('{0}-latest', matrix.os) }} + runs-on: ${{ (contains(vars.SELF_HOSTED, format(';{0}_{1}_{2}_{3};', matrix.os, matrix.python-version, matrix.python-architecture, matrix.extra-args)) + && fromJSON(format('[ "self-hosted","{0}", "X64" ]', matrix.os == 'ubuntu' && 'Linux' || matrix.os))) + || format('{0}-latest', matrix.os) }} + env: + # Keep DESCRIPTION in sync with the 
above + DESCRIPTION: ${{ format(';{0}_{1}_{2}_{3};', matrix.os, matrix.python-version, matrix.python-architecture, matrix.extra-args) }} + SELF_HOSTED: ${{ vars.SELF_HOSTED }} strategy: fail-fast: false matrix: From 0409a6a669b73142b3a5b3e45c5691ba3489d677 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Sun, 12 May 2024 23:37:10 -0400 Subject: [PATCH 155/410] Report: Store report parameters on the instance instead of the class Class members are global, so all setting are leaked until overwritten by the next instantiation of Report. Signed-off-by: Jan Vesely --- psyneulink/core/compositions/report.py | 66 +++++++++++++------------- 1 file changed, 33 insertions(+), 33 deletions(-) diff --git a/psyneulink/core/compositions/report.py b/psyneulink/core/compositions/report.py index bab58a1aaf9..a0d2d0302b3 100644 --- a/psyneulink/core/compositions/report.py +++ b/psyneulink/core/compositions/report.py @@ -672,7 +672,6 @@ def __new__(cls, ) -> 'Report': if cls._instance is None: - # Validate arguments # assert context, "PROGRAM ERROR: Call to Report() without 'context' argument." source = f'call to execution method for {caller.name or ""}' @@ -685,8 +684,8 @@ def __new__(cls, if not isinstance(report_simulations, ReportSimulations): raise ReportError(f"Bad 'report_simulations' arg in {source}: '{report_simulations}'; " f"must be {ReportSimulations} option.") - cls._report_to_devices = convert_to_list(report_to_devices or ReportDevices.CONSOLE) - if not all(isinstance(a, ReportDevices) for a in cls._report_to_devices): + _report_to_devices = convert_to_list(report_to_devices or ReportDevices.CONSOLE) + if not all(isinstance(a, ReportDevices) for a in _report_to_devices): raise ReportError(f"Bad 'report_to_devices' arg in {source}: '{report_to_devices}'; " f"must be a one or a list of {ReportDevices} option(s).") @@ -694,52 +693,53 @@ def __new__(cls, cls._instance = super(Report, cls).__new__(cls) # Assign option properties - cls._report_progress = report_progress - cls._report_output = report_output - cls._report_params = report_params - # cls._reporting_enabled = report_output is not ReportOutput.OFF or cls._report_progress - cls._reporting_enabled = report_output is not ReportOutput.OFF or report_progress is not ReportProgress.OFF - cls._report_simulations = report_simulations - cls._rich_console = ReportDevices.CONSOLE in cls._report_to_devices - cls._rich_divert = ReportDevices.DIVERT in cls._report_to_devices - cls._record_reports = ReportDevices.RECORD in cls._report_to_devices - cls._recording_enabled = any(i is not ReportDevices.CONSOLE for i in cls._report_to_devices) + cls._instance._report_to_devices = _report_to_devices + cls._instance._report_progress = report_progress + cls._instance._report_output = report_output + cls._instance._report_params = report_params + # cls._instance._reporting_enabled = report_output is not ReportOutput.OFF or cls._instance._report_progress + cls._instance._reporting_enabled = report_output is not ReportOutput.OFF or report_progress is not ReportProgress.OFF + cls._instance._report_simulations = report_simulations + cls._instance._rich_console = ReportDevices.CONSOLE in cls._instance._report_to_devices + cls._instance._rich_divert = ReportDevices.DIVERT in cls._instance._report_to_devices + cls._instance._record_reports = ReportDevices.RECORD in cls._instance._report_to_devices + cls._instance._recording_enabled = any(i is not ReportDevices.CONSOLE for i in cls._instance._report_to_devices) # Enable rich if reporting output or progress and using console 
or recording - cls._use_rich = (cls._reporting_enabled - and (cls._rich_console or cls._rich_divert or cls._record_reports)) - cls._use_pnl_view = ReportDevices.PNL_VIEW in cls._report_to_devices + cls._instance._use_rich = (cls._instance._reporting_enabled + and (cls._instance._rich_console or cls._instance._rich_divert or cls._instance._record_reports)) + cls._instance._use_pnl_view = ReportDevices.PNL_VIEW in cls._instance._report_to_devices - cls._outermost_comp = caller - cls._execution_stack = [] - cls._trial_header_stack = [] + cls._instance._outermost_comp = caller + cls._instance._execution_stack = [] + cls._instance._trial_header_stack = [] - cls.depth_indent_factor = depth_indent_factor - cls.padding_indent = padding_indent - cls._padding_indent_str = padding_indent * ' ' - cls.padding_lines = padding_lines + cls._instance.depth_indent_factor = depth_indent_factor + cls._instance.padding_indent = padding_indent + cls._instance._padding_indent_str = padding_indent * ' ' + cls._instance.padding_lines = padding_lines # Instantiate rich progress context object # - it is not started until the self.start_report() method is called # - auto_refresh is disabled to accommodate IDEs (such as PyCharm and Jupyter Notebooks) - if cls._use_rich: + if cls._instance._use_rich: # Set up RECORDING - if cls._record_reports: - cls._recording_console = Console() + if cls._instance._record_reports: + cls._instance._recording_console = Console() # Set up DIVERT file = False - if cls._rich_divert: + if cls._instance._rich_divert: file = StringIO() - cls._instance._rich_progress = RichProgress(auto_refresh=False, console=Console(file=file)) + cls._instance._instance._rich_progress = RichProgress(auto_refresh=False, console=Console(file=file)) # Instantiate interface to PsyNeuLinkView - if cls._use_pnl_view: + if cls._instance._use_pnl_view: warnings.warn("'pnl_view' not yet supported as an option for report_progress of Composition.run()") - cls.output_reports = {} - cls._recorded_reports = str() - cls._rich_diverted_reports = str() + cls._instance.output_reports = {} + cls._instance._recorded_reports = str() + cls._instance._rich_diverted_reports = str() - cls._ref_count = 0 + cls._instance._ref_count = 0 return cls._instance From 22593092aa873e39f83f536ba1b903a78d1b9eab Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Tue, 13 Feb 2024 04:30:51 +0000 Subject: [PATCH 156/410] utilities: copy_iterable_with_shared: handle weakref types --- psyneulink/core/globals/utilities.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/psyneulink/core/globals/utilities.py b/psyneulink/core/globals/utilities.py index 5a7526f0a08..2cb871eab6d 100644 --- a/psyneulink/core/globals/utilities.py +++ b/psyneulink/core/globals/utilities.py @@ -866,9 +866,14 @@ def copy_iterable_with_shared(obj, shared_types=None, memo=None): except TypeError: shared_types = (shared_types, ) - dict_types = (dict, collections.UserDict) + dict_types = ( + dict, + collections.UserDict, + weakref.WeakKeyDictionary, + weakref.WeakValueDictionary + ) list_types = (list, collections.UserList, collections.deque) - tuple_types = (tuple, set) + tuple_types = (tuple, set, weakref.WeakSet) all_types_using_recursion = dict_types + list_types + tuple_types if isinstance(obj, dict_types): From b63b80cecac28b2897f38f68f92f7ae853911083 Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Wed, 10 Apr 2024 02:27:24 +0000 Subject: [PATCH 157/410] utilities: copy_iterable_with_shared: handle ContentAddressableList --- 
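These copy_iterable_with_shared changes extend a helper whose core idea is easy to lose in the fragments shown: containers are copied, but elements of designated "shared" types keep their identity rather than being deep-copied. A minimal sketch of that idea, with made-up names for illustration (not the actual PsyNeuLink API):

import copy

class SharedThing:      # stands in for a type that must never be duplicated
    pass

def copy_list_with_shared(container, shared_types):
    # copy the container itself, reuse shared elements, deep-copy the rest
    return [item if isinstance(item, shared_types) else copy.deepcopy(item)
            for item in container]

shared = SharedThing()
original = [shared, [1, 2, 3]]
dup = copy_list_with_shared(original, (SharedThing,))
assert dup is not original
assert dup[0] is original[0]      # shared element kept by identity
assert dup[1] is not original[1]  # ordinary element deep-copied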
psyneulink/core/globals/utilities.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/psyneulink/core/globals/utilities.py b/psyneulink/core/globals/utilities.py index 2cb871eab6d..cc109eb3980 100644 --- a/psyneulink/core/globals/utilities.py +++ b/psyneulink/core/globals/utilities.py @@ -876,6 +876,11 @@ def copy_iterable_with_shared(obj, shared_types=None, memo=None): tuple_types = (tuple, set, weakref.WeakSet) all_types_using_recursion = dict_types + list_types + tuple_types + # ContentAddressableList + cal_component_type = getattr(obj, 'component_type', None) + if cal_component_type and issubclass(cal_component_type, shared_types): + return copy.copy(obj) + if isinstance(obj, dict_types): result = copy.copy(obj) del_keys = set() From c295d3dc6ad872d1d14187115b6cbf88d31370b3 Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Fri, 16 Feb 2024 23:13:24 +0000 Subject: [PATCH 158/410] utilities: is_iterable: simplify and allow option to exclude str --- psyneulink/core/globals/utilities.py | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/psyneulink/core/globals/utilities.py b/psyneulink/core/globals/utilities.py index cc109eb3980..99be76b4124 100644 --- a/psyneulink/core/globals/utilities.py +++ b/psyneulink/core/globals/utilities.py @@ -441,17 +441,24 @@ def is_distance_metric(s): ] -def is_iterable(x): +def is_iterable(x: Any, exclude_str: bool = False) -> bool: """ + Args: + x (Any) + exclude_str (bool, optional): if True, **x** of type str will + return False. Defaults to False. + Returns ------- True - if **x** can be iterated on False - otherwise """ - if isinstance(x, np.ndarray) and x.ndim == 0: + try: + iter(x) + except TypeError: return False else: - return isinstance(x, collections.abc.Iterable) + return not exclude_str or not isinstance(x, str) kwCompatibilityType = "type" From 4e04843e5d2eedf21963e71c57945b3bc1232d85 Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Thu, 22 Feb 2024 04:06:04 +0000 Subject: [PATCH 159/410] utilities: get_deepcopy_with_shared: handle object __dict__ modification --- psyneulink/core/globals/utilities.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/psyneulink/core/globals/utilities.py b/psyneulink/core/globals/utilities.py index 99be76b4124..95cc6cd7f90 100644 --- a/psyneulink/core/globals/utilities.py +++ b/psyneulink/core/globals/utilities.py @@ -855,7 +855,7 @@ def __deepcopy__(self, memo): except AttributeError: ordered_dict_keys = self.__dict__ - for k in ordered_dict_keys: + for k in copy.copy(ordered_dict_keys): v = self.__dict__[k] if k in shared_keys: res_val = v From 2cc638ccc8572a38ec24966d5c8214dd33d2b38b Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Sat, 9 Mar 2024 05:07:53 +0000 Subject: [PATCH 160/410] utilities: contains_type: handle infinite recursion for numpy matrix --- psyneulink/core/globals/utilities.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/psyneulink/core/globals/utilities.py b/psyneulink/core/globals/utilities.py index 95cc6cd7f90..9a95b2f77c0 100644 --- a/psyneulink/core/globals/utilities.py +++ b/psyneulink/core/globals/utilities.py @@ -2064,11 +2064,14 @@ def contains_type( **arr** itself if needed """ try: - for a in arr: - if isinstance(a, typ) or (a is not arr and contains_type(a, typ)): - return True + arr_items = iter(arr) except TypeError: - pass + return False + + recurse = not isinstance(arr, np.matrix) + for a in arr_items: + if isinstance(a, typ) or (a is not arr and recurse and contains_type(a, 
typ)): + return True return False From d27043f34c042f93dcf7634ed64fc7463dedfca5 Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Mon, 11 Mar 2024 23:01:34 +0000 Subject: [PATCH 161/410] utilities: copy_iterable_with_shared: support numpy array --- psyneulink/core/globals/utilities.py | 109 ++++++++++++++++----------- 1 file changed, 64 insertions(+), 45 deletions(-) diff --git a/psyneulink/core/globals/utilities.py b/psyneulink/core/globals/utilities.py index 9a95b2f77c0..5b6d01d3ef5 100644 --- a/psyneulink/core/globals/utilities.py +++ b/psyneulink/core/globals/utilities.py @@ -867,7 +867,23 @@ def __deepcopy__(self, memo): return __deepcopy__ -def copy_iterable_with_shared(obj, shared_types=None, memo=None): +def _copy_shared_iterable_elementwise_as_list(obj, shared_types, memo, result_obj=None): + result = result_obj or list() + + for item in obj: + try: + new_item = copy_iterable_with_shared(item, shared_types, memo) + except TypeError: + if isinstance(item, shared_types): + new_item = item + else: + new_item = copy.deepcopy(item, memo) + result.append(new_item) + + return result + + +def copy_iterable_with_shared(obj, shared_types=type(None), memo=None): try: shared_types = tuple(shared_types) except TypeError: @@ -927,14 +943,7 @@ def copy_iterable_with_shared(obj, shared_types=None, memo=None): else: result = obj.__class__() - for item in obj: - if isinstance(item, all_types_using_recursion): - new_item = copy_iterable_with_shared(item, shared_types, memo) - elif isinstance(item, shared_types): - new_item = item - else: - new_item = copy.deepcopy(item, memo) - result.append(new_item) + result = _copy_shared_iterable_elementwise_as_list(obj, shared_types, memo, result) if is_tuple: try: @@ -942,6 +951,14 @@ def copy_iterable_with_shared(obj, shared_types=None, memo=None): except TypeError: # handle namedtuple result = obj.__class__(*result) + elif isinstance(obj, np.ndarray) and obj.dtype == object: + if obj.ndim > 0: + result = _copy_shared_iterable_elementwise_as_list(obj, shared_types, memo) + result = safe_create_np_array(result) + elif isinstance(obj, shared_types): + result = np.array(obj) + else: + result = copy.deepcopy(obj) else: raise TypeError @@ -1017,6 +1034,44 @@ def np_array_less_than_2d(array): else: return False + +def safe_create_np_array(value): + with warnings.catch_warnings(): + + # If we have a torch tensor, allow it to pass through unchanged + if torch and torch.is_tensor(value): + return value + + warnings.filterwarnings('error', category=np.VisibleDeprecationWarning) + # NOTE: this will raise a ValueError in the future. + # See https://numpy.org/neps/nep-0034-infer-dtype-is-object.html + try: + try: + return np.asarray(value) + except np.VisibleDeprecationWarning: + return np.asarray(value, dtype=object) + except ValueError as e: + # numpy 1.24 removed the above deprecation and raises + # ValueError instead. 
Note that the below call can still + # raise other ValueErrors + if 'The requested array has an inhomogeneous shape' in str(e): + return np.asarray(value, dtype=object) + raise + + except ValueError as e: + msg = str(e) + if 'cannot guess the desired dtype from the input' in msg: + return np.asarray(value, dtype=object) + # KDM 6/29/20: this case handles a previously noted case + # by KAM 6/28/18, #877: + # [[0.0], [0.0], np.array([[0.0, 0.0]])] + # but was only handled for dimension=1 + elif 'could not broadcast' in msg: + return convert_all_elements_to_np_array(value) + else: + raise + + def convert_to_np_array(value, dimension=None): """ Converts value to np.ndarray if it is not already. Handles @@ -1033,42 +1088,6 @@ def convert_to_np_array(value, dimension=None): Returns: value : np.ndarray """ - def safe_create_np_array(value): - with warnings.catch_warnings(): - - # If we have a torch tensor, allow it to pass through unchanged - if torch and torch.is_tensor(value): - return value - - warnings.filterwarnings('error', category=np.VisibleDeprecationWarning) - # NOTE: this will raise a ValueError in the future. - # See https://numpy.org/neps/nep-0034-infer-dtype-is-object.html - try: - try: - return np.asarray(value) - except np.VisibleDeprecationWarning: - return np.asarray(value, dtype=object) - except ValueError as e: - # numpy 1.24 removed the above deprecation and raises - # ValueError instead. Note that the below call can still - # raise other ValueErrors - if 'The requested array has an inhomogeneous shape' in str(e): - return np.asarray(value, dtype=object) - raise - - except ValueError as e: - msg = str(e) - if 'cannot guess the desired dtype from the input' in msg: - return np.asarray(value, dtype=object) - # KDM 6/29/20: this case handles a previously noted case - # by KAM 6/28/18, #877: - # [[0.0], [0.0], np.array([[0.0, 0.0]])] - # but was only handled for dimension=1 - elif 'could not broadcast' in msg: - return convert_all_elements_to_np_array(value) - else: - raise - value = safe_create_np_array(value) if dimension == 1: From ca22ac49c4fc54f9bccc97e0ad0ab0e2ebeca96b Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Wed, 13 Mar 2024 03:18:54 +0000 Subject: [PATCH 162/410] EMstorageMechanism: replace internal use of Parameter.get with _get --- .../modulatory/learning/EMstoragemechanism.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/psyneulink/library/components/mechanisms/modulatory/learning/EMstoragemechanism.py b/psyneulink/library/components/mechanisms/modulatory/learning/EMstoragemechanism.py index 3bfdf7d580e..3ce20bc1817 100644 --- a/psyneulink/library/components/mechanisms/modulatory/learning/EMstoragemechanism.py +++ b/psyneulink/library/components/mechanisms/modulatory/learning/EMstoragemechanism.py @@ -212,11 +212,11 @@ def _memory_matrix_getter(owning_component=None, context=None)->list: # Get memory from learning_signals that project to retrieved_nodes if owning_component.is_initializing: # If initializing, learning_signals are still MappingProjections used to specify them, so get from them - memory = [retrieved_learning_signal.parameters.matrix.get(context) + memory = [retrieved_learning_signal.parameters.matrix._get(context) for retrieved_learning_signal in learning_signals_for_retrieved] else: # Otherwise, get directly from the learning_signals - memory = [retrieved_learning_signal.efferents[0].receiver.owner.parameters.matrix.get(context) + memory = 
[retrieved_learning_signal.efferents[0].receiver.owner.parameters.matrix._get(context) for retrieved_learning_signal in learning_signals_for_retrieved] # Get memory capacity from first length of first matrix (can use full set since might be ragged array) @@ -751,7 +751,7 @@ def _execute(self, decay_rate = self.parameters.decay_rate._get(context) # modulable, so use getter storage_prob = self.parameters.storage_prob._get(context) # modulable, so use getter - field_weights = self.parameters.field_weights.get(context) # modulable, so use getter + field_weights = self.parameters.field_weights._get(context) # modulable, so use getter concatenation_node = self.concatenation_node num_match_fields = 1 if concatenation_node else len([i for i in self.field_types if i==1]) @@ -760,7 +760,7 @@ def _execute(self, if self.is_initializing: # Return existing matrices for field_memories # FIX: THE FOLLOWING DOESN'T TEST FUNCTION: return convert_all_elements_to_np_array([ - learning_signal.receiver.path_afferents[0].parameters.matrix.get() + learning_signal.receiver.path_afferents[0].parameters.matrix._get(context) for learning_signal in self.learning_signals ]) # Raise exception if not initializing and memory is not specified @@ -799,7 +799,7 @@ def _execute(self, axis = 1 entry_to_store = variable[i - num_match_fields] # Get matrix containing memories for the field from the Projection - field_memory_matrix = field_projection.parameters.matrix.get(context) + field_memory_matrix = field_projection.parameters.matrix._get(context) value.append(super(LearningMechanism, self)._execute(variable=entry_to_store, memory_matrix=field_memory_matrix, From 3fbf95c2f2cdf78dfdff000020fd33558a8bc531 Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Thu, 14 Mar 2024 03:11:35 +0000 Subject: [PATCH 163/410] EMStorageMechanism: explicitly update field_projection matrices --- .../modulatory/learning/EMstoragemechanism.py | 22 ++++++++++++------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/psyneulink/library/components/mechanisms/modulatory/learning/EMstoragemechanism.py b/psyneulink/library/components/mechanisms/modulatory/learning/EMstoragemechanism.py index 3ce20bc1817..ff6597ef732 100644 --- a/psyneulink/library/components/mechanisms/modulatory/learning/EMstoragemechanism.py +++ b/psyneulink/library/components/mechanisms/modulatory/learning/EMstoragemechanism.py @@ -801,12 +801,18 @@ def _execute(self, # Get matrix containing memories for the field from the Projection field_memory_matrix = field_projection.parameters.matrix._get(context) - value.append(super(LearningMechanism, self)._execute(variable=entry_to_store, - memory_matrix=field_memory_matrix, - axis=axis, - storage_location=idx_of_weakest_memory, - storage_prob=storage_prob, - decay_rate=decay_rate, - context=context, - runtime_params=runtime_params)) + # pass in field_projection matrix to EMStorage function + res = super(LearningMechanism, self)._execute( + variable=entry_to_store, + memory_matrix=field_memory_matrix, + axis=axis, + storage_location=idx_of_weakest_memory, + storage_prob=storage_prob, + decay_rate=decay_rate, + context=context, + runtime_params=runtime_params + ) + value.append(res) + # assign modified field_memory_matrix back + field_projection.parameters.matrix._set(res, context) return convert_all_elements_to_np_array(value) From 45e2bf4c5f1ff32cabec299156d6311ecea1b6e3 Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Wed, 13 Mar 2024 03:27:40 +0000 Subject: [PATCH 164/410] EMComposition: reduce calls to memory_getter 
in __init__ --- psyneulink/library/compositions/emcomposition.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/psyneulink/library/compositions/emcomposition.py b/psyneulink/library/compositions/emcomposition.py index 3a6353005b9..6215f85446a 100644 --- a/psyneulink/library/compositions/emcomposition.py +++ b/psyneulink/library/compositions/emcomposition.py @@ -1494,9 +1494,12 @@ def __init__(self, self.exclude_node_roles(node, NodeRole.OUTPUT) # Warn if divide by zero will occur due to memory initialization - if not np.any([np.any([self.memory[i][j] - for i in range(self.memory_capacity)]) - for j in range(self.num_keys)]): + memory = self.memory + memory_capacity = self.memory_capacity + if not np.any([ + np.any([memory[i][j] for i in range(memory_capacity)]) + for j in range(self.num_keys) + ]): warnings.warn(f"Memory initialized with at least one field that has all zeros; " f"a divide by zero will occur if 'normalize_memories' is True. " f"This can be avoided by using 'memory_fill' to initialize memories with non-zero values.") From 1cc3b7a774265ff2c80efaf8ba72a96aeb1d14c5 Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Fri, 16 Feb 2024 03:37:52 +0000 Subject: [PATCH 165/410] Function: copy variable on external .function call --- psyneulink/core/components/functions/function.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/psyneulink/core/components/functions/function.py b/psyneulink/core/components/functions/function.py index 0a33f5a0aa6..16f703f331e 100644 --- a/psyneulink/core/components/functions/function.py +++ b/psyneulink/core/components/functions/function.py @@ -166,7 +166,7 @@ MODEL_SPEC_ID_MDF_VARIABLE, MatrixKeywordLiteral, ZEROS_MATRIX ) from psyneulink.core.globals.mdf import _get_variable_parameter_name -from psyneulink.core.globals.parameters import Parameter, check_user_specified +from psyneulink.core.globals.parameters import Parameter, check_user_specified, copy_parameter_value from psyneulink.core.globals.preferences.basepreferenceset import REPORT_OUTPUT_PREF, ValidPrefSet from psyneulink.core.globals.preferences.preferenceset import PreferenceEntry, PreferenceLevel from psyneulink.core.globals.registry import register_category @@ -691,6 +691,9 @@ def function(self, target_set=None, **kwargs): + if ContextFlags.COMMAND_LINE in context.source: + variable = copy_parameter_value(variable) + # IMPLEMENTATION NOTE: # The following is a convenience feature that supports specification of params directly in call to function # by moving the to a params dict, which treats them as runtime_params From c275959410203d4c4490e9ea133f889ad6a5e81d Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Tue, 20 Feb 2024 01:18:19 +0000 Subject: [PATCH 166/410] Component: copy default variable if not passed to execute --- psyneulink/core/components/component.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/psyneulink/core/components/component.py b/psyneulink/core/components/component.py index d50b61f2530..5c7e4ae2c2b 100644 --- a/psyneulink/core/components/component.py +++ b/psyneulink/core/components/component.py @@ -1914,6 +1914,8 @@ def _check_args(self, variable=None, params=None, context=None, target_set=None) except AttributeError: variable = self.class_defaults.variable + variable = copy_parameter_value(variable) + # If the variable is a function, call it if callable(variable): variable = variable() From 788c73e5b91f5c1a8fc93958751833cd584a3e3d Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Tue, 20 Feb 2024 
03:07:19 +0000 Subject: [PATCH 167/410] Mechanism: copy defaults before executing with them --- psyneulink/core/components/mechanisms/mechanism.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/psyneulink/core/components/mechanisms/mechanism.py b/psyneulink/core/components/mechanisms/mechanism.py index 8cf07ccb02c..b9a93581d6a 100644 --- a/psyneulink/core/components/mechanisms/mechanism.py +++ b/psyneulink/core/components/mechanisms/mechanism.py @@ -1111,7 +1111,7 @@ NAME, OUTPUT, OUTPUT_LABELS_DICT, OUTPUT_PORT, OUTPUT_PORT_PARAMS, OUTPUT_PORTS, OWNER_EXECUTION_COUNT, OWNER_VALUE, \ PARAMETER_PORT, PARAMETER_PORT_PARAMS, PARAMETER_PORTS, PROJECTIONS, REFERENCE_VALUE, RESULT, \ TARGET_LABELS_DICT, VALUE, VARIABLE, WEIGHT, MODEL_SPEC_ID_MDF_VARIABLE, MODEL_SPEC_ID_INPUT_PORT_COMBINATION_FUNCTION -from psyneulink.core.globals.parameters import Parameter, check_user_specified +from psyneulink.core.globals.parameters import Parameter, check_user_specified, copy_parameter_value from psyneulink.core.globals.preferences.preferenceset import PreferenceLevel from psyneulink.core.globals.registry import register_category, remove_instance_from_registry from psyneulink.core.globals.utilities import \ @@ -2447,7 +2447,7 @@ def execute(self, pass # Only call subclass' _execute method and then return (do not complete the rest of this method) elif self.initMethod == INIT_EXECUTE_METHOD_ONLY: - return_value = self._execute(variable=self.defaults.variable, + return_value = self._execute(variable=copy_parameter_value(self.defaults.variable), context=context, runtime_params=runtime_params) @@ -2474,7 +2474,7 @@ def execute(self, # Call only subclass' function during initialization (not its full _execute method nor rest of this method) elif self.initMethod == INIT_FUNCTION_METHOD_ONLY: - return_value = super()._execute(variable=self.defaults.variable, + return_value = super()._execute(variable=copy_parameter_value(self.defaults.variable), context=context, runtime_params=runtime_params) return convert_to_np_array(return_value, dimension=2) From 9cbd7f42de976d54831928733d7a749aee46d28e Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Thu, 15 Feb 2024 23:58:48 +0000 Subject: [PATCH 168/410] StatefulFunction: correct previous_value shape with ragged variable np.zeros_like ignores subarray shapes --- .../functions/stateful/statefulfunction.py | 14 ++++++++++++-- psyneulink/core/globals/utilities.py | 18 +++++++++++++++++- 2 files changed, 29 insertions(+), 3 deletions(-) diff --git a/psyneulink/core/components/functions/stateful/statefulfunction.py b/psyneulink/core/components/functions/stateful/statefulfunction.py index 51044080400..3e25d51284d 100644 --- a/psyneulink/core/components/functions/stateful/statefulfunction.py +++ b/psyneulink/core/components/functions/stateful/statefulfunction.py @@ -18,6 +18,7 @@ import abc import collections +import copy import numbers import warnings @@ -34,7 +35,14 @@ from psyneulink.core.globals.keywords import STATEFUL_FUNCTION_TYPE, STATEFUL_FUNCTION, NOISE, RATE from psyneulink.core.globals.parameters import Parameter, check_user_specified from psyneulink.core.globals.preferences.basepreferenceset import ValidPrefSet -from psyneulink.core.globals.utilities import iscompatible, convert_to_np_array, contains_type, safe_len, convert_all_elements_to_np_array +from psyneulink.core.globals.utilities import ( + contains_type, + convert_all_elements_to_np_array, + convert_to_np_array, + fill_array, + iscompatible, + safe_len, +) __all__ = 
['StatefulFunction'] @@ -377,7 +385,9 @@ def _validate_noise(self, noise): def _instantiate_attributes_before_function(self, function=None, context=None): if not self.parameters.initializer._user_specified: - self._initialize_previous_value(np.zeros_like(self.defaults.variable), context) + new_previous_value = copy.deepcopy(self.defaults.variable) + fill_array(new_previous_value, 0) + self._initialize_previous_value(new_previous_value, context) self._instantiate_stateful_attributes(self.stateful_attributes, self.initializers, context) super()._instantiate_attributes_before_function(function=function, context=context) diff --git a/psyneulink/core/globals/utilities.py b/psyneulink/core/globals/utilities.py index 5b6d01d3ef5..3205060377e 100644 --- a/psyneulink/core/globals/utilities.py +++ b/psyneulink/core/globals/utilities.py @@ -151,7 +151,7 @@ 'scalar_distance', 'sinusoid', 'tensor_power', 'TEST_CONDTION', 'type_match', 'underscore_to_camelCase', 'UtilitiesError', 'unproxy_weakproxy', 'create_union_set', 'merge_dictionaries', - 'contains_type', 'is_numeric_scalar', 'try_extract_0d_array_item', + 'contains_type', 'is_numeric_scalar', 'try_extract_0d_array_item', 'fill_array', ] logger = logging.getLogger(__name__) @@ -2157,6 +2157,22 @@ def _generated_toposort_key(obj): return _generated_toposort_key +def fill_array(arr: np.ndarray, value: Any): + """ + Fills all elements of **arr** with **value**, maintaining embedded + shapes of object-dtype arrays + + Args: + arr (np.ndarray) + value (Any) + """ + if arr.ndim != 0 and arr.dtype == object: + for item in arr: + fill_array(item, value) + else: + arr.fill(value) + + # np.isscalar returns true on non-numeric items def is_numeric_scalar(obj) -> bool: """ From bc57033c07b8b14503bfc90750dbdf86c1c71d7e Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Tue, 20 Feb 2024 00:13:56 +0000 Subject: [PATCH 169/410] SampleIterator: make generate_current_value instance-method-like creation of the function in __init__ using self from the outer scope meant that any copied instances of a SampleIterator reused the original SampleIterator instance for their generate_current_value calls --- psyneulink/core/globals/sampleiterator.py | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/psyneulink/core/globals/sampleiterator.py b/psyneulink/core/globals/sampleiterator.py index f059401e27e..7366e7219b7 100644 --- a/psyneulink/core/globals/sampleiterator.py +++ b/psyneulink/core/globals/sampleiterator.py @@ -360,7 +360,7 @@ def __init__(self, self.num = len(specification) self.generator = specification # the list - def generate_current_value(): # index into the list + def _generate_current_value(self): # index into the list # KDM 12/11/19: for currently unknown and unreplicable # reasons, the checks in __next__ will fail to ensure # that self.current_step is less than the length of @@ -379,9 +379,10 @@ def generate_current_value(): # index into the list # Assumes receiver of SampleIterator will get this and know what to do with it, # therefore no other attributes are needed and, to avoid confusion, they should not be available; # so just return. - return + def _generate_current_value(self): + return - if specification.function is None: + elif specification.function is None: self.start = specification.start self.stop = specification.stop # self.step = Fraction(specification.step) @@ -389,7 +390,7 @@ def generate_current_value(): # index into the list self.num = specification.num self.generator = None # ?? 
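# Aside, not part of this hunk: the bug this patch fixes, in miniature (made-up
# names, plain Python). A function defined inside __init__ closes over that
# particular `self`, and deepcopy does not copy function objects, so a copied
# instance keeps calling back into the original one -- which is why this patch
# rebinds _generate_current_value as an instance-method-like callable that
# takes `self` explicitly.
import copy

class Holder:
    def __init__(self, start):
        self.current = start
        def step():               # closure captures the original `self`
            self.current += 1
            return self.current
        self.step = step

a = Holder(0)
b = copy.deepcopy(a)
b.step()                          # mutates a, not b
assert a.current == 1 and b.current == 0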
- def generate_current_value(): # return next value in range + def _generate_current_value(self): # return next value in range # Save global precision for later restoration _global_precision = getcontext().prec # Set SampleSpec precision @@ -408,7 +409,7 @@ def generate_current_value(): # return next value in range self.head = self.start self.generator = specification.function - def generate_current_value(): # call function + def _generate_current_value(self): # call function return self.generator() else: @@ -423,7 +424,10 @@ def generate_current_value(): # call function self.current_step = 0 self.head = self.start - self.generate_current_value = generate_current_value + self._generate_current_value = _generate_current_value + + def generate_current_value(self): + return self._generate_current_value(self) def __next__(self): """ From 19ab7b608333e41cb3ee13c8fc63f77a31bce59a Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Tue, 20 Feb 2024 00:20:27 +0000 Subject: [PATCH 170/410] OptimizationFunction: make grid stateful --- .../components/functions/nonstateful/fitfunctions.py | 2 +- .../functions/nonstateful/optimizationfunctions.py | 11 ++++++----- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/psyneulink/core/components/functions/nonstateful/fitfunctions.py b/psyneulink/core/components/functions/nonstateful/fitfunctions.py index 4d187a3ed57..df0dd449825 100644 --- a/psyneulink/core/components/functions/nonstateful/fitfunctions.py +++ b/psyneulink/core/components/functions/nonstateful/fitfunctions.py @@ -454,7 +454,7 @@ def _run_simulations(self, *args, context=None): raise ValueError("Too many arguments passed to run_simulations") # Reset the search grid - self.reset_grid() + self.reset_grid(context) # Evaluate objective_function for each sample last_sample, last_value, all_samples, all_values = self._evaluate( diff --git a/psyneulink/core/components/functions/nonstateful/optimizationfunctions.py b/psyneulink/core/components/functions/nonstateful/optimizationfunctions.py index 0fc795ff931..79cd4f022d5 100644 --- a/psyneulink/core/components/functions/nonstateful/optimizationfunctions.py +++ b/psyneulink/core/components/functions/nonstateful/optimizationfunctions.py @@ -413,6 +413,8 @@ class Parameters(Function_Base.Parameters): saved_samples = Parameter([], read_only=True, pnl_internal=True) saved_values = Parameter([], read_only=True, pnl_internal=True) + grid = Parameter(None) + @check_user_specified @beartype def __init__( @@ -840,11 +842,11 @@ def _is_static(it:SampleIterator): return outcomes, num_evals - def reset_grid(self): + def reset_grid(self, context): """Reset iterators in `search_space `""" for s in self.search_space: s.reset() - self.grid = itertools.product(*[s for s in self.search_space]) + self.parameters.grid._set(itertools.product(*[s for s in self.search_space]), context) def _traverse_grid(self, variable, sample_num, context=None): """Get next sample from grid. @@ -853,7 +855,7 @@ def _traverse_grid(self, variable, sample_num, context=None): if self.is_initializing: return convert_to_np_array([signal._start for signal in self.search_space]) try: - sample = np.asarray(next(self.grid)) + sample = np.asarray(next(self.parameters.grid._get(context))) except StopIteration: raise OptimizationFunctionError("Expired grid in {} run from {} " "(execution_count: {}; num_iterations: {})". 
@@ -1602,7 +1604,6 @@ class Parameters(OptimizationFunction.Parameters): :default value: True :type: ``bool`` """ - grid = Parameter(None) save_samples = Parameter(False, pnl_internal=True) save_values = Parameter(False, pnl_internal=True) random_state = Parameter(None, loggable=False, getter=_random_state_getter, dependencies='seed') @@ -1984,7 +1985,7 @@ def _function(self, in the order they were evaluated; otherwise it is empty. """ - self.reset_grid() + self.reset_grid(context) return_all_samples = return_all_values = [] direction = self.parameters.direction._get(context) From c8662c59a725facf215795432b4a9dc8856b2c2c Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Tue, 20 Feb 2024 23:10:30 +0000 Subject: [PATCH 171/410] Component: _validate_variable: copy default variable if used --- psyneulink/core/components/component.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/psyneulink/core/components/component.py b/psyneulink/core/components/component.py index 5c7e4ae2c2b..ee1045da5d2 100644 --- a/psyneulink/core/components/component.py +++ b/psyneulink/core/components/component.py @@ -2729,9 +2729,10 @@ def _validate_variable(self, variable, context=None): # - return if variable is None: try: - return self.defaults.variable + variable = self.defaults.variable except AttributeError: - return self.class_defaults.variable + variable = self.class_defaults.variable + return copy_parameter_value(variable) # Otherwise, do some checking on variable before converting to np.ndarray From f7108cd1a9279da97a9b63bac9e2d374d2d7ab9a Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Tue, 20 Feb 2024 23:10:49 +0000 Subject: [PATCH 172/410] Component: copy matrix default value if used --- psyneulink/core/components/component.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/psyneulink/core/components/component.py b/psyneulink/core/components/component.py index ee1045da5d2..2770ed04f5e 100644 --- a/psyneulink/core/components/component.py +++ b/psyneulink/core/components/component.py @@ -3183,7 +3183,7 @@ def _instantiate_function(self, function, function_params=None, context=None): # update it here if needed if MATRIX in kwargs_to_instantiate: try: - kwargs_to_instantiate[MATRIX] = self.parameter_ports[MATRIX].defaults.value + kwargs_to_instantiate[MATRIX] = copy_parameter_value(self.parameter_ports[MATRIX].defaults.value) except (AttributeError, KeyError, TypeError): pass From 43fa15745d8c55298e2a527e3d2f1866164df764 Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Tue, 20 Feb 2024 23:13:45 +0000 Subject: [PATCH 173/410] Projection_Base: copy sender default value if used as default --- psyneulink/core/components/projections/projection.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/psyneulink/core/components/projections/projection.py b/psyneulink/core/components/projections/projection.py index cdf46687532..fa7211c2060 100644 --- a/psyneulink/core/components/projections/projection.py +++ b/psyneulink/core/components/projections/projection.py @@ -426,7 +426,7 @@ PROJECTION_RECEIVER, PROJECTION_SENDER, PROJECTION_TYPE, \ RECEIVER, SENDER, STANDARD_ARGS, PORT, PORTS, WEIGHT, ADD_INPUT_PORT, ADD_OUTPUT_PORT, \ PROJECTION_COMPONENT_CATEGORY -from psyneulink.core.globals.parameters import Parameter, check_user_specified +from psyneulink.core.globals.parameters import Parameter, check_user_specified, copy_parameter_value from psyneulink.core.globals.preferences.preferenceset import PreferenceLevel from psyneulink.core.globals.registry 
import register_category, remove_instance_from_registry from psyneulink.core.globals.socket import ConnectionInfo @@ -741,7 +741,7 @@ def __init__(self, # FIX: NEED TO KNOW HERE IF SENDER IS SPECIFIED AS A MECHANISM OR PORT try: # this should become _default_value when that is fully implemented - variable = self.sender.defaults.value + variable = copy_parameter_value(self.sender.defaults.value) except AttributeError: if receiver.prefs.verbosePref: warnings.warn("Unable to get value of sender ({0}) for {1}; will assign default ({2})". From 31f7d9c07f8d5002c4ec1e650d658a9d39956754 Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Tue, 20 Feb 2024 23:13:15 +0000 Subject: [PATCH 174/410] CompositionInterfaceMechanism: use standard default_variable assignment --- .../mechanisms/processing/compositioninterfacemechanism.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/psyneulink/core/components/mechanisms/processing/compositioninterfacemechanism.py b/psyneulink/core/components/mechanisms/processing/compositioninterfacemechanism.py index 83eb2c48c94..8176a6913f3 100644 --- a/psyneulink/core/components/mechanisms/processing/compositioninterfacemechanism.py +++ b/psyneulink/core/components/mechanisms/processing/compositioninterfacemechanism.py @@ -189,8 +189,6 @@ def __init__(self, name=None, prefs: Optional[ValidPrefSet] = None): - if default_variable is None and size is None: - default_variable = self.class_defaults.variable self.composition = composition self.port_map = port_map self.connected_to_composition = False From 7b6243b4a3141b61e756c17470c584ebabbad45b Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Thu, 22 Feb 2024 02:34:21 +0000 Subject: [PATCH 175/410] Composition: do not delete value Parameter Parameter deletion breaks assumptions about inheritance of attributes in Parameter hierarchies. 
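The point about deletion versus overriding deserves a concrete picture. A minimal plain-Python sketch (made-up classes, not the actual Parameters machinery): shadowing an inherited attribute with a sentinel keeps it resolvable at every level of the hierarchy, whereas deleting it silently re-exposes whatever the parent defines -- the same reasoning behind the `value = Parameter(NotImplemented, read_only=True)` override in the diff that follows.

class BaseParams:
    value = 'base-default'

class CompositionParams(BaseParams):
    value = NotImplemented      # override with a sentinel instead of deleting

p = CompositionParams()
assert p.value is NotImplemented           # still resolvable, marked unused
assert BaseParams.value == 'base-default'  # parent untouched
# `del CompositionParams.value` would make p.value fall back to 'base-default',
# quietly undoing what the subclass intended to disable.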
--- psyneulink/core/compositions/composition.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/psyneulink/core/compositions/composition.py b/psyneulink/core/compositions/composition.py index dd55d9af6f3..3a1e3f3e1bc 100644 --- a/psyneulink/core/compositions/composition.py +++ b/psyneulink/core/compositions/composition.py @@ -3958,7 +3958,7 @@ class Parameters(Composition_Base.Parameters): simulation_results = Parameter([], loggable=False, pnl_internal=True) retain_old_simulation_data = Parameter(False, stateful=False, loggable=False, pnl_internal=True) input_specification = Parameter(None, stateful=False, loggable=False, pnl_internal=True) - + value = Parameter(NotImplemented, read_only=True) # replaces deletion in constructor below class _CompilationData(ParametersBase): execution = None @@ -4109,7 +4109,7 @@ def __init__( # should be used instead - in the long run, we should look into possibly # populating both values and results, as it would be more consistent with # the behavior of components - del self.parameters.value + # del self.parameters.value # Call with context = COMPOSITION to avoid calling _check_initialization_status again self._analyze_graph(context=context) From a094f15d7ece04f8c057718ea3779ee8b1f37daa Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Fri, 15 Mar 2024 01:16:49 +0000 Subject: [PATCH 176/410] MaskedMappingProjection: also set matrix parameter port value --- .../components/projections/pathway/maskedmappingprojection.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/psyneulink/library/components/projections/pathway/maskedmappingprojection.py b/psyneulink/library/components/projections/pathway/maskedmappingprojection.py index d0973f13434..9f5c3ffa377 100644 --- a/psyneulink/library/components/projections/pathway/maskedmappingprojection.py +++ b/psyneulink/library/components/projections/pathway/maskedmappingprojection.py @@ -240,3 +240,6 @@ def _update_parameter_ports(self, runtime_params=None, context=None): matrix **= mask self.parameters.matrix._set(matrix, context) + # must manually update parameter port because super + # _update_parameter_ports already happened above + self.parameter_ports["matrix"].parameters.value._set(matrix, context) From 6fdb0433e74e17d767541abfc8d186022cb2aba8 Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Wed, 10 Apr 2024 02:27:48 +0000 Subject: [PATCH 177/410] utilities: add update_array_in_place function --- psyneulink/core/globals/utilities.py | 92 +++++++++++++++++++++++++++- tests/misc/test_utilities.py | 49 ++++++++++++++- 2 files changed, 139 insertions(+), 2 deletions(-) diff --git a/psyneulink/core/globals/utilities.py b/psyneulink/core/globals/utilities.py index 3205060377e..63232684996 100644 --- a/psyneulink/core/globals/utilities.py +++ b/psyneulink/core/globals/utilities.py @@ -151,7 +151,7 @@ 'scalar_distance', 'sinusoid', 'tensor_power', 'TEST_CONDTION', 'type_match', 'underscore_to_camelCase', 'UtilitiesError', 'unproxy_weakproxy', 'create_union_set', 'merge_dictionaries', - 'contains_type', 'is_numeric_scalar', 'try_extract_0d_array_item', 'fill_array', + 'contains_type', 'is_numeric_scalar', 'try_extract_0d_array_item', 'fill_array', 'update_array_in_place', ] logger = logging.getLogger(__name__) @@ -2284,3 +2284,93 @@ def extended_array_equal(a, b, equal_nan: bool = False) -> bool: return _extended_array_compare( a, b, functools.partial(np.array_equal, equal_nan=equal_nan) ) + + +def _check_array_attr_equiv(a, b, attr): + err_msg = '{0} is not a numpy.ndarray' + + try: + 
a_val = getattr(a, attr) + except AttributeError: + raise ValueError(err_msg.format(a)) + + try: + b_val = getattr(b, attr) + except AttributeError: + raise ValueError(err_msg.format(b)) + + if a_val != b_val: + raise ValueError(f'{attr}s {a_val} and {b_val} differ') + + +def _update_array_in_place( + target: np.ndarray, + source: np.ndarray, + casting: Literal['no', 'equiv', 'safe', 'same_kind', 'unsafe'], + _dry_run: bool, + _in_object_dtype: bool, +): + # enforce dtype equivalence when recursing in an object-dtype target + # array, because we won't know if np.copyto will succeed on every + # element until we try + if _in_object_dtype: + _check_array_attr_equiv(target, source, 'dtype') + + # enforce shape equivalence so that we know when the python-side + # values become incompatible with compiled structs + _check_array_attr_equiv(target, source, 'shape') + + if target.dtype == object: + len_target = len(target) + len_source = len(source) + + if len_source != len_target: + raise ValueError(f'lengths {len_target} and {len_source} differ') + + # check all elements before update to avoid partial update + if not _dry_run: + for i in range(len_target): + _update_array_in_place( + target[i], + source[i], + casting=casting, + _dry_run=True, + _in_object_dtype=True, + ) + + for i in range(len_target): + _update_array_in_place( + target[i], + source[i], + casting=casting, + _dry_run=_dry_run, + _in_object_dtype=True, + ) + else: + np.broadcast(source, target) # only here to throw error if broadcast fails + if not _dry_run: + np.copyto(target, source, casting=casting) + + +def update_array_in_place( + target: np.ndarray, + source: np.ndarray, + casting: Literal['no', 'equiv', 'safe', 'same_kind', 'unsafe'] = 'same_kind', +): + """ + Copies the values in **source** to **target**, supporting ragged + object-dtype arrays. + + Args: + target (numpy.ndarray): array receiving values + source (numpy.ndarray): array providing values + casting (Literal["no", "equiv", "safe", "same_kind", "unsafe"], + optional): See `numpy.copyto`. Defaults to 'same_kind'. 
+ """ + _update_array_in_place( + target=target, + source=source, + casting=casting, + _dry_run=False, + _in_object_dtype=False + ) diff --git a/tests/misc/test_utilities.py b/tests/misc/test_utilities.py index 2f313cb741d..e86581331ba 100644 --- a/tests/misc/test_utilities.py +++ b/tests/misc/test_utilities.py @@ -3,7 +3,7 @@ import pytest from psyneulink.core.globals.utilities import ( - convert_all_elements_to_np_array, extended_array_equal, prune_unused_args, + convert_all_elements_to_np_array, extended_array_equal, prune_unused_args, update_array_in_place ) @@ -119,3 +119,50 @@ def test_extended_array_equal_irregular(a, b, equal): ) def test_extended_array_equal_irregular_identical(a): assert extended_array_equal(a, a) + + +@pytest.mark.parametrize( + 'target, source', + [ + ([[0, 0], [0, 0]], [[1, 1], [1, 1]]), + ([[0], [0, 0]], [[1], [1, 1]]), + ] +) +def test_update_array_in_place(target, source): + target = convert_all_elements_to_np_array(target) + source = convert_all_elements_to_np_array(source) + old_target = target + + update_array_in_place(target, source) + + len_target = len(target) + assert len_target == len(source) + assert len_target == len(old_target) + for i in range(len_target): + np.testing.assert_array_equal(target[i], source[i]) + np.testing.assert_array_equal(old_target[i], source[i]) + np.testing.assert_array_equal(target[i], old_target[i]) + + +@pytest.mark.parametrize( + 'target, source', + [ + ([[0], [0, 0]], [[1], [1, 1, 1]]), + ([0, [0, 0]], [[1], [1, 1]]), + ] +) +def test_update_array_in_place_failures(target, source): + target = convert_all_elements_to_np_array(target) + source = convert_all_elements_to_np_array(source) + old_target = target + + with pytest.raises(ValueError): + update_array_in_place(target, source) + + len_target = len(target) + assert len_target == len(source) + assert len_target == len(old_target) + for i in range(len_target): + assert not np.array_equal(target[i], source[i]) + assert not np.array_equal(old_target[i], source[i]) + np.testing.assert_array_equal(target[i], old_target[i]) From 5103f33e2434b25058510b99911930576ed894b8 Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Wed, 14 Feb 2024 01:55:34 +0000 Subject: [PATCH 178/410] Parameter: on set, update compatible numpy arrays in place using the same memory addresses for Parameter.values allows sharing with compiled structures where possible --- psyneulink/core/components/component.py | 47 +++++++++++++----- .../core/components/functions/function.py | 12 +++-- .../nonstateful/distributionfunctions.py | 12 ++--- .../nonstateful/learningfunctions.py | 4 +- .../nonstateful/objectivefunctions.py | 4 +- .../nonstateful/optimizationfunctions.py | 2 +- .../nonstateful/selectionfunctions.py | 2 +- .../nonstateful/transferfunctions.py | 37 +++----------- .../functions/stateful/integratorfunctions.py | 6 +-- .../functions/stateful/memoryfunctions.py | 12 ++--- .../core/components/mechanisms/mechanism.py | 6 +++ .../processing/transfermechanism.py | 8 ++-- .../core/components/ports/outputport.py | 4 +- psyneulink/core/components/ports/port.py | 31 +++++++----- .../projections/pathway/mappingprojection.py | 4 +- psyneulink/core/compositions/composition.py | 25 +++++----- psyneulink/core/globals/parameters.py | 48 ++++++++++++++++--- .../modulatory/learning/EMstoragemechanism.py | 4 +- .../mechanisms/processing/integrator/ddm.py | 2 +- .../transfer/recurrenttransfermechanism.py | 8 ++-- .../pathway/maskedmappingprojection.py | 4 +- .../library/compositions/compositionrunner.py | 3 +- 
.../library/compositions/emcomposition.py | 2 +- tests/functions/test_buffer.py | 2 +- tests/functions/test_transfer.py | 6 +++ 25 files changed, 179 insertions(+), 116 deletions(-) diff --git a/psyneulink/core/components/component.py b/psyneulink/core/components/component.py index 2770ed04f5e..8810025af68 100644 --- a/psyneulink/core/components/component.py +++ b/psyneulink/core/components/component.py @@ -530,7 +530,7 @@ RESET_STATEFUL_FUNCTION_WHEN, SIZE, VALUE, VARIABLE, SHARED_COMPONENT_TYPES from psyneulink.core.globals.log import LogCondition from psyneulink.core.globals.parameters import \ - Defaults, SharedParameter, Parameter, ParameterAlias, ParameterError, ParametersBase, check_user_specified, copy_parameter_value + Defaults, SharedParameter, Parameter, ParameterAlias, ParameterError, ParametersBase, check_user_specified, copy_parameter_value, is_array_like from psyneulink.core.globals.preferences.basepreferenceset import BasePreferenceSet, VERBOSE_PREF from psyneulink.core.globals.preferences.preferenceset import \ PreferenceLevel, PreferenceSet, _assign_prefs @@ -539,7 +539,7 @@ from psyneulink.core.globals.utilities import \ ContentAddressableList, convert_all_elements_to_np_array, convert_to_np_array, get_deepcopy_with_shared, \ is_instance_or_subclass, is_matrix, iscompatible, kwCompatibilityLength, \ - get_all_explicit_arguments, is_numeric, call_with_pruned_args, safe_equals, safe_len, parse_valid_identifier, try_extract_0d_array_item + get_all_explicit_arguments, is_numeric, call_with_pruned_args, safe_equals, safe_len, parse_valid_identifier, try_extract_0d_array_item, contains_type from psyneulink.core.scheduling.condition import Never from psyneulink.core.scheduling.time import Time, TimeScale @@ -660,7 +660,16 @@ def getter(self): assert p.modulable return getattr(self, _get_parametervalue_attr(p)) else: - return p._get(self.most_recent_context) + # _get does handle stateful case, but checking here avoids + # extra overhead for most_recent_context and external get. + # external get is being used so that dot-notation returns a + # copy of stored numpy arrays. dot-notation is also often + # used internally for non-stateful parameters, like + # function, input_ports, output_ports, etc. 
+ if not p.stateful: + return p._get() + else: + return p.get(self.most_recent_context) def setter(self, value): p = getattr(self.parameters, param.name) @@ -1139,9 +1148,10 @@ def __init__(self, default_variable = self.defaults.variable else: default_variable = var - self.defaults.variable = copy.deepcopy(default_variable) self.parameters.variable._user_specified = True + self.defaults.variable = copy.deepcopy(default_variable) + self.parameters.variable._set( copy_parameter_value(default_variable), context=context, @@ -1908,11 +1918,7 @@ def _check_args(self, variable=None, params=None, context=None, target_set=None) # If function is called without any arguments, get default for variable if variable is None: - try: - # assigned by the Function class init when initializing - variable = self.defaults.variable - except AttributeError: - variable = self.class_defaults.variable + variable = self.defaults.variable variable = copy_parameter_value(variable) @@ -1961,6 +1967,7 @@ def generate_error(param_name): raise ComponentError(err_msg) if isinstance(runtime_params, dict): + runtime_params = copy_parameter_value(runtime_params) for param_name in runtime_params: if not isinstance(param_name, str): generate_error(param_name) @@ -1969,8 +1976,9 @@ def generate_error(param_name): generate_error(param_name) if context.execution_id not in self._runtime_params_reset: self._runtime_params_reset[context.execution_id] = {} - self._runtime_params_reset[context.execution_id][param_name] = getattr(self.parameters, - param_name)._get(context) + self._runtime_params_reset[context.execution_id][param_name] = copy_parameter_value( + getattr(self.parameters, param_name)._get(context) + ) if is_numeric(runtime_params[param_name]): runtime_value = convert_all_elements_to_np_array(runtime_params[param_name]) else: @@ -2328,9 +2336,21 @@ def _initialize_parameters(self, context=None, **param_defaults): if p._user_specified: val = param_defaults[p.name] + # ideally, this would include deepcopying any + # Function objects with a non-None owner in val. + # Avoiding universal deep copy for iterables + # containing Functions here ensures that a list (ex. + # noise) containing other objects and a Function + # will use the actual Function passed in and not a + # copy. 
Not copying - as was done prior to this + # comment - should only be a problem if internal + # code passes such an object that is also used + # elsewhere if isinstance(val, Function): if val.owner is not None: val = copy.deepcopy(val) + elif not contains_type(val, Function): + val = copy_parameter_value(val, shared_types=shared_types) else: val = copy_parameter_value( p.default_value, @@ -4419,10 +4439,13 @@ def modulated(self): # because the port existing doesn't necessarily mean modulation # is actually happening if self._parameter.port is not None: - return self._parameter.port.owner._get_current_parameter_value( + res = self._parameter.port.owner._get_current_parameter_value( self._parameter, self._owner.most_recent_context ) + if is_array_like(res): + res = copy_parameter_value(res) + return res else: warnings.warn( f'{self._parameter.name} is not currently modulated in most' diff --git a/psyneulink/core/components/functions/function.py b/psyneulink/core/components/functions/function.py index 16f703f331e..ea7d8e7c82d 100644 --- a/psyneulink/core/components/functions/function.py +++ b/psyneulink/core/components/functions/function.py @@ -183,8 +183,12 @@ ] EPSILON = np.finfo(float).eps + + # numeric to allow modulation, invalid to identify unseeded state -DEFAULT_SEED = np.array(-1) +def DEFAULT_SEED(): + return np.array(-1) + FunctionRegistry = {} @@ -344,7 +348,7 @@ def _output_type_setter(value, owning_component): def _seed_setter(value, owning_component, context): value = try_extract_0d_array_item(value) - if value is None or value == DEFAULT_SEED: + if value is None or value == DEFAULT_SEED(): value = get_global_seed() # Remove any old PRNG state @@ -367,7 +371,7 @@ def _random_state_getter(self, owning_component, context, modulated=False): else: seed_value = [int(seed_param._get(context=context))] - if seed_value == [DEFAULT_SEED]: + if seed_value == [DEFAULT_SEED()]: raise FunctionError( "Invalid seed for {} in context: {} ({})".format( owning_component, context.execution_id, seed_param @@ -678,7 +682,7 @@ def __deepcopy__(self, memo): # functions with "random_state" param must have "seed" parameter for ctx in new.parameters.seed.values: new.parameters.seed.set( - DEFAULT_SEED, ctx, skip_log=True, skip_history=True + DEFAULT_SEED(), ctx, skip_log=True, skip_history=True ) return new diff --git a/psyneulink/core/components/functions/nonstateful/distributionfunctions.py b/psyneulink/core/components/functions/nonstateful/distributionfunctions.py index 0021b9946a3..b5126da2946 100644 --- a/psyneulink/core/components/functions/nonstateful/distributionfunctions.py +++ b/psyneulink/core/components/functions/nonstateful/distributionfunctions.py @@ -160,7 +160,7 @@ class Parameters(DistributionFunction.Parameters): mean = Parameter(0.0, modulable=True, aliases=[ADDITIVE_PARAM]) standard_deviation = Parameter(1.0, modulable=True, aliases=[MULTIPLICATIVE_PARAM], mdf_name='scale') random_state = Parameter(None, loggable=False, getter=_random_state_getter, dependencies='seed') - seed = Parameter(DEFAULT_SEED, modulable=True, fallback_default=True, setter=_seed_setter) + seed = Parameter(DEFAULT_SEED(), modulable=True, fallback_default=True, setter=_seed_setter) @check_user_specified @beartype @@ -340,7 +340,7 @@ class Parameters(DistributionFunction.Parameters): :type: ``float`` """ random_state = Parameter(None, loggable=False, getter=_random_state_getter, dependencies='seed') - seed = Parameter(DEFAULT_SEED, modulable=True, fallback_default=True, setter=_seed_setter) + seed = 
Parameter(DEFAULT_SEED(), modulable=True, fallback_default=True, setter=_seed_setter) variable = Parameter(np.array([0]), read_only=True, pnl_internal=True, constructor_argument='default_variable') mean = Parameter(0.0, modulable=True, aliases=[ADDITIVE_PARAM]) standard_deviation = Parameter(1.0, modulable=True, aliases=[MULTIPLICATIVE_PARAM]) @@ -465,7 +465,7 @@ class Parameters(DistributionFunction.Parameters): """ beta = Parameter(1.0, modulable=True, aliases=[MULTIPLICATIVE_PARAM]) random_state = Parameter(None, loggable=False, getter=_random_state_getter, dependencies='seed') - seed = Parameter(DEFAULT_SEED, modulable=True, fallback_default=True, setter=_seed_setter) + seed = Parameter(DEFAULT_SEED(), modulable=True, fallback_default=True, setter=_seed_setter) @check_user_specified @beartype @@ -592,7 +592,7 @@ class Parameters(DistributionFunction.Parameters): low = Parameter(0.0, modulable=True) high = Parameter(1.0, modulable=True) random_state = Parameter(None, loggable=False, getter=_random_state_getter, dependencies='seed') - seed = Parameter(DEFAULT_SEED, modulable=True, fallback_default=True, setter=_seed_setter) + seed = Parameter(DEFAULT_SEED(), modulable=True, fallback_default=True, setter=_seed_setter) @check_user_specified @beartype @@ -748,7 +748,7 @@ class Parameters(DistributionFunction.Parameters): :type: ``float`` """ random_state = Parameter(None, loggable=False, getter=_random_state_getter, dependencies='seed') - seed = Parameter(DEFAULT_SEED, modulable=True, fallback_default=True, setter=_seed_setter) + seed = Parameter(DEFAULT_SEED(), modulable=True, fallback_default=True, setter=_seed_setter) scale = Parameter(1.0, modulable=True, aliases=[MULTIPLICATIVE_PARAM]) dist_shape = Parameter(1.0, modulable=True, aliases=[ADDITIVE_PARAM]) @@ -883,7 +883,7 @@ class Parameters(DistributionFunction.Parameters): :type: ``float`` """ random_state = Parameter(None, loggable=False, getter=_random_state_getter, dependencies='seed') - seed = Parameter(DEFAULT_SEED, modulable=True, fallback_default=True, setter=_seed_setter) + seed = Parameter(DEFAULT_SEED(), modulable=True, fallback_default=True, setter=_seed_setter) scale = Parameter(1.0, modulable=True, aliases=[MULTIPLICATIVE_PARAM]) mean = Parameter(1.0, modulable=True, aliases=[ADDITIVE_PARAM]) diff --git a/psyneulink/core/components/functions/nonstateful/learningfunctions.py b/psyneulink/core/components/functions/nonstateful/learningfunctions.py index de5970e9242..22593ed0d27 100644 --- a/psyneulink/core/components/functions/nonstateful/learningfunctions.py +++ b/psyneulink/core/components/functions/nonstateful/learningfunctions.py @@ -349,7 +349,7 @@ class Parameters(LearningFunction.Parameters): storage_prob = Parameter(1.0, modulable=True) decay_rate = Parameter(0.0, modulable=True) random_state = Parameter(None, loggable=False, getter=_random_state_getter, dependencies='seed') - seed = Parameter(DEFAULT_SEED, modulable=True, fallback_default=True, setter=_seed_setter) + seed = Parameter(DEFAULT_SEED(), modulable=True, fallback_default=True, setter=_seed_setter) def _validate_storage_prob(self, storage_prob): storage_prob = float(storage_prob) @@ -767,7 +767,7 @@ class Parameters(LearningFunction.Parameters): :type: ``int`` """ random_state = Parameter(None, loggable=False, getter=_random_state_getter, dependencies='seed') - seed = Parameter(DEFAULT_SEED, modulable=True, fallback_default=True, setter=_seed_setter) + seed = Parameter(DEFAULT_SEED(), modulable=True, fallback_default=True, setter=_seed_setter) variable = 
Parameter([np.array([0, 0, 0]), np.array([0])], read_only=True, diff --git a/psyneulink/core/components/functions/nonstateful/objectivefunctions.py b/psyneulink/core/components/functions/nonstateful/objectivefunctions.py index 8d011e67e99..740edf90c55 100644 --- a/psyneulink/core/components/functions/nonstateful/objectivefunctions.py +++ b/psyneulink/core/components/functions/nonstateful/objectivefunctions.py @@ -34,7 +34,7 @@ DEFAULT_VARIABLE, DIFFERENCE, DISTANCE_FUNCTION, DISTANCE_METRICS, DOT_PRODUCT, \ ENERGY, ENTROPY, EUCLIDEAN, HOLLOW_MATRIX, MATRIX, MAX_ABS_DIFF, NORMALIZE, \ NORMED_L0_SIMILARITY, OBJECTIVE_FUNCTION_TYPE, SIZE, STABILITY_FUNCTION -from psyneulink.core.globals.parameters import FunctionParameter, Parameter, check_user_specified +from psyneulink.core.globals.parameters import FunctionParameter, Parameter, check_user_specified, copy_parameter_value from psyneulink.core.globals.preferences.basepreferenceset import ValidPrefSet from psyneulink.core.globals.utilities import DistanceMetricLiteral, safe_len, convert_to_np_array, convert_all_elements_to_np_array from psyneulink.core.globals.utilities import is_iterable @@ -388,7 +388,7 @@ def _update_default_variable(self, new_default_variable, context): elif isinstance(matrix, ParameterPort): pass else: - matrix = get_matrix(self.defaults.matrix, size, size) + matrix = get_matrix(copy_parameter_value(self.defaults.matrix), size, size) self.parameters.matrix._set(matrix, context) diff --git a/psyneulink/core/components/functions/nonstateful/optimizationfunctions.py b/psyneulink/core/components/functions/nonstateful/optimizationfunctions.py index 79cd4f022d5..2e157d7b324 100644 --- a/psyneulink/core/components/functions/nonstateful/optimizationfunctions.py +++ b/psyneulink/core/components/functions/nonstateful/optimizationfunctions.py @@ -1607,7 +1607,7 @@ class Parameters(OptimizationFunction.Parameters): save_samples = Parameter(False, pnl_internal=True) save_values = Parameter(False, pnl_internal=True) random_state = Parameter(None, loggable=False, getter=_random_state_getter, dependencies='seed') - seed = Parameter(DEFAULT_SEED, modulable=True, fallback_default=True, setter=_seed_setter) + seed = Parameter(DEFAULT_SEED(), modulable=True, fallback_default=True, setter=_seed_setter) select_randomly_from_optimal_values = Parameter(False) direction = MAXIMIZE diff --git a/psyneulink/core/components/functions/nonstateful/selectionfunctions.py b/psyneulink/core/components/functions/nonstateful/selectionfunctions.py index 612530c1d63..164801fa377 100644 --- a/psyneulink/core/components/functions/nonstateful/selectionfunctions.py +++ b/psyneulink/core/components/functions/nonstateful/selectionfunctions.py @@ -190,7 +190,7 @@ class Parameters(SelectionFunction.Parameters): """ mode = Parameter(MAX_VAL, stateful=False) random_state = Parameter(None, loggable=False, getter=_random_state_getter, dependencies='seed') - seed = Parameter(DEFAULT_SEED, modulable=True, fallback_default=True, setter=_seed_setter) + seed = Parameter(DEFAULT_SEED(), modulable=True, fallback_default=True, setter=_seed_setter) def _validate_mode(self, mode): options = {MAX_VAL, MAX_ABS_VAL, MAX_INDICATOR, MAX_ABS_INDICATOR, diff --git a/psyneulink/core/components/functions/nonstateful/transferfunctions.py b/psyneulink/core/components/functions/nonstateful/transferfunctions.py index 5f1a93e7090..e1856919dfd 100644 --- a/psyneulink/core/components/functions/nonstateful/transferfunctions.py +++ 
b/psyneulink/core/components/functions/nonstateful/transferfunctions.py @@ -98,7 +98,7 @@ RATE, RECEIVER, RELU_FUNCTION, SCALE, SLOPE, SOFTMAX_FUNCTION, STANDARD_DEVIATION, SUM, \ TRANSFER_FUNCTION_TYPE, TRANSFER_WITH_COSTS_FUNCTION, VARIANCE, VARIABLE, X_0, PREFERENCE_SET_NAME from psyneulink.core.globals.parameters import \ - FunctionParameter, Parameter, get_validator_by_function, check_user_specified + FunctionParameter, Parameter, get_validator_by_function, check_user_specified, copy_parameter_value from psyneulink.core.globals.preferences.basepreferenceset import \ REPORT_OUTPUT_PREF, PreferenceEntry, PreferenceLevel, ValidPrefSet from psyneulink.core.globals.utilities import ValidParamSpecType, convert_all_elements_to_np_array, safe_len, is_matrix_keyword @@ -2342,7 +2342,7 @@ class Parameters(TransferFunction.Parameters): scale = Parameter(1.0, modulable=True) offset = Parameter(0.0, modulable=True) random_state = Parameter(None, loggable=False, getter=_random_state_getter, dependencies='seed') - seed = Parameter(DEFAULT_SEED, modulable=True, fallback_default=True, setter=_seed_setter) + seed = Parameter(DEFAULT_SEED(), modulable=True, fallback_default=True, setter=_seed_setter) bounds = (None, None) @check_user_specified @@ -2566,7 +2566,7 @@ class Parameters(TransferFunction.Parameters): """ p = Parameter(0.5, modulable=True, aliases=[MULTIPLICATIVE_PARAM]) random_state = Parameter(None, loggable=False, getter=_random_state_getter, dependencies='seed') - seed = Parameter(DEFAULT_SEED, modulable=True, fallback_default=True, setter=_seed_setter) + seed = Parameter(DEFAULT_SEED(), modulable=True, fallback_default=True, setter=_seed_setter) bounds = (None, None) @check_user_specified @@ -2787,7 +2787,7 @@ class Parameters(TransferFunction.Parameters): """ p = Parameter(0.5, modulable=True, aliases=[MULTIPLICATIVE_PARAM]) random_state = Parameter(None, loggable=False, getter=_random_state_getter, dependencies='seed') - seed = Parameter(DEFAULT_SEED, modulable=True, fallback_default=True, setter=_seed_setter) + seed = Parameter(DEFAULT_SEED(), modulable=True, fallback_default=True, setter=_seed_setter) @check_user_specified @beartype @@ -3827,7 +3827,7 @@ def _validate_params(self, request_set, target_set=None, context=None): def _instantiate_attributes_before_function(self, function=None, context=None): # replicates setting of receiver in _validate_params if isinstance(self.owner, Projection): - self.receiver = self.defaults.variable + self.receiver = copy_parameter_value(self.defaults.variable) matrix = self.parameters.matrix._get(context) @@ -3854,7 +3854,7 @@ def instantiate_matrix(self, specification, context=None): if isinstance(specification, np.matrix): return np.array(specification) - sender = self.defaults.variable + sender = copy_parameter_value(self.defaults.variable) sender_len = sender.shape[0] try: receiver = self.receiver @@ -4676,32 +4676,9 @@ def instantiate_fct(fct_name, fct): raise FunctionError(f"{fct} is not a valid cost function for {fct_name}.") self.intensity_cost_fct = instantiate_fct(INTENSITY_COST_FUNCTION, self.intensity_cost_fct) - # Initialize default_value for TransferWithCosts' modulation params from intensity_cost_fct's values - self.parameters.intensity_cost_fct_mult_param.default_value = \ - self.parameters.intensity_cost_fct_mult_param.get() - self.parameters.intensity_cost_fct_add_param.default_value = \ - self.parameters.intensity_cost_fct_add_param.get() - self.adjustment_cost_fct = instantiate_fct(ADJUSTMENT_COST_FUNCTION, 
self.adjustment_cost_fct) - # Initialize default_value for TransferWithCosts' modulation params from adjustment_cost_fct's values - self.parameters.adjustment_cost_fct_mult_param.default_value = \ - self.parameters.adjustment_cost_fct_mult_param.get() - self.parameters.adjustment_cost_fct_add_param.default_value = \ - self.parameters.adjustment_cost_fct_add_param.get() - self.duration_cost_fct = instantiate_fct(DURATION_COST_FUNCTION, self.duration_cost_fct) - # Initialize default_value for TransferWithCosts' modulation params from duration_cost_fct's values - self.parameters.duration_cost_fct_mult_param.default_value = \ - self.parameters.duration_cost_fct_add_param.get() - self.parameters.duration_cost_fct_add_param.default_value = \ - self.parameters.duration_cost_fct_add_param.get() - self.combine_costs_fct = instantiate_fct(COMBINE_COSTS_FUNCTION, self.combine_costs_fct) - # Initialize default_value for TransferWithCosts' modulation params from combined_costs_fct's values - self.parameters.combine_costs_fct_mult_param.default_value = \ - self.parameters.combine_costs_fct_mult_param.get() - self.parameters.combine_costs_fct_add_param.default_value = \ - self.parameters.combine_costs_fct_add_param.get() # Initialize intensity attributes if self.enabled_cost_functions: @@ -4792,7 +4769,7 @@ def _function(self, self.parameters.combined_costs._set(combined_costs, context) # Store current intensity - self.parameters.intensity._set(intensity, context) + self.parameters.intensity._set(copy_parameter_value(intensity), context) return intensity diff --git a/psyneulink/core/components/functions/stateful/integratorfunctions.py b/psyneulink/core/components/functions/stateful/integratorfunctions.py index 47d49206168..6e815961984 100644 --- a/psyneulink/core/components/functions/stateful/integratorfunctions.py +++ b/psyneulink/core/components/functions/stateful/integratorfunctions.py @@ -2413,7 +2413,7 @@ class Parameters(IntegratorFunction.Parameters): time_step_size = Parameter(1.0, modulable=True) previous_time = Parameter(0.0, initializer='non_decision_time') random_state = Parameter(None, loggable=False, getter=_random_state_getter, dependencies='seed') - seed = Parameter(DEFAULT_SEED, modulable=True, fallback_default=True, setter=_seed_setter) + seed = Parameter(DEFAULT_SEED(), modulable=True, fallback_default=True, setter=_seed_setter) enable_output_type_conversion = Parameter( False, stateful=False, @@ -2971,7 +2971,7 @@ class Parameters(IntegratorFunction.Parameters): initializer = Parameter([0, 0], initalizer='variable', dependencies=dimension, stateful=True) angle_function = Parameter(None, stateful=False, loggable=False) random_state = Parameter(None, loggable=False, getter=_random_state_getter, dependencies='seed') - seed = Parameter(DEFAULT_SEED, modulable=True, fallback_default=True, setter=_seed_setter) + seed = Parameter(DEFAULT_SEED(), modulable=True, fallback_default=True, setter=_seed_setter) enable_output_type_conversion = Parameter( False, stateful=False, @@ -3440,7 +3440,7 @@ class Parameters(IntegratorFunction.Parameters): non_decision_time = Parameter(0.0, modulable=True) previous_time = Parameter(0.0, initializer='non_decision_time', pnl_internal=True) random_state = Parameter(None, loggable=False, getter=_random_state_getter, dependencies='seed') - seed = Parameter(DEFAULT_SEED, modulable=True, fallback_default=True, setter=_seed_setter) + seed = Parameter(DEFAULT_SEED(), modulable=True, fallback_default=True, setter=_seed_setter) enable_output_type_conversion = Parameter( 
False, stateful=False, diff --git a/psyneulink/core/components/functions/stateful/memoryfunctions.py b/psyneulink/core/components/functions/stateful/memoryfunctions.py index 2fb8ae9411b..7c7f307897d 100644 --- a/psyneulink/core/components/functions/stateful/memoryfunctions.py +++ b/psyneulink/core/components/functions/stateful/memoryfunctions.py @@ -50,7 +50,7 @@ ADDITIVE_PARAM, BUFFER_FUNCTION, MEMORY_FUNCTION, COSINE, \ ContentAddressableMemory_FUNCTION, DictionaryMemory_FUNCTION, \ MIN_INDICATOR, MULTIPLICATIVE_PARAM, NEWEST, NOISE, OLDEST, OVERWRITE, RATE, RANDOM, SINGLE, WEIGHTED -from psyneulink.core.globals.parameters import Parameter, check_user_specified +from psyneulink.core.globals.parameters import Parameter, check_user_specified, copy_parameter_value from psyneulink.core.globals.preferences.basepreferenceset import ValidPrefSet from psyneulink.core.globals.utilities import \ all_within_range, convert_all_elements_to_np_array, convert_to_np_array, convert_to_list, is_numeric_scalar @@ -361,7 +361,7 @@ def _function(self, maxlen=maxlen.item() if maxlen is not None else None ) - previous_value.append(variable) + previous_value.append(copy_parameter_value(variable)) self.parameters.previous_value._set(previous_value, context) return self.convert_output_type(previous_value) @@ -1191,7 +1191,7 @@ class Parameters(StatefulFunction.Parameters): ) max_entries = Parameter(1000) random_state = Parameter(None, loggable=False, getter=_random_state_getter, dependencies='seed') - seed = Parameter(DEFAULT_SEED, modulable=True, fallback_default=True, setter=_seed_setter) + seed = Parameter(DEFAULT_SEED(), modulable=True, fallback_default=True, setter=_seed_setter) distance_function = Parameter(Distance(metric=COSINE), stateful=False, loggable=False) selection_function = Parameter(OneHot(mode=MIN_INDICATOR), stateful=False, loggable=False, dependencies='distance_function') distance = Parameter(0, stateful=True, read_only=True) @@ -1495,7 +1495,7 @@ def _function(self, # Retrieve entry from memory that best matches variable if retrieval_prob == 1.0 or (retrieval_prob > 0.0 and retrieval_prob > random_state.uniform()): - entry = self.get_memory(variable, distance_field_weights, context).copy() + entry = copy_parameter_value(self.get_memory(variable, distance_field_weights, context)) else: # QUESTION: SHOULD IT RETURN ZERO VECTOR OR NOT RETRIEVE AT ALL (LEAVING VALUE AND OutputPort FROM LAST TRIAL)? 
# CURRENT PROBLEM WITH LATTER IS THAT IT CAUSES CRASH ON INIT, SINCE NOT OUTPUT_PORT @@ -1859,7 +1859,7 @@ def delete_from_memory(self, fields = convert_to_list(fields) existing_memory = self.parameters.previous_value._get(context) - pruned_memory = existing_memory.copy() + pruned_memory = copy_parameter_value(existing_memory) for entry, memory in product(entries, existing_memory): if (np.all(entry == memory) or fields and all(entry[f] == memory[f] for f in fields)): @@ -2252,7 +2252,7 @@ class Parameters(StatefulFunction.Parameters): ) max_entries = Parameter(1000) random_state = Parameter(None, loggable=False, getter=_random_state_getter, dependencies='seed') - seed = Parameter(DEFAULT_SEED, modulable=True, fallback_default=True, setter=_seed_setter) + seed = Parameter(DEFAULT_SEED(), modulable=True, fallback_default=True, setter=_seed_setter) distance_function = Parameter(Distance(metric=COSINE), stateful=False, loggable=False) selection_function = Parameter(OneHot(mode=MIN_INDICATOR), stateful=False, loggable=False) diff --git a/psyneulink/core/components/mechanisms/mechanism.py b/psyneulink/core/components/mechanisms/mechanism.py index b9a93581d6a..f65e1c495ec 100644 --- a/psyneulink/core/components/mechanisms/mechanism.py +++ b/psyneulink/core/components/mechanisms/mechanism.py @@ -2450,6 +2450,8 @@ def execute(self, return_value = self._execute(variable=copy_parameter_value(self.defaults.variable), context=context, runtime_params=runtime_params) + if context.source is ContextFlags.COMMAND_LINE: + return_value = copy_parameter_value(return_value) # IMPLEMENTATION NOTE: THIS IS HERE BECAUSE IF return_value IS A LIST, AND THE LENGTH OF ALL OF ITS # ELEMENTS ALONG ALL DIMENSIONS ARE EQUAL (E.G., A 2X2 MATRIX PAIRED WITH AN @@ -2618,6 +2620,10 @@ def execute(self, context=context, node=self) + # return copy on external call so users can store it directly + # without it changing + if context.source is ContextFlags.COMMAND_LINE: + value = copy_parameter_value(value) return value def _get_variable_from_input(self, input, context=None): diff --git a/psyneulink/core/components/mechanisms/processing/transfermechanism.py b/psyneulink/core/components/mechanisms/processing/transfermechanism.py index 5ecbc2826d9..e2dedd3d303 100644 --- a/psyneulink/core/components/mechanisms/processing/transfermechanism.py +++ b/psyneulink/core/components/mechanisms/processing/transfermechanism.py @@ -670,7 +670,7 @@ >>> my_mech.execute([0.5, 1]) array([[0.46875, 0.9375 ]]) >>> my_mech.num_executions_before_finished - array(4) + 4 Here, ``my_mech`` continued to execute for ``5`` times, until the element of the Mechanism's `value ` with the greatest value exceeded ``0.9``. 
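(Illustrative sketch of why the shallow ``.copy()`` calls in the hunks above are replaced with copy_parameter_value: a shallow copy of a list of arrays still shares the inner arrays, so later in-place updates leak into the "copied" snapshot. Plain copy.deepcopy stands in here for what copy_parameter_value is assumed to provide for ordinary numeric containers.)

    import copy
    import numpy as np

    stored = [np.array([1.0, 2.0]), np.array([3.0])]   # e.g. a ragged previous_value

    shallow = stored.copy()        # new outer list, but the same inner arrays
    deep = copy.deepcopy(stored)   # independent snapshot of the inner arrays

    stored[0][:] = 0.0             # subsequent in-place update of the stored arrays
    print(shallow[0])              # [0. 0.] -- the shallow snapshot changed too
    print(deep[0])                 # [1. 2.] -- the deep snapshot is preserved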
Note that GREATER_THAN_EQUAL is a keyword for @@ -694,7 +694,7 @@ >>> my_mech.execute([0.5, 1]) array([[0.375, 0.75 ]]) >>> my_mech.num_executions_before_finished - array(2) + 2 As noted `above `, it will continue to execute if it is called again, but only once per call:: @@ -702,11 +702,11 @@ >>> my_mech.execute([0.5, 1]) array([[0.4375, 0.875 ]]) >>> my_mech.num_executions_before_finished - array(1) + 1 >>> my_mech.execute([0.5, 1]) array([[0.46875, 0.9375 ]]) >>> my_mech.num_executions_before_finished - array(1) + 1 In the following example, this behavior is exploited to allow a recurrent form of TransferMechanism (``attention``) to integrate for a fixed number of steps (e.g., to simulate the time taken to encode an instruction regarding the diff --git a/psyneulink/core/components/ports/outputport.py b/psyneulink/core/components/ports/outputport.py index 9b3ff0d3a19..fa46d5a54e6 100644 --- a/psyneulink/core/components/ports/outputport.py +++ b/psyneulink/core/components/ports/outputport.py @@ -634,7 +634,7 @@ OWNER_VALUE, PARAMS, PARAMS_DICT, PROJECTION, PROJECTIONS, RECEIVER, REFERENCE_VALUE, STANDARD_OUTPUT_PORTS, PORT, \ VALUE, VARIABLE, \ output_port_spec_to_parameter_name, INPUT_PORT_VARIABLES, SHARED_COMPONENT_TYPES -from psyneulink.core.globals.parameters import Parameter, check_user_specified +from psyneulink.core.globals.parameters import Parameter, check_user_specified, copy_parameter_value from psyneulink.core.globals.context import Context from psyneulink.core.globals.preferences.basepreferenceset import ValidPrefSet from psyneulink.core.globals.preferences.preferenceset import PreferenceLevel @@ -752,7 +752,7 @@ def parse_variable_spec(spec): if fct_variable is not None: fct_variable = convert_all_elements_to_np_array(fct_variable) - return fct_variable + return copy_parameter_value(fct_variable) def _output_port_variable_getter(owning_component=None, context=None, output_port_name=None): diff --git a/psyneulink/core/components/ports/port.py b/psyneulink/core/components/ports/port.py index fe34fe2c9f5..80993bd44dd 100644 --- a/psyneulink/core/components/ports/port.py +++ b/psyneulink/core/components/ports/port.py @@ -800,7 +800,7 @@ def test_multiple_modulatory_projections_with_mech_and_port_Name_specs(self): RECEIVER, REFERENCE_VALUE, REFERENCE_VALUE_NAME, SENDER, STANDARD_OUTPUT_PORTS, \ PORT, PORT_COMPONENT_CATEGORY, PORT_CONTEXT, Port_Name, port_params, PORT_PREFS, PORT_TYPE, port_value, \ VALUE, VARIABLE, WEIGHT -from psyneulink.core.globals.parameters import Parameter, check_user_specified +from psyneulink.core.globals.parameters import Parameter, check_user_specified, copy_parameter_value from psyneulink.core.globals.preferences.basepreferenceset import VERBOSE_PREF from psyneulink.core.globals.preferences.preferenceset import PreferenceLevel from psyneulink.core.globals.registry import register_category @@ -1506,7 +1506,7 @@ def _instantiate_projections_to_port(self, projections, context=None): # assign identical default variable to function if it can be modified if self.function._variable_shape_flexibility is DefaultsFlexibility.FLEXIBLE: - self.function.defaults.variable = self.defaults.variable.copy() + self.function.defaults.variable = copy_parameter_value(self.defaults.variable) elif ( self.function._variable_shape_flexibility is DefaultsFlexibility.INCREASE_DIMENSION and np.array([self.function.defaults.variable]).shape == self.defaults.variable.shape @@ -1937,17 +1937,22 @@ def _update(self, params=None, context=None): # Copy all items in outer level of 
params to local_params (i.e., excluding its subdicts) local_params = defaultdict(lambda:{}, {k:v for k,v in params.items() if not isinstance(v,dict)}) # Get rid of items in params specific to this Port - for entry in params[PORT_SPECIFIC_PARAMS].copy(): + for entry in copy_parameter_value(params[PORT_SPECIFIC_PARAMS]): if entry in {self, self.name}: # Move param from params to local_params local_params.update(params[PORT_SPECIFIC_PARAMS].pop(entry)) # Put copy of all type-specific Projection dicts from params into local_params # FIX: ON FIRST PASS ALSO CREATES THOSE DICTS IN params IF THEY DON'T ALREADY EXIST - projection_params = defaultdict(lambda:{}, {proj_type:params[proj_type].copy() - for proj_type in projection_param_keywords()}) + projection_params = defaultdict( + lambda: {}, + { + proj_type: copy_parameter_value(params[proj_type]) + for proj_type in projection_param_keywords() + }, + ) - for entry in params[PROJECTION_SPECIFIC_PARAMS].copy(): + for entry in copy_parameter_value(params[PROJECTION_SPECIFIC_PARAMS]): if self.all_afferents and entry in self.all_afferents + [p.name for p in self.all_afferents]: if isinstance(entry, str): projection_type = next(p for p in self.all_afferents if p.name ==entry).componentType @@ -2023,7 +2028,7 @@ def set_projection_value(projection, value, context): # Get type-specific params that apply for type of current projection_params_keyword = projection_param_keyword_mapping()[projection.componentType] - projection_type_params = projection_params[projection_params_keyword].copy() + projection_type_params = copy_parameter_value(projection_params[projection_params_keyword]) # Get Projection's variable and/or value if specified in runtime_port_params projection_variable = projection_type_params.pop(VARIABLE, None) @@ -2156,7 +2161,7 @@ def _execute(self, variable=None, context=None, runtime_params=None): # KDM 8/2/19: double check the relevance of this branch if variable is None: if hasattr(self, DEFAULT_INPUT) and self.default_input == DEFAULT_VARIABLE: - return self.defaults.variable + return copy_parameter_value(self.defaults.variable) return None return super()._execute( @@ -2536,7 +2541,7 @@ def _instantiate_port_list(owner, # If no Ports were passed in, instantiate a default port_type using reference_value if not port_list: # assign reference_value as single item in a list, to be used as port_spec below - port_list = reference_value + port_list = copy_parameter_value(reference_value) # issue warning if in VERBOSE mode: if owner.prefs.verbosePref: @@ -2895,6 +2900,8 @@ def _parse_port_spec(port_type=None, from psyneulink.core.components.mechanisms.modulatory.modulatorymechanism import ModulatoryMechanism_Base from psyneulink.core.components.projections.projection import _get_projection_value_shape + value = copy_parameter_value(value) + # Get all of the standard arguments passed from _instantiate_port (i.e., those other than port_spec) into a dict standard_args = get_args(inspect.currentframe()) @@ -2961,7 +2968,7 @@ def _parse_port_spec(port_type=None, # Use the value of any standard args specified in the Port specification dictionary # to replace those explicitly specified in the call to _instantiate_port (i.e., passed in standard_args) # (use copy so that items in port_spec dict are not deleted when called from _validate_params) - port_specific_args = port_spec[PORT_SPEC_ARG].copy() + port_specific_args = copy_parameter_value(port_spec[PORT_SPEC_ARG]) standard_args.update({key: port_specific_args[key] for key in port_specific_args if key in 
standard_args and port_specific_args[key] is not None}) @@ -2999,8 +3006,8 @@ def _parse_port_spec(port_type=None, context = port_dict.pop(CONTEXT, None) owner = port_dict[OWNER] port_type = port_dict[PORT_TYPE] - reference_value = port_dict[REFERENCE_VALUE] - variable = port_dict[VARIABLE] + reference_value = copy_parameter_value(port_dict[REFERENCE_VALUE]) + variable = copy_parameter_value(port_dict[VARIABLE]) params = port_specific_args # Validate that port_type is a Port class diff --git a/psyneulink/core/components/projections/pathway/mappingprojection.py b/psyneulink/core/components/projections/pathway/mappingprojection.py index a08233ab417..04671f5c3a6 100644 --- a/psyneulink/core/components/projections/pathway/mappingprojection.py +++ b/psyneulink/core/components/projections/pathway/mappingprojection.py @@ -301,7 +301,7 @@ MAPPING_PROJECTION, MATRIX, \ OUTPUT_PORT, VALUE from psyneulink.core.globals.log import ContextFlags -from psyneulink.core.globals.parameters import FunctionParameter, Parameter, check_user_specified +from psyneulink.core.globals.parameters import FunctionParameter, Parameter, check_user_specified, copy_parameter_value from psyneulink.core.globals.preferences.basepreferenceset import ValidPrefSet from psyneulink.core.globals.preferences.preferenceset import PreferenceLevel @@ -553,7 +553,7 @@ def _instantiate_receiver(self, context=None): except TypeError: mapping_output_len = 1 - matrix_spec = self.defaults.matrix + matrix_spec = copy_parameter_value(self.defaults.matrix) if (type(matrix_spec) == str and matrix_spec == AUTO_ASSIGN_MATRIX): diff --git a/psyneulink/core/compositions/composition.py b/psyneulink/core/compositions/composition.py index 3a1e3f3e1bc..65815c1e11e 100644 --- a/psyneulink/core/compositions/composition.py +++ b/psyneulink/core/compositions/composition.py @@ -2951,7 +2951,7 @@ def input_function(env, result): SAMPLE, SENDER, SHADOW_INPUTS, SOFT_CLAMP, SUM, \ TARGET, TARGET_MECHANISM, TEXT, VARIABLE, WEIGHT, OWNER_MECH from psyneulink.core.globals.log import CompositionLog, LogCondition -from psyneulink.core.globals.parameters import Parameter, ParametersBase, check_user_specified +from psyneulink.core.globals.parameters import Parameter, ParametersBase, check_user_specified, copy_parameter_value from psyneulink.core.globals.preferences.basepreferenceset import BasePreferenceSet from psyneulink.core.globals.preferences.preferenceset import PreferenceLevel, _assign_prefs from psyneulink.core.globals.registry import register_category @@ -5862,8 +5862,8 @@ def _create_CIM_ports(self, context=None): # instantiate the input port on the output CIM to correspond to the node's output port interface_input_port = InputPort(owner=self.output_CIM, - variable=output_port.defaults.value, - reference_value=output_port.defaults.value, + variable=copy_parameter_value(output_port.defaults.value), + reference_value=copy_parameter_value(output_port.defaults.value), name=OUTPUT_CIM_NAME + "_" + node.name + "_" + output_port.name, context=context) @@ -5877,7 +5877,7 @@ def _create_CIM_ports(self, context=None): variable=(OWNER_VALUE, functools.partial(self.output_CIM.get_input_port_position, interface_input_port)), function=Identity, - reference_value=output_port.defaults.value, + reference_value=copy_parameter_value(output_port.defaults.value), name=OUTPUT_CIM_NAME + "_" + node.name + "_" + output_port.name, context=context) @@ -11084,6 +11084,12 @@ def run( node.parameters.num_executions._get(context)._set_by_time_scale(TimeScale.RUN, 0) if 
ContextFlags.SIMULATION_MODE not in context.runmode: + try: + inputs = copy_parameter_value(inputs) + except TypeError: + # generator, must be copied during generation + pass + if is_numeric(inputs): _input_spec = convert_all_elements_to_np_array(inputs) else: @@ -11358,12 +11364,9 @@ def run( # store the result of this execution in case it will be the final result # object.results.append(result) - if isinstance(trial_output, collections.abc.Iterable): - result_copy = trial_output.copy() - else: - result_copy = trial_output + trial_output = copy_parameter_value(trial_output) - results.append(result_copy) + results.append(trial_output) self.parameters.results._set(convert_to_np_array(results), context) if not self.parameters.retain_old_simulation_data._get(): @@ -12213,7 +12216,7 @@ def execute( # Store values of all nodes in this execution_set for use by other nodes in the execution set # throughout this timestep (e.g., for recurrent Projections) - frozen_values[node] = node.get_output_values(context) + frozen_values[node] = copy_parameter_value(node.get_output_values(context)) # FIX: 6/12/19 Deprecate? # Handle input clamping @@ -12359,7 +12362,7 @@ def execute( # Store new value generated by node, # then set back to frozen value for use by other nodes in execution_set - new_values[node] = node.get_output_values(context) + new_values[node] = copy_parameter_value(node.get_output_values(context)) for i in range(len(node.output_ports)): node.output_ports[i].parameters.value._set(frozen_values[node][i], context, skip_history=True, skip_log=True) diff --git a/psyneulink/core/globals/parameters.py b/psyneulink/core/globals/parameters.py index f3106048098..49e4d4cf919 100644 --- a/psyneulink/core/globals/parameters.py +++ b/psyneulink/core/globals/parameters.py @@ -334,6 +334,7 @@ def _recurrent_transfer_mechanism_matrix_setter(value, owning_component=None, co safe_equals, try_extract_0d_array_item, unproxy_weakproxy, + update_array_in_place, ) from psyneulink.core.rpc.graph_pb2 import Entry, ndArray @@ -507,6 +508,15 @@ def check_user_specified_wrapper(self, *args, **kwargs): return check_user_specified_wrapper +def is_array_like(obj: typing.Any) -> bool: + """ + Returns: + bool: True if **obj** is a numpy-array-like object. 
False + otherwise + """ + return hasattr(obj, 'dtype') + + class ParametersTemplate: _deepcopy_shared_keys = ['_parent', '_params', '_owner_ref', '_children'] _values_default_excluded_attrs = {'user': False} @@ -1330,6 +1340,8 @@ def get(self, context=None, **kwargs): base_val = self._get(context, **kwargs) if self._scalar_converted: base_val = try_extract_0d_array_item(base_val) + if is_array_like(base_val): + base_val = copy_parameter_value(base_val) return base_val def _get(self, context=None, **kwargs): @@ -1558,24 +1570,48 @@ def _set_value( skip_log=False, skip_delivery=False, ): + value_is_array_like = is_array_like(value) # store history if not skip_history: if execution_id in self.values: + value_for_history = self.values[execution_id] + if value_is_array_like: + value_for_history = copy_parameter_value(value_for_history) + try: - self.history[execution_id].append(self.values[execution_id]) + self.history[execution_id].append(value_for_history) except KeyError: - self.history[execution_id] = collections.deque([self.values[execution_id]], maxlen=self.history_max_length) + self.history[execution_id] = collections.deque( + [value_for_history], + maxlen=self.history_max_length, + ) if self.loggable: + value_for_log = value + if value_is_array_like: + value_for_log = copy_parameter_value(value) # log value if not skip_log: - self._log_value(value, context) + self._log_value(value_for_log, context) # Deliver value to external application if not skip_delivery: - self._deliver_value(value, context) + self._deliver_value(value_for_log, context) + + value_updated = False + try: + update_array_in_place(self.values[execution_id], value) + except (KeyError, TypeError, ValueError): + # no self.values for execution_id + # failure during attempted update + pass + except RuntimeError: + # torch tensor + pass + else: + value_updated = True - # set value - self.values[execution_id] = value + if not value_updated: + self.values[execution_id] = value @handle_external_context() def delete(self, context=None): diff --git a/psyneulink/library/components/mechanisms/modulatory/learning/EMstoragemechanism.py b/psyneulink/library/components/mechanisms/modulatory/learning/EMstoragemechanism.py index ff6597ef732..7c917e64c5f 100644 --- a/psyneulink/library/components/mechanisms/modulatory/learning/EMstoragemechanism.py +++ b/psyneulink/library/components/mechanisms/modulatory/learning/EMstoragemechanism.py @@ -176,7 +176,7 @@ from psyneulink.core.globals.keywords import \ (ADDITIVE, EM_STORAGE_MECHANISM, LEARNING, LEARNING_PROJECTION, LEARNING_SIGNALS, MULTIPLICATIVE, MULTIPLICATIVE_PARAM, MODULATION, NAME, OVERRIDE, OWNER_VALUE, PROJECTIONS, REFERENCE_VALUE, VARIABLE) -from psyneulink.core.globals.parameters import Parameter, check_user_specified, FunctionParameter +from psyneulink.core.globals.parameters import Parameter, check_user_specified, FunctionParameter, copy_parameter_value from psyneulink.core.globals.preferences.basepreferenceset import ValidPrefSet from psyneulink.core.globals.preferences.preferenceset import PreferenceLevel from psyneulink.core.globals.utilities import convert_all_elements_to_np_array, is_numeric, all_within_range @@ -804,7 +804,7 @@ def _execute(self, # pass in field_projection matrix to EMStorage function res = super(LearningMechanism, self)._execute( variable=entry_to_store, - memory_matrix=field_memory_matrix, + memory_matrix=copy_parameter_value(field_memory_matrix), axis=axis, storage_location=idx_of_weakest_memory, storage_prob=storage_prob, diff --git 
a/psyneulink/library/components/mechanisms/processing/integrator/ddm.py b/psyneulink/library/components/mechanisms/processing/integrator/ddm.py index 9ca3bb8876e..c27e19fb8c5 100644 --- a/psyneulink/library/components/mechanisms/processing/integrator/ddm.py +++ b/psyneulink/library/components/mechanisms/processing/integrator/ddm.py @@ -740,7 +740,7 @@ class Parameters(ProcessingMechanism.Parameters): input_format = Parameter(SCALAR, stateful=False, loggable=False) initializer = np.array([[0]]) random_state = Parameter(None, loggable=False, getter=_random_state_getter, dependencies='seed') - seed = Parameter(DEFAULT_SEED, modulable=True, fallback_default=True, setter=_seed_setter) + seed = Parameter(DEFAULT_SEED(), modulable=True, fallback_default=True, setter=_seed_setter) output_ports = Parameter( [DECISION_VARIABLE, RESPONSE_TIME], diff --git a/psyneulink/library/components/mechanisms/processing/transfer/recurrenttransfermechanism.py b/psyneulink/library/components/mechanisms/processing/transfer/recurrenttransfermechanism.py index bd8480db643..c5561becb22 100644 --- a/psyneulink/library/components/mechanisms/processing/transfer/recurrenttransfermechanism.py +++ b/psyneulink/library/components/mechanisms/processing/transfer/recurrenttransfermechanism.py @@ -211,7 +211,7 @@ from psyneulink.core.globals.context import handle_external_context from psyneulink.core.globals.keywords import \ AUTO, ENERGY, ENTROPY, HETERO, HOLLOW_MATRIX, INPUT_PORT, MATRIX, NAME, RECURRENT_TRANSFER_MECHANISM, RESULT -from psyneulink.core.globals.parameters import Parameter, SharedParameter, check_user_specified +from psyneulink.core.globals.parameters import Parameter, SharedParameter, check_user_specified, copy_parameter_value from psyneulink.core.globals.preferences.basepreferenceset import ValidPrefSet from psyneulink.core.globals.registry import register_instance, remove_instance_from_registry from psyneulink.core.globals.socket import ConnectionInfo @@ -816,15 +816,15 @@ def _instantiate_attributes_before_function(self, function=None, context=None): param_keys = self._parameter_ports.key_values - matrix = get_matrix(self.defaults.matrix, rows=self.recurrent_size, cols=self.recurrent_size) + matrix = get_matrix(copy_parameter_value(self.defaults.matrix), rows=self.recurrent_size, cols=self.recurrent_size) # below implements the rules provided by KAM: # - If auto and hetero but not matrix are specified, the diagonal terms of the matrix are determined by auto and the off-diagonal terms are determined by hetero. # - If auto, hetero, and matrix are all specified, matrix is ignored in favor of auto and hetero. # - If auto and matrix are both specified, the diagonal terms are determined by auto and the off-diagonal terms are determined by matrix. ​ # - If hetero and matrix are both specified, the diagonal terms are determined by matrix and the off-diagonal terms are determined by hetero. 
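(A minimal numpy sketch of the auto/hetero rules listed above, assuming recurrent_size=3, auto=1.0 and hetero=-0.5; the real get_auto_matrix/get_hetero_matrix helpers also accept other specification formats not shown here.)

    import numpy as np

    size, auto, hetero = 3, 1.0, -0.5
    # diagonal terms come from auto, off-diagonal terms from hetero
    matrix = auto * np.eye(size) + hetero * (np.ones((size, size)) - np.eye(size))
    # array([[ 1. , -0.5, -0.5],
    #        [-0.5,  1. , -0.5],
    #        [-0.5, -0.5,  1. ]])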
- auto = get_auto_matrix(self.defaults.auto, self.recurrent_size) - hetero = get_hetero_matrix(self.defaults.hetero, self.recurrent_size) + auto = get_auto_matrix(copy_parameter_value(self.defaults.auto), self.recurrent_size) + hetero = get_hetero_matrix(copy_parameter_value(self.defaults.hetero), self.recurrent_size) auto_specified = self.parameters.auto._user_specified hetero_specified = self.parameters.hetero._user_specified diff --git a/psyneulink/library/components/projections/pathway/maskedmappingprojection.py b/psyneulink/library/components/projections/pathway/maskedmappingprojection.py index 9f5c3ffa377..c8e8f506170 100644 --- a/psyneulink/library/components/projections/pathway/maskedmappingprojection.py +++ b/psyneulink/library/components/projections/pathway/maskedmappingprojection.py @@ -75,7 +75,7 @@ from psyneulink.core.components.projections.pathway.mappingprojection import MappingError, MappingProjection from psyneulink.core.components.projections.projection import projection_keywords from psyneulink.core.globals.keywords import MASKED_MAPPING_PROJECTION, MATRIX -from psyneulink.core.globals.parameters import check_user_specified +from psyneulink.core.globals.parameters import check_user_specified, copy_parameter_value from psyneulink.core.globals.preferences.basepreferenceset import ValidPrefSet from psyneulink.core.globals.preferences.preferenceset import PreferenceLevel from psyneulink.core.globals.utilities import is_numeric_scalar @@ -212,7 +212,7 @@ def _validate_params(self, request_set, target_set=None, context=None): if is_numeric_scalar(mask): return mask_shape = np.array(mask).shape - matrix = get_matrix(self.defaults.matrix, + matrix = get_matrix(copy_parameter_value(self.defaults.matrix), len(self.sender.defaults.value), len(self.receiver.defaults.value)) matrix_shape = matrix.shape if mask_shape != matrix_shape: diff --git a/psyneulink/library/compositions/compositionrunner.py b/psyneulink/library/compositions/compositionrunner.py index 0aa1619f9a5..c2068f5dc82 100644 --- a/psyneulink/library/compositions/compositionrunner.py +++ b/psyneulink/library/compositions/compositionrunner.py @@ -15,6 +15,7 @@ from psyneulink.core.compositions.report import Report, ReportProgress, ReportDevices, LEARN_REPORT, PROGRESS_REPORT from psyneulink.core.components.mechanisms.modulatory.learning.learningmechanism import LearningMechanism from psyneulink.core.globals.keywords import OBJECTIVE_MECHANISM, TRAINING_SET +from psyneulink.core.globals.parameters import copy_parameter_value from inspect import isgeneratorfunction __all__ = ["CompositionRunner"] @@ -77,7 +78,7 @@ def _batch_inputs(self, chunk = {} for k, v in inputs.items(): chunk[k] = v[idx % len(v)] - yield chunk + yield copy_parameter_value(chunk) if call_after_minibatch: call_after_minibatch() diff --git a/psyneulink/library/compositions/emcomposition.py b/psyneulink/library/compositions/emcomposition.py index 6215f85446a..43e36964b88 100644 --- a/psyneulink/library/compositions/emcomposition.py +++ b/psyneulink/library/compositions/emcomposition.py @@ -1315,7 +1315,7 @@ class Parameters(AutodiffComposition.Parameters): learn_field_weights = Parameter(True, structural=True) learning_rate = Parameter(.001, modulable=True) random_state = Parameter(None, loggable=False, getter=_random_state_getter, dependencies='seed') - seed = Parameter(DEFAULT_SEED, modulable=True, setter=_seed_setter) + seed = Parameter(DEFAULT_SEED(), modulable=True, setter=_seed_setter) def _validate_memory_template(self, memory_template): if 
isinstance(memory_template, tuple): diff --git a/tests/functions/test_buffer.py b/tests/functions/test_buffer.py index 1e39d8c8a31..00b6191c717 100644 --- a/tests/functions/test_buffer.py +++ b/tests/functions/test_buffer.py @@ -77,7 +77,7 @@ def __call__(self): B.execute([7, 8, 9]) val = B.execute([10, 11, 12]) - assert counter_f.count == 4 + assert B.noise[1].count == 4 expected_val = [[24, 12.0, 46], [17, 12.0, 29], [10, 11, 12]] np.testing.assert_allclose(val, expected_val) diff --git a/tests/functions/test_transfer.py b/tests/functions/test_transfer.py index b98792c10de..43783106efa 100644 --- a/tests/functions/test_transfer.py +++ b/tests/functions/test_transfer.py @@ -279,6 +279,8 @@ def check(cost_function, if_enabled, if_disabled, observed): ex = pytest.helpers.get_func_execution(f, func_mode) res = ex(10) + if func_mode != 'Python': + ex.__self__.writeback_state_to_pnl() total_cost = (f.intensity_cost or 0) + (f.adjustment_cost or 0) + (f.duration_cost or 0) assert res == [10] @@ -300,6 +302,8 @@ def check(cost_function, if_enabled, if_disabled, observed): # Second run with positive adjustment res = ex(15) + if func_mode != 'Python': + ex.__self__.writeback_state_to_pnl() total_cost = (f.intensity_cost or 0) + (f.adjustment_cost or 0) + (f.duration_cost or 0) assert res == [15] @@ -321,6 +325,8 @@ def check(cost_function, if_enabled, if_disabled, observed): # Third run with negative adjustment res = ex(7) + if func_mode != 'Python': + ex.__self__.writeback_state_to_pnl() total_cost = (f.intensity_cost or 0) + (f.adjustment_cost or 0) + (f.duration_cost or 0) assert res == [7] From 330ee1219c9afdcaa95e7ec78a144f080fd57e15 Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Wed, 3 Mar 2021 18:50:13 -0500 Subject: [PATCH 179/410] Component: store set of containing Compositions --- psyneulink/core/components/component.py | 15 +++++++++++++++ psyneulink/core/compositions/composition.py | 7 +++++++ 2 files changed, 22 insertions(+) diff --git a/psyneulink/core/components/component.py b/psyneulink/core/components/component.py index 8810025af68..42207832395 100644 --- a/psyneulink/core/components/component.py +++ b/psyneulink/core/components/component.py @@ -507,6 +507,7 @@ import types import typing import warnings +import weakref from abc import ABCMeta from collections.abc import Iterable from enum import Enum, IntEnum @@ -1266,6 +1267,8 @@ def __init__(self, self._update_parameter_components(context) + self.compositions = weakref.WeakSet() + def __repr__(self): return '({0} {1})'.format(type(self).__name__, self.name) #return '{1}'.format(type(self).__name__, self.name) @@ -4256,6 +4259,18 @@ def _set_mdf_arg(self, model, arg, value): model.args[arg] = value + def _add_to_composition(self, composition): + self.compositions.add(composition) + + for obj in self._parameter_components: + obj._add_to_composition(composition) + + def _remove_from_composition(self, composition): + self.compositions.discard(composition) + + for obj in self._parameter_components: + obj._remove_from_composition(composition) + @property def logged_items(self): """Dictionary of all items that have entries in the log, and their currently assigned `ContextFlags`\\s diff --git a/psyneulink/core/compositions/composition.py b/psyneulink/core/compositions/composition.py index 65815c1e11e..2d4261b315e 100644 --- a/psyneulink/core/compositions/composition.py +++ b/psyneulink/core/compositions/composition.py @@ -2885,6 +2885,7 @@ def input_function(env, result): import sys import typing import warnings +import weakref 
from copy import deepcopy, copy from inspect import isgenerator, isgeneratorfunction @@ -4030,6 +4031,7 @@ def __init__( self._executed_from_command_line = False self.projections = ContentAddressableList(component_type=Component) + self.compositions = weakref.WeakSet() self._scheduler = None self._partially_added_nodes = [] @@ -4295,6 +4297,7 @@ def add_node(self, node, required_roles=None, context=None): except AttributeError: pass + node._add_to_composition(self) node._check_for_composition(context=context) # Add node to Composition's graph @@ -4414,6 +4417,7 @@ def _remove_node(self, node, analyze_graph=True): del self.nodes[node] self.node_ordering.remove(node) + node._remove_from_composition(self) for p in self.pathways: try: @@ -6626,6 +6630,7 @@ def add_projection(self, def _add_projection(self, projection): self.projections.append(projection) + projection._add_to_composition(self) def remove_projection(self, projection): # step 1 - remove Vertex from Graph @@ -6636,6 +6641,8 @@ def remove_projection(self, projection): if projection in self.projections: self.projections.remove(projection) + projection._remove_from_composition(self) + # step 3 - deactivate Projection in this Composition projection._deactivate_for_compositions(self) From 0a79622a90ffc63ba7dd6e25786f2a181f6fa3dd Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Wed, 27 Mar 2024 04:02:49 +0000 Subject: [PATCH 180/410] parameters: sync numeric values with compiled structures On compilation (Execution._get_compilation_param), assign shared memory (as numpy array) of compiled structures to values of Parameter objects represented in llvm param and state structs. This allows the user to make compatible changes to these values using Parameter.set without recompilation. Delete all compiled data for a given context when an incompatible change is made to the value of any of these Parameter objects. --- psyneulink/core/compositions/composition.py | 3 + psyneulink/core/globals/parameters.py | 54 ++++++++++--- psyneulink/core/llvm/execution.py | 45 +++++++++-- tests/composition/test_composition.py | 87 +++++++++++++++++++++ tests/functions/test_transfer.py | 6 -- 5 files changed, 172 insertions(+), 23 deletions(-) diff --git a/psyneulink/core/compositions/composition.py b/psyneulink/core/compositions/composition.py index 2d4261b315e..128dcadb64e 100644 --- a/psyneulink/core/compositions/composition.py +++ b/psyneulink/core/compositions/composition.py @@ -13165,6 +13165,9 @@ def _gen_llvm_function(self, *, ctx:pnlvm.LLVMBuilderContext, tags:frozenset): else: return pnlvm.codegen.gen_composition_exec(ctx, self, tags=tags) + def _delete_compilation_data(self, context): + self._compilation_data.execution.delete(context) + def enable_logging(self): for item in self.nodes + self.projections: if isinstance(item, Composition): diff --git a/psyneulink/core/globals/parameters.py b/psyneulink/core/globals/parameters.py index 49e4d4cf919..3dbaa897388 100644 --- a/psyneulink/core/globals/parameters.py +++ b/psyneulink/core/globals/parameters.py @@ -517,6 +517,12 @@ def is_array_like(obj: typing.Any) -> bool: return hasattr(obj, 'dtype') +# used in Parameter._set_value. 
Parameter names where a change in +# shape/type should cause deletion of corresponding compiled structs +# even if the values are not synced +addl_unsynced_parameter_names = {'value'} + + class ParametersTemplate: _deepcopy_shared_keys = ['_parent', '_params', '_owner_ref', '_children'] _values_default_excluded_attrs = {'user': False} @@ -1025,6 +1031,7 @@ def __init__( _inherited_source=None, _user_specified=False, _scalar_converted=False, + _tracking_compiled_struct=False, **kwargs ): if isinstance(aliases, str): @@ -1087,6 +1094,7 @@ def __init__( _user_specified=_user_specified, _temp_uninherited=set(), _scalar_converted=_scalar_converted, + _tracking_compiled_struct=_tracking_compiled_struct, **kwargs ) @@ -1530,6 +1538,7 @@ def _set( skip_history=False, skip_log=False, skip_delivery=False, + compilation_sync=False, **kwargs, ): if not self.stateful: @@ -1558,6 +1567,7 @@ def _set( skip_history=skip_history, skip_log=skip_log, skip_delivery=skip_delivery, + compilation_sync=compilation_sync, ) return value @@ -1569,6 +1579,7 @@ def _set_value( skip_history=False, skip_log=False, skip_delivery=False, + compilation_sync=False, ): value_is_array_like = is_array_like(value) # store history @@ -1598,21 +1609,42 @@ def _set_value( self._deliver_value(value_for_log, context) value_updated = False - try: - update_array_in_place(self.values[execution_id], value) - except (KeyError, TypeError, ValueError): - # no self.values for execution_id - # failure during attempted update - pass - except RuntimeError: - # torch tensor - pass - else: - value_updated = True + if not compilation_sync: + try: + update_array_in_place(self.values[execution_id], value) + except (KeyError, TypeError, ValueError): + # no self.values for execution_id + # failure during attempted update + pass + except RuntimeError: + # torch tensor + pass + else: + value_updated = True if not value_updated: self.values[execution_id] = value + if compilation_sync: + self._tracking_compiled_struct = True + elif ( + value_is_array_like + and ( + self._tracking_compiled_struct + or self.name in addl_unsynced_parameter_names + ) + ): + # recompilation is needed for arrays that could not be + # updated in place + try: + owner_comps = self._owner._owner.compositions + except AttributeError: + pass + else: + for comp in owner_comps: + comp._delete_compilation_data(context) + self._tracking_compiled_struct = False + @handle_external_context() def delete(self, context=None): try: diff --git a/psyneulink/core/llvm/execution.py b/psyneulink/core/llvm/execution.py index b3f65c8f4a5..314d601ab06 100644 --- a/psyneulink/core/llvm/execution.py +++ b/psyneulink/core/llvm/execution.py @@ -22,6 +22,8 @@ from psyneulink.core import llvm as pnlvm from psyneulink.core.globals.context import Context +from psyneulink.core.globals.parameters import is_array_like + from . 
import helpers, jit_engine, builder_context from .debug import debug_env @@ -111,8 +113,16 @@ def _get_compilation_param(self, name, init_method, arg): _pretty_size(ctypes.sizeof(struct_ty)), ")", "for", self._obj.name) - return struct + def cond_select_np_arrs(p): + return is_array_like(p.default_value) + if len(self._execution_contexts) == 1: + if name == '_state': + self.writeback_state_to_pnl(cond_select_np_arrs) + elif name == '_param': + self.writeback_params_to_pnl(cond_select_np_arrs) + + return struct def writeback_state_to_pnl(self, condition:Callable=lambda p: True): @@ -122,6 +132,12 @@ def writeback_state_to_pnl(self, condition:Callable=lambda p: True): "llvm_state_ids", condition) + def writeback_params_to_pnl(self, condition: Callable = lambda p: True): + self._copy_params_to_pnl(self._execution_contexts[0], + self._obj, + self._param_struct, + "llvm_param_ids", + condition) def _copy_params_to_pnl(self, context, component, params, ids:str, condition:Callable): @@ -198,7 +214,13 @@ def _copy_params_to_pnl(self, context, component, params, ids:str, condition:Cal elif condition(pnl_param): # Replace empty structures with None - if ctypes.sizeof(compiled_attribute_param) == 0: + try: + size_of = ctypes.sizeof(compiled_attribute_param) + except TypeError: + # will be a 0-dim array + size_of = 1 + + if size_of == 0: value = None else: value = np.ctypeslib.as_array(compiled_attribute_param) @@ -208,11 +230,22 @@ def _copy_params_to_pnl(self, context, component, params, ids:str, condition:Cal value = value[-1] # Try to match the shape of the old value - old_value = pnl_param.get(context) + # Use ._get to retrieve underlying numpy arrays + # (.get will extract a scalar if originally set + # as a scalar) + old_value = pnl_param._get(context) if hasattr(old_value, 'shape'): - value = value.reshape(old_value.shape) - - pnl_param.set(value, context=context, override=True) + try: + value = value.reshape(old_value.shape) + except ValueError: + pass + + pnl_param.set( + value, + context=context, + override=True, + compilation_sync=True, + ) class CUDAExecution(Execution): diff --git a/tests/composition/test_composition.py b/tests/composition/test_composition.py index 7769fb751dc..4ba23cac84b 100644 --- a/tests/composition/test_composition.py +++ b/tests/composition/test_composition.py @@ -4264,6 +4264,93 @@ def test_one_time_warning_for_run_with_no_inputs(self): warnings.simplefilter("error") comp.run() + def _check_comp_ex(self, comp, comparison, comp_mode, context=None, is_not=False): + if comp_mode == pnl.ExecutionMode.Python: + return + + if context is None: + context = comp + + comp_ex = comp._compilation_data.execution.get(context) + if is_not: + assert comp_ex is not comparison + else: + assert comp_ex is comparison + + @pytest.mark.composition + def test_multiple_runs_with_parameter_change(self, comp_mode): + A = TransferMechanism(size=2) + comp = Composition([A]) + + inputs_dict = {A: [1, 1]} + output = comp.run(inputs=inputs_dict, execution_mode=comp_mode) + np.testing.assert_allclose([[1, 1]], output) + orig_comp_ex = comp._compilation_data.execution.get(comp) + + # assign int to float, can reuse compilation + A.function.slope.base = 2 + self._check_comp_ex(comp, orig_comp_ex, comp_mode) + + output = comp.run(inputs=inputs_dict, execution_mode=comp_mode) + np.testing.assert_allclose([[2, 2]], output) + self._check_comp_ex(comp, orig_comp_ex, comp_mode) + + # assign float to float, can reuse compilation + A.function.slope.base = 2.1 + self._check_comp_ex(comp, orig_comp_ex, 
comp_mode) + + output = comp.run(inputs=inputs_dict, execution_mode=comp_mode) + np.testing.assert_allclose([[2.1, 2.1]], output) + self._check_comp_ex(comp, orig_comp_ex, comp_mode) + + # assign array with len 2 to float, must recompile + A.function.intercept.base = [3, 3] + self._check_comp_ex(comp, None, comp_mode) + # vectorized intercept not supported in LLVM modes + A.function.intercept.base = 3 + + output = comp.run(inputs=inputs_dict, execution_mode=comp_mode) + np.testing.assert_allclose([[5.1, 5.1]], output) + self._check_comp_ex(comp, None, comp_mode, is_not=True) + self._check_comp_ex(comp, orig_comp_ex, comp_mode, is_not=True) + + @pytest.mark.composition + def test_multiple_runs_with_parameter_change_arr(self, comp_mode): + A = TransferMechanism(size=2, integrator_mode=True) + comp = Composition([A]) + + inputs_dict = {A: [1, 1]} + output = comp.run(inputs=inputs_dict, execution_mode=comp_mode) + np.testing.assert_allclose([[0.5, 0.5]], output) + orig_comp_ex = comp._compilation_data.execution.get(comp) + + # assign int to float, can reuse compilation + A.integrator_function.previous_value = [[1, 1]] + self._check_comp_ex(comp, orig_comp_ex, comp_mode) + + output = comp.run(inputs=inputs_dict, execution_mode=comp_mode) + np.testing.assert_allclose([[1.0, 1.0]], output) + self._check_comp_ex(comp, orig_comp_ex, comp_mode) + + # assign float to float, can reuse compilation + A.integrator_function.previous_value = [[1.1, 1.1]] + self._check_comp_ex(comp, orig_comp_ex, comp_mode) + + output = comp.run(inputs=inputs_dict, execution_mode=comp_mode) + np.testing.assert_allclose([[1.05, 1.05]], output) + self._check_comp_ex(comp, orig_comp_ex, comp_mode) + + # assign array with extra dim, must recompile + A.integrator_function.previous_value = [[[1.1, 1.1]]] + self._check_comp_ex(comp, None, comp_mode) + A.integrator_function.previous_value = [[1.1, 1.1]] + + output = comp.run(inputs=inputs_dict, execution_mode=comp_mode) + np.testing.assert_allclose([[1.05, 1.05]], output) + self._check_comp_ex(comp, None, comp_mode, is_not=True) + self._check_comp_ex(comp, orig_comp_ex, comp_mode, is_not=True) + + class TestCallBeforeAfterTimescale: def test_call_before_record_timescale(self): diff --git a/tests/functions/test_transfer.py b/tests/functions/test_transfer.py index 43783106efa..b98792c10de 100644 --- a/tests/functions/test_transfer.py +++ b/tests/functions/test_transfer.py @@ -279,8 +279,6 @@ def check(cost_function, if_enabled, if_disabled, observed): ex = pytest.helpers.get_func_execution(f, func_mode) res = ex(10) - if func_mode != 'Python': - ex.__self__.writeback_state_to_pnl() total_cost = (f.intensity_cost or 0) + (f.adjustment_cost or 0) + (f.duration_cost or 0) assert res == [10] @@ -302,8 +300,6 @@ def check(cost_function, if_enabled, if_disabled, observed): # Second run with positive adjustment res = ex(15) - if func_mode != 'Python': - ex.__self__.writeback_state_to_pnl() total_cost = (f.intensity_cost or 0) + (f.adjustment_cost or 0) + (f.duration_cost or 0) assert res == [15] @@ -325,8 +321,6 @@ def check(cost_function, if_enabled, if_disabled, observed): # Third run with negative adjustment res = ex(7) - if func_mode != 'Python': - ex.__self__.writeback_state_to_pnl() total_cost = (f.intensity_cost or 0) + (f.adjustment_cost or 0) + (f.duration_cost or 0) assert res == [7] From b5296437ee5fd9a45c56ecded2202f70f3191a80 Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Fri, 12 Apr 2024 05:20:40 +0000 Subject: [PATCH 181/410] parameters: on incompatible value, only 
delete necessary compiled struct _param, _state, and _data are independent, so an incompatible change in a Parameter's value only needs to delete the struct in which it is stored --- psyneulink/core/compositions/composition.py | 20 +++- psyneulink/core/globals/parameters.py | 2 +- tests/composition/test_composition.py | 106 ++++++++++++++++---- 3 files changed, 105 insertions(+), 23 deletions(-) diff --git a/psyneulink/core/compositions/composition.py b/psyneulink/core/compositions/composition.py index 128dcadb64e..f1311250677 100644 --- a/psyneulink/core/compositions/composition.py +++ b/psyneulink/core/compositions/composition.py @@ -13165,8 +13165,24 @@ def _gen_llvm_function(self, *, ctx:pnlvm.LLVMBuilderContext, tags:frozenset): else: return pnlvm.codegen.gen_composition_exec(ctx, self, tags=tags) - def _delete_compilation_data(self, context): - self._compilation_data.execution.delete(context) + def _delete_compilation_data(self, context: Context, from_parameter: Parameter = None): + if from_parameter is None: + self._compilation_data.execution.delete(context) + else: + execution_dict = self._compilation_data.execution.get(context) + if execution_dict is None: + return + + param_owner = from_parameter._owner._owner + if from_parameter.name in param_owner.llvm_param_ids: + struct_attr = '_param' + elif from_parameter.name in param_owner.llvm_state_ids: + struct_attr = '_state' + else: + struct_attr = '_data' + + for execution in execution_dict.values(): + setattr(execution, struct_attr, None) def enable_logging(self): for item in self.nodes + self.projections: diff --git a/psyneulink/core/globals/parameters.py b/psyneulink/core/globals/parameters.py index 3dbaa897388..e03e01fe827 100644 --- a/psyneulink/core/globals/parameters.py +++ b/psyneulink/core/globals/parameters.py @@ -1642,7 +1642,7 @@ def _set_value( pass else: for comp in owner_comps: - comp._delete_compilation_data(context) + comp._delete_compilation_data(context, self) self._tracking_compiled_struct = False @handle_external_context() diff --git a/tests/composition/test_composition.py b/tests/composition/test_composition.py index 4ba23cac84b..36c04fda79f 100644 --- a/tests/composition/test_composition.py +++ b/tests/composition/test_composition.py @@ -4264,21 +4264,29 @@ def test_one_time_warning_for_run_with_no_inputs(self): warnings.simplefilter("error") comp.run() - def _check_comp_ex(self, comp, comparison, comp_mode, context=None, is_not=False): + def _check_comp_ex(self, comp, comparison, comp_mode, struct_name, context=None, is_not=False): if comp_mode == pnl.ExecutionMode.Python: return if context is None: context = comp - comp_ex = comp._compilation_data.execution.get(context) - if is_not: - assert comp_ex is not comparison - else: - assert comp_ex is comparison + execution_dict = comp._compilation_data.execution.get(context) + for tag, execution in execution_dict.items(): + if comparison is None: + comparison_val = None + else: + comparison_val = comparison[tag] + + if is_not: + assert getattr(execution, struct_name) is not comparison_val + else: + assert getattr(execution, struct_name) is comparison_val @pytest.mark.composition def test_multiple_runs_with_parameter_change(self, comp_mode): + struct_name = '_param' + A = TransferMechanism(size=2) comp = Composition([A]) @@ -4286,36 +4294,43 @@ def test_multiple_runs_with_parameter_change(self, comp_mode): output = comp.run(inputs=inputs_dict, execution_mode=comp_mode) np.testing.assert_allclose([[1, 1]], output) orig_comp_ex = 
comp._compilation_data.execution.get(comp) + if orig_comp_ex is not None: + orig_comp_ex = { + tag: getattr(ex, struct_name) + for tag, ex in orig_comp_ex.items() + } # assign int to float, can reuse compilation A.function.slope.base = 2 - self._check_comp_ex(comp, orig_comp_ex, comp_mode) + self._check_comp_ex(comp, orig_comp_ex, comp_mode, struct_name) output = comp.run(inputs=inputs_dict, execution_mode=comp_mode) np.testing.assert_allclose([[2, 2]], output) - self._check_comp_ex(comp, orig_comp_ex, comp_mode) + self._check_comp_ex(comp, orig_comp_ex, comp_mode, struct_name) # assign float to float, can reuse compilation A.function.slope.base = 2.1 - self._check_comp_ex(comp, orig_comp_ex, comp_mode) + self._check_comp_ex(comp, orig_comp_ex, comp_mode, struct_name) output = comp.run(inputs=inputs_dict, execution_mode=comp_mode) np.testing.assert_allclose([[2.1, 2.1]], output) - self._check_comp_ex(comp, orig_comp_ex, comp_mode) + self._check_comp_ex(comp, orig_comp_ex, comp_mode, struct_name) # assign array with len 2 to float, must recompile A.function.intercept.base = [3, 3] - self._check_comp_ex(comp, None, comp_mode) + self._check_comp_ex(comp, None, comp_mode, struct_name) # vectorized intercept not supported in LLVM modes A.function.intercept.base = 3 output = comp.run(inputs=inputs_dict, execution_mode=comp_mode) np.testing.assert_allclose([[5.1, 5.1]], output) - self._check_comp_ex(comp, None, comp_mode, is_not=True) - self._check_comp_ex(comp, orig_comp_ex, comp_mode, is_not=True) + self._check_comp_ex(comp, None, comp_mode, struct_name, is_not=True) + self._check_comp_ex(comp, orig_comp_ex, comp_mode, struct_name, is_not=True) @pytest.mark.composition def test_multiple_runs_with_parameter_change_arr(self, comp_mode): + struct_name = '_state' + A = TransferMechanism(size=2, integrator_mode=True) comp = Composition([A]) @@ -4323,32 +4338,83 @@ def test_multiple_runs_with_parameter_change_arr(self, comp_mode): output = comp.run(inputs=inputs_dict, execution_mode=comp_mode) np.testing.assert_allclose([[0.5, 0.5]], output) orig_comp_ex = comp._compilation_data.execution.get(comp) + if orig_comp_ex is not None: + orig_comp_ex = { + tag: getattr(ex, struct_name) + for tag, ex in orig_comp_ex.items() + } # assign int to float, can reuse compilation A.integrator_function.previous_value = [[1, 1]] - self._check_comp_ex(comp, orig_comp_ex, comp_mode) + self._check_comp_ex(comp, orig_comp_ex, comp_mode, struct_name) output = comp.run(inputs=inputs_dict, execution_mode=comp_mode) np.testing.assert_allclose([[1.0, 1.0]], output) - self._check_comp_ex(comp, orig_comp_ex, comp_mode) + self._check_comp_ex(comp, orig_comp_ex, comp_mode, struct_name) # assign float to float, can reuse compilation A.integrator_function.previous_value = [[1.1, 1.1]] - self._check_comp_ex(comp, orig_comp_ex, comp_mode) + self._check_comp_ex(comp, orig_comp_ex, comp_mode, struct_name) output = comp.run(inputs=inputs_dict, execution_mode=comp_mode) np.testing.assert_allclose([[1.05, 1.05]], output) - self._check_comp_ex(comp, orig_comp_ex, comp_mode) + self._check_comp_ex(comp, orig_comp_ex, comp_mode, struct_name) # assign array with extra dim, must recompile A.integrator_function.previous_value = [[[1.1, 1.1]]] - self._check_comp_ex(comp, None, comp_mode) + self._check_comp_ex(comp, None, comp_mode, struct_name) A.integrator_function.previous_value = [[1.1, 1.1]] output = comp.run(inputs=inputs_dict, execution_mode=comp_mode) np.testing.assert_allclose([[1.05, 1.05]], output) - self._check_comp_ex(comp, None, 
comp_mode, is_not=True) - self._check_comp_ex(comp, orig_comp_ex, comp_mode, is_not=True) + self._check_comp_ex(comp, None, comp_mode, struct_name, is_not=True) + self._check_comp_ex(comp, orig_comp_ex, comp_mode, struct_name, is_not=True) + + @pytest.mark.composition + def test_multiple_runs_with_parameter_change_from_data_struct(self, comp_mode): + # NOTE: values in value.set calls below do not affect results, + # they are arbitrary and used just to check existence or + # non-existence of compiled structures after set + struct_name = '_data' + + A = TransferMechanism(size=2, integrator_mode=True) + comp = Composition([A]) + + inputs_dict = {A: [1, 1]} + output = comp.run(inputs=inputs_dict, execution_mode=comp_mode) + np.testing.assert_allclose([[0.5, 0.5]], output) + orig_comp_ex = comp._compilation_data.execution.get(comp) + if orig_comp_ex is not None: + orig_comp_ex = { + tag: getattr(ex, struct_name) + for tag, ex in orig_comp_ex.items() + } + + # assign int to float, can reuse compilation + A.integrator_function.parameters.value.set([[1, 1]], comp, override=True) + self._check_comp_ex(comp, orig_comp_ex, comp_mode, struct_name) + + output = comp.run(inputs=inputs_dict, execution_mode=comp_mode) + np.testing.assert_allclose([[0.75, 0.75]], output) + self._check_comp_ex(comp, orig_comp_ex, comp_mode, struct_name) + + # assign float to float, can reuse compilation + A.integrator_function.parameters.value.set([[1.0, 1.0]], comp, override=True) + self._check_comp_ex(comp, orig_comp_ex, comp_mode, struct_name) + + output = comp.run(inputs=inputs_dict, execution_mode=comp_mode) + np.testing.assert_allclose([[0.875, 0.875]], output) + self._check_comp_ex(comp, orig_comp_ex, comp_mode, struct_name) + + # assign array with extra dim, must recompile + A.integrator_function.parameters.value.set([[[1.0, 1.0]]], comp, override=True) + self._check_comp_ex(comp, None, comp_mode, struct_name) + A.integrator_function.parameters.value.set([[1.0, 1.0]], comp, override=True) + + output = comp.run(inputs=inputs_dict, execution_mode=comp_mode) + np.testing.assert_allclose([[0.9375, 0.9375]], output) + self._check_comp_ex(comp, None, comp_mode, struct_name, is_not=True) + self._check_comp_ex(comp, orig_comp_ex, comp_mode, struct_name, is_not=True) class TestCallBeforeAfterTimescale: From 8bc1f5f886265f80571a542c1383eb949bbb5472 Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Fri, 12 Apr 2024 00:36:51 +0000 Subject: [PATCH 182/410] Composition: remove compiled writeback of matrix during learning unnecessary because matrix (a numpy array) is synced using writeback on compilation --- psyneulink/core/compositions/composition.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/psyneulink/core/compositions/composition.py b/psyneulink/core/compositions/composition.py index f1311250677..2970ffca18f 100644 --- a/psyneulink/core/compositions/composition.py +++ b/psyneulink/core/compositions/composition.py @@ -11271,11 +11271,6 @@ def run( # Update the parameter for results self.parameters.results._set(convert_to_np_array(results), context) - - if self._is_learning(context): - # copies back matrix to pnl from state struct after learning - _comp_ex.writeback_state_to_pnl(condition=lambda p: p.name == "matrix") - self._propagate_most_recent_context(context) report(self, From 9db0428319fcc6ba960d6afc1b0348ad1cd18809 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Wed, 15 May 2024 12:21:48 -0400 Subject: [PATCH 183/410] tests: Add missing 'composition' marks (#2967) Fixes: 
efc07fc76e14fc62d0687a34c59c6691dc8f3b69 ("llvm, node_wrapper: Allow modulatory projections for "is_finished" wrapper") Signed-off-by: Jan Vesely --- tests/composition/test_parameterestimationcomposition.py | 5 +++++ tests/mechanisms/test_ddm_mechanism.py | 1 + 2 files changed, 6 insertions(+) diff --git a/tests/composition/test_parameterestimationcomposition.py b/tests/composition/test_parameterestimationcomposition.py index 9bc73f00a31..bf3a8c3138b 100644 --- a/tests/composition/test_parameterestimationcomposition.py +++ b/tests/composition/test_parameterestimationcomposition.py @@ -114,6 +114,7 @@ ] +@pytest.mark.composition @pytest.mark.parametrize("inputs_dict, error_msg", run_input_test_args) def test_pec_run_input_formats(inputs_dict, error_msg): if error_msg: @@ -124,6 +125,7 @@ def test_pec_run_input_formats(inputs_dict, error_msg): pec.run(inputs=inputs_dict) +@pytest.mark.composition @pytest.mark.parametrize( "opt_method, result", [ @@ -216,6 +218,7 @@ def reward_rate(sim_data): # func_mode is a hacky wa to get properly marked; Python, LLVM, and CUDA +@pytest.mark.composition def test_parameter_estimation_ddm_mle(func_mode): """Test parameter estimation of a DDM in integrator mode with MLE.""" @@ -318,6 +321,7 @@ def test_parameter_estimation_ddm_mle(func_mode): ) +@pytest.mark.composition def test_pec_bad_outcome_var_spec(): """ Tests that exception is raised when outcome variables specifies and output port that doesn't exist on the @@ -389,6 +393,7 @@ def test_pec_bad_outcome_var_spec(): assert "The number of columns in the data to fit must match" in str(ex) +@pytest.mark.composition def test_pec_controller_specified(): """Test that an exception is raised if a controller is specified for the PEC.""" with pytest.raises(ValueError): diff --git a/tests/mechanisms/test_ddm_mechanism.py b/tests/mechanisms/test_ddm_mechanism.py index 5cf1a04a66a..be779240a20 100644 --- a/tests/mechanisms/test_ddm_mechanism.py +++ b/tests/mechanisms/test_ddm_mechanism.py @@ -734,6 +734,7 @@ def test_ddm_is_finished(comp_mode, noise, threshold, expected_results): np.testing.assert_array_equal(results, expected_results) +@pytest.mark.composition @pytest.mark.parametrize("until_finished", ["until_finished", "not_until_finished"]) @pytest.mark.parametrize("threshold_mod", ["threshold_modulated", "threshold_not_modulated"]) def test_ddm_is_finished_with_dependency(comp_mode, until_finished, threshold_mod): From a8db9c2317c3e83f7448b12beda37e0f1522688f Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Tue, 16 Apr 2024 23:54:24 +0000 Subject: [PATCH 184/410] treewide: remove numpy matrix references from docs --- .../functions/nonstateful/learningfunctions.py | 14 +++++++------- .../functions/nonstateful/objectivefunctions.py | 14 +++++++------- .../functions/nonstateful/transferfunctions.py | 10 +++++----- .../modulatory/learning/learningmechanism.py | 2 +- .../ports/modulatorysignals/learningsignal.py | 4 ++-- .../projections/pathway/mappingprojection.py | 6 +++--- .../modulatory/learning/EMstoragemechanism.py | 11 ++++++----- .../learning/autoassociativelearningmechanism.py | 6 +++--- .../learning/kohonenlearningmechanism.py | 10 ++++++---- .../processing/transfer/kohonenmechanism.py | 4 ++-- .../transfer/recurrenttransfermechanism.py | 8 ++++---- .../pathway/autoassociativeprojection.py | 2 +- .../projections/pathway/maskedmappingprojection.py | 4 ++-- 13 files changed, 49 insertions(+), 46 deletions(-) diff --git a/psyneulink/core/components/functions/nonstateful/learningfunctions.py 
b/psyneulink/core/components/functions/nonstateful/learningfunctions.py index 22593ed0d27..1fa189a9897 100644 --- a/psyneulink/core/components/functions/nonstateful/learningfunctions.py +++ b/psyneulink/core/components/functions/nonstateful/learningfunctions.py @@ -416,7 +416,7 @@ def _function(self, array containing `entry ` to be added to `memory_matrix ` along `axis `. - memory_matrix : List, 2d array, np.matrix, ParameterPort, or MappingProjection + memory_matrix : List, 2d array, ParameterPort, or MappingProjection matrix to which `variable ` is stored. .. technical_note:: @@ -1027,7 +1027,7 @@ class Kohonen(LearningFunction): # -------------------------------------------- variable: List[array(float64), array(float64), 2d array[[float64]]] : default class_defaults.variable input pattern, array of activation values, and matrix used to calculate the weights changes. - learning_rate : scalar or list, 1d or 2d array, or np.matrix of numeric values: default .05 + learning_rate : scalar or list, 1d or 2d array of numeric values: default .05 specifies the learning rate used by the `function ` (see `learning_rate ` for details). @@ -1294,7 +1294,7 @@ class Hebbian(LearningFunction): # -------------------------------------------- activations in `variable `. COMMENT - learning_rate : scalar or list, 1d or 2d array, or np.matrix of numeric values: default .05 + learning_rate : scalar or list, 1d or 2d array of numeric values: default .05 specifies the learning rate used by the `function `; (see `learning_rate ` for details). @@ -1513,7 +1513,7 @@ class ContrastiveHebbian(LearningFunction): # --------------------------------- activations in `variable `. COMMENT - learning_rate : scalar or list, 1d or 2d array, or np.matrix of numeric values: default .05 + learning_rate : scalar or list, 1d or 2d array of numeric values: default .05 specifies the learning rate used by the `function `. (see `learning_rate ` for details). @@ -2139,7 +2139,7 @@ class BackPropagation(LearningFunction): COMMENT COMMENT: - error_matrix : List, 2d array, np.matrix, ParameterPort, or MappingProjection + error_matrix : List, 2d array, ParameterPort, or MappingProjection matrix, the output of which is used to calculate the `error_signal `. If it is specified as a ParameterPort it must be one for the `matrix ` parameter of a `MappingProjection`; if it is a MappingProjection, it must be one with a @@ -2349,7 +2349,7 @@ def _validate_params(self, request_set, target_set=None, context=None): """Validate learning_rate and error_matrix params `error_matrix` argument must be one of the following - - 2d list, np.ndarray or np.matrix + - 2d list, np.ndarray - ParameterPort for one of the above - MappingProjection with a parameterPorts[MATRIX] for one of the above @@ -2454,7 +2454,7 @@ def _function(self, other than activation_input and activation_output, to compute the derivative of the activation function with respect to `activation_output `. 
- error_matrix : List, 2d array, np.matrix, ParameterPort, or MappingProjection + error_matrix : List, 2d array, ParameterPort, or MappingProjection matrix of weights that were used to generate the `error_signal ` (3rd item of `variable ` from `activation_output `; its dimensions must be the length of `activation_output ` (rows) x diff --git a/psyneulink/core/components/functions/nonstateful/objectivefunctions.py b/psyneulink/core/components/functions/nonstateful/objectivefunctions.py index 740edf90c55..66d45844e32 100644 --- a/psyneulink/core/components/functions/nonstateful/objectivefunctions.py +++ b/psyneulink/core/components/functions/nonstateful/objectivefunctions.py @@ -105,7 +105,7 @@ class Stability(ObjectiveFunction): in which case zeros are assigned as the value(s). An error is generated if both are specified but size != len(default_value). - matrix : list, np.ndarray, np.matrix, or matrix keyword : default HOLLOW_MATRIX + matrix : list, np.ndarray, or matrix keyword : default HOLLOW_MATRIX specifies the matrix of recurrent weights; must be a square matrix with the same width as the length of `variable `. @@ -142,7 +142,7 @@ class Stability(ObjectiveFunction): size : int length of array for which stability is calculated. - matrix : list, np.ndarray, np.matrix, function keyword, or MappingProjection : default HOLLOW_MATRIX + matrix : list, np.ndarray, function keyword, or MappingProjection : default HOLLOW_MATRIX weight matrix from each element of `variable ` to each other; if a matrix other than HOLLOW_MATRIX is assigned, it is convolved with HOLLOW_MATRIX to eliminate self-connections from the stability calculation. @@ -254,7 +254,7 @@ def _validate_params(self, variable, request_set, target_set=None, context=None) """Validate matrix param `matrix ` argument must be one of the following - - 2d list, np.ndarray or np.matrix + - 2d list, np.ndarray - ParameterPort for one of the above - MappingProjection with a parameterPorts[MATRIX] for one of the above @@ -502,7 +502,7 @@ class Energy(Stability): in which case zeros are assigned as the value(s). An error is generated if both are specified but size != len(default_value). - matrix : list, np.ndarray, np.matrix, or matrix keyword : default INVERSE_HOLLOW_MATRIX + matrix : list, np.ndarray, or matrix keyword : default INVERSE_HOLLOW_MATRIX specifies the matrix of recurrent weights; must be a square matrix with the same width as the length of `variable `. @@ -537,7 +537,7 @@ class Energy(Stability): size : int length of array for which energy is calculated. - matrix : list, np.ndarray, np.matrix, or matrix keyword + matrix : list, np.ndarray, or matrix keyword weight matrix from each element of `variable ` to each other; if a matrix other than INVERSE_HOLLOW_MATRIX is assigned, it is convolved with HOLLOW_MATRIX to eliminate self-connections from the energy calculation. @@ -612,7 +612,7 @@ class Entropy(Stability): in which case zeros are assigned as the value(s). An error is generated if both are specified but size != len(default_value). - matrix : list, np.ndarray, np.matrix, or matrix keyword : default INVERSE_HOLLOW_MATRIX + matrix : list, np.ndarray, or matrix keyword : default INVERSE_HOLLOW_MATRIX specifies the matrix of recurrent weights; must be a square matrix with the same width as the length of `variable `. @@ -647,7 +647,7 @@ class Entropy(Stability): size : int length of array for which energy is calculated. 
- matrix : list, np.ndarray, np.matrix, or matrix keyword + matrix : list, np.ndarray, or matrix keyword weight matrix from each element of `variable ` to each other; if a matrix other than INVERSE_HOLLOW_MATRIX is assigned, it is convolved with HOLLOW_MATRIX to eliminate self-connections from the entropy calculation. diff --git a/psyneulink/core/components/functions/nonstateful/transferfunctions.py b/psyneulink/core/components/functions/nonstateful/transferfunctions.py index e1856919dfd..dba8c8a834f 100644 --- a/psyneulink/core/components/functions/nonstateful/transferfunctions.py +++ b/psyneulink/core/components/functions/nonstateful/transferfunctions.py @@ -3455,7 +3455,7 @@ class LinearMatrix(TransferFunction): # --------------------------------------- specifies a template for the value to be transformed; length must equal the number of rows of `matrix `. - matrix : number, list, 1d or 2d np.ndarray, np.matrix, function, or matrix keyword : default IDENTITY_MATRIX + matrix : number, list, 1d or 2d np.ndarray, function, or matrix keyword : default IDENTITY_MATRIX specifies matrix used to transform `variable ` (see `matrix ` for specification details). @@ -3505,7 +3505,7 @@ class LinearMatrix(TransferFunction): # --------------------------------------- matrix used to transform `variable `. Can be specified as any of the following: * number - used as the filler value for all elements of the :keyword:`matrix` (call to np.fill); - * list of arrays, 2d array or np.matrix - assigned as the value of :keyword:`matrix`; + * list of arrays, 2d array - assigned as the value of :keyword:`matrix`; * matrix keyword - see `MatrixKeywords` for list of options. Rows correspond to elements of the input array (outer index), and columns correspond to elements of the output array (inner index). @@ -3561,7 +3561,7 @@ class Parameters(TransferFunction.Parameters): # return True # if m in MATRIX_KEYWORD_VALUES: # return True - # if isinstance(m, (list, np.ndarray, np.matrix, types.FunctionType)): + # if isinstance(m, (list, np.ndarray, types.FunctionType)): # return True # return False @@ -3812,7 +3812,7 @@ def _validate_params(self, request_set, target_set=None, context=None): "LinearMatrix function. When the LinearMatrix function is implemented in a " "mechanism, such as {}, the correct matrix cannot be determined from a " "keyword. Instead, the matrix must be fully specified as a float, list, " - "np.ndarray, or np.matrix". + "np.ndarray". format(param_value, self.name, self.owner.name)) # The only remaining valid option is matrix = None (sorted out in instantiate_attribs_before_fn) @@ -4009,7 +4009,7 @@ def _is_identity(self, context=None, defaults=False): # def is_matrix_spec(m): # if m is None: # return True -# if isinstance(m, (list, np.ndarray, np.matrix, types.FunctionType)): +# if isinstance(m, (list, np.ndarray, types.FunctionType)): # return True # if m in MATRIX_KEYWORD_VALUES: # return True diff --git a/psyneulink/core/components/mechanisms/modulatory/learning/learningmechanism.py b/psyneulink/core/components/mechanisms/modulatory/learning/learningmechanism.py index 630e1963635..7512d990c32 100644 --- a/psyneulink/core/components/mechanisms/modulatory/learning/learningmechanism.py +++ b/psyneulink/core/components/mechanisms/modulatory/learning/learningmechanism.py @@ -886,7 +886,7 @@ class LearningMechanism(ModulatoryMechanism_Base): is assigned as the `value ` of the LearningMechanism's *ERROR_SIGNAL* `OutputPort `. 
- learning_signal : number, ndarray or matrix + learning_signal : number or ndarray one of two values returned by the LearningMechanism's `function `, that specifies the changes to the weights of the `matrix ` parameter for the LearningMechanism's `learned_projections `; it is calculated to reduce the error signal diff --git a/psyneulink/core/components/ports/modulatorysignals/learningsignal.py b/psyneulink/core/components/ports/modulatorysignals/learningsignal.py index 32b59586f5d..b4d74b151eb 100644 --- a/psyneulink/core/components/ports/modulatorysignals/learningsignal.py +++ b/psyneulink/core/components/ports/modulatorysignals/learningsignal.py @@ -167,7 +167,7 @@ this is an identity function (`Linear` with **slope**\\ =1 and **intercept**\\ =0), that simply uses the LearningMechanism's `learning_signal ` as its own. However, the LearningSignal's `function ` can be assigned another `TransferFunction`, or any other function that takes a -scalar, ndarray or matrix and returns a similar value. +scalar or ndarray and returns a similar value. .. note:: The `index ` and `assign ` attributes of a LearningSignal are automatically assigned and should not be modified. @@ -292,7 +292,7 @@ class LearningSignal(ModulatorySignal): result of the LearningSignal's `function `; same as its `learning_signal `. - learning_signal : number, ndarray or matrix + learning_signal : number or ndarray result of the LearningSignal's `function `; same as its `value `. efferents : [List[LearningProjection]] diff --git a/psyneulink/core/components/projections/pathway/mappingprojection.py b/psyneulink/core/components/projections/pathway/mappingprojection.py index 04671f5c3a6..7e3bce0bab3 100644 --- a/psyneulink/core/components/projections/pathway/mappingprojection.py +++ b/psyneulink/core/components/projections/pathway/mappingprojection.py @@ -84,8 +84,8 @@ ` provided to its `receiver `. It can be specified in any of the following ways: - * **List, array or matrix** -- if it is a list, each item must be a list or 1d np.array of numbers; otherwise, - it must be a 2d np.array or np.matrix. In each case, the outer dimension (outer list items, array axis 0, + * **List or array** -- if it is a list, each item must be a list or 1d np.array of numbers; otherwise, + it must be a 2d np.array. In each case, the outer dimension (outer list items, array axis 0, or matrix rows) corresponds to the elements of the `sender `, and the inner dimension (inner list items, array axis 1, or matrix columns) corresponds to the weighting of the contribution that a given `sender ` makes to the `receiver ` (the number of which @@ -357,7 +357,7 @@ class MappingProjection(PathwayProjection_Base): the context in which the Projection is used, or its initialization will be `deferred `. 
- matrix : list, np.ndarray, np.matrix, function, `RandomMatrix` or keyword : default DEFAULT_MATRIX + matrix : list, np.ndarray, function, `RandomMatrix` or keyword : default DEFAULT_MATRIX specifies the matrix used by `function ` (default: `LinearCombination`) to transform the `value ` of the `sender ` into a form suitable for the `variable ` of its `receiver ` `InputPort` diff --git a/psyneulink/library/components/mechanisms/modulatory/learning/EMstoragemechanism.py b/psyneulink/library/components/mechanisms/modulatory/learning/EMstoragemechanism.py index 7c917e64c5f..ee8a4a03043 100644 --- a/psyneulink/library/components/mechanisms/modulatory/learning/EMstoragemechanism.py +++ b/psyneulink/library/components/mechanisms/modulatory/learning/EMstoragemechanism.py @@ -293,12 +293,12 @@ class EMStorageMechanism(LearningMechanism): specifies the function used to assign each item of the `variable ` to the corresponding `field ` of the `memory_matrix `. It must take as its `variable ` argument a list or 1d array of numeric values - (the "activity vector"); a ``memory_matrix`` argument that is a 2d array or matrix to which + (the "activity vector"); a ``memory_matrix`` argument that is a 2d array to which the `variable ` is assigned; ``axis`` and ``storage_location`` arguments that determine where in ``memory_matrix`` the `variable ` is stored; and optional ``storage_prob`` and ``decay_rate`` arguments that determine the probability with which storage occurs and the rate at which the `memory_matrix ` decays, respectively. The function - must return a list, 2d np.array or np.matrix for the corresponding `field ` of the + must return a list, 2d np.array for the corresponding `field ` of the `memory_matrix ` that is updated (see `EMStorage` for additional details). learning_signals : List[ParameterPort, Projection, tuple[str, Projection] or dict] : default None @@ -360,9 +360,10 @@ class EMStorageMechanism(LearningMechanism): function : LearningFunction or function : default EMStorage the function used to assign the value of each `field ` to the corresponding entry in `memory_matrix `. It must take as its `variable ` - argument a list or 1d array of numeric values (an `entry ` of the `memory_matrix - `. + argument a list or 1d array of numeric values (an `entry + ` of the + `memory_matrix `. storage_prob : float specifies the probability with which the current entry is stored in the EMSorageMechanism's `memory_matrix diff --git a/psyneulink/library/components/mechanisms/modulatory/learning/autoassociativelearningmechanism.py b/psyneulink/library/components/mechanisms/modulatory/learning/autoassociativelearningmechanism.py index 24dc94d7403..c7b079750f1 100644 --- a/psyneulink/library/components/mechanisms/modulatory/learning/autoassociativelearningmechanism.py +++ b/psyneulink/library/components/mechanisms/modulatory/learning/autoassociativelearningmechanism.py @@ -163,7 +163,7 @@ class AutoAssociativeLearningMechanism(LearningMechanism): function : LearningFunction or function : default Hebbian specifies the function used to calculate the AutoAssociativeLearningMechanism's `learning_signal ` attribute. It must take as its **variable** argument a - list or 1d array of numeric values (the "activity vector") and return a list, 2d np.array or np.matrix + list or 1d array of numeric values (the "activity vector") and return a list, 2d np.array representing a square matrix with dimensions that equal the length of its variable (the "weight change matrix"). 
@@ -208,10 +208,10 @@ class AutoAssociativeLearningMechanism(LearningMechanism): It's `variable ` must be a list or 1d np.array of numeric entries, corresponding in length to the AutoAssociativeLearningMechanism's *ACTIVATION_INPUT* (`primary `) InputPort. - learning_rate : float, 1d or 2d np.array, or np.matrix of numeric values : default None + learning_rate : float, 1d or 2d np.array of numeric values : default None determines the learning rate used by the AutoAssociativeLearningMechanism's `function ` to scale the weight change matrix it returns. If it is a scalar, - it is used to multiply the weight change matrix; if it is a 2d array or matrix, + it is used to multiply the weight change matrix; if it is a 2d array, it is used to Hadamard (elementwise) multiply the weight matrix (allowing the contribution of individual *connections* to be scaled); if it is a 1d np.array, it is used to Hadamard (elementwise) multiply the input to the `function ` (i.e., the `value ` of the diff --git a/psyneulink/library/components/mechanisms/modulatory/learning/kohonenlearningmechanism.py b/psyneulink/library/components/mechanisms/modulatory/learning/kohonenlearningmechanism.py index 39d6ad1973c..8100535b78b 100644 --- a/psyneulink/library/components/mechanisms/modulatory/learning/kohonenlearningmechanism.py +++ b/psyneulink/library/components/mechanisms/modulatory/learning/kohonenlearningmechanism.py @@ -173,8 +173,10 @@ class KohonenLearningMechanism(LearningMechanism): function : LearningFunction or function : default Kohonen specifies the function used to calculate the KohonenLearningMechanism's `learning_signal ` attribute. It must take as its **variable** argument a - list of three items (two 1d arrays and one 2d array, all of numeric values) and return a list, 2d np.array or - np.matrix that is a square matrix with the same dimensions as the third item of the **variable** arugment). + list of three items (two 1d arrays and one 2d array, all of + numeric values) and return a list, 2d np.array that is a square + matrix with the same dimensions as the third item of the + **variable** arugment). learning_rate : float : default None specifies the learning rate for the KohonenLearningMechanism. (see `learning_rate @@ -208,10 +210,10 @@ class KohonenLearningMechanism(LearningMechanism): the function used to calculate the `learning_signal ` (assigned to the KohonenLearningMechanism's `LearningSignal(s) `). It's `variable ` must be a list of three items (two 1d arrays and one 2d array, all of - numeric values); returns a list, 2d np.array or np.matrix that is a square matrix with the same dimensions + numeric values); returns a list, 2d np.array that is a square matrix with the same dimensions as the third item of its `variable `). - learning_rate : float, 1d or 2d np.array, or np.matrix of numeric values : default None + learning_rate : float, 1d or 2d np.array of numeric values : default None determines the learning rate used by the KohonenLearningMechanism's `function ` to scale the weight change matrix it returns. 
If it is a scalar, it is used to multiply the weight change matrix; if it is a 2d array or matrix, diff --git a/psyneulink/library/components/mechanisms/processing/transfer/kohonenmechanism.py b/psyneulink/library/components/mechanisms/processing/transfer/kohonenmechanism.py index cf7182b5ea3..b8e5d80dad6 100644 --- a/psyneulink/library/components/mechanisms/processing/transfer/kohonenmechanism.py +++ b/psyneulink/library/components/mechanisms/processing/transfer/kohonenmechanism.py @@ -136,7 +136,7 @@ class KohonenMechanism(TransferMechanism): cannot be enabled until it is configured for learning by calling the Mechanism's `configure_learning ` method. - learning_rate : scalar, or list, 1d or 2d np.array, or np.matrix of numeric values: default False + learning_rate : scalar, or list, 1d or 2d np.array of numeric values: default False specifies the learning rate used by its `learning function `. If it is `None`, the `default learning_rate for a LearningMechanism ` is used; if it is assigned a value, that is used as the learning_rate (see `learning_rate @@ -168,7 +168,7 @@ class KohonenMechanism(TransferMechanism): indicates whether `learning is enabled `; see `learning_enabled ` for additional details. - learning_rate : float, 1d or 2d np.array, or np.matrix of numeric values : default None + learning_rate : float, 1d or 2d np.array of numeric values : default None determines the learning rate used by the `learning_function ` of the `learning_mechanism ` (see `learning_rate ` for details concerning specification and default value assignment). diff --git a/psyneulink/library/components/mechanisms/processing/transfer/recurrenttransfermechanism.py b/psyneulink/library/components/mechanisms/processing/transfer/recurrenttransfermechanism.py index c5561becb22..ada77f2ca2e 100644 --- a/psyneulink/library/components/mechanisms/processing/transfer/recurrenttransfermechanism.py +++ b/psyneulink/library/components/mechanisms/processing/transfer/recurrenttransfermechanism.py @@ -314,7 +314,7 @@ class RecurrentTransferMechanism(TransferMechanism): COMMENT: ??OLD OR NEWER THAN BELOW? - matrix : list, np.ndarray, np.matrix, matrix keyword, or AutoAssociativeProjection : default FULL_CONNECTIVITY_MATRIX + matrix : list, np.ndarray, matrix keyword, or AutoAssociativeProjection : default FULL_CONNECTIVITY_MATRIX specifies the matrix to use for creating a `recurrent AutoAssociativeProjection `, or a AutoAssociativeProjection to use. If **auto** or **hetero** arguments are specified, the **matrix** argument will be ignored in favor of those arguments. @@ -336,7 +336,7 @@ class RecurrentTransferMechanism(TransferMechanism): equal to the matrix dimensions, if a non-uniform diagonal is desired. Can be modified by control. COMMENT - matrix : list, np.ndarray, np.matrix, matrix keyword, or AutoAssociativeProjection : default HOLLOW_MATRIX + matrix : list, np.ndarray, matrix keyword, or AutoAssociativeProjection : default HOLLOW_MATRIX specifies the matrix to use for creating a `recurrent AutoAssociativeProjection `, or an AutoAssociativeProjection to use. @@ -426,7 +426,7 @@ class RecurrentTransferMechanism(TransferMechanism): if it is not (the default), then learning cannot be enabled until it is configured for learning by calling the Mechanism's `configure_learning ` method. 
- learning_rate : scalar, or list, 1d or 2d np.array, or np.matrix of numeric values: default False + learning_rate : scalar, or list, 1d or 2d np.array of numeric values: default False specifies the learning rate used by its `learning function `. If it is `None`, the `default learning_rate for a LearningMechanism ` is used; if it is assigned a value, that is used as the learning_rate (see `learning_rate @@ -488,7 +488,7 @@ class RecurrentTransferMechanism(TransferMechanism): created automatically if `learning is specified `, and used to train the `recurrent_projection `. - learning_rate : float, 1d or 2d np.array, or np.matrix of numeric values : default None + learning_rate : float, 1d or 2d np.array of numeric values : default None determines the learning rate used by the `learning_function ` of the `learning_mechanism ` (see `learning_rate ` for details concerning specification and default value diff --git a/psyneulink/library/components/projections/pathway/autoassociativeprojection.py b/psyneulink/library/components/projections/pathway/autoassociativeprojection.py index 9caf6a7f718..2b281fc3dab 100644 --- a/psyneulink/library/components/projections/pathway/autoassociativeprojection.py +++ b/psyneulink/library/components/projections/pathway/autoassociativeprojection.py @@ -160,7 +160,7 @@ class AutoAssociativeProjection(MappingProjection): specifies the destination of the Projection's output; must be (or belong to) the same Mechanism as **sender**, and the length of its `variable ` must match the `value ` of **sender**. - matrix : list, np.ndarray, np.matrix, function or keyword : default DEFAULT_MATRIX + matrix : list, np.ndarray, function or keyword : default DEFAULT_MATRIX specifies the matrix used by `function ` (default: `LinearCombination`) to transform the `value ` of the `sender ` into a value provided to the `variable ` of the `receiver ` `InputPort`; diff --git a/psyneulink/library/components/projections/pathway/maskedmappingprojection.py b/psyneulink/library/components/projections/pathway/maskedmappingprojection.py index c8e8f506170..922df7338c8 100644 --- a/psyneulink/library/components/projections/pathway/maskedmappingprojection.py +++ b/psyneulink/library/components/projections/pathway/maskedmappingprojection.py @@ -109,7 +109,7 @@ class MaskedMappingProjection(MappingProjection): Arguments --------- - mask : int, float, list, np.ndarray or np.matrix : default None + mask : int, float, list, np.ndarray : default None specifies a mask to be applied to the `matrix ` each time the Projection is executed, in a manner specified by the **mask_operation** argument. @@ -121,7 +121,7 @@ class MaskedMappingProjection(MappingProjection): Attributes ---------- - mask : int, float, list, np.ndarray or np.matrix : default None + mask : int, float, list, np.ndarray : default None mask applied to the `matrix ` each time the Projection is executed, in a manner specified by `mask_operation `. 
From 2ec1bf3cf70e94111e0c909c42cb460df875e55a Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Thu, 18 Apr 2024 00:33:51 +0000 Subject: [PATCH 185/410] utilities: add array_from_matrix_string constructs a numpy array from a string in forms like '1 2; 3 4' replicating the function of the numpy.matrix constructor --- psyneulink/core/globals/utilities.py | 28 +++++++++++++++++++++++++++- 1 file changed, 27 insertions(+), 1 deletion(-) diff --git a/psyneulink/core/globals/utilities.py b/psyneulink/core/globals/utilities.py index 63232684996..ab8d397453a 100644 --- a/psyneulink/core/globals/utilities.py +++ b/psyneulink/core/globals/utilities.py @@ -123,6 +123,7 @@ from itertools import chain, combinations import numpy as np +from numpy.typing import DTypeLike # Conditionally import torch try: @@ -151,7 +152,7 @@ 'scalar_distance', 'sinusoid', 'tensor_power', 'TEST_CONDTION', 'type_match', 'underscore_to_camelCase', 'UtilitiesError', 'unproxy_weakproxy', 'create_union_set', 'merge_dictionaries', - 'contains_type', 'is_numeric_scalar', 'try_extract_0d_array_item', 'fill_array', 'update_array_in_place', + 'contains_type', 'is_numeric_scalar', 'try_extract_0d_array_item', 'fill_array', 'update_array_in_place', 'array_from_matrix_string', ] logger = logging.getLogger(__name__) @@ -2374,3 +2375,28 @@ def update_array_in_place( _dry_run=False, _in_object_dtype=False ) + + +def array_from_matrix_string( + s: str, row_sep: str = ';', col_sep: str = ' ', dtype: DTypeLike = float +) -> np.ndarray: + """ + Constructs a numpy array from a string in forms like '1 2; 3 4' + replicating the function of the numpy.matrix constructor. + + Args: + s (str): matrix descriptor + row_sep (str, optional): separator for matrix rows. Defaults to ';'. + col_sep (str, optional): separator for matrix columns. Defaults to ' '. + dtype (DTypeLike, optional): dtype of result array. Defaults to float. 
+ + Returns: + np.ndarray: array representation of **s** + """ + rows = s.split(row_sep) + arr = [] + for r in rows: + # filter empty columns, commonly in form like '1 2; 3 4' + arr.append([c for c in r.split(col_sep) if len(c)]) + + return np.asarray(arr, dtype=dtype) From 87fa248760e941cee8c96e19af3485c35f2cab14 Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Tue, 16 Apr 2024 23:59:37 +0000 Subject: [PATCH 186/410] treewide: remove numpy matrix in favor of numpy ndarray --- ..._Shallice_debugging_Interactive_activation | 24 +++++------ Scripts/Debug/Markus Stroop.py | 16 ++++---- Scripts/Debug/Umemoto_Feb2.py | 2 +- .../laura_test_no_noise_stroop_09_11_2018.py | 18 ++++----- .../Gilbert_Shallice_Composition_Model.py | 28 ++++++------- .../Rumelhart Semantic Network (autodiff).py | 40 +++++++++---------- .../core/components/functions/function.py | 4 +- .../nonstateful/transferfunctions.py | 8 ++-- .../core/components/ports/parameterport.py | 2 +- .../projections/pathway/mappingprojection.py | 2 +- .../transfer/recurrenttransfermechanism.py | 4 +- setup.cfg | 1 + tests/control/test_gilzenrat.py | 4 +- tests/mechanisms/test_processing_mechanism.py | 2 +- .../test_recurrent_transfer_mechanism.py | 2 +- 15 files changed, 79 insertions(+), 78 deletions(-) diff --git a/Scripts/Debug/Gilbert_Shallice_debugging_Interactive_activation b/Scripts/Debug/Gilbert_Shallice_debugging_Interactive_activation index 20b1268bb86..e01144bdcf3 100644 --- a/Scripts/Debug/Gilbert_Shallice_debugging_Interactive_activation +++ b/Scripts/Debug/Gilbert_Shallice_debugging_Interactive_activation @@ -47,71 +47,71 @@ TASK_DEMAND_LAYER.set_log_conditions('value') ### WEIGHTS # WORD INPUT TO WORD OUTPUT -word_weights = pnl.MappingProjection(matrix=np.matrix([[3.5, 0.0, 0.0], +word_weights = pnl.MappingProjection(matrix=np.array([[3.5, 0.0, 0.0], [0.0, 3.5, 0.0], [0.0, 0.0, 3.5]]), name='WORD_WEIGHTS') # COLOR INPUT TO COLOR OUTPUT -color_weights = pnl.MappingProjection(matrix=np.matrix([[1.9, 0.0, 0.0], +color_weights = pnl.MappingProjection(matrix=np.array([[1.9, 0.0, 0.0], [0.0, 1.9, 0.0], [0.0, 0.0, 1.9]]), name='COLOR_WEIGHTS') # WORD INPUT to TASK DEMAND LAYER -word_task_demand_weights = pnl.MappingProjection(matrix=np.matrix([[1.0, 1.0], +word_task_demand_weights = pnl.MappingProjection(matrix=np.array([[1.0, 1.0], [1.0, 1.0], [1.0, 1.0]]), name='WORD_TASK_DEMAND_WEIGHTS') # COLOR INPUT to TASK DEMAND LAYER -color_task_demand_weights = pnl.MappingProjection(matrix=np.matrix([[1.0, 1.0], +color_task_demand_weights = pnl.MappingProjection(matrix=np.array([[1.0, 1.0], [1.0, 1.0], [1.0, 1.0]]), name='COLOR_TASK_DEMAND_WEIGHTS') # # TASK DEMAND TO WORD OUTPUT -# task_demand_word_output_weights = pnl.MappingProjection(matrix=np.matrix([[2.5, 2.5, 2.5], +# task_demand_word_output_weights = pnl.MappingProjection(matrix=np.array([[2.5, 2.5, 2.5], # [-2.5, -2.5, -2.5]]), # name='TASK_DEMAND_WORD_OUTPUT_WEIGHTS') # # # TASK DEMAND TO COLOR OUTPUT -# task_demand_color_output_weights = pnl.MappingProjection(matrix=np.matrix([[-2.5, -2.5, -2.5], +# task_demand_color_output_weights = pnl.MappingProjection(matrix=np.array([[-2.5, -2.5, -2.5], # [2.5, 2.5, 2.5]]), # name='TASK_DEMAND_COLOR_OUTPUT_WEIGHTS') # # WORD OUTPUT TO TASK DEMAND -# word_output_task_demand_weights = pnl.MappingProjection(matrix=np.matrix([[1.0, -1.0], +# word_output_task_demand_weights = pnl.MappingProjection(matrix=np.array([[1.0, -1.0], # [1.0, -1.0], # [1.0, -1.0]]), # name='WORD_OUTPUT_TASK_DEMAND_WEIGHTS') # # # WORD OUTPUT TO TASK DEMAND -# 
color_output_task_demand_weights = pnl.MappingProjection(matrix=np.matrix([[-1.0, 1.0], +# color_output_task_demand_weights = pnl.MappingProjection(matrix=np.array([[-1.0, 1.0], # [-1.0, 1.0], # [-1.0, 1.0]]), # name='COLOR_OUTPUT_TASK_DEMAND_WEIGHTS') # # # WORD OUTPUT to COLOR OUTPUT -# word_output_color_output_weights = pnl.MappingProjection(matrix=np.matrix([[0.0, -2.0, -2.0], +# word_output_color_output_weights = pnl.MappingProjection(matrix=np.array([[0.0, -2.0, -2.0], # [-2.0, 0.0, -2.0], # [-2.0, -2.0, 0.0]]), # name='WORD_OUTPUT_COLOR_OUTPUT_WEIGHTS') # # # WORD OUTPUT to COLOR OUTPUT -# color_output_word_output_weights = pnl.MappingProjection(matrix=np.matrix([[0.0, -2.0, -2.0], +# color_output_word_output_weights = pnl.MappingProjection(matrix=np.array([[0.0, -2.0, -2.0], # [-2.0, 0.0, -2.0], # [-2.0, -2.0, 0.0]]), # name='COLOR_OUTPUT_WORD_OUTPUT_WEIGHTS') # # WORD OUTPUT TO TASK DEMAND -# word_output_output_to_task_demand_weights = pnl.MappingProjection(matrix=np.matrix([[1.0, 1.0], +# word_output_output_to_task_demand_weights = pnl.MappingProjection(matrix=np.array([[1.0, 1.0], # [1.0, 1.0], # [1.0, 1.0]]), # name='WORD_COLOR_OUTPUT_TASK_DEMAND_WEIGHTS') # # # COLOR OUTPUT TO TASK DEMAND -# color_output_output_to_task_demand_weights = pnl.MappingProjection(matrix=np.matrix([[1.0, 1.0], +# color_output_output_to_task_demand_weights = pnl.MappingProjection(matrix=np.array([[1.0, 1.0], # [1.0, 1.0], # [1.0, 1.0]]), # name='COLOR_COLOR_OUTPUT_TASK_DEMAND_WEIGHTS') diff --git a/Scripts/Debug/Markus Stroop.py b/Scripts/Debug/Markus Stroop.py index 499e7028c48..afc339a101f 100644 --- a/Scripts/Debug/Markus Stroop.py +++ b/Scripts/Debug/Markus Stroop.py @@ -82,49 +82,49 @@ # INPUT TO HIDDEN # row 0: input_'red' to hidden_'red', hidden_'green' # row 1: input_'green' to hidden_'red', hidden_'green' -color_weights = pnl.MappingProjection(matrix=np.matrix([[2.2, -2.2], +color_weights = pnl.MappingProjection(matrix=np.array([[2.2, -2.2], [-2.2, 2.2]]), name='COLOR_WEIGHTS') # row 0: input_'RED' to hidden_'RED', hidden_'GREEN' # row 1: input_'GREEN' to hidden_'RED', hidden_'GREEN' -word_weights = pnl.MappingProjection(matrix=np.matrix([[2.6, -2.6], +word_weights = pnl.MappingProjection(matrix=np.array([[2.6, -2.6], [-2.6, 2.6]]), name='WORD_WEIGHTS') # HIDDEN TO RESPONSE # row 0: hidden_'red' to response_'red', response_'green' # row 1: hidden_'green' to response_'red', response_'green' -color_response_weights = pnl.MappingProjection(matrix=np.matrix([[1.3, -1.3], +color_response_weights = pnl.MappingProjection(matrix=np.array([[1.3, -1.3], [-1.3, 1.3]]), name='COLOR_RESPONSE_WEIGHTS') # row 0: hidden_'RED' to response_'red', response_'green' # row 1: hidden_'GREEN' to response_'red', response_'green' -word_response_weights = pnl.MappingProjection(matrix=np.matrix([[2.5, -2.5], +word_response_weights = pnl.MappingProjection(matrix=np.array([[2.5, -2.5], [-2.5, 2.5]]), name='WORD_RESPONSE_WEIGHTS') # TASK TO HIDDEN LAYER # row 0: task_CN to hidden_'red', hidden_'green' # row 1: task_WR to hidden_'red', hidden_'green' -task_CN_weights = pnl.MappingProjection(matrix=np.matrix([[4.0, 4.0], +task_CN_weights = pnl.MappingProjection(matrix=np.array([[4.0, 4.0], [0, 0]]), name='TASK_CN_WEIGHTS') # row 0: task_CN to hidden_'RED', hidden_'GREEN' # row 1: task_WR to hidden_'RED', hidden_'GREEN' -task_WR_weights = pnl.MappingProjection(matrix=np.matrix([[0, 0], +task_WR_weights = pnl.MappingProjection(matrix=np.array([[0, 0], [4.0, 4.0]]), name='TASK_WR_WEIGHTS') # RESPONSE UNITS TO ACCUMULATORS # row 
0: response_'red' to respond_red_accumulator # row 1: response_'green' to respond_red_accumulator -respond_red_differencing_weights = pnl.MappingProjection(matrix=np.matrix([[1.0], [-1.0]]), +respond_red_differencing_weights = pnl.MappingProjection(matrix=np.array([[1.0], [-1.0]]), name='RESPOND_RED_WEIGHTS') # row 0: response_'red' to respond_green_accumulator # row 1: response_'green' to respond_green_accumulator -respond_green_differencing_weights = pnl.MappingProjection(matrix=np.matrix([[-1.0], [1.0]]), +respond_green_differencing_weights = pnl.MappingProjection(matrix=np.array([[-1.0], [1.0]]), name='RESPOND_GREEN_WEIGHTS') # Create pathways as processes diff --git a/Scripts/Debug/Umemoto_Feb2.py b/Scripts/Debug/Umemoto_Feb2.py index 13807b92ece..530c9406b63 100644 --- a/Scripts/Debug/Umemoto_Feb2.py +++ b/Scripts/Debug/Umemoto_Feb2.py @@ -86,7 +86,7 @@ #weights -Distractor_weight = pnl.MappingProjection(matrix=np.matrix([[-1]]), +Distractor_weight = pnl.MappingProjection(matrix=np.array([[-1]]), name='DISTRACTOR_WEIGHTS') # ADD pathways TargetControl_pathway = [Target_Stim, Target_Rep, Decision] diff --git a/Scripts/Debug/laura_test_no_noise_stroop_09_11_2018.py b/Scripts/Debug/laura_test_no_noise_stroop_09_11_2018.py index ec35a8044c3..c8c23a59b93 100644 --- a/Scripts/Debug/laura_test_no_noise_stroop_09_11_2018.py +++ b/Scripts/Debug/laura_test_no_noise_stroop_09_11_2018.py @@ -112,49 +112,49 @@ # # INPUT TO HIDDEN # # row 0: input_'red' to hidden_'red', hidden_'green' # # row 1: input_'green' to hidden_'red', hidden_'green' -# color_weights = pnl.MappingProjection(matrix=np.matrix([[2.2, -2.2], +# color_weights = pnl.MappingProjection(matrix=np.array([[2.2, -2.2], # [-2.2, 2.2]]), # name='COLOR_WEIGHTS') # # row 0: input_'RED' to hidden_'RED', hidden_'GREEN' # # row 1: input_'GREEN' to hidden_'RED', hidden_'GREEN' -# word_weights = pnl.MappingProjection(matrix=np.matrix([[2.6, -2.6], +# word_weights = pnl.MappingProjection(matrix=np.array([[2.6, -2.6], # [-2.6, 2.6]]), # name='WORD_WEIGHTS') # # # HIDDEN TO RESPONSE # # row 0: hidden_'red' to response_'red', response_'green' # # row 1: hidden_'green' to response_'red', response_'green' -# color_response_weights = pnl.MappingProjection(matrix=np.matrix([[1.3, -1.3], +# color_response_weights = pnl.MappingProjection(matrix=np.array([[1.3, -1.3], # [-1.3, 1.3]]), # name='COLOR_RESPONSE_WEIGHTS') # # row 0: hidden_'RED' to response_'red', response_'green' # # row 1: hidden_'GREEN' to response_'red', response_'green' -# word_response_weights = pnl.MappingProjection(matrix=np.matrix([[2.5, -2.5], +# word_response_weights = pnl.MappingProjection(matrix=np.array([[2.5, -2.5], # [-2.5, 2.5]]), # name='WORD_RESPONSE_WEIGHTS') # # # TASK TO HIDDEN LAYER # # row 0: task_CN to hidden_'red', hidden_'green' # # row 1: task_WR to hidden_'red', hidden_'green' -# task_CN_weights = pnl.MappingProjection(matrix=np.matrix([[4.0, 4.0], +# task_CN_weights = pnl.MappingProjection(matrix=np.array([[4.0, 4.0], # [0, 0]]), # name='TASK_CN_WEIGHTS') # # # row 0: task_CN to hidden_'RED', hidden_'GREEN' # # row 1: task_WR to hidden_'RED', hidden_'GREEN' -# task_WR_weights = pnl.MappingProjection(matrix=np.matrix([[0, 0], +# task_WR_weights = pnl.MappingProjection(matrix=np.array([[0, 0], # [4.0, 4.0]]), # name='TASK_WR_WEIGHTS') # # # RESPONSE UNITS TO ACCUMULATORS # # row 0: response_'red' to respond_red_accumulator # # row 1: response_'green' to respond_red_accumulator -# respond_red_differencing_weights = pnl.MappingProjection(matrix=np.matrix([[1.0], 
[-1.0]]), +# respond_red_differencing_weights = pnl.MappingProjection(matrix=np.array([[1.0], [-1.0]]), # name='RESPOND_RED_WEIGHTS') # # # row 0: response_'red' to respond_green_accumulator # # row 1: response_'green' to respond_green_accumulator -# respond_green_differencing_weights = pnl.MappingProjection(matrix=np.matrix([[-1.0], [1.0]]), +# respond_green_differencing_weights = pnl.MappingProjection(matrix=np.array([[-1.0], [1.0]]), # name='RESPOND_GREEN_WEIGHTS') # # # CREATE PATHWAYS @@ -408,4 +408,4 @@ # plt.tick_params(axis='x', labelsize=9) # plt.title('Mean Number of Cycles by trial type') # plt.legend(legend) -# plt.show() \ No newline at end of file +# plt.show() diff --git a/Scripts/Examples/Gilbert_Shallice_Composition_Model.py b/Scripts/Examples/Gilbert_Shallice_Composition_Model.py index 9710a6d61f4..285e758b7f4 100644 --- a/Scripts/Examples/Gilbert_Shallice_Composition_Model.py +++ b/Scripts/Examples/Gilbert_Shallice_Composition_Model.py @@ -49,83 +49,83 @@ ### WEIGHTS # WORD INPUT TO WORD OUTPUT -word_weights = pnl.MappingProjection(matrix=np.matrix([[3.5, 0.0, 0.0], +word_weights = pnl.MappingProjection(matrix=np.array([[3.5, 0.0, 0.0], [0.0, 3.5, 0.0], [0.0, 0.0, 3.5]]), name='WORD_WEIGHTS') # COLOR INPUT TO COLOR OUTPUT -color_weights = pnl.MappingProjection(matrix=np.matrix([[1.9, 0.0, 0.0], +color_weights = pnl.MappingProjection(matrix=np.array([[1.9, 0.0, 0.0], [0.0, 1.9, 0.0], [0.0, 0.0, 1.9]]), name='COLOR_WEIGHTS') # WORD INPUT to TASK DEMAND LAYER -word_task_demand_weights = pnl.MappingProjection(matrix=np.matrix([[1.0, 1.0], +word_task_demand_weights = pnl.MappingProjection(matrix=np.array([[1.0, 1.0], [1.0, 1.0], [1.0, 1.0]]), name='WORD_TASK_DEMAND_WEIGHTS') # COLOR INPUT to TASK DEMAND LAYER -color_task_demand_weights = pnl.MappingProjection(matrix=np.matrix([[1.0, 1.0], +color_task_demand_weights = pnl.MappingProjection(matrix=np.array([[1.0, 1.0], [1.0, 1.0], [1.0, 1.0]]), name='COLOR_TASK_DEMAND_WEIGHTS') # TASK DEMAND TO WORD OUTPUT -task_demand_word_output_weights = pnl.MappingProjection(matrix=np.matrix([[2.5, 2.5, 2.5], +task_demand_word_output_weights = pnl.MappingProjection(matrix=np.array([[2.5, 2.5, 2.5], [-2.5, -2.5, -2.5]]), name='TASK_DEMAND_WORD_OUTPUT_WEIGHTS') # TASK DEMAND TO COLOR OUTPUT -task_demand_color_output_weights = pnl.MappingProjection(matrix=np.matrix([[-2.5, -2.5, -2.5], +task_demand_color_output_weights = pnl.MappingProjection(matrix=np.array([[-2.5, -2.5, -2.5], [2.5, 2.5, 2.5]]), name='TASK_DEMAND_COLOR_OUTPUT_WEIGHTS') # WORD OUTPUT TO TASK DEMAND -word_output_task_demand_weights = pnl.MappingProjection(matrix=np.matrix([[1.0, -1.0], +word_output_task_demand_weights = pnl.MappingProjection(matrix=np.array([[1.0, -1.0], [1.0, -1.0], [1.0, -1.0]]), name='WORD_OUTPUT_TASK_DEMAND_WEIGHTS') # WORD OUTPUT TO TASK DEMAND -color_output_task_demand_weights = pnl.MappingProjection(matrix=np.matrix([[-1.0, 1.0], +color_output_task_demand_weights = pnl.MappingProjection(matrix=np.array([[-1.0, 1.0], [-1.0, 1.0], [-1.0, 1.0]]), name='COLOR_OUTPUT_TASK_DEMAND_WEIGHTS') # WORD OUTPUT to COLOR OUTPUT -word_output_color_output_weights = pnl.MappingProjection(matrix=np.matrix([[2.0, -2.0, -2.0], +word_output_color_output_weights = pnl.MappingProjection(matrix=np.array([[2.0, -2.0, -2.0], [-2.0, 2.0, -2.0], [-2.0, -2.0, 2.0]]), name='WORD_OUTPUT_COLOR_OUTPUT_WEIGHTS') # WORD OUTPUT TO TASK DEMAND -word_output_output_to_task_demand_weights = pnl.MappingProjection(matrix=np.matrix([[1.0, 1.0], +word_output_output_to_task_demand_weights = 
pnl.MappingProjection(matrix=np.array([[1.0, 1.0], [1.0, 1.0], [1.0, 1.0]]), name='WORD_COLOR_OUTPUT_TASK_DEMAND_WEIGHTS') # COLOR OUTPUT TO TASK DEMAND -color_output_output_to_task_demand_weights = pnl.MappingProjection(matrix=np.matrix([[1.0, 1.0], +color_output_output_to_task_demand_weights = pnl.MappingProjection(matrix=np.array([[1.0, 1.0], [1.0, 1.0], [1.0, 1.0]]), name='COLOR_COLOR_OUTPUT_TASK_DEMAND_WEIGHTS') # RECURRENT WORD weights -word_recurrent = pnl.MappingProjection(matrix=np.matrix([[0.0, -2.0, -2.0], +word_recurrent = pnl.MappingProjection(matrix=np.array([[0.0, -2.0, -2.0], [-2.0, 0.0, -2.0], [-2.0, -2.0, 0.0]]), name='WORD_RECURRENT_WEIGHTS') # RECURRENT COLOR weights -color_recurrent = pnl.MappingProjection(matrix=np.matrix([[0.0, -2.0, -2.0], +color_recurrent = pnl.MappingProjection(matrix=np.array([[0.0, -2.0, -2.0], [-2.0, 0.0, -2.0], [-2.0, -2.0, 0.0]]), name='TASK_RECURRENT_WEIGHTS') # RECURRENT TASK weights -task_recurrent = pnl.MappingProjection(matrix=np.matrix([[0.0, -2.0, -2.0], +task_recurrent = pnl.MappingProjection(matrix=np.array([[0.0, -2.0, -2.0], [-2.0, 0.0, -2.0], [-2.0, -2.0, 0.0]]), name='TASK_RECURRENT_WEIGHTS') diff --git a/Scripts/Examples/Tutorial/Rumelhart Semantic Network (autodiff).py b/Scripts/Examples/Tutorial/Rumelhart Semantic Network (autodiff).py index 6c6b780bf89..653601b6918 100644 --- a/Scripts/Examples/Tutorial/Rumelhart Semantic Network (autodiff).py +++ b/Scripts/Examples/Tutorial/Rumelhart Semantic Network (autodiff).py @@ -185,8 +185,8 @@ def gen_input_vals(nouns, relations): learning_rate=1, randomize=False ) - - + + RumelNet.add_node(nouns_in) RumelNet.add_node(rels_in) RumelNet.add_node(hn) @@ -257,7 +257,7 @@ def gen_input_vals(nouns, relations): for reps in range(tot_reps): print('Training rep: ',reps + 1, ' of: ', tot_reps) for noun in range(len(nouns)): - + inputs_dict = {} targets_dict = {} @@ -277,7 +277,7 @@ def gen_input_vals(nouns, relations): targ_is = truth_is[noun], targ_has = irrel_has[noun], targ_can = irrel_can[noun], - + targ_is=np.reshape(targ_is,np.amax(np.shape(targ_is))) targ_has=np.reshape(targ_has,np.amax(np.shape(targ_has))) targ_can=np.reshape(targ_can,np.amax(np.shape(targ_can))) @@ -288,7 +288,7 @@ def gen_input_vals(nouns, relations): targ_is = irrel_is[noun] , targ_has = truth_has[noun], targ_can = irrel_can[noun], - + targ_is=np.reshape(targ_is,np.amax(np.shape(targ_is))) targ_has=np.reshape(targ_has,np.amax(np.shape(targ_has))) targ_can=np.reshape(targ_can,np.amax(np.shape(targ_can))) @@ -300,7 +300,7 @@ def gen_input_vals(nouns, relations): targ_is = irrel_is[noun] , targ_has = irrel_has[noun], targ_can = truth_can[noun], - + targ_is=np.reshape(targ_is,np.amax(np.shape(targ_is))) targ_has=np.reshape(targ_has,np.amax(np.shape(targ_has))) targ_can=np.reshape(targ_can,np.amax(np.shape(targ_can))) @@ -314,7 +314,7 @@ def gen_input_vals(nouns, relations): targets_dict[out_sig_I].append(truth_nouns[noun]) inputs_dict[rels_in].append(rels_onehot[i]) - + result = RumelNet.run(inputs=[{'inputs': inputs_dict, 'targets': targets_dict, 'epochs': n_epochs}],do_logging=True) @@ -351,16 +351,16 @@ def gen_input_vals(nouns, relations): data_can=out_sig_can.log.nparray()[1,1] data_I=np.array(data_I[1][1::]) -data_I=np.matrix(data_I) +data_I=np.array(data_I) data_is=np.array(data_is[1][1::]) -data_is=np.matrix(data_is) - +data_is=np.array(data_is) + data_has=np.array(data_has[1][1::]) -data_has=np.matrix(data_has) +data_has=np.array(data_has) data_can=np.array(data_can[1][1::]) -data_can=np.matrix(data_can) 
+data_can=np.array(data_can) log_length=np.shape(data_I)[0] @@ -374,33 +374,33 @@ def gen_input_vals(nouns, relations): is_rel_log = np.append(is_rel_log,data_is[3 * (i + 1), :].T, 1) has_rel_log = np.append(has_rel_log,data_has[3 * (i + 1) + 1, :].T, 1) can_rel_log = np.append(can_rel_log,data_can[3 * (i + 1) + 2, :].T, 1) - + # This cell plots the last output values from the network for each noun/relation pair # This will show you what the network has learned with regards to the properties of # each noun. for i in range(len(nouns)): n=-i - + plt.stem(I_rel_log[:, n - 1]) - + plt.title(nouns[n - 1]) plt.ylabel('Strength of Association') plt.xticks(np.arange(len(nouns)), nouns,rotation=35) plt.yticks(np.arange(0,1.1,.1)) plt.show() - + plt.stem(is_rel_log[:, n - 1]) - + plt.title([nouns[n - 1], ' is:']) plt.ylabel('Strength of Association') plt.xticks(np.arange(len(is_list)), is_list,rotation=35) plt.yticks(np.arange(0,1.1,.1)) plt.show() - + plt.stem(has_rel_log[:, n - 1]) - + plt.title([nouns[n - 1], ' has:']) plt.ylabel('Strength of Association') plt.xticks(np.arange(len(has_list)), has_list,rotation=35) @@ -408,7 +408,7 @@ def gen_input_vals(nouns, relations): plt.show() plt.stem(can_rel_log[:, n - 1]) - + plt.title([nouns[n - 1], ' can:']) plt.ylabel('Strength of Association') plt.xticks(np.arange(len(can_list)), can_list,rotation=35) diff --git a/psyneulink/core/components/functions/function.py b/psyneulink/core/components/functions/function.py index ea7d8e7c82d..a447b16796a 100644 --- a/psyneulink/core/components/functions/function.py +++ b/psyneulink/core/components/functions/function.py @@ -173,7 +173,7 @@ from psyneulink.core.globals.utilities import ( convert_all_elements_to_np_array, convert_to_np_array, get_global_seed, is_instance_or_subclass, object_has_single_value, parameter_spec, parse_valid_identifier, safe_len, SeededRandomState, try_extract_0d_array_item, contains_type, is_numeric, NumericCollections, - random_matrix + random_matrix, array_from_matrix_string ) __all__ = [ @@ -1370,7 +1370,7 @@ def get_matrix(specification, rows=1, cols=1, context=None): # specify 'matrix' as a string (e.g. r = RecurrentTransferMechanism(matrix='1 2; 3 4')) if type(specification) == str: try: - return np.array(np.matrix(specification)) + return array_from_matrix_string(specification) except (ValueError, NameError, TypeError): # np.matrix(specification) will give ValueError if specification is a bad value (e.g. 
'abc', '1; 1 2') # [JDC] actually gives NameError if specification is a string (e.g., 'abc') diff --git a/psyneulink/core/components/functions/nonstateful/transferfunctions.py b/psyneulink/core/components/functions/nonstateful/transferfunctions.py index dba8c8a834f..89267a0f611 100644 --- a/psyneulink/core/components/functions/nonstateful/transferfunctions.py +++ b/psyneulink/core/components/functions/nonstateful/transferfunctions.py @@ -3750,7 +3750,7 @@ def _validate_params(self, request_set, target_set=None, context=None): # format(param_value, self.__class__.__name__, error_msg)) format(param_value, self.name, self.owner_name, error_msg)) - # string used to describe matrix, so convert to np.matrix and pass to validation of matrix below + # string used to describe matrix, so convert to np.array and pass to validation of matrix below elif isinstance(param_value, str): try: param_value = np.atleast_2d(param_value) @@ -3763,12 +3763,12 @@ def _validate_params(self, request_set, target_set=None, context=None): # function so: # - assume it uses random.rand() # - call with two args as place markers for cols and rows - # - validate that it returns an array or np.matrix + # - validate that it returns an array elif isinstance(param_value, types.FunctionType): test = param_value(1, 1) - if not isinstance(test, (np.ndarray, np.matrix)): + if not isinstance(test, np.ndarray): raise FunctionError("A function is specified for the matrix of the {} function of {}: {}) " - "that returns a value ({}) that is neither a matrix nor an array". + "that returns a value ({}) that is not an array". # format(param_value, self.__class__.__name__, test)) format(self.name, self.owner_name, param_value, test)) diff --git a/psyneulink/core/components/ports/parameterport.py b/psyneulink/core/components/ports/parameterport.py index 243b18b873a..b46b11191b9 100644 --- a/psyneulink/core/components/ports/parameterport.py +++ b/psyneulink/core/components/ports/parameterport.py @@ -1241,7 +1241,7 @@ def _get_tuple_for_single_item_modulatory_spec(obj, name, value): return # (7/19/17 CW) added this if statement below while adding `hetero` and `auto` and AutoAssociativeProjections: this # allows `hetero` to be specified as a matrix, while still generating a ParameterPort - elif isinstance(param_value, np.ndarray) or isinstance(param_value, np.matrix): + elif isinstance(param_value, np.ndarray): pass # allow function parameters elif param_name in function.parameters.names(): diff --git a/psyneulink/core/components/projections/pathway/mappingprojection.py b/psyneulink/core/components/projections/pathway/mappingprojection.py index 7e3bce0bab3..f60514dda42 100644 --- a/psyneulink/core/components/projections/pathway/mappingprojection.py +++ b/psyneulink/core/components/projections/pathway/mappingprojection.py @@ -468,7 +468,7 @@ def __init__(self, # Assign matrix to function_params for use as matrix param of MappingProjection.function # (7/12/17 CW) this is a PATCH to allow the user to set matrix as an np.matrix... I still don't know why # it wasn't working. 
- if isinstance(matrix, (np.matrix, list)): + if isinstance(matrix, list): matrix = np.array(matrix) self.learning_mechanism = None diff --git a/psyneulink/library/components/mechanisms/processing/transfer/recurrenttransfermechanism.py b/psyneulink/library/components/mechanisms/processing/transfer/recurrenttransfermechanism.py index ada77f2ca2e..e65e5896b9b 100644 --- a/psyneulink/library/components/mechanisms/processing/transfer/recurrenttransfermechanism.py +++ b/psyneulink/library/components/mechanisms/processing/transfer/recurrenttransfermechanism.py @@ -785,9 +785,9 @@ def _validate_params(self, request_set, target_set=None, context=None): if HETERO in target_set: hetero_param = target_set[HETERO] - if hetero_param is not None and not isinstance(hetero_param, (np.matrix, np.ndarray, list, numbers.Number)): + if hetero_param is not None and not isinstance(hetero_param, (np.ndarray, list, numbers.Number)): raise RecurrentTransferError("hetero parameter ({}) of {} is of incompatible type: it should be a " - "number, None, or a 2D numeric matrix or array".format(hetero_param, self)) + "number, None, or a 2D numeric array".format(hetero_param, self)) hetero_shape = np.array(hetero_param).shape if hetero_shape != (1,) and hetero_shape != (1, 1): if isinstance(hetero_param, (np.ndarray, list, np.matrix)) and (hetero_param.ndim > 0 and hetero_shape[0] != self.size[0]): diff --git a/setup.cfg b/setup.cfg index 141ba999200..ffc15d5cfb9 100644 --- a/setup.cfg +++ b/setup.cfg @@ -70,6 +70,7 @@ filterwarnings = error::SyntaxWarning error:Creating an ndarray from ragged nested sequences \(which is a list-or-tuple of lists-or-tuples-or ndarrays with different lengths or shapes\) is deprecated.*:numpy.VisibleDeprecationWarning error:Invalid escape sequence + error:the matrix subclass is not the recommended way to represent matrices or deal with linear algebra [pycodestyle] # for code explanation see https://pep8.readthedocs.io/en/latest/intro.html#error-codes diff --git a/tests/control/test_gilzenrat.py b/tests/control/test_gilzenrat.py index dda4c774a38..1f2c546a21e 100644 --- a/tests/control/test_gilzenrat.py +++ b/tests/control/test_gilzenrat.py @@ -187,7 +187,7 @@ def test_fitzHughNagumo_gilzenrat_figure_2(self): # # Implement self-excitatory (auto) and mutually inhibitory (hetero) connections within the decision layer # decision_layer = GilzenratTransferMechanism(size=2, # initial_value=np.array([[1, 0]]), -# matrix=np.matrix([[1, 0], [0, -1]]), +# matrix=np.array([[1, 0], [0, -1]]), # # auto=1.0, # # hetero=-1.0, # time_step_size=time_step_size, @@ -203,7 +203,7 @@ def test_fitzHughNagumo_gilzenrat_figure_2(self): # # To do Markus: specify recurrent self-connrection weight for response unit to 2.00 # response = GilzenratTransferMechanism(size=1, # initial_value=np.array([[2.0]]), -# matrix=np.matrix([[0.5]]), +# matrix=np.array([[0.5]]), # function=Logistic(bias=2), # time_step_size=time_step_size, # noise=NormalDist(mean=0.0, standard_deviation=standard_deviation).function, diff --git a/tests/mechanisms/test_processing_mechanism.py b/tests/mechanisms/test_processing_mechanism.py index d1d154c33e8..54c9a123cc7 100644 --- a/tests/mechanisms/test_processing_mechanism.py +++ b/tests/mechanisms/test_processing_mechanism.py @@ -279,7 +279,7 @@ def test_valid_matrix_specs(self): np.testing.assert_allclose(PM_2d_array.value, 4.0) - PM_matrix = ProcessingMechanism(function=LinearMatrix(matrix=np.matrix([[4.0]]))) + PM_matrix = ProcessingMechanism(function=LinearMatrix(matrix=np.array([[4.0]]))) 
PM_matrix.execute(1.0) np.testing.assert_allclose(PM_matrix.value, 4.0) diff --git a/tests/mechanisms/test_recurrent_transfer_mechanism.py b/tests/mechanisms/test_recurrent_transfer_mechanism.py index d4fa9ff75bd..38484d1227e 100644 --- a/tests/mechanisms/test_recurrent_transfer_mechanism.py +++ b/tests/mechanisms/test_recurrent_transfer_mechanism.py @@ -238,7 +238,7 @@ def test_recurrent_mech_matrix_keyword_spec(self, matrix): np.testing.assert_allclose(val, [[10., 10., 10., 10.]]) np.testing.assert_allclose(R.recurrent_projection.matrix.base, get_matrix(matrix, R.size[0], R.size[0])) - @pytest.mark.parametrize("matrix", [np.matrix('1 2; 3 4'), np.array([[1, 2], [3, 4]]), [[1, 2], [3, 4]], '1 2; 3 4']) + @pytest.mark.parametrize("matrix", [pnl.array_from_matrix_string('1 2; 3 4'), np.array([[1, 2], [3, 4]]), [[1, 2], [3, 4]], '1 2; 3 4']) def test_recurrent_mech_matrix_other_spec(self, matrix): R = RecurrentTransferMechanism( From ea09f82e140c1480c77043be61fa1b85cfcc5c3e Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Wed, 15 May 2024 21:10:45 -0400 Subject: [PATCH 187/410] llvm/cuda: Always upload param structure to GPU mem The values in the ctype structure might have changed since the last upload. Fixes PTXExec and PTXRun failures in test_multiple_runs_with_parameter_change. Signed-off-by: Jan Vesely --- psyneulink/core/llvm/execution.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/psyneulink/core/llvm/execution.py b/psyneulink/core/llvm/execution.py index 314d601ab06..703ef9de7d0 100644 --- a/psyneulink/core/llvm/execution.py +++ b/psyneulink/core/llvm/execution.py @@ -298,7 +298,10 @@ def download_ctype(self, source, ty, name='other'): def __get_cuda_buffer(self, struct_name): private_attr_name = "_buffer_cuda" + struct_name private_attr = getattr(self, private_attr_name) - if private_attr is None: + + # Param struct needs to be reuploaded every time because the values + # might have changed. + if private_attr is None or struct_name == "_param_struct": # Set private attribute to a new buffer private_attr = self.upload_ctype(getattr(self, struct_name), struct_name) setattr(self, private_attr_name, private_attr) From f910e964806a35e98bd07a38cc3ed918a2fa1155 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Wed, 15 May 2024 19:53:52 -0400 Subject: [PATCH 188/410] llvm/cuda: Always download "data" buffer after GPU execution This makes any modified values/results visible to Python. Fixes PTXExec and PTXRun failures in test_multiple_runs_with_parameter_change_from_data_struct. 
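For readers following along, the host-side rule that this fix and the previous one enforce is: push mutable parameter state to the device before every launch, and pull the data buffer back after every launch. A standalone pycuda sketch of that round trip is below; it is illustrative only (the kernel, buffer names, and use of numpy arrays are invented for the example; the real code moves ctypes structures through upload_ctype()/download_to()):

    import numpy as np
    import pycuda.autoinit          # noqa: F401, creates a CUDA context
    import pycuda.driver as drv
    from pycuda.compiler import SourceModule

    mod = SourceModule("""
    __global__ void scale(double *data, const double *params)
    {
        data[threadIdx.x] *= params[0];
    }
    """)
    scale = mod.get_function("scale")

    data = np.arange(4, dtype=np.float64)    # stands in for the compiled "data" struct
    params = np.array([2.0])                 # stands in for the compiled "param" struct

    data_gpu = drv.mem_alloc(data.nbytes)
    params_gpu = drv.mem_alloc(params.nbytes)
    drv.memcpy_htod(data_gpu, data)

    for _ in range(2):
        params[0] += 1.0                     # host-side change between launches
        drv.memcpy_htod(params_gpu, params)  # always re-upload params (previous patch)
        scale(data_gpu, params_gpu, block=(4, 1, 1))
        drv.memcpy_dtoh(data, data_gpu)      # always download data (this patch)

    print(data)                              # reflects both GPU-side updates

Skipping either copy leaves Python looking at stale values, which is the failure mode the tests named above exercise.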
Signed-off-by: Jan Vesely --- psyneulink/core/llvm/execution.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/psyneulink/core/llvm/execution.py b/psyneulink/core/llvm/execution.py index 703ef9de7d0..65460ba8fb2 100644 --- a/psyneulink/core/llvm/execution.py +++ b/psyneulink/core/llvm/execution.py @@ -661,11 +661,10 @@ def cuda_execute(self, inputs): self._cuda_conditions, threads=len(self._execution_contexts)) - # Copy the data struct from the device - self._data_struct = self.download_ctype(self._cuda_data_struct, type(self._data_struct), '_data_struct') + # Copy the data structs from the device + self.download_to(self._data_struct, self._cuda_data_struct, 'data') # Methods used to accelerate "Run" - def _get_run_input_struct(self, inputs, num_input_sets, arg=3): # Callers that override input arg, should ensure that _bin_func is not None bin_f = self._bin_run_func if arg == 3 else self._bin_func @@ -780,6 +779,7 @@ def cuda_run(self, inputs, runs, num_input_sets): threads=len(self._execution_contexts)) # Copy the data struct from the device + self.download_to(self._data_struct, self._cuda_data_struct, 'data') ct_out = self.download_ctype(data_out, output_type, 'result') if len(self._execution_contexts) > 1: return _convert_ctype_to_python(ct_out) From d4cfba2bdb00f8cbbf81705dd3b1fedf8ce0b550 Mon Sep 17 00:00:00 2001 From: David Turner Date: Thu, 16 May 2024 16:58:44 -0400 Subject: [PATCH 189/410] Conditional parameter fitting in PEC. --- Scripts/Debug/ddm/ddm_cond_pec_fit.py | 139 +++++++++++++++ Scripts/Debug/ddm/ddm_pec_fit.py | 2 +- .../stability_flexibility_pec_fit.py | 4 +- .../functions/nonstateful/fitfunctions.py | 58 +++--- psyneulink/core/compositions/composition.py | 4 +- .../parameterestimationcomposition.py | 168 +++++++++++++++++- .../test_parameterestimationcomposition.py | 96 ++++++++-- 7 files changed, 424 insertions(+), 47 deletions(-) create mode 100644 Scripts/Debug/ddm/ddm_cond_pec_fit.py diff --git a/Scripts/Debug/ddm/ddm_cond_pec_fit.py b/Scripts/Debug/ddm/ddm_cond_pec_fit.py new file mode 100644 index 00000000000..315fdf8e1cb --- /dev/null +++ b/Scripts/Debug/ddm/ddm_cond_pec_fit.py @@ -0,0 +1,139 @@ +import pandas as pd +import numpy as np +import psyneulink as pnl + +def _run_ddm_with_params( + starting_value, + rate, + noise, + threshold, + non_decision_time, + time_step_size, + trial_inputs, +): + """Create a composition with DDM and run it with the given parameters.""" + + # Create a simple one mechanism composition containing a DDM in integrator mode. + decision = pnl.DDM( + function=pnl.DriftDiffusionIntegrator( + starting_value=starting_value, + rate=rate, + noise=noise, + threshold=threshold, + non_decision_time=non_decision_time, + time_step_size=time_step_size, + ), + output_ports=[pnl.DECISION_OUTCOME, pnl.RESPONSE_TIME], + name="DDM", + ) + + comp = pnl.Composition(pathways=decision) + + # Run the composition to generate some data to fit + comp.run(inputs={decision: trial_inputs}) + results = comp.results + + data_to_fit = pd.DataFrame( + np.squeeze(np.array(results)), columns=["decision", "response_time"] + ) + data_to_fit["decision"] = data_to_fit["decision"].astype("category") + + return comp, data_to_fit + + +# High-level parameters the impact performance of the test +num_trials = 50 +time_step_size = 0.01 +num_estimates = 10000 + +# Let's generate an "experimental" dataset to fit. 
This is a parameter recovery test +# Lets make 10% of the trials have a positive stimulus drift rate, and the other 90% +# have a negative stimulus drift rate. +rng = np.random.default_rng(12345) +trial_inputs = rng.choice( + [5.0, -5.0], size=(num_trials, 1), p=[0.10, 0.9], replace=True +) + +# Make the first and last input positive for sure. This helps make sure inputs are really getting +# passed to the composition correctly during parameter fitting, and we aren't just getting a single +# trials worth of a cached input. +trial_inputs[0] = np.abs(trial_inputs[0]) +trial_inputs[-1] = np.abs(trial_inputs[-1]) + +ddm_params = dict( + starting_value=0.0, + rate=0.3, + noise=1.0, + threshold=0.6, + non_decision_time=0.15, + time_step_size=time_step_size, +) + +# We will generate a dataset that comprises two different conditions. Each condition will have a different +# threshold. +params_cond1 = dict( + threshold=0.7, +) + +params_cond2 = dict( + threshold=0.3, +) + +comp, data_cond1 = _run_ddm_with_params(**{**ddm_params, **params_cond1}, trial_inputs=trial_inputs) +_, data_cond2 = _run_ddm_with_params(**{**ddm_params, **params_cond2}, trial_inputs=trial_inputs) + +# Combine the data from the two conditions +data_cond1['condition'] = 'cond_t=0.7' +data_cond2['condition'] = 'cond_t=0.3' +data_to_fit = pd.concat([data_cond1, data_cond2]) + +# Add the inputs as columns to the data temporarily so we can shuffle the data and shuffle the inputs together +data_to_fit['inputs'] = np.concatenate([trial_inputs, trial_inputs]) + +# Shuffle the data, seed is set for reproducibility +data_to_fit = data_to_fit.sample(frac=1, random_state=42) + +# Extract the shuffled inputs +trial_inputs = data_to_fit['inputs'].to_numpy().reshape(-1, 1) +data_to_fit = data_to_fit.drop(columns='inputs') + +fit_parameters = { + ("rate", comp.nodes['DDM']): np.linspace(-0.5, 0.5, 1000), + ("non_decision_time", comp.nodes['DDM']): np.linspace(0.0, 1.0, 1000), + ("threshold", comp.nodes['DDM']): np.linspace(0.1, 1.0, 1000), +} + +pec = pnl.ParameterEstimationComposition( + name="pec", + nodes=[comp], + parameters=fit_parameters, + depends_on={("threshold", comp.nodes['DDM']): 'condition'}, + outcome_variables=[ + comp.nodes['DDM'].output_ports[pnl.DECISION_OUTCOME], + comp.nodes['DDM'].output_ports[pnl.RESPONSE_TIME], + ], + data=data_to_fit, + optimization_function=pnl.PECOptimizationFunction( + method="differential_evolution", max_iterations=1, + ), + num_estimates=num_estimates, + initial_seed=42, +) + +pec.controller.parameters.comp_execution_mode.set("LLVM") +pec.controller.function.parameters.save_values.set(True) +pec.run(inputs={comp: trial_inputs}) + +records = [] +params = { + 'DDM.rate': ddm_params['rate'], + 'DDM.non_decision_time': ddm_params['non_decision_time'], + 'DDM.threshold': 0.3, + 'DDM.threshold': 0.7 +} +for i, (name, recovered_param) in enumerate(pec.optimized_parameter_values.items()): + percent_error = 100.0 * (abs(params[name] - recovered_param) / params[name]) + records.append((name, params[name], recovered_param, percent_error)) +df = pd.DataFrame(records, columns=['Parameter', 'Value', 'Recovered Value', 'Percent Error']) +print(df) + diff --git a/Scripts/Debug/ddm/ddm_pec_fit.py b/Scripts/Debug/ddm/ddm_pec_fit.py index 3d5b3761106..2b073ae1229 100644 --- a/Scripts/Debug/ddm/ddm_pec_fit.py +++ b/Scripts/Debug/ddm/ddm_pec_fit.py @@ -90,7 +90,7 @@ pec.controller.parameters.comp_execution_mode.set("LLVM") pec.controller.function.parameters.save_values.set(True) ret = pec.run(inputs={comp: 
trial_inputs}) -optimal_parameters = pec.optimized_parameter_values +optimal_parameters = list(pec.optimized_parameter_values.values()) # Check that the parameters are recovered and that the log-likelihood is correct, set the tolerance pretty high, # things are noisy because of the low number of trials and estimates. diff --git a/Scripts/Debug/stability_flexibility/stability_flexibility_pec_fit.py b/Scripts/Debug/stability_flexibility/stability_flexibility_pec_fit.py index 09eceb2dea5..718165c8651 100644 --- a/Scripts/Debug/stability_flexibility/stability_flexibility_pec_fit.py +++ b/Scripts/Debug/stability_flexibility/stability_flexibility_pec_fit.py @@ -100,7 +100,7 @@ responseGate.output_ports[0], ], data=data_to_fit, - optimization_function='differential_evolution', + optimization_function=pnl.PECOptimizationFunction(method='differential_evolution', vectorized=False), num_estimates=num_estimates, ) @@ -109,7 +109,7 @@ print("Running the PEC") ret = pec.run(inputs=inputs) -optimal_parameters = pec.optimized_parameter_values +optimal_parameters = list(pec.optimized_parameter_values.values()) # Print the recovered parameters. records = [] diff --git a/psyneulink/core/components/functions/nonstateful/fitfunctions.py b/psyneulink/core/components/functions/nonstateful/fitfunctions.py index de02e5c9ebd..812dd23a7fe 100644 --- a/psyneulink/core/components/functions/nonstateful/fitfunctions.py +++ b/psyneulink/core/components/functions/nonstateful/fitfunctions.py @@ -368,6 +368,8 @@ def __init__( # Keep track of the best parameters self._best_params = {} + self._method_kwargs = kwargs if kwargs else {} + super().__init__( search_space=search_space, save_samples=save_samples, @@ -375,7 +377,6 @@ def __init__( search_function=search_function, search_termination_function=search_termination_function, aggregation_function=None, - **kwargs, ) def set_pec_objective_function(self, objective_function: Callable): @@ -437,18 +438,10 @@ def _run_simulations(self, *args, context=None): f"Expected {len(self.fit_param_names)} arguments, got {len(args)}" ) - # Parameter values are passed through the input data. 
- # Since we are passing fitting\optimization parameters as inputs we need add them to the inputs - # params_input = [np.array([v[0]]) for v in self.fit_parameters.values()] - # inputs = {self.model: [[trial] + params_input for trial in inputs[self.model]]} - # - # self.controller.set_pec_inputs_cache(inputs) - inputs_array = list(self.owner.composition.controller._pec_input_values.values())[0] - for trial in range(len(inputs_array)): - for i, name in enumerate(self.fit_param_names): - start_index = len(inputs_array[trial]) - len(self.fit_param_names) - inputs_array[trial][start_index+i] = np.array([args[i]]) - + # If the model is in the inputs, then inputs are passed as list of lists and we need to add the fitting + # parameters to each trial as a concatenated list + inputs = self.owner.composition.controller._pec_input_values + self.owner.composition.controller.set_parameters_in_inputs(parameters=args, inputs=inputs) # Reset the search grid self.reset_grid() @@ -736,6 +729,7 @@ def progress_callback(x, convergence): seed=seed_for_scipy, popsize=15, polish=False, + **self._method_kwargs ) # Bind the fitted parameters to their names @@ -848,11 +842,20 @@ def fit_param_names(self) -> List[str]: """Get a unique name for each parameter in the fit.""" if self.owner is not None: # Go through each parameter and create a unique name for it - # If the mechanism name has an invalid character (for a python identifiter), we need to replace - # it with an underscore. - names = [(param_name, re.sub(r"\W|^(?=\d)",'_', mech.name)) - for param_name, mech in self.owner.fit_parameters.keys()] - return [f"{mech_name}_{param_name}" for param_name, mech_name in names] + if not self.owner.depends_on: + return [f"{mech.name}.{param_name}" + for param_name, mech in self.owner.fit_parameters.keys()] + else: + names = [] + for param_name, mech in self.owner.fit_parameters.keys(): + if (param_name, mech) in self.owner.cond_levels: + for level in self.owner.cond_levels[(param_name, mech)]: + names.append(f"{mech.name}.{param_name}<{level}>") + else: + names.append(f"{mech.name}.{param_name}") + + return names + else: return None @@ -869,10 +872,21 @@ def fit_param_bounds(self) -> Dict[str, Tuple[float, float, float]]: if self.owner is not None: - bounds = [(float(min(s)), float(max(s))) for s in self.owner.fit_parameters.values()] - - # Get the step size for each parameter. 
- steps = [np.unique(np.diff(s).round(decimals=5)) for s in self.owner.fit_parameters.values()] + if not self.owner.depends_on: + bounds = [(float(min(s)), float(max(s))) for s in self.owner.fit_parameters.values()] + steps = [np.unique(np.diff(s).round(decimals=5)) for s in self.owner.fit_parameters.values()] + else: + bounds = [] + steps = [] + for param_name, mech in self.owner.fit_parameters.keys(): + s = self.owner.fit_parameters[(param_name, mech)] + if (param_name, mech) in self.owner.cond_levels: + for _ in self.owner.cond_levels[(param_name, mech)]: + bounds.append((float(min(s)), float(max(s)))) + steps.append(np.unique(np.diff(s).round(decimals=5))) + else: + bounds.append((float(min(s)), float(max(s)))) + steps.append(np.unique(np.diff(s).round(decimals=5))) # We also check if step size is constant, if not we raise an error for s in steps: diff --git a/psyneulink/core/compositions/composition.py b/psyneulink/core/compositions/composition.py index ecd6c53bfcf..ce90ab1f2cd 100644 --- a/psyneulink/core/compositions/composition.py +++ b/psyneulink/core/compositions/composition.py @@ -9395,7 +9395,9 @@ def add_controller(self, controller: ControlMechanism, context=None): and not (isinstance(self.controller.input_ports, ContentAddressableList) and self.controller.input_ports and self.controller.afferents)): - warnings.warn(f"{self.controller.name} for {self.name} is enabled but has no inputs.") + from psyneulink.core.compositions.parameterestimationcomposition import ParameterEstimationComposition + if not isinstance(self, ParameterEstimationComposition): + warnings.warn(f"{self.controller.name} for {self.name} is enabled but has no inputs.") # ADD MODULATORY COMPONENTS ----------------------------------------------------- diff --git a/psyneulink/core/compositions/parameterestimationcomposition.py b/psyneulink/core/compositions/parameterestimationcomposition.py index e8551f25e81..79e5f2a619d 100644 --- a/psyneulink/core/compositions/parameterestimationcomposition.py +++ b/psyneulink/core/compositions/parameterestimationcomposition.py @@ -516,6 +516,7 @@ def __init__( num_trials_per_estimate: Optional[int] = None, initial_seed: Optional[int] = None, same_seed_for_all_parameter_combinations: Optional[bool] = None, + depends_on: Optional[Dict] = None, name: Optional[str] = None, context: Optional[Context] = None, **kwargs, @@ -561,14 +562,25 @@ def __init__( kwargs.update({"nodes": model}) self.model = model + self.depends_on = depends_on + + # These will be assigned in _validate_date if depends_on is not None + self.cond_levels = None + self.cond_mask = None + self.cond_data = None + self.optimized_parameter_values = [] - pec_mechs = {} + self.pec_control_mechs = {} + self.pec_control_mechs_input_indices = [] + idx = len(self.model.input_ports) for (pname, mech), values in parameters.items(): - pec_mechs[(pname, mech)] = ControlMechanism(name=f"{pname}_control", - control_signals=[(pname, mech)], - modulation=OVERRIDE) - self.model.add_node(pec_mechs[(pname, mech)]) + self.pec_control_mechs[(pname, mech)] = ControlMechanism(name=f"{pname}_control", + control_signals=[(pname, mech)], + modulation=OVERRIDE) + self.model.add_node(self.pec_control_mechs[(pname, mech)]) + self.pec_control_mechs_input_indices.append(idx) + idx += 1 super().__init__( name=name, @@ -682,6 +694,54 @@ def __init__( def _validate_data(self): """Check if user supplied data to fit is valid for data fitting mode.""" + # If there is a depends_on attribute, the user is doing a conditional parameterization. 
The data must be a + # pandas dataframe, and we must strip out any columns that parameters are marked to depend on. These columns + # should be categorical or string columns. + if self.depends_on: + if not isinstance(self.data, pd.DataFrame): + raise ValueError( + "If using conditional parameterization, the data must be a pandas dataframe." + ) + + # Check if the dependent columns are in the data + for param, col in self.depends_on.items(): + if col not in self.data.columns: + raise ValueError(f"The data does not contain the column '{col}' that parameter '{param}' " + f"is dependent on.") + + # If the column is string, convert to categorical + if self.data[col].dtype == object: + self.data[col] = self.data[col].astype('category') + + # If the column is not categorical, return and error + if not self.data[col].dtype.name == 'category': + raise ValueError(f"The column '{col}' that parameter '{param}' is dependent on must be a string or" + f" categorical column.") + + # Make sure the column does not have too many unique values. + if len(self.data[col].unique()) > 5: + warnings.warn(f"Column '{col}' has more than 5 unique values. Values = {self.data[col].unique()}. " + f"Each unique value will be treated as a separate condition. This may lead to a " + f"large number of parameters to estimate. Consider reducing the number of unique " + f"values in this column.") + + # Get a separate copy of the dataframe with conditional columns + self.cond_data = self.data[self.depends_on.values()].copy() + + # For each value in depends_on, get the unique levels of the column. This will determine the number of + # of conditional parameters that need to be estimated for that parameter. + self.cond_levels = {param: self.cond_data[col].unique() for param, col in self.depends_on.items()} + + # We also need a mask to keep track of which trials are associated with which condition + self.cond_mask = {} + for param, col in self.depends_on.items(): + self.cond_mask[param] = {} + for level in self.cond_levels[param]: + self.cond_mask[param][level] = self.cond_data[col] == level + + # Remove the dependent columns from the data + self.data = self.data.drop(columns=self.depends_on.values()) + # If the data is not in numpy format (could be a pandas dataframe) convert it to numpy. Cast all values to # floats and keep track of categorical dimensions with a mask. This preprocessing is done to make the data # compatible with passing directly to simulation_likelihood function. 
This avoids having to do the same with @@ -850,6 +910,9 @@ def f(sim_data): agent_rep=agent_rep, monitor_for_control=outcome_variables, fit_parameters=parameters, + depends_on=self.depends_on, + cond_levels=self.cond_levels, + cond_mask=self.cond_mask, allow_probes=True, objective_mechanism=objective_mechanism, function=optimization_function, @@ -887,8 +950,11 @@ def run(self, *args, **kwargs): # Since we are passing fitting\optimazation parameters as inputs we need add them to the inputs if inputs: - params_input = [np.array([v[0]]) for v in self.fit_parameters.values()] - inputs = {self.model: [[trial] + params_input for trial in inputs[self.model]]} + + # Add the fitting parameters to the inputs, these will be modulated during fitting or optimization, + # we just use a dummy value here for now (the first value in the range of the parameter) + dummy_params = [v[0] for v in self.controller.function.fit_param_bounds.values()] + self.controller.set_parameters_in_inputs(dummy_params, inputs) self.controller.set_pec_inputs_cache(inputs) @@ -909,6 +975,10 @@ def run(self, *args, **kwargs): kwargs.pop("inputs", None) + # Turn off warnings about no inputs the PEC. This is because the PEC doesn't have any inputs itself, it + # caches the inputs passed to it and passes them along to the inner composition during simulation. + self.warned_about_run_with_no_inputs = True + num_trials_per_estimate = len(inputs_dict[list(inputs_dict.keys())[0]]) self.controller.parameters.num_trials_per_estimate.set( num_trials_per_estimate, context=context @@ -920,9 +990,10 @@ def run(self, *args, **kwargs): # IMPLEMENTATION NOTE: has not executed OCM after first call if hasattr(self.controller, "optimal_control_allocation"): # Assign optimized_parameter_values and optimal_value (remove randomization dimension) - self.optimized_parameter_values = ( + self.optimized_parameter_values = dict(zip( + self.controller.function.fit_param_names, self.controller.optimal_control_allocation[:-1] - ) + )) self.optimal_value = self.controller.optimal_net_outcome return results @@ -1054,6 +1125,20 @@ def __init__(self, *args, **kwargs): else: raise ValueError("PEC_OCM requires that the PEC parameters be passed down to it.") + if 'depends_on' in kwargs: + self.depends_on = kwargs['depends_on'] + del kwargs['depends_on'] + else: + self.depends_on = None + + if 'cond_levels' in kwargs: + self.cond_levels = kwargs['cond_levels'] + del kwargs['cond_levels'] + + if 'cond_mask' in kwargs: + self.cond_mask = kwargs['cond_mask'] + del kwargs['cond_mask'] + super().__init__(*args, **kwargs) def _instantiate_output_ports(self, context=None): @@ -1123,6 +1208,71 @@ def set_pec_inputs_cache(self, inputs_dict: dict) -> dict: self._pec_input_values = inputs_dict + def set_parameters_in_inputs(self, parameters, inputs): + """ + Add the fitting parameters to the inputs passed to the model for each trial. Originally, the PEC used the + OCM to modulate the parameters of the model. However, this did not allow for trial-wise conditional or varying + parameter values. The current implementation passes the fitting parameters directly to the model as inputs. + These inputs go to dummy control mechanisms that are added to the composition before fitting or optimization. + The control mechanisms are then used to modulate the parameters of the model. This function has side effects + because it modifies the inputs dictionary in place. + + Args: + parameters (list): A list of fitting parameters that are to be passed to the model as inputs. 
+ inputs (dict): A dictionary of inputs that are passed to the model for each trial. + + """ + + # If the model is in the inputs, then inputs are passed as list of lists and we need to add the fitting + # parameters to each trial as a concatenated list. + if self.composition.model in inputs: + + in_arr = inputs[self.composition.model] + + if type(in_arr) is not np.ndarray: + in_arr = np.array(in_arr) + + # Make sure it is 3D + in_arr = np.atleast_3d(in_arr) + + # If the inputs don't have columns for the fitting parameters, then we need to add them + if in_arr.shape[1] != len(self.composition.input_ports): + num_missing = len(self.composition.input_ports) - in_arr.shape[1] + in_arr = np.hstack((in_arr, np.zeros((in_arr.shape[0], num_missing, 1)))) + + j = 0 + for i, (pname, mech) in enumerate(self.fit_parameters.keys()): + mech_idx = self.composition.pec_control_mechs_input_indices[i] + if not self.depends_on or (pname, mech) not in self.depends_on: + in_arr[:, mech_idx, 0] = parameters[j] + j += 1 + else: + for level in self.cond_levels[(pname, mech)]: + mask = self.cond_mask[(pname, mech)][level] + in_arr[mask, mech_idx, 0] = parameters[j] + j += 1 + + inputs[self.composition.model] = in_arr + + # Otherwise, assume the inputs are passed to each mechanism individually. Thus, we need to feed the + # fitting parameters to the model to their respective control mechanisms + else: + + num_trials = len(list(inputs.values())[0]) + + j = 0 + for i, ((pname, mech), values) in enumerate(self.fit_parameters.items()): + control_mech = self.composition.pec_control_mechs[(pname, mech)] + if not self.depends_on or (pname, mech) not in self.depends_on: + inputs[control_mech] = np.ones((num_trials, 1)) * parameters[j] + j += 1 + else: + inputs[control_mech] = np.zeros((num_trials, 1)) + for level in self.cond_levels[(pname, mech)]: + mask = self.cond_mask[(pname, mech)][level] + inputs[control_mech][mask] = parameters[j] + j += 1 + def _execute(self, variable=None, context=None, runtime_params=None)->np.ndarray: """Return control_allocation that optimizes net_outcome of agent_rep.evaluate(). """ diff --git a/tests/composition/test_parameterestimationcomposition.py b/tests/composition/test_parameterestimationcomposition.py index 889016221ea..0e40435dea0 100644 --- a/tests/composition/test_parameterestimationcomposition.py +++ b/tests/composition/test_parameterestimationcomposition.py @@ -171,11 +171,13 @@ def _run_ddm_with_params( (optuna.samplers.RandomSampler, {'seed': 0}, [0.01]), (optuna.samplers.RandomSampler(), None, None) ], - ids=["differential_evolution", - "optuna_random_sampler", - "optuna_qmc_sampler", - "optuna_random_sampler_with_kwargs", - "optuna_random_sampler_no_seed"], + ids=[ + "differential_evolution", + "optuna_random_sampler", + "optuna_qmc_sampler", + "optuna_random_sampler_with_kwargs", + "optuna_random_sampler_no_seed" + ], ) def test_parameter_optimization_ddm(func_mode, opt_method, optuna_kwargs, result): """Test parameter optimization of a DDM in integrator mode""" @@ -240,7 +242,6 @@ def reward_rate(sim_data): # Let's generate an "experimental" dataset to fit. This is a parameter recovery test # Lets make 10% of the trials have a positive stimulus drift rate, and the other 90% # have a negative stimulus drift rate. 
- # trial_inputs = np.ones((num_trials, 1)) rng = np.random.default_rng(12345) trial_inputs = rng.choice( [5.0, -5.0], size=(num_trials, 1), p=[0.10, 0.9], replace=True @@ -284,9 +285,9 @@ def reward_rate(sim_data): pec.run(inputs={comp: trial_inputs}) if result is not None: - np.testing.assert_allclose(pec.optimized_parameter_values, result) - + np.testing.assert_allclose(list(pec.optimized_parameter_values.values()), result) +@pytest.mark.skip def test_parameter_estimation_ddm_cond(func_mode): if func_mode == "Python": pytest.skip( @@ -296,7 +297,21 @@ def test_parameter_estimation_ddm_cond(func_mode): # High-level parameters the impact performance of the test num_trials = 50 time_step_size = 0.01 - num_estimates = 1000 + num_estimates = 400 + + # Let's generate an "experimental" dataset to fit. This is a parameter recovery test + # Lets make 10% of the trials have a positive stimulus drift rate, and the other 90% + # have a negative stimulus drift rate. + rng = np.random.default_rng(12345) + trial_inputs = rng.choice( + [5.0, -5.0], size=(num_trials, 1), p=[0.10, 0.9], replace=True + ) + + # Make the first and last input positive for sure. This helps make sure inputs are really getting + # passed to the composition correctly during parameter fitting, and we aren't just getting a single + # trials worth of a cached input. + trial_inputs[0] = np.abs(trial_inputs[0]) + trial_inputs[-1] = np.abs(trial_inputs[-1]) ddm_params = dict( starting_value=0.0, @@ -307,8 +322,65 @@ def test_parameter_estimation_ddm_cond(func_mode): time_step_size=time_step_size, ) - # We will generate a dataset that is comprised of two different conditions. Each condition will have a different - # drift rate and non_decision_time. + # We will generate a dataset that comprises two different conditions. Each condition will have a different + # threshold. 
+ params_cond1 = dict( + threshold=0.7, + ) + + params_cond2 = dict( + threshold=0.3, + ) + + comp, data_cond1 = _run_ddm_with_params(**{**ddm_params, **params_cond1}, trial_inputs=trial_inputs) + _, data_cond2 = _run_ddm_with_params(**{**ddm_params, **params_cond2}, trial_inputs=trial_inputs) + + # Combine the data from the two conditions + data_cond1['condition'] = 'cond1' + data_cond2['condition'] = 'cond2' + data_to_fit = pd.concat([data_cond1, data_cond2]) + + # Add the inputs as columns to the data temporarily so we can shuffle the data and shuffle the inputs together + data_to_fit['inputs'] = np.concatenate([trial_inputs, trial_inputs]) + + # Shuffle the data, seed is set for reproducibility + data_to_fit = data_to_fit.sample(frac=1, random_state=42) + + # Extract the shuffled inputs + trial_inputs = data_to_fit['inputs'].to_numpy().reshape(-1, 1) + data_to_fit = data_to_fit.drop(columns='inputs') + + fit_parameters = { + ("rate", comp.nodes['DDM']): np.linspace(-0.5, 0.5, 1000), + ("non_decision_time", comp.nodes['DDM']): np.linspace(0.0, 1.0, 1000), + ("threshold", comp.nodes['DDM']): np.linspace(0.1, 1.0, 1000), + } + + pec = pnl.ParameterEstimationComposition( + name="pec", + nodes=[comp], + parameters=fit_parameters, + depends_on={("threshold", comp.nodes['DDM']): 'condition'}, + outcome_variables=[ + comp.nodes['DDM'].output_ports[pnl.DECISION_OUTCOME], + comp.nodes['DDM'].output_ports[pnl.RESPONSE_TIME], + ], + data=data_to_fit, + optimization_function=PECOptimizationFunction( + method="differential_evolution", max_iterations=1, + ), + num_estimates=num_estimates, + initial_seed=42, + ) + + pec.controller.parameters.comp_execution_mode.set(func_mode) + pec.controller.function.parameters.save_values.set(True) + pec.run(inputs={comp: trial_inputs}) + + np.testing.assert_allclose( + list(pec.optimized_parameter_values.values()), + [0.2227273962084888, 0.5976130662377002, 0.1227723651473831], + ) @pytest.mark.parametrize('likelihood_include_mask', [ @@ -394,7 +466,7 @@ def test_parameter_estimation_ddm_mle(func_mode, likelihood_include_mask): # against hardcoded values to make sure we are reproducing # the same search trajectory from a known working example. np.testing.assert_allclose( - pec.optimized_parameter_values, + list(pec.optimized_parameter_values.values()), [0.2227273962084888, 0.5976130662377002, 0.1227723651473831], ) From 1b002ff18aeb48ffe7ce9c771aa8e844b7f6706e Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Thu, 16 May 2024 20:26:50 -0400 Subject: [PATCH 190/410] llvm/cuda: Download an free mutable GPU buffers after kernel execution Download the most recent values to update shared llvm/Python structures. Drop GPU side buffers as they need to be re-uploaded on next execution, in case the shared llvm/Python values were modified. Store gpu buffers in a dictionary instead of using attributes. 
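A GPU-free sketch of that bookkeeping is below, illustrative only (the class name and the upload/download callables are invented for the example; the real code keys _gpu_buffers by struct name and moves ctypes structures with pycuda):

    class BufferCache:
        # Lazily upload host structures; drop the device copy once it is written back.
        def __init__(self, upload, download):
            self._upload = upload            # host struct -> device buffer
            self._download = download        # device buffer -> host struct, in place
            self._gpu_buffers = {}

        def get(self, name, host_struct):
            buf = self._gpu_buffers.get(name)
            if buf is None:                  # not resident: upload a fresh copy
                buf = self._upload(host_struct)
                self._gpu_buffers[name] = buf
            return buf

        def writeback(self, name, host_struct):
            buf = self._gpu_buffers.pop(name, None)   # "move": device copy is dropped
            if buf is not None:
                self._download(buf, host_struct)

    # Toy usage with plain lists standing in for device memory.
    def _copy_back(dev, host):
        host[:] = dev

    cache = BufferCache(upload=lambda host: list(host), download=_copy_back)
    state = [1.0, 2.0, 3.0]
    dev_buf = cache.get("_data_struct", state)   # uploaded once
    dev_buf[0] = 42.0                            # a "kernel" modifies the device copy
    cache.writeback("_data_struct", state)       # host sees 42.0, buffer is dropped
    assert state[0] == 42.0 and "_data_struct" not in cache._gpu_buffers

Dropping the buffer on writeback is what forces the next get() to re-upload, so host-side edits made between runs are never lost.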
Fixes PTXExec and PTXRun failures in test_multiple_runs_with_parameter_change_arr Signed-off-by: Jan Vesely --- psyneulink/core/llvm/execution.py | 38 +++++++++++++++++++------------ 1 file changed, 24 insertions(+), 14 deletions(-) diff --git a/psyneulink/core/llvm/execution.py b/psyneulink/core/llvm/execution.py index 65460ba8fb2..95eafe480ab 100644 --- a/psyneulink/core/llvm/execution.py +++ b/psyneulink/core/llvm/execution.py @@ -251,8 +251,9 @@ def _copy_params_to_pnl(self, context, component, params, ids:str, condition:Cal class CUDAExecution(Execution): def __init__(self, buffers=['param_struct', 'state_struct', 'out']): super().__init__() + self._gpu_buffers = {} for b in buffers: - setattr(self, "_buffer_cuda_" + b, None) + self._gpu_buffers["_" + b] = None self._uploaded_bytes = Counter() self._downloaded_bytes = Counter() @@ -285,9 +286,13 @@ def upload_ctype(self, data, name='other'): return jit_engine.pycuda.driver.mem_alloc(4) return jit_engine.pycuda.driver.to_device(bytes(data)) - def download_to(self, dst, source, name='other'): + def download_to(self, dst, source, name='other', *, move=False): bounce = self.download_ctype(source, type(dst), name) ctypes.memmove(ctypes.addressof(dst), ctypes.addressof(bounce), ctypes.sizeof(dst)) + if move: + for k, v in self._gpu_buffers.items(): + if v is source: + self._gpu_buffers[k] = None def download_ctype(self, source, ty, name='other'): self._downloaded_bytes[name] += ctypes.sizeof(ty) @@ -296,17 +301,16 @@ def download_ctype(self, source, ty, name='other'): return ty.from_buffer(out_buf) def __get_cuda_buffer(self, struct_name): - private_attr_name = "_buffer_cuda" + struct_name - private_attr = getattr(self, private_attr_name) + gpu_buffer = self._gpu_buffers[struct_name] # Param struct needs to be reuploaded every time because the values # might have changed. 
- if private_attr is None or struct_name == "_param_struct": + if gpu_buffer is None or struct_name == "_param_struct": # Set private attribute to a new buffer - private_attr = self.upload_ctype(getattr(self, struct_name), struct_name) - setattr(self, private_attr_name, private_attr) + gpu_buffer = self.upload_ctype(getattr(self, struct_name), struct_name) + self._gpu_buffers[struct_name] = gpu_buffer - return private_attr + return gpu_buffer @property def _cuda_param_struct(self): @@ -326,10 +330,13 @@ def _cuda_conditions(self): @property def _cuda_out(self): - if self._buffer_cuda_out is None: + gpu_buffer = self._gpu_buffers["_out"] + if gpu_buffer is None: size = ctypes.sizeof(self._ct_vo) - self._buffer_cuda_out = jit_engine.pycuda.driver.mem_alloc(size) - return self._buffer_cuda_out + gpu_buffer = jit_engine.pycuda.driver.mem_alloc(size) + self._gpu_buffers["_out"] = gpu_buffer + + return gpu_buffer def cuda_execute(self, variable): # Create input argument @@ -344,7 +351,7 @@ def cuda_execute(self, variable): # Copy the result from the device self.download_to(self._ct_vo, self._cuda_out, 'result') - self.download_to(self._state_struct, self._cuda_state_struct, 'state') + self.download_to(self._state_struct, self._cuda_state_struct, 'state', move=True) return _convert_ctype_to_python(self._ct_vo) @@ -662,7 +669,8 @@ def cuda_execute(self, inputs): threads=len(self._execution_contexts)) # Copy the data structs from the device - self.download_to(self._data_struct, self._cuda_data_struct, 'data') + self.download_to(self._data_struct, self._cuda_data_struct, 'data', move=True) + self.download_to(self._state_struct, self._cuda_state_struct, 'state', move=True) # Methods used to accelerate "Run" def _get_run_input_struct(self, inputs, num_input_sets, arg=3): @@ -779,7 +787,9 @@ def cuda_run(self, inputs, runs, num_input_sets): threads=len(self._execution_contexts)) # Copy the data struct from the device - self.download_to(self._data_struct, self._cuda_data_struct, 'data') + self.download_to(self._data_struct, self._cuda_data_struct, 'data', move=True) + self.download_to(self._state_struct, self._cuda_state_struct, 'state', move=True) + ct_out = self.download_ctype(data_out, output_type, 'result') if len(self._execution_contexts) > 1: return _convert_ctype_to_python(ct_out) From 932d0fdfcbae25d935ea1cc7161069758a3d67b8 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Fri, 17 May 2024 00:08:53 -0400 Subject: [PATCH 191/410] conftest: Clear graph scheduler cache in test teardown Bump minimum requirement to 1.2.1 Signed-off-by: Jan Vesely --- conftest.py | 3 +++ requirements.txt | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/conftest.py b/conftest.py index 7abdcd8ce4c..13b39fce95d 100644 --- a/conftest.py +++ b/conftest.py @@ -7,6 +7,7 @@ import re import sys +import graph_scheduler as gs import psyneulink from psyneulink import clear_registry, primary_registries, torch_available from psyneulink.core import llvm as pnlvm @@ -126,6 +127,8 @@ def pytest_runtest_teardown(item): # Clear Registry to have a stable reference for indexed suffixes of default names clear_registry(registry) + gs.utilities.cached_hashable_graph_function.cache_clear() + pnlvm.cleanup() @pytest.fixture diff --git a/requirements.txt b/requirements.txt index 68bf04889c3..2b7694b13c4 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,7 +1,7 @@ beartype<0.19.0 dill<0.3.9 fastkde>=1.0.24, <1.0.31 -graph-scheduler>=1.1.1, <1.3.0 +graph-scheduler>=1.2.1, <1.3.0 graphviz<0.21.0 grpcio<1.64.0 
leabra-psyneulink<0.3.3 From b01e7931582a266987b9015a0f140402552a21f9 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Fri, 17 May 2024 21:29:06 -0400 Subject: [PATCH 192/410] composition: Create a copy of 'controller_condition' If the controller_condition is not set by the caller it will be a global instance of the "Always" condition. Adding a reference there results in a leak. Creating a copy for use in controller_condition avoids adding a reference to a global instance. Signed-off-by: Jan Vesely Revert "composition: Do not the store owner of 'controller_condition'" This reverts commit a9194e8176763648f5c3ffff55c202df756c75f0. --- psyneulink/core/compositions/composition.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/psyneulink/core/compositions/composition.py b/psyneulink/core/compositions/composition.py index 2970ffca18f..01d8fa5e37e 100644 --- a/psyneulink/core/compositions/composition.py +++ b/psyneulink/core/compositions/composition.py @@ -4097,7 +4097,7 @@ def __init__( self.add_controller(controller) self.controller_mode = controller_mode self.controller_time_scale = controller_time_scale - self.controller_condition = controller_condition + self.controller_condition = copy(controller_condition) self.controller_condition.owner = self.controller # This is set at runtime and may be used by the controller to assign its # `num_trials_per_estimate ` attribute. From fe4d1c451df59c1bdbde5d5a75b009114bdf66f3 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Fri, 17 May 2024 21:32:50 -0400 Subject: [PATCH 193/410] Revert "ci: split windows x86 job (#2943)" This reverts commit 427808eac69e8e029799c8096ab52c7d3ed8e57d. No longer needed. --- .github/workflows/pnl-ci.yml | 21 +-------------------- 1 file changed, 1 insertion(+), 20 deletions(-) diff --git a/.github/workflows/pnl-ci.yml b/.github/workflows/pnl-ci.yml index 89765843568..9b5882ac834 100644 --- a/.github/workflows/pnl-ci.yml +++ b/.github/workflows/pnl-ci.yml @@ -51,21 +51,9 @@ jobs: extra-args: '--forked -m "not llvm"' # add 32-bit build on windows - # split by marks to reduce peak memory - python-version: '3.8' python-architecture: 'x86' os: windows - extra-args: '-m llvm' - - - python-version: '3.8' - python-architecture: 'x86' - os: windows - extra-args: '-m "not llvm and composition"' - - - python-version: '3.8' - python-architecture: 'x86' - os: windows - extra-args: '-m "not llvm and not composition"' # fp32 run on linux python 3.10 - python-version: '3.10' @@ -176,16 +164,10 @@ jobs: timeout-minutes: 180 run: pytest --junit-xml=tests_out.xml --verbosity=0 -n logical ${{ matrix.extra-args }} - # double quotes are disallowed in artifact names - - name: Get valid filename string from extra-args - id: extra_args_fname - run: echo extra_args="$(echo ${{ matrix.extra-args }} | tr -d '\"')" >> $GITHUB_OUTPUT - shell: bash - - name: Upload test results uses: actions/upload-artifact@v4 with: - name: test-results-${{ matrix.os }}-${{ matrix.python-version }}-${{ matrix.python-architecture }}-${{ matrix.version-restrict }}-${{ steps.extra_args_fname.outputs.extra_args }} + name: test-results-${{ matrix.os }}-${{ matrix.python-version }}-${{ matrix.python-architecture }}-${{ matrix.version-restrict }} path: tests_out.xml retention-days: 5 if: (success() || failure()) && ! 
contains(matrix.extra-args, 'forked') @@ -215,4 +197,3 @@ jobs: name: dist-${{ matrix.os }}-${{ matrix.python-version }}-${{ matrix.python-architecture }} path: dist/ retention-days: 2 - overwrite: true From b7d6f350efe5e61f958f07cfaa820dbc839cb8ee Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Mon, 14 Oct 2019 19:14:04 -0400 Subject: [PATCH 194/410] llvm: Add cleanup destruction check Signed-off-by: Jan Vesely --- conftest.py | 11 ++++++++++- psyneulink/core/llvm/__init__.py | 16 ++++++++++++++-- psyneulink/core/llvm/builder_context.py | 4 ++++ 3 files changed, 28 insertions(+), 3 deletions(-) diff --git a/conftest.py b/conftest.py index 13b39fce95d..ee72480ac44 100644 --- a/conftest.py +++ b/conftest.py @@ -129,7 +129,16 @@ def pytest_runtest_teardown(item): gs.utilities.cached_hashable_graph_function.cache_clear() - pnlvm.cleanup() + # Skip running the leak checker if the test is marked xfail. + # XFAIL tests catch exceptions that references call frames + # including PNL objects that would be reported as leaks. + # Hopefully, there are no leaky codepaths that are only hit + # in xfail tests. + # The same applies to test failures + skip_cleanup_check = ("xfail" in item.keywords) or item.session.testsfailed > 0 + + # Only run the llvm leak checker on llvm tests + pnlvm.cleanup("llvm" in item.keywords and not skip_cleanup_check) @pytest.fixture def comp_mode_no_llvm(): diff --git a/psyneulink/core/llvm/__init__.py b/psyneulink/core/llvm/__init__.py index 15fc2706e87..69fbea15429 100644 --- a/psyneulink/core/llvm/__init__.py +++ b/psyneulink/core/llvm/__init__.py @@ -255,7 +255,7 @@ def _get_engines(): -def cleanup(): +def cleanup(check_leaks:bool=False): global _cpu_engine _cpu_engine = None global _ptx_engine @@ -267,4 +267,16 @@ def cleanup(): LLVMBinaryFunction.get.cache_clear() LLVMBinaryFunction.from_obj.cache_clear() - LLVMBuilderContext.clear_global() + if check_leaks and LLVMBuilderContext.is_active(): + old_context = LLVMBuilderContext.get_current() + + LLVMBuilderContext.clear_global() + + # check that WeakKeyDictionary is not keeping any references + import gc + gc.collect() + c = list(old_context._cache.keys()) + + assert len(c) == 0, c + else: + LLVMBuilderContext.clear_global() diff --git a/psyneulink/core/llvm/builder_context.py b/psyneulink/core/llvm/builder_context.py index f5d50f67d0f..a2e07c6b09b 100644 --- a/psyneulink/core/llvm/builder_context.py +++ b/psyneulink/core/llvm/builder_context.py @@ -163,6 +163,10 @@ def get_current(cls): return LLVMBuilderContext(cls.default_float_ty) return cls.__current_context + @classmethod + def is_active(cls): + return cls.__current_context is not None + @classmethod def clear_global(cls): cls.__current_context = None From bcf056caaed2e3758ef6b7f0b5eb163077e3aa8e Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Sat, 18 May 2024 13:21:29 -0400 Subject: [PATCH 195/410] tests/llvm: Narrow down the allowed exceptions in compilation fallback test Signed-off-by: Jan Vesely --- tests/composition/test_composition.py | 21 +++++++++++++-------- 1 file changed, 13 insertions(+), 8 deletions(-) diff --git a/tests/composition/test_composition.py b/tests/composition/test_composition.py index 36c04fda79f..61e0f8a241f 100644 --- a/tests/composition/test_composition.py +++ b/tests/composition/test_composition.py @@ -6562,26 +6562,31 @@ def test_get_input_format(self, form, use_labels, show_nested, num_trials, expec class TestProperties: + + _fallback_xfail = pytest.mark.xfail(raises=AssertionError, match="Runtime parameters are not supported in 
compiled mode") + @pytest.mark.composition - @pytest.mark.parametrize("mode", [pnl.ExecutionMode.Python, pnl.ExecutionMode.Auto, - pytest.param(pnl.ExecutionMode.LLVM, marks=[pytest.mark.xfail, pytest.mark.llvm]), - pytest.param(pnl.ExecutionMode.LLVMExec, marks=[pytest.mark.xfail, pytest.mark.llvm]), - pytest.param(pnl.ExecutionMode.LLVMRun, marks=[pytest.mark.xfail, pytest.mark.llvm]), - pytest.param(pnl.ExecutionMode.PTXExec, marks=[pytest.mark.xfail, pytest.mark.llvm, pytest.mark.cuda]), - pytest.param(pnl.ExecutionMode.PTXRun, marks=[pytest.mark.xfail, pytest.mark.llvm, pytest.mark.cuda]), + @pytest.mark.parametrize("mode", [pnl.ExecutionMode.Auto, pnl.ExecutionMode.Python, + pytest.param(pnl.ExecutionMode.LLVM, marks=[_fallback_xfail, pytest.mark.llvm]), + pytest.param(pnl.ExecutionMode.LLVMExec, marks=[_fallback_xfail, pytest.mark.llvm]), + pytest.param(pnl.ExecutionMode.LLVMRun, marks=[_fallback_xfail, pytest.mark.llvm]), + pytest.param(pnl.ExecutionMode.PTXExec, marks=[_fallback_xfail, pytest.mark.llvm, pytest.mark.cuda]), + pytest.param(pnl.ExecutionMode.PTXRun, marks=[_fallback_xfail, pytest.mark.llvm, pytest.mark.cuda]), ]) def test_llvm_fallback(self, mode): - comp = Composition() + # FIXME: using num_executions is a hack. The name collides with # a stateful param of every component and thus it's not supported def myFunc(variable, params, context, num_executions): return variable * 2 + U = UserDefinedFunction(custom_function=myFunc, default_variable=[[0, 0], [0, 0]], num_executions=0) A = TransferMechanism(name="composition-pytests-A", default_variable=[[1.0, 2.0], [3.0, 4.0]], function=U) + + comp = Composition(nodes=[A]) inputs = {A: [[10., 20.], [30., 40.]]} - comp.add_node(A) res = comp.run(inputs=inputs, execution_mode=mode) np.testing.assert_allclose(res, [[20.0, 40.0], [60.0, 80.0]]) From 583cdf2e53e8547486b3d4dce8da3e6663459b90 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 20 May 2024 22:16:15 +0000 Subject: [PATCH 196/410] --- updated-dependencies: - dependency-name: pytest dependency-type: direct:development ... Signed-off-by: dependabot[bot] --- dev_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev_requirements.txt b/dev_requirements.txt index b823dafcd95..f67db8a1497 100644 --- a/dev_requirements.txt +++ b/dev_requirements.txt @@ -1,6 +1,6 @@ jupyter<1.0.1 packaging<25.0 -pytest<8.2.1 +pytest<8.2.2 pytest-benchmark<4.0.1 pytest-cov<5.0.1 pytest-forked<1.7.0 From e1c3d7586fe73627cba7611ebb4c62dcbc6d250a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 21 May 2024 10:42:25 -0400 Subject: [PATCH 197/410] requirements: update grpcio requirement from <1.64.0 to <1.65.0 (#2970) Updates the requirements on [grpcio](https://github.com/grpc/grpc) to permit the latest version. 
- [Release notes](https://github.com/grpc/grpc/releases) - [Changelog](https://github.com/grpc/grpc/blob/master/doc/grpc_release_schedule.md) - [Commits](https://github.com/grpc/grpc/compare/v0.63.0...v1.64.0) updated-dependencies: - dependency-name: grpcio dependency-type: direct:production Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 68bf04889c3..3d9bc3d4dfe 100644 --- a/requirements.txt +++ b/requirements.txt @@ -3,7 +3,7 @@ dill<0.3.9 fastkde>=1.0.24, <1.0.31 graph-scheduler>=1.1.1, <1.3.0 graphviz<0.21.0 -grpcio<1.64.0 +grpcio<1.65.0 leabra-psyneulink<0.3.3 llvmlite<0.43 matplotlib<3.7.6 From 2e2956050fc2e1ff17c08b93da4790e89ff178d0 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Tue, 21 May 2024 17:16:12 -0400 Subject: [PATCH 198/410] llvm/execution: Consolidate recursion points in writeback Lists of Components all follow the same pattern. Signed-off-by: Jan Vesely --- psyneulink/core/llvm/execution.py | 46 ++++++++++--------------------- 1 file changed, 15 insertions(+), 31 deletions(-) diff --git a/psyneulink/core/llvm/execution.py b/psyneulink/core/llvm/execution.py index 95eafe480ab..5d2e4a2140f 100644 --- a/psyneulink/core/llvm/execution.py +++ b/psyneulink/core/llvm/execution.py @@ -144,35 +144,26 @@ def _copy_params_to_pnl(self, context, component, params, ids:str, condition:Cal for idx, attribute in enumerate(getattr(component, ids)): compiled_attribute_param = getattr(params, params._fields_[idx][0]) - # Handle custom compiled-only structures by name - if attribute == 'nodes': - for node_id, node in enumerate(component._all_nodes): - node_params = getattr(compiled_attribute_param, - compiled_attribute_param._fields_[node_id][0]) + def _enumerate_recurse(elements): + for element_id, element in enumerate(elements): + element_params = getattr(compiled_attribute_param, + compiled_attribute_param._fields_[element_id][0]) self._copy_params_to_pnl(context=context, - component=node, - params=node_params, + component=element, + params=element_params, ids=ids, condition=condition) + + # Handle custom compiled-only structures by name + if attribute == 'nodes': + _enumerate_recurse(component._all_nodes) + elif attribute == 'projections': - for proj_id, projection in enumerate(component._inner_projections): - projection_params = getattr(compiled_attribute_param, - compiled_attribute_param._fields_[proj_id][0]) - self._copy_params_to_pnl(context=context, - component=projection, - params=projection_params, - ids=ids, - condition=condition) + _enumerate_recurse(component._inner_projections) elif attribute == '_parameter_ports': - for pp_id, param_port in enumerate(component._parameter_ports): - port_params = getattr(compiled_attribute_param, - compiled_attribute_param._fields_[pp_id][0]) - self._copy_params_to_pnl(context=context, - component=param_port, - params=port_params, - ids=ids, - condition=condition) + _enumerate_recurse(component._parameter_ports) + else: # TODO: Reconstruct Python RandomState if attribute == "random_state": @@ -201,14 +192,7 @@ def _copy_params_to_pnl(self, context, component, params, ids:str, condition:Cal condition=condition) elif attribute == "input_ports" or attribute == "output_ports": - for port_id, port in enumerate(pnl_value): - port_params = getattr(compiled_attribute_param, - compiled_attribute_param._fields_[port_id][0]) - 
self._copy_params_to_pnl(context=context, - component=port, - params=port_params, - ids=ids, - condition=condition) + _enumerate_recurse(pnl_value) # Writeback parameter value if the condition matches elif condition(pnl_param): From 908cf16257f631f00c800e76fba2433671a19e67 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Wed, 22 May 2024 00:32:22 -0400 Subject: [PATCH 199/410] llvm/execution: Use ctypes type in sizeof call ctypes.Struct members that are scalars are instances of Python builtin types (float, int) which do not have a fixed size. Signed-off-by: Jan Vesely --- psyneulink/core/llvm/execution.py | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/psyneulink/core/llvm/execution.py b/psyneulink/core/llvm/execution.py index 5d2e4a2140f..fc8c2964e55 100644 --- a/psyneulink/core/llvm/execution.py +++ b/psyneulink/core/llvm/execution.py @@ -143,6 +143,7 @@ def _copy_params_to_pnl(self, context, component, params, ids:str, condition:Cal for idx, attribute in enumerate(getattr(component, ids)): compiled_attribute_param = getattr(params, params._fields_[idx][0]) + compiled_attribute_param_ctype = params._fields_[idx][1] def _enumerate_recurse(elements): for element_id, element in enumerate(elements): @@ -198,13 +199,7 @@ def _enumerate_recurse(elements): elif condition(pnl_param): # Replace empty structures with None - try: - size_of = ctypes.sizeof(compiled_attribute_param) - except TypeError: - # will be a 0-dim array - size_of = 1 - - if size_of == 0: + if ctypes.sizeof(compiled_attribute_param_ctype) == 0: value = None else: value = np.ctypeslib.as_array(compiled_attribute_param) From 1b496e4ce040523213c48061a689941a101c82a2 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Tue, 21 May 2024 23:54:59 -0400 Subject: [PATCH 200/410] llvm/execution: Skip writeback of "optimizer" and "num_executions" "optimizer" is a custom class used in learing. "num_executions" is a Time class that needs to be reconstructed. Signed-off-by: Jan Vesely --- psyneulink/core/llvm/execution.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/psyneulink/core/llvm/execution.py b/psyneulink/core/llvm/execution.py index fc8c2964e55..97b83713850 100644 --- a/psyneulink/core/llvm/execution.py +++ b/psyneulink/core/llvm/execution.py @@ -174,6 +174,14 @@ def _enumerate_recurse(elements): if attribute == "ring_memory": continue + # TODO: Reconstruct Time class + if attribute == "num_executions": + continue + + # TODO: Add support for syncing optimizer state + if attribute == "optimizer": + continue + # "old_val" is a helper storage in compiled RecurrentTransferMechanism # to workaround the fact that compiled projections do no pull values # from their source output ports From 77882b3262e11583c1efb4a70b4d38560f97f81e Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Wed, 22 May 2024 01:48:41 -0400 Subject: [PATCH 201/410] llvm/execution: Remove writeback condition parameter Signed-off-by: Jan Vesely --- psyneulink/core/llvm/execution.py | 33 +++++++++++++------------------ 1 file changed, 14 insertions(+), 19 deletions(-) diff --git a/psyneulink/core/llvm/execution.py b/psyneulink/core/llvm/execution.py index 97b83713850..3f7318f0d3e 100644 --- a/psyneulink/core/llvm/execution.py +++ b/psyneulink/core/llvm/execution.py @@ -22,7 +22,6 @@ from psyneulink.core import llvm as pnlvm from psyneulink.core.globals.context import Context -from psyneulink.core.globals.parameters import is_array_like from . 
import helpers, jit_engine, builder_context from .debug import debug_env @@ -101,45 +100,43 @@ def _get_compilation_param(self, name, init_method, arg): struct = struct_ty(*initializer) struct_end = time.time() + setattr(self, name, struct) if "time_stat" in self._debug_env: print("Time to get initializer for struct:", name, "for", self._obj.name, ":", init_end - init_start) print("Time to instantiate struct:", name, "for", self._obj.name, ":", struct_end - init_end) - setattr(self, name, struct) + if "stat" in self._debug_env: print("Instantiated struct:", name, "( size:" , _pretty_size(ctypes.sizeof(struct_ty)), ")", "for", self._obj.name) - def cond_select_np_arrs(p): - return is_array_like(p.default_value) if len(self._execution_contexts) == 1: if name == '_state': - self.writeback_state_to_pnl(cond_select_np_arrs) + self.writeback_state_to_pnl() elif name == '_param': - self.writeback_params_to_pnl(cond_select_np_arrs) + self.writeback_params_to_pnl() return struct - def writeback_state_to_pnl(self, condition:Callable=lambda p: True): + def writeback_state_to_pnl(self): self._copy_params_to_pnl(self._execution_contexts[0], self._obj, self._state_struct, - "llvm_state_ids", - condition) + "llvm_state_ids") + + def writeback_params_to_pnl(self): - def writeback_params_to_pnl(self, condition: Callable = lambda p: True): self._copy_params_to_pnl(self._execution_contexts[0], self._obj, self._param_struct, - "llvm_param_ids", - condition) + "llvm_param_ids") - def _copy_params_to_pnl(self, context, component, params, ids:str, condition:Callable): + def _copy_params_to_pnl(self, context, component, params, ids:str): for idx, attribute in enumerate(getattr(component, ids)): compiled_attribute_param = getattr(params, params._fields_[idx][0]) @@ -152,8 +149,7 @@ def _enumerate_recurse(elements): self._copy_params_to_pnl(context=context, component=element, params=element_params, - ids=ids, - condition=condition) + ids=ids) # Handle custom compiled-only structures by name if attribute == 'nodes': @@ -197,14 +193,13 @@ def _enumerate_recurse(elements): self._copy_params_to_pnl(context=context, component=pnl_value, params=compiled_attribute_param, - ids=ids, - condition=condition) + ids=ids) elif attribute == "input_ports" or attribute == "output_ports": _enumerate_recurse(pnl_value) - # Writeback parameter value if the condition matches - elif condition(pnl_param): + # Writeback parameter value + else: # Replace empty structures with None if ctypes.sizeof(compiled_attribute_param_ctype) == 0: From d3302f9cb212cf8e7e967206b465152eaedec03a Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Thu, 23 May 2024 11:26:40 -0400 Subject: [PATCH 202/410] Function: Do not process the value, or reset PRNG when syncing seed Pass compilation_sync param to setters. Synchronizing seed should not reset the PRNG. Return the value verbatim to allow shared memory synchronization with compiled structure. 
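For illustration, a setter opting into the new keyword might follow this pattern (minimal sketch with assumed names, not the actual PsyNeuLink seed setter):

    import numpy as np

    def _example_seed_setter(value, owning_component=None, context=None, *,
                             compilation_sync=False):
        if compilation_sync:
            # Value arrives as a 0d numpy array shared with the compiled
            # structure; hand it back verbatim so both sides keep using the
            # same memory, and do not reseed the PRNG here.
            assert isinstance(value, np.ndarray) and value.shape == ()
            return value

        # Normal Python-side assignment: convert/validate as before.
        return int(value)

    # e.g. _example_seed_setter(np.asarray(42.0), compilation_sync=True)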
Signed-off-by: Jan Vesely --- psyneulink/core/components/functions/function.py | 10 +++++++++- psyneulink/core/globals/parameters.py | 3 ++- 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/psyneulink/core/components/functions/function.py b/psyneulink/core/components/functions/function.py index a447b16796a..82a58529ec3 100644 --- a/psyneulink/core/components/functions/function.py +++ b/psyneulink/core/components/functions/function.py @@ -346,7 +346,15 @@ def _output_type_setter(value, owning_component): return value -def _seed_setter(value, owning_component, context): +def _seed_setter(value, owning_component, context, *, compilation_sync): + if compilation_sync: + # compilation sync should provide shared memory 0d array with a floating point value. + assert value is not None + assert value != DEFAULT_SEED() + assert value.shape == () + + return value + value = try_extract_0d_array_item(value) if value is None or value == DEFAULT_SEED(): value = get_global_seed() diff --git a/psyneulink/core/globals/parameters.py b/psyneulink/core/globals/parameters.py index e03e01fe827..d4dea623080 100644 --- a/psyneulink/core/globals/parameters.py +++ b/psyneulink/core/globals/parameters.py @@ -1556,7 +1556,8 @@ def _set( if self.setter is not None: kwargs = { **self._default_setter_kwargs, - **kwargs + **kwargs, + 'compilation_sync':compilation_sync, } value = call_with_pruned_args(self.setter, value, context=context, **kwargs) From ad3b5cda9e2e9dc5e71bad4c0e6ca760a1da2f8f Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Wed, 22 May 2024 21:02:22 -0400 Subject: [PATCH 203/410] llvm/execution: Make parameter and state writeback private Writeback in test execution is no longer necessary. Signed-off-by: Jan Vesely --- conftest.py | 37 +++++-------------------------- psyneulink/core/llvm/execution.py | 33 +++++++++------------------ 2 files changed, 15 insertions(+), 55 deletions(-) diff --git a/conftest.py b/conftest.py index ee72480ac44..a23219aac1d 100644 --- a/conftest.py +++ b/conftest.py @@ -205,24 +205,10 @@ def cuda_param(val): @pytest.helpers.register def get_func_execution(func, func_mode): if func_mode == 'LLVM': - ex = pnlvm.execution.FuncExecution(func) - - # Calling writeback here will replace parameter values - # with numpy instances that share memory with the binary - # structure used by the compiled function - ex.writeback_state_to_pnl() - - return ex.execute + return pnlvm.execution.FuncExecution(func).execute elif func_mode == 'PTX': - ex = pnlvm.execution.FuncExecution(func) - - # Calling writeback here will replace parameter values - # with numpy instances that share memory with the binary - # structure used by the compiled function - ex.writeback_state_to_pnl() - - return ex.cuda_execute + return pnlvm.execution.FuncExecution(func).cuda_execute elif func_mode == 'Python': return func.function @@ -232,29 +218,16 @@ def get_func_execution(func, func_mode): @pytest.helpers.register def get_mech_execution(mech, mech_mode): if mech_mode == 'LLVM': - ex = pnlvm.execution.MechExecution(mech) - - # Calling writeback here will replace parameter values - # with numpy instances that share memory with the binary - # structure used by the compiled function - ex.writeback_state_to_pnl() - - return ex.execute + return pnlvm.execution.MechExecution(mech).execute elif mech_mode == 'PTX': - ex = pnlvm.execution.MechExecution(mech) - - # Calling writeback here will replace parameter values - # with numpy instances that share memory with the binary - # structure used by the compiled function 
- ex.writeback_state_to_pnl() - - return ex.cuda_execute + return pnlvm.execution.MechExecution(mech).cuda_execute elif mech_mode == 'Python': def mech_wrapper(x): mech.execute(x) return mech.output_values + return mech_wrapper else: assert False, "Unknown mechanism mode: {}".format(mech_mode) diff --git a/psyneulink/core/llvm/execution.py b/psyneulink/core/llvm/execution.py index 3f7318f0d3e..60e501b19da 100644 --- a/psyneulink/core/llvm/execution.py +++ b/psyneulink/core/llvm/execution.py @@ -113,29 +113,21 @@ def _get_compilation_param(self, name, init_method, arg): _pretty_size(ctypes.sizeof(struct_ty)), ")", "for", self._obj.name) - if len(self._execution_contexts) == 1: if name == '_state': - self.writeback_state_to_pnl() + self._copy_params_to_pnl(self._execution_contexts[0], + self._obj, + self._state_struct, + "llvm_state_ids") + elif name == '_param': - self.writeback_params_to_pnl() + self._copy_params_to_pnl(self._execution_contexts[0], + self._obj, + self._param_struct, + "llvm_param_ids") return struct - def writeback_state_to_pnl(self): - - self._copy_params_to_pnl(self._execution_contexts[0], - self._obj, - self._state_struct, - "llvm_state_ids") - - def writeback_params_to_pnl(self): - - self._copy_params_to_pnl(self._execution_contexts[0], - self._obj, - self._param_struct, - "llvm_param_ids") - def _copy_params_to_pnl(self, context, component, params, ids:str): for idx, attribute in enumerate(getattr(component, ids)): @@ -222,12 +214,7 @@ def _enumerate_recurse(elements): except ValueError: pass - pnl_param.set( - value, - context=context, - override=True, - compilation_sync=True, - ) + pnl_param.set(value, context=context, override=True, compilation_sync=True) class CUDAExecution(Execution): From 54dfe3e845def4a525c8b160ff0a725d7d7ab7d8 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Thu, 23 May 2024 18:25:55 -0400 Subject: [PATCH 204/410] llvm/execution: Retrieve PNL parameter value only once Signed-off-by: Jan Vesely --- psyneulink/core/llvm/execution.py | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/psyneulink/core/llvm/execution.py b/psyneulink/core/llvm/execution.py index 60e501b19da..f3c6694ccf6 100644 --- a/psyneulink/core/llvm/execution.py +++ b/psyneulink/core/llvm/execution.py @@ -178,7 +178,10 @@ def _enumerate_recurse(elements): # Handle PNL parameters pnl_param = getattr(component.parameters, attribute) - pnl_value = pnl_param.get(context=context) + + # Use ._get to retrieve underlying numpy arrays + # (.get will extract a scalar if originally set as a scalar) + pnl_value = pnl_param._get(context=context) # Recurse if the value is a PNL object with its own parameters if hasattr(pnl_value, 'parameters'): @@ -204,13 +207,9 @@ def _enumerate_recurse(elements): value = value[-1] # Try to match the shape of the old value - # Use ._get to retrieve underlying numpy arrays - # (.get will extract a scalar if originally set - # as a scalar) - old_value = pnl_param._get(context) - if hasattr(old_value, 'shape'): + if hasattr(pnl_value, 'shape'): try: - value = value.reshape(old_value.shape) + value = value.reshape(pnl_value.shape) except ValueError: pass From 4d95be757ef66eb4637b050d982d3d92039bae43 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Thu, 23 May 2024 20:48:27 -0400 Subject: [PATCH 205/410] llvm/execution: Do stricter checks when reshaping synced parameters Use shape assignment to prevent silent copy. 
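For context, numpy's reshape() silently returns a copy whenever the target shape cannot be produced as a view, whereas assigning to .shape raises instead of copying. Standalone illustration (not project code):

    import numpy as np

    a = np.arange(6).reshape(2, 3).T   # non-contiguous view of the original buffer
    b = a.reshape(6)                   # succeeds, but b is a silent copy
    b[0] = 99                          # ...so this write never reaches a

    try:
        a.shape = (6,)                 # in-place shape assignment refuses to copy
    except AttributeError as err:
        print(err)                     # numpy raises rather than copying behind the scenes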
Signed-off-by: Jan Vesely --- psyneulink/core/llvm/execution.py | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/psyneulink/core/llvm/execution.py b/psyneulink/core/llvm/execution.py index f3c6694ccf6..5fe34d8f98c 100644 --- a/psyneulink/core/llvm/execution.py +++ b/psyneulink/core/llvm/execution.py @@ -206,12 +206,19 @@ def _enumerate_recurse(elements): if "state" in ids: value = value[-1] - # Try to match the shape of the old value - if hasattr(pnl_value, 'shape'): - try: - value = value.reshape(pnl_value.shape) - except ValueError: - pass + # Reshape to match the shape of the old value. + # Do not try to reshape ragged arrays. + if getattr(pnl_value, 'dtype', object) != object and pnl_value.shape != value.shape: + + # Reshape to match numpy 0d arrays and "matrix" + # parameters that are flattened in compiled form + assert pnl_value.shape == () or pnl_param.name == "matrix", \ + "{}: {} vs. {}".format(pnl_param.name, pnl_value.shape, value.shape) + + # Use an assignment instead of reshape(). + # The latter would silently create a copy if the shape + # could not be achieved in metadata (stride, type, ...) + value.shape = pnl_value.shape pnl_param.set(value, context=context, override=True, compilation_sync=True) From 48c827bd511f8bf653384333bf3fb0c318b2e635 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Tue, 4 Jun 2024 13:13:32 -0400 Subject: [PATCH 206/410] versioneer: Updated embedded versioneer to 0.29 Preserve the 'root_dir' global variable Signed-off-by: Jan Vesely --- psyneulink/__init__.py | 5 +- psyneulink/_version.py | 333 +++++++++--- versioneer.py | 1133 ++++++++++++++++++++++++++++------------ 3 files changed, 1044 insertions(+), 427 deletions(-) diff --git a/psyneulink/__init__.py b/psyneulink/__init__.py index 09c949c7170..95832547ca7 100644 --- a/psyneulink/__init__.py +++ b/psyneulink/__init__.py @@ -35,7 +35,7 @@ from . import core # noqa: E402 from . import library # noqa: E402 -from ._version import get_versions # noqa: E402 +from . import _version # noqa: E402 from .core import * # noqa: E402 from .library import * # noqa: E402 @@ -51,8 +51,7 @@ __all__.extend(library.__all__) # set __version__ based on versioneer -__version__ = get_versions()['version'] -del get_versions +__version__ = _version.get_versions()['version'] # suppress numpy overflow and underflow errors _numpy.seterr(over='ignore', under='ignore') diff --git a/psyneulink/_version.py b/psyneulink/_version.py index 29a8917a52b..f79bbcc8130 100644 --- a/psyneulink/_version.py +++ b/psyneulink/_version.py @@ -5,8 +5,9 @@ # directories (produced by setup.py build) will contain a much shorter file # that just contains the computed version number. -# This file is released into the public domain. Generated by -# versioneer-0.18 (https://github.com/warner/python-versioneer) +# This file is released into the public domain. +# Generated by versioneer-0.29 +# https://github.com/python-versioneer/python-versioneer """Git implementation of _version.py.""" @@ -15,10 +16,12 @@ import re import subprocess import sys +from typing import Any, Callable, Dict, List, Optional, Tuple +import functools root_dir = os.path.abspath(os.path.dirname(__file__)) -def get_keywords(): +def get_keywords() -> Dict[str, str]: """Get the keywords needed to look up the version information.""" # these strings will be replaced by git during git-archive. 
# setup.py/versioneer.py will grep for the variable names, so they must @@ -34,8 +37,15 @@ def get_keywords(): class VersioneerConfig: """Container for Versioneer configuration parameters.""" + VCS: str + style: str + tag_prefix: str + parentdir_prefix: str + versionfile_source: str + verbose: bool -def get_config(): + +def get_config() -> VersioneerConfig: """Create, populate and return the VersioneerConfig() object.""" # these strings are filled in when 'setup.py versioneer' creates # _version.py @@ -53,13 +63,13 @@ class NotThisMethod(Exception): """Exception raised if a method is not valid for the current scenario.""" -LONG_VERSION_PY = {} -HANDLERS = {} +LONG_VERSION_PY: Dict[str, str] = {} +HANDLERS: Dict[str, Dict[str, Callable]] = {} -def register_vcs_handler(vcs, method): # decorator - """Decorator to mark a method as the handler for a particular VCS.""" - def decorate(f): +def register_vcs_handler(vcs: str, method: str) -> Callable: # decorator + """Create decorator to mark a method as the handler of a VCS.""" + def decorate(f: Callable) -> Callable: """Store f in HANDLERS[vcs][method].""" if vcs not in HANDLERS: HANDLERS[vcs] = {} @@ -68,22 +78,35 @@ def decorate(f): return decorate -def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, - env=None): +def run_command( + commands: List[str], + args: List[str], + cwd: Optional[str] = None, + verbose: bool = False, + hide_stderr: bool = False, + env: Optional[Dict[str, str]] = None, +) -> Tuple[Optional[str], Optional[int]]: """Call the given command(s).""" assert isinstance(commands, list) - p = None - for c in commands: + process = None + + popen_kwargs: Dict[str, Any] = {} + if sys.platform == "win32": + # This hides the console window if pythonw.exe is used + startupinfo = subprocess.STARTUPINFO() + startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW + popen_kwargs["startupinfo"] = startupinfo + + for command in commands: try: - dispcmd = str([c] + args) + dispcmd = str([command] + args) # remember shell=False, so use git.cmd on windows, not just git - p = subprocess.Popen([c] + args, cwd=cwd, env=env, - stdout=subprocess.PIPE, - stderr=(subprocess.PIPE if hide_stderr - else None)) + process = subprocess.Popen([command] + args, cwd=cwd, env=env, + stdout=subprocess.PIPE, + stderr=(subprocess.PIPE if hide_stderr + else None), **popen_kwargs) break - except EnvironmentError: - e = sys.exc_info()[1] + except OSError as e: if e.errno == errno.ENOENT: continue if verbose: @@ -94,18 +117,20 @@ def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, if verbose: print("unable to find command, tried %s" % (commands,)) return None, None - stdout = p.communicate()[0].strip() - if sys.version_info[0] >= 3: - stdout = stdout.decode() - if p.returncode != 0: + stdout = process.communicate()[0].strip().decode() + if process.returncode != 0: if verbose: print("unable to run %s (error)" % dispcmd) print("stdout was %s" % stdout) - return None, p.returncode - return stdout, p.returncode + return None, process.returncode + return stdout, process.returncode -def versions_from_parentdir(parentdir_prefix, root, verbose): +def versions_from_parentdir( + parentdir_prefix: str, + root: str, + verbose: bool, +) -> Dict[str, Any]: """Try to determine the version from the parent directory name. 
Source tarballs conventionally unpack into a directory that includes both @@ -114,15 +139,14 @@ def versions_from_parentdir(parentdir_prefix, root, verbose): """ rootdirs = [] - for i in range(3): + for _ in range(3): dirname = os.path.basename(root) if dirname.startswith(parentdir_prefix): return {"version": dirname[len(parentdir_prefix):], "full-revisionid": None, "dirty": False, "error": None, "date": None} - else: - rootdirs.append(root) - root = os.path.dirname(root) # up a level + rootdirs.append(root) + root = os.path.dirname(root) # up a level if verbose: print("Tried directories %s but none started with prefix %s" % @@ -131,41 +155,48 @@ def versions_from_parentdir(parentdir_prefix, root, verbose): @register_vcs_handler("git", "get_keywords") -def git_get_keywords(versionfile_abs): +def git_get_keywords(versionfile_abs: str) -> Dict[str, str]: """Extract version information from the given file.""" # the code embedded in _version.py can just fetch the value of these # keywords. When used from setup.py, we don't want to import _version.py, # so we do it with a regexp instead. This function is not used from # _version.py. - keywords = {} + keywords: Dict[str, str] = {} try: - f = open(versionfile_abs, "r") - for line in f.readlines(): - if line.strip().startswith("git_refnames ="): - mo = re.search(r'=\s*"(.*)"', line) - if mo: - keywords["refnames"] = mo.group(1) - if line.strip().startswith("git_full ="): - mo = re.search(r'=\s*"(.*)"', line) - if mo: - keywords["full"] = mo.group(1) - if line.strip().startswith("git_date ="): - mo = re.search(r'=\s*"(.*)"', line) - if mo: - keywords["date"] = mo.group(1) - f.close() - except EnvironmentError: + with open(versionfile_abs, "r") as fobj: + for line in fobj: + if line.strip().startswith("git_refnames ="): + mo = re.search(r'=\s*"(.*)"', line) + if mo: + keywords["refnames"] = mo.group(1) + if line.strip().startswith("git_full ="): + mo = re.search(r'=\s*"(.*)"', line) + if mo: + keywords["full"] = mo.group(1) + if line.strip().startswith("git_date ="): + mo = re.search(r'=\s*"(.*)"', line) + if mo: + keywords["date"] = mo.group(1) + except OSError: pass return keywords @register_vcs_handler("git", "keywords") -def git_versions_from_keywords(keywords, tag_prefix, verbose): +def git_versions_from_keywords( + keywords: Dict[str, str], + tag_prefix: str, + verbose: bool, +) -> Dict[str, Any]: """Get version information from git keywords.""" - if not keywords: - raise NotThisMethod("no keywords at all, weird") + if "refnames" not in keywords: + raise NotThisMethod("Short version file found") date = keywords.get("date") if date is not None: + # Use only the last line. Previous lines may contain GPG signature + # information. + date = date.splitlines()[-1] + # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant # datestamp. However we prefer "%ci" (which expands to an "ISO-8601 # -like" string, which we must then edit to make compliant), because @@ -178,11 +209,11 @@ def git_versions_from_keywords(keywords, tag_prefix, verbose): if verbose: print("keywords are unexpanded, not using") raise NotThisMethod("unexpanded keywords, not a git-archive tarball") - refs = set([r.strip() for r in refnames.strip("()").split(",")]) + refs = {r.strip() for r in refnames.strip("()").split(",")} # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of # just "foo-1.0". If we see a "tag: " prefix, prefer those. 
TAG = "tag: " - tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)]) + tags = {r[len(TAG):] for r in refs if r.startswith(TAG)} if not tags: # Either we're using git < 1.8.3, or there really are no tags. We use # a heuristic: assume all version tags have a digit. The old git %d @@ -191,7 +222,7 @@ def git_versions_from_keywords(keywords, tag_prefix, verbose): # between branches and tags. By ignoring refnames without digits, we # filter out many common branch names like "release" and # "stabilization", as well as "HEAD" and "master". - tags = set([r for r in refs if re.search(r'\d', r)]) + tags = {r for r in refs if re.search(r'\d', r)} if verbose: print("discarding '%s', no digits" % ",".join(refs - tags)) if verbose: @@ -200,6 +231,11 @@ def git_versions_from_keywords(keywords, tag_prefix, verbose): # sorting will prefer e.g. "2.0" over "2.0rc1" if ref.startswith(tag_prefix): r = ref[len(tag_prefix):] + # Filter out refs that exactly match prefix or that don't start + # with a number once the prefix is stripped (mostly a concern + # when prefix is '') + if not re.match(r'\d', r): + continue if verbose: print("picking %s" % r) return {"version": r, @@ -215,7 +251,12 @@ def git_versions_from_keywords(keywords, tag_prefix, verbose): @register_vcs_handler("git", "pieces_from_vcs") -def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): +def git_pieces_from_vcs( + tag_prefix: str, + root: str, + verbose: bool, + runner: Callable = run_command +) -> Dict[str, Any]: """Get version from 'git describe' in the root of the source tree. This only gets called if the git-archive 'subst' keywords were *not* @@ -226,8 +267,15 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): if sys.platform == "win32": GITS = ["git.cmd", "git.exe"] - out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, - hide_stderr=True) + # GIT_DIR can interfere with correct operation of Versioneer. + # It may be intended to be passed to the Versioneer-versioned project, + # but that should not change where we get our version from. 
+ env = os.environ.copy() + env.pop("GIT_DIR", None) + runner = functools.partial(runner, env=env) + + _, rc = runner(GITS, ["rev-parse", "--git-dir"], cwd=root, + hide_stderr=not verbose) if rc != 0: if verbose: print("Directory %s not under git control" % root) @@ -235,24 +283,57 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty] # if there isn't one, this yields HEX[-dirty] (no NUM) - describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty", - "--always", "--long", - "--match", "%s*" % tag_prefix], - cwd=root) + describe_out, rc = runner(GITS, [ + "describe", "--tags", "--dirty", "--always", "--long", + "--match", f"{tag_prefix}[[:digit:]]*" + ], cwd=root) # --long was added in git-1.5.5 if describe_out is None: raise NotThisMethod("'git describe' failed") describe_out = describe_out.strip() - full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root) + full_out, rc = runner(GITS, ["rev-parse", "HEAD"], cwd=root) if full_out is None: raise NotThisMethod("'git rev-parse' failed") full_out = full_out.strip() - pieces = {} + pieces: Dict[str, Any] = {} pieces["long"] = full_out pieces["short"] = full_out[:7] # maybe improved later pieces["error"] = None + branch_name, rc = runner(GITS, ["rev-parse", "--abbrev-ref", "HEAD"], + cwd=root) + # --abbrev-ref was added in git-1.6.3 + if rc != 0 or branch_name is None: + raise NotThisMethod("'git rev-parse --abbrev-ref' returned error") + branch_name = branch_name.strip() + + if branch_name == "HEAD": + # If we aren't exactly on a branch, pick a branch which represents + # the current commit. If all else fails, we are on a branchless + # commit. + branches, rc = runner(GITS, ["branch", "--contains"], cwd=root) + # --contains was added in git-1.5.4 + if rc != 0 or branches is None: + raise NotThisMethod("'git branch --contains' returned error") + branches = branches.split("\n") + + # Remove the first line if we're running detached + if "(" in branches[0]: + branches.pop(0) + + # Strip off the leading "* " from the list of branches. + branches = [branch[2:] for branch in branches] + if "master" in branches: + branch_name = "master" + elif not branches: + branch_name = None + else: + # Pick the first branch that is returned. Good or bad. + branch_name = branches[0] + + pieces["branch"] = branch_name + # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty] # TAG might have hyphens. git_describe = describe_out @@ -269,7 +350,7 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): # TAG-NUM-gHEX mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe) if not mo: - # unparseable. Maybe git-describe is misbehaving? + # unparsable. Maybe git-describe is misbehaving? 
pieces["error"] = ("unable to parse git-describe output: '%s'" % describe_out) return pieces @@ -294,26 +375,27 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): else: # HEX: no tags pieces["closest-tag"] = None - count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], - cwd=root) - pieces["distance"] = int(count_out) # total number of commits + out, rc = runner(GITS, ["rev-list", "HEAD", "--left-right"], cwd=root) + pieces["distance"] = len(out.split()) # total number of commits # commit date: see ISO-8601 comment in git_versions_from_keywords() - date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], - cwd=root)[0].strip() + date = runner(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[0].strip() + # Use only the last line. Previous lines may contain GPG signature + # information. + date = date.splitlines()[-1] pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1) return pieces -def plus_or_dot(pieces): +def plus_or_dot(pieces: Dict[str, Any]) -> str: """Return a + if we don't already have one, else return a .""" if "+" in pieces.get("closest-tag", ""): return "." return "+" -def render_pep440(pieces): +def render_pep440(pieces: Dict[str, Any]) -> str: """Build up version string, with post-release "local version identifier". Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you @@ -338,23 +420,71 @@ def render_pep440(pieces): return rendered -def render_pep440_pre(pieces): - """TAG[.post.devDISTANCE] -- No -dirty. +def render_pep440_branch(pieces: Dict[str, Any]) -> str: + """TAG[[.dev0]+DISTANCE.gHEX[.dirty]] . + + The ".dev0" means not master branch. Note that .dev0 sorts backwards + (a feature branch will appear "older" than the master branch). Exceptions: - 1: no tags. 0.post.devDISTANCE + 1: no tags. 0[.dev0]+untagged.DISTANCE.gHEX[.dirty] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] + if pieces["distance"] or pieces["dirty"]: + if pieces["branch"] != "master": + rendered += ".dev0" + rendered += plus_or_dot(pieces) + rendered += "%d.g%s" % (pieces["distance"], pieces["short"]) + if pieces["dirty"]: + rendered += ".dirty" + else: + # exception #1 + rendered = "0" + if pieces["branch"] != "master": + rendered += ".dev0" + rendered += "+untagged.%d.g%s" % (pieces["distance"], + pieces["short"]) + if pieces["dirty"]: + rendered += ".dirty" + return rendered + + +def pep440_split_post(ver: str) -> Tuple[str, Optional[int]]: + """Split pep440 version string at the post-release segment. + + Returns the release segments before the post-release and the + post-release version number (or -1 if no post-release segment is present). + """ + vc = str.split(ver, ".post") + return vc[0], int(vc[1] or 0) if len(vc) == 2 else None + + +def render_pep440_pre(pieces: Dict[str, Any]) -> str: + """TAG[.postN.devDISTANCE] -- No -dirty. + + Exceptions: + 1: no tags. 
0.post0.devDISTANCE + """ + if pieces["closest-tag"]: if pieces["distance"]: - rendered += ".post.dev%d" % pieces["distance"] + # update the post release segment + tag_version, post_version = pep440_split_post(pieces["closest-tag"]) + rendered = tag_version + if post_version is not None: + rendered += ".post%d.dev%d" % (post_version + 1, pieces["distance"]) + else: + rendered += ".post0.dev%d" % (pieces["distance"]) + else: + # no commits, use the tag as the version + rendered = pieces["closest-tag"] else: # exception #1 - rendered = "0.post.dev%d" % pieces["distance"] + rendered = "0.post0.dev%d" % pieces["distance"] return rendered -def render_pep440_post(pieces): +def render_pep440_post(pieces: Dict[str, Any]) -> str: """TAG[.postDISTANCE[.dev0]+gHEX] . The ".dev0" means dirty. Note that .dev0 sorts backwards @@ -381,12 +511,41 @@ def render_pep440_post(pieces): return rendered -def render_pep440_old(pieces): +def render_pep440_post_branch(pieces: Dict[str, Any]) -> str: + """TAG[.postDISTANCE[.dev0]+gHEX[.dirty]] . + + The ".dev0" means not master branch. + + Exceptions: + 1: no tags. 0.postDISTANCE[.dev0]+gHEX[.dirty] + """ + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + if pieces["distance"] or pieces["dirty"]: + rendered += ".post%d" % pieces["distance"] + if pieces["branch"] != "master": + rendered += ".dev0" + rendered += plus_or_dot(pieces) + rendered += "g%s" % pieces["short"] + if pieces["dirty"]: + rendered += ".dirty" + else: + # exception #1 + rendered = "0.post%d" % pieces["distance"] + if pieces["branch"] != "master": + rendered += ".dev0" + rendered += "+g%s" % pieces["short"] + if pieces["dirty"]: + rendered += ".dirty" + return rendered + + +def render_pep440_old(pieces: Dict[str, Any]) -> str: """TAG[.postDISTANCE[.dev0]] . The ".dev0" means dirty. - Eexceptions: + Exceptions: 1: no tags. 0.postDISTANCE[.dev0] """ if pieces["closest-tag"]: @@ -403,7 +562,7 @@ def render_pep440_old(pieces): return rendered -def render_git_describe(pieces): +def render_git_describe(pieces: Dict[str, Any]) -> str: """TAG[-DISTANCE-gHEX][-dirty]. Like 'git describe --tags --dirty --always'. @@ -423,7 +582,7 @@ def render_git_describe(pieces): return rendered -def render_git_describe_long(pieces): +def render_git_describe_long(pieces: Dict[str, Any]) -> str: """TAG-DISTANCE-gHEX[-dirty]. Like 'git describe --tags --dirty --always -long'. @@ -443,7 +602,7 @@ def render_git_describe_long(pieces): return rendered -def render(pieces, style): +def render(pieces: Dict[str, Any], style: str) -> Dict[str, Any]: """Render the given version pieces into the requested style.""" if pieces["error"]: return {"version": "unknown", @@ -457,10 +616,14 @@ def render(pieces, style): if style == "pep440": rendered = render_pep440(pieces) + elif style == "pep440-branch": + rendered = render_pep440_branch(pieces) elif style == "pep440-pre": rendered = render_pep440_pre(pieces) elif style == "pep440-post": rendered = render_pep440_post(pieces) + elif style == "pep440-post-branch": + rendered = render_pep440_post_branch(pieces) elif style == "pep440-old": rendered = render_pep440_old(pieces) elif style == "git-describe": @@ -475,7 +638,7 @@ def render(pieces, style): "date": pieces.get("date")} -def get_versions(): +def get_versions() -> Dict[str, Any]: """Get version information or return default if unable to do so.""" # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have # __file__, we can work backwards from there to the root. 
Some @@ -496,7 +659,7 @@ def get_versions(): # versionfile_source is the relative path from the top of the source # tree (where the .git directory might live) to this file. Invert # this to find the root from __file__. - for i in cfg.versionfile_source.split('/'): + for _ in cfg.versionfile_source.split('/'): root = os.path.dirname(root) except NameError: return {"version": "0+unknown", "full-revisionid": None, diff --git a/versioneer.py b/versioneer.py index 13901fcd1b9..1e3753e63fb 100644 --- a/versioneer.py +++ b/versioneer.py @@ -1,5 +1,5 @@ -# Version: 0.18 +# Version: 0.29 """The Versioneer - like a rocketeer, but for versions. @@ -7,18 +7,14 @@ ============== * like a rocketeer, but for versions! -* https://github.com/warner/python-versioneer +* https://github.com/python-versioneer/python-versioneer * Brian Warner -* License: Public Domain -* Compatible With: python2.6, 2.7, 3.2, 3.3, 3.4, 3.5, 3.6, and pypy -* [![Latest Version] -(https://pypip.in/version/versioneer/badge.svg?style=flat) -](https://pypi.python.org/pypi/versioneer/) -* [![Build Status] -(https://travis-ci.org/warner/python-versioneer.png?branch=master) -](https://travis-ci.org/warner/python-versioneer) - -This is a tool for managing a recorded version number in distutils-based +* License: Public Domain (Unlicense) +* Compatible with: Python 3.7, 3.8, 3.9, 3.10, 3.11 and pypy3 +* [![Latest Version][pypi-image]][pypi-url] +* [![Build Status][travis-image]][travis-url] + +This is a tool for managing a recorded version number in setuptools-based python projects. The goal is to remove the tedious and error-prone "update the embedded version string" step from your release process. Making a new release should be as easy as recording a new tag in your version-control @@ -27,9 +23,38 @@ ## Quick Install -* `pip install versioneer` to somewhere to your $PATH -* add a `[versioneer]` section to your setup.cfg (see below) -* run `versioneer install` in your source tree, commit the results +Versioneer provides two installation modes. The "classic" vendored mode installs +a copy of versioneer into your repository. The experimental build-time dependency mode +is intended to allow you to skip this step and simplify the process of upgrading. 
+ +### Vendored mode + +* `pip install versioneer` to somewhere in your $PATH + * A [conda-forge recipe](https://github.com/conda-forge/versioneer-feedstock) is + available, so you can also use `conda install -c conda-forge versioneer` +* add a `[tool.versioneer]` section to your `pyproject.toml` or a + `[versioneer]` section to your `setup.cfg` (see [Install](INSTALL.md)) + * Note that you will need to add `tomli; python_version < "3.11"` to your + build-time dependencies if you use `pyproject.toml` +* run `versioneer install --vendor` in your source tree, commit the results +* verify version information with `python setup.py version` + +### Build-time dependency mode + +* `pip install versioneer` to somewhere in your $PATH + * A [conda-forge recipe](https://github.com/conda-forge/versioneer-feedstock) is + available, so you can also use `conda install -c conda-forge versioneer` +* add a `[tool.versioneer]` section to your `pyproject.toml` or a + `[versioneer]` section to your `setup.cfg` (see [Install](INSTALL.md)) +* add `versioneer` (with `[toml]` extra, if configuring in `pyproject.toml`) + to the `requires` key of the `build-system` table in `pyproject.toml`: + ```toml + [build-system] + requires = ["setuptools", "versioneer[toml]"] + build-backend = "setuptools.build_meta" + ``` +* run `versioneer install --no-vendor` in your source tree, commit the results +* verify version information with `python setup.py version` ## Version Identifiers @@ -61,7 +86,7 @@ for example `git describe --tags --dirty --always` reports things like "0.7-1-g574ab98-dirty" to indicate that the checkout is one revision past the 0.7 tag, has a unique revision id of "574ab98", and is "dirty" (it has -uncommitted changes. +uncommitted changes). The version identifier is used for multiple purposes: @@ -166,7 +191,7 @@ Some situations are known to cause problems for Versioneer. This details the most significant ones. More can be found on Github -[issues page](https://github.com/warner/python-versioneer/issues). +[issues page](https://github.com/python-versioneer/python-versioneer/issues). ### Subprojects @@ -180,7 +205,7 @@ `setup.cfg`, and `tox.ini`. Projects like these produce multiple PyPI distributions (and upload multiple independently-installable tarballs). * Source trees whose main purpose is to contain a C library, but which also - provide bindings to Python (and perhaps other langauges) in subdirectories. + provide bindings to Python (and perhaps other languages) in subdirectories. Versioneer will look for `.git` in parent directories, and most operations should get the right version string. However `pip` and `setuptools` have bugs @@ -194,9 +219,9 @@ Pip-8.1.1 is known to have this problem, but hopefully it will get fixed in some later version. -[Bug #38](https://github.com/warner/python-versioneer/issues/38) is tracking +[Bug #38](https://github.com/python-versioneer/python-versioneer/issues/38) is tracking this issue. The discussion in -[PR #61](https://github.com/warner/python-versioneer/pull/61) describes the +[PR #61](https://github.com/python-versioneer/python-versioneer/pull/61) describes the issue from the Versioneer side in more detail. [pip PR#3176](https://github.com/pypa/pip/pull/3176) and [pip PR#3615](https://github.com/pypa/pip/pull/3615) contain work to improve @@ -224,31 +249,20 @@ cause egg_info to be rebuilt (including `sdist`, `wheel`, and installing into a different virtualenv), so this can be surprising. 
-[Bug #83](https://github.com/warner/python-versioneer/issues/83) describes +[Bug #83](https://github.com/python-versioneer/python-versioneer/issues/83) describes this one, but upgrading to a newer version of setuptools should probably resolve it. -### Unicode version strings - -While Versioneer works (and is continually tested) with both Python 2 and -Python 3, it is not entirely consistent with bytes-vs-unicode distinctions. -Newer releases probably generate unicode version strings on py2. It's not -clear that this is wrong, but it may be surprising for applications when then -write these strings to a network connection or include them in bytes-oriented -APIs like cryptographic checksums. - -[Bug #71](https://github.com/warner/python-versioneer/issues/71) investigates -this question. - ## Updating Versioneer To upgrade your project to a new release of Versioneer, do the following: * install the new Versioneer (`pip install -U versioneer` or equivalent) -* edit `setup.cfg`, if necessary, to include any new configuration settings - indicated by the release notes. See [UPGRADING](./UPGRADING.md) for details. -* re-run `versioneer install` in your source tree, to replace +* edit `setup.cfg` and `pyproject.toml`, if necessary, + to include any new configuration settings indicated by the release notes. + See [UPGRADING](./UPGRADING.md) for details. +* re-run `versioneer install --[no-]vendor` in your source tree, to replace `SRC/_version.py` * commit any changed files @@ -265,35 +279,70 @@ direction and include code from all supported VCS systems, reducing the number of intermediate scripts. +## Similar projects + +* [setuptools_scm](https://github.com/pypa/setuptools_scm/) - a non-vendored build-time + dependency +* [minver](https://github.com/jbweston/miniver) - a lightweight reimplementation of + versioneer +* [versioningit](https://github.com/jwodder/versioningit) - a PEP 518-based setuptools + plugin ## License To make Versioneer easier to embed, all its code is dedicated to the public domain. The `_version.py` that it creates is also in the public domain. -Specifically, both are released under the Creative Commons "Public Domain -Dedication" license (CC0-1.0), as described in -https://creativecommons.org/publicdomain/zero/1.0/ . +Specifically, both are released under the "Unlicense", as described in +https://unlicense.org/. 
+ +[pypi-image]: https://img.shields.io/pypi/v/versioneer.svg +[pypi-url]: https://pypi.python.org/pypi/versioneer/ +[travis-image]: +https://img.shields.io/travis/com/python-versioneer/python-versioneer.svg +[travis-url]: https://travis-ci.com/github/python-versioneer/python-versioneer """ +# pylint:disable=invalid-name,import-outside-toplevel,missing-function-docstring +# pylint:disable=missing-class-docstring,too-many-branches,too-many-statements +# pylint:disable=raise-missing-from,too-many-lines,too-many-locals,import-error +# pylint:disable=too-few-public-methods,redefined-outer-name,consider-using-with +# pylint:disable=attribute-defined-outside-init,too-many-arguments -from __future__ import print_function -try: - import configparser -except ImportError: - import ConfigParser as configparser +import configparser import errno import json import os import re import subprocess import sys +from pathlib import Path +from typing import Any, Callable, cast, Dict, List, Optional, Tuple, Union +from typing import NoReturn +import functools + +have_tomllib = True +if sys.version_info >= (3, 11): + import tomllib +else: + try: + import tomli as tomllib + except ImportError: + have_tomllib = False class VersioneerConfig: """Container for Versioneer configuration parameters.""" + VCS: str + style: str + tag_prefix: str + versionfile_source: str + versionfile_build: Optional[str] + parentdir_prefix: Optional[str] + verbose: Optional[bool] + -def get_root(): +def get_root() -> str: """Get the project root directory. We require that all commands are run from the project root, i.e. the @@ -301,13 +350,23 @@ def get_root(): """ root = os.path.realpath(os.path.abspath(os.getcwd())) setup_py = os.path.join(root, "setup.py") + pyproject_toml = os.path.join(root, "pyproject.toml") versioneer_py = os.path.join(root, "versioneer.py") - if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)): + if not ( + os.path.exists(setup_py) + or os.path.exists(pyproject_toml) + or os.path.exists(versioneer_py) + ): # allow 'python path/to/setup.py COMMAND' root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0]))) setup_py = os.path.join(root, "setup.py") + pyproject_toml = os.path.join(root, "pyproject.toml") versioneer_py = os.path.join(root, "versioneer.py") - if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)): + if not ( + os.path.exists(setup_py) + or os.path.exists(pyproject_toml) + or os.path.exists(versioneer_py) + ): err = ("Versioneer was unable to run the project root directory. " "Versioneer requires setup.py to be executed from " "its immediate directory (like 'python setup.py COMMAND'), " @@ -321,43 +380,62 @@ def get_root(): # module-import table will cache the first one. So we can't use # os.path.dirname(__file__), as that will find whichever # versioneer.py was first imported, even in later projects. 
- me = os.path.realpath(os.path.abspath(__file__)) - me_dir = os.path.normcase(os.path.splitext(me)[0]) + my_path = os.path.realpath(os.path.abspath(__file__)) + me_dir = os.path.normcase(os.path.splitext(my_path)[0]) vsr_dir = os.path.normcase(os.path.splitext(versioneer_py)[0]) - if me_dir != vsr_dir: + if me_dir != vsr_dir and "VERSIONEER_PEP518" not in globals(): print("Warning: build in %s is using versioneer.py from %s" - % (os.path.dirname(me), versioneer_py)) + % (os.path.dirname(my_path), versioneer_py)) except NameError: pass return root -def get_config_from_root(root): +def get_config_from_root(root: str) -> VersioneerConfig: """Read the project setup.cfg file to determine Versioneer config.""" - # This might raise EnvironmentError (if setup.cfg is missing), or + # This might raise OSError (if setup.cfg is missing), or # configparser.NoSectionError (if it lacks a [versioneer] section), or # configparser.NoOptionError (if it lacks "VCS="). See the docstring at # the top of versioneer.py for instructions on writing your setup.cfg . - setup_cfg = os.path.join(root, "setup.cfg") - parser = configparser.SafeConfigParser() - with open(setup_cfg, "r") as f: - parser.readfp(f) - VCS = parser.get("versioneer", "VCS") # mandatory - - def get(parser, name): - if parser.has_option("versioneer", name): - return parser.get("versioneer", name) - return None + root_pth = Path(root) + pyproject_toml = root_pth / "pyproject.toml" + setup_cfg = root_pth / "setup.cfg" + section: Union[Dict[str, Any], configparser.SectionProxy, None] = None + if pyproject_toml.exists() and have_tomllib: + try: + with open(pyproject_toml, 'rb') as fobj: + pp = tomllib.load(fobj) + section = pp['tool']['versioneer'] + except (tomllib.TOMLDecodeError, KeyError) as e: + print(f"Failed to load config from {pyproject_toml}: {e}") + print("Try to load it from setup.cfg") + if not section: + parser = configparser.ConfigParser() + with open(setup_cfg) as cfg_file: + parser.read_file(cfg_file) + parser.get("versioneer", "VCS") # raise error if missing + + section = parser["versioneer"] + + # `cast`` really shouldn't be used, but its simplest for the + # common VersioneerConfig users at the moment. 
We verify against + # `None` values elsewhere where it matters + cfg = VersioneerConfig() - cfg.VCS = VCS - cfg.style = get(parser, "style") or "" - cfg.versionfile_source = get(parser, "versionfile_source") - cfg.versionfile_build = get(parser, "versionfile_build") - cfg.tag_prefix = get(parser, "tag_prefix") - if cfg.tag_prefix in ("''", '""'): + cfg.VCS = section['VCS'] + cfg.style = section.get("style", "") + cfg.versionfile_source = cast(str, section.get("versionfile_source")) + cfg.versionfile_build = section.get("versionfile_build") + cfg.tag_prefix = cast(str, section.get("tag_prefix")) + if cfg.tag_prefix in ("''", '""', None): cfg.tag_prefix = "" - cfg.parentdir_prefix = get(parser, "parentdir_prefix") - cfg.verbose = get(parser, "verbose") + cfg.parentdir_prefix = section.get("parentdir_prefix") + if isinstance(section, configparser.SectionProxy): + # Make sure configparser translates to bool + cfg.verbose = section.getboolean("verbose") + else: + cfg.verbose = section.get("verbose") + return cfg @@ -366,37 +444,48 @@ class NotThisMethod(Exception): # these dictionaries contain VCS-specific tools -LONG_VERSION_PY = {} -HANDLERS = {} +LONG_VERSION_PY: Dict[str, str] = {} +HANDLERS: Dict[str, Dict[str, Callable]] = {} -def register_vcs_handler(vcs, method): # decorator - """Decorator to mark a method as the handler for a particular VCS.""" - def decorate(f): +def register_vcs_handler(vcs: str, method: str) -> Callable: # decorator + """Create decorator to mark a method as the handler of a VCS.""" + def decorate(f: Callable) -> Callable: """Store f in HANDLERS[vcs][method].""" - if vcs not in HANDLERS: - HANDLERS[vcs] = {} - HANDLERS[vcs][method] = f + HANDLERS.setdefault(vcs, {})[method] = f return f return decorate -def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, - env=None): +def run_command( + commands: List[str], + args: List[str], + cwd: Optional[str] = None, + verbose: bool = False, + hide_stderr: bool = False, + env: Optional[Dict[str, str]] = None, +) -> Tuple[Optional[str], Optional[int]]: """Call the given command(s).""" assert isinstance(commands, list) - p = None - for c in commands: + process = None + + popen_kwargs: Dict[str, Any] = {} + if sys.platform == "win32": + # This hides the console window if pythonw.exe is used + startupinfo = subprocess.STARTUPINFO() + startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW + popen_kwargs["startupinfo"] = startupinfo + + for command in commands: try: - dispcmd = str([c] + args) + dispcmd = str([command] + args) # remember shell=False, so use git.cmd on windows, not just git - p = subprocess.Popen([c] + args, cwd=cwd, env=env, - stdout=subprocess.PIPE, - stderr=(subprocess.PIPE if hide_stderr - else None)) + process = subprocess.Popen([command] + args, cwd=cwd, env=env, + stdout=subprocess.PIPE, + stderr=(subprocess.PIPE if hide_stderr + else None), **popen_kwargs) break - except EnvironmentError: - e = sys.exc_info()[1] + except OSError as e: if e.errno == errno.ENOENT: continue if verbose: @@ -407,15 +496,13 @@ def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, if verbose: print("unable to find command, tried %s" % (commands,)) return None, None - stdout = p.communicate()[0].strip() - if sys.version_info[0] >= 3: - stdout = stdout.decode() - if p.returncode != 0: + stdout = process.communicate()[0].strip().decode() + if process.returncode != 0: if verbose: print("unable to run %s (error)" % dispcmd) print("stdout was %s" % stdout) - return None, p.returncode - return 
stdout, p.returncode + return None, process.returncode + return stdout, process.returncode LONG_VERSION_PY['git'] = r''' @@ -425,8 +512,9 @@ def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, # directories (produced by setup.py build) will contain a much shorter file # that just contains the computed version number. -# This file is released into the public domain. Generated by -# versioneer-0.18 (https://github.com/warner/python-versioneer) +# This file is released into the public domain. +# Generated by versioneer-0.29 +# https://github.com/python-versioneer/python-versioneer """Git implementation of _version.py.""" @@ -435,9 +523,11 @@ def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, import re import subprocess import sys +from typing import Any, Callable, Dict, List, Optional, Tuple +import functools -def get_keywords(): +def get_keywords() -> Dict[str, str]: """Get the keywords needed to look up the version information.""" # these strings will be replaced by git during git-archive. # setup.py/versioneer.py will grep for the variable names, so they must @@ -453,8 +543,15 @@ def get_keywords(): class VersioneerConfig: """Container for Versioneer configuration parameters.""" + VCS: str + style: str + tag_prefix: str + parentdir_prefix: str + versionfile_source: str + verbose: bool + -def get_config(): +def get_config() -> VersioneerConfig: """Create, populate and return the VersioneerConfig() object.""" # these strings are filled in when 'setup.py versioneer' creates # _version.py @@ -472,13 +569,13 @@ class NotThisMethod(Exception): """Exception raised if a method is not valid for the current scenario.""" -LONG_VERSION_PY = {} -HANDLERS = {} +LONG_VERSION_PY: Dict[str, str] = {} +HANDLERS: Dict[str, Dict[str, Callable]] = {} -def register_vcs_handler(vcs, method): # decorator - """Decorator to mark a method as the handler for a particular VCS.""" - def decorate(f): +def register_vcs_handler(vcs: str, method: str) -> Callable: # decorator + """Create decorator to mark a method as the handler of a VCS.""" + def decorate(f: Callable) -> Callable: """Store f in HANDLERS[vcs][method].""" if vcs not in HANDLERS: HANDLERS[vcs] = {} @@ -487,22 +584,35 @@ def decorate(f): return decorate -def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, - env=None): +def run_command( + commands: List[str], + args: List[str], + cwd: Optional[str] = None, + verbose: bool = False, + hide_stderr: bool = False, + env: Optional[Dict[str, str]] = None, +) -> Tuple[Optional[str], Optional[int]]: """Call the given command(s).""" assert isinstance(commands, list) - p = None - for c in commands: + process = None + + popen_kwargs: Dict[str, Any] = {} + if sys.platform == "win32": + # This hides the console window if pythonw.exe is used + startupinfo = subprocess.STARTUPINFO() + startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW + popen_kwargs["startupinfo"] = startupinfo + + for command in commands: try: - dispcmd = str([c] + args) + dispcmd = str([command] + args) # remember shell=False, so use git.cmd on windows, not just git - p = subprocess.Popen([c] + args, cwd=cwd, env=env, - stdout=subprocess.PIPE, - stderr=(subprocess.PIPE if hide_stderr - else None)) + process = subprocess.Popen([command] + args, cwd=cwd, env=env, + stdout=subprocess.PIPE, + stderr=(subprocess.PIPE if hide_stderr + else None), **popen_kwargs) break - except EnvironmentError: - e = sys.exc_info()[1] + except OSError as e: if e.errno == errno.ENOENT: continue if 
verbose: @@ -513,18 +623,20 @@ def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, if verbose: print("unable to find command, tried %%s" %% (commands,)) return None, None - stdout = p.communicate()[0].strip() - if sys.version_info[0] >= 3: - stdout = stdout.decode() - if p.returncode != 0: + stdout = process.communicate()[0].strip().decode() + if process.returncode != 0: if verbose: print("unable to run %%s (error)" %% dispcmd) print("stdout was %%s" %% stdout) - return None, p.returncode - return stdout, p.returncode + return None, process.returncode + return stdout, process.returncode -def versions_from_parentdir(parentdir_prefix, root, verbose): +def versions_from_parentdir( + parentdir_prefix: str, + root: str, + verbose: bool, +) -> Dict[str, Any]: """Try to determine the version from the parent directory name. Source tarballs conventionally unpack into a directory that includes both @@ -533,15 +645,14 @@ def versions_from_parentdir(parentdir_prefix, root, verbose): """ rootdirs = [] - for i in range(3): + for _ in range(3): dirname = os.path.basename(root) if dirname.startswith(parentdir_prefix): return {"version": dirname[len(parentdir_prefix):], "full-revisionid": None, "dirty": False, "error": None, "date": None} - else: - rootdirs.append(root) - root = os.path.dirname(root) # up a level + rootdirs.append(root) + root = os.path.dirname(root) # up a level if verbose: print("Tried directories %%s but none started with prefix %%s" %% @@ -550,41 +661,48 @@ def versions_from_parentdir(parentdir_prefix, root, verbose): @register_vcs_handler("git", "get_keywords") -def git_get_keywords(versionfile_abs): +def git_get_keywords(versionfile_abs: str) -> Dict[str, str]: """Extract version information from the given file.""" # the code embedded in _version.py can just fetch the value of these # keywords. When used from setup.py, we don't want to import _version.py, # so we do it with a regexp instead. This function is not used from # _version.py. - keywords = {} + keywords: Dict[str, str] = {} try: - f = open(versionfile_abs, "r") - for line in f.readlines(): - if line.strip().startswith("git_refnames ="): - mo = re.search(r'=\s*"(.*)"', line) - if mo: - keywords["refnames"] = mo.group(1) - if line.strip().startswith("git_full ="): - mo = re.search(r'=\s*"(.*)"', line) - if mo: - keywords["full"] = mo.group(1) - if line.strip().startswith("git_date ="): - mo = re.search(r'=\s*"(.*)"', line) - if mo: - keywords["date"] = mo.group(1) - f.close() - except EnvironmentError: + with open(versionfile_abs, "r") as fobj: + for line in fobj: + if line.strip().startswith("git_refnames ="): + mo = re.search(r'=\s*"(.*)"', line) + if mo: + keywords["refnames"] = mo.group(1) + if line.strip().startswith("git_full ="): + mo = re.search(r'=\s*"(.*)"', line) + if mo: + keywords["full"] = mo.group(1) + if line.strip().startswith("git_date ="): + mo = re.search(r'=\s*"(.*)"', line) + if mo: + keywords["date"] = mo.group(1) + except OSError: pass return keywords @register_vcs_handler("git", "keywords") -def git_versions_from_keywords(keywords, tag_prefix, verbose): +def git_versions_from_keywords( + keywords: Dict[str, str], + tag_prefix: str, + verbose: bool, +) -> Dict[str, Any]: """Get version information from git keywords.""" - if not keywords: - raise NotThisMethod("no keywords at all, weird") + if "refnames" not in keywords: + raise NotThisMethod("Short version file found") date = keywords.get("date") if date is not None: + # Use only the last line. 
Previous lines may contain GPG signature + # information. + date = date.splitlines()[-1] + # git-2.2.0 added "%%cI", which expands to an ISO-8601 -compliant # datestamp. However we prefer "%%ci" (which expands to an "ISO-8601 # -like" string, which we must then edit to make compliant), because @@ -597,11 +715,11 @@ def git_versions_from_keywords(keywords, tag_prefix, verbose): if verbose: print("keywords are unexpanded, not using") raise NotThisMethod("unexpanded keywords, not a git-archive tarball") - refs = set([r.strip() for r in refnames.strip("()").split(",")]) + refs = {r.strip() for r in refnames.strip("()").split(",")} # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of # just "foo-1.0". If we see a "tag: " prefix, prefer those. TAG = "tag: " - tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)]) + tags = {r[len(TAG):] for r in refs if r.startswith(TAG)} if not tags: # Either we're using git < 1.8.3, or there really are no tags. We use # a heuristic: assume all version tags have a digit. The old git %%d @@ -610,7 +728,7 @@ def git_versions_from_keywords(keywords, tag_prefix, verbose): # between branches and tags. By ignoring refnames without digits, we # filter out many common branch names like "release" and # "stabilization", as well as "HEAD" and "master". - tags = set([r for r in refs if re.search(r'\d', r)]) + tags = {r for r in refs if re.search(r'\d', r)} if verbose: print("discarding '%%s', no digits" %% ",".join(refs - tags)) if verbose: @@ -619,6 +737,11 @@ def git_versions_from_keywords(keywords, tag_prefix, verbose): # sorting will prefer e.g. "2.0" over "2.0rc1" if ref.startswith(tag_prefix): r = ref[len(tag_prefix):] + # Filter out refs that exactly match prefix or that don't start + # with a number once the prefix is stripped (mostly a concern + # when prefix is '') + if not re.match(r'\d', r): + continue if verbose: print("picking %%s" %% r) return {"version": r, @@ -634,7 +757,12 @@ def git_versions_from_keywords(keywords, tag_prefix, verbose): @register_vcs_handler("git", "pieces_from_vcs") -def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): +def git_pieces_from_vcs( + tag_prefix: str, + root: str, + verbose: bool, + runner: Callable = run_command +) -> Dict[str, Any]: """Get version from 'git describe' in the root of the source tree. This only gets called if the git-archive 'subst' keywords were *not* @@ -645,8 +773,15 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): if sys.platform == "win32": GITS = ["git.cmd", "git.exe"] - out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, - hide_stderr=True) + # GIT_DIR can interfere with correct operation of Versioneer. + # It may be intended to be passed to the Versioneer-versioned project, + # but that should not change where we get our version from. 
+ env = os.environ.copy() + env.pop("GIT_DIR", None) + runner = functools.partial(runner, env=env) + + _, rc = runner(GITS, ["rev-parse", "--git-dir"], cwd=root, + hide_stderr=not verbose) if rc != 0: if verbose: print("Directory %%s not under git control" %% root) @@ -654,24 +789,57 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty] # if there isn't one, this yields HEX[-dirty] (no NUM) - describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty", - "--always", "--long", - "--match", "%%s*" %% tag_prefix], - cwd=root) + describe_out, rc = runner(GITS, [ + "describe", "--tags", "--dirty", "--always", "--long", + "--match", f"{tag_prefix}[[:digit:]]*" + ], cwd=root) # --long was added in git-1.5.5 if describe_out is None: raise NotThisMethod("'git describe' failed") describe_out = describe_out.strip() - full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root) + full_out, rc = runner(GITS, ["rev-parse", "HEAD"], cwd=root) if full_out is None: raise NotThisMethod("'git rev-parse' failed") full_out = full_out.strip() - pieces = {} + pieces: Dict[str, Any] = {} pieces["long"] = full_out pieces["short"] = full_out[:7] # maybe improved later pieces["error"] = None + branch_name, rc = runner(GITS, ["rev-parse", "--abbrev-ref", "HEAD"], + cwd=root) + # --abbrev-ref was added in git-1.6.3 + if rc != 0 or branch_name is None: + raise NotThisMethod("'git rev-parse --abbrev-ref' returned error") + branch_name = branch_name.strip() + + if branch_name == "HEAD": + # If we aren't exactly on a branch, pick a branch which represents + # the current commit. If all else fails, we are on a branchless + # commit. + branches, rc = runner(GITS, ["branch", "--contains"], cwd=root) + # --contains was added in git-1.5.4 + if rc != 0 or branches is None: + raise NotThisMethod("'git branch --contains' returned error") + branches = branches.split("\n") + + # Remove the first line if we're running detached + if "(" in branches[0]: + branches.pop(0) + + # Strip off the leading "* " from the list of branches. + branches = [branch[2:] for branch in branches] + if "master" in branches: + branch_name = "master" + elif not branches: + branch_name = None + else: + # Pick the first branch that is returned. Good or bad. + branch_name = branches[0] + + pieces["branch"] = branch_name + # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty] # TAG might have hyphens. git_describe = describe_out @@ -688,7 +856,7 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): # TAG-NUM-gHEX mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe) if not mo: - # unparseable. Maybe git-describe is misbehaving? + # unparsable. Maybe git-describe is misbehaving? 
pieces["error"] = ("unable to parse git-describe output: '%%s'" %% describe_out) return pieces @@ -713,26 +881,27 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): else: # HEX: no tags pieces["closest-tag"] = None - count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], - cwd=root) - pieces["distance"] = int(count_out) # total number of commits + out, rc = runner(GITS, ["rev-list", "HEAD", "--left-right"], cwd=root) + pieces["distance"] = len(out.split()) # total number of commits # commit date: see ISO-8601 comment in git_versions_from_keywords() - date = run_command(GITS, ["show", "-s", "--format=%%ci", "HEAD"], - cwd=root)[0].strip() + date = runner(GITS, ["show", "-s", "--format=%%ci", "HEAD"], cwd=root)[0].strip() + # Use only the last line. Previous lines may contain GPG signature + # information. + date = date.splitlines()[-1] pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1) return pieces -def plus_or_dot(pieces): +def plus_or_dot(pieces: Dict[str, Any]) -> str: """Return a + if we don't already have one, else return a .""" if "+" in pieces.get("closest-tag", ""): return "." return "+" -def render_pep440(pieces): +def render_pep440(pieces: Dict[str, Any]) -> str: """Build up version string, with post-release "local version identifier". Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you @@ -757,23 +926,71 @@ def render_pep440(pieces): return rendered -def render_pep440_pre(pieces): - """TAG[.post.devDISTANCE] -- No -dirty. +def render_pep440_branch(pieces: Dict[str, Any]) -> str: + """TAG[[.dev0]+DISTANCE.gHEX[.dirty]] . + + The ".dev0" means not master branch. Note that .dev0 sorts backwards + (a feature branch will appear "older" than the master branch). Exceptions: - 1: no tags. 0.post.devDISTANCE + 1: no tags. 0[.dev0]+untagged.DISTANCE.gHEX[.dirty] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] + if pieces["distance"] or pieces["dirty"]: + if pieces["branch"] != "master": + rendered += ".dev0" + rendered += plus_or_dot(pieces) + rendered += "%%d.g%%s" %% (pieces["distance"], pieces["short"]) + if pieces["dirty"]: + rendered += ".dirty" + else: + # exception #1 + rendered = "0" + if pieces["branch"] != "master": + rendered += ".dev0" + rendered += "+untagged.%%d.g%%s" %% (pieces["distance"], + pieces["short"]) + if pieces["dirty"]: + rendered += ".dirty" + return rendered + + +def pep440_split_post(ver: str) -> Tuple[str, Optional[int]]: + """Split pep440 version string at the post-release segment. + + Returns the release segments before the post-release and the + post-release version number (or -1 if no post-release segment is present). + """ + vc = str.split(ver, ".post") + return vc[0], int(vc[1] or 0) if len(vc) == 2 else None + + +def render_pep440_pre(pieces: Dict[str, Any]) -> str: + """TAG[.postN.devDISTANCE] -- No -dirty. + + Exceptions: + 1: no tags. 
0.post0.devDISTANCE + """ + if pieces["closest-tag"]: if pieces["distance"]: - rendered += ".post.dev%%d" %% pieces["distance"] + # update the post release segment + tag_version, post_version = pep440_split_post(pieces["closest-tag"]) + rendered = tag_version + if post_version is not None: + rendered += ".post%%d.dev%%d" %% (post_version + 1, pieces["distance"]) + else: + rendered += ".post0.dev%%d" %% (pieces["distance"]) + else: + # no commits, use the tag as the version + rendered = pieces["closest-tag"] else: # exception #1 - rendered = "0.post.dev%%d" %% pieces["distance"] + rendered = "0.post0.dev%%d" %% pieces["distance"] return rendered -def render_pep440_post(pieces): +def render_pep440_post(pieces: Dict[str, Any]) -> str: """TAG[.postDISTANCE[.dev0]+gHEX] . The ".dev0" means dirty. Note that .dev0 sorts backwards @@ -800,12 +1017,41 @@ def render_pep440_post(pieces): return rendered -def render_pep440_old(pieces): +def render_pep440_post_branch(pieces: Dict[str, Any]) -> str: + """TAG[.postDISTANCE[.dev0]+gHEX[.dirty]] . + + The ".dev0" means not master branch. + + Exceptions: + 1: no tags. 0.postDISTANCE[.dev0]+gHEX[.dirty] + """ + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + if pieces["distance"] or pieces["dirty"]: + rendered += ".post%%d" %% pieces["distance"] + if pieces["branch"] != "master": + rendered += ".dev0" + rendered += plus_or_dot(pieces) + rendered += "g%%s" %% pieces["short"] + if pieces["dirty"]: + rendered += ".dirty" + else: + # exception #1 + rendered = "0.post%%d" %% pieces["distance"] + if pieces["branch"] != "master": + rendered += ".dev0" + rendered += "+g%%s" %% pieces["short"] + if pieces["dirty"]: + rendered += ".dirty" + return rendered + + +def render_pep440_old(pieces: Dict[str, Any]) -> str: """TAG[.postDISTANCE[.dev0]] . The ".dev0" means dirty. - Eexceptions: + Exceptions: 1: no tags. 0.postDISTANCE[.dev0] """ if pieces["closest-tag"]: @@ -822,7 +1068,7 @@ def render_pep440_old(pieces): return rendered -def render_git_describe(pieces): +def render_git_describe(pieces: Dict[str, Any]) -> str: """TAG[-DISTANCE-gHEX][-dirty]. Like 'git describe --tags --dirty --always'. @@ -842,7 +1088,7 @@ def render_git_describe(pieces): return rendered -def render_git_describe_long(pieces): +def render_git_describe_long(pieces: Dict[str, Any]) -> str: """TAG-DISTANCE-gHEX[-dirty]. Like 'git describe --tags --dirty --always -long'. @@ -862,7 +1108,7 @@ def render_git_describe_long(pieces): return rendered -def render(pieces, style): +def render(pieces: Dict[str, Any], style: str) -> Dict[str, Any]: """Render the given version pieces into the requested style.""" if pieces["error"]: return {"version": "unknown", @@ -876,10 +1122,14 @@ def render(pieces, style): if style == "pep440": rendered = render_pep440(pieces) + elif style == "pep440-branch": + rendered = render_pep440_branch(pieces) elif style == "pep440-pre": rendered = render_pep440_pre(pieces) elif style == "pep440-post": rendered = render_pep440_post(pieces) + elif style == "pep440-post-branch": + rendered = render_pep440_post_branch(pieces) elif style == "pep440-old": rendered = render_pep440_old(pieces) elif style == "git-describe": @@ -894,7 +1144,7 @@ def render(pieces, style): "date": pieces.get("date")} -def get_versions(): +def get_versions() -> Dict[str, Any]: """Get version information or return default if unable to do so.""" # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have # __file__, we can work backwards from there to the root. 
Some @@ -915,7 +1165,7 @@ def get_versions(): # versionfile_source is the relative path from the top of the source # tree (where the .git directory might live) to this file. Invert # this to find the root from __file__. - for i in cfg.versionfile_source.split('/'): + for _ in cfg.versionfile_source.split('/'): root = os.path.dirname(root) except NameError: return {"version": "0+unknown", "full-revisionid": None, @@ -942,41 +1192,48 @@ def get_versions(): @register_vcs_handler("git", "get_keywords") -def git_get_keywords(versionfile_abs): +def git_get_keywords(versionfile_abs: str) -> Dict[str, str]: """Extract version information from the given file.""" # the code embedded in _version.py can just fetch the value of these # keywords. When used from setup.py, we don't want to import _version.py, # so we do it with a regexp instead. This function is not used from # _version.py. - keywords = {} + keywords: Dict[str, str] = {} try: - f = open(versionfile_abs, "r") - for line in f.readlines(): - if line.strip().startswith("git_refnames ="): - mo = re.search(r'=\s*"(.*)"', line) - if mo: - keywords["refnames"] = mo.group(1) - if line.strip().startswith("git_full ="): - mo = re.search(r'=\s*"(.*)"', line) - if mo: - keywords["full"] = mo.group(1) - if line.strip().startswith("git_date ="): - mo = re.search(r'=\s*"(.*)"', line) - if mo: - keywords["date"] = mo.group(1) - f.close() - except EnvironmentError: + with open(versionfile_abs, "r") as fobj: + for line in fobj: + if line.strip().startswith("git_refnames ="): + mo = re.search(r'=\s*"(.*)"', line) + if mo: + keywords["refnames"] = mo.group(1) + if line.strip().startswith("git_full ="): + mo = re.search(r'=\s*"(.*)"', line) + if mo: + keywords["full"] = mo.group(1) + if line.strip().startswith("git_date ="): + mo = re.search(r'=\s*"(.*)"', line) + if mo: + keywords["date"] = mo.group(1) + except OSError: pass return keywords @register_vcs_handler("git", "keywords") -def git_versions_from_keywords(keywords, tag_prefix, verbose): +def git_versions_from_keywords( + keywords: Dict[str, str], + tag_prefix: str, + verbose: bool, +) -> Dict[str, Any]: """Get version information from git keywords.""" - if not keywords: - raise NotThisMethod("no keywords at all, weird") + if "refnames" not in keywords: + raise NotThisMethod("Short version file found") date = keywords.get("date") if date is not None: + # Use only the last line. Previous lines may contain GPG signature + # information. + date = date.splitlines()[-1] + # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant # datestamp. However we prefer "%ci" (which expands to an "ISO-8601 # -like" string, which we must then edit to make compliant), because @@ -989,11 +1246,11 @@ def git_versions_from_keywords(keywords, tag_prefix, verbose): if verbose: print("keywords are unexpanded, not using") raise NotThisMethod("unexpanded keywords, not a git-archive tarball") - refs = set([r.strip() for r in refnames.strip("()").split(",")]) + refs = {r.strip() for r in refnames.strip("()").split(",")} # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of # just "foo-1.0". If we see a "tag: " prefix, prefer those. TAG = "tag: " - tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)]) + tags = {r[len(TAG):] for r in refs if r.startswith(TAG)} if not tags: # Either we're using git < 1.8.3, or there really are no tags. We use # a heuristic: assume all version tags have a digit. 
The old git %d @@ -1002,7 +1259,7 @@ def git_versions_from_keywords(keywords, tag_prefix, verbose): # between branches and tags. By ignoring refnames without digits, we # filter out many common branch names like "release" and # "stabilization", as well as "HEAD" and "master". - tags = set([r for r in refs if re.search(r'\d', r)]) + tags = {r for r in refs if re.search(r'\d', r)} if verbose: print("discarding '%s', no digits" % ",".join(refs - tags)) if verbose: @@ -1011,6 +1268,11 @@ def git_versions_from_keywords(keywords, tag_prefix, verbose): # sorting will prefer e.g. "2.0" over "2.0rc1" if ref.startswith(tag_prefix): r = ref[len(tag_prefix):] + # Filter out refs that exactly match prefix or that don't start + # with a number once the prefix is stripped (mostly a concern + # when prefix is '') + if not re.match(r'\d', r): + continue if verbose: print("picking %s" % r) return {"version": r, @@ -1026,7 +1288,12 @@ def git_versions_from_keywords(keywords, tag_prefix, verbose): @register_vcs_handler("git", "pieces_from_vcs") -def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): +def git_pieces_from_vcs( + tag_prefix: str, + root: str, + verbose: bool, + runner: Callable = run_command +) -> Dict[str, Any]: """Get version from 'git describe' in the root of the source tree. This only gets called if the git-archive 'subst' keywords were *not* @@ -1037,8 +1304,15 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): if sys.platform == "win32": GITS = ["git.cmd", "git.exe"] - out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, - hide_stderr=True) + # GIT_DIR can interfere with correct operation of Versioneer. + # It may be intended to be passed to the Versioneer-versioned project, + # but that should not change where we get our version from. + env = os.environ.copy() + env.pop("GIT_DIR", None) + runner = functools.partial(runner, env=env) + + _, rc = runner(GITS, ["rev-parse", "--git-dir"], cwd=root, + hide_stderr=not verbose) if rc != 0: if verbose: print("Directory %s not under git control" % root) @@ -1046,24 +1320,57 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty] # if there isn't one, this yields HEX[-dirty] (no NUM) - describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty", - "--always", "--long", - "--match", "%s*" % tag_prefix], - cwd=root) + describe_out, rc = runner(GITS, [ + "describe", "--tags", "--dirty", "--always", "--long", + "--match", f"{tag_prefix}[[:digit:]]*" + ], cwd=root) # --long was added in git-1.5.5 if describe_out is None: raise NotThisMethod("'git describe' failed") describe_out = describe_out.strip() - full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root) + full_out, rc = runner(GITS, ["rev-parse", "HEAD"], cwd=root) if full_out is None: raise NotThisMethod("'git rev-parse' failed") full_out = full_out.strip() - pieces = {} + pieces: Dict[str, Any] = {} pieces["long"] = full_out pieces["short"] = full_out[:7] # maybe improved later pieces["error"] = None + branch_name, rc = runner(GITS, ["rev-parse", "--abbrev-ref", "HEAD"], + cwd=root) + # --abbrev-ref was added in git-1.6.3 + if rc != 0 or branch_name is None: + raise NotThisMethod("'git rev-parse --abbrev-ref' returned error") + branch_name = branch_name.strip() + + if branch_name == "HEAD": + # If we aren't exactly on a branch, pick a branch which represents + # the current commit. 
If all else fails, we are on a branchless + # commit. + branches, rc = runner(GITS, ["branch", "--contains"], cwd=root) + # --contains was added in git-1.5.4 + if rc != 0 or branches is None: + raise NotThisMethod("'git branch --contains' returned error") + branches = branches.split("\n") + + # Remove the first line if we're running detached + if "(" in branches[0]: + branches.pop(0) + + # Strip off the leading "* " from the list of branches. + branches = [branch[2:] for branch in branches] + if "master" in branches: + branch_name = "master" + elif not branches: + branch_name = None + else: + # Pick the first branch that is returned. Good or bad. + branch_name = branches[0] + + pieces["branch"] = branch_name + # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty] # TAG might have hyphens. git_describe = describe_out @@ -1080,7 +1387,7 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): # TAG-NUM-gHEX mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe) if not mo: - # unparseable. Maybe git-describe is misbehaving? + # unparsable. Maybe git-describe is misbehaving? pieces["error"] = ("unable to parse git-describe output: '%s'" % describe_out) return pieces @@ -1105,19 +1412,20 @@ def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): else: # HEX: no tags pieces["closest-tag"] = None - count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], - cwd=root) - pieces["distance"] = int(count_out) # total number of commits + out, rc = runner(GITS, ["rev-list", "HEAD", "--left-right"], cwd=root) + pieces["distance"] = len(out.split()) # total number of commits # commit date: see ISO-8601 comment in git_versions_from_keywords() - date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], - cwd=root)[0].strip() + date = runner(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[0].strip() + # Use only the last line. Previous lines may contain GPG signature + # information. + date = date.splitlines()[-1] pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1) return pieces -def do_vcs_install(manifest_in, versionfile_source, ipy): +def do_vcs_install(versionfile_source: str, ipy: Optional[str]) -> None: """Git-specific installation logic for Versioneer. 
For Git, this means creating/changing .gitattributes to mark _version.py @@ -1126,36 +1434,40 @@ def do_vcs_install(manifest_in, versionfile_source, ipy): GITS = ["git"] if sys.platform == "win32": GITS = ["git.cmd", "git.exe"] - files = [manifest_in, versionfile_source] + files = [versionfile_source] if ipy: files.append(ipy) - try: - me = __file__ - if me.endswith(".pyc") or me.endswith(".pyo"): - me = os.path.splitext(me)[0] + ".py" - versioneer_file = os.path.relpath(me) - except NameError: - versioneer_file = "versioneer.py" - files.append(versioneer_file) + if "VERSIONEER_PEP518" not in globals(): + try: + my_path = __file__ + if my_path.endswith((".pyc", ".pyo")): + my_path = os.path.splitext(my_path)[0] + ".py" + versioneer_file = os.path.relpath(my_path) + except NameError: + versioneer_file = "versioneer.py" + files.append(versioneer_file) present = False try: - f = open(".gitattributes", "r") - for line in f.readlines(): - if line.strip().startswith(versionfile_source): - if "export-subst" in line.strip().split()[1:]: - present = True - f.close() - except EnvironmentError: + with open(".gitattributes", "r") as fobj: + for line in fobj: + if line.strip().startswith(versionfile_source): + if "export-subst" in line.strip().split()[1:]: + present = True + break + except OSError: pass if not present: - f = open(".gitattributes", "a+") - f.write("%s export-subst\n" % versionfile_source) - f.close() + with open(".gitattributes", "a+") as fobj: + fobj.write(f"{versionfile_source} export-subst\n") files.append(".gitattributes") run_command(GITS, ["add", "--"] + files) -def versions_from_parentdir(parentdir_prefix, root, verbose): +def versions_from_parentdir( + parentdir_prefix: str, + root: str, + verbose: bool, +) -> Dict[str, Any]: """Try to determine the version from the parent directory name. Source tarballs conventionally unpack into a directory that includes both @@ -1164,15 +1476,14 @@ def versions_from_parentdir(parentdir_prefix, root, verbose): """ rootdirs = [] - for i in range(3): + for _ in range(3): dirname = os.path.basename(root) if dirname.startswith(parentdir_prefix): return {"version": dirname[len(parentdir_prefix):], "full-revisionid": None, "dirty": False, "error": None, "date": None} - else: - rootdirs.append(root) - root = os.path.dirname(root) # up a level + rootdirs.append(root) + root = os.path.dirname(root) # up a level if verbose: print("Tried directories %s but none started with prefix %s" % @@ -1181,7 +1492,7 @@ def versions_from_parentdir(parentdir_prefix, root, verbose): SHORT_VERSION_PY = """ -# This file was generated by 'versioneer.py' (0.18) from +# This file was generated by 'versioneer.py' (0.29) from # revision-control system data, or from the parent directory name of an # unpacked source archive. Distribution tarballs contain a pre-generated copy # of this file. 
@@ -1198,12 +1509,12 @@ def get_versions(): """ -def versions_from_file(filename): +def versions_from_file(filename: str) -> Dict[str, Any]: """Try to determine the version from _version.py if present.""" try: with open(filename) as f: contents = f.read() - except EnvironmentError: + except OSError: raise NotThisMethod("unable to read _version.py") mo = re.search(r"version_json = '''\n(.*)''' # END VERSION_JSON", contents, re.M | re.S) @@ -1215,9 +1526,8 @@ def versions_from_file(filename): return json.loads(mo.group(1)) -def write_to_version_file(filename, versions): +def write_to_version_file(filename: str, versions: Dict[str, Any]) -> None: """Write the given version number to the given _version.py file.""" - os.unlink(filename) contents = json.dumps(versions, sort_keys=True, indent=1, separators=(",", ": ")) with open(filename, "w") as f: @@ -1226,14 +1536,14 @@ def write_to_version_file(filename, versions): print("set %s to '%s'" % (filename, versions["version"])) -def plus_or_dot(pieces): +def plus_or_dot(pieces: Dict[str, Any]) -> str: """Return a + if we don't already have one, else return a .""" if "+" in pieces.get("closest-tag", ""): return "." return "+" -def render_pep440(pieces): +def render_pep440(pieces: Dict[str, Any]) -> str: """Build up version string, with post-release "local version identifier". Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you @@ -1258,23 +1568,71 @@ def render_pep440(pieces): return rendered -def render_pep440_pre(pieces): - """TAG[.post.devDISTANCE] -- No -dirty. +def render_pep440_branch(pieces: Dict[str, Any]) -> str: + """TAG[[.dev0]+DISTANCE.gHEX[.dirty]] . + + The ".dev0" means not master branch. Note that .dev0 sorts backwards + (a feature branch will appear "older" than the master branch). Exceptions: - 1: no tags. 0.post.devDISTANCE + 1: no tags. 0[.dev0]+untagged.DISTANCE.gHEX[.dirty] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] + if pieces["distance"] or pieces["dirty"]: + if pieces["branch"] != "master": + rendered += ".dev0" + rendered += plus_or_dot(pieces) + rendered += "%d.g%s" % (pieces["distance"], pieces["short"]) + if pieces["dirty"]: + rendered += ".dirty" + else: + # exception #1 + rendered = "0" + if pieces["branch"] != "master": + rendered += ".dev0" + rendered += "+untagged.%d.g%s" % (pieces["distance"], + pieces["short"]) + if pieces["dirty"]: + rendered += ".dirty" + return rendered + + +def pep440_split_post(ver: str) -> Tuple[str, Optional[int]]: + """Split pep440 version string at the post-release segment. + + Returns the release segments before the post-release and the + post-release version number (or -1 if no post-release segment is present). + """ + vc = str.split(ver, ".post") + return vc[0], int(vc[1] or 0) if len(vc) == 2 else None + + +def render_pep440_pre(pieces: Dict[str, Any]) -> str: + """TAG[.postN.devDISTANCE] -- No -dirty. + + Exceptions: + 1: no tags. 
0.post0.devDISTANCE + """ + if pieces["closest-tag"]: if pieces["distance"]: - rendered += ".post.dev%d" % pieces["distance"] + # update the post release segment + tag_version, post_version = pep440_split_post(pieces["closest-tag"]) + rendered = tag_version + if post_version is not None: + rendered += ".post%d.dev%d" % (post_version + 1, pieces["distance"]) + else: + rendered += ".post0.dev%d" % (pieces["distance"]) + else: + # no commits, use the tag as the version + rendered = pieces["closest-tag"] else: # exception #1 - rendered = "0.post.dev%d" % pieces["distance"] + rendered = "0.post0.dev%d" % pieces["distance"] return rendered -def render_pep440_post(pieces): +def render_pep440_post(pieces: Dict[str, Any]) -> str: """TAG[.postDISTANCE[.dev0]+gHEX] . The ".dev0" means dirty. Note that .dev0 sorts backwards @@ -1301,12 +1659,41 @@ def render_pep440_post(pieces): return rendered -def render_pep440_old(pieces): +def render_pep440_post_branch(pieces: Dict[str, Any]) -> str: + """TAG[.postDISTANCE[.dev0]+gHEX[.dirty]] . + + The ".dev0" means not master branch. + + Exceptions: + 1: no tags. 0.postDISTANCE[.dev0]+gHEX[.dirty] + """ + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + if pieces["distance"] or pieces["dirty"]: + rendered += ".post%d" % pieces["distance"] + if pieces["branch"] != "master": + rendered += ".dev0" + rendered += plus_or_dot(pieces) + rendered += "g%s" % pieces["short"] + if pieces["dirty"]: + rendered += ".dirty" + else: + # exception #1 + rendered = "0.post%d" % pieces["distance"] + if pieces["branch"] != "master": + rendered += ".dev0" + rendered += "+g%s" % pieces["short"] + if pieces["dirty"]: + rendered += ".dirty" + return rendered + + +def render_pep440_old(pieces: Dict[str, Any]) -> str: """TAG[.postDISTANCE[.dev0]] . The ".dev0" means dirty. - Eexceptions: + Exceptions: 1: no tags. 0.postDISTANCE[.dev0] """ if pieces["closest-tag"]: @@ -1323,7 +1710,7 @@ def render_pep440_old(pieces): return rendered -def render_git_describe(pieces): +def render_git_describe(pieces: Dict[str, Any]) -> str: """TAG[-DISTANCE-gHEX][-dirty]. Like 'git describe --tags --dirty --always'. @@ -1343,7 +1730,7 @@ def render_git_describe(pieces): return rendered -def render_git_describe_long(pieces): +def render_git_describe_long(pieces: Dict[str, Any]) -> str: """TAG-DISTANCE-gHEX[-dirty]. Like 'git describe --tags --dirty --always -long'. @@ -1363,7 +1750,7 @@ def render_git_describe_long(pieces): return rendered -def render(pieces, style): +def render(pieces: Dict[str, Any], style: str) -> Dict[str, Any]: """Render the given version pieces into the requested style.""" if pieces["error"]: return {"version": "unknown", @@ -1377,10 +1764,14 @@ def render(pieces, style): if style == "pep440": rendered = render_pep440(pieces) + elif style == "pep440-branch": + rendered = render_pep440_branch(pieces) elif style == "pep440-pre": rendered = render_pep440_pre(pieces) elif style == "pep440-post": rendered = render_pep440_post(pieces) + elif style == "pep440-post-branch": + rendered = render_pep440_post_branch(pieces) elif style == "pep440-old": rendered = render_pep440_old(pieces) elif style == "git-describe": @@ -1399,7 +1790,7 @@ class VersioneerBadRootError(Exception): """The project root directory is unknown or missing key files.""" -def get_versions(verbose=False): +def get_versions(verbose: bool = False) -> Dict[str, Any]: """Get the project version from whatever source is available. Returns dict with two keys: 'version' and 'full'. 
@@ -1414,7 +1805,7 @@ def get_versions(verbose=False): assert cfg.VCS is not None, "please set [versioneer]VCS= in setup.cfg" handlers = HANDLERS.get(cfg.VCS) assert handlers, "unrecognized VCS '%s'" % cfg.VCS - verbose = verbose or cfg.verbose + verbose = verbose or bool(cfg.verbose) # `bool()` used to avoid `None` assert cfg.versionfile_source is not None, \ "please set versioneer.versionfile_source" assert cfg.tag_prefix is not None, "please set versioneer.tag_prefix" @@ -1475,13 +1866,17 @@ def get_versions(verbose=False): "date": None} -def get_version(): +def get_version() -> str: """Get the short version string for this project.""" return get_versions()["version"] -def get_cmdclass(): - """Get the custom setuptools/distutils subclasses used by Versioneer.""" +def get_cmdclass(cmdclass: Optional[Dict[str, Any]] = None): + """Get the custom setuptools subclasses used by Versioneer. + + If the package uses a different cmdclass (e.g. one from numpy), it + should be provide as an argument. + """ if "versioneer" in sys.modules: del sys.modules["versioneer"] # this fixes the "python setup.py develop" case (also 'install' and @@ -1495,25 +1890,25 @@ def get_cmdclass(): # parent is protected against the child's "import versioneer". By # removing ourselves from sys.modules here, before the child build # happens, we protect the child from the parent's versioneer too. - # Also see https://github.com/warner/python-versioneer/issues/52 + # Also see https://github.com/python-versioneer/python-versioneer/issues/52 - cmds = {} + cmds = {} if cmdclass is None else cmdclass.copy() - # we add "version" to both distutils and setuptools - from distutils.core import Command + # we add "version" to setuptools + from setuptools import Command class cmd_version(Command): description = "report generated version string" - user_options = [] - boolean_options = [] + user_options: List[Tuple[str, str, str]] = [] + boolean_options: List[str] = [] - def initialize_options(self): + def initialize_options(self) -> None: pass - def finalize_options(self): + def finalize_options(self) -> None: pass - def run(self): + def run(self) -> None: vers = get_versions(verbose=True) print("Version: %s" % vers["version"]) print(" full-revisionid: %s" % vers.get("full-revisionid")) @@ -1523,7 +1918,7 @@ def run(self): print(" error: %s" % vers["error"]) cmds["version"] = cmd_version - # we override "build_py" in both distutils and setuptools + # we override "build_py" in setuptools # # most invocation pathways end up running build_py: # distutils/build -> build_py @@ -1538,18 +1933,25 @@ def run(self): # then does setup.py bdist_wheel, or sometimes setup.py install # setup.py egg_info -> ? + # pip install -e . and setuptool/editable_wheel will invoke build_py + # but the build_py command is not expected to copy any files. 
+ # we override different "build_py" commands for both environments - if "setuptools" in sys.modules: - from setuptools.command.build_py import build_py as _build_py + if 'build_py' in cmds: + _build_py: Any = cmds['build_py'] else: - from distutils.command.build_py import build_py as _build_py + from setuptools.command.build_py import build_py as _build_py class cmd_build_py(_build_py): - def run(self): + def run(self) -> None: root = get_root() cfg = get_config_from_root(root) versions = get_versions() _build_py.run(self) + if getattr(self, "editable_mode", False): + # During editable installs `.py` and data files are + # not copied to build_lib + return # now locate _version.py in the new build/ directory and replace # it with an updated value if cfg.versionfile_build: @@ -1559,8 +1961,40 @@ def run(self): write_to_version_file(target_versionfile, versions) cmds["build_py"] = cmd_build_py + if 'build_ext' in cmds: + _build_ext: Any = cmds['build_ext'] + else: + from setuptools.command.build_ext import build_ext as _build_ext + + class cmd_build_ext(_build_ext): + def run(self) -> None: + root = get_root() + cfg = get_config_from_root(root) + versions = get_versions() + _build_ext.run(self) + if self.inplace: + # build_ext --inplace will only build extensions in + # build/lib<..> dir with no _version.py to write to. + # As in place builds will already have a _version.py + # in the module dir, we do not need to write one. + return + # now locate _version.py in the new build/ directory and replace + # it with an updated value + if not cfg.versionfile_build: + return + target_versionfile = os.path.join(self.build_lib, + cfg.versionfile_build) + if not os.path.exists(target_versionfile): + print(f"Warning: {target_versionfile} does not exist, skipping " + "version update. This can happen if you are running build_ext " + "without first running build_py.") + return + print("UPDATING %s" % target_versionfile) + write_to_version_file(target_versionfile, versions) + cmds["build_ext"] = cmd_build_ext + if "cx_Freeze" in sys.modules: # cx_freeze enabled? - from cx_Freeze.dist import build_exe as _build_exe + from cx_Freeze.dist import build_exe as _build_exe # type: ignore # nczeczulin reports that py2exe won't like the pep440-style string # as FILEVERSION, but it can be used for PRODUCTVERSION, e.g. # setup(console=[{ @@ -1569,7 +2003,7 @@ def run(self): # ... class cmd_build_exe(_build_exe): - def run(self): + def run(self) -> None: root = get_root() cfg = get_config_from_root(root) versions = get_versions() @@ -1593,12 +2027,12 @@ def run(self): if 'py2exe' in sys.modules: # py2exe enabled? 
try: - from py2exe.distutils_buildexe import py2exe as _py2exe # py3 + from py2exe.setuptools_buildexe import py2exe as _py2exe # type: ignore except ImportError: - from py2exe.build_exe import py2exe as _py2exe # py2 + from py2exe.distutils_buildexe import py2exe as _py2exe # type: ignore class cmd_py2exe(_py2exe): - def run(self): + def run(self) -> None: root = get_root() cfg = get_config_from_root(root) versions = get_versions() @@ -1619,14 +2053,51 @@ def run(self): }) cmds["py2exe"] = cmd_py2exe + # sdist farms its file list building out to egg_info + if 'egg_info' in cmds: + _egg_info: Any = cmds['egg_info'] + else: + from setuptools.command.egg_info import egg_info as _egg_info + + class cmd_egg_info(_egg_info): + def find_sources(self) -> None: + # egg_info.find_sources builds the manifest list and writes it + # in one shot + super().find_sources() + + # Modify the filelist and normalize it + root = get_root() + cfg = get_config_from_root(root) + self.filelist.append('versioneer.py') + if cfg.versionfile_source: + # There are rare cases where versionfile_source might not be + # included by default, so we must be explicit + self.filelist.append(cfg.versionfile_source) + self.filelist.sort() + self.filelist.remove_duplicates() + + # The write method is hidden in the manifest_maker instance that + # generated the filelist and was thrown away + # We will instead replicate their final normalization (to unicode, + # and POSIX-style paths) + from setuptools import unicode_utils + normalized = [unicode_utils.filesys_decode(f).replace(os.sep, '/') + for f in self.filelist.files] + + manifest_filename = os.path.join(self.egg_info, 'SOURCES.txt') + with open(manifest_filename, 'w') as fobj: + fobj.write('\n'.join(normalized)) + + cmds['egg_info'] = cmd_egg_info + # we override different "sdist" commands for both environments - if "setuptools" in sys.modules: - from setuptools.command.sdist import sdist as _sdist + if 'sdist' in cmds: + _sdist: Any = cmds['sdist'] else: - from distutils.command.sdist import sdist as _sdist + from setuptools.command.sdist import sdist as _sdist class cmd_sdist(_sdist): - def run(self): + def run(self) -> None: versions = get_versions() self._versioneer_generated_versions = versions # unless we update this, the command will keep using the old @@ -1634,7 +2105,7 @@ def run(self): self.distribution.metadata.version = versions["version"] return _sdist.run(self) - def make_release_tree(self, base_dir, files): + def make_release_tree(self, base_dir: str, files: List[str]) -> None: root = get_root() cfg = get_config_from_root(root) _sdist.make_release_tree(self, base_dir, files) @@ -1687,21 +2158,26 @@ def make_release_tree(self, base_dir, files): """ -INIT_PY_SNIPPET = """ +OLD_SNIPPET = """ from ._version import get_versions __version__ = get_versions()['version'] del get_versions """ +INIT_PY_SNIPPET = """ +from . 
import {0} +__version__ = {0}.get_versions()['version'] +""" -def do_setup(): - """Main VCS-independent setup function for installing Versioneer.""" + +def do_setup() -> int: + """Do main VCS-independent setup function for installing Versioneer.""" root = get_root() try: cfg = get_config_from_root(root) - except (EnvironmentError, configparser.NoSectionError, + except (OSError, configparser.NoSectionError, configparser.NoOptionError) as e: - if isinstance(e, (EnvironmentError, configparser.NoSectionError)): + if isinstance(e, (OSError, configparser.NoSectionError)): print("Adding sample versioneer config to setup.cfg", file=sys.stderr) with open(os.path.join(root, "setup.cfg"), "a") as f: @@ -1721,62 +2197,37 @@ def do_setup(): ipy = os.path.join(os.path.dirname(cfg.versionfile_source), "__init__.py") + maybe_ipy: Optional[str] = ipy if os.path.exists(ipy): try: with open(ipy, "r") as f: old = f.read() - except EnvironmentError: + except OSError: old = "" - if INIT_PY_SNIPPET not in old: + module = os.path.splitext(os.path.basename(cfg.versionfile_source))[0] + snippet = INIT_PY_SNIPPET.format(module) + if OLD_SNIPPET in old: + print(" replacing boilerplate in %s" % ipy) + with open(ipy, "w") as f: + f.write(old.replace(OLD_SNIPPET, snippet)) + elif snippet not in old: print(" appending to %s" % ipy) with open(ipy, "a") as f: - f.write(INIT_PY_SNIPPET) + f.write(snippet) else: print(" %s unmodified" % ipy) else: print(" %s doesn't exist, ok" % ipy) - ipy = None - - # Make sure both the top-level "versioneer.py" and versionfile_source - # (PKG/_version.py, used by runtime code) are in MANIFEST.in, so - # they'll be copied into source distributions. Pip won't be able to - # install the package without this. - manifest_in = os.path.join(root, "MANIFEST.in") - simple_includes = set() - try: - with open(manifest_in, "r") as f: - for line in f: - if line.startswith("include "): - for include in line.split()[1:]: - simple_includes.add(include) - except EnvironmentError: - pass - # That doesn't cover everything MANIFEST.in can do - # (http://docs.python.org/2/distutils/sourcedist.html#commands), so - # it might give some false negatives. Appending redundant 'include' - # lines is safe, though. - if "versioneer.py" not in simple_includes: - print(" appending 'versioneer.py' to MANIFEST.in") - with open(manifest_in, "a") as f: - f.write("include versioneer.py\n") - else: - print(" 'versioneer.py' already in MANIFEST.in") - if cfg.versionfile_source not in simple_includes: - print(" appending versionfile_source ('%s') to MANIFEST.in" % - cfg.versionfile_source) - with open(manifest_in, "a") as f: - f.write("include %s\n" % cfg.versionfile_source) - else: - print(" versionfile_source already in MANIFEST.in") + maybe_ipy = None # Make VCS-specific changes. For git, this means creating/changing # .gitattributes to mark _version.py for export-subst keyword # substitution. 
- do_vcs_install(manifest_in, cfg.versionfile_source, ipy) + do_vcs_install(cfg.versionfile_source, maybe_ipy) return 0 -def scan_setup_py(): +def scan_setup_py() -> int: """Validate the contents of setup.py against Versioneer's expectations.""" found = set() setters = False @@ -1813,10 +2264,14 @@ def scan_setup_py(): return errors +def setup_command() -> NoReturn: + """Set up Versioneer and exit with appropriate error code.""" + errors = do_setup() + errors += scan_setup_py() + sys.exit(1 if errors else 0) + + if __name__ == "__main__": cmd = sys.argv[1] if cmd == "setup": - errors = do_setup() - errors += scan_setup_py() - if errors: - sys.exit(1) + setup_command() From c28a744d0914be1c025096e7bb72f2f9b0f0e3d4 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Tue, 4 Jun 2024 15:33:02 -0400 Subject: [PATCH 207/410] treewide: Add space around operator in f-strings Python3.12 codestyle checks that. Signed-off-by: Jan Vesely --- .../functions/nonstateful/combinationfunctions.py | 2 +- .../components/functions/stateful/integratorfunctions.py | 4 ++-- .../modulatory/control/optimizationcontrolmechanism.py | 2 +- psyneulink/core/compositions/report.py | 6 +++--- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/psyneulink/core/components/functions/nonstateful/combinationfunctions.py b/psyneulink/core/components/functions/nonstateful/combinationfunctions.py index ccaf0ef35b8..8bd0cb79f34 100644 --- a/psyneulink/core/components/functions/nonstateful/combinationfunctions.py +++ b/psyneulink/core/components/functions/nonstateful/combinationfunctions.py @@ -512,7 +512,7 @@ def _validate_params(self, request_set, target_set=None, context=None): except IndexError: raise FunctionError(f"Index ({i}) specified in {repr(ARRANGEMENT)} arg for " f"{self.name}{owner_str} is out of bounds for its {repr(DEFAULT_VARIABLE)} " - f"arg (max index = {len(self.parameters.variable.default_value)-1}).") + f"arg (max index = {len(self.parameters.variable.default_value) - 1}).") # Check that SCALE and OFFSET are scalars. 
if SCALE in target_set and target_set[SCALE] is not None: diff --git a/psyneulink/core/components/functions/stateful/integratorfunctions.py b/psyneulink/core/components/functions/stateful/integratorfunctions.py index 6e815961984..06e3fd9597d 100644 --- a/psyneulink/core/components/functions/stateful/integratorfunctions.py +++ b/psyneulink/core/components/functions/stateful/integratorfunctions.py @@ -3118,7 +3118,7 @@ def _validate(self, context=None): if angle_result.ndim != 1 and len(angle_result) != dimension: raise FunctionError(f"{fct_msg} specified for 'angle_function' arg of " f"{self.__class__.__name__} ({angle_function}) must accept a list or 1d array " - f"of length {dimension-1} and return a 1d array of length {dimension}.") + f"of length {dimension - 1} and return a 1d array of length {dimension}.") except: raise FunctionError(f"Problem with {fct_msg} specified for 'angle_function' arg of " f"{self.__class__.__name__} ({angle_function}).") @@ -3169,7 +3169,7 @@ def _function(self, owner_str = f"'of '{self.owner.name}" if self.owner else "" raise FunctionError(f"Length of 'variable' for {self.name}{owner_str} ({len(variable)}) must be " # f"1 or one less than its 'dimension' parameter ({dimension}-1={dimension-1}).") - f"1 or {dimension-1} (one less than its 'dimension' parameter: {dimension}).") + f"1 or {dimension - 1} (one less than its 'dimension' parameter: {dimension}).") random_draw = np.array([random_state.normal() for i in range(dimension - 1)]) value = previous_value + rate * drift * time_step_size \ diff --git a/psyneulink/core/components/mechanisms/modulatory/control/optimizationcontrolmechanism.py b/psyneulink/core/components/mechanisms/modulatory/control/optimizationcontrolmechanism.py index 7ad0ea9c412..8b526d9e538 100644 --- a/psyneulink/core/components/mechanisms/modulatory/control/optimizationcontrolmechanism.py +++ b/psyneulink/core/components/mechanisms/modulatory/control/optimizationcontrolmechanism.py @@ -3653,7 +3653,7 @@ def state_features(self): else: # Specified InputPort is not (yet) in agent_rep input_port_name = (f"{input_port.full_name}" if input_port - else f"{str(i-len(agent_rep_input_ports))}") + else f"{str(i - len(agent_rep_input_ports))}") key = _deferred_agent_rep_input_port_name(input_port_name, self.agent_rep.name) # Get source for state_features dict diff --git a/psyneulink/core/compositions/report.py b/psyneulink/core/compositions/report.py index a0d2d0302b3..25b7784b2ca 100644 --- a/psyneulink/core/compositions/report.py +++ b/psyneulink/core/compositions/report.py @@ -870,7 +870,7 @@ def start_report(self, comp, num_trials, context) -> Optional[int]: self._depth_indent_i = self._depth_str_i = '' if self._run_mode is SIMULATION or self._execution_stack_depth: self._depth_indent_i = self.depth_indent_factor * self._execution_stack_depth * ' ' - self._depth_str_i = f' (depth: {self._execution_stack_depth-1})' + self._depth_str_i = f' (depth: {self._execution_stack_depth - 1})' id = self._rich_progress.add_task(f"[red]{self._depth_indent_i}{comp.name}: " f"{self._run_mode}ing {self._depth_str_i}...", @@ -1764,9 +1764,9 @@ def report_progress(self, self._depth_indent = self._depth_str = '' if simulation_mode or self._execution_stack_depth>1: self._depth_indent = self.depth_indent_factor * self._execution_stack_depth * ' ' - self._depth_str = f' (depth: {self._execution_stack_depth-1})' + self._depth_str = f' (depth: {self._execution_stack_depth - 1})' update = f'{self._depth_indent}{caller.name}: ' \ - f'{self._run_mode}ed 
{trial_num+1}{num_trials_str} trial{s}{self._depth_str}' + f'{self._run_mode}ed {trial_num + 1}{num_trials_str} trial{s}{self._depth_str}' # Do update self._rich_progress.update(output_report.rich_task_id, From 7a65912b083cfc92197b2f08b0bc7c5144cce944 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Tue, 17 Oct 2023 12:39:52 -0400 Subject: [PATCH 208/410] setup: Advertise support for python 3.12 Signed-off-by: Jan Vesely --- setup.py | 1 + 1 file changed, 1 insertion(+) diff --git a/setup.py b/setup.py index 118a217b77f..f382ef5477e 100644 --- a/setup.py +++ b/setup.py @@ -62,6 +62,7 @@ def get_requirements(require_name=None): 'Programming Language :: Python :: 3.9', 'Programming Language :: Python :: 3.10', 'Programming Language :: Python :: 3.11', + 'Programming Language :: Python :: 3.12', ], # Require recent python From 321180a968a8a44cde9e72d406c4dcba3273d752 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Tue, 17 Oct 2023 12:40:46 -0400 Subject: [PATCH 209/410] ci/github-actions: Add python 3.12 docs building jobs Signed-off-by: Jan Vesely --- .github/workflows/pnl-ci-docs.yml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.github/workflows/pnl-ci-docs.yml b/.github/workflows/pnl-ci-docs.yml index 8ba5879f605..042bad3c738 100644 --- a/.github/workflows/pnl-ci-docs.yml +++ b/.github/workflows/pnl-ci-docs.yml @@ -21,7 +21,7 @@ jobs: strategy: fail-fast: false matrix: - python-version: ['3.7', '3.8', '3.9', '3.10', '3.11'] + python-version: ['3.7', '3.8', '3.9', '3.10', '3.11', '3.12'] os: [ubuntu-latest, macos-latest, windows-latest] pnl-version: ${{ (github.event_name == 'push') && fromJSON('["head"]') || fromJSON('["head", "base"]') }} exclude: @@ -37,6 +37,8 @@ jobs: pnl-version: 'base' - python-version: '3.10' pnl-version: 'base' + - python-version: '3.12' + pnl-version: 'base' # Python 3.7 x64 on macos-14 (arm64) images is broken [0] # and arm64 version is not available [1]. From 116b91709333607139c2800c03093644a9ec259e Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Tue, 17 Oct 2023 12:41:49 -0400 Subject: [PATCH 210/410] ci/github-actions: Add python 3.12 jobs Signed-off-by: Jan Vesely --- .github/workflows/pnl-ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/pnl-ci.yml b/.github/workflows/pnl-ci.yml index 9b5882ac834..02f2bd31748 100644 --- a/.github/workflows/pnl-ci.yml +++ b/.github/workflows/pnl-ci.yml @@ -33,7 +33,7 @@ jobs: strategy: fail-fast: false matrix: - python-version: ['3.7', '3.11'] + python-version: ['3.7', '3.11', '3.12'] python-architecture: ['x64'] extra-args: [''] os: [ubuntu, macos, windows] From 45f78e98b0d88fa7a74a739715155b3085c97165 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 5 Jun 2024 00:56:14 -0400 Subject: [PATCH 211/410] requirements: update pytest requirement from <8.2.2 to <8.2.3 (#2974) Updates the requirements on [pytest](https://github.com/pytest-dev/pytest) to permit the latest version. - [Release notes](https://github.com/pytest-dev/pytest/releases) - [Changelog](https://github.com/pytest-dev/pytest/blob/main/CHANGELOG.rst) - [Commits](https://github.com/pytest-dev/pytest/compare/8.2.1...8.2.2) --- updated-dependencies: - dependency-name: pytest dependency-type: direct:development ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- dev_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev_requirements.txt b/dev_requirements.txt index f67db8a1497..3b0c7416cfe 100644 --- a/dev_requirements.txt +++ b/dev_requirements.txt @@ -1,6 +1,6 @@ jupyter<1.0.1 packaging<25.0 -pytest<8.2.2 +pytest<8.2.3 pytest-benchmark<4.0.1 pytest-cov<5.0.1 pytest-forked<1.7.0 From 20cc7038a9a7b0867413c54f7081a26d5fc69884 Mon Sep 17 00:00:00 2001 From: David Turner Date: Thu, 6 Jun 2024 12:39:24 -0400 Subject: [PATCH 212/410] Example stability_flexibility_cond.py --- .../stability_flexibility_cond.py | 227 +++++++++++------- .../parameterestimationcomposition.py | 68 ++++-- 2 files changed, 182 insertions(+), 113 deletions(-) diff --git a/Scripts/Debug/stability_flexibility/stability_flexibility_cond.py b/Scripts/Debug/stability_flexibility/stability_flexibility_cond.py index c2ffea7ce25..39f65cc2671 100644 --- a/Scripts/Debug/stability_flexibility/stability_flexibility_cond.py +++ b/Scripts/Debug/stability_flexibility/stability_flexibility_cond.py @@ -11,15 +11,99 @@ from stability_flexibility import make_stab_flex, generate_trial_sequence +def get_node(comp, name): + """ + Get the node from the composition with the given name. The name needs to match from the beginning, but it + can have any numeric suffix after the name. + """ + for node in comp.nodes: + if node.name.startswith(name): + return node + return None + + +def make_input_dict(stab_flex_comp, taskTrain, stimulusTrain, cueTrain, correctResponse): + inputs = { + get_node(stab_flex_comp, "Task Input [I1, I2]"): [[np.array(v)] for v in taskTrain], + get_node(stab_flex_comp, "Stimulus Input [S1, S2]"): [[np.array(v)] for v in stimulusTrain], + get_node(stab_flex_comp, "Cue-Stimulus Interval"): [[np.array(v)] for v in cueTrain], + get_node(stab_flex_comp, "Correct Response Info"): [[np.array(v)] for v in correctResponse] + } + + return inputs + +def run_stab_flex_cond( + taskTrain, + stimulusTrain, + cueTrain, + correctResponse, + **kwargs): + """ + Create a stability flexibility composition and run it with the given parameters. Return the composition and the + results as a pandas DataFrame. If any of the parameters are a list, then that parameter is assumed to be trial-wise + and the length of the list should be the number of trials. A control mechanism will be added to the composition to + override the parameter with the value from the input. + """ + + # Remove any parameters that are trial-wise from the kwargs, these values will be passed in as inputs + # to the composition. 
+ cond_params = {name: value for name, value in kwargs.items() + if isinstance(value, list) or isinstance(value, np.ndarray)} + + # Remove the trial-wise parameters from the kwargs + kwargs = {name: value for name, value in kwargs.items() if name not in cond_params} + + # Make a stability flexibility composition + comp = make_stab_flex(**kwargs) + + inputs = make_input_dict(comp, taskTrain, stimulusTrain, cueTrain, correctResponse) + + # A dict to map keyword arg name to the corresponding mechanism in the composition + param_map = { + "gain": ("gain", comp.nodes["Task Activations [Act1, Act2]"]), # Gain + "automaticity": ("slope", comp.nodes["Automaticity-weighted Stimulus Input [w*S1, w*S2]"]), # Automaticity + "threshold": ("threshold", comp.nodes["DDM"]), # Threshold + "non_decision_time": ("non_decision_time", comp.nodes["DECISION_GATE"]), # Non-decision time + } + # Go through the parameters and check if any are trial-wise, if so, add a control mechanism to override the value on + # trial-by-trial basis with the value from the input. + pec_mechs = {} + for (name, value) in cond_params.items(): + + if len(value) != num_trials: + raise ValueError("Length of trial-wise parameter must be equal to the number of trials.") + + pec_mechs[name] = pnl.ControlMechanism(name=f"{name}_control", + control_signals=param_map[name], + modulation=pnl.OVERRIDE) + comp.add_node(pec_mechs[name]) + inputs[pec_mechs[name]] = [[np.array([value[i]])] for i in range(num_trials)] + + comp.run(inputs, execution_mode=pnl.ExecutionMode.LLVMRun) + + df = pd.DataFrame( + np.squeeze(np.array(comp.results))[:, 1:], columns=["decision", "response_time"] + ) + df["decision"] = df["decision"].astype("category") + + # Add the trial-wise parameters to the DataFrame as well. + for name in pec_mechs.keys(): + df[name] = cond_params[name] + + assert len(comp.input_ports) > 0 + + return comp, df + + # Let's make things reproducible pnl_seed = 0 set_global_seed(pnl_seed) trial_seq_seed = 0 # High-level parameters the impact performance of the test -num_trials = 12 +num_trials = 150 time_step_size = 0.01 -num_estimates = 3 +num_estimates = 10000 sf_params = dict( gain=3.0, @@ -47,34 +131,34 @@ # was set to run with timestep size of 0.001 cueTrain = [c / 10.0 for c in cueTrain] -# Make a stability flexibility composition -comp = make_stab_flex(**sf_params) - -# Let's run the model with some sample data -taskLayer = comp.nodes["Task Input [I1, I2]"] -stimulusInfo = comp.nodes["Stimulus Input [S1, S2]"] -cueInterval = comp.nodes["Cue-Stimulus Interval"] -correctInfo = comp.nodes["Correct Response Info"] - -inputs = { - taskLayer: [[np.array(taskTrain[i])] for i in range(num_trials)], - stimulusInfo: [[np.array(stimulusTrain[i])] for i in range(num_trials)], - cueInterval: [[np.array([cueTrain[i]])] for i in range(num_trials)], - correctInfo: [[np.array([correctResponse[i]])] for i in range(num_trials)] -} +# We will generate a dataset that comprises two different conditions. Each condition will have a different threshold. +# Randomly select which trials will be in each condition uniformly. 
+rng = np.random.default_rng(pnl_seed) +threshold = rng.choice([0.3, 0.7], size=num_trials, replace=True) + +# Run +_, data_to_fit = run_stab_flex_cond( + taskTrain, + stimulusTrain, + cueTrain, + correctResponse, + **{**sf_params, 'threshold': threshold} +) -# comp.run(inputs, execution_mode=pnl.ExecutionMode.LLVMRun) -# pnllvm.cleanup() +# Turn our trial-wise threshold into a condition +data_to_fit['condition'] = np.where(data_to_fit['threshold'] == 0.3, 'threshold=0.3', 'threshold=0.7') +data_to_fit.drop(columns=['threshold'], inplace=True) #%% # Create a parameter estimation composition to fit the data we just generated and hopefully recover the # parameters of the composition. +comp = make_stab_flex(**sf_params) -controlModule = comp.nodes["Task Activations [Act1, Act2]"] -congruenceWeighting = comp.nodes["Automaticity-weighted Stimulus Input [w*S1, w*S2]"] -decisionMaker = comp.nodes["DDM"] -decisionGate = comp.nodes["DECISION_GATE"] -responseGate = comp.nodes["RESPONSE_GATE"] +controlModule = get_node(comp, "Task Activations [Act1, Act2]") +congruenceWeighting = get_node(comp, "Automaticity-weighted Stimulus Input [w*S1, w*S2]") +decisionMaker = get_node(comp, "DDM") +decisionGate = get_node(comp, "DECISION_GATE") +responseGate = get_node(comp, "RESPONSE_GATE") fit_parameters = { ("gain", controlModule): np.linspace(1.0, 10.0, 1000), # Gain @@ -83,76 +167,39 @@ ("non_decision_time", decisionMaker): np.linspace(0.1, 0.4, 1000), # Threshold } -#%% -# For each parameter, we will add a control mechanism to the composition that overrides the parameter with a value -# from the input. -pec_mechs = {} -for (name, mech), values in fit_parameters.items(): - pec_mechs[(name, mech)] = pnl.ControlMechanism(name=f"{name}_control", - control_signals=[(name, mech)], - modulation=pnl.OVERRIDE) - comp.add_node(pec_mechs[(name, mech)]) +pec = pnl.ParameterEstimationComposition( + name="pec", + nodes=comp, + parameters=fit_parameters, + depends_on={("threshold", decisionMaker): 'condition'}, + outcome_variables=[ + decisionGate.output_ports[0], + responseGate.output_ports[0], + ], + data=data_to_fit, + optimization_function='differential_evolution', + num_estimates=num_estimates, +) -#%% -print("Running inner composition to generate data to fit for parameter recovery test.") -comp.run(inputs, execution_mode=pnl.ExecutionMode.LLVMRun) -results = comp.results +pec.controller.parameters.comp_execution_mode.set("LLVM") +pec.controller.function.parameters.save_values.set(True) -print("Setting up PEC") +inputs = make_input_dict(comp, taskTrain, stimulusTrain, cueTrain, correctResponse) -data_to_fit = pd.DataFrame( - np.squeeze(np.array(results))[:, 1:], columns=["decision", "response_time"] -) -data_to_fit["decision"] = data_to_fit["decision"].astype("category") +print("Running the PEC") +ret = pec.run(inputs=inputs) +optimal_parameters = pec.optimized_parameter_values + +# Print the recovered parameters. 
+records = [] +for (name, mech), recovered_param in zip(fit_parameters.keys(), optimal_parameters): -#%% -inputs_with_params = {**inputs} -for (name, mech), con_mech in pec_mechs.items(): if name == "slope": - value = sf_params['automaticity'] - elif name == "non_decision_time": - value = 5.0 + true_param = sf_params['automaticity'] else: - value = sf_params[name] - - inputs_with_params[con_mech] = value - -comp.results.clear() -comp.run(inputs_with_params, execution_mode=pnl.ExecutionMode.LLVMRun) -results = comp.results - -#%% + true_param = sf_params[name] -# pec = pnl.ParameterEstimationComposition( -# name="pec", -# nodes=comp, -# parameters=fit_parameters, -# outcome_variables=[ -# decisionGate.output_ports[0], -# responseGate.output_ports[0], -# ], -# data=data_to_fit, -# optimization_function='differential_evolution', -# num_estimates=num_estimates, -# ) -# -# # pec.controller.parameters.comp_execution_mode.set("LLVM") -# pec.controller.function.parameters.save_values.set(True) -# -# print("Running the PEC") -# ret = pec.run(inputs=inputs) -# optimal_parameters = pec.optimized_parameter_values -# -# # Print the recovered parameters. -# records = [] -# for (name, mech), recovered_param in zip(fit_parameters.keys(), optimal_parameters): -# -# if name == "slope": -# true_param = sf_params['automaticity'] -# else: -# true_param = sf_params[name] -# -# percent_error = 100.0 * (abs(true_param - recovered_param) / true_param) -# records.append((name, mech.name, true_param, recovered_param, percent_error)) -# df = pd.DataFrame(records, columns=['Parameter', 'Component', 'Value', 'Recovered Value', 'Percent Error']) -# print(df) + percent_error = 100.0 * (abs(true_param - recovered_param) / true_param) + records.append((name, mech.name, true_param, recovered_param, percent_error)) +df = pd.DataFrame(records, columns=['Parameter', 'Component', 'Value', 'Recovered Value', 'Percent Error']) +print(df) diff --git a/psyneulink/core/compositions/parameterestimationcomposition.py b/psyneulink/core/compositions/parameterestimationcomposition.py index c3106450170..1e029dd704b 100644 --- a/psyneulink/core/compositions/parameterestimationcomposition.py +++ b/psyneulink/core/compositions/parameterestimationcomposition.py @@ -170,7 +170,7 @@ import psyneulink.core.llvm as pnllvm from psyneulink.core.globals.utilities import ContentAddressableList, convert_to_np_array from psyneulink.core.components.shellclasses import Mechanism -from psyneulink.core.compositions.composition import Composition, CompositionError +from psyneulink.core.compositions.composition import Composition, CompositionError, NodeRole from psyneulink.core.components.ports.port import Port_Base from psyneulink.core.components.mechanisms.modulatory.control.controlmechanism import ControlMechanism from psyneulink.core.components.mechanisms.modulatory.control.optimizationcontrolmechanism import ( @@ -528,15 +528,11 @@ def __init__( self.optimized_parameter_values = [] self.pec_control_mechs = {} - self.pec_control_mechs_input_indices = [] - idx = len(self.model.input_ports) for (pname, mech), values in parameters.items(): self.pec_control_mechs[(pname, mech)] = ControlMechanism(name=f"{pname}_control", control_signals=[(pname, mech)], modulation=OVERRIDE) self.model.add_node(self.pec_control_mechs[(pname, mech)]) - self.pec_control_mechs_input_indices.append(idx) - idx += 1 super().__init__( name=name, @@ -904,15 +900,20 @@ def run(self, *args, **kwargs): # Get the inputs inputs = kwargs.get("inputs", None if not args else args[0]) - # 
Since we are passing fitting\optimazation parameters as inputs we need add them to the inputs + # Since we are passing fitting\optimization parameters as inputs we need add them to the inputs if inputs: + # Run parse input dict on the inputs, this will fill in missing input ports with default values. There + # will be missing input ports because the user doesn't know about the control mechanism's input ports that + # have been added by the PEC for the fitting parameters. + full_inputs, num_trials = self.model._parse_input_dict(inputs, context) + # Add the fitting parameters to the inputs, these will be modulated during fitting or optimization, # we just use a dummy value here for now (the first value in the range of the parameter) dummy_params = [v[0] for v in self.controller.function.fit_param_bounds.values()] - self.controller.set_parameters_in_inputs(dummy_params, inputs) + self.controller.set_parameters_in_inputs(dummy_params, full_inputs) - self.controller.set_pec_inputs_cache(inputs) + self.controller.set_pec_inputs_cache(full_inputs) # We need to set the inputs for the composition during simulation, by assigning the inputs dict passed in # PEC run() to its controller's state_feature_values (this is in order to accomodate multi-trial inputs @@ -1076,6 +1077,8 @@ class Parameters(OptimizationControlMechanism.Parameters): def __init__(self, *args, **kwargs): self._pec_input_values = None + self._pec_control_mech_indices = None + if 'fit_parameters' in kwargs: self.fit_parameters = kwargs['fit_parameters'] del kwargs['fit_parameters'] @@ -1142,11 +1145,16 @@ def set_pec_inputs_cache(self, inputs_dict: dict) -> dict: # Restructure inputs as nd array with each row (outer dim) a trial's worth of inputs # and each item in the row (inner dim) the input to a node (or input_port) for that trial if len(inputs_dict) != self.num_state_input_ports: + + # Since we added control mechanisms to the composition, we need to make sure that we subtract off + # the number of control mechanisms from the number of state input ports in the error message. + num_state_input_ports = self.num_state_input_ports - len(self.fit_parameters) + raise ParameterEstimationCompositionError( f"The dict specified in the `input` arg of " f"{self.composition.name}.run() is badly formatted: " f"the number of entries should equal the number of inputs " - f"to '{model.name}' ({self.num_state_input_ports})." + f"to '{model.name}' ({num_state_input_ports})." ) trial_seqs = list(inputs_dict.values()) num_trials = len(trial_seqs[0]) @@ -1180,6 +1188,13 @@ def set_parameters_in_inputs(self, parameters, inputs): """ + # Get the input indices for the control mechanisms that are used to modulate the fitting parameters + if self._pec_control_mech_indices is None: + self.composition.model._analyze_graph() + input_nodes = [node for node, roles in self.composition.model.nodes_to_roles.items() + if NodeRole.INPUT in roles] + self._pec_control_mech_indices = [input_nodes.index(m) for m in self.composition.pec_control_mechs.values()] + # If the model is in the inputs, then inputs are passed as list of lists and we need to add the fitting # parameters to each trial as a concatenated list. 
if self.composition.model in inputs: @@ -1189,24 +1204,34 @@ def set_parameters_in_inputs(self, parameters, inputs): if type(in_arr) is not np.ndarray: in_arr = convert_to_np_array(in_arr) - # Make sure it is 3D - in_arr = np.atleast_3d(in_arr) + # Make sure it is 3D (only if not ragged) + if in_arr.dtype != object: + in_arr = np.atleast_3d(in_arr) - # If the inputs don't have columns for the fitting parameters, then we need to add them - if in_arr.shape[1] != len(self.composition.input_ports): - num_missing = len(self.composition.input_ports) - in_arr.shape[1] - in_arr = np.hstack((in_arr, np.zeros((in_arr.shape[0], num_missing, 1)))) + # If the inputs don't have columns for the fitting parameters, then we need to add them + if in_arr.shape[1] != len(self.composition.input_ports): + num_missing = len(self.composition.input_ports) - in_arr.shape[1] + in_arr = np.hstack((in_arr, np.zeros((in_arr.shape[0], num_missing, 1)))) j = 0 for i, (pname, mech) in enumerate(self.fit_parameters.keys()): - mech_idx = self.composition.pec_control_mechs_input_indices[i] + mech_idx = self._pec_control_mech_indices[i] if not self.depends_on or (pname, mech) not in self.depends_on: - in_arr[:, mech_idx, 0] = parameters[j] + if in_arr.ndim == 3: + in_arr[:, mech_idx, 0] = parameters[j] + else: + for k in range(in_arr.shape[0]): + in_arr[k, mech_idx] = np.array([parameters[j]]) j += 1 else: for level in self.cond_levels[(pname, mech)]: mask = self.cond_mask[(pname, mech)][level] - in_arr[mask, mech_idx, 0] = parameters[j] + if in_arr.ndim == 3: + in_arr[mask, mech_idx, 0] = parameters[j] + else: + for k in range(in_arr.shape[0]): + if mask[k]: + in_arr[k, mech_idx] = np.array([parameters[j]]) j += 1 inputs[self.composition.model] = in_arr @@ -1214,17 +1239,14 @@ def set_parameters_in_inputs(self, parameters, inputs): # Otherwise, assume the inputs are passed to each mechanism individually. 
Thus, we need to feed the # fitting parameters to the model to their respective control mechanisms else: - - num_trials = len(list(inputs.values())[0]) - j = 0 for i, ((pname, mech), values) in enumerate(self.fit_parameters.items()): control_mech = self.composition.pec_control_mechs[(pname, mech)] if not self.depends_on or (pname, mech) not in self.depends_on: - inputs[control_mech] = np.ones((num_trials, 1)) * parameters[j] + inputs[control_mech] = np.ones_like(inputs[control_mech]) * parameters[j] j += 1 else: - inputs[control_mech] = np.zeros((num_trials, 1)) + inputs[control_mech] = np.zeros_like(inputs[control_mech]) for level in self.cond_levels[(pname, mech)]: mask = self.cond_mask[(pname, mech)][level] inputs[control_mech][mask] = parameters[j] From 444bf17e51151122d93cd22d37407ddd9ec6788c Mon Sep 17 00:00:00 2001 From: David Turner Date: Thu, 6 Jun 2024 12:39:44 -0400 Subject: [PATCH 213/410] Remove old ddm fit script --- Scripts/Debug/ddm/ddm_fit.py | 64 ------------------------------------ 1 file changed, 64 deletions(-) delete mode 100644 Scripts/Debug/ddm/ddm_fit.py diff --git a/Scripts/Debug/ddm/ddm_fit.py b/Scripts/Debug/ddm/ddm_fit.py deleted file mode 100644 index f0084076e11..00000000000 --- a/Scripts/Debug/ddm/ddm_fit.py +++ /dev/null @@ -1,64 +0,0 @@ -#%% -import numpy as np -import pandas as pd -import psyneulink as pnl - -from psyneulink.core.components.functions.fitfunctions import make_likelihood_function, \ - MaxLikelihoodEstimator - -ddm_params = dict(starting_value=0.0, rate=0.3, noise=1.0, - threshold=0.6, non_decision_time=0.15, time_step_size=0.01) - -# Create a simple one mechanism composition containing a DDM in integrator mode. -decision = pnl.DDM(function=pnl.DriftDiffusionIntegrator(**ddm_params), - output_ports=[pnl.DECISION_VARIABLE, pnl.RESPONSE_TIME], - name='DDM') - -comp = pnl.Composition(pathways=decision) - -#%% - -# Lets generate an "experimental" dataset to fit. This is a parameter recovery test -# The input will be 500 trials of the same constant stimulus drift rate of 1 -input = np.ones((500, 1)) -inputs_dict = {decision: input} - -# Run the composition to generate some data to fit -comp.run(inputs=inputs_dict, - num_trials=len(input), - execution_mode=pnl.ExecutionMode.LLVMRun) - -# Store the results of this "experiment" as a numpy array. This should be a -# 2D array of shape (len(input), 2). The first column being a discrete variable -# specifying the upper or lower decision boundary and the second column is the -# reaction time. We will put the data into a pandas DataFrame, this makes its -# easier to specify which columns in the data are categorical or not. -data_to_fit = pd.DataFrame(np.squeeze(np.array(comp.results)), - columns=['decision', 'rt']) -data_to_fit['decision'] = pd.Categorical(data_to_fit['decision']) - -#%% - -# Create a likelihood function from the composition itself, this is done -# using probability density approximation via kernel density estimation. 
-likelihood, param_map = comp.make_likelihood_function( - fit_params=[decision.function.parameters.rate, - decision.function.parameters.starting_value, - decision.function.parameters.non_decision_time], - inputs=inputs_dict, - data_to_fit=data_to_fit, - num_sims_per_trial=100, - combine_trials=True) - -params_to_recover = {k: ddm_params[k] for k in param_map.values()} -print(f"Parameters to recover: {params_to_recover}") -print(f"Data Neg-Log-Likelihood: {-likelihood(**params_to_recover)}") - -mle = MaxLikelihoodEstimator(log_likelihood_function=likelihood, - fit_params_bounds={ - 'rate': (0.0, 1.0), - 'starting_value': (0.0, 0.9), - 'non_decision_time': (0.0, 1.0), - }) - -fit_results = mle.fit(display_iter=True, save_iterations=True) \ No newline at end of file From b02b27d1bdb8b8c032c84ee8fdad2aa403b5b1f5 Mon Sep 17 00:00:00 2001 From: David Turner Date: Thu, 6 Jun 2024 13:38:51 -0400 Subject: [PATCH 214/410] Fixed some input issues. --- .../core/compositions/parameterestimationcomposition.py | 5 ++++- tests/composition/test_parameterestimationcomposition.py | 4 ++-- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/psyneulink/core/compositions/parameterestimationcomposition.py b/psyneulink/core/compositions/parameterestimationcomposition.py index 1e029dd704b..dd682a61ad4 100644 --- a/psyneulink/core/compositions/parameterestimationcomposition.py +++ b/psyneulink/core/compositions/parameterestimationcomposition.py @@ -906,7 +906,10 @@ def run(self, *args, **kwargs): # Run parse input dict on the inputs, this will fill in missing input ports with default values. There # will be missing input ports because the user doesn't know about the control mechanism's input ports that # have been added by the PEC for the fitting parameters. - full_inputs, num_trials = self.model._parse_input_dict(inputs, context) + if self.model in inputs and len(inputs) == 1: + full_inputs = inputs + else: + full_inputs, num_trials = self.model._parse_input_dict(inputs, context) # Add the fitting parameters to the inputs, these will be modulated during fitting or optimization, # we just use a dummy value here for now (the first value in the range of the parameter) diff --git a/tests/composition/test_parameterestimationcomposition.py b/tests/composition/test_parameterestimationcomposition.py index a53ab87959b..e6fa51b0e82 100644 --- a/tests/composition/test_parameterestimationcomposition.py +++ b/tests/composition/test_parameterestimationcomposition.py @@ -49,8 +49,8 @@ def _run_ddm_with_params( input_node_1 = pnl.ProcessingMechanism(size=1) -input_node_2 = pnl.ProcessingMechanism(size=2) -input_node_3 = pnl.ProcessingMechanism(size=3) +input_node_2 = pnl.ProcessingMechanism(size=3) +input_node_3 = pnl.ProcessingMechanism(size=2) output_node = pnl.ProcessingMechanism(size=2) model = pnl.Composition( [{input_node_1, input_node_2, input_node_3}, output_node], name="model" From ebeb8585a84153a590efa8fb32e098995d1b6621 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 14 Jun 2024 12:09:46 -0400 Subject: [PATCH 215/410] requirements: update llvmlite requirement from <0.43 to <0.44 (#2978) Updates the requirements on [llvmlite](https://github.com/numba/llvmlite) to permit the latest version. - [Release notes](https://github.com/numba/llvmlite/releases) - [Commits](https://github.com/numba/llvmlite/compare/v0.42.0...v0.43.0) --- updated-dependencies: - dependency-name: llvmlite dependency-type: direct:production ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index b2b6cba1c07..c4414db3837 100644 --- a/requirements.txt +++ b/requirements.txt @@ -5,7 +5,7 @@ graph-scheduler>=1.2.1, <1.3.0 graphviz<0.21.0 grpcio<1.65.0 leabra-psyneulink<0.3.3 -llvmlite<0.43 +llvmlite<0.44 matplotlib<3.7.6 modeci_mdf<0.5, >=0.4.3; (platform_machine == 'AMD64' or platform_machine == 'x86_64' or platform_machine == 'arm64' or platform_machine == 'aarch64') and platform_python_implementation == 'CPython' and implementation_name == 'cpython' networkx<3.4 From 865a34bcde0da21aeeaa306c1731bcc3d64632c0 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Sun, 16 Jun 2024 13:30:05 -0400 Subject: [PATCH 216/410] ci/ga: Drop python-3.7 macos documentation build The image no longer works with downloaded python3.7 Signed-off-by: Jan Vesely --- .github/workflows/pnl-ci-docs.yml | 5 ----- 1 file changed, 5 deletions(-) diff --git a/.github/workflows/pnl-ci-docs.yml b/.github/workflows/pnl-ci-docs.yml index 042bad3c738..fd2527fa8f5 100644 --- a/.github/workflows/pnl-ci-docs.yml +++ b/.github/workflows/pnl-ci-docs.yml @@ -48,11 +48,6 @@ jobs: - python-version: '3.7' os: macos-latest - include: - - python-version: '3.7' - os: macos-13 - pnl-version: 'head' - outputs: on_master: ${{ steps.on_master.outputs.on-branch }} From a4c1ec238dec7fadaf862efe7be3577b7d84f4d7 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Sun, 16 Jun 2024 14:20:47 -0400 Subject: [PATCH 217/410] tests/learning: Codestyle Signed-off-by: Jan Vesely --- tests/composition/test_learning.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/tests/composition/test_learning.py b/tests/composition/test_learning.py index 69c48556e3b..77e94d70d4a 100644 --- a/tests/composition/test_learning.py +++ b/tests/composition/test_learning.py @@ -1893,6 +1893,7 @@ def test_matrix_spec_and_learning_rate(self): ('runtime+pway+comp', None, .02, .03, .04, [[0.63612349]]), ('learning_mech', .01, .02, .03, .04, [[0.63458688]]), ] + @pytest.mark.parametrize('spec_types', spec_types, ids=[x[0] for x in spec_types]) def test_different_learning_rate_specs_for_comp(self, spec_types): learning_mech_learning_rate = spec_types[1] @@ -1966,7 +1967,7 @@ def test_basic_python_back_prop(self): ('autodiff-pytorch', 'autodiff', pnl.ExecutionMode.PyTorch) ] - @ pytest.mark.pytorch + @pytest.mark.pytorch @pytest.mark.parametrize('test_vars', test_vars, ids=[x[0] for x in test_vars]) def test_backprop_fct_with_2_inputs_to_linear_combination_product(self, test_vars): test_name = test_vars[0] @@ -2028,7 +2029,7 @@ def test_backprop_fct_with_2_inputs_to_linear_combination_product(self, test_var [[0.05066789, 0.05971998]], [[0.06846757, 0.08519742]]] np.testing.assert_allclose(comp.results, expected, atol=1e-8) - @ pytest.mark.pytorch + @pytest.mark.pytorch @pytest.mark.parametrize('test_vars', test_vars, ids=[x[0] for x in test_vars]) def test_backprop_fct_with_3_inputs_to_linear_combination_product(self, test_vars): test_name = test_vars[0] @@ -2223,6 +2224,7 @@ def test_two_output_ports_on_OUTPUT_Node(self): [np.array([0.34065762, 0.40283722, 0.90991679])]] ), ] + # Indices into expected_quantities @pytest.mark.parametrize("expected_quantities", expected_quantities, # Rename L0 for test output as keyword actually = 'difference' From 8b0da14ea3b0eb9e3334ce4873cd66e2b8da2431 Mon Sep 17 00:00:00 2001 From: 
Jan Vesely Date: Sun, 16 Jun 2024 14:23:04 -0400 Subject: [PATCH 218/410] DDM: Codestyle Signed-off-by: Jan Vesely --- .../components/mechanisms/processing/integrator/ddm.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/psyneulink/library/components/mechanisms/processing/integrator/ddm.py b/psyneulink/library/components/mechanisms/processing/integrator/ddm.py index c27e19fb8c5..de0f9e40642 100644 --- a/psyneulink/library/components/mechanisms/processing/integrator/ddm.py +++ b/psyneulink/library/components/mechanisms/processing/integrator/ddm.py @@ -822,8 +822,8 @@ def __init__(self, # v[0]=self.value[self.DECISION_VARIABLE_INDEX] # v[1]=self.parameter_ports[THRESHOLD] # v[2]=self.input_ports[0].variable - FUNCTION: lambda v: [float(v[2][0][0]), 0] \ - if (v[1] - v[0]) < (v[1] + v[0]) \ + FUNCTION: lambda v: [float(v[2][0][0]), 0] + if (v[1] - v[0]) < (v[1] + v[0]) else [0, float(v[2][0][1])] } ]) From ed67cbcfb3b013bd73e18e343cef93837390096f Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Tue, 18 Jun 2024 16:01:47 -0400 Subject: [PATCH 219/410] treewide: Qualify calls to itertools.product/combinations Improves readability. Signed-off-by: Jan Vesely --- Scripts/Models (Under Development)/bi-percepts.py | 4 ++-- .../components/functions/stateful/memoryfunctions.py | 8 ++++---- psyneulink/library/compositions/regressioncfa.py | 11 ++++++----- tests/models/test_bi_percepts.py | 7 +++---- 4 files changed, 15 insertions(+), 15 deletions(-) diff --git a/Scripts/Models (Under Development)/bi-percepts.py b/Scripts/Models (Under Development)/bi-percepts.py index 7741205a9ed..1bbbb8e411f 100644 --- a/Scripts/Models (Under Development)/bi-percepts.py +++ b/Scripts/Models (Under Development)/bi-percepts.py @@ -2,9 +2,9 @@ bistable percepts """ +import itertools import numpy as np import psyneulink as pnl -from itertools import product import matplotlib matplotlib.use('TkAgg') import matplotlib.pyplot as plt @@ -51,7 +51,7 @@ def get_node(percept, node_id): print('Forming connetions: ') # within-percept excitation for percept in ALL_PERCEPTS: - for node_i, node_j in product(node_dict[percept], node_dict[percept]): + for node_i, node_j in itertools.product(node_dict[percept], node_dict[percept]): if node_i is not node_j: print(f'\t{node_i} -> excite -> {node_j}') bp_comp.add_linear_processing_pathway( diff --git a/psyneulink/core/components/functions/stateful/memoryfunctions.py b/psyneulink/core/components/functions/stateful/memoryfunctions.py index 7c7f307897d..e421f55f386 100644 --- a/psyneulink/core/components/functions/stateful/memoryfunctions.py +++ b/psyneulink/core/components/functions/stateful/memoryfunctions.py @@ -24,10 +24,10 @@ """ import copy +import itertools import numbers import warnings from collections import deque -from itertools import combinations, product from psyneulink._typing import Callable, List, Literal @@ -1309,7 +1309,7 @@ def _validate(self, context=None): field_wts_homog = np.full(len(test_var),1).tolist() field_wts_heterog = np.full(len(test_var),range(0,len(test_var))).tolist() - for granularity, field_weights in product(['full_entry', 'per_field'],[field_wts_homog, field_wts_heterog]): + for granularity, field_weights in itertools.product(['full_entry', 'per_field'],[field_wts_homog, field_wts_heterog]): try: distance_result = self._get_distance(test_var, test_var, field_weights, granularity, context=context) except: @@ -1594,7 +1594,7 @@ def get_memory(self, cue:Union[list, np.ndarray], field_weights=None, context=No # Check for any duplicate 
entries in matches and, if they are not allowed, return zeros if (not self.duplicate_entries_allowed and any(self._is_duplicate(_memory[i],_memory[j], field_weights, context) - for i, j in combinations(indices_of_selected_items, 2))): + for i, j in itertools.combinations(indices_of_selected_items, 2))): warnings.warn(f"More than one entry matched cue ({cue}) in memory for {self.name} " f"{'of ' + self.owner.name if self.owner else ''} even though " f"{repr('duplicate_entries_allowed')} is False; zeros returned as retrieved item.") @@ -1860,7 +1860,7 @@ def delete_from_memory(self, existing_memory = self.parameters.previous_value._get(context) pruned_memory = copy_parameter_value(existing_memory) - for entry, memory in product(entries, existing_memory): + for entry, memory in itertools.product(entries, existing_memory): if (np.all(entry == memory) or fields and all(entry[f] == memory[f] for f in fields)): pruned_memory = np.delete(pruned_memory, pruned_memory.tolist().index(memory.tolist()), axis=0) diff --git a/psyneulink/library/compositions/regressioncfa.py b/psyneulink/library/compositions/regressioncfa.py index f8924f62854..be8cbd4dc63 100644 --- a/psyneulink/library/compositions/regressioncfa.py +++ b/psyneulink/library/compositions/regressioncfa.py @@ -74,13 +74,14 @@ --------------- """ + +import itertools import numpy as np from beartype import beartype from psyneulink._typing import Optional, Union from enum import Enum -from itertools import product from psyneulink.core.components.functions.nonstateful.learningfunctions import BayesGLM from psyneulink.core.components.ports.modulatorysignals.controlsignal import ControlSignal @@ -543,7 +544,7 @@ def error_for_too_few_terms(term): self.terms[FC] = fc = np.tensordot(f, c, axes=0) self.num[FC] = len(fc.reshape(-1)) self.num_elems[FC] = len(fc.reshape(-1)) - self.labels[FC] = list(product(self.labels[F], self.labels[C])) + self.labels[FC] = list(itertools.product(self.labels[F], self.labels[C])) # feature-feature-control interactions if any(term in specified_terms for term in [PV.FFC, PV.FFCC]): @@ -552,7 +553,7 @@ def error_for_too_few_terms(term): self.terms[FFC] = ffc = np.tensordot(ff, c, axes=0) self.num[FFC] = len(ffc.reshape(-1)) self.num_elems[FFC] = len(ffc.reshape(-1)) - self.labels[FFC] = list(product(self.labels[FF], self.labels[C])) + self.labels[FFC] = list(itertools.product(self.labels[FF], self.labels[C])) # feature-control-control interactions if any(term in specified_terms for term in [PV.FCC, PV.FFCC]): @@ -561,7 +562,7 @@ def error_for_too_few_terms(term): self.terms[FCC] = fcc = np.tensordot(f, cc, axes=0) self.num[FCC] = len(fcc.reshape(-1)) self.num_elems[FCC] = len(fcc.reshape(-1)) - self.labels[FCC] = list(product(self.labels[F], self.labels[CC])) + self.labels[FCC] = list(itertools.product(self.labels[F], self.labels[CC])) # feature-feature-control-control interactions if PV.FFCC in specified_terms: @@ -572,7 +573,7 @@ def error_for_too_few_terms(term): self.terms[FFCC] = ffcc = np.tensordot(ff, cc, axes=0) self.num[FFCC] = len(ffcc.reshape(-1)) self.num_elems[FFCC] = len(ffcc.reshape(-1)) - self.labels[FFCC] = list(product(self.labels[FF], self.labels[CC])) + self.labels[FFCC] = list(itertools.product(self.labels[FF], self.labels[CC])) # Construct "flattened" vector based on specified terms, and assign indices (as slices) i=0 diff --git a/tests/models/test_bi_percepts.py b/tests/models/test_bi_percepts.py index 0ca79273600..1d4149cbe38 100644 --- a/tests/models/test_bi_percepts.py +++ 
b/tests/models/test_bi_percepts.py @@ -5,11 +5,10 @@ """ - +import itertools import numpy as np import psyneulink as pnl import pytest -from itertools import product from psyneulink.core.compositions.report import ReportOutput @@ -69,7 +68,7 @@ def get_node(percept, node_id): # MODIFIED 4/4/20 OLD: PASSES IN PYTHON, BUT NEEDS RESULTS B BELOW # within-percept excitation for percept in ALL_PERCEPTS: - for node_i, node_j in product(node_dict[percept], node_dict[percept]): + for node_i, node_j in itertools.product(node_dict[percept], node_dict[percept]): if node_i is not node_j: bp_comp.add_linear_processing_pathway( pathway=(node_i, [excit_level], node_j)) @@ -100,7 +99,7 @@ def get_node(percept, node_id): # # MODIFIED 4/4/20 NEW: [PASSES ALL TESTS, BUT NEEDS RSEULTS A BELOW] # # within-percept excitation # for percept in ALL_PERCEPTS: - # for node_i, node_j in product(node_dict[percept], node_dict[percept]): + # for node_i, node_j in itertools.product(node_dict[percept], node_dict[percept]): # if node_i is not node_j: # bp_comp.add_linear_processing_pathway( # pathway=((node_i, [pnl.NodeRole.INPUT, pnl.NodeRole.OUTPUT]), [excit_level], (node_j, [pnl.NodeRole.INPUT, From 0d881b44f94dc61ee09116c67d0cc4cae22f3801 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Tue, 18 Jun 2024 15:59:36 -0400 Subject: [PATCH 220/410] tests: Use numpy prod instead of product The latter is deprecated since Numpy 1.25.0 Signed-off-by: Jan Vesely --- tests/functions/test_combination.py | 6 +++--- tests/functions/test_integrator.py | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/functions/test_combination.py b/tests/functions/test_combination.py index c0bd9050d32..cf83c64580e 100644 --- a/tests/functions/test_combination.py +++ b/tests/functions/test_combination.py @@ -229,7 +229,7 @@ def test_reduce_function(variable, operation, exponents, weights, scale, offset, if operation == pnl.SUM: expected = np.sum(tmp, axis=1) * scale + offset if operation == pnl.PRODUCT: - expected = np.product(tmp, axis=1) * scale + offset + expected = np.prod(tmp, axis=1) * scale + offset np.testing.assert_allclose(res, expected, rtol=1e-5, atol=1e-8) @@ -267,7 +267,7 @@ def test_linear_combination_function(variable, operation, exponents, weights, sc if operation == pnl.SUM: expected = np.sum(tmp, axis=0) * scale + offset if operation == pnl.PRODUCT: - expected = np.product(tmp, axis=0) * scale + offset + expected = np.prod(tmp, axis=0) * scale + offset np.testing.assert_allclose(res, expected, rtol=1e-5, atol=1e-8) @@ -292,7 +292,7 @@ def test_linear_combination_function_in_mechanism(operation, input, input_ports, if operation == pnl.SUM: expected = np.sum(input, axis=0) * scale + offset if operation == pnl.PRODUCT: - expected = np.product(input, axis=0) * scale + offset + expected = np.prod(input, axis=0) * scale + offset # expected is always 1d vs 2d return value res np.testing.assert_allclose(res[0], expected) diff --git a/tests/functions/test_integrator.py b/tests/functions/test_integrator.py index 42381edb0de..0577cf2f6e4 100644 --- a/tests/functions/test_integrator.py +++ b/tests/functions/test_integrator.py @@ -272,7 +272,7 @@ def spherical_drift(n_steps=3, dim=5, var=0, mean=.1): def convert_spherical_to_angular(dim, ros): ct = np.zeros(dim) ct[0] = np.cos(ros[0]) - prod = np.product([np.sin(ros[k]) for k in range(1, dim - 1)]) + prod = np.prod([np.sin(ros[k]) for k in range(1, dim - 1)]) n_prod = prod for j in range(dim - 2): n_prod /= np.sin(ros[j + 1]) From 62521a8f13779ef3a74e6f64820b6b04cc2d7607 
Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Sat, 15 Jun 2024 01:15:03 -0400 Subject: [PATCH 221/410] TransferMechanism, Component: Disable special setter logic when synchronizing compiled params Synchronization with compiled params should only replace the current value with a shared memory numpy array. The new numpy array should also have the same value as the currently set one. Do not propagate the new value. Propagating the value could result in changes to other parameters or overuse of shared memory arrays resulting in parameter value corruption. Add explicit checks to test with custom 'has_initializers' setting. The test currently passes in compiled mode only because scalar values do not share memory with the compiled structure so the corrupted parameters are not used in compiled execution. Using shared memory for scalar values would lead to intermittent failures depending on the order in which 'integrator_mode' and 'has_initializers' are synchronized. Signed-off-by: Jan Vesely --- psyneulink/core/components/component.py | 4 ++-- .../components/mechanisms/processing/transfermechanism.py | 7 ++++--- tests/mechanisms/test_recurrent_transfer_mechanism.py | 2 ++ 3 files changed, 8 insertions(+), 5 deletions(-) diff --git a/psyneulink/core/components/component.py b/psyneulink/core/components/component.py index 42207832395..6a837f5941b 100644 --- a/psyneulink/core/components/component.py +++ b/psyneulink/core/components/component.py @@ -694,11 +694,11 @@ def setter(self, value): return property(getter).setter(setter) -def _has_initializers_setter(value, owning_component=None, context=None): +def _has_initializers_setter(value, owning_component=None, context=None, *, compilation_sync=False): """ Assign has_initializers status to Component and any of its owners up the hierarchy. 
""" - if value: + if value and not compilation_sync: # only update owner's attribute if setting to True, because there may be # other children that have initializers try: diff --git a/psyneulink/core/components/mechanisms/processing/transfermechanism.py b/psyneulink/core/components/mechanisms/processing/transfermechanism.py index e2dedd3d303..3e7494e53a0 100644 --- a/psyneulink/core/components/mechanisms/processing/transfermechanism.py +++ b/psyneulink/core/components/mechanisms/processing/transfermechanism.py @@ -888,8 +888,8 @@ class TransferError(MechanismError): pass -def _integrator_mode_setter(value, owning_component=None, context=None): - if value: +def _integrator_mode_setter(value, owning_component=None, context=None, *, compilation_sync=False): + if value and not compilation_sync: if not owning_component.parameters.integrator_mode._get(context): # when first creating parameters, integrator_function is not # instantiated yet @@ -908,7 +908,8 @@ def _integrator_mode_setter(value, owning_component=None, context=None): elif owning_component.on_resume_integrator_mode == RESET: owning_component.reset(force=True, context=context) - owning_component.parameters.has_initializers._set(value, context) + if not compilation_sync: + owning_component.parameters.has_initializers._set(value, context) return value diff --git a/tests/mechanisms/test_recurrent_transfer_mechanism.py b/tests/mechanisms/test_recurrent_transfer_mechanism.py index 38484d1227e..469fed705a6 100644 --- a/tests/mechanisms/test_recurrent_transfer_mechanism.py +++ b/tests/mechanisms/test_recurrent_transfer_mechanism.py @@ -1135,6 +1135,8 @@ def test_reset_stateful_function_when_has_initializers_composition(self, comp_mo C.run(inputs={I1: [[1.0]], I2: [[1.0]]}, num_trials=7, execution_mode=comp_mode) np.testing.assert_allclose(exp, C.results) + assert I1.has_initializers == has_initializers1 + assert I2.has_initializers == has_initializers2 @pytest.mark.composition @pytest.mark.integrator_mechanism From d33d6c4f0cf0c3e03580a23f38394c8171bbd6bb Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Thu, 13 Jun 2024 18:16:00 -0400 Subject: [PATCH 222/410] llvm/execution: Covert compiled structures to numpy before sync to PNL params Signed-off-by: Jan Vesely --- psyneulink/core/llvm/execution.py | 35 ++++++++++++++++++++----------- 1 file changed, 23 insertions(+), 12 deletions(-) diff --git a/psyneulink/core/llvm/execution.py b/psyneulink/core/llvm/execution.py index 5fe34d8f98c..2be72f73ba6 100644 --- a/psyneulink/core/llvm/execution.py +++ b/psyneulink/core/llvm/execution.py @@ -114,33 +114,44 @@ def _get_compilation_param(self, name, init_method, arg): "for", self._obj.name) if len(self._execution_contexts) == 1: + if name == '_state': + numpy_struct = np.ctypeslib.as_array(struct) + assert numpy_struct.nbytes == ctypes.sizeof(struct), \ + "Size mismatch, numpy: {} vs. ctypes:{}".format(numpy_struct.nbytes, ctypes.sizeof(struct)) self._copy_params_to_pnl(self._execution_contexts[0], self._obj, - self._state_struct, + numpy_struct, "llvm_state_ids") elif name == '_param': + numpy_struct = np.ctypeslib.as_array(struct) + assert numpy_struct.nbytes == ctypes.sizeof(struct), \ + "Size mismatch, numpy: {} vs. 
ctypes:{}".format(numpy_struct.nbytes, ctypes.sizeof(struct)) self._copy_params_to_pnl(self._execution_contexts[0], self._obj, - self._param_struct, + numpy_struct, "llvm_param_ids") return struct def _copy_params_to_pnl(self, context, component, params, ids:str): - for idx, attribute in enumerate(getattr(component, ids)): - compiled_attribute_param = getattr(params, params._fields_[idx][0]) - compiled_attribute_param_ctype = params._fields_[idx][1] + assert len(params.dtype.names) == len(getattr(component, ids)) + + for numpy_name, attribute in zip(params.dtype.names, getattr(component, ids)): + + numpy_field = params[numpy_name] + assert numpy_field.base is params or numpy_field.base is params.base def _enumerate_recurse(elements): - for element_id, element in enumerate(elements): - element_params = getattr(compiled_attribute_param, - compiled_attribute_param._fields_[element_id][0]) + for numpy_element_name, element in zip(numpy_field.dtype.names, elements): + numpy_element = numpy_field[numpy_element_name] + assert numpy_element.base is numpy_field.base + self._copy_params_to_pnl(context=context, component=element, - params=element_params, + params=numpy_element, ids=ids) # Handle custom compiled-only structures by name @@ -187,7 +198,7 @@ def _enumerate_recurse(elements): if hasattr(pnl_value, 'parameters'): self._copy_params_to_pnl(context=context, component=pnl_value, - params=compiled_attribute_param, + params=numpy_field, ids=ids) elif attribute == "input_ports" or attribute == "output_ports": @@ -197,10 +208,10 @@ def _enumerate_recurse(elements): else: # Replace empty structures with None - if ctypes.sizeof(compiled_attribute_param_ctype) == 0: + if numpy_field.nbytes == 0: value = None else: - value = np.ctypeslib.as_array(compiled_attribute_param) + value = numpy_field # Stateful parameters include history, get the most recent value if "state" in ids: From 0602c0b2c8e326a6374e5ebd54754b3ffb7025ff Mon Sep 17 00:00:00 2001 From: David Turner Date: Fri, 21 Jun 2024 13:32:48 -0400 Subject: [PATCH 223/410] Fix some input tests for PEC. --- .../parameterestimationcomposition.py | 70 +++++++++++++------ 1 file changed, 49 insertions(+), 21 deletions(-) diff --git a/psyneulink/core/compositions/parameterestimationcomposition.py b/psyneulink/core/compositions/parameterestimationcomposition.py index dd682a61ad4..0f3e82a9031 100644 --- a/psyneulink/core/compositions/parameterestimationcomposition.py +++ b/psyneulink/core/compositions/parameterestimationcomposition.py @@ -234,6 +234,13 @@ class ParameterEstimationComposition(Composition): of each entry specifies a parameter to estimate, and its value is a list values to sample for that parameter. + depends_on : + A dictionary that specifies which parameters depend on a condition. The keys of the dictionary are the + specified identically to the keys of the parameters dictionary. The values are a string that specifies a + column in the data that the parameter depends on. The values of this column must be categorical. Each unique + value will represent a condition and will result in a separate parameter being estimated for it. The number of + unique values should be small because each unique value will result in a separate parameter being estimated. 
+ outcome_variables : specifies the `OUTPUT` `Nodes ` of the `model `, the `values ` of which are used to evaluate the @@ -903,6 +910,11 @@ def run(self, *args, **kwargs): # Since we are passing fitting\optimization parameters as inputs we need add them to the inputs if inputs: + # Don't check inputs if we are within a call to evaluate_agent_rep, the inputs have already been checked and + # cached on the PEC controller. + if ContextFlags.PROCESSING not in context.flags: + self.controller.check_pec_inputs(inputs) + # Run parse input dict on the inputs, this will fill in missing input ports with default values. There # will be missing input ports because the user doesn't know about the control mechanism's input ports that # have been added by the PEC for the fitting parameters. @@ -1115,43 +1127,35 @@ def _instantiate_output_ports(self, context=None): self.parameters.output_ports._set(output_ports, context) self._create_randomization_control_signal(context) - def set_pec_inputs_cache(self, inputs_dict: dict) -> dict: - """Cache input values passed to the last call of run for the composition that this OCM controls. - This method is used by the ParamterEstimationComposition in its run() method. - If inputs_dict is of the form specified by ParemeterEstimationComposition.get_input_format() - ({model: inputs_array}, in which each item in the outer dimension of inputs_array is a trial's - worth of inputs, with one input for each of the pec_ocm.state_input_ports) then inputs_dict is - simply assigned to _pec_input_values. - If inputs_dict is formatted as the input to model (i.e., of the form model.get_input_format(), - it is refactored to the format required as input to the ParemeterEstimationComposition described above. - """ + def check_pec_inputs(self, inputs_dict: dict): model = self.composition.model + # Since we added control mechanisms to the composition, we need to make sure that we subtract off + # the number of control mechanisms from the number of state input ports in the error message. + num_state_input_ports = self.num_state_input_ports - len(self.fit_parameters) + if not inputs_dict: pass # If inputs_dict has model as its only entry, then check that its format is OK to pass to pec.run() elif len(inputs_dict) == 1 and model in inputs_dict: if not all( - len(trial) == self.num_state_input_ports for trial in inputs_dict[model] + len(trial) == num_state_input_ports for trial in inputs_dict[model] ): raise ParameterEstimationCompositionError( f"The array in the dict specified for the 'inputs' arg of " f"{self.composition.name}.run() is badly formatted: " f"the length of each item in the outer dimension (a trial's " f"worth of inputs) must be equal to the number of inputs to " - f"'{model.name}' ({self.num_state_input_ports})." + f"'{model.name}' ({num_state_input_ports})." ) else: + # Restructure inputs as nd array with each row (outer dim) a trial's worth of inputs # and each item in the row (inner dim) the input to a node (or input_port) for that trial - if len(inputs_dict) != self.num_state_input_ports: - - # Since we added control mechanisms to the composition, we need to make sure that we subtract off - # the number of control mechanisms from the number of state input ports in the error message. 
- num_state_input_ports = self.num_state_input_ports - len(self.fit_parameters) + if len(inputs_dict) != num_state_input_ports: raise ParameterEstimationCompositionError( f"The dict specified in the `input` arg of " @@ -1161,7 +1165,6 @@ def set_pec_inputs_cache(self, inputs_dict: dict) -> dict: ) trial_seqs = list(inputs_dict.values()) num_trials = len(trial_seqs[0]) - input_values = [[] for _ in range(num_trials)] for trial in range(num_trials): for trial_seq in trial_seqs: if len(trial_seq) != num_trials: @@ -1170,12 +1173,34 @@ def set_pec_inputs_cache(self, inputs_dict: dict) -> dict: f"ParameterEstimationMechanism.run() is badly formatted: " f"every entry must have the same number of inputs." ) - # input_values[trial].append(np.array([trial_seq[trial].tolist()])) + + def set_pec_inputs_cache(self, inputs_dict: dict) -> dict: + """Cache input values passed to the last call of run for the composition that this OCM controls. + This method is used by the ParamterEstimationComposition in its run() method. + If inputs_dict is of the form specified by ParemeterEstimationComposition.get_input_format() + ({model: inputs_array}, in which each item in the outer dimension of inputs_array is a trial's + worth of inputs, with one input for each of the pec_ocm.state_input_ports) then inputs_dict is + simply assigned to _pec_input_values. + If inputs_dict is formatted as the input to model (i.e., of the form model.get_input_format(), + it is refactored to the format required as input to the ParemeterEstimationComposition described above. + """ + + model = self.composition.model + + if not inputs_dict or (len(inputs_dict) == 1 and model in inputs_dict): + pass + else: + trial_seqs = list(inputs_dict.values()) + num_trials = len(trial_seqs[0]) + input_values = [[] for _ in range(num_trials)] + for trial in range(num_trials): + for trial_seq in trial_seqs: input_values[trial].extend(trial_seq[trial]) inputs_dict = {model: input_values} self._pec_input_values = inputs_dict + def set_parameters_in_inputs(self, parameters, inputs): """ Add the fitting parameters to the inputs passed to the model for each trial. Originally, the PEC used the @@ -1212,9 +1237,12 @@ def set_parameters_in_inputs(self, parameters, inputs): in_arr = np.atleast_3d(in_arr) # If the inputs don't have columns for the fitting parameters, then we need to add them - if in_arr.shape[1] != len(self.composition.input_ports): - num_missing = len(self.composition.input_ports) - in_arr.shape[1] + if in_arr.shape[1] != len(self.composition.input_ports): + num_missing = len(self.composition.input_ports) - in_arr.shape[1] + if in_arr.ndim == 3: in_arr = np.hstack((in_arr, np.zeros((in_arr.shape[0], num_missing, 1)))) + elif in_arr.ndim == 2: + in_arr = np.hstack((in_arr, np.zeros((in_arr.shape[0], num_missing)))) j = 0 for i, (pname, mech) in enumerate(self.fit_parameters.keys()): From af0f56661ae15dde0aeb38216966aa1f1e058026 Mon Sep 17 00:00:00 2001 From: David Turner Date: Fri, 21 Jun 2024 13:33:07 -0400 Subject: [PATCH 224/410] Fix some bad formatting in rich console. 
--- .../components/functions/nonstateful/fitfunctions.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/psyneulink/core/components/functions/nonstateful/fitfunctions.py b/psyneulink/core/components/functions/nonstateful/fitfunctions.py index 6c74cf9d456..15bf42dd555 100644 --- a/psyneulink/core/components/functions/nonstateful/fitfunctions.py +++ b/psyneulink/core/components/functions/nonstateful/fitfunctions.py @@ -37,6 +37,8 @@ import warnings import logging +from rich.markup import escape + logger = logging.getLogger(__name__) __all__ = ["PECOptimizationFunction", "BadLikelihoodWarning", "PECObjectiveFuncWarning"] @@ -56,7 +58,7 @@ def get_param_str(params): """ return ", ".join( - f"{name.replace('PARAMETER_CIM_', '')}={value:.5f}" + f"[dodger_blue1]{escape(name.replace('PARAMETER_CIM_', ''))}[/dodger_blue1]=[spring_green1]{value:.5f}[/spring_green1]" for name, value in params.items() ) @@ -607,13 +609,15 @@ def objfunc_wrapper(x): f"{get_param_str(params)}, {self.obj_func_desc_str}: {obj_val}, " f"Eval-Time: {elapsed} (seconds)", style="bold red", + highlight=False, ) # Clear the warnings warns.clear() else: progress.console.print( f"{get_param_str(params)}, {self.obj_func_desc_str}: {obj_val}, " - f"Eval-Time: {elapsed} (seconds)" + f"Eval-Time: {elapsed} (seconds)", + highlight=False, ) # Certain algorithms like differential evolution evaluate the objective function multiple times per @@ -856,7 +860,7 @@ def fit_param_names(self) -> List[str]: for param_name, mech in self.owner.fit_parameters.keys(): if (param_name, mech) in self.owner.cond_levels: for level in self.owner.cond_levels[(param_name, mech)]: - names.append(f"{mech.name}.{param_name}<{level}>") + names.append(f"{mech.name}.{param_name}[{level}]") else: names.append(f"{mech.name}.{param_name}") From 3b1743974163898ca36df1da0fe681618081da79 Mon Sep 17 00:00:00 2001 From: David Turner Date: Fri, 21 Jun 2024 14:52:42 -0400 Subject: [PATCH 225/410] Fix PEC conditional test. The PEC conditional test was still marked skip. Now it is fixed and passing. I also lowered num_estimates on a lot of the PEC tests to make them faster. These are a low as I can go for the most part without having to change their expected test values. --- .../test_parameterestimationcomposition.py | 61 +++++++++++++++---- 1 file changed, 50 insertions(+), 11 deletions(-) diff --git a/tests/composition/test_parameterestimationcomposition.py b/tests/composition/test_parameterestimationcomposition.py index e6fa51b0e82..d23ef40bbf9 100644 --- a/tests/composition/test_parameterestimationcomposition.py +++ b/tests/composition/test_parameterestimationcomposition.py @@ -192,7 +192,7 @@ def test_parameter_optimization_ddm(func_mode, opt_method, optuna_kwargs, result # High-level parameters the impact performance of the test num_trials = 50 time_step_size = 0.01 - num_estimates = 400 + num_estimates = 300 ddm_params = dict( starting_value=0.0, @@ -289,17 +289,55 @@ def reward_rate(sim_data): if result is not None: np.testing.assert_allclose(list(pec.optimized_parameter_values.values()), result) -@pytest.mark.skip def test_parameter_estimation_ddm_cond(func_mode): + if func_mode == "Python": pytest.skip( "Test not yet implemented for Python. Parameter estimate is too slow." 
) + def _run_ddm_with_params( + starting_value, + rate, + noise, + threshold, + non_decision_time, + time_step_size, + trial_inputs, + ): + """Create a composition with DDM and run it with the given parameters.""" + + # Create a simple one mechanism composition containing a DDM in integrator mode. + decision = pnl.DDM( + function=pnl.DriftDiffusionIntegrator( + starting_value=starting_value, + rate=rate, + noise=noise, + threshold=threshold, + non_decision_time=non_decision_time, + time_step_size=time_step_size, + ), + output_ports=[pnl.DECISION_OUTCOME, pnl.RESPONSE_TIME], + name="DDM", + ) + + comp = pnl.Composition(pathways=decision) + + # Run the composition to generate some data to fit + comp.run(inputs={decision: trial_inputs}) + results = comp.results + + data_to_fit = pd.DataFrame( + np.squeeze(np.array(results)), columns=["decision", "response_time"] + ) + data_to_fit["decision"] = data_to_fit["decision"].astype("category") + + return comp, data_to_fit + # High-level parameters the impact performance of the test num_trials = 50 time_step_size = 0.01 - num_estimates = 400 + num_estimates = 20 # Let's generate an "experimental" dataset to fit. This is a parameter recovery test # Lets make 10% of the trials have a positive stimulus drift rate, and the other 90% @@ -327,19 +365,19 @@ def test_parameter_estimation_ddm_cond(func_mode): # We will generate a dataset that comprises two different conditions. Each condition will have a different # threshold. params_cond1 = dict( - threshold=0.7, + threshold=0.7, ) params_cond2 = dict( - threshold=0.3, + threshold=0.3, ) comp, data_cond1 = _run_ddm_with_params(**{**ddm_params, **params_cond1}, trial_inputs=trial_inputs) _, data_cond2 = _run_ddm_with_params(**{**ddm_params, **params_cond2}, trial_inputs=trial_inputs) # Combine the data from the two conditions - data_cond1['condition'] = 'cond1' - data_cond2['condition'] = 'cond2' + data_cond1['condition'] = 'cond_t=0.7' + data_cond2['condition'] = 'cond_t=0.3' data_to_fit = pd.concat([data_cond1, data_cond2]) # Add the inputs as columns to the data temporarily so we can shuffle the data and shuffle the inputs together @@ -368,20 +406,21 @@ def test_parameter_estimation_ddm_cond(func_mode): comp.nodes['DDM'].output_ports[pnl.RESPONSE_TIME], ], data=data_to_fit, - optimization_function=PECOptimizationFunction( + optimization_function=pnl.PECOptimizationFunction( method="differential_evolution", max_iterations=1, ), num_estimates=num_estimates, initial_seed=42, ) - pec.controller.parameters.comp_execution_mode.set(func_mode) + pec.controller.parameters.comp_execution_mode.set("LLVM") pec.controller.function.parameters.save_values.set(True) pec.run(inputs={comp: trial_inputs}) + np.testing.assert_allclose( list(pec.optimized_parameter_values.values()), - [0.2227273962084888, 0.5976130662377002, 0.1227723651473831], + [0.13574824786818707, 0.04513454296326741, 0.49615574384553446, 0.8985587363124521] ) @@ -402,7 +441,7 @@ def test_parameter_estimation_ddm_mle(func_mode, likelihood_include_mask): # High-level parameters the impact performance of the test num_trials = 50 time_step_size = 0.01 - num_estimates = 1000 + num_estimates = 200 ddm_params = dict( starting_value=0.0, From 06d011f135cd07fdc3eb62c115f6c9ef46784b3d Mon Sep 17 00:00:00 2001 From: David Turner Date: Fri, 21 Jun 2024 14:53:04 -0400 Subject: [PATCH 226/410] Modify the example DMM pec conditional fit. 
--- Scripts/Debug/ddm/ddm_cond_pec_fit.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/Scripts/Debug/ddm/ddm_cond_pec_fit.py b/Scripts/Debug/ddm/ddm_cond_pec_fit.py index 315fdf8e1cb..6e48966c108 100644 --- a/Scripts/Debug/ddm/ddm_cond_pec_fit.py +++ b/Scripts/Debug/ddm/ddm_cond_pec_fit.py @@ -44,7 +44,7 @@ def _run_ddm_with_params( # High-level parameters the impact performance of the test num_trials = 50 time_step_size = 0.01 -num_estimates = 10000 +num_estimates = 1000 # Let's generate an "experimental" dataset to fit. This is a parameter recovery test # Lets make 10% of the trials have a positive stimulus drift rate, and the other 90% @@ -114,7 +114,7 @@ def _run_ddm_with_params( ], data=data_to_fit, optimization_function=pnl.PECOptimizationFunction( - method="differential_evolution", max_iterations=1, + method="differential_evolution", ), num_estimates=num_estimates, initial_seed=42, @@ -128,8 +128,8 @@ def _run_ddm_with_params( params = { 'DDM.rate': ddm_params['rate'], 'DDM.non_decision_time': ddm_params['non_decision_time'], - 'DDM.threshold': 0.3, - 'DDM.threshold': 0.7 + 'DDM.threshold[cond_t=0.3]': 0.3, + 'DDM.threshold[cond_t=0.7]': 0.7 } for i, (name, recovered_param) in enumerate(pec.optimized_parameter_values.items()): percent_error = 100.0 * (abs(params[name] - recovered_param) / params[name]) From b8da1edaddaaccda806d754b59ff36a4bf206f57 Mon Sep 17 00:00:00 2001 From: David Turner Date: Fri, 21 Jun 2024 15:00:18 -0400 Subject: [PATCH 227/410] CodeQL and pycodestyle fixes. --- .../Debug/stability_flexibility/stability_flexibility_cond.py | 1 - .../core/components/functions/nonstateful/fitfunctions.py | 1 - psyneulink/core/compositions/parameterestimationcomposition.py | 3 +-- 3 files changed, 1 insertion(+), 4 deletions(-) diff --git a/Scripts/Debug/stability_flexibility/stability_flexibility_cond.py b/Scripts/Debug/stability_flexibility/stability_flexibility_cond.py index 39f65cc2671..03377f5f780 100644 --- a/Scripts/Debug/stability_flexibility/stability_flexibility_cond.py +++ b/Scripts/Debug/stability_flexibility/stability_flexibility_cond.py @@ -4,7 +4,6 @@ import psyneulink as pnl import pandas as pd -import psyneulink.core.llvm as pnllvm from psyneulink.core.globals.utilities import set_global_seed sys.path.append(".") diff --git a/psyneulink/core/components/functions/nonstateful/fitfunctions.py b/psyneulink/core/components/functions/nonstateful/fitfunctions.py index 15bf42dd555..3c1a76f582c 100644 --- a/psyneulink/core/components/functions/nonstateful/fitfunctions.py +++ b/psyneulink/core/components/functions/nonstateful/fitfunctions.py @@ -1,5 +1,4 @@ import copy -import re import optuna.samplers from fastkde import fastKDE diff --git a/psyneulink/core/compositions/parameterestimationcomposition.py b/psyneulink/core/compositions/parameterestimationcomposition.py index 0f3e82a9031..b3f05477dbb 100644 --- a/psyneulink/core/compositions/parameterestimationcomposition.py +++ b/psyneulink/core/compositions/parameterestimationcomposition.py @@ -1315,7 +1315,7 @@ def _execute(self, variable=None, context=None, runtime_params=None)->np.ndarray try: alt_controller = context.composition.controller except AttributeError: - pass + alt_controller = None self.agent_rep._initialize_as_agent_rep( frozen_context, base_context=context, alt_controller=alt_controller @@ -1344,4 +1344,3 @@ def _execute(self, variable=None, context=None, runtime_params=None)->np.ndarray # Return optimal control_allocation formatted as 2d array return 
[defaultControlAllocation] - From 58037baf6c1d79521785acbfdf04bcf5e37b7a25 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Fri, 21 Jun 2024 19:12:58 -0400 Subject: [PATCH 228/410] llvm/execution: Cache both ctype and numpy compiled structures Signed-off-by: Jan Vesely --- psyneulink/core/llvm/execution.py | 133 ++++++++++++++++++------------ 1 file changed, 79 insertions(+), 54 deletions(-) diff --git a/psyneulink/core/llvm/execution.py b/psyneulink/core/llvm/execution.py index 2be72f73ba6..6dd75ae0995 100644 --- a/psyneulink/core/llvm/execution.py +++ b/psyneulink/core/llvm/execution.py @@ -84,8 +84,8 @@ def __init__(self): self._debug_env = debug_env def _get_compilation_param(self, name, init_method, arg): - struct = getattr(self, name, None) - if struct is None: + saved = getattr(self, name, None) + if saved is None: struct_ty = self._bin_func.byref_arg_types[arg] init_f = getattr(self._obj, init_method) if len(self._execution_contexts) > 1: @@ -100,7 +100,12 @@ def _get_compilation_param(self, name, init_method, arg): struct = struct_ty(*initializer) struct_end = time.time() - setattr(self, name, struct) + numpy_struct = np.ctypeslib.as_array(struct) + assert numpy_struct.nbytes == ctypes.sizeof(struct), \ + "Size mismatch ({}), numpy: {} vs. ctypes:{}".format(name, numpy_struct.nbytes, ctypes.sizeof(struct)) + + saved = (struct, numpy_struct) + setattr(self, name, saved) if "time_stat" in self._debug_env: print("Time to get initializer for struct:", name, @@ -116,24 +121,18 @@ def _get_compilation_param(self, name, init_method, arg): if len(self._execution_contexts) == 1: if name == '_state': - numpy_struct = np.ctypeslib.as_array(struct) - assert numpy_struct.nbytes == ctypes.sizeof(struct), \ - "Size mismatch, numpy: {} vs. ctypes:{}".format(numpy_struct.nbytes, ctypes.sizeof(struct)) self._copy_params_to_pnl(self._execution_contexts[0], self._obj, numpy_struct, "llvm_state_ids") elif name == '_param': - numpy_struct = np.ctypeslib.as_array(struct) - assert numpy_struct.nbytes == ctypes.sizeof(struct), \ - "Size mismatch, numpy: {} vs. ctypes:{}".format(numpy_struct.nbytes, ctypes.sizeof(struct)) self._copy_params_to_pnl(self._execution_contexts[0], self._obj, numpy_struct, "llvm_param_ids") - return struct + return saved def _copy_params_to_pnl(self, context, component, params, ids:str): @@ -292,8 +291,12 @@ def __get_cuda_buffer(self, struct_name): # Param struct needs to be reuploaded every time because the values # might have changed. 
if gpu_buffer is None or struct_name == "_param_struct": + c_struct = getattr(self, struct_name) + if isinstance(c_struct, tuple): + c_struct = c_struct[0] + # Set private attribute to a new buffer - gpu_buffer = self.upload_ctype(getattr(self, struct_name), struct_name) + gpu_buffer = self.upload_ctype(c_struct, struct_name) self._gpu_buffers[struct_name] = gpu_buffer return gpu_buffer @@ -332,12 +335,13 @@ def cuda_execute(self, variable): self._bin_func.cuda_call(self._cuda_param_struct, self._cuda_state_struct, - data_in, self._cuda_out, + data_in, + self._cuda_out, threads=len(self._execution_contexts)) # Copy the result from the device self.download_to(self._ct_vo, self._cuda_out, 'result') - self.download_to(self._state_struct, self._cuda_state_struct, 'state', move=True) + self.download_to(self._state_struct[0], self._cuda_state_struct, 'state', move=True) return _convert_ctype_to_python(self._ct_vo) @@ -389,13 +393,16 @@ def execute(self, variable): if len(self._execution_contexts) > 1: # wrap_call casts the arguments so we only need contiguous data # layout - self._bin_multirun.wrap_call(self._param_struct, - self._state_struct, - ct_vi, self._ct_vo, self._ct_len) + self._bin_multirun.wrap_call(self._param_struct[0], + self._state_struct[0], + ct_vi, + self._ct_vo, + self._ct_len) else: - self._bin_func(ctypes.byref(self._param_struct), - ctypes.byref(self._state_struct), - ct_vi, ctypes.byref(self._ct_vo)) + self._bin_func(ctypes.byref(self._param_struct[0]), + ctypes.byref(self._state_struct[0]), + ct_vi, + ctypes.byref(self._ct_vo)) return _convert_ctype_to_python(self._ct_vo) @@ -543,17 +550,17 @@ def extract_frozen_node_output(self, node): return self.extract_node_struct(node, self.__frozen_vals) def extract_node_output(self, node): - return self.extract_node_struct(node, self._data_struct) + return self.extract_node_struct(node, self._data_struct[0]) def extract_node_state(self, node): - return self.extract_node_struct(node, self._state_struct) + return self.extract_node_struct(node, self._state_struct[0]) def extract_node_params(self, node): - return self.extract_node_struct(node, self._param_struct) + return self.extract_node_struct(node, self._param_struct[0]) def insert_node_output(self, node, data): - my_field_name = self._data_struct._fields_[0][0] - my_res_struct = getattr(self._data_struct, my_field_name) + my_field_name = self._data_struct[0]._fields_[0][0] + my_res_struct = getattr(self._data_struct[0], my_field_name) index = self._composition._get_node_index(node) node_field_name = my_res_struct._fields_[index][0] setattr(my_res_struct, node_field_name, _tupleize(data)) @@ -577,7 +584,7 @@ def _get_input_struct(self, inputs): return c_input(*_tupleize(input_data)) def freeze_values(self): - self.__frozen_vals = copy.deepcopy(self._data_struct) + self.__frozen_vals = copy.deepcopy(self._data_struct[0]) def execute_node(self, node, inputs=None, context=None): # We need to reconstruct the input dictionary here if it was not provided. @@ -605,8 +612,11 @@ def execute_node(self, node, inputs=None, context=None): if node is not self._composition.input_CIM and self.__frozen_vals is None: self.freeze_values() - self._bin_func(self._state_struct, self._param_struct, - inputs, self.__frozen_vals, self._data_struct) + self._bin_func(self._state_struct[0], + self._param_struct[0], + inputs, + self.__frozen_vals, + self._data_struct[0]) if "comp_node_debug" in self._debug_env: print("RAN: {}. 
State: {}".format(node, self.extract_node_state(node))) @@ -634,15 +644,18 @@ def execute(self, inputs): # NOTE: Make sure that input struct generation is inlined. # We need the binary function to be setup for it to work correctly. if len(self._execution_contexts) > 1: - self._bin_exec_multi_func.wrap_call(self._state_struct, - self._param_struct, + self._bin_exec_multi_func.wrap_call(self._state_struct[0], + self._param_struct[0], self._get_input_struct(inputs), - self._data_struct, - self._conditions, self._ct_len) + self._data_struct[0], + self._conditions, + self._ct_len) else: - self._bin_exec_func(self._state_struct, self._param_struct, + self._bin_exec_func(self._state_struct[0], + self._param_struct[0], self._get_input_struct(inputs), - self._data_struct, self._conditions) + self._data_struct[0], + self._conditions) def cuda_execute(self, inputs): # NOTE: Make sure that input struct generation is inlined. @@ -655,8 +668,8 @@ def cuda_execute(self, inputs): threads=len(self._execution_contexts)) # Copy the data structs from the device - self.download_to(self._data_struct, self._cuda_data_struct, 'data', move=True) - self.download_to(self._state_struct, self._cuda_state_struct, 'state', move=True) + self.download_to(self._data_struct[0], self._cuda_data_struct, 'data', move=True) + self.download_to(self._state_struct[0], self._cuda_state_struct, 'state', move=True) # Methods used to accelerate "Run" def _get_run_input_struct(self, inputs, num_input_sets, arg=3): @@ -726,14 +739,24 @@ def run(self, inputs, runs=0, num_input_sets=0): runs_count = ctypes.c_int(runs) input_count = ctypes.c_int(num_input_sets) if len(self._execution_contexts) > 1: - self._bin_run_multi_func.wrap_call(self._state_struct, self._param_struct, - self._data_struct, inputs, outputs, - runs_count, input_count, self._ct_len) + self._bin_run_multi_func.wrap_call(self._state_struct[0], + self._param_struct[0], + self._data_struct[0], + inputs, + outputs, + runs_count, + input_count, + self._ct_len) + return _convert_ctype_to_python(outputs) else: - self._bin_run_func.wrap_call(self._state_struct, self._param_struct, - self._data_struct, inputs, outputs, - runs_count, input_count) + self._bin_run_func.wrap_call(self._state_struct[0], + self._param_struct[0], + self._data_struct[0], + inputs, + outputs, + runs_count, + input_count) # Extract only #trials elements in case the run exited early assert runs_count.value <= runs, "Composition ran more times than allowed!" 
@@ -773,8 +796,8 @@ def cuda_run(self, inputs, runs, num_input_sets): threads=len(self._execution_contexts)) # Copy the data struct from the device - self.download_to(self._data_struct, self._cuda_data_struct, 'data', move=True) - self.download_to(self._state_struct, self._cuda_state_struct, 'state', move=True) + self.download_to(self._data_struct[0], self._cuda_data_struct, 'data', move=True) + self.download_to(self._state_struct[0], self._cuda_state_struct, 'state', move=True) ct_out = self.download_ctype(data_out, output_type, 'result') if len(self._execution_contexts) > 1: @@ -800,9 +823,9 @@ def _prepare_evaluate(self, inputs, num_input_sets, num_evaluations, all_results # Directly initialized structures assert ocm.agent_rep is self._composition - ct_comp_param = self._get_compilation_param('_eval_param', '_get_param_initializer', 0) - ct_comp_state = self._get_compilation_param('_eval_state', '_get_state_initializer', 1) - ct_comp_data = self._get_compilation_param('_eval_data', '_get_data_initializer', 6) + comp_params = self._get_compilation_param('_eval_param', '_get_param_initializer', 0) + comp_state = self._get_compilation_param('_eval_state', '_get_state_initializer', 1) + comp_data = self._get_compilation_param('_eval_data', '_get_data_initializer', 6) # Construct input variable, the 5th parameter of the evaluate function ct_inputs = self._get_run_input_struct(inputs, num_input_sets, 5) @@ -824,18 +847,18 @@ def _prepare_evaluate(self, inputs, num_input_sets, num_evaluations, all_results "for", self._obj.name) # return variable as numpy array. pycuda can use it directly - return ct_comp_param, ct_comp_state, ct_comp_data, ct_inputs, out_ty, ct_num_inputs + return comp_params, comp_state, comp_data, ct_inputs, out_ty, ct_num_inputs def cuda_evaluate(self, inputs, num_input_sets, num_evaluations, all_results:bool=False): - ct_comp_param, ct_comp_state, ct_comp_data, ct_inputs, out_ty, ct_num_inputs = \ + comp_params, comp_state, comp_data, ct_inputs, out_ty, ct_num_inputs = \ self._prepare_evaluate(inputs, num_input_sets, num_evaluations, all_results) # Output is allocated on device, but we need the ctype (out_ty). 
- cuda_args = (self.upload_ctype(ct_comp_param, 'params'), - self.upload_ctype(ct_comp_state, 'state'), + cuda_args = (self.upload_ctype(comp_params[0], 'params'), + self.upload_ctype(comp_state[0], 'state'), jit_engine.pycuda.driver.mem_alloc(ctypes.sizeof(out_ty)), self.upload_ctype(ct_inputs, 'input'), - self.upload_ctype(ct_comp_data, 'data'), + self.upload_ctype(comp_data[0], 'data'), self.upload_ctype(ct_num_inputs, 'input'), ) @@ -845,7 +868,7 @@ def cuda_evaluate(self, inputs, num_input_sets, num_evaluations, all_results:boo return ct_results def thread_evaluate(self, inputs, num_input_sets, num_evaluations, all_results:bool=False): - ct_param, ct_state, ct_data, ct_inputs, out_ty, ct_num_inputs = \ + comp_params, comp_state, comp_data, ct_inputs, out_ty, ct_num_inputs = \ self._prepare_evaluate(inputs, num_input_sets, num_evaluations, all_results) ct_results = out_ty() @@ -862,12 +885,14 @@ def thread_evaluate(self, inputs, num_input_sets, num_evaluations, all_results:b # There are 7 arguments to evaluate_alloc_range: # comp_param, comp_state, from, to, results, input, comp_data - results = [ex.submit(self.__bin_func, ct_param, ct_state, + results = [ex.submit(self.__bin_func, + comp_params[0], + comp_state[0], int(i * evals_per_job), min((i + 1) * evals_per_job, num_evaluations), results_param, input_param, - ct_data, + comp_data[0], ct_num_inputs) for i in range(jobs)] From a763df4f781c9feeca3e8df07e1d84a36b7504f7 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Tue, 25 Jun 2024 09:59:11 -0400 Subject: [PATCH 229/410] llvm/execution: Cache numpy struct of execution conditions Signed-off-by: Jan Vesely --- psyneulink/core/llvm/execution.py | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/psyneulink/core/llvm/execution.py b/psyneulink/core/llvm/execution.py index 6dd75ae0995..39132653ff9 100644 --- a/psyneulink/core/llvm/execution.py +++ b/psyneulink/core/llvm/execution.py @@ -291,9 +291,7 @@ def __get_cuda_buffer(self, struct_name): # Param struct needs to be reuploaded every time because the values # might have changed. 
if gpu_buffer is None or struct_name == "_param_struct": - c_struct = getattr(self, struct_name) - if isinstance(c_struct, tuple): - c_struct = c_struct[0] + c_struct = getattr(self, struct_name)[0] # Set private attribute to a new buffer gpu_buffer = self.upload_ctype(c_struct, struct_name) @@ -491,16 +489,17 @@ def _conditions(self): if self.__conds is None: gen = helpers.ConditionGenerator(None, self._composition) if len(self._execution_contexts) > 1: - cond_type = self._bin_func_multirun.byref_arg_types[4] * len(self._execution_contexts) + cond_ctype = self._bin_func_multirun.byref_arg_types[4] * len(self._execution_contexts) cond_initializer = (gen.get_condition_initializer() for _ in self._execution_contexts) else: - cond_type = self._bin_func.byref_arg_types[4] + cond_ctype = self._bin_func.byref_arg_types[4] cond_initializer = gen.get_condition_initializer() - self.__conds = cond_type(*cond_initializer) + c_conds = cond_ctype(*cond_initializer) + self.__conds = (c_conds, np.ctypeslib.as_array(c_conds)) if "stat" in self._debug_env: print("Instantiated condition struct ( size:" , - _pretty_size(ctypes.sizeof(cond_type)), ")", + _pretty_size(ctypes.sizeof(cond_ctype)), ")", "for", self._composition.name) return self.__conds @@ -648,14 +647,14 @@ def execute(self, inputs): self._param_struct[0], self._get_input_struct(inputs), self._data_struct[0], - self._conditions, + self._conditions[0], self._ct_len) else: self._bin_exec_func(self._state_struct[0], self._param_struct[0], self._get_input_struct(inputs), self._data_struct[0], - self._conditions) + self._conditions[0]) def cuda_execute(self, inputs): # NOTE: Make sure that input struct generation is inlined. From b5ad68242022a3a2d4528799d909b7e1f6fb61a7 Mon Sep 17 00:00:00 2001 From: David Turner Date: Tue, 25 Jun 2024 15:02:25 -0400 Subject: [PATCH 230/410] Quick fix for termination_measure bug in PEC. 
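A rough Python-level illustration of the kind of configuration this compiled path handles, assuming the standard TransferMechanism termination arguments and the default execute-until-finished behavior (this is not the failing PEC model itself):

    import psyneulink as pnl

    # A mechanism that keeps integrating its input and reports is_finished once
    # it has executed a given number of times within the current trial; in
    # compiled mode the TimeScale is used to index the num_executions array.
    t = pnl.TransferMechanism(
        integrator_mode=True,
        termination_measure=pnl.TimeScale.TRIAL,
        termination_threshold=5,
    )

    comp = pnl.Composition(pathways=[t])
    comp.run(inputs={t: [[1.0]]})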
--- .../components/mechanisms/processing/transfermechanism.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/psyneulink/core/components/mechanisms/processing/transfermechanism.py b/psyneulink/core/components/mechanisms/processing/transfermechanism.py index e2dedd3d303..edfc6f9b32a 100644 --- a/psyneulink/core/components/mechanisms/processing/transfermechanism.py +++ b/psyneulink/core/components/mechanisms/processing/transfermechanism.py @@ -1598,9 +1598,13 @@ def _gen_llvm_is_finished_cond(self, ctx, builder, m_base_params, m_state, m_in) builder.call(func, [func_params, func_state, func_in, cmp_val_ptr]) - elif isinstance(self.termination_measure, TimeScale): + elif isinstance(self.termination_measure, TimeScale) or isinstance(self.termination_measure, np.ndarray) or self.termination_measure in {ts.value for ts in TimeScale}: + if isinstance(self.termination_measure, TimeScale): + measure = self.termination_measure.value + else: + measure = self.termination_measure num_executions_array_ptr = ctx.get_param_or_state_ptr(builder, self, "num_executions", state_struct_ptr=m_state) - elem_ptr = builder.gep(num_executions_array_ptr, [ctx.int32_ty(0), ctx.int32_ty(self.termination_measure.value)]) + elem_ptr = builder.gep(num_executions_array_ptr, [ctx.int32_ty(0), ctx.int32_ty(measure)]) elem_val = builder.sitofp(builder.load(elem_ptr), threshold.type) builder.store(elem_val, cmp_val_ptr) From 36e52b8ceb50d69c04d8e9aa81cd2139f27f01ee Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Tue, 25 Jun 2024 13:35:14 -0400 Subject: [PATCH 231/410] llvm/execution/cuda: Use numpy argument handlers to access compiled structures Signed-off-by: Jan Vesely --- psyneulink/core/llvm/execution.py | 49 +++++++++---------------------- 1 file changed, 14 insertions(+), 35 deletions(-) diff --git a/psyneulink/core/llvm/execution.py b/psyneulink/core/llvm/execution.py index 39132653ff9..b6c232d795f 100644 --- a/psyneulink/core/llvm/execution.py +++ b/psyneulink/core/llvm/execution.py @@ -271,56 +271,46 @@ def upload_ctype(self, data, name='other'): return jit_engine.pycuda.driver.mem_alloc(4) return jit_engine.pycuda.driver.to_device(bytes(data)) - def download_to(self, dst, source, name='other', *, move=False): - bounce = self.download_ctype(source, type(dst), name) - ctypes.memmove(ctypes.addressof(dst), ctypes.addressof(bounce), ctypes.sizeof(dst)) - if move: - for k, v in self._gpu_buffers.items(): - if v is source: - self._gpu_buffers[k] = None - def download_ctype(self, source, ty, name='other'): self._downloaded_bytes[name] += ctypes.sizeof(ty) out_buf = bytearray(ctypes.sizeof(ty)) jit_engine.pycuda.driver.memcpy_dtoh(out_buf, source) return ty.from_buffer(out_buf) - def __get_cuda_buffer(self, struct_name): + def __get_cuda_arg(self, struct_name, arg_handler): gpu_buffer = self._gpu_buffers[struct_name] - # Param struct needs to be reuploaded every time because the values - # might have changed. 
- if gpu_buffer is None or struct_name == "_param_struct": - c_struct = getattr(self, struct_name)[0] + np_struct = getattr(self, struct_name)[1] + if gpu_buffer is None or gpu_buffer.array is not np_struct: + + # 0-sized structures fail to upload use a small device buffer instead + gpu_buffer = arg_handler(np_struct) if np_struct.nbytes > 0 else jit_engine.pycuda.driver.mem_alloc(8) - # Set private attribute to a new buffer - gpu_buffer = self.upload_ctype(c_struct, struct_name) self._gpu_buffers[struct_name] = gpu_buffer return gpu_buffer @property def _cuda_param_struct(self): - return self.__get_cuda_buffer("_param_struct") + return self.__get_cuda_arg("_param_struct", jit_engine.pycuda.driver.In) @property def _cuda_state_struct(self): - return self.__get_cuda_buffer("_state_struct") + return self.__get_cuda_arg("_state_struct", jit_engine.pycuda.driver.InOut) @property def _cuda_data_struct(self): - return self.__get_cuda_buffer("_data_struct") + return self.__get_cuda_arg("_data_struct", jit_engine.pycuda.driver.InOut) @property def _cuda_conditions(self): - return self.__get_cuda_buffer("_conditions") + return self.__get_cuda_arg("_conditions", jit_engine.pycuda.driver.InOut) @property def _cuda_out(self): gpu_buffer = self._gpu_buffers["_out"] if gpu_buffer is None: - size = ctypes.sizeof(self._ct_vo) - gpu_buffer = jit_engine.pycuda.driver.mem_alloc(size) + gpu_buffer = jit_engine.pycuda.driver.Out(np.ctypeslib.as_array(self._ct_vo)) self._gpu_buffers["_out"] = gpu_buffer return gpu_buffer @@ -337,9 +327,6 @@ def cuda_execute(self, variable): self._cuda_out, threads=len(self._execution_contexts)) - # Copy the result from the device - self.download_to(self._ct_vo, self._cuda_out, 'result') - self.download_to(self._state_struct[0], self._cuda_state_struct, 'state', move=True) return _convert_ctype_to_python(self._ct_vo) @@ -666,10 +653,6 @@ def cuda_execute(self, inputs): self._cuda_conditions, threads=len(self._execution_contexts)) - # Copy the data structs from the device - self.download_to(self._data_struct[0], self._cuda_data_struct, 'data', move=True) - self.download_to(self._state_struct[0], self._cuda_state_struct, 'state', move=True) - # Methods used to accelerate "Run" def _get_run_input_struct(self, inputs, num_input_sets, arg=3): # Callers that override input arg, should ensure that _bin_func is not None @@ -794,10 +777,6 @@ def cuda_run(self, inputs, runs, num_input_sets): data_in, data_out, runs_count, input_count, threads=len(self._execution_contexts)) - # Copy the data struct from the device - self.download_to(self._data_struct[0], self._cuda_data_struct, 'data', move=True) - self.download_to(self._state_struct[0], self._cuda_state_struct, 'state', move=True) - ct_out = self.download_ctype(data_out, output_type, 'result') if len(self._execution_contexts) > 1: return _convert_ctype_to_python(ct_out) @@ -853,11 +832,11 @@ def cuda_evaluate(self, inputs, num_input_sets, num_evaluations, all_results:boo self._prepare_evaluate(inputs, num_input_sets, num_evaluations, all_results) # Output is allocated on device, but we need the ctype (out_ty). 
- cuda_args = (self.upload_ctype(comp_params[0], 'params'), - self.upload_ctype(comp_state[0], 'state'), + cuda_args = (jit_engine.pycuda.driver.In(comp_params[1]), + jit_engine.pycuda.driver.InOut(comp_state[1]), jit_engine.pycuda.driver.mem_alloc(ctypes.sizeof(out_ty)), self.upload_ctype(ct_inputs, 'input'), - self.upload_ctype(comp_data[0], 'data'), + jit_engine.pycuda.driver.InOut(comp_data[1]), self.upload_ctype(ct_num_inputs, 'input'), ) From bfea7751dfb66b6a61a55c2d670a1840b0a7f69d Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Tue, 25 Jun 2024 15:17:52 -0400 Subject: [PATCH 232/410] llvm/execution/cuda: Use numpy arrays and arg handlers to extract results Instead of calling download_ctype Signed-off-by: Jan Vesely --- psyneulink/core/llvm/execution.py | 18 ++++++------------ 1 file changed, 6 insertions(+), 12 deletions(-) diff --git a/psyneulink/core/llvm/execution.py b/psyneulink/core/llvm/execution.py index b6c232d795f..ce390506843 100644 --- a/psyneulink/core/llvm/execution.py +++ b/psyneulink/core/llvm/execution.py @@ -271,12 +271,6 @@ def upload_ctype(self, data, name='other'): return jit_engine.pycuda.driver.mem_alloc(4) return jit_engine.pycuda.driver.to_device(bytes(data)) - def download_ctype(self, source, ty, name='other'): - self._downloaded_bytes[name] += ctypes.sizeof(ty) - out_buf = bytearray(ctypes.sizeof(ty)) - jit_engine.pycuda.driver.memcpy_dtoh(out_buf, source) - return ty.from_buffer(out_buf) - def __get_cuda_arg(self, struct_name, arg_handler): gpu_buffer = self._gpu_buffers[struct_name] @@ -758,8 +752,9 @@ def cuda_run(self, inputs, runs, num_input_sets): output_type = (self._bin_run_func.byref_arg_types[4] * runs) if len(self._execution_contexts) > 1: output_type = output_type * len(self._execution_contexts) - output_size = ctypes.sizeof(output_type) - data_out = jit_engine.pycuda.driver.mem_alloc(output_size) + + ct_out = output_type() + data_out = jit_engine.pycuda.driver.Out(np.ctypeslib.as_array(ct_out)) # number of trials argument runs_np = np.full(len(self._execution_contexts), runs, dtype=np.int32) @@ -777,7 +772,6 @@ def cuda_run(self, inputs, runs, num_input_sets): data_in, data_out, runs_count, input_count, threads=len(self._execution_contexts)) - ct_out = self.download_ctype(data_out, output_type, 'result') if len(self._execution_contexts) > 1: return _convert_ctype_to_python(ct_out) else: @@ -831,17 +825,17 @@ def cuda_evaluate(self, inputs, num_input_sets, num_evaluations, all_results:boo comp_params, comp_state, comp_data, ct_inputs, out_ty, ct_num_inputs = \ self._prepare_evaluate(inputs, num_input_sets, num_evaluations, all_results) - # Output is allocated on device, but we need the ctype (out_ty). + ct_results = out_ty() + cuda_args = (jit_engine.pycuda.driver.In(comp_params[1]), jit_engine.pycuda.driver.InOut(comp_state[1]), - jit_engine.pycuda.driver.mem_alloc(ctypes.sizeof(out_ty)), + jit_engine.pycuda.driver.Out(np.ctypeslib.as_array(ct_results)), self.upload_ctype(ct_inputs, 'input'), jit_engine.pycuda.driver.InOut(comp_data[1]), self.upload_ctype(ct_num_inputs, 'input'), ) self.__bin_func.cuda_call(*cuda_args, threads=int(num_evaluations)) - ct_results = self.download_ctype(cuda_args[2], out_ty, 'result') return ct_results From 78657dca7a6fb7de8be3859a050f0af4120808d3 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Tue, 25 Jun 2024 21:48:26 -0400 Subject: [PATCH 233/410] llvm/execution/cuda: Create numpy stuructres for compiled input Use pycuda argument handler instead of upload_ctype. Remove upload_ctype. 
Signed-off-by: Jan Vesely --- psyneulink/core/llvm/execution.py | 62 +++++++++++++------------------ 1 file changed, 25 insertions(+), 37 deletions(-) diff --git a/psyneulink/core/llvm/execution.py b/psyneulink/core/llvm/execution.py index ce390506843..567e080521f 100644 --- a/psyneulink/core/llvm/execution.py +++ b/psyneulink/core/llvm/execution.py @@ -263,14 +263,6 @@ def _bin_func_multirun(self): # CUDA uses the same function for single and multi run return self._bin_func - def upload_ctype(self, data, name='other'): - self._uploaded_bytes[name] += ctypes.sizeof(data) - if ctypes.sizeof(data) == 0: - # 0-sized structures fail to upload - # provide a small device buffer instead - return jit_engine.pycuda.driver.mem_alloc(4) - return jit_engine.pycuda.driver.to_device(bytes(data)) - def __get_cuda_arg(self, struct_name, arg_handler): gpu_buffer = self._gpu_buffers[struct_name] @@ -548,20 +540,21 @@ def insert_node_output(self, node, data): def _get_input_struct(self, inputs): # Either node or composition execute. # All execute functions expect inputs to be 3rd param. - c_input = self._bin_func.byref_arg_types[2] + c_input_type = self._bin_func.byref_arg_types[2] # Read provided input data and parse into an array (generator) if len(self._execution_contexts) > 1: assert len(self._execution_contexts) == len(inputs) - c_input = c_input * len(self._execution_contexts) + c_input_type = c_input_type * len(self._execution_contexts) input_data = (([x] for x in self._composition._build_variable_for_input_CIM(inp)) for inp in inputs) else: input_data = ([x] for x in self._composition._build_variable_for_input_CIM(inputs)) if "stat" in self._debug_env: - print("Input struct size:", _pretty_size(ctypes.sizeof(c_input)), + print("Input struct size:", _pretty_size(ctypes.sizeof(c_input_type)), "for", self._composition.name) - return c_input(*_tupleize(input_data)) + c_input = c_input_type(*_tupleize(input_data)) + return c_input, np.ctypeslib.as_array(c_input) def freeze_values(self): self.__frozen_vals = copy.deepcopy(self._data_struct[0]) @@ -584,7 +577,7 @@ def execute_node(self, node, inputs=None, context=None): # Set bin node to make sure self._*struct works as expected self._set_bin_node(node) if inputs is not None: - inputs = self._get_input_struct(inputs) + inputs = self._get_input_struct(inputs)[0] assert inputs is not None or node is not self._composition.input_CIM @@ -626,14 +619,14 @@ def execute(self, inputs): if len(self._execution_contexts) > 1: self._bin_exec_multi_func.wrap_call(self._state_struct[0], self._param_struct[0], - self._get_input_struct(inputs), + self._get_input_struct(inputs)[0], self._data_struct[0], self._conditions[0], self._ct_len) else: self._bin_exec_func(self._state_struct[0], self._param_struct[0], - self._get_input_struct(inputs), + self._get_input_struct(inputs)[0], self._data_struct[0], self._conditions[0]) @@ -642,7 +635,7 @@ def cuda_execute(self, inputs): # We need the binary function to be setup for it to work correctly. 
self._bin_exec_func.cuda_call(self._cuda_state_struct, self._cuda_param_struct, - self.upload_ctype(self._get_input_struct(inputs), 'input'), + jit_engine.pycuda.driver.In(self._get_input_struct(inputs)[1]), self._cuda_data_struct, self._cuda_conditions, threads=len(self._execution_contexts)) @@ -741,12 +734,11 @@ def run(self, inputs, runs=0, num_input_sets=0): def cuda_run(self, inputs, runs, num_input_sets): # Create input buffer if isgenerator(inputs): - inputs, runs = self._get_generator_run_input_struct(inputs, runs) + ct_inputs, runs = self._get_generator_run_input_struct(inputs, runs) assert num_input_sets == 0 or num_input_sets == sys.maxsize - num_input_sets = len(inputs) + num_input_sets = len(ct_inputs) else: - inputs = self._get_run_input_struct(inputs, num_input_sets) - data_in = self.upload_ctype(inputs, 'input') + ct_inputs = self._get_run_input_struct(inputs, num_input_sets) # Create output buffer output_type = (self._bin_run_func.byref_arg_types[4] * runs) @@ -754,30 +746,26 @@ def cuda_run(self, inputs, runs, num_input_sets): output_type = output_type * len(self._execution_contexts) ct_out = output_type() - data_out = jit_engine.pycuda.driver.Out(np.ctypeslib.as_array(ct_out)) # number of trials argument - runs_np = np.full(len(self._execution_contexts), runs, dtype=np.int32) - runs_count = jit_engine.pycuda.driver.InOut(runs_np) - self._uploaded_bytes['input'] += runs_np.nbytes - self._downloaded_bytes['input'] += runs_np.nbytes - - # input_count argument - input_count = jit_engine.pycuda.driver.In(np.int32(num_input_sets)) - self._uploaded_bytes['input'] += 4 + np_runs = np.full(len(self._execution_contexts), runs, dtype=np.int32) self._bin_run_func.cuda_call(self._cuda_state_struct, self._cuda_param_struct, self._cuda_data_struct, - data_in, data_out, runs_count, input_count, + jit_engine.pycuda.driver.In(np.ctypeslib.as_array(ct_inputs)), # input + jit_engine.pycuda.driver.Out(np.ctypeslib.as_array(ct_out)), # output + jit_engine.pycuda.driver.InOut(np_runs), # runs + jit_engine.pycuda.driver.In(np.int32(num_input_sets)), # number of inputs threads=len(self._execution_contexts)) + assert all(np_runs <= runs), "Composition ran more times than allowed: {}".format(runs) + if len(self._execution_contexts) > 1: return _convert_ctype_to_python(ct_out) else: # Extract only #trials elements in case the run exited early - assert runs_np[0] <= runs, "Composition ran more times than allowed!" 
- return _convert_ctype_to_python(ct_out)[0:runs_np[0]] + return _convert_ctype_to_python(ct_out)[0:np_runs[0]] def _prepare_evaluate(self, inputs, num_input_sets, num_evaluations, all_results:bool): ocm = self._composition.controller @@ -822,17 +810,17 @@ def _prepare_evaluate(self, inputs, num_input_sets, num_evaluations, all_results return comp_params, comp_state, comp_data, ct_inputs, out_ty, ct_num_inputs def cuda_evaluate(self, inputs, num_input_sets, num_evaluations, all_results:bool=False): - comp_params, comp_state, comp_data, ct_inputs, out_ty, ct_num_inputs = \ + comp_params, comp_state, comp_data, ct_inputs, out_ty, _ = \ self._prepare_evaluate(inputs, num_input_sets, num_evaluations, all_results) ct_results = out_ty() cuda_args = (jit_engine.pycuda.driver.In(comp_params[1]), jit_engine.pycuda.driver.InOut(comp_state[1]), - jit_engine.pycuda.driver.Out(np.ctypeslib.as_array(ct_results)), - self.upload_ctype(ct_inputs, 'input'), - jit_engine.pycuda.driver.InOut(comp_data[1]), - self.upload_ctype(ct_num_inputs, 'input'), + jit_engine.pycuda.driver.Out(np.ctypeslib.as_array(ct_results)), # results + jit_engine.pycuda.driver.In(np.ctypeslib.as_array(ct_inputs)), # inputs + jit_engine.pycuda.driver.InOut(comp_data[1]), # composition data + jit_engine.pycuda.driver.In(np.int32(num_input_sets)), # number of inputs ) self.__bin_func.cuda_call(*cuda_args, threads=int(num_evaluations)) From 3cdc673ac853c9584731818c91841d20f46614d2 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Tue, 25 Jun 2024 22:19:42 -0400 Subject: [PATCH 234/410] llvm/execution/cuda: Drop upload/download counter Not used with pycuda ArgumentHandler. Signed-off-by: Jan Vesely --- psyneulink/core/llvm/execution.py | 20 -------------------- 1 file changed, 20 deletions(-) diff --git a/psyneulink/core/llvm/execution.py b/psyneulink/core/llvm/execution.py index 567e080521f..ea1e5ef5ec7 100644 --- a/psyneulink/core/llvm/execution.py +++ b/psyneulink/core/llvm/execution.py @@ -8,7 +8,6 @@ # ********************************************* Binary Execution Wrappers ************************************************************** -from collections import Counter import concurrent.futures import copy import ctypes @@ -239,24 +238,6 @@ def __init__(self, buffers=['param_struct', 'state_struct', 'out']): self._gpu_buffers = {} for b in buffers: self._gpu_buffers["_" + b] = None - self._uploaded_bytes = Counter() - self._downloaded_bytes = Counter() - - def __del__(self): - if "stat" in self._debug_env: - try: - name = self._bin_func.name - except AttributeError: - name = self._composition.name - - for k, v in self._uploaded_bytes.items(): - print("{} CUDA uploaded `{}': {}".format(name, k, _pretty_size(v))) - if len(self._uploaded_bytes) > 1: - print("{} CUDA uploaded `total': {}".format(name, _pretty_size(sum(self._uploaded_bytes.values())))) - for k, v in self._downloaded_bytes.items(): - print("{} CUDA downloaded `{}': {}".format(name, k, _pretty_size(v))) - if len(self._downloaded_bytes) > 1: - print("{} CUDA downloaded `total': {}".format(name, _pretty_size(sum(self._downloaded_bytes.values())))) @property def _bin_func_multirun(self): @@ -305,7 +286,6 @@ def cuda_execute(self, variable): # Create input argument new_var = np.asfarray(variable, dtype=self._vi_dty) data_in = jit_engine.pycuda.driver.In(new_var) - self._uploaded_bytes['input'] += new_var.nbytes self._bin_func.cuda_call(self._cuda_param_struct, self._cuda_state_struct, From 36c8105652ce0c13643fc760d66d7b6336592e6c Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Thu, 
27 Jun 2024 23:39:33 -0400 Subject: [PATCH 235/410] tests/debug_composition: Use fixture to preserve environment (#2987) Restores old value of PNL_LLVM_DEBUG even in case of error. Signed-off-by: Jan Vesely --- tests/llvm/test_debug_composition.py | 25 +++++++++++++++++-------- 1 file changed, 17 insertions(+), 8 deletions(-) diff --git a/tests/llvm/test_debug_composition.py b/tests/llvm/test_debug_composition.py index 84e981a4e7e..ffc6d83df0b 100644 --- a/tests/llvm/test_debug_composition.py +++ b/tests/llvm/test_debug_composition.py @@ -8,6 +8,23 @@ from psyneulink.core.components.mechanisms.processing.transfermechanism import TransferMechanism from psyneulink.core.compositions.composition import Composition + +@pytest.fixture(autouse=True) +def preserve_env(): + + # Save old debug env var + old_env = os.environ.get("PNL_LLVM_DEBUG") + + yield + + # Restore old debug env var and reset the debug configuration + if old_env is None: + del os.environ["PNL_LLVM_DEBUG"] + else: + os.environ["PNL_LLVM_DEBUG"] = old_env + pnlvm.debug._update() + + debug_options = ["const_input=[[[7]]]", "const_input", "const_params", "const_data", "const_state", "stat", "time_stat", "unaligned_copy"] options_combinations = (";".join(c) for c in pytest.helpers.power_set(debug_options)) @@ -18,8 +35,6 @@ ]) @pytest.mark.parametrize("debug_env", [comb for comb in options_combinations if comb.count("const_input") < 2]) def test_debug_comp(mode, debug_env): - # save old debug env var - old_env = os.environ.get("PNL_LLVM_DEBUG") if debug_env is not None: os.environ["PNL_LLVM_DEBUG"] = debug_env pnlvm.debug._update() @@ -32,12 +47,6 @@ def test_debug_comp(mode, debug_env): inputs_dict = {A: [5]} output1 = comp.run(inputs=inputs_dict, execution_mode=mode) output2 = comp.run(inputs=inputs_dict, execution_mode=mode) - # restore old debug env var and cleanup the debug configuration - if old_env is None: - del os.environ["PNL_LLVM_DEBUG"] - else: - os.environ["PNL_LLVM_DEBUG"] = old_env - pnlvm.debug._update() assert len(comp.results) == 2 From ebe669a1782673077b7403e9625fbd62636a1085 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Mon, 1 Jul 2024 15:35:03 -0400 Subject: [PATCH 236/410] ci/ga: Require custom runners to have "enabled" flag (#2989) Signed-off-by: Jan Vesely --- .github/workflows/pnl-ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/pnl-ci.yml b/.github/workflows/pnl-ci.yml index 02f2bd31748..710f2ffef43 100644 --- a/.github/workflows/pnl-ci.yml +++ b/.github/workflows/pnl-ci.yml @@ -24,7 +24,7 @@ jobs: # The main test job build: runs-on: ${{ (contains(vars.SELF_HOSTED, format(';{0}_{1}_{2}_{3};', matrix.os, matrix.python-version, matrix.python-architecture, matrix.extra-args)) - && fromJSON(format('[ "self-hosted","{0}", "X64" ]', matrix.os == 'ubuntu' && 'Linux' || matrix.os))) + && fromJSON(format('[ "self-hosted","{0}", "X64", "enabled" ]', matrix.os == 'ubuntu' && 'Linux' || matrix.os))) || format('{0}-latest', matrix.os) }} env: # Keep DESCRIPTION in sync with the above From a36255c76cc1636af9fd24c2fac8e156441fe569 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Tue, 2 Jul 2024 09:22:17 -0400 Subject: [PATCH 237/410] llvm, TransferFunction: Use compiled structures to determine the type of termination_measure (#2991) Do not hardcode the TimeScale value, it's available in compiled parameters. 
Closes: https://github.com/PrincetonUniversity/PsyNeuLink/issues/2984 Signed-off-by: Jan Vesely --- .../processing/transfermechanism.py | 76 +++++++++++-------- 1 file changed, 46 insertions(+), 30 deletions(-) diff --git a/psyneulink/core/components/mechanisms/processing/transfermechanism.py b/psyneulink/core/components/mechanisms/processing/transfermechanism.py index e2dedd3d303..f0b99937c24 100644 --- a/psyneulink/core/components/mechanisms/processing/transfermechanism.py +++ b/psyneulink/core/components/mechanisms/processing/transfermechanism.py @@ -1533,13 +1533,20 @@ def _clip_result(self, clip, current_input): return current_input def _gen_llvm_is_finished_cond(self, ctx, builder, m_base_params, m_state, m_in): - current = ctx.get_param_or_state_ptr(builder, self, "value", state_struct_ptr=m_state) m_params, builder = self._gen_llvm_param_ports_for_obj( self, m_base_params, ctx, builder, m_base_params, m_state, m_in) + threshold_ptr = ctx.get_param_or_state_ptr(builder, self, "termination_threshold", param_struct_ptr=m_params) + current_mech_value_ptr = ctx.get_param_or_state_ptr(builder, self, "value", state_struct_ptr=m_state) + measure_ptrs = ctx.get_param_or_state_ptr(builder, + self, + "termination_measure", + param_struct_ptr=m_base_params, + state_struct_ptr=m_state) if isinstance(threshold_ptr.type.pointee, pnlvm.ir.LiteralStructType): + # Threshold is not defined, return the old value of finished flag assert len(threshold_ptr.type.pointee) == 0 is_finished_ptr = ctx.get_param_or_state_ptr(builder, self, "is_finished_flag", state_struct_ptr=m_state) @@ -1548,27 +1555,34 @@ def _gen_llvm_is_finished_cond(self, ctx, builder, m_base_params, m_state, m_in) # If modulated, termination threshold is single element array. # Otherwise, it is scalar - threshold = pnlvm.helpers.load_extract_scalar_array_one(builder, - threshold_ptr) + threshold = pnlvm.helpers.load_extract_scalar_array_one(builder, threshold_ptr) + + # Extract value to compare with threshold above + cmp_val_ptr = builder.alloca(threshold.type, name="is_finished_threshold") + + is_in_params = "termination_measure" in self.llvm_param_ids + is_in_state = "termination_measure" in self.llvm_state_ids - cmp_val_ptr = builder.alloca(threshold.type, name="is_finished_value") - if self.termination_measure is max: + if not is_in_params and not is_in_state: + + # This can be any builtint function, but currently only max() is supported + assert measure_ptrs is None + assert self.termination_measure is max assert self._termination_measure_num_items_expected == 1 + # Get inside of the structure - val = builder.gep(current, [ctx.int32_ty(0), ctx.int32_ty(0)]) - first_val = builder.load(builder.gep(val, [ctx.int32_ty(0), ctx.int32_ty(0)])) + value = builder.gep(current_mech_value_ptr, [ctx.int32_ty(0), ctx.int32_ty(0)]) + first_val = builder.load(builder.gep(value, [ctx.int32_ty(0), ctx.int32_ty(0)])) builder.store(first_val, cmp_val_ptr) - with pnlvm.helpers.array_ptr_loop(builder, val, "max_loop") as (b, idx): - test_val = b.load(b.gep(val, [ctx.int32_ty(0), idx])) + with pnlvm.helpers.array_ptr_loop(builder, value, "max_loop") as (b, idx): + test_val = b.load(b.gep(value, [ctx.int32_ty(0), idx])) max_val = b.load(cmp_val_ptr) + cond = b.fcmp_ordered(">=", test_val, max_val) max_val = b.select(cond, test_val, max_val) b.store(max_val, cmp_val_ptr) - assert "termination_measure" not in self.llvm_param_ids, "'termination_measure' in {}: {}".format(self.name, pnlvm.helpers.get_param_ptr(builder, self, m_base_params, 
"termination_measure").type.pointee) - elif isinstance(self.termination_measure, Function): - prev_val_ptr = ctx.get_param_or_state_ptr(builder, self, "value", state_struct_ptr=m_state, history=1) - prev_val = builder.load(prev_val_ptr) + elif is_in_params and is_in_state: expected = np.empty_like([self.defaults.value[0], self.defaults.value[0]]) got = np.empty_like(self.termination_measure.defaults.variable) @@ -1576,32 +1590,34 @@ def _gen_llvm_is_finished_cond(self, ctx, builder, m_base_params, m_state, m_in) warnings.warn("Shape mismatch: Termination measure input: " "{} should be {}.".format(self.termination_measure.defaults.variable, expected.shape), pnlvm.PNLCompilerWarning) - # FIXME: HACK the distance function is not initialized + + # FIXME: HACK: the distance function is not initialized self.termination_measure.defaults.variable = expected func = ctx.import_llvm_function(self.termination_measure) - func_params, func_state = ctx.get_param_or_state_ptr(builder, - self, - "termination_measure", - param_struct_ptr=m_base_params, - state_struct_ptr=m_state) - func_in = builder.alloca(func.args[2].type.pointee, name="is_finished_func_in") + func_params, func_state = measure_ptrs + func_in = builder.alloca(func.args[2].type.pointee, name="termination_func_in") + # Populate input - func_in_current_ptr = builder.gep(func_in, [ctx.int32_ty(0), - ctx.int32_ty(0)]) - current_ptr = builder.gep(current, [ctx.int32_ty(0), ctx.int32_ty(0)]) - builder.store(builder.load(current_ptr), func_in_current_ptr) + func_in_current_ptr = builder.gep(func_in, [ctx.int32_ty(0), ctx.int32_ty(0)]) + func_in_prev_ptr = builder.gep(func_in, [ctx.int32_ty(0), ctx.int32_ty(1)]) - func_in_prev_ptr = builder.gep(func_in, [ctx.int32_ty(0), - ctx.int32_ty(1)]) - builder.store(builder.extract_value(prev_val, 0), func_in_prev_ptr) + # Remove second dimension from 'value' and 'previous_value' + current_ptr = builder.gep(current_mech_value_ptr, [ctx.int32_ty(0), ctx.int32_ty(0)]) + prev_mech_value_ptr = ctx.get_param_or_state_ptr(builder, self, "value", state_struct_ptr=m_state, history=1) + prev_ptr = builder.gep(prev_mech_value_ptr, [ctx.int32_ty(0), ctx.int32_ty(0)]) + + builder.store(builder.load(current_ptr), func_in_current_ptr) + builder.store(builder.load(prev_ptr), func_in_prev_ptr) builder.call(func, [func_params, func_state, func_in, cmp_val_ptr]) - elif isinstance(self.termination_measure, TimeScale): + elif is_in_params and not is_in_state: + num_executions_array_ptr = ctx.get_param_or_state_ptr(builder, self, "num_executions", state_struct_ptr=m_state) - elem_ptr = builder.gep(num_executions_array_ptr, [ctx.int32_ty(0), ctx.int32_ty(self.termination_measure.value)]) - elem_val = builder.sitofp(builder.load(elem_ptr), threshold.type) + index = pnlvm.helpers.load_extract_scalar_array_one(builder, measure_ptrs) + elem_ptr = builder.gep(num_executions_array_ptr, [ctx.int32_ty(0), index]) + elem_val = builder.sitofp(builder.load(elem_ptr), cmp_val_ptr.type.pointee) builder.store(elem_val, cmp_val_ptr) else: From fb3039b8f24b7dc5d867894d40106be6dec46633 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 2 Jul 2024 14:46:51 -0400 Subject: [PATCH 238/410] requirements: update pillow requirement from <10.4.0 to <10.5.0 (#2990) Updates the requirements on [pillow](https://github.com/python-pillow/Pillow) to permit the latest version. 
- [Release notes](https://github.com/python-pillow/Pillow/releases) - [Changelog](https://github.com/python-pillow/Pillow/blob/main/CHANGES.rst) - [Commits](https://github.com/python-pillow/Pillow/compare/10.3.0...10.4.0) --- updated-dependencies: - dependency-name: pillow dependency-type: direct:production ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index c4414db3837..dce6634af38 100644 --- a/requirements.txt +++ b/requirements.txt @@ -13,7 +13,7 @@ numpy>=1.21.0, <1.26.5 optuna<3.4.0 packaging<25.0 pandas<2.2.3 -pillow<10.4.0 +pillow<10.5.0 pint<0.22.0 protobuf<3.20.4 rich>=10.1, <10.13 From 3a80e7093b649f0696b25bed269d22eb4687de7d Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Wed, 3 Jul 2024 17:32:19 -0400 Subject: [PATCH 239/410] tests/misc: Add leak check test (#2992) Signed-off-by: Jan Vesely --- tests/misc/test_leak.py | 57 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 57 insertions(+) create mode 100644 tests/misc/test_leak.py diff --git a/tests/misc/test_leak.py b/tests/misc/test_leak.py new file mode 100644 index 00000000000..3de3539aa69 --- /dev/null +++ b/tests/misc/test_leak.py @@ -0,0 +1,57 @@ +import gc +import numpy as np +import pytest +import weakref + +import graph_scheduler as gs +import psyneulink as pnl + + +@pytest.mark.composition +@pytest.mark.parametrize("run", ["not_run", "run"]) +def test_composition_leak(comp_mode, run): + + c = pnl.Composition() + t = pnl.TransferMechanism() + c.add_node(t) + + if run == "run": + res = c.run([5], execution_mode=comp_mode) + np.testing.assert_array_equal(res, [[5]]) + + weak_c = weakref.ref(c) + weak_t = weakref.ref(t) + + # Clear all known global references + for registry in pnl.primary_registries: + pnl.clear_registry(registry) + + pnl.core.llvm.LLVMBinaryFunction.get.cache_clear() + pnl.core.llvm.LLVMBinaryFunction.from_obj.cache_clear() + + gs.utilities.cached_hashable_graph_function.cache_clear() + + # Remove the original references + del t + del c + + gc.collect() + + def print_ref(r, depth=0): + if depth == 3: + return + + if isinstance(r, (dict, set, list, tuple)): + for r1 in gc.get_referrers(r): + print_ref(r1, depth + 1) + + if weak_t() is not None: + for r in gc.get_referrers(weak_t()): + print_ref(r) + + if weak_c() is not None: + for r in gc.get_referrers(weak_c()): + print_ref(r) + + assert weak_c() is None + assert weak_t() is None From 2029321b59da2dfedd63146db3fa5136826219de Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Tue, 2 Jul 2024 12:03:10 -0400 Subject: [PATCH 240/410] llvm: Rearrange members of the Mersenne-Twister state structure Reduce size to keep the size 8B aligned when using fp64. Use non-legacy get_state() for better control of initializer values. Codestyle. 
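For reference, a small sketch of the numpy API relied on here: get_state(legacy=False) exposes the same information as the legacy tuple, but addressable by name, which is what the new initializer code reads.

    import numpy as np

    rs = np.random.RandomState(42)
    state = rs.get_state(legacy=False)

    print(state['bit_generator'])              # 'MT19937'
    print(state['state']['key'].shape)         # (624,) uint32 key array
    print(state['state']['pos'])               # current position in the key array
    print(state['has_gauss'], state['gauss'])  # cached Box-Muller value, if any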
Signed-off-by: Jan Vesely --- psyneulink/core/components/component.py | 22 +++++++++--- psyneulink/core/llvm/builtins.py | 48 +++++++++++++------------ 2 files changed, 43 insertions(+), 27 deletions(-) diff --git a/psyneulink/core/components/component.py b/psyneulink/core/components/component.py index 42207832395..c3cf0fb0592 100644 --- a/psyneulink/core/components/component.py +++ b/psyneulink/core/components/component.py @@ -1410,13 +1410,25 @@ def _convert(p): if p.name == 'matrix': # Flatten matrix val = tuple(np.asfarray(x).flatten()) elif isinstance(x, np.random.RandomState): - # Skip first element of random state (id string) - val = pnlvm._tupleize((*x.get_state()[1:], x.used_seed[0])) + state = x.get_state(legacy=False) + + # Keep the indices in sync with bultins.py:get_mersenne_twister_state_struct + val = pnlvm._tupleize((state['state']['key'], + state['gauss'], + state['state']['pos'], + state['has_gauss'], + x.used_seed[0])) elif isinstance(x, np.random.Generator): state = x.bit_generator.state - val = pnlvm._tupleize((state['state']['counter'], state['state']['key'], - state['buffer'], state['uinteger'], state['buffer_pos'], - state['has_uint32'], x.used_seed[0])) + + # Keep the indices in sync with bultins.py:get_philox_state_struct + val = pnlvm._tupleize((state['state']['counter'], + state['state']['key'], + state['buffer'], + state['uinteger'], + state['buffer_pos'], + state['has_uint32'], + x.used_seed[0])) elif isinstance(x, Time): val = tuple(x._get_by_time_scale(t) for t in TimeScale) elif isinstance(x, Component): diff --git a/psyneulink/core/llvm/builtins.py b/psyneulink/core/llvm/builtins.py index 59b2dae5a9b..f13d078174e 100644 --- a/psyneulink/core/llvm/builtins.py +++ b/psyneulink/core/llvm/builtins.py @@ -510,9 +510,9 @@ def _setup_mt_rand_init_scalar(ctx, state_ty): builder.store(seed_lo, a_0) # clear gauss helpers - last_g_avail = builder.gep(state, [ctx.int32_ty(0), ctx.int32_ty(2)]) + last_g_avail = builder.gep(state, [ctx.int32_ty(0), ctx.int32_ty(3)]) builder.store(last_g_avail.type.pointee(0), last_g_avail) - last_g = builder.gep(state, [ctx.int32_ty(0), ctx.int32_ty(3)]) + last_g = builder.gep(state, [ctx.int32_ty(0), ctx.int32_ty(1)]) builder.store(last_g.type.pointee(0), last_g) with helpers.for_loop(builder, @@ -532,8 +532,8 @@ def _setup_mt_rand_init_scalar(ctx, state_ty): val = b.and_(val, val.type(0xffffffff)) b.store(val, a_i) - pidx = builder.gep(state, [ctx.int32_ty(0), ctx.int32_ty(1)]) - builder.store(pidx.type.pointee(_MERSENNE_N), pidx) + idx_ptr = builder.gep(state, [ctx.int32_ty(0), ctx.int32_ty(2)]) + builder.store(idx_ptr.type.pointee(_MERSENNE_N), idx_ptr) seed_p = builder.gep(state, [ctx.int32_ty(0), ctx.int32_ty(4)]) builder.store(seed, seed_p) builder.ret_void() @@ -641,8 +641,8 @@ def _setup_mt_rand_integer(ctx, state_ty): state, out = builder.function.args array = builder.gep(state, [ctx.int32_ty(0), ctx.int32_ty(0)]) - pidx = builder.gep(state, [ctx.int32_ty(0), ctx.int32_ty(1)]) - idx = builder.load(pidx) + idx_ptr = builder.gep(state, [ctx.int32_ty(0), ctx.int32_ty(2)]) + idx = builder.load(idx_ptr) cond = builder.icmp_signed(">=", idx, ctx.int32_ty(_MERSENNE_N)) with builder.if_then(cond, likely=False): @@ -703,16 +703,16 @@ def _setup_mt_rand_integer(ctx, state_ty): b.store(val, pkk) - builder.store(pidx.type.pointee(0), pidx) + builder.store(idx_ptr.type.pointee(0), idx_ptr) # Get pointer and update index - idx = builder.load(pidx) - pval = builder.gep(array, [ctx.int32_ty(0), idx]) + idx = builder.load(idx_ptr) + val_ptr = 
builder.gep(array, [ctx.int32_ty(0), idx]) idx = builder.add(idx, idx.type(1)) - builder.store(idx, pidx) + builder.store(idx, idx_ptr) # Load and temper - val = builder.load(pval) + val = builder.load(val_ptr) tmp = builder.lshr(val, val.type(11)) val = builder.xor(val, tmp) @@ -793,15 +793,15 @@ def _setup_mt_rand_normal(ctx, state_ty, gen_float): builder = _setup_builtin_func_builder(ctx, "mt_rand_normal", (state_ty.as_pointer(), ctx.float_ty.as_pointer())) state, out = builder.function.args - p_last = builder.gep(state, [ctx.int32_ty(0), ctx.int32_ty(3)]) - p_last_avail = builder.gep(state, [ctx.int32_ty(0), ctx.int32_ty(2)]) - last_avail = builder.load(p_last_avail) + last_g_ptr = builder.gep(state, [ctx.int32_ty(0), ctx.int32_ty(1)]) + last_g_avail_ptr = builder.gep(state, [ctx.int32_ty(0), ctx.int32_ty(3)]) + last_g_avail = builder.load(last_g_avail_ptr) - cond = builder.icmp_signed("==", last_avail, ctx.int32_ty(1)) + cond = builder.icmp_signed("==", last_g_avail, last_g_avail.type(1)) with builder.if_then(cond, likely=False): - builder.store(builder.load(p_last), out) - builder.store(ctx.float_ty(0), p_last) - builder.store(p_last_avail.type.pointee(0), p_last_avail) + builder.store(builder.load(last_g_ptr), out) + builder.store(last_g_ptr.type.pointee(0), last_g_ptr) + builder.store(last_g_avail_ptr.type.pointee(0), last_g_avail_ptr) builder.ret_void() loop_block = builder.append_basic_block("gen_loop_gauss") @@ -844,18 +844,22 @@ def _setup_mt_rand_normal(ctx, state_ty, gen_float): builder.store(val, out) next_val = builder.fmul(f, x1) - builder.store(next_val, p_last) - builder.store(p_last_avail.type.pointee(1), p_last_avail) + builder.store(next_val, last_g_ptr) + builder.store(last_g_avail_ptr.type.pointee(1), last_g_avail_ptr) builder.ret_void() def get_mersenne_twister_state_struct(ctx): + assert _MERSENNE_N % 2 == 0 + + int16_ty = ir.IntType(16) + return ir.LiteralStructType([ ir.ArrayType(ctx.int32_ty, _MERSENNE_N), # array - ctx.int32_ty, # index - ctx.int32_ty, # last_gauss available ctx.float_ty, # last_gauss + int16_ty, # index + int16_ty, # last_gauss available ctx.int32_ty]) # used seed From be59f937b566c9697ad85f12297e6795a0c278ca Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Tue, 2 Jul 2024 14:38:13 -0400 Subject: [PATCH 241/410] llvm: Use int16 instead of int1 for flag in Philox state The size of the structure is 8B aligned. 
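Illustrative only, using a ctypes analogue rather than the compiled LLVM type: with the flag widened to int16, the trailing members fill an 8-byte slot exactly, so the 64-bit seed that follows starts on an 8-byte boundary without implicit padding.

    import ctypes

    class PhiloxTail(ctypes.Structure):
        # trailing members of the Philox state: 4 + 2 + 2 bytes, then the seed
        _fields_ = [("uinteger",   ctypes.c_uint32),
                    ("buffer_pos", ctypes.c_int16),
                    ("has_uint32", ctypes.c_int16),
                    ("seed",       ctypes.c_uint64)]

    print(ctypes.sizeof(PhiloxTail))     # 16
    print(PhiloxTail.seed.offset)        # 8 -> seed sits on an 8-byte boundary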
Signed-off-by: Jan Vesely --- psyneulink/core/llvm/builtins.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/psyneulink/core/llvm/builtins.py b/psyneulink/core/llvm/builtins.py index f13d078174e..ede1ef10c7c 100644 --- a/psyneulink/core/llvm/builtins.py +++ b/psyneulink/core/llvm/builtins.py @@ -1090,10 +1090,11 @@ def _setup_philox_rand_int32(ctx, state_ty, gen_int64): buffered_ptr = builder.gep(state, [ctx.int32_ty(0), ctx.int32_ty(3)]) has_buffered_ptr = builder.gep(state, [ctx.int32_ty(0), ctx.int32_ty(5)]) has_buffered = builder.load(has_buffered_ptr) - with builder.if_then(has_buffered): + has_buffered_cond = builder.icmp_unsigned("!=", has_buffered, has_buffered.type(0)) + with builder.if_then(has_buffered_cond): buffered = builder.load(buffered_ptr) builder.store(buffered, out) - builder.store(has_buffered.type(False), has_buffered_ptr) + builder.store(has_buffered.type(0), has_buffered_ptr) builder.ret_void() @@ -1107,7 +1108,7 @@ def _setup_philox_rand_int32(ctx, state_ty, gen_int64): val_hi = builder.lshr(val, val.type(val.type.width // 2)) val_hi = builder.trunc(val_hi, buffered_ptr.type.pointee) builder.store(val_hi, buffered_ptr) - builder.store(has_buffered.type(True), has_buffered_ptr) + builder.store(has_buffered.type(1), has_buffered_ptr) builder.ret_void() @@ -2048,7 +2049,7 @@ def get_philox_state_struct(ctx): ir.ArrayType(int64_ty, _PHILOX_DEFAULT_BUFFER_SIZE), # pre-gen buffer ctx.int32_ty, # the other half of random 64 bit int int16_ty, # buffer pos - ctx.bool_ty, # has uint buffered + int16_ty, # has uint buffered int64_ty]) # seed From bf8f96a223c8d793bf8721881010667b8f33bec6 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Wed, 3 Jul 2024 11:29:32 -0400 Subject: [PATCH 242/410] llvm: Use known type instead of int32_ty if available If one of the operands in binary ops and comparisons is known and the other one is a constant use the known type instead of ctx.int32_ty to instantiate the constant. 
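A generic llvmlite sketch of the pattern (the names here are made up, not PsyNeuLink code): once one operand's IR type is known, the constant is built from that type rather than from a hard-coded 32-bit type, so the two operands always match.

    from llvmlite import ir

    module = ir.Module(name="example")
    fnty = ir.FunctionType(ir.IntType(64), [ir.IntType(64)])
    fn = ir.Function(module, fnty, name="increment")
    builder = ir.IRBuilder(fn.append_basic_block("entry"))

    (x,) = fn.args
    one = x.type(1)                   # i64 1, derived from the operand's own type
    builder.ret(builder.add(x, one))  # both add operands are i64

    print(module)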
Signed-off-by: Jan Vesely --- .../control/optimizationcontrolmechanism.py | 25 +++++++++++-------- psyneulink/core/llvm/codegen.py | 6 ++--- psyneulink/core/llvm/helpers.py | 10 ++++---- 3 files changed, 22 insertions(+), 19 deletions(-) diff --git a/psyneulink/core/components/mechanisms/modulatory/control/optimizationcontrolmechanism.py b/psyneulink/core/components/mechanisms/modulatory/control/optimizationcontrolmechanism.py index 8b526d9e538..af305dfd4e7 100644 --- a/psyneulink/core/components/mechanisms/modulatory/control/optimizationcontrolmechanism.py +++ b/psyneulink/core/components/mechanisms/modulatory/control/optimizationcontrolmechanism.py @@ -3379,7 +3379,8 @@ def _gen_llvm_evaluate_alloc_range_function(self, *, ctx:pnlvm.LLVMBuilderContex if "evaluate_type_objective" in tags: out_idx = idx elif "evaluate_type_all_results" in tags: - out_idx = builder.mul(idx, builder.load(num_trials_per_estimate_ptr)) + num_trials_per_estimate = builder.load(num_trials_per_estimate_ptr) + out_idx = builder.mul(idx, builder.trunc(num_trials_per_estimate, idx.type)) else: assert False, "Evaluation type not detected in tags, or unknown: {}".format(tags) @@ -3497,13 +3498,16 @@ def _gen_llvm_evaluate_function(self, *, ctx:pnlvm.LLVMBuilderContext, tags=froz num_trials_per_estimate = builder.load(num_trials_per_estimate_ptr, "num_trials_per_estimate") # if num_trials_per_estimate is 0, run 1 trial - param_is_zero = builder.icmp_unsigned("==", num_trials_per_estimate, - ctx.int32_ty(0)) - num_sims = builder.select(param_is_zero, ctx.int32_ty(1), - num_trials_per_estimate, "corrected_trials per_estimate") + num_trials_param_is_zero = builder.icmp_unsigned("==", + num_trials_per_estimate, + num_trials_per_estimate.type(0)) + num_trials_per_estimate_fixed = builder.select(num_trials_param_is_zero, + num_trials_per_estimate.type(1), + num_trials_per_estimate, + "corrected_num_trials_per_estimate") - num_trials = builder.alloca(ctx.int32_ty, name="num_sim_trials") - builder.store(num_sims, num_trials) + num_trials = builder.alloca(sim_f.args[5].type.pointee, name="num_sim_trials") + builder.store(builder.trunc(num_trials_per_estimate_fixed, num_trials.type.pointee), num_trials) # Simulations don't store output unless we run parameter fitting if 'evaluate_type_objective' in tags: @@ -3513,14 +3517,13 @@ def _gen_llvm_evaluate_function(self, *, ctx:pnlvm.LLVMBuilderContext, tags=froz else: assert False, "Evaluation type not detected in tags, or unknown: {}".format(tags) - builder.call(sim_f, [comp_state, comp_params, comp_data, comp_input, - comp_output, num_trials, num_inputs]) + builder.call(sim_f, [comp_state, comp_params, comp_data, comp_input, comp_output, num_trials, num_inputs]) if "evaluate_type_objective" in tags: # Extract objective mechanism value - assert self.objective_mechanism, f"objective_mechanism on OptimizationControlMechanism cannot be None " \ - f"in compiled mode" + assert self.objective_mechanism, \ + "objective_mechanism on OptimizationControlMechanism cannot be None in 'evaluate_type_objective'" obj_idx = self.agent_rep._get_node_index(self.objective_mechanism) # Mechanisms' results are stored in the first substructure diff --git a/psyneulink/core/llvm/codegen.py b/psyneulink/core/llvm/codegen.py index 0d7d2207d50..83998d151e0 100644 --- a/psyneulink/core/llvm/codegen.py +++ b/psyneulink/core/llvm/codegen.py @@ -873,7 +873,7 @@ def gen_composition_exec(ctx, composition, *, tags:frozenset): iter_ptr = builder.alloca(ctx.int32_ty, name="iter_counter") - builder.store(ctx.int32_ty(0), 
iter_ptr) + builder.store(iter_ptr.type.pointee(0), iter_ptr) # Start the main loop structure loop_condition = builder.append_basic_block(name="scheduling_loop_condition") @@ -964,12 +964,12 @@ def gen_composition_exec(ctx, composition, *, tags:frozenset): builder.block.name = "update_iter_count" # Increment number of iterations iters = builder.load(iter_ptr, name="iterw") - iters = builder.add(iters, ctx.int32_ty(1), name="iterw_inc") + iters = builder.add(iters, iters.type(1), name="iterw_inc") builder.store(iters, iter_ptr) max_iters = len(composition.scheduler.consideration_queue) completed_pass = builder.icmp_unsigned("==", iters, - ctx.int32_ty(max_iters), + iters.type(max_iters), name="completed_pass") # Increment pass and reset time step with builder.if_then(completed_pass): diff --git a/psyneulink/core/llvm/helpers.py b/psyneulink/core/llvm/helpers.py index d99980d9bfb..a7464fd7664 100644 --- a/psyneulink/core/llvm/helpers.py +++ b/psyneulink/core/llvm/helpers.py @@ -521,7 +521,7 @@ def bump_ts(self, builder, cond_ptr, count=(0, 0, 1)): for idx in range(len(ts.type)): if all(v == 0 for v in count[:idx]): el = builder.extract_value(ts, idx) - el = builder.add(el, self.ctx.int32_ty(count[idx])) + el = builder.add(el, el.type(count[idx])) else: el = self.ctx.int32_ty(0) ts = builder.insert_value(ts, el, idx) @@ -573,7 +573,7 @@ def generate_update_after_run(self, builder, cond_ptr, node): # Update number of runs runs = builder.extract_value(status, 0) - runs = builder.add(runs, self.ctx.int32_ty(1)) + runs = builder.add(runs, runs.type(1)) status = builder.insert_value(status, runs, 0) # Update time stamp @@ -682,7 +682,7 @@ def generate_sched_condition(self, builder, condition, cond_ptr, node, self.ctx.int32_ty(scale)]) num_execs = builder.load(target_num_execs_in_scale) - return builder.icmp_unsigned('<', num_execs, self.ctx.int32_ty(count)) + return builder.icmp_unsigned('<', num_execs, num_execs.type(count)) elif isinstance(condition, AtNCalls): target, count = condition.args @@ -691,7 +691,7 @@ def generate_sched_condition(self, builder, condition, cond_ptr, node, [self.ctx.int32_ty(0), self.ctx.int32_ty(scale)]) num_execs = builder.load(target_num_execs_in_scale) - return builder.icmp_unsigned('==', num_execs, self.ctx.int32_ty(count)) + return builder.icmp_unsigned('==', num_execs, num_execs.type(count)) elif isinstance(condition, AfterNCalls): target, count = condition.args @@ -700,7 +700,7 @@ def generate_sched_condition(self, builder, condition, cond_ptr, node, [self.ctx.int32_ty(0), self.ctx.int32_ty(scale)]) num_execs = builder.load(target_num_execs_in_scale) - return builder.icmp_unsigned('>=', num_execs, self.ctx.int32_ty(count)) + return builder.icmp_unsigned('>=', num_execs, num_execs.type(count)) elif isinstance(condition, WhenFinished): # The first argument is the target node From d73d26ae0399b7ca6b9b3c27ad93e8553ecac88d Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Wed, 3 Jul 2024 11:32:57 -0400 Subject: [PATCH 243/410] llvm: Codestyle Use zero constant if available. Use up to 120 characters per line.
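For reference, a standalone llvmlite sketch of the "zero constant if available" pattern (hypothetical module and function names, not PsyNeuLink code):

from llvmlite import ir

module = ir.Module()
func = ir.Function(module, ir.FunctionType(ir.VoidType(), []), name="example")
builder = ir.IRBuilder(func.append_basic_block())

counter = builder.alloca(ir.IntType(32), name="iter_counter")
# Derive the constant from the pointer's own pointee type rather than from a
# context-wide int32 type, so the stored value always matches the allocated type.
builder.store(counter.type.pointee(0), counter)
builder.ret_void()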
Signed-off-by: Jan Vesely --- psyneulink/core/llvm/codegen.py | 123 +++++++++++++++----------------- 1 file changed, 58 insertions(+), 65 deletions(-) diff --git a/psyneulink/core/llvm/codegen.py b/psyneulink/core/llvm/codegen.py index 83998d151e0..16eca1c8ddb 100644 --- a/psyneulink/core/llvm/codegen.py +++ b/psyneulink/core/llvm/codegen.py @@ -86,6 +86,7 @@ def is_lval(self, val): def visit_arguments(self, node): args = node.args variable = args[0] + # update register self.register[variable.arg] = self.arg_in parameters = args[1:] @@ -210,6 +211,7 @@ def visit_Attribute(self, node:ast.AST): if node.attr == "shape": shape = helpers.get_array_shape(val) return ir.ArrayType(self.ctx.float_ty, len(shape))(shape) + elif node.attr == "flatten": val = self.get_rval(val) def flatten(builder): @@ -224,13 +226,16 @@ def collect(builder, x): for i, v in enumerate(res): flat = self.builder.insert_value(flat, v, i) return flat + return flatten + elif node.attr == "astype": val = self.get_rval(val) def astype(builder, ty): def _convert(builder, x): return helpers.convert_type(builder, x, ty) return self._do_unary_op(builder, val, _convert) + return astype return val[node.attr] @@ -249,6 +254,7 @@ def visit_Assign(self, node): self._update_debug_metadata(self.var_builder, node) target = self.var_builder.alloca(value.type, name=str(t.id) + '_local_variable') self.register[t.id] = target + assert self.is_lval(target) self.builder.store(value, target) @@ -562,7 +568,9 @@ def call_builtin_np_max(self, builder, x): x = self.get_rval(x) if helpers.is_scalar(x): return x + res = self.ctx.float_ty(float("-Inf")) + def find_max(builder, x): nonlocal res # to propagate NaNs we use unordered >, @@ -572,11 +580,11 @@ def find_max(builder, x): cond = builder.and_(not_nan, greater) res = builder.select(cond, x, res) return res + self._do_unary_op(builder, x, find_max) return res - def gen_node_wrapper(ctx, composition, node, *, tags:frozenset): assert "node_wrapper" in tags func_tags = tags.difference({"node_wrapper"}) @@ -584,6 +592,7 @@ def gen_node_wrapper(ctx, composition, node, *, tags:frozenset): node_function = ctx.import_llvm_function(node, tags=func_tags) # FIXME: This is a hack is_mech = hasattr(node, 'function') + zero = ctx.int32_ty(0) data_struct_ptr = ctx.get_data_struct_type(composition).as_pointer() args = [ @@ -599,8 +608,7 @@ def gen_node_wrapper(ctx, composition, node, *, tags:frozenset): cond_ty = cond_gen.get_condition_struct_type().as_pointer() args.append(cond_ty) - builder = ctx.create_llvm_function(args, node, tags=tags, - return_type=node_function.type.pointee.return_type) + builder = ctx.create_llvm_function(args, node, tags=tags, return_type=node_function.type.pointee.return_type) llvm_func = builder.function for a in llvm_func.args: a.attributes.add('nonnull') @@ -611,19 +619,19 @@ def gen_node_wrapper(ctx, composition, node, *, tags:frozenset): # if there are incoming modulatory projections, # the input structure is shared if composition.parameter_CIM.afferents: - node_in = builder.gep(comp_in, [ctx.int32_ty(0), ctx.int32_ty(0)]) + node_in = builder.gep(comp_in, [zero, zero]) else: node_in = comp_in incoming_projections = [] elif node is composition.parameter_CIM and node.afferents: # if parameter_CIM has afferent projections, # their values are in comp_in[1] - node_in = builder.gep(comp_in, [ctx.int32_ty(0), ctx.int32_ty(1)]) + node_in = builder.gep(comp_in, [zero, ctx.int32_ty(1)]) + # And we run no further projection incoming_projections = [] elif not is_mech: - node_in = 
builder.alloca(node_function.args[2].type.pointee, - name="composition_node_input") + node_in = builder.alloca(node_function.args[2].type.pointee, name="composition_node_input") incoming_projections = node.parameter_CIM.afferents if "reset" not in tags: incoming_projections += node.input_CIM.afferents @@ -631,8 +639,7 @@ def gen_node_wrapper(ctx, composition, node, *, tags:frozenset): # this path also handles parameter_CIM with no afferent # projections. 'comp_in' does not include any extra values, # and the entire call should be optimized out. - node_in = builder.alloca(node_function.args[2].type.pointee, - name="mechanism_node_input") + node_in = builder.alloca(node_function.args[2].type.pointee, name="mechanism_node_input") if {"reset", "is_finished"}.intersection(tags): incoming_projections = node.mod_afferents else: @@ -645,11 +652,8 @@ def gen_node_wrapper(ctx, composition, node, *, tags:frozenset): # Execute all incoming projections inner_projections = list(composition._inner_projections) - zero = ctx.int32_ty(0) - projections_params = helpers.get_param_ptr(builder, composition, - params, "projections") - projections_states = helpers.get_state_ptr(builder, composition, - state, "projections") + projections_params = helpers.get_param_ptr(builder, composition, params, "projections") + projections_states = helpers.get_state_ptr(builder, composition, state, "projections") for proj in incoming_projections: # Skip autoassociative projections. # Recurrent projections are executed as part of the mechanism to @@ -667,10 +671,7 @@ def gen_node_wrapper(ctx, composition, node, *, tags:frozenset): assert proj.sender in send_mech.output_ports output_port_idx = send_mech.output_ports.index(proj.sender) - proj_in = builder.gep(data_in, [ctx.int32_ty(0), - ctx.int32_ty(0), - ctx.int32_ty(send_node_idx), - ctx.int32_ty(output_port_idx)]) + proj_in = builder.gep(data_in, [zero, zero, ctx.int32_ty(send_node_idx), ctx.int32_ty(output_port_idx)]) # Get location of projection output (in mechanism's input structure) rec_port = proj.receiver @@ -723,7 +724,6 @@ def gen_node_wrapper(ctx, composition, node, *, tags:frozenset): builder.call(proj_function, [proj_params, proj_state, proj_in, proj_out]) - node_idx = ctx.int32_ty(composition._get_node_index(node)) nodes_params = helpers.get_param_ptr(builder, composition, params, "nodes") nodes_states = helpers.get_state_ptr(builder, composition, state, "nodes") @@ -742,8 +742,8 @@ def gen_node_wrapper(ctx, composition, node, *, tags:frozenset): nested_idx = ctx.int32_ty(composition._get_node_index(node) + 1) node_data = builder.gep(data_in, [zero, nested_idx]) node_cond = builder.gep(llvm_func.args[5], [zero, nested_idx]) - ret = builder.call(node_function, [node_state, node_params, node_in, - node_data, node_cond]) + ret = builder.call(node_function, [node_state, node_params, node_in, node_data, node_cond]) + # Copy output of the nested composition to its output place output_idx = node._get_node_index(node.output_CIM) result = builder.gep(node_data, [zero, zero, ctx.int32_ty(output_idx)]) @@ -764,7 +764,7 @@ def gen_node_wrapper(ctx, composition, node, *, tags:frozenset): def _gen_composition_exec_context(ctx, composition, *, tags:frozenset, suffix="", extra_args=[]): cond_gen = helpers.ConditionGenerator(ctx, composition) - name = "_".join(("wrap_exec", *tags ,composition.name + suffix)) + name = "_".join(("wrap_exec", *tags, composition.name + suffix)) args = [ctx.get_state_struct_type(composition).as_pointer(), 
ctx.get_param_struct_type(composition).as_pointer(), ctx.get_input_struct_type(composition).as_pointer(), @@ -816,12 +816,8 @@ def gen_composition_exec(ctx, composition, *, tags:frozenset): # Get locations of number of executions. num_exec_locs = {} for idx, node in enumerate(composition._all_nodes): - node_state = builder.gep(nodes_states, [ctx.int32_ty(0), - ctx.int32_ty(idx)]) - num_exec_locs[node] = helpers.get_state_ptr(builder, - node, - node_state, - "num_executions") + node_state = builder.gep(nodes_states, [ctx.int32_ty(0), ctx.int32_ty(idx)]) + num_exec_locs[node] = helpers.get_state_ptr(builder, node, node_state, "num_executions") # Generate pointers to 'is_finished' callbacks is_finished_callbacks = {} @@ -836,33 +832,34 @@ def gen_composition_exec(ctx, composition, *, tags:frozenset): # executed above for time_loc in num_exec_locs.values(): for scale in (TimeScale.TRIAL, TimeScale.PASS, TimeScale.TIME_STEP): - num_exec_time_ptr = builder.gep(time_loc, [ctx.int32_ty(0), - ctx.int32_ty(scale.value)]) + num_exec_time_ptr = builder.gep(time_loc, [ctx.int32_ty(0), ctx.int32_ty(scale.value)]) builder.store(num_exec_time_ptr.type.pointee(0), num_exec_time_ptr) # Check if there's anything to reset for node in composition._all_nodes: - when = getattr(node, "reset_stateful_function_when", Never()) # FIXME: This should not be necessary. The code gets DCE'd, # but there are still some problems with generation # 'reset' function if node is composition.controller: continue - reinit_cond = cond_gen.generate_sched_condition( - builder, when, cond, node, is_finished_callbacks, num_exec_locs, nodes_states) + reinit_cond = cond_gen.generate_sched_condition(builder, + getattr(node, "reset_stateful_function_when", Never()), + cond, + node, + is_finished_callbacks, + num_exec_locs, + nodes_states) with builder.if_then(reinit_cond): node_w = ctx.get_node_wrapper(composition, node) node_reinit_f = ctx.import_llvm_function(node_w, tags=node_tags.union({"reset"})) builder.call(node_reinit_f, [state, params, comp_in, data, data]) # Run controller if it's enabled in 'BEFORE' mode - if simulation is False and composition.enable_controller and \ - composition.controller_mode == BEFORE: + if simulation is False and composition.enable_controller and composition.controller_mode == BEFORE: assert composition.controller is not None controller_w = ctx.get_node_wrapper(composition, composition.controller) - controller_f = ctx.import_llvm_function(controller_w, - tags=node_tags) + controller_f = ctx.import_llvm_function(controller_w, tags=node_tags) builder.call(controller_f, [state, params, comp_in, data, data]) @@ -882,9 +879,13 @@ def gen_composition_exec(ctx, composition, *, tags:frozenset): # Generate a while not 'end condition' loop builder.position_at_end(loop_condition) - trial_term_cond = cond_gen.generate_sched_condition( - builder, composition.termination_processing[TimeScale.TRIAL], - cond, None, is_finished_callbacks, num_exec_locs, nodes_states) + trial_term_cond = cond_gen.generate_sched_condition(builder, + composition.termination_processing[TimeScale.TRIAL], + cond, + None, + is_finished_callbacks, + num_exec_locs, + nodes_states) trial_cond = builder.not_(trial_term_cond, name="not_trial_term_cond") loop_body = builder.append_basic_block(name="scheduling_loop_body") @@ -900,15 +901,16 @@ def gen_composition_exec(ctx, composition, *, tags:frozenset): any_cond = ctx.bool_ty(0) # Calculate execution set before running the mechanisms for idx, node in enumerate(composition.nodes): - run_set_node_ptr = 
builder.gep(run_set_ptr, - [zero, ctx.int32_ty(idx)], - name="run_cond_ptr_" + node.name) - node_cond = cond_gen.generate_sched_condition( - builder, composition._get_processing_condition_set(node), - cond, node, is_finished_callbacks, num_exec_locs, nodes_states) + run_set_node_ptr = builder.gep(run_set_ptr, [zero, ctx.int32_ty(idx)], name="run_cond_ptr_" + node.name) + node_cond = cond_gen.generate_sched_condition(builder, + composition._get_processing_condition_set(node), + cond, + node, + is_finished_callbacks, + num_exec_locs, + nodes_states) ran = cond_gen.generate_ran_this_pass(builder, cond, node) - node_cond = builder.and_(node_cond, builder.not_(ran), - name="run_cond_" + node.name) + node_cond = builder.and_(node_cond, builder.not_(ran), name="run_cond_" + node.name) any_cond = builder.or_(any_cond, node_cond, name="any_ran_cond") builder.store(node_cond, run_set_node_ptr) @@ -919,15 +921,13 @@ def gen_composition_exec(ctx, composition, *, tags:frozenset): ran_prev_step = builder.extract_value(previous_step, [idx]) time_loc = num_exec_locs[node] with builder.if_then(ran_prev_step): - num_exec_time_ptr = builder.gep(time_loc, [ctx.int32_ty(0), - ctx.int32_ty(TimeScale.TIME_STEP.value)]) + num_exec_time_ptr = builder.gep(time_loc, [zero, ctx.int32_ty(TimeScale.TIME_STEP.value)]) builder.store(num_exec_time_ptr.type.pointee(0), num_exec_time_ptr) for idx, node in enumerate(composition.nodes): run_set_node_ptr = builder.gep(run_set_ptr, [zero, ctx.int32_ty(idx)]) - node_cond = builder.load(run_set_node_ptr, - name="node_" + node.name + "_should_run") + node_cond = builder.load(run_set_node_ptr, name="node_" + node.name + "_should_run") with builder.if_then(node_cond): node_w = ctx.get_node_wrapper(composition, node) node_f = ctx.import_llvm_function(node_w, tags=node_tags) @@ -947,11 +947,8 @@ def gen_composition_exec(ctx, composition, *, tags:frozenset): run_set_node_ptr = builder.gep(run_set_ptr, [zero, ctx.int32_ty(idx)]) node_cond = builder.load(run_set_node_ptr, name="node_" + node.name + "_ran") with builder.if_then(node_cond): - out_ptr = builder.gep(output_storage, [zero, zero, - ctx.int32_ty(idx)], - name="result_ptr_" + node.name) - data_ptr = builder.gep(data, [zero, zero, ctx.int32_ty(idx)], - name="data_result_" + node.name) + out_ptr = builder.gep(output_storage, [zero, zero, ctx.int32_ty(idx)], name="result_ptr_" + node.name) + data_ptr = builder.gep(data, [zero, zero, ctx.int32_ty(idx)], name="data_result_" + node.name) builder.store(builder.load(out_ptr), data_ptr) # Update step counter @@ -968,9 +965,7 @@ def gen_composition_exec(ctx, composition, *, tags:frozenset): builder.store(iters, iter_ptr) max_iters = len(composition.scheduler.consideration_queue) - completed_pass = builder.icmp_unsigned("==", iters, - iters.type(max_iters), - name="completed_pass") + completed_pass = builder.icmp_unsigned("==", iters, iters.type(max_iters), name="completed_pass") # Increment pass and reset time step with builder.if_then(completed_pass): builder.block.name = "inc_pass" @@ -979,8 +974,7 @@ def gen_composition_exec(ctx, composition, *, tags:frozenset): cond_gen.bump_ts(builder, cond, (0, 1, 0)) # Reset internal PASS clock for each node for time_loc in num_exec_locs.values(): - num_exec_time_ptr = builder.gep(time_loc, [ctx.int32_ty(0), - ctx.int32_ty(TimeScale.PASS.value)]) + num_exec_time_ptr = builder.gep(time_loc, [zero, ctx.int32_ty(TimeScale.PASS.value)]) builder.store(num_exec_time_ptr.type.pointee(0), num_exec_time_ptr) builder.branch(loop_condition) @@ -1108,9 +1102,8 
@@ def gen_composition_run(ctx, composition, *, tags:frozenset): if not simulation or "simulation_results" in tags: # Extract output_CIM result - idx = composition._get_node_index(composition.output_CIM) - result_ptr = builder.gep(data, [ctx.int32_ty(0), ctx.int32_ty(0), - ctx.int32_ty(idx)]) + node_idx = composition._get_node_index(composition.output_CIM) + result_ptr = builder.gep(data, [ctx.int32_ty(0), ctx.int32_ty(0), ctx.int32_ty(node_idx)]) output_ptr = builder.gep(data_out, [iters]) result = builder.load(result_ptr) builder.store(result, output_ptr) From 56b555abca388b7a246ba57363db1723ca9bdfa1 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Sat, 6 Jul 2024 12:55:06 -0400 Subject: [PATCH 244/410] llvm/execution: Remove byref calls Not needed. Signed-off-by: Jan Vesely --- psyneulink/core/llvm/execution.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/psyneulink/core/llvm/execution.py b/psyneulink/core/llvm/execution.py index ea1e5ef5ec7..0922977beff 100644 --- a/psyneulink/core/llvm/execution.py +++ b/psyneulink/core/llvm/execution.py @@ -350,10 +350,10 @@ def execute(self, variable): self._ct_vo, self._ct_len) else: - self._bin_func(ctypes.byref(self._param_struct[0]), - ctypes.byref(self._state_struct[0]), + self._bin_func(self._param_struct[0], + self._state_struct[0], ct_vi, - ctypes.byref(self._ct_vo)) + self._ct_vo) return _convert_ctype_to_python(self._ct_vo) @@ -820,7 +820,7 @@ def thread_evaluate(self, inputs, num_input_sets, num_evaluations, all_results:b # Create input and result typed casts once, they are the same # for every submitted job. - input_param = ctypes.cast(ctypes.byref(ct_inputs), self.__bin_func.c_func.argtypes[5]) + input_param = ctypes.cast(ct_inputs, self.__bin_func.c_func.argtypes[5]) results_param = ctypes.cast(ct_results, self.__bin_func.c_func.argtypes[4]) # There are 7 arguments to evaluate_alloc_range: From f00e10fe08dc0a2c3a640607127d01dcfdf484cc Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Sat, 6 Jul 2024 13:11:37 -0400 Subject: [PATCH 245/410] llvm/execution: Don't use wrap_call to execute "run" The input struct needs casting because it's wrapped in an extra dimension of execution contexts. Use unsigned integers for run count and input count. 
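A minimal standalone ctypes sketch of the cast described above (hypothetical type names, not the PsyNeuLink API):

import ctypes

TrialInput = ctypes.c_double * 3            # one trial's input values
ct_inputs = ((TrialInput * 4) * 1)()        # wrapped in an extra execution-context dimension

# The compiled "run" function expects a plain pointer to TrialInput, so the extra
# dimension is stripped with an explicit cast instead of passing ctypes.byref().
inputs_param = ctypes.cast(ct_inputs, ctypes.POINTER(TrialInput))

# Run count and input count are passed as unsigned integers.
runs_count = ctypes.c_uint(4)
input_count = ctypes.c_uint(4)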
Signed-off-by: Jan Vesely --- psyneulink/core/llvm/execution.py | 41 +++++++++++++++++-------------- 1 file changed, 22 insertions(+), 19 deletions(-) diff --git a/psyneulink/core/llvm/execution.py b/psyneulink/core/llvm/execution.py index 0922977beff..62583b29abf 100644 --- a/psyneulink/core/llvm/execution.py +++ b/psyneulink/core/llvm/execution.py @@ -350,10 +350,7 @@ def execute(self, variable): self._ct_vo, self._ct_len) else: - self._bin_func(self._param_struct[0], - self._state_struct[0], - ct_vi, - self._ct_vo) + self._bin_func(self._param_struct[0], self._state_struct[0], ct_vi, self._ct_vo) return _convert_ctype_to_python(self._ct_vo) @@ -626,18 +623,20 @@ def _get_run_input_struct(self, inputs, num_input_sets, arg=3): bin_f = self._bin_run_func if arg == 3 else self._bin_func input_type = bin_f.byref_arg_types[arg] - c_input = (input_type * num_input_sets) * len(self._execution_contexts) + c_input_type = (input_type * num_input_sets) * len(self._execution_contexts) if len(self._execution_contexts) == 1: inputs = [inputs] assert len(inputs) == len(self._execution_contexts) # Extract input for each trial and execution id run_inputs = ((([x] for x in self._composition._build_variable_for_input_CIM({k:v[i] for k,v in inp.items()})) for i in range(num_input_sets)) for inp in inputs) - c_inputs = c_input(*_tupleize(run_inputs)) + c_inputs = c_input_type(*_tupleize(run_inputs)) if "stat" in self._debug_env: print("Instantiated struct: input ( size:" , - _pretty_size(ctypes.sizeof(c_inputs)), ")", - "for", self._obj.name) + _pretty_size(ctypes.sizeof(c_inputs)), + ")", + "for", + self._obj.name) return c_inputs @@ -648,8 +647,8 @@ def _get_generator_run_input_struct(self, inputs, runs): run_inputs = _tupleize(run_inputs) num_input_sets = len(run_inputs) runs = num_input_sets if runs == 0 or runs == sys.maxsize else runs - c_input = self._bin_run_func.byref_arg_types[3] * num_input_sets - return c_input(*run_inputs), runs + c_input_type = self._bin_run_func.byref_arg_types[3] * num_input_sets + return c_input_type(*run_inputs), runs @property def _bin_run_func(self): @@ -677,6 +676,7 @@ def run(self, inputs, runs=0, num_input_sets=0): ct_vo = self._bin_run_func.byref_arg_types[4] * runs if len(self._execution_contexts) > 1: ct_vo = ct_vo * len(self._execution_contexts) + outputs = ct_vo() if "stat" in self._debug_env: @@ -685,8 +685,8 @@ def run(self, inputs, runs=0, num_input_sets=0): print("Output struct size:", _pretty_size(ctypes.sizeof(outputs)), "for", self._composition.name) - runs_count = ctypes.c_int(runs) - input_count = ctypes.c_int(num_input_sets) + runs_count = ctypes.c_uint(runs) + input_count = ctypes.c_uint(num_input_sets) if len(self._execution_contexts) > 1: self._bin_run_multi_func.wrap_call(self._state_struct[0], self._param_struct[0], @@ -699,13 +699,16 @@ def run(self, inputs, runs=0, num_input_sets=0): return _convert_ctype_to_python(outputs) else: - self._bin_run_func.wrap_call(self._state_struct[0], - self._param_struct[0], - self._data_struct[0], - inputs, - outputs, - runs_count, - input_count) + # This is only needed for non-generator inputs that are wrapped in an extra context dimension + inputs = ctypes.cast(inputs, self._bin_run_func.c_func.argtypes[3]) + + self._bin_run_func(self._state_struct[0], + self._param_struct[0], + self._data_struct[0], + inputs, + outputs, + runs_count, + input_count) # Extract only #trials elements in case the run exited early assert runs_count.value <= runs, "Composition ran more times than allowed!" 
From 1a0ee140805c1163bf399707a7cfeb626bed5e1d Mon Sep 17 00:00:00 2001 From: David Turner Date: Mon, 8 Jul 2024 10:09:48 -0400 Subject: [PATCH 246/410] Fix for multiple cond params --- psyneulink/core/compositions/parameterestimationcomposition.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/psyneulink/core/compositions/parameterestimationcomposition.py b/psyneulink/core/compositions/parameterestimationcomposition.py index b3f05477dbb..35db1a2af81 100644 --- a/psyneulink/core/compositions/parameterestimationcomposition.py +++ b/psyneulink/core/compositions/parameterestimationcomposition.py @@ -685,7 +685,7 @@ def _validate_data(self): f"values in this column.") # Get a separate copy of the dataframe with conditional columns - self.cond_data = self.data[self.depends_on.values()].copy() + self.cond_data = self.data[list(set(self.depends_on.values()))].copy() # For each value in depends_on, get the unique levels of the column. This will determine the number of # of conditional parameters that need to be estimated for that parameter. From e580fb21998d79bdca244619eea8146d2746a36f Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Tue, 9 Jul 2024 12:59:00 -0400 Subject: [PATCH 247/410] llvm: Fix processing of enum values after compilation sync (#2996) Enums are represented as 0-d integer arrays and the Parameter type changes to 0-d integer array after compilation sync. Make sure the new value type can be consumed by the follow-up compiled, or Python, execution. Closes: https://github.com/PrincetonUniversity/PsyNeuLink/issues/2984 Signed-off-by: Jan Vesely --- .../processing/transfermechanism.py | 7 ++-- psyneulink/core/llvm/builder_context.py | 7 +++- tests/composition/test_composition.py | 35 +++++++++++++++++++ 3 files changed, 45 insertions(+), 4 deletions(-) diff --git a/psyneulink/core/components/mechanisms/processing/transfermechanism.py b/psyneulink/core/components/mechanisms/processing/transfermechanism.py index e0f2d97959f..61268e3d066 100644 --- a/psyneulink/core/components/mechanisms/processing/transfermechanism.py +++ b/psyneulink/core/components/mechanisms/processing/transfermechanism.py @@ -1793,13 +1793,14 @@ def is_finished(self, context=None): # return True return self.parameters.is_finished_flag._get(context) - assert self.parameters.value.history_min_length + 1 >= self._termination_measure_num_items_expected,\ - "History of 'value' is not guaranteed enough entries for termination_mesasure" + assert self.parameters.value.history_min_length + 1 >= self._termination_measure_num_items_expected, \ + "History of 'value' is not guaranteed enough entries for termination_measure" + measure = self.termination_measure value = self.parameters.value._get(context) if self._termination_measure_num_items_expected==0: - status = self.parameters.num_executions._get(context)._get_by_time_scale(self.termination_measure) + status = self.parameters.num_executions._get(context)._get_by_time_scale(TimeScale(self.termination_measure)) elif self._termination_measure_num_items_expected==1: # Squeeze to collapse 2d array with single item diff --git a/psyneulink/core/llvm/builder_context.py b/psyneulink/core/llvm/builder_context.py index a2e07c6b09b..8b0e90e7a05 100644 --- a/psyneulink/core/llvm/builder_context.py +++ b/psyneulink/core/llvm/builder_context.py @@ -370,7 +370,7 @@ def check_used_params(self, component, *, tags:frozenset): # has_initializers is only used in "reset" variants initializers.add('has_initializers') - # 'termination_mesasure" is only used in "is_finished" variant 
+ # 'termination_measure" is only used in "is_finished" variant used_param_ids.add('termination_measure') used_state_ids.add('termination_measure') @@ -531,6 +531,11 @@ def convert_python_struct_to_llvm_ir(self, t): # Python 'int' is handled above as it is the default type for '0' return ir.IntType(t.nbytes * 8) elif isinstance(t, np.ndarray): + # 0d uint32 values were likely created from enums (above) and are + # observed here after compilation sync. + # Avoid silent promotion to float (via Python's builtin int-type) + if t.ndim == 0 and t.dtype == np.uint32: + return self.convert_python_struct_to_llvm_ir(t.reshape(1)[0]) return self.convert_python_struct_to_llvm_ir(t.tolist()) elif isinstance(t, np.random.RandomState): return pnlvm.builtins.get_mersenne_twister_state_struct(self) diff --git a/tests/composition/test_composition.py b/tests/composition/test_composition.py index 61e0f8a241f..c8e73527b92 100644 --- a/tests/composition/test_composition.py +++ b/tests/composition/test_composition.py @@ -4319,6 +4319,7 @@ def test_multiple_runs_with_parameter_change(self, comp_mode): # assign array with len 2 to float, must recompile A.function.intercept.base = [3, 3] self._check_comp_ex(comp, None, comp_mode, struct_name) + # vectorized intercept not supported in LLVM modes A.function.intercept.base = 3 @@ -4416,6 +4417,40 @@ def test_multiple_runs_with_parameter_change_from_data_struct(self, comp_mode): self._check_comp_ex(comp, None, comp_mode, struct_name, is_not=True) self._check_comp_ex(comp, orig_comp_ex, comp_mode, struct_name, is_not=True) + @pytest.mark.composition + @pytest.mark.usefixtures("comp_mode_no_llvm") + @pytest.mark.parametrize("comp_mode2", [m for m in pytest.helpers.get_comp_execution_modes() if m.values[0] is not pnl.ExecutionMode.LLVM]) + def test_execution_after_cleanup_enum_param(self, comp_mode, comp_mode2): + """ + This test checks that compiled sync works for Parameters with Enum values. + Enums are converted to 0-d numpy arrays of tyep integer and the synced value + should be correctly consumed by the following execution, both Python and compiled + """ + + T = pnl.TransferMechanism(integrator_mode=True, + termination_measure=pnl.TimeScale.TRIAL, + termination_threshold=5, + execute_until_finished=True) + P = pnl.ProcessingMechanism() + + C = pnl.Composition() + C.add_linear_processing_pathway([T, P]) + + C.scheduler.add_condition(P, pnl.WhenFinished(T)) + + ctx = pnl.Context() + + res = C.run([5], execution_mode=comp_mode, context=ctx) + np.testing.assert_allclose(res, [[4.84375]]) + + # Cleanup is really only necessary if the first execution is compiled, + # but it's really cheap if it there's no compilation context + C._compilation_data.execution.set(None, context=ctx) + pnl.core.llvm.cleanup() + + res2 = C.run([5], execution_mode=comp_mode2, context=ctx) + np.testing.assert_allclose(res2, [[4.995117]]) + class TestCallBeforeAfterTimescale: From 26bccfd7f961047f99986d72409234df1f07983c Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Tue, 9 Jul 2024 14:32:51 -0400 Subject: [PATCH 248/410] llvm/cleanup: Run GC only if needed Function tests should be able to free used memory using reference counts alone. 
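The check pattern in isolation, assuming a WeakKeyDictionary-style cache whose keys are weak-referenceable (an illustrative sketch, not the PsyNeuLink API):

import gc
import weakref

def assert_cache_released(cache):
    # Snapshot the surviving keys as weak references first; if plain reference
    # counting already freed everything, the expensive gc.collect() is skipped.
    remaining = weakref.WeakSet(cache.keys())
    if len(remaining) > 0:
        gc.collect()   # only needed to break reference cycles
    assert len(remaining) == 0, list(remaining)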
Signed-off-by: Jan Vesely --- psyneulink/core/llvm/__init__.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/psyneulink/core/llvm/__init__.py b/psyneulink/core/llvm/__init__.py index 69fbea15429..2e60cafcb95 100644 --- a/psyneulink/core/llvm/__init__.py +++ b/psyneulink/core/llvm/__init__.py @@ -11,10 +11,12 @@ import ctypes import enum import functools +import gc import numpy as np import time from math import ceil, log2 from psyneulink._typing import Set +import weakref from llvmlite import ir @@ -273,10 +275,11 @@ def cleanup(check_leaks:bool=False): LLVMBuilderContext.clear_global() # check that WeakKeyDictionary is not keeping any references - import gc - gc.collect() - c = list(old_context._cache.keys()) + # Try first without calling the GC + c = weakref.WeakSet(old_context._cache.keys()) + if len(c) > 0: + gc.collect() - assert len(c) == 0, c + assert len(c) == 0, list(c) else: LLVMBuilderContext.clear_global() From 4062c4a75e400d994889221609d3ec1303d24145 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Thu, 4 Jul 2024 23:28:11 -0400 Subject: [PATCH 249/410] tests/leak: Remove leftover debug code Signed-off-by: Jan Vesely --- tests/misc/test_leak.py | 20 ++------------------ 1 file changed, 2 insertions(+), 18 deletions(-) diff --git a/tests/misc/test_leak.py b/tests/misc/test_leak.py index 3de3539aa69..04f05da187e 100644 --- a/tests/misc/test_leak.py +++ b/tests/misc/test_leak.py @@ -37,21 +37,5 @@ def test_composition_leak(comp_mode, run): gc.collect() - def print_ref(r, depth=0): - if depth == 3: - return - - if isinstance(r, (dict, set, list, tuple)): - for r1 in gc.get_referrers(r): - print_ref(r1, depth + 1) - - if weak_t() is not None: - for r in gc.get_referrers(weak_t()): - print_ref(r) - - if weak_c() is not None: - for r in gc.get_referrers(weak_c()): - print_ref(r) - - assert weak_c() is None - assert weak_t() is None + assert weak_c() is None, gc.get_referrers(weak_c()) + assert weak_t() is None, gc.get_referrers(weak_t()) From 5aee5dddb941c36512bbe51ea4788436fd136287 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Tue, 9 Jul 2024 14:47:35 -0400 Subject: [PATCH 250/410] Mechanism: Drop beartype decorator on internal function Leaks reference to Mechanism instance. 
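The leak tests in this series rely on the standard weakref idiom for confirming that an instance is actually released; a minimal standalone version (hypothetical class name) looks like:

import gc
import weakref

class Holder:
    pass

obj = Holder()
ref = weakref.ref(obj)

del obj
gc.collect()

# If a decorator, cache, or other global still holds the instance, the weak
# reference stays alive and the referrers identify the culprit.
assert ref() is None, gc.get_referrers(ref())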
Signed-off-by: Jan Vesely --- psyneulink/core/components/mechanisms/mechanism.py | 1 - 1 file changed, 1 deletion(-) diff --git a/psyneulink/core/components/mechanisms/mechanism.py b/psyneulink/core/components/mechanisms/mechanism.py index f65e1c495ec..fed6c4a6327 100644 --- a/psyneulink/core/components/mechanisms/mechanism.py +++ b/psyneulink/core/components/mechanisms/mechanism.py @@ -3501,7 +3501,6 @@ def mech_cell(): return f'' + \ mech_name + mech_roles + mech_condition + mech_function + mech_value + '' - @beartype def port_table(port_list: ContentAddressableList, port_type: Union[Type[InputPort], Type[ParameterPort], Type[OutputPort]]): """Return html with table for each port in port_list, including functions and/or values as specified From 79d997a62610efe26d1ad10bfe49f44112eb1083 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Thu, 4 Jul 2024 23:41:03 -0400 Subject: [PATCH 251/410] tests/leak: Test show_graph in the leak test Signed-off-by: Jan Vesely --- tests/misc/test_leak.py | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/tests/misc/test_leak.py b/tests/misc/test_leak.py index 04f05da187e..18dbba88965 100644 --- a/tests/misc/test_leak.py +++ b/tests/misc/test_leak.py @@ -6,19 +6,28 @@ import graph_scheduler as gs import psyneulink as pnl +show_graph_args = ["show_all", "show_node_structure", "show_cim", "show_learning", "show_types", "show_dimensions", + "show_projection_labels", "show_projections_not_in_composition"] @pytest.mark.composition -@pytest.mark.parametrize("run", ["not_run", "run"]) -def test_composition_leak(comp_mode, run): +@pytest.mark.parametrize("show_graph_args", [pytest.param(None, id="show_graph_disabled"), + pytest.param({}, id="show_graph_default"), + *(pytest.param({arg: True}, id=arg) for arg in show_graph_args), + ]) +@pytest.mark.parametrize("op", ["construct", "run"]) +def test_composition_leak(comp_mode, op, show_graph_args): c = pnl.Composition() t = pnl.TransferMechanism() c.add_node(t) - if run == "run": + if op == "run": res = c.run([5], execution_mode=comp_mode) np.testing.assert_array_equal(res, [[5]]) + if show_graph_args is not None: + c.show_graph(**show_graph_args, output_fmt=None) + weak_c = weakref.ref(c) weak_t = weakref.ref(t) From 8899ddb7bc055eed7b2ae59c7ca83670ee5bdeed Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Wed, 10 Jul 2024 20:28:50 -0400 Subject: [PATCH 252/410] tests/DDM: Skip running ExecutionMode.LLVM instead of xfail Signed-off-by: Jan Vesely --- tests/mechanisms/test_ddm_mechanism.py | 44 ++++++++++---------------- 1 file changed, 16 insertions(+), 28 deletions(-) diff --git a/tests/mechanisms/test_ddm_mechanism.py b/tests/mechanisms/test_ddm_mechanism.py index be779240a20..5959fd96fd4 100644 --- a/tests/mechanisms/test_ddm_mechanism.py +++ b/tests/mechanisms/test_ddm_mechanism.py @@ -715,16 +715,12 @@ def test_DDM_threshold_modulation_integrator(comp_mode): (10.0, 10.0, [[10.0], [29.0]]), (100.0, 100.0, [[100.0], [76.0]]), ]) +# 3/5/2021 - DDM' default behaviour now requires resetting stateful +# functions after each trial. This is not supported in LLVM execution mode. +# See: https://github.com/PrincetonUniversity/PsyNeuLink/issues/1935 +@pytest.mark.usefixtures("comp_mode_no_llvm") def test_ddm_is_finished(comp_mode, noise, threshold, expected_results): - # 3/5/2021 - DDM' default behaviour now requires resetting stateful - # functions after each trial. This is not supported in LLVM execution mode. 
- # See: https://github.com/PrincetonUniversity/PsyNeuLink/issues/1935 - if comp_mode == pnl.ExecutionMode.LLVM: - pytest.xfail(reason="DDM' default behaviour now requires resetting stateful functions after each trial. " - "This is not supported in LLVM execution mode. " - "See: https://github.com/PrincetonUniversity/PsyNeuLink/issues/1935") - comp = Composition() ddm = DDM(function=DriftDiffusionIntegrator(threshold=threshold, noise=np.sqrt(noise), time_step_size=1.0), execute_until_finished=True) @@ -737,18 +733,14 @@ def test_ddm_is_finished(comp_mode, noise, threshold, expected_results): @pytest.mark.composition @pytest.mark.parametrize("until_finished", ["until_finished", "not_until_finished"]) @pytest.mark.parametrize("threshold_mod", ["threshold_modulated", "threshold_not_modulated"]) +# 3/5/2021 - DDM' default behaviour now requires resetting stateful +# functions after each trial. This is not supported in LLVM execution mode. +# See: https://github.com/PrincetonUniversity/PsyNeuLink/issues/1935 +# Moreover, evaluating scheduler conditions in Python is not supported +# for compiled execution +@pytest.mark.usefixtures("comp_mode_no_llvm") def test_ddm_is_finished_with_dependency(comp_mode, until_finished, threshold_mod): - # 3/5/2021 - DDM' default behaviour now requires resetting stateful - # functions after each trial. This is not supported in LLVM execution mode. - # See: https://github.com/PrincetonUniversity/PsyNeuLink/issues/1935 - # Moreover, evaluating scheduler conditions in Python is not supported - # for compiled execution - if comp_mode == pnl.ExecutionMode.LLVM: - pytest.xfail(reason="DDM' default behaviour now requires resetting stateful functions after each trial. " - "This is not supported in LLVM execution mode. " - "See: https://github.com/PrincetonUniversity/PsyNeuLink/issues/1935") - comp = Composition() ddm = DDM(function=DriftDiffusionIntegrator(), # Use only the decision variable in this test @@ -831,18 +823,14 @@ def test_sequence_of_DDM_mechs_in_Composition_Pathway(): @pytest.mark.composition @pytest.mark.ddm_mechanism +# 3/5/2021 - DDM' default behaviour now requires resetting stateful +# functions after each trial. This is not supported in LLVM execution mode. +# See: https://github.com/PrincetonUniversity/PsyNeuLink/issues/1935 +@pytest.mark.usefixtures("comp_mode_no_llvm") def test_DDMMechanism_LCA_equivalent(comp_mode): - # 3/5/2021 - DDM' default behaviour now requires resetting stateful - # functions after each trial. This is not supported in LLVM execution mode. - # See: https://github.com/PrincetonUniversity/PsyNeuLink/issues/1935 - if comp_mode == pnl.ExecutionMode.LLVM: - pytest.xfail(reason="DDM' default behaviour now requires resetting stateful functions after each trial. " - "This is not supported in LLVM execution mode. 
" - "See: https://github.com/PrincetonUniversity/PsyNeuLink/issues/1935") - - - ddm = DDM(default_variable=[0], function=DriftDiffusionIntegrator(rate=1, time_step_size=0.1), + ddm = DDM(default_variable=[0], + function=DriftDiffusionIntegrator(rate=1, time_step_size=0.1), execute_until_finished=False) comp2 = Composition() comp2.add_node(ddm) From daef387ac08bab82c5dee8b4cbecf75d02f8bf18 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Wed, 10 Jul 2024 20:38:08 -0400 Subject: [PATCH 253/410] tests/OutputPort: Clarify failure in xfail test Signed-off-by: Jan Vesely --- tests/ports/test_output_ports.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/ports/test_output_ports.py b/tests/ports/test_output_ports.py index a7c9bb2101e..c330035353c 100644 --- a/tests/ports/test_output_ports.py +++ b/tests/ports/test_output_ports.py @@ -37,7 +37,7 @@ def test_output_port_variable_spec(self, mech_mode): [((pnl.OWNER_VALUE, 0), [1], [1]), ((pnl.OWNER_VALUE, 1), [2], [2]), ((pnl.OWNER_VALUE, 2), [3], [3]), - pytest.param((pnl.OWNER_VALUE, 3), [3], [3], marks=[pytest.mark.xfail()]), + pytest.param((pnl.OWNER_VALUE, 3), [3], [3], marks=[pytest.mark.xfail(raises=IndexError, match="list index out of range")]), ((pnl.OWNER_EXECUTION_COUNT), [4], [8]), (("num_executions", pnl.TimeScale.LIFE), [4], [8]), (("num_executions", pnl.TimeScale.RUN), [4], [4]), From e5feb288cb695d686c1a8f8734c42735f8eaa36c Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Thu, 11 Jul 2024 13:32:33 -0400 Subject: [PATCH 254/410] llvm: Track and clear active CompExecution instances (#2999) These instances contain invalid function pointers after llvm cleanup. Track them, and clean them from owning compositions on cleanup. Signed-off-by: Jan Vesely --- psyneulink/core/llvm/__init__.py | 17 +++++++++++++++++ psyneulink/core/llvm/execution.py | 8 ++++++++ tests/composition/test_composition.py | 1 - 3 files changed, 25 insertions(+), 1 deletion(-) diff --git a/psyneulink/core/llvm/__init__.py b/psyneulink/core/llvm/__init__.py index 2e60cafcb95..bd931413e9b 100644 --- a/psyneulink/core/llvm/__init__.py +++ b/psyneulink/core/llvm/__init__.py @@ -12,6 +12,7 @@ import enum import functools import gc +import inspect import numpy as np import time from math import ceil, log2 @@ -283,3 +284,19 @@ def cleanup(check_leaks:bool=False): assert len(c) == 0, list(c) else: LLVMBuilderContext.clear_global() + + # If not checking for leaks, there might be active compositions that + # cache pointers to binary functions. Accessing those pointers would + # cause segfault. + # Extract the set of associated compositions. Both to avoid duplicate + # clears for executions that belong to the same composition, and to + # avoid modifying the container that is iterated over. 
+ for c in {e._composition for e in CompExecution.active_executions}: + c._compilation_data.execution.values.clear() + c._compilation_data.execution.history.clear() + + # The set of active executions should be empty + for e in CompExecution.active_executions: + assert any(inspect.isframe(r) for r in gc.get_referrers(e)) + + CompExecution.active_executions.clear() diff --git a/psyneulink/core/llvm/execution.py b/psyneulink/core/llvm/execution.py index 62583b29abf..86f79bd0f3c 100644 --- a/psyneulink/core/llvm/execution.py +++ b/psyneulink/core/llvm/execution.py @@ -17,6 +17,7 @@ import sys import time from typing import Callable, Optional +import weakref from psyneulink.core import llvm as pnlvm @@ -369,6 +370,8 @@ def execute(self, variable): class CompExecution(CUDAExecution): + active_executions = weakref.WeakSet() + def __init__(self, composition, execution_ids=[None], *, additional_tags=frozenset()): super().__init__(buffers=['state_struct', 'param_struct', 'data_struct', 'conditions']) self._composition = composition @@ -388,6 +391,11 @@ def __init__(self, composition, execution_ids=[None], *, additional_tags=frozens if len(execution_ids) > 1: self._ct_len = ctypes.c_int(len(execution_ids)) + self.active_executions.add(self) + + def __del__(self): + self.active_executions.discard(self) + @staticmethod def get(composition, context, additional_tags=frozenset()): executions = composition._compilation_data.execution._get(context) diff --git a/tests/composition/test_composition.py b/tests/composition/test_composition.py index c8e73527b92..81c13b223a2 100644 --- a/tests/composition/test_composition.py +++ b/tests/composition/test_composition.py @@ -4445,7 +4445,6 @@ def test_execution_after_cleanup_enum_param(self, comp_mode, comp_mode2): # Cleanup is really only necessary if the first execution is compiled, # but it's really cheap if it there's no compilation context - C._compilation_data.execution.set(None, context=ctx) pnl.core.llvm.cleanup() res2 = C.run([5], execution_mode=comp_mode2, context=ctx) From 75a90a278676ad4561aa54927f4fc9af04449d44 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 11 Jul 2024 20:05:38 -0400 Subject: [PATCH 255/410] requirements: update grpcio requirement from <1.65.0 to <1.66.0 (#3000) Updates the requirements on [grpcio](https://github.com/grpc/grpc) to permit the latest version. - [Release notes](https://github.com/grpc/grpc/releases) - [Changelog](https://github.com/grpc/grpc/blob/master/doc/grpc_release_schedule.md) - [Commits](https://github.com/grpc/grpc/compare/v1.64.0...v1.65.0) --- updated-dependencies: - dependency-name: grpcio dependency-type: direct:production ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index dce6634af38..dc485bfd0e2 100644 --- a/requirements.txt +++ b/requirements.txt @@ -3,7 +3,7 @@ dill<0.3.9 fastkde>=1.0.24, <1.0.31 graph-scheduler>=1.2.1, <1.3.0 graphviz<0.21.0 -grpcio<1.65.0 +grpcio<1.66.0 leabra-psyneulink<0.3.3 llvmlite<0.44 matplotlib<3.7.6 From 98430ee50a90fa3d907d10ea5ff448d7854a7e15 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 22 Jul 2024 20:32:40 -0400 Subject: [PATCH 256/410] requirements: update pytest requirement from <8.2.3 to <8.3.2 (#3004) Updates the requirements on [pytest](https://github.com/pytest-dev/pytest) to permit the latest version. - [Release notes](https://github.com/pytest-dev/pytest/releases) - [Changelog](https://github.com/pytest-dev/pytest/blob/main/CHANGELOG.rst) - [Commits](https://github.com/pytest-dev/pytest/compare/1.0.0b3...8.3.1) --- updated-dependencies: - dependency-name: pytest dependency-type: direct:development ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- dev_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev_requirements.txt b/dev_requirements.txt index 3b0c7416cfe..645d3b8eee6 100644 --- a/dev_requirements.txt +++ b/dev_requirements.txt @@ -1,6 +1,6 @@ jupyter<1.0.1 packaging<25.0 -pytest<8.2.3 +pytest<8.3.2 pytest-benchmark<4.0.1 pytest-cov<5.0.1 pytest-forked<1.7.0 From f4ffbdf68dcea439ced58ae91b5495754ee8057d Mon Sep 17 00:00:00 2001 From: David Turner Date: Tue, 23 Jul 2024 15:17:56 -0400 Subject: [PATCH 257/410] Cleanup warning check in PEC tests. --- .../test_parameterestimationcomposition.py | 21 +++---------------- 1 file changed, 3 insertions(+), 18 deletions(-) diff --git a/tests/composition/test_parameterestimationcomposition.py b/tests/composition/test_parameterestimationcomposition.py index d23ef40bbf9..8a1d8ab586f 100644 --- a/tests/composition/test_parameterestimationcomposition.py +++ b/tests/composition/test_parameterestimationcomposition.py @@ -260,35 +260,20 @@ def reward_rate(sim_data): # If we are testing an instantiated optuna sampler, make sure the warning is generated about # random seeds if isinstance(opt_method, optuna.samplers.RandomSampler): - with pytest.warns(UserWarning) as record: + with pytest.warns(UserWarning, match="initial_seed on PEC is not None, but instantiated optuna sampler is being used."): pec.run(inputs=inputs_dict) - # Search through the warnings to make sure the one we are looking for is there - found_warning = False - for warning in record: - if "initial_seed on PEC is not None, but instantiated optuna sampler is being used." in str(warning.message): - found_warning = True - - if not found_warning: - raise AssertionError("Did not find warning about random seed") elif isinstance(opt_method, type) and issubclass(opt_method, optuna.samplers.BaseSampler): - with pytest.warns(UserWarning) as record: + with pytest.warns(UserWarning, match="Overriding seed passed to optuna sampler with seed passed to PEC."): pec.run(inputs=inputs_dict) - # Search through the warnings to make sure the one we are looking for is there - found_warning = False - for warning in record: - if "Overriding seed passed to optuna sampler with seed passed to PEC." 
in str(warning.message): - found_warning = True - - if not found_warning: - raise AssertionError("Did not find warning about overriding seed passed") else: pec.run(inputs={comp: trial_inputs}) if result is not None: np.testing.assert_allclose(list(pec.optimized_parameter_values.values()), result) + def test_parameter_estimation_ddm_cond(func_mode): if func_mode == "Python": From 60cccbc0c521a0f0e5a9a009929dc1366e313a3f Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Tue, 23 Jul 2024 16:18:06 -0400 Subject: [PATCH 258/410] llvm/execution/cuda: Always use pycuda ArgumentHandler as cached buffer (#3010) Only the ArgumentHandler class provides the .array member that is used to check if a gpu_buffer is not stale. Fixes: 36e52b8ceb50d69c04d8e9aa81cd2139f27f01ee ("llvm/execution/cuda: Use numpy argument handlers to access compiled structures") Signed-off-by: Jan Vesely --- psyneulink/core/llvm/execution.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/psyneulink/core/llvm/execution.py b/psyneulink/core/llvm/execution.py index 86f79bd0f3c..0d05164887d 100644 --- a/psyneulink/core/llvm/execution.py +++ b/psyneulink/core/llvm/execution.py @@ -249,10 +249,12 @@ def __get_cuda_arg(self, struct_name, arg_handler): gpu_buffer = self._gpu_buffers[struct_name] np_struct = getattr(self, struct_name)[1] + + # .array is a public member of pycuda's In/Out ArgumentHandler classes if gpu_buffer is None or gpu_buffer.array is not np_struct: - # 0-sized structures fail to upload use a small device buffer instead - gpu_buffer = arg_handler(np_struct) if np_struct.nbytes > 0 else jit_engine.pycuda.driver.mem_alloc(8) + # 0-sized structures fail to upload use a dummy numpy array isntead + gpu_buffer = arg_handler(np_struct if np_struct.nbytes > 0 else np.zeros(2)) self._gpu_buffers[struct_name] = gpu_buffer From 5f5359c790db7692db0330175d536eba4d6ac17c Mon Sep 17 00:00:00 2001 From: kmantel <1592123+kmantel@users.noreply.github.com> Date: Tue, 23 Jul 2024 20:55:53 -0400 Subject: [PATCH 259/410] docs: rename figure to match docstring reference (#3009) case mismatch, in section _CompositionInterfaceMechanism_Structure of compositioninterfacemechanism.py caused image to not be displayed on documentation page --- docs/source/_static/{CIM_FIgure.svg => CIM_figure.svg} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename docs/source/_static/{CIM_FIgure.svg => CIM_figure.svg} (100%) diff --git a/docs/source/_static/CIM_FIgure.svg b/docs/source/_static/CIM_figure.svg similarity index 100% rename from docs/source/_static/CIM_FIgure.svg rename to docs/source/_static/CIM_figure.svg From 7933df97e66655e15fc4b9ada7afbcbae1fec300 Mon Sep 17 00:00:00 2001 From: jdcpni Date: Thu, 25 Jul 2024 11:22:04 -0400 Subject: [PATCH 260/410] Refactor/autodiff and emcomposition (#3011) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit • processingmechanism.py: - _instantiate_output_ports(): if no OutputPorts are specified, and variable has more than one item, assign one OutputPort to each • Tests for multiple output_ports with learning need to be added • test_learning.py - TestAutodiffMultipleOutput_port: - add test_parallel_inputs_to_output_ports_converge_internal - add test_single_input_to_multiple_output_ports_converge_internal - add test_single_input_to_multiple_output_ports_converge_on_OUTPUT_Node • composition.py - _create_backpropagation_learning_pathway(): - raise error for terminal node with > 1 output_ports _instantiate_input_dict(): refactor to format 
user-specified inputs _instantiate_input_dict(): - NodeRoles: add BIAS - _determine_node_roles: assign NodeRole.BIAS
• test_composition.py: add test_input_shape_errors
• processingmechanism.py: docstring description of configuration
• test_processing_mechanism.py - test_processing_mechanism_multiple_input_ports: add tests for specifying names and/or variables for output_ports
• autodiffcomposition.py infer_backpropagation_learning_pathways(): - only exclude ModulatoryProjections or ones that don't have a 'learnable' attribute
• emcomposition.py - execute(): - autodiff execution commented out - execute(): use autodiff - replace key_input_node(s) with query_input_node(s)
• pytorchcomponents.py - pytorch_function_creator: - implement support for EMStorage - fix bug in softmax - remove pytorch_function_creator
• learningfunctions.py: EMStorage: minor fixes
• pytorchEMcompositionwrapper.py: - subclass PytorchCompositionWrapper to handle functionality of EMStorage - store_memory(): implementing
• EMstoragemechanism.py - store_memory(): implement get_memory
• test_composition.py add test_BIAS
• pytorchcomponents.py
• function.py - Function: add _get_pytorch_fct_param_value()
• transferfunctions.py, combinationfunctions.py, learningfunctions.py: - add _gen_pytorch_fct()
• autodiffcomposition.py, emcomposition.py - move assignment of pytorch_composition_wrapper_type to __init__()
• test_autodiffcomposition.py, test_emcomposition.py - add tests for execution without torch installed
• test_emcomposition.py - modified test_simple_execution_without_learning() to use ragged entries in memory - add test_multiple_trials_concatenation_and_storage_node_with_learning - def test_multiple_trials_concatenation_and_storage_node(): consolidates test_multiple_trials_concatenation_and_storage_node_no_learning and test_multiple_trials_concatenation_and_storage_node_with_learning
• EMStorageMechanism.py - _function(): fix bug in normalization before selecting weakest entry
• pytorchemcomposition.py: - _store_memory(): fix bug in normalization before selecting weakest entry
• test_emcomposition.py: - convert test_execution_data to use asymmetric fields - add testing of learn() method for use_storage_node=True and concatenate_keys=False
• emcomposition.py - _parse_fields(): fix bug in which key names were messed up for non-contiguous keys in input nodes - _parse_fields(): fix bug in assignment of key_indices
• pytorchwrappers.py - PytorchCompositionWrapper.__init__(): - use wrapper_type to wrap composition - forward(): handle nodes with _custom_execution attribute - PytorchMechanismWrapper: - assign _custom_execution if mechanism has _custom_autodiff_execution attribute - PytorchMechanismWrapper and PytorchProjectionWrapper: - add current_value default_value attribute and assign it either to initializer (if that has been specified, else defaults.value) - add _curr_sender_value attribute - collate_afferents: - assign _curr_sender_value and handle None by assigning proj_wrapper.default_value
• emcomposition.py - assign _store_memory to storage_node.custom_autodiff_execution
• pytorchwrappers.py & pytorchEMcompositionwrapper.py: - refactored to simplify custom_execution handling
• autodiffcomposition.py - rename _infer_input_nodes -> _get_autodiff_input_values - rename _infer_output_nodes -> _get_autodiff_target_values - _get_autodiff_target_values(): check if TARGET Node has afferent projections and, if so, find and use source of INPUT
• pytorchwrappers.py - document exclude_from_autodiff attribute
• projection.py - rename
_exclude_from_autodiff -> exclude_in_autodiff (to be consistent with argument to constructor)
• showgraph.py - refactor to use (overridable) method to get nodes of a specified Composition (to support filtering of nodes by subclasses, such as PyTorchShowGraph)
• showgraph.py - refactor to use (overridable) methods (_get_nodes and _get_projections) to get nodes and projections of a specified Composition (to support filtering of nodes and projections by subclasses, such as PyTorchShowGraph)
• pytorchshowgraph.py - implement overrides of above methods to show PyTorch-relevant graph (flattened over nested comps) - _implement_graph(): augmented to apply relevant style for ModulatoryProjections from nodes that are excluded from gradient calculation
• showgraph.py and pytorchshowgraph.py: - _proj_in_composition(): implement so it can be overridden to include projections in pytorch_rep.processing_graph (such as direct projections from nodes in outer comp to ones in nested comp)
• composition.py _add_projection() - fix bug in which learnable parameter was not being passed to proj_spec for Projection from nested to outer comp _instantiate_projection_from_spec() - replaced call to _instantiate_projection_from_spec using params=params with **params
• autodiffcomposition.py and emcomposition.py: interim commit on refactoring of enable_learning to filter target nodes not used for learning
• composition.py - get_nested_nodes_by_roles_at_any_level(): refactor to support local definition of TARGET
• emcomposition.py: - _assign_target_nodes(): restrict OUTPUT nodes to only those retrieval nodes enabled for learning (used by autodiff.infer_backpropagation_pathways() to determine TARGET nodes) - _construct_retrieved_nodes(): return nodes in order of self.field_names - input_nodes also ordered by self.field_names
• compositionrunner.py - add batch info as arg to call_after_minibatch
• autodiffcomposition.py - autodiff_training(): adding support for updating of PNL node values after autodiff forward pass - try values of pytorch nodes during forward pass
• composition.py, autodiffcomposition.py, compositionrunner.py - asserts for debugging
• composition.py, autodiffcomposition.py, compositionrunner.py - learn() (and methods it calls): add synchronize_pnl_values and optimizations_per_minibatch parameters
• pytorchwrappers.py __init__(), execute(): adding handling of mechanism integration_function
• pytorchwrappers.py PytorchMechanismWrapper.execute(): add support for mechanisms with integration_function
• integratorfunctions.py AdaptiveIntegrator: add _gen_pytorch_fct
• transferfunctions.py TanH: fix bug in _gen_pytorch_fct (that was returning logistic not tanh)
• emcomposition.py add normalize_field_weights arg
• combinationfunctions.py Concatenate: add _get_pytorch_fct to
• EGO Model (sim 2) - CSW with Learning.py: refactor script to consolidate parameter definitions
• autodiffcomposition.py - add device as arg and Parameter
• transferfunctions.py: SoftMax: threshold -> mask_threshold
• emcomposition.py: __init__(): check if softmax_gain is set to CONTROL and, if so, set Parameters.softmax_gain.modulable=False
• utilities.py - get_torch_tensor(): implemented but not yet deployed
• transferfunctions.py - SoftMax: fix masking for pytorch version
--------- Co-authored-by: jdcpni --- .../EGO/Declan Pytorch Integrator Module | 55 ++ ...odel (sim 1) - MDP using EMComposition.py} | 61 +-- ...m 2) - CSW using EMComposition (BACKUP).py | 433 +++++++++++++++ ...im 2) - CSW using EMComposition with WM.py | 425
+++++++++++++++ ...Model (sim 2) - CSW using EMComposition.py | 433 +++++++++++++++ ...m 2) - CSW with Integrator and Learning.py | 406 ++++++++++++++ .../EGO Model (sim 2) - CSW with Learning.py | 506 ++++++++++++++++++ .../EGO/Environment.py | 55 ++ psyneulink/core/components/component.py | 6 +- .../core/components/functions/function.py | 14 +- .../nonstateful/combinationfunctions.py | 7 + .../nonstateful/learningfunctions.py | 2 +- .../nonstateful/transferfunctions.py | 264 ++++++++- .../functions/stateful/integratorfunctions.py | 14 +- .../core/components/projections/projection.py | 8 +- psyneulink/core/compositions/composition.py | 64 ++- psyneulink/core/compositions/showgraph.py | 261 +++++---- psyneulink/core/globals/keywords.py | 23 +- psyneulink/core/globals/utilities.py | 119 ++-- psyneulink/core/llvm/builder_context.py | 10 +- .../modulatory/learning/EMstoragemechanism.py | 8 +- .../compositions/autodiffcomposition.py | 323 +++++++---- .../library/compositions/compositionrunner.py | 38 +- .../library/compositions/emcomposition.py | 421 +++++++++++---- .../pytorchEMcompositionwrapper.py | 11 +- .../library/compositions/pytorchshowgraph.py | 184 +++++++ .../library/compositions/pytorchwrappers.py | 198 +++++-- tests/composition/test_emcomposition.py | 10 +- tests/composition/test_show_graph.py | 4 +- 29 files changed, 3827 insertions(+), 536 deletions(-) create mode 100644 Scripts/Models (Under Development)/EGO/Declan Pytorch Integrator Module rename Scripts/Models (Under Development)/EGO/{EGO Model - MDP using EMComposition.py => EGO Model (sim 1) - MDP using EMComposition.py} (91%) create mode 100644 Scripts/Models (Under Development)/EGO/EGO Model (sim 2) - CSW using EMComposition (BACKUP).py create mode 100644 Scripts/Models (Under Development)/EGO/EGO Model (sim 2) - CSW using EMComposition with WM.py create mode 100644 Scripts/Models (Under Development)/EGO/EGO Model (sim 2) - CSW using EMComposition.py create mode 100644 Scripts/Models (Under Development)/EGO/EGO Model (sim 2) - CSW with Integrator and Learning.py create mode 100644 Scripts/Models (Under Development)/EGO/EGO Model (sim 2) - CSW with Learning.py create mode 100644 Scripts/Models (Under Development)/EGO/Environment.py create mode 100644 psyneulink/library/compositions/pytorchshowgraph.py diff --git a/Scripts/Models (Under Development)/EGO/Declan Pytorch Integrator Module b/Scripts/Models (Under Development)/EGO/Declan Pytorch Integrator Module new file mode 100644 index 00000000000..f02acac788b --- /dev/null +++ b/Scripts/Models (Under Development)/EGO/Declan Pytorch Integrator Module @@ -0,0 +1,55 @@ +""" Model preparation functions """ +def prep_recurrent_network(rnet, state_d, persistence=-0.6): + with torch.no_grad(): + rnet.state_to_hidden.weight.copy_(torch.eye(state_d, dtype=torch.float)) + rnet.state_to_hidden.bias.zero_() + rnet.hidden_to_hidden.weight.zero_() + rnet.hidden_to_hidden.bias.zero_() + rnet.state_to_hidden_wt.weight.zero_() + rnet.state_to_hidden_wt.bias.copy_(torch.ones((len(rnet.state_to_hidden_wt.bias),), dtype=torch.float) * persistence) + rnet.hidden_to_hidden_wt.weight.zero_() + rnet.hidden_to_hidden_wt.bias.zero_() + # Set hidden to context weights as an identity matrix. 
+ rnet.hidden_to_context.weight.copy_(torch.eye(state_d, dtype=torch.float)) + rnet.hidden_to_context.bias.zero_() + + # Set requires_grad to True for hidden_to_context.weight before freezing other parameters + rnet.hidden_to_context.weight.requires_grad = True + rnet.hidden_to_context.bias.requires_grad = True + + # Freeze recurrent weights to stabilize training + for name, p in rnet.named_parameters(): + if 'hidden_to_context' not in name: + p.requires_grad = False + else: + p.requires_grad = True + return rnet + +class RecurrentContextModule(nn.Module): + """ + An Recurrent Neural Network module based on an architecture similar to the minimally gated recurrent unit. + """ + + def __init__(self, n_inputs, n_hidden, n_outputs) -> None: + super().__init__() + self.state_to_hidden = nn.Linear(n_inputs,n_hidden) + self.hidden_to_hidden = nn.Linear(n_hidden,n_hidden) + self.state_to_hidden_wt = nn.Linear(n_inputs,n_hidden) + self.hidden_to_hidden_wt = nn.Linear(n_hidden,n_hidden) + self.hidden_to_context = nn.Linear(n_hidden,n_outputs) + + self.n_hidden_units = n_hidden + self.hidden_state = torch.zeros((self.n_hidden_units,),dtype=torch.float) + self.update_hidden_state = True + + self.hidden_to_hidden_wt.weight.requires_grad = True + self.hidden_to_hidden_wt.bias.requires_grad = True + + def forward(self, x: torch.tensor) -> torch.tensor: + h_prev = self.hidden_state + h_update = torch.tanh(self.state_to_hidden(x)+self.hidden_to_hidden(h_prev)) + h_weight = torch.sigmoid(self.state_to_hidden_wt(x)+self.hidden_to_hidden_wt(h_prev)) + h_new = h_weight*h_prev + (1-h_weight)*h_update + if self.update_hidden_state: + self.hidden_state = h_new.detach().clone() + return self.hidden_to_context(h_new) \ No newline at end of file diff --git a/Scripts/Models (Under Development)/EGO/EGO Model - MDP using EMComposition.py b/Scripts/Models (Under Development)/EGO/EGO Model (sim 1) - MDP using EMComposition.py similarity index 91% rename from Scripts/Models (Under Development)/EGO/EGO Model - MDP using EMComposition.py rename to Scripts/Models (Under Development)/EGO/EGO Model (sim 1) - MDP using EMComposition.py index 2adcfb008f3..3420f1191d8 100644 --- a/Scripts/Models (Under Development)/EGO/EGO Model - MDP using EMComposition.py +++ b/Scripts/Models (Under Development)/EGO/EGO Model (sim 1) - MDP using EMComposition.py @@ -33,17 +33,14 @@ (under "Construction parameters") * `train_network `: - takes as arguments the feedforward neural network Composition (FFN_COMPOSITION) and number of epochs to train. - Note: learning_rate is set at construction (can specify using LEARNING_RATE under "Training parameters" below). + ... * `run_model `: - takes as arguments the drift rate in the temporal context vector to be applied on each trial, - and the number of trials to execute, as well as reporting and animation specifications - (see "Execution parameters"). + ... * `analyze_results `: takes as arguments the results of executing the model, and optionally a number of trials and EGO_level to analyze; - returns d-prime statistics and plots results for different conditions at each EGO_level executed. + returns... **The Model** @@ -53,32 +50,16 @@ .. _EGO_Fig: -.. figure:: _static/N-Back_Model_movie.gif +.. figure:: _static/` `Composition`. - -.. _EGO_ffn_composition: - -*FFN Composition* -~~~~~~~~~~~~~~~~~ - -The temporal context is provided by a randomly drifting high dimensional vector that maintains a constant norm (i.e., -drifts on a sphere). 
The FFN is trained, given an n-back level of *n*, to identify when the current stimulus matches -one stored in EM with a temporal context vector that differs by an amount corresponding to *n* time steps of drift. -During n-back performance, the model encodes the current stimulus and temporal context, retrieves an item from EM -that matches the current stimulus, weighted by the similarity of its temporal context vector (i.e., most recent), and -then uses the FFN to evaluate whether it is an n-back match. The model responds "match" if the FFN detects a match; -otherwise, it either uses the current stimulus and temporal context to retrieve another sample from EM and repeat the -evaluation or, with a fixed probability (hazard rate), it responds "non-match". - -The ffn Composition is trained using the train_network() method +This is comprised of... three input Mechanisms, and the nested `ffn ` `Composition`. **Construction and Execution** @@ -100,9 +81,8 @@ *Stimuli* ~~~~~~~~~ -Sequences of stimuli are constructed either using `SweetPea `_ -(using the script in stim/SweetPea) or replicate those used in the study by `Kane et al., -2007 `_ (from stimulus files in stim/Kane_et_al). +Sequences of stimuli are constructed either using `SweetPea `_ +(using the script in stim/SweetPea) or replicate those used in... .. note:: Use of SweetPea for stimulus generation requires it be installed:: @@ -151,10 +131,11 @@ ) RUN_MODEL = True # True => run the model # RUN_MODEL = False # False => don't run the model -# EXECUTION_MODE = ExecutionMode.Python -EXECUTION_MODE = ExecutionMode.PyTorch +EXECUTION_MODE = ExecutionMode.Python +# EXECUTION_MODE = ExecutionMode.PyTorch ANALYZE_RESULTS = False # True => output analysis of results of run -REPORT_OUTPUT = ReportOutput.FULL # Sets console output during run [ReportOutput.ON, .TERSE OR .FULL] +# REPORT_OUTPUT = ReportOutput.FULL # Sets console output during run [ReportOutput.ON, .TERSE OR .FULL] +REPORT_OUTPUT = ReportOutput.OFF # Sets console output during run [ReportOutput.ON, .TERSE OR .FULL] REPORT_PROGRESS = ReportProgress.OFF # Sets console progress bar during run PRINT_RESULTS = False # print model.results after execution ANIMATE = False # {UNIT:EXECUTION_SET} # Specifies whether to generate animation of execution @@ -484,7 +465,7 @@ def construct_model(model_name:str=MODEL_NAME, context_weight *= context_integration_rate # ---------------------------------------------------------------------------------------------------------------- - # ------------------------------------------------- Mechanisms ------------------------------------------------- + # ------------------------------------------------- Nodes ------------------------------------------------------ # ---------------------------------------------------------------------------------------------------------------- task_input_layer = ProcessingMechanism(name=task_input_name, size=task_size) @@ -498,22 +479,6 @@ def construct_model(model_name:str=MODEL_NAME, size=state_size, auto=1-context_integration_rate, hetero=0.0) - # em = EpisodicMemoryMechanism(name=em_name, - # default_variable=[[0] * state_size, # state - # [0] * time_size, # time - # [0] * state_size, # context - # [0] * reward_size], # reward - # input_ports=[{NAME:state_input_name, SIZE:state_size}, - # {NAME:time_input_name, SIZE:time_size}, - # {NAME:context_name, SIZE:state_size}, - # {NAME:reward_input_name, SIZE:reward_size}], - # function=ContentAddressableMemory( - # # selection_function=SoftMax(gain=retrieval_softmax_gain), - # 
distance_field_weights=[state_retrieval_weight, - # time_retrieval_weight, - # context_retrieval_weight, - # reward_retrieval_weight])) - # # em.output_ports[RETRIEVED_TIME_NAME].parameters.require_projection_in_composition.set(False, override=True) em = EMComposition(name=em_name, memory_template=[[0] * state_size, # state [0] * time_size, # time diff --git a/Scripts/Models (Under Development)/EGO/EGO Model (sim 2) - CSW using EMComposition (BACKUP).py b/Scripts/Models (Under Development)/EGO/EGO Model (sim 2) - CSW using EMComposition (BACKUP).py new file mode 100644 index 00000000000..55f44870058 --- /dev/null +++ b/Scripts/Models (Under Development)/EGO/EGO Model (sim 2) - CSW using EMComposition (BACKUP).py @@ -0,0 +1,433 @@ +# Princeton University licenses this file to You under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. You may obtain a copy of the License at: +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed +# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and limitations under the License. + +# TODO: + +# ADD PREVIOUS STATES +# ADD previous_state to EM and control to support that + +# FIX: TERMINATION CONDITION IS GETTING TRIGGED AFTER 1st TRIAL + +# FOR INPUT NODES: scheduler.add_condition(A, BeforeNCalls(A,1) +# Termination: AfterNCalls(Ctl,2) + +""" +QUESTIONS: + +NOTES: + *MUST* run Experience before Predict, as the latter requires retrieved_reward to be non-zero + (from last trial of Experience) in order to know to encode the next state (see control policy) + +**Overview** +------------ + +This implements a model of... + +The model is an example of... + +The script contains methods to construct, train, and run the model, and analyze the results of its execution: + +* `construct_model `: + takes as arguments parameters used to construct the model; for convenience, defaults are defined below, + (under "Construction parameters") + +* `train_network `: + ... + +* `run_model `: + ... + +* `analyze_results `: + takes as arguments the results of executing the model, and optionally a number of trials and EGO_level to analyze; + returns... + + +**The Model** +------------- + +The model is comprised of... + +.. _EGO_Fig: + +.. figure:: _static/` `Composition`. + + +**Construction and Execution** +------------------------------ + +.. _EGO_settings: + +*Settings* +~~~~~~~~~~ + +The default parameters are ones that have been fit to empirical data concerning human performance +(taken from `Kane et al., 2007 `_). + +See "Settings for running the script" to specify whether the model is trained and/or executed when the script is run, +and whether a graphic display of the network is generated when it is constructed. + +.. _EGO_stimuli: + +*Stimuli* +~~~~~~~~~ + +Sequences of stimuli are constructed either using `SweetPea `_ +(using the script in stim/SweetPea) or replicate those used in... + + .. note:: + Use of SweetPea for stimulus generation requires it be installed:: + >> pip install sweetpea + + +.. _EGO_training: + +*Training* +~~~~~~~~~~ + +MORE HERE + +.. _EGO_execution: + +*Execution* +~~~~~~~~~~~ + +MORE HERE + +.. 
_EGO_methods_reference: + +**Methods Reference** +--------------------- + + +""" + +import numpy as np +from enum import IntEnum + +from psyneulink import * +from psyneulink._typing import Union, Literal +from psyneulink.core.scheduling.condition import Any, And, AllHaveRun, AtRunStart + +# Settings for running script: + +NUM_EXP_SEQS = 5 # Number of sequences to run in EXPERIENCE Phase (includes baseline + revaluation) +NUM_PRED_TRIALS = 10 # Number of trials (ROLL OUTS) to run in PREDICTION Phase + +CONSTRUCT_MODEL = True # THIS MUST BE SET TO True to run the script +DISPLAY_MODEL = ( # Only one of the following can be uncommented: + # None # suppress display of model + {} # show simple visual display of model + # {'show_node_structure': True} # show detailed view of node structures and projections +) +# RUN_MODEL = True # True => run the model +RUN_MODEL = False # False => don't run the model +EXECUTION_MODE = ExecutionMode.Python +# EXECUTION_MODE = ExecutionMode.PyTorch +ANALYZE_RESULTS = False # True => output analysis of results of run +# REPORT_OUTPUT = ReportOutput.FULL # Sets console output during run [ReportOutput.ON, .TERSE OR .FULL] +REPORT_OUTPUT = ReportOutput.OFF # Sets console output during run [ReportOutput.ON, .TERSE OR .FULL] +REPORT_PROGRESS = ReportProgress.OFF # Sets console progress bar during run +PRINT_RESULTS = False # print model.results after execution +ANIMATE = False # {UNIT:EXECUTION_SET} # Specifies whether to generate animation of execution + + +#region PARAMETERS +# ====================================================================================================================== +# PARAMETERS +# ====================================================================================================================== + +# PyTorch Version Parameters: +model_params = dict( + n_participants=58, + n_simulations = 100, # number of rollouts per participant + num_seqs = 20, # total number of sequences to be executed (used to set size of EM) + n_steps = 3, # number of steps per rollout + state_d = 7, # length of state vector + context_d = 7, # length of context vector + time_d = 25, # length of time vector + self_excitation = .25, # rate at which old context is carried over to new context + input_weight = .5, # rate at which state is integrated into new context + retrieved_context_weight = .25, # rate at which context retrieved from EM is integrated into new context + time_noise=.01,# noise std for time integrator (drift is set to 0) + state_weight = .5, # weight of the state used during memory retrieval + context_weight = .3, # weight of the context used during memory retrieval + time_weight = .2, # weight of the time used during memory retrieval + temperature = .05 # temperature of the softmax used during memory retrieval (smaller means more argmax-like +) + +# Fixed (structural) parameters: + +# Names: +MODEL_NAME = "EGO Model CSW" +STATE_INPUT_LAYER_NAME = "STATE" +CONTEXT_LAYER_NAME = 'CONTEXT' +PREVIOUS_STATE_NAME = 'PREVIOUS_STATE' +EM_NAME = "EM" +PREDICTION_LAYER_NAME = "PREDICTION" + +EMFieldsIndex = IntEnum('EMFields', + ['STATE', + 'CONTEXT', + 'PREVIOUS_STATE'], + start=0) + + +# CONSTRUCTION PARAMETERS + +# Layer sizes: +STATE_SIZE = model_params['state_d'] # length of state vector +CONTEXT_SIZE = model_params['context_d'] # length of state vector + +# Context processing: +STATE_WEIGHT = model_params['input_weight'] # rate at which external vs. 
memory state are integrated in context_layer +CONTEXT_INTEGRATION_RATE = model_params['retrieved_context_weight'] # rate at which retrieved context (from EM) + # is integrated into context_layer +assert (model_params['retrieved_context_weight'] + STATE_WEIGHT + CONTEXT_INTEGRATION_RATE) == 1,\ + (f"Sum of STATE_WEIGHT ({STATE_WEIGHT}), CONTEXT_INTEGRATION_RATE ({CONTEXT_INTEGRATION_RATE}), " + f"and RETRIEVED_CONTEXT_WEIGHT ({model_params['retrieved_context_weight']}) must equal 1") + +# EM retrieval +STATE_RETRIEVAL_WEIGHT = model_params['state_weight'] # weight of state field in retrieval from EM +CONTEXT_RETRIEVAL_WEIGHT = model_params['context_weight'] # weight of context field in retrieval from EM +RETRIEVAL_SOFTMAX_GAIN = 1/model_params['temperature'] # gain on softmax retrieval function + +PREVIOUS_STATE_WEIGHT = 0 + +RANDOM_WEIGHTS_INITIALIZATION=RandomMatrix(center=0.0, range=0.1) # Matrix spec used to initialize all Projections + +#endregion + +#region ENVIRONMENT +# ====================================================================================================================== +# ENVIRONMENT +# ====================================================================================================================== + +# Task environment: +NUM_STIM_PER_SEQ = model_params['n_steps'] # number of stimuli in a sequence +NUM_SEQS = model_params['num_seqs'] # total number of sequences to be executed (to set size of EM) + +STIM_SEQS = [list(range(1,NUM_STIM_PER_SEQ*2,2)), + list(range(2,NUM_STIM_PER_SEQ*2+1,2))] +CURRICULUM_TYE = 'blocked' # 'blocked' or 'interleaved' + +#endregion + +#region MODEL +# ====================================================================================================================== +# MODEL +# ====================================================================================================================== + +def construct_model(model_name:str=MODEL_NAME, + + # Inputs: + state_input_name:str=STATE_INPUT_LAYER_NAME, + state_size:int=STATE_SIZE, + + # Context processing: + context_name:str=CONTEXT_LAYER_NAME, + state_weight:Union[float,int]=STATE_WEIGHT, + context_integration_rate:Union[float,int]=CONTEXT_INTEGRATION_RATE, + + # EM: + em_name:str=EM_NAME, + retrieval_softmax_gain=RETRIEVAL_SOFTMAX_GAIN, + state_retrieval_weight:Union[float,int]=STATE_RETRIEVAL_WEIGHT, + context_retrieval_weight:Union[float,int]=CONTEXT_RETRIEVAL_WEIGHT, + previous_state_name=PREVIOUS_STATE_NAME, + previous_state_weight:Union[float,int]=PREVIOUS_STATE_WEIGHT, + + # Output / decision processing: + PREDICTION_LAYER_NAME:str=PREDICTION_LAYER_NAME, + + )->Composition: + + # Apportionment of contributions of state (actual or em) vs. context (em) to context_layer integration: + + # FIX: THIS IS FOR MDP; NEEDS TO BE REVISED FOR CSW + # state input (EXPERIENCE) -\ + # --> state_weight -------\ + # state from em (PREDICT)---/ -> * (context_integration_rate) -----\ + # /-----> context_weight ---/ --> context + # context from em --------/ (=1- state_weight) / + # /---> 1 - context_integration_rate --/ + # context from prev. 
cycle -------------------------/ + + assert 0 <= context_integration_rate <= 1,\ + f"context_retrieval_weight must be a number from 0 to 1" + assert 0 <= state_weight <= 1,\ + f"context_retrieval_weight must be a number from 0 to 1" + context_weight = 1 - state_weight + state_weight *= context_integration_rate + context_weight *= context_integration_rate + + # ---------------------------------------------------------------------------------------------------------------- + # ------------------------------------------------- Nodes ------------------------------------------------------ + # ---------------------------------------------------------------------------------------------------------------- + + state_input_layer = ProcessingMechanism(name=state_input_name, size=state_size) + context_layer = RecurrentTransferMechanism(name=context_name, + size=state_size, + auto=1-context_integration_rate, + hetero=0.0) + em = EMComposition(name=em_name, + memory_template=[[0] * state_size, # state + [0] * state_size, # previous state + [0] * state_size], # context + memory_fill=(0,.01), + memory_capacity=NUM_SEQS, + softmax_gain=1.0, + # Input Nodes: + field_names=[state_input_name, + previous_state_name, + context_name, + ], + field_weights=(state_retrieval_weight, + previous_state_weight, + context_retrieval_weight + ) + ) + + prediction_layer = ProcessingMechanism(name=PREDICTION_LAYER_NAME) + + + # ---------------------------------------------------------------------------------------------------------------- + # ------------------------------------------------- EGO Composition -------------------------------------------- + # ---------------------------------------------------------------------------------------------------------------- + + + EGO_comp = Composition(name=model_name, + # # Terminate a Task.PREDICT trial after prediction_layer executes if a reward is retrieved + # termination_processing={ + # # TimeScale.TRIAL: And(Condition(lambda: task_input_layer.value == Task.PREDICT), + # # Condition(lambda: retrieved_reward_layer.value), + # # JustRan(prediction_layer))} + # # CRASHES: + # # TimeScale.TRIAL: Any(And(Condition(lambda: task_input_layer.value == Task.EXPERIENCE), + # # JustRan(em)), + # # And(Condition(lambda: task_input_layer.value == Task.PREDICT), + # # Condition(lambda: retrieved_reward_layer.value), + # # JustRan(prediction_layer)))} + # TimeScale.TRIAL: Any(And(Condition(lambda: task_input_layer.value == Task.EXPERIENCE), + # AllHaveRun()), + # And(Condition(lambda: task_input_layer.value == Task.PREDICT), + # Condition(lambda: retrieved_reward_layer.value), + # AllHaveRun()))} + ) + + # Nodes not included in (decision output) Pathway specified above + EGO_comp.add_nodes([state_input_layer, context_layer, em, prediction_layer]) + + # Projections: + QUERY = ' [QUERY]' + VALUE = ' [VALUE]' + RETRIEVED = ' [RETRIEVED]' + + # EM encoding -------------------------------------------------------------------------------- + # state -> em + EGO_comp.add_projection(MappingProjection(state_input_layer, + em.nodes[state_input_name + QUERY])) + # context -> em + EGO_comp.add_projection(MappingProjection(context_layer, + em.nodes[context_name + QUERY])) + + # Inputs to Context --------------------------------------------------------------------------- + # retrieved context -> context_layer + EGO_comp.add_projection(MappingProjection(state_input_layer, + context_layer, + matrix=np.eye(STATE_SIZE) * state_weight)) + + # Response pathway 
--------------------------------------------------------------------------- + # retrieved state -> prediction_layer + EGO_comp.add_projection(MappingProjection(em.nodes[state_input_name + RETRIEVED], + prediction_layer)) + + + # Validate construction + assert context_layer.input_port.path_afferents[0].sender.owner == context_layer # recurrent projection + assert context_layer.input_port.path_afferents[0].parameters.matrix.get()[0][0] == 1-context_integration_rate + # assert context_layer.input_port.path_afferents[1].sender.owner == em.nodes[CONTEXT_LAYER_NAME + RETRIEVED] # + assert context_layer.input_port.path_afferents[1].sender.owner == state_input_layer # + # memory of + # context + assert context_layer.input_port.path_afferents[1].parameters.matrix.get()[0][0] == state_weight + + return EGO_comp +#endregion + + +#region SCRIPT EXECUTION +# ====================================================================================================================== +# SCRIPT EXECUTION +# ====================================================================================================================== + +if __name__ == '__main__': + model = None + + if CONSTRUCT_MODEL: + print(f'Constructing {MODEL_NAME}') + model = construct_model() + assert 'DEBUGGING BREAK POINT' + + if DISPLAY_MODEL is not None: + if model: + model.show_graph(**DISPLAY_MODEL) + else: + print("Model not yet constructed") + + if RUN_MODEL: + experience_inputs = build_experience_inputs(state_size=STATE_SIZE, + time_drift_rate=TIME_DRIFT_RATE, + num_baseline_seqs=NUM_BASELINE_SEQS, + num_revaluation_seqs=NUM_REVALUATION_SEQS, + reward_vals=REWARD_VALS, + CURRICULUM_TYE=CURRICULUM_TYE, + ratio=RATIO, + stim_seqs=STIM_SEQS) + input_layers = [TIME_INPUT_LAYER_NAME, + TASK_INPUT_LAYER_NAME, + STATE_INPUT_LAYER_NAME, + REWARD_INPUT_LAYER_NAME] + + # Experience Phase + print(f"Presenting {model.name} with {TOTAL_NUM_EXPERIENCE_STIMS} EXPERIENCE stimuli") + model.run(inputs={k: v for k, v in zip(input_layers, experience_inputs)}, + execution_mode=EXECUTION_MODE, + report_output=REPORT_OUTPUT, + report_progress=REPORT_PROGRESS) + + # Prediction Phase + prediction_inputs = build_prediction_inputs(state_size=STATE_SIZE, + time_drift_rate=TIME_DRIFT_RATE, + num_roll_outs_per_stim=int(NUM_ROLL_OUTS / 2), + stim_seqs=STIM_SEQS, + reward_vals=REWARD_VALS, + seq_type=PREDICT_SEQ_TYPE) + print(f"Running {model.name} for {NUM_ROLL_OUTS} PREDICT (ROLL OUT) trials") + model.termination_processing = { + TimeScale.TRIAL: And(Condition(lambda: model.nodes[TASK_INPUT_LAYER_NAME].value == Task.PREDICT), + Condition(lambda: model.nodes[RETRIEVED_REWARD_NAME].value), + # JustRan(model.nodes[PREDICTION_LAYER_NAME]) + AllHaveRun() + ) + } + model.run(inputs={k: v for k, v in zip(input_layers, prediction_inputs)}, + report_output=REPORT_OUTPUT, + report_progress=REPORT_PROGRESS + ) + + if PRINT_RESULTS: + print(f"Predicted reward for last stimulus: {model.results}") + #endregion \ No newline at end of file diff --git a/Scripts/Models (Under Development)/EGO/EGO Model (sim 2) - CSW using EMComposition with WM.py b/Scripts/Models (Under Development)/EGO/EGO Model (sim 2) - CSW using EMComposition with WM.py new file mode 100644 index 00000000000..00fe97f5e74 --- /dev/null +++ b/Scripts/Models (Under Development)/EGO/EGO Model (sim 2) - CSW using EMComposition with WM.py @@ -0,0 +1,425 @@ +# Princeton University licenses this file to You under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the 
License. You may obtain a copy of the License at: +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed +# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and limitations under the License. + +# TODO: + +# REPLACE INTEGRATOR RECURRENTTRANSFERMECHANISM WITH TRANSFERMECHANISM IN INTEGRATOR MODE + +# ADD PREVIOUS STATES +# ADD next_state to EM and control to support that +# - CONTROL FLOW: +# - UPDATE CONTEXT LAYER: INTEGRATE CURRENT STATE IN CONTEXT LAYER +# - USE UPDATED CONTEXT + CURRENT STATE TO RETRIEVE PREDICTED NEXT STATE +# - GET NEXT STATE +# - ENCODE "CURRENT" (I.E., PREVIOUS) STATE + "NEXT" (NOW ACTUALLY CURRENT) STATE + CONTEXT (PRIOR TO +# INTEGRATION OF "NEXT") INTO EM + +# - CONTROL FLOW (FROM VANTAGE OF "NEXT" STATE): +# - USE PREVIOUS STATE + CONTEXT (PRE-INTEGRATION) TO RETRIEVE PREDICTION OF CURRENT STATE +# - ENCODE PREVIOUS STATE + CURRENT STATE (INPUT) + CONTEXT (PRE-INTEGRATION) INTO EM +# - UPDATE CONTEXT LAYER: INTEGRATE CURRENT STATE (INPUT) IN CONTEXT LAYER: +# SO: +# - EM SHOULD EXECUTE FIRST: +# - RETRIEVE USING PREVIOUS STATE NODE AND CONTEXT (PRE-INTEGRATION) TO RETRIEVE PREDICTED CURRENT STATE +# - STORE VALUES OF PREVIOUS STATE, CURRENT STATE (INPUT) AND CONTEXT (PRE-INTEGRATION) INTO EM +# - THEN CONTEXT LAYER SHOULD EXECUTE, USING CURRENT INPUT TO INTEGRATE INTO CONTEXT LAYER + + + +# FIX: TERMINATION CONDITION IS GETTING TRIGGED AFTER 1st TRIAL + +# FOR INPUT NODES: scheduler.add_condition(A, BeforeNCalls(A,1) +# Termination: AfterNCalls(Ctl,2) + +""" +QUESTIONS: + +NOTES: + *MUST* run Experience before Predict, as the latter requires retrieved_reward to be non-zero + (from last trial of Experience) in order to know to encode the next state (see control policy) + +**Overview** +------------ + +This implements a model of... + +The model is an example of... + +The script contains methods to construct, train, and run the model, and analyze the results of its execution: + +* `construct_model `: + takes as arguments parameters used to construct the model; for convenience, defaults are defined below, + (under "Construction parameters") + +* `train_network `: + ... + +* `run_model `: + ... + +* `analyze_results `: + takes as arguments the results of executing the model, and optionally a number of trials and EGO_level to analyze; + returns... + + +**The Model** +------------- + +The model is comprised of... + +.. _EGO_Fig: + +.. figure:: _static/` `Composition`. + + +**Construction and Execution** +------------------------------ + +.. _EGO_settings: + +*Settings* +~~~~~~~~~~ + +The default parameters are ones that have been fit to empirical data concerning human performance +(taken from `Kane et al., 2007 `_). + +See "Settings for running the script" to specify whether the model is trained and/or executed when the script is run, +and whether a graphic display of the network is generated when it is constructed. + +.. _EGO_stimuli: + +*Stimuli* +~~~~~~~~~ + +Sequences of stimuli are constructed either using `SweetPea `_ +(using the script in stim/SweetPea) or replicate those used in... + + .. note:: + Use of SweetPea for stimulus generation requires it be installed:: + >> pip install sweetpea + + +.. _EGO_training: + +*Training* +~~~~~~~~~~ + +MORE HERE + +.. _EGO_execution: + +*Execution* +~~~~~~~~~~~ + +MORE HERE + +.. 
_EGO_methods_reference: + +**Methods Reference** +--------------------- + + +""" + +import numpy as np +import graph_scheduler as gs +from enum import IntEnum + +from psyneulink import * +from psyneulink._typing import Union, Literal +from psyneulink.core.scheduling.condition import Any, And, AllHaveRun, AtRunStart + +# Settings for running script: + +MEMORY_CAPACITY = 5 +CONSTRUCT_MODEL = True # THIS MUST BE SET TO True to run the script +DISPLAY_MODEL = ( # Only one of the following can be uncommented: + None # suppress display of model + # {} # show simple visual display of model + # {'show_node_structure': True} # show detailed view of node structures and projections +) +RUN_MODEL = True # True => run the model +# RUN_MODEL = False # False => don't run the model +EXECUTION_MODE = ExecutionMode.Python +# EXECUTION_MODE = ExecutionMode.PyTorch +ANALYZE_RESULTS = False # True => output analysis of results of run +# REPORT_OUTPUT = ReportOutput.FULL # Sets console output during run [ReportOutput.ON, .TERSE OR .FULL] +REPORT_OUTPUT = ReportOutput.OFF # Sets console output during run [ReportOutput.ON, .TERSE OR .FULL] +REPORT_PROGRESS = ReportProgress.OFF # Sets console progress bar during run +PRINT_RESULTS = False # print model.results after execution +ANIMATE = False # {UNIT:EXECUTION_SET} # Specifies whether to generate animation of execution + + +#region PARAMETERS +# ====================================================================================================================== +# PARAMETERS +# ====================================================================================================================== + +# PyTorch Version Parameters: +model_params = dict( + state_d = 11, # length of state vector + integrator_d = 11, # length of integrator vector + context_d = 11, # length of context vector + integration_rate = .69, # rate at which state is integrated into new context + state_weight = .5, # weight of the state used during memory retrieval + context_weight = .5, # weight of the context used during memory retrieval + temperature = .01 # temperature of the softmax used during memory retrieval (smaller means more argmax-like +) + +# Fixed (structural) parameters: + +# Names: +MODEL_NAME = "EGO Model CSW" +STATE_INPUT_LAYER_NAME = "STATE" +PREVIOUS_STATE_LAYER_NAME = "PREVIOUS STATE" +INTEGRATOR_LAYER_NAME = 'INTEGRATOR' +CONTEXT_LAYER_NAME = 'CONTEXT' + +EM_NAME = "EM" +PREDICTION_LAYER_NAME = "PREDICTION" + +EMFieldsIndex = IntEnum('EMFields', + ['STATE', + 'CONTEXT', + 'PREVIOUS_STATE'], + start=0) + + +# CONSTRUCTION PARAMETERS + +# Layer sizes: +STATE_SIZE = model_params['state_d'] # length of state vector +INTEGRATOR_SIZE = model_params['integrator_d'] # length of state vector +CONTEXT_SIZE = model_params['context_d'] # length of state vector + +# Context processing: +INTEGRATION_RATE = model_params['integration_rate'] # rate at which state is integrated into integrator layer + +# EM retrieval +STATE_RETRIEVAL_WEIGHT = 0 +PREVIOUS_STATE_RETRIEVAL_WEIGHT = model_params['state_weight'] # weight of state field in retrieval from EM +CONTEXT_RETRIEVAL_WEIGHT = model_params['context_weight'] # weight of context field in retrieval from EM +RETRIEVAL_SOFTMAX_GAIN = 1/model_params['temperature'] # gain on softmax retrieval function + + +RANDOM_WEIGHTS_INITIALIZATION=RandomMatrix(center=0.0, range=0.1) # Matrix spec used to initialize all Projections + +#endregion + +#region ENVIRONMENT +# 
====================================================================================================================== +# ENVIRONMENT +# ====================================================================================================================== + +# Task environment: +import Environment +CURRICULUM_TYPE = 'Blocked' # 'Blocked' or 'Interleaved' +INPUTS = Environment.generate_dataset(condition=CURRICULUM_TYPE).xs.numpy()[:5] +# INPUTS = [env_inputs[i][:10] for i in range(len(env_inputs))] + + +#endregion + +#region MODEL +# ====================================================================================================================== +# MODEL +# ====================================================================================================================== + +def construct_model(model_name:str=MODEL_NAME, + + # Input layer: + state_input_name:str=STATE_INPUT_LAYER_NAME, + state_size:int=STATE_SIZE, + + # Previous state + previous_state_input_name:str=PREVIOUS_STATE_LAYER_NAME, + previous_state_size:int=STATE_SIZE, + + # Integrator: + integrator_name:str=INTEGRATOR_LAYER_NAME, + integrator_size:int=INTEGRATOR_SIZE, + integration_rate:Union[float,int]=INTEGRATION_RATE, + + # Context representation (learned): + context_name:str=CONTEXT_LAYER_NAME, + context_size:Union[float,int]=CONTEXT_SIZE, + + # EM: + em_name:str=EM_NAME, + retrieval_softmax_gain=RETRIEVAL_SOFTMAX_GAIN, + state_retrieval_weight:Union[float,int]=STATE_RETRIEVAL_WEIGHT, + previous_state_retrieval_weight:Union[float,int]=PREVIOUS_STATE_RETRIEVAL_WEIGHT, + context_retrieval_weight:Union[float,int]=CONTEXT_RETRIEVAL_WEIGHT, + + # Output / decision processing: + prediction_layer_name:str=PREDICTION_LAYER_NAME, + + )->Composition: + + assert 0 <= integration_rate <= 1,\ + f"integrator_retrieval_weight must be a number from 0 to 1" + + # ---------------------------------------------------------------------------------------------------------------- + # ------------------------------------------------- Nodes ------------------------------------------------------ + # ---------------------------------------------------------------------------------------------------------------- + + state_input_layer = ProcessingMechanism(name=state_input_name, size=state_size) + previous_state_layer = ProcessingMechanism(name=previous_state_input_name, size=state_size) + integrator_layer = RecurrentTransferMechanism(name=integrator_name, + function=Tanh, + size=integrator_size, + auto=1-integration_rate, + hetero=0.0) + context_layer = ProcessingMechanism(name=context_name, size=context_size) + + em = EMComposition(name=em_name, + memory_template=[[0] * state_size, # state + [0] * state_size, # previous state + [0] * state_size], # context + # memory_fill=(0,.01), + memory_capacity=MEMORY_CAPACITY, + memory_decay_rate=0, + softmax_gain=1.0, + # Input Nodes: + field_names=[state_input_name, + previous_state_input_name, + context_name, + ], + field_weights=(state_retrieval_weight, + previous_state_retrieval_weight, + context_retrieval_weight + ) + ) + + prediction_layer = ProcessingMechanism(name=prediction_layer_name, + size=state_size) + + + # ---------------------------------------------------------------------------------------------------------------- + # ------------------------------------------------- EGO Composition -------------------------------------------- + # ---------------------------------------------------------------------------------------------------------------- + + EGO_comp = Composition(name=model_name, + 
# # Terminate a Task.PREDICT trial after prediction_layer executes if a reward is retrieved + # termination_processing={ + # # TimeScale.TRIAL: And(Condition(lambda: task_input_layer.value == Task.PREDICT), + # # Condition(lambda: retrieved_reward_layer.value), + # # JustRan(prediction_layer))} + # # CRASHES: + # # TimeScale.TRIAL: Any(And(Condition(lambda: task_input_layer.value == Task.EXPERIENCE), + # # JustRan(em)), + # # And(Condition(lambda: task_input_layer.value == Task.PREDICT), + # # Condition(lambda: retrieved_reward_layer.value), + # # JustRan(prediction_layer)))} + # TimeScale.TRIAL: Any(And(Condition(lambda: task_input_layer.value == Task.EXPERIENCE), + # AllHaveRun()), + # And(Condition(lambda: task_input_layer.value == Task.PREDICT), + # Condition(lambda: retrieved_reward_layer.value), + # AllHaveRun()))} + ) + + # Nodes not included in (decision output) Pathway specified above + EGO_comp.add_nodes([state_input_layer, + previous_state_layer, + integrator_layer, + context_layer, + em, + prediction_layer]) + + # Projections: + QUERY = ' [QUERY]' + VALUE = ' [VALUE]' + RETRIEVED = ' [RETRIEVED]' + + # EM encoding & retrieval -------------------------------------------------------------------------------- + # state_input -> em (retrieval) + EGO_comp.add_projection(MappingProjection(state_input_layer, + em.nodes[state_input_name + VALUE], + matrix=IDENTITY_MATRIX)) + + # previous_state -> em (retrieval) + EGO_comp.add_projection(MappingProjection(previous_state_layer, + em.nodes[previous_state_input_name + QUERY], + matrix=IDENTITY_MATRIX)) + # context -> em (retrieval) + EGO_comp.add_projection(MappingProjection(context_layer, + em.nodes[context_name + QUERY], + matrix=IDENTITY_MATRIX)) + + # Inputs to previous_state and context ------------------------------------------------------------------- + # state -> previous_layer + EGO_comp.add_projection(MappingProjection(state_input_layer, + previous_state_layer, + matrix=IDENTITY_MATRIX)) + # state -> integrator_layer + EGO_comp.add_projection(MappingProjection(state_input_layer, + integrator_layer, + matrix=np.eye(STATE_SIZE) * integration_rate)) + + # integrator_layer -> context_layer (learnable) + EGO_comp.add_projection(MappingProjection(integrator_layer, + context_layer, + matrix=IDENTITY_MATRIX)) + + # Response pathway --------------------------------------------------------------------------------------- + # retrieved state -> prediction_layer + EGO_comp.add_projection(MappingProjection(em.nodes[state_input_name + RETRIEVED], + prediction_layer)) + + + EGO_comp.scheduler.add_condition(em, BeforeNodes(previous_state_layer, integrator_layer)) + + # Validate construction + assert integrator_layer.input_port.path_afferents[0].sender.owner == integrator_layer # recurrent projection + assert integrator_layer.input_port.path_afferents[0].parameters.matrix.get()[0][0] == 1-integration_rate + assert integrator_layer.input_port.path_afferents[1].sender.owner == state_input_layer # + + return EGO_comp +#endregion + + +#region SCRIPT EXECUTION +# ====================================================================================================================== +# SCRIPT EXECUTION +# ====================================================================================================================== + +if __name__ == '__main__': + model = None + + if CONSTRUCT_MODEL: + print(f'Constructing {MODEL_NAME}') + model = construct_model() + assert 'DEBUGGING BREAK POINT' + # print(model.scheduler.consideration_queue) + # 
gs.output_graph_image(model.scheduler.graph, 'EGO_comp-scheduler.png') + + if DISPLAY_MODEL is not None: + if model: + model.show_graph(**DISPLAY_MODEL) + else: + print("Model not yet constructed") + + if RUN_MODEL: + # print("MODEL NOT YET FULLY EXECUTABLE") + print(f'Running {MODEL_NAME}') + model.run(inputs={STATE_INPUT_LAYER_NAME:INPUTS}, + # report_output=REPORT_OUTPUT, + # report_progress=REPORT_PROGRESS + ) + print(model.nodes['EM'].parameters.memory.get(context=MODEL_NAME)) + if PRINT_RESULTS: + print("MODEL NOT YET FULLY EXECUTABLE SO NO RESULTS") + #endregion diff --git a/Scripts/Models (Under Development)/EGO/EGO Model (sim 2) - CSW using EMComposition.py b/Scripts/Models (Under Development)/EGO/EGO Model (sim 2) - CSW using EMComposition.py new file mode 100644 index 00000000000..b26e4d07f00 --- /dev/null +++ b/Scripts/Models (Under Development)/EGO/EGO Model (sim 2) - CSW using EMComposition.py @@ -0,0 +1,433 @@ +# Princeton University licenses this file to You under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. You may obtain a copy of the License at: +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed +# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and limitations under the License. + +# TODO: + +# ADD PREVIOUS STATES +# ADD next_state to EM and control to support that +# - CONTROL FLOW: +# - UPDATE CONTEXT LAYER: INTEGRATE CURRENT STATE IN CONTEXT LAYER +# - USE UPDATED CONTEXT + CURRENT STATE TO RETRIEVE PREDICTED NEXT STATE +# - GET NEXT STATE +# - ENCODE "CURRENT" (I.E., PREVIOUS) STATE + "NEXT" (NOW ACTUALLY CURRENT) STATE + CONTEXT (PRIOR TO +# INTEGRATION OF "NEXT") INTO EM + +# - CONTROL FLOW (FROM VANTAGE OF "NEXT" STATE): +# - USE CONTEXT + PREVIOUS STATE TO RETRIEVE PREDICTION OF CURRENT STATE +# - ENCODE PREVIOUS STATE + CURRENT STATE + CONTEXT INTO EM +# - UPDATE CONTEXT LAYER: INTEGRATE CURRENT STATE IN CONTEXT LAYER: +# SO: +# - EM SHOULD EXECUTE FIRST: +# - USE VALUES OF WM (PREVIOUS STATE) NODE AND CONTEXT LAYER TO RETRIEVE PREDICTED CURRENT STATE +# - ENCODE VALUES OF WM (PREVIOUS STATE), CURRENT STATE (INPUT), AND CONTEXT LAYER IN EM +# - THEN WM SHOULD EXECUTE TO UPDATE WITH CURRENT STATE (INPUT) +# - THEN CONTEXT LAYER SHOULD EXECUTE, INTEGRATING CURRENT STATE (INPUT) [OR WM] +# - LEARNING SHOULD USE CURRENT STATE AS TARGET TO TRAIN PREDICTED CURRENT STATE + + + +# FIX: TERMINATION CONDITION IS GETTING TRIGGED AFTER 1st TRIAL + +# FOR INPUT NODES: scheduler.add_condition(A, BeforeNCalls(A,1) +# Termination: AfterNCalls(Ctl,2) + +""" +QUESTIONS: + +NOTES: + *MUST* run Experience before Predict, as the latter requires retrieved_reward to be non-zero + (from last trial of Experience) in order to know to encode the next state (see control policy) + +**Overview** +------------ + +This implements a model of... + +The model is an example of... + +The script contains methods to construct, train, and run the model, and analyze the results of its execution: + +* `construct_model `: + takes as arguments parameters used to construct the model; for convenience, defaults are defined below, + (under "Construction parameters") + +* `train_network `: + ... + +* `run_model `: + ... 
+ +* `analyze_results `: + takes as arguments the results of executing the model, and optionally a number of trials and EGO_level to analyze; + returns... + + +**The Model** +------------- + +The model is comprised of... + +.. _EGO_Fig: + +.. figure:: _static/` `Composition`. + + +**Construction and Execution** +------------------------------ + +.. _EGO_settings: + +*Settings* +~~~~~~~~~~ + +The default parameters are ones that have been fit to empirical data concerning human performance +(taken from `Kane et al., 2007 `_). + +See "Settings for running the script" to specify whether the model is trained and/or executed when the script is run, +and whether a graphic display of the network is generated when it is constructed. + +.. _EGO_stimuli: + +*Stimuli* +~~~~~~~~~ + +Sequences of stimuli are constructed either using `SweetPea `_ +(using the script in stim/SweetPea) or replicate those used in... + + .. note:: + Use of SweetPea for stimulus generation requires it be installed:: + >> pip install sweetpea + + +.. _EGO_training: + +*Training* +~~~~~~~~~~ + +MORE HERE + +.. _EGO_execution: + +*Execution* +~~~~~~~~~~~ + +MORE HERE + +.. _EGO_methods_reference: + +**Methods Reference** +--------------------- + + +""" + +import numpy as np +from enum import IntEnum + +from psyneulink import * +from psyneulink._typing import Union, Literal +from psyneulink.core.scheduling.condition import Any, And, AllHaveRun, AtRunStart + +# Settings for running script: + +NUM_EXP_SEQS = 5 # Number of sequences to run in EXPERIENCE Phase (includes baseline + revaluation) +NUM_PRED_TRIALS = 10 # Number of trials (ROLL OUTS) to run in PREDICTION Phase + +CONSTRUCT_MODEL = True # THIS MUST BE SET TO True to run the script +DISPLAY_MODEL = ( # Only one of the following can be uncommented: + # None # suppress display of model + {} # show simple visual display of model + # {'show_node_structure': True} # show detailed view of node structures and projections +) +# RUN_MODEL = True # True => run the model +RUN_MODEL = False # False => don't run the model +EXECUTION_MODE = ExecutionMode.Python +# EXECUTION_MODE = ExecutionMode.PyTorch +ANALYZE_RESULTS = False # True => output analysis of results of run +# REPORT_OUTPUT = ReportOutput.FULL # Sets console output during run [ReportOutput.ON, .TERSE OR .FULL] +REPORT_OUTPUT = ReportOutput.OFF # Sets console output during run [ReportOutput.ON, .TERSE OR .FULL] +REPORT_PROGRESS = ReportProgress.OFF # Sets console progress bar during run +PRINT_RESULTS = False # print model.results after execution +ANIMATE = False # {UNIT:EXECUTION_SET} # Specifies whether to generate animation of execution + + +#region PARAMETERS +# ====================================================================================================================== +# PARAMETERS +# ====================================================================================================================== + +# PyTorch Version Parameters: +model_params = dict( + n_participants=58, + n_simulations = 100, # number of rollouts per participant + num_seqs = 20, # total number of sequences to be executed (used to set size of EM) + n_steps = 3, # number of steps per rollout + state_d = 7, # length of state vector + context_d = 7, # length of context vector + time_d = 25, # length of time vector + self_excitation = .25, # rate at which old context is carried over to new context + integration_rate = .5, # rate at which state is integrated into new context + state_weight = .5, # weight of the state used during memory retrieval 
+ context_weight = .3, # weight of the context used during memory retrieval + time_noise=.01,# noise std for time integrator (drift is set to 0) + temperature = .05 # temperature of the softmax used during memory retrieval (smaller means more argmax-like +) + +# Fixed (structural) parameters: + +# Names: +MODEL_NAME = "EGO Model CSW" +STATE_INPUT_LAYER_NAME = "STATE" +CONTEXT_LAYER_NAME = 'CONTEXT' +NEXT_STATE_NAME = 'NEXT_STATE' +EM_NAME = "EM" +PREDICTION_LAYER_NAME = "PREDICTION" + +EMFieldsIndex = IntEnum('EMFields', + ['STATE', + 'CONTEXT', + 'NEXT_STATE'], + start=0) + + +# CONSTRUCTION PARAMETERS + +# Layer sizes: +STATE_SIZE = model_params['state_d'] # length of state vector +CONTEXT_SIZE = model_params['context_d'] # length of state vector + +# Context processing: +INTEGRATION_RATE = model_params['integration_rate'] # rate at which state is integrated into context_layer + +# EM retrieval +STATE_RETRIEVAL_WEIGHT = model_params['state_weight'] # weight of state field in retrieval from EM +CONTEXT_RETRIEVAL_WEIGHT = model_params['context_weight'] # weight of context field in retrieval from EM +RETRIEVAL_SOFTMAX_GAIN = 1/model_params['temperature'] # gain on softmax retrieval function + +NEXT_STATE_WEIGHT = 0 + +RANDOM_WEIGHTS_INITIALIZATION=RandomMatrix(center=0.0, range=0.1) # Matrix spec used to initialize all Projections + +#endregion + +#region ENVIRONMENT +# ====================================================================================================================== +# ENVIRONMENT +# ====================================================================================================================== + +# Task environment: +NUM_STIM_PER_SEQ = model_params['n_steps'] # number of stimuli in a sequence +NUM_SEQS = model_params['num_seqs'] # total number of sequences to be executed (to set size of EM) + +STIM_SEQS = [list(range(1,NUM_STIM_PER_SEQ*2,2)), + list(range(2,NUM_STIM_PER_SEQ*2+1,2))] +CURRICULUM_TYE = 'blocked' # 'blocked' or 'interleaved' + +#endregion + +#region MODEL +# ====================================================================================================================== +# MODEL +# ====================================================================================================================== + +def construct_model(model_name:str=MODEL_NAME, + + # Inputs: + state_input_name:str=STATE_INPUT_LAYER_NAME, + state_size:int=STATE_SIZE, + + # Context processing: + context_name:str=CONTEXT_LAYER_NAME, + integration_rate:Union[float,int]=INTEGRATION_RATE, + + # EM: + em_name:str=EM_NAME, + retrieval_softmax_gain=RETRIEVAL_SOFTMAX_GAIN, + state_retrieval_weight:Union[float,int]=STATE_RETRIEVAL_WEIGHT, + context_retrieval_weight:Union[float,int]=CONTEXT_RETRIEVAL_WEIGHT, + next_state_name=NEXT_STATE_NAME, + next_state_weight:Union[float,int]=NEXT_STATE_WEIGHT, + + # Output / decision processing: + PREDICTION_LAYER_NAME:str=PREDICTION_LAYER_NAME, + + )->Composition: + + # Apportionment of contributions of state (actual or em) vs. 
context (em) to context_layer integration: + + + assert 0 <= integration_rate <= 1,\ + f"context_retrieval_weight must be a number from 0 to 1" + + # ---------------------------------------------------------------------------------------------------------------- + # ------------------------------------------------- Nodes ------------------------------------------------------ + # ---------------------------------------------------------------------------------------------------------------- + + state_input_layer = ProcessingMechanism(name=state_input_name, size=state_size) + context_layer = RecurrentTransferMechanism(name=context_name, + size=state_size, + auto=1-integration_rate, + hetero=0.0) + em = EMComposition(name=em_name, + memory_template=[[0] * state_size, # state + [0] * state_size, # previous state + [0] * state_size], # context + memory_fill=(0,.01), + memory_capacity=NUM_SEQS, + softmax_gain=1.0, + # Input Nodes: + field_names=[state_input_name, + next_state_name, + context_name, + ], + field_weights=(state_retrieval_weight, + next_state_weight, + context_retrieval_weight + ) + ) + + prediction_layer = ProcessingMechanism(name=PREDICTION_LAYER_NAME) + + + # ---------------------------------------------------------------------------------------------------------------- + # ------------------------------------------------- EGO Composition -------------------------------------------- + # ---------------------------------------------------------------------------------------------------------------- + + + EGO_comp = Composition(name=model_name, + # # Terminate a Task.PREDICT trial after prediction_layer executes if a reward is retrieved + # termination_processing={ + # # TimeScale.TRIAL: And(Condition(lambda: task_input_layer.value == Task.PREDICT), + # # Condition(lambda: retrieved_reward_layer.value), + # # JustRan(prediction_layer))} + # # CRASHES: + # # TimeScale.TRIAL: Any(And(Condition(lambda: task_input_layer.value == Task.EXPERIENCE), + # # JustRan(em)), + # # And(Condition(lambda: task_input_layer.value == Task.PREDICT), + # # Condition(lambda: retrieved_reward_layer.value), + # # JustRan(prediction_layer)))} + # TimeScale.TRIAL: Any(And(Condition(lambda: task_input_layer.value == Task.EXPERIENCE), + # AllHaveRun()), + # And(Condition(lambda: task_input_layer.value == Task.PREDICT), + # Condition(lambda: retrieved_reward_layer.value), + # AllHaveRun()))} + ) + + # Nodes not included in (decision output) Pathway specified above + EGO_comp.add_nodes([state_input_layer, context_layer, em, prediction_layer]) + + # Projections: + QUERY = ' [QUERY]' + VALUE = ' [VALUE]' + RETRIEVED = ' [RETRIEVED]' + + # EM encoding -------------------------------------------------------------------------------- + # state -> em + EGO_comp.add_projection(MappingProjection(state_input_layer, + em.nodes[state_input_name + QUERY])) + # context -> em + EGO_comp.add_projection(MappingProjection(context_layer, + em.nodes[context_name + QUERY])) + + # Inputs to Context --------------------------------------------------------------------------- + # retrieved context -> context_layer + EGO_comp.add_projection(MappingProjection(state_input_layer, + context_layer, + # matrix=np.eye(STATE_SIZE) * state_weight + )) + + # Response pathway --------------------------------------------------------------------------- + # retrieved state -> prediction_layer + EGO_comp.add_projection(MappingProjection(em.nodes[next_state_name + RETRIEVED], + prediction_layer)) + + + # FIX: REMAINS TO BE FIXED: + # Validate 
construction + assert context_layer.input_port.path_afferents[0].sender.owner == context_layer # recurrent projection + assert context_layer.input_port.path_afferents[0].parameters.matrix.get()[0][0] == 1-integration_rate + # assert context_layer.input_port.path_afferents[1].sender.owner == em.nodes[CONTEXT_LAYER_NAME + RETRIEVED] # + assert context_layer.input_port.path_afferents[1].sender.owner == state_input_layer # + # memory of context + # assert context_layer.input_port.path_afferents[1].parameters.matrix.get()[0][0] == state_weight + + return EGO_comp +#endregion + + +#region SCRIPT EXECUTION +# ====================================================================================================================== +# SCRIPT EXECUTION +# ====================================================================================================================== + +if __name__ == '__main__': + model = None + + if CONSTRUCT_MODEL: + print(f'Constructing {MODEL_NAME}') + model = construct_model() + assert 'DEBUGGING BREAK POINT' + + if DISPLAY_MODEL is not None: + if model: + model.show_graph(**DISPLAY_MODEL) + else: + print("Model not yet constructed") + + if RUN_MODEL: + experience_inputs = build_experience_inputs(state_size=STATE_SIZE, + time_drift_rate=TIME_DRIFT_RATE, + num_baseline_seqs=NUM_BASELINE_SEQS, + num_revaluation_seqs=NUM_REVALUATION_SEQS, + reward_vals=REWARD_VALS, + CURRICULUM_TYE=CURRICULUM_TYE, + ratio=RATIO, + stim_seqs=STIM_SEQS) + input_layers = [TIME_INPUT_LAYER_NAME, + TASK_INPUT_LAYER_NAME, + STATE_INPUT_LAYER_NAME, + REWARD_INPUT_LAYER_NAME] + + # Experience Phase + print(f"Presenting {model.name} with {TOTAL_NUM_EXPERIENCE_STIMS} EXPERIENCE stimuli") + model.run(inputs={k: v for k, v in zip(input_layers, experience_inputs)}, + execution_mode=EXECUTION_MODE, + report_output=REPORT_OUTPUT, + report_progress=REPORT_PROGRESS) + + # Prediction Phase + prediction_inputs = build_prediction_inputs(state_size=STATE_SIZE, + time_drift_rate=TIME_DRIFT_RATE, + num_roll_outs_per_stim=int(NUM_ROLL_OUTS / 2), + stim_seqs=STIM_SEQS, + reward_vals=REWARD_VALS, + seq_type=PREDICT_SEQ_TYPE) + print(f"Running {model.name} for {NUM_ROLL_OUTS} PREDICT (ROLL OUT) trials") + model.termination_processing = { + TimeScale.TRIAL: And(Condition(lambda: model.nodes[TASK_INPUT_LAYER_NAME].value == Task.PREDICT), + Condition(lambda: model.nodes[RETRIEVED_REWARD_NAME].value), + # JustRan(model.nodes[PREDICTION_LAYER_NAME]) + AllHaveRun() + ) + } + model.run(inputs={k: v for k, v in zip(input_layers, prediction_inputs)}, + report_output=REPORT_OUTPUT, + report_progress=REPORT_PROGRESS + ) + + if PRINT_RESULTS: + print(f"Predicted reward for last stimulus: {model.results}") + #endregion \ No newline at end of file diff --git a/Scripts/Models (Under Development)/EGO/EGO Model (sim 2) - CSW with Integrator and Learning.py b/Scripts/Models (Under Development)/EGO/EGO Model (sim 2) - CSW with Integrator and Learning.py new file mode 100644 index 00000000000..d9e8ac79432 --- /dev/null +++ b/Scripts/Models (Under Development)/EGO/EGO Model (sim 2) - CSW with Integrator and Learning.py @@ -0,0 +1,406 @@ +# Princeton University licenses this file to You under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
You may obtain a copy of the License at: +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed +# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and limitations under the License. + +# CONTROL FLOW: +# - EM EXECUTES FIRST: +# - RETRIEVES USING PREVIOUS STATE NODE AND CONTEXT (PRE-INTEGRATION) TO RETRIEVE PREDICTED CURRENT STATE +# - STORES VALUES OF PREVIOUS STATE, CURRENT STATE (INPUT) AND CONTEXT (PRE-INTEGRATION) INTO EM +# - THEN: +# - PREVIOUS_STATE EXECUTES TO GET CURRENT_STATE_INPUT (FOR RETRIEVAL ON NEXT TRIAL) +# - INTEGRATOR LAYER EXECUTES, INTEGRATING CURRENT_STATE_INPUT INTO MEMORY +# - CONTEXT LAYER EXECUTES TO GET LEARNED CONTEXT (FOR RETRIEVAL ON NEXT TRIAL) +# - PREDICTED CURRENT STATE IS COMPARED WITH ACTUAL CURRENT STATE (TARGET) TO UPDATE INTEGRATOR -> CONTEXT WEIGHTS + + +# TODO: + +# SCRIPT STUFF: +# - REPLACE INTEGRATOR RECURRENTTRANSFERMECHANISM WITH TRANSFERMECHANISM IN INTEGRATOR MODE +# OR TRY USING LCA with DECAY? +# - ADD LEARNING: +# - SET LEARNABILITY OF OUTER COMP PROJECTIONS +# - ADD PROJECTION OF CURRENT STATE TO TARGET (GOTTEN FROM LEARNING COMPONENTS) +# - DEBUG LEARNING +# PNL STUFF: +# - BUG: +# ? autodiffcomposition LINE 538: infinite while loop +# ? try taking out the integrator layer and see if it works +# ? try removing learnable attribute from projections to STORE node +# ? STORE node shows up multiple times in queue (but should be existing tests for convergence in nested) +# ? divergengence of STATE to PREVIOUS_STATE and STATE to EM projections confuses _get_backprop_pathway +# when traversing EM.input_CIM projections in depth part of search (since +# STATE->PREVIOUS_STATE->PREVIOUS_STATE [QUERY] is a valid path) even though the only one wanted for learning +# is the direct STATE->EM->STATE [VALUE] projection +# (see _get_backprop_pathway in AutodiffComposition, LINE 591 onward) +# - ADD COMMENT TO autodiffcomposition LINE 552 explaining what the subsquent block of code does +# - WRITE METHOD IN AUTODIFFCOMPOSITION to show_learning in show_graph() +# - DOCUMENT API FOR SPECIFYING PROJECTIONS TO NODES OF NESTED COMPOSITION +# (VIZ, *HAVE* TO EXPLICILTY SPECIFY PROJECTIONS TO NODES OF NESTED COMPOSITION AND ALSO INCLUDE THE NESTED COMP) + +""" +QUESTIONS: + +NOTES: + *MUST* run Experience before Predict, as the latter requires retrieved_reward to be non-zero + (from last trial of Experience) in order to know to encode the next state (see control policy) + +**Overview** +------------ + +This implements a model of... + +The model is an example of... + +The script contains methods to construct, train, and run the model, and analyze the results of its execution: + +* `construct_model `: + takes as arguments parameters used to construct the model; for convenience, defaults are defined below, + (under "Construction parameters") + +* `train_network `: + ... + +* `run_model `: + ... + +* `analyze_results `: + takes as arguments the results of executing the model, and optionally a number of trials and EGO_level to analyze; + returns... + + +**The Model** +------------- + +The model is comprised of... + +.. _EGO_Fig: + +.. figure:: _static/` `Composition`. + + +**Construction and Execution** +------------------------------ + +.. 
_EGO_settings: + +*Settings* +~~~~~~~~~~ + +The default parameters are ones that have been fit to empirical data concerning human performance +(taken from `Kane et al., 2007 `_). + +See "Settings for running the script" to specify whether the model is trained and/or executed when the script is run, +and whether a graphic display of the network is generated when it is constructed. + +.. _EGO_stimuli: + +*Stimuli* +~~~~~~~~~ + +Sequences of stimuli are constructed either using `SweetPea `_ +(using the script in stim/SweetPea) or replicate those used in... + + .. note:: + Use of SweetPea for stimulus generation requires it be installed:: + >> pip install sweetpea + + +.. _EGO_training: + +*Training* +~~~~~~~~~~ + +MORE HERE + +.. _EGO_execution: + +*Execution* +~~~~~~~~~~~ + +MORE HERE + +.. _EGO_methods_reference: + +**Methods Reference** +--------------------- + + +""" + +import numpy as np +import graph_scheduler as gs +from enum import IntEnum + +from psyneulink import * +from psyneulink._typing import Union, Literal + +# Settings for running script: + +MEMORY_CAPACITY = 5 +CONSTRUCT_MODEL = True # THIS MUST BE SET TO True to run the script +DISPLAY_MODEL = ( # Only one of the following can be uncommented: + None # suppress display of model + # {} # show simple visual display of model + # {'show_node_structure': True} # show detailed view of node structures and projections +) +RUN_MODEL = True # True => run the model +# RUN_MODEL = False # False => don't run the model +EXECUTION_MODE = ExecutionMode.Python +# EXECUTION_MODE = ExecutionMode.PyTorch +ANALYZE_RESULTS = False # True => output analysis of results of run +# REPORT_OUTPUT = ReportOutput.FULL # Sets console output during run [ReportOutput.ON, .TERSE OR .FULL] +REPORT_OUTPUT = ReportOutput.OFF # Sets console output during run [ReportOutput.ON, .TERSE OR .FULL] +REPORT_PROGRESS = ReportProgress.OFF # Sets console progress bar during run +PRINT_RESULTS = False # print model.results after execution +ANIMATE = False # {UNIT:EXECUTION_SET} # Specifies whether to generate animation of execution + + +#region PARAMETERS +# ====================================================================================================================== +# PARAMETERS +# ====================================================================================================================== + +# PyTorch Version Parameters: +model_params = dict( + state_d = 11, # length of state vector + previous_state_d = 11, # length of state vector + integrator_d = 11, # length of integrator vector + context_d = 11, # length of context vector + integration_rate = .69, # rate at which state is integrated into new context + state_weight = .5, # weight of the state used during memory retrieval + context_weight = .5, # weight of the context used during memory retrieval + temperature = .01 # temperature of the softmax used during memory retrieval (smaller means more argmax-like +) + +# Fixed (structural) parameters: + +# Names: +MODEL_NAME = "EGO Model CSW" +STATE_INPUT_LAYER_NAME = "STATE" +PREVIOUS_STATE_LAYER_NAME = "PREVIOUS STATE" +INTEGRATOR_LAYER_NAME = 'INTEGRATOR' +CONTEXT_LAYER_NAME = 'CONTEXT' + +EM_NAME = "EM" +PREDICTION_LAYER_NAME = "PREDICTION" + +EMFieldsIndex = IntEnum('EMFields', + ['STATE', + 'CONTEXT', + 'PREVIOUS_STATE'], + start=0) + + +# CONSTRUCTION PARAMETERS + +# Layer sizes: +STATE_SIZE = model_params['state_d'] # length of state vector +INTEGRATOR_SIZE = model_params['integrator_d'] # length of state vector +CONTEXT_SIZE = model_params['context_d'] 
# length of state vector + +# Context processing: +INTEGRATION_RATE = model_params['integration_rate'] # rate at which state is integrated into integrator layer + +# EM retrieval +STATE_RETRIEVAL_WEIGHT = 0 +PREVIOUS_STATE_RETRIEVAL_WEIGHT = model_params['state_weight'] # weight of state field in retrieval from EM +CONTEXT_RETRIEVAL_WEIGHT = model_params['context_weight'] # weight of context field in retrieval from EM +RETRIEVAL_SOFTMAX_GAIN = 1/model_params['temperature'] # gain on softmax retrieval function + + +RANDOM_WEIGHTS_INITIALIZATION=RandomMatrix(center=0.0, range=0.1) # Matrix spec used to initialize all Projections + +#endregion + +#region ENVIRONMENT +# ====================================================================================================================== +# ENVIRONMENT +# ====================================================================================================================== + +# Task environment: +import Environment +CURRICULUM_TYPE = 'Blocked' # 'Blocked' or 'Interleaved' +INPUTS = Environment.generate_dataset(condition=CURRICULUM_TYPE).xs.numpy()[:5] +# INPUTS = [env_inputs[i][:10] for i in range(len(env_inputs))] + + +#endregion + +#region MODEL +# ====================================================================================================================== +# MODEL +# ====================================================================================================================== + +def construct_model(model_name:str=MODEL_NAME, + + # Input layer: + state_input_name:str=STATE_INPUT_LAYER_NAME, + state_size:int=STATE_SIZE, + + # Previous state + previous_state_input_name:str=PREVIOUS_STATE_LAYER_NAME, + + # Integrator: + integrator_name:str=INTEGRATOR_LAYER_NAME, + integrator_size:int=INTEGRATOR_SIZE, + integration_rate:Union[float,int]=INTEGRATION_RATE, + + # Context representation (learned): + context_name:str=CONTEXT_LAYER_NAME, + context_size:Union[float,int]=CONTEXT_SIZE, + + # EM: + em_name:str=EM_NAME, + retrieval_softmax_gain=RETRIEVAL_SOFTMAX_GAIN, + state_retrieval_weight:Union[float,int]=STATE_RETRIEVAL_WEIGHT, + previous_state_retrieval_weight:Union[float,int]=PREVIOUS_STATE_RETRIEVAL_WEIGHT, + context_retrieval_weight:Union[float,int]=CONTEXT_RETRIEVAL_WEIGHT, + + # Output / decision processing: + prediction_layer_name:str=PREDICTION_LAYER_NAME, + + )->Composition: + + assert 0 <= integration_rate <= 1,\ + f"integrator_retrieval_weight must be a number from 0 to 1" + + # ---------------------------------------------------------------------------------------------------------------- + # ------------------------------------------------- Nodes ------------------------------------------------------ + # ---------------------------------------------------------------------------------------------------------------- + + state_input_layer = ProcessingMechanism(name=state_input_name, size=state_size) + previous_state_layer = ProcessingMechanism(name=previous_state_input_name, size=state_size) + integrator_layer = RecurrentTransferMechanism(name=integrator_name, + function=Tanh, + size=integrator_size, + auto=1-integration_rate, + hetero=0.0) + # integrator_layer = TransferMechanism(name=integrator_name, + # function=Tanh, + # size=integrator_size + # ) + context_layer = ProcessingMechanism(name=context_name, size=context_size) + + em = EMComposition(name=em_name, + memory_template=[[0] * state_size, # state + [0] * state_size, # previous state + [0] * state_size], # context + # memory_fill=(0,.01), + 
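                       # The three fields mirror the control flow described at the top of this file:
                       # retrieval is keyed on the PREVIOUS STATE and CONTEXT fields (the non-zero
                       # field_weights below), while the STATE field (state_retrieval_weight = 0) is
                       # stored but does not drive retrieval; the retrieved STATE entry is what
                       # projects to the PREDICTION layer.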
memory_capacity=MEMORY_CAPACITY, + memory_decay_rate=0, + softmax_gain=1.0, + # Input Nodes: + field_names=[state_input_name, + previous_state_input_name, + context_name, + ], + field_weights=(state_retrieval_weight, + previous_state_retrieval_weight, + context_retrieval_weight + ), + # enable_learning=True, + learn_field_weights=False + ) + + prediction_layer = ProcessingMechanism(name=prediction_layer_name, size=state_size) + + + # ---------------------------------------------------------------------------------------------------------------- + # ------------------------------------------------- EGO Composition -------------------------------------------- + # ---------------------------------------------------------------------------------------------------------------- + + QUERY = ' [QUERY]' + VALUE = ' [VALUE]' + RETRIEVED = ' [RETRIEVED]' + + # Pathways + state_to_previous_state_pathway = [state_input_layer, previous_state_layer] + state_to_integrator_pathway = [state_input_layer, + np.eye(STATE_SIZE) * integration_rate, + integrator_layer] + state_to_em_pathway = [state_input_layer, + MappingProjection(state_input_layer, em.nodes[state_input_name+VALUE]), + em] + previous_state_to_em_pathway = [previous_state_layer, + MappingProjection(previous_state_layer, em.nodes[previous_state_input_name+QUERY]), + em] + context_learning_pathway = [integrator_layer, + context_layer, + MappingProjection(context_layer, em.nodes[context_name + QUERY]), + em, + MappingProjection(em.nodes[state_input_name + RETRIEVED], prediction_layer), + prediction_layer] + + # Composition + EGO_comp = AutodiffComposition([state_to_previous_state_pathway, + state_to_integrator_pathway, + state_to_em_pathway, + previous_state_to_em_pathway, + context_learning_pathway], + name=model_name) + + # EGO_comp.show_graph(show_learning=True) + + # Ensure EM is executed (to encode previous state and context, and predict current state) + # before updating state and context + EGO_comp.scheduler.add_condition(em, BeforeNodes(previous_state_layer, integrator_layer)) + + # Validate construction + assert integrator_layer.input_port.path_afferents[0].sender.owner == integrator_layer # recurrent projection + assert integrator_layer.input_port.path_afferents[0].parameters.matrix.get()[0][0] == 1-integration_rate + assert integrator_layer.input_port.path_afferents[1].sender.owner == state_input_layer # + + return EGO_comp +#endregion + + +#region SCRIPT EXECUTION +# ====================================================================================================================== +# SCRIPT EXECUTION +# ====================================================================================================================== + +if __name__ == '__main__': + model = None + + if CONSTRUCT_MODEL: + print(f'Constructing {MODEL_NAME}') + model = construct_model() + assert 'DEBUGGING BREAK POINT' + # print(model.scheduler.consideration_queue) + # gs.output_graph_image(model.scheduler.graph, 'EGO_comp-scheduler.png') + + if DISPLAY_MODEL is not None: + if model: + model.show_graph(**DISPLAY_MODEL) + else: + print("Model not yet constructed") + + if RUN_MODEL: + # print("MODEL NOT YET FULLY EXECUTABLE") + print(f'Running {MODEL_NAME}') + # model.run(inputs={STATE_INPUT_LAYER_NAME:INPUTS}, + # # report_output=REPORT_OUTPUT, + # # report_progress=REPORT_PROGRESS + # ) + model.learn(inputs={STATE_INPUT_LAYER_NAME:INPUTS}, + # report_output=REPORT_OUTPUT, + # report_progress=REPORT_PROGRESS + ) + 
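        # Illustrative sketch (not part of the model itself): the INTEGRATOR layer constructed above -- a
        # RecurrentTransferMechanism with auto = 1 - integration_rate, hetero = 0, a Tanh function, and an
        # input projection scaled by integration_rate -- approximates an exponentially weighted moving
        # average of the state passed through tanh:
        def integrator_step(prev, state, rate=INTEGRATION_RATE):
            """One integration step: blend the previous value with the new state, then squash with tanh."""
            return np.tanh((1 - rate) * prev + rate * state)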
print(model.nodes['EM'].parameters.memory.get(context=MODEL_NAME)) + + if PRINT_RESULTS: + print("MODEL NOT YET FULLY EXECUTABLE SO NO RESULTS") + #endregion diff --git a/Scripts/Models (Under Development)/EGO/EGO Model (sim 2) - CSW with Learning.py b/Scripts/Models (Under Development)/EGO/EGO Model (sim 2) - CSW with Learning.py new file mode 100644 index 00000000000..b62564dd1ea --- /dev/null +++ b/Scripts/Models (Under Development)/EGO/EGO Model (sim 2) - CSW with Learning.py @@ -0,0 +1,506 @@ +# Princeton University licenses this file to You under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. You may obtain a copy of the License at: +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed +# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and limitations under the License. + +# CONTROL FLOW: +# - EM EXECUTES FIRST: +# - RETRIEVES USING PREVIOUS STATE NODE AND CONTEXT (PRE-INTEGRATION) TO RETRIEVE PREDICTED CURRENT STATE +# - STORES VALUES OF PREVIOUS STATE, CURRENT STATE (INPUT) AND CONTEXT (PRE-INTEGRATION) INTO EM +# - THEN: +# - PREVIOUS_STATE EXECUTES TO GET CURRENT_STATE_INPUT (FOR RETRIEVAL ON NEXT TRIAL) +# - INTEGRATOR LAYER EXECUTES, INTEGRATING CURRENT_STATE_INPUT INTO MEMORY +# - CONTEXT LAYER EXECUTES TO GET LEARNED CONTEXT (FOR RETRIEVAL ON NEXT TRIAL) +# - PREDICTED CURRENT STATE IS COMPARED WITH ACTUAL CURRENT STATE (TARGET) TO UPDATE INTEGRATOR -> CONTEXT WEIGHTS + +# ISSUES: +# * Using TransferMechanism (to avoid recurrent in PyTorch): +# -> input is always just linearly integrated, and the integral is tanh'd +# (not sure tanh is even necessary, since integral is always between 0 and 1) +# -> how is recurrence implemented in PyTorch? +# * ??Possible bug: for nodes in nested composition (such as EMComposition): calling of execute_node on the +# nested Composition rather than the outer one to which they now belong in +# PytorchCompositionWrapper + +# TODO: +# +# SCRIPT STUFF: +# √ REPLACE INTEGRATOR RECURRENTTRANSFERMECHANISM WITH TRANSFERMECHANISM IN INTEGRATOR MODE +# OR TRY USING LCA with DECAY? +# - CHECK THAT VERSION WITH TRANSFERMECHANISM FOR CONTEXT PRODUCES CORRECT EM ENTRIES PER PREVOUS BENCHMARKING +# - DEBUG LEARNING +# + +""" +QUESTIONS: + +NOTES: + *MUST* run Experience before Predict, as the latter requires retrieved_reward to be non-zero + (from last trial of Experience) in order to know to encode the next state (see control policy) + +**Overview** +------------ + +This implements a model of... + +The model is an example of... + +The script contains methods to construct, train, and run the model, and analyze the results of its execution: + +* `construct_model `: + takes as arguments parameters used to construct the model; for convenience, defaults are defined below, + (under "Construction parameters") + +* `train_network `: + ... + +* `run_model `: + ... + +* `analyze_results `: + takes as arguments the results of executing the model, and optionally a number of trials and EGO_level to analyze; + returns... + + +**The Model** +------------- + +The model is comprised of... + +.. _EGO_Fig: + +.. figure:: _static/` `Composition`. + + +**Construction and Execution** +------------------------------ + +.. 
_EGO_settings: + +*Settings* +~~~~~~~~~~ + +The default parameters are ones that have been fit to empirical data concerning human performance +(taken from `Kane et al., 2007 `_). + +See "Settings for running the script" to specify whether the model is trained and/or executed when the script is run, +and whether a graphic display of the network is generated when it is constructed. + +.. _EGO_stimuli: + +*Stimuli* +~~~~~~~~~ + +Sequences of stimuli are constructed either using `SweetPea `_ +(using the script in stim/SweetPea) or replicate those used in... + + .. note:: + Use of SweetPea for stimulus generation requires it be installed:: + >> pip install sweetpea + + +.. _EGO_training: + +*Training* +~~~~~~~~~~ + +MORE HERE + +.. _EGO_execution: + +*Execution* +~~~~~~~~~~~ + +MORE HERE + +.. _EGO_methods_reference: + +**Methods Reference** +--------------------- + + +""" +import matplotlib.pyplot as plt +import numpy as np +import graph_scheduler as gs +from enum import IntEnum + +import torch +torch.manual_seed(0) + +from psyneulink import * +from psyneulink._typing import Union, Literal + +#region SCRIPT SETTINGS +# ====================================================================================================================== +# SCRIPT SETTINGS +# ====================================================================================================================== +# Settings for running script: + +CONSTRUCT_MODEL = True # THIS MUST BE SET TO True to run the script +DISPLAY_MODEL = ( # Only one of the following can be uncommented: + None # suppress display of model + # { # show simple visual display of model + # 'show_pytorch': True, # show pytorch graph of model + # 'show_learning': True + # # 'show_projections_not_in_composition': True, + # # 'exclude_from_gradient_calc_style': 'dashed'# show target mechanisms for learning + # # {'show_node_structure': True # show detailed view of node structures and projections + # } +) +RUN_MODEL = True, # True => run the model +# RUN_MODEL = False # False => don't run the model +# EXECUTION_MODE = ExecutionMode.Python +EXECUTION_MODE = ExecutionMode.PyTorch +# REPORT_OUTPUT = ReportOutput.FULL # Sets console output during run [ReportOutput.ON, .TERSE OR .FULL] +REPORT_OUTPUT = ReportOutput.OFF # Sets console output during run [ReportOutput.ON, .TERSE OR .FULL] +REPORT_PROGRESS = ReportProgress.OFF # Sets console progress bar during run +PRINT_RESULTS = True # print model.results to console after execution +SAVE_RESULTS = False # save model.results to disk +# PLOT_RESULTS = True # plot results (PREDICTIONS) vs. TARGETS +PLOT_RESULTS = False # plot results (PREDICTIONS) vs. 
TARGETS +ANIMATE = False # {UNIT:EXECUTION_SET} # Specifies whether to generate animation of execution +#endregion + +#region ENVIRONMENT +# ====================================================================================================================== +# ENVIRONMENT +# ====================================================================================================================== + +# Task environment: +import Environment + +# CURRICULUM_TYPE = 'Blocked' # 'Blocked' or 'Interleaved' +CURRICULUM_TYPE = 'Interleaved' # 'Blocked' or 'Interleaved' + +NUM_STIMS = 7 # Integer or ALL +dataset = Environment.generate_dataset(condition=CURRICULUM_TYPE) +if NUM_STIMS is ALL: + INPUTS = dataset.xs.numpy() + TARGETS = dataset.ys.numpy() +else: + INPUTS = dataset.xs.numpy()[:NUM_STIMS] + TARGETS = dataset.ys.numpy()[:NUM_STIMS] +TOTAL_NUM_STIMS = len(INPUTS) + +#endregion + +#region PARAMETERS +# ====================================================================================================================== +# MODEL PARAMETERS +# ====================================================================================================================== + +model_params = dict( + + # Names: + name = "EGO Model CSW", + state_input_layer_name = "STATE", + previous_state_layer_name = "PREVIOUS STATE", + context_layer_name = 'CONTEXT', + em_name = "EM", + prediction_layer_name = "PREDICTION", + + # Structral + state_d = 11, # length of state vector + previous_state_d = 11, # length of state vector + context_d = 11, # length of context vector + memory_capacity = TOTAL_NUM_STIMS, # number of entries in EM memory + memory_init = (0,.001), # Initialize memory with random values in interval + # memory_init = None, # Initialize with zeros + concatenate_keys = False, + + # Processing + integration_rate = .69, # rate at which state is integrated into new context + state_weight = 1, # weight of the state used during memory retrieval + context_weight = 1, # weight of the context used during memory retrieval + normalize_field_weights = True, # whether to normalize the field weights during memory retrieval + # softmax_temperature = None, # temperature of the softmax used during memory retrieval (smaller means more argmax-like + softmax_temperature = .1, # temperature of the softmax used during memory retrieval (smaller means more argmax-like + # softmax_temperature = ADAPTIVE, # temperature of the softmax used during memory retrieval (smaller means more argmax-like + # softmax_temperature = CONTROL, # temperature of the softmax used during memory retrieval (smaller means more argmax-like + # softmax_threshold = None, # threshold used to mask out small values in softmax + softmax_threshold = .001, # threshold used to mask out small values in softmax + enable_learning=[True, False, False], # Enable learning for PREDICTION (STATE) but not CONTEXT or PREVIOUS STATE + learn_field_weights = False, + loss_spec = Loss.BINARY_CROSS_ENTROPY, + # loss_spec = Loss.MSE, + learning_rate = .5, + device = CPU, + # device = MPS, +) + +# EM structdural params: +EMFieldsIndex = IntEnum('EMFields', + ['STATE', + 'CONTEXT', + 'PREVIOUS_STATE'], + start=0) +STATE_RETRIEVAL_WEIGHT = 0 +RANDOM_WEIGHTS_INITIALIZATION=RandomMatrix(center=0.0, range=0.1) # Matrix spec used to initialize all Projections + +if is_numeric_scalar(model_params['softmax_temperature']): # translate to gain of softmax retrieval function + RETRIEVAL_SOFTMAX_GAIN = 1/model_params['softmax_temperature'] +else: # pass along ADAPTIVE or CONTROL spec + 
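    # ADAPTIVE and CONTROL are passed through unchanged: with ADAPTIVE, the SoftMax function computes
    # its own gain from the entropy of its input (see SoftMax.adapt_gain); with CONTROL, the gain is
    # expected to be assigned by a control mechanism at run time.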
RETRIEVAL_SOFTMAX_GAIN = model_params['softmax_temperature'] +#endregion + +#region MODEL +# ====================================================================================================================== +# MODEL +# ====================================================================================================================== + +def construct_model(model_name:str=model_params['name'], + + # Input layer: + state_input_name:str=model_params['state_input_layer_name'], + state_size:int=model_params['state_d'], + + # Previous state + previous_state_input_name:str=model_params['previous_state_layer_name'], + + # Context representation (learned): + context_name:str=model_params['context_layer_name'], + context_size:Union[float,int]=model_params['context_d'], + integration_rate:float=model_params['integration_rate'], + + # EM: + em_name:str=model_params['em_name'], + retrieval_softmax_gain=RETRIEVAL_SOFTMAX_GAIN, + retrieval_softmax_threshold=model_params['softmax_threshold'], + state_retrieval_weight:Union[float,int]=STATE_RETRIEVAL_WEIGHT, + previous_state_retrieval_weight:Union[float,int]=model_params['state_weight'], + context_retrieval_weight:Union[float,int]=model_params['context_weight'], + normalize_field_weights = model_params['normalize_field_weights'], + concatenate_keys = model_params['concatenate_keys'], + learn_field_weights = model_params['learn_field_weights'], + memory_capacity = model_params['memory_capacity'], + memory_init=model_params['memory_init'], + + # Output: + prediction_layer_name:str=model_params['prediction_layer_name'], + + # Learning + loss_spec=model_params['loss_spec'], + enable_learning=model_params['enable_learning'], + learning_rate = model_params['learning_rate'], + device=model_params['device'] + + )->Composition: + + assert 0 <= integration_rate <= 1,\ + f"integrator_retrieval_weight must be a number from 0 to 1" + + # ---------------------------------------------------------------------------------------------------------------- + # ------------------------------------------------- Nodes ------------------------------------------------------ + # ---------------------------------------------------------------------------------------------------------------- + + state_input_layer = ProcessingMechanism(name=state_input_name, size=state_size) + previous_state_layer = ProcessingMechanism(name=previous_state_input_name, size=state_size) + # context_layer = ProcessingMechanism(name=context_name, size=context_size) + context_layer = TransferMechanism(name=context_name, + size=context_size, + function=Tanh, + integrator_mode=True, + integration_rate=integration_rate) + + em = EMComposition(name=em_name, + memory_template=[[0] * state_size, # state + [0] * state_size, # previous state + [0] * state_size], # context + memory_fill=memory_init, + memory_capacity=memory_capacity, + memory_decay_rate=0, + softmax_gain=retrieval_softmax_gain, + softmax_threshold=retrieval_softmax_threshold, + # Input Nodes: + field_names=[state_input_name, + previous_state_input_name, + context_name, + ], + field_weights=(state_retrieval_weight, + previous_state_retrieval_weight, + context_retrieval_weight + ), + normalize_field_weights=normalize_field_weights, + concatenate_keys=concatenate_keys, + learn_field_weights=learn_field_weights, + learning_rate=learning_rate, + enable_learning=enable_learning, + device=device + ) + + prediction_layer = ProcessingMechanism(name=prediction_layer_name, size=state_size) + + + # 
---------------------------------------------------------------------------------------------------------------- + # ------------------------------------------------- EGO Composition -------------------------------------------- + # ---------------------------------------------------------------------------------------------------------------- + + QUERY = ' [QUERY]' + VALUE = ' [VALUE]' + RETRIEVED = ' [RETRIEVED]' + + # Pathways + state_to_previous_state_pathway = [state_input_layer, + MappingProjection(matrix=IDENTITY_MATRIX, + learnable=False), + previous_state_layer] + state_to_context_pathway = [state_input_layer, + MappingProjection(matrix=IDENTITY_MATRIX, + learnable=False), + context_layer] + state_to_em_pathway = [state_input_layer, + MappingProjection(sender=state_input_layer, + receiver=em.nodes[state_input_name+VALUE], + matrix=IDENTITY_MATRIX, + learnable=False), + em] + previous_state_to_em_pathway = [previous_state_layer, + MappingProjection(sender=previous_state_layer, + receiver=em.nodes[previous_state_input_name+QUERY], + matrix=IDENTITY_MATRIX, + learnable=False), + em] + context_learning_pathway = [context_layer, + MappingProjection(sender=context_layer, + matrix=IDENTITY_MATRIX, + receiver=em.nodes[context_name + QUERY], + learnable=True), + em, + MappingProjection(sender=em.nodes[state_input_name + RETRIEVED], + receiver=prediction_layer, + matrix=IDENTITY_MATRIX, + learnable=False), + prediction_layer] + + # Composition + EGO_comp = AutodiffComposition([state_to_previous_state_pathway, + state_to_context_pathway, + state_to_em_pathway, + previous_state_to_em_pathway, + context_learning_pathway], + learning_rate=learning_rate, + loss_spec=loss_spec, + name=model_name, + device=device) + + learning_components = EGO_comp.infer_backpropagation_learning_pathways(ExecutionMode.PyTorch) + EGO_comp.add_projection(MappingProjection(sender=state_input_layer, + receiver=learning_components[0], + learnable=False)) + + # Ensure EM is executed (to encode previous state and context, and predict current state) + # before updating state and context + EGO_comp.scheduler.add_condition(em, BeforeNodes(previous_state_layer, context_layer)) + + # # Validate construction + # print(EGO_comp.scheduler.consideration_queue) + # import graph_scheduler + # graph_scheduler.output_graph_image(EGO_comp.scheduler.graph, 'EGO_comp-scheduler.png') + + return EGO_comp +#endregion + +#region SCRIPT EXECUTION +# ====================================================================================================================== +# SCRIPT EXECUTION +# ====================================================================================================================== + +if __name__ == '__main__': + model = None + + if CONSTRUCT_MODEL: + print(f'Constructing {model_params["name"]}') + model = construct_model() + assert 'DEBUGGING BREAK POINT' + # print(model.scheduler.consideration_queue) + # gs.output_graph_image(model.scheduler.graph, 'EGO_comp-scheduler.png') + + if DISPLAY_MODEL is not None: + if model: + model.show_graph(**DISPLAY_MODEL) + else: + print("Model not yet constructed") + + if RUN_MODEL: + import timeit + def print_stuff(**kwargs): + print(f"\n**************\n BATCH: {kwargs['batch']}\n**************\n") + print(kwargs) + print('\nContext internal: \n', model.nodes['CONTEXT'].function.parameters.value.get(kwargs['context'])) + print('\nContext hidden: \n', model.nodes['CONTEXT'].parameters.value.get(kwargs['context'])) + print('\nContext for EM: \n', + model.nodes['EM'].nodes['CONTEXT 
[QUERY]'].parameters.value.get(kwargs['context'])) + print('\nPrediction: \n', + model.nodes['PREDICTION'].parameters.value.get(kwargs['context'])) + # print('\nLoss: \n', + # model.parameters.tracked_loss._get(kwargs['context'])) + print('\nProjections from context to EM: \n', model.projections[7].parameters.matrix.get(kwargs['context'])) + print('\nEM Memory: \n', model.nodes['EM'].parameters.memory.get(model.name)) + + # print("MODEL NOT YET FULLY EXECUTABLE") + print(f"Running {model_params['name']}") + context = model_params['name'] + start_time = timeit.default_timer() + model.learn(inputs={model_params['state_input_layer_name']:INPUTS}, + # report_output=REPORT_OUTPUT, + # report_progress=REPORT_PROGRESS + # call_after_minibatch=print('Projections from context to EM: ', + # model.projections[7].parameters.matrix.get(context)), + # # model.projections[7].matrix) + # call_after_minibatch=print_stuff, + optimizations_per_minibatch=1, + learning_rate=model_params['learning_rate'], + execution_mode=ExecutionMode.PyTorch, + # minibatch_size=3, + ) + stop_time = timeit.default_timer() + print(f"Elapsed time: {stop_time - start_time}") + if DISPLAY_MODEL is not None: + model.show_graph(**DISPLAY_MODEL) + if PRINT_RESULTS: + print("MEMORY:") + print(model.nodes['EM'].parameters.memory.get(model.name)) + model.run(inputs={model_params["state_input_layer_name"]:INPUTS[4]}, + # report_output=REPORT_OUTPUT, + # report_progress=REPORT_PROGRESS + ) + print("CONTEXT INPUT:") + print(model.nodes['CONTEXT'].parameters.variable.get(model.name)) + print("CONTEXT OUTPUT:") + print(model.nodes['CONTEXT'].parameters.value.get(model.name)) + print("PREDICTION OUTPUT:") + print(model.nodes['PREDICTION'].parameters.value.get(model.name)) + print("CONTEXT WEIGHTS:") + print(model.projections[7].parameters.matrix.get(model.name)) + plt.imshow(model.projections[7].parameters.matrix.get(model.name)) + def test_weights(weight_mat): + # checks whether only 5 weights are updated. + weight_mat -= np.eye(11) + col_sum = weight_mat.sum(1) + row_sum = weight_mat.sum(0) + return np.max([(row_sum != 0).sum(), (col_sum != 0).sum()]) + print(test_weights(model.projections[7].parameters.matrix.get(model.name))) + + if SAVE_RESULTS: + np.save('EGO PREDICTIONS', model.results) + np.save('EGO INPUTS', INPUTS) + np.save('EGO TARGETS', TARGETS) + + if PLOT_RESULTS: + plt.plot(1 - np.abs(model.results[2:TOTAL_NUM_STIMS,2]-TARGETS[:TOTAL_NUM_STIMS-2])) + plt.show() + plt.savefig('EGO PLOT.png') + + #endregion diff --git a/Scripts/Models (Under Development)/EGO/Environment.py b/Scripts/Models (Under Development)/EGO/Environment.py new file mode 100644 index 00000000000..0ce08fafaaf --- /dev/null +++ b/Scripts/Models (Under Development)/EGO/Environment.py @@ -0,0 +1,55 @@ +import numpy as np +import torch +from torch.utils.data import dataset +from torch import utils +from numpy.random import randint + +def one_hot_encode(labels, num_classes): + """ + One hot encode labels and convert to tensor. 
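    For example (illustrative only):

        one_hot_encode(np.array([2, 0]), 4)
        # -> tensor([[0., 0., 1., 0.],
        #            [1., 0., 0., 0.]])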
+ """ + return torch.tensor((np.arange(num_classes) == labels[..., None]).astype(float),dtype=torch.float32) + +class DeterministicCSWDataset(dataset.Dataset): + def __init__(self, n_samples_per_context, contexts_to_load) -> None: + super().__init__() + raw_xs = np.array([ + [[9,1,3,5,7],[9,2,4,6,8]], + [[10,1,4,5,8],[10,2,3,6,7]] + ]) + + item_indices = np.random.choice(raw_xs.shape[1],sum(n_samples_per_context),replace=True) + task_names = [0,1] # Flexible so these can be renamed later + task_indices = [task_names.index(name) for name in contexts_to_load] + + context_indices = np.repeat(np.array(task_indices),n_samples_per_context) + self.xs = one_hot_encode(raw_xs[context_indices,item_indices],11) + + self.xs = self.xs.reshape((-1,11)) + self.ys = torch.cat([self.xs[1:],one_hot_encode(np.array([0]),11)],dim=0) + context_indices = np.repeat(np.array(task_indices),[x*5 for x in n_samples_per_context]) + self.contexts = one_hot_encode(context_indices, len(task_names)) + + # Remove the last transition since there's no next state available + self.xs = self.xs[:-1] + self.ys = self.ys[:-1] + self.contexts = self.contexts[:-1] + + def __len__(self): + return len(self.xs) + + def __getitem__(self, idx): + return self.xs[idx], self.contexts[idx], self.ys[idx] + +def generate_dataset(condition='Blocked'): + # Generate the dataset for either the blocked or interleaved condition + if condition=='Blocked': + contexts_to_load = [0,1,0,1] + [randint(0,1) for _ in range(40)] + n_samples_per_context = [40,40,40,40] + [1]*40 + elif condition == 'Interleaved': + contexts_to_load = [0,1]*80 + [randint(0,1) for _ in range(40)] + n_samples_per_context = [1]*160 + [1]*40 + else: + raise ValueError(f'Unknown dataset condition: {condition}') + + return DeterministicCSWDataset(n_samples_per_context, contexts_to_load) diff --git a/psyneulink/core/components/component.py b/psyneulink/core/components/component.py index 60297b9a78c..305226c7712 100644 --- a/psyneulink/core/components/component.py +++ b/psyneulink/core/components/component.py @@ -1489,7 +1489,11 @@ def _get_compilation_params(self): "learning_results", "learning_signal", "learning_signals", "error_matrix", "error_signal", "activation_input", "activation_output", "error_sources", "covariates_sources", - "target", "sample", "learning_function" + "target", "sample", "learning_function", + "device", + # should be added to relevant _gen_llvm_function... when aug: + # SoftMax: + 'mask_threshold', 'adapt_scale', 'adapt_base', 'adapt_entropy_weighting' } # Mechanism's need few extra entries: # * matrix -- is never used directly, and is flatened below diff --git a/psyneulink/core/components/functions/function.py b/psyneulink/core/components/functions/function.py index 82a58529ec3..d9be6640493 100644 --- a/psyneulink/core/components/functions/function.py +++ b/psyneulink/core/components/functions/function.py @@ -551,7 +551,10 @@ class Function_Base(Function): for details). changes_shape : bool : False - specifies whether the return value of the function is different than the shape of its `variable . Used to determine whether the shape of the inputs to the `Component` to which the function is assigned should be based on the `variable ` of the function or its `value `. + specifies whether the return value of the function is different than the shape of either is outermost dimension + (axis 0) of its its `variable `, or any of the items in the next dimension (axis 1). 
+ Used to determine whether the shape of the inputs to the `Component` to which the function is assigned + should be based on the `variable ` of the function or its `value `. COMMENT owner : Component @@ -986,10 +989,11 @@ def _get_pytorch_fct_param_value(self, param_name, device, context): elif np.isscalar(np.array(val)): return float(val) try: - return torch.tensor(val, device=device).double() - except Exception: - assert False, (f"PROGRAM ERROR: unsupported value of parameter '{param_name}' ({val}) " - f"encountered in pytorch_function_creator().") + # return torch.tensor(val, device=device).double() + return torch.tensor(val, device=device) + except Exception as error: + raise FunctionError(f"PROGRAM ERROR: unsupported value of parameter '{param_name}' ({val}) " + f"encountered in pytorch_function_creator(): {error.args[0]}") # ***************************************** EXAMPLE FUNCTION ******************************************************* diff --git a/psyneulink/core/components/functions/nonstateful/combinationfunctions.py b/psyneulink/core/components/functions/nonstateful/combinationfunctions.py index 8bd0cb79f34..e4011db2eab 100644 --- a/psyneulink/core/components/functions/nonstateful/combinationfunctions.py +++ b/psyneulink/core/components/functions/nonstateful/combinationfunctions.py @@ -321,6 +321,13 @@ def derivative(self, input=None, output=None, covariates=None, context=None): return self._get_current_parameter_value(SCALE, context) + def _gen_pytorch_fct(self, device, context=None): + scale = self._get_pytorch_fct_param_value('scale', device, context) + offset = self._get_pytorch_fct_param_value('offset', device, context) + # return lambda x: torch.concatenate(tuple(x)) * scale + offset + return lambda x: torch.hstack(tuple(x)) * scale + offset + + class Rearrange(CombinationFunction): # ------------------------------------------------------------------------ """ Rearrange( \ diff --git a/psyneulink/core/components/functions/nonstateful/learningfunctions.py b/psyneulink/core/components/functions/nonstateful/learningfunctions.py index 1fa189a9897..96b4b3c3085 100644 --- a/psyneulink/core/components/functions/nonstateful/learningfunctions.py +++ b/psyneulink/core/components/functions/nonstateful/learningfunctions.py @@ -508,7 +508,7 @@ def func(entry_to_store, """Decay existing memories and replace weakest entry with entry_to_store (parallel EMStorage._function)""" if random_state.uniform(0, 1) < storage_prob: if decay_rate: - memory_matrix *= decay_rate + memory_matrix *= torch.tensor(decay_rate) if storage_location is not None: idx_of_min = storage_location else: diff --git a/psyneulink/core/components/functions/nonstateful/transferfunctions.py b/psyneulink/core/components/functions/nonstateful/transferfunctions.py index 89267a0f611..66ef69aadfc 100644 --- a/psyneulink/core/components/functions/nonstateful/transferfunctions.py +++ b/psyneulink/core/components/functions/nonstateful/transferfunctions.py @@ -89,14 +89,15 @@ from psyneulink.core.components.functions.stateful.integratorfunctions import SimpleIntegrator from psyneulink.core.components.shellclasses import Projection from psyneulink.core.globals.context import ContextFlags, handle_external_context +from psyneulink.core.globals.utilities import is_numeric_scalar from psyneulink.core.globals.keywords import \ - ADDITIVE_PARAM, ALL, ANGLE_FUNCTION, BIAS, BINOMIAL_DISTORT_FUNCTION, DROPOUT_FUNCTION, EXPONENTIAL_FUNCTION, \ - GAIN, GAUSSIAN_DISTORT_FUNCTION, GAUSSIAN_FUNCTION, HAS_INITIALIZERS, HOLLOW_MATRIX, \ - 
IDENTITY_FUNCTION, IDENTITY_MATRIX, INTERCEPT, LEAK, LINEAR_FUNCTION, LINEAR_MATRIX_FUNCTION, LOGISTIC_FUNCTION, \ - TANH_FUNCTION, MATRIX_KEYWORD_NAMES, MATRIX, MAX_INDICATOR, MAX_VAL, MULTIPLICATIVE_PARAM, NORMALIZE, \ - OFF, OFFSET, ON, PER_ITEM, PROB, PRODUCT, OUTPUT_TYPE, PROB_INDICATOR, \ - RATE, RECEIVER, RELU_FUNCTION, SCALE, SLOPE, SOFTMAX_FUNCTION, STANDARD_DEVIATION, SUM, \ - TRANSFER_FUNCTION_TYPE, TRANSFER_WITH_COSTS_FUNCTION, VARIANCE, VARIABLE, X_0, PREFERENCE_SET_NAME + (ADAPTIVE, ADDITIVE_PARAM, ALL, ANGLE_FUNCTION, BIAS, BINOMIAL_DISTORT_FUNCTION, DROPOUT_FUNCTION, + EXPONENTIAL_FUNCTION, GAIN, GAUSSIAN_DISTORT_FUNCTION, GAUSSIAN_FUNCTION, HAS_INITIALIZERS, HOLLOW_MATRIX, + IDENTITY_FUNCTION, IDENTITY_MATRIX, INTERCEPT, LEAK, LINEAR_FUNCTION, LINEAR_MATRIX_FUNCTION, LOGISTIC_FUNCTION, + TANH_FUNCTION, MATRIX_KEYWORD_NAMES, MATRIX, MAX_INDICATOR, MAX_VAL, MULTIPLICATIVE_PARAM, NORMALIZE, + OFF, OFFSET, ON, OUTPUT_TYPE, PER_ITEM, PROB, PRODUCT, PROB_INDICATOR, + RATE, RECEIVER, RELU_FUNCTION, SCALE, SLOPE, SOFTMAX_FUNCTION, STANDARD_DEVIATION, SUM, + TRANSFER_FUNCTION_TYPE, TRANSFER_WITH_COSTS_FUNCTION, VARIANCE, VARIABLE, X_0, PREFERENCE_SET_NAME) from psyneulink.core.globals.parameters import \ FunctionParameter, Parameter, get_validator_by_function, check_user_specified, copy_parameter_value from psyneulink.core.globals.preferences.basepreferenceset import \ @@ -1435,7 +1436,9 @@ def _gen_pytorch_fct(self, device, context=None): gain = self._get_pytorch_fct_param_value('gain', device, context) bias = self._get_pytorch_fct_param_value('bias', device, context) offset = self._get_pytorch_fct_param_value('offset', device, context) - return lambda x: 1 / (1 + torch.exp(-gain * (x + bias) + offset)) + # return lambda x: 1 / (1 + torch.exp(-gain * (x + bias) + offset)) + return lambda x: ((torch.exp(-gain * (x + bias) + offset) - torch.exp(-gain * (-x + bias) + offset)) + / (torch.exp(-gain * (x + bias) + offset) + torch.exp(-gain * (-x + bias) + offset))) # ********************************************************************************************************************** # ReLU @@ -2894,14 +2897,18 @@ def _gen_pytorch_fct(self, device, context=None): class SoftMax(TransferFunction): """ - SoftMax( \ - default_variable, \ - gain=1.0, \ - output=ALL, \ - params=None, \ - owner=None, \ - name=None, \ - prefs=None \ + SoftMax( \ + default_variable, \ + gain=1.0, \ + mask_threshold=None, \ + adapt_scale=1, \ + adapt_base=1, \ + adapt_entropy_weighting=.1 \ + output=ALL, \ + params=None, \ + owner=None, \ + name=None, \ + prefs=None \ ) .. _SoftMax: @@ -2914,9 +2921,42 @@ class SoftMax(TransferFunction): \\frac{e^{gain * variable_i}}{\\sum\\limits^{len(variable)}e^{gain * variable}} - filtered by `ouptput ` specification (see `The Softmax function and its derivative + filtered by `output ` specification (see `The Softmax function and its derivative `_ for a nice discussion). + .. note:: + If `variable ` is all zeros, the SoftMax transform returns all zeros. + + .. _SoftMax_AdaptGain: + + *Thresholding and Adaptive Gain* + + For cases in which SoftMax is used with vector that sparse (e.g., one-hots), the value(s) of the (most( significant + entries (e.g., the one's in a one-hot) can be sensitive to (diminished by) the number of other values in the vector + (i.e., its length). For example, whereas for ``[1 0]`` the SoftMax is ``[0.73105858 0.26894142]``, for ``[1 0 0 0]`` + it is ``[0.47536689 0.1748777 0.1748777 0.1748777]``. 
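    The dilution is easy to reproduce with a few lines of NumPy (an illustrative sketch, not part of
    the PsyNeuLink API)::

        import numpy as np
        def softmax(x, gain=1.0):
            v = np.exp(gain * (x - np.max(x)))    # shift by max for numerical stability
            return v / v.sum()
        softmax(np.array([1., 0.]))           # ~[0.731, 0.269]
        softmax(np.array([1., 0., 0., 0.]))   # ~[0.475, 0.175, 0.175, 0.175]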
This can be addressed in one of two ways: either by + thresholding `variable ` before applying the SoftMax function, or by adapting the `gain + ` parametrically based on the `variable `: + + - *mask_threshold* -- setting the **mask_threshold** argument to a scalar value causes the `variable + ` to be thresholded by that value before applying the SoftMax function; any elements of + `variable ` with an absolute value below the threshold are set to 0; all others are scaled + by the specified `gain ` and then passed through the SoftMax function. This only applies if the + **gain** argument is specified as a scalar; if it is specified as *ADAPTIVE*, then the **mask_threshold** + argument is ignored. + + - *ADAPTIVE* -- setting **gain** argument to *ADAPTIVE* causes it to be dynamically adjusted, + based on the entropy and length of the variable, to keep the mass of the distribution around the highest values + as consistent as possible over different sized vectors. If *ADAPTIVE* is specified, then the `mask_threshold + ` argument is ignored. The gain is adapted by calling the SoftMax function's `adapt_gain + ` method. This can be finicky, and may need to be further tuned to the length of `variable + `, which can be done using the SoftMax Function's **adapt_scale**, **adapt_base**, and + **adapt_entropy_weighting** arguments. + + .. _SoftMax_Derivative: + + *Derivatve* + `derivative ` returns the derivative of the SoftMax. If *OUTPUT_TYPE* for the SoftMax is *ALL*, returns Jacobian matrix (derivative for each element of the output array with respect to each of the others): @@ -2934,8 +2974,26 @@ class SoftMax(TransferFunction): default_variable : 1d array : default class_defaults.variable specifies a template for the value to be transformed. - gain : float : default 1.0 - specifies a value by which to multiply `variable ` before SoftMax transformation. + gain : scalar or ADAPTIVE : default 1.0 + specifies the value by which to multiply `variable ` before SoftMax transformation, + which functions as the inverse "temperature" of the function. If it is a scalar, it must be greater + than zero. If *ADAPTIVE* is specified, the value is determined dynamically based on the `variable + ` `SoftMax_AdaptGain` for details). + + mask_threshold : scalar : default None + specifies whether to mask_threshold the `variable ` before applying the SoftMax function; + this only applies if `gain ` is specified as a scalar; otherwise it is ignored + (see `SoftMax_AdaptGain` for details). + + adapt_scale : scalar : default 1 + specifies the *scale* parameter using by the `adapt_gain ` method (see method for details). + + adapt_base : scalar : default 1 + specifies the *base* parameter using by the `adapt_gain ` method (see method for details). + + adapt_entropy_weighting : default .1 + specifies the *entropy_weighting* parameter using by the `adapt_gain ` method + (see method for details). output : ALL, MAX_VAL, MAX_INDICATOR, or PROB : default ALL specifies the format of array returned by `function ` @@ -2965,9 +3023,28 @@ class SoftMax(TransferFunction): variable : 1d array contains value to be transformed. - gain : float - value by which `variable ` is multiplied before the SoftMax transformation; determines - the "sharpness" of the distribution. 
+ gain : scalar or ADAPTIVE + determines how `variable ` is scaled before the SoftMax transformation, determining the + "sharpness" of the distribution (it is equivalent to the inverse of the temperature of the SoftMax function); + if it is 'ADAPTIVE', it is determined dynamically adjusted using the `adapt_gain ` method + (see `SoftMax_AdaptGain` for additional details). + + mask_threshold : scalar or None + determines whether the `variable ` is thresholded before applying the SoftMax function; + if it is a scalar, only elements of `variable ` with an absolute value greater than that + value are considered when applying the SoftMax function (which are then scaled by the `gain ` + parameter; all other elements are assigned 0. This only applies if `gain ` is specified as a + scalar; otherwise it is ignored (see `SoftMax_AdaptGain` for details). + + adapt_scale : scalar + determined the *scale* parameter using by the `adapt_gain ` method (see method for details). + + adapt_base : scalar + determines the *base* parameter using by the `adapt_gain ` method (see method for details). + + adapt_entropy_weighting : scalar + determines the *entropy_weighting* parameter using by the `adapt_gain ` method + (see method for details). output : ALL, MAX_VAL, MAX_INDICATOR, or PROB determines how the SoftMax-transformed values of the elements in `variable ` are reported @@ -3012,6 +3089,24 @@ class Parameters(TransferFunction.Parameters): :type: ``numpy.ndarray`` :read only: True + adapt_scale + see `adapt_scale ` + + :default value: 1.0 + :type: ``float`` + + adapt_base + see `adapt_base ` + + :default value: 1.0 + :type: ``float`` + + adapt_entropy_weighting + see `adapt_entropy_weighting ` + + :default value: 0.1 + :type: ``float`` + bounds see `bounds ` @@ -3035,14 +3130,63 @@ class Parameters(TransferFunction.Parameters): :default value: True :type: ``bool`` + + mask_threshold + see `mask_threshold ` + + :default value: None + :type: ``float`` """ variable = Parameter(np.array([[0.0]]), read_only=True, pnl_internal=True, constructor_argument='default_variable') gain = Parameter(1.0, modulable=True, aliases=[MULTIPLICATIVE_PARAM]) + mask_threshold = Parameter(None, modulable=True) + adapt_scale = Parameter(1.0, modulable=True) + adapt_base = Parameter(1.0, modulable=True) + adapt_entropy_weighting = Parameter(0.95, modulable=True) bounds = (0, 1) output = ALL per_item = Parameter(True, pnl_internal=True) one_hot_function = Parameter(None, stateful=False, loggable=False) + def _validate_gain(self, gain): + if is_numeric_scalar(gain): + if gain <= 0: + return 'must be a scalar greater than 0' + elif isinstance(gain, str): + if gain != ADAPTIVE: + return f'the keyword for adaptive gain is {ADAPTIVE}' + else: + return f'must be a scalar greater than 0 or the keyword {ADAPTIVE}' + + def _validate_mask_threshold(self, mask_threshold): + if mask_threshold is not None: + if is_numeric_scalar(mask_threshold): + if mask_threshold <= 0: + return 'must be a scalar greater than 0' + return None + return f'must be a scalar greater than 0' + + def _validate_adapt_scale(self, adapt_scale): + if is_numeric_scalar(adapt_scale): + if adapt_scale <= 0: + return 'must be a scalar greater than 0' + return None + return f'must be a scalar greater than 0' + + def _validate_adapt_base(self, adapt_base): + if is_numeric_scalar(adapt_base): + if adapt_base <= 0: + return 'must be a scalar greater than 0' + return None + return f'must be a scalar greater than 0' + + def _validate_adapt_entropy_weighting(self, 
adapt_entropy_weighting): + if is_numeric_scalar(adapt_entropy_weighting): + if adapt_entropy_weighting <= 0: + return 'must be a scalar greater than 0' + return None + return f'must be a scalar greater than 0' + def _validate_output(self, output): options = {ALL, MAX_VAL, MAX_INDICATOR, PROB} if output in options: @@ -3055,6 +3199,10 @@ def _validate_output(self, output): def __init__(self, default_variable=None, gain: Optional[ValidParamSpecType] = None, + mask_threshold: Optional[ValidParamSpecType] = None, + adapt_scale: Optional[ValidParamSpecType] = None, + adapt_base: Optional[ValidParamSpecType] = None, + adapt_entropy_weighting: Optional[ValidParamSpecType] = None, output=None, per_item=None, params: Optional[dict] = None, @@ -3076,6 +3224,10 @@ def __init__(self, super().__init__( default_variable=default_variable, gain=gain, + mask_threshold=mask_threshold, + adapt_scale=adapt_scale, + adapt_base=adapt_base, + adapt_entropy_weighting=adapt_entropy_weighting, per_item=per_item, output=output, one_hot_function=one_hot_function, @@ -3106,15 +3258,23 @@ def _validate_variable(self, variable, context=None): return np.asarray(variable) - def apply_softmax(self, input_value, gain, output_type): + def apply_softmax(self, input_value, gain, mask_threshold, output_type): + # Modulate input_value by gain v = gain * input_value # Shift by max to avoid extreme values: v = v - np.max(v) # Exponentiate v = np.exp(v) + # Threshold if specified: + if mask_threshold: + v = v * np.where(input_value > mask_threshold, v, 0) # Normalize (to sum to 1) - sm = v / np.sum(v, axis=0) + if not any(v): + # If v is all zeros, avoid divide by zero in normalize and return all zeros for softmax + sm = v + else: + sm = v / np.sum(v, axis=0) # Generate one-hot encoding based on selected output_type @@ -3152,19 +3312,41 @@ def _function(self, # Assign the params and return the result output_type = self._get_current_parameter_value(OUTPUT_TYPE, context) gain = self._get_current_parameter_value(GAIN, context) + mask_threshold = self._get_current_parameter_value('mask_threshold', context) + if isinstance(gain, str) and gain == ADAPTIVE: + gain = self.adapt_gain(variable, context) per_item = self._get_current_parameter_value(PER_ITEM, context) # Compute softmax and assign to sm if per_item and len(np.shape(variable)) > 1: output = [] for item in variable: - output.append(self.apply_softmax(item, gain, output_type)) + output.append(self.apply_softmax(item, gain, mask_threshold, output_type)) output = convert_all_elements_to_np_array(output) else: - output = self.apply_softmax(variable, gain, output_type) + output = self.apply_softmax(variable, gain, mask_threshold, output_type) return self.convert_output_type(output) + def adapt_gain(self, v, context)->float: + """Compute the softmax gain (inverse temperature) based on the entropy of the distribution of values. + Uses base, scale, and entropy_weighting parameters of SoftMax function to compute gain: + + .. 
math:: gain = scale * (base + (entropy\\_weighting * log(entropy(logistic(v))))) + """ + scale = self._get_current_parameter_value('adapt_scale', context) + base = self._get_current_parameter_value('adapt_base', context) + entropy_weighting = self._get_current_parameter_value('adapt_entropy_weighting', context) + entropy_weighting = np.log(len(v)) * entropy_weighting + + v = np.squeeze(v) + gain = scale * (base + + (entropy_weighting * + np.log( + -1 * np.sum((1 / (1 + np.exp(-1 * v))) * np.log(1 / (1 + np.exp(-1 * v))))))) + return gain + + @handle_external_context() def derivative(self, input=None, output=None, context=None): """ @@ -3390,7 +3572,35 @@ def _gen_llvm_function_body(self, ctx, builder, params, state, arg_in, arg_out, def _gen_pytorch_fct(self, device, context=None): gain = self._get_pytorch_fct_param_value('gain', device, context) - return lambda x: (torch.softmax(gain * x, 0)) + mask_threshold = self._get_pytorch_fct_param_value('mask_threshold', device, context) + + if isinstance(gain, str) and gain == ADAPTIVE: + return lambda x: (torch.softmax(self._gen_pytorch_adapt_gain_fct(device, context)(x) * x, 0)) + + elif mask_threshold: + def pytorch_thresholded_softmax(_input: torch.Tensor) -> torch.Tensor: + # Mask elements of input below threshold + _mask = (torch.abs(_input) > mask_threshold) + # Subtract off the max value in the input to eliminate extreme values, exponentiate, and apply mask + masked_exp = _mask * torch.exp(gain * (_input - torch.max(_input, 0, keepdim=True)[0])) + if not any(masked_exp): + return masked_exp + return masked_exp / torch.sum(masked_exp, 0, keepdim=True) + # Return the function + return pytorch_thresholded_softmax + + else: + return lambda x: (torch.softmax(gain * x, 0)) + + def _gen_pytorch_adapt_gain_fct(self, device, context=None): + scale = self._get_pytorch_fct_param_value('adapt_scale', device, context) + base = self._get_pytorch_fct_param_value('adapt_base', device, context) + entropy_weighting = self._get_pytorch_fct_param_value('adapt_entropy_weighting', device, context) + # v = torch.squeeze(v) + return lambda x : scale * (base + + (entropy_weighting * len(x) * + torch.log(-1 * torch.sum((1 / (1 + torch.exp(-1 * x))) + * torch.log(1 / (1 + torch.exp(-1 * x))))))) # ********************************************************************************************************************** diff --git a/psyneulink/core/components/functions/stateful/integratorfunctions.py b/psyneulink/core/components/functions/stateful/integratorfunctions.py index 06e3fd9597d..306fdaab016 100644 --- a/psyneulink/core/components/functions/stateful/integratorfunctions.py +++ b/psyneulink/core/components/functions/stateful/integratorfunctions.py @@ -1220,11 +1220,7 @@ def _function(self, # execute noise if it is a function noise = self._try_execute_param(self._get_current_parameter_value(NOISE, context), variable, context=context) - # # MODIFIED 6/14/19 OLD: - # previous_value = np.atleast_2d(self.parameters.previous_value._get(context)) - # # MODIFIED 6/14/19 NEW: [JDC] previous_value = self.parameters.previous_value._get(context) - # MODIFIED 6/14/19 END try: value = self._EWMA_filter(previous_value, rate, variable) + noise @@ -1241,11 +1237,13 @@ def _function(self, if not self.is_initializing: self.parameters.previous_value._set(adjusted_value, context) - # # MODIFIED 6/21/19 OLD: - # return self.convert_output_type(adjusted_value) - # MODIFIED 6/21/19 NEW: [JDC] return self.convert_output_type(adjusted_value, variable) - # MODIFIED 6/21/19 END + + def 
_gen_pytorch_fct(self, device, context=None): + rate = self._get_pytorch_fct_param_value('rate', device, context) + offset = self._get_pytorch_fct_param_value('offset', device, context) + noise = self._get_pytorch_fct_param_value('noise', device, context) + return lambda prev_val, variable: self._EWMA_filter(prev_val, rate, variable) + noise + offset def as_expression(self): return f'(1 - rate) * previous_value + rate * {MODEL_SPEC_ID_MDF_VARIABLE} + noise + offset' diff --git a/psyneulink/core/components/projections/projection.py b/psyneulink/core/components/projections/projection.py index fa7211c2060..6418c6de96a 100644 --- a/psyneulink/core/components/projections/projection.py +++ b/psyneulink/core/components/projections/projection.py @@ -563,6 +563,9 @@ class Projection_Base(Projection): assignment as a *feedback* Projection, whereas False precludes it from being assigned as a feedback Projection; None (the default) allows the Composition to determine whether it is assigned as a feedback Projection. + exclude_in_autodiff : bool : default False + specifies whether Projection is included in `AutodiffComposition` gradient calculations. + Attributes ---------- @@ -588,6 +591,9 @@ class Projection_Base(Projection): ` are also accessible as (and can be modified using) attributes of the Projection, in the same manner as they can for a `Mechanism `). + exclude_in_autodiff : bool : default False + determines whether Projection is included in `AutodiffComposition` gradient calculations. + weight : number multiplies the `value ` of the Projection after applying the `exponent `, and before combining with any other Projections that project to the same `Port` @@ -717,7 +723,7 @@ def __init__(self, return self.receiver = receiver - self._exclude_from_autodiff = exclude_in_autodiff + self.exclude_in_autodiff = exclude_in_autodiff self._feedback = feedback # Assign to _feedback to avoid interference with vertex.feedback used in Composition # Register with ProjectionRegistry or create one diff --git a/psyneulink/core/compositions/composition.py b/psyneulink/core/compositions/composition.py index 01d8fa5e37e..804ed9f6f45 100644 --- a/psyneulink/core/compositions/composition.py +++ b/psyneulink/core/compositions/composition.py @@ -2943,7 +2943,7 @@ def input_function(env, result): AFTER, ALL, ALLOW_PROBES, ANY, BEFORE, COMPONENT, COMPOSITION, CONTROL, CONTROL_SIGNAL, CONTROLLER, CROSS_ENTROPY, \ DEFAULT, DEFAULT_VARIABLE, DICT, FEEDBACK, FULL, FUNCTION, HARD_CLAMP, IDENTITY_MATRIX, \ INPUT, INPUT_PORTS, INPUTS, INPUT_CIM_NAME, \ - LEARNED_PROJECTIONS, LEARNING_FUNCTION, LEARNING_MECHANISM, LEARNING_MECHANISMS, LEARNING_PATHWAY, \ + LEARNABLE, LEARNED_PROJECTIONS, LEARNING_FUNCTION, LEARNING_MECHANISM, LEARNING_MECHANISMS, LEARNING_PATHWAY, \ LEARNING_SIGNAL, Loss, \ MATRIX, MAYBE, MODEL_SPEC_ID_METADATA, MONITOR, MONITOR_FOR_CONTROL, NAME, NESTED, NO_CLAMP, NODE, NODES, \ OBJECTIVE_MECHANISM, ONLINE, ONLY, OUTCOME, OUTPUT, OUTPUT_CIM_NAME, OUTPUT_MECHANISM, OUTPUT_PORTS, OWNER_VALUE, \ @@ -4116,12 +4116,17 @@ def __init__( # Call with context = COMPOSITION to avoid calling _check_initialization_status again self._analyze_graph(context=context) - show_graph_attributes = show_graph_attributes or {} - self._show_graph = ShowGraph(self, **show_graph_attributes) + # ShowGraph + self.assign_ShowGraph(show_graph_attributes) if termination_processing is not None: self.termination_processing = termination_processing + def assign_ShowGraph(self, show_graph_attributes): + """Helper function to allow override of the 
ShowGraph class in subclasses (e.g., AutodiffComposition)""" + show_graph_attributes = show_graph_attributes or {} + self._show_graph = ShowGraph(self, **show_graph_attributes) + @property def graph_processing(self): """ @@ -4751,15 +4756,17 @@ def get_nodes_by_role(self, role): raise CompositionError('Node missing from {0}.nodes_to_roles: {1}'.format(self, e)) def get_nested_nodes_by_roles_at_any_level(self, comp, include_roles, exclude_roles=None)->list or None: - """Return all Nodes from nested Compositions that have *include_roles* but not *exclude_roles at all levels*. - Returns Nodes that have or don't have the specified roles at *any* level of nesting, - irrespective of their status at other levels of nesting. + """Return all Nodes from comp or any nested within it that have *include_roles* but not *exclude_roles*. + Returns Nodes that have or don't have the specified roles in the Composition specified by **comp** + or any Composition nested within it, irrespective of their status at other levels of nesting. To get nodes that are either INPUT or OUTPUT Nodes at *all* levels of nesting, use either get_nested_input_nodes_at_all_levels() or get_nested_output_nodes_at_all_levels() Note: do this recursively, checking roles on the "way down," as a Node may have a role in a deeply nested Composition, but that Composition itself may not have the same role in the Composition within which *it* is nested (e.g., a Node might be an INPUT Node of a nested Composition, but that nested Composition may not be an INPUT Node of the Composition in which it is nested). + Note: exclude_roles takes precedence, so that if a NodeRole is listed in both, + nodes with that role will be *excluded*. """ nested_nodes = [] include_roles = [] if include_roles is None else convert_to_list(include_roles) @@ -4777,7 +4784,7 @@ def get_nested_nodes_by_roles_at_any_level(self, comp, include_roles, exclude_ro nested_nodes.append(node) return nested_nodes if any(nested_nodes) else None - def get_nested_nodes_input_nodes_at_levels(self)->list or None: + def get_nested_input_nodes_at_all_levels(self)->list or None: """Return all Nodes from nested Compositions that receive input directly from input to outermost Composition.""" input_nodes = self.get_nested_nodes_by_roles_at_any_level(self, include_roles=NodeRole.INPUT) return [input_node for input_node in input_nodes @@ -4786,7 +4793,7 @@ def get_nested_nodes_input_nodes_at_levels(self)->list or None: for input_port in input_node.input_ports for proj in input_port.path_afferents if isinstance(proj.sender.owner, CompositionInterfaceMechanism))] or None - def get_nested_nodes_output_nodes_at_levels(self)->list or None: + def get_nested_output_nodes_at_all_levels(self)->list or None: """Return all Nodes from nested Compositions that send output directly to outermost Composition.""" output_nodes = self.get_nested_nodes_by_roles_at_any_level(self, include_roles=NodeRole.OUTPUT) return [output_node for output_node in output_nodes @@ -6486,8 +6493,8 @@ def add_projection(self, proj_spec = {PROJECTION_TYPE:projection.className, PROJECTION_PARAMS:{ FUNCTION:projection.function, - MATRIX:projection.matrix.base} - } + MATRIX:projection.matrix.base, + LEARNABLE:projection.learnable}} return self.add_projection(proj_spec, sender=projection.sender, receiver=projection.receiver) # Create Projection if it doesn't exist @@ -6692,7 +6699,7 @@ def _instantiate_projection_from_spec(self, projection, sender=None, receiver=No if isinstance(projection, dict): proj_type = 
projection.pop(PROJECTION_TYPE, None) or MappingProjection params = projection.pop(PROJECTION_PARAMS, None) - projection = MappingProjection(params=params) + projection = MappingProjection(**params) elif isinstance(projection, (np.ndarray, np.matrix, list, RandomMatrix)): return MappingProjection(matrix=projection, sender=sender, receiver=receiver, name=name) elif isinstance(projection, str):
@@ -8390,7 +8397,7 @@ def add_backpropagation_learning_pathway(self, default_projection_matrix=default_projection_matrix, name=name) - # NOTES: + # IMPLEMENTATION NOTE: # Learning-type-specific creation methods should: # - create ComparatorMechanism and pass in as error_source (for 1st LearningMechanism in sequence in bp) # - Determine and pass error_sources (aka previous_learning_mechanism) (for bp)
@@ -10854,6 +10861,7 @@ def run( default_absolute_time_unit: typing.Optional[pint.Quantity] = None, context=None, base_context=Context(execution_id=None), + **kwargs ): """Pass inputs to Composition, then execute sets of nodes that are eligible to run until termination conditions are met.
@@ -11339,11 +11347,13 @@ def run( # PROCESSING ------------------------------------------------------------------------ # Prepare stimuli from the outside world -- collect the inputs for this TRIAL and store them in a dict try: + # IMPLEMENTATION NOTE: for autodiff, the following includes backward pass after forward pass execution_stimuli = self._parse_trial_inputs(inputs, trial_num, context) except StopIteration: break # execute processing, passing stimuli for this trial + # IMPLEMENTATION NOTE: for autodiff, the following is the forward pass for the current trial trial_output = self.execute(inputs=execution_stimuli, scheduler=scheduler, termination_processing=termination_processing,
@@ -11365,6 +11375,9 @@ def run( # --------------------------------------------------------------------------------- # store the result of this execution in case it will be the final result + + assert "AFTER FORWARD PASS" + # object.results.append(result) trial_output = copy_parameter_value(trial_output)
@@ -11452,8 +11465,10 @@ def learn( epochs: int = 1, learning_rate: Optional[Union[int,float]]=None, minibatch_size: int = 1, + optimizations_per_minibatch: int = 1, patience: Optional[int] = None, min_delta: int = 0, + synchronize_pnl_values: bool = True, context: Optional[Context] = None, execution_mode: pnlvm.ExecutionMode = pnlvm.ExecutionMode.Python, randomize_minibatches=False,
@@ -11509,6 +11524,17 @@ def learn( specifies the size of the minibatches to use. The input trials will be batched and run, after which learning mechanisms with learning mode TRIAL will update weights + optimizations_per_minibatch : int (default=1) + specifies the number of executions and weight updates of learnable pathways that are carried out for + each set of stimuli in a minibatch. + + .. hint:: + This can be used to implement the `backprop-to-activation procedure + `_ in which the `backpropagation + learning algorithm ` is used, with a high learning rate, to quickly search + for a pattern of activation in response to a given input (or set of inputs) that is useful for some + downstream purpose. + randomize_minibatch: bool (default=False) specifies whether the order of the input trials should be randomized on each epoch
@@ -11521,6 +11547,12 @@ def learn( Any reduction less than this value is considered to be a bad epoch. Used for early stopping of training, in combination with `patience`. 
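A minimal usage sketch of the new optimizations_per_minibatch argument documented above, assuming a small AutodiffComposition; the mechanism names, data, and parameter values are illustrative placeholders rather than anything defined in this patch, and minibatch_size and execution_mode come from the existing learn() signature.

    import psyneulink as pnl

    # Hypothetical two-layer model and XOR-style data (all names and values are assumptions).
    A = pnl.ProcessingMechanism(name='A', default_variable=[0.0, 0.0])
    B = pnl.ProcessingMechanism(name='B', default_variable=[0.0], function=pnl.Logistic)
    model = pnl.AutodiffComposition(pathways=[[A, B]], learning_rate=0.1)

    results = model.learn(
        inputs={"inputs": {A: [[0, 0], [0, 1], [1, 0], [1, 1]]},
                "targets": {B: [[0], [1], [1], [0]]},
                "epochs": 50},
        minibatch_size=4,               # weights are updated once per group of 4 trials...
        optimizations_per_minibatch=3,  # ...and the forward/backward pass and update repeat 3x per minibatch
        execution_mode=pnl.ExecutionMode.PyTorch,
    )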
+ synchronize_pnl_values : bool : default True + specifies whether to synchronize the `values ` of the `Mechanisms ` + in the PsyNeuLink Composition with the corresponding modules of the PyTorch implementation after each + forward pass when an `AutodiffComposition` is used is executed in ``PyTorch mode + `. + scheduler : Scheduler the scheduler object that owns the conditions that will instruct the execution of the Composition If not specified, the Composition will use its automatically generated scheduler. @@ -11594,8 +11626,10 @@ def learn( epochs=epochs, learning_rate=learning_rate, minibatch_size=minibatch_size, + optimizations_per_minibatch=optimizations_per_minibatch, patience=patience, min_delta=min_delta, + synchronize_pnl_values=synchronize_pnl_values, randomize_minibatches=randomize_minibatches, call_before_minibatch=call_before_minibatch, call_after_minibatch=call_after_minibatch, @@ -12273,7 +12307,7 @@ def execute( report_num=report_num, runtime_params=execution_runtime_params, ) - assert True + assert 'DEBUGGING BREAK POINT' # Set execution_phase for node's context back to IDLE if self._is_learning(context): @@ -13550,7 +13584,9 @@ def show_graph(self, active_items=None, output_fmt='pdf', context=None): - + """Patch to ShowGraph method + IMPLEMENTATION NOTE: arguments are listed explicitly so they show up in IDEs that support argument completion + """ return self._show_graph(show_all=show_all, show_node_structure=show_node_structure, show_nested=show_nested, diff --git a/psyneulink/core/compositions/showgraph.py b/psyneulink/core/compositions/showgraph.py index cc339787a29..feb22cc7cde 100644 --- a/psyneulink/core/compositions/showgraph.py +++ b/psyneulink/core/compositions/showgraph.py @@ -685,7 +685,7 @@ def show_graph(self, self.num_nesting_levels = kwargs.pop(NUM_NESTING_LEVELS,None) enclosing_g = enclosing_comp._show_graph.G if enclosing_comp else None - processing_graph = composition.graph_processing.dependency_dict + processing_graph = self._get_processing_graph(composition, context) # IMPLEMENTATION_NOTE: Take diff with following to get scheduling edges not in compostion graph: # processing_graph = composition.scheduler.dependency_dict @@ -826,9 +826,12 @@ def show_graph(self, rcvrs = list(processing_graph.keys()) for rcvr in rcvrs: - if any(n is rcvr for nested_comp in composition.nodes - if isinstance(nested_comp, Composition) for n in nested_comp.nodes): - continue + # # MODIFIED 7/10 NEW: + # # FIX: NOT SURE WHAT THE PURPOSE OF THIS WAS, AND DOESN'T EVER SEEM TO GET CALLED: + # if any(n is rcvr for nested_comp in self._get_nodes(composition, context) + # if isinstance(nested_comp, Composition) for n in self._get_nodes(nested_comp, context)): + # continue + # # MODIFIED 7/10 END # If show_controller is true, objective mechanism is handled in _assign_controller_components if (show_controller @@ -869,7 +872,8 @@ def show_graph(self, show_projection_labels, show_projections_not_in_composition, show_controller, - comp_hierarchy) + comp_hierarchy, + context) # Add controller-related Components to graph if show_controller if show_controller: @@ -886,7 +890,8 @@ def show_graph(self, show_projection_labels, show_projections_not_in_composition, comp_hierarchy, - nesting_level) + nesting_level, + context) # Add learning-related Components to graph if show_learning if show_learning: @@ -904,7 +909,8 @@ def show_graph(self, show_node_structure, node_struct_args, show_projection_labels, - show_projections_not_in_composition) + show_projections_not_in_composition, + context) 
return self._generate_output(G, enclosing_comp, @@ -916,6 +922,38 @@ def show_graph(self, def __call__(self, **args): return self.show_graph(**args) + def _get_processing_graph(self, composition, context): + """Helper method that allows override by subclass to filter nodes and their dependencies used for graph""" + return composition.graph_processing.dependency_dict + + def _get_nodes(self, composition ,context): + """Helper method that allows override by subclass to filter nodes used for graph""" + return composition.nodes + + def _get_projections(self, composition, context): + """Helper method that allows override by subclass to filter projections used for graph""" + return composition.projections + + def _proj_in_composition(self, proj, composition_projections, context): + """Helper method that allows override by subclass to filter projections used for graph""" + return proj in composition_projections + + def _get_roles_by_node(self, composition, node, context): + """Helper method that allows override by subclass to filter NodeRoles used for graph""" + return composition.get_roles_by_node(node) + + def _get_nodes_by_role(self, composition, role, context): + """Helper method that allows override by subclass to filter NodeRoles used for graph""" + return composition.get_nodes_by_role(role) + + def _implement_graph_node(self, graph, rcvr, context, *args, **kwargs): + """Helper method that allows override by subclass to assign custom attributes to nodes""" + graph.node(*args, **kwargs) + + def _implement_graph_edge(self, graph, proj, context, *args, **kwargs): + """Helper method that allows override by subclass to assign custom attributes to edges""" + graph.edge(*args, **kwargs) + def _assign_processing_components(self, g, rcvr, @@ -952,12 +990,11 @@ def _assign_processing_components(self, NESTING_LEVEL:nesting_level + 1, }) # Get subgraph for nested Composition - # # MODIFIED 10/29/22 NEW: FIX: HACK SO NESTED COMPOSITIONS DON'T CRASH ANIMATION (THOUGH STILL NOT SHOWN) + # IMPLEMENTATION NOTE: FIX: HACK SO NESTED COMPOSITIONS DON'T CRASH ANIMATION (THOUGH STILL NOT SHOWN) if hasattr(composition, '_animate') and composition._animate is not False: rcvr._animate = composition._animate rcvr._set_up_animation(context) rcvr._animate_num_trials = composition._animate_num_trials + 1 - # MODIFIED 10/29/22 END nested_comp_graph = rcvr._show_graph.show_graph(**nested_args) nested_comp_graph.name = "cluster_" + rcvr.name @@ -968,14 +1005,16 @@ def _assign_processing_components(self, # nested_comp_graph.attr(color=feedback_color) # nested_comp_attributes = {"label":rcvr_label} nested_comp_attributes = {} - if rcvr in composition.get_nodes_by_role(NodeRole.INPUT) and \ - rcvr in composition.get_nodes_by_role(NodeRole.OUTPUT): + input_nodes = self._get_nodes_by_role(composition, NodeRole.INPUT, context) + output_nodes = self._get_nodes_by_role(composition, NodeRole.OUTPUT, context) + probe_nodes = self._get_nodes_by_role(composition, NodeRole.PROBE, context) + if rcvr in input_nodes and output_nodes: nested_comp_attributes.update({"color": self.input_and_output_color}) - elif rcvr in composition.get_nodes_by_role(NodeRole.INPUT): + elif rcvr in input_nodes: nested_comp_attributes.update({"color": self.input_color}) - elif rcvr in composition.get_nodes_by_role(NodeRole.PROBE): + elif rcvr in probe_nodes: nested_comp_attributes.update({"color": self.probe_color}) - elif rcvr in composition.get_nodes_by_role(NodeRole.OUTPUT): + elif rcvr in output_nodes: nested_comp_attributes.update({"color": 
self.output_color}) if rcvr in active_items: if self.active_color != BOLD: @@ -993,10 +1032,7 @@ def _assign_processing_components(self, # If rcvr is a learning component and not an INPUT node, # break and handle in _assign_learning_components() # (node: this allows TARGET node for learning to remain marked as an INPUT node) - if (NodeRole.LEARNING in composition.nodes_to_roles[rcvr]): - # MODIFIED 6/13/20 OLD: FIX - MODIFIED TO ALLOW TARGET TO BE MARKED AS INPUT - # and not NodeRole.INPUT in composition.nodes_to_roles[rcvr]): - # MODIFIED 6/13/20 END + if (NodeRole.LEARNING in self._get_roles_by_node(composition, rcvr, context)): return # DEAL WITH CONTROLLER's OBJECTIVEMECHANIMS @@ -1016,9 +1052,9 @@ def _assign_processing_components(self, # Cycle or Feedback Node if isinstance(rcvr, Composition): node_shape = self.composition_shape - elif rcvr in composition.get_nodes_by_role(NodeRole.FEEDBACK_SENDER): + elif rcvr in self._get_nodes_by_role(composition, NodeRole.FEEDBACK_SENDER, context): node_shape = self.feedback_shape - elif rcvr in composition.get_nodes_by_role(NodeRole.CYCLE): + elif rcvr in self._get_nodes_by_role(composition, NodeRole.CYCLE, context): node_shape = self.cycle_shape else: node_shape = self.mechanism_shape @@ -1136,21 +1172,24 @@ def _assign_processing_components(self, show_dimensions) if show_node_structure and isinstance(rcvr, Mechanism): - g.node(rcvr_label, - rcvr._show_structure(**node_struct_args, - node_border=rcvr_penwidth, - condition=condition), - shape=self.struct_shape, - color=rcvr_color, - penwidth=rcvr_penwidth, - rank=rcvr_rank) + args = (rcvr_label, rcvr._show_structure(**node_struct_args, + node_border=rcvr_penwidth, + condition=condition)) + kwargs = {'shape': self.struct_shape, + 'color':rcvr_color, + 'penwidth': rcvr_penwidth, + 'rank': rcvr_rank} else: - g.node(rcvr_label, - shape=node_shape, - color=rcvr_color, - penwidth=rcvr_penwidth, - rank=rcvr_rank) + args = (rcvr_label,) + kwargs = {'shape': node_shape, + 'color':rcvr_color, + 'penwidth': rcvr_penwidth, + 'rank': rcvr_rank} + self._implement_graph_node(g, rcvr, context,*args, **kwargs) + + # 7/9/24 + # FIX: IMPLEMENT THIS AS METHOD THAT CAN BE OVERRIDEN BY SUBCLASS TO IMPLEMENT DIRECT PROJS TO NESTED NODES # Implement sender edges from Nodes within Composition sndrs = processing_graph[rcvr] self._assign_incoming_edges(g, @@ -1168,7 +1207,8 @@ def _assign_processing_components(self, show_projections_not_in_composition, enclosing_comp=enclosing_comp, comp_hierarchy=comp_hierarchy, - nesting_level=nesting_level) + nesting_level=nesting_level, + context=context) def _assign_cim_components(self, g, @@ -1182,11 +1222,15 @@ def _assign_cim_components(self, show_projection_labels, show_projections_not_in_composition, show_controller, - comp_hierarchy): + comp_hierarchy, + context): from psyneulink.core.compositions.composition import Composition, NodeRole composition = self.composition + composition_nodes = self._get_nodes(composition, context) + composition_projections = self._get_projections(composition, context) enclosing_g = enclosing_comp._show_graph.G if enclosing_comp else None + enclosing_comp_projections = self._get_projections(enclosing_comp, context) if enclosing_comp else None cim_rank = 'same' @@ -1235,9 +1279,9 @@ def _render_projection(_g, proj, sndr_label, rcvr_label, # But if any Projection to it is from a controller, use controller_color for input_port in cim.input_ports: for proj in input_port.path_afferents: - if proj not in enclosing_comp.projections and not 
show_projections_not_in_composition: + if (proj not in enclosing_comp_projections and not show_projections_not_in_composition): continue - if self._trace_senders_for_controller(proj, enclosing_comp): + if self._trace_senders_for_controller(proj, context, enclosing_comp): cim_type_color = self.controller_color elif cim is composition.output_CIM: cim_type_color = self.output_color @@ -1292,7 +1336,7 @@ def _render_projection(_g, proj, sndr_label, rcvr_label, for proj in projs: proj_color=self.default_node_color - if proj not in enclosing_comp.projections: + if proj not in enclosing_comp_projections: if not show_projections_not_in_composition: continue else: @@ -1344,7 +1388,7 @@ def _render_projection(_g, proj, sndr_label, rcvr_label, for proj in projs: proj_color = self.default_node_color - if proj not in composition.projections: + if proj not in composition_projections: if not show_projections_not_in_composition: continue else: @@ -1363,14 +1407,15 @@ def _render_projection(_g, proj, sndr_label, rcvr_label, continue # Validate the Projection is to an INPUT node or a node that is shadowing one - if ((rcvr_input_node_proj_owner in composition.nodes_to_roles and - NodeRole.INPUT not in composition.nodes_to_roles[rcvr_input_node_proj_owner]) - and (proj.receiver.shadow_inputs in composition.nodes_to_roles and - NodeRole.INPUT not in composition.nodes_to_roles[proj.receiver.shadow_inputs])): + if ((rcvr_input_node_proj_owner in composition_nodes and NodeRole.INPUT not in + self._get_roles_by_node(composition, rcvr_input_node_proj_owner, context)) + and (proj.receiver.shadow_inputs in composition_nodes and NodeRole.INPUT not in + self._get_roles_by_node(composition, proj.receiver.shadow_inputs, context))): raise ShowGraphError(f"Projection from input_CIM of {composition.name} to node " f"{rcvr_input_node_proj_owner} that is not an " f"{NodeRole.INPUT.name} node or shadowing its " f"{NodeRole.INPUT.name.lower()}.") + rcvr_label = self._get_graph_node_label(composition, rcvr_input_node_proj_owner, show_types, show_dimensions) @@ -1408,7 +1453,7 @@ def _render_projection(_g, proj, sndr_label, rcvr_label, for proj in projs: proj_color = self.control_color - if proj not in enclosing_comp.projections: + if proj not in enclosing_comp_projections: if not show_projections_not_in_composition: continue else: @@ -1427,7 +1472,7 @@ def _render_projection(_g, proj, sndr_label, rcvr_label, f"PROGRAM ERROR: parameter_CIM of {composition.name} recieves a Projection " \ f"from a Node from other than a {ControlMechanism.__name__}." 
# Skip Projections from controller (handled in _assign_controller_components) - if self._is_composition_controller(ctl_mech_output_port_owner, enclosing_comp): + if self._is_composition_controller(ctl_mech_output_port_owner, context, enclosing_comp): continue # Skip if there is no outer Composition (enclosing_g), # or Projections across nested Compositions are not being shown (show_nested=INSET) @@ -1460,7 +1505,7 @@ def _render_projection(_g, proj, sndr_label, rcvr_label, for proj in projs: proj_color = None - if proj not in composition.projections: + if proj not in composition_projections: if not show_projections_not_in_composition: continue else: @@ -1502,7 +1547,7 @@ def _render_projection(_g, proj, sndr_label, rcvr_label, rcvr_modulated_mec_proj_label = rcvr_label # Render Projection - if self._trace_senders_for_controller(proj, enclosing_comp): + if self._trace_senders_for_controller(proj, context, enclosing_comp): ctl_proj_color = proj_color or self.controller_color else: ctl_proj_color = proj_color or self.control_color @@ -1531,7 +1576,7 @@ def _render_projection(_g, proj, sndr_label, rcvr_label, for proj in projs: proj_color = self.default_node_color - if proj not in composition.projections: + if proj not in composition_projections: if not show_projections_not_in_composition: continue else: @@ -1549,9 +1594,10 @@ def _render_projection(_g, proj, sndr_label, rcvr_label, else: sndr_output_node_proj_owner = sndr_output_node_proj.owner # Validate the Projection is from an OUTPUT or PROBE node - if ((sndr_output_node_proj_owner in composition.nodes_to_roles and - not any(role for role in {NodeRole.OUTPUT, NodeRole.PROBE} if - role in composition.nodes_to_roles[sndr_output_node_proj_owner]))): + if (sndr_output_node_proj_owner in composition_nodes and + not any(role for role in {NodeRole.OUTPUT, NodeRole.PROBE} + if role in self._get_roles_by_node(composition, sndr_output_node_proj_owner, + context))): raise ShowGraphError(f"Projection to output_CIM of {composition.name} " f"from node {sndr_output_node_proj_owner} that is not " f"an {NodeRole.OUTPUT} node.") @@ -1594,7 +1640,7 @@ def _render_projection(_g, proj, sndr_label, rcvr_label, for proj in projs: proj_color = self.default_node_color - if proj not in enclosing_comp.projections: + if proj not in enclosing_comp_projections: if not show_projections_not_in_composition: continue else: @@ -1669,11 +1715,13 @@ def _assign_controller_components(self, show_projection_labels, show_projections_not_in_composition, comp_hierarchy, - nesting_level): + nesting_level, + context): """Assign control nodes and edges to graph""" from psyneulink.core.compositions.composition import Composition composition = self.composition + nodes = self._get_nodes(composition, context) controller = composition.controller if controller is None: @@ -1735,7 +1783,7 @@ def _assign_controller_components(self, ctl_proj_arrowhead = self.control_projection_arrow # Skip ControlProjections not in the Composition - if ctl_proj not in composition.projections: + if ctl_proj not in self._get_projections(composition, context): continue # Construct edge name --------------------------------------------------- @@ -1749,12 +1797,13 @@ def _assign_controller_components(self, rcvr_comp = ctl_proj_rcvr.owner.composition def find_rcvr_comp(r, c, l): """Find deepest Composition within c that encloses r within range of num_nesting_levels of c""" + rcvr_nodes = self._get_nodes(c, context) if (self.num_nesting_levels is not None and l > self.num_nesting_levels): return c, l - elif r in 
c.nodes: + elif r in rcvr_nodes: return r, l l+=1 - for nested_c in [nc for nc in c.nodes if isinstance(nc, Composition)]: + for nested_c in [nc for nc in nodes if isinstance(nc, Composition)]: return find_rcvr_comp(r, nested_c, l) return None project_to_node = False @@ -1917,7 +1966,7 @@ def find_rcvr_comp(r, c, l): proj_sndr.owner, show_types, show_dimensions) - if (proj_sndr.owner not in composition.nodes + if (proj_sndr.owner not in nodes # MODIFIED 1/6/22 NEW: and isinstance(proj_sndr.owner, CompositionInterfaceMechanism)): # MODIFIED 1/6/22 END @@ -1964,7 +2013,7 @@ def find_rcvr_comp(r, c, l): projection.sender.owner, show_types, show_dimensions) - if (projection.sender.owner not in composition.nodes + if (projection.sender.owner not in nodes and not controller.allow_probes): num_nesting_levels = self.num_nesting_levels or 0 nested_comp = projection.sender.owner.composition @@ -2048,7 +2097,8 @@ def find_rcvr_comp(r, c, l): show_projections_not_in_composition, proj_color=ctl_proj_color, comp_hierarchy=comp_hierarchy, - nesting_level=nesting_level) + nesting_level=nesting_level, + context=context) def _assign_learning_components(self, g, @@ -2065,7 +2115,8 @@ def _assign_learning_components(self, show_node_structure, node_struct_args, show_projection_labels, - show_projections_not_in_composition): + show_projections_not_in_composition, + context): """Assign learning nodes and edges to graph""" from psyneulink.core.compositions.composition import NodeRole @@ -2083,7 +2134,7 @@ def _assign_learning_components(self, if isinstance(rcvr, MappingProjection): return - if NodeRole.TARGET in composition.get_roles_by_node(rcvr): + if NodeRole.TARGET in self._get_roles_by_node(composition, rcvr, context): rcvr_width = self.bold_width else: rcvr_width = self.default_width @@ -2138,7 +2189,8 @@ def _assign_learning_components(self, show_projections_not_in_composition, enclosing_comp=enclosing_comp, comp_hierarchy=comp_hierarchy, - nesting_level=nesting_level) + nesting_level=nesting_level, + context=context) def _render_projection_as_node(self, g, @@ -2154,7 +2206,8 @@ def _render_projection_as_node(self, proj_color, proj_width, sndr_label=None, - rcvr_label=None): + rcvr_label=None, + context=None): composition = self.composition @@ -2206,13 +2259,25 @@ def _render_projection_as_node(self, else: edge_label = '' if show_node_structure: - self.G.edge(sndr_label + ':' + OutputPort.__name__ + '-' + 'LearningSignal', - rcvr_label, - label=edge_label, - color=learning_proj_color, penwidth=learning_proj_width) + # self.G.edge(sndr_label + ':' + OutputPort.__name__ + '-' + 'LearningSignal', + # rcvr_label, + # label=edge_label, + # color=learning_proj_color, penwidth=learning_proj_width) + self._implement_graph_edge(self.G, proj, context, + sndr_label + ':' + OutputPort.__name__ + '-' + 'LearningSignal', + rcvr_label, + label=edge_label, + color=learning_proj_color, + penwidth=learning_proj_width) else: - self.G.edge(sndr_label, rcvr_label, label = edge_label, - color=learning_proj_color, penwidth=learning_proj_width) + # self.G.edge(sndr_label, rcvr_label, label = edge_label, + # color=learning_proj_color, penwidth=learning_proj_width) + self._implement_graph_edge(self.G, proj, context, + sndr_label, + rcvr_label, + label=edge_label, + color=learning_proj_color, + penwidth=learning_proj_width) return True @beartype @@ -2234,10 +2299,12 @@ def _assign_incoming_edges(self, proj_arrow=None, enclosing_comp=None, comp_hierarchy=None, - nesting_level=None): + nesting_level=None, + context=None): from 
psyneulink.core.compositions.composition import Composition, NodeRole composition = self.composition + composition_projections = self._get_projections(composition, context) if nesting_level not in comp_hierarchy: comp_hierarchy[nesting_level] = composition enclosing_g = enclosing_comp._show_graph.G if enclosing_comp else None @@ -2252,7 +2319,7 @@ def _assign_incoming_edges(self, if show_nested is NESTED: # Add output_CIMs for nested Comps to find sender nodes cims = set([proj.sender.owner for proj in rcvr.afferents - if (proj in composition.projections + if (proj in composition_projections and isinstance(proj.sender.owner, CompositionInterfaceMechanism) and (proj.sender.owner is proj.sender.owner.composition.output_CIM))]) senders.update(cims) @@ -2260,7 +2327,7 @@ def _assign_incoming_edges(self, if enclosing_g and show_nested is not INSET: # Add input_CIM for current Composition to find senders from enclosing_g cims = set([proj.sender.owner for proj in rcvr.afferents - if (proj in composition.projections + if (proj in composition_projections and isinstance(proj.sender.owner, CompositionInterfaceMechanism) and proj.sender.owner in {composition.input_CIM, composition.parameter_CIM})]) senders.update(cims) @@ -2352,7 +2419,7 @@ def assign_sender_edge(sndr:Union[Mechanism, Composition], composition.active_item_rendered = True # Projection to or from a LearningMechanism - elif (NodeRole.LEARNING in composition.nodes_to_roles[rcvr]): + elif (NodeRole.LEARNING in self._get_roles_by_node(composition, rcvr, context)): proj_color = self.learning_color proj_width = str(self.default_width) @@ -2380,7 +2447,8 @@ def assign_sender_edge(sndr:Union[Mechanism, Composition], rcvr_label=proc_mech_rcvr_label, sndr_label=sndr_proj_label, proj_color=proj_color, - proj_width=proj_width) + proj_width=proj_width, + context=context) # Deferred if it is the last Mechanism in a learning Pathway # (see _render_projection_as_node) if deferred: @@ -2397,11 +2465,16 @@ def assign_sender_edge(sndr:Union[Mechanism, Composition], graph = enclosing_g else: graph = g - graph.edge(sndr_proj_label, proc_mech_rcvr_label, - label=label, - color=proj_color, - penwidth=proj_width, - arrowhead=proj_arrowhead) + + self._implement_graph_edge(graph, + proj, + context, + sndr_proj_label, + proc_mech_rcvr_label, + label=label, + color=proj_color, + penwidth=proj_width, + arrowhead=proj_arrowhead) # Sorted to insure consistency of ordering in g for testing for sender in sorted(senders): @@ -2418,11 +2491,11 @@ def assign_sender_edge(sndr:Union[Mechanism, Composition], proj_color = proj_color_default proj_arrowhead = proj_arrow_default - if proj not in composition.projections: - if not show_projections_not_in_composition: - continue - else: + if not self._proj_in_composition(proj, composition_projections, context): + if show_projections_not_in_composition: proj_color=self.inactive_projection_color + else: + continue assign_proj_to_enclosing_comp = False @@ -2460,7 +2533,7 @@ def assign_sender_edge(sndr:Union[Mechanism, Composition], rcvr is not enclosing_comp.controller and rcvr is not composition.controller and not sndr.afferents and show_cim - or self._is_composition_controller(sndr, enclosing_comp)): + or self._is_composition_controller(sndr, context, enclosing_comp)): continue if sender is composition.parameter_CIM: # # Allow MappingProjections to iconified rep of nested Composition @@ -2517,6 +2590,8 @@ def _generate_output(self, from psyneulink.core.compositions.composition import Composition, NodeRole composition = 
self.composition + nodes = self._get_nodes(composition, context) + projections = self._get_projections(composition, context) # Sort nodes for display def get_index_of_node_in_G_body(node, node_type: Literal['MECHANISM', 'Projection', 'Composition']): @@ -2536,10 +2611,10 @@ def get_index_of_node_in_G_body(node, node_type: Literal['MECHANISM', 'Projectio elif 'subgraph' in item and node_type in {COMPOSITION}: return i - for node in composition.nodes: + for node in nodes: if isinstance(node, Composition): continue - roles = composition.get_roles_by_node(node) + roles = self._get_roles_by_node(composition, node, context) # Put INPUT node(s) first if NodeRole.INPUT in roles: i = get_index_of_node_in_G_body(node, MECHANISM) @@ -2556,10 +2631,10 @@ def get_index_of_node_in_G_body(node, node_type: Literal['MECHANISM', 'Projectio if i is not None: G.body.insert(len(G.body),G.body.pop(i)) - for proj in composition.projections: + for proj in projections: # Put ControlProjection(s) last, except for controller of Composition (see below) # if isinstance(proj, ControlProjection) and self._is_composition_controller(proj.sender.owner): - if isinstance(proj, ControlProjection) and self._is_composition_controller(proj.sender.owner, + if isinstance(proj, ControlProjection) and self._is_composition_controller(proj.sender.owner, context, enclosing_comp): i = get_index_of_node_in_G_body(proj, PROJECTION) if i is not None: @@ -2571,7 +2646,7 @@ def get_index_of_node_in_G_body(node, node_type: Literal['MECHANISM', 'Projectio G.body.insert(len(G.body),G.body.pop(i)) # Put nested Composition(s) very last - for node in composition.nodes: + for node in nodes: if isinstance(node, Composition): i = get_index_of_node_in_G_body(node, COMPOSITION) if i is not None: @@ -2616,7 +2691,7 @@ def get_index_of_node_in_G_body(node, node_type: Literal['MECHANISM', 'Projectio except: raise ShowGraphError(f"Problem displaying graph for {composition.name}") - def _is_composition_controller(self, mech, enclosing_comp=None): + def _is_composition_controller(self, mech, context, enclosing_comp=None): # FIX 6/12/20: REPLACE WITH TEST FOR NodeRole.CONTROLLER ONCE THAT IS IMPLEMENTED # return isinstance(mech, ControlMechanism) and hasattr(mech, 'composition') and mech.composition from psyneulink.core.compositions.composition import NodeRole @@ -2625,15 +2700,15 @@ def _is_composition_controller(self, mech, enclosing_comp=None): for comp in [self.composition, enclosing_comp]: if not comp: continue - if mech in comp._all_nodes and NodeRole.CONTROLLER in comp.get_roles_by_node(mech): + if mech in comp._all_nodes and NodeRole.CONTROLLER in self._get_roles_by_node(comp, mech, context): return True return False - def _trace_senders_for_controller(self, proj, comp=None): + def _trace_senders_for_controller(self, proj, context, comp=None): """Check whether source sender of a ControlProjection is (at any level of nesting) a Composition controller.""" owner = proj.sender.owner comp = owner.composition if hasattr(owner, 'composition') else comp or self.composition - if self._is_composition_controller(owner, comp): + if self._is_composition_controller(owner, context, comp): return True if isinstance(owner, CompositionInterfaceMechanism): sender_proj = owner.port_map[proj.receiver][0].path_afferents[0] diff --git a/psyneulink/core/globals/keywords.py b/psyneulink/core/globals/keywords.py index cca991b3591..c688746286c 100644 --- a/psyneulink/core/globals/keywords.py +++ b/psyneulink/core/globals/keywords.py @@ -38,8 +38,8 @@ 
'ContentAddressableMemory_FUNCTION', 'CONTEXT', 'CONTROL', 'CONTROL_MECHANISM', 'CONTROL_PATHWAY', 'CONTROL_PROJECTION', 'CONTROL_PROJECTION_PARAMS', 'CONTROL_PROJECTIONS', 'CONTROL_SIGNAL', 'CONTROL_SIGNAL_SPECS', 'CONTROL_SIGNALS', 'CONTROLLED_PARAMS', - 'CONTROLLER', 'CONTROLLER_OBJECTIVE', 'CORRELATION', 'COSINE', 'COSINE_SIMILARITY', - 'COST_FUNCTION', 'COUNT', 'CROSS_ENTROPY', 'CURRENT_EXECUTION_TIME', 'CUSTOM_FUNCTION', 'CYCLE', + 'CONTROLLER', 'CONTROLLER_OBJECTIVE', 'CORRELATION', 'CPU', 'COSINE', 'COSINE_SIMILARITY', + 'COST_FUNCTION', 'COUNT', 'CROSS_ENTROPY', 'CURRENT_EXECUTION_TIME', 'CUSTOM_FUNCTION', 'CUDA', 'CYCLE', 'DDM_MECHANISM', 'DECAY', 'DEFAULT', 'DEFAULT_CONTROL_MECHANISM', 'DEFAULT_INPUT', 'DEFAULT_MATRIX', 'DEFAULT_PREFERENCE_SET_OWNER', 'DEFAULT_PROCESSING_MECHANISM', 'DEFAULT_VARIABLE', 'DEFERRED_ASSIGNMENT', 'DEFERRED_DEFAULT_NAME', 'DEFERRED_INITIALIZATION', 'DICT', 'DictionaryMemory_FUNCTION', @@ -85,7 +85,7 @@ 'MODEL_SPEC_ID_GENERIC', 'MODEL_SPEC_ID_INPUT_PORTS', 'MODEL_SPEC_ID_OUTPUT_PORTS', 'MODEL_SPEC_ID_PSYNEULINK', 'MODEL_SPEC_ID_SENDER_MECH', 'MODEL_SPEC_ID_SENDER_PORT', 'MODEL_SPEC_ID_RECEIVER_MECH', 'MODEL_SPEC_ID_RECEIVER_PORT', - 'MODEL_SPEC_ID_PARAMETER_INITIAL_VALUE', 'MODEL_SPEC_ID_PARAMETER_SOURCE', + 'MODEL_SPEC_ID_PARAMETER_INITIAL_VALUE', 'MODEL_SPEC_ID_PARAMETER_SOURCE', 'MPS', 'MODEL_SPEC_ID_PARAMETER_VALUE', 'MODEL_SPEC_ID_TYPE', 'MULTIPLICATIVE', 'MULTIPLICATIVE_PARAM', 'MUTUAL_ENTROPY', 'NAME', 'NESTED', 'NEWEST', 'NODE', 'NODES', 'NOISE', 'NORMAL_DIST_FUNCTION', 'NORMALIZE', 'NORMED_L0_SIMILARITY', @@ -356,6 +356,7 @@ class Loss(Enum): SSE = auto() MSE = auto() CROSS_ENTROPY = auto() + BINARY_CROSS_ENTROPY = auto() KL_DIV = auto() NLL = auto() POISSON_NLL = auto() @@ -366,6 +367,17 @@ class Loss(Enum): # ****************************************** CONSTANTS ************************************************************* # ********************************************************************************************************************** +#region --------------------------------------------- DEVICES ---------------------------------------------------- +# Devices +CUDA = 'cuda' +CPU = 'cpu' +MPS = 'mps' +#endregion + +#region --------------------------------------------- GENERAL ---------------------------------------------------- +# General + + ON = True OFF = False DEFAULT = False @@ -378,9 +390,6 @@ class Loss(Enum): INIT_FUNCTION_METHOD_ONLY = 'init using only the subclass __function__ method' -#region --------------------------------------------- GENERAL ---------------------------------------------------- -# General - ALL = 'all' ANY = 'any' ONLY = 'only' @@ -998,7 +1007,7 @@ class Loss(Enum): LINEAR = 'linear' CONSTANT = 'constant' SIMPLE = 'scaled' -ADAPTIVE = 'modulatory' +ADAPTIVE = 'adaptive' DIFFUSION = 'diffusion' EXPONENTIAL = 'exponential' GAUSSIAN = 'gaussian' diff --git a/psyneulink/core/globals/utilities.py b/psyneulink/core/globals/utilities.py index ab8d397453a..5d00d17ca0e 100644 --- a/psyneulink/core/globals/utilities.py +++ b/psyneulink/core/globals/utilities.py @@ -131,8 +131,8 @@ except ImportError: torch = None -from psyneulink.core.globals.keywords import \ - comparison_operators, DISTANCE_METRICS, EXPONENTIAL, GAUSSIAN, LINEAR, MATRIX_KEYWORD_VALUES, NAME, SINUSOID, VALUE +from psyneulink.core.globals.keywords import (comparison_operators, DISTANCE_METRICS, EXPONENTIAL, GAUSSIAN, LINEAR, + MATRIX_KEYWORD_VALUES, MPS, NAME, SINUSOID, VALUE) __all__ = [ 'append_type_to_name', 'AutoNumber', 
'ContentAddressableList', 'convert_to_list', 'convert_to_np_array', @@ -258,7 +258,7 @@ def __new__(component_type): return obj -# ******************************** GLOBAL STRUCTURES, CONSTANTS AND METHODS ******************************************* +#region ******************************** GLOBAL STRUCTURES, CONSTANTS AND METHODS ************************************* TEST_CONDTION = False @@ -355,7 +355,8 @@ def parameter_spec(param, numeric_only=None): Type['psyneulink.core.components.projections.MappingProjection'], 'psyneulink.library.components.projections.MaskedMappingProjection', Type['psyneulink.library.components.projections.MaskedMappingProjection'], - Literal['LEARNING', 'bias', 'control', 'gain', 'gate', 'leak', 'offset', 'ControlSignal', 'ControlProjection'], + Literal['LEARNING', 'adaptive','bias', 'control', 'gain', 'gate', 'leak', 'offset', + 'ControlSignal', 'ControlProjection'], ] @@ -676,8 +677,9 @@ def recursively_check_elements_for_numeric(value): else: return False +#endregion -# MATHEMATICAL ******************************************************************************************************** +#region MATHEMATICAL *************************************************************************************************** def normpdf(x, mu=0, sigma=1): u = float((x - mu) / abs(sigma)) @@ -697,7 +699,6 @@ def scalar_distance(measure, value, scale=1, offset=0): if measure == SINUSOID: return sinusoid(value, frequency=scale, phase=offset) - def powerset(iterable): """powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)""" s = list(iterable) @@ -743,9 +744,9 @@ def tensor_power(items, levels: Optional[range] = None, flat=False): else: pp.append(tp.reshape(-1)) return pp +#endregion - -# LIST MANAGEMENT ****************************************************************************************************** +#region LIST MANAGEMENT ************************************************************************************************ def insert_list(list1, position, list2): """Insert list2 into list1 at position""" @@ -772,10 +773,9 @@ def nesting_depth(l): if isinstance(l, np.ndarray): l = l.tolist() return isinstance(l, list) and max(map(nesting_depth, l)) + 1 +#endregion - -# OTHER **************************************************************************************************************** - +#region OTHER ********************************************************************************************************** def get_args(frame): """Gets dictionary of arguments and their values for a function Frame should be assigned as follows in the function itself: frame = inspect.currentframe() @@ -1018,7 +1018,7 @@ def setter(obj, value): setattr(obj, name, value) return setter - +#endregion #region NUMPY ARRAY METHODS ****************************************************************************************** @@ -1035,44 +1035,6 @@ def np_array_less_than_2d(array): else: return False - -def safe_create_np_array(value): - with warnings.catch_warnings(): - - # If we have a torch tensor, allow it to pass through unchanged - if torch and torch.is_tensor(value): - return value - - warnings.filterwarnings('error', category=np.VisibleDeprecationWarning) - # NOTE: this will raise a ValueError in the future. 
- # See https://numpy.org/neps/nep-0034-infer-dtype-is-object.html - try: - try: - return np.asarray(value) - except np.VisibleDeprecationWarning: - return np.asarray(value, dtype=object) - except ValueError as e: - # numpy 1.24 removed the above deprecation and raises - # ValueError instead. Note that the below call can still - # raise other ValueErrors - if 'The requested array has an inhomogeneous shape' in str(e): - return np.asarray(value, dtype=object) - raise - - except ValueError as e: - msg = str(e) - if 'cannot guess the desired dtype from the input' in msg: - return np.asarray(value, dtype=object) - # KDM 6/29/20: this case handles a previously noted case - # by KAM 6/28/18, #877: - # [[0.0], [0.0], np.array([[0.0, 0.0]])] - # but was only handled for dimension=1 - elif 'could not broadcast' in msg: - return convert_all_elements_to_np_array(value) - else: - raise - - def convert_to_np_array(value, dimension=None): """ Converts value to np.ndarray if it is not already. Handles @@ -1210,8 +1172,6 @@ def append_type_to_name(object, type=None): string = "\'" + name + "\'" + ' ' + type.lower() # string = name + ' ' + type.lower() return string -#endregion - class ReadOnlyOrderedDict(UserDict): def __init__(self, dict=None, name=None, **kwargs): @@ -2400,3 +2360,58 @@ def array_from_matrix_string( arr.append([c for c in r.split(col_sep) if len(c)]) return np.asarray(arr, dtype=dtype) + +#endregion + +#region PYTORCH TENSOR METHODS ***************************************************************************************** + +def get_torch_tensor(value, dtype, device): + if device == MPS or device == torch.device(MPS): + if isinstance(value, torch.Tensor): + return torch.tensor(value, dtype=torch.float32, device=device) + return torch.tensor(np.array(value, dtype=np.float32), device=device) + else: + if dtype in {np.float32, torch.float32}: + return torch.tensor(value, device=device).float() + elif dtype in {np.float64, torch.float64}: + return torch.tensor(value, device=device).double() + else: + return torch.tensor(value, device=device) + +def safe_create_np_array(value): + with warnings.catch_warnings(): + + # If we have a torch tensor, allow it to pass through unchanged + if torch and torch.is_tensor(value): + return value + + warnings.filterwarnings('error', category=np.VisibleDeprecationWarning) + # NOTE: this will raise a ValueError in the future. + # See https://numpy.org/neps/nep-0034-infer-dtype-is-object.html + try: + try: + return np.asarray(value) + except np.VisibleDeprecationWarning: + return np.asarray(value, dtype=object) + except ValueError as e: + # numpy 1.24 removed the above deprecation and raises + # ValueError instead. 
Note that the below call can still + # raise other ValueErrors + if 'The requested array has an inhomogeneous shape' in str(e): + return np.asarray(value, dtype=object) + raise + + except ValueError as e: + msg = str(e) + if 'cannot guess the desired dtype from the input' in msg: + return np.asarray(value, dtype=object) + # KDM 6/29/20: this case handles a previously noted case + # by KAM 6/28/18, #877: + # [[0.0], [0.0], np.array([[0.0, 0.0]])] + # but was only handled for dimension=1 + elif 'could not broadcast' in msg: + return convert_all_elements_to_np_array(value) + else: + raise + +#endregion diff --git a/psyneulink/core/llvm/builder_context.py b/psyneulink/core/llvm/builder_context.py index 8b0e90e7a05..a4dd418f6f7 100644 --- a/psyneulink/core/llvm/builder_context.py +++ b/psyneulink/core/llvm/builder_context.py @@ -378,6 +378,12 @@ def check_used_params(self, component, *, tags:frozenset): if hasattr(component, 'evaluate_agent_rep'): used_param_ids.add('num_trials_per_estimate') + if hasattr(component, 'adapt_scale'): + used_param_ids.add('threshold') + used_param_ids.add('adapt_scale') + used_param_ids.add('adapt_base') + used_param_ids.add('adapt_entropy_weighting') + unused_param_ids = component_param_ids - used_param_ids - initializers unused_state_ids = component_state_ids - used_state_ids @@ -499,10 +505,10 @@ def get_data_struct_type(self, component): return ir.LiteralStructType([]) def get_node_wrapper(self, composition, node): - cache = getattr(composition, '_node_wrappers', None) + cache = getattr(composition, '_wrapped_nodes', None) if cache is None: cache = weakref.WeakKeyDictionary() - setattr(composition, '_node_wrappers', cache) + setattr(composition, '_wrapped_nodes', cache) return cache.setdefault(node, _node_wrapper(composition, node)) def convert_python_struct_to_llvm_ir(self, t): diff --git a/psyneulink/library/components/mechanisms/modulatory/learning/EMstoragemechanism.py b/psyneulink/library/components/mechanisms/modulatory/learning/EMstoragemechanism.py index ee8a4a03043..869d86b7051 100644 --- a/psyneulink/library/components/mechanisms/modulatory/learning/EMstoragemechanism.py +++ b/psyneulink/library/components/mechanisms/modulatory/learning/EMstoragemechanism.py @@ -320,7 +320,7 @@ class EMStorageMechanism(LearningMechanism): decay_rate : float : default 0.0 specifies the rate at which `entries ` in the `memory_matrix - ` decays (see `decay_rate ` for additional + ` decay (see `decay_rate ` for additional details). 
Attributes @@ -743,7 +743,7 @@ def _execute(self, - call function for each LearningSignal to decay existing memory and assign input to weakest entry EMStorage function: - decay existing memories - - assign input to weakest entry (given index for passed from EMStorageMechanism) + - assign input to weakest entry (given index passed from EMStorageMechanism) :return: List[2d np.array] self.learning_signal """ @@ -780,6 +780,10 @@ def _execute(self, if field_weights is not None: field_norms *= field_weights row_norms = np.sum(field_norms, axis=1) + # IMPLEMENTATION NOTE: + # the following will give the lowest index in case of a tie; + # this means that if memory is initialized with all zeros, + # it will be occupied in row order idx_of_weakest_memory = np.argmin(row_norms) value = [] diff --git a/psyneulink/library/compositions/autodiffcomposition.py b/psyneulink/library/compositions/autodiffcomposition.py index 8ab1b109b37..025bb11e7bf 100644 --- a/psyneulink/library/compositions/autodiffcomposition.py +++ b/psyneulink/library/compositions/autodiffcomposition.py @@ -333,9 +333,12 @@ torch_available = False else: from psyneulink.library.compositions.pytorchwrappers import PytorchCompositionWrapper + from psyneulink.library.compositions.pytorchshowgraph import PytorchShowGraph +from psyneulink.core.components.functions.stateful.statefulfunction import StatefulFunction from psyneulink.core.components.mechanisms.processing.processingmechanism import ProcessingMechanism from psyneulink.core.components.mechanisms.processing.compositioninterfacemechanism import CompositionInterfaceMechanism +from psyneulink.core.components.mechanisms.processing.transfermechanism import TransferMechanism from psyneulink.core.components.mechanisms.modulatory.modulatorymechanism import ModulatoryMechanism_Base from psyneulink.core.components.projections.modulatory.modulatoryprojection import ModulatoryProjection_Base from psyneulink.core.components.ports.inputport import InputPort @@ -343,8 +346,8 @@ from psyneulink.core.compositions.report import (ReportOutput, ReportParams, ReportProgress, ReportSimulations, ReportDevices, EXECUTE_REPORT, LEARN_REPORT, PROGRESS_REPORT) from psyneulink.core.globals.context import Context, ContextFlags, handle_external_context, CONTEXT -from psyneulink.core.globals.keywords import AUTODIFF_COMPOSITION, SOFT_CLAMP, Loss -from psyneulink.core.globals.utilities import is_numeric_scalar +from psyneulink.core.globals.keywords import AUTODIFF_COMPOSITION, CPU, CUDA, Loss, MPS, SOFT_CLAMP +from psyneulink.core.globals.utilities import is_numeric_scalar, get_torch_tensor from psyneulink.core.scheduling.scheduler import Scheduler from psyneulink.core.globals.parameters import Parameter, check_user_specified from psyneulink.core.scheduling.time import TimeScale @@ -358,7 +361,6 @@ 'AutodiffComposition' ] - class AutodiffCompositionError(CompositionError): def __init__(self, error_value): @@ -376,6 +378,15 @@ class AutodiffComposition(Composition): Arguments --------- + optimizer_type : str : default 'sgd' + the kind of optimizer used in training. The current options are 'sgd' or 'adam'. + + loss_spec : Loss or PyTorch loss function : default Loss.MSE + specifies the loss function for training; see `Loss` for arguments. + + weight_decay : float : default 0 + specifies the L2 penalty (which discourages large weights) used by the optimizer. 
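A hedged construction sketch using the optimizer_type, loss_spec, and weight_decay arguments described in this docstring; the name and values are illustrative assumptions, and Loss is imported from the keywords module referenced in the import changes above.

    import psyneulink as pnl
    from psyneulink.core.globals.keywords import Loss

    model = pnl.AutodiffComposition(
        name='example_autodiff',
        optimizer_type='adam',   # 'sgd' or 'adam'
        loss_spec=Loss.MSE,      # any Loss enum member (see the keywords.py changes above)
        weight_decay=0.001,      # L2 penalty applied by the optimizer
    )
    # Nodes and learnable pathways would be added before calling learn().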
+ + learning_rate : float : default 0.001 specifies the learning rate passed to the optimizer if none is specified in the `learn ` method of the AutodiffComposition
@@ -385,22 +396,23 @@ class AutodiffComposition(Composition): specifies whether the AutodiffComposition should disable learning when run in `learning mode `. - optimizer_type : str : default 'sgd' - the kind of optimizer used in training. The current options are 'sgd' or 'adam'. + device : torch.device : default device-dependent + specifies the device on which the model is run. If None, the device is set to 'cuda' if available, + then 'mps', otherwise 'cpu'. - weight_decay : float : default 0 - specifies the L2 penalty (which discourages large weights) used by the optimizer. - - loss_spec : Loss or PyTorch loss function : default Loss.MSE - specifies the loss function for training; see `Loss` for arguments. Attributes ---------- + pytorch_representation = None + optimizer : PyTorch optimizer function the optimizer used for training. Depends on the **optimizer_type**, **learning_rate**, and **weight_decay** arguments from initialization. + loss : PyTorch loss function + the loss function used for training. Depends on the **loss_spec** argument from initialization. + learning_rate : float determines the learning_rate passed the optimizer, and is applied to all `Projection`\\s in the AutodiffComposition that are `learnable `.
@@ -418,12 +430,18 @@ class AutodiffComposition(Composition): **learnable** parameter of its constructor as `False`; this applies to MappingProjections at any level of `nesting `. - loss : PyTorch loss function - the loss function used for training. Depends on the **loss_spec** argument from initialization. + device : torch.device + the device on which the model is run. losses : list of floats tracks the average loss after each weight update (i.e. each minibatch) during learning. + trial_losses = Parameter([]) + + tracked_loss = Parameter(None, pnl_internal=True) + + tracked_loss_count = Parameter(0, pnl_internal=True) + last_saved_weights : path path for file to which weights were last saved.
@@ -438,27 +456,33 @@ class AutodiffComposition(Composition): pytorch_composition_wrapper_type = PytorchCompositionWrapper class Parameters(Composition.Parameters): + pytorch_representation = None optimizer = None learning_rate = Parameter(.001, fallback_default=True) losses = Parameter([]) trial_losses = Parameter([]) tracked_loss = Parameter(None, pnl_internal=True) tracked_loss_count = Parameter(0, pnl_internal=True) - pytorch_representation = None + device = None + + def _validate_device(self, device): + if isinstance(device, str) and device not in [CPU, CUDA, MPS]: + raise AutodiffCompositionError(f"Device must be one of {CPU}, {CUDA}, or {MPS}") # TODO (CW 9/28/18): add compositions to registry so default arg for name is no longer needed @check_user_specified def __init__(self, pathways=None, - learning_rate=None, optimizer_type='sgd', - weight_decay=0, loss_spec=Loss.MSE, + learning_rate=None, + weight_decay=0, disable_learning=False, + force_no_retain_graph=False, refresh_losses=False, + device=None, disable_cuda=True, cuda_index=None, - force_no_retain_graph=False, name="autodiff_composition", **kwargs):
@@ -466,26 +490,28 @@ def __init__(self, # raise AutodiffCompositionError('Pytorch python module (torch) is not installed. 
Please install it with ' # '`pip install torch` or `pip3 install torch`') + show_graph_attributes = kwargs.pop('show_graph_attributes', {}) + super(AutodiffComposition, self).__init__(name = name, - learning_rate = learning_rate, + pathways=pathways, optimizer_type = optimizer_type, - weight_decay = weight_decay, loss_spec = loss_spec, - pathways=pathways, + learning_rate = learning_rate, + weight_decay = weight_decay, **kwargs) + self._built_pathways = False + self.target_output_map = {} self.optimizer_type = optimizer_type self.loss_spec = loss_spec + self._runtime_learning_rate = None + self.force_no_retain_graph = force_no_retain_graph self.refresh_losses = refresh_losses - self._built_pathways = False self.weight_decay = weight_decay - self.force_no_retain_graph = force_no_retain_graph - self.loss = None self.disable_learning = disable_learning - self._runtime_learning_rate = None + self.loss = None self.last_saved_weights = None self.last_loaded_weights = None - self.target_output_map = {} # keeps track of average loss per epoch self.losses = [] @@ -493,6 +519,8 @@ def __init__(self, # ordered execution sets for the pytorch model self.execution_sets = None + # # MODIFIED 7/10/24 OLD: + # FIX: REMOVE WHEN SUPPORT FOR MPS ADDED BELOW if not disable_cuda and torch.cuda.is_available(): if cuda_index is None: self.device = torch.device('cuda') @@ -500,11 +528,44 @@ def __init__(self, self.device = torch.device('cuda:' + str(cuda_index)) elif torch_available: self.device = torch.device('cpu') + else: + self.device = device + # # MODIFIED 7/10/24 NEW: + # FIX: ADD AFTER USE OF utilities.get_torch_tensor() AND COMPATIBLITY WITH MPS IS VALIDATED + # if device is None: + # # Try setting device by default + # if not disable_cuda and torch.cuda.is_available(): + # if cuda_index is None: + # self.device = torch.device(CUDA) + # else: + # self.device = torch.device('cuda:' + str(cuda_index)) + # elif torch_available: + # if torch.backends.mps.is_available(): + # from psyneulink.core.components.functions.nonstateful.transferfunctions import Linear + # try: + # self.device = torch.device(MPS) + # test_pytorch_fct_with_mps = Linear()._gen_pytorch_fct(self.device, Context()) + # except AssertionError: + # self.device = torch.device(CPU) + # else: + # self.device = torch.device(CPU) + # else: + # self.device = device + # # MODIFIED 7/10/24 END # Set to True after first warning about failure to specify execution mode so warning is issued only once self.execution_mode_warned_about_default = False + # return self.infer_backpropagation_learning_pathways(pnlvm.ExecutionMode.PyTorch) - def infer_backpropagation_learning_pathways(self, execution_mode, context=None): + # ShowGraph + self.assign_ShowGraph(show_graph_attributes) + def assign_ShowGraph(self, show_graph_attributes): + """Override to replace assignment of ShowGraph class with PytorchShowGraph""" + show_graph_attributes = show_graph_attributes or {} + self._show_graph = PytorchShowGraph(self, **show_graph_attributes) + + @handle_external_context() + def infer_backpropagation_learning_pathways(self, execution_mode, context=None)->list: """Create backpropapagation learning pathways for every Input Node --> Output Node pathway Flattens nested compositions: - only includes the Projections in outer Composition to/from the CIMs of the nested Composition @@ -512,6 +573,7 @@ def infer_backpropagation_learning_pathways(self, execution_mode, context=None): - excludes Projections from/to CIMs in the nested Composition (from input_CIMs and to output_CIMs), as 
those should remain identity Projections; see `PytorchCompositionWrapper` for table of how Projections are handled and further details. + Returns list of target nodes for each pathway """ self._analyze_graph() @@ -522,9 +584,9 @@ def _get_pytorch_backprop_pathway(input_node)->list: IMPLEMENTATION NOTE: flattens nested Compositions Return a list of all pathways from input_node -> output node """ - pathways = [] - prev = {} - queue = collections.deque([(input_node, None, self)]) + pathways = [] # List of all feedforward pathways from INPUT Node to OUTPUT Node + prev = {} # Dictionary of previous component for each component in every pathway + queue = collections.deque([(input_node, None, self)]) # Queue of nodes to visit in breadth-first search # FIX: 9/17/23 - THIS VERSION FLATTENS NESTED COMPOSITIONS; MAY NOT STILL BE NEEDED # SINCE EXECUTION SETS ARE NOW FLATTENED IN PytorchCompositionWrapper @@ -532,14 +594,16 @@ def _get_pytorch_backprop_pathway(input_node)->list: # THOUGH DOING SO PREVIOUSLY SEEMED TO LOSE TARGET NODE. # MAYBE NOT NOW THAT THEY ARE CONSTRUCTED EXPLICITLY BELOW? def create_pathway(node)->list: + """Create pathway starting with node (presumably an output NODE) and working backward via prev""" pathway = [] entry = node while entry in prev: pathway.insert(0, entry) entry = prev[entry] pathway.insert(0, entry) - # Only consider input -> projection -> ... -> output pathways - # (since can't learn on only one mechanism) + # Only consider pathways with 3 or more components (input -> projection -> ... -> output) + # since can't learn on only one mechanism (len==1) + # and a pathway can't have just one mechanism and one projection (len==2) if len(pathway) >= 3: return pathway else: @@ -549,6 +613,7 @@ def create_pathway(node)->list: while len(queue) > 0: node, input_port, current_comp = queue.popleft() + # node is nested Composition that is an INPUT node of the immediate outer Composition if (isinstance(node, Composition) and node is not self and any(isinstance(proj.sender.owner, CompositionInterfaceMechanism) for proj in node.afferents)): @@ -655,18 +720,22 @@ def create_pathway(node)->list: if execution_mode == pnlvm.ExecutionMode.PyTorch: # For PyTorch mode, only need to construct dummy TARGET Nodes, to allow targets to be: # - specified in the same way as for other execution_modes - # - trial-by-trial values to kept aligned with inputs in batch / minibatch construction + # - trial-by-trial values kept aligned with inputs in batch / minibatch construction # - tracked for logging (as mechs of a Composition) - # IMPLEMENTATION NOTE: only add target nodes if not already present + # IMPLEMENTATION NOTE: + # only add target nodes if not already present # (to avoid duplication in multiple calls, including from command line; # see test_xor_training_identicalness_standard_composition_vs_PyTorch_and_LLVM for example) - output_mechs = self.get_nested_nodes_output_nodes_at_levels() - assert set([mech for mech in [pathway[-1] for pathway in pathways]]) == set(output_mechs) + # output_mechs_for_learning = self.get_nested_output_nodes_at_all_levels() + # assert set([mech for mech in [pathway[-1] for pathway in pathways]]) == set(output_mechs_for_learning) + pathway_terminal_nodes = [mech for mech in [pathway[-1] for pathway in pathways]] + identified_target_nodes = self._identify_target_nodes(context) + output_mechs_for_learning = [node for node in identified_target_nodes if node in pathway_terminal_nodes] target_mechs = [ProcessingMechanism(default_variable = 
np.array([np.zeros_like(value) for value in mech.value], dtype=object), name= 'TARGET for ' + mech.name) - for mech in output_mechs if mech not in self.target_output_map.values()] + for mech in output_mechs_for_learning if mech not in self.target_output_map.values()] # Suppress warnings about role assignments context = Context(source=ContextFlags.METHOD) self.add_nodes(target_mechs, required_roles=[NodeRole.TARGET, NodeRole.LEARNING], context=context) @@ -674,13 +743,17 @@ def create_pathway(node)->list: self.exclude_node_roles(target_mech, NodeRole.OUTPUT, context) for output_port in target_mech.output_ports: output_port.parameters.require_projection_in_composition.set(False, override=True) - self.target_output_map.update({target: output for target, output in zip(target_mechs, output_mechs)}) + self.target_output_map.update({target: output for target, output + in zip(target_mechs, output_mechs_for_learning)}) else: # Construct entire PNL backpropagation learning pathways for each INPUT Node for pathway in pathways: self.add_backpropagation_learning_pathway(pathway=pathway, loss_spec=self.loss_spec) + self._analyze_graph() + return self.learning_components + # CLEANUP: move some of what's done in the methods below to a "validate_params" type of method @handle_external_context() def _build_pytorch_representation(self, context=None, refresh=False): @@ -735,13 +808,15 @@ def _get_loss(self, loss_spec): elif loss_spec == Loss.CROSS_ENTROPY: if version.parse(torch.version.__version__) >= version.parse('1.12.0'): return nn.CrossEntropyLoss() - # Cross entropy loss is used for multiclass categorization and needs inputs in shape # ((# minibatch_size, C), targets) where C is a 1-d vector of probabilities for each potential category # and where target is a 1d vector of type long specifying the index to the target category. This # formatting is different from most other loss functions available to autodiff compositions, # and therefore requires a wrapper function to properly package inputs. 
return lambda x, y: nn.CrossEntropyLoss()(torch.atleast_2d(x), torch.atleast_2d(y.type(x.type()))) + elif loss_spec == Loss.BINARY_CROSS_ENTROPY: + if version.parse(torch.version.__version__) >= version.parse('1.12.0'): + return nn.BCELoss() elif loss_spec == Loss.L1: return nn.L1Loss(reduction='sum') elif loss_spec == Loss.NLL: @@ -757,10 +832,10 @@ def _get_loss(self, loss_spec): f"likelihood), POISSONNLL (Poisson negative log likelihood, " f"and KL_DIV (KL divergence.") - def autodiff_training(self, inputs, targets, context=None, scheduler=None): + def autodiff_training(self, inputs, targets, synchronize_pnl_values:bool=True, context=None, scheduler=None): """Perform learning/training on all input-target pairs received for given number of epochs""" - # compute total loss across output neurons for current trial + # Compute total loss over OUTPUT nodes for current trial tracked_loss = self.parameters.tracked_loss._get(context) if tracked_loss is None: self.parameters.tracked_loss._set(torch.zeros(1, device=self.device).double(), @@ -774,58 +849,109 @@ def autodiff_training(self, inputs, targets, context=None, scheduler=None): for component in inputs.keys(): curr_tensor_inputs[component] = torch.tensor(inputs[component], device=self.device).double() + # Get value of TARGET nodes for current trial for component in targets.keys(): curr_tensor_targets[self.target_output_map[component]] = [torch.tensor(np.atleast_1d(target), device=self.device).double() for target in targets[component]] - # do forward computation on current inputs + # Do forward computation on current inputs # should return 2d values for each component - curr_tensor_outputs = self.parameters.pytorch_representation._get(context).forward(curr_tensor_inputs, context) - - for component in curr_tensor_outputs.keys(): + pytorch_rep = self.parameters.pytorch_representation._get(context) + curr_tensor_outputs = pytorch_rep.forward(curr_tensor_inputs, context) + + # Update values of all PNL nodes executed in forward pass (if specified) + if synchronize_pnl_values: + pytorch_node_values = {} + for pnl_node, pytorch_node in pytorch_rep.nodes_map.items(): + if pytorch_node.value is None: + assert pytorch_node.exclude_from_gradient_calc, \ + (f"PROGRAM ERROR: Value of PyTorch wrapper for {pnl_node.name} is None " + f"but it is not excluded from gradient calculation.") + continue + if isinstance(pytorch_node.value, list): + value = np.array([val.detach().cpu().numpy() for val in pytorch_node.value], dtype=object) + else: + value = pytorch_node.value.detach().cpu().numpy() + pnl_node.parameters.value._set(value, context) + if isinstance(pnl_node.function, StatefulFunction): + pnl_node.function.parameters.previous_value._set(value, context) + # 7/10/24 - FIX: THIS NEEDS TO BE ALIGNED WITH HANDLING OF INTEGRATION BEFORE NONLINEARITY IN PYTORCH + # HANDLED IN forward() METHOD OF PytorchMechanismWrapper?? 
+ # if isinstance(pnl_node, TransferMechanism) and pnl_node.integrator_mode: + # pnl_node.integrator_function.parameters.previous_value._set(value, context) + pytorch_node_values[pnl_node] = value + + # Compute the loss (TARGET-OUTPUT) for each trained OUTPUT node + outputs_for_targets = {k:v for k,v in curr_tensor_outputs.items() if k in self.target_output_map.values()} + for component in outputs_for_targets.keys(): # possibly add custom loss option, which is a loss function that takes many args # (outputs, targets, weights, and more) and returns a scalar new_loss = 0 - for i in range(len(curr_tensor_outputs[component])): - new_loss += self.loss(curr_tensor_outputs[component][i], - curr_tensor_targets[component][i]) + for i in range(len(outputs_for_targets[component])): + new_loss += self.loss(outputs_for_targets[component][i], + curr_tensor_targets[component][i]) tracked_loss += new_loss - outputs = [] + # Get values of trained OUTPUT nodes + trained_outputs = [] + trained_outputs_CIM_input_ports = [port for port in self.output_CIM.input_ports + if port.path_afferents[0].sender.owner in self.target_output_map.values()] + for input_port in trained_outputs_CIM_input_ports: + assert (len(input_port.all_afferents) == 1), \ + f"PROGRAM ERROR: {input_port.name} of ouput_CIM for '{self.name}' has more than one afferent." + port, source, _ = self.output_CIM._get_source_info_from_output_CIM(input_port) + idx = source.output_ports.index(port) + trained_outputs += [outputs_for_targets[source][idx].detach().cpu().numpy().copy().tolist()] + + # Get values of all OUTPUT nodes + all_outputs = [] for input_port in self.output_CIM.input_ports: assert (len(input_port.all_afferents) == 1), \ f"PROGRAM ERROR: {input_port.name} of ouput_CIM for '{self.name}' has more than one afferent." port, component, _ = self.output_CIM._get_source_info_from_output_CIM(input_port) idx = component.output_ports.index(port) - outputs += [curr_tensor_outputs[component][idx].detach().cpu().numpy().copy().tolist()] + all_outputs += [curr_tensor_outputs[component][idx].detach().cpu().numpy().copy().tolist()] + # Update tracked loss and loss count self.parameters.tracked_loss_count._set(np.array(self.parameters.tracked_loss_count._get(context=context) + 1), context=context, skip_history=True, skip_log=True) - return outputs + + return trained_outputs, all_outputs def clear_losses(self, context=None): self.losses = [] self.parameters.losses.set([], context=context) def _update_learning_parameters(self, context): - """Carry out backpropagation learning for one or more trials - Updates parameters (weights) based on trials run since last update. - Uses Pytorch backward method to compute gradients and update weights + """Carry out backpropagation learning (backward computation) for one or more trials. 
+ Update parameters (weights) based on trials run since last update, + using Pytorch backward method to compute gradients and update weights + Then execute (i.e., do forward computation for) nodes in pytorch_rep._nodes_to_execute_after_gradient_calc """ optimizer = self.parameters.optimizer._get(context=context) + pytorch_rep = self.parameters.pytorch_representation._get(context=context) + optimizer.zero_grad() + # Compute and log average loss over all trials since last update tracked_loss = self.parameters.tracked_loss._get(context=context) / int(self.parameters.tracked_loss_count._get(context=context)) tracked_loss.backward(retain_graph=not self.force_no_retain_graph) self.parameters.losses._get(context=context).append(tracked_loss.detach().cpu().numpy()[0]) self.parameters.tracked_loss._set(torch.zeros(1, device=self.device).double(), context=context, skip_history=True, skip_log=True) self.parameters.tracked_loss_count._set(np.array(0), context=context, skip_history=True, skip_log=True) + + # Update weights and copy to PNL optimizer.step() - self.parameters.pytorch_representation._get(context=context).detach_all() - self.parameters.pytorch_representation._get(context).copy_weights_to_psyneulink(context) + pytorch_rep.detach_all() + pytorch_rep.copy_weights_to_psyneulink(context) + + # do forward computation on nodes that should be executed after gradient calculation + with torch.no_grad(): + for node, variable in pytorch_rep._nodes_to_execute_after_gradient_calc.items(): + node.wrapper_type.execute_node(node, variable, context) def _gen_llvm_function(self, *, ctx:pnlvm.LLVMBuilderContext, tags:frozenset): if "run" in tags: @@ -836,7 +962,7 @@ def _gen_llvm_function(self, *, ctx:pnlvm.LLVMBuilderContext, tags:frozenset): def _get_total_loss(self, num_trials: int=1, context:Context=None): return sum(self.parameters.trial_losses._get(context)[-num_trials:]) /num_trials - def _infer_input_nodes(self, input_dict: dict): + def _get_autodiff_inputs_values(self, input_dict: dict): """Remove TARGET Nodes, and return dict with values of INPUT Nodes for single trial For nested Compositions, replace input to nested Composition with inputs to its INPUT Nodes For InuptPorts, replace with owner @@ -848,43 +974,37 @@ def _infer_input_nodes(self, input_dict: dict): autodiff_input_dict = {} for node, values in input_dict.items(): mech = node.owner if isinstance(node, InputPort) else node - if (mech in self.get_nested_nodes_input_nodes_at_levels() + if (mech in self.get_nested_input_nodes_at_all_levels() and mech not in self.get_nodes_by_role(NodeRole.TARGET)): # Pass along inputs to all INPUT Nodes except TARGETS - # (those are handled separately in _infer_output_nodes) + # (those are handled separately in _get_autodiff_targets_values) autodiff_input_dict[node] = values - # FIX: 11/3/23: This is handled _parse_learning_spec - # elif isinstance(node, Composition): - # # Replace input to nested Composition with inputs for the InputPorts of its INPUT Nodes - # i = 0 - # for output_port in node.input_CIM.output_ports: - # # If node has input from a Node in an outer Composition, no need for input here - # if node.input_CIM._get_source_node_for_input_CIM(output_port): - # continue - # assert len(output_port.efferents) == 1, \ - # (f"PROGRAM ERROR: {output_port.name} of ouput_CIM for '{node.name}' " - # f"has more than one efferent.") - # # Get input for destination input_port for every trial in values - # # note: each value (input spec) should be 2d rather than 3d, - # # since it is the input for an InputPort 
rather than a Mechanism; - # # this gets parsed in PytorchCompositionWrapper.forward() - # # autodiff_input_dict[output_port.efferents[0].receiver] = values[i] - # # autodiff_input_dict[output_port.efferents[0].receiver] = [value[i] for value in values] - # i += 1 return autodiff_input_dict - def _infer_output_nodes(self, input_dict: dict): - """Remove INPUT Nodes, and return dict with values for TARGET Nodes - - Get Inputs to TARGET Nodes and assign to dict mapping them to OUTPUT Nodes of Composition, - which are used for computation of loss in autodiff_training(). + def _get_autodiff_targets_values(self, input_dict): + """Return dict with values for TARGET Nodes + Get Inputs to TARGET Nodes used for computation of loss in autodiff_training(). + Uses input_dict to get values for TARGET Nodes that are INPUT Nodes of the AutodiffComposition, + If a TARGET Node is not an INPUT Node, it is assumed to be the target of a projection from an INPUT Node + and the value is determined by searching recursively for the input Node that projects to the TARGET Node. Returns --------- - A dict mapping TARGET Nodes -> target values corresponding to OUTPUT Nodes of Composition + A dict mapping TARGET Nodes -> target values """ - # Reduce from 3d inputs to 2d values to match outputs computed in forward computation in autodiff_training() - return {node:value for node, value in input_dict.items() if node in self.target_output_map} + target_values = {} + def get_target_value(target): + if target in self.get_nodes_by_role(NodeRole.INPUT): + return input_dict[target] + if len(target.path_afferents) > 1: + raise AutodiffCompositionError(f"TARGET Node '{target.name}' (for '{self.name}')" + f"cannot have more than one afferent projection.") + target = target.path_afferents[0].sender.owner + return get_target_value(target) + + for target in self.target_output_map: + target_values[target] = get_target_value(target) + return target_values def _parse_learning_spec(self, inputs, targets, execution_mode, context): stim_input, num_input_trials = super()._parse_learning_spec(inputs, targets, execution_mode, context) @@ -908,8 +1028,19 @@ def _parse_learning_spec(self, inputs, targets, execution_mode, context): def _check_nested_target_mechs(self): pass + + def _identify_target_nodes(self, context): + """Recursively call all nested AutodiffCompositions to assign TARGET nodes for learning""" + # Default is to use OUTPUT + target_nodes = [node for node in self.get_nodes_by_role(NodeRole.OUTPUT) + if not isinstance(node, Composition)] + for node in self.nodes: + if isinstance(node, AutodiffComposition): + target_nodes.extend(node._identify_target_nodes(context)) + return target_nodes + @handle_external_context() - def learn(self, *args, **kwargs): + def learn(self, *args, synchronize_pnl_values:bool = True, **kwargs): execution_phase_at_entry = kwargs[CONTEXT].execution_phase kwargs[CONTEXT].execution_phase = ContextFlags.PREPARING @@ -933,7 +1064,7 @@ def learn(self, *args, **kwargs): f"that are not AutodiffCompositions: {' ,'.join(nested_comps)}.") if self._built_pathways is False: - self.infer_backpropagation_learning_pathways(execution_mode) + self.infer_backpropagation_learning_pathways(execution_mode, context=kwargs[CONTEXT]) self._built_pathways = True return super().learn(*args, execution_mode=execution_mode, **kwargs) @@ -973,6 +1104,7 @@ def execute(self, runtime_params=None, execution_mode:pnlvm.ExecutionMode = pnlvm.ExecutionMode.PyTorch, skip_initialization=False, + synchronize_pnl_values=True, 
report_output:ReportOutput=ReportOutput.OFF, report_params:ReportOutput=ReportParams.OFF, report_progress:ReportProgress=ReportProgress.OFF, @@ -1007,8 +1139,8 @@ def execute(self, # model may be modified between runs? - autodiff_inputs = self._infer_input_nodes(inputs) - autodiff_targets = self._infer_output_nodes(inputs) + autodiff_inputs = self._get_autodiff_inputs_values(inputs) + autodiff_targets = self._get_autodiff_targets_values(inputs) report(self, LEARN_REPORT, @@ -1019,15 +1151,16 @@ def execute(self, context=context) self._build_pytorch_representation(context) - output = self.autodiff_training(autodiff_inputs, - autodiff_targets, - context, - scheduler) + trained_outputs, all_outputs = self.autodiff_training(inputs=autodiff_inputs, + targets=autodiff_targets, + synchronize_pnl_values=True, + context=context, + scheduler=scheduler) execution_phase = context.execution_phase context.execution_phase = ContextFlags.PROCESSING - self.output_CIM.execute(output, context=context) + self.output_CIM.execute(all_outputs, context=context) context.execution_phase = execution_phase report(self, @@ -1040,7 +1173,7 @@ def execute(self, scheduler.get_clock(context)._increment_time(TimeScale.TRIAL) - return output + return all_outputs # Call Composition execute in Python mode return super(AutodiffComposition, self).execute(inputs=inputs, @@ -1199,3 +1332,7 @@ def _get_state_initializer(self, context): optimizer_states = tuple() return (*comp_states, optimizer_states) + + def show_graph(self, *args, **kwargs): + """Override to use PytorchShowGraph if show_pytorch is True""" + self._show_graph.show_graph(*args, **kwargs) diff --git a/psyneulink/library/compositions/compositionrunner.py b/psyneulink/library/compositions/compositionrunner.py index c2068f5dc82..108e732b267 100644 --- a/psyneulink/library/compositions/compositionrunner.py +++ b/psyneulink/library/compositions/compositionrunner.py @@ -49,6 +49,7 @@ def _batch_inputs(self, epochs: int, num_trials: int, batch_size: int = 1, + optimizations_per_minibatch: int = 1, randomize: bool = True, call_before_minibatch=None, call_after_minibatch=None, @@ -78,14 +79,25 @@ def _batch_inputs(self, chunk = {} for k, v in inputs.items(): chunk[k] = v[idx % len(v)] - yield copy_parameter_value(chunk) - if call_after_minibatch: - call_after_minibatch() + for rep_idx in range(optimizations_per_minibatch): + # Return current stimulus + yield copy_parameter_value(chunk) - # Update weights if in PyTorch execution_mode; - # handled by Composition.execute in Python mode and in compiled version in LLVM mode - if execution_mode is ExecutionMode.PyTorch: - self._composition._update_learning_parameters(context) + # Update weights if in PyTorch execution_mode; + # handled by Composition.execute in Python mode and in compiled version in LLVM mode + if execution_mode is ExecutionMode.PyTorch: + self._composition._update_learning_parameters(context) + + if call_after_minibatch: + try: + # Try with the hope that the function uses **kwargs (or these args) + call_after_minibatch(epoch=epoch, + batch=i // batch_size, + num_batches=num_trials // batch_size, + context=context) + except TypeError: + # If not, try without the args + call_after_minibatch() # Compiled mode does not need more identical inputs. 
 # number_of_runs will be set appropriately to cycle over the set
@@ -155,6 +167,7 @@ def run_learning(self,
                      epochs: int = 1,
                      learning_rate = None,
                      minibatch_size: int = 1,
+                     optimizations_per_minibatch: int = 1,
                      patience: int = None,
                      min_delta: int = 0,
                      randomize_minibatches: bool = True,
@@ -251,11 +264,12 @@ def run_learning(self,
                                                    execution_mode=execution_mode,
                                                    context=context)
         else:
-            minibatched_input = self._batch_inputs(stim_input,
-                                                   stim_epoch,
-                                                   num_trials,
-                                                   minibatch_size,
-                                                   randomize_minibatches,
+            minibatched_input = self._batch_inputs(inputs=stim_input,
+                                                   epochs=stim_epoch,
+                                                   num_trials=num_trials,
+                                                   batch_size=minibatch_size,
+                                                   optimizations_per_minibatch=optimizations_per_minibatch,
+                                                   randomize=randomize_minibatches,
                                                    call_before_minibatch=call_before_minibatch,
                                                    call_after_minibatch=call_after_minibatch,
                                                    early_stopper=early_stopper,
diff --git a/psyneulink/library/compositions/emcomposition.py b/psyneulink/library/compositions/emcomposition.py
index 43e36964b88..d5cf790d205 100644
--- a/psyneulink/library/compositions/emcomposition.py
+++ b/psyneulink/library/compositions/emcomposition.py
@@ -19,7 +19,7 @@
 # - Deal with matrix assignment in LearningProjection LINE 643
 # - Reinstate test for execution of Concatenate with learning in test_emcomposition (currently commented out)
 # - FIX: Softmax Gain Control:
-#      Test if it current works (they are added to Compostion but not in BackProp processing pathway)
+#      Test if it currently works (they are added to Composition but not in BackProp processing pathway)
 #      Does backprop have to run through this if not learnable?
 #      If so, need to add PNL Function, with derivative and LLVM and Pytorch implementations
 # - FIX: WRITE MORE TESTS FOR EXECUTION, WARNINGS, AND ERROR MESSAGES
@@ -289,9 +289,14 @@
 **Organization**
 
+.. _EMComposition_Entries_and_Fields:
+
 *Entries and Fields*. Each entry in memory can have an arbitrary number of fields, and each field can have an arbitrary
 length. However, all entries must have the same number of fields, and the corresponding fields must all have the same
-length across entries.  Fields can be weighted to determine the influence they have on retrieval, using the
+length across entries.  Each field is treated as a separate "channel" for storage and retrieval, and is associated with
+its own corresponding input (key or value) and output (retrieved value) `Node `, some or all of
+which can be used to compute the similarity of the input (key) to entries in memory that is used for retrieval.
+Fields can be differentially weighted to determine the influence they have on retrieval, using the
 `field_weights ` parameter (see `retrieval ` below). The number and shape
 of the fields in each entry is specified in the ``memory_template`` argument of the EMComposition's constructor
 (see `memory_template `).  Which fields treated as keys (i.e., matched against
@@ -407,28 +412,43 @@
 .. _EMComposition_Field_Weights:
 
 * **field_weights**: specifies which fields are used as keys, and how they are weighted during retrieval.  The
-  number of values specified must match the number of fields specified in ``memory_template`` (i.e., the size of
-  of its first dimension (axis 0)).  All non-zero entries must be positive, and designate *keys* -- fields
-  that are used to match queries agains entries in memory for retrieval (see `Match memories by field
+  number of entries specified must match the number of fields specified in ``memory_template`` (i.e., the size of its first dimension (axis 0)).
+  All non-zero entries must be positive; these designate *keys* -- fields
+  that are used to match queries against entries in memory for retrieval (see `Match memories by field
+  `).  Entries of 0 designate *values* -- fields that are ignored during the matching process, but the values of which are retrieved and assigned as the `value ` of the
-  corresponding `retrieved_node `.  This distinction between keys and value implements
-  a standard "dictionary; however, if all entries are non-zero, then all fields are treated as keys, implemented a
-  full form of content-addressable memory.  If ``learn_field_weight`` is True, the field_weights can be modified
-  during training, and function like the attention head of a Transformer model); otherwise they remain fixed.  The
-  following options can be used to specify ``field_weights``:
+  corresponding `retrieved_node `. This distinction between keys and values corresponds
+  to the format of a standard "dictionary," though in that case only a single key and value are allowed, whereas
+  here there can be one or more keys and any number of values; if all fields are keys, this implements a full
+  form of content-addressable memory. If ``learn_field_weights`` is True (and `enable_learning
+  ` is either True or a list), then the field_weights can be modified
+  during training (this functions similarly to the attention head of a Transformer model, although at present each
+  field weight can only be a scalar value rather than a vector); if ``learn_field_weights`` is False, then the field_weights are
+  fixed. The following options can be used to specify ``field_weights``:
 
    * *None* (the default): all fields except the last are treated as keys, and are weighted equally for retrieval,
      while the last field is treated as a value field;
 
-    * *single entry*: its value is ignored, and all fields are treated as keys (i.e., used for
-      retrieval) and equally weighted for retrieval;
+    * *single entry*: all fields are treated as keys (i.e., used for retrieval) and weighted equally for retrieval.
+      If `normalize_field_weights ` is True, the value is ignored and all
+      keys are weighted by 1 / number of keys (i.e., normalized), whereas if `normalize_field_weights
+      ` is False, then the value specified is used to weight the retrieval of
+      every key.
 
    * *multiple non-zero entries*: If all entries are identical, the value is ignored and the corresponding keys
      are weighted equally for retrieval; if the non-zero entries are non-identical, they are used to weight the
      corresponding fields during retrieval (see `Weight fields `).  In either case,
      the remaining fields (with zero weights) are treated as value fields.
 
+.. _EMComposition_Normalize_Field_Weights:
+
+* **normalize_field_weights**: specifies whether the `field_weights ` are normalized
+  or their raw values are used.  If True, the `field_weights ` are normalized so that
+  they sum to 1.0, and are used to weight the corresponding fields during retrieval (see `Weight fields
+  `).  If False, the raw values of the `field_weights ` are
+  used to weight (i.e., multiply) the retrieved value of each field.  This setting is ignored if **field_weights**
+  is None or `concatenate_keys ` is in effect.
+
 .. _EMComposition_Field_Names:
 
 * **field_names**: specifies names that can be assigned to the fields.  The number of names specified must
@@ -443,7 +463,7 @@
   `normalize_memories ` is set to False. Setting concatenate_keys to True in either of those
   cases issues a warning, and the setting is ignored.
   If the key `field_weights ` (i.e., all non-zero values) are all equal *and* ``normalize_memories`` is set to True, then setting
-  ``concatenate_keys`` then a concatenate_keys_node ` is created that
+  ``concatenate_keys`` causes a concatenate_keys_node ` to be created that
   receives input from all of the `query_input_nodes ` and passes them as a single vector to the `mactch_node `.
@@ -480,15 +500,43 @@
 * **softmax_gain** : specifies the gain (inverse temperature) used for softmax normalizing the dot products of queries and keys in memory (see `EMComposition_Execution` below).  If a value is specified, that is used.  If the keyword
-  *CONTROL* is (or the value is None), then the `softmax_gain ` function is used to
-  adaptively set the gain based on the entropy of the dot products, preserving the distribution over non-(or near)
-  zero entries irrespective of how many (near) zero entries there are.
+  *ADAPTIVE* is specified, then the `Softmax.adapt_gain ` function is used to adaptively set the
+  `softmax_gain ` based on the entropy of the dot products in order to preserve
+  the distribution over non-(or near) zero entries irrespective of how many (near) zero entries there are (see
+  `SoftMax_AdaptGain` for additional details).  If *CONTROL* is specified, this feature is implemented by creating a
+  `ControlMechanism`, the `ControlSignal` of which is used to modulate the `softmax_gain `
+  parameter of the `Softmax` function.  If None is specified, the default value of the `Softmax` function is used.
-* **learn_field_weight** : specifies whether `field_weights ` are modifiable during training.
+.. _EMComposition_Learning:
-* **learning_rate** : specifies the rate at which `field_weights ` are learned if
-  ``learn_field_weight`` is True.
+*Learning*
+
+EMComposition supports two forms of learning -- error backpropagation and the learning of `field_weights
+` -- that can be configured by the following arguments of the EMComposition's constructor:
+
+* **enable_learning** : specifies whether learning is enabled for the EMComposition and, if so, which `retrieved_nodes
+  ` are used to compute errors, and propagate these back through the network.  If
+  ``enable_learning`` is False, then no learning occurs, including learning of `field_weights `.
+  If it is True, then all of the `retrieved_nodes ` participate in learning:  For
+  those that do not project to an outer Composition (i.e., one in which the EMComposition is `nested
+  `), a `TARGET ` node is constructed for each, and used to compute errors that
+  are backpropagated through the network to its `query_input_nodes ` and
+  `value_input_nodes `, and on to any nodes that project to it from a composition
+  in which the EMComposition is `nested `;  retrieved_nodes that *do* project to an outer
+  Composition receive their errors from those nodes, which are also backpropagated through the EMComposition.
+  If ``enable_learning`` is a list, then only the `retrieved_nodes ` specified in the
+  list participate in learning, and errors are computed only for those nodes.  The list must contain the same
+  number of entries as there are `fields ` and corresponding `retrieved_nodes
+  `, and each entry must be a boolean that specifies whether the corresponding
+  `retrieved_node ` is used for learning.
+
+* **learn_field_weights** : specifies whether `field_weights ` are modifiable during
+  learning (see `field_weights ` and `EMComposition_Learning` for additional
+  information).
+  For learning of `field_weights ` to occur, ``enable_learning`` must
+  also be True, or it must be a list with at least one True entry.
+* **learning_rate** : specifies the rate at which `field_weights ` are learned if
+  ``learn_field_weights`` is True;  see `EMComposition_Learning` for additional information.
 .. _EMComposition_Structure:
@@ -590,10 +638,13 @@
 * **Softmax normalize matches over fields**. The dot product for each key field is passed from the `match_node
   ` to the corresponding `softmax_node `, which applies
-  a softmax function to normalize the dot products for each key field.  If a numerical value is specified for
-  `softmax_gain `, that is used as the gain (inverse temperature) for the softmax function;
-  otherwise, if it is specified as *CONTROL* or None, then the `softmax_gain ` function is
-  used to adaptively set the gain (see `softmax_gain ` for details).
+  the `SoftMax` function to normalize the dot products for each key field.  If a numerical value is specified for
+  `softmax_gain `, that is used as the gain (inverse temperature) for the SoftMax function;
+  if *ADAPTIVE* is specified, then the `SoftMax.adapt_gain` function is used to adaptively set the gain based on
+  the dot products in each field (see `Softmax_AdaptGain` for additional details);  if *CONTROL* is specified, then the
+  dot products are monitored by a `ControlMechanism` that uses the `adapt_gain ` method of the
+  `SoftMax` function to modulate the `gain ` parameter of the Softmax function;  if None is specified,
+  the default value of the `Softmax` function is used as the `gain ` parameter.
 
 * **Weight fields**. If `field weights ` are specified, then the softmax normalized dot
   product for each key field is passed to the corresponding `field_weight_node `
@@ -645,28 +696,44 @@
   corresponding `output ` item.
 COMMENT
-.. _EMComposition_Learning:
+.. _EMComposition_Training:
-*Learning*
+*Training*
 ~~~~~~~~~~
-FIX: MODIFY TO INDICATE THAT enable_learning ALLOWS PROPAGATION OF ERROR TRHOUGH THE NETWORK,
-     WHILE learn_field_weights ALLOWS LEARNING OF THE FIELD_WEIGHTS, WHICH REQUIRES enable_learning TO BE True
-If `learn ` is called and the `learn_field_weights ` attribute
-is True, then the `field_weights ` are modified to minimize the error passed to the
-EMComposition retrieved nodes, using the learning_rate specified in the `learning_rate `
-attribute.  If `learn_field_weights ` is False (or `run ` is called,
-then the `field_weights ` are not modified and the EMComposition is simply executed
-without any modification, and the error signal is passed to the nodes that project to its `INPUT `
-`Nodes `.
+If `learn ` is called and ``enable_learning`` is True or a list with at least one True entry,
+then errors will be computed for each of the `retrieved_nodes ` that are specified for
+learning (see `EMComposition_Learning` for details about specification).  These errors are derived either from
+any errors backpropagated to the EMComposition from an outer Composition in which it is `nested `, or
+locally from the difference between the `retrieved_nodes ` and the `target_nodes
+` that are created for each of the `retrieved_nodes ` that
+do not project to an outer Composition.  These errors are then backpropagated through the EMComposition to the
+`query_input_nodes ` and `value_input_nodes `, and
+on to any nodes that project to it from a composition in which the EMComposition is `nested `.
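To make the role of the learnable field weights concrete, the following is a purely illustrative PyTorch sketch of the idea described above (it is not EMComposition's implementation, and all names and sizes are hypothetical): a scalar weight per key field scales that field's softmax-normalized similarities, and the weights receive gradients from the retrieval error. The combine-and-renormalize step is a simplification of the weighting scheme described above.

    import torch

    # Hypothetical memory: two key fields (50 entries x 4 elements each) and one value field (50 x 6)
    memory_keys = [torch.randn(50, 4), torch.randn(50, 4)]
    memory_values = torch.randn(50, 6)
    field_weights = torch.nn.Parameter(torch.ones(2))      # learnable scalar weight per key field

    def retrieve(queries):
        # Weight each key field's softmax-normalized similarities by its field weight, then combine
        combined = sum(w * torch.softmax(mem @ q, dim=0)
                       for w, mem, q in zip(field_weights, memory_keys, queries))
        combined = combined / combined.sum()                # normalized weights over memory entries
        return combined @ memory_values                     # weighted retrieval of the value field

    loss = torch.nn.functional.mse_loss(retrieve([torch.randn(4), torch.randn(4)]), torch.randn(6))
    loss.backward()                                         # populates field_weights.grad, which drives learning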
+
+If `learn_field_weights ` is also True, then the `field_weights
+` are modified to minimize the error passed to the EMComposition retrieved nodes, using the
+`learning_rate ` specified in the `learning_rate ` attribute.
+If `learn_field_weights ` is False (or `run ` is called), then the `field_weights
+` are not modified and the EMComposition is simply executed
+without any modification, and error signals are passed to the nodes that project to its `query_input_nodes
+` and `value_input_nodes `.
 
 .. note::
-   Although memory storage is implemented as a form of learning (though modification of MappingProjection
+   The only parameters modifiable by learning in the EMComposition are its `field_weights
+   `;  all other parameters (including all other Projection `matrices
+   `) are fixed, and used only to compute gradients and backpropagate errors.
+
+   .. technical_note::
+      Although memory storage is implemented as a form of learning (through modification of MappingProjection
       `matrix ` parameters;  see `memory storage
       `), this occurs irrespective of how EMComposition is run (i.e.,
       whether `learn ` or `run ` is called), and is not affected by the
       `learn_field_weights ` or `learning_rate ` attributes, which pertain only to whether the `field_weights
-      ` are modified during learning.
+      ` are modified during learning.  Furthermore, when run in PyTorch mode, storage
+      is executed after the forward() and backward() passes are complete, and is not considered as part of the
+      gradient calculations.
 
 .. _EMComposition_Examples:
@@ -894,9 +961,10 @@
 from psyneulink.core.components.projections.pathway.mappingprojection import MappingProjection
 from psyneulink.core.globals.parameters import Parameter, check_user_specified
 from psyneulink.core.globals.keywords import \
-    (AUTO, CONTROL, DEFAULT_INPUT, DEFAULT_VARIABLE, EM_COMPOSITION, FULL_CONNECTIVITY_MATRIX,
+    (ADAPTIVE, AUTO, CONTROL, DEFAULT_INPUT, DEFAULT_VARIABLE, EM_COMPOSITION, FULL_CONNECTIVITY_MATRIX,
     GAIN, IDENTITY_MATRIX, MULTIPLICATIVE_PARAM, NAME, PARAMS, PRODUCT, PROJECTIONS, RANDOM, SIZE, VARIABLE)
 from psyneulink.core.globals.utilities import convert_all_elements_to_np_array, is_numeric_scalar
+from psyneulink.core.globals.context import ContextFlags
 from psyneulink.core.llvm import ExecutionMode
@@ -906,6 +974,15 @@
 STORAGE_PROB = 'storage_prob'
+QUERY_AFFIX = ' [QUERY]'
+VALUE_AFFIX = ' [VALUE]'
+MATCH_TO_KEYS_AFFIX = ' [MATCH to KEYS]'
+RETRIEVED_AFFIX = ' [RETRIEVED]'
+WEIGHTED_SOFTMAX_AFFIX = ' [WEIGHTED SOFTMAX]'
+RETRIEVE_NODE_NAME = 'RETRIEVE'
+STORE_NODE_NAME = 'STORE'
+
+
 def _memory_getter(owning_component=None, context=None)->list:
     """Return list of memories in which rows (outer dimension) are memories for each field.
     These are derived from `matrix ` parameter of the `afferent
@@ -954,10 +1031,11 @@ class EMComposition(AutodiffComposition):
         memory_fill=0,                      \
         memory_capacity=None,               \
         field_weights=None,                 \
+        normalize_field_weights=True,       \
         field_names=None,                   \
         concatenate_keys=False,             \
         normalize_memories=True,            \
-        softmax_gain=CONTROL,               \
+        softmax_gain=1.0,                   \
         storage_prob=1.0,                   \
         memory_decay_rate=AUTO,             \
         enable_learning=True,               \
@@ -968,7 +1046,7 @@ class EMComposition(AutodiffComposition):
         )
 
     Subclass of `AutodiffComposition` that implements the functions of an `EpisodicMemoryMechanism` in a
-    differentiable form and in which it `field_weights ` parameter can be learned.
+    differentiable form and in which its `field_weights ` parameter can be learned.
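A minimal, hypothetical construction example using the arguments introduced in this patch; it assumes EMComposition is importable from the top-level psyneulink namespace, and the values shown are placeholders rather than recommendations:

    import psyneulink as pnl

    em = pnl.EMComposition(
        memory_template=[[0, 0, 0], [0, 0]],   # two fields: a 3-element key and a 2-element value
        memory_capacity=50,
        field_weights=(1, 0),                  # first field is a key, second is a value
        normalize_field_weights=True,
        softmax_gain=10.0,                     # a fixed gain; ADAPTIVE or CONTROL are described above
        softmax_threshold=.001,
        enable_learning=[True, False],         # compute errors only for the first field's retrieved node
        learn_field_weights=False,
    )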
Takes only the following arguments, all of which are optional @@ -988,8 +1066,13 @@ class EMComposition(AutodiffComposition): see `memory_capacity ` for details. field_weights : tuple : default (1,0) - specifies the relative weight assigned to each key when matching an item in memory' - see `field weights ` for details. + specifies the relative weight assigned to each key when matching an item in memory; + see `field weights ` for additional details. + + normalize_field_weights : bool : default True + specifies whether the **fields_weights** are normalized over the number of keys, or used as absolute + weighting values when retrieving an item from memory; see `normalize_field weights + ` for additional details. field_names : list : default None specifies the optional names assigned to each field in the memory_template; @@ -1003,10 +1086,14 @@ class EMComposition(AutodiffComposition): specifies whether keys and memories are normalized before computing their dot product (similarity); see `Match memories by field ` for additional details. - softmax_gain : float : default CONTROL + softmax_gain : float, ADAPTIVE or CONTROL : default 1.0 specifies the temperature used for softmax normalizing the dot products of keys and memories; see `Softmax normalize matches over fields ` for additional details. + softmax_threshold : float : default .0001 + specifies the temperature used for softmax normalizing the dot products of keys and memories; assign ``None`` + to disable; see `Softmax normalize matches over fields ` for additional details. + storage_prob : float : default 1.0 specifies the probability that an item will be stored in `memory ` when the EMComposition is executed (see `Retrieval and Storage ` for @@ -1016,9 +1103,11 @@ class EMComposition(AutodiffComposition): specifies the rate at which items in the EMComposition's memory decay; see `memory_decay_rate ` for details. - enable_learning : bool : default True - specifies whether learning pathway is constructed for the EMComposition (see `enable_learning - ` for additional details). + enable_learning : bool or list[bool]: default True + specifies whether a learning pathway is constructed for each `field ` + of the EMComposition. If it is a list, each item must be ``True`` or ``False`` and the number of items + must be equal to the number of `fields specified; see `enable_learning + ` for additional details. learn_field_weights : bool : default True specifies whether `field_weights ` are learnable during training; @@ -1029,6 +1118,7 @@ class EMComposition(AutodiffComposition): specifies rate at which `field_weights ` are learned if ``learn_field_weights`` is True. + # 7/10/24 FIX: STILL TRUE? DOES IT PRECLUDE USE OF EMComposition as a nested Composition?? .. technical_note:: use_storage_node : bool : default True specifies whether to use a `LearningMechanism` to store entries in `memory `. @@ -1066,12 +1156,16 @@ class EMComposition(AutodiffComposition): determines the number of items that can be stored in `memory `; see `memory_capacity ` for additional details. - field_weights : list[float] - determines which fields of the input are treated as "keys" (non-zero values), used to match entries in `memory - ` for retrieval, and which are used as "values" (zero values), that are stored and - retrieved from memory, but not used in the match process (see `Match memories by field - `; see `field_weights ` for additional details - of specification). 
+ field_weights : tuple[float] + determines which fields of the input are treated as "keys" (non-zero values) that are used to match entries in + `memory ` for retrieval, and which are used as "values" (zero values), that are stored + and retrieved from memory, but not used in the match process (see `Match memories by field + `. see `field_weights ` additional details. + + normalize_field_weights : bool : default True + determines whether `fields_weights ` are normalized over the number of keys, or + used as absolute weighting values when retrieving an item from memory; see `normalize_field weights + ` for additional details. field_names : list[str] determines which names that can be used to label fields in `memory `; see @@ -1085,11 +1179,15 @@ class EMComposition(AutodiffComposition): determines whether keys and memories are normalized before computing their dot product (similarity); see `Match memories by field ` for additional details. - softmax_gain : CONTROL + softmax_gain : float, ADAPTIVE or CONTROL determines gain (inverse temperature) used for softmax normalizing the dot products of keys and memories by the `softmax` function of the `softmax_nodes `; see `Softmax normalize matches over fields ` for additional details. + softmax_threshold : float + determines the threshold used to mask out small values in the softmax calculation; see `_SoftMax_AdaptGain` + for details). + storage_prob : float determines the probability that an item will be stored in `memory ` when the EMComposition is executed (see `Retrieval and Storage ` for @@ -1099,16 +1197,19 @@ class EMComposition(AutodiffComposition): determines the rate at which items in the EMComposition's memory decay (see `memory_decay_rate ` for details). - enable_learning : bool + enable_learning : bool or list[bool] determines whether `learning ` is enabled for the EMComposition, allowing any error - received by the `retrieved_nodes ` to be propagated to the `query_input_nodes - ` and `value_input_nodes `, and on to any - `Nodes ` that project to them. + received by the `retrieved_nodes ` to be propagated to the corresponding + `query_input_nodes ` and `value_input_nodes + `, and on to any `Nodes ` that project to them. + If True, learning is enabled for all fields and if False learning is disabled for all fields; If it is a + list, then each entry specifies whether learning is enabled or disabled for the corresponding field + see `Learning ` and `Fields ` for additional details. learn_field_weights : bool determines whether `field_weights ` are learnable during training; - requires `enable_learning ` to be True; see `Learning - ` for additional details. + requires `enable_learning ` to be True for the corresponding field; + see `Learning ` for additional details. learning_rate : float determines whether the rate at which `field_weights ` are learned @@ -1122,13 +1223,22 @@ class EMComposition(AutodiffComposition): ` (see `Match memories by field ` for additional details). By default these are assigned the name *KEY_n_INPUT* where n is the field number (starting from 0); however, if `field_names ` is specified, then the name of each query_input_node - is assigned the corresponding field name. + is assigned the corresponding field name appended with * [QUERY]*. value_input_nodes : list[TransferMechanism] `INPUT ` `Nodes ` that receive values to be stored in `memory `; these are not used in the matching process used for retrieval. 
By default these are assigned the name *VALUE_n_INPUT* where n is the field number (starting from 0); however, if `field_names ` is specified, then the name of each value_input_node is assigned + the corresponding field name appended with * [VALUE]*. + + input_nodes : list[TransferMechanism] + Full list of `INPUT ` `Nodes ` ordered with query_input_nodes first + followed by value_input_nodes; used primarily for internal computations + + field_input_nodes : list[TransferMechanism] + Full list of `INPUT ` `Nodes ` in the same order specified in the + **field_names** argument of the constructor and in `self.field_names `. concatenate_keys_node : TransferMechanism `TransferMechanism` that concatenates the inputs to `query_input_nodes ` into a @@ -1192,7 +1302,8 @@ class EMComposition(AutodiffComposition): ` (see `Retrieve values by field ` for additional details); these are assigned the same names as the `query_input_nodes ` and `value_input_nodes ` to which they correspond appended with the suffix - *_RETRIEVED*. + * [RETRIEVED]*, and are in the same order as `field_input_nodes ` to which + to which they correspond. storage_node : EMStorageMechanism `EMStorageMechanism` that receives inputs from the `query_input_nodes ` and @@ -1228,7 +1339,7 @@ class Parameters(AutodiffComposition.Parameters): see `enable_learning ` :default value: True - :type: ``bool`` + :type: ``bool`` or ``list`` field_names see `field_names ` @@ -1278,6 +1389,12 @@ class Parameters(AutodiffComposition.Parameters): :default value: np.array([[0],[0]]) :type: ``np.ndarray`` + normalize_field_weights + see `normalize_field_weights ` + + :default value: True + :type: ``bool`` + normalize_memories see `normalize_memories ` @@ -1291,9 +1408,14 @@ class Parameters(AutodiffComposition.Parameters): :type: ``numpy.random.RandomState`` softmax_gain - see `random_state ` - :default value: CONTROL - :type: ``float or CONTROL`` + see `softmax_gain ` + :default value: 1.0 + :type: ``float, ADAPTIVE or CONTROL`` + + softmax_threshold + see `softmax_threshold ` + :default value: .001 + :type: ``float`` storage_prob see `storage_prob ` @@ -1305,10 +1427,12 @@ class Parameters(AutodiffComposition.Parameters): memory_template = Parameter([[0],[0]], structural=True, valid_types=(tuple, list, np.ndarray), read_only=True) memory_capacity = Parameter(1000, structural=True) field_weights = Parameter(None) + normalize_field_weights = Parameter(True) field_names = Parameter(None, structural=True) concatenate_keys = Parameter(False, structural=True) normalize_memories = Parameter(True) - softmax_gain = Parameter(CONTROL, modulable=True) + softmax_gain = Parameter(1.0, modulable=True) + softmax_threshold = Parameter(.001, modulable=True, specify_none=True) storage_prob = Parameter(1.0, modulable=True, aliases=[MULTIPLICATIVE_PARAM]) memory_decay_rate = Parameter(AUTO, modulable=True) enable_learning = Parameter(True, structural=True) @@ -1341,10 +1465,21 @@ def _validate_field_weights(self, field_weights): if any([field_weight < 0 for field_weight in field_weights]): return f"must be all be positive values." + def _validate_normalize_field_weights(self, normalize_field_weights): + if not isinstance(normalize_field_weights, bool): + return f"must be all be a boolean value." + def _validate_field_names(self, field_names): if field_names and not all(isinstance(item, str) for item in field_names): return f"must be a list of strings." 
+ def _validate_enable_learning(self, enable_learning): + if isinstance(enable_learning, list): + if not all(isinstance(item, bool) for item in enable_learning): + return f"can only contains bools as entries." + elif not isinstance(enable_learning, bool): + return f"must be a bool or list of bools." + def _validate_memory_decay_rate(self, memory_decay_rate): if memory_decay_rate is None or memory_decay_rate == AUTO: return @@ -1352,8 +1487,12 @@ def _validate_memory_decay_rate(self, memory_decay_rate): return f"must be a float in the interval [0,1]." def _validate_softmax_gain(self, softmax_gain): - if softmax_gain != CONTROL and not is_numeric_scalar(softmax_gain): - return f"must be a scalar or the keyword 'CONTROL'." + if not is_numeric_scalar(softmax_gain) and softmax_gain not in {ADAPTIVE, CONTROL}: + return f"must be a scalar or one the keywords '{ADAPTIVE}' or '{CONTROL}'." + + def _validate_softmax_threshold(self, softmax_threshold): + if softmax_threshold is not None and (not is_numeric_scalar(softmax_threshold) or softmax_threshold <= 0): + return f"must be a scalar greater than 0." def _validate_storage_prob(self, storage_prob): if not is_numeric_scalar(storage_prob) and not (0 <= storage_prob <= 1): @@ -1366,19 +1505,22 @@ def __init__(self, memory_fill:Union[int, float, tuple, RANDOM]=0, field_names:Optional[list]=None, field_weights:tuple=None, + normalize_field_weights:bool=True, concatenate_keys:bool=False, normalize_memories:bool=True, - softmax_gain:Union[float, CONTROL]=CONTROL, + softmax_gain:Union[float, ADAPTIVE, CONTROL]=1.0, + softmax_threshold:Optional[float]=.001, storage_prob:float=1.0, memory_decay_rate:Union[float,AUTO]=AUTO, - enable_learning:bool=True, + enable_learning:Union[bool,list]=True, learn_field_weights:bool=True, learning_rate:float=None, use_storage_node:bool=True, use_gating_for_weighting:bool=False, random_state=None, seed=None, - name="EM_Composition"): + name="EM_Composition", + **kwargs): # Construct memory -------------------------------------------------------------------------------- @@ -1389,6 +1531,7 @@ def __init__(self, memory_fill, field_weights) field_weights, field_names, concatenate_keys = self._parse_fields(field_weights, + normalize_field_weights, field_names, concatenate_keys, normalize_memories, @@ -1399,6 +1542,9 @@ def __init__(self, self.use_storage_node = use_storage_node + if softmax_gain == CONTROL: + self.parameters.softmax_gain.modulable = False + # Instantiate Composition ------------------------------------------------------------------------- @@ -1409,6 +1555,7 @@ def __init__(self, field_names = field_names, concatenate_keys = concatenate_keys, softmax_gain = softmax_gain, + softmax_threshold = softmax_threshold, storage_prob = storage_prob, memory_decay_rate = memory_decay_rate, normalize_memories = normalize_memories, @@ -1416,7 +1563,8 @@ def __init__(self, learn_field_weights = learn_field_weights, learning_rate = learning_rate, random_state = random_state, - seed = seed + seed = seed, + **kwargs ) self._construct_pathways(self.memory_template, @@ -1425,6 +1573,7 @@ def __init__(self, self.concatenate_keys, self.normalize_memories, self.softmax_gain, + self.softmax_threshold, self.storage_prob, self.memory_decay_rate, self.use_storage_node, @@ -1543,6 +1692,12 @@ def _validate_memory_specs(self, memory_template, memory_capacity, memory_fill, raise EMCompositionError(f"The 'memory_fill' arg ({memory_fill}) specified for {name} " f"must be a float, int or len tuple of ints and/or floats.") + # If enable_learning is 
a list of bools, it must match the len of 1st dimension (axis 0) of memory_template: + if isinstance(self.enable_learning, list) and len(self.enable_learning) != num_fields: + raise EMCompositionError(f"The number of items ({len(self.enable_learning)}) in the 'enable_learning' arg " + f"for {name} must match the number of fields in memory " + f"({num_fields}).") + # If len of field_weights > 1, must match the len of 1st dimension (axis 0) of memory_template: field_weights_len = len(np.atleast_1d(field_weights)) if field_weights is not None and field_weights_len > 1 and field_weights_len != num_fields: @@ -1644,6 +1799,7 @@ def _construct_entries(entry_template, num_entries, memory_fill=None)->np.ndarra def _parse_fields(self, field_weights, + normalize_field_weights, field_names, concatenate_keys, normalize_memories, @@ -1661,11 +1817,18 @@ def _parse_fields(self, field_weights = [1] * num_fields field_weights[-1] = 0 field_weights = np.atleast_1d(field_weights) - # Fill out and normalize all field_weights + # Fill out field_weights, normalizing if specified: + if len(field_weights) == 1: - parsed_field_weights = np.repeat(field_weights / np.sum(field_weights), len(self.entry_template)) + if normalize_field_weights: + parsed_field_weights = np.repeat(field_weights / np.sum(field_weights), len(self.entry_template)) + else: + parsed_field_weights = np.repeat(field_weights[0], len(self.entry_template)) else: - parsed_field_weights = np.array(field_weights) / np.sum(field_weights) + if normalize_field_weights: + parsed_field_weights = np.array(field_weights) / np.sum(field_weights) + else: + parsed_field_weights = field_weights # Memory structure Parameters parsed_field_names = field_names.copy() if field_names is not None else None @@ -1674,13 +1837,18 @@ def _parse_fields(self, self.num_fields = len(self.entry_template) keys_weights = [i for i in parsed_field_weights if i != 0] self.num_keys = len(keys_weights) + # Get indices of field_weights that specify keys: + self.key_indices = np.nonzero(parsed_field_weights)[0] + self.num_values = self.num_fields - self.num_keys if parsed_field_names: - self.key_names = parsed_field_names[:self.num_keys] - self.value_names = parsed_field_names[self.num_keys:] + self.key_names = [parsed_field_names[i] for i in self.key_indices] + # self.value_names = parsed_field_names[self.num_keys:] + self.value_names = [parsed_field_names[i] for i in range(self.num_fields) if i not in self.key_indices] else: - self.key_names = [f'{i} [QUERY]' for i in range(self.num_keys)] if self.num_keys > 1 else ['KEY'] + self.key_names = [f'{i}' for i in range(self.num_keys)] if self.num_keys > 1 else ['KEY'] self.value_names = [f'{i} [VALUE]' for i in range(self.num_values)] if self.num_values > 1 else ['VALUE'] + parsed_field_names = self.key_names + self.value_names user_specified_concatenate_keys = concatenate_keys or False parsed_concatenate_keys = (user_specified_concatenate_keys @@ -1735,6 +1903,7 @@ def _construct_pathways(self, concatenate_keys, normalize_memories, softmax_gain, + softmax_threshold, storage_prob, memory_decay_rate, use_storage_node, @@ -1752,10 +1921,17 @@ def _construct_pathways(self, self.query_input_nodes = self._construct_query_input_nodes(field_weights) self.value_input_nodes = self._construct_value_input_nodes(field_weights) self.input_nodes = self.query_input_nodes + self.value_input_nodes + # Order input_nodes according to self.field_names + self.field_input_nodes = [node for name in self.field_names for node in self.input_nodes + if node in 
self.input_nodes + if (node.name in {name + QUERY_AFFIX, name + VALUE_AFFIX})] self.concatenate_keys_node = self._construct_concatenate_keys_node(concatenate_keys) self.match_nodes = self._construct_match_nodes(memory_template, memory_capacity, concatenate_keys,normalize_memories) - self.softmax_nodes = self._construct_softmax_nodes(memory_capacity, field_weights, softmax_gain) + self.softmax_nodes = self._construct_softmax_nodes(memory_capacity, + field_weights, + softmax_gain, + softmax_threshold) self.field_weight_nodes = self._construct_field_weight_nodes(field_weights, concatenate_keys, use_gating_for_weighting) @@ -1765,6 +1941,7 @@ def _construct_pathways(self, field_weighting, use_gating_for_weighting) self.retrieved_nodes = self._construct_retrieved_nodes(memory_template) + if use_storage_node: self.storage_node = self._construct_storage_node(memory_template, field_weights, self.concatenate_keys_node, @@ -1859,14 +2036,14 @@ def _construct_query_input_nodes(self, field_weights)->list: where i is selected randomly without replacement from (0->memory_capacity) """ - # Get indices of field_weights that specify keys: - key_indices = np.nonzero(field_weights)[0] - - assert len(key_indices) == self.num_keys, \ + assert len(self.key_indices) == self.num_keys, \ f"PROGRAM ERROR: number of keys ({self.num_keys}) does not match number of " \ - f"non-zero values in field_weights ({len(key_indices)})." + f"non-zero values in field_weights ({len(self.key_indices)})." - query_input_nodes = [TransferMechanism(size=len(self.entry_template[key_indices[i]]), + # query_input_nodes = [TransferMechanism(size=len(self.entry_template[self.key_indices[i]]), + # name=f'{self.key_names[self.key_indices[i]]} [QUERY]') + # for i in range(self.num_keys)] + query_input_nodes = [TransferMechanism(size=len(self.entry_template[self.key_indices[i]]), name=f'{self.key_names[i]} [QUERY]') for i in range(self.num_keys)] @@ -1948,13 +2125,13 @@ def _construct_match_nodes(self, memory_template, memory_capacity, concatenate_k memory_template[:,i].tolist()).transpose().astype(float), function=LinearMatrix(normalize=normalize_memories), name=f'MEMORY for {self.key_names[i]} [KEY]')}, - name=f'{self.key_names[i]} [MATCH to KEYS]') + name=self.key_names[i] + MATCH_TO_KEYS_AFFIX) for i in range(self.num_keys) ] return match_nodes - def _construct_softmax_nodes(self, memory_capacity, field_weights, softmax_gain)->list: + def _construct_softmax_nodes(self, memory_capacity, field_weights, softmax_gain, softmax_threshold)->list: """Create nodes that, for each key field, compute the softmax over the similarities between the input and the memories in the corresponding match_node. """ @@ -1967,17 +2144,14 @@ def _construct_softmax_nodes(self, memory_capacity, field_weights, softmax_gain) f"PROGRAM ERROR: number of keys ({self.num_keys}) does not match number of " \ f"non-zero values in field_weights ({len(key_indices)})." 
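As context for the gain and mask_threshold arguments passed to SoftMax just below, here is an illustrative numpy sketch; it assumes, for illustration only, that entries whose softmax value falls below the threshold are zeroed and the remainder renormalized (see the SoftMax function's documentation for the exact semantics of mask_threshold).

    import numpy as np

    def gained_masked_softmax(dot_products, gain=1.0, threshold=None):
        # Softmax with an inverse-temperature gain and optional masking of near-zero terms
        x = gain * np.asarray(dot_products, dtype=float)
        e = np.exp(x - x.max())                      # numerically stable softmax
        sm = e / e.sum()
        if threshold is not None:
            sm = np.where(sm < threshold, 0.0, sm)   # drop entries below the threshold...
            sm = sm / sm.sum()                       # ...and renormalize over the survivors
        return sm

    print(gained_masked_softmax([0.9, 0.1, 0.05, 0.88], gain=10.0, threshold=.001))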
- # If softmax_gain is specified as CONTROL, then set to None for now - # (will be set in _construct_softmax_gain_control_nodes) - if softmax_gain == CONTROL: - softmax_gain = None - softmax_nodes = [TransferMechanism(input_ports={SIZE:memory_capacity, PROJECTIONS: MappingProjection( sender=match_node.output_port, matrix=IDENTITY_MATRIX, name=f'MATCH to SOFTMAX for {self.key_names[i]}')}, - function=SoftMax(gain=softmax_gain), + function=SoftMax(gain=softmax_gain, + mask_threshold=softmax_threshold, + adapt_entropy_weighting=.95), name='SOFTMAX' if len(self.match_nodes) == 1 else f'{self.key_names[i]} [SOFTMAX]') for i, match_node in enumerate(self.match_nodes)] @@ -1990,11 +2164,11 @@ def _construct_softmax_gain_control_nodes(self, softmax_gain)->list: softmax_gain_control_nodes = [] if softmax_gain == CONTROL: softmax_gain_control_nodes = [ControlMechanism(monitor_for_control=match_node, - control_signals=[(GAIN, self.softmax_nodes[i])], - function=get_softmax_gain, - name='SOFTMAX GAIN CONTROL' if len(self.softmax_nodes) == 1 - else f'SOFTMAX GAIN CONTROL {self.key_names[i]}') - for i, match_node in enumerate(self.match_nodes)] + control_signals=[(GAIN, self.softmax_nodes[i])], + function=get_softmax_gain, + name='SOFTMAX GAIN CONTROL' if len(self.softmax_nodes) == 1 + else f'SOFTMAX GAIN CONTROL {self.key_names[i]}') + for i, match_node in enumerate(self.match_nodes)] return softmax_gain_control_nodes @@ -2039,7 +2213,7 @@ def _construct_weighted_softmax_nodes(self, memory_capacity, use_gating_for_weig matrix=FULL_CONNECTIVITY_MATRIX, name=f'WEIGHT to WEIGHTED SOFTMAX for {self.key_names[i]}')}], function=LinearCombination(operation=PRODUCT), - name=f'{self.key_names[i]} [WEIGHTED SOFTMAX]') + name=self.key_names[i] + WEIGHTED_SOFTMAX_AFFIX) for i, sm_fw_pair in enumerate(zip(self.softmax_nodes, self.field_weight_nodes))] return weighted_softmax_nodes @@ -2066,7 +2240,7 @@ def _construct_combined_softmax_node(self, name=f'WEIGHTED SOFTMAX to RETRIEVAL for ' f'{self.key_names[i]}') for i, s in enumerate(input_source)]}], - name='RETRIEVE')) + name=RETRIEVE_NODE_NAME)) assert len(combined_softmax_node.output_port.value) == memory_capacity, \ 'PROGRAM ERROR: number of items in combined_softmax_node ' \ @@ -2077,7 +2251,6 @@ def _construct_combined_softmax_node(self, def _construct_retrieved_nodes(self, memory_template)->list: """Create nodes that report the value field(s) for the item(s) matched in memory. 
""" - self.retrieved_key_nodes = \ [TransferMechanism(input_ports={SIZE: len(self.query_input_nodes[i].variable[0]), PROJECTIONS: @@ -2086,7 +2259,7 @@ def _construct_retrieved_nodes(self, memory_template)->list: matrix=memory_template[:,i], name=f'MEMORY FOR {self.key_names[i]} [RETRIEVE KEY]') }, - name= f'{self.key_names[i]} [RETRIEVED]') + name= self.key_names[i] + RETRIEVED_AFFIX) for i in range(self.num_keys)] self.retrieved_value_nodes = \ @@ -2097,10 +2270,13 @@ def _construct_retrieved_nodes(self, memory_template)->list: matrix=memory_template[:, i + self.num_keys], name=f'MEMORY FOR {self.value_names[i]} [RETRIEVE VALUE]')}, - name= f'{self.value_names[i]} [RETRIEVED]') + name= self.value_names[i] + RETRIEVED_AFFIX) for i in range(self.num_values)] - return self.retrieved_key_nodes + self.retrieved_value_nodes + retrieved_nodes = self.retrieved_key_nodes + self.retrieved_value_nodes + # Return nodes in order sorted by self.field_names + return [node for name in self.field_names for node in retrieved_nodes + if node in retrieved_nodes if (name + RETRIEVED_AFFIX) == node.name] def _construct_storage_node(self, memory_template, @@ -2149,14 +2325,15 @@ def _construct_storage_node(self, learning_signals=learning_signals, storage_prob=storage_prob, decay_rate = memory_decay_rate, - name='STORE') + name=STORE_NODE_NAME) + return storage_node def _set_learning_attributes(self): """Set learning-related attributes for Node and Projections """ - # self.require_node_roles(self.storage_node, NodeRole.LEARNING) - + # 7/10/24 FIX: SHOULD THIS ALSO BE CONSTRAINED BY VALUE OF field_weights FOR CORRESPONDING FIELD? + # (i.e., if it is zero then not learnable? or is that a valid initial condition?) for projection in self.projections: if (projection.sender.owner in self.field_weight_nodes and self.enable_learning @@ -2181,7 +2358,10 @@ def execute(self, return results def _store_memory(self, inputs, context): - """Store inputs in memory as weights of Projections to softmax_nodes (keys) and retrieved_nodes (values). + """Store inputs to query and value nodes in memory + Store memories in weights of Projections to softmax_nodes (queries) and retrieved_nodes (values). 
+ Note: inputs argument is ignored (included for compatibility with function of MemoryFunctions class; + storage is handled by call to EMComopsition._encode_memory """ storage_prob = np.array(self._get_current_parameter_value(STORAGE_PROB, context)).astype(float) random_state = self._get_current_parameter_value('random_state', context) @@ -2266,6 +2446,24 @@ def _get_execution_mode(self, execution_mode): execution_mode = ExecutionMode.PyTorch return execution_mode + def _identify_target_nodes(self, context)->list: + """Identify retrieval_nodes specified by **enable_learning** as TARGET nodes""" + enable_learning = self.parameters.enable_learning._get(context) + if enable_learning is False: + if self.learn_field_weights: + warnings.warn(f"The 'learn_field_weights' arg for {self.name} is True " + f"but its 'enable_learning' is False, so learn_field_weights will have no effect.") + target_nodes = [] + elif enable_learning is True: + target_nodes = [node for node in self.retrieved_nodes] + elif isinstance(enable_learning, list): + target_nodes = [node for node in self.retrieved_nodes if enable_learning[self.retrieved_nodes.index(node)]] + else: + assert False, (f"PROGRAM ERROR: enable_learning arg for {self.name}: {enable_learning} " + f"is neither True, False nor a list of bools as it should be.") + super()._identify_target_nodes(context) + return target_nodes + def infer_backpropagation_learning_pathways(self, execution_mode, context=None): if self.concatenate_keys: raise EMCompositionError(f"EMComposition does not support learning with 'concatenate_keys'=True.") @@ -2273,12 +2471,3 @@ def infer_backpropagation_learning_pathways(self, execution_mode, context=None): def _update_learning_parameters(self, context): pass - - def get_output_values(self, context=None): - """Override to provide ordering of retrieved_nodes that matches order of inputs. 
- This is needed since nodes were constructed as sets - """ - return [retrieved_node.output_port.parameters.value.get(context) - for retrieved_node in self.retrieved_nodes - if (not self.output_CIM._sender_is_probe(self.output_CIM.port_map[retrieved_node.output_port][1]) - or self.include_probes_in_output)] diff --git a/psyneulink/library/compositions/pytorchEMcompositionwrapper.py b/psyneulink/library/compositions/pytorchEMcompositionwrapper.py index b3dd51f5e89..b2e3b915cf6 100644 --- a/psyneulink/library/compositions/pytorchEMcompositionwrapper.py +++ b/psyneulink/library/compositions/pytorchEMcompositionwrapper.py @@ -16,8 +16,9 @@ # torch = None from typing import Optional -from psyneulink.library.compositions.pytorchwrappers import PytorchCompositionWrapper +from psyneulink.library.compositions.pytorchwrappers import PytorchCompositionWrapper, PytorchMechanismWrapper from psyneulink.library.components.mechanisms.modulatory.learning.EMstoragemechanism import EMStorageMechanism +from psyneulink.core.globals.keywords import AFTER __all__ = ['PytorchEMCompositionWrapper'] @@ -27,13 +28,17 @@ class PytorchEMCompositionWrapper(PytorchCompositionWrapper): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) - # Assign storage_node (EMComposition's EMStorageMechanism) + # Assign storage_node (EMComposition's EMStorageMechanism) (assumes there is only one) self.storage_node = [node for node in self.nodes_map.values() if isinstance(node._mechanism, EMStorageMechanism)][0] + # Execute storage_node after gradient calculation, + # since it assigns weights manually which messes up PyTorch gradient tracking in forward() and backward() + self.storage_node.exclude_from_gradient_calc = AFTER # Get PytorchProjectionWrappers for Projections to match and retrieve nodes; # used by get_memory() to construct memory_matrix and store_memory() to store entry in it pnl_storage_mech = self.storage_node._mechanism + num_fields = len(pnl_storage_mech.input_ports) num_learning_signals = len(pnl_storage_mech.learning_signals) num_match_fields = num_learning_signals - num_fields @@ -123,7 +128,7 @@ def store_memory(self, memory_to_store, context): axis = 0 entry_to_store = field_projection.sender.value if concatenation_node is None: - assert (entry_to_store == memory_to_store[i]).all(), \ + assert (entry_to_store == memory_to_store[i]).all(), \ f"PROGRAM ERROR: misalignment between inputs and fields for storing them" else: # For retrieve projections, get entry to store from memory_to_store (which has inputs to all fields) diff --git a/psyneulink/library/compositions/pytorchshowgraph.py b/psyneulink/library/compositions/pytorchshowgraph.py new file mode 100644 index 00000000000..46d8ebbc6c2 --- /dev/null +++ b/psyneulink/library/compositions/pytorchshowgraph.py @@ -0,0 +1,184 @@ +# Princeton University licenses this file to You under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. You may obtain a copy of the License at: +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed +# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and limitations under the License. 
+ + +# **************************************** PyTorch show_graph ********************************************************* + +from beartype import beartype + +from psyneulink._typing import Optional, Union, Literal + +from psyneulink.core.globals.context import ContextFlags, handle_external_context +from psyneulink.core.compositions import NodeRole +from psyneulink.core.compositions.showgraph import ShowGraph, SHOW_JUST_LEARNING_PROJECTIONS +from psyneulink.core.components.mechanisms.mechanism import Mechanism +from psyneulink.core.components.mechanisms.processing.compositioninterfacemechanism import CompositionInterfaceMechanism +from psyneulink.core.components.mechanisms.modulatory.control.controlmechanism import ControlMechanism +from psyneulink.core.components.projections.projection import Projection +from psyneulink.core.components.projections.pathway.mappingprojection import MappingProjection +from psyneulink.core.components.projections.modulatory.controlprojection import ControlProjection +from psyneulink.core.globals.keywords import BOLD, NESTED, INSET + +__all__ = ['SHOW_PYTORCH'] + +SHOW_PYTORCH = 'show_pytorch' +EXCLUDE_FROM_GRADIENT_CALC_LINE_STYLE = 'exclude_from_gradient_calc_line_style' +EXCLUDE_FROM_GRADIENT_CALC_COLOR = 'exclude_from_gradient_calc_color' + +class PytorchShowGraph(ShowGraph): + """ShowGraph object with `show_graph ` method for displaying `Composition`. + + This is a subclass of the `ShowGraph` class that is used to display the graph of a `Composition` used for learning + in `PyTorch mode ` (also see `AutodiffComposition_PyTorch`). In this mode, + any `nested Compositions ` are "flattened" (i.e., incorporated into the outermost + Composition); also, any `Nodes `` designated as `exclude_from_gradient_calc + ` will be moved to the end of the graph (as they are executed + after the gradient calculation), and any Projections designated as `exclude_in_autodiff + ` will not be shown as they are not used in the gradient calculations at all. + + Arguments + --------- + + show_pytorch : keyword : default 'PYTORCH' + specifies that the PyTorch version of the graph should be shown. 
+ + """ + + def __init__(self, *args, **kwargs): + self.show_pytorch = kwargs.pop(SHOW_PYTORCH, False) + super().__init__(*args, **kwargs) + + @beartype + @handle_external_context(source=ContextFlags.COMPOSITION) + def show_graph(self, *args, **kwargs): + """Override of show_graph to check if show_pytorch==True and if so build pytorch rep of autofiffcomposition""" + self.show_pytorch = kwargs.pop(SHOW_PYTORCH, self.show_pytorch) + context = kwargs.get('context') + if self.show_pytorch: + self.pytorch_rep = self.composition._build_pytorch_representation(context) + self.exclude_from_gradient_calc_line_style = kwargs.pop(EXCLUDE_FROM_GRADIENT_CALC_LINE_STYLE, 'dotted') + self.exclude_from_gradient_calc_color = kwargs.pop(EXCLUDE_FROM_GRADIENT_CALC_COLOR, 'brown') + return super().show_graph(*args, **kwargs) + + def _get_processing_graph(self, composition, context): + """Helper method that creates dependencies graph for nodes of autodiffcomposition used in Pytorch mode""" + if self.show_pytorch: + processing_graph = {} + projections = self._get_projections(composition, context) + # 7/9/24 FIX: COULD DO THIS BY ITERATING OVER PROJECTIONS INSTEAD OF NODES + for node in self._get_nodes(composition, context): + dependencies = set() + for projection in projections: + if node is projection.receiver.owner: + dependencies.add(projection.sender.owner) + # Add dependency of INPUT node of nested graph on node in outer graph that projects to it + elif (isinstance(projection.receiver.owner, CompositionInterfaceMechanism) and + projection.receiver.owner._get_destination_info_from_input_CIM(projection.receiver)[1] + is node): + dependencies.add(projection.sender.owner) + processing_graph[node] = dependencies + # Add TARGET nodes + for node in self.composition.learning_components: + processing_graph[node] = set([afferent.sender.owner for afferent in node.path_afferents]) + return processing_graph + else: + return super()._get_processing_graph(composition, context) + + def _get_nodes(self, composition, context): + """Override to return nodes of PytorchCompositionWrapper rather than autodiffcomposition""" + if self.show_pytorch: + nodes = list(self.pytorch_rep.nodes_map.keys()) + return nodes + else: + return super()._get_nodes(composition, context) + + def _get_projections(self, composition, context): + """Override to return nodes of Pytorch graph""" + if self.show_pytorch: + projections = list(self.pytorch_rep.projections_map.keys()) + # FIX: NEED TO ADD PROJECTIONS TO NESTED COMPS THAT ARE TO CIM + # Add any Projections to TARGET nodes + projections += [afferent + for node in self.composition.learning_components + for afferent in node.path_afferents + if not isinstance(afferent.sender.owner, CompositionInterfaceMechanism)] + return projections + else: + return super()._get_projections(composition, context) + + def _proj_in_composition(self, proj, composition_projections, context)->bool: + """Override to include direct Projections from outer to nested comps in Pytorch mode""" + if self.show_pytorch: + processing_graph = self._get_processing_graph(self.composition, context) + if proj in composition_projections: + return True + # If proj is betw. 
a sender and receiver specified in the processing_graphl, then it is in the autodiffcomp + elif (proj.receiver.owner in processing_graph + and proj.sender.owner in processing_graph[proj.receiver.owner]): + return True + else: + return False + else: + return super()._proj_in_composition(proj, composition_projections, context) + + def _get_roles_by_node(self, composition, node, context): + """Override in Pytorch mode to return NodeRole.INTERNAL for all nodes in nested compositions""" + if self.show_pytorch: + try: + return composition.get_roles_by_node(node) + except: + return [NodeRole.INTERNAL] + if self.show_pytorch and node not in self.composition.nodes: + return [NodeRole.INTERNAL] + else: + return super()._get_roles_by_node(composition, node, context) + + def _get_nodes_by_role(self, composition, role, context): + """Override in Pytorch mode to return all nodes in nested compositions as INTERNAL""" + if self.show_pytorch and composition is not self.composition: + return None + else: + return super()._get_nodes_by_role(composition, role, context) + + def _implement_graph_node(self, g, rcvr, context, *args, **kwargs): + """Override to assign EXCLUDE_FROM_GRADIENT_CALC nodes their own style in Pytorch mode""" + if self.show_pytorch: + if self.pytorch_rep.nodes_map[rcvr].exclude_from_gradient_calc: + kwargs['style'] = self.exclude_from_gradient_calc_line_style + kwargs['color'] = self.exclude_from_gradient_calc_color + g.node(*args, **kwargs) + else: + return super()._implement_graph_node( g, rcvr, context, *args, **kwargs) + + def _implement_graph_edge(self, graph, proj, context, *args, **kwargs): + """Override to assign custom attributes to edges""" + + if self.show_pytorch: + kwargs['color'] = self.default_node_color + + modulatory_node = None + if proj.parameter_ports[0].mod_afferents: + modulatory_node = self.pytorch_rep.nodes_map[proj.parameter_ports[0].mod_afferents[0].sender.owner] + + if proj in self.pytorch_rep.projections_map: + + # If Projection is a LearningProjection that is active, assign color and arrowhead of a LearningProjection + if proj.learnable or self.pytorch_rep.projections_map[proj].matrix.requires_grad: + kwargs['color'] = self.learning_color + + # If Projection is from a ModulatoryMechanism that is excluded from gradient calculations, assign that style + elif modulatory_node and modulatory_node.exclude_from_gradient_calc: + kwargs['color'] = self.exclude_from_gradient_calc_color + kwargs['style'] = self.exclude_from_gradient_calc_line_style + + elif self._proj_in_composition(proj, self.pytorch_rep.projections_map, context) and proj.learnable: + kwargs['color'] = self.learning_color + + graph.edge(*args, **kwargs) + + else: + return super()._implement_graph_edge(graph, proj, context, *args, **kwargs) diff --git a/psyneulink/library/compositions/pytorchwrappers.py b/psyneulink/library/compositions/pytorchwrappers.py index 8797dd25ac6..f739cfc259c 100644 --- a/psyneulink/library/compositions/pytorchwrappers.py +++ b/psyneulink/library/compositions/pytorchwrappers.py @@ -14,11 +14,12 @@ import torch.nn as nn from psyneulink.core.components.functions.nonstateful.combinationfunctions import LinearCombination, PRODUCT, SUM +from psyneulink.core.components.functions.stateful.integratorfunctions import IntegratorFunction from psyneulink.core.compositions.composition import NodeRole, CompositionInterfaceMechanism from psyneulink.library.compositions.pytorchllvmhelper import * from psyneulink.library.compositions.compiledoptimizer import AdamOptimizer, SGDOptimizer from 
psyneulink.library.compositions.compiledloss import MSELoss, CROSS_ENTROPYLoss -from psyneulink.core.globals.keywords import DEFAULT_VARIABLE, Loss, NODE, TARGET_MECHANISM +from psyneulink.core.globals.keywords import AFTER, BEFORE, DEFAULT_VARIABLE, Loss, NODE, TARGET_MECHANISM from psyneulink.core.globals.context import Context, ContextFlags, handle_external_context from psyneulink.core.globals.utilities import get_deepcopy_with_shared from psyneulink.core.globals.log import LogCondition @@ -26,7 +27,6 @@ __all__ = ['PytorchCompositionWrapper', 'PytorchMechanismWrapper', 'PytorchProjectionWrapper'] - class PytorchCompositionWrapper(torch.nn.Module): """Wrapper for a Composition as a Pytorch Module Set up parameters of PyTorch model & information required for forward computation @@ -64,7 +64,7 @@ def __init__(self, self.name = f"PytorchCompositionWrapper[{composition.name}]" - self.node_wrappers = [] # can be PytorchMechanismWrapper or PytorchCompositionWrapper + self.wrapped_nodes = [] # can be PytorchMechanismWrapper or PytorchCompositionWrapper self.nodes_map = {} # maps Node (Mech or nested Comp) -> PytorchMechanismWrapper or PytorchCompositionWrapper self.projection_wrappers = [] # PytorchProjectionWrappers @@ -74,6 +74,7 @@ def __init__(self, self.device = device self._composition = composition + self._nodes_to_execute_after_gradient_calc = {} # Nodes requiring execution after Pytorch forward/backward pass # Instantiate pytorch Mechanisms nodes = list(set(composition.nodes) - set(composition.get_nodes_by_role(NodeRole.LEARNING))) @@ -89,17 +90,18 @@ def __init__(self, for node in sorted(nodes, key=lambda x: isinstance(x, AutodiffComposition)): # Wrap nested Composition if isinstance(node, AutodiffComposition): - pytorch_node = PytorchCompositionWrapper(node, device, outer_creator=self, context=context) + pytorch_node = node.pytorch_composition_wrapper_type(node, device, outer_creator=self, context=context) # Wrap Mechanism else: pytorch_node = PytorchMechanismWrapper(node, + self, self._composition._get_node_index(node), device, context=context) pytorch_node._is_bias = any(input_port.default_input == DEFAULT_VARIABLE for input_port in node.input_ports) self.nodes_map[node] = pytorch_node - self.node_wrappers.append(pytorch_node) + self.wrapped_nodes.append(pytorch_node) # Assign INPUT Nodes for outermost Composition (including any that are nested within it at any level) # Note: Pytorch representation is "flattened" (i.e., any nested Compositions are replaced by their Nodes) @@ -111,8 +113,8 @@ def _assign_input_nodes(nodes): if isinstance(pytorch_node, PytorchMechanismWrapper): pytorch_node._is_input = pytorch_node._mechanism in composition._get_input_receivers(type=NODE) else: - _assign_input_nodes(pytorch_node.node_wrappers) - _assign_input_nodes(self.node_wrappers) + _assign_input_nodes(pytorch_node.wrapped_nodes) + _assign_input_nodes(self.wrapped_nodes) # Instantiate PyTorch ProjectionWrappers (ignoring any from/to CIMs in the same composition) for projection in composition._inner_projections: @@ -219,7 +221,7 @@ def _assign_input_nodes(nodes): self.execution_sets[index:index] = exec_sets # Flatten maps - for node_wrapper in self.node_wrappers: + for node_wrapper in self.wrapped_nodes: if isinstance(node_wrapper, PytorchCompositionWrapper): # For copying weights back to PNL in AutodiffComposition._update_learning_parameters self.projections_map.update(node_wrapper.projections_map) @@ -242,7 +244,7 @@ def _assign_input_nodes(nodes): def _regenerate_paramlist(self): """Add 
Projection matrices to Pytorch Module's parameter list""" self.params = nn.ParameterList() - for proj_wrapper in [p for p in self.projection_wrappers if not p._projection._exclude_from_autodiff]: + for proj_wrapper in [p for p in self.projection_wrappers if not p._projection.exclude_in_autodiff]: self.params.append(proj_wrapper.matrix) # generates llvm function for self.forward @@ -513,18 +515,31 @@ def forward(self, inputs, context=None)->dict: variable.append(input_port.defaults.variable) elif not input_port.internal_only: # otherwise, use the node's input_port's afferents - variable.append(node.collate_afferents(i).squeeze(0)) + variable.append(node.aggregate_afferents(i).squeeze(0)) if len(variable) == 1: variable = variable[0] else: # Node is not INPUT to Composition or BIAS, so get all input from its afferents - variable = node.collate_afferents() + variable = node.aggregate_afferents() + + if node.exclude_from_gradient_calc: + if node.exclude_from_gradient_calc == AFTER: + self._nodes_to_execute_after_gradient_calc[node] = variable + continue + elif node.exclude_from_gradient_calc == BEFORE: + assert False, 'PROGRAM ERROR: node.exclude_from_gradient_calc == BEFORE not yet implemented' + else: + assert False, \ + (f'PROGRAM ERROR: Bad assignment to {node.name}.exclude_from_gradient_calc: ' + f'{node.exclude_from_gradient_calc}; only {AFTER} is currently supported') - self.execute_node(node, variable, context) + # Execute the node using wrapper_type for Composition to which it belongs + # Note: this is to support overrides of execute_node method by subclasses (such as in EMComposition) + node.wrapper_type.execute_node(node, variable, context) # Add entry to outputs dict for OUTPUT Nodes of pytorch representation # note: these may be different than for actual Composition, as they are flattened - if (node._mechanism in self._composition.get_nested_nodes_output_nodes_at_levels()): + if (node._mechanism in self._composition.get_nested_output_nodes_at_all_levels()): outputs[node._mechanism] = node.value # NOTE: Context source needs to be set to COMMAND_LINE to force logs to update independently of timesteps @@ -542,7 +557,8 @@ def execute_node(self, node, variable, context=None): Implemented as method (and includes context as arg) so that it can be overridden by subclasses of PytorchCompositionWrapper """ - node.execute(variable) + value = node.execute(variable, context) + assert 'DEBUGGING BREAK POINT' def detach_all(self): for projection in self.projections_map.values(): @@ -556,89 +572,173 @@ def copy_weights_to_psyneulink(self, context=None): pytorch_rep.matrix.detach().cpu().numpy(), context) projection.parameter_ports['matrix'].parameters.value._set( pytorch_rep.matrix.detach().cpu().numpy(), context) - assert True def log_weights(self): for proj_wrapper in self.projection_wrappers: proj_wrapper.log_matrix() def log_values(self): - for node_wrapper in [n for n in self.node_wrappers if not isinstance(n, PytorchCompositionWrapper)]: + for node_wrapper in [n for n in self.wrapped_nodes if not isinstance(n, PytorchCompositionWrapper)]: node_wrapper.log_value() class PytorchMechanismWrapper(): - """Wrapper for a Mechanism in a PytorchCompositionWrapper""" - def __init__(self, mechanism, component_idx, device, context=None): + """Wrapper for a Mechanism in a PytorchCompositionWrapper + + Attributes + ---------- + + function : _gen_pytorch_fct + Pytorch version of the Mechanism's function assigned in __init__ + + integrator_function : _gen_pytorch_fct + Pytorch version of the Mechanism's 
integrator_function assigned in __init__ if mechanism + has an integrator_function; this assumes the mechanism also has an integrator_mode attribute + that is used to determine whether to execute the integrator_function first, and use its result + as the input to its function. + + exclude_from_gradient_calc : bool or str[BEFORE | AFTER]: False + used to prevent a node from being included in the Pytorch gradient calculation by excluding it in calls to + the forward() and backward(). If AFTER is specified, the node is executed after at the end of the + `update_learning_parameters` method. BEFORE is not currently supported + """ + def __init__(self, + mechanism, # Mechanism to be wrapped + composition, # Composition to which node belongs (used for execution of nested Compositions) + component_idx, # index of the Mechanism in the Composition + device, # needed for Pytorch + context=None): self._mechanism = mechanism - self.name = f"PytorchMechanismWrapper[{mechanism.name}]" self._idx = component_idx self._context = context - self._is_input = False self._is_bias = False + self._curr_sender_value = None # Used to assign initializer or default if value == None (i.e., not yet executed) + self.exclude_from_gradient_calc = False # Used to execute node before or after forward/backward pass methods + self.wrapper_type = composition + + self.name = f"PytorchMechanismWrapper[{mechanism.name}]" self.afferents = [] self.efferents = [] + if mechanism.parameters.has_initializers._get(context) and mechanism.parameters.value.initializer: + self.default_value = mechanism.parameters.value.initializer.get(context) + else: + self.default_value = mechanism.defaults.value + + from psyneulink.core.components.functions.function import FunctionError + from psyneulink.library.compositions.autodiffcomposition import AutodiffCompositionError try: - self.function = mechanism.function._gen_pytorch_fct(device, context) - except: + pnl_fct = mechanism.function + self.function = pnl_fct._gen_pytorch_fct(device, context) + if hasattr(mechanism, 'integrator_function'): + pnl_fct = mechanism.integrator_function + self.integrator_function = pnl_fct._gen_pytorch_fct(device, context) + self.integrator_previous_value = pnl_fct._get_pytorch_fct_param_value('initializer', device, context) + except FunctionError as error: from psyneulink.library.compositions.autodiffcomposition import AutodiffCompositionError - raise AutodiffCompositionError( - f"Function {mechanism.function} is not currently supported by AutodiffComposition") + raise AutodiffCompositionError(error.args[0]) + except: + raise AutodiffCompositionError(f"Function {pnl_fct} is not currently supported by AutodiffComposition") self.value = None self._target_mechanism = None def add_efferent(self, efferent): + """Add ProjectionWrapper for efferent from MechanismWrapper. + Implemented for completeness; not currently used + """ assert efferent not in self.efferents self.efferents.append(efferent) def add_afferent(self, afferent): + """Add ProjectionWrapper for afferent to MechanismWrapper. 
+ For use in call to aggregate_afferents + """ assert afferent not in self.afferents self.afferents.append(afferent) - - def collate_afferents(self, port=None): + def aggregate_afferents(self, port=None): """Return weight-multiplied sum of afferent projections for input_port(s) of the Mechanism If there is only one input_port, return the sum of its afferents (for those in Composition) If there are multiple input_ports, return an array with the sum for each input_port - # FIX: AUGMENT THIS TO SUPPORT InputPort's function + FIX: AUGMENT THIS TO SUPPORT InputPort's function """ assert self.afferents,\ f"PROGRAM ERROR: No afferents found for '{self._mechanism.name}' in AutodiffComposition" + + for proj_wrapper in self.afferents: + curr_val = proj_wrapper.sender.value + if curr_val is not None: + proj_wrapper._curr_sender_value = proj_wrapper.sender.value[proj_wrapper._value_idx] + else: + proj_wrapper._curr_sender_value = torch.tensor(proj_wrapper.default_value) + # Specific port is specified # FIX: USING _port_idx TO INDEX INTO sender.value GETS IT WRONG IF THE MECHANISM HAS AN OUTPUT PORT # USED BY A PROJECTION NOT IN THE CURRENT COMPOSITION if port is not None: - return sum(proj_wrapper.execute(proj_wrapper.sender.value[proj_wrapper._value_idx]).unsqueeze(0) + return sum(proj_wrapper.execute(proj_wrapper._curr_sender_value).unsqueeze(0) for proj_wrapper in self.afferents if proj_wrapper._pnl_proj in self._mechanism.input_ports[port].path_afferents) # Has only one input_port elif len(self._mechanism.input_ports) == 1: # Get value corresponding to port from which each afferent projects - return sum((proj_wrapper.execute(proj_wrapper.sender.value[proj_wrapper._value_idx]).unsqueeze(0) + return sum((proj_wrapper.execute(proj_wrapper._curr_sender_value).unsqueeze(0) for proj_wrapper in self.afferents)) # Has multiple input_ports else: - return [sum(proj_wrapper.execute(proj_wrapper.sender.value[proj_wrapper._value_idx]).unsqueeze(0) + return [sum(proj_wrapper.execute(proj_wrapper._curr_sender_value).unsqueeze(0) for proj_wrapper in self.afferents if proj_wrapper._pnl_proj in input_port.path_afferents) for input_port in self._mechanism.input_ports] - def execute(self, variable): - """Execute Mechanism's function on variable, enforce result to be 2d, and assign to self.value""" - if ((isinstance(variable, list) and len(variable) == 1) - or (isinstance(variable, torch.Tensor) and len(variable.squeeze(0).shape) == 1) - or isinstance(self._mechanism.function, LinearCombination)): - # Enforce 2d on value of MechanismWrapper (using unsqueeze) - # for single InputPort or if CombinationFunction (which reduces output to single item from multi-item input) - if isinstance(variable, torch.Tensor): - variable = variable.squeeze(0) - self.value = self.function(variable).unsqueeze(0) - else: - # Make value 2d by creating list of values returned by function for each item in variable - self.value = [self.function(variable[i].squeeze(0)) for i in range(len(variable))] + def execute(self, variable, context): + """Execute Mechanism's _gen_pytorch version of function on variable. 
+ Enforce result to be 2d, and assign to self.value + """ + def execute_function(function, variable, fct_has_mult_args=False, is_combination_fct=False): + """Execute _gen_pytorch_fct on variable, enforce result to be 2d, and return it + If fct_has_mult_args is True, treat each item in variable as an arg to the function + If False, compute function for each item in variable and return results in a list + """ + if ((isinstance(variable, list) and len(variable) == 1) + or (isinstance(variable, torch.Tensor) and len(variable.squeeze(0).shape) == 1) + or isinstance(self._mechanism.function, LinearCombination)): + # Enforce 2d on value of MechanismWrapper (using unsqueeze) + # for single InputPort or if CombinationFunction (which reduces output to single item from multi-item input) + if isinstance(variable, torch.Tensor): + variable = variable.squeeze(0) + return function(variable).unsqueeze(0) + elif is_combination_fct: + # Function combines the elements + return function(variable) + elif fct_has_mult_args: + # Assign each element of variable as an arg to the function + return function(*variable) + else: + # Treat each item in variable as a separate input to the function and get result for each in a list: + # make return value 2d by creating list of the results of function returned for each item in variable + return [function(variable[i].squeeze(0)) for i in range(len(variable))] + + # If mechanism has an integrator_function and integrator_mode is True, + # execute it first and use result as input to the main function; + # assumes that if PyTorch node has been assigned an integrator_function then _mechanism has an integrator_mode + if hasattr(self, 'integrator_function') and self._mechanism.parameters.integrator_mode._get(context): + variable = execute_function(self.integrator_function, + [self.integrator_previous_value, variable], + fct_has_mult_args=True) + # Keep track of previous value in Pytorch node for use in next forward pass + self.integrator_previous_value = variable + # Compute main function of mechanism and return result + from psyneulink.core.components.functions.nonstateful.combinationfunctions import CombinationFunction + self.value = execute_function(self.function, variable, + is_combination_fct=isinstance(self._mechanism.function, CombinationFunction)) + # Assign previous_value back to integrator_function of pnl node + # so that if Python implementation is run it picks up where PyTorch execution left off + if isinstance(self._mechanism.function, IntegratorFunction): + self._mechanism.integrator_function.parameters.previous_value._set(self.value, context) + return self.value def _gen_llvm_execute(self, ctx, builder, state, params, mech_input, data): @@ -734,16 +834,24 @@ def __init__(self, projection, sender=None, receiver=None, context=None): - self.name = f"PytorchProjectionWrapper[{projection.name}]" self._projection = projection # Projection being wrapped (may *not* be the one being learned; see note above) self._pnl_proj = pnl_proj # Projection that directly projects to/from sender/receiver (see above) self._idx = component_idx # Index of Projection in Composition's list of projections self._port_idx = port_idx # Index of sender's port (used by LLVM) - self._value_idx = 0 # Index of value in sender's value (used in collate_afferents) + self._value_idx = 0 # Index of value in sender's value (used in aggregate_afferents) + self._curr_sender_value = None + + self.name = f"PytorchProjectionWrapper[{projection.name}]" self.sender = sender # PytorchMechanismWrapper to which 
Projection's sender is mapped self.receiver = receiver # PytorchMechanismWrapper to which Projection's receiver is mapped self._context = context + if projection.parameters.has_initializers._get(context) and projection.parameters.value.initializer: + self.default_value = projection.parameters.value.initializer.get(context) + else: + self.default_value = projection.defaults.value + + # Get item of value corresponding to OutputPort that is Projection's sender # Note: this may not be the same as _port_idx if the sender Mechanism has OutputPorts for Projections # that are not in the current Composition diff --git a/tests/composition/test_emcomposition.py b/tests/composition/test_emcomposition.py index 5ff2582ab76..b42a8eab28b 100644 --- a/tests/composition/test_emcomposition.py +++ b/tests/composition/test_emcomposition.py @@ -194,13 +194,13 @@ def test_structure(self, assert isinstance(em.concatenate_keys_node, Mechanism) == concatenate_node if em.concatenate_keys: assert em.field_weight_nodes == [] - assert bool(softmax_gain in {None, CONTROL}) == bool(len(em.softmax_gain_control_nodes)) + assert bool(softmax_gain == CONTROL) == bool(len(em.softmax_gain_control_nodes)) else: if num_keys > 1: assert len(em.field_weight_nodes) == num_keys else: assert em.field_weight_nodes == [] - if softmax_gain in {None, CONTROL}: + if softmax_gain == CONTROL: assert len(em.softmax_gain_control_nodes) == num_keys else: assert em.softmax_gain_control_nodes == [] @@ -339,7 +339,7 @@ class TestExecution: @pytest.mark.parametrize('enable_learning', [False, True], ids=['no_learning','learning']) @pytest.mark.composition @pytest.mark.parametrize('exec_mode', [pnl.ExecutionMode.Python, pnl.ExecutionMode.PyTorch]) - def test_simple_execution_without_learning(self, + def test_simple_execution_witemhout_learning(self, exec_mode, enable_learning, test_num, @@ -384,6 +384,8 @@ def test_simple_execution_without_learning(self, params.update({'softmax_gain': softmax_gain}) if storage_prob is not None: params.update({'storage_prob': storage_prob}) + params.update({'softmax_threshold': None}) + # FIX: ADD TESTS FOR VALIDATION USING SOFTMAX_THRESHOLD em = EMComposition(**params) @@ -479,7 +481,7 @@ def test_multiple_trials_concatenation_and_storage_node(self, exec_mode, concate else: if exec_mode == pnl.ExecutionMode.Python: - # FIX: Not sure why Pyton mode reverses last two rows/entries (dict issue?) + # FIX: Not sure why Python mode reverses last two rows/entries (dict issue?) expected_memory = [[[0.15625, 0.3125, 0.46875], [0.171875, 0.328125, 0.484375]], [[400., 500., 600.], [444., 555., 666.]], [[25., 50., 75.], [27.75, 55.5, 83.25]], diff --git a/tests/composition/test_show_graph.py b/tests/composition/test_show_graph.py index 1ed03204e18..296f5964eed 100644 --- a/tests/composition/test_show_graph.py +++ b/tests/composition/test_show_graph.py @@ -760,7 +760,7 @@ def test_projections_from_nested_comp_to_ocm_or_obj_mech(self, show_graph_kwargs # OCM's outcome_input_port. # If the test fails in this condition, it could mean that the bug has been corrected. 
# The bug may be the same one as in eb61303808ad2a5ba46fdd18d0e583283397915c - raise (AssertionError,"FAILURE TO REPLICATE BUGGY SHOW_GRAPH OUTPUT -- SEE COMMENT IN TEST") + raise AssertionError("FAILURE TO REPLICATE BUGGY SHOW_GRAPH OUTPUT -- SEE COMMENT IN TEST") # elif ('show_node_structure' in show_graph_kwargs # and ('show_cim' in show_graph_kwargs # and show_graph_kwargs['show_cim'] is True) @@ -769,7 +769,7 @@ def test_projections_from_nested_comp_to_ocm_or_obj_mech(self, show_graph_kwargs # ): # pass else: - raise (AssertionError) + raise AssertionError # def test_show_graph_for_nested_composition_as_agent_rep(self): # """Note: this is the same as test_control/test_nested_composition_as_agent_rep but with show_graph()""" From eb695d3874b3357b3d5454a330404f0360ee72fe Mon Sep 17 00:00:00 2001 From: jdcpni Date: Thu, 25 Jul 2024 22:36:24 -0400 Subject: [PATCH 261/410] Patch/fix autodiff pytorch dependency (#3013) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [skip ci] • autodiffcomposition.py assign_ShowGraph: make assignment of PytorchShowGraph dependent on availability of pytorch * - --- psyneulink/library/compositions/autodiffcomposition.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/psyneulink/library/compositions/autodiffcomposition.py b/psyneulink/library/compositions/autodiffcomposition.py index 025bb11e7bf..a93e94f23f8 100644 --- a/psyneulink/library/compositions/autodiffcomposition.py +++ b/psyneulink/library/compositions/autodiffcomposition.py @@ -560,9 +560,13 @@ def __init__(self, # ShowGraph self.assign_ShowGraph(show_graph_attributes) def assign_ShowGraph(self, show_graph_attributes): - """Override to replace assignment of ShowGraph class with PytorchShowGraph""" + """Override to replace assignment of ShowGraph class with PytorchShowGraph if torch is available""" show_graph_attributes = show_graph_attributes or {} - self._show_graph = PytorchShowGraph(self, **show_graph_attributes) + if torch_available: + self._show_graph = PytorchShowGraph(self, **show_graph_attributes) + else: + from psyneulink.core.compositions.showgraph import ShowGraph + self._show_graph = ShowGraph(self, **show_graph_attributes) @handle_external_context() def infer_backpropagation_learning_pathways(self, execution_mode, context=None)->list: From 1bcb2b2524286950865f66e97ae56012c82ba49a Mon Sep 17 00:00:00 2001 From: jdcpni Date: Fri, 26 Jul 2024 12:10:19 -0400 Subject: [PATCH 262/410] =?UTF-8?q?=E2=80=A2=20port.py:=20(#3014)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit _parse_port_spec(): enhance error message for bad MECHANISM entry in modulatory param specification --- psyneulink/core/components/ports/port.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/psyneulink/core/components/ports/port.py b/psyneulink/core/components/ports/port.py index 80993bd44dd..1bfeb0a36a4 100644 --- a/psyneulink/core/components/ports/port.py +++ b/psyneulink/core/components/ports/port.py @@ -3273,8 +3273,15 @@ def _parse_port_spec(port_type=None, mech = port_specific_args[MECHANISM] if not isinstance(mech, Mechanism): - raise PortError(f"Value of the {MECHANISM} entry ('{mech.name}') in the specification dictionary " - f"for {port_type.__name__} of '{owner.name}' is not a {Mechanism.__name__}.") + entry_name = '' + from psyneulink.core.components.component import ParameterValue + if hasattr(mech, 'name'): + entry_name = f" ('{mech.name}')" + elif isinstance(mech, 
ParameterValue): + entry_name = f" ('{mech._parameter.name}' of '{mech._owner.name}')" + raise PortError(f"The type of the {MECHANISM} entry{entry_name} in the specification dictionary " + f"for {port_type.__name__} of '{owner.name}' is {type(mech).__name__}; " + f"it must be a {Mechanism.__name__}.") # For Ports with which the one being specified can connect: for PORTS in port_type.connectsWithAttribute: From 72bc42acbed613f8f41958497ca442e9a6aff56d Mon Sep 17 00:00:00 2001 From: jdcpni Date: Fri, 26 Jul 2024 13:40:14 -0400 Subject: [PATCH 263/410] Patch/lcamechanism/make matrix params nonmodulable (#3015) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * • lcamechanism.py: make matrix, auto, hetero and competition params non-modulable until that is implemented --- .../mechanisms/processing/transfer/lcamechanism.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/psyneulink/library/components/mechanisms/processing/transfer/lcamechanism.py b/psyneulink/library/components/mechanisms/processing/transfer/lcamechanism.py index 4b197406217..48e5292a9d7 100644 --- a/psyneulink/library/components/mechanisms/processing/transfer/lcamechanism.py +++ b/psyneulink/library/components/mechanisms/processing/transfer/lcamechanism.py @@ -392,7 +392,7 @@ class Parameters(RecurrentTransferMechanism.Parameters): matrix = Parameter( INVERSE_HOLLOW_MATRIX, - modulable=True, + modulable=False, getter=_recurrent_transfer_mechanism_matrix_getter, setter=_recurrent_transfer_mechanism_matrix_setter ) @@ -402,9 +402,9 @@ class Parameters(RecurrentTransferMechanism.Parameters): function_parameter_name='rate', aliases='leak' ) - auto = Parameter(0.0, modulable=True, aliases='self_excitation') - hetero = Parameter(-1.0, modulable=True) - competition = Parameter(1.0, modulable=True) + auto = Parameter(0.0, modulable=False, aliases='self_excitation') + hetero = Parameter(-1.0, modulable=False) + competition = Parameter(1.0, modulable=False) time_step_size = FunctionParameter(0.1, function_name='integrator_function') integrator_mode = Parameter(True, setter=_integrator_mode_setter, valid_types=bool) From 010dbc45d23b884ac24d77029ae30447a988c975 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 26 Jul 2024 21:05:12 -0400 Subject: [PATCH 264/410] requirements: update pytest requirement from <8.3.2 to <8.3.3 (#3012) Updates the requirements on [pytest](https://github.com/pytest-dev/pytest) to permit the latest version. - [Release notes](https://github.com/pytest-dev/pytest/releases) - [Changelog](https://github.com/pytest-dev/pytest/blob/main/CHANGELOG.rst) - [Commits](https://github.com/pytest-dev/pytest/compare/8.3.1...8.3.2) --- updated-dependencies: - dependency-name: pytest dependency-type: direct:development ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- dev_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev_requirements.txt b/dev_requirements.txt index 645d3b8eee6..afc8bbefd42 100644 --- a/dev_requirements.txt +++ b/dev_requirements.txt @@ -1,6 +1,6 @@ jupyter<1.0.1 packaging<25.0 -pytest<8.3.2 +pytest<8.3.3 pytest-benchmark<4.0.1 pytest-cov<5.0.1 pytest-forked<1.7.0 From b958a3fbb0ac7e323faa53a432597545cdd0315c Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Fri, 26 Jul 2024 21:25:48 -0400 Subject: [PATCH 265/410] llvm/builder_context: Remove used parameters workaround Add a comment explaining the purpose of the check. Signed-off-by: Jan Vesely --- psyneulink/core/llvm/builder_context.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/psyneulink/core/llvm/builder_context.py b/psyneulink/core/llvm/builder_context.py index a4dd418f6f7..cd41c2ad43e 100644 --- a/psyneulink/core/llvm/builder_context.py +++ b/psyneulink/core/llvm/builder_context.py @@ -349,6 +349,13 @@ def get_state_space(self, builder, component, state_ptr, param): return helpers.get_state_space(builder, component, state_ptr, param_name) def check_used_params(self, component, *, tags:frozenset): + """ + This function checks that parameters included in the compiled structures are used in compiled code. + + If the assertion in this function triggers the parameter name should be added to the parameter + block list in the Component class. + """ + # Skip the check if the parameter use is not tracked. Some components (like node wrappers) # don't even have parameters. if component not in self._component_state_use and component not in self._component_param_use: @@ -378,12 +385,6 @@ def check_used_params(self, component, *, tags:frozenset): if hasattr(component, 'evaluate_agent_rep'): used_param_ids.add('num_trials_per_estimate') - if hasattr(component, 'adapt_scale'): - used_param_ids.add('threshold') - used_param_ids.add('adapt_scale') - used_param_ids.add('adapt_base') - used_param_ids.add('adapt_entropy_weighting') - unused_param_ids = component_param_ids - used_param_ids - initializers unused_state_ids = component_state_ids - used_state_ids From f8d1f43cffd29ef4790e03209a5c0e4e4bf8b12c Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Fri, 26 Jul 2024 21:29:04 -0400 Subject: [PATCH 266/410] llvm: Rename node_wrapper -> node_assembly Avoid confusion with wrapped nodes used e.g. 
in pytorch Signed-off-by: Jan Vesely --- psyneulink/core/llvm/builder_context.py | 12 +++++----- psyneulink/core/llvm/codegen.py | 30 ++++++++++++------------- psyneulink/core/llvm/execution.py | 4 ++-- psyneulink/core/llvm/helpers.py | 6 ++--- 4 files changed, 26 insertions(+), 26 deletions(-) diff --git a/psyneulink/core/llvm/builder_context.py b/psyneulink/core/llvm/builder_context.py index cd41c2ad43e..2bc0c7b5fec 100644 --- a/psyneulink/core/llvm/builder_context.py +++ b/psyneulink/core/llvm/builder_context.py @@ -52,7 +52,7 @@ def module_count(): 'mt_rand_init', 'philox_rand_init')) -class _node_wrapper(): +class _node_assembly(): def __init__(self, composition, node): self._comp = weakref.proxy(composition) self._node = node @@ -61,7 +61,7 @@ def __repr__(self): return "Node wrapper for node '{}' in composition '{}'".format(self._node, self._comp) def _gen_llvm_function(self, *, ctx, tags:frozenset): - return codegen.gen_node_wrapper(ctx, self._comp, self._node, tags=tags) + return codegen.gen_node_assembly(ctx, self._comp, self._node, tags=tags) def _comp_cached(func): @functools.wraps(func) @@ -505,12 +505,12 @@ def get_data_struct_type(self, component): return ir.LiteralStructType([]) - def get_node_wrapper(self, composition, node): - cache = getattr(composition, '_wrapped_nodes', None) + def get_node_assembly(self, composition, node): + cache = getattr(composition, '_node_assemblies', None) if cache is None: cache = weakref.WeakKeyDictionary() - setattr(composition, '_wrapped_nodes', cache) - return cache.setdefault(node, _node_wrapper(composition, node)) + setattr(composition, '_node_assemblies', cache) + return cache.setdefault(node, _node_assembly(composition, node)) def convert_python_struct_to_llvm_ir(self, t): self._stats["types_converted"] += 1 diff --git a/psyneulink/core/llvm/codegen.py b/psyneulink/core/llvm/codegen.py index 16eca1c8ddb..df792ce5fe9 100644 --- a/psyneulink/core/llvm/codegen.py +++ b/psyneulink/core/llvm/codegen.py @@ -585,9 +585,9 @@ def find_max(builder, x): return res -def gen_node_wrapper(ctx, composition, node, *, tags:frozenset): - assert "node_wrapper" in tags - func_tags = tags.difference({"node_wrapper"}) +def gen_node_assembly(ctx, composition, node, *, tags:frozenset): + assert "node_assembly" in tags + func_tags = tags.difference({"node_assembly"}) node_function = ctx.import_llvm_function(node, tags=func_tags) # FIXME: This is a hack @@ -782,14 +782,14 @@ def _gen_composition_exec_context(ctx, composition, *, tags:frozenset, suffix="" params = builder.alloca(const_params.type, name="const_params_loc") builder.store(const_params, params) - node_tags = tags.union({"node_wrapper"}) + node_tags = tags.union({"node_assembly"}) # Call input CIM - input_cim_w = ctx.get_node_wrapper(composition, composition.input_CIM) + input_cim_w = ctx.get_node_assembly(composition, composition.input_CIM) input_cim_f = ctx.import_llvm_function(input_cim_w, tags=node_tags) builder.call(input_cim_f, [state, params, comp_in, data, data]) # Call parameter CIM - param_cim_w = ctx.get_node_wrapper(composition, composition.parameter_CIM) + param_cim_w = ctx.get_node_assembly(composition, composition.parameter_CIM) param_cim_f = ctx.import_llvm_function(param_cim_w, tags=node_tags) builder.call(param_cim_f, [state, params, comp_in, data, data]) @@ -803,7 +803,7 @@ def _gen_composition_exec_context(ctx, composition, *, tags:frozenset, suffix="" def gen_composition_exec(ctx, composition, *, tags:frozenset): simulation = "simulation" in tags - node_tags = 
tags.union({"node_wrapper"}) + node_tags = tags.union({"node_assembly"}) with _gen_composition_exec_context(ctx, composition, tags=tags) as (builder, data, params, cond_gen): state, _, comp_in, _, cond = builder.function.args @@ -823,7 +823,7 @@ def gen_composition_exec(ctx, composition, *, tags:frozenset): is_finished_callbacks = {} for node in composition.nodes: args = [state, params, comp_in, data, output_storage] - wrapper = ctx.get_node_wrapper(composition, node) + wrapper = ctx.get_node_assembly(composition, node) is_finished_callbacks[node] = (wrapper, args) @@ -851,14 +851,14 @@ def gen_composition_exec(ctx, composition, *, tags:frozenset): num_exec_locs, nodes_states) with builder.if_then(reinit_cond): - node_w = ctx.get_node_wrapper(composition, node) + node_w = ctx.get_node_assembly(composition, node) node_reinit_f = ctx.import_llvm_function(node_w, tags=node_tags.union({"reset"})) builder.call(node_reinit_f, [state, params, comp_in, data, data]) # Run controller if it's enabled in 'BEFORE' mode if simulation is False and composition.enable_controller and composition.controller_mode == BEFORE: assert composition.controller is not None - controller_w = ctx.get_node_wrapper(composition, composition.controller) + controller_w = ctx.get_node_assembly(composition, composition.controller) controller_f = ctx.import_llvm_function(controller_w, tags=node_tags) builder.call(controller_f, [state, params, comp_in, data, data]) @@ -929,7 +929,7 @@ def gen_composition_exec(ctx, composition, *, tags:frozenset): run_set_node_ptr = builder.gep(run_set_ptr, [zero, ctx.int32_ty(idx)]) node_cond = builder.load(run_set_node_ptr, name="node_" + node.name + "_should_run") with builder.if_then(node_cond): - node_w = ctx.get_node_wrapper(composition, node) + node_w = ctx.get_node_assembly(composition, node) node_f = ctx.import_llvm_function(node_w, tags=node_tags) builder.block.name = "invoke_" + node_f.name # Wrappers do proper indexing of all structures @@ -984,12 +984,12 @@ def gen_composition_exec(ctx, composition, *, tags:frozenset): if simulation is False and composition.enable_controller and \ composition.controller_mode == AFTER: assert composition.controller is not None - controller_w = ctx.get_node_wrapper(composition, composition.controller) + controller_w = ctx.get_node_assembly(composition, composition.controller) controller_f = ctx.import_llvm_function(controller_w, tags=node_tags) builder.call(controller_f, [state, params, comp_in, data, data]) # Call output CIM - output_cim_w = ctx.get_node_wrapper(composition, composition.output_CIM) + output_cim_w = ctx.get_node_assembly(composition, composition.output_CIM) output_cim_f = ctx.import_llvm_function(output_cim_w, tags=node_tags) builder.block.name = "invoke_" + output_cim_f.name builder.call(output_cim_f, [state, params, comp_in, data, data]) @@ -1180,9 +1180,9 @@ def gen_autodiffcomp_exec(ctx, composition, *, tags:frozenset): pytorch_func = ctx.import_llvm_function(pytorch_model, tags=tags) builder.call(pytorch_func, [state, params, data]) - node_tags = tags.union({"node_wrapper"}) + node_tags = tags.union({"node_assembly"}) # Call output CIM - output_cim_w = ctx.get_node_wrapper(composition, composition.output_CIM) + output_cim_w = ctx.get_node_assembly(composition, composition.output_CIM) output_cim_f = ctx.import_llvm_function(output_cim_w, tags=node_tags) builder.call(output_cim_f, [state, params, comp_in, data, data]) diff --git a/psyneulink/core/llvm/execution.py b/psyneulink/core/llvm/execution.py index 0d05164887d..de7d259ce95 
100644 --- a/psyneulink/core/llvm/execution.py +++ b/psyneulink/core/llvm/execution.py @@ -440,9 +440,9 @@ def _bin_func_multirun(self): def _set_bin_node(self, node): assert node in self._composition._all_nodes - wrapper = builder_context.LLVMBuilderContext.get_current().get_node_wrapper(self._composition, node) + wrapper = builder_context.LLVMBuilderContext.get_current().get_node_assembly(self._composition, node) self.__bin_func = pnlvm.LLVMBinaryFunction.from_obj( - wrapper, tags=self.__tags.union({"node_wrapper"})) + wrapper, tags=self.__tags.union({"node_assembly"})) @property def _conditions(self): diff --git a/psyneulink/core/llvm/helpers.py b/psyneulink/core/llvm/helpers.py index a7464fd7664..2eae0e69974 100644 --- a/psyneulink/core/llvm/helpers.py +++ b/psyneulink/core/llvm/helpers.py @@ -706,7 +706,7 @@ def generate_sched_condition(self, builder, condition, cond_ptr, node, # The first argument is the target node assert len(condition.args) == 1 target = is_finished_callbacks[condition.args[0]] - is_finished_f = self.ctx.import_llvm_function(target[0], tags=frozenset({"is_finished", "node_wrapper"})) + is_finished_f = self.ctx.import_llvm_function(target[0], tags=frozenset({"is_finished", "node_assembly"})) return builder.call(is_finished_f, target[1]) elif isinstance(condition, WhenFinishedAny): @@ -715,7 +715,7 @@ def generate_sched_condition(self, builder, condition, cond_ptr, node, run_cond = self.ctx.bool_ty(0) for node in condition.args: target = is_finished_callbacks[node] - is_finished_f = self.ctx.import_llvm_function(target[0], tags=frozenset({"is_finished", "node_wrapper"})) + is_finished_f = self.ctx.import_llvm_function(target[0], tags=frozenset({"is_finished", "node_assembly"})) node_is_finished = builder.call(is_finished_f, target[1]) run_cond = builder.or_(run_cond, node_is_finished) @@ -728,7 +728,7 @@ def generate_sched_condition(self, builder, condition, cond_ptr, node, run_cond = self.ctx.bool_ty(1) for node in condition.args: target = is_finished_callbacks[node] - is_finished_f = self.ctx.import_llvm_function(target[0], tags=frozenset({"is_finished", "node_wrapper"})) + is_finished_f = self.ctx.import_llvm_function(target[0], tags=frozenset({"is_finished", "node_assembly"})) node_is_finished = builder.call(is_finished_f, target[1]) run_cond = builder.and_(run_cond, node_is_finished) From 449e938489aeda6a5855c0861cc932dfd4b3c603 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Sun, 28 Jul 2024 00:38:25 -0400 Subject: [PATCH 267/410] llvm: Store only pointee types in byref_arg_types for pointer arguments (#3017) '_type_' special attribute stores pointee type for pointers [0], array element type for arrays[1], but string type representation for other types. Adjust builtin tests to not rely on byref_arg_types of non-pointer arguments. Cleanup codestyle in builtin tests. Use np.sum() instead of sum() to keep the result in the same precision as the input array. 
[0] https://docs.python.org/3/library/ctypes.html#ctypes._Pointer._type_ [1] https://docs.python.org/3/library/ctypes.html#ctypes.Array._type_ Signed-off-by: Jan Vesely --- psyneulink/core/llvm/__init__.py | 8 +++-- tests/llvm/test_builtins_intrinsics.py | 11 +++++- tests/llvm/test_builtins_matrix.py | 35 +++++++++--------- tests/llvm/test_builtins_vector.py | 50 +++++++++++++++----------- 4 files changed, 63 insertions(+), 41 deletions(-) diff --git a/psyneulink/core/llvm/__init__.py b/psyneulink/core/llvm/__init__.py index bd931413e9b..28200bc0ca5 100644 --- a/psyneulink/core/llvm/__init__.py +++ b/psyneulink/core/llvm/__init__.py @@ -143,16 +143,18 @@ def __init__(self, name: str): # Create ctype function instance start = time.perf_counter() return_type = _convert_llvm_ir_to_ctype(f.return_value.type) - params = [_convert_llvm_ir_to_ctype(a.type) for a in f.args] + args = [_convert_llvm_ir_to_ctype(a.type) for a in f.args] middle = time.perf_counter() - self.__c_func_type = ctypes.CFUNCTYPE(return_type, *params) + self.__c_func_type = ctypes.CFUNCTYPE(return_type, *args) finish = time.perf_counter() if "time_stat" in debug_env: print("Time to create ctype function '{}': {} ({} to create types)".format( name, finish - start, middle - start)) - self.byref_arg_types = [p._type_ for p in params] + # '_type_' special attribute stores pointee type for pointers + # https://docs.python.org/3/library/ctypes.html#ctypes._Pointer._type_ + self.byref_arg_types = [a._type_ if hasattr(a, "contents") else None for a in args] @property def c_func(self): diff --git a/tests/llvm/test_builtins_intrinsics.py b/tests/llvm/test_builtins_intrinsics.py index fae99faa520..a6416b205c9 100644 --- a/tests/llvm/test_builtins_intrinsics.py +++ b/tests/llvm/test_builtins_intrinsics.py @@ -32,8 +32,10 @@ def test_builtin_op(benchmark, op, args, builtin, result, func_mode): if func_mode == 'Python': f = op + elif func_mode == 'LLVM': f = pnlvm.LLVMBinaryFunction.get(builtin) + elif func_mode == 'PTX': wrap_name = builtin + "_test_wrapper" with pnlvm.LLVMBuilderContext.get_current() as ctx: @@ -47,12 +49,19 @@ def test_builtin_op(benchmark, op, args, builtin, result, func_mode): builder.ret_void() bin_f = pnlvm.LLVMBinaryFunction.get(wrap_name) - dty = np.dtype(bin_f.byref_arg_types[0]) + + # The result argument is a pointer, use it to derive + # the right argument type + assert bin_f.byref_arg_types[-1] is not None + dty = np.dtype(bin_f.byref_arg_types[-1]) + ptx_res = np.empty_like(result, dtype=dty) ptx_res_arg = pnlvm.jit_engine.pycuda.driver.Out(ptx_res) + def f(*a): bin_f.cuda_call(*(dty.type(p) for p in a), ptx_res_arg) return ptx_res + res = benchmark(f, *args) if pytest.helpers.llvm_current_fp_precision() == 'fp32': diff --git a/tests/llvm/test_builtins_matrix.py b/tests/llvm/test_builtins_matrix.py index e338f80bed3..1f1ca2f57a1 100644 --- a/tests/llvm/test_builtins_matrix.py +++ b/tests/llvm/test_builtins_matrix.py @@ -39,9 +39,6 @@ def _get_const_dim_func(builtin, *dims): builtin = ctx.import_llvm_function(builtin) pointer_arg_types = [a for a in builtin.type.pointee.args if pnlvm.helpers.is_pointer(a)] - func_ty = ir.FunctionType(ir.VoidType(), pointer_arg_types) - - # Create square vector matrix multiply function = ir.Function(ctx.module, builtin.type.pointee, name=custom_name) const_dims = (ctx.int32_ty(d) for d in dims) @@ -65,6 +62,21 @@ def _get_const_dim_func(builtin, *dims): ], ids=["ADD", "SUB", "MUL", "ADDS", "MULS", "DOT", "TRANS DOT"]) @pytest.mark.parametrize("dims", [(DIM_X, DIM_Y), (0, 0)], 
ids=["VAR-DIM", "CONST-DIM"]) def test_matrix_op(benchmark, op, x, y, builtin, result, func_mode, dims): + + def _numpy_args(bin_f): + dty = np.dtype(bin_f.byref_arg_types[0]) + + # non-pointer arguments have None is the respective byref_arg_types position + if bin_f.byref_arg_types[1] is not None: + assert dty == np.dtype(bin_f.byref_arg_types[1]) + assert dty == np.dtype(bin_f.byref_arg_types[4]) + + lx = x.astype(dty) + ly = dty.type(y) if np.isscalar(y) else y.astype(dty) + lres = np.empty_like(result, dtype=dty) + + return lx, ly, lres + if func_mode == 'Python': def ex(): return op(x, y) @@ -76,13 +88,7 @@ def ex(): func_name = builtin bin_f = pnlvm.LLVMBinaryFunction.get(func_name) - dty = np.dtype(bin_f.byref_arg_types[0]) - assert dty == np.dtype(bin_f.byref_arg_types[1]) - assert dty == np.dtype(bin_f.byref_arg_types[4]) - - lx = x.astype(dty) - ly = dty.type(y) if np.isscalar(y) else y.astype(dty) - lres = np.empty_like(result, dtype=dty) + lx, ly, lres = _numpy_args(bin_f) ct_x = lx.ctypes.data_as(bin_f.c_func.argtypes[0]) ct_y = ly if np.isscalar(ly) else ly.ctypes.data_as(bin_f.c_func.argtypes[1]) @@ -99,17 +105,12 @@ def ex(): func_name = builtin bin_f = pnlvm.LLVMBinaryFunction.get(func_name) - dty = np.dtype(bin_f.byref_arg_types[0]) - assert dty == np.dtype(bin_f.byref_arg_types[1]) - assert dty == np.dtype(bin_f.byref_arg_types[4]) - - lx = x.astype(dty) - ly = dty.type(y) if np.isscalar(y) else y.astype(dty) - lres = np.empty_like(result, dtype=dty) + lx, ly, lres = _numpy_args(bin_f) cuda_x = pnlvm.jit_engine.pycuda.driver.In(lx) cuda_y = ly if np.isscalar(ly) else pnlvm.jit_engine.pycuda.driver.In(ly) cuda_res = pnlvm.jit_engine.pycuda.driver.Out(lres) + def ex(): bin_f.cuda_call(cuda_x, cuda_y, np.int32(dims[0]), np.int32(dims[1]), cuda_res) return lres diff --git a/tests/llvm/test_builtins_vector.py b/tests/llvm/test_builtins_vector.py index d840b7acba8..9e4d3e83572 100644 --- a/tests/llvm/test_builtins_vector.py +++ b/tests/llvm/test_builtins_vector.py @@ -6,6 +6,7 @@ DIM_X=1500 + # These are just basic tests to check that vector indexing and operations # work correctly when compiled. The values don't matter much. # Might as well make them representable in fp32 for single precision testing. 
@@ -13,13 +14,11 @@ v = np.random.rand(DIM_X).astype(np.float32).astype(np.float64) scalar = np.random.rand() - add_res = np.add(u, v) sub_res = np.subtract(u, v) mul_res = np.multiply(u, v) smul_res = np.multiply(u, scalar) - @pytest.mark.benchmark(group="Hadamard") @pytest.mark.parametrize("op, v, builtin, result", [ (np.add, v, "__pnl_builtin_vec_add", add_res), @@ -28,19 +27,29 @@ (np.multiply, scalar, "__pnl_builtin_vec_scalar_mult", smul_res), ], ids=["ADD", "SUB", "MUL", "SMUL"]) def test_vector_op(benchmark, op, v, builtin, result, func_mode): - if func_mode == 'Python': - def ex(): - return op(u, v) - elif func_mode == 'LLVM': - bin_f = pnlvm.LLVMBinaryFunction.get(builtin) + + def _numpy_args(bin_f): dty = np.dtype(bin_f.byref_arg_types[0]) - assert dty == np.dtype(bin_f.byref_arg_types[1]) + + # non-pointer arguments have None is the respective byref_arg_types position + if bin_f.byref_arg_types[1] is not None: + assert dty == np.dtype(bin_f.byref_arg_types[1]) assert dty == np.dtype(bin_f.byref_arg_types[3]) lu = u.astype(dty) lv = dty.type(v) if np.isscalar(v) else v.astype(dty) lres = np.empty_like(lu) + return lu, lv, lres + + if func_mode == 'Python': + def ex(): + return op(u, v) + + elif func_mode == 'LLVM': + bin_f = pnlvm.LLVMBinaryFunction.get(builtin) + lu, lv, lres = _numpy_args(bin_f) + ct_u = lu.ctypes.data_as(bin_f.c_func.argtypes[0]) ct_v = lv if np.isscalar(lv) else lv.ctypes.data_as(bin_f.c_func.argtypes[1]) ct_res = lres.ctypes.data_as(bin_f.c_func.argtypes[3]) @@ -51,17 +60,12 @@ def ex(): elif func_mode == 'PTX': bin_f = pnlvm.LLVMBinaryFunction.get(builtin) - dty = np.dtype(bin_f.byref_arg_types[0]) - assert dty == np.dtype(bin_f.byref_arg_types[1]) - assert dty == np.dtype(bin_f.byref_arg_types[3]) - - lu = u.astype(dty) - lv = dty.type(v) if np.isscalar(v) else v.astype(dty) - lres = np.empty_like(lu) + lu, lv, lres = _numpy_args(bin_f) cuda_u = pnlvm.jit_engine.pycuda.driver.In(lu) cuda_v = lv if np.isscalar(lv) else pnlvm.jit_engine.pycuda.driver.In(lv) cuda_res = pnlvm.jit_engine.pycuda.driver.Out(lres) + def ex(): bin_f.cuda_call(cuda_u, cuda_v, np.int32(DIM_X), cuda_res) return lres @@ -72,30 +76,36 @@ def ex(): @pytest.mark.benchmark(group="Sum") def test_vector_sum(benchmark, func_mode): + if func_mode == 'Python': def ex(): return np.sum(u) + elif func_mode == 'LLVM': bin_f = pnlvm.LLVMBinaryFunction.get("__pnl_builtin_vec_sum") lu = u.astype(np.dtype(bin_f.byref_arg_types[0])) - llvm_res = np.empty(1, dtype=lu.dtype) + lres = np.empty(1, dtype=lu.dtype) ct_u = lu.ctypes.data_as(bin_f.c_func.argtypes[0]) - ct_res = llvm_res.ctypes.data_as(bin_f.c_func.argtypes[2]) + ct_res = lres.ctypes.data_as(bin_f.c_func.argtypes[2]) def ex(): bin_f(ct_u, DIM_X, ct_res) - return llvm_res[0] + return lres[0] + elif func_mode == 'PTX': bin_f = pnlvm.LLVMBinaryFunction.get("__pnl_builtin_vec_sum") + lu = u.astype(np.dtype(bin_f.byref_arg_types[0])) - cuda_u = pnlvm.jit_engine.pycuda.driver.In(lu) res = np.empty(1, dtype=lu.dtype) + + cuda_u = pnlvm.jit_engine.pycuda.driver.In(lu) cuda_res = pnlvm.jit_engine.pycuda.driver.Out(res) + def ex(): bin_f.cuda_call(cuda_u, np.int32(DIM_X), cuda_res) return res[0] res = benchmark(ex) - np.testing.assert_allclose(res, sum(u)) + np.testing.assert_allclose(res, np.sum(u)) From 1af9428234b25b6b30ce3a835a757ea02990671e Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Sun, 28 Jul 2024 11:33:09 -0400 Subject: [PATCH 268/410] requirements: Set minimum scipy version to 1.7.3 scipy==1.7.3 is the latest version that still works on 
Python3.7 Signed-off-by: Jan Vesely --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index dc485bfd0e2..91d9df4dd0c 100644 --- a/requirements.txt +++ b/requirements.txt @@ -17,6 +17,6 @@ pillow<10.5.0 pint<0.22.0 protobuf<3.20.4 rich>=10.1, <10.13 -scipy<1.12 +scipy>=1.7.3, <1.12 toposort<1.11 torch>=1.10.0, <2.4.0; (platform_machine == 'AMD64' or platform_machine == 'x86_64' or platform_machine == 'arm64' or platform_machine == 'aarch64') and platform_python_implementation == 'CPython' and implementation_name == 'cpython' From bd25f50459f8a2daed3d83bcc94853f21ca46390 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Sun, 28 Jul 2024 12:45:57 -0400 Subject: [PATCH 269/410] tests/pec/ddm: Adjust expected result based on scipy version SciPy changed their implementation of differential evolution and the way it selects samples in 1.12 [0,1], and then again in 1.14 [2,3], leading to slightly different results. Fix typo. Rename 'result' -> 'expected_result'. Remove unused local variables. [0] https://docs.scipy.org/doc/scipy/release/1.12.0-notes.html#scipy-optimize-improvements [1] https://github.com/scipy/scipy/pull/18496 [2] https://docs.scipy.org/doc/scipy/release/1.14.0-notes.html#scipy-optimize-improvements [3] https://github.com/scipy/scipy/pull/20677 Signed-off-by: Jan Vesely fixup test --- .../test_parameterestimationcomposition.py | 34 ++++++++++++++----- 1 file changed, 25 insertions(+), 9 deletions(-) diff --git a/tests/composition/test_parameterestimationcomposition.py b/tests/composition/test_parameterestimationcomposition.py index bf3a8c3138b..12c39a64a3a 100644 --- a/tests/composition/test_parameterestimationcomposition.py +++ b/tests/composition/test_parameterestimationcomposition.py @@ -1,7 +1,10 @@ import numpy as np +import optuna import pandas as pd import pytest -import optuna +import scipy + +from packaging import version as pversion import psyneulink as pnl @@ -125,17 +128,32 @@ def test_pec_run_input_formats(inputs_dict, error_msg): pec.run(inputs=inputs_dict) +# SciPy changed their implementation of differential evolution and the way it selects +# samples to evaluate in 1.12 [0,1], and then again in 1.14 [2,3], leading to slightly +# different results +# +# [0] https://docs.scipy.org/doc/scipy/release/1.12.0-notes.html#scipy-optimize-improvements +# [1] https://github.com/scipy/scipy/pull/18496 +# [2] https://docs.scipy.org/doc/scipy/release/1.14.0-notes.html#scipy-optimize-improvements +# [3] https://github.com/scipy/scipy/pull/20677 +if pversion.parse(scipy.version.version) >= pversion.parse('1.14.0'): + expected_differential_evolution = [0.010113000942356953] +elif pversion.parse(scipy.version.version) >= pversion.parse('1.12.0'): + expected_differential_evolution = [0.010074123395259815] +else: + expected_differential_evolution = [0.010363518438648106] + @pytest.mark.composition @pytest.mark.parametrize( - "opt_method, result", + "opt_method, expected_result", [ - ("differential_evolution", [0.010363518438648106]), + ("differential_evolution", expected_differential_evolution), (optuna.samplers.RandomSampler(seed=0), [0.01]), (optuna.samplers.CmaEsSampler(seed=0), [0.01]), ], - ids=["differential_evolultion", "optuna_random_sampler", "optuna_cmaes_sampler"], + ids=["differential_evolution", "optuna_random_sampler", "optuna_cmaes_sampler"], ) -def test_parameter_optimization_ddm(func_mode, opt_method, result): +def test_parameter_optimization_ddm(func_mode, opt_method, expected_result): """Test 
parameter optimization of a DDM in integrator mode""" if func_mode == "Python": @@ -210,11 +228,9 @@ def reward_rate(sim_data): trial_inputs[0] = np.abs(trial_inputs[0]) trial_inputs[-1] = np.abs(trial_inputs[-1]) - inputs_dict = {decision: trial_inputs} - - ret = pec.run(inputs={comp: trial_inputs}) + pec.run(inputs={comp: trial_inputs}) - np.testing.assert_allclose(pec.optimized_parameter_values, result) + np.testing.assert_allclose(pec.optimized_parameter_values, expected_result) # func_mode is a hacky wa to get properly marked; Python, LLVM, and CUDA From 8fc0dafe1e659022087c7ece5c906a6ea928a04c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 15 Jul 2024 22:24:55 +0000 Subject: [PATCH 270/410] requirements: update scipy requirement from <1.12 to <1.14 Updates the requirements on [scipy](https://github.com/scipy/scipy) to permit the latest version. - [Release notes](https://github.com/scipy/scipy/releases) - [Commits](https://github.com/scipy/scipy/compare/v1.11.6...v1.13.1) --- updated-dependencies: - dependency-name: scipy dependency-type: direct:production ... Signed-off-by: dependabot[bot] --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 91d9df4dd0c..8353ae1795d 100644 --- a/requirements.txt +++ b/requirements.txt @@ -17,6 +17,6 @@ pillow<10.5.0 pint<0.22.0 protobuf<3.20.4 rich>=10.1, <10.13 -scipy>=1.7.3, <1.12 +scipy>=1.7.3, <1.14 toposort<1.11 torch>=1.10.0, <2.4.0; (platform_machine == 'AMD64' or platform_machine == 'x86_64' or platform_machine == 'arm64' or platform_machine == 'aarch64') and platform_python_implementation == 'CPython' and implementation_name == 'cpython' From fa244c4968ee34b7bb365e680622165a50ad089c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 15 Jul 2024 22:24:55 +0000 Subject: [PATCH 271/410] requirements: update scipy requirement from <1.14 to <1.15 Updates the requirements on [scipy](https://github.com/scipy/scipy) to permit the latest version. - [Release notes](https://github.com/scipy/scipy/releases) - [Commits](https://github.com/scipy/scipy/compare/v0.13.1...v1.14.0) --- updated-dependencies: - dependency-name: scipy dependency-type: direct:production ... Signed-off-by: dependabot[bot] --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 8353ae1795d..8966c89ce8d 100644 --- a/requirements.txt +++ b/requirements.txt @@ -17,6 +17,6 @@ pillow<10.5.0 pint<0.22.0 protobuf<3.20.4 rich>=10.1, <10.13 -scipy>=1.7.3, <1.14 +scipy>=1.7.3, <1.15 toposort<1.11 torch>=1.10.0, <2.4.0; (platform_machine == 'AMD64' or platform_machine == 'x86_64' or platform_machine == 'arm64' or platform_machine == 'aarch64') and platform_python_implementation == 'CPython' and implementation_name == 'cpython' From a86bd45591e1654b43a996d09ffe184a6bb27a3f Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Mon, 22 Jul 2024 23:31:00 -0400 Subject: [PATCH 272/410] llvm: Construct numpy dtypes of compiled argument structures Use indirect access to numpy dtypes (e.g. "np.float64().dtype"), direct access via np.dtypes is only available in numpy 1.25+ [0]. Use 'align=True' to match offsets used by LLVM and ctypes. 
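A small stand-alone sketch of the two points above; the structure below is hypothetical and only meant to show the alignment behaviour:

    import ctypes
    import numpy as np

    # Indirect dtype access works on older numpy releases as well;
    # direct access via np.dtypes needs numpy 1.25+.
    assert np.float64().dtype == np.dtype(np.float64)

    # 'align=True' makes numpy insert the same padding as ctypes, so field
    # offsets and the total size match the C/LLVM struct layout.
    class Example(ctypes.Structure):
        _fields_ = [("a", ctypes.c_uint32), ("b", ctypes.c_double)]

    aligned = np.dtype([("a", np.uint32), ("b", np.float64)], align=True)
    packed = np.dtype([("a", np.uint32), ("b", np.float64)])

    assert aligned.itemsize == ctypes.sizeof(Example)  # e.g. 16 on x86_64
    assert packed.itemsize == 4 + 8                    # no padding inserted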
[0] https://numpy.org/doc/stable/reference/routines.dtypes.html Signed-off-by: Jan Vesely --- psyneulink/core/llvm/__init__.py | 4 +- psyneulink/core/llvm/builder_context.py | 50 +++++++++++++++++++++++++ setup.cfg | 1 + 3 files changed, 54 insertions(+), 1 deletion(-) diff --git a/psyneulink/core/llvm/__init__.py b/psyneulink/core/llvm/__init__.py index 28200bc0ca5..680f558ef4f 100644 --- a/psyneulink/core/llvm/__init__.py +++ b/psyneulink/core/llvm/__init__.py @@ -23,7 +23,7 @@ from . import codegen from .builder_context import * -from .builder_context import _all_modules, _convert_llvm_ir_to_ctype +from .builder_context import _all_modules, _convert_llvm_ir_to_ctype, _convert_llvm_ir_to_dtype from .debug import debug_env from .execution import * from .execution import _tupleize @@ -156,6 +156,8 @@ def __init__(self, name: str): # https://docs.python.org/3/library/ctypes.html#ctypes._Pointer._type_ self.byref_arg_types = [a._type_ if hasattr(a, "contents") else None for a in args] + self.np_params = [_convert_llvm_ir_to_dtype(getattr(a.type, "pointee", a.type)) for a in f.args] + @property def c_func(self): if self.__c_func is None: diff --git a/psyneulink/core/llvm/builder_context.py b/psyneulink/core/llvm/builder_context.py index 2bc0c7b5fec..4d8301e59a6 100644 --- a/psyneulink/core/llvm/builder_context.py +++ b/psyneulink/core/llvm/builder_context.py @@ -766,3 +766,53 @@ def _convert_llvm_ir_to_ctype(t: ir.Type): assert False, "Don't know how to convert LLVM type: {}".format(t) return ret_t + +@functools.lru_cache(maxsize=16) +def _convert_llvm_ir_to_dtype(t: ir.Type): + + if isinstance(t, ir.IntType): + if t.width == 8: + return np.uint8().dtype + + elif t.width == 16: + return np.uint16().dtype + + elif t.width == 32: + return np.uint32().dtype + + elif t.width == 64: + return np.uint64().dtype + + else: + assert False, "Unsupported integer type: {}".format(type(t)) + + elif isinstance(t, ir.DoubleType): + return np.float64().dtype + + elif isinstance(t, ir.FloatType): + return np.float32().dtype + + elif isinstance(t, ir.HalfType): + return np.float16().dtype + + elif isinstance(t, ir.ArrayType): + element_type = _convert_llvm_ir_to_dtype(t.element) + + # Create multidimensional array instead of nesting + if element_type.subdtype is not None: + element_type, shape = element_type.subdtype + else: + shape = () + + ret_t = np.dtype((element_type, (len(t),) + shape)) + + elif isinstance(t, ir.LiteralStructType): + field_list = [] + for i, e in enumerate(t.elements): + field_list.append(("field_" + str(i), _convert_llvm_ir_to_dtype(e))) + + ret_t = np.dtype(field_list, align=True) + else: + assert False, "Don't know how to convert LLVM type to dtype: {}".format(t) + + return ret_t diff --git a/setup.cfg b/setup.cfg index ffc15d5cfb9..be65d28a089 100644 --- a/setup.cfg +++ b/setup.cfg @@ -71,6 +71,7 @@ filterwarnings = error:Creating an ndarray from ragged nested sequences \(which is a list-or-tuple of lists-or-tuples-or ndarrays with different lengths or shapes\) is deprecated.*:numpy.VisibleDeprecationWarning error:Invalid escape sequence error:the matrix subclass is not the recommended way to represent matrices or deal with linear algebra + error:Passing (type, 1) or '1type' as a synonym of type is deprecated [pycodestyle] # for code explanation see https://pep8.readthedocs.io/en/latest/intro.html#error-codes From e670faa4fa1d566dcac10acf5c4cbe32856acb3b Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Sun, 28 Jul 2024 00:10:25 -0400 Subject: [PATCH 273/410] llvm/execution: Use 
converted dtype to construct compiled numpy structures Signed-off-by: Jan Vesely --- psyneulink/core/llvm/execution.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/psyneulink/core/llvm/execution.py b/psyneulink/core/llvm/execution.py index de7d259ce95..687645f82ca 100644 --- a/psyneulink/core/llvm/execution.py +++ b/psyneulink/core/llvm/execution.py @@ -100,7 +100,9 @@ def _get_compilation_param(self, name, init_method, arg): struct = struct_ty(*initializer) struct_end = time.time() - numpy_struct = np.ctypeslib.as_array(struct) + # numpy "frombuffer" creates a shared memory view of the provided buffer + numpy_struct = np.frombuffer(struct, dtype=self._bin_func.np_params[arg], count=len(self._execution_contexts)) + assert numpy_struct.nbytes == ctypes.sizeof(struct), \ "Size mismatch ({}), numpy: {} vs. ctypes:{}".format(name, numpy_struct.nbytes, ctypes.sizeof(struct)) @@ -120,6 +122,8 @@ def _get_compilation_param(self, name, init_method, arg): if len(self._execution_contexts) == 1: + numpy_struct.shape = () + if name == '_state': self._copy_params_to_pnl(self._execution_contexts[0], self._obj, From 0882097ec68dd8431295ac979193fd85b543436f Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Sat, 29 Jun 2024 12:15:20 -0400 Subject: [PATCH 274/410] setup: Convert PEP3118 size mismatch warnings to errors Signed-off-by: Jan Vesely --- setup.cfg | 1 + 1 file changed, 1 insertion(+) diff --git a/setup.cfg b/setup.cfg index be65d28a089..911094866c0 100644 --- a/setup.cfg +++ b/setup.cfg @@ -72,6 +72,7 @@ filterwarnings = error:Invalid escape sequence error:the matrix subclass is not the recommended way to represent matrices or deal with linear algebra error:Passing (type, 1) or '1type' as a synonym of type is deprecated + error:A builtin ctypes object gave a PEP3118:RuntimeWarning [pycodestyle] # for code explanation see https://pep8.readthedocs.io/en/latest/intro.html#error-codes From 9ba801935d940e681130885c3fddbdd359061e94 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Sun, 28 Jul 2024 17:22:42 -0400 Subject: [PATCH 275/410] llvm/builder_context: Merge conversion of lists and tuples Signed-off-by: Jan Vesely --- psyneulink/core/llvm/builder_context.py | 21 +++++++++++++-------- 1 file changed, 13 insertions(+), 8 deletions(-) diff --git a/psyneulink/core/llvm/builder_context.py b/psyneulink/core/llvm/builder_context.py index 4d8301e59a6..edc77fddad9 100644 --- a/psyneulink/core/llvm/builder_context.py +++ b/psyneulink/core/llvm/builder_context.py @@ -516,27 +516,26 @@ def convert_python_struct_to_llvm_ir(self, t): self._stats["types_converted"] += 1 if t is None: return ir.LiteralStructType([]) - elif type(t) is list: - if len(t) == 0: - return ir.LiteralStructType([]) - elems_t = [self.convert_python_struct_to_llvm_ir(x) for x in t] - if all(x == elems_t[0] for x in elems_t): - return ir.ArrayType(elems_t[0], len(elems_t)) - return ir.LiteralStructType(elems_t) - elif type(t) is tuple: + + elif isinstance(t, (list, tuple)): elems_t = [self.convert_python_struct_to_llvm_ir(x) for x in t] if len(elems_t) > 0 and all(x == elems_t[0] for x in elems_t): return ir.ArrayType(elems_t[0], len(elems_t)) + return ir.LiteralStructType(elems_t) + elif isinstance(t, enum.Enum): # FIXME: Consider enums of non-int type assert all(round(x.value) == x.value for x in type(t)) return self.int32_ty + elif isinstance(t, (int, float, np.floating)): return self.float_ty + elif isinstance(t, np.integer): # Python 'int' is handled above as it is the default type for '0' return 
ir.IntType(t.nbytes * 8) + elif isinstance(t, np.ndarray): # 0d uint32 values were likely created from enums (above) and are # observed here after compilation sync. @@ -544,18 +543,24 @@ def convert_python_struct_to_llvm_ir(self, t): if t.ndim == 0 and t.dtype == np.uint32: return self.convert_python_struct_to_llvm_ir(t.reshape(1)[0]) return self.convert_python_struct_to_llvm_ir(t.tolist()) + elif isinstance(t, np.random.RandomState): return pnlvm.builtins.get_mersenne_twister_state_struct(self) + elif isinstance(t, np.random.Generator): assert isinstance(t.bit_generator, np.random.Philox) return pnlvm.builtins.get_philox_state_struct(self) + elif isinstance(t, Time): return ir.ArrayType(self.int32_ty, len(TimeScale)) + elif isinstance(t, SampleIterator): if isinstance(t.generator, list): return ir.ArrayType(self.float_ty, len(t.generator)) + # Generic iterator is {start, increment, count} return ir.LiteralStructType((self.float_ty, self.float_ty, self.int32_ty)) + assert False, "Don't know how to convert {}".format(type(t)) From 3dd18bf540e39802cf890a1038b6a70577f95989 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Sun, 28 Jul 2024 21:22:00 -0400 Subject: [PATCH 276/410] llvm/LLVMBinaryFunction: Allow setting argument types to numpy ndpointer Split subdtype into base type and shape. Numpy can't instantiate subdtype arrays, so such ndpointers checks would not pass. Signed-off-by: Jan Vesely --- psyneulink/core/llvm/__init__.py | 30 +++++++++++++++++------------- 1 file changed, 17 insertions(+), 13 deletions(-) diff --git a/psyneulink/core/llvm/__init__.py b/psyneulink/core/llvm/__init__.py index 680f558ef4f..4f663d7bef7 100644 --- a/psyneulink/core/llvm/__init__.py +++ b/psyneulink/core/llvm/__init__.py @@ -123,7 +123,7 @@ def _llvm_build(target_generation=_binary_generation + 1): class LLVMBinaryFunction: - def __init__(self, name: str): + def __init__(self, name: str, *, numpy_args=()): self.name = name self.__c_func = None @@ -144,6 +144,16 @@ def __init__(self, name: str): start = time.perf_counter() return_type = _convert_llvm_ir_to_ctype(f.return_value.type) args = [_convert_llvm_ir_to_ctype(a.type) for a in f.args] + + # '_type_' special attribute stores pointee type for pointers + # https://docs.python.org/3/library/ctypes.html#ctypes._Pointer._type_ + self.byref_arg_types = [a._type_ if hasattr(a, "contents") else None for a in args] + self.np_params = [_convert_llvm_ir_to_dtype(getattr(a.type, "pointee", a.type)) for a in f.args] + + for a in numpy_args: + assert self.byref_arg_types[a] is not None + args[a] = np.ctypeslib.ndpointer(dtype=self.np_params[a].base, shape=self.np_params[a].shape) + middle = time.perf_counter() self.__c_func_type = ctypes.CFUNCTYPE(return_type, *args) finish = time.perf_counter() @@ -152,12 +162,6 @@ def __init__(self, name: str): print("Time to create ctype function '{}': {} ({} to create types)".format( name, finish - start, middle - start)) - # '_type_' special attribute stores pointee type for pointers - # https://docs.python.org/3/library/ctypes.html#ctypes._Pointer._type_ - self.byref_arg_types = [a._type_ if hasattr(a, "contents") else None for a in args] - - self.np_params = [_convert_llvm_ir_to_dtype(getattr(a.type, "pointee", a.type)) for a in f.args] - @property def c_func(self): if self.__c_func is None: @@ -224,16 +228,16 @@ def cuda_wrap_call(self, *args, **kwargs): @staticmethod @functools.lru_cache(maxsize=32) - def from_obj(obj, *, tags:frozenset=frozenset()): + def from_obj(obj, *, tags:frozenset=frozenset(), numpy_args:tuple=()): 
name = LLVMBuilderContext.get_current().gen_llvm_function(obj, tags=tags).name - return LLVMBinaryFunction.get(name) + return LLVMBinaryFunction.get(name, numpy_args=numpy_args) @staticmethod @functools.lru_cache(maxsize=32) - def get(name: str): - return LLVMBinaryFunction(name) + def get(name: str, *, numpy_args:tuple=()): + return LLVMBinaryFunction(name, numpy_args=numpy_args) - def get_multi_run(self): + def get_multi_run(self, *, numpy_args=()): try: multirun_llvm = _find_llvm_function(self.name + "_multirun") except ValueError: @@ -241,7 +245,7 @@ def get_multi_run(self): with LLVMBuilderContext.get_current() as ctx: multirun_llvm = codegen.gen_multirun_wrapper(ctx, function) - return LLVMBinaryFunction.get(multirun_llvm.name) + return LLVMBinaryFunction.get(multirun_llvm.name, numpy_args=numpy_args) _cpu_engine = None From e7db2469e7485d6eec18ce40673ea9ba69872d6b Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Sun, 28 Jul 2024 21:39:00 -0400 Subject: [PATCH 277/410] llvm/execution: Use numpy structures for param, state and data arguments In single context executions. Signed-off-by: Jan Vesely --- psyneulink/core/llvm/execution.py | 64 +++++++++++++++---------------- 1 file changed, 31 insertions(+), 33 deletions(-) diff --git a/psyneulink/core/llvm/execution.py b/psyneulink/core/llvm/execution.py index 687645f82ca..bad6f2570eb 100644 --- a/psyneulink/core/llvm/execution.py +++ b/psyneulink/core/llvm/execution.py @@ -307,7 +307,8 @@ class FuncExecution(CUDAExecution): def __init__(self, component, execution_ids=[None], *, tags=frozenset()): super().__init__() - self._bin_func = pnlvm.LLVMBinaryFunction.from_obj(component, tags=tags) + + self._bin_func = pnlvm.LLVMBinaryFunction.from_obj(component, tags=tags, numpy_args=(0, 1)) self._execution_contexts = [ Context(execution_id=eid) for eid in execution_ids ] @@ -344,20 +345,18 @@ def _state_struct(self): def execute(self, variable): # Make sure function inputs are 2d. # Mechanism inputs are already 3d so the first part is nop. 
- new_variable = np.asfarray(np.atleast_2d(variable), - dtype=self._vi_dty) + new_variable = np.asfarray(np.atleast_2d(variable), dtype=self._vi_dty) ct_vi = np.ctypeslib.as_ctypes(new_variable) if len(self._execution_contexts) > 1: - # wrap_call casts the arguments so we only need contiguous data - # layout + # wrap_call casts the arguments so we only need contiguous data layout self._bin_multirun.wrap_call(self._param_struct[0], self._state_struct[0], ct_vi, self._ct_vo, self._ct_len) else: - self._bin_func(self._param_struct[0], self._state_struct[0], ct_vi, self._ct_vo) + self._bin_func(self._param_struct[1], self._state_struct[1], ct_vi, self._ct_vo) return _convert_ctype_to_python(self._ct_vo) @@ -446,7 +445,7 @@ def _set_bin_node(self, node): assert node in self._composition._all_nodes wrapper = builder_context.LLVMBuilderContext.get_current().get_node_assembly(self._composition, node) self.__bin_func = pnlvm.LLVMBinaryFunction.from_obj( - wrapper, tags=self.__tags.union({"node_assembly"})) + wrapper, tags=self.__tags.union({"node_assembly"}), numpy_args=(0, 1, 4)) @property def _conditions(self): @@ -576,11 +575,11 @@ def execute_node(self, node, inputs=None, context=None): if node is not self._composition.input_CIM and self.__frozen_vals is None: self.freeze_values() - self._bin_func(self._state_struct[0], - self._param_struct[0], + self._bin_func(self._state_struct[1], + self._param_struct[1], inputs, self.__frozen_vals, - self._data_struct[0]) + self._data_struct[1]) if "comp_node_debug" in self._debug_env: print("RAN: {}. State: {}".format(node, self.extract_node_state(node))) @@ -593,7 +592,7 @@ def execute_node(self, node, inputs=None, context=None): def _bin_exec_func(self): if self.__bin_exec_func is None: self.__bin_exec_func = pnlvm.LLVMBinaryFunction.from_obj( - self._composition, tags=self.__tags) + self._composition, tags=self.__tags, numpy_args=(0, 1, 3)) return self.__bin_exec_func @@ -615,10 +614,10 @@ def execute(self, inputs): self._conditions[0], self._ct_len) else: - self._bin_exec_func(self._state_struct[0], - self._param_struct[0], + self._bin_exec_func(self._state_struct[1], + self._param_struct[1], self._get_input_struct(inputs)[0], - self._data_struct[0], + self._data_struct[1], self._conditions[0]) def cuda_execute(self, inputs): @@ -668,7 +667,7 @@ def _get_generator_run_input_struct(self, inputs, runs): def _bin_run_func(self): if self.__bin_run_func is None: self.__bin_run_func = pnlvm.LLVMBinaryFunction.from_obj( - self._composition, tags=self.__tags.union({"run"})) + self._composition, tags=self.__tags.union({"run"}), numpy_args=(0, 1, 2)) return self.__bin_run_func @@ -716,9 +715,9 @@ def run(self, inputs, runs=0, num_input_sets=0): # This is only needed for non-generator inputs that are wrapped in an extra context dimension inputs = ctypes.cast(inputs, self._bin_run_func.c_func.argtypes[3]) - self._bin_run_func(self._state_struct[0], - self._param_struct[0], - self._data_struct[0], + self._bin_run_func(self._state_struct[1], + self._param_struct[1], + self._data_struct[1], inputs, outputs, runs_count, @@ -770,7 +769,7 @@ def _prepare_evaluate(self, inputs, num_input_sets, num_evaluations, all_results eval_type = "evaluate_type_all_results" if all_results else "evaluate_type_objective" tags = {"evaluate", "alloc_range", eval_type} - bin_func = pnlvm.LLVMBinaryFunction.from_obj(ocm, tags=frozenset(tags)) + bin_func = pnlvm.LLVMBinaryFunction.from_obj(ocm, tags=frozenset(tags), numpy_args=(0, 1, 6)) self.__bin_func = bin_func # There are 8 arguments to 
evaluate_alloc_range: @@ -780,9 +779,9 @@ def _prepare_evaluate(self, inputs, num_input_sets, num_evaluations, all_results # Directly initialized structures assert ocm.agent_rep is self._composition - comp_params = self._get_compilation_param('_eval_param', '_get_param_initializer', 0) - comp_state = self._get_compilation_param('_eval_state', '_get_state_initializer', 1) - comp_data = self._get_compilation_param('_eval_data', '_get_data_initializer', 6) + comp_params = self._get_compilation_param('_eval_param', '_get_param_initializer', 0)[1] + comp_state = self._get_compilation_param('_eval_state', '_get_state_initializer', 1)[1] + comp_data = self._get_compilation_param('_eval_data', '_get_data_initializer', 6)[1] # Construct input variable, the 5th parameter of the evaluate function ct_inputs = self._get_run_input_struct(inputs, num_input_sets, 5) @@ -803,7 +802,6 @@ def _prepare_evaluate(self, inputs, num_input_sets, num_evaluations, all_results "( evaluations:", num_evaluations, "element size:", ctypes.sizeof(out_el_ty), ")", "for", self._obj.name) - # return variable as numpy array. pycuda can use it directly return comp_params, comp_state, comp_data, ct_inputs, out_ty, ct_num_inputs def cuda_evaluate(self, inputs, num_input_sets, num_evaluations, all_results:bool=False): @@ -812,11 +810,11 @@ def cuda_evaluate(self, inputs, num_input_sets, num_evaluations, all_results:boo ct_results = out_ty() - cuda_args = (jit_engine.pycuda.driver.In(comp_params[1]), - jit_engine.pycuda.driver.InOut(comp_state[1]), + cuda_args = (jit_engine.pycuda.driver.In(comp_params), + jit_engine.pycuda.driver.InOut(comp_state), jit_engine.pycuda.driver.Out(np.ctypeslib.as_array(ct_results)), # results jit_engine.pycuda.driver.In(np.ctypeslib.as_array(ct_inputs)), # inputs - jit_engine.pycuda.driver.InOut(comp_data[1]), # composition data + jit_engine.pycuda.driver.InOut(comp_data), # composition data jit_engine.pycuda.driver.In(np.int32(num_input_sets)), # number of inputs ) @@ -837,19 +835,19 @@ def thread_evaluate(self, inputs, num_input_sets, num_evaluations, all_results:b # Create input and result typed casts once, they are the same # for every submitted job. - input_param = ctypes.cast(ct_inputs, self.__bin_func.c_func.argtypes[5]) - results_param = ctypes.cast(ct_results, self.__bin_func.c_func.argtypes[4]) + input_arg = ctypes.cast(ct_inputs, self.__bin_func.c_func.argtypes[5]) + results_arg = ctypes.cast(ct_results, self.__bin_func.c_func.argtypes[4]) # There are 7 arguments to evaluate_alloc_range: # comp_param, comp_state, from, to, results, input, comp_data results = [ex.submit(self.__bin_func, - comp_params[0], - comp_state[0], + comp_params, + comp_state, int(i * evals_per_job), min((i + 1) * evals_per_job, num_evaluations), - results_param, - input_param, - comp_data[0], + results_arg, + input_arg, + comp_data, ct_num_inputs) for i in range(jobs)] From b48447204d21d6b2ad2824bc463dc90da5dd76fc Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Mon, 29 Jul 2024 01:23:19 -0400 Subject: [PATCH 278/410] llvm/execution: Use numpy structures for Function and Mechanism outputs Drop cuda_out cached buffer. 
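The '_get_empty_for_arg' helper added in the diff below derives the output buffer from the converted dtype and poisons it with NaN; a minimal sketch of that pattern, with a made-up dtype and context count:

    import numpy as np

    # A sub-array dtype, e.g. a 2x3 block of float64; '.base' and '.shape'
    # give the element type and the per-context shape.
    out_dtype = np.dtype((np.float64, (2, 3)))

    # One buffer per execution context, filled with NaN so that any element
    # the compiled code fails to write stands out immediately.
    n_contexts = 4
    data_out = np.full((n_contexts,) + out_dtype.shape, np.nan, dtype=out_dtype.base)

    assert data_out.shape == (4, 2, 3)
    assert np.isnan(data_out).all()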
Signed-off-by: Jan Vesely --- psyneulink/core/llvm/execution.py | 53 +++++++++++++++++++------------ 1 file changed, 32 insertions(+), 21 deletions(-) diff --git a/psyneulink/core/llvm/execution.py b/psyneulink/core/llvm/execution.py index bad6f2570eb..cfc98135cc0 100644 --- a/psyneulink/core/llvm/execution.py +++ b/psyneulink/core/llvm/execution.py @@ -236,13 +236,27 @@ def _enumerate_recurse(elements): pnl_param.set(value, context=context, override=True, compilation_sync=True) + def _get_empty_for_arg(self, arg_num): + + out_base = self._bin_func.np_params[arg_num].base + out_shape = self._bin_func.np_params[arg_num].shape + + if len(self._execution_contexts) > 1: + out_shape = (len(self._execution_contexts),) + out_shape + + # "empty" is in fact filled with NaN poison + return np.full(out_shape, np.nan, dtype=out_base) + + def _get_indexable(self, np_array): + # outputs in recarrays need to be converted to list/tuple to be indexable + return np_array.tolist() if np_array.dtype.base.shape == () else np_array class CUDAExecution(Execution): - def __init__(self, buffers=['param_struct', 'state_struct', 'out']): + def __init__(self, buffers=['param_struct', 'state_struct']): super().__init__() - self._gpu_buffers = {} - for b in buffers: - self._gpu_buffers["_" + b] = None + + # Initialize GPU buffer map + self._gpu_buffers = {"_" + b: None for b in buffers} @property def _bin_func_multirun(self): @@ -257,7 +271,7 @@ def __get_cuda_arg(self, struct_name, arg_handler): # .array is a public member of pycuda's In/Out ArgumentHandler classes if gpu_buffer is None or gpu_buffer.array is not np_struct: - # 0-sized structures fail to upload use a dummy numpy array isntead + # 0-sized structures fail to upload use a dummy numpy array instead gpu_buffer = arg_handler(np_struct if np_struct.nbytes > 0 else np.zeros(2)) self._gpu_buffers[struct_name] = gpu_buffer @@ -280,27 +294,20 @@ def _cuda_data_struct(self): def _cuda_conditions(self): return self.__get_cuda_arg("_conditions", jit_engine.pycuda.driver.InOut) - @property - def _cuda_out(self): - gpu_buffer = self._gpu_buffers["_out"] - if gpu_buffer is None: - gpu_buffer = jit_engine.pycuda.driver.Out(np.ctypeslib.as_array(self._ct_vo)) - self._gpu_buffers["_out"] = gpu_buffer - - return gpu_buffer - def cuda_execute(self, variable): # Create input argument new_var = np.asfarray(variable, dtype=self._vi_dty) data_in = jit_engine.pycuda.driver.In(new_var) + data_out = self._get_empty_for_arg(3) + self._bin_func.cuda_call(self._cuda_param_struct, self._cuda_state_struct, data_in, - self._cuda_out, + jit_engine.pycuda.driver.Out(data_out), threads=len(self._execution_contexts)) - return _convert_ctype_to_python(self._ct_vo) + return self._get_indexable(data_out) class FuncExecution(CUDAExecution): @@ -308,7 +315,7 @@ class FuncExecution(CUDAExecution): def __init__(self, component, execution_ids=[None], *, tags=frozenset()): super().__init__() - self._bin_func = pnlvm.LLVMBinaryFunction.from_obj(component, tags=tags, numpy_args=(0, 1)) + self._bin_func = pnlvm.LLVMBinaryFunction.from_obj(component, tags=tags, numpy_args=(0, 1, 3)) self._execution_contexts = [ Context(execution_id=eid) for eid in execution_ids ] @@ -322,7 +329,8 @@ def __init__(self, component, execution_ids=[None], *, tags=frozenset()): vo_ty = vo_ty * len(execution_ids) vi_ty = vi_ty * len(execution_ids) - self._ct_vo = vo_ty() + self._ct_vo = vo_ty() + self._vi_dty = _element_dtype(vi_ty) if "stat" in self._debug_env: print("Input struct size:", _pretty_size(ctypes.sizeof(vi_ty)), 
@@ -346,8 +354,8 @@ def execute(self, variable): # Make sure function inputs are 2d. # Mechanism inputs are already 3d so the first part is nop. new_variable = np.asfarray(np.atleast_2d(variable), dtype=self._vi_dty) - ct_vi = np.ctypeslib.as_ctypes(new_variable) + if len(self._execution_contexts) > 1: # wrap_call casts the arguments so we only need contiguous data layout self._bin_multirun.wrap_call(self._param_struct[0], @@ -355,10 +363,13 @@ def execute(self, variable): ct_vi, self._ct_vo, self._ct_len) + return _convert_ctype_to_python(self._ct_vo) else: - self._bin_func(self._param_struct[1], self._state_struct[1], ct_vi, self._ct_vo) + data_out = self._get_empty_for_arg(3) + + self._bin_func(self._param_struct[1], self._state_struct[1], ct_vi, data_out) - return _convert_ctype_to_python(self._ct_vo) + return self._get_indexable(data_out) class MechExecution(FuncExecution): From 37ac860da3ae66440e7edc557bd7a33aa5d58589 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Tue, 30 Jul 2024 00:50:15 -0400 Subject: [PATCH 279/410] llvm/execution: Use numpy structures for Function and Mechanism inputs Signed-off-by: Jan Vesely --- psyneulink/core/llvm/execution.py | 56 ++++++------------------------- 1 file changed, 10 insertions(+), 46 deletions(-) diff --git a/psyneulink/core/llvm/execution.py b/psyneulink/core/llvm/execution.py index cfc98135cc0..a7f56709af6 100644 --- a/psyneulink/core/llvm/execution.py +++ b/psyneulink/core/llvm/execution.py @@ -48,25 +48,6 @@ def _tupleize(x): except TypeError: return x if x is not None else tuple() -def _element_dtype(x): - """ - Extract base builtin type from aggregate type. - - Throws assertion failure if the aggregate type includes more than one base type. - The assumption is that array of builtin type has the same binary layout as - the original aggregate and it's easier to construct - """ - dt = np.dtype(x) - while dt.subdtype is not None: - dt = dt.subdtype[0] - - if not dt.isbuiltin: - fdts = (_element_dtype(f[0]) for f in dt.fields.values()) - dt = next(fdts) - assert all(dt == fdt for fdt in fdts) - - assert dt.isbuiltin, "Element type is not builtin: {} from {}".format(dt, np.dtype(x)) - return dt def _pretty_size(size): units = ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB'] @@ -295,8 +276,8 @@ def _cuda_conditions(self): return self.__get_cuda_arg("_conditions", jit_engine.pycuda.driver.InOut) def cuda_execute(self, variable): - # Create input argument - new_var = np.asfarray(variable, dtype=self._vi_dty) + # Create input argument, PyCUDA doesn't care about shape + new_var = np.asfarray(variable, dtype=self._bin_func.np_params[2].base) data_in = jit_engine.pycuda.driver.In(new_var) data_out = self._get_empty_for_arg(3) @@ -315,29 +296,20 @@ class FuncExecution(CUDAExecution): def __init__(self, component, execution_ids=[None], *, tags=frozenset()): super().__init__() - self._bin_func = pnlvm.LLVMBinaryFunction.from_obj(component, tags=tags, numpy_args=(0, 1, 3)) + self._bin_func = pnlvm.LLVMBinaryFunction.from_obj(component, tags=tags, numpy_args=(0, 1, 2, 3)) self._execution_contexts = [ Context(execution_id=eid) for eid in execution_ids ] self._component = component - _, _, vi_ty, vo_ty = self._bin_func.byref_arg_types if len(execution_ids) > 1: self._bin_multirun = self._bin_func.get_multi_run() self._ct_len = ctypes.c_int(len(execution_ids)) - vo_ty = vo_ty * len(execution_ids) - vi_ty = vi_ty * len(execution_ids) + vo_ty = self._bin_func.byref_arg_types[3] * len(execution_ids) self._ct_vo = vo_ty() - self._vi_dty = _element_dtype(vi_ty) 
- if "stat" in self._debug_env: - print("Input struct size:", _pretty_size(ctypes.sizeof(vi_ty)), - "for", self._component.name) - print("Output struct size:", _pretty_size(ctypes.sizeof(vo_ty)), - "for", self._component.name) - @property def _obj(self): return self._component @@ -351,13 +323,12 @@ def _state_struct(self): return self._get_compilation_param('_state', '_get_state_initializer', 1) def execute(self, variable): - # Make sure function inputs are 2d. - # Mechanism inputs are already 3d so the first part is nop. - new_variable = np.asfarray(np.atleast_2d(variable), dtype=self._vi_dty) - ct_vi = np.ctypeslib.as_ctypes(new_variable) + new_variable = np.asfarray(variable, dtype=self._bin_func.np_params[2].base) if len(self._execution_contexts) > 1: # wrap_call casts the arguments so we only need contiguous data layout + ct_vi = np.ctypeslib.as_ctypes(new_variable) + self._bin_multirun.wrap_call(self._param_struct[0], self._state_struct[0], ct_vi, @@ -366,22 +337,15 @@ def execute(self, variable): return _convert_ctype_to_python(self._ct_vo) else: data_out = self._get_empty_for_arg(3) + data_in = new_variable.reshape(self._bin_func.np_params[2].shape) - self._bin_func(self._param_struct[1], self._state_struct[1], ct_vi, data_out) + self._bin_func(self._param_struct[1], self._state_struct[1], data_in, data_out) return self._get_indexable(data_out) class MechExecution(FuncExecution): - - def execute(self, variable): - # Convert to 3d. We always assume that: - # a) the input is vector of input ports - # b) input ports take vector of projection outputs - # c) projection output is a vector (even 1 element vector) - new_var = np.atleast_3d(variable) - new_var.shape = (len(self._component.input_ports), 1, -1) - return super().execute(new_var) + pass class CompExecution(CUDAExecution): From 21960f3b593f2e7584a81aa7bb7e2647b70143ca Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Tue, 30 Jul 2024 12:29:49 -0400 Subject: [PATCH 280/410] llvm/execution: Use numpy structure for 'conditions' argument Signed-off-by: Jan Vesely --- psyneulink/core/llvm/execution.py | 33 ++++++++++++++++++++----------- 1 file changed, 21 insertions(+), 12 deletions(-) diff --git a/psyneulink/core/llvm/execution.py b/psyneulink/core/llvm/execution.py index a7f56709af6..ef4b9a99766 100644 --- a/psyneulink/core/llvm/execution.py +++ b/psyneulink/core/llvm/execution.py @@ -366,7 +366,8 @@ def __init__(self, composition, execution_ids=[None], *, additional_tags=frozens self.__frozen_vals = None self.__tags = frozenset(additional_tags) - self.__conds = None + # Scheduling conditions, only used by "execute" + self.__conditions = None if len(execution_ids) > 1: self._ct_len = ctypes.c_int(len(execution_ids)) @@ -424,23 +425,30 @@ def _set_bin_node(self, node): @property def _conditions(self): - if self.__conds is None: + if self.__conditions is None: gen = helpers.ConditionGenerator(None, self._composition) + if len(self._execution_contexts) > 1: - cond_ctype = self._bin_func_multirun.byref_arg_types[4] * len(self._execution_contexts) - cond_initializer = (gen.get_condition_initializer() for _ in self._execution_contexts) + conditions_ctype = self._bin_func_multirun.byref_arg_types[4] * len(self._execution_contexts) + conditions_initializer = (gen.get_condition_initializer() for _ in self._execution_contexts) else: - cond_ctype = self._bin_func.byref_arg_types[4] - cond_initializer = gen.get_condition_initializer() + conditions_ctype = self._bin_func.byref_arg_types[4] + conditions_initializer = 
gen.get_condition_initializer() + + ct_conditions = conditions_ctype(*conditions_initializer) + np_conditions = np.frombuffer(ct_conditions, dtype=self._bin_func.np_params[4], count=len(self._execution_contexts)) + + if len(self._execution_contexts) == 1: + np_conditions.shape = () + + self.__conditions = (ct_conditions, np_conditions) - c_conds = cond_ctype(*cond_initializer) - self.__conds = (c_conds, np.ctypeslib.as_array(c_conds)) if "stat" in self._debug_env: print("Instantiated condition struct ( size:" , - _pretty_size(ctypes.sizeof(cond_ctype)), ")", + _pretty_size(ctypes.sizeof(conditions_ctype)), ")", "for", self._composition.name) - return self.__conds + return self.__conditions @property def _param_struct(self): @@ -518,6 +526,7 @@ def _get_input_struct(self, inputs): if "stat" in self._debug_env: print("Input struct size:", _pretty_size(ctypes.sizeof(c_input_type)), "for", self._composition.name) + c_input = c_input_type(*_tupleize(input_data)) return c_input, np.ctypeslib.as_array(c_input) @@ -567,7 +576,7 @@ def execute_node(self, node, inputs=None, context=None): def _bin_exec_func(self): if self.__bin_exec_func is None: self.__bin_exec_func = pnlvm.LLVMBinaryFunction.from_obj( - self._composition, tags=self.__tags, numpy_args=(0, 1, 3)) + self._composition, tags=self.__tags, numpy_args=(0, 1, 3, 4)) return self.__bin_exec_func @@ -593,7 +602,7 @@ def execute(self, inputs): self._param_struct[1], self._get_input_struct(inputs)[0], self._data_struct[1], - self._conditions[0]) + self._conditions[1]) def cuda_execute(self, inputs): # NOTE: Make sure that input struct generation is inlined. From ca3adef1c91d915b1cc4668b6449ed096fa144bf Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Wed, 31 Jul 2024 12:20:06 -0400 Subject: [PATCH 281/410] llvm/execution: Use numpy structures for frozen values Make sure frozen values are up to date when executing the controller node. Signed-off-by: Jan Vesely --- psyneulink/core/compositions/composition.py | 1 + psyneulink/core/llvm/execution.py | 30 ++++++++++++++------- 2 files changed, 21 insertions(+), 10 deletions(-) diff --git a/psyneulink/core/compositions/composition.py b/psyneulink/core/compositions/composition.py index 804ed9f6f45..9da21c5d602 100644 --- a/psyneulink/core/compositions/composition.py +++ b/psyneulink/core/compositions/composition.py @@ -11681,6 +11681,7 @@ def _execute_controller(self, assert (execution_mode == pnlvm.ExecutionMode.LLVM or execution_mode & pnlvm.ExecutionMode._Fallback),\ f"PROGRAM ERROR: Unrecognized compiled execution_mode: '{execution_mode}'." 
+ _comp_ex.freeze_values() _comp_ex.execute_node(self.controller, context=context) context.remove_flag(ContextFlags.PROCESSING) diff --git a/psyneulink/core/llvm/execution.py b/psyneulink/core/llvm/execution.py index ef4b9a99766..a060255a402 100644 --- a/psyneulink/core/llvm/execution.py +++ b/psyneulink/core/llvm/execution.py @@ -363,7 +363,7 @@ def __init__(self, composition, execution_ids=[None], *, additional_tags=frozens self.__bin_func = None self.__bin_run_func = None self.__bin_run_multi_func = None - self.__frozen_vals = None + self.__frozen_values = None self.__tags = frozenset(additional_tags) # Scheduling conditions, only used by "execute" @@ -419,9 +419,10 @@ def _bin_func_multirun(self): def _set_bin_node(self, node): assert node in self._composition._all_nodes - wrapper = builder_context.LLVMBuilderContext.get_current().get_node_assembly(self._composition, node) - self.__bin_func = pnlvm.LLVMBinaryFunction.from_obj( - wrapper, tags=self.__tags.union({"node_assembly"}), numpy_args=(0, 1, 4)) + node_assembly = builder_context.LLVMBuilderContext.get_current().get_node_assembly(self._composition, node) + self.__bin_func = pnlvm.LLVMBinaryFunction.from_obj(node_assembly, + tags=self.__tags.union({"node_assembly"}), + numpy_args=(0, 1, 3, 4)) @property def _conditions(self): @@ -492,7 +493,7 @@ def extract_node_struct(self, node, struct): return self._extract_node_struct(node, struct) def extract_frozen_node_output(self, node): - return self.extract_node_struct(node, self.__frozen_vals) + return self.extract_node_struct(node, self.__frozen_values[0]) def extract_node_output(self, node): return self.extract_node_struct(node, self._data_struct[0]) @@ -531,7 +532,10 @@ def _get_input_struct(self, inputs): return c_input, np.ctypeslib.as_array(c_input) def freeze_values(self): - self.__frozen_vals = copy.deepcopy(self._data_struct[0]) + np_copy = self._data_struct[1].copy() + ct_copy = np_copy.ctypes.data_as(type(ctypes.pointer(self._data_struct[0]))).contents + + self.__frozen_values = (ct_copy, np_copy) def execute_node(self, node, inputs=None, context=None): # We need to reconstruct the input dictionary here if it was not provided. @@ -555,14 +559,20 @@ def execute_node(self, node, inputs=None, context=None): assert inputs is not None or node is not self._composition.input_CIM - # Freeze output values if this is the first time we need them - if node is not self._composition.input_CIM and self.__frozen_vals is None: - self.freeze_values() + # Nodes other than input_CIM/parameter_CIM take inputs from projections + # and need frozen values available + if node is not self._composition.input_CIM and node is not self._composition.parameter_CIM: + assert self.__frozen_values is not None + data_in = self.__frozen_values[1] + else: + # The ndarray argument check doesn't allow None for null so just provide + # the same structure as outputs. + data_in = self._data_struct[1] self._bin_func(self._state_struct[1], self._param_struct[1], inputs, - self.__frozen_vals, + data_in, self._data_struct[1]) if "comp_node_debug" in self._debug_env: From f8671e1ef9d527f27577402730296197d1fd9c13 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Wed, 31 Jul 2024 12:52:17 -0400 Subject: [PATCH 282/410] tests/TestModelBasedOptimizationControlMechanisms: Update OCM controller tests Enable compiled testing in test_ocm_default_function. Use assert_array_equal instead of operator ==. Parametrize on search_space inputs directly instead of using string names. 
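A short, self-contained sketch of why these two changes help (values and names are made up; only the comparison and parametrization behaviour matter):

    import numpy as np
    import pytest

    res = np.array([[10.0]])

    # '==' on an array is element-wise and returns an array, not a single bool,
    # so asserting on it only works by accident for 1-element results.
    assert isinstance(res == [[10]], np.ndarray)

    # assert_array_equal checks shape and contents and reports mismatches clearly.
    np.testing.assert_array_equal(res, [[10]])

    # Parametrizing on the objects themselves removes the string -> object
    # lookup from the test body; 'ids' keeps the readable test names.
    @pytest.mark.parametrize("search_space", [[1, 10], (1, 10)], ids=["list", "tuple"])
    def test_search_space_sketch(search_space):
        assert len(search_space) == 2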
Signed-off-by: Jan Vesely --- tests/composition/test_control.py | 32 ++++++++++++++----------------- 1 file changed, 14 insertions(+), 18 deletions(-) diff --git a/tests/composition/test_control.py b/tests/composition/test_control.py index d390a7274f9..04a512b15ee 100644 --- a/tests/composition/test_control.py +++ b/tests/composition/test_control.py @@ -2711,7 +2711,8 @@ def test_modulation_of_random_state(self, comp_mode, num_generators): @pytest.mark.composition @pytest.mark.control class TestModelBasedOptimizationControlMechanisms_Execution: - def test_ocm_default_function(self): + @pytest.mark.parametrize("mode, ocm_mode", pytest.helpers.get_comp_and_ocm_execution_modes()) + def test_ocm_default_function(self, ocm_mode, mode): a = pnl.ProcessingMechanism() comp = pnl.Composition( controller_mode=pnl.BEFORE, @@ -2729,26 +2730,19 @@ def test_ocm_default_function(self): ), ) ) + comp.controller.comp_execution_mode = ocm_mode + assert type(comp.controller.function) == pnl.GridSearch - assert comp.run([1]) == [10] + + res = comp.run([1], execution_mode=mode) + np.testing.assert_array_equal(res, [[10]]) @pytest.mark.parametrize("nested", [True, False]) - @pytest.mark.parametrize("format", ["list", "tuple", "SampleIterator", "SampleIteratorArray", "SampleSpec", "ndArray"]) + @pytest.mark.parametrize("search_space", + [[1, 10], (1, 10), SampleIterator((1, 10)), SampleIterator([1, 10]), SampleSpec(1, 10, 9), np.array((1, 10))], + ids=["list", "tuple", "SampleIterator", "SampleIteratorArray", "SampleSpec", "ndArray"]) @pytest.mark.parametrize("mode, ocm_mode", pytest.helpers.get_comp_and_ocm_execution_modes()) - def test_ocm_searchspace_format_equivalence(self, format, nested, mode, ocm_mode): - - if format == "list": - search_space = [1, 10] - elif format == "tuple": - search_space = (1, 10) - elif format == "SampleIterator": - search_space = SampleIterator((1, 10)) - elif format == "SampleIteratorArray": - search_space = SampleIterator([1, 10]) - elif format == "SampleSpec": - search_space = SampleSpec(1, 10, 9) - elif format == "ndArray": - search_space = np.array((1, 10)) + def test_ocm_searchspace_format_equivalence(self, search_space, nested, mode, ocm_mode): if nested: search_space = [search_space] @@ -2772,7 +2766,9 @@ def test_ocm_searchspace_format_equivalence(self, format, nested, mode, ocm_mode comp.controller.comp_execution_mode = ocm_mode assert type(comp.controller.function) == pnl.GridSearch - assert comp.run([1], execution_mode=mode) == [[10]] + + res = comp.run([1], execution_mode=mode) + np.testing.assert_array_equal(res, [[10]]) def test_evc(self): # Mechanisms From 5add6e9e8dd40716df53db0d56ba3477b5e9ebb2 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Wed, 31 Jul 2024 20:20:19 -0400 Subject: [PATCH 283/410] llvm/execution: Use numpy structures to extract data Signed-off-by: Jan Vesely --- psyneulink/core/llvm/execution.py | 33 +++++++++++++++++++++++-------- 1 file changed, 25 insertions(+), 8 deletions(-) diff --git a/psyneulink/core/llvm/execution.py b/psyneulink/core/llvm/execution.py index a060255a402..82d06f906b9 100644 --- a/psyneulink/core/llvm/execution.py +++ b/psyneulink/core/llvm/execution.py @@ -469,8 +469,8 @@ def _data_struct(self): def _data_struct(self, data_struct): self._data = data_struct - def _extract_node_struct(self, node, data): - # context structure consists of a list of node contexts, + def _extract_node_struct_from_ctype(self, node, data): + # state structure consists of a list of node states, # followed by a list of projection contexts; get the 
first one # parameter structure consists of a list of node parameters, # followed by a list of projection parameters; get the first one @@ -486,23 +486,40 @@ def _extract_node_struct(self, node, data): return _convert_ctype_to_python(res_struct) + def _extract_node_struct_from_numpy(self, node, data): + # state structure consists of a list of node states, + # followed by a list of projection contexts; get the first one + # parameter structure consists of a list of node parameters, + # followed by a list of projection parameters; get the first one + # output structure consists of a list of node outputs, + # followed by a list of nested data structures; get the first one + all_nodes = data[data.dtype.names[0]] + + # Get the index into the array of all nodes + index = self._composition._get_node_index(node) + node_struct = all_nodes[all_nodes.dtype.names[index]] + + # Return copies of the extracted functions to avoid corrupting the + # returned results in next execution + return node_struct.copy().tolist() if node_struct.shape == () else node_struct.copy() + def extract_node_struct(self, node, struct): if len(self._execution_contexts) > 1: - return [self._extract_node_struct(node, struct[i]) for i, _ in enumerate(self._execution_contexts)] + return [self._extract_node_struct_from_ctype(node, struct[0][i]) for i, _ in enumerate(self._execution_contexts)] else: - return self._extract_node_struct(node, struct) + return self._extract_node_struct_from_numpy(node, struct[1]) def extract_frozen_node_output(self, node): - return self.extract_node_struct(node, self.__frozen_values[0]) + return self.extract_node_struct(node, self.__frozen_values) def extract_node_output(self, node): - return self.extract_node_struct(node, self._data_struct[0]) + return self.extract_node_struct(node, self._data_struct) def extract_node_state(self, node): - return self.extract_node_struct(node, self._state_struct[0]) + return self.extract_node_struct(node, self._state_struct) def extract_node_params(self, node): - return self.extract_node_struct(node, self._param_struct[0]) + return self.extract_node_struct(node, self._param_struct) def insert_node_output(self, node, data): my_field_name = self._data_struct[0]._fields_[0][0] From 6cd1b8590d279e7d8f51daa1247c2600346c701c Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Wed, 31 Jul 2024 20:29:18 -0400 Subject: [PATCH 284/410] llvm/execution: Use numpy structure to insert output data Signed-off-by: Jan Vesely --- psyneulink/core/llvm/execution.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/psyneulink/core/llvm/execution.py b/psyneulink/core/llvm/execution.py index 82d06f906b9..64a279bb3a2 100644 --- a/psyneulink/core/llvm/execution.py +++ b/psyneulink/core/llvm/execution.py @@ -522,11 +522,14 @@ def extract_node_params(self, node): return self.extract_node_struct(node, self._param_struct) def insert_node_output(self, node, data): - my_field_name = self._data_struct[0]._fields_[0][0] - my_res_struct = getattr(self._data_struct[0], my_field_name) + # output structure consists of a list of node outputs, + # followed by a list of nested data structures; get the first one + all_nodes = self._data_struct[1][self._data_struct[1].dtype.names[0]] + + # Get the index into the array of all nodes index = self._composition._get_node_index(node) - node_field_name = my_res_struct._fields_[index][0] - setattr(my_res_struct, node_field_name, _tupleize(data)) + value = all_nodes[all_nodes.dtype.names[index]] + np.copyto(value, np.asarray(data, 
dtype=value.dtype)) def _get_input_struct(self, inputs): # Either node or composition execute. From 1ee8c62d022fdaa561e015d3ac0fc4ca871ca0b1 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Wed, 31 Jul 2024 20:33:08 -0400 Subject: [PATCH 285/410] llvm/execution: Do not store ctype structure of frozen values Signed-off-by: Jan Vesely --- psyneulink/core/llvm/execution.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/psyneulink/core/llvm/execution.py b/psyneulink/core/llvm/execution.py index 64a279bb3a2..ce4f1a398f6 100644 --- a/psyneulink/core/llvm/execution.py +++ b/psyneulink/core/llvm/execution.py @@ -553,9 +553,8 @@ def _get_input_struct(self, inputs): def freeze_values(self): np_copy = self._data_struct[1].copy() - ct_copy = np_copy.ctypes.data_as(type(ctypes.pointer(self._data_struct[0]))).contents - self.__frozen_values = (ct_copy, np_copy) + self.__frozen_values = (None, np_copy) def execute_node(self, node, inputs=None, context=None): # We need to reconstruct the input dictionary here if it was not provided. From fba041f32fb90f67b6aacfd9afda3fc8ca4e7054 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Wed, 31 Jul 2024 21:59:59 -0400 Subject: [PATCH 286/410] llvm/execution: Drop 'context' argument The execution context has to be the same as the one in _execution_contexts[0]. Fixes: 2330f4dca1deedd2a3b0f431265ec396f6def585 ("Composition: set most_recent_context after compiled execution") Signed-off-by: Jan Vesely --- psyneulink/core/compositions/composition.py | 8 ++++---- psyneulink/core/llvm/execution.py | 7 ++++--- 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/psyneulink/core/compositions/composition.py b/psyneulink/core/compositions/composition.py index 9da21c5d602..79177e1c3b2 100644 --- a/psyneulink/core/compositions/composition.py +++ b/psyneulink/core/compositions/composition.py @@ -11682,7 +11682,7 @@ def _execute_controller(self, or execution_mode & pnlvm.ExecutionMode._Fallback),\ f"PROGRAM ERROR: Unrecognized compiled execution_mode: '{execution_mode}'." _comp_ex.freeze_values() - _comp_ex.execute_node(self.controller, context=context) + _comp_ex.execute_node(self.controller) context.remove_flag(ContextFlags.PROCESSING) @@ -12011,7 +12011,7 @@ def execute( build_CIM_input = self._build_variable_for_input_CIM(inputs) if execution_mode & pnlvm.ExecutionMode.COMPILED: - _comp_ex.execute_node(self.input_CIM, inputs, context) + _comp_ex.execute_node(self.input_CIM, inputs) # FIXME: parameter_CIM should be executed here as well, # but node execution of nested compositions with # outside control is not supported yet. 
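Both helpers above address node slots by positional field name on numpy record dtypes and update them in place, instead of walking ctypes fields. A minimal standalone sketch of that access pattern, with a made-up two-node layout (field names and shapes are illustrative only):

    import numpy as np

    # Made-up layout: the first field holds per-node outputs, the second the projection data.
    node_dt = np.dtype([("node_a", np.float64, (3,)), ("node_b", np.float64, (2,))])
    data = np.zeros((), dtype=[("nodes", node_dt), ("projections", np.float64, (4,))])

    all_nodes = data[data.dtype.names[0]]              # first field: the node outputs
    index = 1                                          # e.g. composition._get_node_index(node)
    value = all_nodes[all_nodes.dtype.names[index]]    # view into the same underlying buffer

    np.copyto(value, np.asarray([4.0, 5.0], dtype=value.dtype))
    print(data["nodes"]["node_b"])                     # [4. 5.] -- the write lands in place
    print(value.copy().tolist())                       # detached copy, as the extract path returns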
@@ -12296,7 +12296,7 @@ def execute( # Execute Mechanism if execution_mode & pnlvm.ExecutionMode.COMPILED: - _comp_ex.execute_node(node, context=context) + _comp_ex.execute_node(node) else: if node is not self.controller: mech_context = copy(context) @@ -12508,7 +12508,7 @@ def execute( # Extract result here if execution_mode & pnlvm.ExecutionMode.COMPILED: _comp_ex.freeze_values() - _comp_ex.execute_node(self.output_CIM, context=context) + _comp_ex.execute_node(self.output_CIM) report(self, PROGRESS_REPORT, report_num=report_num, diff --git a/psyneulink/core/llvm/execution.py b/psyneulink/core/llvm/execution.py index ce4f1a398f6..1b1640c5539 100644 --- a/psyneulink/core/llvm/execution.py +++ b/psyneulink/core/llvm/execution.py @@ -556,13 +556,14 @@ def freeze_values(self): self.__frozen_values = (None, np_copy) - def execute_node(self, node, inputs=None, context=None): + def execute_node(self, node, inputs=None): # We need to reconstruct the input dictionary here if it was not provided. # This happens during node execution of nested compositions. assert len(self._execution_contexts) == 1 + context = self._execution_contexts[0] + if inputs is None and node is self._composition.input_CIM: - if context is None: - context = self._execution_contexts[0] + port_inputs = {origin_port:[proj.parameters.value._get(context) for proj in p[0].path_afferents] for (origin_port, p) in self._composition.input_CIM_ports.items()} inputs = {} for p, v in port_inputs.items(): From 6189a5d757e8235cd046f5320e8795fb8a97b6d3 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Thu, 1 Aug 2024 00:43:01 -0400 Subject: [PATCH 287/410] llvm/execution: Use numpy structure for execution input Signed-off-by: Jan Vesely --- psyneulink/core/llvm/execution.py | 38 +++++++++++++++++++------------ 1 file changed, 24 insertions(+), 14 deletions(-) diff --git a/psyneulink/core/llvm/execution.py b/psyneulink/core/llvm/execution.py index 1b1640c5539..2d1e1effebc 100644 --- a/psyneulink/core/llvm/execution.py +++ b/psyneulink/core/llvm/execution.py @@ -422,7 +422,7 @@ def _set_bin_node(self, node): node_assembly = builder_context.LLVMBuilderContext.get_current().get_node_assembly(self._composition, node) self.__bin_func = pnlvm.LLVMBinaryFunction.from_obj(node_assembly, tags=self.__tags.union({"node_assembly"}), - numpy_args=(0, 1, 3, 4)) + numpy_args=(0, 1, 2, 3, 4)) @property def _conditions(self): @@ -533,23 +533,29 @@ def insert_node_output(self, node, data): def _get_input_struct(self, inputs): # Either node or composition execute. - # All execute functions expect inputs to be 3rd param. - c_input_type = self._bin_func.byref_arg_types[2] # Read provided input data and parse into an array (generator) if len(self._execution_contexts) > 1: assert len(self._execution_contexts) == len(inputs) - c_input_type = c_input_type * len(self._execution_contexts) + + # All execute functions expect inputs to be 3rd param. 
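The input path below converts the populated ctypes array into a numpy view with np.ctypeslib.as_array, so both handles address the same buffer. A standalone sketch of that conversion (the element count is illustrative):

    import ctypes
    import numpy as np

    ct_input = (ctypes.c_double * 4)(1.0, 2.0, 3.0, 4.0)   # stand-in for the generated input struct
    np_input = np.ctypeslib.as_array(ct_input)              # shares memory, no copy

    np_input[0] = 42.0
    assert ct_input[0] == 42.0                               # the ctypes side sees the write
    print(np_input.reshape((2, 2)))                          # reshape to whatever shape the kernel expects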
+ ct_input_type = self._bin_func.byref_arg_types[2] * len(self._execution_contexts) + input_data = (([x] for x in self._composition._build_variable_for_input_CIM(inp)) for inp in inputs) + + ct_input = ct_input_type(*_tupleize(input_data)) + np_input = np.ctypeslib.as_array(ct_input) else: - input_data = ([x] for x in self._composition._build_variable_for_input_CIM(inputs)) + ct_input = None + data = self._composition._build_variable_for_input_CIM(inputs) + + np_input = np.asarray(_tupleize(data), dtype=self._bin_func.np_params[2].base) + np_input = np_input.reshape(self._bin_func.np_params[2].shape) if "stat" in self._debug_env: - print("Input struct size:", _pretty_size(ctypes.sizeof(c_input_type)), - "for", self._composition.name) + print("Input struct size:", _pretty_size(np_input.nbytes), "for", self._composition.name) - c_input = c_input_type(*_tupleize(input_data)) - return c_input, np.ctypeslib.as_array(c_input) + return ct_input, np_input def freeze_values(self): np_copy = self._data_struct[1].copy() @@ -571,13 +577,17 @@ def execute_node(self, node, inputs=None): index = p.owner.input_ports.index(p) data[index] = v[0] + assert inputs is not None or node is not self._composition.input_CIM # Set bin node to make sure self._*struct works as expected self._set_bin_node(node) - if inputs is not None: - inputs = self._get_input_struct(inputs)[0] - assert inputs is not None or node is not self._composition.input_CIM + # Numpy doesn't allow to pass NULL to the called function. + # Create and pass a dummy buffer filled with NaN instead. + if inputs is not None: + inputs = self._get_input_struct(inputs)[1] + else: + inputs = self._get_empty_for_arg(2) # Nodes other than input_CIM/parameter_CIM take inputs from projections # and need frozen values available @@ -606,7 +616,7 @@ def execute_node(self, node, inputs=None): def _bin_exec_func(self): if self.__bin_exec_func is None: self.__bin_exec_func = pnlvm.LLVMBinaryFunction.from_obj( - self._composition, tags=self.__tags, numpy_args=(0, 1, 3, 4)) + self._composition, tags=self.__tags, numpy_args=(0, 1, 2, 3, 4)) return self.__bin_exec_func @@ -630,7 +640,7 @@ def execute(self, inputs): else: self._bin_exec_func(self._state_struct[1], self._param_struct[1], - self._get_input_struct(inputs)[0], + self._get_input_struct(inputs)[1], self._data_struct[1], self._conditions[1]) From 50dc619498591dcec243ab6fcab1a97bec23415a Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Sat, 3 Aug 2024 21:15:13 -0400 Subject: [PATCH 288/410] llvm: Move numpy buffer allocation to LLVMBinaryFunction Signed-off-by: Jan Vesely --- psyneulink/core/llvm/__init__.py | 8 ++++++++ psyneulink/core/llvm/execution.py | 18 ++++-------------- 2 files changed, 12 insertions(+), 14 deletions(-) diff --git a/psyneulink/core/llvm/__init__.py b/psyneulink/core/llvm/__init__.py index 4f663d7bef7..4b20e49aa97 100644 --- a/psyneulink/core/llvm/__init__.py +++ b/psyneulink/core/llvm/__init__.py @@ -226,6 +226,14 @@ def cuda_wrap_call(self, *args, **kwargs): wrap_args = (jit_engine.pycuda.driver.InOut(a) if isinstance(a, np.ndarray) else a for a in args) self.cuda_call(*wrap_args, **kwargs) + def np_buffer_for_arg(self, arg_num, *, extra_dimensions=()): + + out_base = self.np_params[arg_num].base + out_shape = extra_dimensions + self.np_params[arg_num].shape + + # fill the buffer with NaN poison + return np.full(out_shape, np.nan, dtype=out_base) + @staticmethod @functools.lru_cache(maxsize=32) def from_obj(obj, *, tags:frozenset=frozenset(), numpy_args:tuple=()): diff --git 
a/psyneulink/core/llvm/execution.py b/psyneulink/core/llvm/execution.py index 2d1e1effebc..f90919b97bc 100644 --- a/psyneulink/core/llvm/execution.py +++ b/psyneulink/core/llvm/execution.py @@ -217,17 +217,6 @@ def _enumerate_recurse(elements): pnl_param.set(value, context=context, override=True, compilation_sync=True) - def _get_empty_for_arg(self, arg_num): - - out_base = self._bin_func.np_params[arg_num].base - out_shape = self._bin_func.np_params[arg_num].shape - - if len(self._execution_contexts) > 1: - out_shape = (len(self._execution_contexts),) + out_shape - - # "empty" is in fact filled with NaN poison - return np.full(out_shape, np.nan, dtype=out_base) - def _get_indexable(self, np_array): # outputs in recarrays need to be converted to list/tuple to be indexable return np_array.tolist() if np_array.dtype.base.shape == () else np_array @@ -280,7 +269,8 @@ def cuda_execute(self, variable): new_var = np.asfarray(variable, dtype=self._bin_func.np_params[2].base) data_in = jit_engine.pycuda.driver.In(new_var) - data_out = self._get_empty_for_arg(3) + extra_dims = (len(self._execution_contexts),) if len(self._execution_contexts) > 1 else () + data_out = self._bin_func.np_buffer_for_arg(3, extra_dimensions=extra_dims) self._bin_func.cuda_call(self._cuda_param_struct, self._cuda_state_struct, @@ -336,7 +326,7 @@ def execute(self, variable): self._ct_len) return _convert_ctype_to_python(self._ct_vo) else: - data_out = self._get_empty_for_arg(3) + data_out = self._bin_func.np_buffer_for_arg(3) data_in = new_variable.reshape(self._bin_func.np_params[2].shape) self._bin_func(self._param_struct[1], self._state_struct[1], data_in, data_out) @@ -587,7 +577,7 @@ def execute_node(self, node, inputs=None): if inputs is not None: inputs = self._get_input_struct(inputs)[1] else: - inputs = self._get_empty_for_arg(2) + inputs = self._bin_func.np_buffer_for_arg(2) # Nodes other than input_CIM/parameter_CIM take inputs from projections # and need frozen values available From abb3690c17169bede284fad02abf306b0ac2a099 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Sun, 4 Aug 2024 14:44:55 -0400 Subject: [PATCH 289/410] tests/llvm/builtins: Use numpy arrays for fixed size arguments Signed-off-by: Jan Vesely --- tests/llvm/test_builtins_intrinsics.py | 3 +- tests/llvm/test_builtins_matrix.py | 15 +++------- tests/llvm/test_builtins_vector.py | 40 +++++++++++--------------- 3 files changed, 21 insertions(+), 37 deletions(-) diff --git a/tests/llvm/test_builtins_intrinsics.py b/tests/llvm/test_builtins_intrinsics.py index a6416b205c9..22cc3d2df8d 100644 --- a/tests/llvm/test_builtins_intrinsics.py +++ b/tests/llvm/test_builtins_intrinsics.py @@ -52,8 +52,7 @@ def test_builtin_op(benchmark, op, args, builtin, result, func_mode): # The result argument is a pointer, use it to derive # the right argument type - assert bin_f.byref_arg_types[-1] is not None - dty = np.dtype(bin_f.byref_arg_types[-1]) + dty = bin_f.np_params[1].base ptx_res = np.empty_like(result, dtype=dty) ptx_res_arg = pnlvm.jit_engine.pycuda.driver.Out(ptx_res) diff --git a/tests/llvm/test_builtins_matrix.py b/tests/llvm/test_builtins_matrix.py index 1f1ca2f57a1..1cad00e1565 100644 --- a/tests/llvm/test_builtins_matrix.py +++ b/tests/llvm/test_builtins_matrix.py @@ -64,18 +64,11 @@ def _get_const_dim_func(builtin, *dims): def test_matrix_op(benchmark, op, x, y, builtin, result, func_mode, dims): def _numpy_args(bin_f): - dty = np.dtype(bin_f.byref_arg_types[0]) + np_x = x.astype(bin_f.np_params[0]) + np_y = bin_f.np_params[1].type(y) if 
np.isscalar(y) else y.astype(bin_f.np_params[1]) + np_res = np.empty_like(result, dtype=bin_f.np_params[-1]) - # non-pointer arguments have None is the respective byref_arg_types position - if bin_f.byref_arg_types[1] is not None: - assert dty == np.dtype(bin_f.byref_arg_types[1]) - assert dty == np.dtype(bin_f.byref_arg_types[4]) - - lx = x.astype(dty) - ly = dty.type(y) if np.isscalar(y) else y.astype(dty) - lres = np.empty_like(result, dtype=dty) - - return lx, ly, lres + return np_x, np_y, np_res if func_mode == 'Python': def ex(): diff --git a/tests/llvm/test_builtins_vector.py b/tests/llvm/test_builtins_vector.py index 9e4d3e83572..999a7e42696 100644 --- a/tests/llvm/test_builtins_vector.py +++ b/tests/llvm/test_builtins_vector.py @@ -29,18 +29,11 @@ def test_vector_op(benchmark, op, v, builtin, result, func_mode): def _numpy_args(bin_f): - dty = np.dtype(bin_f.byref_arg_types[0]) + np_u = u.astype(bin_f.np_params[0]) + np_v = bin_f.np_params[1].type(v) if np.isscalar(v) else v.astype(bin_f.np_params[1]) + np_res = np.empty_like(np_u) - # non-pointer arguments have None is the respective byref_arg_types position - if bin_f.byref_arg_types[1] is not None: - assert dty == np.dtype(bin_f.byref_arg_types[1]) - assert dty == np.dtype(bin_f.byref_arg_types[3]) - - lu = u.astype(dty) - lv = dty.type(v) if np.isscalar(v) else v.astype(dty) - lres = np.empty_like(lu) - - return lu, lv, lres + return np_u, np_v, np_res if func_mode == 'Python': def ex(): @@ -82,30 +75,29 @@ def ex(): return np.sum(u) elif func_mode == 'LLVM': - bin_f = pnlvm.LLVMBinaryFunction.get("__pnl_builtin_vec_sum") + bin_f = pnlvm.LLVMBinaryFunction.get("__pnl_builtin_vec_sum", numpy_args=(2,)) - lu = u.astype(np.dtype(bin_f.byref_arg_types[0])) - lres = np.empty(1, dtype=lu.dtype) + np_u = u.astype(bin_f.np_params[0]) + np_res = bin_f.np_buffer_for_arg(2) - ct_u = lu.ctypes.data_as(bin_f.c_func.argtypes[0]) - ct_res = lres.ctypes.data_as(bin_f.c_func.argtypes[2]) + ct_u = np_u.ctypes.data_as(bin_f.c_func.argtypes[0]) def ex(): - bin_f(ct_u, DIM_X, ct_res) - return lres[0] + bin_f(ct_u, DIM_X, np_res) + return np_res elif func_mode == 'PTX': - bin_f = pnlvm.LLVMBinaryFunction.get("__pnl_builtin_vec_sum") + bin_f = pnlvm.LLVMBinaryFunction.get("__pnl_builtin_vec_sum", numpy_args=(2,)) - lu = u.astype(np.dtype(bin_f.byref_arg_types[0])) - res = np.empty(1, dtype=lu.dtype) + np_u = u.astype(bin_f.np_params[0]) + np_res = bin_f.np_buffer_for_arg(2) - cuda_u = pnlvm.jit_engine.pycuda.driver.In(lu) - cuda_res = pnlvm.jit_engine.pycuda.driver.Out(res) + cuda_u = pnlvm.jit_engine.pycuda.driver.In(np_u) + cuda_res = pnlvm.jit_engine.pycuda.driver.Out(np_res) def ex(): bin_f.cuda_call(cuda_u, np.int32(DIM_X), cuda_res) - return res[0] + return np_res res = benchmark(ex) np.testing.assert_allclose(res, np.sum(u)) From 9e9d3fe3e68a11d7c38ecf7cdf22e487d81ea6ad Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Sun, 4 Aug 2024 14:45:44 -0400 Subject: [PATCH 290/410] tests/llvm/{compile,helpers}: Use numpy arrays for fixed size arguments Signed-off-by: Jan Vesely --- tests/llvm/test_compile.py | 34 ++++----- tests/llvm/test_helpers.py | 140 +++++++++++++++---------------------- 2 files changed, 70 insertions(+), 104 deletions(-) diff --git a/tests/llvm/test_compile.py b/tests/llvm/test_compile.py index 406fc1e2430..c396cba594f 100644 --- a/tests/llvm/test_compile.py +++ b/tests/llvm/test_compile.py @@ -4,52 +4,48 @@ from psyneulink.core import llvm as pnlvm -ITERATIONS=100 DIM_X=1000 DIM_Y=2000 @pytest.mark.llvm def test_recompile(): # 
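The np_buffer_for_arg helper introduced above builds output buffers from the argument's dtype metadata and poisons them with NaN, so any slot the compiled code fails to write is easy to spot. A minimal sketch of the same idea over a plain subarray dtype (the dtype and shapes are made up):

    import numpy as np

    def nan_buffer(dtype, extra_dimensions=()):
        dtype = np.dtype(dtype)
        # extra leading dimensions hold per-context copies of the same structure
        return np.full(extra_dimensions + dtype.shape, np.nan, dtype=dtype.base)

    out = nan_buffer(np.dtype((np.float64, (3,))), extra_dimensions=(2,))
    print(out.shape)               # (2, 3)
    print(np.isnan(out).all())     # True -- a NaN surviving the call marks a missed write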
The original builtin mxv function - binf = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_vxm') - dty = np.dtype(binf.byref_arg_types[0]) - assert dty == np.dtype(binf.byref_arg_types[1]) - assert dty == np.dtype(binf.byref_arg_types[4]) + bin_f = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_vxm') - matrix = np.random.rand(DIM_X, DIM_Y).astype(dty) - vector = np.random.rand(DIM_X).astype(dty) - llvm_res = np.empty(DIM_Y, dtype=dty) + vector = np.random.rand(DIM_X).astype(bin_f.np_params[0].base) + matrix = np.random.rand(DIM_X, DIM_Y).astype(bin_f.np_params[1].base) + llvm_res = np.empty(DIM_Y, dtype=bin_f.np_params[4].base) x, y = matrix.shape - ct_vec = vector.ctypes.data_as(binf.c_func.argtypes[0]) - ct_mat = matrix.ctypes.data_as(binf.c_func.argtypes[1]) + ct_vec = vector.ctypes.data_as(bin_f.c_func.argtypes[0]) + ct_mat = matrix.ctypes.data_as(bin_f.c_func.argtypes[1]) orig_res = np.empty_like(llvm_res) - ct_res = orig_res.ctypes.data_as(binf.c_func.argtypes[4]) + ct_res = orig_res.ctypes.data_as(bin_f.c_func.argtypes[4]) - binf.c_func(ct_vec, ct_mat, x, y, ct_res) + bin_f.c_func(ct_vec, ct_mat, x, y, ct_res) # Rebuild and try again # This is not a public API pnlvm._llvm_build() rebuild_res = np.empty_like(llvm_res) - ct_res = rebuild_res.ctypes.data_as(binf.c_func.argtypes[4]) + ct_res = rebuild_res.ctypes.data_as(bin_f.c_func.argtypes[4]) - binf.c_func(ct_vec, ct_mat, x, y, ct_res) + bin_f.c_func(ct_vec, ct_mat, x, y, ct_res) assert np.array_equal(orig_res, rebuild_res) # Get a new pointer - binf2 = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_vxm') + bin_f2 = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_vxm') new_res = np.empty_like(llvm_res) - ct_res = new_res.ctypes.data_as(binf2.c_func.argtypes[4]) + ct_res = new_res.ctypes.data_as(bin_f2.c_func.argtypes[4]) - binf2.c_func(ct_vec, ct_mat, x, y, ct_res) + bin_f2.c_func(ct_vec, ct_mat, x, y, ct_res) assert np.array_equal(rebuild_res, new_res) callable_res = np.empty_like(llvm_res) - ct_res = callable_res.ctypes.data_as(binf.c_func.argtypes[4]) + ct_res = callable_res.ctypes.data_as(bin_f.c_func.argtypes[4]) - binf2(ct_vec, ct_mat, x, y, ct_res) + bin_f2(ct_vec, ct_mat, x, y, ct_res) assert np.array_equal(new_res, callable_res) diff --git a/tests/llvm/test_helpers.py b/tests/llvm/test_helpers.py index f2e2cb141e6..e692bd62f37 100644 --- a/tests/llvm/test_helpers.py +++ b/tests/llvm/test_helpers.py @@ -1,6 +1,5 @@ import ctypes import ctypes.util -import copy import numpy as np import pytest import sys @@ -16,8 +15,7 @@ VECTOR = np.random.rand(DIM_X) @pytest.mark.llvm -@pytest.mark.parametrize('mode', ['CPU', - pytest.param('PTX', marks=pytest.mark.cuda)]) +@pytest.mark.parametrize('mode', ['CPU', pytest.helpers.cuda_param('PTX')]) def test_helper_fclamp(mode): with pnlvm.LLVMBuilderContext.get_current() as ctx: @@ -46,12 +44,13 @@ def test_helper_fclamp(mode): ref = np.clip(VECTOR, TST_MIN, TST_MAX) bounds = np.asfarray([TST_MIN, TST_MAX]) + bin_f = pnlvm.LLVMBinaryFunction.get(custom_name) - local_vec = copy.deepcopy(VECTOR) + local_vec = VECTOR.copy() + if mode == 'CPU': - ct_ty = ctypes.POINTER(bin_f.byref_arg_types[0]) - ct_vec = local_vec.ctypes.data_as(ct_ty) - ct_bounds = bounds.ctypes.data_as(ct_ty) + ct_vec = local_vec.ctypes.data_as(bin_f.c_func.argtypes[0]) + ct_bounds = bounds.ctypes.data_as(bin_f.c_func.argtypes[2]) bin_f(ct_vec, DIM_X, ct_bounds) else: @@ -61,8 +60,7 @@ def test_helper_fclamp(mode): @pytest.mark.llvm -@pytest.mark.parametrize('mode', ['CPU', - pytest.param('PTX', marks=pytest.mark.cuda)]) 
+@pytest.mark.parametrize('mode', ['CPU', pytest.helpers.cuda_param('PTX')]) def test_helper_fclamp_const(mode): with pnlvm.LLVMBuilderContext.get_current() as ctx: @@ -85,12 +83,12 @@ def test_helper_fclamp_const(mode): builder.ret_void() - local_vec = copy.deepcopy(VECTOR) + local_vec = VECTOR.copy() ref = np.clip(VECTOR, TST_MIN, TST_MAX) + bin_f = pnlvm.LLVMBinaryFunction.get(custom_name) if mode == 'CPU': - ct_ty = ctypes.POINTER(bin_f.byref_arg_types[0]) - ct_vec = local_vec.ctypes.data_as(ct_ty) + ct_vec = local_vec.ctypes.data_as(bin_f.c_func.argtypes[0]) bin_f(ct_vec, DIM_X) else: @@ -100,10 +98,8 @@ def test_helper_fclamp_const(mode): @pytest.mark.llvm -@pytest.mark.parametrize('mode', ['CPU', - pytest.param('PTX', marks=pytest.mark.cuda)]) -@pytest.mark.parametrize('rtol,atol', - [[0, 0], [None, None], [None, 100], [2, None]]) +@pytest.mark.parametrize('mode', ['CPU', pytest.helpers.cuda_param('PTX')]) +@pytest.mark.parametrize('rtol,atol', [[0, 0], [None, None], [None, 100], [2, None]]) @pytest.mark.parametrize('var1,var2', [[1, 1], [1, 100], [1,2], [-4,5], [0, -100], [-1,-2], [[1,1,1,-4,0,-1], [1,100,2,5,-100,-2]] @@ -148,18 +144,16 @@ def test_helper_is_close(mode, var1, var2, rtol, atol, fp_type): bin_f = pnlvm.LLVMBinaryFunction.get(custom_name) - dty = np.dtype(bin_f.byref_arg_types[0]) - vec1 = np.atleast_1d(np.asfarray(var1, dtype=dty)) - vec2 = np.atleast_1d(np.asfarray(var2, dtype=dty)) + vec1 = np.atleast_1d(np.asfarray(var1, dtype=bin_f.np_params[0].base)) + vec2 = np.atleast_1d(np.asfarray(var2, dtype=bin_f.np_params[1].base)) assert len(vec1) == len(vec2) res = np.empty_like(vec2) ref = np.isclose(vec1, vec2, **tolerance) if mode == 'CPU': - ct_ty = ctypes.POINTER(bin_f.byref_arg_types[0]) - ct_vec1 = vec1.ctypes.data_as(ct_ty) - ct_vec2 = vec2.ctypes.data_as(ct_ty) - ct_res = res.ctypes.data_as(ct_ty) + ct_vec1 = vec1.ctypes.data_as(bin_f.c_func.argtypes[0]) + ct_vec2 = vec2.ctypes.data_as(bin_f.c_func.argtypes[1]) + ct_res = res.ctypes.data_as(bin_f.c_func.argtypes[2]) bin_f(ct_vec1, ct_vec2, ct_res, len(res)) else: @@ -169,10 +163,8 @@ def test_helper_is_close(mode, var1, var2, rtol, atol, fp_type): @pytest.mark.llvm -@pytest.mark.parametrize('mode', ['CPU', - pytest.param('PTX', marks=pytest.mark.cuda)]) -@pytest.mark.parametrize('rtol,atol', - [[0, 0], [None, None], [None, 100], [2, None]]) +@pytest.mark.parametrize('mode', ['CPU', pytest.helpers.cuda_param('PTX')]) +@pytest.mark.parametrize('rtol,atol', [[0, 0], [None, None], [None, 100], [2, None]]) @pytest.mark.parametrize('var1,var2', [[1, 1], [1, 100], [1,2], [-4,5], [0, -100], [-1,-2], [[1,1,1,-4,0,-1], [1,100,2,5,-100,-2]] @@ -191,8 +183,7 @@ def test_helper_all_close(mode, var1, var2, atol, rtol): with pnlvm.LLVMBuilderContext.get_current() as ctx: arr_ptr_ty = ir.ArrayType(ir.DoubleType(), len(vec1)).as_pointer() - func_ty = ir.FunctionType(ir.VoidType(), [arr_ptr_ty, arr_ptr_ty, - ir.IntType(32).as_pointer()]) + func_ty = ir.FunctionType(ir.VoidType(), [arr_ptr_ty, arr_ptr_ty, ir.IntType(32).as_pointer()]) custom_name = ctx.get_unique_name("all_close") function = ir.Function(ctx.module, func_ty, name=custom_name) @@ -207,18 +198,14 @@ def test_helper_all_close(mode, var1, var2, atol, rtol): ref = np.allclose(vec1, vec2, **tolerance) - bin_f = pnlvm.LLVMBinaryFunction.get(custom_name) - if mode == 'CPU': - ct_ty = ctypes.POINTER(bin_f.byref_arg_types[0]) - ct_vec1 = vec1.ctypes.data_as(ct_ty) - ct_vec2 = vec2.ctypes.data_as(ct_ty) - res = ctypes.c_uint32() + res = np.array(5, dtype=np.uint32) + + 
bin_f = pnlvm.LLVMBinaryFunction.get(custom_name, numpy_args=(0, 1, 2)) - bin_f(ct_vec1, ct_vec2, ctypes.byref(res)) + if mode == 'CPU': + bin_f(vec1, vec2, res) else: - res = np.array([5], dtype=np.uint32) bin_f.cuda_wrap_call(vec1, vec2, res) - res = res[0] assert np.array_equal(res, ref) @@ -425,9 +412,9 @@ def test_helper_get_array_shape(self, ir_type, expected): def test_helper_array_from_shape(self, ir_type, shape): assert ir_type == pnlvm.helpers.array_from_shape(shape, self.DOUBLE_TYPE) + @pytest.mark.llvm -@pytest.mark.parametrize('mode', ['CPU', - pytest.param('PTX', marks=pytest.mark.cuda)]) +@pytest.mark.parametrize('mode', ['CPU', pytest.helpers.cuda_param('PTX')]) @pytest.mark.parametrize('op,var,expected', [ (pnlvm.helpers.tanh, 1.0, 0.7615941559557649), (pnlvm.helpers.exp, 1.0, 2.718281828459045), @@ -436,8 +423,7 @@ def test_helper_array_from_shape(self, ir_type, shape): (pnlvm.helpers.log, 1.0, 0.0), (pnlvm.helpers.log1p, 1.0, 0.6931471805599453), ]) -@pytest.mark.parametrize('fp_type', [pnlvm.ir.DoubleType(), pnlvm.ir.FloatType()], - ids=lambda x: str(x)) +@pytest.mark.parametrize('fp_type', [pnlvm.ir.DoubleType(), pnlvm.ir.FloatType()], ids=str) def test_helper_numerical(mode, op, var, expected, fp_type): with pnlvm.LLVMBuilderContext(fp_type) as ctx: func_ty = ir.FunctionType(ir.VoidType(), [ctx.float_ty.as_pointer()]) @@ -454,20 +440,19 @@ def test_helper_numerical(mode, op, var, expected, fp_type): builder.ret_void() - bin_f = pnlvm.LLVMBinaryFunction.get(custom_name) + bin_f = pnlvm.LLVMBinaryFunction.get(custom_name, numpy_args=(0,)) + + res = np.asfarray(var, dtype=bin_f.np_params[0]) + if mode == 'CPU': - res = bin_f.byref_arg_types[0](var) - bin_f(ctypes.byref(res)) - res = res.value + bin_f(res) else: - res = np.ctypeslib.as_array(bin_f.byref_arg_types[0](var)) bin_f.cuda_wrap_call(res) np.testing.assert_allclose(res, expected) @pytest.mark.llvm -@pytest.mark.parametrize('mode', ['CPU', - pytest.param('PTX', marks=pytest.mark.cuda)]) +@pytest.mark.parametrize('mode', ['CPU', pytest.helpers.cuda_param('PTX')]) @pytest.mark.parametrize('var,expected', [ (np.asfarray([1,2,3]), np.asfarray([2,3,4])), (np.asfarray([[1,2],[3,4]]), np.asfarray([[2,3],[4,5]])), @@ -488,26 +473,20 @@ def test_helper_elementwise_op(mode, var, expected): lambda ctx, builder, x: builder.fadd(x.type(1.0), x), out) builder.ret_void() - bin_f = pnlvm.LLVMBinaryFunction.get(custom_name) + bin_f = pnlvm.LLVMBinaryFunction.get(custom_name, numpy_args=(0, 1)) - # convert input to the right type - dt = np.dtype(bin_f.byref_arg_types[0]) - dt = np.empty(1, dtype=dt).flatten().dtype - var = var.astype(dt) + vec = np.asfarray(var, dtype=bin_f.np_params[0].base) + res = bin_f.np_buffer_for_arg(1) if mode == 'CPU': - ct_vec = np.ctypeslib.as_ctypes(var) - res = bin_f.byref_arg_types[1]() - bin_f(ct_vec, ctypes.byref(res)) + bin_f(vec, res) else: - res = np.empty_like(var) - bin_f.cuda_wrap_call(var, res) + bin_f.cuda_wrap_call(vec, res) assert np.array_equal(res, expected) @pytest.mark.llvm -@pytest.mark.parametrize('mode', ['CPU', - pytest.param('PTX', marks=pytest.mark.cuda)]) +@pytest.mark.parametrize('mode', ['CPU', pytest.helpers.cuda_param('PTX')]) @pytest.mark.parametrize('var1,var2,expected', [ (np.array([1.,2.,3.]), np.array([1.,2.,3.]), np.array([2.,4.,6.])), (np.array([1.,2.,3.]), np.array([0.,1.,2.]), np.array([1.,3.,5.])), @@ -537,24 +516,19 @@ def test_helper_recursive_iterate_arrays(mode, var1, var2, expected): a = builder.load(a_ptr) b = builder.load(b_ptr) 
builder.store(builder.fadd(a,b), o_ptr) + builder.ret_void() - bin_f = pnlvm.LLVMBinaryFunction.get(custom_name) + bin_f = pnlvm.LLVMBinaryFunction.get(custom_name, numpy_args=(0, 1, 2)) - # convert input to the right type - dt = np.dtype(bin_f.byref_arg_types[0]) - dt = np.empty(1, dtype=dt).flatten().dtype - var1 = var1.astype(dt) - var2 = var2.astype(dt) + vec1 = np.asfarray(var1, dtype=bin_f.np_params[0].base) + vec2 = np.asfarray(var2, dtype=bin_f.np_params[0].base) + res = bin_f.np_buffer_for_arg(1) if mode == 'CPU': - ct_vec = np.ctypeslib.as_ctypes(var1) - ct_vec_2 = np.ctypeslib.as_ctypes(var2) - res = bin_f.byref_arg_types[2]() - bin_f(ct_vec, ct_vec_2, ctypes.byref(res)) + bin_f(vec1, vec2, res) else: - res = np.empty_like(var1) - bin_f.cuda_wrap_call(var1, var2, res) + bin_f.cuda_wrap_call(vec1, vec2, res) assert np.array_equal(res, expected) @@ -563,8 +537,7 @@ def test_helper_recursive_iterate_arrays(mode, var1, var2, expected): @pytest.mark.llvm -@pytest.mark.parametrize('mode', ['CPU', - pytest.param('PTX', marks=pytest.mark.cuda)]) +@pytest.mark.parametrize('mode', ['CPU', pytest.helpers.cuda_param('PTX')]) @pytest.mark.parametrize('t1', _fp_types) @pytest.mark.parametrize('t2', _fp_types) @pytest.mark.parametrize('val', [1.0, '-Inf', 'Inf', 'NaN', 16777216, 16777217, -1.0]) @@ -582,21 +555,18 @@ def test_helper_convert_fp_type(t1, t2, mode, val): builder.store(conv_x, y) builder.ret_void() - bin_f = pnlvm.LLVMBinaryFunction.get(custom_name) + bin_f = pnlvm.LLVMBinaryFunction.get(custom_name, numpy_args=(0, 1)) - # Convert type to numpy dtype - npt1, npt2 = (np.dtype(bin_f.byref_arg_types[x]) for x in (0, 1)) - npt1, npt2 = (np.float16().dtype if x == np.uint16 else x for x in (npt1, npt2)) + # Get the argument numpy dtype + np_dt1, np_dt2 = (np.dtype(bin_f.np_params[i]) for i in (0, 1)) # instantiate value, result and reference - x = np.asfarray(val, dtype=npt1) - y = np.asfarray(np.random.rand(), dtype=npt2) - ref = x.astype(npt2) + x = np.asfarray(val, dtype=np_dt1) + y = np.asfarray(0, dtype=np_dt2) + ref = x.astype(np_dt2) if mode == 'CPU': - ct_x = x.ctypes.data_as(bin_f.c_func.argtypes[0]) - ct_y = y.ctypes.data_as(bin_f.c_func.argtypes[1]) - bin_f(ct_x, ct_y) + bin_f(x, y) else: bin_f.cuda_wrap_call(x, y) From 3179443931a2335013941d5cfd53be6be457ba57 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Sun, 4 Aug 2024 14:44:20 -0400 Subject: [PATCH 291/410] tests/llvm/Mersenne-Twister: Use numpy arrays for fixed size arguments Signed-off-by: Jan Vesely --- tests/llvm/test_builtins_mt_random.py | 112 +++++++++++++++++--------- 1 file changed, 75 insertions(+), 37 deletions(-) diff --git a/tests/llvm/test_builtins_mt_random.py b/tests/llvm/test_builtins_mt_random.py index 4d25e53c8cd..2ff7cff0ea2 100644 --- a/tests/llvm/test_builtins_mt_random.py +++ b/tests/llvm/test_builtins_mt_random.py @@ -15,35 +15,46 @@ def test_random_int(benchmark, mode): res = [] if mode == 'Python': state = random.Random(SEED) + def f(): return state.randrange(0xffffffff) + elif mode == 'numpy': # Numpy promotes elements to int64 state = np.random.RandomState([SEED]) + def f(): return state.randint(0xffffffff, dtype=np.int64) + elif mode == 'LLVM': - init_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_mt_rand_init') - state = init_fun.byref_arg_types[0]() + init_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_mt_rand_init', numpy_args=(0,)) + state = init_fun.np_buffer_for_arg(0) + init_fun(state, SEED) - gen_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_mt_rand_int32') - out = 
ctypes.c_ulonglong() + gen_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_mt_rand_int32', numpy_args=(0, 1)) + def f(): + out = gen_fun.np_buffer_for_arg(1) gen_fun(state, out) - return out.value + return out + elif mode == 'PTX': init_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_mt_rand_init') - state_size = ctypes.sizeof(init_fun.byref_arg_types[0]) + + state_size = init_fun.np_buffer_for_arg(0).nbytes gpu_state = pnlvm.jit_engine.pycuda.driver.mem_alloc(state_size) + init_fun.cuda_call(gpu_state, np.int32(SEED)) gen_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_mt_rand_int32') - out = np.asarray([0], dtype=np.uint64) + out = gen_fun.np_buffer_for_arg(1) gpu_out = pnlvm.jit_engine.pycuda.driver.Out(out) + def f(): gen_fun.cuda_call(gpu_state, gpu_out) - return out[0] + return out.copy() + else: assert False, "Unknown mode: {}".format(mode) @@ -61,35 +72,45 @@ def test_random_float(benchmark, mode): if mode == 'Python': # Python treats every seed as array state = random.Random(SEED) + def f(): return state.random() + elif mode == 'numpy': # numpy promotes elements to int64 state = np.random.RandomState([SEED]) + def f(): return state.random_sample() + elif mode == 'LLVM': - init_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_mt_rand_init') - state = init_fun.byref_arg_types[0]() + init_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_mt_rand_init', numpy_args=(0,)) + state = init_fun.np_buffer_for_arg(0) init_fun(state, SEED) - gen_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_mt_rand_double') - out = gen_fun.byref_arg_types[1]() + gen_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_mt_rand_double', numpy_args=(0, 1)) + def f(): + out = gen_fun.np_buffer_for_arg(1) gen_fun(state, out) - return out.value + return out + elif mode == 'PTX': init_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_mt_rand_init') - state_size = ctypes.sizeof(init_fun.byref_arg_types[0]) + + state_size = init_fun.np_buffer_for_arg(0).nbytes gpu_state = pnlvm.jit_engine.pycuda.driver.mem_alloc(state_size) + init_fun.cuda_call(gpu_state, np.int32(SEED)) gen_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_mt_rand_double') - out = np.asfarray([0.0], dtype=np.dtype(gen_fun.byref_arg_types[1])) + out = gen_fun.np_buffer_for_arg(1) gpu_out = pnlvm.jit_engine.pycuda.driver.Out(out) + def f(): gen_fun.cuda_call(gpu_state, gpu_out) - return out[0] + return out.copy() + else: assert False, "Unknown mode: {}".format(mode) @@ -107,30 +128,38 @@ def test_random_normal(benchmark, mode): if mode == 'numpy': # numpy promotes elements to int64 state = np.random.RandomState([SEED]) + def f(): return state.normal() + elif mode == 'LLVM': - init_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_mt_rand_init') - state = init_fun.byref_arg_types[0]() + init_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_mt_rand_init', numpy_args=(0,)) + state = init_fun.np_buffer_for_arg(0) init_fun(state, SEED) - gen_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_mt_rand_normal') - out = gen_fun.byref_arg_types[1]() + gen_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_mt_rand_normal', numpy_args=(0, 1)) + def f(): + out = gen_fun.np_buffer_for_arg(1) gen_fun(state, out) - return out.value + return out + elif mode == 'PTX': init_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_mt_rand_init') - state_size = ctypes.sizeof(init_fun.byref_arg_types[0]) + + state_size = init_fun.np_buffer_for_arg(0).nbytes gpu_state = pnlvm.jit_engine.pycuda.driver.mem_alloc(state_size) + init_fun.cuda_call(gpu_state, np.int32(SEED)) 
gen_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_mt_rand_normal') - out = np.asfarray([0.0], dtype=np.dtype(gen_fun.byref_arg_types[1])) + out = gen_fun.np_buffer_for_arg(1) gpu_out = pnlvm.jit_engine.pycuda.driver.Out(out) + def f(): gen_fun.cuda_call(gpu_state, gpu_out) - return out[0] + return out.copy() + else: assert False, "Unknown mode: {}".format(mode) @@ -157,35 +186,44 @@ def test_random_binomial(benchmark, mode, n, p, exp): if mode == 'numpy': # numpy promotes elements to int64 state = np.random.RandomState([SEED]) + def f(): return state.binomial(n, p) + elif mode == 'LLVM': - init_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_mt_rand_init') - state = init_fun.byref_arg_types[0]() + init_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_mt_rand_init', numpy_args=(0,)) + state = init_fun.np_buffer_for_arg(0) init_fun(state, SEED) - gen_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_mt_rand_binomial') - c_n = gen_fun.byref_arg_types[1](n) - c_p = gen_fun.byref_arg_types[2](p) - c_out = gen_fun.byref_arg_types[-1]() + gen_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_mt_rand_binomial', numpy_args=(0, 1, 2, 3)) + n = np.asarray(n, dtype=gen_fun.np_params[1]) + p = np.asarray(p, dtype=gen_fun.np_params[2]) + def f(): - gen_fun(state, c_n, c_p, c_out) - return c_out.value + out = gen_fun.np_buffer_for_arg(1) + gen_fun(state, n, p, out) + return out + elif mode == 'PTX': init_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_mt_rand_init') - state_size = ctypes.sizeof(init_fun.byref_arg_types[0]) + + state_size = init_fun.np_buffer_for_arg(0).nbytes gpu_state = pnlvm.jit_engine.pycuda.driver.mem_alloc(state_size) + init_fun.cuda_call(gpu_state, np.int32(SEED)) gen_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_mt_rand_binomial') - gpu_n = pnlvm.jit_engine.pycuda.driver.In(np.array([n], dtype=np.dtype(gen_fun.byref_arg_types[1]))) - gpu_p = pnlvm.jit_engine.pycuda.driver.In(np.array([p], dtype=np.dtype(gen_fun.byref_arg_types[2]))) - out = np.array([0.0], dtype=np.dtype(gen_fun.byref_arg_types[3])) + + gpu_n = pnlvm.jit_engine.pycuda.driver.In(np.asarray(n, dtype=gen_fun.np_params[1])) + gpu_p = pnlvm.jit_engine.pycuda.driver.In(np.asarray(p, dtype=gen_fun.np_params[2])) + + out = gen_fun.np_buffer_for_arg(1) gpu_out = pnlvm.jit_engine.pycuda.driver.Out(out) def f(): gen_fun.cuda_call(gpu_state, gpu_n, gpu_p, gpu_out) - return out[0] + return out.copy() + else: assert False, "Unknown mode: {}".format(mode) From 73ed42be864f21440a399172cca2061e5bdbd7d0 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Sun, 4 Aug 2024 14:43:04 -0400 Subject: [PATCH 292/410] tests/llvm/Philox: Use numpy arrays for fixed size arguments Signed-off-by: Jan Vesely --- tests/llvm/test_builtins_philox_random.py | 185 +++++++++++++--------- 1 file changed, 110 insertions(+), 75 deletions(-) diff --git a/tests/llvm/test_builtins_philox_random.py b/tests/llvm/test_builtins_philox_random.py index 40fc1abc09a..0c6e289a700 100644 --- a/tests/llvm/test_builtins_philox_random.py +++ b/tests/llvm/test_builtins_philox_random.py @@ -26,27 +26,32 @@ def f(): return prng.integers(0xffffffffffffffff, dtype=np.uint64, endpoint=True) elif mode == 'LLVM': - init_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_philox_rand_init') - state = init_fun.byref_arg_types[0]() + init_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_philox_rand_init', numpy_args=(0,)) + state = init_fun.np_buffer_for_arg(0) init_fun(state, seed) - gen_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_philox_rand_int64') - out = 
ctypes.c_ulonglong() + gen_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_philox_rand_int64', numpy_args=(0, 1)) + def f(): + out = gen_fun.np_buffer_for_arg(1) gen_fun(state, out) - return np.uint64(out.value) + return out + elif mode == 'PTX': init_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_philox_rand_init') - state_size = ctypes.sizeof(init_fun.byref_arg_types[0]) + state_size = init_fun.np_buffer_for_arg(0).nbytes gpu_state = pnlvm.jit_engine.pycuda.driver.mem_alloc(state_size) + init_fun.cuda_call(gpu_state, np.int64(seed)) gen_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_philox_rand_int64') - out = np.asarray([0], dtype=np.uint64) + out = gen_fun.np_buffer_for_arg(1) gpu_out = pnlvm.jit_engine.pycuda.driver.Out(out) + def f(): gen_fun.cuda_call(gpu_state, gpu_out) - return out[0] + return out.copy() + else: assert False, "Unknown mode: {}".format(mode) @@ -64,33 +69,38 @@ def test_random_int32(benchmark, mode): res = [] if mode == 'numpy': state = np.random.Philox([SEED]) - prng = np.random.Generator(state) + prng = np.random.Generator(state)\ + def f(): # Get uint range [0, MAX] to avoid any intermediate caching of random bits return prng.integers(0xffffffff, dtype=np.uint32, endpoint=True) elif mode == 'LLVM': - init_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_philox_rand_init') - state = init_fun.byref_arg_types[0]() + init_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_philox_rand_init', numpy_args=(0,)) + state = init_fun.np_buffer_for_arg(0) init_fun(state, SEED) - gen_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_philox_rand_int32') - out = ctypes.c_uint() + gen_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_philox_rand_int32', numpy_args=(0, 1)) + def f(): + out = gen_fun.np_buffer_for_arg(1) gen_fun(state, out) - return out.value + return out + elif mode == 'PTX': init_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_philox_rand_init') - state_size = ctypes.sizeof(init_fun.byref_arg_types[0]) + state_size = init_fun.np_buffer_for_arg(0).nbytes gpu_state = pnlvm.jit_engine.pycuda.driver.mem_alloc(state_size) init_fun.cuda_call(gpu_state, np.int64(SEED)) gen_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_philox_rand_int32') - out = np.asarray([0], dtype=np.uint32) + out = gen_fun.np_buffer_for_arg(1) gpu_out = pnlvm.jit_engine.pycuda.driver.Out(out) + def f(): gen_fun.cuda_call(gpu_state, gpu_out) - return out[0] + return out.copy() + else: assert False, "Unknown mode: {}".format(mode) @@ -109,30 +119,36 @@ def test_random_double(benchmark, mode): if mode == 'numpy': state = np.random.Philox([SEED]) prng = np.random.Generator(state) + def f(): return prng.random(dtype=np.float64) + elif mode == 'LLVM': - init_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_philox_rand_init') - state = init_fun.byref_arg_types[0]() + init_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_philox_rand_init', numpy_args=(0,)) + state = init_fun.np_buffer_for_arg(0) init_fun(state, SEED) - gen_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_philox_rand_double') - out = ctypes.c_double() + gen_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_philox_rand_double', numpy_args=(0, 1)) + def f(): + out = gen_fun.np_buffer_for_arg(1) gen_fun(state, out) - return out.value + return out + elif mode == 'PTX': init_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_philox_rand_init') - state_size = ctypes.sizeof(init_fun.byref_arg_types[0]) + state_size = init_fun.np_buffer_for_arg(0).nbytes gpu_state = pnlvm.jit_engine.pycuda.driver.mem_alloc(state_size) 
init_fun.cuda_call(gpu_state, np.int64(SEED)) gen_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_philox_rand_double') - out = np.asfarray([0.0], dtype=np.float64) + out = gen_fun.np_buffer_for_arg(1) gpu_out = pnlvm.jit_engine.pycuda.driver.Out(out) + def f(): gen_fun.cuda_call(gpu_state, gpu_out) - return out[0] + return out.copy() + else: assert False, "Unknown mode: {}".format(mode) @@ -150,30 +166,36 @@ def test_random_float(benchmark, mode): if mode == 'numpy': state = np.random.Philox([SEED]) prng = np.random.Generator(state) + def f(): return prng.random(dtype=np.float32) + elif mode == 'LLVM': - init_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_philox_rand_init') - state = init_fun.byref_arg_types[0]() + init_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_philox_rand_init', numpy_args=(0,)) + state = init_fun.np_buffer_for_arg(0) init_fun(state, SEED) - gen_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_philox_rand_float') - out = ctypes.c_float() + gen_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_philox_rand_float', numpy_args=(0, 1)) + def f(): + out = gen_fun.np_buffer_for_arg(1) gen_fun(state, out) - return out.value + return out + elif mode == 'PTX': init_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_philox_rand_init') - state_size = ctypes.sizeof(init_fun.byref_arg_types[0]) + state_size = init_fun.np_buffer_for_arg(0).nbytes gpu_state = pnlvm.jit_engine.pycuda.driver.mem_alloc(state_size) init_fun.cuda_call(gpu_state, np.int64(SEED)) gen_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_philox_rand_float') - out = np.asfarray([0.0], dtype=np.float32) + out = gen_fun.np_buffer_for_arg(1) gpu_out = pnlvm.jit_engine.pycuda.driver.Out(out) + def f(): gen_fun.cuda_call(gpu_state, gpu_out) - return out[0] + return out.copy() + else: assert False, "Unknown mode: {}".format(mode) @@ -197,30 +219,36 @@ def test_random_normal(benchmark, mode, fp_type): if mode == 'numpy': state = np.random.Philox([SEED]) prng = np.random.Generator(state) + def f(): return prng.standard_normal(dtype=dtype) + elif mode == 'LLVM': - init_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_philox_rand_init') - state = init_fun.byref_arg_types[0]() + init_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_philox_rand_init', numpy_args=(0,)) + state = init_fun.np_buffer_for_arg(0) init_fun(state, SEED) - gen_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_philox_rand_normal') - out = gen_fun.byref_arg_types[1]() + gen_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_philox_rand_normal', numpy_args=(0, 1)) + def f(): + out = gen_fun.np_buffer_for_arg(1) gen_fun(state, out) - return out.value + return out + elif mode == 'PTX': init_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_philox_rand_init') - state_size = ctypes.sizeof(init_fun.byref_arg_types[0]) + state_size = init_fun.np_buffer_for_arg(0).nbytes gpu_state = pnlvm.jit_engine.pycuda.driver.mem_alloc(state_size) init_fun.cuda_call(gpu_state, np.int64(SEED)) gen_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_philox_rand_normal') - out = np.array([0.0], dtype=np.dtype(gen_fun.byref_arg_types[1])) + out = gen_fun.np_buffer_for_arg(1) gpu_out = pnlvm.jit_engine.pycuda.driver.Out(out) + def f(): gen_fun.cuda_call(gpu_state, gpu_out) - return out[0] + return out.copy() + else: assert False, "Unknown mode: {}".format(mode) @@ -228,36 +256,38 @@ def f(): if fp_type is pnlvm.ir.DoubleType(): np.testing.assert_allclose(res[0:2], [-0.2059740286292238, -0.12884495093462758]) # 208 doesn't take the fast path but wraps around the main 
loop - np.testing.assert_allclose(res[207:211], [-0.768690647997579, 0.4301874289485477, - -0.7803640491708955, -1.146089287628737]) + np.testing.assert_allclose(res[207:211], + [-0.768690647997579, 0.4301874289485477, -0.7803640491708955, -1.146089287628737]) # 450 doesn't take the fast path or wrap around the main loop, # but takes the special condition at the end of the loop - np.testing.assert_allclose(res[449:453], [-0.7713655663874537, -0.5638348710823825, - -0.9415838853097869, 0.6212784278881248]) + np.testing.assert_allclose(res[449:453], + [-0.7713655663874537, -0.5638348710823825, -0.9415838853097869, 0.6212784278881248]) # 2013 takes the rare secondary loop and exists in the first iteration # taking the positive value - np.testing.assert_allclose(res[2011:2015], [0.4201922976982861, 2.7021541445373916, - 3.7809967764329375, 0.19919094793393655]) + np.testing.assert_allclose(res[2011:2015], + [0.4201922976982861, 2.7021541445373916, 3.7809967764329375, 0.19919094793393655]) # 5136 takes the rare secondary loop and exists in the first iteration # taking the negative value - np.testing.assert_allclose(res[5134:5138], [0.12317411414687844, -0.17846827974421134, - -3.6579887696059714, 0.2501530374224693]) + np.testing.assert_allclose(res[5134:5138], + [0.12317411414687844, -0.17846827974421134, -3.6579887696059714, 0.2501530374224693]) # 190855 takes the rare secondary loop and needs more than one iteration - np.testing.assert_allclose(res[190853:190857], [-0.26418319904491194, 0.35889007879353746, - -3.843811523424439, -1.5256469840469997]) + np.testing.assert_allclose(res[190853:190857], + [-0.26418319904491194, 0.35889007879353746, -3.843811523424439, -1.5256469840469997]) + elif fp_type is pnlvm.ir.FloatType(): # The indices are taken from above and don't have special meaning. 
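The reference slices asserted above and below come from numpy's own Philox-backed Generator, which the compiled stream is expected to match bit for bit. A sketch of how such reference values can be regenerated (the seed here is illustrative; the test module defines its own SEED, and the dtype follows the fp_type under test):

    import numpy as np

    SEED = 0   # illustrative; use the module's SEED to reproduce the exact numbers
    prng = np.random.Generator(np.random.Philox([SEED]))

    # Draw one value per call, exactly as the benchmarked f() does, so the streams line up.
    res = [prng.standard_normal(dtype=np.float64) for _ in range(191000)]
    print(res[0:2], res[207:211], res[190853:190857])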
np.testing.assert_allclose(res[0:2], [-0.24822916090488434, -0.02676701545715332]) - np.testing.assert_allclose(res[207:211], [-0.33086925745010376, -1.024695873260498, - -0.5162619352340698, -0.15033885836601257]) - np.testing.assert_allclose(res[449:453], [-0.2223609834909439, 0.16769859194755554, - -0.7806711196899414, 0.5867824554443359]) - np.testing.assert_allclose(res[2011:2015], [0.1979091316461563, -0.23467595875263214, - 1.1458240747451782, -1.0285860300064087]) - np.testing.assert_allclose(res[5134:5138], [-1.0523858070373535, -3.007537603378296, - -0.4331461489200592, -0.8841480612754822]) - np.testing.assert_allclose(res[190853:190857], [-0.8958197236061096, 0.10532315075397491, - 2.000257730484009, -1.129721999168396]) + np.testing.assert_allclose(res[207:211], + [-0.33086925745010376, -1.024695873260498, -0.5162619352340698, -0.15033885836601257]) + np.testing.assert_allclose(res[449:453], + [-0.2223609834909439, 0.16769859194755554, -0.7806711196899414, 0.5867824554443359]) + np.testing.assert_allclose(res[2011:2015], + [0.1979091316461563, -0.23467595875263214, 1.1458240747451782, -1.0285860300064087]) + np.testing.assert_allclose(res[5134:5138], + [-1.0523858070373535, -3.007537603378296, -0.4331461489200592, -0.8841480612754822]) + np.testing.assert_allclose(res[190853:190857], + [-0.8958197236061096, 0.10532315075397491, 2.000257730484009, -1.129721999168396]) + assert not any(np.isnan(res)), list(np.isnan(res)).index(True) benchmark(f) @@ -287,35 +317,40 @@ def test_random_binomial(benchmark, mode, fp_type, n, p, exp_64, exp_32): if mode == 'numpy': state = np.random.Philox([SEED]) prng = np.random.Generator(state) + def f(): return prng.binomial(n, p) + elif mode == 'LLVM': - init_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_philox_rand_init') - c_state = init_fun.byref_arg_types[0]() - init_fun(c_state, SEED) + init_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_philox_rand_init', numpy_args=(0,)) + state = init_fun.np_buffer_for_arg(0) + init_fun(state, SEED) + + gen_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_philox_rand_binomial', numpy_args=(0, 1, 2, 3)) + n = np.asarray(n, dtype=gen_fun.np_params[1]) + p = np.asarray(p, dtype=gen_fun.np_params[2]) - gen_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_philox_rand_binomial') - c_n = gen_fun.byref_arg_types[1](n) - c_p = gen_fun.byref_arg_types[2](p) - c_out = gen_fun.byref_arg_types[-1]() def f(): - gen_fun(c_state, c_n, c_p, c_out) - return c_out.value + out = gen_fun.np_buffer_for_arg(1) + gen_fun(state, n, p, out) + return out + elif mode == 'PTX': init_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_philox_rand_init') - state_size = ctypes.sizeof(init_fun.byref_arg_types[0]) + state_size = init_fun.np_buffer_for_arg(0).nbytes gpu_state = pnlvm.jit_engine.pycuda.driver.mem_alloc(state_size) init_fun.cuda_call(gpu_state, np.int64(SEED)) gen_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_philox_rand_binomial') - gpu_n = pnlvm.jit_engine.pycuda.driver.In(np.array([n], dtype=np.dtype(gen_fun.byref_arg_types[1]))) - gpu_p = pnlvm.jit_engine.pycuda.driver.In(np.array([p], dtype=np.dtype(gen_fun.byref_arg_types[2]))) - out = np.array([0.0], dtype=np.dtype(gen_fun.byref_arg_types[3])) + gpu_n = pnlvm.jit_engine.pycuda.driver.In(np.asarray(n, dtype=gen_fun.np_params[1])) + gpu_p = pnlvm.jit_engine.pycuda.driver.In(np.asarray(p, dtype=gen_fun.np_params[2])) + out = gen_fun.np_buffer_for_arg(1) gpu_out = pnlvm.jit_engine.pycuda.driver.Out(out) def f(): gen_fun.cuda_call(gpu_state, gpu_n, gpu_p, 
gpu_out) - return out[0] + return out.copy() + else: assert False, "Unknown mode: {}".format(mode) From 0f226f3df35ed150f567bd17d3f85b879c3212f4 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Sun, 4 Aug 2024 16:50:04 -0400 Subject: [PATCH 293/410] llvm/Functions/GridSearch: Remove duplicate/dead code Add comment explaining the use of reservoir sampling in Python code. Signed-off-by: Jan Vesely --- .../nonstateful/optimizationfunctions.py | 28 +++++++------------ 1 file changed, 10 insertions(+), 18 deletions(-) diff --git a/psyneulink/core/components/functions/nonstateful/optimizationfunctions.py b/psyneulink/core/components/functions/nonstateful/optimizationfunctions.py index 2e157d7b324..262f4bb628b 100644 --- a/psyneulink/core/components/functions/nonstateful/optimizationfunctions.py +++ b/psyneulink/core/components/functions/nonstateful/optimizationfunctions.py @@ -2096,19 +2096,15 @@ def _function(self, # if ocm is not None and ocm.parameters.comp_execution_mode._get(context) in {"PTX", "LLVM"}: if ocm is not None and ocm.parameters.comp_execution_mode._get(context) in {"PTX", "LLVM"}: - # If we have a numpy array, convert back to ctypes - if isinstance(all_values, np.ndarray): - ct_values = all_values.flatten().ctypes.data_as(ctypes.POINTER(ctypes.c_double)) - num_values = len(all_values.flatten()) - else: - ct_values = all_values - num_values = len(ct_values) + ct_values = all_values + num_values = len(ct_values) # Reduce array of values to min/max # select_min params are: # params, state, min_sample_ptr, sample_ptr, min_value_ptr, value_ptr, opt_count_ptr, count min_tags = frozenset({"select_min", "evaluate_type_objective"}) bin_func = pnlvm.LLVMBinaryFunction.from_obj(self, tags=min_tags) + ct_param = bin_func.byref_arg_types[0](*self._get_param_initializer(context)) ct_state = bin_func.byref_arg_types[1](*self._get_state_initializer(context)) ct_opt_sample = bin_func.byref_arg_types[2](float("NaN")) @@ -2123,17 +2119,7 @@ def _function(self, optimal_value = ct_opt_value.value optimal_sample = np.ctypeslib.as_array(ct_opt_sample) - - if not isinstance(all_values, np.ndarray): - all_values = np.ctypeslib.as_array(ct_values) - - # These are normally stored in the parent function (OptimizationFunction). - # Since we didn't call super()._function like the python path, - # save the values here - if self.parameters.save_samples._get(context): - self.parameters.saved_samples._set(all_samples, context) - if self.parameters.save_values._get(context): - self.parameters.saved_values._set(all_values, context) + all_values = np.ctypeslib.as_array(ct_values) # Python version else: @@ -2153,6 +2139,12 @@ def _function(self, [all_samples[:,i] for i in range(all_samples.shape[1])]) optimal_value, optimal_sample = next(value_sample_pairs) + # The algorithm below implements "Reservoir sampling"[0]. This + # matches the compiled implementation of "select_min". The + # advantage of reservoir sampling is constant memory requirements + # and a single pass over the evaluated values. + # The disadvantage is multiple calls to the PRNG. 
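The comment block above motivates the tie-breaking scheme: walk the evaluated values once and, among all samples tied for the optimum, keep each with probability 1/k, where k is the number of ties seen so far, which yields a uniform choice without storing the ties. A standalone sketch of that reservoir-style selection (not the compiled select_min itself; names are illustrative):

    import numpy as np

    def select_min_reservoir(values, rng):
        best_index, tie_count = 0, 1
        for i, value in enumerate(values[1:], start=1):
            if value < values[best_index]:
                best_index, tie_count = i, 1
            elif value == values[best_index]:
                tie_count += 1
                # keep the newcomer with probability 1/tie_count -> uniform over all ties
                if rng.uniform(0.0, 1.0) < 1.0 / tie_count:
                    best_index = i
        return best_index

    rng = np.random.default_rng(0)
    print(select_min_reservoir([3.0, 1.0, 2.0, 1.0, 1.0], rng))   # 1, 3 or 4, each with probability 1/3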
+ # https://en.wikipedia.org/wiki/Reservoir_sampling select_randomly = self.parameters.select_randomly_from_optimal_values._get(context) for value, sample in value_sample_pairs: if select_randomly and np.allclose(value, optimal_value): From 3a7fe955736e8b3586db474344dc3dcb79cc2391 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Sun, 4 Aug 2024 17:43:49 -0400 Subject: [PATCH 294/410] llvm/Functions/GridSearch: Use Numpy structures for fixed sizes arguments function params and state are still left as ctypes as they can't be easily reinitialized Signed-off-by: Jan Vesely --- .../nonstateful/optimizationfunctions.py | 29 ++++++++++--------- psyneulink/core/llvm/__init__.py | 4 +-- 2 files changed, 18 insertions(+), 15 deletions(-) diff --git a/psyneulink/core/components/functions/nonstateful/optimizationfunctions.py b/psyneulink/core/components/functions/nonstateful/optimizationfunctions.py index 262f4bb628b..5ff6359225e 100644 --- a/psyneulink/core/components/functions/nonstateful/optimizationfunctions.py +++ b/psyneulink/core/components/functions/nonstateful/optimizationfunctions.py @@ -2103,22 +2103,25 @@ def _function(self, # select_min params are: # params, state, min_sample_ptr, sample_ptr, min_value_ptr, value_ptr, opt_count_ptr, count min_tags = frozenset({"select_min", "evaluate_type_objective"}) - bin_func = pnlvm.LLVMBinaryFunction.from_obj(self, tags=min_tags) + bin_func = pnlvm.LLVMBinaryFunction.from_obj(self, tags=min_tags, numpy_args=(2, 4, 6)) ct_param = bin_func.byref_arg_types[0](*self._get_param_initializer(context)) ct_state = bin_func.byref_arg_types[1](*self._get_state_initializer(context)) - ct_opt_sample = bin_func.byref_arg_types[2](float("NaN")) - ct_alloc = None # NULL for samples - ct_opt_value = bin_func.byref_arg_types[4]() - ct_opt_count = bin_func.byref_arg_types[6](0) - ct_start = bin_func.c_func.argtypes[7](0) - ct_stop = bin_func.c_func.argtypes[8](num_values) - - bin_func(ct_param, ct_state, ct_opt_sample, ct_alloc, ct_opt_value, - ct_values, ct_opt_count, ct_start, ct_stop) - - optimal_value = ct_opt_value.value - optimal_sample = np.ctypeslib.as_array(ct_opt_sample) + optimal_sample = bin_func.np_buffer_for_arg(2) + optimal_value = bin_func.np_buffer_for_arg(4) + number_of_optimal_values = bin_func.np_buffer_for_arg(6, fill_value=0) + + bin_func(ct_param, + ct_state, + optimal_sample, + None, # samples. NULL, it's generated by the function. 
+ optimal_value, + ct_values, + number_of_optimal_values, + bin_func.c_func.argtypes[7](0), # start + bin_func.c_func.argtypes[8](num_values)) # stop + + # Convert outputs to Numpy/Python all_values = np.ctypeslib.as_array(ct_values) # Python version diff --git a/psyneulink/core/llvm/__init__.py b/psyneulink/core/llvm/__init__.py index 4b20e49aa97..568ef7ec910 100644 --- a/psyneulink/core/llvm/__init__.py +++ b/psyneulink/core/llvm/__init__.py @@ -226,13 +226,13 @@ def cuda_wrap_call(self, *args, **kwargs): wrap_args = (jit_engine.pycuda.driver.InOut(a) if isinstance(a, np.ndarray) else a for a in args) self.cuda_call(*wrap_args, **kwargs) - def np_buffer_for_arg(self, arg_num, *, extra_dimensions=()): + def np_buffer_for_arg(self, arg_num, *, extra_dimensions=(), fill_value=np.nan): out_base = self.np_params[arg_num].base out_shape = extra_dimensions + self.np_params[arg_num].shape # fill the buffer with NaN poison - return np.full(out_shape, np.nan, dtype=out_base) + return np.full(out_shape, fill_value, dtype=out_base) @staticmethod @functools.lru_cache(maxsize=32) From b90adc04c8e2cceeb6d2d03354d38563197c9100 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Mon, 5 Aug 2024 17:36:05 -0400 Subject: [PATCH 295/410] llvm: Remove support for running multiple contexts Execute the code per-context in multiple Python threads instead. Signed-off-by: Jan Vesely --- .../nonstateful/optimizationfunctions.py | 2 +- psyneulink/core/llvm/__init__.py | 10 - psyneulink/core/llvm/codegen.py | 50 ---- psyneulink/core/llvm/execution.py | 258 +++++------------- tests/llvm/test_multiple_executions.py | 235 ---------------- 5 files changed, 62 insertions(+), 493 deletions(-) delete mode 100644 tests/llvm/test_multiple_executions.py diff --git a/psyneulink/core/components/functions/nonstateful/optimizationfunctions.py b/psyneulink/core/components/functions/nonstateful/optimizationfunctions.py index 5ff6359225e..eee98a83d2d 100644 --- a/psyneulink/core/components/functions/nonstateful/optimizationfunctions.py +++ b/psyneulink/core/components/functions/nonstateful/optimizationfunctions.py @@ -831,7 +831,7 @@ def _is_static(it:SampleIterator): num_evals = np.prod([d._num for d in self.search_space]) # Map allocations to values - comp_exec = pnlvm.execution.CompExecution(ocm.agent_rep, [context.execution_id]) + comp_exec = pnlvm.execution.CompExecution(ocm.agent_rep, context.execution_id) execution_mode = ocm.parameters.comp_execution_mode._get(context) if execution_mode == "PTX": outcomes = comp_exec.cuda_evaluate(inputs, num_inputs_sets, num_evals, get_results) diff --git a/psyneulink/core/llvm/__init__.py b/psyneulink/core/llvm/__init__.py index 568ef7ec910..1a921470690 100644 --- a/psyneulink/core/llvm/__init__.py +++ b/psyneulink/core/llvm/__init__.py @@ -245,16 +245,6 @@ def from_obj(obj, *, tags:frozenset=frozenset(), numpy_args:tuple=()): def get(name: str, *, numpy_args:tuple=()): return LLVMBinaryFunction(name, numpy_args=numpy_args) - def get_multi_run(self, *, numpy_args=()): - try: - multirun_llvm = _find_llvm_function(self.name + "_multirun") - except ValueError: - function = _find_llvm_function(self.name) - with LLVMBuilderContext.get_current() as ctx: - multirun_llvm = codegen.gen_multirun_wrapper(ctx, function) - - return LLVMBinaryFunction.get(multirun_llvm.name, numpy_args=numpy_args) - _cpu_engine = None _ptx_engine = None diff --git a/psyneulink/core/llvm/codegen.py b/psyneulink/core/llvm/codegen.py index df792ce5fe9..cd14fadc52e 100644 --- a/psyneulink/core/llvm/codegen.py +++ 
b/psyneulink/core/llvm/codegen.py @@ -1119,56 +1119,6 @@ def gen_composition_run(ctx, composition, *, tags:frozenset): return llvm_func -def gen_multirun_wrapper(ctx, function: ir.Function) -> ir.Function: - if function.module is not ctx.module: - function = ir.Function(ctx.module, function.type.pointee, function.name) - assert function.is_declaration - - args = [a.type for a in function.args] - args.append(ctx.int32_ty.as_pointer()) - multirun_ty = ir.FunctionType(function.type.pointee.return_type, args) - multirun_f = ir.Function(ctx.module, multirun_ty, function.name + "_multirun") - block = multirun_f.append_basic_block(name="entry") - builder = ir.IRBuilder(block) - - multi_runs = builder.load(multirun_f.args[-1]) - # Runs need special handling. data_in and data_out are one dimensional, - # but hold entries for all parallel invocations. - is_comp_run = len(function.args) == 7 - if is_comp_run: - trials_count = builder.load(multirun_f.args[5]) - input_count = builder.load(multirun_f.args[6]) - - with helpers.for_loop_zero_inc(builder, multi_runs, "multi_run_loop") as (b, index): - # Index all pointer arguments - indexed_args = [] - for i, arg in enumerate(multirun_f.args[:-1]): - # Don't adjust #inputs and #trials - if isinstance(arg.type, ir.PointerType): - offset = index - # #runs and #trials needs to be the same for every invocation - if is_comp_run and i >= 5: - offset = ctx.int32_ty(0) - # Reset trial count for every invocation. - # Previous runs might have finished earlier - if i == 5: - builder.store(trials_count, arg) - # data arrays need special handling - elif is_comp_run and i == 4: # data_out - offset = b.mul(index, trials_count) - elif is_comp_run and i == 3: # data_in - offset = b.mul(index, input_count) - - arg = b.gep(arg, [offset]) - - indexed_args.append(arg) - - b.call(function, indexed_args) - - builder.ret_void() - return multirun_f - - def gen_autodiffcomp_exec(ctx, composition, *, tags:frozenset): """Creates llvm bin execute for autodiffcomp""" assert composition.controller is None diff --git a/psyneulink/core/llvm/execution.py b/psyneulink/core/llvm/execution.py index f90919b97bc..60a7967e10f 100644 --- a/psyneulink/core/llvm/execution.py +++ b/psyneulink/core/llvm/execution.py @@ -69,20 +69,15 @@ def _get_compilation_param(self, name, init_method, arg): if saved is None: struct_ty = self._bin_func.byref_arg_types[arg] init_f = getattr(self._obj, init_method) - if len(self._execution_contexts) > 1: - struct_ty = struct_ty * len(self._execution_contexts) - init_start = time.time() - initializer = (init_f(ex) for ex in self._execution_contexts) - else: - init_start = time.time() - initializer = init_f(self._execution_contexts[0]) + init_start = time.time() + initializer = init_f(self._execution_context) init_end = time.time() struct = struct_ty(*initializer) struct_end = time.time() # numpy "frombuffer" creates a shared memory view of the provided buffer - numpy_struct = np.frombuffer(struct, dtype=self._bin_func.np_params[arg], count=len(self._execution_contexts)) + numpy_struct = np.frombuffer(struct, dtype=self._bin_func.np_params[arg], count=1) assert numpy_struct.nbytes == ctypes.sizeof(struct), \ "Size mismatch ({}), numpy: {} vs. 
ctypes:{}".format(name, numpy_struct.nbytes, ctypes.sizeof(struct)) @@ -101,21 +96,19 @@ def _get_compilation_param(self, name, init_method, arg): _pretty_size(ctypes.sizeof(struct_ty)), ")", "for", self._obj.name) - if len(self._execution_contexts) == 1: - - numpy_struct.shape = () + numpy_struct.shape = () - if name == '_state': - self._copy_params_to_pnl(self._execution_contexts[0], - self._obj, - numpy_struct, - "llvm_state_ids") + if name == '_state': + self._copy_params_to_pnl(self._execution_context, + self._obj, + numpy_struct, + "llvm_state_ids") - elif name == '_param': - self._copy_params_to_pnl(self._execution_contexts[0], - self._obj, - numpy_struct, - "llvm_param_ids") + elif name == '_param': + self._copy_params_to_pnl(self._execution_context, + self._obj, + numpy_struct, + "llvm_param_ids") return saved @@ -228,11 +221,6 @@ def __init__(self, buffers=['param_struct', 'state_struct']): # Initialize GPU buffer map self._gpu_buffers = {"_" + b: None for b in buffers} - @property - def _bin_func_multirun(self): - # CUDA uses the same function for single and multi run - return self._bin_func - def __get_cuda_arg(self, struct_name, arg_handler): gpu_buffer = self._gpu_buffers[struct_name] @@ -269,37 +257,25 @@ def cuda_execute(self, variable): new_var = np.asfarray(variable, dtype=self._bin_func.np_params[2].base) data_in = jit_engine.pycuda.driver.In(new_var) - extra_dims = (len(self._execution_contexts),) if len(self._execution_contexts) > 1 else () - data_out = self._bin_func.np_buffer_for_arg(3, extra_dimensions=extra_dims) + data_out = self._bin_func.np_buffer_for_arg(3) self._bin_func.cuda_call(self._cuda_param_struct, self._cuda_state_struct, data_in, - jit_engine.pycuda.driver.Out(data_out), - threads=len(self._execution_contexts)) + jit_engine.pycuda.driver.Out(data_out)) return self._get_indexable(data_out) class FuncExecution(CUDAExecution): - def __init__(self, component, execution_ids=[None], *, tags=frozenset()): + def __init__(self, component, execution_id=None, *, tags=frozenset()): super().__init__() self._bin_func = pnlvm.LLVMBinaryFunction.from_obj(component, tags=tags, numpy_args=(0, 1, 2, 3)) - self._execution_contexts = [ - Context(execution_id=eid) for eid in execution_ids - ] + self._execution_context = Context(execution_id=execution_id) self._component = component - - if len(execution_ids) > 1: - self._bin_multirun = self._bin_func.get_multi_run() - self._ct_len = ctypes.c_int(len(execution_ids)) - - vo_ty = self._bin_func.byref_arg_types[3] * len(execution_ids) - self._ct_vo = vo_ty() - @property def _obj(self): return self._component @@ -315,21 +291,10 @@ def _state_struct(self): def execute(self, variable): new_variable = np.asfarray(variable, dtype=self._bin_func.np_params[2].base) - if len(self._execution_contexts) > 1: - # wrap_call casts the arguments so we only need contiguous data layout - ct_vi = np.ctypeslib.as_ctypes(new_variable) + data_in = new_variable.reshape(self._bin_func.np_params[2].shape) + data_out = self._bin_func.np_buffer_for_arg(3) - self._bin_multirun.wrap_call(self._param_struct[0], - self._state_struct[0], - ct_vi, - self._ct_vo, - self._ct_len) - return _convert_ctype_to_python(self._ct_vo) - else: - data_out = self._bin_func.np_buffer_for_arg(3) - data_in = new_variable.reshape(self._bin_func.np_params[2].shape) - - self._bin_func(self._param_struct[1], self._state_struct[1], data_in, data_out) + self._bin_func(self._param_struct[1], self._state_struct[1], data_in, data_out) return self._get_indexable(data_out) @@ -342,26 
+307,19 @@ class CompExecution(CUDAExecution): active_executions = weakref.WeakSet() - def __init__(self, composition, execution_ids=[None], *, additional_tags=frozenset()): + def __init__(self, composition, execution_id, *, additional_tags=frozenset()): super().__init__(buffers=['state_struct', 'param_struct', 'data_struct', 'conditions']) self._composition = composition - self._execution_contexts = [ - Context(execution_id=eid) for eid in execution_ids - ] + self._execution_context = Context(execution_id=execution_id) self.__bin_exec_func = None - self.__bin_exec_multi_func = None self.__bin_func = None self.__bin_run_func = None - self.__bin_run_multi_func = None self.__frozen_values = None self.__tags = frozenset(additional_tags) # Scheduling conditions, only used by "execute" self.__conditions = None - if len(execution_ids) > 1: - self._ct_len = ctypes.c_int(len(execution_ids)) - self.active_executions.add(self) def __del__(self): @@ -376,8 +334,7 @@ def get(composition, context, additional_tags=frozenset()): execution = executions.get(additional_tags, None) if execution is None: - execution = pnlvm.CompExecution(composition, [context.execution_id], - additional_tags=additional_tags) + execution = pnlvm.CompExecution(composition, context.execution_id, additional_tags=additional_tags) executions[additional_tags] = execution return execution @@ -389,7 +346,6 @@ def _obj(self): @property def _bin_func(self): if self.__bin_func is not None: - assert len(self._execution_contexts) == 1 return self.__bin_func if self.__bin_exec_func is not None: return self.__bin_exec_func @@ -398,15 +354,6 @@ def _bin_func(self): assert False, "Binary function not set for execution!" - @property - def _bin_func_multirun(self): - if self.__bin_exec_multi_func is not None: - return self.__bin_exec_multi_func - if self.__bin_run_multi_func is not None: - return self.__bin_run_multi_func - - return super()._bin_func_multirun - def _set_bin_node(self, node): assert node in self._composition._all_nodes node_assembly = builder_context.LLVMBuilderContext.get_current().get_node_assembly(self._composition, node) @@ -419,18 +366,13 @@ def _conditions(self): if self.__conditions is None: gen = helpers.ConditionGenerator(None, self._composition) - if len(self._execution_contexts) > 1: - conditions_ctype = self._bin_func_multirun.byref_arg_types[4] * len(self._execution_contexts) - conditions_initializer = (gen.get_condition_initializer() for _ in self._execution_contexts) - else: - conditions_ctype = self._bin_func.byref_arg_types[4] - conditions_initializer = gen.get_condition_initializer() + conditions_ctype = self._bin_func.byref_arg_types[4] + conditions_initializer = gen.get_condition_initializer() ct_conditions = conditions_ctype(*conditions_initializer) - np_conditions = np.frombuffer(ct_conditions, dtype=self._bin_func.np_params[4], count=len(self._execution_contexts)) + np_conditions = np.frombuffer(ct_conditions, dtype=self._bin_func.np_params[4], count=1) - if len(self._execution_contexts) == 1: - np_conditions.shape = () + np_conditions.shape = () self.__conditions = (ct_conditions, np_conditions) @@ -459,23 +401,6 @@ def _data_struct(self): def _data_struct(self, data_struct): self._data = data_struct - def _extract_node_struct_from_ctype(self, node, data): - # state structure consists of a list of node states, - # followed by a list of projection contexts; get the first one - # parameter structure consists of a list of node parameters, - # followed by a list of projection parameters; get the first one - # 
output structure consists of a list of node outputs, - # followed by a list of nested data structures; get the first one - field_name = data._fields_[0][0] - res_struct = getattr(data, field_name) - - # Get the index into the array of all nodes - index = self._composition._get_node_index(node) - field_name = res_struct._fields_[index][0] - res_struct = getattr(res_struct, field_name) - - return _convert_ctype_to_python(res_struct) - def _extract_node_struct_from_numpy(self, node, data): # state structure consists of a list of node states, # followed by a list of projection contexts; get the first one @@ -494,10 +419,7 @@ def _extract_node_struct_from_numpy(self, node, data): return node_struct.copy().tolist() if node_struct.shape == () else node_struct.copy() def extract_node_struct(self, node, struct): - if len(self._execution_contexts) > 1: - return [self._extract_node_struct_from_ctype(node, struct[0][i]) for i, _ in enumerate(self._execution_contexts)] - else: - return self._extract_node_struct_from_numpy(node, struct[1]) + return self._extract_node_struct_from_numpy(node, struct[1]) def extract_frozen_node_output(self, node): return self.extract_node_struct(node, self.__frozen_values) @@ -525,22 +447,11 @@ def _get_input_struct(self, inputs): # Either node or composition execute. # Read provided input data and parse into an array (generator) - if len(self._execution_contexts) > 1: - assert len(self._execution_contexts) == len(inputs) - - # All execute functions expect inputs to be 3rd param. - ct_input_type = self._bin_func.byref_arg_types[2] * len(self._execution_contexts) - - input_data = (([x] for x in self._composition._build_variable_for_input_CIM(inp)) for inp in inputs) - - ct_input = ct_input_type(*_tupleize(input_data)) - np_input = np.ctypeslib.as_array(ct_input) - else: - ct_input = None - data = self._composition._build_variable_for_input_CIM(inputs) + ct_input = None + data = self._composition._build_variable_for_input_CIM(inputs) - np_input = np.asarray(_tupleize(data), dtype=self._bin_func.np_params[2].base) - np_input = np_input.reshape(self._bin_func.np_params[2].shape) + np_input = np.asarray(_tupleize(data), dtype=self._bin_func.np_params[2].base) + np_input = np_input.reshape(self._bin_func.np_params[2].shape) if "stat" in self._debug_env: print("Input struct size:", _pretty_size(np_input.nbytes), "for", self._composition.name) @@ -555,8 +466,7 @@ def freeze_values(self): def execute_node(self, node, inputs=None): # We need to reconstruct the input dictionary here if it was not provided. # This happens during node execution of nested compositions. - assert len(self._execution_contexts) == 1 - context = self._execution_contexts[0] + context = self._execution_context if inputs is None and node is self._composition.input_CIM: @@ -610,29 +520,14 @@ def _bin_exec_func(self): return self.__bin_exec_func - @property - def _bin_exec_multi_func(self): - if self.__bin_exec_multi_func is None: - self.__bin_exec_multi_func = self._bin_exec_func.get_multi_run() - - return self.__bin_exec_multi_func - def execute(self, inputs): # NOTE: Make sure that input struct generation is inlined. # We need the binary function to be setup for it to work correctly. 
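        # A rough sketch of the per-context threading pattern the commit message describes
        # (the names below are placeholders, not part of the patch):
        #     with concurrent.futures.ThreadPoolExecutor() as pool:
        #         pool.map(lambda pair: pair[0].execute(pair[1]),
        #                  zip(per_context_executions, per_context_inputs))
        # where each element of per_context_executions is built with its own execution context
        # and per_context_inputs holds the matching inputs.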
- if len(self._execution_contexts) > 1: - self._bin_exec_multi_func.wrap_call(self._state_struct[0], - self._param_struct[0], - self._get_input_struct(inputs)[0], - self._data_struct[0], - self._conditions[0], - self._ct_len) - else: - self._bin_exec_func(self._state_struct[1], - self._param_struct[1], - self._get_input_struct(inputs)[1], - self._data_struct[1], - self._conditions[1]) + self._bin_exec_func(self._state_struct[1], + self._param_struct[1], + self._get_input_struct(inputs)[1], + self._data_struct[1], + self._conditions[1]) def cuda_execute(self, inputs): # NOTE: Make sure that input struct generation is inlined. @@ -641,8 +536,7 @@ def cuda_execute(self, inputs): self._cuda_param_struct, jit_engine.pycuda.driver.In(self._get_input_struct(inputs)[1]), self._cuda_data_struct, - self._cuda_conditions, - threads=len(self._execution_contexts)) + self._cuda_conditions) # Methods used to accelerate "Run" def _get_run_input_struct(self, inputs, num_input_sets, arg=3): @@ -650,11 +544,9 @@ def _get_run_input_struct(self, inputs, num_input_sets, arg=3): bin_f = self._bin_run_func if arg == 3 else self._bin_func input_type = bin_f.byref_arg_types[arg] - c_input_type = (input_type * num_input_sets) * len(self._execution_contexts) - if len(self._execution_contexts) == 1: - inputs = [inputs] + c_input_type = (input_type * num_input_sets) * 1 + inputs = [inputs] - assert len(inputs) == len(self._execution_contexts) # Extract input for each trial and execution id run_inputs = ((([x] for x in self._composition._build_variable_for_input_CIM({k:v[i] for k,v in inp.items()})) for i in range(num_input_sets)) for inp in inputs) c_inputs = c_input_type(*_tupleize(run_inputs)) @@ -668,7 +560,6 @@ def _get_run_input_struct(self, inputs, num_input_sets, arg=3): return c_inputs def _get_generator_run_input_struct(self, inputs, runs): - assert len(self._execution_contexts) == 1 # Extract input for each trial run_inputs = ((np.atleast_2d(x) for x in self._composition._build_variable_for_input_CIM({k:np.atleast_1d(v) for k,v in inp.items()})) for inp in inputs) run_inputs = _tupleize(run_inputs) @@ -685,13 +576,6 @@ def _bin_run_func(self): return self.__bin_run_func - @property - def _bin_run_multi_func(self): - if self.__bin_run_multi_func is None: - self.__bin_run_multi_func = self._bin_run_func.get_multi_run() - - return self.__bin_run_multi_func - def run(self, inputs, runs=0, num_input_sets=0): if isgenerator(inputs): inputs, runs = self._get_generator_run_input_struct(inputs, runs) @@ -701,8 +585,6 @@ def run(self, inputs, runs=0, num_input_sets=0): inputs = self._get_run_input_struct(inputs, num_input_sets) ct_vo = self._bin_run_func.byref_arg_types[4] * runs - if len(self._execution_contexts) > 1: - ct_vo = ct_vo * len(self._execution_contexts) outputs = ct_vo() @@ -714,32 +596,21 @@ def run(self, inputs, runs=0, num_input_sets=0): runs_count = ctypes.c_uint(runs) input_count = ctypes.c_uint(num_input_sets) - if len(self._execution_contexts) > 1: - self._bin_run_multi_func.wrap_call(self._state_struct[0], - self._param_struct[0], - self._data_struct[0], - inputs, - outputs, - runs_count, - input_count, - self._ct_len) - - return _convert_ctype_to_python(outputs) - else: - # This is only needed for non-generator inputs that are wrapped in an extra context dimension - inputs = ctypes.cast(inputs, self._bin_run_func.c_func.argtypes[3]) - self._bin_run_func(self._state_struct[1], - self._param_struct[1], - self._data_struct[1], - inputs, - outputs, - runs_count, - input_count) + # The cast is only 
needed for non-generator inputs that are wrapped in an extra context dimension + inputs = ctypes.cast(inputs, self._bin_run_func.c_func.argtypes[3]) - # Extract only #trials elements in case the run exited early - assert runs_count.value <= runs, "Composition ran more times than allowed!" - return _convert_ctype_to_python(outputs)[0:runs_count.value] + self._bin_run_func(self._state_struct[1], + self._param_struct[1], + self._data_struct[1], + inputs, + outputs, + runs_count, + input_count) + + # Extract only #trials elements in case the run exited early + assert runs_count.value <= runs, "Composition ran more times than allowed!" + return _convert_ctype_to_python(outputs)[0:runs_count.value] def cuda_run(self, inputs, runs, num_input_sets): # Create input buffer @@ -752,13 +623,11 @@ def cuda_run(self, inputs, runs, num_input_sets): # Create output buffer output_type = (self._bin_run_func.byref_arg_types[4] * runs) - if len(self._execution_contexts) > 1: - output_type = output_type * len(self._execution_contexts) ct_out = output_type() # number of trials argument - np_runs = np.full(len(self._execution_contexts), runs, dtype=np.int32) + np_runs = np.asarray(runs, dtype=np.int32).copy() self._bin_run_func.cuda_call(self._cuda_state_struct, self._cuda_param_struct, @@ -766,20 +635,15 @@ def cuda_run(self, inputs, runs, num_input_sets): jit_engine.pycuda.driver.In(np.ctypeslib.as_array(ct_inputs)), # input jit_engine.pycuda.driver.Out(np.ctypeslib.as_array(ct_out)), # output jit_engine.pycuda.driver.InOut(np_runs), # runs - jit_engine.pycuda.driver.In(np.int32(num_input_sets)), # number of inputs - threads=len(self._execution_contexts)) + jit_engine.pycuda.driver.In(np.int32(num_input_sets))) # number of inputs - assert all(np_runs <= runs), "Composition ran more times than allowed: {}".format(runs) + assert np_runs <= runs, "Composition ran more times than allowed: {}".format(runs) - if len(self._execution_contexts) > 1: - return _convert_ctype_to_python(ct_out) - else: - # Extract only #trials elements in case the run exited early - return _convert_ctype_to_python(ct_out)[0:np_runs[0]] + # Extract only #trials elements in case the run exited early + return _convert_ctype_to_python(ct_out)[0:np_runs] def _prepare_evaluate(self, inputs, num_input_sets, num_evaluations, all_results:bool): ocm = self._composition.controller - assert len(self._execution_contexts) == 1 eval_type = "evaluate_type_all_results" if all_results else "evaluate_type_objective" tags = {"evaluate", "alloc_range", eval_type} @@ -803,7 +667,7 @@ def _prepare_evaluate(self, inputs, num_input_sets, num_evaluations, all_results # Output ctype out_el_ty = bin_func.byref_arg_types[4] if all_results: - num_trials = ocm.parameters.num_trials_per_estimate.get(self._execution_contexts[0]) + num_trials = ocm.parameters.num_trials_per_estimate.get(self._execution_context) if num_trials is None: num_trials = num_input_sets out_el_ty *= num_trials diff --git a/tests/llvm/test_multiple_executions.py b/tests/llvm/test_multiple_executions.py deleted file mode 100644 index bda26a1db83..00000000000 --- a/tests/llvm/test_multiple_executions.py +++ /dev/null @@ -1,235 +0,0 @@ -import pytest -import psyneulink.core.llvm as pnlvm - -import numpy as np -import psyneulink.core.components.functions.function as Function -import psyneulink.core.components.functions.nonstateful.objectivefunctions as Functions -from psyneulink.core.components.functions.stateful.integratorfunctions import AdaptiveIntegrator -from 
psyneulink.core.components.functions.nonstateful.transferfunctions import Logistic -from psyneulink.core.components.mechanisms.processing.processingmechanism import ProcessingMechanism -from psyneulink.core.components.mechanisms.processing.transfermechanism import TransferMechanism -from psyneulink.core.compositions.composition import Composition -import psyneulink.core.globals.keywords as kw - -SIZE=10 -# Some metrics (CROSS_ENTROPY) don't like 0s -test_var = [np.random.rand(SIZE) + Function.EPSILON, np.random.rand(SIZE) + Function.EPSILON] -v1 = test_var[0] -v2 = test_var[1] -expected = np.linalg.norm(v1 - v2) - -@pytest.mark.multirun -@pytest.mark.function -@pytest.mark.distance_function -@pytest.mark.benchmark -@pytest.mark.parametrize("executions", [1, 10, 100]) -def test_function(benchmark, executions, func_mode): - f = Functions.Distance(default_variable=test_var, metric=kw.EUCLIDEAN) - benchmark.group = "DistanceFunction multirun {}".format(executions) - var = [test_var for _ in range(executions)] if executions > 1 else test_var - if func_mode == 'Python': - e = f.function if executions == 1 else lambda x: [f.function(xi) for xi in x] - elif func_mode == 'LLVM': - e = pnlvm.execution.FuncExecution(f, [None for _ in range(executions)]).execute - elif func_mode == 'PTX': - e = pnlvm.execution.FuncExecution(f, [None for _ in range(executions)]).cuda_execute - - res = benchmark(e, var) - np.testing.assert_allclose(res, [expected for _ in range(executions)]) - -@pytest.mark.multirun -@pytest.mark.mechanism -@pytest.mark.transfer_mechanism -@pytest.mark.benchmark -@pytest.mark.parametrize("executions", [1, 10, 100]) -def test_mechanism(benchmark, executions, mech_mode): - benchmark.group = "TransferMechanism multirun {}".format(executions) - variable = [0 for _ in range(SIZE)] - T = TransferMechanism( - name='T', - default_variable=variable, - integration_rate=1.0, - noise=-2.0, - integrator_mode=True - ) - var = [[10.0 for _ in range(SIZE)] for _ in range(executions)] - expected = [[8.0 for i in range(SIZE)]] - if mech_mode == 'Python': - e = T.execute if executions == 1 else lambda x : [T.execute(xi) for xi in x] - elif mech_mode == 'LLVM': - e = pnlvm.execution.MechExecution(T, [None for _ in range(executions)]).execute - elif mech_mode == 'PTX': - e = pnlvm.execution.MechExecution(T, [None for _ in range(executions)]).cuda_execute - - if executions > 1: - expected = [expected for _ in range(executions)] - - res = benchmark(e, var) - np.testing.assert_allclose(res, expected) - - -@pytest.mark.multirun -@pytest.mark.nested -@pytest.mark.composition -@pytest.mark.benchmark -@pytest.mark.parametrize("executions", [1, 10, 100]) -@pytest.mark.parametrize("mode", ['Python', - pytest.param('LLVM', marks=pytest.mark.llvm), - pytest.param('PTX', marks=[pytest.mark.llvm, pytest.mark.cuda])]) -def test_nested_composition_execution(benchmark, executions, mode): - benchmark.group = "Nested Composition execution multirun {}".format(executions) - - # mechanisms - A = ProcessingMechanism(name="A", - function=AdaptiveIntegrator(rate=0.1)) - B = ProcessingMechanism(name="B", - function=Logistic) - - inner_comp = Composition(name="inner_comp") - inner_comp.add_linear_processing_pathway([A, B]) - inner_comp._analyze_graph() - - outer_comp = Composition(name="outer_comp") - outer_comp.add_node(inner_comp) - - outer_comp._analyze_graph() - - # The input dict should assign inputs origin nodes (inner_comp in this case) - var = {inner_comp: [[1.0]]} - expected = [[0.52497918747894]] - if executions > 1: - var 
= [var for _ in range(executions)] - - if mode == 'Python': - e = outer_comp.execute if executions == 1 else lambda x : [outer_comp.execute(x[i], context=i) for i in range(executions)] - res = e(var) - benchmark(e, var) - elif mode == 'LLVM': - e = pnlvm.execution.CompExecution(outer_comp, [None for _ in range(executions)]) - e.execute(var) - res = e.extract_node_output(outer_comp.output_CIM) - benchmark(e.execute, var) - elif mode == 'PTX': - e = pnlvm.execution.CompExecution(outer_comp, [None for _ in range(executions)]) - e.cuda_execute(var) - res = e.extract_node_output(outer_comp.output_CIM) - benchmark(e.cuda_execute, var) - else: - assert False, "Unknown mode: {}".format(mode) - - expected = [expected for _ in range(executions)] if executions > 1 else expected - np.testing.assert_allclose(res, expected) - - -@pytest.mark.multirun -@pytest.mark.nested -@pytest.mark.composition -@pytest.mark.benchmark -@pytest.mark.parametrize("executions", [1, 10, 100]) -@pytest.mark.parametrize("mode", ['Python', - pytest.param('LLVM', marks=pytest.mark.llvm), - pytest.param('PTX', marks=[pytest.mark.llvm, pytest.mark.cuda])]) -def test_nested_composition_run(benchmark, executions, mode): - benchmark.group = "Nested Composition multirun {}".format(executions) - - # mechanisms - A = ProcessingMechanism(name="A", - function=AdaptiveIntegrator(rate=0.1)) - B = ProcessingMechanism(name="B", - function=Logistic) - - inner_comp = Composition(name="inner_comp") - inner_comp.add_linear_processing_pathway([A, B]) - inner_comp._analyze_graph() - - outer_comp = Composition(name="outer_comp") - outer_comp.add_node(inner_comp) - - outer_comp._analyze_graph() - - # The input dict should assign inputs origin nodes (inner_comp in this case) - var = {inner_comp: [[[2.0]]]} - expected = [[[0.549833997312478]]] - if executions > 1: - var = [var for _ in range(executions)] - if mode == 'Python': - e = outer_comp.run if executions == 1 else lambda x: [outer_comp.run(x[i], context=i) for i in range(executions)] - res = e(var) - - # Composition.run returns only the result of the last trail, - # unlike results for all trials reported by CompExecution.run below - expected = expected[0] - - benchmark(e, var) - elif mode == 'LLVM': - e = pnlvm.execution.CompExecution(outer_comp, [None for _ in range(executions)]) - res = e.run(var, 1, 1) - benchmark(e.run, var, 1, 1) - elif mode == 'PTX': - e = pnlvm.execution.CompExecution(outer_comp, [None for _ in range(executions)]) - res = e.cuda_run(var, 1, 1) - benchmark(e.cuda_run, var, 1, 1) - else: - assert False, "Unknown mode: {}".format(mode) - - expected = [expected for _ in range(executions)] if executions > 1 else expected - np.testing.assert_allclose(res, expected) - - -@pytest.mark.multirun -@pytest.mark.nested -@pytest.mark.composition -@pytest.mark.benchmark -@pytest.mark.parametrize("executions", [1, 10, 100]) -@pytest.mark.parametrize("mode", [ - 'Python', - pytest.param('LLVM', marks=pytest.mark.llvm), - pytest.param('PTX', marks=[pytest.mark.llvm, pytest.mark.cuda]) -]) -def test_nested_composition_run_trials_inputs(benchmark, executions, mode): - benchmark.group = "Nested Composition mutliple trials/inputs multirun {}".format(executions) - - # mechanisms - A = ProcessingMechanism(name="A", - function=AdaptiveIntegrator(rate=0.1)) - B = ProcessingMechanism(name="B", - function=Logistic) - - inner_comp = Composition(name="inner_comp") - inner_comp.add_linear_processing_pathway([A, B]) - inner_comp._analyze_graph() - - outer_comp = Composition(name="outer_comp") - 
outer_comp.add_node(inner_comp) - - outer_comp._analyze_graph() - - # The input dict should assign inputs origin nodes (inner_comp in this case) - var = {inner_comp: [[[2.0]], [[3.0]]]} - expected = [[[0.549833997312478]], [[0.617747874769249]], [[0.6529428177055896]], [[0.7044959416252289]]] - if executions > 1: - var = [var for _ in range(executions)] - if mode == 'Python': - def f(v, num_trials, copy_results=False): - results = [] - for i in range(executions): - outer_comp.run(v[i], context=i, num_trials=num_trials) - if copy_results: # copy the results immediately, otherwise it's empty - results.append(outer_comp.results.copy()) - return results[0] if len(results) == 1 else results - - res = f(var, 4, True) if executions > 1 else f([var], 4, True) - benchmark(f if executions > 1 else outer_comp.run, var, num_trials=4) - elif mode == 'LLVM': - e = pnlvm.execution.CompExecution(outer_comp, [None for _ in range(executions)]) - res = e.run(var, 4, 2) - benchmark(e.run, var, 4, 2) - elif mode == 'PTX': - e = pnlvm.execution.CompExecution(outer_comp, [None for _ in range(executions)]) - res = e.cuda_run(var, 4, 2) - benchmark(e.cuda_run, var, 4, 2) - else: - assert False, "Unknown mode: {}".format(mode) - - expected = [expected for _ in range(executions)] if executions > 1 else expected - np.testing.assert_allclose(res, expected) From 309730b4ceccfad3fd8aacb557a1923948c6a788 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Mon, 5 Aug 2024 18:37:46 -0400 Subject: [PATCH 296/410] llvm/execution: Simplify and remove dead code Simplify run input construction. Do not store references to constructed ctype structures. Signed-off-by: Jan Vesely --- psyneulink/core/llvm/execution.py | 68 ++++++++++++------------------- 1 file changed, 27 insertions(+), 41 deletions(-) diff --git a/psyneulink/core/llvm/execution.py b/psyneulink/core/llvm/execution.py index 60a7967e10f..51c9dba203d 100644 --- a/psyneulink/core/llvm/execution.py +++ b/psyneulink/core/llvm/execution.py @@ -82,7 +82,7 @@ def _get_compilation_param(self, name, init_method, arg): assert numpy_struct.nbytes == ctypes.sizeof(struct), \ "Size mismatch ({}), numpy: {} vs. 
ctypes:{}".format(name, numpy_struct.nbytes, ctypes.sizeof(struct)) - saved = (struct, numpy_struct) + saved = numpy_struct setattr(self, name, saved) if "time_stat" in self._debug_env: @@ -224,7 +224,7 @@ def __init__(self, buffers=['param_struct', 'state_struct']): def __get_cuda_arg(self, struct_name, arg_handler): gpu_buffer = self._gpu_buffers[struct_name] - np_struct = getattr(self, struct_name)[1] + np_struct = getattr(self, struct_name) # .array is a public member of pycuda's In/Out ArgumentHandler classes if gpu_buffer is None or gpu_buffer.array is not np_struct: @@ -294,7 +294,7 @@ def execute(self, variable): data_in = new_variable.reshape(self._bin_func.np_params[2].shape) data_out = self._bin_func.np_buffer_for_arg(3) - self._bin_func(self._param_struct[1], self._state_struct[1], data_in, data_out) + self._bin_func(self._param_struct, self._state_struct, data_in, data_out) return self._get_indexable(data_out) @@ -374,11 +374,11 @@ def _conditions(self): np_conditions.shape = () - self.__conditions = (ct_conditions, np_conditions) + self.__conditions = np_conditions if "stat" in self._debug_env: print("Instantiated condition struct ( size:" , - _pretty_size(ctypes.sizeof(conditions_ctype)), ")", + _pretty_size(np_conditions.nbytes), ")", "for", self._composition.name) return self.__conditions @@ -401,7 +401,7 @@ def _data_struct(self): def _data_struct(self, data_struct): self._data = data_struct - def _extract_node_struct_from_numpy(self, node, data): + def extract_node_struct(self, node, data): # state structure consists of a list of node states, # followed by a list of projection contexts; get the first one # parameter structure consists of a list of node parameters, @@ -418,9 +418,6 @@ def _extract_node_struct_from_numpy(self, node, data): # returned results in next execution return node_struct.copy().tolist() if node_struct.shape == () else node_struct.copy() - def extract_node_struct(self, node, struct): - return self._extract_node_struct_from_numpy(node, struct[1]) - def extract_frozen_node_output(self, node): return self.extract_node_struct(node, self.__frozen_values) @@ -436,7 +433,7 @@ def extract_node_params(self, node): def insert_node_output(self, node, data): # output structure consists of a list of node outputs, # followed by a list of nested data structures; get the first one - all_nodes = self._data_struct[1][self._data_struct[1].dtype.names[0]] + all_nodes = self._data_struct[self._data_struct.dtype.names[0]] # Get the index into the array of all nodes index = self._composition._get_node_index(node) @@ -447,7 +444,6 @@ def _get_input_struct(self, inputs): # Either node or composition execute. # Read provided input data and parse into an array (generator) - ct_input = None data = self._composition._build_variable_for_input_CIM(inputs) np_input = np.asarray(_tupleize(data), dtype=self._bin_func.np_params[2].base) @@ -456,12 +452,10 @@ def _get_input_struct(self, inputs): if "stat" in self._debug_env: print("Input struct size:", _pretty_size(np_input.nbytes), "for", self._composition.name) - return ct_input, np_input + return np_input def freeze_values(self): - np_copy = self._data_struct[1].copy() - - self.__frozen_values = (None, np_copy) + self.__frozen_values = self._data_struct.copy() def execute_node(self, node, inputs=None): # We need to reconstruct the input dictionary here if it was not provided. @@ -485,7 +479,7 @@ def execute_node(self, node, inputs=None): # Numpy doesn't allow to pass NULL to the called function. 
# Create and pass a dummy buffer filled with NaN instead. if inputs is not None: - inputs = self._get_input_struct(inputs)[1] + inputs = self._get_input_struct(inputs) else: inputs = self._bin_func.np_buffer_for_arg(2) @@ -493,17 +487,13 @@ def execute_node(self, node, inputs=None): # and need frozen values available if node is not self._composition.input_CIM and node is not self._composition.parameter_CIM: assert self.__frozen_values is not None - data_in = self.__frozen_values[1] + data_in = self.__frozen_values else: # The ndarray argument check doesn't allow None for null so just provide # the same structure as outputs. - data_in = self._data_struct[1] + data_in = self._data_struct - self._bin_func(self._state_struct[1], - self._param_struct[1], - inputs, - data_in, - self._data_struct[1]) + self._bin_func(self._state_struct, self._param_struct, inputs, data_in, self._data_struct) if "comp_node_debug" in self._debug_env: print("RAN: {}. State: {}".format(node, self.extract_node_state(node))) @@ -523,18 +513,18 @@ def _bin_exec_func(self): def execute(self, inputs): # NOTE: Make sure that input struct generation is inlined. # We need the binary function to be setup for it to work correctly. - self._bin_exec_func(self._state_struct[1], - self._param_struct[1], - self._get_input_struct(inputs)[1], - self._data_struct[1], - self._conditions[1]) + self._bin_exec_func(self._state_struct, + self._param_struct, + self._get_input_struct(inputs), + self._data_struct, + self._conditions) def cuda_execute(self, inputs): # NOTE: Make sure that input struct generation is inlined. # We need the binary function to be setup for it to work correctly. self._bin_exec_func.cuda_call(self._cuda_state_struct, self._cuda_param_struct, - jit_engine.pycuda.driver.In(self._get_input_struct(inputs)[1]), + jit_engine.pycuda.driver.In(self._get_input_struct(inputs)), self._cuda_data_struct, self._cuda_conditions) @@ -544,11 +534,10 @@ def _get_run_input_struct(self, inputs, num_input_sets, arg=3): bin_f = self._bin_run_func if arg == 3 else self._bin_func input_type = bin_f.byref_arg_types[arg] - c_input_type = (input_type * num_input_sets) * 1 - inputs = [inputs] + c_input_type = (input_type * num_input_sets) # Extract input for each trial and execution id - run_inputs = ((([x] for x in self._composition._build_variable_for_input_CIM({k:v[i] for k,v in inp.items()})) for i in range(num_input_sets)) for inp in inputs) + run_inputs = (([x] for x in self._composition._build_variable_for_input_CIM({k:v[i] for k,v in inputs.items()})) for i in range(num_input_sets)) c_inputs = c_input_type(*_tupleize(run_inputs)) if "stat" in self._debug_env: print("Instantiated struct: input ( size:" , @@ -597,12 +586,9 @@ def run(self, inputs, runs=0, num_input_sets=0): runs_count = ctypes.c_uint(runs) input_count = ctypes.c_uint(num_input_sets) - # The cast is only needed for non-generator inputs that are wrapped in an extra context dimension - inputs = ctypes.cast(inputs, self._bin_run_func.c_func.argtypes[3]) - - self._bin_run_func(self._state_struct[1], - self._param_struct[1], - self._data_struct[1], + self._bin_run_func(self._state_struct, + self._param_struct, + self._data_struct, inputs, outputs, runs_count, @@ -657,9 +643,9 @@ def _prepare_evaluate(self, inputs, num_input_sets, num_evaluations, all_results # Directly initialized structures assert ocm.agent_rep is self._composition - comp_params = self._get_compilation_param('_eval_param', '_get_param_initializer', 0)[1] - comp_state = 
self._get_compilation_param('_eval_state', '_get_state_initializer', 1)[1] - comp_data = self._get_compilation_param('_eval_data', '_get_data_initializer', 6)[1] + comp_params = self._get_compilation_param('_eval_param', '_get_param_initializer', 0) + comp_state = self._get_compilation_param('_eval_state', '_get_state_initializer', 1) + comp_data = self._get_compilation_param('_eval_data', '_get_data_initializer', 6) # Construct input variable, the 5th parameter of the evaluate function ct_inputs = self._get_run_input_struct(inputs, num_input_sets, 5) From 7ab159935cb8b2a3ebbe989d408b029d42328d14 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Mon, 5 Aug 2024 21:08:51 -0400 Subject: [PATCH 297/410] llvm/execution/run: Use numpy structures for input and execution counts Signed-off-by: Jan Vesely --- psyneulink/core/llvm/execution.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/psyneulink/core/llvm/execution.py b/psyneulink/core/llvm/execution.py index 51c9dba203d..f834ac540d3 100644 --- a/psyneulink/core/llvm/execution.py +++ b/psyneulink/core/llvm/execution.py @@ -561,7 +561,7 @@ def _get_generator_run_input_struct(self, inputs, runs): def _bin_run_func(self): if self.__bin_run_func is None: self.__bin_run_func = pnlvm.LLVMBinaryFunction.from_obj( - self._composition, tags=self.__tags.union({"run"}), numpy_args=(0, 1, 2)) + self._composition, tags=self.__tags.union({"run"}), numpy_args=(0, 1, 2, 5, 6)) return self.__bin_run_func @@ -583,8 +583,8 @@ def run(self, inputs, runs=0, num_input_sets=0): print("Output struct size:", _pretty_size(ctypes.sizeof(outputs)), "for", self._composition.name) - runs_count = ctypes.c_uint(runs) - input_count = ctypes.c_uint(num_input_sets) + runs_count = np.asarray(runs, dtype=np.uint32).copy() + input_count = np.asarray(num_input_sets, dtype=np.uint32) self._bin_run_func(self._state_struct, self._param_struct, @@ -595,8 +595,8 @@ def run(self, inputs, runs=0, num_input_sets=0): input_count) # Extract only #trials elements in case the run exited early - assert runs_count.value <= runs, "Composition ran more times than allowed!" - return _convert_ctype_to_python(outputs)[0:runs_count.value] + assert runs_count <= runs, "Composition ran more times than allowed!" 
+ return _convert_ctype_to_python(outputs)[0:runs_count] def cuda_run(self, inputs, runs, num_input_sets): # Create input buffer From aa166a6d68ec015453d52fae7c29dd1af87caab2 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Mon, 5 Aug 2024 21:22:55 -0400 Subject: [PATCH 298/410] llvm/execution: Consolidate shared code between CPU and GPU 'run' Signed-off-by: Jan Vesely --- psyneulink/core/llvm/execution.py | 51 ++++++++++++------------------- 1 file changed, 20 insertions(+), 31 deletions(-) diff --git a/psyneulink/core/llvm/execution.py b/psyneulink/core/llvm/execution.py index f834ac540d3..cddea113df7 100644 --- a/psyneulink/core/llvm/execution.py +++ b/psyneulink/core/llvm/execution.py @@ -565,7 +565,9 @@ def _bin_run_func(self): return self.__bin_run_func - def run(self, inputs, runs=0, num_input_sets=0): + def _prepare_run(self, inputs, runs, num_input_sets): + + # Create input buffer if isgenerator(inputs): inputs, runs = self._get_generator_run_input_struct(inputs, runs) assert num_input_sets == 0 or num_input_sets == sys.maxsize @@ -573,60 +575,47 @@ def run(self, inputs, runs=0, num_input_sets=0): else: inputs = self._get_run_input_struct(inputs, num_input_sets) - ct_vo = self._bin_run_func.byref_arg_types[4] * runs - - outputs = ct_vo() + # Create output buffer + outputs = (self._bin_run_func.byref_arg_types[4] * runs)() if "stat" in self._debug_env: - print("Input struct size:", _pretty_size(ctypes.sizeof(inputs)), - "for", self._composition.name) print("Output struct size:", _pretty_size(ctypes.sizeof(outputs)), "for", self._composition.name) runs_count = np.asarray(runs, dtype=np.uint32).copy() input_count = np.asarray(num_input_sets, dtype=np.uint32) + return inputs, outputs, runs_count, input_count + + def run(self, inputs, runs, num_input_sets): + ct_inputs, ct_outputs, runs_count, input_count = self._prepare_run(inputs, runs, num_input_sets) + self._bin_run_func(self._state_struct, self._param_struct, self._data_struct, - inputs, - outputs, + ct_inputs, + ct_outputs, runs_count, input_count) # Extract only #trials elements in case the run exited early assert runs_count <= runs, "Composition ran more times than allowed!" 
- return _convert_ctype_to_python(outputs)[0:runs_count] + return _convert_ctype_to_python(ct_outputs)[0:runs_count] def cuda_run(self, inputs, runs, num_input_sets): - # Create input buffer - if isgenerator(inputs): - ct_inputs, runs = self._get_generator_run_input_struct(inputs, runs) - assert num_input_sets == 0 or num_input_sets == sys.maxsize - num_input_sets = len(ct_inputs) - else: - ct_inputs = self._get_run_input_struct(inputs, num_input_sets) - - # Create output buffer - output_type = (self._bin_run_func.byref_arg_types[4] * runs) - - ct_out = output_type() - - # number of trials argument - np_runs = np.asarray(runs, dtype=np.int32).copy() + ct_inputs, ct_outputs, runs_count, input_count = self._prepare_run(inputs, runs, num_input_sets) self._bin_run_func.cuda_call(self._cuda_state_struct, self._cuda_param_struct, self._cuda_data_struct, - jit_engine.pycuda.driver.In(np.ctypeslib.as_array(ct_inputs)), # input - jit_engine.pycuda.driver.Out(np.ctypeslib.as_array(ct_out)), # output - jit_engine.pycuda.driver.InOut(np_runs), # runs - jit_engine.pycuda.driver.In(np.int32(num_input_sets))) # number of inputs - - assert np_runs <= runs, "Composition ran more times than allowed: {}".format(runs) + jit_engine.pycuda.driver.In(np.ctypeslib.as_array(ct_inputs)), + jit_engine.pycuda.driver.Out(np.ctypeslib.as_array(ct_outputs)), + jit_engine.pycuda.driver.InOut(runs_count), + jit_engine.pycuda.driver.In(input_count)) # Extract only #trials elements in case the run exited early - return _convert_ctype_to_python(ct_out)[0:np_runs] + assert runs_count <= runs, "Composition ran more times than allowed: {}".format(runs) + return _convert_ctype_to_python(ct_outputs)[0:runs_count] def _prepare_evaluate(self, inputs, num_input_sets, num_evaluations, all_results:bool): ocm = self._composition.controller From 286dfcec4738556e970f51c87dcb5e977a0895f1 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Mon, 5 Aug 2024 17:44:01 -0400 Subject: [PATCH 299/410] llvm/execution: Consolidate shared code between CPU and GPU 'evaluate' Do not sync back composition state or data. A call to evaluate creates a copy of these structures for each evaluation so the structure content is unchanged. Moreover, the structures are deallocated after the evaluate call anyway. Use Numpy structure for number of inputs Instantiate output buffer in the shared _prepare_evaluate function. 
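A rough sketch of the resulting calling pattern (names are taken from the diff that follows;
this is an illustration, not a drop-in implementation):

    comp_params, comp_state, comp_data, ct_inputs, ct_results, num_inputs = \
        self._prepare_evaluate(inputs, num_input_sets, num_evaluations, all_results)

    # comp_state and comp_data are copied per evaluation inside the compiled code, so both
    # the CPU and GPU paths treat them as read-only inputs (pycuda.driver.In on the GPU side);
    # only ct_results -- num_evaluations entries of either per-trial results or the objective
    # value -- is written back, and can be viewed as np.ctypeslib.as_array(ct_results).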
Signed-off-by: Jan Vesely --- psyneulink/core/llvm/execution.py | 27 ++++++++++++--------------- 1 file changed, 12 insertions(+), 15 deletions(-) diff --git a/psyneulink/core/llvm/execution.py b/psyneulink/core/llvm/execution.py index cddea113df7..8b92e50af1e 100644 --- a/psyneulink/core/llvm/execution.py +++ b/psyneulink/core/llvm/execution.py @@ -622,7 +622,7 @@ def _prepare_evaluate(self, inputs, num_input_sets, num_evaluations, all_results eval_type = "evaluate_type_all_results" if all_results else "evaluate_type_objective" tags = {"evaluate", "alloc_range", eval_type} - bin_func = pnlvm.LLVMBinaryFunction.from_obj(ocm, tags=frozenset(tags), numpy_args=(0, 1, 6)) + bin_func = pnlvm.LLVMBinaryFunction.from_obj(ocm, tags=frozenset(tags), numpy_args=(0, 1, 6, 7)) self.__bin_func = bin_func # There are 8 arguments to evaluate_alloc_range: @@ -648,27 +648,25 @@ def _prepare_evaluate(self, inputs, num_input_sets, num_evaluations, all_results out_el_ty *= num_trials out_ty = out_el_ty * num_evaluations - ct_num_inputs = bin_func.byref_arg_types[7](num_input_sets) + num_inputs = np.asarray(num_input_sets, dtype=np.uint32) if "stat" in self._debug_env: print("Evaluate result struct type size:", _pretty_size(ctypes.sizeof(out_ty)), "( evaluations:", num_evaluations, "element size:", ctypes.sizeof(out_el_ty), ")", "for", self._obj.name) - return comp_params, comp_state, comp_data, ct_inputs, out_ty, ct_num_inputs + return comp_params, comp_state, comp_data, ct_inputs, out_ty(), num_inputs def cuda_evaluate(self, inputs, num_input_sets, num_evaluations, all_results:bool=False): - comp_params, comp_state, comp_data, ct_inputs, out_ty, _ = \ + comp_params, comp_state, comp_data, ct_inputs, ct_results, num_inputs = \ self._prepare_evaluate(inputs, num_input_sets, num_evaluations, all_results) - ct_results = out_ty() - cuda_args = (jit_engine.pycuda.driver.In(comp_params), - jit_engine.pycuda.driver.InOut(comp_state), + jit_engine.pycuda.driver.In(comp_state), jit_engine.pycuda.driver.Out(np.ctypeslib.as_array(ct_results)), # results jit_engine.pycuda.driver.In(np.ctypeslib.as_array(ct_inputs)), # inputs - jit_engine.pycuda.driver.InOut(comp_data), # composition data - jit_engine.pycuda.driver.In(np.int32(num_input_sets)), # number of inputs + jit_engine.pycuda.driver.In(comp_data), # composition data + jit_engine.pycuda.driver.In(num_inputs), # number of inputs ) self.__bin_func.cuda_call(*cuda_args, threads=int(num_evaluations)) @@ -676,10 +674,9 @@ def cuda_evaluate(self, inputs, num_input_sets, num_evaluations, all_results:boo return ct_results def thread_evaluate(self, inputs, num_input_sets, num_evaluations, all_results:bool=False): - comp_params, comp_state, comp_data, ct_inputs, out_ty, ct_num_inputs = \ + comp_params, comp_state, comp_data, ct_inputs, ct_results, num_inputs = \ self._prepare_evaluate(inputs, num_input_sets, num_evaluations, all_results) - ct_results = out_ty() jobs = min(os.cpu_count(), num_evaluations) evals_per_job = (num_evaluations + jobs - 1) // jobs @@ -688,11 +685,11 @@ def thread_evaluate(self, inputs, num_input_sets, num_evaluations, all_results:b # Create input and result typed casts once, they are the same # for every submitted job. 
- input_arg = ctypes.cast(ct_inputs, self.__bin_func.c_func.argtypes[5]) results_arg = ctypes.cast(ct_results, self.__bin_func.c_func.argtypes[4]) + input_arg = ctypes.cast(ct_inputs, self.__bin_func.c_func.argtypes[5]) - # There are 7 arguments to evaluate_alloc_range: - # comp_param, comp_state, from, to, results, input, comp_data + # There are 8 arguments to evaluate_alloc_range: + # comp_param, comp_state, from, to, results, input, comp_data, input length results = [ex.submit(self.__bin_func, comp_params, comp_state, @@ -701,7 +698,7 @@ def thread_evaluate(self, inputs, num_input_sets, num_evaluations, all_results:b results_arg, input_arg, comp_data, - ct_num_inputs) + num_inputs) for i in range(jobs)] parallel_stop = time.time() From 4533f621b2ca601f4d541bb6a20eba26a47824c1 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Mon, 5 Aug 2024 21:40:45 -0400 Subject: [PATCH 300/410] llvm/execute: Move cuda_execute for Function and Mechanism to FuncExecution There's an overriding implementation in CompExecution. Signed-off-by: Jan Vesely --- psyneulink/core/llvm/execution.py | 28 +++++++++++++--------------- 1 file changed, 13 insertions(+), 15 deletions(-) diff --git a/psyneulink/core/llvm/execution.py b/psyneulink/core/llvm/execution.py index 8b92e50af1e..67100583b5a 100644 --- a/psyneulink/core/llvm/execution.py +++ b/psyneulink/core/llvm/execution.py @@ -252,20 +252,6 @@ def _cuda_data_struct(self): def _cuda_conditions(self): return self.__get_cuda_arg("_conditions", jit_engine.pycuda.driver.InOut) - def cuda_execute(self, variable): - # Create input argument, PyCUDA doesn't care about shape - new_var = np.asfarray(variable, dtype=self._bin_func.np_params[2].base) - data_in = jit_engine.pycuda.driver.In(new_var) - - data_out = self._bin_func.np_buffer_for_arg(3) - - self._bin_func.cuda_call(self._cuda_param_struct, - self._cuda_state_struct, - data_in, - jit_engine.pycuda.driver.Out(data_out)) - - return self._get_indexable(data_out) - class FuncExecution(CUDAExecution): @@ -290,14 +276,26 @@ def _state_struct(self): def execute(self, variable): new_variable = np.asfarray(variable, dtype=self._bin_func.np_params[2].base) - data_in = new_variable.reshape(self._bin_func.np_params[2].shape) + data_out = self._bin_func.np_buffer_for_arg(3) self._bin_func(self._param_struct, self._state_struct, data_in, data_out) return self._get_indexable(data_out) + def cuda_execute(self, variable): + # Create input argument, PyCUDA doesn't care about shape + data_in = np.asfarray(variable, dtype=self._bin_func.np_params[2].base) + data_out = self._bin_func.np_buffer_for_arg(3) + + self._bin_func.cuda_call(self._cuda_param_struct, + self._cuda_state_struct, + jit_engine.pycuda.driver.In(data_in), + jit_engine.pycuda.driver.Out(data_out)) + + return self._get_indexable(data_out) + class MechExecution(FuncExecution): pass From 55cb895c3f98cc7db82f59dfbd2969bdd48ade50 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Mon, 5 Aug 2024 22:35:30 -0400 Subject: [PATCH 301/410] llvm/execution: Use Context instance in the CompExecution constructor Signed-off-by: Jan Vesely --- .../functions/nonstateful/optimizationfunctions.py | 2 +- psyneulink/core/llvm/execution.py | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/psyneulink/core/components/functions/nonstateful/optimizationfunctions.py b/psyneulink/core/components/functions/nonstateful/optimizationfunctions.py index eee98a83d2d..dfdce982a52 100644 --- a/psyneulink/core/components/functions/nonstateful/optimizationfunctions.py +++ 
b/psyneulink/core/components/functions/nonstateful/optimizationfunctions.py @@ -831,7 +831,7 @@ def _is_static(it:SampleIterator): num_evals = np.prod([d._num for d in self.search_space]) # Map allocations to values - comp_exec = pnlvm.execution.CompExecution(ocm.agent_rep, context.execution_id) + comp_exec = pnlvm.execution.CompExecution(ocm.agent_rep, context) execution_mode = ocm.parameters.comp_execution_mode._get(context) if execution_mode == "PTX": outcomes = comp_exec.cuda_evaluate(inputs, num_inputs_sets, num_evals, get_results) diff --git a/psyneulink/core/llvm/execution.py b/psyneulink/core/llvm/execution.py index 67100583b5a..5ead80a2731 100644 --- a/psyneulink/core/llvm/execution.py +++ b/psyneulink/core/llvm/execution.py @@ -305,10 +305,10 @@ class CompExecution(CUDAExecution): active_executions = weakref.WeakSet() - def __init__(self, composition, execution_id, *, additional_tags=frozenset()): + def __init__(self, composition, context:Context, *, additional_tags=frozenset()): super().__init__(buffers=['state_struct', 'param_struct', 'data_struct', 'conditions']) self._composition = composition - self._execution_context = Context(execution_id=execution_id) + self._execution_context = context self.__bin_exec_func = None self.__bin_func = None self.__bin_run_func = None @@ -324,7 +324,7 @@ def __del__(self): self.active_executions.discard(self) @staticmethod - def get(composition, context, additional_tags=frozenset()): + def get(composition, context:Context, additional_tags=frozenset()): executions = composition._compilation_data.execution._get(context) if executions is None: executions = dict() @@ -332,7 +332,7 @@ def get(composition, context, additional_tags=frozenset()): execution = executions.get(additional_tags, None) if execution is None: - execution = pnlvm.CompExecution(composition, context.execution_id, additional_tags=additional_tags) + execution = pnlvm.CompExecution(composition, context, additional_tags=additional_tags) executions[additional_tags] = execution return execution From dd1170c0fadf1bc0cc183fdd2f46f19033b243a2 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Mon, 5 Aug 2024 22:50:32 -0400 Subject: [PATCH 302/410] llvm: Remove 'wrap_call' No longer used. 
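For context, a minimal before/after sketch (generic placeholder arguments):

    # before: every call wrapped the ctypes structures by reference and cast them to the
    # declared argument types
    bin_func.wrap_call(param_struct, state_struct, data_in, data_out)

    # after: the NumPy-backed buffers are passed straight to the binary function, since
    # arguments listed in numpy_args are declared with np.ctypeslib.ndpointer prototypes
    bin_func(param_struct, state_struct, data_in, data_out)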
Signed-off-by: Jan Vesely --- psyneulink/core/llvm/__init__.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/psyneulink/core/llvm/__init__.py b/psyneulink/core/llvm/__init__.py index 1a921470690..93b287cd748 100644 --- a/psyneulink/core/llvm/__init__.py +++ b/psyneulink/core/llvm/__init__.py @@ -176,11 +176,6 @@ def c_func(self): def __call__(self, *args, **kwargs): return self.c_func(*args, **kwargs) - def wrap_call(self, *pargs): - cpargs = (ctypes.byref(p) if p is not None else None for p in pargs) - args = zip(cpargs, self.c_func.argtypes) - self(*(ctypes.cast(p, t) for p, t in args)) - @property def _cuda_kernel(self): if self.__cuda_kernel is None: From cc8c381432a43185fa69c73ba8155e2e5ea6020c Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Tue, 6 Aug 2024 01:00:19 -0400 Subject: [PATCH 303/410] llvm: Rename np_params -> np_arg_dtypes Signed-off-by: Jan Vesely --- psyneulink/core/llvm/__init__.py | 8 ++++---- psyneulink/core/llvm/execution.py | 14 +++++++------- tests/llvm/test_builtins_intrinsics.py | 2 +- tests/llvm/test_builtins_matrix.py | 6 +++--- tests/llvm/test_builtins_mt_random.py | 8 ++++---- tests/llvm/test_builtins_philox_random.py | 8 ++++---- tests/llvm/test_builtins_vector.py | 8 ++++---- tests/llvm/test_compile.py | 6 +++--- tests/llvm/test_helpers.py | 14 +++++++------- 9 files changed, 37 insertions(+), 37 deletions(-) diff --git a/psyneulink/core/llvm/__init__.py b/psyneulink/core/llvm/__init__.py index 93b287cd748..7976f1505ed 100644 --- a/psyneulink/core/llvm/__init__.py +++ b/psyneulink/core/llvm/__init__.py @@ -148,11 +148,11 @@ def __init__(self, name: str, *, numpy_args=()): # '_type_' special attribute stores pointee type for pointers # https://docs.python.org/3/library/ctypes.html#ctypes._Pointer._type_ self.byref_arg_types = [a._type_ if hasattr(a, "contents") else None for a in args] - self.np_params = [_convert_llvm_ir_to_dtype(getattr(a.type, "pointee", a.type)) for a in f.args] + self.np_arg_dtypes = [_convert_llvm_ir_to_dtype(getattr(a.type, "pointee", a.type)) for a in f.args] for a in numpy_args: assert self.byref_arg_types[a] is not None - args[a] = np.ctypeslib.ndpointer(dtype=self.np_params[a].base, shape=self.np_params[a].shape) + args[a] = np.ctypeslib.ndpointer(dtype=self.np_arg_dtypes[a].base, shape=self.np_arg_dtypes[a].shape) middle = time.perf_counter() self.__c_func_type = ctypes.CFUNCTYPE(return_type, *args) @@ -223,8 +223,8 @@ def cuda_wrap_call(self, *args, **kwargs): def np_buffer_for_arg(self, arg_num, *, extra_dimensions=(), fill_value=np.nan): - out_base = self.np_params[arg_num].base - out_shape = extra_dimensions + self.np_params[arg_num].shape + out_base = self.np_arg_dtypes[arg_num].base + out_shape = extra_dimensions + self.np_arg_dtypes[arg_num].shape # fill the buffer with NaN poison return np.full(out_shape, fill_value, dtype=out_base) diff --git a/psyneulink/core/llvm/execution.py b/psyneulink/core/llvm/execution.py index 5ead80a2731..786f2feb6bc 100644 --- a/psyneulink/core/llvm/execution.py +++ b/psyneulink/core/llvm/execution.py @@ -77,7 +77,7 @@ def _get_compilation_param(self, name, init_method, arg): struct_end = time.time() # numpy "frombuffer" creates a shared memory view of the provided buffer - numpy_struct = np.frombuffer(struct, dtype=self._bin_func.np_params[arg], count=1) + numpy_struct = np.frombuffer(struct, dtype=self._bin_func.np_arg_dtypes[arg], count=1) assert numpy_struct.nbytes == ctypes.sizeof(struct), \ "Size mismatch ({}), numpy: {} vs. 
ctypes:{}".format(name, numpy_struct.nbytes, ctypes.sizeof(struct)) @@ -275,8 +275,8 @@ def _state_struct(self): return self._get_compilation_param('_state', '_get_state_initializer', 1) def execute(self, variable): - new_variable = np.asfarray(variable, dtype=self._bin_func.np_params[2].base) - data_in = new_variable.reshape(self._bin_func.np_params[2].shape) + new_variable = np.asfarray(variable, dtype=self._bin_func.np_arg_dtypes[2].base) + data_in = new_variable.reshape(self._bin_func.np_arg_dtypes[2].shape) data_out = self._bin_func.np_buffer_for_arg(3) @@ -286,7 +286,7 @@ def execute(self, variable): def cuda_execute(self, variable): # Create input argument, PyCUDA doesn't care about shape - data_in = np.asfarray(variable, dtype=self._bin_func.np_params[2].base) + data_in = np.asfarray(variable, dtype=self._bin_func.np_arg_dtypes[2].base) data_out = self._bin_func.np_buffer_for_arg(3) self._bin_func.cuda_call(self._cuda_param_struct, @@ -368,7 +368,7 @@ def _conditions(self): conditions_initializer = gen.get_condition_initializer() ct_conditions = conditions_ctype(*conditions_initializer) - np_conditions = np.frombuffer(ct_conditions, dtype=self._bin_func.np_params[4], count=1) + np_conditions = np.frombuffer(ct_conditions, dtype=self._bin_func.np_arg_dtypes[4], count=1) np_conditions.shape = () @@ -444,8 +444,8 @@ def _get_input_struct(self, inputs): # Read provided input data and parse into an array (generator) data = self._composition._build_variable_for_input_CIM(inputs) - np_input = np.asarray(_tupleize(data), dtype=self._bin_func.np_params[2].base) - np_input = np_input.reshape(self._bin_func.np_params[2].shape) + np_input = np.asarray(_tupleize(data), dtype=self._bin_func.np_arg_dtypes[2].base) + np_input = np_input.reshape(self._bin_func.np_arg_dtypes[2].shape) if "stat" in self._debug_env: print("Input struct size:", _pretty_size(np_input.nbytes), "for", self._composition.name) diff --git a/tests/llvm/test_builtins_intrinsics.py b/tests/llvm/test_builtins_intrinsics.py index 22cc3d2df8d..5195fcee73b 100644 --- a/tests/llvm/test_builtins_intrinsics.py +++ b/tests/llvm/test_builtins_intrinsics.py @@ -52,7 +52,7 @@ def test_builtin_op(benchmark, op, args, builtin, result, func_mode): # The result argument is a pointer, use it to derive # the right argument type - dty = bin_f.np_params[1].base + dty = bin_f.np_arg_dtypes[1].base ptx_res = np.empty_like(result, dtype=dty) ptx_res_arg = pnlvm.jit_engine.pycuda.driver.Out(ptx_res) diff --git a/tests/llvm/test_builtins_matrix.py b/tests/llvm/test_builtins_matrix.py index 1cad00e1565..f2c50bf576f 100644 --- a/tests/llvm/test_builtins_matrix.py +++ b/tests/llvm/test_builtins_matrix.py @@ -64,9 +64,9 @@ def _get_const_dim_func(builtin, *dims): def test_matrix_op(benchmark, op, x, y, builtin, result, func_mode, dims): def _numpy_args(bin_f): - np_x = x.astype(bin_f.np_params[0]) - np_y = bin_f.np_params[1].type(y) if np.isscalar(y) else y.astype(bin_f.np_params[1]) - np_res = np.empty_like(result, dtype=bin_f.np_params[-1]) + np_x = x.astype(bin_f.np_arg_dtypes[0]) + np_y = bin_f.np_arg_dtypes[1].type(y) if np.isscalar(y) else y.astype(bin_f.np_arg_dtypes[1]) + np_res = np.empty_like(result, dtype=bin_f.np_arg_dtypes[-1]) return np_x, np_y, np_res diff --git a/tests/llvm/test_builtins_mt_random.py b/tests/llvm/test_builtins_mt_random.py index 2ff7cff0ea2..28082e2d7e8 100644 --- a/tests/llvm/test_builtins_mt_random.py +++ b/tests/llvm/test_builtins_mt_random.py @@ -196,8 +196,8 @@ def f(): init_fun(state, SEED) gen_fun = 
pnlvm.LLVMBinaryFunction.get('__pnl_builtin_mt_rand_binomial', numpy_args=(0, 1, 2, 3)) - n = np.asarray(n, dtype=gen_fun.np_params[1]) - p = np.asarray(p, dtype=gen_fun.np_params[2]) + n = np.asarray(n, dtype=gen_fun.np_arg_dtypes[1]) + p = np.asarray(p, dtype=gen_fun.np_arg_dtypes[2]) def f(): out = gen_fun.np_buffer_for_arg(1) @@ -214,8 +214,8 @@ def f(): gen_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_mt_rand_binomial') - gpu_n = pnlvm.jit_engine.pycuda.driver.In(np.asarray(n, dtype=gen_fun.np_params[1])) - gpu_p = pnlvm.jit_engine.pycuda.driver.In(np.asarray(p, dtype=gen_fun.np_params[2])) + gpu_n = pnlvm.jit_engine.pycuda.driver.In(np.asarray(n, dtype=gen_fun.np_arg_dtypes[1])) + gpu_p = pnlvm.jit_engine.pycuda.driver.In(np.asarray(p, dtype=gen_fun.np_arg_dtypes[2])) out = gen_fun.np_buffer_for_arg(1) gpu_out = pnlvm.jit_engine.pycuda.driver.Out(out) diff --git a/tests/llvm/test_builtins_philox_random.py b/tests/llvm/test_builtins_philox_random.py index 0c6e289a700..2466ea4f6d2 100644 --- a/tests/llvm/test_builtins_philox_random.py +++ b/tests/llvm/test_builtins_philox_random.py @@ -327,8 +327,8 @@ def f(): init_fun(state, SEED) gen_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_philox_rand_binomial', numpy_args=(0, 1, 2, 3)) - n = np.asarray(n, dtype=gen_fun.np_params[1]) - p = np.asarray(p, dtype=gen_fun.np_params[2]) + n = np.asarray(n, dtype=gen_fun.np_arg_dtypes[1]) + p = np.asarray(p, dtype=gen_fun.np_arg_dtypes[2]) def f(): out = gen_fun.np_buffer_for_arg(1) @@ -342,8 +342,8 @@ def f(): init_fun.cuda_call(gpu_state, np.int64(SEED)) gen_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_philox_rand_binomial') - gpu_n = pnlvm.jit_engine.pycuda.driver.In(np.asarray(n, dtype=gen_fun.np_params[1])) - gpu_p = pnlvm.jit_engine.pycuda.driver.In(np.asarray(p, dtype=gen_fun.np_params[2])) + gpu_n = pnlvm.jit_engine.pycuda.driver.In(np.asarray(n, dtype=gen_fun.np_arg_dtypes[1])) + gpu_p = pnlvm.jit_engine.pycuda.driver.In(np.asarray(p, dtype=gen_fun.np_arg_dtypes[2])) out = gen_fun.np_buffer_for_arg(1) gpu_out = pnlvm.jit_engine.pycuda.driver.Out(out) diff --git a/tests/llvm/test_builtins_vector.py b/tests/llvm/test_builtins_vector.py index 999a7e42696..9a806bde911 100644 --- a/tests/llvm/test_builtins_vector.py +++ b/tests/llvm/test_builtins_vector.py @@ -29,8 +29,8 @@ def test_vector_op(benchmark, op, v, builtin, result, func_mode): def _numpy_args(bin_f): - np_u = u.astype(bin_f.np_params[0]) - np_v = bin_f.np_params[1].type(v) if np.isscalar(v) else v.astype(bin_f.np_params[1]) + np_u = u.astype(bin_f.np_arg_dtypes[0]) + np_v = bin_f.np_arg_dtypes[1].type(v) if np.isscalar(v) else v.astype(bin_f.np_arg_dtypes[1]) np_res = np.empty_like(np_u) return np_u, np_v, np_res @@ -77,7 +77,7 @@ def ex(): elif func_mode == 'LLVM': bin_f = pnlvm.LLVMBinaryFunction.get("__pnl_builtin_vec_sum", numpy_args=(2,)) - np_u = u.astype(bin_f.np_params[0]) + np_u = u.astype(bin_f.np_arg_dtypes[0]) np_res = bin_f.np_buffer_for_arg(2) ct_u = np_u.ctypes.data_as(bin_f.c_func.argtypes[0]) @@ -89,7 +89,7 @@ def ex(): elif func_mode == 'PTX': bin_f = pnlvm.LLVMBinaryFunction.get("__pnl_builtin_vec_sum", numpy_args=(2,)) - np_u = u.astype(bin_f.np_params[0]) + np_u = u.astype(bin_f.np_arg_dtypes[0]) np_res = bin_f.np_buffer_for_arg(2) cuda_u = pnlvm.jit_engine.pycuda.driver.In(np_u) diff --git a/tests/llvm/test_compile.py b/tests/llvm/test_compile.py index c396cba594f..71c8526e2bc 100644 --- a/tests/llvm/test_compile.py +++ b/tests/llvm/test_compile.py @@ -12,9 +12,9 @@ def test_recompile(): # The 
original builtin mxv function bin_f = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_vxm') - vector = np.random.rand(DIM_X).astype(bin_f.np_params[0].base) - matrix = np.random.rand(DIM_X, DIM_Y).astype(bin_f.np_params[1].base) - llvm_res = np.empty(DIM_Y, dtype=bin_f.np_params[4].base) + vector = np.random.rand(DIM_X).astype(bin_f.np_arg_dtypes[0].base) + matrix = np.random.rand(DIM_X, DIM_Y).astype(bin_f.np_arg_dtypes[1].base) + llvm_res = np.empty(DIM_Y, dtype=bin_f.np_arg_dtypes[4].base) x, y = matrix.shape diff --git a/tests/llvm/test_helpers.py b/tests/llvm/test_helpers.py index e692bd62f37..9f1c9bad29a 100644 --- a/tests/llvm/test_helpers.py +++ b/tests/llvm/test_helpers.py @@ -144,8 +144,8 @@ def test_helper_is_close(mode, var1, var2, rtol, atol, fp_type): bin_f = pnlvm.LLVMBinaryFunction.get(custom_name) - vec1 = np.atleast_1d(np.asfarray(var1, dtype=bin_f.np_params[0].base)) - vec2 = np.atleast_1d(np.asfarray(var2, dtype=bin_f.np_params[1].base)) + vec1 = np.atleast_1d(np.asfarray(var1, dtype=bin_f.np_arg_dtypes[0].base)) + vec2 = np.atleast_1d(np.asfarray(var2, dtype=bin_f.np_arg_dtypes[1].base)) assert len(vec1) == len(vec2) res = np.empty_like(vec2) @@ -442,7 +442,7 @@ def test_helper_numerical(mode, op, var, expected, fp_type): bin_f = pnlvm.LLVMBinaryFunction.get(custom_name, numpy_args=(0,)) - res = np.asfarray(var, dtype=bin_f.np_params[0]) + res = np.asfarray(var, dtype=bin_f.np_arg_dtypes[0]) if mode == 'CPU': bin_f(res) @@ -475,7 +475,7 @@ def test_helper_elementwise_op(mode, var, expected): bin_f = pnlvm.LLVMBinaryFunction.get(custom_name, numpy_args=(0, 1)) - vec = np.asfarray(var, dtype=bin_f.np_params[0].base) + vec = np.asfarray(var, dtype=bin_f.np_arg_dtypes[0].base) res = bin_f.np_buffer_for_arg(1) if mode == 'CPU': @@ -521,8 +521,8 @@ def test_helper_recursive_iterate_arrays(mode, var1, var2, expected): bin_f = pnlvm.LLVMBinaryFunction.get(custom_name, numpy_args=(0, 1, 2)) - vec1 = np.asfarray(var1, dtype=bin_f.np_params[0].base) - vec2 = np.asfarray(var2, dtype=bin_f.np_params[0].base) + vec1 = np.asfarray(var1, dtype=bin_f.np_arg_dtypes[0].base) + vec2 = np.asfarray(var2, dtype=bin_f.np_arg_dtypes[0].base) res = bin_f.np_buffer_for_arg(1) if mode == 'CPU': @@ -558,7 +558,7 @@ def test_helper_convert_fp_type(t1, t2, mode, val): bin_f = pnlvm.LLVMBinaryFunction.get(custom_name, numpy_args=(0, 1)) # Get the argument numpy dtype - np_dt1, np_dt2 = (np.dtype(bin_f.np_params[i]) for i in (0, 1)) + np_dt1, np_dt2 = (np.dtype(bin_f.np_arg_dtypes[i]) for i in (0, 1)) # instantiate value, result and reference x = np.asfarray(val, dtype=np_dt1) From 45d8ccd2469358e489f5e9bd4e426250e0f2ebc1 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Tue, 6 Aug 2024 11:34:38 -0400 Subject: [PATCH 304/410] llvm: Use Numpy ndpointer by default ctype_ptr_arg can be used to force use ctype pointers for dynamically sized argument Signed-off-by: Jan Vesely --- .../nonstateful/optimizationfunctions.py | 2 +- psyneulink/core/llvm/__init__.py | 20 ++++++------ psyneulink/core/llvm/execution.py | 16 +++++----- tests/llvm/test_builtins_matrix.py | 2 +- tests/llvm/test_builtins_mt_random.py | 16 +++++----- tests/llvm/test_builtins_philox_random.py | 24 +++++++------- tests/llvm/test_builtins_vector.py | 6 ++-- tests/llvm/test_compile.py | 10 +++--- tests/llvm/test_custom_func.py | 10 +++--- tests/llvm/test_helpers.py | 31 +++++++++---------- 10 files changed, 66 insertions(+), 71 deletions(-) diff --git a/psyneulink/core/components/functions/nonstateful/optimizationfunctions.py 
b/psyneulink/core/components/functions/nonstateful/optimizationfunctions.py index dfdce982a52..bc4d323c606 100644 --- a/psyneulink/core/components/functions/nonstateful/optimizationfunctions.py +++ b/psyneulink/core/components/functions/nonstateful/optimizationfunctions.py @@ -2103,7 +2103,7 @@ def _function(self, # select_min params are: # params, state, min_sample_ptr, sample_ptr, min_value_ptr, value_ptr, opt_count_ptr, count min_tags = frozenset({"select_min", "evaluate_type_objective"}) - bin_func = pnlvm.LLVMBinaryFunction.from_obj(self, tags=min_tags, numpy_args=(2, 4, 6)) + bin_func = pnlvm.LLVMBinaryFunction.from_obj(self, tags=min_tags, ctype_ptr_args=(0, 1, 3, 5)) ct_param = bin_func.byref_arg_types[0](*self._get_param_initializer(context)) ct_state = bin_func.byref_arg_types[1](*self._get_state_initializer(context)) diff --git a/psyneulink/core/llvm/__init__.py b/psyneulink/core/llvm/__init__.py index 7976f1505ed..5a9788102f4 100644 --- a/psyneulink/core/llvm/__init__.py +++ b/psyneulink/core/llvm/__init__.py @@ -123,7 +123,7 @@ def _llvm_build(target_generation=_binary_generation + 1): class LLVMBinaryFunction: - def __init__(self, name: str, *, numpy_args=()): + def __init__(self, name: str, *, ctype_ptr_args=()): self.name = name self.__c_func = None @@ -143,16 +143,18 @@ def __init__(self, name: str, *, numpy_args=()): # Create ctype function instance start = time.perf_counter() return_type = _convert_llvm_ir_to_ctype(f.return_value.type) + + self.np_arg_dtypes = [_convert_llvm_ir_to_dtype(getattr(a.type, "pointee", a.type)) for a in f.args] + args = [_convert_llvm_ir_to_ctype(a.type) for a in f.args] # '_type_' special attribute stores pointee type for pointers # https://docs.python.org/3/library/ctypes.html#ctypes._Pointer._type_ self.byref_arg_types = [a._type_ if hasattr(a, "contents") else None for a in args] - self.np_arg_dtypes = [_convert_llvm_ir_to_dtype(getattr(a.type, "pointee", a.type)) for a in f.args] - for a in numpy_args: - assert self.byref_arg_types[a] is not None - args[a] = np.ctypeslib.ndpointer(dtype=self.np_arg_dtypes[a].base, shape=self.np_arg_dtypes[a].shape) + for i, arg in enumerate(self.np_arg_dtypes): + if i not in ctype_ptr_args and self.byref_arg_types[i] is not None: + args[i] = np.ctypeslib.ndpointer(dtype=arg.base, shape=arg.shape) middle = time.perf_counter() self.__c_func_type = ctypes.CFUNCTYPE(return_type, *args) @@ -231,14 +233,14 @@ def np_buffer_for_arg(self, arg_num, *, extra_dimensions=(), fill_value=np.nan): @staticmethod @functools.lru_cache(maxsize=32) - def from_obj(obj, *, tags:frozenset=frozenset(), numpy_args:tuple=()): + def from_obj(obj, *, tags:frozenset=frozenset(), ctype_ptr_args:tuple=()): name = LLVMBuilderContext.get_current().gen_llvm_function(obj, tags=tags).name - return LLVMBinaryFunction.get(name, numpy_args=numpy_args) + return LLVMBinaryFunction.get(name, ctype_ptr_args=ctype_ptr_args) @staticmethod @functools.lru_cache(maxsize=32) - def get(name: str, *, numpy_args:tuple=()): - return LLVMBinaryFunction(name, numpy_args=numpy_args) + def get(name: str, *, ctype_ptr_args:tuple=()): + return LLVMBinaryFunction(name, ctype_ptr_args=ctype_ptr_args) _cpu_engine = None diff --git a/psyneulink/core/llvm/execution.py b/psyneulink/core/llvm/execution.py index 786f2feb6bc..c49c801f0b0 100644 --- a/psyneulink/core/llvm/execution.py +++ b/psyneulink/core/llvm/execution.py @@ -258,7 +258,7 @@ class FuncExecution(CUDAExecution): def __init__(self, component, execution_id=None, *, tags=frozenset()): super().__init__() - 
self._bin_func = pnlvm.LLVMBinaryFunction.from_obj(component, tags=tags, numpy_args=(0, 1, 2, 3)) + self._bin_func = pnlvm.LLVMBinaryFunction.from_obj(component, tags=tags) self._execution_context = Context(execution_id=execution_id) self._component = component @@ -355,9 +355,7 @@ def _bin_func(self): def _set_bin_node(self, node): assert node in self._composition._all_nodes node_assembly = builder_context.LLVMBuilderContext.get_current().get_node_assembly(self._composition, node) - self.__bin_func = pnlvm.LLVMBinaryFunction.from_obj(node_assembly, - tags=self.__tags.union({"node_assembly"}), - numpy_args=(0, 1, 2, 3, 4)) + self.__bin_func = pnlvm.LLVMBinaryFunction.from_obj(node_assembly, tags=self.__tags.union({"node_assembly"})) @property def _conditions(self): @@ -503,8 +501,7 @@ def execute_node(self, node, inputs=None): @property def _bin_exec_func(self): if self.__bin_exec_func is None: - self.__bin_exec_func = pnlvm.LLVMBinaryFunction.from_obj( - self._composition, tags=self.__tags, numpy_args=(0, 1, 2, 3, 4)) + self.__bin_exec_func = pnlvm.LLVMBinaryFunction.from_obj(self._composition, tags=self.__tags) return self.__bin_exec_func @@ -558,8 +555,9 @@ def _get_generator_run_input_struct(self, inputs, runs): @property def _bin_run_func(self): if self.__bin_run_func is None: - self.__bin_run_func = pnlvm.LLVMBinaryFunction.from_obj( - self._composition, tags=self.__tags.union({"run"}), numpy_args=(0, 1, 2, 5, 6)) + self.__bin_run_func = pnlvm.LLVMBinaryFunction.from_obj(self._composition, + tags=self.__tags.union({"run"}), + ctype_ptr_args=(3, 4)) return self.__bin_run_func @@ -620,7 +618,7 @@ def _prepare_evaluate(self, inputs, num_input_sets, num_evaluations, all_results eval_type = "evaluate_type_all_results" if all_results else "evaluate_type_objective" tags = {"evaluate", "alloc_range", eval_type} - bin_func = pnlvm.LLVMBinaryFunction.from_obj(ocm, tags=frozenset(tags), numpy_args=(0, 1, 6, 7)) + bin_func = pnlvm.LLVMBinaryFunction.from_obj(ocm, tags=frozenset(tags), ctype_ptr_args=(4, 5)) self.__bin_func = bin_func # There are 8 arguments to evaluate_alloc_range: diff --git a/tests/llvm/test_builtins_matrix.py b/tests/llvm/test_builtins_matrix.py index f2c50bf576f..9280eb0db98 100644 --- a/tests/llvm/test_builtins_matrix.py +++ b/tests/llvm/test_builtins_matrix.py @@ -80,7 +80,7 @@ def ex(): else: func_name = builtin - bin_f = pnlvm.LLVMBinaryFunction.get(func_name) + bin_f = pnlvm.LLVMBinaryFunction.get(func_name, ctype_ptr_args=(0, 1, 2, 3, 4)) lx, ly, lres = _numpy_args(bin_f) ct_x = lx.ctypes.data_as(bin_f.c_func.argtypes[0]) diff --git a/tests/llvm/test_builtins_mt_random.py b/tests/llvm/test_builtins_mt_random.py index 28082e2d7e8..d8c0f51d1ce 100644 --- a/tests/llvm/test_builtins_mt_random.py +++ b/tests/llvm/test_builtins_mt_random.py @@ -27,12 +27,12 @@ def f(): return state.randint(0xffffffff, dtype=np.int64) elif mode == 'LLVM': - init_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_mt_rand_init', numpy_args=(0,)) + init_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_mt_rand_init') state = init_fun.np_buffer_for_arg(0) init_fun(state, SEED) - gen_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_mt_rand_int32', numpy_args=(0, 1)) + gen_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_mt_rand_int32') def f(): out = gen_fun.np_buffer_for_arg(1) @@ -84,11 +84,11 @@ def f(): return state.random_sample() elif mode == 'LLVM': - init_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_mt_rand_init', numpy_args=(0,)) + init_fun = 
pnlvm.LLVMBinaryFunction.get('__pnl_builtin_mt_rand_init') state = init_fun.np_buffer_for_arg(0) init_fun(state, SEED) - gen_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_mt_rand_double', numpy_args=(0, 1)) + gen_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_mt_rand_double') def f(): out = gen_fun.np_buffer_for_arg(1) @@ -133,11 +133,11 @@ def f(): return state.normal() elif mode == 'LLVM': - init_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_mt_rand_init', numpy_args=(0,)) + init_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_mt_rand_init') state = init_fun.np_buffer_for_arg(0) init_fun(state, SEED) - gen_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_mt_rand_normal', numpy_args=(0, 1)) + gen_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_mt_rand_normal') def f(): out = gen_fun.np_buffer_for_arg(1) @@ -191,11 +191,11 @@ def f(): return state.binomial(n, p) elif mode == 'LLVM': - init_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_mt_rand_init', numpy_args=(0,)) + init_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_mt_rand_init') state = init_fun.np_buffer_for_arg(0) init_fun(state, SEED) - gen_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_mt_rand_binomial', numpy_args=(0, 1, 2, 3)) + gen_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_mt_rand_binomial') n = np.asarray(n, dtype=gen_fun.np_arg_dtypes[1]) p = np.asarray(p, dtype=gen_fun.np_arg_dtypes[2]) diff --git a/tests/llvm/test_builtins_philox_random.py b/tests/llvm/test_builtins_philox_random.py index 2466ea4f6d2..af9f4228d71 100644 --- a/tests/llvm/test_builtins_philox_random.py +++ b/tests/llvm/test_builtins_philox_random.py @@ -26,11 +26,11 @@ def f(): return prng.integers(0xffffffffffffffff, dtype=np.uint64, endpoint=True) elif mode == 'LLVM': - init_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_philox_rand_init', numpy_args=(0,)) + init_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_philox_rand_init') state = init_fun.np_buffer_for_arg(0) init_fun(state, seed) - gen_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_philox_rand_int64', numpy_args=(0, 1)) + gen_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_philox_rand_int64') def f(): out = gen_fun.np_buffer_for_arg(1) @@ -76,11 +76,11 @@ def f(): return prng.integers(0xffffffff, dtype=np.uint32, endpoint=True) elif mode == 'LLVM': - init_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_philox_rand_init', numpy_args=(0,)) + init_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_philox_rand_init') state = init_fun.np_buffer_for_arg(0) init_fun(state, SEED) - gen_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_philox_rand_int32', numpy_args=(0, 1)) + gen_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_philox_rand_int32') def f(): out = gen_fun.np_buffer_for_arg(1) @@ -124,11 +124,11 @@ def f(): return prng.random(dtype=np.float64) elif mode == 'LLVM': - init_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_philox_rand_init', numpy_args=(0,)) + init_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_philox_rand_init') state = init_fun.np_buffer_for_arg(0) init_fun(state, SEED) - gen_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_philox_rand_double', numpy_args=(0, 1)) + gen_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_philox_rand_double') def f(): out = gen_fun.np_buffer_for_arg(1) @@ -171,11 +171,11 @@ def f(): return prng.random(dtype=np.float32) elif mode == 'LLVM': - init_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_philox_rand_init', numpy_args=(0,)) + init_fun = 
pnlvm.LLVMBinaryFunction.get('__pnl_builtin_philox_rand_init') state = init_fun.np_buffer_for_arg(0) init_fun(state, SEED) - gen_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_philox_rand_float', numpy_args=(0, 1)) + gen_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_philox_rand_float') def f(): out = gen_fun.np_buffer_for_arg(1) @@ -224,11 +224,11 @@ def f(): return prng.standard_normal(dtype=dtype) elif mode == 'LLVM': - init_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_philox_rand_init', numpy_args=(0,)) + init_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_philox_rand_init') state = init_fun.np_buffer_for_arg(0) init_fun(state, SEED) - gen_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_philox_rand_normal', numpy_args=(0, 1)) + gen_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_philox_rand_normal') def f(): out = gen_fun.np_buffer_for_arg(1) @@ -322,11 +322,11 @@ def f(): return prng.binomial(n, p) elif mode == 'LLVM': - init_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_philox_rand_init', numpy_args=(0,)) + init_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_philox_rand_init') state = init_fun.np_buffer_for_arg(0) init_fun(state, SEED) - gen_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_philox_rand_binomial', numpy_args=(0, 1, 2, 3)) + gen_fun = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_philox_rand_binomial') n = np.asarray(n, dtype=gen_fun.np_arg_dtypes[1]) p = np.asarray(p, dtype=gen_fun.np_arg_dtypes[2]) diff --git a/tests/llvm/test_builtins_vector.py b/tests/llvm/test_builtins_vector.py index 9a806bde911..70ced0e8864 100644 --- a/tests/llvm/test_builtins_vector.py +++ b/tests/llvm/test_builtins_vector.py @@ -40,7 +40,7 @@ def ex(): return op(u, v) elif func_mode == 'LLVM': - bin_f = pnlvm.LLVMBinaryFunction.get(builtin) + bin_f = pnlvm.LLVMBinaryFunction.get(builtin, ctype_ptr_args=(0, 1, 3)) lu, lv, lres = _numpy_args(bin_f) ct_u = lu.ctypes.data_as(bin_f.c_func.argtypes[0]) @@ -75,7 +75,7 @@ def ex(): return np.sum(u) elif func_mode == 'LLVM': - bin_f = pnlvm.LLVMBinaryFunction.get("__pnl_builtin_vec_sum", numpy_args=(2,)) + bin_f = pnlvm.LLVMBinaryFunction.get("__pnl_builtin_vec_sum", ctype_ptr_args=(0,)) np_u = u.astype(bin_f.np_arg_dtypes[0]) np_res = bin_f.np_buffer_for_arg(2) @@ -87,7 +87,7 @@ def ex(): return np_res elif func_mode == 'PTX': - bin_f = pnlvm.LLVMBinaryFunction.get("__pnl_builtin_vec_sum", numpy_args=(2,)) + bin_f = pnlvm.LLVMBinaryFunction.get("__pnl_builtin_vec_sum") np_u = u.astype(bin_f.np_arg_dtypes[0]) np_res = bin_f.np_buffer_for_arg(2) diff --git a/tests/llvm/test_compile.py b/tests/llvm/test_compile.py index 71c8526e2bc..4a1cff96317 100644 --- a/tests/llvm/test_compile.py +++ b/tests/llvm/test_compile.py @@ -10,7 +10,7 @@ @pytest.mark.llvm def test_recompile(): # The original builtin mxv function - bin_f = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_vxm') + bin_f = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_vxm', ctype_ptr_args=(0, 1, 4)) vector = np.random.rand(DIM_X).astype(bin_f.np_arg_dtypes[0].base) matrix = np.random.rand(DIM_X, DIM_Y).astype(bin_f.np_arg_dtypes[1].base) @@ -24,7 +24,7 @@ def test_recompile(): orig_res = np.empty_like(llvm_res) ct_res = orig_res.ctypes.data_as(bin_f.c_func.argtypes[4]) - bin_f.c_func(ct_vec, ct_mat, x, y, ct_res) + bin_f(ct_vec, ct_mat, x, y, ct_res) # Rebuild and try again # This is not a public API @@ -33,15 +33,15 @@ def test_recompile(): rebuild_res = np.empty_like(llvm_res) ct_res = rebuild_res.ctypes.data_as(bin_f.c_func.argtypes[4]) - bin_f.c_func(ct_vec, ct_mat, x, y, 
ct_res) + bin_f(ct_vec, ct_mat, x, y, ct_res) assert np.array_equal(orig_res, rebuild_res) # Get a new pointer - bin_f2 = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_vxm') + bin_f2 = pnlvm.LLVMBinaryFunction.get('__pnl_builtin_vxm', ctype_ptr_args=(0, 1, 4)) new_res = np.empty_like(llvm_res) ct_res = new_res.ctypes.data_as(bin_f2.c_func.argtypes[4]) - bin_f2.c_func(ct_vec, ct_mat, x, y, ct_res) + bin_f2(ct_vec, ct_mat, x, y, ct_res) assert np.array_equal(rebuild_res, new_res) callable_res = np.empty_like(llvm_res) diff --git a/tests/llvm/test_custom_func.py b/tests/llvm/test_custom_func.py index d15e65146ce..87936eb54e0 100644 --- a/tests/llvm/test_custom_func.py +++ b/tests/llvm/test_custom_func.py @@ -35,14 +35,12 @@ def test_integer_broadcast(mode, val): builder.ret_void() binf = pnlvm.LLVMBinaryFunction.get(custom_name) - res = np.zeros(8, dtype=val.dtype) + val = np.asarray(val) + res = binf.np_buffer_for_arg(1) if mode == 'CPU': - ct_res = np.ctypeslib.as_ctypes(res) - ct_in = np.ctypeslib.as_ctypes(val) - - binf(ctypes.byref(ct_in), ctypes.byref(ct_res)) + binf(val, res) else: - binf.cuda_wrap_call(np.asarray(val), res) + binf.cuda_wrap_call(val, res) assert all(res == np.broadcast_to(val + 1, 8)) diff --git a/tests/llvm/test_helpers.py b/tests/llvm/test_helpers.py index 9f1c9bad29a..00696744eb1 100644 --- a/tests/llvm/test_helpers.py +++ b/tests/llvm/test_helpers.py @@ -45,7 +45,7 @@ def test_helper_fclamp(mode): ref = np.clip(VECTOR, TST_MIN, TST_MAX) bounds = np.asfarray([TST_MIN, TST_MAX]) - bin_f = pnlvm.LLVMBinaryFunction.get(custom_name) + bin_f = pnlvm.LLVMBinaryFunction.get(custom_name, ctype_ptr_args=(0, 2)) local_vec = VECTOR.copy() if mode == 'CPU': @@ -86,7 +86,7 @@ def test_helper_fclamp_const(mode): local_vec = VECTOR.copy() ref = np.clip(VECTOR, TST_MIN, TST_MAX) - bin_f = pnlvm.LLVMBinaryFunction.get(custom_name) + bin_f = pnlvm.LLVMBinaryFunction.get(custom_name, ctype_ptr_args=(0,)) if mode == 'CPU': ct_vec = local_vec.ctypes.data_as(bin_f.c_func.argtypes[0]) @@ -118,8 +118,7 @@ def test_helper_is_close(mode, var1, var2, rtol, atol, fp_type): with pnlvm.LLVMBuilderContext.get_current() as ctx: float_ptr_ty = ctx.float_ty.as_pointer() - func_ty = ir.FunctionType(ir.VoidType(), [float_ptr_ty, float_ptr_ty, - float_ptr_ty, ctx.int32_ty]) + func_ty = ir.FunctionType(ir.VoidType(), [float_ptr_ty, float_ptr_ty, float_ptr_ty, ctx.int32_ty]) custom_name = ctx.get_unique_name("is_close") function = ir.Function(ctx.module, func_ty, name=custom_name) @@ -135,14 +134,12 @@ def test_helper_is_close(mode, var1, var2, rtol, atol, fp_type): val2 = b1.load(val2_ptr) close = pnlvm.helpers.is_close(ctx, b1, val1, val2, **tolerance) out_ptr = b1.gep(out, [index]) - out_val = b1.select(close, val1.type(1), val1.type(0)) - res = b1.select(close, out_ptr.type.pointee(1), - out_ptr.type.pointee(0)) + out_val = b1.select(close, out_ptr.type.pointee(1), out_ptr.type.pointee(0)) b1.store(out_val, out_ptr) builder.ret_void() - bin_f = pnlvm.LLVMBinaryFunction.get(custom_name) + bin_f = pnlvm.LLVMBinaryFunction.get(custom_name, ctype_ptr_args=(0, 1, 2)) vec1 = np.atleast_1d(np.asfarray(var1, dtype=bin_f.np_arg_dtypes[0].base)) vec2 = np.atleast_1d(np.asfarray(var2, dtype=bin_f.np_arg_dtypes[1].base)) @@ -200,7 +197,7 @@ def test_helper_all_close(mode, var1, var2, atol, rtol): ref = np.allclose(vec1, vec2, **tolerance) res = np.array(5, dtype=np.uint32) - bin_f = pnlvm.LLVMBinaryFunction.get(custom_name, numpy_args=(0, 1, 2)) + bin_f = pnlvm.LLVMBinaryFunction.get(custom_name) if mode == 
'CPU': bin_f(vec1, vec2, res) @@ -440,7 +437,7 @@ def test_helper_numerical(mode, op, var, expected, fp_type): builder.ret_void() - bin_f = pnlvm.LLVMBinaryFunction.get(custom_name, numpy_args=(0,)) + bin_f = pnlvm.LLVMBinaryFunction.get(custom_name) res = np.asfarray(var, dtype=bin_f.np_arg_dtypes[0]) @@ -473,7 +470,7 @@ def test_helper_elementwise_op(mode, var, expected): lambda ctx, builder, x: builder.fadd(x.type(1.0), x), out) builder.ret_void() - bin_f = pnlvm.LLVMBinaryFunction.get(custom_name, numpy_args=(0, 1)) + bin_f = pnlvm.LLVMBinaryFunction.get(custom_name) vec = np.asfarray(var, dtype=bin_f.np_arg_dtypes[0].base) res = bin_f.np_buffer_for_arg(1) @@ -519,11 +516,11 @@ def test_helper_recursive_iterate_arrays(mode, var1, var2, expected): builder.ret_void() - bin_f = pnlvm.LLVMBinaryFunction.get(custom_name, numpy_args=(0, 1, 2)) + bin_f = pnlvm.LLVMBinaryFunction.get(custom_name) vec1 = np.asfarray(var1, dtype=bin_f.np_arg_dtypes[0].base) - vec2 = np.asfarray(var2, dtype=bin_f.np_arg_dtypes[0].base) - res = bin_f.np_buffer_for_arg(1) + vec2 = np.asfarray(var2, dtype=bin_f.np_arg_dtypes[1].base) + res = bin_f.np_buffer_for_arg(2) if mode == 'CPU': bin_f(vec1, vec2, res) @@ -555,14 +552,14 @@ def test_helper_convert_fp_type(t1, t2, mode, val): builder.store(conv_x, y) builder.ret_void() - bin_f = pnlvm.LLVMBinaryFunction.get(custom_name, numpy_args=(0, 1)) + bin_f = pnlvm.LLVMBinaryFunction.get(custom_name) # Get the argument numpy dtype np_dt1, np_dt2 = (np.dtype(bin_f.np_arg_dtypes[i]) for i in (0, 1)) # instantiate value, result and reference - x = np.asfarray(val, dtype=np_dt1) - y = np.asfarray(0, dtype=np_dt2) + x = np.asfarray(val, dtype=bin_f.np_arg_dtypes[0]) + y = bin_f.np_buffer_for_arg(1) ref = x.astype(np_dt2) if mode == 'CPU': From 7172f0567b68d7786aa678a3aa92e2703546ba0c Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Tue, 6 Aug 2024 20:24:58 -0400 Subject: [PATCH 305/410] llvm/helpers: Assert on invalid floating point conversion Signed-off-by: Jan Vesely --- psyneulink/core/llvm/helpers.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/psyneulink/core/llvm/helpers.py b/psyneulink/core/llvm/helpers.py index 2eae0e69974..175703458ac 100644 --- a/psyneulink/core/llvm/helpers.py +++ b/psyneulink/core/llvm/helpers.py @@ -319,8 +319,7 @@ def convert_type(builder, val, t): val = builder.fptrunc(val, ir.FloatType()) return builder.fptrunc(val, t) else: - assert val.type == t - return val + assert False, "Unknown float conversion: {} -> {}".format(val.type, t) assert False, "Unknown type conversion: {} -> {}".format(val.type, t) From c8fcf8b79254f19c6346dcb2c01f1d17eaab22b1 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Tue, 6 Aug 2024 23:53:08 -0400 Subject: [PATCH 306/410] llvm: Use zero extend to promote integers Add integer conversion tests. 
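As a minimal illustration of why the extension choice matters (a standalone NumPy sketch for this note, not code from the patch itself): widening the 8-bit pattern 0xFF to 32 bits yields 255 under zero extension but 0xFFFFFFFF under sign extension.

    import numpy as np
    # illustrative only: promote an 8-bit value to 32 bits both ways
    x = np.array([0xFF], dtype=np.uint8)
    zext = int(x.astype(np.uint32)[0])                              # zero extend -> 255
    sext = int(x.view(np.int8).astype(np.int32).view(np.uint32)[0]) # sign extend -> 4294967295
    assert zext == 0xFF and sext == 0xFFFFFFFF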
Signed-off-by: Jan Vesely --- psyneulink/core/llvm/helpers.py | 2 +- tests/llvm/test_helpers.py | 47 ++++++++++++++++++++++++++++++--- 2 files changed, 44 insertions(+), 5 deletions(-) diff --git a/psyneulink/core/llvm/helpers.py b/psyneulink/core/llvm/helpers.py index 175703458ac..9ed6cc1701d 100644 --- a/psyneulink/core/llvm/helpers.py +++ b/psyneulink/core/llvm/helpers.py @@ -292,7 +292,7 @@ def convert_type(builder, val, t): return builder.trunc(val, t) elif val.type.width < t.width: # Python integers are signed - return builder.sext(val, t) + return builder.zext(val, t) else: assert False, "Unknown integer conversion: {} -> {}".format(val.type, t) diff --git a/tests/llvm/test_helpers.py b/tests/llvm/test_helpers.py index 00696744eb1..cd2227ded05 100644 --- a/tests/llvm/test_helpers.py +++ b/tests/llvm/test_helpers.py @@ -193,11 +193,10 @@ def test_helper_all_close(mode, var1, var2, atol, rtol): builder.store(res, out) builder.ret_void() + bin_f = pnlvm.LLVMBinaryFunction.get(custom_name) + res = bin_f.np_buffer_for_arg(2) ref = np.allclose(vec1, vec2, **tolerance) - res = np.array(5, dtype=np.uint32) - - bin_f = pnlvm.LLVMBinaryFunction.get(custom_name) if mode == 'CPU': bin_f(vec1, vec2, res) @@ -558,7 +557,7 @@ def test_helper_convert_fp_type(t1, t2, mode, val): np_dt1, np_dt2 = (np.dtype(bin_f.np_arg_dtypes[i]) for i in (0, 1)) # instantiate value, result and reference - x = np.asfarray(val, dtype=bin_f.np_arg_dtypes[0]) + x = np.asfarray(val, dtype=np_dt1) y = bin_f.np_buffer_for_arg(1) ref = x.astype(np_dt2) @@ -568,3 +567,43 @@ def test_helper_convert_fp_type(t1, t2, mode, val): bin_f.cuda_wrap_call(x, y) np.testing.assert_allclose(y, ref, equal_nan=True) + + +_int_types = [ir.IntType(64), ir.IntType(32), ir.IntType(16), ir.IntType(8)] + + +@pytest.mark.llvm +@pytest.mark.parametrize('mode', ['CPU', pytest.helpers.cuda_param('PTX')]) +@pytest.mark.parametrize('t1', _int_types, ids=str) +@pytest.mark.parametrize('t2', _int_types, ids=str) +@pytest.mark.parametrize('val', [0, 1, -1, 127, -128, 255, -32768, 32767, 65535, np.iinfo(np.int32).min, np.iinfo(np.int32).max]) +def test_helper_convert_int_type(t1, t2, mode, val): + with pnlvm.LLVMBuilderContext.get_current() as ctx: + func_ty = ir.FunctionType(ir.VoidType(), [t1.as_pointer(), t2.as_pointer()]) + custom_name = ctx.get_unique_name("int_convert") + function = ir.Function(ctx.module, func_ty, name=custom_name) + x, y = function.args + block = function.append_basic_block(name="entry") + builder = ir.IRBuilder(block) + + x_val = builder.load(x) + conv_x = pnlvm.helpers.convert_type(builder, x_val, y.type.pointee) + builder.store(conv_x, y) + builder.ret_void() + + bin_f = pnlvm.LLVMBinaryFunction.get(custom_name) + + # Get the argument numpy dtype + np_dt1, np_dt2 = (np.dtype(bin_f.np_arg_dtypes[i]) for i in (0, 1)) + + # instantiate value, result and reference + x = np.asarray(val).astype(np_dt1) + y = bin_f.np_buffer_for_arg(1) + ref = x.astype(np_dt2) + + if mode == 'CPU': + bin_f(x, y) + else: + bin_f.cuda_wrap_call(x, y) + + np.testing.assert_array_equal(y, ref) From eae133f2f1abe4ef2c1acc1baaaf62e2976e410e Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Wed, 7 Aug 2024 11:13:04 -0400 Subject: [PATCH 307/410] llvm/helpers: Always assume that the Treshold target is an array Signed-off-by: Jan Vesely --- psyneulink/core/llvm/helpers.py | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/psyneulink/core/llvm/helpers.py b/psyneulink/core/llvm/helpers.py index 9ed6cc1701d..d0958200ec0 100644 --- 
a/psyneulink/core/llvm/helpers.py +++ b/psyneulink/core/llvm/helpers.py @@ -757,14 +757,16 @@ def generate_sched_condition(self, builder, condition, cond_ptr, node, node_state = builder.gep(nodes_states, [self.ctx.int32_ty(0), self.ctx.int32_ty(node_idx)]) param_ptr = get_state_ptr(builder, target, node_state, param) - if isinstance(param_ptr.type.pointee, ir.ArrayType): - if indices is None: - indices = [0, 0] - elif isinstance(indices, TimeScale): - indices = [indices.value] - - indices = [self.ctx.int32_ty(x) for x in [0] + list(indices)] - param_ptr = builder.gep(param_ptr, indices) + # parameters in state include history of at least one element + # so they are always arrays. + assert isinstance(param_ptr.type.pointee, ir.ArrayType) + + if indices is None: + indices = [0, 0] + elif isinstance(indices, TimeScale): + indices = [indices.value] + + param_ptr = builder.gep(param_ptr, [self.ctx.int32_ty(x) for x in [0] + list(indices)]) val = builder.load(param_ptr) val = convert_type(builder, val, ir.DoubleType()) From db02a8ec5ccebcb0b62819f4fb9144aee54f6550 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Wed, 7 Aug 2024 11:16:06 -0400 Subject: [PATCH 308/410] llvm/helpers: Assert if printf is not found Compiled printf is only available as debugging measure, so warnings are not useful. Signed-off-by: Jan Vesely --- psyneulink/core/llvm/helpers.py | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/psyneulink/core/llvm/helpers.py b/psyneulink/core/llvm/helpers.py index d0958200ec0..d4fcc8cd2f5 100644 --- a/psyneulink/core/llvm/helpers.py +++ b/psyneulink/core/llvm/helpers.py @@ -408,16 +408,12 @@ def printf(builder, fmt, *args, override_debug=False): #FIXME: Fix builtin printf and use that instead of this libc_name = "msvcrt" if sys.platform == "win32" else "c" libc = util.find_library(libc_name) - if libc is None: - warnings.warn("Standard libc library not found, 'printf' not available!") - return + assert libc is not None, "Standard libc library not found" llvm.load_library_permanently(libc) # Address will be none if the symbol is not found printf_address = llvm.address_of_symbol("printf") - if printf_address is None: - warnings.warn("'printf' symbol not found in libc, 'printf' not available!") - return + assert printf_address is not None, "'printf' symbol not found in {}".format(libc) # Direct pointer constants don't work printf_ty = ir.FunctionType(ir.IntType(32), [ir.IntType(8).as_pointer()], var_arg=True) From 7047302cc481391e636c275e32c177b01dc1351a Mon Sep 17 00:00:00 2001 From: jdcpni Date: Mon, 12 Aug 2024 13:17:50 -0400 Subject: [PATCH 309/410] Refactor/autodiff/track pnl (#3030) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit • composition.py, autodiffcomposition.py and relevant subordinate methods: - implement synch and track parameter dictionaries that are passed to relevant methods - add/rename attributes: - PytorchCompositionWrapper: - retained_outputs - retained_targets - retained_losses - _nodes_to_execute_after_gradient_calc - PytorchMechanismWrapper: - value -> output - input - add methods: - synch_with_psyneulink(): centralize copying of params and values to pnl using methods below - copy_node_variables_to_psyneulink(): centralize updating of node (mech & comp) variables in PNL - copy_node_values_to_psyneulink(): centralize updating of node (mech & comp) values in PNL - copy_results_to_psyneulink(): centralize updating of autodiffcomposition.results - retain_in_psyneulink(): centralize tracking of pytorch 
results in PNL using methods below - retain_torch_outputs: keeps record of targets and copies to AutodiffComposition.pytorch_targets at end of call to learn() - retain_torch_targets: keeps record of targets and copies to AutodiffComposition.pytorch_targets at end of call to learn() - retain_torch_losses: keeps record of losses and copies to AutodiffComposition.pytorch_losses at end of call to learn() • compositionrunner.py, autodiffcomposition.py, pytorchwrappers.py: - move loss tracking from parameter on autodiff to attribute on its pytorch_rep - batch_inputs(): add calls to synch_with_psyneulink() and retain_in_psyneulink() - batch_function_inputs(): - needs calls to synch_with_psyneulink() and retain_in_psyneulink() • composition.py: - run(): add _update_results() as helper method that can be overridden (e.g., by autodiffcomposition) for less frequent updating * • autodiffcomposition.py - restrict calls to copy_weights_to_psyneulink based on copy_parameters_to_psyneulink_after arg/attribute - implement handling of optimizations_per_minibatch and copy_parameters_to_psyneulink as attributes and args to learn - autodiff_training(): fix bug in call to pytorch_rep.forward() - implement synch and track Parameters - _manage_synch_and_retain_args() - run(): support specification of synch and retain args when called directly - autodiff._update_learning_parameters -> do_optimization(): - calculates loss for current trial - calls autodiff_backward() to calculate gradients and update parameters - updates tracked_loss over trials - autodiff_backward() -> new method that is called from do_optimization that calculates and updates the gradients - self.loss -> self.loss_function - _update_results() - overridden to call pytorch_rep.retain_for_psyneulink(RUN:trial_output) - learn(): - move tracked_loss for each minibatch from parameter on autodiff to attribute on its pytorch_rep (since that is already context dependent, and avoids calls to pnl.parameters._set on every call to forward()) - synch_with_pnl_options: implement as dict to consolidate synch_projection_matrices_with_torch, synch_node_values_with_torch and synch_node_values_with_torch options passed to learning methods - retain_in_pnl_options: implement as dict to consolidate retain_torch_outputs_in_results, retain_torch_targets and retain_torch_losses passed to learning methods • pytorchwrappers.py - subclass PytorchCompositionWrapper from torch.jit.ScriptModule - retain_for_psyneulink(): implemented - stores outputs, targets, and losses from Pytorch execution for copying to PsyNeuLink at end of learn().
- PytorchMechanismWrapper: - .value -> .output - add .input - add/rename attributes: - PytorchCompositionWrapper: - retained_outputs - retained_targets - retained_losses - _nodes_to_execute_after_gradient_calc - PytorchMechanismWrapper: - value -> output - input - add methods: - synch_with_psyneulink(): centralize copying of params and values to pnl using methods below - copy_node_variables_to_psyneulink(): centralize updating of node (mech & comp) variables in PNL - copy_node_values_to_psyneulink(): centralize updating of node (mech & comp) values in PNL - copy_results_to_psyneulink(): centralize updating of autodiffcomposition.results - retain_in_psyneulink(): centralize tracking of pytorch results in PNL using methods below - retain_torch_outputs: keeps record of targets and copies to AutodiffComposition.pytorch_targets at end of call to learn() - retain_torch_targets: keeps record of targets and copies to AutodiffComposition.pytorch_targets at end of call to learn() - retain_torch_losses: keeps record of losses and copies to AutodiffComposition.pytorch_losses at end of call to learn() • pytorchEMcompositionwrapper.py - store_memory(): - implement single call to linalg over memory - only execute storage_node after last optimization_rep • keywords.py - implement LearningScale keywords class • AutoAssociativeProjection: make dependent on MaskedMappingProjection in prep for allowing lcamechanism to modulate auto/hetero parameters * fix Literals import • Factorize scripts into: - ScriptControl.py - TestParams.py - [MODEL].py --------- Co-authored-by: jdcpni --- ...m 2) - CSW using EMComposition (BACKUP).py | 433 ----------- ...Model (sim 2) - CSW using EMComposition.py | 433 ----------- ...m 2) - CSW with Integrator and Learning.py | 406 ---------- .../EGO/EGO Model - MDP OLD.py | 500 ------------ .../EGO/Tutorial/Declan's EGO Tutorial.ipynb | 399 ++++++++++ .../EGO/Using EMComposition/DeclanParams.py | 93 +++ .../EGO Model - CSW with RNN.py} | 6 +- ...EGO Model - CSW with Simple Integrator.py} | 258 +++---- .../EGO Model - Revaluation.py} | 4 +- .../{ => Using EMComposition}/Environment.py | 0 .../EGO/Using EMComposition/ScriptControl.py | 29 + .../EGO/Using EMComposition/TestParams.py | 56 ++ .../EGO/Using EMComposition/__init__.py | 0 .../EGO Model - MDP.py | 0 .../Using EpisodicMemoryMechanism/__init__.py | 0 .../EGO/__init__.py | 0 .../Models (Under Development)/nback/nback.py | 2 +- .../nback/nback_og_pnl.py | 2 +- psyneulink/core/components/component.py | 10 +- psyneulink/core/compositions/composition.py | 138 +++- psyneulink/core/compositions/showgraph.py | 7 - psyneulink/core/globals/keywords.py | 134 +++- psyneulink/core/globals/parameters.py | 2 +- .../modulatory/learning/EMstoragemechanism.py | 2 +- .../pathway/autoassociativeprojection.py | 3 +- .../compositions/autodiffcomposition.py | 724 ++++++++++++++---- .../library/compositions/compositionrunner.py | 138 +++- .../library/compositions/emcomposition.py | 6 +- .../pytorchEMcompositionwrapper.py | 29 +- .../library/compositions/pytorchshowgraph.py | 4 +- .../library/compositions/pytorchwrappers.py | 470 ++++++++++-- tests/composition/test_autodiffcomposition.py | 24 +- tests/composition/test_emcomposition.py | 12 +- tests/composition/test_report.py | 7 +- 34 files changed, 2033 insertions(+), 2298 deletions(-) delete mode 100644 Scripts/Models (Under Development)/EGO/EGO Model (sim 2) - CSW using EMComposition (BACKUP).py delete mode 100644 Scripts/Models (Under Development)/EGO/EGO Model (sim 2) - CSW using EMComposition.py 
delete mode 100644 Scripts/Models (Under Development)/EGO/EGO Model (sim 2) - CSW with Integrator and Learning.py delete mode 100644 Scripts/Models (Under Development)/EGO/EGO Model - MDP OLD.py create mode 100644 Scripts/Models (Under Development)/EGO/Tutorial/Declan's EGO Tutorial.ipynb create mode 100644 Scripts/Models (Under Development)/EGO/Using EMComposition/DeclanParams.py rename Scripts/Models (Under Development)/EGO/{EGO Model (sim 2) - CSW using EMComposition with WM.py => Using EMComposition/EGO Model - CSW with RNN.py} (98%) rename Scripts/Models (Under Development)/EGO/{EGO Model (sim 2) - CSW with Learning.py => Using EMComposition/EGO Model - CSW with Simple Integrator.py} (65%) rename Scripts/Models (Under Development)/EGO/{EGO Model (sim 1) - MDP using EMComposition.py => Using EMComposition/EGO Model - Revaluation.py} (99%) rename Scripts/Models (Under Development)/EGO/{ => Using EMComposition}/Environment.py (100%) create mode 100644 Scripts/Models (Under Development)/EGO/Using EMComposition/ScriptControl.py create mode 100644 Scripts/Models (Under Development)/EGO/Using EMComposition/TestParams.py create mode 100644 Scripts/Models (Under Development)/EGO/Using EMComposition/__init__.py rename Scripts/Models (Under Development)/EGO/{ => Using EpisodicMemoryMechanism}/EGO Model - MDP.py (100%) create mode 100644 Scripts/Models (Under Development)/EGO/Using EpisodicMemoryMechanism/__init__.py create mode 100644 Scripts/Models (Under Development)/EGO/__init__.py diff --git a/Scripts/Models (Under Development)/EGO/EGO Model (sim 2) - CSW using EMComposition (BACKUP).py b/Scripts/Models (Under Development)/EGO/EGO Model (sim 2) - CSW using EMComposition (BACKUP).py deleted file mode 100644 index 55f44870058..00000000000 --- a/Scripts/Models (Under Development)/EGO/EGO Model (sim 2) - CSW using EMComposition (BACKUP).py +++ /dev/null @@ -1,433 +0,0 @@ -# Princeton University licenses this file to You under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. You may obtain a copy of the License at: -# http://www.apache.org/licenses/LICENSE-2.0 -# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed -# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and limitations under the License. - -# TODO: - -# ADD PREVIOUS STATES -# ADD previous_state to EM and control to support that - -# FIX: TERMINATION CONDITION IS GETTING TRIGGED AFTER 1st TRIAL - -# FOR INPUT NODES: scheduler.add_condition(A, BeforeNCalls(A,1) -# Termination: AfterNCalls(Ctl,2) - -""" -QUESTIONS: - -NOTES: - *MUST* run Experience before Predict, as the latter requires retrieved_reward to be non-zero - (from last trial of Experience) in order to know to encode the next state (see control policy) - -**Overview** ------------- - -This implements a model of... - -The model is an example of... - -The script contains methods to construct, train, and run the model, and analyze the results of its execution: - -* `construct_model `: - takes as arguments parameters used to construct the model; for convenience, defaults are defined below, - (under "Construction parameters") - -* `train_network `: - ... - -* `run_model `: - ... - -* `analyze_results `: - takes as arguments the results of executing the model, and optionally a number of trials and EGO_level to analyze; - returns... 
- - -**The Model** -------------- - -The model is comprised of... - -.. _EGO_Fig: - -.. figure:: _static/` `Composition`. - - -**Construction and Execution** ------------------------------- - -.. _EGO_settings: - -*Settings* -~~~~~~~~~~ - -The default parameters are ones that have been fit to empirical data concerning human performance -(taken from `Kane et al., 2007 `_). - -See "Settings for running the script" to specify whether the model is trained and/or executed when the script is run, -and whether a graphic display of the network is generated when it is constructed. - -.. _EGO_stimuli: - -*Stimuli* -~~~~~~~~~ - -Sequences of stimuli are constructed either using `SweetPea `_ -(using the script in stim/SweetPea) or replicate those used in... - - .. note:: - Use of SweetPea for stimulus generation requires it be installed:: - >> pip install sweetpea - - -.. _EGO_training: - -*Training* -~~~~~~~~~~ - -MORE HERE - -.. _EGO_execution: - -*Execution* -~~~~~~~~~~~ - -MORE HERE - -.. _EGO_methods_reference: - -**Methods Reference** ---------------------- - - -""" - -import numpy as np -from enum import IntEnum - -from psyneulink import * -from psyneulink._typing import Union, Literal -from psyneulink.core.scheduling.condition import Any, And, AllHaveRun, AtRunStart - -# Settings for running script: - -NUM_EXP_SEQS = 5 # Number of sequences to run in EXPERIENCE Phase (includes baseline + revaluation) -NUM_PRED_TRIALS = 10 # Number of trials (ROLL OUTS) to run in PREDICTION Phase - -CONSTRUCT_MODEL = True # THIS MUST BE SET TO True to run the script -DISPLAY_MODEL = ( # Only one of the following can be uncommented: - # None # suppress display of model - {} # show simple visual display of model - # {'show_node_structure': True} # show detailed view of node structures and projections -) -# RUN_MODEL = True # True => run the model -RUN_MODEL = False # False => don't run the model -EXECUTION_MODE = ExecutionMode.Python -# EXECUTION_MODE = ExecutionMode.PyTorch -ANALYZE_RESULTS = False # True => output analysis of results of run -# REPORT_OUTPUT = ReportOutput.FULL # Sets console output during run [ReportOutput.ON, .TERSE OR .FULL] -REPORT_OUTPUT = ReportOutput.OFF # Sets console output during run [ReportOutput.ON, .TERSE OR .FULL] -REPORT_PROGRESS = ReportProgress.OFF # Sets console progress bar during run -PRINT_RESULTS = False # print model.results after execution -ANIMATE = False # {UNIT:EXECUTION_SET} # Specifies whether to generate animation of execution - - -#region PARAMETERS -# ====================================================================================================================== -# PARAMETERS -# ====================================================================================================================== - -# PyTorch Version Parameters: -model_params = dict( - n_participants=58, - n_simulations = 100, # number of rollouts per participant - num_seqs = 20, # total number of sequences to be executed (used to set size of EM) - n_steps = 3, # number of steps per rollout - state_d = 7, # length of state vector - context_d = 7, # length of context vector - time_d = 25, # length of time vector - self_excitation = .25, # rate at which old context is carried over to new context - input_weight = .5, # rate at which state is integrated into new context - retrieved_context_weight = .25, # rate at which context retrieved from EM is integrated into new context - time_noise=.01,# noise std for time integrator (drift is set to 0) - state_weight = .5, # weight of the state used during 
memory retrieval - context_weight = .3, # weight of the context used during memory retrieval - time_weight = .2, # weight of the time used during memory retrieval - temperature = .05 # temperature of the softmax used during memory retrieval (smaller means more argmax-like -) - -# Fixed (structural) parameters: - -# Names: -MODEL_NAME = "EGO Model CSW" -STATE_INPUT_LAYER_NAME = "STATE" -CONTEXT_LAYER_NAME = 'CONTEXT' -PREVIOUS_STATE_NAME = 'PREVIOUS_STATE' -EM_NAME = "EM" -PREDICTION_LAYER_NAME = "PREDICTION" - -EMFieldsIndex = IntEnum('EMFields', - ['STATE', - 'CONTEXT', - 'PREVIOUS_STATE'], - start=0) - - -# CONSTRUCTION PARAMETERS - -# Layer sizes: -STATE_SIZE = model_params['state_d'] # length of state vector -CONTEXT_SIZE = model_params['context_d'] # length of state vector - -# Context processing: -STATE_WEIGHT = model_params['input_weight'] # rate at which external vs. memory state are integrated in context_layer -CONTEXT_INTEGRATION_RATE = model_params['retrieved_context_weight'] # rate at which retrieved context (from EM) - # is integrated into context_layer -assert (model_params['retrieved_context_weight'] + STATE_WEIGHT + CONTEXT_INTEGRATION_RATE) == 1,\ - (f"Sum of STATE_WEIGHT ({STATE_WEIGHT}), CONTEXT_INTEGRATION_RATE ({CONTEXT_INTEGRATION_RATE}), " - f"and RETRIEVED_CONTEXT_WEIGHT ({model_params['retrieved_context_weight']}) must equal 1") - -# EM retrieval -STATE_RETRIEVAL_WEIGHT = model_params['state_weight'] # weight of state field in retrieval from EM -CONTEXT_RETRIEVAL_WEIGHT = model_params['context_weight'] # weight of context field in retrieval from EM -RETRIEVAL_SOFTMAX_GAIN = 1/model_params['temperature'] # gain on softmax retrieval function - -PREVIOUS_STATE_WEIGHT = 0 - -RANDOM_WEIGHTS_INITIALIZATION=RandomMatrix(center=0.0, range=0.1) # Matrix spec used to initialize all Projections - -#endregion - -#region ENVIRONMENT -# ====================================================================================================================== -# ENVIRONMENT -# ====================================================================================================================== - -# Task environment: -NUM_STIM_PER_SEQ = model_params['n_steps'] # number of stimuli in a sequence -NUM_SEQS = model_params['num_seqs'] # total number of sequences to be executed (to set size of EM) - -STIM_SEQS = [list(range(1,NUM_STIM_PER_SEQ*2,2)), - list(range(2,NUM_STIM_PER_SEQ*2+1,2))] -CURRICULUM_TYE = 'blocked' # 'blocked' or 'interleaved' - -#endregion - -#region MODEL -# ====================================================================================================================== -# MODEL -# ====================================================================================================================== - -def construct_model(model_name:str=MODEL_NAME, - - # Inputs: - state_input_name:str=STATE_INPUT_LAYER_NAME, - state_size:int=STATE_SIZE, - - # Context processing: - context_name:str=CONTEXT_LAYER_NAME, - state_weight:Union[float,int]=STATE_WEIGHT, - context_integration_rate:Union[float,int]=CONTEXT_INTEGRATION_RATE, - - # EM: - em_name:str=EM_NAME, - retrieval_softmax_gain=RETRIEVAL_SOFTMAX_GAIN, - state_retrieval_weight:Union[float,int]=STATE_RETRIEVAL_WEIGHT, - context_retrieval_weight:Union[float,int]=CONTEXT_RETRIEVAL_WEIGHT, - previous_state_name=PREVIOUS_STATE_NAME, - previous_state_weight:Union[float,int]=PREVIOUS_STATE_WEIGHT, - - # Output / decision processing: - PREDICTION_LAYER_NAME:str=PREDICTION_LAYER_NAME, - - )->Composition: - - # Apportionment of 
contributions of state (actual or em) vs. context (em) to context_layer integration: - - # FIX: THIS IS FOR MDP; NEEDS TO BE REVISED FOR CSW - # state input (EXPERIENCE) -\ - # --> state_weight -------\ - # state from em (PREDICT)---/ -> * (context_integration_rate) -----\ - # /-----> context_weight ---/ --> context - # context from em --------/ (=1- state_weight) / - # /---> 1 - context_integration_rate --/ - # context from prev. cycle -------------------------/ - - assert 0 <= context_integration_rate <= 1,\ - f"context_retrieval_weight must be a number from 0 to 1" - assert 0 <= state_weight <= 1,\ - f"context_retrieval_weight must be a number from 0 to 1" - context_weight = 1 - state_weight - state_weight *= context_integration_rate - context_weight *= context_integration_rate - - # ---------------------------------------------------------------------------------------------------------------- - # ------------------------------------------------- Nodes ------------------------------------------------------ - # ---------------------------------------------------------------------------------------------------------------- - - state_input_layer = ProcessingMechanism(name=state_input_name, size=state_size) - context_layer = RecurrentTransferMechanism(name=context_name, - size=state_size, - auto=1-context_integration_rate, - hetero=0.0) - em = EMComposition(name=em_name, - memory_template=[[0] * state_size, # state - [0] * state_size, # previous state - [0] * state_size], # context - memory_fill=(0,.01), - memory_capacity=NUM_SEQS, - softmax_gain=1.0, - # Input Nodes: - field_names=[state_input_name, - previous_state_name, - context_name, - ], - field_weights=(state_retrieval_weight, - previous_state_weight, - context_retrieval_weight - ) - ) - - prediction_layer = ProcessingMechanism(name=PREDICTION_LAYER_NAME) - - - # ---------------------------------------------------------------------------------------------------------------- - # ------------------------------------------------- EGO Composition -------------------------------------------- - # ---------------------------------------------------------------------------------------------------------------- - - - EGO_comp = Composition(name=model_name, - # # Terminate a Task.PREDICT trial after prediction_layer executes if a reward is retrieved - # termination_processing={ - # # TimeScale.TRIAL: And(Condition(lambda: task_input_layer.value == Task.PREDICT), - # # Condition(lambda: retrieved_reward_layer.value), - # # JustRan(prediction_layer))} - # # CRASHES: - # # TimeScale.TRIAL: Any(And(Condition(lambda: task_input_layer.value == Task.EXPERIENCE), - # # JustRan(em)), - # # And(Condition(lambda: task_input_layer.value == Task.PREDICT), - # # Condition(lambda: retrieved_reward_layer.value), - # # JustRan(prediction_layer)))} - # TimeScale.TRIAL: Any(And(Condition(lambda: task_input_layer.value == Task.EXPERIENCE), - # AllHaveRun()), - # And(Condition(lambda: task_input_layer.value == Task.PREDICT), - # Condition(lambda: retrieved_reward_layer.value), - # AllHaveRun()))} - ) - - # Nodes not included in (decision output) Pathway specified above - EGO_comp.add_nodes([state_input_layer, context_layer, em, prediction_layer]) - - # Projections: - QUERY = ' [QUERY]' - VALUE = ' [VALUE]' - RETRIEVED = ' [RETRIEVED]' - - # EM encoding -------------------------------------------------------------------------------- - # state -> em - EGO_comp.add_projection(MappingProjection(state_input_layer, - em.nodes[state_input_name + QUERY])) - # context -> 
em - EGO_comp.add_projection(MappingProjection(context_layer, - em.nodes[context_name + QUERY])) - - # Inputs to Context --------------------------------------------------------------------------- - # retrieved context -> context_layer - EGO_comp.add_projection(MappingProjection(state_input_layer, - context_layer, - matrix=np.eye(STATE_SIZE) * state_weight)) - - # Response pathway --------------------------------------------------------------------------- - # retrieved state -> prediction_layer - EGO_comp.add_projection(MappingProjection(em.nodes[state_input_name + RETRIEVED], - prediction_layer)) - - - # Validate construction - assert context_layer.input_port.path_afferents[0].sender.owner == context_layer # recurrent projection - assert context_layer.input_port.path_afferents[0].parameters.matrix.get()[0][0] == 1-context_integration_rate - # assert context_layer.input_port.path_afferents[1].sender.owner == em.nodes[CONTEXT_LAYER_NAME + RETRIEVED] # - assert context_layer.input_port.path_afferents[1].sender.owner == state_input_layer # - # memory of - # context - assert context_layer.input_port.path_afferents[1].parameters.matrix.get()[0][0] == state_weight - - return EGO_comp -#endregion - - -#region SCRIPT EXECUTION -# ====================================================================================================================== -# SCRIPT EXECUTION -# ====================================================================================================================== - -if __name__ == '__main__': - model = None - - if CONSTRUCT_MODEL: - print(f'Constructing {MODEL_NAME}') - model = construct_model() - assert 'DEBUGGING BREAK POINT' - - if DISPLAY_MODEL is not None: - if model: - model.show_graph(**DISPLAY_MODEL) - else: - print("Model not yet constructed") - - if RUN_MODEL: - experience_inputs = build_experience_inputs(state_size=STATE_SIZE, - time_drift_rate=TIME_DRIFT_RATE, - num_baseline_seqs=NUM_BASELINE_SEQS, - num_revaluation_seqs=NUM_REVALUATION_SEQS, - reward_vals=REWARD_VALS, - CURRICULUM_TYE=CURRICULUM_TYE, - ratio=RATIO, - stim_seqs=STIM_SEQS) - input_layers = [TIME_INPUT_LAYER_NAME, - TASK_INPUT_LAYER_NAME, - STATE_INPUT_LAYER_NAME, - REWARD_INPUT_LAYER_NAME] - - # Experience Phase - print(f"Presenting {model.name} with {TOTAL_NUM_EXPERIENCE_STIMS} EXPERIENCE stimuli") - model.run(inputs={k: v for k, v in zip(input_layers, experience_inputs)}, - execution_mode=EXECUTION_MODE, - report_output=REPORT_OUTPUT, - report_progress=REPORT_PROGRESS) - - # Prediction Phase - prediction_inputs = build_prediction_inputs(state_size=STATE_SIZE, - time_drift_rate=TIME_DRIFT_RATE, - num_roll_outs_per_stim=int(NUM_ROLL_OUTS / 2), - stim_seqs=STIM_SEQS, - reward_vals=REWARD_VALS, - seq_type=PREDICT_SEQ_TYPE) - print(f"Running {model.name} for {NUM_ROLL_OUTS} PREDICT (ROLL OUT) trials") - model.termination_processing = { - TimeScale.TRIAL: And(Condition(lambda: model.nodes[TASK_INPUT_LAYER_NAME].value == Task.PREDICT), - Condition(lambda: model.nodes[RETRIEVED_REWARD_NAME].value), - # JustRan(model.nodes[PREDICTION_LAYER_NAME]) - AllHaveRun() - ) - } - model.run(inputs={k: v for k, v in zip(input_layers, prediction_inputs)}, - report_output=REPORT_OUTPUT, - report_progress=REPORT_PROGRESS - ) - - if PRINT_RESULTS: - print(f"Predicted reward for last stimulus: {model.results}") - #endregion \ No newline at end of file diff --git a/Scripts/Models (Under Development)/EGO/EGO Model (sim 2) - CSW using EMComposition.py b/Scripts/Models (Under Development)/EGO/EGO Model (sim 2) - CSW using 
EMComposition.py deleted file mode 100644 index b26e4d07f00..00000000000 --- a/Scripts/Models (Under Development)/EGO/EGO Model (sim 2) - CSW using EMComposition.py +++ /dev/null @@ -1,433 +0,0 @@ -# Princeton University licenses this file to You under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. You may obtain a copy of the License at: -# http://www.apache.org/licenses/LICENSE-2.0 -# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed -# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and limitations under the License. - -# TODO: - -# ADD PREVIOUS STATES -# ADD next_state to EM and control to support that -# - CONTROL FLOW: -# - UPDATE CONTEXT LAYER: INTEGRATE CURRENT STATE IN CONTEXT LAYER -# - USE UPDATED CONTEXT + CURRENT STATE TO RETRIEVE PREDICTED NEXT STATE -# - GET NEXT STATE -# - ENCODE "CURRENT" (I.E., PREVIOUS) STATE + "NEXT" (NOW ACTUALLY CURRENT) STATE + CONTEXT (PRIOR TO -# INTEGRATION OF "NEXT") INTO EM - -# - CONTROL FLOW (FROM VANTAGE OF "NEXT" STATE): -# - USE CONTEXT + PREVIOUS STATE TO RETRIEVE PREDICTION OF CURRENT STATE -# - ENCODE PREVIOUS STATE + CURRENT STATE + CONTEXT INTO EM -# - UPDATE CONTEXT LAYER: INTEGRATE CURRENT STATE IN CONTEXT LAYER: -# SO: -# - EM SHOULD EXECUTE FIRST: -# - USE VALUES OF WM (PREVIOUS STATE) NODE AND CONTEXT LAYER TO RETRIEVE PREDICTED CURRENT STATE -# - ENCODE VALUES OF WM (PREVIOUS STATE), CURRENT STATE (INPUT), AND CONTEXT LAYER IN EM -# - THEN WM SHOULD EXECUTE TO UPDATE WITH CURRENT STATE (INPUT) -# - THEN CONTEXT LAYER SHOULD EXECUTE, INTEGRATING CURRENT STATE (INPUT) [OR WM] -# - LEARNING SHOULD USE CURRENT STATE AS TARGET TO TRAIN PREDICTED CURRENT STATE - - - -# FIX: TERMINATION CONDITION IS GETTING TRIGGED AFTER 1st TRIAL - -# FOR INPUT NODES: scheduler.add_condition(A, BeforeNCalls(A,1) -# Termination: AfterNCalls(Ctl,2) - -""" -QUESTIONS: - -NOTES: - *MUST* run Experience before Predict, as the latter requires retrieved_reward to be non-zero - (from last trial of Experience) in order to know to encode the next state (see control policy) - -**Overview** ------------- - -This implements a model of... - -The model is an example of... - -The script contains methods to construct, train, and run the model, and analyze the results of its execution: - -* `construct_model `: - takes as arguments parameters used to construct the model; for convenience, defaults are defined below, - (under "Construction parameters") - -* `train_network `: - ... - -* `run_model `: - ... - -* `analyze_results `: - takes as arguments the results of executing the model, and optionally a number of trials and EGO_level to analyze; - returns... - - -**The Model** -------------- - -The model is comprised of... - -.. _EGO_Fig: - -.. figure:: _static/` `Composition`. - - -**Construction and Execution** ------------------------------- - -.. _EGO_settings: - -*Settings* -~~~~~~~~~~ - -The default parameters are ones that have been fit to empirical data concerning human performance -(taken from `Kane et al., 2007 `_). - -See "Settings for running the script" to specify whether the model is trained and/or executed when the script is run, -and whether a graphic display of the network is generated when it is constructed. - -.. 
_EGO_stimuli: - -*Stimuli* -~~~~~~~~~ - -Sequences of stimuli are constructed either using `SweetPea `_ -(using the script in stim/SweetPea) or replicate those used in... - - .. note:: - Use of SweetPea for stimulus generation requires it be installed:: - >> pip install sweetpea - - -.. _EGO_training: - -*Training* -~~~~~~~~~~ - -MORE HERE - -.. _EGO_execution: - -*Execution* -~~~~~~~~~~~ - -MORE HERE - -.. _EGO_methods_reference: - -**Methods Reference** ---------------------- - - -""" - -import numpy as np -from enum import IntEnum - -from psyneulink import * -from psyneulink._typing import Union, Literal -from psyneulink.core.scheduling.condition import Any, And, AllHaveRun, AtRunStart - -# Settings for running script: - -NUM_EXP_SEQS = 5 # Number of sequences to run in EXPERIENCE Phase (includes baseline + revaluation) -NUM_PRED_TRIALS = 10 # Number of trials (ROLL OUTS) to run in PREDICTION Phase - -CONSTRUCT_MODEL = True # THIS MUST BE SET TO True to run the script -DISPLAY_MODEL = ( # Only one of the following can be uncommented: - # None # suppress display of model - {} # show simple visual display of model - # {'show_node_structure': True} # show detailed view of node structures and projections -) -# RUN_MODEL = True # True => run the model -RUN_MODEL = False # False => don't run the model -EXECUTION_MODE = ExecutionMode.Python -# EXECUTION_MODE = ExecutionMode.PyTorch -ANALYZE_RESULTS = False # True => output analysis of results of run -# REPORT_OUTPUT = ReportOutput.FULL # Sets console output during run [ReportOutput.ON, .TERSE OR .FULL] -REPORT_OUTPUT = ReportOutput.OFF # Sets console output during run [ReportOutput.ON, .TERSE OR .FULL] -REPORT_PROGRESS = ReportProgress.OFF # Sets console progress bar during run -PRINT_RESULTS = False # print model.results after execution -ANIMATE = False # {UNIT:EXECUTION_SET} # Specifies whether to generate animation of execution - - -#region PARAMETERS -# ====================================================================================================================== -# PARAMETERS -# ====================================================================================================================== - -# PyTorch Version Parameters: -model_params = dict( - n_participants=58, - n_simulations = 100, # number of rollouts per participant - num_seqs = 20, # total number of sequences to be executed (used to set size of EM) - n_steps = 3, # number of steps per rollout - state_d = 7, # length of state vector - context_d = 7, # length of context vector - time_d = 25, # length of time vector - self_excitation = .25, # rate at which old context is carried over to new context - integration_rate = .5, # rate at which state is integrated into new context - state_weight = .5, # weight of the state used during memory retrieval - context_weight = .3, # weight of the context used during memory retrieval - time_noise=.01,# noise std for time integrator (drift is set to 0) - temperature = .05 # temperature of the softmax used during memory retrieval (smaller means more argmax-like -) - -# Fixed (structural) parameters: - -# Names: -MODEL_NAME = "EGO Model CSW" -STATE_INPUT_LAYER_NAME = "STATE" -CONTEXT_LAYER_NAME = 'CONTEXT' -NEXT_STATE_NAME = 'NEXT_STATE' -EM_NAME = "EM" -PREDICTION_LAYER_NAME = "PREDICTION" - -EMFieldsIndex = IntEnum('EMFields', - ['STATE', - 'CONTEXT', - 'NEXT_STATE'], - start=0) - - -# CONSTRUCTION PARAMETERS - -# Layer sizes: -STATE_SIZE = model_params['state_d'] # length of state vector -CONTEXT_SIZE = model_params['context_d'] # 
length of state vector - -# Context processing: -INTEGRATION_RATE = model_params['integration_rate'] # rate at which state is integrated into context_layer - -# EM retrieval -STATE_RETRIEVAL_WEIGHT = model_params['state_weight'] # weight of state field in retrieval from EM -CONTEXT_RETRIEVAL_WEIGHT = model_params['context_weight'] # weight of context field in retrieval from EM -RETRIEVAL_SOFTMAX_GAIN = 1/model_params['temperature'] # gain on softmax retrieval function - -NEXT_STATE_WEIGHT = 0 - -RANDOM_WEIGHTS_INITIALIZATION=RandomMatrix(center=0.0, range=0.1) # Matrix spec used to initialize all Projections - -#endregion - -#region ENVIRONMENT -# ====================================================================================================================== -# ENVIRONMENT -# ====================================================================================================================== - -# Task environment: -NUM_STIM_PER_SEQ = model_params['n_steps'] # number of stimuli in a sequence -NUM_SEQS = model_params['num_seqs'] # total number of sequences to be executed (to set size of EM) - -STIM_SEQS = [list(range(1,NUM_STIM_PER_SEQ*2,2)), - list(range(2,NUM_STIM_PER_SEQ*2+1,2))] -CURRICULUM_TYE = 'blocked' # 'blocked' or 'interleaved' - -#endregion - -#region MODEL -# ====================================================================================================================== -# MODEL -# ====================================================================================================================== - -def construct_model(model_name:str=MODEL_NAME, - - # Inputs: - state_input_name:str=STATE_INPUT_LAYER_NAME, - state_size:int=STATE_SIZE, - - # Context processing: - context_name:str=CONTEXT_LAYER_NAME, - integration_rate:Union[float,int]=INTEGRATION_RATE, - - # EM: - em_name:str=EM_NAME, - retrieval_softmax_gain=RETRIEVAL_SOFTMAX_GAIN, - state_retrieval_weight:Union[float,int]=STATE_RETRIEVAL_WEIGHT, - context_retrieval_weight:Union[float,int]=CONTEXT_RETRIEVAL_WEIGHT, - next_state_name=NEXT_STATE_NAME, - next_state_weight:Union[float,int]=NEXT_STATE_WEIGHT, - - # Output / decision processing: - PREDICTION_LAYER_NAME:str=PREDICTION_LAYER_NAME, - - )->Composition: - - # Apportionment of contributions of state (actual or em) vs. 
context (em) to context_layer integration: - - - assert 0 <= integration_rate <= 1,\ - f"context_retrieval_weight must be a number from 0 to 1" - - # ---------------------------------------------------------------------------------------------------------------- - # ------------------------------------------------- Nodes ------------------------------------------------------ - # ---------------------------------------------------------------------------------------------------------------- - - state_input_layer = ProcessingMechanism(name=state_input_name, size=state_size) - context_layer = RecurrentTransferMechanism(name=context_name, - size=state_size, - auto=1-integration_rate, - hetero=0.0) - em = EMComposition(name=em_name, - memory_template=[[0] * state_size, # state - [0] * state_size, # previous state - [0] * state_size], # context - memory_fill=(0,.01), - memory_capacity=NUM_SEQS, - softmax_gain=1.0, - # Input Nodes: - field_names=[state_input_name, - next_state_name, - context_name, - ], - field_weights=(state_retrieval_weight, - next_state_weight, - context_retrieval_weight - ) - ) - - prediction_layer = ProcessingMechanism(name=PREDICTION_LAYER_NAME) - - - # ---------------------------------------------------------------------------------------------------------------- - # ------------------------------------------------- EGO Composition -------------------------------------------- - # ---------------------------------------------------------------------------------------------------------------- - - - EGO_comp = Composition(name=model_name, - # # Terminate a Task.PREDICT trial after prediction_layer executes if a reward is retrieved - # termination_processing={ - # # TimeScale.TRIAL: And(Condition(lambda: task_input_layer.value == Task.PREDICT), - # # Condition(lambda: retrieved_reward_layer.value), - # # JustRan(prediction_layer))} - # # CRASHES: - # # TimeScale.TRIAL: Any(And(Condition(lambda: task_input_layer.value == Task.EXPERIENCE), - # # JustRan(em)), - # # And(Condition(lambda: task_input_layer.value == Task.PREDICT), - # # Condition(lambda: retrieved_reward_layer.value), - # # JustRan(prediction_layer)))} - # TimeScale.TRIAL: Any(And(Condition(lambda: task_input_layer.value == Task.EXPERIENCE), - # AllHaveRun()), - # And(Condition(lambda: task_input_layer.value == Task.PREDICT), - # Condition(lambda: retrieved_reward_layer.value), - # AllHaveRun()))} - ) - - # Nodes not included in (decision output) Pathway specified above - EGO_comp.add_nodes([state_input_layer, context_layer, em, prediction_layer]) - - # Projections: - QUERY = ' [QUERY]' - VALUE = ' [VALUE]' - RETRIEVED = ' [RETRIEVED]' - - # EM encoding -------------------------------------------------------------------------------- - # state -> em - EGO_comp.add_projection(MappingProjection(state_input_layer, - em.nodes[state_input_name + QUERY])) - # context -> em - EGO_comp.add_projection(MappingProjection(context_layer, - em.nodes[context_name + QUERY])) - - # Inputs to Context --------------------------------------------------------------------------- - # retrieved context -> context_layer - EGO_comp.add_projection(MappingProjection(state_input_layer, - context_layer, - # matrix=np.eye(STATE_SIZE) * state_weight - )) - - # Response pathway --------------------------------------------------------------------------- - # retrieved state -> prediction_layer - EGO_comp.add_projection(MappingProjection(em.nodes[next_state_name + RETRIEVED], - prediction_layer)) - - - # FIX: REMAINS TO BE FIXED: - # Validate 
construction - assert context_layer.input_port.path_afferents[0].sender.owner == context_layer # recurrent projection - assert context_layer.input_port.path_afferents[0].parameters.matrix.get()[0][0] == 1-integration_rate - # assert context_layer.input_port.path_afferents[1].sender.owner == em.nodes[CONTEXT_LAYER_NAME + RETRIEVED] # - assert context_layer.input_port.path_afferents[1].sender.owner == state_input_layer # - # memory of context - # assert context_layer.input_port.path_afferents[1].parameters.matrix.get()[0][0] == state_weight - - return EGO_comp -#endregion - - -#region SCRIPT EXECUTION -# ====================================================================================================================== -# SCRIPT EXECUTION -# ====================================================================================================================== - -if __name__ == '__main__': - model = None - - if CONSTRUCT_MODEL: - print(f'Constructing {MODEL_NAME}') - model = construct_model() - assert 'DEBUGGING BREAK POINT' - - if DISPLAY_MODEL is not None: - if model: - model.show_graph(**DISPLAY_MODEL) - else: - print("Model not yet constructed") - - if RUN_MODEL: - experience_inputs = build_experience_inputs(state_size=STATE_SIZE, - time_drift_rate=TIME_DRIFT_RATE, - num_baseline_seqs=NUM_BASELINE_SEQS, - num_revaluation_seqs=NUM_REVALUATION_SEQS, - reward_vals=REWARD_VALS, - CURRICULUM_TYE=CURRICULUM_TYE, - ratio=RATIO, - stim_seqs=STIM_SEQS) - input_layers = [TIME_INPUT_LAYER_NAME, - TASK_INPUT_LAYER_NAME, - STATE_INPUT_LAYER_NAME, - REWARD_INPUT_LAYER_NAME] - - # Experience Phase - print(f"Presenting {model.name} with {TOTAL_NUM_EXPERIENCE_STIMS} EXPERIENCE stimuli") - model.run(inputs={k: v for k, v in zip(input_layers, experience_inputs)}, - execution_mode=EXECUTION_MODE, - report_output=REPORT_OUTPUT, - report_progress=REPORT_PROGRESS) - - # Prediction Phase - prediction_inputs = build_prediction_inputs(state_size=STATE_SIZE, - time_drift_rate=TIME_DRIFT_RATE, - num_roll_outs_per_stim=int(NUM_ROLL_OUTS / 2), - stim_seqs=STIM_SEQS, - reward_vals=REWARD_VALS, - seq_type=PREDICT_SEQ_TYPE) - print(f"Running {model.name} for {NUM_ROLL_OUTS} PREDICT (ROLL OUT) trials") - model.termination_processing = { - TimeScale.TRIAL: And(Condition(lambda: model.nodes[TASK_INPUT_LAYER_NAME].value == Task.PREDICT), - Condition(lambda: model.nodes[RETRIEVED_REWARD_NAME].value), - # JustRan(model.nodes[PREDICTION_LAYER_NAME]) - AllHaveRun() - ) - } - model.run(inputs={k: v for k, v in zip(input_layers, prediction_inputs)}, - report_output=REPORT_OUTPUT, - report_progress=REPORT_PROGRESS - ) - - if PRINT_RESULTS: - print(f"Predicted reward for last stimulus: {model.results}") - #endregion \ No newline at end of file diff --git a/Scripts/Models (Under Development)/EGO/EGO Model (sim 2) - CSW with Integrator and Learning.py b/Scripts/Models (Under Development)/EGO/EGO Model (sim 2) - CSW with Integrator and Learning.py deleted file mode 100644 index d9e8ac79432..00000000000 --- a/Scripts/Models (Under Development)/EGO/EGO Model (sim 2) - CSW with Integrator and Learning.py +++ /dev/null @@ -1,406 +0,0 @@ -# Princeton University licenses this file to You under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
You may obtain a copy of the License at: -# http://www.apache.org/licenses/LICENSE-2.0 -# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed -# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and limitations under the License. - -# CONTROL FLOW: -# - EM EXECUTES FIRST: -# - RETRIEVES USING PREVIOUS STATE NODE AND CONTEXT (PRE-INTEGRATION) TO RETRIEVE PREDICTED CURRENT STATE -# - STORES VALUES OF PREVIOUS STATE, CURRENT STATE (INPUT) AND CONTEXT (PRE-INTEGRATION) INTO EM -# - THEN: -# - PREVIOUS_STATE EXECUTES TO GET CURRENT_STATE_INPUT (FOR RETRIEVAL ON NEXT TRIAL) -# - INTEGRATOR LAYER EXECUTES, INTEGRATING CURRENT_STATE_INPUT INTO MEMORY -# - CONTEXT LAYER EXECUTES TO GET LEARNED CONTEXT (FOR RETRIEVAL ON NEXT TRIAL) -# - PREDICTED CURRENT STATE IS COMPARED WITH ACTUAL CURRENT STATE (TARGET) TO UPDATE INTEGRATOR -> CONTEXT WEIGHTS - - -# TODO: - -# SCRIPT STUFF: -# - REPLACE INTEGRATOR RECURRENTTRANSFERMECHANISM WITH TRANSFERMECHANISM IN INTEGRATOR MODE -# OR TRY USING LCA with DECAY? -# - ADD LEARNING: -# - SET LEARNABILITY OF OUTER COMP PROJECTIONS -# - ADD PROJECTION OF CURRENT STATE TO TARGET (GOTTEN FROM LEARNING COMPONENTS) -# - DEBUG LEARNING -# PNL STUFF: -# - BUG: -# ? autodiffcomposition LINE 538: infinite while loop -# ? try taking out the integrator layer and see if it works -# ? try removing learnable attribute from projections to STORE node -# ? STORE node shows up multiple times in queue (but should be existing tests for convergence in nested) -# ? divergengence of STATE to PREVIOUS_STATE and STATE to EM projections confuses _get_backprop_pathway -# when traversing EM.input_CIM projections in depth part of search (since -# STATE->PREVIOUS_STATE->PREVIOUS_STATE [QUERY] is a valid path) even though the only one wanted for learning -# is the direct STATE->EM->STATE [VALUE] projection -# (see _get_backprop_pathway in AutodiffComposition, LINE 591 onward) -# - ADD COMMENT TO autodiffcomposition LINE 552 explaining what the subsquent block of code does -# - WRITE METHOD IN AUTODIFFCOMPOSITION to show_learning in show_graph() -# - DOCUMENT API FOR SPECIFYING PROJECTIONS TO NODES OF NESTED COMPOSITION -# (VIZ, *HAVE* TO EXPLICILTY SPECIFY PROJECTIONS TO NODES OF NESTED COMPOSITION AND ALSO INCLUDE THE NESTED COMP) - -""" -QUESTIONS: - -NOTES: - *MUST* run Experience before Predict, as the latter requires retrieved_reward to be non-zero - (from last trial of Experience) in order to know to encode the next state (see control policy) - -**Overview** ------------- - -This implements a model of... - -The model is an example of... - -The script contains methods to construct, train, and run the model, and analyze the results of its execution: - -* `construct_model `: - takes as arguments parameters used to construct the model; for convenience, defaults are defined below, - (under "Construction parameters") - -* `train_network `: - ... - -* `run_model `: - ... - -* `analyze_results `: - takes as arguments the results of executing the model, and optionally a number of trials and EGO_level to analyze; - returns... - - -**The Model** -------------- - -The model is comprised of... - -.. _EGO_Fig: - -.. figure:: _static/` `Composition`. - - -**Construction and Execution** ------------------------------- - -.. 
_EGO_settings: - -*Settings* -~~~~~~~~~~ - -The default parameters are ones that have been fit to empirical data concerning human performance -(taken from `Kane et al., 2007 `_). - -See "Settings for running the script" to specify whether the model is trained and/or executed when the script is run, -and whether a graphic display of the network is generated when it is constructed. - -.. _EGO_stimuli: - -*Stimuli* -~~~~~~~~~ - -Sequences of stimuli are constructed either using `SweetPea `_ -(using the script in stim/SweetPea) or replicate those used in... - - .. note:: - Use of SweetPea for stimulus generation requires it be installed:: - >> pip install sweetpea - - -.. _EGO_training: - -*Training* -~~~~~~~~~~ - -MORE HERE - -.. _EGO_execution: - -*Execution* -~~~~~~~~~~~ - -MORE HERE - -.. _EGO_methods_reference: - -**Methods Reference** ---------------------- - - -""" - -import numpy as np -import graph_scheduler as gs -from enum import IntEnum - -from psyneulink import * -from psyneulink._typing import Union, Literal - -# Settings for running script: - -MEMORY_CAPACITY = 5 -CONSTRUCT_MODEL = True # THIS MUST BE SET TO True to run the script -DISPLAY_MODEL = ( # Only one of the following can be uncommented: - None # suppress display of model - # {} # show simple visual display of model - # {'show_node_structure': True} # show detailed view of node structures and projections -) -RUN_MODEL = True # True => run the model -# RUN_MODEL = False # False => don't run the model -EXECUTION_MODE = ExecutionMode.Python -# EXECUTION_MODE = ExecutionMode.PyTorch -ANALYZE_RESULTS = False # True => output analysis of results of run -# REPORT_OUTPUT = ReportOutput.FULL # Sets console output during run [ReportOutput.ON, .TERSE OR .FULL] -REPORT_OUTPUT = ReportOutput.OFF # Sets console output during run [ReportOutput.ON, .TERSE OR .FULL] -REPORT_PROGRESS = ReportProgress.OFF # Sets console progress bar during run -PRINT_RESULTS = False # print model.results after execution -ANIMATE = False # {UNIT:EXECUTION_SET} # Specifies whether to generate animation of execution - - -#region PARAMETERS -# ====================================================================================================================== -# PARAMETERS -# ====================================================================================================================== - -# PyTorch Version Parameters: -model_params = dict( - state_d = 11, # length of state vector - previous_state_d = 11, # length of state vector - integrator_d = 11, # length of integrator vector - context_d = 11, # length of context vector - integration_rate = .69, # rate at which state is integrated into new context - state_weight = .5, # weight of the state used during memory retrieval - context_weight = .5, # weight of the context used during memory retrieval - temperature = .01 # temperature of the softmax used during memory retrieval (smaller means more argmax-like -) - -# Fixed (structural) parameters: - -# Names: -MODEL_NAME = "EGO Model CSW" -STATE_INPUT_LAYER_NAME = "STATE" -PREVIOUS_STATE_LAYER_NAME = "PREVIOUS STATE" -INTEGRATOR_LAYER_NAME = 'INTEGRATOR' -CONTEXT_LAYER_NAME = 'CONTEXT' - -EM_NAME = "EM" -PREDICTION_LAYER_NAME = "PREDICTION" - -EMFieldsIndex = IntEnum('EMFields', - ['STATE', - 'CONTEXT', - 'PREVIOUS_STATE'], - start=0) - - -# CONSTRUCTION PARAMETERS - -# Layer sizes: -STATE_SIZE = model_params['state_d'] # length of state vector -INTEGRATOR_SIZE = model_params['integrator_d'] # length of state vector -CONTEXT_SIZE = model_params['context_d'] 
# length of state vector - -# Context processing: -INTEGRATION_RATE = model_params['integration_rate'] # rate at which state is integrated into integrator layer - -# EM retrieval -STATE_RETRIEVAL_WEIGHT = 0 -PREVIOUS_STATE_RETRIEVAL_WEIGHT = model_params['state_weight'] # weight of state field in retrieval from EM -CONTEXT_RETRIEVAL_WEIGHT = model_params['context_weight'] # weight of context field in retrieval from EM -RETRIEVAL_SOFTMAX_GAIN = 1/model_params['temperature'] # gain on softmax retrieval function - - -RANDOM_WEIGHTS_INITIALIZATION=RandomMatrix(center=0.0, range=0.1) # Matrix spec used to initialize all Projections - -#endregion - -#region ENVIRONMENT -# ====================================================================================================================== -# ENVIRONMENT -# ====================================================================================================================== - -# Task environment: -import Environment -CURRICULUM_TYPE = 'Blocked' # 'Blocked' or 'Interleaved' -INPUTS = Environment.generate_dataset(condition=CURRICULUM_TYPE).xs.numpy()[:5] -# INPUTS = [env_inputs[i][:10] for i in range(len(env_inputs))] - - -#endregion - -#region MODEL -# ====================================================================================================================== -# MODEL -# ====================================================================================================================== - -def construct_model(model_name:str=MODEL_NAME, - - # Input layer: - state_input_name:str=STATE_INPUT_LAYER_NAME, - state_size:int=STATE_SIZE, - - # Previous state - previous_state_input_name:str=PREVIOUS_STATE_LAYER_NAME, - - # Integrator: - integrator_name:str=INTEGRATOR_LAYER_NAME, - integrator_size:int=INTEGRATOR_SIZE, - integration_rate:Union[float,int]=INTEGRATION_RATE, - - # Context representation (learned): - context_name:str=CONTEXT_LAYER_NAME, - context_size:Union[float,int]=CONTEXT_SIZE, - - # EM: - em_name:str=EM_NAME, - retrieval_softmax_gain=RETRIEVAL_SOFTMAX_GAIN, - state_retrieval_weight:Union[float,int]=STATE_RETRIEVAL_WEIGHT, - previous_state_retrieval_weight:Union[float,int]=PREVIOUS_STATE_RETRIEVAL_WEIGHT, - context_retrieval_weight:Union[float,int]=CONTEXT_RETRIEVAL_WEIGHT, - - # Output / decision processing: - prediction_layer_name:str=PREDICTION_LAYER_NAME, - - )->Composition: - - assert 0 <= integration_rate <= 1,\ - f"integrator_retrieval_weight must be a number from 0 to 1" - - # ---------------------------------------------------------------------------------------------------------------- - # ------------------------------------------------- Nodes ------------------------------------------------------ - # ---------------------------------------------------------------------------------------------------------------- - - state_input_layer = ProcessingMechanism(name=state_input_name, size=state_size) - previous_state_layer = ProcessingMechanism(name=previous_state_input_name, size=state_size) - integrator_layer = RecurrentTransferMechanism(name=integrator_name, - function=Tanh, - size=integrator_size, - auto=1-integration_rate, - hetero=0.0) - # integrator_layer = TransferMechanism(name=integrator_name, - # function=Tanh, - # size=integrator_size - # ) - context_layer = ProcessingMechanism(name=context_name, size=context_size) - - em = EMComposition(name=em_name, - memory_template=[[0] * state_size, # state - [0] * state_size, # previous state - [0] * state_size], # context - # memory_fill=(0,.01), - 
memory_capacity=MEMORY_CAPACITY, - memory_decay_rate=0, - softmax_gain=1.0, - # Input Nodes: - field_names=[state_input_name, - previous_state_input_name, - context_name, - ], - field_weights=(state_retrieval_weight, - previous_state_retrieval_weight, - context_retrieval_weight - ), - # enable_learning=True, - learn_field_weights=False - ) - - prediction_layer = ProcessingMechanism(name=prediction_layer_name, size=state_size) - - - # ---------------------------------------------------------------------------------------------------------------- - # ------------------------------------------------- EGO Composition -------------------------------------------- - # ---------------------------------------------------------------------------------------------------------------- - - QUERY = ' [QUERY]' - VALUE = ' [VALUE]' - RETRIEVED = ' [RETRIEVED]' - - # Pathways - state_to_previous_state_pathway = [state_input_layer, previous_state_layer] - state_to_integrator_pathway = [state_input_layer, - np.eye(STATE_SIZE) * integration_rate, - integrator_layer] - state_to_em_pathway = [state_input_layer, - MappingProjection(state_input_layer, em.nodes[state_input_name+VALUE]), - em] - previous_state_to_em_pathway = [previous_state_layer, - MappingProjection(previous_state_layer, em.nodes[previous_state_input_name+QUERY]), - em] - context_learning_pathway = [integrator_layer, - context_layer, - MappingProjection(context_layer, em.nodes[context_name + QUERY]), - em, - MappingProjection(em.nodes[state_input_name + RETRIEVED], prediction_layer), - prediction_layer] - - # Composition - EGO_comp = AutodiffComposition([state_to_previous_state_pathway, - state_to_integrator_pathway, - state_to_em_pathway, - previous_state_to_em_pathway, - context_learning_pathway], - name=model_name) - - # EGO_comp.show_graph(show_learning=True) - - # Ensure EM is executed (to encode previous state and context, and predict current state) - # before updating state and context - EGO_comp.scheduler.add_condition(em, BeforeNodes(previous_state_layer, integrator_layer)) - - # Validate construction - assert integrator_layer.input_port.path_afferents[0].sender.owner == integrator_layer # recurrent projection - assert integrator_layer.input_port.path_afferents[0].parameters.matrix.get()[0][0] == 1-integration_rate - assert integrator_layer.input_port.path_afferents[1].sender.owner == state_input_layer # - - return EGO_comp -#endregion - - -#region SCRIPT EXECUTION -# ====================================================================================================================== -# SCRIPT EXECUTION -# ====================================================================================================================== - -if __name__ == '__main__': - model = None - - if CONSTRUCT_MODEL: - print(f'Constructing {MODEL_NAME}') - model = construct_model() - assert 'DEBUGGING BREAK POINT' - # print(model.scheduler.consideration_queue) - # gs.output_graph_image(model.scheduler.graph, 'EGO_comp-scheduler.png') - - if DISPLAY_MODEL is not None: - if model: - model.show_graph(**DISPLAY_MODEL) - else: - print("Model not yet constructed") - - if RUN_MODEL: - # print("MODEL NOT YET FULLY EXECUTABLE") - print(f'Running {MODEL_NAME}') - # model.run(inputs={STATE_INPUT_LAYER_NAME:INPUTS}, - # # report_output=REPORT_OUTPUT, - # # report_progress=REPORT_PROGRESS - # ) - model.learn(inputs={STATE_INPUT_LAYER_NAME:INPUTS}, - # report_output=REPORT_OUTPUT, - # report_progress=REPORT_PROGRESS - ) - 
print(model.nodes['EM'].parameters.memory.get(context=MODEL_NAME)) - - if PRINT_RESULTS: - print("MODEL NOT YET FULLY EXECUTABLE SO NO RESULTS") - #endregion diff --git a/Scripts/Models (Under Development)/EGO/EGO Model - MDP OLD.py b/Scripts/Models (Under Development)/EGO/EGO Model - MDP OLD.py deleted file mode 100644 index b12538e4488..00000000000 --- a/Scripts/Models (Under Development)/EGO/EGO Model - MDP OLD.py +++ /dev/null @@ -1,500 +0,0 @@ -""" - -FIX: THIS VERSION HAS ONLY PARTIAL CONTROLLER - - -- - -**Overview** ------------- - -This implements a model of... - -The model is an example of... - -The script contains methods to construct, train, and run the model, and analyze the results of its execution: - -* `construct_model `: - takes as arguments parameters used to construct the model; for convenience, defaults are defined below, - (under "Construction parameters") - -* `train_network `: - takes as arguments the feedforward neural network Composition (FFN_COMPOSITION) and number of epochs to train. - Note: learning_rate is set at construction (can specify using LEARNING_RATE under "Training parameters" below). - -* `run_model `: - takes as arguments the drift rate in the temporal context vector to be applied on each trial, - and the number of trials to execute, as well as reporting and animation specifications - (see "Execution parameters"). - -* `analyze_results `: - takes as arguments the results of executing the model, and optionally a number of trials and EGO_level to analyze; - returns d-prime statistics and plots results for different conditions at each EGO_level executed. - - -**The Model** -------------- - -The model is comprised of... - -.. _EGO_Fig: - -.. figure:: _static/N-Back_Model_movie.gif - :align: left - :alt: N-Back Model Animation - -.. _EGO_model_composition: - -*EGO_model Composition* -~~~~~~~~~~~~~~~~~~~~~~~~~ - -This is comprised of three input Mechanisms, and the nested `ffn ` `Composition`. - -.. _EGO_ffn_composition: - -*FFN Composition* -~~~~~~~~~~~~~~~~~ - -The temporal context is provided by a randomly drifting high dimensional vector that maintains a constant norm (i.e., -drifts on a sphere). The FFN is trained, given an n-back level of *n*, to identify when the current stimulus matches -one stored in EM with a temporal context vector that differs by an amount corresponding to *n* time steps of drift. -During n-back performance, the model encodes the current stimulus and temporal context, retrieves an item from EM -that matches the current stimulus, weighted by the similarity of its temporal context vector (i.e., most recent), and -then uses the FFN to evaluate whether it is an n-back match. The model responds "match" if the FFN detects a match; -otherwise, it either uses the current stimulus and temporal context to retrieve another sample from EM and repeat the -evaluation or, with a fixed probability (hazard rate), it responds "non-match". - -The ffn Composition is trained using the train_network() method - - -**Construction and Execution** ------------------------------- - -.. _EGO_settings: - -*Settings* -~~~~~~~~~~ - -The default parameters are ones that have been fit to empirical data concerning human performance -(taken from `Kane et al., 2007 `_). - -See "Settings for running the script" to specify whether the model is trained and/or executed when the script is run, -and whether a graphic display of the network is generated when it is constructed. - -.. 
_EGO_stimuli: - -*Stimuli* -~~~~~~~~~ - -Sequences of stimuli are constructed either using `SweetPea `_ -(using the script in stim/SweetPea) or replicate those used in the study by `Kane et al., -2007 `_ (from stimulus files in stim/Kane_et_al). - - .. note:: - Use of SweetPea for stimulus generation requires it be installed:: - >> pip install sweetpea - -.. _EGO_training: - -*Training* -~~~~~~~~~~ - -MORE HERE - -.. _EGO_execution: - -*Execution* -~~~~~~~~~~~ - -MORE HERE - -.. _EGO_methods_reference: - -**Methods Reference** ---------------------- - - -""" - -import os -import random -import time -import timeit -import numpy as np -from typing import Union -from enum import Enum, IntEnum -from pathlib import Path - -from graph_scheduler import * - -from psyneulink import * - -# Settings for running script: -CONSTRUCT_MODEL = True # THIS MUST BE SET TO True to run the script -DISPLAY_MODEL = ( # Only one of the following can be uncommented: - # None # suppress display of model - {} # show simple visual display of model - # {'show_node_structure': True} # show detailed view of node structures and projections -) -RUN_MODEL = False # True => run the model -ANALYZE_RESULTS = False # True => output analysis of results of run -REPORT_OUTPUT = ReportOutput.ON # Sets console output during run -REPORT_PROGRESS = ReportProgress.OFF # Sets console progress bar during run -ANIMATE = False # {UNIT:EXECUTION_SET} # Specifies whether to generate animation of execution - -#region ========================================= PARAMETERS =========================================================== - -# Fixed (structural) parameters: - -# Names: -MODEL_NAME = "EGO Model" -TASK_INPUT_LAYER_NAME = "TASK" -STATE_INPUT_LAYER_NAME = "STATE" -TIME_INPUT_LAYER_NAME = "TIME" -ATTENTION_LAYER_NAME = "STATE ATTENTION" -ATTENTIONAL_CONTROL_LAYER_NAME = "ATTENTIONAL CONTROL" -ACTUAL_STATE_INPUT = 'ACTUAL_STATE_INPUT' -RETRIEVED_STATE_INPUT = 'RETRIEVED_STATE' -CONTEXT_LAYER_NAME = 'CONTEXT' -REWARD_INPUT_LAYER_NAME = "REWARD" -RETRIEVED_TIME_NAME = "RETRIEVED TIME" -RETRIEVED_REWARD_NAME = "RETRIEVED REWARD" -EM_NAME = "EPISODIC MEMORY" -DECISION_LAYER_NAME = "DECISION" - -class Task(IntEnum): - EXPERIENCE = 0 - PREDICT = 1 - -# CONSTRUCTION PARAMETERS - -# Layer sizes: -TASK_SIZE = 1 # length of task vector -STATE_SIZE = 8 # length of state vector -TIME_SIZE = 25 # length of time vector -REWARD_SIZE = 1 # length of reward vector -DECISION_SIZE = 2 # length of decision vector - -# Context processing: -STATE_WEIGHT = .1 # rate at which actual vs. 
retrieved state (from EM) are integrated in context_layer -CONTEXT_INTEGRATION_RATE = .1 # rate at which retrieved context (from EM) is integrated into context_layer -TIME_DRIFT_NOISE = 0.0 # noise used by DriftOnASphereIntegrator (function of Context mech) - -# EM retrieval -STATE_RETRIEVAL_WEIGHT = 1 # weight of state field in retrieval from EM -TIME_RETRIEVAL_WEIGHT = 1 # weight of time field in retrieval from EM -CONTEXT_RETRIEVAL_WEIGHT = 1 # weight of context field in retrieval from EM -REWARD_RETRIEVAL_WEIGHT = 0 # weight of reward field in retrieval from EM -RETRIEVAL_SOFTMAX_GAIN = 10 # gain on softmax retrieval function -# RETRIEVAL_HAZARD_RATE = 0.04 # rate of re=sampling of em following non-match determination in a pass through ffn - -RANDOM_WEIGHTS_INITIALIZATION=RandomMatrix(center=0.0, range=0.1) # Matrix spec used to initialize all Projections - -# Execution parameters: - -# Temporal context vector generation as input to time_input_layer of model -CONTEXT_DRIFT_RATE=.1 # drift rate used for DriftOnASphereIntegrator (function of Context mech) on each trial -time_fct = DriftOnASphereIntegrator(initializer=np.random.random(TIME_SIZE - 1), - noise=TIME_DRIFT_NOISE, - dimension=TIME_SIZE) -# Task environment: -NUM_EXPERIENCE_TRIALS = 9 # number of trials for Task.EXPERIENCE (passive encoding into EM) -NUM_PREDICT_TRIALS = 9 # number of trials Task.PREDICT (active retrieval from EM and reward prediction) -NUM_ROLL_OUT = 3 # number of trials of roll-out under OCM control -NUM_TRIALS = NUM_EXPERIENCE_TRIALS + NUM_PREDICT_TRIALS # total number of trials -assert NUM_PREDICT_TRIALS % NUM_ROLL_OUT == 0, \ - f"NUM_PREDICT_TRIALS ({NUM_PREDICT_TRIALS}) " \ - f"must be evenly divisible by NUM_ROLL_OUT ({NUM_ROLL_OUT})" - -inputs = {STATE_INPUT_LAYER_NAME: [[1],[2],[3]] * STATE_SIZE * NUM_TRIALS, - TIME_INPUT_LAYER_NAME: np.array([time_fct(i) for i in range(NUM_TRIALS)]).reshape(NUM_TRIALS,TIME_SIZE,1), - REWARD_INPUT_LAYER_NAME: [[0],[0],[1]] * REWARD_SIZE * NUM_TRIALS, - TASK_INPUT_LAYER_NAME: [[Task.EXPERIENCE.value]] * NUM_EXPERIENCE_TRIALS - + [[Task.PREDICT.value]] * NUM_PREDICT_TRIALS} -def gen_baseline_trials_exp1(dim=STATE_SIZE, num_trials=NUM_EXPERIENCE_TRIALS): - # Generate one-hots - state_reps = np.eye(dim) - visited_states, rewards = [], [] - - for trial_idx in range(num_trials): - if np.random.random()<.5: - visited_states.extend([1,3,5]) - rewards.extend([0,0,10]) - else: - visited_states.extend([2,4,6]) - rewards.extend([0,0,1]) - - # Pick one-hots corresponding to each state - visited_states = state_reps[visited_states] - rewards = np.array(rewards) - - return visited_states, rewards -def gen_reward_revaluation_trials_exp1(dim=STATE_SIZE, num_trials=NUM_PREDICT_TRIALS): - # Generate one-hots - state_reps = np.eye(dim) - visited_states, rewards = [], [] - - for trial_idx in range(num_trials): - if np.random.random()<.5: - visited_states.extend([3,5]) - rewards.extend([0,1]) - else: - visited_states.extend([4,6]) - rewards.extend([0,10]) - - # Pick one-hots corresponding to each state - visited_states = state_reps[visited_states] - rewards = np.array(rewards) - - return visited_states, rewards - -assert True - -def construct_model(model_name:str=MODEL_NAME, - - # Inputs: - task_input_name:str=TASK_INPUT_LAYER_NAME, - task_size:int=1, - state_input_name:str=STATE_INPUT_LAYER_NAME, - state_size:int=STATE_SIZE, - time_input_name:str=TIME_INPUT_LAYER_NAME, - time_size:int=TIME_SIZE, - reward_input_name = REWARD_INPUT_LAYER_NAME, - reward_size:int=REWARD_SIZE, - - # Context 
processing: - attention_layer_name=ATTENTION_LAYER_NAME, - attentional_control_name=ATTENTIONAL_CONTROL_LAYER_NAME, - context_name:str=CONTEXT_LAYER_NAME, - state_weight:Union[float,int]=STATE_WEIGHT, - context_integration_rate:Union[float,int]=CONTEXT_INTEGRATION_RATE, - - # EM: - em_name:str=EM_NAME, - retrieval_softmax_gain=RETRIEVAL_SOFTMAX_GAIN, - # retrieval_hazard_rate=RETRIEVAL_HAZARD_RATE, - state_retrieval_weight:Union[float,int]=STATE_RETRIEVAL_WEIGHT, - time_retrieval_weight:Union[float,int]=TIME_RETRIEVAL_WEIGHT, - context_retrieval_weight:Union[float,int]=CONTEXT_RETRIEVAL_WEIGHT, - reward_retrieval_weight:Union[float,int]=REWARD_RETRIEVAL_WEIGHT, - # retrieved_time_name:str=RETRIEVED_TIME_NAME, - retrieved_reward_name:str=RETRIEVED_REWARD_NAME, - - # Output / decision processing: - decision_layer_name:str=DECISION_LAYER_NAME, - decision_size:int=DECISION_SIZE, - - )->Composition: - - # Apportionment of contributions of state (actual or em) vs. context (em) to context_layer integration: - - # state input (EXPERIENCE) -\ - # --> state_weight -------\ - # state from em (PREDICT)---/ -> * (context_integration_rate) -----\ - # /-----> context_weight ---/ --> context - # context from em --------/ (=1- state_weight) / - # /---> 1 - context_integration_rate --/ - # context from prev. cycle -------------------------/ - - assert 0 <= context_integration_rate <= 1,\ - f"context_retrieval_weight must be a number from 0 to 1" - assert 0 <= state_weight <= 1,\ - f"context_retrieval_weight must be a number from 0 to 1" - context_weight = 1 - state_weight - state_weight *= context_integration_rate - context_weight *= context_integration_rate - - task_input_layer = ProcessingMechanism(name=task_input_name, - size=task_size) - - state_input_layer = ProcessingMechanism(name=state_input_name, - size=state_size) - - time_input_layer = ProcessingMechanism(name=time_input_name, - size=time_size) - - context_layer = RecurrentTransferMechanism(name=context_name, - size=state_size, - auto=1-context_integration_rate, - hetero=0.0) - - reward_input_layer = ProcessingMechanism(name=reward_input_name, - size=reward_size) - - attention_layer = ProcessingMechanism(name=ATTENTION_LAYER_NAME, - size=(state_size,state_size), - input_ports=[ACTUAL_STATE_INPUT, RETRIEVED_STATE_INPUT], - function=LinearCombination) - - # retrieved_time_layer = TransferMechanism(name=retrieved_time_name, - # size=time_size) - - retrieved_reward_layer = TransferMechanism(name=retrieved_reward_name, - size=reward_size) - - em = EpisodicMemoryMechanism(name=em_name, - input_ports=[{NAME:state_input_name, SIZE:state_size}, - {NAME:time_input_name, SIZE:time_size}, - {NAME:context_name, SIZE:state_size}, - {NAME:reward_input_name, SIZE:reward_size} - ], - function=ContentAddressableMemory( - initializer=[[0] * state_size, # state - [0] * time_size, # time - [0] * state_size, # context - [0] * reward_size], # reward - distance_field_weights=[state_retrieval_weight, - time_retrieval_weight, - context_retrieval_weight, - reward_retrieval_weight], - selection_function=SoftMax(gain=retrieval_softmax_gain))) - - decision_layer = TransferMechanism(name=decision_layer_name, - size=decision_size, - function=SoftMax(output=PROB)) - - def encoding_control_function(variable,context): - """Used by attention_layer to control encoding of state info in context_layer and storing in EM - - If task is: - - Task.EXPERIENCE (0): - - stores state info in em on every trial (control_signal[0]=1) - - always attend to actual state (control_signal[1]=1, 
control_signal[2]=0) - - Task.PREDICT: (1): - - never store info in em (control_signal[0]=0) - - attend to actual state on first trial (control_signal[1]=1, control_signal[2]=0) - - attend to retrieved state on all subsequent trials (control_signal[1]=0, control_signal[2]=1) - - Returns: - control_signal[0]: 1 if store, 0 otherwise - control_signal[1]: 1 if attend to actual state, 0 otherwise - control_signal[2]: 1 if attend to retrieved state, 0 otherwise - """ - - # Get task and trial number - task = int(variable) - if context and context.composition: - trial = int([context.composition.get_current_execution_time(context)[TimeScale.TRIAL]]) - else: - trial = 0 - - # if task == Task.EXPERIENCE: - # attend_actual = 1 - # elif task == Task.PREDICT: - # attend_actual = 1 if not (trial % NUM_ROLL_OUT) else 1 - # attend_retrieved = 1 if (trial % NUM_ROLL_OUT) else 0 - # else: - # raise ValueError(f"Unrecognized task value in encoding_control_function: {task}") - # - # # Store to EM to - # store = 1 if task == Task.EXPERIENCE.value else 0 - - if task == Task.EXPERIENCE.value: - attend_actual = 1 - store = 1 - - # FIX: ADD CONNECTION FROM REWARD RETRIEVAL TO CONTROLLER - # ADD COUNTER - # ADD CONTROL SIGNAL FOR GATING TO COUNTER AND DECISION - # MAKE DECISION A DDM (OR SIMPLE INTEGRATOR) - # ADD TERMINATION CONDITION FOR TRIAL EITHER: - # - FOR WHEN COUNTER == (or %) NUM_ROLL_OUTS (AND RESET DECISION LAYER AT TRIAL START) - # - OR OUTPUT GATE DECISION TO RESPONSE NODE, AND PUT TERMINATION ON RESPONSE NODE > 0 - - if task == Task.PREDICT.value: - attend_actual = 0 if trial % NUM_ROLL_OUT else 1 - attend_retrieved = 1 if trial % NUM_ROLL_OUT else 0 - store = 0 - - control_signals = [store, attend_actual, attend_retrieved] - - return control_signals - - # Control Mechanism - # Uses the encoding_control_function (see above) to control: - # - encoding of state info in context_layer (from stimulus vs. 
em) - # - storage of info in em - attentional_control_layer = ControlMechanism(name=attentional_control_name, - monitor_for_control=task_input_layer, - function = encoding_control_function, - control=[(STORAGE_PROB, em), - attention_layer.input_ports[ACTUAL_STATE_INPUT], - attention_layer.input_ports[RETRIEVED_STATE_INPUT]]) - - EGO_comp = Composition(name=model_name, - pathways=[retrieved_reward_layer, decision_layer], # Decision - # # Use this to terminate a Task.PREDICT trial - termination_processing={ - TimeScale.TRIAL: And(WhenFinished(decision_layer), - )} - ) - - # Nodes not included in (decision output) Pathway specified above - EGO_comp.add_nodes([task_input_layer, - state_input_layer, - time_input_layer, - attention_layer, - attentional_control_layer, - context_layer, - reward_input_layer, - # retrieved_time_layer, - em]) - EGO_comp.exclude_node_roles(task_input_layer, NodeRole.OUTPUT) - - # Projections not included in (decision output) Pathway specified above - # EM encoding - EGO_comp.add_projection(MappingProjection(state_input_layer, em.input_ports[STATE_INPUT_LAYER_NAME])) - EGO_comp.add_projection(MappingProjection(time_input_layer, em.input_ports[TIME_INPUT_LAYER_NAME])) - EGO_comp.add_projection(MappingProjection(context_layer, em.input_ports[CONTEXT_LAYER_NAME])) - EGO_comp.add_projection(MappingProjection(reward_input_layer, em.input_ports[REWARD_INPUT_LAYER_NAME])) - - # Inputs to Context - # actual state -> attention_layer - EGO_comp.add_projection(MappingProjection(state_input_layer, - attention_layer.input_ports[ACTUAL_STATE_INPUT])) - # retrieved state -> attention_layer - EGO_comp.add_projection(MappingProjection(em.output_ports[f'RETRIEVED_{STATE_INPUT_LAYER_NAME}'], - attention_layer.input_ports[RETRIEVED_STATE_INPUT])) - # attention_layer -> context_layer - EGO_comp.add_projection(MappingProjection(attention_layer, - context_layer, - matrix=np.eye(STATE_SIZE) * state_weight)) - # retrieved context -> context_layer - EGO_comp.add_projection(MappingProjection(em.output_ports[f'RETRIEVED_{CONTEXT_LAYER_NAME}'], - context_layer, - matrix=np.eye(STATE_SIZE) * context_weight)) - - # Rest of EM retrieval - # EGO_comp.add_projection(MappingProjection(em.output_ports[f'RETRIEVED_{TIME_INPUT_LAYER_NAME}'], - # retrieved_time_layer)), - EGO_comp.add_projection(MappingProjection(em.output_ports[f'RETRIEVED_{REWARD_INPUT_LAYER_NAME}'], - retrieved_reward_layer)) - - # Validate construction - assert context_layer.input_port.path_afferents[0].sender.owner == context_layer - assert context_layer.input_port.path_afferents[0].parameters.matrix.get()[0][0] == 1-context_integration_rate - assert context_layer.input_port.path_afferents[1].sender.owner == attention_layer - assert context_layer.input_port.path_afferents[1].parameters.matrix.get()[0][0] == state_weight - assert context_layer.input_port.path_afferents[2].sender.owner == em - assert context_layer.input_port.path_afferents[2].parameters.matrix.get()[0][0] == context_weight - - print(f'{model_name} constructed') - return EGO_comp - -# Script execution: - -model = None - -if CONSTRUCT_MODEL: - model = construct_model() - -if DISPLAY_MODEL is not None: - if model: - model.show_graph(**DISPLAY_MODEL) - else: - print("Model not yet constructed") - -if RUN_MODEL: - model.run(inputs=inputs) diff --git a/Scripts/Models (Under Development)/EGO/Tutorial/Declan's EGO Tutorial.ipynb b/Scripts/Models (Under Development)/EGO/Tutorial/Declan's EGO Tutorial.ipynb new file mode 100644 index 00000000000..32106c057de --- /dev/null +++ 
b/Scripts/Models (Under Development)/EGO/Tutorial/Declan's EGO Tutorial.ipynb @@ -0,0 +1,399 @@ +{ + "cells": [ + { + "cell_type": "code", + "metadata": { + "ExecuteTime": { + "end_time": "2024-08-11T16:39:44.885605Z", + "start_time": "2024-08-11T16:39:44.093609Z" + } + }, + "source": [ + "# reload modules before executing code so we can modify modules and test without restarting kernel\n", + "%load_ext autoreload\n", + "%autoreload 2\n", + "\n", + "import sys\n", + "import warnings\n", + "sys.path.append('..')\n", + "warnings.filterwarnings('ignore')\n", + "\n", + "import matplotlib.pyplot as plt\n", + "from matplotlib.patches import Patch\n", + "import numpy as np\n", + "import pandas as pd\n", + "from scipy.stats import ttest_ind\n", + "import seaborn as sns\n", + "from sklearn.metrics.pairwise import cosine_similarity\n", + "import torch\n", + "from torch import nn\n", + "from torch.utils.data import DataLoader\n", + "from tqdm import tqdm\n", + "\n", + "import run as run\n", + "from models import *\n", + "import utils" + ], + "outputs": [ + { + "ename": "ModuleNotFoundError", + "evalue": "No module named 'seaborn'", + "output_type": "error", + "traceback": [ + "\u001B[0;31m---------------------------------------------------------------------------\u001B[0m", + "\u001B[0;31mModuleNotFoundError\u001B[0m Traceback (most recent call last)", + "\u001B[0;32m/var/folders/6s/88zfvkmj43xftjg_1tf_pfkc0000gp/T/ipykernel_80467/2553525170.py\u001B[0m in \u001B[0;36m\u001B[0;34m\u001B[0m\n\u001B[1;32m 13\u001B[0m \u001B[0;32mimport\u001B[0m \u001B[0mpandas\u001B[0m \u001B[0;32mas\u001B[0m \u001B[0mpd\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[1;32m 14\u001B[0m \u001B[0;32mfrom\u001B[0m \u001B[0mscipy\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0mstats\u001B[0m \u001B[0;32mimport\u001B[0m \u001B[0mttest_ind\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[0;32m---> 15\u001B[0;31m \u001B[0;32mimport\u001B[0m \u001B[0mseaborn\u001B[0m \u001B[0;32mas\u001B[0m \u001B[0msns\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[0m\u001B[1;32m 16\u001B[0m \u001B[0;32mfrom\u001B[0m \u001B[0msklearn\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0mmetrics\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0mpairwise\u001B[0m \u001B[0;32mimport\u001B[0m \u001B[0mcosine_similarity\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[1;32m 17\u001B[0m \u001B[0;32mimport\u001B[0m \u001B[0mtorch\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n", + "\u001B[0;31mModuleNotFoundError\u001B[0m: No module named 'seaborn'" + ] + } + ], + "execution_count": 3 + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": "#### 1. Load the EGO model and initialize its parameters." 
+ }, + { + "cell_type": "code", + "metadata": { + "ExecuteTime": { + "end_time": "2024-08-11T16:39:19.926857Z", + "start_time": "2024-08-11T16:39:19.906657Z" + } + }, + "source": [ + "from PIL import Image\n", + "# Display the EGO model architecture\n", + "model_image = Image.open('ego_model.png')\n", + "display(model_image)" + ], + "outputs": [ + { + "ename": "FileNotFoundError", + "evalue": "[Errno 2] No such file or directory: 'ego_model.png'", + "output_type": "error", + "traceback": [ + "\u001B[0;31m---------------------------------------------------------------------------\u001B[0m", + "\u001B[0;31mFileNotFoundError\u001B[0m Traceback (most recent call last)", + "\u001B[0;32m/var/folders/6s/88zfvkmj43xftjg_1tf_pfkc0000gp/T/ipykernel_80467/384252142.py\u001B[0m in \u001B[0;36m\u001B[0;34m\u001B[0m\n\u001B[1;32m 1\u001B[0m \u001B[0;32mfrom\u001B[0m \u001B[0mPIL\u001B[0m \u001B[0;32mimport\u001B[0m \u001B[0mImage\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[1;32m 2\u001B[0m \u001B[0;31m# Display the EGO model architecture\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[0;32m----> 3\u001B[0;31m \u001B[0mmodel_image\u001B[0m \u001B[0;34m=\u001B[0m \u001B[0mImage\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0mopen\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0;34m'ego_model.png'\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[0m\u001B[1;32m 4\u001B[0m \u001B[0mdisplay\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0mmodel_image\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n", + "\u001B[0;32m/opt/anaconda3/envs/python39/lib/python3.9/site-packages/PIL/Image.py\u001B[0m in \u001B[0;36mopen\u001B[0;34m(fp, mode, formats)\u001B[0m\n\u001B[1;32m 2973\u001B[0m \u001B[0;34m\u001B[0m\u001B[0m\n\u001B[1;32m 2974\u001B[0m \u001B[0;32mif\u001B[0m \u001B[0mfilename\u001B[0m\u001B[0;34m:\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[0;32m-> 2975\u001B[0;31m \u001B[0mfp\u001B[0m \u001B[0;34m=\u001B[0m \u001B[0mbuiltins\u001B[0m\u001B[0;34m.\u001B[0m\u001B[0mopen\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0mfilename\u001B[0m\u001B[0;34m,\u001B[0m \u001B[0;34m\"rb\"\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[0m\u001B[1;32m 2976\u001B[0m \u001B[0mexclusive_fp\u001B[0m \u001B[0;34m=\u001B[0m \u001B[0;32mTrue\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[1;32m 2977\u001B[0m \u001B[0;34m\u001B[0m\u001B[0m\n", + "\u001B[0;31mFileNotFoundError\u001B[0m: [Errno 2] No such file or directory: 'ego_model.png'" + ] + } + ], + "execution_count": 1 + }, + { + "cell_type": "code", + "metadata": { + "ExecuteTime": { + "end_time": "2024-08-11T16:39:31.849095Z", + "start_time": "2024-08-11T16:39:31.839877Z" + } + }, + "source": [ + "def prep_recurrent_network(rnet, state_d, persistance=-0.6):\n", + " '''Prepare a recurrent context module that functions as a leaky integrator of the input state.\n", + " '''\n", + " with torch.no_grad():\n", + " # Most weights/biases are set to zero or identity matrices here, except for the learnable output from context to EM.\n", + " rnet.state_to_hidden.weight.copy_(torch.eye(state_d, dtype=torch.float))\n", + " rnet.state_to_hidden.bias.zero_()\n", + " rnet.hidden_to_hidden.weight.zero_()\n", + " rnet.hidden_to_hidden.bias.zero_()\n", + " rnet.state_to_hidden_wt.weight.zero_()\n", + " # Set the integration constant (not exactly a leaky integrator, but close enough)\n", + " 
rnet.state_to_hidden_wt.bias.copy_(torch.ones((len(rnet.state_to_hidden_wt.bias),), dtype=torch.float) * persistance)\n", + " rnet.hidden_to_hidden_wt.weight.zero_()\n", + " rnet.hidden_to_hidden_wt.bias.zero_()\n", + " # Set hidden to context weights as an identity matrix.\n", + " rnet.hidden_to_context.weight.copy_(torch.eye(state_d, dtype=torch.float))\n", + " rnet.hidden_to_context.bias.zero_()\n", + "\n", + " # Set requires_grad to True for hidden_to_context.weight before freezing other parameters\n", + " rnet.hidden_to_context.weight.requires_grad = True\n", + " rnet.hidden_to_context.bias.requires_grad = True\n", + "\n", + " # Freeze recurrent weights to stabilize training\n", + " for name, p in rnet.named_parameters():\n", + " if 'hidden_to_context' not in name:\n", + " p.requires_grad = False\n", + " else:\n", + " p.requires_grad = True\n", + " return rnet\n", + "\n", + "# Hyperparameters for the CSW experiments.\n", + "softmax_temperature = 0.1\n", + "state_d = 11 # dimensionality of the input state vector\n", + "context_d = 11 # dimensionality of the context vector (should be the same as the state vector for these experiments)\n", + "persistance = -0.8 # how much of the incoming state information is integrated into the context\n", + "\n", + "# Initialize the recurrent context module.\n", + "context_module = RecurrentContextModule(state_d, state_d, context_d)\n", + "em_module = EMModule(softmax_temperature)\n", + "context_module = prep_recurrent_network(context_module, state_d, persistance)\n", + "print(context_module)" + ], + "outputs": [ + { + "ename": "NameError", + "evalue": "name 'RecurrentContextModule' is not defined", + "output_type": "error", + "traceback": [ + "\u001B[0;31m---------------------------------------------------------------------------\u001B[0m", + "\u001B[0;31mNameError\u001B[0m Traceback (most recent call last)", + "\u001B[0;32m/var/folders/6s/88zfvkmj43xftjg_1tf_pfkc0000gp/T/ipykernel_80467/1904193476.py\u001B[0m in \u001B[0;36m\u001B[0;34m\u001B[0m\n\u001B[1;32m 36\u001B[0m \u001B[0;34m\u001B[0m\u001B[0m\n\u001B[1;32m 37\u001B[0m \u001B[0;31m# Initialize the recurrent context module.\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[0;32m---> 38\u001B[0;31m \u001B[0mcontext_module\u001B[0m \u001B[0;34m=\u001B[0m \u001B[0mRecurrentContextModule\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0mstate_d\u001B[0m\u001B[0;34m,\u001B[0m \u001B[0mstate_d\u001B[0m\u001B[0;34m,\u001B[0m \u001B[0mcontext_d\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[0m\u001B[1;32m 39\u001B[0m \u001B[0mem_module\u001B[0m \u001B[0;34m=\u001B[0m \u001B[0mEMModule\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0msoftmax_temperature\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n\u001B[1;32m 40\u001B[0m \u001B[0mcontext_module\u001B[0m \u001B[0;34m=\u001B[0m \u001B[0mprep_recurrent_network\u001B[0m\u001B[0;34m(\u001B[0m\u001B[0mcontext_module\u001B[0m\u001B[0;34m,\u001B[0m \u001B[0mstate_d\u001B[0m\u001B[0;34m,\u001B[0m \u001B[0mpersistance\u001B[0m\u001B[0;34m)\u001B[0m\u001B[0;34m\u001B[0m\u001B[0;34m\u001B[0m\u001B[0m\n", + "\u001B[0;31mNameError\u001B[0m: name 'RecurrentContextModule' is not defined" + ] + } + ], + "execution_count": 2 + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### 2. Generate some toy data for the CSW task." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 32, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAADIAAAAULCAYAAADvaziwAAEAAElEQVR4Aez9f6xd1X0n/K9747jG44CD+aahxRgmD07UNLWN0plxRcCU9IcSfk5TIzVSMHKkb0mfAZv88aRTEiCh0zx6BAZGLf1KsTCRWgk3GQiQaJqG2pConk6rYDdNlZjJxGAmpHkwuVCPcQixv+dznAOX67322ef3Pue8lnR97t0/13rtfc+613e995o53ihJIUCAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQqK3AbG1rpmIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQJNAQEQNwIBAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAoOYCAiA1v0CqR4AAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAQAHEPECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgRqLiAAUvMLpHoECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQEQ9wABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAoOYCAiA1v0CqR4AAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAQAHEPECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgRqLiAAUvMLpHoECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQEQ9wABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAoOYCAiA1v0CqR4AAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBBYhIDApAvseeq5ZhO/+c8vpBd/9ONXm/vOnz0tnfozb0zN1yVvfHW5TwgQIECAAAECBAgQGH+BH3/vG+nYSy+kn/zw6fTK80+/2qBFp5+d3vDms1Pr9dUVPiFAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBQcwEBkJpfINXrXCCCHp/7xtMpgh/xeZVy1mlL0/pVZ6TfWH1m86PKPrYZT4FnXjiSDs4dKa38qY1AUASDFAIECBAgQIAAgfERiMDHS//4xfTy//xa+tF3vlap4rOnnJYWv+2CdMo735+WND7ia2UyBRYGgXKt/JnG/TANJecR3wNv/Ll3TQOBNhIgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAgbEUmDneKGNZc5UmME/gxaM/Tp/5u++kv/iHp1MM8O+lxOD/zb/8tvThxkd8rkyWwB1f/Vba1vgoKxEG2vnB6Rj41XKI75udje+fXNn4S2enCEopBAgQIECAAIG6CRz5+z9Ph796T4oASK9l6bt/J5366x9rzhDS67HsXy+BF7/86fQvf/XptpX6+f9nru0247pBfK+89M1GSKoRkIrZccpKBGEiFHXKL77f90MZlHUECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQGDIAmYAGTK40/Vf4DP//Ttp29e+lSIE0o8Sx4mAwPZGoCSCIDe+5x39OKxjEKi1QIQ/yoIx688+QwCk1ldQ5QgQIECAwPQJHG3M9jH30O+nmMmgXyUGyMfHsvdcl970ax8zI0i/YB1npAIRfvnfX7unbehjfiVjFp34eKHxPbakEQI5tfH9YGaQ+UI+J0CAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECoxEQABmNu7P2QSBmLPjw5/42ffOfy59c2u2pWkGQL+9/Nn3mA//W4PduIe03FgJxnysECBAgQIAAgXEQiJkLfrjzIykCIIMqMaPIS43jr9j0Zwa9DwrZcQcuELPi/PD+j/Q8O058r8VHBKNOu/yPBl5vJyBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgTyArP5VdYQqK/AnqeeS7+xfdfAwh/zWx4Bk2Gda/55fU5gWAIRphpUkGpYbXAeAgQIECBAYDoEYkD7c3966UDDHy3JmFnkB9ve05wRpLXMK4FxEYiZbOJ7Jb5n+lUiGBXfExHCUggQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAIHRCAiAjMbdWXsQ+It/eDpt/LOvpZihY1glzhXnNEh+WOLOM0yBW/6qf4PChllv5yJAgAABAgSmS6AV/ujngPYqgjGDQgymVwiMi0Dcr3HfDiKo0fo+HMSxx8VXPQkQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAwCgFBEBGqe/cHQv85f5n042PfL3j/fqxQysEErMlKAQmRSACVfF9pRAgQIAAAQIE6iww6kHnMZj+R9/5Wp2J1I1AUyC+V1546PcHqhHn+OHOjwz0HA5OgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIFAsIgBS7WFpDgZh9Y1ThjxZHhEA+/Lm/bX3plcBYC0T4Y9TfU2MNqPIECBAgQIDAUARipoFBzWbQSQOev++DA5lRoZM62JZAO4Fhfa8c/ccvmhmn3cWwngABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIDEBg0QCO6ZAEBiLw0cbMHxHA6LScddrS9JtvPzP9/KlL0zt/9rRXd9/z9HPpnxqhkk5nP4ggyh1f/Va68T3vePVYPiEwTgLxfXTLV76RIgCiECBAgAABAgTqLhAzDcSMA52W2VNOS0ve+f70hjefnX7mbRe8unvM5PHjZ7+RXm68RrikamkGURp1WXHNn1XdxXYEhipw5O//vOPvldb3RtzfnX6fvfjlT6el7/6dobbRyQgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAwLQLCIBM+x0wJu2PwEUELzop61edkbZe8I4Ur0WltfyZF440Ax2dDIbf/nffSR/+5belU5e8sejQlhGopUCEnVof3YSpatkolSJAgAABAgQmWiBmGYiPTkoEPk799Y9lB6bPH/B++Kv3pP/9tXsqB0GiLhEgaR2jk3rZlsCgBSKQUaVEOOpNv3bieyQ+b5UIg
XTyPfGTHz7dnAVECKQl6JUAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECAxeQABk8MbO0KNABDS2NQIgVUuEMm5577vSb//S2ZV2iRlC7rj0/PTb7zo7ffjzf1tplpEYPL+zMXvCh//N2yqdw0YEhimw56nnUsxwE+W//fQ1likECBAgQIAAgXETmHvo9zuqcgxqj/BHlRID32PbU37x/emH91efZSRmWRAAqSJsm2EKxH0ZgYx2Je77M373kfTGn3vXSZvO/5547k8vrRSMeumbX8yGrU46gQUECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIBAzwKzPR/BAQgMWCBm/6haIvyx84MXVA5/zD9uzAgS+1YtMQuIQqCOAhH+iNBUfDTDIMIfdbxM6kSAAAECBAi0Eag6oL11mDdf/SeVwx+tfeI1BsLHgPj5MyHMX7/w86hXzJSgEKiTQAQxqpTTr/mzwvDH/H1b3xPzl+U+f7kxI45CgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIDE/ADCDDs3amLgRipo2/aMy0UbXEzB/v/NnTqm5+0nax79b3vKPSjCMxM8k3//mFns53UgUWLIhzxAD+g43X+Py0RsDl509d2tXMI1HXVp1bp4nZIaLNb/qZN7YWNb+OWVF6cXz1YF18Eu2Nur74ox836xuHiPpEWX/2Gc16RdBnVCXq9pf7n22ePvz+XaNOrVKH+rXq4pUAAQIECBAgMO4CL37505WbsOw91/U0C0GEP2JgfMx6UKUcHfCsBxEwiYH1L3/vG+n40ReagZM3vPnsFO2sGlRptSNmhXjl+afTj+YN1P/xs99Is0tOS3HMVll0+tnNr0c1u8mPG22NOkbb57c56re4EdJ548+/63X1bdV7WK/h+NI/frFZv/Bb1LCbaRhGifotftsFHV+bfta9ShBjSWO2m6rXN0IgS9/9OykCT2Ulrldcu6IZRcr2s44AAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBDoTkAApDs3ew1JYGcH4Y/fWH1mVzN/LGzKjY0ASIROIizRrkRYoSwoEcf5i2+UB1huLgitxHG3fe3E7A1Fdfjwv3lb0eKTlsX5I6wQM0JEmKaoxLmKSoQsItDQjeutX/lGM8RRdNzWsvmzrUTdPtOYUSVmVcnVM/bb9tOdo07d1Kt17k5fW/Urui/m+82v3+ZffluKWWUUAgQIECBAgACBzgUiCBAD7quUGHh+2uV/VGXT0m1iYHx8zA9K5HaIbWJwfK7EgPgXHvr93Orm8th/4TGizRF8yQ26b9Wx9MCNlUcbQYWYESKCKt3MVhLnWfLO9zfr10ngJOqdq3urznGt5ocFYvtoc5XrHftFCCbq1km9Wufu5jXqd/ir9zRDDu32D7ei69puv17Xx/1W5Tovu+C6jk51SsO53fWMA1Y5d0cntjEBAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECCQFRAAydJYUQeBCARULbf82ruqbtp2u998+5npM/+9/bn/6QcvlB4rZu6YHxAo2nh+4CE+v/GRr786w0TR9lWW3fHVb7UNU7Q7TtQlwiPxcUsj0BGzq/z2L732hOCy/WOWjHbtbu0fx482z3dorcu9tuoV98ftl55fGsLJHaPq8mjLhz/3t5UCQa1jtuoXQZ2tF7wjjXLGkladvBIgQIAAAQIExkmgyqDzVnv6Ef5oHSsGyFcJgMSA+7ISA+LbHWfxv77gdYeIEMS//FX1WU9et/NPv4jgx1wjeFIlTFG0f2tZ1D0+oj7/qmFSdeaRhTONtI43/7UVFgjDH97/kUrBitb+rX1i5pLljSBJzGgxqBKGUb9213H++Vtucf/GjDLDCqm0ux+jjlGXCKh0UmLWFYUAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBCol8BsvaqjNgReE4gZOKrMwhF7RDDhrNOWvrZzj5994F3Vgg5V61elOhE02PhnX+sp/BH1+c3tu9K2RgCkk0BFu/q1gikRhOjncWNGjV6OGWbR3jjOIEocN47f7XWOEFFcU4UAAQIECBAgQKAzgZi5okqJGSE6HdRedtyqgYIqA+7LzjN/XQQinvvTS3sKf8QxDt33weZHr+GPhXWLEEjUrxXcmL++28/DL47ZrWO0MdobAY1BlKjXD7a9p6Pwx/x6RBCk32bzj7/w8wjetCuLOwx/xPEiaKMQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAEC9RIwA0i9rofazBP4r99+dt5X5Z9u/uW3lW/Q4dp3/uxp6eB/vLLDvbrfPEIVnc4ysfBsrQBJPwMaC8/Rmq3jMx/4twtXdfx161gd71iwQ8wgEqXqDCUFhzhpUb/qF9fl1sYMKjc3ZlBRCBAgQIAAAQIE2gvE4PuqYYOl7/6d9gfscIuf/3/mOtyjt817CULEmVsBkm7DFFVqH8eOep7xu4/0PKtFHCtCJVWvcVn9WjPFvPnqPynbrKN1rbb2Wr84zvONkEqY1aG88czOfx/pZ5ioDgbqQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIEJkFAAGQSruKEtuGffvBCpZbFzB8R2Bjn8uHP/23Xs0xEu1sBkkGGP1q+EYyImTF6DVu0Qhut4/b62jper/WKehycO5Jax+u1XrF/zAQSIaV+zlJTVq/1Z5+R0nveUbbJq+tidpNBzaDy6kl8QoAAAQIECBDoQCBmT6haTvnF91fdtJbbvfjlT3c9C0arQREyiLDBoEsruHHa5X/U06n6Ff5oVSJCILOnnJZ6rVfreDGrSK/hj9ax4l6O+g0iqNQ6xyBfq34vLjrdTCGDvA6OTYAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIE5gsIgMzX8HmtBP6pMXNClbJ+VWOw+xiXv/jG02nPU8/11IJbGjNMxED+qiWCCCuXL3118wg8dLL/HV/9Vs8BkEGEVcIh7odegxadWLyK2OaTMLvj0vPbbNWf1WFQ9fsi7j0BkP64OwoBAgQIECDQH4GqYYY3vPnsFB/jWmJ2hdYMFt22IfavOkg/zhFBiTf+3GszQUTQoap37H/4q/ekN/3ax3qaBaRf4YqoT6tEvZa88/3pZ952QWtR16+deFQ5SdStDgGQxfOue5V6xzZV7s+4p8b5+7Cqhe0IECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgEBdBARA6nIl1OMkgW9WDID8wlvGe/aPXgffdzKDw2+sPjPd8mvvKgxIxHFu+atvpJjho12JbeOj16DFwvPE8eYHOOIcEVCI1yolQiXRhs984N9W2bzjbVrBmThP1fuzdZIqrq1tvRIgQIAAAQIEplkgghFVSj8G+1c5z6C2qTK4vt25YwaRKiVCHzFDRpFZBDIipBAzc1QpR7/5xb4HGiJEsLgR3njjmSfCKcePvpBe+scvpqr3QtQ7Zu5463/8hypN
6Hib+cGZTgI3caIIlMTH/OBNxxVos8Opv/6xFB/9LNHOKm2N4I1CgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIDE9AAGR41s7UgUAns0O882fHOwDSAUvhpv/12+0DG7Hjb//S2aUzUES4IYITH/7c31YKgcSsIf0KgJy65I3plve+KzurSIRkYnaPKvdFBC0inNGv+yLaeON73pEiPBP1bJWoy85GvbZ97VuV6hXbR5il6swcrfN4JUCAAAECBAhMm8Arz1cLgEz7rAMxOL9KQCJCH2f87iPZ2yjCDREeiNcXHvr97HatFVWvT2v7stc4Z8wosuw91520WQRWoo1RpyqzcrRmVOnXbBtRt391wXXpX/3y75w0w0WEdyJ8U8U/GhbtGGQA5CS8HhdEKCgCNVXKKQIgVZhsQ4AAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIE+iYgANI3Sgfqp0Cnsyt0eu4YiN+vsnL50r4FIVp1iqDBxkZg49+dfUbz2BFmiABBuLz4ox+3Nmu+fvnJ9gGQCDHccen5r9sv98XWRtihymwVUZd+hBmibTs/eMHrwhUL6xbhlV9obLfxz75WKWyx/e++U7m9C881/+uyusU1+vC/eVvT4De375q/W/bzfpllT2AFAQIECBAgQGACBKoOql90+tkdtzaCBDG4vR8lAgL9HtQfx4wZFWJQ/Uzj89aMHREgON6o9/w2x7Iq5c1X/0mVzZohjJgFpF8+7U4abY1gSplhK7zy3J9eWikEEqGMfgRA2tUtzhHX6Z//6JcqeVUJsLTzGtb6uP7hXeX7MEJYS37RDCDDujbOQ4AAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIEQkAAxH0w9gLdhBAiSNCvEoGJmCGiXyWO9+FffttJgYgIHBS1tUpY5jfffmbl6lWdOWNhEKXyCRZseHsjmDJ/Zo0Fq1/9shXGqBK2qBJgefXAJZ/EjCjt6hb1iiDIZ/77d0qOdGJVv8zansgGBAgQIECAAIEpEOhmBpCYTaJqcKIdYSuc0G67qutjIP2bN/5JcyaOhfu0giDzl7/8P9v/ThPhik6cYvt++cyva9HnMcNHWfijtU8rjFElbBGhhQhbVDlu6/hFr3Ed2h0j6hVtqDJTRpUwRVE9hr0s6nloxwcrhW2ibssb7VcIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgACB4QoIgAzX29kIlArELB0x20UnpUpg4xfeclonhxzattHWKvVvVSi2jX3+4h+ebi0qfI3ZUiIE8hurqwdfFh4ozhMzp1QpmxuBnSoBkH9qzJqiECBAgAABAgQIEFgoEDNKVJ2po7Vvu4BCbBezVNSxRKClk5k6OglbvPSPX2wb3igz6WRWi2hDhIrazZrSbn1ZfYa17mjD7Yc7P9K2La36NGdBMftHi8MrAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAYmoAAyNConYhAuUDMItFp+COOuPODF5QfuMO1VYIMHR4yu3k3M6fEPu0CIHHC//b0cz0FQDoJj0RQJGYKieBJWTEDSJmOdQQIECBAgACB6RSIIEen4Y+Qitkn+lli5oxhzf7RSfij1caqYYsqM6O0jln02mndFjfCLBGeKCthW9cS4ZR/+atPp8NfvadyFeOe7ff9V/nkNiRAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAlMuMDvl7df8CRB45oUjY9+KCA9sveAdI21HhBfu+Oq30q1fGc7gpJjNo+oMG/NhYp8qs4Z8s8fZNjoJgET9qtRpfjt8ToAAAQIECBAgQCAE6jCQ/sjf/3l67k8vHdoF6XZmkir79RpiidlJOilvPPNdnWxeq20juPKDbe/pOPxxxu8+kmJWFoUAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAYvoAZQIZv7owVBCIQUbUcnDvSVZCg6vGHsV2EDTppc691irBHBCQiPHOw8RGzZex56rleD9vR/utXndHR9vM3jn3bBTzarZ9/vIWfD/NaLDy3rwkQIECAAAECBKoJxED/TgfrVzvy8LaKmRSG2YaY7SFmozjeeH258frjZ7+RXm44xvJhlWhvt+GBU975/hRhlXblJz98Or3hzWe326xw/aLTu9uv8GA1XRjX+4c7P9J25pKF1Y/7VfhjoYqvCRAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIDAcAUEQIbr7WwVBTqZTeHFH/244lHru1mns01UbUmEICLY8b9ePNIMTERYpi4zpvz8qUurNuOk7X7hLe2fNhshl25LJ/df6xyxz7BDNK1zeyVAgAABAgQITJJADDKPkMI0lCozWnTjEH7x8crzT6eX/+eJgEddTLsNZoTDGyqGM6Ld3Z6n2/26uU6j2CeCUz+8/yMpQjKdlKXv/p305qv/pJNdbEuAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgMQEAAZACoDtkfgZiFocog/gg5DCpA0Z+WtD/K+rO7nw1j4dEjhPAX33g6/eX+Zyv5Ldx/WF93E7Jo1e2s06qFRyLsUnXb1rG7fX3Tz1Sftabbc9iPAAECBAgQIDANAlVnh4hgw7iXfs7+EQP6D3/1nvTSP36x48H9w3TsJWAR4SClO4GY9eNf/urTzXukkyPE9+Npl/9RigCIQoAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECIxeQABk9NdADTICVWdU+HIj6HDje96ROUrx4q0Vt9/21W8VH6DPSyPs0muJIMytX/mGWSjmQcaMJ8MKgMw7rU8JECBAgAABAgR6EFj8ry9IMUtBu1Jlm4XHiEHscfx25cjf//lQQhSLKs5oUVbfGNj/w50fSUcbwQ/lhECns1tMulvM/hKzfnQ6C0wElGLWj15CO5Nuq30ECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQGDYAgIgwxZ3vsoC/64xK0bMZtGuRPAhZgrpJERRNTAyjADI+lVntGti2/V/8Q9Ppxsf+Xrb7apsUHXmlSrHsg0BAgQIECBAgACBTgUWdzDLQ4Qelvzi+yufouosBke/GbNoVD5s1xv2OrA+BvQ/96eXpgiB9FpaM6/041i91qXX/V95/uleDzEx+0eY6YWHfr+jeyTuhTf92sfSsvdcNzEOGkKAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBCZFYHZSGqIdkycQM4BULTsbAYh+lyrhk36fs5vjRT17DX9ECOW3f+nsdMel56c9H/n1bqphHwIECBAgQIAAAQJ9EVjcmHWganmpEdTod4kARKczJfS7DlWOF7Nc9Br+eGMjbBOhmNMu/6P0s7//Dym+noQSM1coKb345U83Z/7oJNQTgaq4F4Q/3EEECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQKCeAmYAqed1UauGwG+sPrM5q0f
M7tGubP+776QP/5u3tduso/VffvLZjrYf1cadhD9ido9w/YW3nJYiYBMfncyc0s82xswt3c5+8uKP2t8TUdduj9/PdjoWAQIECBAgQIBAZwIx+0AEEaqEMGJ2g1N//WOp15k05tcwZv8Yh/LD+z/S0awOMbD/Z/71BU3bRaef3VezTrwiuNJt6WXfbs85rvvF/RHfH1VLfN+9eeOfdDSjTtVj244AAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBDon4AASP8sHWkAAhFW+IsKs3s888KR9Jn/3r8QSIROBjGrSL+Jwiba3q5EyGPrBe/oe0im3XnL1v+vF9vXO7d/hEcUAgQIECBAgACByRWIWSleeOj3KzUwZjl489V/UmnbKht1Mmi+yvEGsU0EIX70na9VOvSbfu1jzdkcYoB/HUovIY5Xnu8+PFKHtg+rDp2GP2LGlPge6meQalhtdR4CBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIDBtArPT1mDtHS+B337X2ZUrvO1r36oUhqhywM80ZhSpMvNIlWMNcpu/3F9tlpKdH7ygVuGPMNnz1HNd0/y3p9vvG7ObKAQIECBAgAABAuMpEAGQqiUCG0f/sT+zdkSoomqwomr9BrHd//67ajM7nHb5HzVnSKlL+CMswvfYS90FuqvOzhKBhmkth796T0czfyx7z3XpjN99RPhjWm8Y7SZAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgACBsRMQABm7SzZdFV6/6oxUdSB/BDY+/Lm/7Tm4EbNLbPvqt8YCuspMGL/9S2dXNoxGDyv4EnWvMnvJwgsR9asSHvkFAZCFdL4mQIAAAQIECIyNQAQWOgmB/HDnR9KPv/eNntoXoYSYOWEcyo+fbd/WmM0hBvd3UoY1w0bVIMfCur9UIejzxp9718LdpubrCNdUnTknUGLWjwgJKQQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgMD4CAiAjM+1mtqabn3POyq3PUIFG//sa10FC+Ikrf0rn3DEG1YJUKw/+4yOarmnwuwaHR2wZOM7ugjaxOwsVUqn7a5yTNsQIECAAAECBAgMT+DUX/9Y5ZNFeOO5P7206xBIa/+f/PDpyucc5YbHK8ygccovvr+jKkbbh9X+F7/86Y7qFhtHuKFK/aZ19o9OA0wR/ugkZNXxBbMDAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgMBABAZCBsDpoPwV+Y/WZKWYCqVoixPEb23elz/z3akGBOG7MKhFhhN9s7DesGTCqtmfY2/3l/meHdsq/+Ienm6GbqieMwMv2igGQuG8UAgQIECBAgACB8RWIGSze9GudhUB+sO09KcIFMRi+ajny93+e/vmPfqnr8EjV8wx7u5klp3V0yiqza3R0wJKNI8hx+Kv3lGzx+lWdhBt+5l9f8Pqdp+Sr8KwSkAkO4Y8puSk0kwABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQmUmDRRLZKoyZO4I5Lz2+GOqqGM2K7W7/yjWZY4Dfffmb69fPOTO/82dPSqUve+KpNhAkiLPLfGjNe7GwEEaoe+9UDjMkn//SD6oPf9jz1XIpQxjBLzNiy84MXNK9P2Xnj+nz4c39b6TpF+GP+tS47rnUECBAgQIAAAQL1FVj2nutSBDSqDmyPlvzLX306/e+v3ZOWvPP96ZTGxxt//l0pwiStEmGCH3/vG+noN7+YIvTQybFbxxiH12hf1VlUwiTchlleeOj30+wpp1WahSK2rXKd4jov6XDmk2G2eVDniusX93yVEkavPN/4/beLWVhyx49ZV6Z15pWcieUECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQGBQAgIgg5J13L4KnHXa0nTLe9+Vbnzk6x0dN0IeMRNIJ7OBdHSCEW8cLtHGshLhlg+86+y2AYsIw3z4839bdqiBrItgR4RAtl7wjrTxl84uDG5EKOWWRqCnakhn8y+/bSB1dVACBAgQIECAAIHhCkRAYMWmP0sxs0cnJQbER3AkPiaxzDRc2pVmyKURcGkXiAir5/700o5mTWl37qrrf3j/R5phhAj6xLVeWH70na+lCH9EW6qUOM40lrjP4zpWKRGk6X/Y52MCIFXwbUOAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBPogIADSB0SHGI7AbzfCAXsas3UMe4aK4bSuu7PErCbtAiCtgEUEaIpmxojgx+e+8fRIQzJRx5ixZdvXvtUMqvy7s89ogsTsLFG/qsGP2Gn9qjOaH92J2osAAQIECBAgQKBuAm/8uXelN1/9JynCAsoJgZ/51xeko41wR7ty6L4Ppjf92sfSv/rl33ndLCixXwQBYgaUCANUDQ+09mt33k7Wx/njI2aQiGs9s+S09ONnv5F+/L++UWnWj9a5YmaLpe/+ndaXU/U6qUGnqbqIGkuAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBCoKCIBUhLJZPQTuuPT8ZkWGGQI5dckbOwogDFMqAh1/uf/ZtqeMAEVr9pSYNWTl8qXNffY89VzbfYe5QdQz6tRtveJate6RYdbbuQgQIECAAAECBAYr0BrYLwRywvmUX3x/c2aMKuqtgEXMsBEBiygxo0YnoY/554ngyCBKzPYRH92W5Zf/UeEsIt0eb1z2i+tYdYaUcWmTehIgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQJ5gdn8KmsI1FMgBvjHbCDDKBEo2PnBC4Zxqq7OEQ4R6OikxIwhvYQsOjlX2bZh2+8Ss5x06tHvOjgeAQIECBAgQIDAYAQiBBIzgQyrxLliVoo6lpjtotO6RVCgFbLoNvzRD4sIovS7xL2xpBGKmcYi/DGNV12bCRAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQGCaBQRApvnqj3HbIwQSH4MIEbRYWuGPd/7saWn9qjNai2v3OogZL6q4/tM/v9CTxcZGeCVs+1U+/G/eNrRgUL/q7DgECBAgQIAAAQKdCcRA/zN+95GBz/QQ4Y84V2vGjM5qOZyto479DlNUOd4rz/c2A0iYhm2/ShxrmMGgftW7X8fpZdaUftXBcQgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAIHhCQiADM/amfosELNf/OXmiwcSzohZJGLmj1ZAofXa5yb05XARTulnCGTre96RNv/y29rW7Zs9BkDe9DMnZlfph220/+bG7B8KAQIECBAgQIDA5AvEzBc/+/v/0NcQQUstAhArrvmzV49d5wBIzALSzzBMzKDx5o3tZ1j5yQ+fTr3OINIK2LTcu32d9vBHt272I0CAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBMZXQABkfK+dmjcEWkGNCGv0a5aOmEkigiXzgwm/8Jb+zVQxiAsXYZjPfODf9jQjSviF442NAM
j6s9vPePLMC0dSfPRSWrOshHk3Ja5R1DnarxAgQIAAAQIECEyPQAQ1IkTwlq1ffTWs0WvrIwARx4vXVqlzACTqGPWLEEgv9YwgSYRe4mNxI1xTpRz95herbFa6TVy/0y7/o65mMWkFdaZ55o9SXCsJECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgACBiRVYNLEt07CpEojwwvpVF6SYleJz33g6/ddvP9tROCGCCL+x+sxm+CFCJQvLLzSCBrmAycqC7Vv7x7rcfq1t5gdNWsu6eY367/nIr6ed//B02v5336nU/la7f/tdZ7+unic824dA9jz1XM/hi6hDzN4Rs47c8dVvpb/c/2x68eiPSwmiflHnboIfg7omgzpuKUQfVoZ/u3s0ThPbKQQIECBAgACBuglE8CFCAKf++sfSS//4xXTk7/88/fh73+iomjGLRHzEzCILSxy/aHlsVxa6iIBCbr+F5+j166hHBFei7Ye/ek/l9kfQ5ZR3vv91AZqod1jELB9l5ZXny9eX7Tt/3bL3XNc8X9Q76t/uvNHW1vWKunZSFp1+9kCuyaCOW7Vtx4++MJB2VT1/bBcGCgECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIDAcgZnjjTKcUzkLgeEKxOwUEQiJj3/50Y+br/NrEEGP+IjZLqoMgJ+/7zh8Pr/9C+sboZNoe7/CJwuPv/HPvpYiHFJWtjZmGonZRopK7BvX7cXGdZtf4lpFnYUR5qv4nAABAgQIECBAYL7AsZdeaIYgfvSdrzUXv/w/T7y2tplpBAfeeOa70uJGmCBmvOg0SNA6Tl1fF7Z/fj1joH7M+DGocMqLX/50+pe/+vT8U570eZw7Zi0pKhHeiY+FAZO4Vm/8+Xc16160n2UECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAgWkRWDQtDdXO6RNoBTxiZoxpLOPc/qozkEzjddVmAgQIECBAgACBcoHW7BuDCjmUn330a8e5/THDR9nMKqPXVQMCBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAwGgFZkd7emcnQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBBoJyAA0k7IegIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIDAiAUEQEZ8AZyeAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQINBOQACknZD1BAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAIERCwiAjPgCOD0BAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAoJ2AAEg7IesJECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAiMWEAAZ8QVwegIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIBAOwEBkHZC1hMgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIERiwgADLiC+D0BAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAIF2AjPHG6XdRtYTIECgE4Fv/vML6cWjPy7dZeXypems05aWbmMlAQIECBAgQIAAAQLjI/CTHz6dXnn+6dIKz55yWnrjz72rdBsrCRAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIEigUEQIpdLCVAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQI1EZgtjY1URECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAIFCAQGQQhYLCRAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQL1ERAAqc+1UBMCBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAQKGAAEghi4UECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAgfoICIDU51qoCQECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECgUEAApJDFQgIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIBAfQQEQOpzLdSEAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIFAoIABSyGIhAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQKA+AgIg9bkWakKAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQKBQQAClksZAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgUB8BAZD6XAs1IUCAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgUCgiAFLJYSIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBCoj4AASH2uhZoQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAoFBEAKWSwkQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECNRHQACkPtdCTQgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAEChQICIIUsFhIgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIE6iMgAFKfa6EmBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAIFCAQGQQhYLCRAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQL1ERAAqc+1UBMCBAgQIECAAAECBAgQIECAAAECBAgQI
ECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAQKGAAEghi4UECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAgfoICIDU51qoCQECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECgUEAApJDFQgIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIBAfQQEQOpzLdSEAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIFAoIABSyGIhAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQKA+AgIg9bkWakKAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQKBQQAClksZAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgUB8BAZD6XAs1IUCAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgUCgiAFLJYSIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBCoj4AASH2uhZoQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAoFBEAKWSwkQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECNRHQACkPtdCTQgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAEChQICIIUsFhIgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIE6iMgAFKfa6EmBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAIFCAQGQQhYLCRAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQL1ERAAqc+1UBMCBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAQKGAAEghi4UECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAgfoICIDU51qoCQECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECgUEAApJDFQgIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIBAfQQEQOpzLdSEAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIFAoIABSyGIhAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQKA+AgIg9bkWakKAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQKBQQAClksZAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgUB8BAZD6XAs1IUCAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgUCgiAFLJYSIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBCoj4AASH2uhZoQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAoFBEAKWSwkQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECNRHQACkPtdCTQgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAEChQICIIUsFhIgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIE6iMgAFKfa6EmBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAIFCAQGQQhYLCRAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQL1ERAAqc+1UBMCBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAQKGAAEghi4UECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAgfoICIDU51qoCQECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECgUEAApJDFQgIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIBAfQQEQOpzLdSEAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIFAoIABSyGIhAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQKA+AgIg9bkWakKAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQKBQQAClksZAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgUB8BAZD6XAs1IUCAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgUCgiAFLJYSIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBCoj4AASH2uhZoQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAoFBEAKWSwkQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECNRHQACkPtdCTQgQIECAAAEC
BAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAEChQICIIUsFhIgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIE6iMgAFKfa6EmBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAIFCAQGQQhYLCRAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQL1ERAAqc+1UBMCBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAQKGAAEghi4UECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAgfoICIDU51qoCQECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECgUEAApJDFQgIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIBAfQQEQOpzLdSEAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIFAoIABSyGIhAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQKA+AgIg9bkWakKAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQKBQQAClksZAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgUB8BAZD6XAs1IUCAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgUCgiAFLJYSIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBCoj4AASH2uhZoQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAoFBEAKWSwkQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECNRHQACkPtdCTQgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAEChQICIIUsFhIgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIE6iMgAFKfa6EmBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAIFCAQGQQhYLCRAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQL1ERAAqc+1UBMCBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAQKGAAEghi4UECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAgfoICIDU51qoCQECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECgUGBR4VILCRAgQIAAAQIECBAgQCArsHv37jQ3N5f27t3b3Oaxxx57ddtYV7Wcc845KT6ixOuqVauar/F566O50j8ECBAgUBuBQfYBy5cvT2vXrtUH1OZqqwgBAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgTqJTBzvFHqVaXxrk0MAIuBYFEOHDjQ/Gh+UfLP/IFdrT/0l2xuFQECBAjUVEAfUNMLo1oECBDoQSB+po/39/iIkEe8tn7e7+GwHe26YcOG5kDgNWvWNAcFx9cKAQIECAxeYGEfUPX/efpZM31APzUdiwABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAuMvIADSxTWMJz22/ujfetJvJ0/5rXrK1sCuiy666NUnP7aWVT2G7QgQIECgvwLD6gPiqb8RCtQH9Pf6ORoBAgTaCUTAI97r4+f8eB122KNd/Vrr4/eC6CPiNT4UAgQIEOhdIP6vZ34fEF/XscTvCvHe3+oH4vcGhQABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgSmQ0AApM11bg0A27dvX/Npv/H1qEv8oT8+4g/9rc9HXSfnJ0CAwCQKxHt+fLSe+F6nPiCeAh+DvqIfUAgQIECgN4EHH3wwfeELX2gO+q3rYN92Lbzyyiubvx/Ea8wwqBAgQIBANYHoA+Ln/Xgd1z4gfi+44oorkj6g2jW3FQECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBMZZQABkwdWLP/a3/vhf5yf+zq92POmx9cf+eDXga76OzwkQIFBdIPqA1hN/oy+o61Pf57eo1QdEKNCAr/kyPidAgEC5QLzPR+hjXN7vy1vz+rURDrzmmmv0C69n8RUBAgReFdAHvErhEwIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAYMwEBkMYFiye633fffc1Bv3V4unuv91BrwFeEQTwZvldN+xMgMOkCreBf9AOT0AdECDCCIDHwVx8w6Xev9hEg0KlAvM/fddddExn6yFlEnxBPhd+0aVNuE8sJECAwFQLxc3/0ATt27BiLoHc/Lkr8PnDDDTc0fz+I4LhCgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAuMvMLUBkPjDf/zRPwb8xueTWuKP/Z7+O6lXV7sIEOhWIN7346m/kxL6yDm0wiAx6MvsUDklywkQmHSBmM0p3vNj0G8/g36rVqR0zoqZtGZlSsuXzqT4Oj6ixPLW5yeW5P99bP/x5soXjqS075kT2z327eNp7qXG1wdPrMvvXX1NDPyNEIg+obqZLQkQmAyB1v/9xEx//SrLl6a05qwT7/XnnNF7HxD1enz/idoNqg+IQODNN9/s94J+3QSOQ4AAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAgREJTF0AZBB/+J9/7WaXLkuLz1r96qIlb1/36ue5T45++4lXV738zP507MjhV7/u5yee/ttPTcciQGAcBVqhj3gdROmmD3j54JOvvu8Psg+IWaEiEOgJ8IO48o5JgEAdBVqB7wh+RAikl7JmZSPocVZjsG+8NgIfF62e6eVwHe0bIZAIhjRfD6bUCox0dJAFG8fvBREEib5BIUCAwCQKxPv+nXfe2ZeHfkSgL973W31ABD8iADKMEu/9Tx06ERCMYMi+ZxoBwUZgsJc
S7/0RBNEH9KJoXwIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECIxOYCoCIP0c/BWXqjXAd/HK89KiFW9Ni1eubr4uWnFm367kK4eeTa8c+n56+eD+n74+mfo1MDieAh+DgLds2ZLiScAKAQIEJlmgn4O/wmnYfcCxlw6nCAr2qw+I9/0Y9BtBELOCTPKdr20Eplcgfva/9dZbm7P9dasQg3wvamS6Y8DvhY2PYQ30rVrfCIHEk+JjMHAvgRCDgKuK244AgXERaP3s30v4rxX4uGJtPfuACIU8Fn1Aoy94aG/3M0XFjLGt3wvG5fqqJwECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAikNNEBkH4M/oqbJIIeS1af3/hY1/y8n0GPTm/CCIYc3d8YCNx4YvzR/V9vvnZ6jPnbxwDgePKjQcDzVXxOgMAkCLT6gJjtIwaCdVtafcCJ13WNwF//wn6d1in6gBPv/0/oAzrFsz0BAhMv0Hrfjxn/uimXNwb6RuAjBvzG4N9xKfEk+Mcbg4C/0BgE/NC+7p4MLwgyLldbPQkQyAn0GvyI4N+H1o9fHxAeEQKJPiACITFbSKcl/j/o3nvvNSNIp3C2J0CAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAwIgEJjIA0uvgrxjcG2GPpWsvbAY/4mnvdS3HjhxOR/Y93ng6/Nebr/F1N0UQpBs1+xAgUEeBXvuAeM9fuqbx/v/285uvde8DIgx4ZG+jH2iEAyMg0k3RB3SjZh8CBOoi0Mug3xjwe/0lM+nyNfWb5aNb39ZA4G7CIBEEiUHAwuHd6tuPAIFRCNxyyy2pmxk/xjn0kXOO2UHufrS7QKAwYE7VcgIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECNRLYKICIL0M+o3QRwQ+lq1/X3OWj3pdpuq1iSfDH97zpeZg4G4GAscg4G3btqXly5dXP6ktCRAgUAOBGAB86623pjvvvLPj2rT6gBPBv3Ud71+XHfrRB5gVqi5XUz0IEKgiELN9bN26NUUfULUsX5qaT3m//pLZsZrpo2r7WtvFzCARAomBwDEguJOyZcuW5iyBfifoRM22BAgMWyBm+os+IP4vqGpp9QEx20cEQCa5fHbP8fTZvzkxM0gn7RQO70TLtgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIEBi+wEQEQFpP/Y2Bv52U1lPeT73k6rEOfeTaHE+DP/w3X+x4ZpAY6HXDDTekGPhl0FdO13ICBOok0O1TfyP0F6GP+Ji0EmGQFx+9v+M+IBwiBKIPmLQ7QnsITJbA3r17m4N+d+/eXblhF62eSR/6lcZHY9DvtJXWE+FjMHDVEr8HRDA8BgIrBAgQqJNABD6uvfba1EkfsGpFSp+4bHaiZnyqek2eOpTSJx8+1gwFRjiwSok+oPU7QZXtbUOAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECwxMY+wBIN0/9XbzyvBShj6VrLkwRApn0cuzI4eYA4LmHt6dOZgU555xzmoO+rrzyykkn0j4CBMZUIAZ9xeCvTp76G7N9nHrJxsaMT++fij4gLm3MDBVhkAiFVC0G/laVsh0BAsMWiNBfJ8HvCH58/LKZFK/TXmIQcIRA7n70WKo6CHjDhg3p3nvvTfG7gUKAAIFRC8Rsf9EHVJ35SR/w2hWL9/3//Ned9QFr165t9gHxqhAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIBAPQTGNgDSzRMf4wnvEfxYsnpdPfRHUIuYFSSCIEf3f73y2SMAEk//NeirMpkNCRAYsED0AVu3bk0PPvhg5TMtWX1+Wn7Z5qnvA2JmqAiEVC0x8Df6AIO+qorZjgCBQQnErB8R+ovXKsWg37xSp4OAPQk+b2kNAQLDEYif/6+66ip9QB+4O+0D4pQxG0gEMBUCBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQGL3AWAZA4o/Od911V+UnPi5b/77moN946rtyQiCeAh9Pg686CNigL3cOAQJ1Eej0qb+CHydfuZgNKsKAVfuAOIJBXyc7WkKAwPAEOpn1Q/Cj+nWJQcAf3XmsOStIlb0iFPjAAw+k+N1AIUCAwLAEOvn5f9WKlLZvmjXrU4WL02kQJALh0Qd4OEgFXJsQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIEBggAJjFQDpdNYPwY/2d04EQZ7feVflGUFi0Ne9997rD/7taW1BgECfBebm5ppP/d29e3elIwt+tGfqNAgSg76iDzAbSHtbWxAg0B+BTt77Y9DvJy6bTR9aP9Ofk0/RUZ46lNLmHcfSY/uPt211hD+iL4hZAhUCBAgMUiD6gJj5qcqsf8uXpnT7Rn1AN9cj+oAbG2HAh/ZW6wNidsBNmzZ1cyr7ECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgEAfBMYmALJjx460devWSrN+GPTb+Z1xdP8TjSDInSkCIe2KQV/thKwnQKDfAjHoKwZ/xSCwdmXxyvPS6Ru3pCWr17Xb1PqfCnQSBow+wKAvtw4BAsMQiMDfVVddVem9P4If/+FXZ1IMAFa6F4jBvzEIOAYDtytmhmonZD0BAr0I7N27t9kHxINA2pXrL5lJH790Vh/QDqrN+ggBRhiwSh8QAZAIAyoECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgMHyB2gdAYrBvBD8iANKuLFpxZlp+2eYUM38o3Qkc3vOlZhDk2JHDbQ+wZcuW5iDgthvagAABAj0IRB9w5513tj3C7NJlafmlm9Opl1zddlsbFAsc2ft4c1aomBmkXYknv8egrwiEKAQIEOi3QLzvx/t/u7Jm5Uzafs1MilelPwJzR1L61CPH0t2Ptn8SfMwO+MADD+gL+kPvKAQI/FSg6gNA4r3/9o0z6aLV+oB+3jyfeuR4+uTDx9oeMmYF3LVrlz6grZQNCBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAg0F+BWgdA4omP8cT3eG1XYsBvDPyNAcBKbwIR/ojZQCIM0q74g387IesJEOhWIAKAF198caU+IIJ/MeuHPqBb7df2iz7gxb++P809vP21hZnPog+IEEi8KgQIEOiXQPz8XyX8HbN+fPxSg3775b7wOFWfBK8vWCjnawIEehGoGv7WB/Si3H7ffQcbs4HcdzzFa1mJMHiEQPw+UKZkHQECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBPorUNsAyO7du9NVV12VYgBwWVm88rx0xjU3pXhV+itwdP8T6bkdt6V2T4L3B//+ujsaAQKpGfqI8Ee7PiBmfjpj001pyep12Pos8PLBJ9Nz992W4rWsRB8QIZCYEUQhQIBALwLxnl8l+GfWj16UO9s3ZgP56M5j6bN7DADuTM7WBAh0KhB9QAQAH3zwwdJdV61IafumWbN+lCr1b2X0Ae1mhIrfB7Zt25Y2bdrUvx
M7EgECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAhkBWoZAIkn/sYf/tuVmPXj9I03tNvM+h4E4knwc49sTy8+en/bo8QAYH/wb8tkAwIE2ghEHxBP/m0X/jDrRxvIPq2OPqDKbCAx6GvLli19OqvDECAwbQIHDhxohr/bzfz3ofUz6faNs2n50mkTGm17H9obT4I/liIQUlb8PlCmYx0BAjmBqgHAy9fOpO3X6ANyjoNaHjNCfeAefcCgfB2XAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECnQrULgASwY8Y/FtWZpcuS2+57v/2xPcypD6vO7L38eaT4CMQUlZuvvnmdMstt5RtYh0BAgSyAnfeeWcz/JHdoLEi+oCY+Wnp2gvLNrOujwJVZ4SKEGAM/lUIECDQiUCEPtrN+hSBjwh+RABEGY3AvoMRAjme4rWsCIGU6VhHgMBCgarhj+gDrr9EH7DQb1hfRwDwvXcca9sHRCA8guEKAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECAxOoFYBkCrhjyWrz2+EPz7dHAA8OBZHLhJ45dCz6Qf3fCy9fPDJotWvLjMA+FUKnxAg0IFAlT5g8crzmn3AohVndnBkm/ZDIAKA0Qcc3f/10sNFHxCDvpYvX166nZUECBAIgSrhj1UrUvr8dbNpzUoDf0d918QA4JgJJGYEKStCIGU61hEg0BKIPuCqq65KMQtUrkQA8HONPuCi1fqAnNEwl2/ecSx9dk95H+D/hIZ5RZyLAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAIFpFKhFACSe+Bh/9N+9e3fpNTj1kqvT6RtvKN3GysELPLfjtnR4z5dKT+QP/qU8VhIgsECgSvhj2fr3pTM23bRgT18OW2Duke1p7uHtpaddu3Zt2rVrlxBIqZKVBAhUCX9E6OMrN86mGACs1EfgU48cT598+FhphYRASnmsJDD1AlX7gAgARhBQqY9ABEAiCFJW/J9QmY51BAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQ6E1g5AGQCH9cfPHFKf74nyuzS5c1gh9bUgz+VeohEAGQCIKUFX/wL9OxjgCBlkCV8EcEP/QBLbHRvx7Z+3h67r7bUswKkitCIDkZywkQCIEqA38/tH4m3b5R+KOud0yVAcBCIHW9eupFYLQCVfoAAcDRXqN2Z4+ZoGJGqJgZKlf8n1BOxnICBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAj0JjDSAEjV8Mdbb/zjtHjleb211N59Fzi6/4n0g3v+r9IBwP7g33d2ByQwUQLtwh8RADzjmpvS0rUXTlS7J6ExLx98Mn3/jt8r7QOEQCbhSmsDgf4LVBn4G+GP7Ztm+39yR+yrQJUBwEIgfSV3MAJjL1Dl/4H0AeNxmfcdPJ7ee4cQyHhcLbUkQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQGCSBEYWAKnyR/8IfcTAX+GP+t5yVQYAC4HU9/qpGYFRClQJfwgAjvIKtT/3K4eebQQBP5aiL8gVIZCcjOUEplMgfgc499xzU7zmioG/OZl6Lq8yAFgIpJ7XTq0IDFugyv8D6QOGfVV6O58+oDc/exMgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQKAbgZEFQNoN/I3QRwz8jae/K/UWOHbkcPMp8GUDgIVA6n0N1Y7AsAWq9AECgMO+Kt2dr0ofsGHDhrRr167uTmAvAgQmRqDKwN/bN86m6y+ZmZg2T0tDqgwAjn4g+gOFAIHpFVi3bl2KWaByRfgjJ1Pv5VX6AEHAel9DtSNAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAYLwEZkdR3SoDf4U/RnFlujtnhHTaPaV/x44dKa67QoAAga1bt6Z4T8iVVgDQ7E85oXotr9IH7N69Wx9Qr8umNgRGInDVVVeVDvz9xGXCHyO5MH046ZqVM+krN86m5UvzB2t3/fN7WkOAwCQIxP8HCH9MwpU8uQ1V+oB21//ko1pCgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAjmBoQdAbrnllkoDf838kbtk9VxeZQBwDPguG/Rdz5apFQEC/RSI94A777wze8hW+EMfkCWq5YqqfUCEfxQCBKZTIAZ+RhgsV+Kp7x+/1MwfOZ9xWN5uAHDMABMhkHhVCBCYLoF2/w9k5o/xvx/a9QHRwosvvjgdOHBg/BurBQQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIEBixwMzxRhlWHWLgb9ksEAb+DutKDO48x44cTt+/4/fSywefzJ7kgQceSFdeeWV2vRUECEymQAz8jUE/uaIPyMmMz/IqfcC9996bNm3aND6NUlMCBHoWaPc7gIG/PRPX6gCP7T+e3nv7sWydNmzYkHbt2pVdbwUBApMl8OCDDzbDX7lW6QNyMuO5fN/BRh9wx7E0d6S4/mvXrm32AcuXLy/ewFICBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQaCswtADI3r1707p167IVMvA3SzN2K9oNAI4/9Megr/jDv0KAwHQIRB8Q4Y/cU79jBomz/vC/JDN/jP/90K4PiBY+8cQT+oDxv9RaQKCSQLvfAS5fO5M+f93QJyWsVHcbdS/w2T3H0+Yd+RDIli1b0rZt27o/gT0JEBgLgXa/A1SZNWIsGqqSrxNoFwSMMHiEwhUCBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQ6E5gKKOtYsBv2VPfF604M731xj828Le7a1i7vWIA9xnX3JS9nnE/xEwwuYHgtWuQChEg0JNAu+/5eM/QB/REXKudW9czgp25UhYGyu1jOQEC4ycQ7/9XXXVVtuIx8Hf7NUP5dSRbBysGIxBP9P/EZflre+edd6aYFUAhQGByBdr9DrBqRUpfuXE2LV86uQbT2rKLVjf69035PiBmBot+QCFAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgACB7gTyf5Ht7niFe8XAr/jjf1GJgaJvue7T2bBA0T6W1V+g3Ywu8STQCIEoBAhMvsDWrVtTfM/nSoQ/ysICuf0sr69AlSBgWTC0vi1TMwIEOhGIn/UOHDhQuEsM+DXwt5BmYhZ+/NKZFEGQXCm7P3L7WE6AwPgI3HrrrdnfAaIPiNmfhD/G53p2WtN4/7/+knwf0O53xE7PZ3sCBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAhMk8DAAyC33HJL2r17d9b0Ldf93wb+ZnXGe0UM6D5945ZsI+Kpv576mOWxgsBECMT3eDzhNVfO2HSTPiCHM+bLW0HAXDMiFBQDvxQCBCZTIN7/y2Z4EP6YzOu+sFW3b5xNMdNLUWnNDlC0zjICBMZboN3v+jH7U+69YbxbrvbzBaIPiNlAcsXMsDkZywkQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIFAuMHO8Uco36X5tB
D/KnvAdA3+XrX9f9yew51gIzD2yPc09vD1b1yeeeCKtXbs2u94KAgTGUyAG+EcfkJsB6tRLrm6ExG4Yz8apdWWBw3u+lJ7bcVt2+127dqUNGzZk11tBgMD4CcSsH+vWrcu+/8eA0LKngo9fi9W4TOCpQym9+7afpLkjxVvdfPPNKR4aoBAgMBkC8bP/ueeem+0DPnHZbIoZgpTpEIj3/ugDoi8oKlu2bEnbtm0rWmUZAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECGQEBjYDSPzRP57mlysR/BD+yOlM1vLll25OS9demG2Upz5maawgMNYCZd/bS1afL/wx1le3euXb9fdXXXVVdoBg9bPYkgCBOgmUvf9fvnZG+KNOF2sIdVm1IqV42n+u3HrrrSlCowoBApMhUNYHxGwQwh+TcZ2rtmL50pQ+f12+D4gZw8pmDa56HtsRIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIEBgmgTyf4XtUSH+6B9P/y0qi1ee1xj4u6VolWUTKnDGNTelRSvOLGxdDPiKgV8KAQKTIxBP8s4N5oz3grdc9+nJaayWtBWIGb+i7y8q7QKjRftYRoBAfQXKBnK2CwLUt1Vq1qtAu+DP1q1bez2F/QkQqIHAgw8+mOKjqEQQ4HMlQYCifSybDIE1K2dSzPySK2Whodw+lhMgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQGCaBfJ/ge1BpeyP/rNLl6UIA8SrMj0Ccb3LBnyXDRacHiUtJTAZAu1CXREG0AdMxrXupBXRB+Sue9nPDZ2cw7YECIxWIMLfZaHe7ZtmUwwAVqZT4OOXzqYYBFxU4unv8fuAQoDA+Aq0C/XGTED6gPG9vr3WPGZ+iRlgikr8/KAPKJKxjAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAsUCfQ+AxB/9y57guvzSzdmngBdX0dJJEYinvy+/bHO2OfHUR4UAgfEXKPtejveAJavXjX8jtaBjgZj5JQKgueLJvzkZywmMj0D8DhC/CxSVePJ3buBn0faWTZ5ADPzefk3x4N9obYSHcvfP5GloEYHJEyj7Ho5ZgOJDmW6BsiBo3D+5WYSnW03rCRAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgcLJA3wMgZX+0Xbr2wnTqJVefXAtLpkYgAkBLVp9f2N74Y/8tt9xSuM5CAgTGQyC+h2MGkKLSDIE13gOU6RWInwPio6jEoN/4GUIhQGA8BWIGh5jNp6isWpFSPPlbIRAzgEQYqKhEP1D2IIGifSwjQKAeAvHzf24GhxPhr+Lv+3rUXi2GJXDi54H8vVD2IIFh1dF5CBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgMA4CM8cbpV8VjT/6r1u3rvBws0uXpbP+8L+keFWmW+CVQ8+m7912TTp25PBJEMuXL09PPPFEOuecc05aZwEBAvUWiBBX9AG5p3f/3E33mQGq3pdwKLWL9/5n/uDfF/YBUYFdu3alDRs2DKUuTkKAQP8Ezj333OyTu7/yUbN/9E96Mo70f/zHn6SnDhW3RT9Q7GIpgToLXHzxxSmCgEXl89fNmv2jCGaKl7339mPpsf3F/xX5wAMPpCuvvHKKdTSdAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAEC7QXyj95rv+9JW5Q9sfX0jVuEP04Sm84Fi1acmWImkKLiyb9FKpYRGA+BmL0hF/5Yftlm4Y/xuIwDr2UEQc+45qbsecwCkqWxgkBtBXbs2JENf3xo/Uy6aLXZP2p78UZUse2b8r+G6gdGdFGclkCXAhH8yIU/4v3/8rX6gC5pJ3a32zfm74my/1ecWBANI0CAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAQIcC+ZE3HR7owQcfzP7Rf8nq89Oy9e/r8Ig2n2SBUy+5OjsYvOxemmQTbSMwzgIx6CsGABeVxSvPy4a+ira3bPIFlq69MMXPBkWl7F4q2t4yAgRGKxDBv9yA/eVLU7p9Y99+3RhtQ529rwJlg8KjH4gPhQCB8RC49tprsxUtC3tld7Ji4gXWrJxJ119SHAKJWSVzv1dOPIwGEiBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAoKJA30ZklT2l74xNf1CxOjabJoGYFSZXcgMJc9tbToDAaAXKvmfLvtdHW2tnH6VA2c8GZffTKOvs3AQInCxw5513Zmf/uP6S2RQhEIVAkcAdJeEg/UCRmGUE6icQA/VjwH5RiQH+q1YUrbGMQEofvzT/M4I+wB1CgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgACBcoG+BEDK/ugfMz0sWnFmeS2snUqBJavXZWeGiaf+xkwgCgEC9Rcoe1L3iZke1tW/EWo4dIH42WD5ZZsLz+vJv4UsFhKonUDM/nHXXXcV1isG/X780uKnexfuYOHUCcQ98onLin8dLfvZYuqgNJhAjQVyA/Uj/BcD/BUCOYGye8TvAjk1ywkQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIHBCoC9/kc/90X926bK0/NLiwZ0uAIEQiMG/cZ8UlbJZZYq2t4wAgdEI5PqAqM3pG28YTaWcdSwETv3Vq7N9QNl9NRaNU0kCUyAQIfAIgRSV3MD+om0tm16B//CrM9lZYvQD03tfaPl4CJQ9CMQMUONxDUddy7JZYvQBo746zk+AAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAQJ0Feg6AlP3RP2b/yA3urzOKug1PIJ4AH/dJUYmnPpoFpEjGMgL1ESh7QncEvMwAVZ9rVceaxM8Ip2/cUlg1T/4tZLGQQK0Eymb/+NB6s3/U6mLVtDLxBPgYKF5Uyn7GKNreMgIEhiuQ6wPi+zrCXQqBKgK5wKjfBaro2YYAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAgWkVKB5t04FG7ql8MejX7B8dQE7xpmVPgM8NKpliLk0nUCuBXB8QA/vje1sh0E5g2fr3ZYNCufur3TGtJ0Bg8AJlIfDcYM7B18oZxlGgbBYQvwuM4xVV52kQiIDW3r17C5tq9o9CFgszAhEYXbWieKU+oNjFUgIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECPQUACkb+BVPflcIVBFoDhTPzALiyb9VBG1DYDQCZd+fZoAazTUZ17Pmfmbw5N9xvaLqPQ0CuUGZMYjT7B/TcAf0r41ls4DEbIDRFygECNRLINcHmP2jXtdpXGqTC45GyCh+51QIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBA4PUCPQVA7rvvvtcf7adfxewf8URvhUBVAbOAVJWyHYH6COT6ALN/1OcajUtNymYByQ0wHJe2qSeBSRSIAZm5J7/nBnFOooM29U8gZgHJFf1ATsZyAqMRiFBWhLOKitk/ilQsaycQwdEIDxWV3O+cRdtaRoAAAQIECBAgQIAAAQIE
CBAgQIAAAQIECBAgQIAAgWkR6DoAUvYkvlMv2TgtftrZJ4GyWUA8+bdPyA5DoI8CZTMzLFv//hTf0wqBTgRys4CU/bzRyfFtS4BA/wRyA/Jj8Obla/ID+ftXA0eaNIG4d3Izx8SskwoBAvURKPuezH0f16f2alJXgQgPFZW43+bm5opWWUaAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAIGpFSj+C2sFjtzArxj0G4N/FQKdCsQsILmSu99y21tOgMBgBcoGfgkBDtZ+Uo++dM2F2eCQJ/9O6lXXrnEUiEGYuSe/lz3Bexzbqs7DFbj+kuLwUNxzZT93DLeWzkaAQO7nsugDVq3gQ6A7gbLwkD6gO1N7ESBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECAwuQJdBUDKBn558vvk3iyDbtmJ8ND7Ck/jD/6FLBYSGJlAbuDXsvXvS4tWnDmyejnx+AqUzQQVfUD87KEQIDB6gQh/5L4fc0/vHn2t1WAcBNasnEnxUVS+8IUvFC22jACBIQvs3r07xUyAReVDv1L8/Vu0rWUEFgpEeCgXAsn97rnwGL4mQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIDAtAh0FQApG/jlye/TcusMpp2nXlI8C0gMNMw9bXowNXFUAgRyAvG9mBv4texXzACVc7O8vUAEiHJFH5CTsZzAcAVyA/EvWu3J78O9EpN5ttwsINEH5IJHkymhVQTqKZAbiB+D96MfUAj0InDF2uJ7aO/evSk+FAIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBA4IdBVACT3R/8lq8/35Hd3Vk8Ci1eel+KjqOTuu6JtLSNAYHACucG/MfPHktXrBndiR554gbiHlq69sLCdd911V+FyCwkQGJ5AWSDXk9+Hdx0m+UyXr5lJy5cWt9CMgMUulhIYpkAukGsGqGFehck91+WNAEiEiYqK/w8qUrGMAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAIFpFeg4ABJPfd+9e3eh17JfyT+5u3AHCwkUCOSeAO/JvwVYFhEYgUBu4JcZoEZwMSbwlLk+IJ76m5t5ZgIZNIlALQVy7/8xYD8G7isEehUou5dyAdRez2l/AgSqCZT9Pp6buaHakW1F4DWB3L2U+xnktT19RoAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAgekR6DgAkvuj6+zSZSk3aHN6OLW0HwLL1r8/e5jc/ZfdwQoCBPoqUDbwKzdzQ18r4GATLxD3UfxMUVT0AUUqlhEYnkBuAH7ZrA3Dq50zTYpAbvBvPIQgZqFRCBAYjUCuD1izMj9rw2hq6qzjLPCh9cWB0giCRyBcIUCAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAIGUOg6A3HfffYVuS9dcWLjcQgKdCsTA39xA8tygk07PYXsCBLoTyH0PxvfsohVndndQexFYIJD7mSL3M8iC3X1JgMAABGLgfS6ElRuwP4BqOOQUCFy+dibFTCBFJXcPFm1rGQEC/RXIzQSbG7Df37M72rQIlAWKcvfgtNhoJwECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBFoCHQVAYuBX7ol7uQH7rRN5JdCJQO5+MuirE0XbEui/QO57MPc92/8aOOI0COTup/gZJJ7+qxAgMHyBskGXF64uflr38GvpjJMiELPKFJXHHnusaLFlBAgMWKDsZzAhwAHjT+Hhc/dU7mEEU0ikyQQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQITLnAok7anxv4WzZjQyfHty2BlkDu6e+xPu7DK6+8srWpVwIEhiQQg38jCFhUyr5ni7a3jECZQARA4meLY0cOn7RZ3IebNm06abkFBAgMViA38L5stobB1qi+R9938Hh6bH9qfBxPLxxJad8zx9Nc4zXKqhUpnbNipvl60dtn0kWN8EwsU14vEDaf3XP89QsbX8XvAffee+9Jyy0gQGCwArkQYNlsDYOtUX2P/tShlL6w93iKviA+P3DoxGvUeH4fEHYRdNAHnHwt42eLux89uQ9o/T66fPnyk3eyhAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAlMk0FEAJDfwy8Df8jvm6P4nmhu8fHB/Y0Drm9KiFWc2v16yel35jlO8thUqOrL38ZMU4j4UADmJxQICAxfIPXF18crzmoP1B16BMT1Bqw945dCzzRboA6pdyPjZ4vCeL520cdyHAiAnsVhAYOACucG/EWBQTghEYOGTDx9rDvjNmcRg4Kcag4GjtAIOYfjxy06EQXL7Tdvy3AwgrRkp165dO20k2ktgpAK53wMuWj3SatXq5PGeHqGFCH7kyvw+IDW2/+jO1AwCXn/JTIrQg3JCIPrF5UvTq+HJ+S7x84j/D5ov4nMCBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBKZRoKMASDxxtagsefv5RYundlk8sfzwni82B66+fPDJUocYOL1s/fsaH+83gHqBVARkigIguQGIC3b3JQECfRbIfe/Fe5jymoA+4DWLXj6LWUCKAiC5+7CXc9mXAIFygdag+6KtDP5tzPDRGOy7+b7yQb9Fdq1lMVPIY7cfbw7+3X7NbHPQa2vdtL7GwN94On7RQOroBwRApvXO0O5RCeR+/hJaiFBfSpt3HGvO+tTN9Wn2AY1+IEIPn7tOH9AyvLDh8VBjJpWFxQNBFor4mgABAgQIECBAgAABAgQIECBAgAABAgQIECBAgACBaRSYrdrovXv3phj8VVTMAPKayouP3p+e+YN/n57feVdqF/6IvWKb2Db2iX2V1wSWrC4OFpXdi6/t7TMCBPopUDb4N/e92s/zj8uxIrTWbR8w98j2cWnmUOqZu6/K7sWhVMxJCEyhQG7gb2uQ/hSSvNrkeOL7e+84VhhUeHWjip/EQNc41tyRijtM+Ga5cFFuVsoJ59A8AiMTiN+/c2XaZ4GKkNq7b/tJ1+GP+a4RBDnvD37Sl/5k/nHH9fPcvVV2P45rW9WbAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECnQp0FAApOnjMYDG7dFnRqqlb9tyO25phjnj6e6cl9okgSBxDOSEQ99aiFWcWcuQGIhZubCEBAj0L5L7n4v0/vleV1Hz//sE9H0vd9gFzD29P37/9/+xq/0n0L7u3cvfjJDpoE4E6COQGW8bTuae5RPgjnvrez8BGDCYWAjlxVxn8O83fXdpeJ4Hcz12579E61X2QdRnE+3X0J791T3/7lUEaDPLYuRBg7n4cZF0cmwABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAnUTqBwAyT1pNfeE7ro1dND1ieDG4T1f6vk0cQwhkNcYl6xe99oX8z7L3Y/zNvEpAQJ9FMgN/tUHnEDuVx9wdP/X0/fv+D0hkJ/eu7n7Sx/Qx29uhyJQQSD3PTfNg39j4O9Hdx6roNf5JnHsTz0ymGN3XpvR7ZELGB0
4cCA7M+XoauvMBCZXYN++fYWNW7OycPFULIygxqDCek8dSmnzffqANSvzIdPc76ZTcfNpJAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBBoClQMguT+w5gboT5NuzNzRj/BHyyyOFcdUUnZmgdz9yIwAgcEI5Ab/6gNSevHR+/vaB7x88MlGH3DnYC7kmB01d3/pA8bsQqru2AvkvuemefDv5vuOV5r54/K1M+kTl82++vGh9TNp1Yr2t8Tdjx5PMQh4msvypSlrlbsnp9lL2wkMSiD3/TbNIcAPVJylY2EfcP0l1fqAh/YeT4/tPz6oSzo2x83dY7l7cmwapqIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBDoUWBR1f1zf2BdvPK8qoeYyO2O7n+iOfi3342LAcVL116YcoNf+32+uh4v9/T33bt317XK6kVgIgXyfcDqiWxv1UadCGv0P7AXQcDoA+JjmkuuD2g9/X358uXTzKPtBIYi0Pp+KzpZbmBm0baTtOyze46nmKWjrMSg3+3XzKYIMby+nHiieRwjZhCJp8jnyicfPpa2b6qc188dZqyXxxPgnzp0snX8LrBhw4axbpvKExgXgdzvAWUzNIxL27qpZwQz2oUzog+4Y+NsQYhtJt2+MaUIeMQsH2V9wKcePp4u+uiJPqObek7CPhE0fWz/yS3JzUpz8paWECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAYDIFKo0oyg22n126LC1aceZkylRs1XM7bivdMoyWX7Y5nfWfPp/O+f/9TfPj5266L516ydWl+8VKT4DPzwASPrmBKLFOIUCgfwJlg3+nPaTWbramVh8Q7/utPiD6g+gXYl1ZaXfssn0nZV3Zzxn6gEm5ytpRd4HoA4rKtA78DYsIZpSVmOXj89cVhT9e2yu2+cqN5dtESKRscPBrR5vcz9Y2AiBF5amnnipabBkBAn0WKPt5q8psRn2uTi0OF8GMstLqA8p8IiDy5B++oSAg8tqRI2Qy7TNB5X7WKLsvXxP0GQECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBCZXoFIAJPfH1cVnTfeT3+MJ7a8cejZ7d8TsKGf94X9Jyy/d/LqgTCw/feMNKQYElw0AjifLxwwj015ys8zkBiROu5f2E+i3QO57Lfe92e/z1/V48f58dP/Xs9ULn7fe+MfNPmC+VQQno1+IPmD+8oUHiv4l+plpL7mgae5nk2n30n4C/RbIBcHLBrb2uw51Ol48tb1sQG4MVq06a0ds+7lGUKSsRAhkmsuas4pbn/vZpHhrSwkQ6FYg9702rTNAxft/2ewf0Tfe3pj5o0qJGaIiLFhW7n60PHBYtu8krMv9rJG7LyehzdpAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgACBKgLlf23+6RHm5uYKj7Xk7esKl0/LwrmHt2ebGsGOGPhbFvBoDQ7OHqSxouwcZftN0rrFZ51X2ByDfwtZLCTQd4Hc4N/cwPy+V6CmByx7f473/jOuuak04BF+b7nu06X9RNk5asrS92rlftbw9Pe+UzsggUKBF154oXB5bmaGwo0naGHZwN9o5u0bi2esyBHEIOqygdQROJnmkhv8m/vZZJqttJ3AIARyv3PnvjcHUYc6HfMLbd6TP3FZ+cxOC9sSQcCYMSRX2p0vt9+kLM/1jwIgk3KFtYMAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAgW4FKgVAHnvsscLjT/Pg35ido2z2j9M3bikd1NsCjRDI8ss2t7486TWeLl92npN2mMAFi844s7BVBv8WslhIoO8Cue+1stkr+l6Jmh0w3pfLZv849ZKrS8MfreZEPxqzgeRKnCf6m2kuQoDTfPW1vQ4CBv++/io8tv/1X8//Kgby5garzt9u4efXX5If/BuBk7kjC/eYnq/DVCFAYHQCuRDgOWdM5/dmWSgvQjFlYY7cVSzrA2LGkbJZp3LHnKTlMVNKUcn9fFK0rWUECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBCYNIFKAZDcDCDTHAA5vOdL2XshXJatf192/cIVp/7q1QsXve7rI3sff93X0/bFktXnFzbZUx8LWSwk0HeB3PdabmB+3ytQwwOWvS/H7B/t3tfnNynCImWzRZX1N/OPM6mfzy59U2HTcvdl4cYWEiDQd4Fpffr7voP5GTmuWNvdgOjL2+z3eCMEMs0lN/jXLCDTfFdo+7AEcoPs15w1rBrU6zwHDuXfj7sJAEbrIuhW1qe2m3mqXkL9r82as4r71tz/U/a/Bo5IgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgACB+glUCoDk/uhv8G/xBV269sLiFZmlMfC3bJ8je7+a2XO6F+fuy+lW0XoC/RfIDbTPDczvfw3qd8Sy9+Wlay4sDXQUtSb2yZWysElun0lavmT1usLm5O7Lwo0tJECga4HcIPtzVhQPyOz6RGOwY7uZOC5c3X0jygYO73um++NOwp65wb+T0DZtIDCuAqdlZmUY1/ZUrXfZbBwXvb37frFstqPHvp0PnVSt9yRu53eBSbyq2kSAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAQFWBSgGQ3MHKnlie22cSlr9y6NkUH7lSFubI7ZMb4BrbH93/9dxuU7E8Z+OJj1Nx+TWyBgK5wTXTHAIse19e8vbiWYvKLmVZv9Guzyk77qSvy92bk95u7SNQB4Gyp5XXoX6DqMO+Z0YzCNfg3+KrqQ8odrGUQD8FhACra/bSL65tzAKSK2Whk9w+k7Q8F6zRB0zSVdYWAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQ6FWgbAMn9wX/RijM7PdfEbH90/xOlbckFFsp2WrK6fMBwu3OWHXuS1wmBTPLV1ba6C0xrCLDd+/Eg+oCXDz5Z99thoPXL9ZEGfg2U3cEJEBiiQG6Aa1ThwKHRBE+G2PzSU+Vs9AGlbFYSGKhAL2GHgVZsAg/+2P7p7gMm8JJqEgECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBHoWaBsAyZ1hmgMgZbN/5Aap5hxbyxevPK/1aeHrywf3Fy6floU5n717904LgXYSGImAEODJ7GXvx9E3dtM/RpimbL+Xn5nuAMjJV8ESAgSGIZDrA9aUPKl8GPWaxHOUzfIx7U9/n8TrrU0ECBDoREA/cLLWU089dfJCSwgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIDAlAl0HQKbEp7CZR7+dnwEkF1QoPNCChWX7Hnvp8IKtp+vL2VPeNF0N1loCNRcoCyvUvOo9V++VQ9/PHqMXl7J9p30GkBy4p7/nZCwnMFiB5acM9vh1Pfqas2ZKq/bCkdLVPa00+LcnPjsTINCFgNk2O0Pbd7Cz7edvvfdg+Swf0zwT1IWr50u99rnfA16z8BkBAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAtMn0DYAkn
vy76IVb50+rZ+2+NhL/5JtezzFvdtSFnIoC510ez77ESBAgEDnAmVhjCVvX9f5AX+6R9m+x45MdwgwZ2PgV9e3mx0JEOhCYPnS8p0e218+gLds73b7TvPg35zbCy+8kFtlOQECfRDIzbY5zbNArVqRh93XJsSR3zOlXvYtO651BAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIDCZAm0DILlmLzrjzNyqiV9eOvh39fldtz83wLXrA07QjrlgTW5QygQ1XVMIEKiZQFkIcFBVffmZ/YM6tOMSIECAQAcCZQOfv7C3uwDIQ13u10G1x3rTNWcVV9/vAcUulhIYtMC0zgIVruesyM8E9dC+7vqACH+0m+Wpl9lFBn0/OD4BAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIDF+g6wDI8Ks63Wd85dCzUw2weOV5he2fm5srXG4hAQL9EcgNrpzmWaAGFQJcfFbx+1xcyWmfAaQ/d7OjECBAoHeBXBghjhwDeNvN5FFUg7sf7W7QcNGxJnHZaW1mXpnENmsTAQL1FLjo7fkAyNyRlD67p/P38yp9wAsv1dNDrQgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIEBgNAICIB26H93/ROkes6csK11ftnLRivysKtMeAClzs44AgcEJ5EJW0zwL1KC0Z5e+aVCHdlwCBAgQ6JPAFWvzg3/jFB/d2dng3wiMVAmNePp7ny6gwxAgQKAHgcvXlO/80Z3HUgRBqpaY/aOb0EjV49uOAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBCZTQACkz9c1N1NFldOUBUCq7G8bAgQIEJhcAbOATO611TICBMZH4PJGAGR5yYwUMZh3845jlRoUg4Q/cE+1bT39vRKpjQgQIDBQgTUrZ9KqFflTxPv65vuqva+f2Laz0GD+zNYQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAwDQJCIBM09XWVgIECBDoSaDdLFA9HbzNzi8/82SbLawmQIAAgWEIXH9J+a9Q8TT3CIGUPQU+Zv047w9+UrrNMNriHAQIECDQmcAnLivvAx7aezz9ViPcV9YHRFjwvXccS/GqECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgACBTgUWdbqD7QkQIECAAAECBAgQIDCtAv/hV2fSfX+T0lOH8gIRAnlo30/Sh9bPpItWz6TTfjpryOP7U/pCY3CwQb95u4VrwqyonHPOOUWLLSNAgMBABeJ9/e5HZ0rfxyMEct7+n6TL18ykKxozR7X6gH0HU4oAYKxXCBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIBAtwICIN3K2Y8AAQJTLHDsyOEpbr2mEyBAgMA0CyxvhDm2b5pN7739WClDPP397kePNz9KN6ywctWKChtN2SarVq2ashZrLoHhCuRCVhFgmPay/ZqZxgwex0tn+Yg+IMKA8VGlRN9SNmtIlWPYhgABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgSmQ2B2OpqpleMu8Mpzz457E9SfwFgKbNiwobDeLx98snC5hQQIECAw+QJzL01+G9u1MGb1iBBIv8onLis/lgBIv6QdhwCBqgK5AEjV/Sd5uzUrZ9LtG8vftztp//WXNGYJOaWTPaZn25g1pagsX768aLFlBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQmAqB/v3Feiq4NHJUAq8c+n7hqXOD0ws3tpAAAQI9Ciw+67wej9D97qM8d/e1ticBAuMssHbt2sLq7ztY7WnmhTtP0MIPre9PCCSO8/FLZyZIRlMIECAw+QL96gMiUBhhkqcO5c3WnJVfN+lrXsiETtesWTPpTdc+AgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIZAXaBkByA7+OfvuJ7EGnecUrh7qfqaKXfafZXNsJECAwLIHZpcuGdaqTzjPKc59UmSEvyM0C5cm/Q74QTjd1Ar7H2l/yGAD8+etm0/Kl7bct2iKe+h4zibQL1ZyzYnoDInszgSP3Z9EdZRmB4QiUhRWGU4N6nCX6gL+/aTZ1O0tT7P+5Rh/SrpzWZR/T7rjWEyBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgMB4CrT9S7OBNa+/sEtWr3v9ggVf5WaqWLBZ4ZdlAZBFK84s3MdCAgQIjELg2Ev/MorTOueUCuT61lxIdUqZNJvAUAXmjgz1dLU+2eVrZ9KTf/iG9InLqg8CXrNyJn3lo7PNp75H4+YyTzhvNbzbwcWt/cf59YXMvaYPGOerqu7jIpD7PjtwyExQrWsY7+f/4z+9ofl+XvW9OraL8GAEACNA+Nj+cs81ZwkBtrxbr/6vsiXhlQABAgQIECBAgAABAgQIECBAgAABAgQIECBAgACBaRRY1G2jDf7tVq67/aY9APLyM/sL4fzRv5DFQgJ9E8gN+nr54JN9O8e4HWjxyvNSrv0vH9yf2gUFc+09uv/ruVVpmmf/yKJYQYDAUASiH9i7d+9J59r3zPF00erpHZC6ECQG8H780pnGxxuaA3kfb/zoGjNXzA8vxBPc1zYGCl++JqUYMDy/7Ds4/6vXf75w29evnfyvDDSf/GushfUV8Pt29WsTMzpdf8kbmjM6PdboA2Jmp/kzpZT1AfP7ioVnjP6l21mmFh5rHL/O2eR+Tx3HNqozAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIEOhVoGwA555xzCo+ZG/xauPGELRzU4N9pNm13ixw7crhwE3/0L2SxkEDfBAz6Oply9pQ3nbzwp0uOvVT8XpXdoeKKxWetrrjlZG4mBDiZ11WrxkMg1w/kBmSOR6sGW8sIxlzUfNt+fcij7KwxUDhX1pyVWzMdy+cPoJ7fYr8HzNfwOYHBCOT+PyhCbife5wZz3nE+aoT21qyMFnTQBzyTb/E0z/4RKkKA+XvDGgIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIEpldgtl3Tc3/wb7ffJK8vG/z7yqHvd930XMghDrjk7eu6Pu647/jKoWfHvQnqT2CsBXKDf6c1tBYhwFzpxeTot5/IHXbqZwDJ9Y8G/2ZvGSsI9E0g9322r2Swat9OPkUHKvO86O3VBxFPGlku/BHtzP18MmkG2kNglAKrVq0qPP3ckXxorXAHC0sFHvt23nOa+4BAy/UDGzZsKDW1kgABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABApMs0DYAEo3PhUCO7s8PVp1ktLIwRk+Df/d/Pcs2e8qy7LpJX5EL1fiD/6Rfee2ri0Bu8O+gZruoS7tz9Vi04q25VamXPqAs7FYWOslWZkJW9GI6IQSaQWCkAqeddlrh+Q88lx+sWriDhVmBGNxaNgNIzCgyrSX35He/B0zrHaHdwxbI/V/QvoPDrslkn++x/fk+9cIpnggw1zcKAE7294PWESBAgAABAgQIECBAgAABA
gQIECBAgAABAgQIECDQXmBR+01OBEAOHDhw0qbHjvzLScumYcGiFWdmm/nyM/uz68pWtBvgunjl9P7V/+WD3ZmWeVtHgEB1gdwAm6ON0NqS1dM3O1HZ+3GEOGK2itmlnYX2Yp/SAMhZ+VlHql/J8dwyFzQy+Hc8r6daj59AfK/deuutJ1U890TukzacsAUxGPWhfflGffzSmfzKzJov7M0P/F2zciatWpHZcQoWP575NSD3s8kUkGgigaEKZAMgz+Tft4ZawSGfLPq+z+7Jt72bPuChkj5g+dKUpjkEmPtZI/eAgiHfDk5HgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgACBkQlUDoAU1fDlZ55MS9deWLRqopeVDXiOQbwR5uj0ae0xkLqslJ2zbL9JWJebAeSiiy6ahOZpA4HaC6xZsyY9+OCDJ9XzleeePWnZNCxo934c7+ed9o3t+oBO+5RJug45G4N/J+kqa0udBXLfa2VPK69ze3qt29xLKX3y4WPZw3xo/Rs6DmyUDSa+Ym3ngZJs5cZwRW6mmfjZRCFAYPACucDt3JGU4iMCCtNUYlaisj7gwtWzHQc27isJlFy+Zrr7gH3PFN9duWBS8daWEiBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAYPIEZqs0adWqVYWbtZu1onCnCVgYM4CUzQJyeM+XOm7l0f1PZPdZsvr87LppWJG7z/zRfxquvjbWQSD3hNVcOKsOdR50Hcrel4/sfbzj05f1Ae36nI5PNmY75IJGBv+O2YVU3bEVyPUB0aCYDWPayjkrygfjdhqMie3LHD+0vvx8k+6fG/xbdl9Ouon2ERi2QO737n1TOAtIu9k4crMW5a5ZzHBRNgPIh35luvuAx75d/HOG3wNyd5TlBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAhMi0ClAEjuqY+5gfnTgFf2dPdOB//GrCFl+yxd+55pIM228eVn9heuyw1EKdzYQgIEuhbIfa/lZmbo+kRjtGPZ+/KRfZ0HQMr7gOmbaWv+rRCzjRUVg3+LVCwjMBiB3O8CucH5g6lFPY66akX5E+/Lngy/sAXx9PzNO/KzicRA4zjfNJdcOEYfMM13hbYPWyD3/dZp2GHY9R7U+daszIcy7vubY82ZUaqeu6wPiPf/doGTqucZ1+1yIaPcPTmu7VRvAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIdCpQKQCSG/z7yqFnU4QXprEsW/++bLPDpexp7gt3fPGv71+46HVfl4VNXrfhBH5Rdo/lBiNOIIMmERipQNkAm2kNApa9L0e/2MlMULFtvNflStm5cvtM0vLcPVZ2X05S+7WFQB0Ect9vuSdz16HOg6zD5Wvyg3/jae6feqT4ieXz69QMf9x3LMX2ufLxy/Lnye0zSctzs6ksX7485X4/naT2awuBugjkZluY1j7girX59+Z4T//oznywb/41jfBH7n0utvvEZZX+u27+ISfq87CMvrKo+L+gIhXLCBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBCYJoFKf1GOATYx0Kao5J7MXbTtJC1bvPK8tGjFmdkmPbfjtkrhmBj0++Kj+QDIktXnl54nW4EJWZEL0uQGIk5IszWDQO0EcoNsprUPiPf/eH/Oled33lmpD4iwyNzD23OHab7/L1m9Lrt+0lfk+gCDfyf9ymtf3QSyg3/3tw861K0t/ajP9ZfkB//G8WMWkLIQSAxqfe8dx9JDe/N+8dT3aX/ye252Ab8H9OMudgwC1QVyvwfkZmeofuTx3PJD68v7gM/uOd6c3SkXXojlv3XPsRTb5UrM/tHuPLl9J2V5LhyjD5iUK6wdBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAj0IlApABInyP3R/+j+r/dy/rHe99RLNmbrH8GOGABcVmLg7w/u+VjpIOFlv5KfaaTs2JOyzpPfJ+VKase4C+QG2hz99vT2AWXvz/H+/v07fq/0/b21TdnsH2X9zLjfU1Xqn/sZI3c/VjmmbQgQ6Fwg93tABBniY9rKmpXtwxkRAvk//uNPmk+Cj6BHDGS9+9HjzUG/sXzfwfzA3/Cc9tk/wiA3u8BFF10UqxUCBIYkkOsDIsjQ7r1sSFUc6mmqhDMi3HHeH5zoA+LzVh8Qs37E8rIAYDTmjo2V/6tuqG0f5slyfUDufhxm3ZyLAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECoxao/Ffl3JN/j377iVG3YWTnX7b+/aWzcxze86X0vduuSUVPMG+tywUcolHxhPll66c7AJIb/Gvg18hueyeeUoFsH7B/mvuA96WYDSpX4v09+oAjex8/aZNYFuvK+oDZpcsafcD7T9p3mhbkfPQB03QXaGsdBGI2wPgoKrkndBdtO0nLtm+aTcuXlrcowjGt0Md7bz/2ahikfK+UPnHZ7NTP/hFGuXvL4N92d5D1BPovkAvfPra//+cahyPe3ghoRBCkrERAJvqACH20+oAIg+RmBmkdK2b+uHxt+SwjrW0n+TXXB+R+L51kC20jQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIDAQoHKAZDcQJvcAP2FJ5rEr2Nw7ukbbyhtWgxe/f7tv5ee3vrrjdf/s/lx4P/7K+m5Hbelsqe+x0HP2HRT6bEnfWU8HT83+Dc3AGXSTbSPwKgEcn1AvI+1ey8bVZ2Hcd7TN24pPU3YxExP8b4/vw+IZe3c4tjRz0xzKQrPhEfufpxmK20nMGiB3Pdd7gndg67PqI8fA3+3X1P5V6nK1Y2Bvx+/1MDfsqfj5+7Fysg2JECgY4ErrriicJ+y79XCHSZkYQQAP39d+yBgp82NGaYiXDLtJWaWyc0wpg+Y9rtD+wkQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQCIHKf1ku+yNrboDmNBAvXXthpVk6IswQYZmqgZlTL7k6LVm9bhoIs208su/xwnXLly9PAiCFNBYSGJhA2dPfi2Y5GlhFanbgeJ9eftnmSrXqpA+o2rdUOvGYblR2X5X9TDKmzVVtArUXyM2889C+47Wv+6AqGE9orzITSNXzR/gjjqek9IW9xffVlVdeiYcAgREI5H72ilka2s1oMYLqDuWUEdb4yo39C4H0+3hDQRjQSXIzy8T/A+VmJBtQVRyWAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECtRToaIRRbsBN2SDNWra6z5WKmToWrzyvb0ddsvr8tjOL9O1kNT7Q0W9/vbB2ufuwcGMLCRDom0Bu4Nc0hwADd/mlmysFAateiOhPzrhmumeACqvcfZW7D6v62o4Age4Ecj9/xcDfaX0CfEhGaCMGAMeMIN2WeJL8Jy6bFf6YBxiDyotKLohUtK1lBAj0TyB+/ooHMRSVaQ4CtkIb8dpLiT7g72/qX5ikl7rUYd/P7inuA/weUIerow4ECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAAQIECNRB
oKMASG7ATW6QZh0aOKw6vPXGP07xxPZey7L170tvue7TvR5mIvbPzQCSuw8notEaQaDGArnvvaozG9W4aT1XLYKA8f7da4nwR/Qns0uX9Xqosd8/97NF7j4c+wZrAIGaC5TNwJYbrF/zJvWtejHw9+9vekMzxBFhjqolto0ASez78Ut7Gzxc9ZzjsN2+g8fTU4eKa5oLIhVvbSkBAv0UyA2+z83Y089z1/lYJ/qA2XT7xs7DgNEH/I//pA+Yf33j/T/6gaJyxRVXFC22jAABAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAlMnMHO8Uaq2+sCBA+ncc88t3Pznbrqvr7NgFJ5kDBbOPbI9zT28veOaxmDfeIr8qZdc3fG+k7hDDPz9wT0fK2zaD3/4w+zTRwt3sJAAgb4IzM3NpTe/+c2Fx4rgWj9CcIUHH6OFh/d8KT2/88507Mjhjmu9/LLNzX6g4x0ncIdXDj2bnvmPv1XYsieeeCKtXbu2cJ2FBAgMVuDOO+9MW7duPekkEWT4f7e94aTl07ognlz+2LdPhBjmh2PCac1ZM2nNypQuWj2TLmx8dBIYmRbPj+48lu5+9ORfUeO9P/oAhQCB0Qjs2LEjXXvttYUnjz7A+9kJmpgVK0IxEWSY3wfE2njvjxmjLnr7TLp8jT6g6GaK9//oBxaWCKLG/wUpBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIJBSRwGQAIsASARBFpYILpy+8YaFi6fy6xi4GiGQmMGi3SDgRSvObA6aPvWSjSk+V04IPLfjthQDqRcWA78WiviawHAFLr744rR79+6TThqzX8QsGEpqvu9HGDCCbNEflJUI/y1dc2GK8Ic+4DWpFx+9vxGkueu1BT/97Jxzzknf/e53T1puAQECwxEoC4N//rrZdPnameFUxFkmWuD/s/Unae7IyU3ctm1b2rJly8krLCFAYCgCZWHw7ZtmmzMaDaUiTjLRAu++7VjhDCCbNm1K995770S3XeMIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBQVWBR1Q1b21155ZUpnv67sMRAVwGQEyoxiLc1EPrlg0+ml5958qRBwLHN4rPOM2vKwhvpp19HeKaoXHPNNUWLLSNAYEgCV1xxRWEAJPc9O6Rq1eo0EeqI/jA+9AHdXZoXH91ZuGP8DKIQIDA6gQhhRRh37969J1UinnYuAHISiwUdCsST84vCH3EYfUCHmDYn0GeBmIEhvg8ffPDBk4782b85LgBykooFnQrsO3i8MPwRx4nfQxUCBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQOCHQ8QwgMeBr3bp1hX5v/egfpyWri9cV7mAhgQKBCBP94J6PFaxJzSe/x+BDhQCB0QiUPf09gm8xE4hCoBeBCM1877bisN8TTzzRHHzey/HtS4BAbwIRBN+6dWvhQf7fbW9Iy5cWrrKQQCWB37rnWIoQyMJiFsCFIr4mMBqBHTt2pGuvvbbw5P/jP70hrVpRuMpCApUENu84lj675+Q+wCyAlfhsRIAAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIDAFAnMdtrWGHyTG4B/+G++2OnhbE/gJIHDe7500rJYUHbvFe5gIQECfReI9/8NGzYUHjfCWwqBXgVefPT+wkPEvRf9gEKAwGgFymZhKBq0OdraOvs4CTx1KBWGP6INZgEcpyuprpMssGnTphQzgRSVux89VrTYMgKVBGL2p4f2nRz+iJ3LfvaodHAbESBAgAABAgQIECBAgAABAgQIECBAgAABAgQIECBAYMIEOg6ARPtzA3CO7Hs8HTtyeMKINGeYAq8cejblBpHn7rth1s+5CBAo6QMaAZD4HlYIdCsQP0PEzxJFxcCvIhXLCAxfIMJYue9Hg3+Hfz0m6YxlAaIYdK4QIFAPgVwfEN/DMYhfIdCNQIQ/cvfPDTfc0M0h7UOAAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAIGJFegqAJIbgFM2cHNiBTWsrwK52T/iJLn7rq8VcDACBNoKxKCv3JN/X3x0Z9v9bUAgJ1AWJDXwK6dmOYHhC+RCuWUzOAy/ls44TgIx6DcXIIrfAXI/d4xTG9WVwKQI5H4mK5vBYVLarh2DE/jkw8UzyMTskxE+VQgQIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQIEDgNYGuAiDxx9f4I2xRmXt4e9FiywhUEnjx0fsLtzPwq5DFQgIjEYhBmLkn/x7e80UzQY3kqkzGSXM/Qxj4NRnXVysmRyD6gNxgzLsfPT45DdWSoQmUPfk9FzgaWuWciACB1wmsXbs2+/9BuUH8rzuALwgsEHho7/EUIdKiog8oUrGMAAECBAgQIECAAAECBAgQIECAAAECBAgQIECAAIFpF+gqABJouac+vnLo2XRk7+PT7qr9XQjE7B8xi0xR8Uf/IhXLCIxOINcHmAlqdNdk3M98dP8TKX6GKCr6gCIVywiMViDXDzy2/3iKD4VAJwK5QeNlA807Ob5tCRDor0DuZ7MYxP/ZPfqA/mpP/tFy4dEIm5oJdvKvvxYSIECAAAECBAgQIECAAAECBAgQIECAAAECBAgQINC5QNcBkLIn/7746M7Oa2KPqRfIPfm9bMaZqUcDQGBEAjEgMz6KSu57uWhbywi0BHL3Tcw4Y+BXS8krgfoIxPdlfH8WlU89bPBvkYtlxQIxWDz35Pdc0Kj4SJYSIDAsgegDcjNB5QJdw6qb84yXQFlwNBc0Gq8Wqi0BAgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgT6L9B1ACSqkvtj7NH9X0/xJG+FQFWBmP0j9+T3m2++uephbEeAwBAFcoMy43s5vqcVAlUF4meG+NmhqOTus6JtLSNAYHgCEf7IfX+WDeYcXg2daVwEcoPFPfl9XK6gek6rQO73dLOATOsd0V27c6HR+Dljy5Yt3R3UXgQIECBAgAABAgQIECBAgAABAgQIECBAgAABAgQIEJhwgZ4CIPHH2NyTf3NP8p5wT83rUiB3v8T95cnvXaLajcCABcqe/Jv7nh5wlRx+TAVy94uBX2N6QVV7agTKBmbmBnRODY6GVhK4+9H87B+5weWVDmwjAgQGLhCzwub+PygX7Bp4pZxgrAQe2ns8RWi0qETINHd/FW1vGQECBAgQIECAAAECBAgQIECAAAECBAgQIECAAAECBKZJoKcASPwxNvfkX7OATNNt1Ftby2b/yN1fvZ3R3gQI9EsgNzjTLCD9Ep7845TN/lE2sHDyZbSQQP0F4neBXAgkBnTGwE6FQE5g7khKn3rkWOFqs38UslhIoFYCZf8fFLOARMBLIVAmcOPO4j6g7OeLsuNZR4AAAQIECBAgQIAAAQIECBAgQIAAAQIECBAgQIAAgWkR6CkAEkgx6Cv+OFtUnt95Z9Fiywi8TsCT31/H4QsCYyVQNkA/vrePHTk8Vu1R2eEL5PqAqEkuYDT8WjojAQI5gfg+zf0ukBvYmTu
ovWSfH2O2cqWy8Vj76AdQ7QMRfzQf3oC7GFCDRDwLoAT7t7dzNaTiuaKpB27Azzw6+metEuBIYpkPaaTAviH2abWXfzBawkUl0D8FlQ8/cfLUQAAQQQQAABBBBAAIHyBKzv30KBYzFrtT7L1bIK7lLAVp6idqQF0Ketz69nbm7OTIZQIFBMYoqCbNQOBR2FioK7rECl/vmtQDDVLaMYe7U5LZDfWkd/W8r637qWVoCWZVbWusuuxwp40rZYCTehNmj+PXv2RO3P0PKDTrNudKd25Q201D60Xofa99b7iLUNoaA1zZu3Hqt+piOAAAIIINA2gbT+R55tsfoq1s3W8tQ9yLzqJ1v99qx+du961Qe2ElK1DvX7Y4q+e7T6r1n9bF9/Vt89bR2+jroeiyQy19W20HoUt21dW+n6Lfb6QvNp/rTrJq0/tr5QW9Om6RizEjOUOGS9Xq06Nb91/Bd5jVvXnVl98kIJINoo603g86u/Nu/ebWEwHQEJ6M7vX9z8MIhhHW/BmZmIAAKVC1gnKo3eoNcyBYG8AmmjiFnHW951MD8CCJQjoIsi68LIGsWhnDVTy6gKpAWNcx0wqnud7WqrgD4csxJzFcTPKCBt3bPDa3fa6DGcA4a3X1gzAggggAACCCCAAAIIDEdAgTWhz14VBGMFhKS1VEFXofr8MgpYUVC8AqvSAm20bgXrTE1Nme1QYIr1mYFfnx61jWkJEQoIUgBOKChdbdRdVtPaoe2NDTZTexQMZgXVaLsVaK9EmZCP2qj2pgUxaVutgDatv4qStj7ZaX8r8cD/hLatinYVqVP7xyo6TnQ8pBUFZmkfpiUM9S5//vz53n9L+zst6FD7Q69DHWeh4943Qs9pnrREliKfpeg4CBUraSU0L9MQQAABBBAYJQGrb7i2tpZrM61R/NQHHxsbq+xH/Z6sov6y1WdUH9hfI4Tq0fPqh6X1gc+ePZv0+0PLh6Zpfquor6Rtsvosel59XCuAX9cfi4uLVvW1T1f/VXbaHv8j0yYX6zrPXy/F9mGz+u4yUJ1p9Q3iZPWVdT2kY1rHmY6jtOsjPa/5NH9oPh1vadcwVvt1LPQX1ZV2Pa/5x+5tlP4FY/5X47/+9a8HZ9227zn35OxrweeYiIAl8Ls3v+furr6/6WkdyNeuXXN6pCCAQHMEdA4Inci27H3GPfXqPzenobSkFQKfnHvd3fngl8G26hxgXXgEF2AiAghULqCLaOuC/szsuHt531jlbWAFoyGgYPGn//6PwaBxfbi2vLw8GhvKViAwQgL6AEofbIXKD14Yd99/nnNAyIZpYYH/5b/+0d24tfk59f91HUBBAAEEEEAAAQQQQAABBLomoKAYJVv0FwUt5Uls8Mv7oJzQd3p+Hv+o4JL+7+S1fNayWibv9/n6bCEU5OLbokddG+rHl6z51Q59npgVJOPr848KMFLQWNZ29gYDal7ZpBW1Y2VlJW2Wyp5T0F5s4JTMerfNN8raRwVDjJJqFezYX7TutM+B9V2EvpOwiva76piYmEhm+fTTT5N9k3a8aN9Y+0/PqU7t49D+U72hz8aytkPBYgoUiymqq7eorWpPWil6vFn7me9n07R5DgEEEEBg1AVCMXHqH/zhD3+I3vS0pOXoSgrMmNUn8VWqf6E2ZpXefon6l1l9TCULXLhwIavaTc/rGigrQaD/+iCmj6S2WAkMmxpR4oS0mJr+1Vj7zOp3KslcicFFipYLjVxhXRNoHdrnur5IK+qLajsef/zxZDYlTGn/WMeLXk/an5qnv+g51aeipOTQdbDVh03bDtVnLafnekv/saa+eKitvcvo7yLHm+oNvRaVSJKWHKX1PaJfRYqQtYLQhZYCOLe/cNw9suMbRapmmQ4KpN35XZlXOt4oCCDQLAG9NkMdAo0EpVFAtuzN7iQ2a4tozbAE1u/eMZM/1NdQp4qCAALNEtBrU+eA0MWaRgF5ed/XmtVgWtNYAY3+YY0YkHYnwMZuEA1DoAMC+vBOH7qFPuTSKCD/+3/+mtu+tQMQbOLAAhr9I5T8oYo5BwzMSwUIIIAAAggggAACCCDQUgEFJ4USQHQH4VDgS9Zm6hpeQTAKdMkKIA9d62fVr+/xVX/e7/MVGKM2pa1Tnz+HPoO22qQkGW1v3qLvoXx70pZNSyjoX86790+v638dR1kBdHW1ZdD1aL+m3YlXx7We109M8clUoWQULZ92TMbUb82jfaLXcMx+yXOsaX2DHG+hdak+vp+19iTTEUAAAQS6IKDzdn9ctPocOm/qe7KYUlWfImbdMfPofK/gcuvGn76OUF/BP9f/6Ovsnx7zv/poMu5371027/WBvmvSvhxG0XqzbIfRriLrVL9QlqE4UV+fjvfYY94fJ+q/h5bxrzXVvX//fr+KUh513adkFq0jreQ91lSXXk9FjjfrNRYzIt942kZkPZf2Zeztd85kLc7zCHwlkHa8KMCQggACzRNI+5A57TXdvC2hRcMW+OxffmY2wRp+zVyAJxBAoDaBo0ePBtelQE4FdFIQyBJQ4oeCxUNFH5zFfngWWp5pCCBQrYDVR9PrWoldFARiBJQ0Gir6IJnPgkIyTEMAAQQQQAABBBBAAIEuCOiaKPS5mIJjsoJULB8F2ChJQ49llkHq9YkjZbRJdSnYZpBrSZkrGChvIkvI07uUUVeo/phpo3STTTkWSTLqd1I92sf+O+5Bjpf+umP/V2BjWpxVbD298+nYLepjBZtZ3//0rpe/EUAAAQQQGGUB61x46dKlqM0u2m+PqrzEmdQfyhphIHZ1ZfSBB+3T97ZV/a6io2T01lP0b/U9y+73FW1LGcvJsoz+sxIk/LVpGfXl3TbtF43yV8Z1aO+6Bzl2dbOH/qJ2xiSTDJQAog8frJVoFBCN6kBBIEvgi5sfcuf3LCSeR6CBAjrRWCdiPwpIA5tNkxomoL6ClTCkDyzL7nA1bPNpDgKtFtCXJDoXhIoV0Bmal2ndFUgb/cP6UK27Wmw5As0S0HWAPhMKFSV2WSP7hOZnWjcFfvIeo390c8+z1QgggAACCCCAAAIIIBAjYH02FjvCQWgd+r5FgS4KhLKu6UPLWdMUzOQDd6x5sqbr82XV4QPys+YPPa/tUh3Wd5ahZaxpin1RXfp+qmjRtqgO67PzovXmXU77uAntyNtua35//BbdNzo+rl279lB8k47hYewnBc/ptVh0W7yR2q7X8yD72QpiLeP15NvJIwIIIIAAAm0U0Hk61GeO7Y+HRjVoqoPO++qbhLY3ts3qV6mOMvpWCqRXH6doXdoO9Y8GucaI3e6s+cpKmshaT13PD7JvtF+0fG/SvaYNI0lG69XxWsb1gN4rVFfR/rOSxULvK1ZeRv++Hru3Ufon5vlfGeEamjNUtu17zj05+1roKaYh8JXAb9/4L07B4qGii3C94CgIINBMAQ13pWGxQmXL3mfcU6/+c+gppiHwlcAn5143kwDVIR/0w8+vVsQfCCBQiYAuWK1hHt84Mu5eOThWyXqptP0CGinmf/mvfwxuiPr/ug6gIIBAswU0BLM1dPEPXhh333+e
c0Cz9+DwWqcEoaf//o/BRCHOAcPbL6wZAQQQQAABBBBAAAEEmiXw9a9/fdOIHwoCUcBMGUXX9Qr+DgWbWPXrmk3JKQpu0d9lFn3nqM+a1Z6YOyYrIUCjXBQNtMlqu3zOnz/vrBES+pdXOxRAVLZL/3ry/i9LbYvuKqu/tT2yU0Cd2rpr167EMNRuLaf90l/0vUDRElpW686zH9WumGNX23no0CFz+7QNMvHHXe+2qk36jlJBav1F86kN/SXvdmh5BYf64yw2UFTvA9ouPRYNjPRt1/f8vdut6aq3rPcZvx4eEUAAAQQQaKOAzveh78FiYpms/kIdDkX6JL5d2ubTp08nfRQ/zXpUP0T9hqr6wOqnLS0tJe3R31lF2+2vDwbtI2WtK+/z6oOr/6r+no4NbY/6qir79+931j6zjiP1U/VTpKgt+ukv6o+rHTHFt0v9WP1tFe0HtdP3ya35dA2ouvTYW7Ssji899hcdq6F159kO1al9ofVq/8hF/2cVOenY17Wx349Zy1jPW+8zSiqJqXvgBBA1TAkgoYNCz/3Fa+fdozuf1p8UBDYJfH51xf32jb/bNF0T9GIMXVAHZ2YiAggMTUCdXZ2MQuWpV3/qtuydCj3FNASSkcI++q9/E5RQ500XTRQEEGi2gC5+9AVB6CJo+1bnPvyHrzk9UhDoF/ibN9fdL1bv9U9O/o/50Cy4IBMRQKB2gdCXxL4R//Mfv+Z27fD/8YjAA4EfvXvPWaOF6XMgfR5EQQABBBBAAAEEEEAAAQS6LmDdfKeKGygq1kPBSPqcd21t7avPexWMpKLAE/3EBgQNuu/UHh9/osQFFa1byQp61HdIeqyjKKhINvq5cePGV0FG8nj88ceTtqg9lOEI6DjRcav9o+L3hfZP0wL/kgZm/NJ2aHt6t0nHun60PdqusorsQjf75fP5soSpBwEEEECg7QI6H4diIboQ06o+sPoKeuztA/vrA/W5fL+rjv2sPpLao33irw/UN5qYmEj6SeojldlPqmObRmUdOkb04/uxVfVd6/Lq7Yf7bdK6dXz5/rgeyyozMzPBxJfYmMlSEkD04gpdGGgjuQN8Wbt6NOv599ePui9ufhjcuCo+vAquiIkIIDCQgE7i6vCGyiM7vuG++Y8/Dz3FNAQ2EgDtEaD4cJEDBIH2CFgZ6dqCl/eNuTOz4+3ZGFpai8CVq/fct99YD65LHxTFXswGK2AiAgjUKqA7ouiDqVDZv3fM/epVzgEhmy5PYwSoLu99th0BBBBAAAEEEEAAAQTyCFgBZ7oDamgUhTx1My8CCCAQusmjAtt0t2EKAggggAACCNwXCCVlK/hbca1lBoHjjQAC3ROwYm7z3CyvlG/iFaRj3Z3v86u/dnc++GX39g5bnCnw2Xs/M5M/dDwpG4yCAALNF9BrVR82h8qXtz52t989E3qKaR0XuLv6vlMfIVTUr9APBQEE2iGQ1m9764N7TsH+FAR6BY6fCyd/aB6rT9G7PH8jgEBzBDS8rdVv0/u/NdJPc7aAltQtkHYOWFxcrLs5rA8BBBBAAAEEEEAAAQQQaKyAAspOnDixqX3nz5/fNI0JCCCAQB4BJZjpxi79JfSe0z8P/yOAAAIIINAlgbm5uU2JHjqP6iaZFAQQQGAQgdD7iOJwrVyM0LpKSQBRxWmBOr9/e8mt370TWj/TOiqg48EKCteHWXzp39EDg81urUCow+s3RsleSgShIOAFdA74/dun/b+bHpXJSkEAgXYJpL1uX32bBJB27c1qW/ujd+853f09VHQdoNElKQgg0B6B3qFvQ60+fn7d3b4beoZpXRT4yXt2YqgSiZRQREEAAQQQQAABBBBAAAEEEHggEPr+TXcJDQWKPFiKvxBAAIF0Ab2HKHi1t+QNNutdlr8RQAABBBAYVQErKfv0aTvmaVQt2C4EEChXIHRzh7Q8jNDaS0sASbsDfFqwf6hRTBt9gU/Ov24mBemuAjp5UhBAoD0Ces1aJyCdAz459w/t2RhaWrmAEgCtpCB9maE+BQUBBNolkBa0uXbznlPQPwUBHQs/fMce/UNfOC0sLLipqSkSQThcEGi4gF6vJ0+eTF6vSgKxipI/Xn3bft1byzF99ASU/Pejd+1jgRuBjN4+Z4sQQAABBBBAAAEEEEBgcAEr4CwUKDL42qgBAQS6IhAKWuWzma7sfbYTAQQQQCCvQCiOSUnZodG08tbN/Agg0E0BJWTrfaS3FEnIHru3UXorGeRvBQAoWKe/Yb7Op179qduyd8r/y2NHBe6uvu9+9+b3gluvg/jatWvB55iIAALNF9A5wAoA+/Pv/JPbOvls8zeCFlYq8PnVFffbN/4uuA59kaFzgB4pCCDQPgFdA+g8oGuCUPm318bdxM6x0FNM64jAX76+7pQEElv0YZoSTDkvxIoxHwL1COgDbSV/WJ/9hFrx8++MuxcnOQeEbLoy7dtvrLsrV8PnAL3fE2TQlSOB7UQAAQQQQAABBBBAAIEiAnv27Nl0Hb68vOx0Yx4KAgggkEdAwWbHjh17aBG9l+g9hYIAAggggAACYQHOn2EXpiKAQDGBsq7xSxsBRJuhwJy0L2w/OWeP+lCMgaXaJpCMBLAx+odVzp49az3FdAQQaIFA6jlg47VvjfrQgk2jiSUI3B8Nxj4H6PghyLcEaKpAYEgCSuTVSG5W+Zs3153uBE/ppoBGAMiT/CGlpaUlpwtffaBGQQCB4Qso4WNmZib5yZP8oZYfP885YPh7cHgt+Ml798zkD/UfrNEkh9di1owAAggggAACCCCAAAIINEsg9P2bRtKlIIAAAnkFQu8dfDaTV5H5EUAAAQS6JjA7O7sp+fry5ctOPxQEEEAgj4DiX/q/a1dCtn7yllITQLTyw4cPmw1R4O8nKcH/eRvP/O0T0MgfCgAOlbRjJzQ/0xBAoHkCOhHp7q2hcj/4/x9CTzGtIwK/f3vJTALSsaMLJgoCCLRbYH5+3k1OTgY34sYt55QEQOmewC9W7zkF/xYpGlFGdyObnp7edBFcpD6WQQCBYgJKyNIoT0WHs1YC4N9uJAJSuieg5L+0879uBEISePeOC7YYAQQQQAABBBBAAAEE8gmEvkdXsFl/0Ei+WpkbAQS6JhAKNgsFtHbNhe1FAAEEEEAgRoCk7Bgl5kEAgSyBUEJ20YETSk8AUePTvry9u/q+u/PBL7O2kedHUOCz937mPr/66+CW6cv+ogdxsEImIoDA0AR0hxDdxTVU9B5w+90zoaeYNuICaed/zgEjvvPZvM4JpPXp3vrgntMPpTsCSvzRnf8HLfpCW6OBKMmIggAC9QnotafEj5MnTzolZA1Srly95370LueAQQzbtqwSfzQCmFV08wAlglMQQAABBBBAAAEEEEAAAQSyBUIxGKHAkeyamAMBBLoq0P+eoe9oQ8GsXfVhuxFAAAEEEEgT0I0w+2+KrO/R9ENBAAEEYgRCCdlpsbZZdVaSAKLA37QhAnUH8C9ufpjVNp4fIQHt79+/fdrcotAHVubMPIEAAo0WyArmv/3OmY1ksJVGbwONK1dA54C0EcA
G6ciU21JqQwCBMgT0wUfaFwbHz6073Q2c0g0BBf4qALisoi+olAjCB2lliVIPAmEBJXso6UOj76yuroZnKjD1h++sO40KROmGgBIAlQgYKlmfHYaWYRoCCCCAAAIIIIAAAggg0GUBXUedOHHiIYJQ8MhDM/APAggg8CeB0PuFvsvRd/sUBBBAAAEEEIgTCMU39SdYxtXEXAgg0EWB/vcLXef3J5blcakkAUQNUKM0FGmorN+9kwSC6pEy+gLaz7/98d+ZG6ohJa1jxVyIJxBAoNECuotr2snpd2/+H45zQKN3YWmNyzrnZx0rpTWEihBAoFYBnQPS7uj97R+XmxRQ68axsmiBtGQffam0srKS2l+wVnT9+vUkKP3YsWMDj0hgrYPpCHRZ4OLFi0mi1dLSUm4GfUi1vLzsdJ1vFSUFkAho6YzOdI32kpbsc+HCBQIMRmd3syUIIIAAAggggAACCCBQk4BGx9UNeHpLfwBJ73P8jQACCHiB/vcKfYeT9hmeX45HBBBAAAEEEHggoO+4+2+GqRsXcvPCB0b8hQACYYFQQvagAyeM3dso4dUNPlV3jNTdWfUYKlsnn3V//p1/Cj3FtBES+PfXj5ojvig4RIFfOjlSEEBgtAT03p92x+BHdz7tnvruT9341m2jteFszUMCv3vze+7u6vsPTfP/6L3/2rVrnAM8CI8IjJhA1rXAxM4x96vvjrvtW0dsw9mcROAn791zr769bmoo8NcngWt0ASVzFBllQOcSfcjGF1UmNU8gEC2g5Cq9Fot8SK3Xou5CqkAUlaxrAc4BCdPI/nrrg3tOSYBW0ft22g0DrOWYjgACCCCAAAIIIIAAAggg4JLP0Kamph6i0Hct+t6dggACCIQEdKMXjfbriz7LU5wO7xtehEcEEEAAAQTyCczMzDjdUM0XJVbqBmkUBBBAwBJQLoW+j/dF35X2J5T552IfK00AUSMUOKAAYKs8dvAl98SRh4cqteZlevsEPjn3urvzwS/Nhuuisv8uJebMPIEAAq0TUCCnzgEKAAuVbfuec0/OvhZ6imkjIPD7t0+7z977mbklvcG/5kw8gQACrRbIuhZ4ed+YOzNb2aCErbZrc+OzAn+tC1l9CaW7kFn9hjQTfaimi2OuLdKUeA4BW0CJG6dPny78+tPdSfq/MM66FlASyL+9xjnA3ivtfEaju/zl63byh5L/dB1AQQABBBBAAAEEEEAAAQQQKC6gz9EuXbr0VQWHDh0i0f4rDf5AAIF+AQWp9n7urhu5+Bs09c/L/wgggAACCCCQLaDzqs6vvSX0XVnv8/yNAALdFVDCmL6L90UJ2YOO/qG6Kk8A0UoUSNA/nKCm+6LgXwUBU0ZL4Pa7Z9ztd86YG6UDmDv1mjw8gcDICGj4Kt1J2CokAloy7Z6u5D8lAVrl1KlTSf/Aep7pCCAwOgL9d5bq3zKSQPpF2v1/VuCvEjSUBG4V3fFAdyLrvWOKNW9ous4vSjDRBTMFAQSyBZSop9eckjXyFv/BVNqXxVnXApwD8qo3e36dA77943V3+264nToH6A5YvEeHfZiKAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIxAjUkgCihvQPe9TfOJJA+kXa/X9W4K8SP5QAQkEAgW4IKAFEwV9W4RxgybRz+t3V993v3vye2XiGPjRpeAKBkRXIOg+8cnDMvXGEu8C3/QDICvxVwO+1a9eiAn+VAKKg9N4hMGN9NAqBrjV0vqEggEBYQHcm0mssrY8eXvL+VCVaKeEqJpBf61EyoFVIArFk2jU95hyg5A9GamrXfqW1CCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCDQPIHaEkAUXDA9PZ16V8m/eO28e3Tn081TokW5BLKSP7jjYy5OZkZgZASmpqZSzwEkgYzGrv7i5ofutz/+O7d+905wgzgHBFmYiEAnBLLOA2dmx52CgCntFKgi8FfXkAoaTxtNMk1LoxKUMWxm2jp4DoE2CijpQ0kZeo3lLerLLS4u5k6wykoEJAkk755o1vxZ5wC1VskfJOY1a7/RGgQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQaKdAbQkg4tHdWxX4ZQUZjG/d5p767k9JAmnnsZS0Oiv5I89df1vMQNMRQCAgoPd+nQPS7uRNEkgArkWTspI/dA7grr8t2qE0FYGSBXQeyEoIJwmkZPSaqosJ/L1w4YJTQkaRsrq6mgSrX758OffiOvdolAKNVkBBoOsC6ocrEaPoa+nEiRNufn6+EGPMOYAkkEK0Q19I54Dj5+85PVpFyXgaCZaCAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIDC4wPjgVcTXsHv37iTwU0E4oaK7heuu4QogpbRPICb5Q4G/1v5v3xbTYgQQyCOg176CP9PeAz4597rTewmlfQJZyR/aIpI/2rdfaTECZQrEnAeOn1t3P3rXDiAtsz3UVY5ATPKHAn+LJn+olX70KI06kNaPCG2Rgs410kFW8lFoWaYhMEoCStzYs2dPoeQPvX5XVlYKJ3/IUa/drL7gWx9sJBJsnAco7RHw54C05A8l4ZH80Z59SksRQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQKD5ArWOAOI5zp07l9x10v/f/8hIIP0izf8/K/lDW6CAEQVvURBAoNsCuou3gjAVkGkVRgKxZJo5PSb5g7v+NnPf0SoEhiEQcx7gLvDD2DP513nl6j33t2+uu9t37WXLfv9X/0EjGFy8eNFeacozGglEwch5E0lSquQpBBotoNE+9JpJG4XP2gDdxEOJV4MkcPXXrdewElHSrgU4B/SrNfN/n/yRdg5Q4ofOAxQEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAgfIEah0BxDc76wtgjQTy768f5S7wHqzhjzHJH/rCn+SPhu9ImodATQJ6L8gKAmIkkJp2Rgmrubv6fjJ6l87dVik7+NdaD9MRQKAdAjoPZI0Kx13gm78vtY++/UZ68oeSLcq+67sSNzSimI4hBafnLUtLS25qaqpwAkne9TE/AsMSUILFzMxMknhdJPlDr1/dxKHM5A9Z6DUccw7Ien8ZlivrvS+gBMBv/zj9HJD12R+WCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAALFBIYyAohvatZIIJqPu8B7rWY+fvbez9zv3z6d2jgCf1N5eBKBzgrEnAO2v3DcbX/+eGeNmr7hsQmAZQf/Nt2F9iGAQJxAzEggEzvH3K++O+62b42rk7nqEfjRu/fcD99ZT11ZHYG/CnBXQsfCwkJqW6wnFdiu0Q2KJJJYdTIdgSYI+NdF2igbVjt9snbVN3CIPQf8/DvjbtcOq7VMH4ZATJJmHeeAYWw760QAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQSaIDDUBBABxAQAP3bwJffEkRNN8KINPQIxd+gn+aMHjD8RQGCTQMw5YN
u+55JkwE0LM2GoArffPeNuv3MmtQ2cA1J5eBIBBDYEYgKAFfirAGAlg1CGK3D7rnOvvr3uFPybVjRygBIr6ioa3eDYsWPu8uXLuVep0QhOnTrl1GYKAm0X0HvqyZMnW/NaiDkHKAFQiYCcA5pxdOoc8JP30s8BJH80Y1/RCgQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQGF2BoSeAiDYmAHjr5LPuyaOvufGt20Z3b7Rky9bv3nG/e/N77vOrv05tMYG/qTw8iQACfxKIOQc8uvNp99R3f8o5oAFHjc4Bv397yWn0j7TCOSBNh+cQQKBXIDYA+I0j4+7lfSSB9NrV+feNW879zZvrbu1meuDvMN//1adQ8HvRUQ+UtHLgwIE6WVkXAqUItHk0HJ0DlMClx7Sic8ArBzkHpBlV+ZwSAP924xxw5Wr6OYDkjyr3AnUjgAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAAC9wUakQCipsQGACsJRIHAlOEIfHHzwyT548tbH6c2YJiBX6kN40kEEGikQEzAphIAlQTCOWB4u1DngE/Ov+70mFY4B6Tp8BwCCIQEYgOAFfyrIGBKvQK/WL3njp9fdwoATitNeP9XILySQNS3KFI0EohGBNHIIBQE2iCgkW+UQKGRcPKW3bt3O71uh534pNft9PR0ZhLIi5Nj7szRcadRQSj1CSjpQ8kfWecAJdExmlJ9+4U1IYAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIBAdwUakwCiXRAbAPzEkTm3bd9z3d1rQ9ryz9772cZd30+nrl2BUsvLy25ycjJ1Pp5EAAEE+gUU/KvALwWApZUnjpxwjx18KW0WnqtA4O7q+0nyh0YASStNCP5Nax/PIYBAcwViA4AndioAeMzpkVKtgIJ9f/TuuvvJe+l3fNc1wIULF4YeRN6rMUhQvLZH57PDhw/3VsnfCDRKQAkfSna6ePFioXYp0UnB+jrem1B0DlAiS9b27Nrh3M+/M845oKad9qN377kfvrOeuTauATKJmAEBBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBAoTaBRCSDaqtgAYCWAKBFEd4SnVCugYF/d8V3Bv2lFgSMkf6QJ8RwCCGQJ6BwwMzOTeQfjrZPPOo0IxTkgS3Tw53UOuP3uGackwLTCOSBNh+cQQCBWQAHAMaM36O7v339+3GlEEEo1Ams3NerHPafHtKIRBJT80dQE8Pn5ebewsJC2CeZzGhVBQc3aRgoCTRJYWlpKjuusxOlQm3Vca6SGpr5mlQQSM4LPD14Y3zgPcA4I7eMypsWeA7gGKEObOhBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAIF8Ao1LAFHzFQCsL/31mFYe2fEN9+Tsa27L3qm02XhuAIHPr6643735f7isO74reETJH/ryn4IAAggMIhB7B3glfygJRMkglGoEvrj54cY54Hvuy1sfp65A5wAFyDY1kDC18TyJAAKNFIgN2t+/d2M0kNlxpzvCU8oR0Kgf/+1f4u74rkByJX80/RpAIyXo+lKjguQt2rYTJ044HZMUBIYtEPtZSaidOpb9qB+h55s0TQkges1mFUaEyhIq9nzsqB/q++scQJJcMWeWQgABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBIoKNDIBRBujAGB94X/x4sXMbXvs4Etu+/PHuRN8plT8DLF3fFeNs7OzyR1Emx74Fb/1zIkAAk0Q0Dkg5u6/jAZS/t7Kcw5oS/Bv+UrUiAACVQvoOkDnAl0XZBXuBJ8lFPf8lasbo36cW3c3bmXPPzc3l1wDZM/ZnDnyHFP9rVags0ZN0HmPgkDdAnof1Eg2GvmjSDl8+HCSrNuma3Ylu8SMDCgPjQalUaE0OhSluIDOAa++nT3yk9bA50DFnVkSAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQGFSgsQkgfsNi7/6r0UCeOHKCO8F7uAEe73zwS/f7t5cyR/3QKhQEpeAvCgIIIFCFgILcTp48mVm1RgN54sic27bvucx5mSFdQCM/fXLu9cxRP1RLG4N/07eeZxFAoGkCGrlBAcBZIwOq3RoF5MdHxt2Lk2NN24zGt0cJH999e939YvVeZlsVQK5RnxRQ3sYyaCA9Qc9t3OvtbrMSl9Qf1vth3qKRGfR6bWvikl6vsTcGUfLHGxvngJf3cQ7Ie5xo5KdXN84Bb30Qdw7Q50B6L6QggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCAxHoPEJIGK5fPlyEvgVc/ffLXufcU/O/r1TQggln8AXNz/cSPw47T6/+uvMBRVIcuHCBac74VIQQACBKgXy3P1X5wAlAz668+kqmzSSdX956+PkHHB39f3M7Wt78G/mBjIDAgg0TiA2KVwN3793407wL4wlj43bkIY1SEG//+1f7rkfvrMe1TIFkSuYXNcCbS+6xlRQfUxyUf+26jxIAHS/Cv+XLaCEDx2jSgApUk6dOuX03jkKJTYpXNs6sXNsIxGEc0DMfvfngJ+8t+70d1bR5z86B/A5UJYUzyOAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAALVCrQiAUQESv7Q3X8VqBNTHjv4ktv+/HGnu8JT0gUU9Hv7nTNOI3/EFN3tV1/6K/CJggACCNQhkOfuv2qPRgLZ/sJxkgEjds763Tvus3/5WXIeiJg9uYO0EgA5B8RoMQ8CCJQpoOsA3Qk+9i74GglEI4JoZBDKwwJ5g3619KiO/KcA+dOnTyfXmw8rZf83Sgkx2VvLHHUKcFxu1layls4BsUlbJANuNvRTipwDRimhyDvwiAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCLRVoDUJIB5Yd35cWFiICtBR8ocSQR77zy+RCOIBex590O9n7/3M6e+somBf7nSbpcTzCCBQpYDugKzALyWExBSSAW2lIucABX7Nzc3ZlfIMAgggULGA3v91LaBrgtiiRJBXDnI3eHkVCfpVkoOuAUb5ju9KKlL/IvZmA/3HHoHR/SL8X1RAxyAj06TrKTlG54HYQiLIA6ki5wC99zPqxwND/kIAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQSaINC6BBCh5Q3QIRHk4UNNI3589t7bGyN+/I+oxA8trVE/FPi1e/fuhyvjPwQQQKBmAQX/KkhTySCxhRFBHkjlTfzQktzh/IEffyGAQDME8t4JXq1WELASQZQQ0rVy45ZzP3xn3b31wb3oTVfyd9cS/9S3UPC9rjfzFl0nKUha50wKAnkFiiS39a5jdnY2uV7vyghteT8TktXEzvvngJf3dfMc8JP37p8DlAQSU3QsnThxwinhhoIAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggg0CyBViaAeMJz584lATqxd4LXcl0OAv7i5ocbi
R8/20j8+KUnzHzUl/4KZFICCAUBBBBokkCRIE2dA7b91V+7LXunmrQptbRFyX+33znj7q69H53818Xg31p2BitBAIHSBPKMDuhXumuHc0f/atwpCFh/j3JRwsdb/3rPXbkan/ghj64Fk/ceAwTi92rwdx0C6tPmGeGut01dTzwqcj2wfatL3v9fOTg+8ueAX6zec+c3zgN6zFO4AUgeLeZFAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEE6hdodQKIuIoG6GzZ+8xGEPBGIPBGMPAoF93pXcG+SvxQAkie4u/425W7iOaxYV4EEGiGgM4BPvg3T4se3fm0e+zgS27rxLNOo0SNclHS351//aX7/Oqvc21ml4N/c0ExMwIIDF2g6LlADddoIIc2fl6cGHMKCh6FomQPJX38Yu2ei73Tu99ujV6hUf8mJyf9pM4+FhllxmORQOkleEwTKDKKha9PxxijM9zX8OeA06dPO/2dp/hRQUbpHLB2c+McoOS/jZ8i5wB9DsRIRnmOIuZFAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEE6hdofQKIJ1OAzsmTJ93ly5f9pKhHBf4qAFiBwAoIHpVyd/V9l/zkuNO733bu9ugleEQAgbYIKIBuYWHBaWSoPMWfA7ZOPuv0MyrFj/iUZ7QPv+0E/3oJHhFAoG0CRc8Ffjt9Msj+ve0bGUR3d7+08aPkjxu3/BbFP+q9n6DfsJdPNM0bWK7aOKeGTZnq3Pz8vCuSsOCPK43SqdE/KA8E9BrV9YBes0WKzgF6/1dSYNtGh9J7vz8PFDkH6FjSOUAJ4BQEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAgeYLjEwCiKdWAoi+9M+bCKLlH9nxjSQAeMveqVYGAg+S9KHtJ/BLChQEEGizwCDngLYng3x+deV+4t9GAuCXtz7OvRs5B+QmYwEEEGiowKCJINos3RV+/977I4RMfLN5o4PoDu9Xrrok4UNBv0UL7/1xcjqmdLOBixcvxi3QNxcjK/aBdPhf9VWPHTvmdEzlLRr1Q4kfumEDxRbw5wC9Xoskbqlmfw5QQsizGz9NGyGq9xzw/kbyR96RPrweiR9egkcEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEECgXQIjlwDi+QcJAlYdCgTesveZjZ+p5LGJo4PoDu+fX/31xs/9oF+/7XkfCfzKK8b8CCDQdIFBzwHaPo0I0pZzgM4F63fvFNotOgccPXqUO/4W0mMhBBBosoCCgHWHfY0OVTQI2G+fDwbWo+4Mr6Dguoru5q5g37WPNhI+fqPH4sG+vs30/71EvkcFlCsRpEjwvgKtFbwve0r3BPQepGMn72h1Xmpubi4ZoUFJIJQ4AZlrNJCiI630rkXv/RPfvJ8YMrGz/nPA9Vv33PtK+ivxHHDixAmSiXp3Mn8jgAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACLRIY2QQQvw/KCAJWXT4hRIkgSgx59JtPJ9P8eqp+1N3cv7z12/sJH79ZcV98dLVwsK9vq+4cqi/9CULyIjwigMCoCegccP78+cLBdt5D54BHv7nXbfnWVPL+r3OBRo2qqyi544uP7if9+eS/ogkfvs1679cdyTkHeBEeEUBgVAUUBKygawUBFwnct1yUCLJ7x0ZQ8EYw8Pat94ODH996f+48CSJK8FBwr8raTec+/Q/nVjcSPj69e3+Ej/s1Dv5bgePq/+u9X8kIlGICPqhco04WKdoHSgQhkL+IXjuX0fuPkj907OQtk5OTbnFxkf5aXri++f05YHV1te+Z4v+27RygLZ2dnU0+A9JxRUEAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQaK/AyCeA+F2jYC8F6eiurUUCL3w9vY8+IPiRHU+5R578RhIM7AOCddf4vEUjeajcT/bYSPj45EHSR966rPkVaOS/9Cfwy1JiOgIIjJqAzgFl3QW+10YJgVWfA8pI+PNtJvjXS/CIAAJdFfCJgWVeEzTdUol+GulJiQckHZS3txRIrqB+HVN5i/aDEnE0qgNldAXU/zx27BjHSIN2sV63uibo0jlAyR5+tD/OAQ06GGkKAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAgggMIBAZxJAvJGSP/Rlv770L/Puj75+61GJIT45RPOUGdBrrbN3ug/8UvIHBQEEEOiygO4AfOnSpeRcUJeDTxj06/OJfv7/qh8V+KURnwj+rVqa+hFAoC0C/pqg7vNBXT4+4Ffv+yR9V6u+tLSU3GigyE0GtJ80GogeKaMjoGPBHxdFtkqvW436wWu3iF78MvpcyJ8Dirx+49dU/5w6dnQcqf/PcVS/P2tEAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEqhboXAJIL6juyKkv/c+fP19rMkhvG6r8m8CvKnWpGwEE2i4w6ucABXv5u/0S+NX2o5X2I4BAlQI+GeTKlSvJtUFbA4GV8H3o0KEk6Jf3/SqPmM1165jRSA+6tixSNBKIRgTh7vxF9Jq1jEaE0bGgfmbeotetEj8UuE+pV0CvXSWDaP8V2Xf1tja8Nv/5j84FJJWFjZiKAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIjIpApxNAeneivuTv/dK/97k2/U3gV5v2Fm1FAIGmCPhzgA/+bUq78rbDB34pcJDg37x6zI8AAgjcF9AogQoC1jlBj01NCNF7vvr++/fvTx5JHhj+EazjheD/4e+HYbSAJKBhqFezTs4B1bhSKwIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIBAeQIkgAQsFbyh4B0f9KUAgKYWAr+aumdoFwIItFlACYGcA9q8B2k7AgggUJ6ArgX0s7a2ljzq77qTQtTn18/ExETyqMQPSnMF5ufn3cLCQqEGKolTo0CQyFmIbygLLS0tJfu7yPuCXtdnz55NXtdDaTwrzRRQorg+H9Kjrg+GdQ7Qe4LOAXr/13FD0l/mrmMGBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAYGQFSACJ2LUK5NCX/PrS3wd+6cv/uou+5O/90p/Ar7r3AOtDAIGuCuj9358D9P6vc0LdRe//Og8Q+FW3POtDAAEENgv46wOdE/Tz6aeffnVu8NM2LxWeoiBevb+r6G+9z/tp/jF5kl+tEtBxoNFA1H/IW7TfT5065ebm5vIuyvw1Cqg/ePLkSfZxjeZNWVWZ5wBtk/9sR699nQP8NM4BCQW/EEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQT6BEgA6QPJ86+CefwX/4MEffl1KrhXPyr79+9PHhUIwJf+CQW/EEAAgUYJKOhP5wAf2Kk7AqvkDfz1G9V7DlAg8OOPP54EBOsc4IPC/Lw8IoAAAggggEA7BM6dO5ckCajPkLeoP6DRQOgH
5JWrdn7tSz/qR5E1aZQXjfqhPh4FAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBDIK0ACSF6xnPPPz8+7hYUFcynd2VXzUBBAAAEERk+Ac8Do7VO2CAEEEEAAgbwCShjQSBFKBilSNBKIrhtJGCiiV+4yFy9eTPalEn7zFiX7KvGDhJ68csyPAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAr0C473/8DcCCCCAAAIIIIAAAggggAACCJQnoMQNBf4vLy9/NeJjnto12sSePXuckg8owxFQwsfMzEzyUyT5Qwk8KysrJH8MZ/exVgQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBEZKgASQkdqdbAwCCCCAAAIIIIAAAggggEATBTTyw7Vr1wqN5qFRRJSAMD097YokIDTRoy1tUgLO1NRUoQQc7XMlfmhUOEZwacsep50IIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAgg0W4AEkGbvH1qHAAIIIIAAAggggAACCCAwQgJKBig6GsTly5eTZATVQalWYHV1NbE+efKkUwJOnqJkj8XFxWTUl8nJyTyLMi8CCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIJAqQAJIKg9PIoAAAggggAACCCCAAAIIIFCuwO7du5PkgAsXLuQeGULJCAsLC27Pnj1OCSGUcgXkq6QPjfqhJJC8ZXZ2NhnpZW5uLu+izI8AAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCGQKkACSScQMCCCAAAIIIIAAAggggAACCJQvcPjw4cLJAtevX3fT09Pu2LFjuUeoKH9LRqPGixcvJokfS0tLuTfIJ/WcPXs2d1JP7pWxAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAgh0VoAEkM7uejYcAQQQQAABBBBAAAEEEEBg2ALbt293i4uLyYggk5OTuZtz7ty5ZDQQPVKKCfhkmpmZGae/85ZTp04liTwHDhzIuyjzI4AAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACuQRIAMnFxcwIIIAAAggggAACCCCAAAIIlC+g5IGVlRWnZAIlheQpt2/fTkYC0YggRRIY8qxr1Oadn59PRv24fPly7k3TPrt27ZpTHRQEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQKAOARJA6lBmHQgggAACCCCAAAIIIIAAAghECCiZQIkghw8fjpj74VmUxLBnzx4SEh5mCf4nq6mpKbewsOCUQJOnKEHn7Nmzyagtu3fvzrMo8yKAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAgMJkAAyEB8LI4AAAggggAACCCCAAAIIIFCugJIKLly4kPwUSTBQUoMSQYqMalHuljSvNiV7nDx50mm0lNXV1dwNnJ2dTUb90CMFAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBCoW4AEkLrFWR8CCCCAAAIIIIAAAggggAACEQIaBUSjgczNzUXM/fAs169fT5IcZmZmco9w8XBNo/PfxYsXk8SYpaWl3Bs1OTmZjPihkT80AggFAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBAYhgAJIMNQZ50IIIAAAggggAACCCCAAAIIRAgo2WBxcTFJBFESQt4ySNJD3nU1df5BkmHkf+rUqcT/wIEDTd1E2oUAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACHREgAaQjO5rNRAABBBBAAAEEEEAAAQQQaK+Akj80GoiSQfKOQHH79m138uTJZESQ1dXV9iIUaPn8/Hwy6sfly5dzL62ED5mrDgoCCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIINAEARJAmrAXaAMCCCCAAAIIIIAAAggggAACEQJzc3NJUsLhw4cj5n54FiVBTE1NJQkNSgoZ5aJt3bNnj1tYWMi9mUqwuXDhglteXna7d+/OvTwLIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACVQmQAFKVLPUigAACCCCAAAIIIIAAAgggUIGAkhIGSVBQUoQSQYqMilHB5pRapRJbjh07lox2cv369dx1K8Hm2rVrrkiCTe6VsQACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIJBTgASQnGDMjgACCCCAAAIIIIAAAggggEATBA4cOJCMBnLq1KnczVFyxPT0tJuZmXFFEiVyr7CGBc6dO5eM+qHHvGVycjKxXFxcdBoBhIIAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCDRRgASQJu4V2oQAAggggAACCCCAAAIIIIBAhICSFebn55PkBSWE5C0XL15MRgNZWlrKu2hj5vfJLBr5QyOA5CnyU9LHysqKUxIIBQEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQaLIACSBN3ju0DQEEEEAAAQQQQAABBBBAAIEIASUvLC8vJ8kMeUewUNLEyZMnk0SQ1dXViLU1Yxa1W8kve/bscZcvX87dqMOHDyeJH3Nzc7mXZQEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQGAYAiSADEOddSKAAAIIIIAAAggggAACCCBQgYCSGa5du+aU3JC3KPljamoqSQbJO5JG3nUNOr8SPtTWhYWF3FXt3r3bXbhwIfnR3xQEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQKAtAiSAtGVP0U4EEEAAAQQQQAABBBBAAAEEIgQ0AogSHDQiSJEEh6WlpSS54uLFixFrq3cWJabMzMy46elpd/369dwrV4LMyspKoQSZ3CtjAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAoGQBEkBKBqU6BBBAAAEEEEAAAQQQQAABBJogcODAgWQ0kFOnTuVujpIrlGihnyKJFrlXGLGAElP27NnjiiSmyEKJH4uLi04JMhQEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQKCNAiSAtHGv0WYEEEAAAQQQQAABBBBAAAEEIgXm5+eTRBAlQeQtSraYmppyqmNYZXV1NRnx4+TJk04jgOQpSvZQ0odGQ5mcnMyzKPMigAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAKNEyABpHG7hAYhgAACCCCAAAIIIIAAAgggUK7A7t27kySIs2fP5h4BQ0kXCwsLSSLI5cuXy21YSm1ar5I+lIBSZL2HDx9OEl/m5uZ
S1sJTCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIBAewRIAGnPvqKlCCCAAAIIIIAAAggggAACCAwkMDs7myRF6DFvGWQkjrzr8iOPLC0t5V3U+WSXCxcu5E52yb0yFkAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBGoUIAGkRmxWhQACCCCAAAIIIIAAAggggMCwBbZv3+40Esjy8rKbnJzM3RwlZezZs8cpSaPscv36dTczM5P86O+85dSpU25lZcUdOHAg76LMjwACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIINF6ABJDG7yIaiAACCCCAAAIIIIAAAggggED5AkqSULKEkiaUFJKn3L59O0nSmJ6edkUSNULrUmLJ1NRUocQSvy3z8/O5tyXUFqYhgAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAJNFCABpIl7hTYhgAACCCCAAAIIIIAAAgggUJOAkiaKjppx+fLlZDQQ1VG0rK6uJokfJ0+edEosyVMGHc0kz7qYFwEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQGLYACSDD3gOsHwEEEEAAAQQQQAABBBBAAIEhC+zevdstLy+7CxcuFBpBY2FhIUkEUUJIbFGyh5I+NOqHkkDyltnZWXft2jWnRwoCCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIINAFARJAurCX2UYEEEAAAQQQQAABBBBAAAEEIgQOHz6cJFXMzc1FzP3wLNevX3fT09Pu2LFjmSN5XLx4MUkYWVpaeriSiP98ssrZs2cLJatErIJZEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQACBRgqQANLI3UKjEEAAAQQQQAABBBBAAAEEEBiOwPbt293i4mIyIsjk5GTuRpw7dy5J7tBjf/FJIjMzM5lJIv3L6v9Tp04lCSoHDhwIPc00BBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEBgpAVIABnp3cvGIYAAAggggAACCCCAAAIIIFBMQEkWKysrSTKIkkLylNu3bycjgWhEECV9qMzPz7upqSl3+fLl5P88v9SWa9euJXXkWY55EUAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQACBURIgAWSU9ibbggACCCCAAAIIIIAAAggggEDJAnNzc0kiyOHDh3PXrGSPPXv2JD8LCwu5R/1Q4snZs2eT0Uh2796de/0sgAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIjJIACSCjtDfZFgQQQAABBBBAAAEEEEAAAQQqEFDyxYULF5KfIokYfhSQPE1T4olG/Zidnc2zGPMigAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIjK0ACyMjuWjYMAQQQQAABBBBAAAEEEEAAgXIFNArIysqKU3JGVWVycjIZ8WNxcdFpBBAKAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAwH0BEkA4EhBAAAEEEEAAAQQQQAABBBBAIFpASRlKzlAiiJI1yiqq99SpU0m9Bw4cKKta6kEAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBEZGgASQkdmVbAgCCCCAAAIIIIAAAggggAAC9Qko+UNJIGWM1KGED9U1Pz9f3wawJgQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAoGUCJIC0bIfRXAQQQAABBBBAAAEEEEAAAQSaJKBEEI3eMUjZv3//wHUMsn6WRQABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEE2iDwSBsaSRsRQAABBBBAAAEEEEAAAQQQQKBZArdv33YnT550586dG7hhCwsL7vz588loIocPHx64PipAAAEEEEAAAQQQQKAuAfWH1ZelIIAAAnkENJqmbqjQW1ZXV5Pr7N5p/I0AAghkCei9RO8p/UWf2+l9hYIAAgjkEaCPkkeLeRFAIE3g6NGjbnZ2Nm2WVj1X5/Va/3txXf26/n7lMLe5rs/b+rdZB+X09HQtx2b/a6SubdbGLS8vP7SNw9rmOo+x/m0e1uvqIfgB/yEBZEBAFkcAAQQQQAABBBBAAAEEEECgawL6AEofiigJpKxy/fp1NzMz45QAog82d+/eXVbV1IMAAggggAACCCCAQGUC6sdevny5svqpGAEERlMgdD2tabyfjOb+ZqsQGIaAgql4TxmGPOtEoN0C9FHavf9oPQJNEti/f3+TmjNwW+q8Xut/Lx5Wv26Y2zzMz9vq6kP3v0a6uM11HmP9bwLDel31t2OQ/8cHWZhlEUAAAQQQQAABBBBAAAEEEECgOwL6IER3ADl27Jjr//CxLIWLFy+6qakpt7S0VFaV1IMAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCIyEACOA5NiNCkI5ffp0jiWcU1ZWWtGw4FeuXEmbZdNzJ06cSO6IuukJJiCAAAIIVCag7N6FhYVc9VdxDugf/i1Xg5gZAQQQQAABBBAoKKBkDyVk5O0PFVxdklyiEUZ0zXz27Fmn4XcpCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIBA1wVIAMlxBBw4cKD0u5wqODgrQLi3idu3b3dqBwUBBBBAoF4Bfw7I856d1cK85wDVpwBICgIIIIAAAgggUKeAEmE14keRftDu3bvd4uJich2rhI5z587larpGHNFoIHNzc+7UqVNO18QUBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEECgqwLjXd3wItutQBONvjHMovUT8DLMPcC6EUCgywIKOhxmmZ2ddQqipCCAAAIIIIAAAnUIaNSPmZkZNz09XSj5Q32nlZWVZARLXccqkXV5eblQf0ajjygRRCNzUhBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAga4KMAJIzj2vu46ePn3aKRCm7qKAGa2fggACCCAwHAElYCwsLBQKgCyjxcNOQCljG6gDAQQQQAABBNohoIQL9XuKXPtq5DSN+jE5OblpY/XctWvX3Pz8fFL/phlSJmgEEiWkqA4lk5AYm4LFUwgggAACCCCAAAJDF9i27zm37a/+eujtoAEIIDBcgd+/veS+uPnhQI2YeOpr7o3/558NVAcLI4BA+wXWfvtH9+r/+z8G3pAnjpxwj+7cO3A9VIAAAu0WoI/S7v1H6xFoksBba1+4t1a/aFKTam3Lozufdk8cmSu0zkHfi3WdqOvFvGXQfuUwt/nlyUfdyxOP5t1kN+g2a4W/Orot93q1wKCvkUE+Y/ztG39XqM
1+oWFt8yCfg3z7/B3f/EKPRa+Xvrh51f3+7dOF1ll0IRJAcsopCUOjcCgQpu7C6B91i7M+BBBAYLOAkjCOHTu2+YmKpzD6R8XAVI8AAggggAACicDq6qo7efKku3z5cm4RXS+rrxRz4wIlgKh/o35V3nVpfo0Gomtk1UNBAAEEEEAAAQQQQKCJAo88+Q23Ze9UE5tGmxBAoEaB8T/7TwOvbfuWMffsbr7WHxiSChBAIBFQ8gd9FA4GBBCgj8IxgAACZQlcufFlWVW1sh69nxbtWw36Xqwg+WFcKw5zm3dtHx/KNuvgLGo96GtkmJ8xDmubh/k5SJuul8Zb+a455EYrmEWBLXUWrS8miKbONrEuBBBAoIsCw0rEYPSPLh5tbDMCCCCAAAL1CWikDyV+KLEib0KGWnn48OFkZI88160awWN5eTkZzSPvNbbaqxszFG1vfbKsCQEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQKE+ABJAClgpM0Z1G6yyM/lGnNutCAAEE0gXqTsYYVtJJugLPIoAAAggggMCoCFy8eDFJpFhaWsq9ST6J48KFCy5vEodfmfo6165dK3TTA41YMj09nSSvKCmEggACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIjLIACSAF967ualo0uCXvKrWePHdRzVs/8yOAAAII5BOoOyGj7oSTfBrMjQACCCCAAAJtFbh+/bqbmZlJfvR33qI+ysrKijtw4EDeRTfNr+vexcXFZESQycnJTc9nTVDyyp49e5ySWSgIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggMCoCpAAUnDPKjilrlFAGP2j4E5iMQQQQKBCgbqSMupONqmQjKoRQAABBBBAoEECSpiYmpoqlDChhA+N2DE/P+90bVxmUd1KKlFfK2/dGgFECS0aEaRIQkuZ20FdCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIBAFQIkgAygWscoIAp4YfSPAXYSiyKAAAIVCdSVmFFXoklFTFSLAAIIIIAAAg0TuHz5cpL4cfLkSaeEiTxF16dnz55NRunYvXt3nkVzz6vkkqKji2gbNRqI6qAggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAKjJEACyAB7U8EvVY8CwugfA+wgFkUAAQQqFqg6OaOuJJOKmageAQQQQAABBBogoGQPJX1odIzV1dXcLVK/RKN+6LGuoiST5eVld+HCBafr77xlYWEhSQRRQggFAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBAYBQESQAbci1WOAqIAF0b/GHAHsTgCCCBQoUDVCRpVJ5hUSEPVCCCAAAIIINAggYsXLyaJEEtLS7lb5ZMwNPJHkSSM3CsMLHD48OEk+aTI9fH169eTpJdjx47lHvEk0BQmIYAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACQxUgAWRAfgXAVDUKCKN/DLhzWBwBBBCoQaCqJI2qk0tqoGEVCCCAAAIIIDBkAZ/8MDMzUyj5Qf0cjfpx4MCBIW+JS5JPFhcX3crKipucnMzdnnPnziVJMHqkIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACbRUgAaSEPVfFKCBKLClyd9MSNocqEEAAAQRyCFSVqFFVYkmOTWNWBBBAAAEEEGixwPz8vJuamnKXL1/OvRVK+FDih+poWlHyh5JAlAyi6+Y85fbt204jgUxPT7vV1dU8izIvAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCDQCAESQErYDQo6KXsUEEb/KGHHUAUCCCBQk0DZyRpVJZXUxMFqEEAAAQQQQGCIAkr4UOLHwsJC7lE/dG174cIFt7y87Hbv3j3ErchetW6YoESQw4cPZ8/cN4c3UoKLkkIoCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIBAWwRIAClpT5U5CoiCbhj9o6QdQzUIIIBADQJlJ2yUnVBSAwGrQAABBBBAAIEhCww6uoWuQTXqR5GEimFtupJUlLCinyIJK0qSKTpKyrC2mfUigAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAQLcFSAApaf+XOQoIo3+UtFOoBgEEEKhRoKykjbKTSWokYFUIIIAAAgggMCSBc+fOuT179jg95i2Tk5PJiB+Li4tO17VtLEpa0WggRW6kcP36dTc9Pe1mZmYYDaSNO582I4AAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggg0DEBEkBK3OEKNhk0YEbLFwlaKXEzqAoBBBBAoIBAWYkbZSWSFNgEFkEAAQQQQACBlgn45IVjx47lTl7Qtaf6HUqcOHDgQMu2fHNztT1KYim6PRcvXkySaJaWljZXzhQEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQKAhAiSAlLgjFHCi0TsGKYz+MYgeyyKAAALDFRg0eaOsJJLhKrB2BBBAAAEEEKhDYH5+PklYuHz5cu7VKeFDiRKqY9TKICOa3L592508eTIZEWR1dXXUaNgeBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEBgBARJASt6Jg4wCwugfJe8MqkMAAQRqFhg0gWPQBJKaN5fVIYAAAggggMAQBJTwsWfPHrewsJB77bt373YXLlxwy8vLTn+PctG1+bVr19zhw4dzb6aMp6amkmQQJYVQEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQACBpgiQAFLynhhkFBBG/yh5Z1AdAgggMASBokkcgyaPDGFTWSUCCCCAAAII1CigRIRjx44lo1Ncv34995qVEKFRP4okROReWUMW0PX5IAkvS0tLSSLIxYsXG7JFNAMBBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEui5AAkgFR0CRUUAY/aOCHUGVCCCAwBAEiiZyFE0cGcImskoEEEAAAQQQqFlAiQga9ePcuXO51zw5OZkkfiwuLjpdd3axHDhwIDEo0t9Sss3MzEzyUyTxpovebDMCCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIVCdAAkgFtgqq0WgeeQqjf+TRYl4EEECg2QJ5gwuLJo00W4HWIYAAAggggMCgAqurq8mIHydPnnQaASRP0XWpkj406oeSQLpe5DE/P594KCEkb9EoIFNTU07JOBQEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQGBYAiSAVCSfZxQQBaJofgoCCCCAwGgI5E3oyJswMhpKbAUCCCCAAAIIWAJK9lCyghIOLl++bM1mTj98+
HCS6MB15mYiJcMsLy+7s2fPOl2L5ynaL0rG0X5Rcg4FAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBCoW4AEkIrEFUgSOwoIo39UtBOoFgEEEBiiQGxSR95kkSFuEqtGAAEEEEAAgRoElPChBIOFhYXca9u9e7e7cOFC8qO/KbaA+mDXrl1zesxblPyhfVRkZJa862J+BBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEECgV4AEkF6Nkv+OGQWE0T9KRqc6BBBAoCECsYkdsYkiDdksmoEAAggggAACFQlodImZmRk3PT3trl+/nnst6lOsrKw4jf5BiRPQ9bhGAtGIIEUSZpaWltyePXvcxYsX41bIXAgggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAwIACJIAMCJi2eMwoIIz+kSbIcwgggEC7BbKSO2KTRNqtQOsRQAABBBBAIEtgkESCAwcOJIkf8/PzTteglPwCMtRoIFl9t1DNgybuhOpkGgIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAgggYAmQAGLJlDQ9bRQQBefoeQoCCCCAwGgKZCV4FAkyHE0ptgoBBBBAAIFuCqyurrqpqSl38uRJp0SCPEXXk4uLi8noFZOTk3kWZV5DQEk0SgRRQkjecvny5WRfqg4KAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCBQlQAJIFXJ/qleBeVolI9QYfSPkArTEEAAgdESsJI8spJDRkuBrUEAAQQQQACBXgEleyjpQ8kfSgLJWw4fPpwkKnBDgbxy2fPv3r07Sao5e/as0/V8nqL9urCwkOxXJYRQEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQACBsgVIAClbNFBfaBQQBZIQrBPAYhICCCAwYgJWooeVGDJim8/mIIAAAggggECfwMWLF5MEgaWlpb5nsv/1yQkXLlzInZyQXTtz9AqoD6fRQIpctyupZ3p6utDILr1t4G8EEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQKBfgASQfpEK/leyR/8oIIz+UQE0VSKAAAINFehP9rCSQhrafJqFAAIIIIAAAiUIXL9+3c3MzCQ/+jtvUX9CCQkHDhzIuyjzFxTQtfzi4mIyIsjk5GTuWpTks2fPHnfu3Lncy7IAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCAQEiABJKRSwTTdNVTBIyp6LHIX0WRhfiGAAAIItE6gP+GjPyGkdRtEgxFAAAEEEEAgl4ASAaamppxG/8hblPChxI/5+fm8izJ/SQLaBysrK059OH9dH1v17du33bFjx5IRQYok/sSuh/kQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAoBsCJIDUtJ8VJOJHAWH0j5rQWQ0CCCDQIAGf9NGfDNKgJtIUBBBAAAEEEChZ4PLly0nix8mTJ50SAfIUXUOePXs2GX1i9+7deRZl3ooElISjRJAio7DoWNBoICTyVLRzqBYBBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAIGOCDzSke1sxGZq1I/z588z+kcj9gaNQAABBOoVUOLHwsJCcufoetfM2hBAAAEEqhBQMP/q6moS1K9HlRs3brjeO/zr797/k5k2fimwf3Jy0v+bPO7fvz95VKB/789DM/FPawR0fOi8r5E/ihT1GxYXF5NjpcjyLFOdgF6fy8vLyWguSuwJvcbT1q7jQp8LKLmnSCJJWt08V6+AknpU/GP/OcCfJ5KZen5xDujB4E8EEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAgdwCJIDkJtu8gP+y3weAaY4rV65snvFPU2ZmZoLP+aAvHwzgH4MzMxEBBBBAoBEC/r3fP6pR1jlA7+vHjh0Ltrv/HKCZCAoMUjERAQQQqF1A/X29zyu4V4/6UWBv0aJl/TWEr6P/fz9diSIKOJ+YmEjOC/pf5xNKcwUuXryYnO+LHCPa1yQGNHff9rbs8OHDyWuySKKPkkamp6f//+z9TWwd153v/S7SgqEIaluxfAO7YT2SgGtlSNLIRAEeS2zn3gukZYs8ObBnFgUNLoz0ichocBqn7YiMnUaegUPKJ4FxB4Ioz2I8OZRsHw+eGzVFP0A0CUxqGPkAoq6MtpFjpWW3oAh+HPHyV8qSNzdrVa1VL3vXy3cBW5uqXS9rfap2rdp7//+1DIk+naLV/due9y9fvhyd//vVB+j8b/uE6mpRMwQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQACBsgVIAAkQLupHf9cdQl1BXwoC0kPBwXq2P/oHVJ1ZEUAAAQRyCtg+QOdwJXjoWY+iiqsPsMmAOvcr+Jc+oChx1oMAAgjEC9jkDJ3rde53nZ/jly5+qu1/lFSgQHMVfSZQkqA+H+hZ/6f0X0DXBUr0zHLMqL8/fvy4mZ6e7n9DqIG3gPabRmo5cuRItO/1fg0p8/Pz0UgiWoeSQSjVENB7WA/1A3ruZ+nsA2w9bB+gzwZKRKIPsDI8I4AAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAu0QIAHEsZ9t4Jd+bO/3j/42yLg78KAz6Et/UxBAAAEEihPQOVePfvcBtj+K6wOUDGKDf7kbfHH7njUhgED7BHTNr/Ps+fPno+eqC+jzgQLH9VBR8K+CgNUn6JnSewElbpw6dcqo3w4t+iynUT8I4g6Vq878uiZbXl42c3NzUaJWyHGgeZU4dPbs2SiZROui9FZA51Ql2em6X89VL7YPUD2npqaic4fOI4cPH6YPqPrOo34IIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIBAAQIkgHQg1i3wS0Fqetg7AXf+4E/wUMeO5U8EEEDAQ6BugV+2D1CgoYr6ABv4S+BgRMI/CCCAQKKArv0VcK1gX/UBdS6qv/oDPZQQqCQQAoF7s0fVHyt4P8sxpH2lxA/tL0ozBCYnJ6P9qaD80EQCHUsjIyPm5MmTRuvR8UEpT0DvWe0j9QPqD+pc1JbOpMDOPoDjqM57lrojgAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAAC8QKD8ZPbM1U/9Cs4Y+/evVGwhf5W4EUdi+pt26L26O+6BzLUcT9QZwQQqI+A7QMUbGfPm6HBelVprfoAJQR2toU+oCp7h3oggEBVBBQkqyQJe+2vvzWtSUWjCSgQeHx83Hzzm9+MkhPoD4rfw3bUhtHR0UzHkAL8r169SvJH8bum72vUzRgWFhaiR5YbM9jrubp+L9H3HZBQAb1vdd5v+vWyPs8oMU19nZ45lhIOCl5CAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEaijQyhFAFOSl
H8RPnTqVKVinDvtZbVRggx66E/yRI0ei4KIsASh1aC91RAABBHwFbGBsE+7262pzZx+g8776gImJCUMf4BJjOgIINF1Awa+69tdngCLL7p3G7Nk5YKLnRweiVT+9b+MWDuy7N71z6rUbxqzeWLs/Sf/XQ2XpD2vm5p+NuXz969fvvRL+r+3zlBCizwTHjx+PPhM0/Y7w2s9jJY6qIU8l28s3tGg/zM7ORiN3hS7L/PUS0DGoEdr0mdyO2unbAl3LKblI69AoMWW9Z21ymI7LJhf1Abr213u3yLJjmzFDTxTfB6iOS1eK7QP0OUB9gD4TlHU8FWnLutorMD09XUrjddx3nut0fi6i6PyiR1opq11p223i6+ojfc7nmJez9/Evx5W1IoAAAggggAACCCCAAAIIIIAAAggggAACCCAQKtCqBBAFAulH/6IDvwa3bTcPPrHPRM+7noz2wZadjxs9fMtXNz4xeqh8ef0jc/f2LfPlx1eiZ991uOZTUIceClJSAIl+9C/qh07XNpmOAAIIVE2grMCvIvqAu7f/ff2c/1FEVnQfoB/nFXSoh/oAmxBYtf1DfRBAAIEyBBQcpvOfzoV5ixI5hnatB/vuuhfsG5fY4bsNJYzsXk8ciSuvHPp6uk0U+eCKMSvrCSFKCrGJInHLJk3T5wHdCV6fCRQArM8ETUwM1D5XOxcXFwv/zKPjKOvd9BV4KnOCEZOO0ua9pv2ufa5rML33fIKEOxX03YWWOXnypNGoMUUXHc8qy8vLRa+6EuvT+UDJfzr/5S117wN0/tIxaPsAHVNN7APy7meW779AaMJcnhrrPaDvRw8cOBCdp7MkR+kc7VNn+v88e2rjsjqfYb7RpJf/w7+X2mwLAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAG3QOMTQHRXVgVNFBX4tXXfU+bB9SQPPZTgsXXfiFs36BX3eu5cWV5PCrmyniDyaZQccufKh0Fr7pxZFnroR0794K/gLwoCCCDQZIEig39tH7Bl52Pr/cC+9eS/J6PkvyL8tg0/Hbsa9QFKEFRiiB5F9QHcATiWm4kIINAAAV3/6477CvrNMkKDCHRX96fXEz4U8HtgfUQPJX30o9hEEdXBmHt1UAKI7gyvkUL0HJoQYn1kpM8CTQoCVtsU3KyiwParV69GfxfxjwI3fYIN47al4H+N+qHPYJR2Cuiu80pK0vtOx1HIucke17qZhUYD6byDfR5N1cUmRujvMhJM8tQvz7JFXP9H5/9vD6z3BSbqC/LUJ+uycX3AzdvGfKA+YP1xfiW8D1Bd5KOH+gAlhysAnoJAGwUUyG7fD7p+0GdknQuzJIK00Y82I4AAAggggAACCCCAAAIIIIAAAggggAACCCCAAAII9E+gsQkgCpJQEEOewC/d1V3BvkrysEG//dhV97Y/smHTCghWEPCdP9x73vCix3/0I6eCoviB0wOLWRBAoHYCtg9QoJzOd1lL1Ad8+14fUFzCX1ht7m13vQ/Y//VyNhHE9gUaNSqkyETnfwUgEuQSIse8CCBQZQF77s96/a9A28PDA+a59Uee0T3KNlI9X9w/ED20LSWAKAj4nfWHAoJDig16bEoiSGdgvfo6JW3okafozt763JTlekIJH0r8UAIIBQEJKLBY7zcdU7oxQ0hRssbIyEi0DiVu5QlQ1vmyM6FJf+s4rXuSks5pakuW96vtA3T+Vz9Q1aIERdVPj9ef/7oP0Plf/UBIsX2AEkCUXFT3/R/SduZFoFvAnhfPnz9faLJd93b4PwIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAJFCDQuASRv4JdG9dBd2PXoV7Cvz469nxRy6N7ct1c+WE8IWTZ61p3ifYv9gVOBcgQB+6oxHwIIVFlAgZ5Zg39tH6BzrGtEjiq03Y5E9dAzL0TVsed/+oAq7B3qgAAC/RDIGvRrA36VUNGvUT7yeqkNP3pmIHrozvDvXF4zb1xYM5ev+wcC2yBgBafnDSzP256syytRQzcA6Cy6HlCwfZagZn1OyhKkb7dfZ0vbBp7LEVDixsLCgsmaXKTjXMkjeZKLdGzrGLdFfys5WPWqY5GH6h+a+KFkCptQ16Q+wCYF+u5LHYt79+6NzpdNGhXKt/3Mh0CngJLtRkdHo1GbihpxqXP9/I0AAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIBAEQKDRaykKutQIIR+tO6886tP3RTwqyDav335rHnin39jHnn+eKWTP+LapEBl1Vv1VzvUHrXLtyjgQ27yUwAYBQEEEKibgM5dRfUBVU7+iNsvSlgpqg/Ie6f0uPoxDQEEEChLQEGruiN+6AgNCvj9zUuD5n/88wPrd1AfrG3yR7erDWb+/cv32vaTZweNEkR8i/08Vce+QMHf3UWfcXRshBbrEDpCg7ajYNHl5eUoOD/PCA2hdWb++glo1AUdKwq4Dy1KdBgfH48eoUkPOm/GHduaptfqVGygtixCHDR6hvqA/znbzD6gs38L6QP0eUp9qvoAnT8pCLRVIOv1Q1u9aDcCCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggEDvBRqRAKJABQX9Kugn5Efq7fu/bx478av7SR+6o3oTitphA4G/9dLPjdrpW+yPnPKsW/CHbxuZDwEEmiWgc1WW4F8leegcaRP/mtgHqI9THzC4bbvXTlcfYJMB4wIDvVbCTAgggEAPBOw1q+7QrABgn6IgWCVEKOD39MSgUQBwk4va+8qhgSjJRe09sM+vvZ19QV0+Dyhhw3UcqA2+fZrWoWMq9HOljiMle2hEBgX0c8fwJr+zim2bjhsF21+9etUoISS06NjWdbDeAz7Fnjtd83aPDOKar9/T1Q69T9V23/OUEuTUByjxTwkSbegDNDqUbW9oHyBb33Nnv48Hto9AGQK6JuAGOWXIsk4EEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAIEiBGqdAKI7PCpAJ+RujxoVQ8kR/8vs/2EenXi5diN9hO50BTirnWrvjmePeY8KksU2tG7MjwACCOQRsAFsIcG/SoTQuVBJH0r+qNtIH6FeGhlEfcATP/tvUd/nOzKU+gD1rbLV3xQEEECgSgJ2dAbfoDwlQigBQkGwSohQEHDbikY8+e2Jweihv32K/TyQJRnCZ/1FzaN6KnkxqaQFteuaQkH4IcHkndsbGxuLEj8mJyc7J/M3At4Ce/bsMYuLi+bMmTNRMpH3guszhiRD6PyZdG2n13yTSULqWOS8SkrQDSt862n7gI9+dq8P0P/bVpTsoj5A/WBIH2BHmdExRkGgjQKnTp1qY7NpMwIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAQA0EapsAEhqgozu7R0Gw60G/Dz3zgvfd0GuwD72qGAU9H7oX9CwH3yDg0DuKelWGmRBAAIGcAgr4UuCXb/Cvznn3k+HWz4W+58Cc1azM4uoD1Pcp8UUOW/c95VU33VFZzupzKQgggEC/BRSYHDI6gw36DQl47Xcby96+7gBvk2F
8g4Btn1vVO8H7JKgoeNmVJKK+TokfrteT9okN2l9YWDD6m4JAXoGJiYloNBA9h5a0EWz0us9xrnmSkkRC61XU/HofhyQkdPcBbUz+67bvNul+Pe7/NuGmqn1AXJ2Z1g4B9bsaOcn3kUVF580qng+ztIVlEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEmiVQuwQQ/fhmA3R87kKoINfHTvzK/O3LZ832/d9v1t7L2Bo52CBgnyBoOSuwijvBZwRnMQQQKExAwRchwb828UPnPPqAe7tBDo+d+GXUN/omgigQUH2v+mAKAggg0A8Bm5SsYP20kiXANW2dTXvdGv3+5UGjpJC0YgOvfZIt0tZV5Os6HnyDkpXI0nn86Joiz2hXJ0+ejEb9UOApBYEiBXbs2BGNBKIRQbIkFrmStvT+9S0aNadKRe9dJSX7vN+V6PH682GjXVSprb2oi+0DfBMkbR+gc6b+piBQBYEjR45EIyfpXOnzWFtbi+YL7bdJAKnC3qYOCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAAC3QK1SgBRIINvAKqCfr/10s+jINet+0a6283/1wVCE0EUdCF/3zvug4wAAggUKaBzj85BncGbrvWT+OGS+Xq6+kabCOKTDGgTMBkN5GtD/kIAgfIFFGiqQGSfoFMF/f7kWYJ+Q/bK0K4B89sTg9FDAcFpRZ/HlIhZhYRAe2yk1bnzdRsAbz9X+gSTdy6vvxU4ury8HI2OpUB9CgJlCehYu3r1qlGyUWjR+6MzwUnX0T7X0HY7mrcqn/vtzSh8Eg/UB3z0swfMj54ZsE3hOUHAJoKoH/BJBtQ50/fzWMJmeQmBvgnovKpkkZBRlkLOnX1rGBtGAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBonUAtEkD0Q7+943vaHhrctt088vzxaISLbcNPp83O6+sCNhFkx7PHjPySig208gnCS1oPryGAAAK+Ava8owBg/Z1UdA7TuYwRP5KUNr6mRBA7KlRaH6AlNRqI+uS0fbFxK/wPAQQQCBdQkoHONz5ByM8ND5jfv/yAeeUQQb/h0iYK/NWd4HXXfCXSJJWQ/ZK0nryvKYkj9K7cqvs3v/nNaHTD0H5MyR6zs7NR4Ojw8HDe6rM8At4CSr5VIogCl0OLApcVsG+Tn0KW1zKh75OQ9afNq/e36q73elpR8oLOYeoD0s5haetq4+vyUxLI6Yn0PkD7RX0zSeFtPFKa02b15yRxNmd/0hIEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAoI0CW6reaBtg5BN48NAzL5gdh9KTGKre5n7VT3YP/d0L5k9vz5lbl95PrIbu/Kh9s7CwYAiASqTiRQQQyCGg84wSP/ScVpT0pwRAn9Es0tbVxteVDLht6Glz873T5osLv04kUDDh3r17oz4gSzBi4sp5EQEEEFgX0LWmT+KfvXu5z53LgU0X0F3zDw8/YH789l3zzsqacwF9NtP+WVpaMmfOnHHOV9YLCkBWQmKW4vO5snu9Y2NjUTsJFu2W4f+9EtizZ0+UfOR7buysV5ZjXstrOb3PFCjd66JrTZ+bTijZ4/SRQaMkQEp+gRf3D5jnhh4wx84m9wHako6Ny5cvc27Mz84a+iCg/lx9u0+ScR+ql7hJfTeih66FPv/88+hvLaDvZh9++OEosUV/9+NzepXrlojawxe136yTNqtraRW7/9Tfa9/pueyifl79rerTeSwdOHAgOo5UD9WLggACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAtUUqHQCiO706HOnygd3Pbke9DtpdBdzSj4B3f390YmXzfbv/r35bP4189WNT5wr1I9WuvOjAkImJiac8/ECAgggkEVAwRg+dx5WwofOW/QBWZQ3LqM+QEk0Sgb57Oxr5svrH22coeN/ChawfcDk5GTHK/yJAAII5BPQXcV9gvuVrPDKofS7leerTfuWVlLNb14ajBJAFAR887bbQH21gsYWFxejQDH3nMW+ouSTXhQF3ynBRQFwFASqIKCAZR2POkf6jIyRt87axpEjR3oaAKrzis97XEkfSv5gxI+8e3nj8vJUH7B0Zc38xzeT+wAlJOl7Id0YpBfByhtryv8QyCewe/fufCvo4dIK0j979myUIK3P4XFF83QX9Rk6h+u5rKLrwFOnTgXVTQk46svKrptvm2WqpEOforrre/CQc57Wr75NTjpnxpXu/af1y0fftWibRRadu+3xFLfezrqUWY+4bTMNAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEE/AUG/Wft7Zz6wd8n+WPHs8fM3758lsDfgnePAqnlqlFVkop+xPLdV0nr4TUEEECgU0Dnf51bdI5JKjpH0QckCWV7TYmVclUfm1Z891XaengdAQQQkIDO/WnJHzY49fXnCfwt86hRcPXvX37ApI2uosC/kZGRKBGkzPrYdSuArjMwzU4v+vnkyZPm6tWrUYBi0etmfQjkEbDBp0q86sWduXVe7lXRttK2F436MTEYJSmQ/FHentG5/6OfPZA6ukqv+4DyWsyaEYgXUKJCv4qud3TTBT10/ZP2/Uh3PRXor8QGjd6pv4ssSmRQvXQNGFo3taPMuoW0U3VRO2Sd9tD5TteHvskfWrcS27/5zW9Gv3G4kj/i6qt59ZlI+07rKKLYfaZjwvd4sPXQfpYPBQEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBKojULkEEP04Yn88SmLSHd+j4NRD6cGpSevhNbeAvRP8Yyd+ZeSdVHRnUP2ApP1HQQABBLIK6ByioK+0Oxrr/KRzk0ar0N+UcgR2rPex6muVEJJUFPChoAn6gCQlXkMAgSQBnT98PgP4BqQmbYvX/AU0GshvTwyanzyb/LHRBpQpMK7MouPE5yYBeeqgQE8lfhQVbJenLiyLQJKAjtXl5eUoGLXou4N3blfv67Rr8875s/yt97Y+A+iaMqkM7bqXmPbi/oGk2XitIAGbcHl6PeEmKdlG+88GqBe0aVaDQOkCS0tLXtvoRaJdd0Xs9Y5NTOh+PfT/uk7Td7ZFfW+rPkGJCUUkBBRdtxAbe+7yuX5VPxuSeCkbfbZJS2xPq6/qqHVoXT71dK1PCR9aR9Z9Zq/10/pp1/aZjgACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAsULJEfyFL+9xDXaHxPSftDYvv/7XgGpiRvjRW8BOxrItuGnE5fRj0kEACcS8SICCCQI2B/f035Q3rrvKfPEz/4bIz8lWBb5kpI/Hvvxr4z63qSivlt9QFofnrQOXkMAgXYK2PN/2vlDSQhKRkgKQm2nYPmtfuXQQKq93Y9p/Xie2ioATtspoyiw78yZM1Fwn++dncuoB+tEIFRAyUpKBCnzLvVlvvd8zx1K+vj9y4NGiWmU3grI/rc/TrbXfvRJ4ultzdkaAvEC+v7ZJxB+YmLClJlgF1c7e04sI/HOJgGkXXPH1ctO0/u8jGTcXn+nbJ19LEKTP3QtrO9GdJwVVV
TPrN+3qD5FJf9o/58/f76oZrEeBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQyCFQmQQQ/ZDhczerRydeNnpwx/ccez3DovL+1ks/j+62n7R4nh+kktbLawgg0GwB3x/fdzx7bH3kj1/SB/T4cFAf4NP/0gf0eMewOQQaIOBz/rd3IFcSAqV/Anb0Fd2B31W0P8sKAFagZhnBkGqLAjw16oeeKQjUUUBJS7oz+cLCgikjgcm+t4u28ekDtE2NQKEHpX8CdvSV54bdfYBqV1Yf0L+Ws+WmCei8o2B4n3Ly5Emf2Qqbx/ecmGeD9uZLWZITyn5/2+8T5FBmCXHOkvwhpzKKrXfIvpNp0Qk7ZV2Pl2HGOhFAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQaLJAJX5B9/mBR8Gnf/vy2dQ7kDd5Z1WhbQ8988J68PWvEoOv7f7UMwUBBBBIE9C5Yu/evSbpnKE+QOeeHYeOpa2O10sU0CggGg1ky87HnVuxQQlJ+9O5MC8ggECrBHSeSEsA153eddfxtIDTVsH1sbFKxtEd+HU3+KRSRoBg0cFrtv4KltfIH72+w7fdPs8IFCkwNjZmygpY1t3Zfe6Y79sen2tG33OO7zaZL5+ATcjsRx+Qr+YsjYAxCprXSAhp157WSufSMhLq7PrjnnX91IvP0Tr/ho4IoaB/+ZVd1P6yrvlUd5++x7YxNPlDfWRZyR+2Tnbf2f8nPdt59UxBAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQACB5gls6XeT9MOOhjBP+jHiwV1PRgGnCgCm9F9g676RKBnnj2/+o/ny+kexFdL+1H7VXUiHh4dj52EiAggg4NsHPHrkZaO+gNJ/Ae0HJWR++osf0gf0f3dQAwRqK6BrRQWeJd3BVncbV/KHAk4p1RLQnfj3PLpmfvruXWfFbABcEaNqKOhQ1wxlFB2DWv/k5GQZq2edCPRUQOfWMgNn9b7WaDl5i/2+IOl9rT7gNy8NGiUCUqoloD7gwLfXzLH53vQB1Wo9tamKwNmzZ83S0pJXdUKT13TtMj097bXuombStYgS7XyLklOU9Pfwww9Hi3z++edRkl7SebVz3ZpvZmbGzM7Odk6O/VvXSiF9i+p28OBBs3v37vt1U9uSrvs7N6xEkyNHjkTr6Jye92+fvsduIzT5Q+u21752HUnP+q788OHD92e5du1atP98jLTvdHymHaM6pnzWZyuhNmu/DQ0N2Unm/PnzxveYur8QfyCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggEBPBPqaAKIfENKSP3S38Ueen0wccaInUmxkg4Du/q67wCsJ5M6VDze8Zv9jf1gjCcSK8IwAAp0CPn0ACYCdYtX5247K9dn8a+bWpfdjK0YfEMvCRAQQWBew54ekgCSN+HH6CMkfVT5gXjk0sB6YPVh6ALCOEwUollm0fgV7KvCNgkCdBXQs6xxbVtH70SfoNGn7tg9ICiglATBJsBqv3RsFpPw+oBqtpRZVFND5SI8ii64DNPJHr5NCdV70vdZRcoVGLlOgflxRsouSNZLOsXY5JQgcP37caJ1JxTexIclPiSaqm9bls9/k4WpjUl2TXtO2fVzUjtDvslVfn3YpaUcWLnMlyqieaX35qVOnomtX13q0vObxKUn7TX2+2qU6af9REEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAoDoCg/2qin5w8Un+eHTiZZI/+rWTUrarAODHTvzSKEnHVfSDk/azzw9srnUwHQEEmidgzw1JP2rr3KKRJhj9qbr7X300fUB19w81Q6CKAvb8n3RtqKBS3fWdkT+quAc31kn7SneCTyq+wXaudSiIMel6wbVcyHStX/WkIFBnAQVmKpi37OIb5OqqR9r3AyR/uOSqN119wO9fTu6vdW7V3fwpCFRdQEHwCwsLPU/+kIvO3T7XOho1Ynl5OTExQkkTmkeJrT4lLfFEfYtP4L9NmkhKnrF1cyUtdNZX2/RJqOhcJulvnYt8Rlix7QgZzVr19Ol/tU90jCW1XwkiSj5RPZKKjpekBA+dd32OKdvepP2m+qpOvsdUUr15DQEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBIoTSI7WKW47G9YUkvyxYUH+U0kBnwBg/dDm88NTJRtIpRBAoFABnQt8EwAL3TArK0VAfcAjzx93rtvu76Rgb+fCvIAAAo0TSAv89UkoaBxKzRvks8/S9ruLQMF/PsF6ruVDpms7PgGOIetkXgR6KaBkqV6VrAlTWi7pmtAnoaBXbWQ7fgI+CTva75xf/TyZq38C9nOrjtciEw98WnT27NnU2RSor+SBtMQAu6KkUSbsPHpWokBSe33qpvVo5BSfpAnVXyOY+JSizhvap2pnWlHdQkf+0DqTEjHsNpX84ttuOWr/pZWkNvnuN9XJZ7+pLiHzptWd1xFAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQCC/QM8TQPSDmn540bOrKJhUD0p9BNKSQHySfurTWmqKAAJZBWxQRVLgl0aUoA/IKtyf5R565oXEfab9Pj4+ntj396fmbBUBBHopoM8ASed/n0SCXtaXbfkLpO072//rOaTomOllSfuc2su6sC0EQgSmp6cTz68h6/KZV0G5oclZSlBJClZNO4/41It5+iPgkwSizwJJ1wD9qTlbRWCzgM5TIyMjieerzUtln6JzaVIChl3z8ePHE0eOsPPZZyUz+CQRaP6k83nSa3ZbSm5IGkHCzmefNb9P0sHly5ftIpmfdW2X1PfYFas+GjnFp152Gfvss37f5A+7To22kTRSiObTdXXc/tHx5HO+1X7QiCMhxfeYClkn8yKAAAIIIIAAAggggAACCCCAAAIIIIAAAggggEA2gZ4mgNjAn6QfIdISCbI1k6V6IZC277Tf9aM/BQEE2iuQFvxL8kd9j420facghLSRX+rbemqOAAJpAgpOTgqOIvA3TbD6r6ftQ/tZUM8+RceMT0Ckz7p859H25ubmfGdnPgQqIaDj1ufu40VXNiRhSuf/pPeWEghef76nX08VzdH69aUlgejcT0J46w+T2gDoeNU5TtciZZelpSWvTYQkWNgVKrg/LYlA87rqoGQ/n+u2I0eO2E16Pyv5IK0k/X6QtqxeD0n+0MgfPlbd21Ud04yUVJJl3T7JGefPn++ukvEdOUVJRaFF+y1LW0K3w/wIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCQLtDTX9h1x8ekH290B3EFkFLqK5CWBKIfofQDHAUBBNonoD4g7u6EViItgcDOx3N1BdL2oa4B6AOqu/+oGQJlCejcPzMz41z9c8MD5vRETz+WOOvCC/kE0pJA1A/oeiCtKKA96ZhJWz7P69pu0mfWPOtmWQTKENC1VVrwaRnb1TZ93qdp139piQNl1J11liOgfXn6iLs/17mdm4KUY89ayxHQOS4pgbmIrfpccygRQCN6ZCk+SQSuhAHX9O56aLSK0KLkAyVdJD3yjjbhu+9Uh6y+Sd9xWZPDh
w/bP4OefZaL20c+I6eovT7HRlyFsy4Xty6mIYAAAggggAACCCCAAAIIIIAAAggggAACCCCAQHaBLdkXDVtSd3tM+uFFQaOPPB9+56mwWjB3LwSUBKJy69L7sZvTcXDgwAGT5QfC2BUyEQEEKi+g933SXX/TEgcq30AqeF9A+/LOHz509gEKkNCdVHtxN9X7leIPBBDom4BP4G9SsGjfKs6GMwsoCeTm7UFz4u27sevQNcHQ0JBJupN1v5MFlaSiYEAKAlUX0HVVXPBnr+qt63vd+V13N48rShLRCHCuQvKHS6a+021S57H5+D5Ax6vOsXkDu+srRM2LEtB3iqEjT+j404gJPkkXtp46XhXwnjVBwK7H9exzDtd1U9ai71+TvovRenWuVoJW98gOrpFBOuviM5JH5/z2b22re3v2tV4/yyfr9xM+yRZnz551jrKSt63ab93F5/h29dvd64r7v88xFbcc0xBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQKBYgZ4kgNgfeF1VJ/DXJVPf6UoC+erGp+bOlQ9jG6GgLv3YlOcHp9gVMxEBBConoB+fk+72/eCuJ41NHKtc5alQJgG7P12JgLqTqs7/3DkyEy8LIVAbAQWTJd2ZfvdOY37740GzY1ttmkRFPQV+9MyAuXx9wLx1aS12CV0XqB+ICxrsd0C7KqzPrwoGTEpSiW0YExHooYDOsUnX2L2qis7zy8vLsZvTaA+qZ1zRuf/0kQH6gDicmk9TIuC1G4Pmp+/GJ4Ho/KoAYj4L1HxH97n6u3fvjr2OSKqWrjsU6K/vKHTu0nNa0TlMn1/7mbSka6asxTfJIi4BxHX+7qyL3st1L9q/Oh9lcfYxkm1cokZRbrpu7bym9qlTnv3me0wV1T7WgwACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAvECg/GTi5uqHzj0o7+rEPjrkqn/9G+99HOj/esqScEgrmWYjgAC9RLQD89J73WdIx778a/q1Shq6yWgJJCt+55yzquAmzKDIJwb5gUEEOiZgAKTXYF1Cvz9zUskf/RsZ/RhQ6cnBs2BfQPOLcclB+m6QdOrUBQM6BNAV4W6Uod2CugYrcK1lM7zCujvLgqyVlCqqygBUCOAUJop8MqhAaNEEFfhs4BLhum9EFCgv0b68g1kTxrROk99XdfJ3evMM/pIlqQGu33f+tn56/yc9fqzikZl1ynPMVXnY4S6I4AAAggggAACCCCAAAIIIIAAAggggAACCCBQNYHSE0DiAnsswpadjxP4azEa+Dy4bbtREoie44qCVbL+wBa3PqYhgED1BJICe9LOEdVrDTUKFUhKBLTJQaHrZH4EEKiHgEZxSAqWO32EwN967Ml8tfzf15N8NNJLXIn7LNCPpAsFf+quyXpoxI+TJ09Gj4WFhbhqMw2ByggcPnzYnDlz5v4xq7uX22O515Xsfu8q8UPTXEUJYiR/uHSaM/315937mc8CzdnPdW2Jkip8R/XQ8ZqU0JbVoAmJpr5JNFmNerWckiaUuBhamrAPQ9vM/AgggAACCCCAAAIIIIAAAggggAACCCCAAAIIIFANgS1lViPpjo8E/pYpX5112ySff33tSGylFByou4Uq2IqCAALNElDgr97jrvKtl/43o3MEpbkC6usfPfKy+fQXPzR3b9/a1FAbZJEl0GLTypiAAAKVEVAgVFKS70+eHTTPDbvvCl6ZhlCR3AJ2pJfvvHY3dl02UWhiYsKoT4gbRSB2wZSJCuq0dydWYOLu3bujJTRNr3W+nrIqXkagsgJK9kgrOh/rvaWipCs9VJaWlqLnztejCRn/sed9JU7Zv12r0qgQSSNDuJZjev0Evu4D/mJu3t5cfx2b+hzAZ4HNNkzpjYAS53RNoPNWWlECiM95N209TXtdfk0pSlzUMWGvIZvSLtqBAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACzRQoLQFEP+Qm3fHxkecnzYO7nmymKq3aIKD9/Mjzx82f3j61Ybr9j44T/YjKD2xWhGcE6i+g4LKpqSlnQ3RO2LpvxPk6LzRHQH2AkkD++OY/xjbK9gEE08TyMBGBWgokjQB4YN+AeeXQQC3bRaWzCegu/7oL/Im345NAdL2gPiApachu2fYVCjYcGhqKJuszhA0+tK/b+XlGoO0Cem/4vi86E0T0fY4Coj///HOjv1U6X48mdP2jhC4FSJ8/fz6at+vl6L/2fBD3GtOaKaBRoDTq1w/ejO8DCLhu5n6vU6t0HaFzV1rR+bDoYq9fil5vL9enPkJJE00puh5dXl72bo72oU8CkfcKC5hRyc/qsykIIIAAAggggAACCCCAAAIIIIAAAggggAACCCDQbIHSEkCSAngeeuYFs33/95stS+s2CGif37mybG6vfLBhuv6jH8pCf2DbtBImIIBApQT0nnb9CL5t+GmjcwKlPQJ2n39x4dexjbZ9QBMCYGIbyEQEWiSgAGA94oruBP6/vzQY9xLTGi7wo2cGzNKVAfPOytqmlup6YWRkJApSP3z4cPS6Atf0UOlM8Igm8A8CCJQi0Pm+S0sasQkiqogNnFZwtBK6bMJIdyXVB5w+MmD0TGmXgEb9Uj/wxoXNfYAk7GeBdqnQ2roJuM5tedqhaxyf4vpuxWdZe472mbd7niomN3TX0ef/cj558qQZHx9PnV37OWRkIq07zVgj3R05ciR121ln6D6O1J+nJYBcu3Yt6+ZS25t5xSyIAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACQQKlJIDohxLXD2O6E/iOQ8eCKsnMzRDQHeD/9foR89WNTzY1SMfL3NycmZyc3PQaExBAoF4Cei+7fgDfsvPxaDSIerWI2hYhoFFf7lz50Hx5/aNNq1Nwgu7+Ozs7u+k1JiCAQH0EFJymIE5X0R3ACfx16TR/uvb/d67/xVy7sbmtOnYUGNekO0hvbiVTEGiOQGewaWeyyN69e52NfOXQoNEIIJR2CmgkqKUrd83l65uTQPg+qJ3HBK2+J+CTZKH3SNZrJN/kkc7zut03PskNS0tLdvbgZ9f3Rp0r6uxjOqf7/q02LC4uGjnL0JWo3rm+okcm2r17d5To3LmNMv9WW9OK63ebtOX0uu8x5bMu5kEAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAIHsAoXfglc/IOiHEldREsDgtu2ul5neYAHt90cnXna2UMdN2h3KnAvzAgIIVELABvK7KqNzAH2AS6f507/10s+d+1+JQ3mCEJqvRwsRqL6AruVcAUG6+7celPYKRHf/n3B//EwaPay9arQcgfoI6EYgrs/zB/bdGwGiPq2hpmUIaAQYV+H7IJcM08sWcJ23urdrRybrnp73/3GJF93rzJNk4bOsEgbikgZ82pz1M7ySP0ZHRxMfSYnl3UZx/+9M/tDruuFEXDvjlvXd9oEDB+IW3zDNZx9sWCDnf4aGhlLXoP3me+x3r6zX7enePv9HAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQACBewLuCJyMQlNTU84ldfdv
jQBCaa/A1n0jZsez8SPAKGDQ9we29grScgSqLaA+wBX8+9AzLxidAyjtFdAIMEmjgNEHtPfYoOX1F1AQkRK54koU+L8++gMFgaQgcF0/uI4h5BBAoNoCCiI9depUbCXTkr9iF2JiIwU0AsxPno2/HlAfkHQzmUaC0Ki+CygJwTcIXqM4lFF8EghC6tldR58RL1yjbPjUTe9dn2101+v8+fPdkzb93yc5ZtNCHROWl5c3JHwooeXkyZMdc7j/TPps07mUy65znqxJMp3rCPnbp05a3/z8fMhq78+bZX/fX5g/EEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAoDCB+F9eM65ePxzoR6m4snXfU0bBvxQEFPzrSgTS8eM6hpBDAIFqC+i96/ohOC3wv9oto3ZFCtxLBHoqdpUKjMgahBC7QiYigEDPBJKSwE+vJ38oAJiCgAReOeQ+HrgDPMcIAvUUSBoBSu/53Tvr2S5qXbzAK4cGnMdD0neKxdeENbZdQIkLSdev3T55kxG612f/PzY2Zv9MfM7yOVnL+CS4HD58OHbbvokErgTA2JX+daLru6POZXwSUDrn9/l7cnLS+O5Ln+tSGaWNKqJjLcv+U3t0k4yBgQHnY3x8fFOzQ/ab6hZSfI+pkHUyLwIIIIAAAggggAACCCCAAAIIIIAAAggggAACCGQTKCwBJO2HM43+QUHACjzy/KT9c9Mzd4DfRMIEBGohkPTefXTiZTO4bXst2kElyxd4dOKfnBtJGkXGuRAvIIBAXwUUwOVK4NWID88ND/S1fmy8WgJKBnr9effH0JBgzGq1jNog0E4Bnf9dga0a8eFHz9AHtPPIcLf69IS7D2AUELcbrxQnoGvXkZEREzIyg29QfWgtlYygkSnSit4bIfVN+56+c3uuJBTVy6fdSTcD6dyO/Xt6etorMcVVL7uerM+zs7Nei8ow6XsuuxKfeiYlStr1dD8n9a92XlfyzsTEhJ3F+RxyjGglmp9ztJOTFxBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQ6LnAlqK2ODc3F/0QELe+Hc+6R3yIm59pzRfYum/EbN//fXPr0vubGqu70+l40l3ZKAggUA+BpLsA6r2u9zwFASsQjQizfm1w893TdtL9ZwUVqA9QUAgFAQTqIZAUsJ8U5FmP1lHLMgRe3D9g3vrdgFm6srZp9TahyCfgcNPCTEAAgZ4LJAWDvv48yR893yE12KCSQ6N+4NLmPsAGkvsEVNegqVSxRIGlpaXgz4yXL1+Okih8RsXorLqOx7RRHjrnD/37+PHjXqORaLSHxcXF1IQRfaYeHR11fk/fWT8lCiS1TXXT+zKtKFFCCSNpo2vou6OkfsNuR9eBWl8ZRevWd8763iGtqO2aL+k7ahm5EiHt+nXMaZ9o/yV52/l1PZyWfCIfV6LHkSNHUuukbaneqk9aUoyOKdUn9L1j28MzAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIFC9QSAKIfgRwDfeuIM+H/u6F4mvOGmsvoFFAbl/+wNy9fWtTW/RjYNqPkJsWYgICCPRFQH2A6wd8jfqRNOJPXyrMRishsOPQMXPrd++br258sqk+tg8oK+Bj0waZgAACmQUUNOQKBPrJs4Nm987Mq2bBhgsoMPw7r20O/lWz1Q8oOI+CAALVFlBgrCswWAH+CvSnIBAnoGuEdy7/xdy8vflVJZaSALLZhSkbBZLOPxvnzP8/BfiXWfT9p75Xd11T223rdY1comB9LRNXdG2u66i0ddllT548af+MfdZ7UddkrnO9XUjfCynBQVZKluhOctDrSqRwfXdk12OflcBQZlG7ZaV6pRXVWQ6u7yeU9OJjpBFctP+0bdf+0zw6FlS3tJJ0XKo+qq/PcaD9ovl0XMW1Ufte52XVjYIAAggggAACCCCAAAIIIIAAAggggAACCCCAAALVERgsoir6IcT1g4lG/1AAMAWBbgEdFwoAjis6nvQDFAUBBKovYH8sjqvpQ8+8QB8QB8O0SOCR592BNL6BIVAigEB/BVzv1R3bjPlPf0fgb3/3TrW3PrTr3h3g42rZy6DOuO0zDQEE/ASS+gAF+FMQcAkoQfRHz8QfIwpE9gl+dq2b6QgUKaBAfQXTl1l8RmCw29f3pRqJYWBgIEq4UGC+Hkq++OY3vxk0SoMSEeIC/u227HPa6BB2PtVN/cLevXuj+mhUT9XV1s3VZ9jl7bO8XQkSdp68z1nMk7Z55syZTUkvcfPr/Na5//S3nGQkNyWI+Jz/tN+SRiXRtlUn36IRR+z2u+ukupH84SvJfAgggAACCCCAAAIIIIAAAggggAACCCCAAAII9E4g/tfWgO3rhwtXoP7WfU+Z7fu/H7A2Zm2bgILDNUpMXNEdz/TjIQUBBKoroPdo0ghQriSv6raImvVSYNvw00bXCnFFQQ+6xqAggEB1BZLep68cGjRKAqEgkCSQFCDuGySYtH5eQwCB8gSSErUU2M8IUOXZN2XNShR1XSvQBzRlL9e7HRrZwTf5IW9LNcJEaNKDzsP6Tl4P/R3yHarapsQDn6J500YK6VyP6qH66H2szwv627eEJGb4rtM1n7x9k3vUBjm7ihIyQo8VrVM+ctLfId9/+CR3qG06rkKKEj2y1ilkO8yLAAIIIIAAAggggAACCCCAAAIIIIAAAggggAAC+QVyJ4C4An9VNY3+QUEgTcB1nOgHw6Qf19LWy+sIIFC+gN6jriAD13u7/FqxhToJJB0nBH7VaU9S1zYKuN6j9+7qzegfbTwmQtusY+XF/fHHigLhuNtwqCjzI9A7AVcfwAhQvdsHdd+SjpWkUUB0R3oKAv0SUNLD4uKi16gORdVRQf2+CQl5tmnbFrIOJYuEJqiErN/OqyQK1a9XxSeRwtZF/V5SkoZ80kblsOvK8xxynGjeXnrmaRfLIoAAAggggAACCCCAAAIIIIAAAggggAACCCCAQJhArgQQBf3qrlBxRXf03rpvJO4lpiGwQUCjxLjuAM8oIBuo+A8ClRM4e/ZsbJ00sg8jQMXSMLFLQNcKrj5A1xiuBKOu1fBfBBDosYCCMl0BUEmjOvS4mmyuBgJJx0vSzQZq0DSqiEBjBZScpSStuKKAfteoDnHzM63dAkmjgNAHtPvY6GfrFcTf6+QP296FhYVSEy2UDJC1bUomKDMJpOz1W+POZ43c4Tu6ib6bOHr0aOfim/5WAkuZSSBad8g+0IgqctVzUcXXq6jtsR4EEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBCIF8iVAMKd3+NRmRou4LoDvH5c466P4Z4sgUAvBBSc7wr+db2ne1EvtlE/gaTjhZGg6rc/qXE7BFxBmQr6dY3o0A4ZWhkqkDQKSNK1Ruh2mB8BBIoTSOoDFNBPQcBXIGkUEEaC8lVkvqIENPqGkiMUxF9kwHxI/bRdBeyXEWRfRGKL6lZ0QoHaLPeQxIYQ07R55aJEEJ+i81LadxQ6fpTIU+QxZI8LrTu0KOlneXm5kJFAtI80GgwFAQQQQAABBBBAAAEEEEA
Lu/6y7w71y+t2mzugO8AsoPHz686TUmIIBA+QR8I0Bp9A8KAu0Ce3YZ84sX+80vL6y3vxQllGpkyYmJiU2vMaF3Aq6RPuMaDQwMmOHh4fi/D57Vn58/f95oeVuysJbRY3R0NGif67pTIwe3F9d3Ea3zaR5XO/bu3Wv0SFPi9al9qpOtXlrfgQMHHrSxk2sbrT+pnbLUvmgvavfZs2ej9rfuB7X56tWr7bMX/n+1RfXRsx62dqkteujYkJutXXlUVB7yuXTpUnSM6m9fiY1VL+1bPUKKq53ty8brb5+e9P+kemv5LOtWveN95dqG1qtHN/ZVfE6xeeoY0f44dOhQocdM0r7gdQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEuiWQSwKIfizRD7S2oqDfxw6+bHuJaQ0XUGLQ7d99YL65+dkmCY0mox/80/4Au2lFTEAAgcIF9GO9awSoKNnre/QBhe+ECm5gx3M/iPqAu1c+3lR7XVOcOHGCPmCTDBMQKJ+ALwFQyR9KAqEgYBNQ8O97a38yt+5sflUB5Qr2oyCAQLkFdM2m74NsRe9xBfpTELAJ/N33+sxbHxpvH1BUwLetPkzzC4yNjXlnUND18vLyg3kUpK2+3HV+iGdUELceOpdofn0GnJqail/e9Kx5k+qyaaE/T/Atq+2G3oRGbVLys9poS1xo376C5vXQNbO+4zx69GjwtlrXJR+tx1e0D7Qv4qL5VVfXckn7J15PXs/az6pPyHa1v/SIjw1dF+b5HYHWGyfFpGmf6qQS2+o8dfz48ei49Z2ztC0dA0llcXEx9TWwjsXx8fGkVUfJPr46xivQcS0ffc+XdV/Nzc2ZkG3F2/Q9qw46/tVOX1G9NY8eml/7JfR97VsvryGAAAIIIIAAAggggAACCCCAAAIIIIAAAgggUFaBXG7H6LrrrxqtO3xTEHAJPH7kuPUl/WjjO66sCzERAQR6IqAfsfWetRWN/qHRHigI2AR0fLjK5OSk6yWmI4BASQR07nclACro99WDfSWpKdUoo8D9Y8T+cVSBXgo8oyCAQLkFXJ/ZlfynAH8KAi4BHSNvOkaI0fUFfYBLrtzTte/0OU7B6CGB462t0bIK2tay+ruMRcHkIyMj0fGZpY4y0XlzcHAwSm4oso2qq5JlXMkfRW67fd2qg9qsYyPtcaF1yVrnBNmHJFG0b7/1/0rgkIvqkoeN6hbvU995S4k/IUUjyqQtGr0kqSiBRglISUXJE3LWe7GTfaX93em+Ul21Dq1L9UpT4v2ituhvCgIIIIAAAggggAACCCCAAAIIIIAAAggggAACdRSwR9ykaKl+DHD9wLF9eL/Ztm8kxdqYtWkC94+RZ63N1nGV5ccm68qYiAAChQjoh1RX8O+2fc8ajfJAQcAloGsE1zGiYIw8AjJc22Y6Agh0LqCAHFdAje78TkEgSUAB4q5RYlyB5Unr5HUEEOiOgO/z+qsHGQGqO3uh2lt55bk+5ygxrs+Y1W5x/WuvwHrXd8ShrVegd9luBqDrXQWS69rEde0b2j7Np+86tb5OrVzblF9ZrqOUSKDjIo/vd2Wv9WU9PuRdVFKM6qZ6ufbp8PBwcAKGa7+6pockRxw6dMi1+IPpcQJWGfaVKiVP1amTEif85PG+7aQeLIsAAggggAACCCCAAAIIIIAAAggggAACCCCAQBECHUdm+X5Qco3uUERDWGd1BXx3gPcdX9VtMTVHoD4CvuBf33u7PgK0pFMB33Fy9uzZTlfP8gggUKCA6z2qkR0U1ElBIElAyR8KFLcVBZ+RCGiTYRoC5RBwfVZXH/DzF+gDyrGXyl8LV8Ko+gBXIHX5W9XMGipYW8HWeRQFtJdl/ytwXEkDebWt1ceXMNA6X5q/NfJHWezUPn1nlHdR+7TuNCVOLCo6EUD1cl2/howCovq5lre1V8dlSMKGRgDxlTLtK9VT9cnrOJZR2uPFZ8VrCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAgiURcAebRNYO98Psrqj95ZdTwWuidmaLKA7wGukAFvRD3RF/zhn2y7TEEAgWUDvTdedWfWeZgSoZEPmMNG1gmsUEP3gHxLMgCMCCHRfwPf+dAVzdr+WbLEKAowCUoW9RB0ReFRAwamuazT6gEet+J9fwDcKiCvJyL9GXu2FgAKs8wrWjutfhv2v7zyKSv6I26nAdH33mUfRfiiDm9qSZwC/zUbHW2hQv/Zj6Ly2baWd5trWxMRE0KrOnz8fNJ9mCjl2lPwxMDDgXGc39lWakTy0b/M+n/DbgnP38wICCCCAAAIIIIAAAggggAACCCCAAAIIIIBAhQU6SgBxBf7Kw3dH7wp7UfWCBFzHi36gKeJucQU1g9Ui0CgB/SDr+hHV9Z5uFBCNDRbwHS9lCWAJbgwzItAQAdfnAEb/aMgBkGMzfaOA+ILMc6wCq0IAgZQCrj5A7+eXhhj9IyVn42d3JQ0pySjNnfAbD9lDANf3Ap1USfs/JLi9k20kLavAeCVVFF20nTwMy/LZWd/j5h3Ab9sH2kbIMaLkg1Df4eFhMzc3Z5aXlx88zpw5Y6amprxJFK3107FrO2727t1rtP6kEtKmeB0hySKHDh2KZ9/03K19pe2EtEv7KU2yyKYGMQEBBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAgQYJZE4A0Rfyrh9zGP2jQUdQTk31jQJy9uzZnLbCahBAIE8BV+AXo3/kqdyMdWnEMNcoIAoSCA3WaIYWrUSg9wIKxrQFNalmriDO3teaGpRZgFFAyrx3qBsCjwr4grJfPdhvlARCQSCNgG8UENdnzjTrZ97eCGjUgRMnTpjFxcUokF5/HzhwIFVl2oPbFTzfGpgf/x0SVO9aVuuwjcwQmlygBml5tfN//a//Ze7duxc9VlZWokQC38gLMUZeQe9l+NysPiI0EUXHg5Irrl69+sBNf2ta6LGSlNwhE9fvF7F//Kztar8p2UPbjx/av0oKUd1CjjWtz5XscPTo0XhzzmdXAkn7AqHz6b1oK2XbV6pj0v5sbYfeW9pXeg/H7zu9B/Ve1D6jIIAAAggggAACCCCAAAIIIIAAAggggAACCCBQd4HMCSC+gMzHDr5cdzfaV4CA6w7w+kEq9Me6AqrFKhFAwCKgPkDvTVt57OAR22SmIeAVcPUBaQI2vBvgRQQQyE3AlZyroF8FcVIQSCvgGzXA97kz7XaYHwEEOhfwBeQrmYuCQBaBo8/bv570fe7Msh2WKV5AAfIKolcQ9szMjFHwuQLp9XecsBGSFKGaticca7k4KL/1OWR9rmW1Ho3M0FpCEzLitipxQO1srYdeU3B6aNKAvvd0fcfSWresf8deSsSJ90l7u7Ouu3U5JX/Iz1fkJLM4+aa1Hvpbwft6TUkXSUVmGl3CVXQOCSnaVlLSgOqt47qTIvuQEjL6UUjb2o/L1m2XbV9pX4Z+/x+/97TfdGzHRftIbY6TeVrfk/E8PCOAAAIIIIAAAggggAACCCCAAAIIIIAAAgggUBcB+y+sAa1z/eivO79v3f1MwBqYBYFHBTQK
iO4CbyuuQEPbvExDAIHiBVzvSb2Htw/vL74CbKF2Ajp2dA1hK65rDtu8TEMAgWIFfElZuvM7BYGsAq7RY3TMhQS4Zd0uyyGAQDoB1/tRCYCM/pHOkrkfCviSh1zH3MOl+assAgreV+C+grNdRcHaCs4OKe0JICHL5DGPgtBDkhiS2qq6KAA9ZD7NGzpyhuYNKdq2rDUqguqghxJxlMSgv5WckmcJDeBXnZKSLVQvJdDokVRc309pubW1taTFo30Ush2tSMe47/hO2ljo8r42xdu4dOlS/Kfz+dChQ9bXyriv0iR/6PiVpa9oP2k+vQ8oCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAgjUUSBTlJZ+gHP9CLfj+R/U0Yk2dUnAdQd43flMP05REECg9wJ6L7qCcBj9o/f7p8o1cB0/OuZC7oBZ5bZTdwSqIuA6/6v+jP5Rlb1Yznru2WXM6L4+a+VCguCsCzIRAQRyFVAf4Ppc/upB+/s31wqwstoK+EYRIxm8Ortdo0uEBFvrDv1Jwdtxq5MSMeL58nwOOeaURBHSVtVL84WMZuG7zk7bPgW/K8FDiRah9Uy7jfb5Q9xUH+3/0BJyTPm+o3L9ftG6/bQJHZ16Hj16tHXz1r9Vb1d/qwX0vgg5XlzWZdxX58+ft1q0T1QCUeg+0L7VMURBAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQKCOApkSQFw/Euju3TueIwGkjgdKt9q0fWi/6d++w7q50DuBWRdmIgII5Cbg+5F5x3N/ndt2WFHzBDR6DCNBNW+/0+JqCbgC8V8a7jMK4Kcg0InAK8/bA8hJBu9ElWURyE/AFZw5tLvP6EFBoBMBVx+gIOiQIO5Ots2ynQsoIFvB/aElJAhe6+r2vtf2fIH3qpOCyjWSSZqi+ZOSXkKD+pO2q/r1YuQD33dFcZ3TBuOHHleu/kmJN7LwPUJHpInbkHR8xPO5nl1JGe3z+zx9r8Xr0XZciRIh37F3c1+Fnud1jtHxnaZodJek916a9TEvAggggAACCCCAAAIIIIAAAggggAACCCCAAAJlEciUAOL6kUGBmxQEOhFQ8oeSQGzFFXBom5dpCCBQnIArCVAJgK4EruJqw5rrJuAaScx17VG39tMeBMosoMAc12g8hzYSQCgIdCrw0lCf0V3gbYV+wKbCNAS6K+B6HzICVHf3Q123plGgXMmkfB9U/r2eNiEibRB3twRc57nW7R86dKj1v8F/hwT+X7p0KXh9rhnTjJDgWkfa6aGJM1kC8UOShVyfUXSc6dj0PdLUaXJy0nSaAKLthRz/vmPB91q871zHqfaVko18RfVL4xKvy7XN+HU92/ZVaKJXyPpbtxX/HfLei+flGQEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQACBqghsSVtR/RDm+pHgsYNH0q6O+RHYJPDYwZfN7csfbJoe3w0s5EeyTQszAQEEchHw/ai/43lG/8gFueErUSLRrQunNyno2kPXIPxwv4mGCQh0TcAVEKeAfYJ/u7Ybar0hHUtKAnnn8r1N7VTwr+7gS0EAgd4I+L4LIgmwN/ukjlvVsfTWh5v7AB1/upM/pbwCQ0NDqSrnGpkg1UoKmDkksF51twWxJ1Vnz549SbOY0EB414qyjJDgWlea6SEeWd1C6qHvjPXIkrSQtH7tE7VPN0PRNvIoSmpJ2tdxv2t7r7g+l7XWzfXdSci+kmPIfK3bC/1bhvp+p7VdSRbxul1til93PStxZH5+3vUy0xFAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQqKRA6gQQ15DqW3c/Y7bseqqSCFS6XALxsfTNzc82VUyBXySAbGJhAgJdE3DdeVXn/237RrpWDzZUX4H7x9Kz5u6Vjzc1UtcgWX/w37QyJiCAQGoBV0CcAvYpCOQloOBfWwKIAsPag8Xy2ibrQQCBZAHXd0EvbbxnXaM2JK+VORB4VEAJpbYEEAUMqx/g+6BHvcr0v7T7pohA/Tw8QgL8p6en89iUdR2dBt1nHSHBWpkUE69fv544t9rWaft8G9G+y3pcxdeZcf3izz3x/33bzfKaEnVCjiMlemje1hInhrROa/9b35u0Jli0vh6yr7QNPYoq8taoLHFZW1uL/3Q+t87vnMnxQtrzk2M1TEYAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAoFQC/Wlr4/ryX3fspiCQl8D24f3WVbmOP+vMTEQAgdwFXD9+u96zuVeAFTZCYMfz9msK+oBG7H4aWVIBBd673oPc+b2kO62i1VIwuUYCsRXXMWibl2kIIJCvgOv9Rx+Qr3PT1za0251Q5DoGm25Wlva7gs1d9csaqO9aX17TQxJA8tqWaz267s5aOgmSz7pNLaeA/l6X0DrId2FhwUxOTpqRkRHT19cXPY+NjZnZ2dnooe++XN9/5dFOvV9Cbm4RJ6K0btM2rfV1/e1LBAp1al9nnv9vf591csyH1Cvt+SlkncyDAAIIIIAAAggggAACCCCAAAIIIIAAAggggECvBVIlgOgHAtcX8gT/9npX1mv7roQi/UDU/iNRvVpOaxAor4Dee64fil3v2fK2hpqVWWD7kD0JUNcgRQZhlNmEuiHQawHfe08B+xQE8hRwjSrjGoEgz22zLgQQ2Czg+y5odB99wGYxpnQi4Eoqog/oRJVlqyTg+t4lqQ1KqmlyoLvrN4vYTd9pKenj29/+dvSsJJCs1vE6O3n2JWnE67UlvtmmxfPHzyHJJfG8vXjO8t3+6OhoR1Vt8nujIzgWRgABBBBAAAEEEEAAAQQQQAABBBBAAAEEECitQKoEENcPDFt3P2O27HqqtI2kYtUT8B1TruOweq2kxghUS8AV/Kvzv96zFATyEujfvsN5TBH4lZcy60EgnYDrTrMkf6RzZO4wgdHv2APKXdciYWtlLgQQyCrg+gzuG60h67bqstylK/fMG+/fM99/c938l7//k/nf/tvDx3dPrpsfvr1u3vrwnrl+sy4tzq8drmsLXyJSfltnTQhUV6Cso6qUQXRmZsYMDg5GI3+UoT6qQ0iShpJaWvtgnQeTkie0XpIdNu/l4eHhzROZggACCCCAAAIIIIAAAggggAACCCCAAAIIIIBAhQW2pKm7K+iS0T/SKDJvqICOq68+fHfT7ApAnJqa2jSdCQggUKyAqw/Ytm+k2A1XeO13r6yYu1c+Nnd/vxK1Qn+rKGkmTpyR37Z9zxolPVAeCmhUmS9unHo44c9/Efy7iYQJCHRFoDXwqHWD3PndRMG9CvQtovx//9e3ilht6dfpGgFEQXAKfCOAq/S7kArWTMCVBOgaqaFmzQ9uzq07xvzTb+9tJHasG/3tKms37pm1G8a8t3rPvHbOGCU8/OKFPqOEGooxurYY2G6shvosEBI0jSMCCCAQC2jUD4320WlRUoXOP7oW1aPTEq/P9TkrXr/64Pi8lzSvlgkZWSRed5Wev/zyy46qy3dJHfGxMAIIIIAAAggggAACCCCAAAIIIIAAAggggEAJBYITQOJgG1sbtg/tt01u1LRr/+3
5wtq79//6XWHrLvOKFRRtSwAJ+bGrzO2ibghUVcD1YylJgI/u0fU7t81Xv303On/pb1v55uZnRg8lhOg8p+SPxw6+bB773sskgvwZTEkxtqJAC12TcEdLmw7TEChGQHeZdd1pluBfY4pK/ihmb1ZjrQr8VSC0gqTbi65HSABpV+H/CBQr4PocsH9fsdut0tqVzHHsrD/xw9UeLavHL17sNz/fSAShGLN/IwlEJu2lNRC6/TX+j0C3BHQdUuTn0SLX3S0j23Y0QkmRo5TY1q3vkLMkfxw4cCCq6549e6Jn7fP4+nNsbMzWvEzTlKyR9D23Xp+bm4vW77oxS7xxHTsTExPxfzM/y1KPokqWdeeRdFNUe1gvAggggAACCCCAAAIIIIAAAggggAACCCCAAAK9EAhOAHH94K+g1a27n+lF3Uuzza9vfFKautSpIr6gch2P+jGOggAC3RGIg+5tW3MF6tvmrfu025c/MF+cmzeuxA9X+zX/rQuno2SQJ46+bnznP9c66jZd1xa6xrBZqg+I74BZt3bTHgTKKOAKttmzyxg9mlxsCQpN9siz7aMbgeW6Q357YTTAdhH+j0CxAq7vgrRVRoG6b//WhxrJY73jHfHLC+tm9UafOX20PxoBo+MVVngFOrZsCSCua5IKN5Wql0xAAfS64YCvKBif7yQfFVJyhK+/0NyaZ3Fx8dEFC/7f9PR00Ba03/Udw9GjR7u6b7VNjVDiK0rE17lPdUw6B4Z8TxKSfKHj+8yZM75q5fpayPHjuiFBSEWS3tMh62AeBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQTKJtAfWiHXDwwE/hrz9ackgIQeR2nncx1fruMx7fqZHwEEwgRcP+THQfpha6n3XJ8vnDR62BIWQluuZf/49k+NEkkoxrj6AAX/UhBAoHsCrusuAn+NuX6ze/uhaVtyHV+u47FpPrQXgW4JuN5zrvdot+pVlu3klfwRt0dJD3kkk8Trq+qzkgBtxfW51DYv0xDIIqBA9KTCcbhZaOfOnZsntk3pdhC+Rs4ISRrQPl9ZWYkSHrqd2BMnnrRRbfqv2qJHUtGIIklFo5oklRC3pHWkeT20TlnrxXs2zd5gXgQQQAABBBBAAAEEEEAAAQQQQAABBBBAAIGqCAQngLiCLbftG6lKWwur5zc3Pyts3U1f8bbv2I8v1/HYdC/aj0BRAmtra9ZVuwL0rTPXeKISP/JM2sh7fVWld11juAIRq9pO6o1A2QVc111Du/vKXvXC67f2aeGbaOwG9m/c/d1WFPiVNfjLtj6mIYCAX8D1OWD0O/b3qH9t9Xr10pVikjXeuXzPKLGkycV3jUEgb5OPjOLbHpIA4jovFl+78m4hJHGi2+/d0O8NNCpJyKgYRemHJG2cP3/e6OErockkZdxXIe87tT0kCcZm5Po8a5uXaQgggAACCCCAAAIIIIAAAggggAACCCCAAAIIVEVgS2hFXT/SbN3tuC1f6IprMN/d36/UoBXlbMLWp5+xViz0RzzrwkxEAIHUAq73nCtAP/UGKrzAVx++m2vyR0yhJBCdAzXKSlOLK8HIdU3SVCfajUDRAq4+YGh30Vsu//ov/b7ZAbpF7qGB7cbs2WUfZUUJIL0M1Cuy3awbgbIJOPuAp8tW0+7W59YdY44trCduVIkMh4b7ovOZzmkfXTFGfYeSR3zljffXzSvPfcvoXNjUolFmbE46JkMCmJvqRrs7ExgdHTXz8/PelejzqEazUMB92qLj1zcShq5vqniNo/ekPHxtk5UC+A8fPpyWLUr+TUoAbj8vhAT9a5m03nl/HzExMWGmp6e9dq6+uBUy1LXdqXUdrX93c1+F1unUqVNmamqqtZpBfy8sLATNx0wIIIAAAggggAACCCCAAAIIIIAAAggggAACCFRJICgBxPcDC8G/xnz96cYv+JRCBFzBvzom9aNilh9bC6koK0Wg5gKuH5ubnJygXa4RoL44dypx78tp+/D+B/Ot37lt7qx+FC3/YKLlj8/PnjR/9fpZyyvNmOQ7vnRMht4lshlatBKBYgTiay7b2hWY2fSy9qk9gFcBu0NP49Pp8aHA6es3Nxsr8C40UKzTOrA8Ak0XcH0O8I3Q0ASzf/rtvY3zk7ul6gdOH+03L20kf7SW0Y17qPz8hfuJDf/17XWjRBJb0XRtQ/M2tSjR9JLl67br1683lYR2d0FAQfRJiQz6PlJJIjMzM6lqpOXGxsai7zNdC545c8YoIaCKRXZJgfYK4A9NVGg1mJycNL7EC60zy7WhEn7SFFefmGYdtnlD7GzLtU4LGUkknl/bU4KHr3R7X4XUSZ9N9d5LkwSi+fXeoyCAAAIIIIAAAggggAACCCCAAAIIIIAAAgggUDeBoAQQ148bvsDMukG52qPgXwXy2kr/9h0bd2/f+HWfkllAhlt2PWUNktZxmeXHvcyVYUEEGirg+pE9fn82lCVqdlLyh85fT0y8bmzJko8fOW40eohvHV/f+CRKFGlNHmmatxIB7175eFOz1QeQALKJhQkI5C7gSgRveuCvoBX46wrcVfLHb17rz31/NG2FwxsJIO+tbk4AIfi3aUcC7e2VgOu7INVHo1k0tejc/9aH7tE/lPzxm5/0G19fqSRKzfP9X7mTQLSNn7/wraYy/9lvcx/gOy4bi0XDEwX0vYaCwENuJBMSjB8Hx4d+JtW2k5I/VDdtu6rl6NGjiQkg2g9ZAvhd30vFVtp2lrK2tpZqsdnZ2VTzh86s5I2k5BnfutIeO/JKSgDJsq80kknWfSWDpDrJQPtAvweEvPfUXxS1z3z7g9cQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEOiGQGcJIE8/0406lnobCs51lccOvmwGXjjmepnpgQIkgARCMRsCBQm4gn+bnuCmBECN4uEqSpJ88ie/NkqUcRX1E/3b/5P5fOGkaxZz+/IHj4we4pyxpi/I0ZYA4joua8pAsxDomYArgKfJgb/xzrhmGZkifm30O33xnzx3IDD0tH1h+gC7C1MRyFvA9V5r+ghQ71y+50wA1D74Hz/yJ3/E+0kJIq8e7De/vGBPJlGiydqNe95EknhddXx2XWuQAFLHvd2dNik4fW5ublMSiM51e/fufVCJEydOJAbjxwkdi4uLUTD6g4Utf+iY1QgWSceuRv5QIH9VSxyUn9RO7Qf5JY2gonk0b1JihPZd1sQZfdbRdkLcVY+QBAXtvy+//DLVblT9VQfVJUtJ237NLzdXPx/XIc2+0jGe5OPbVzr+4+3F27c9y0jJVEnvPR2HSUlXtvUzDQEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQACBqggE3ZbWdTesLU88VZV2FlbPrz91J4BsJUEmF/dt3xmxroc7/1pZmIhA7gKuH4SbPgrUrQunndZK+nji6Ove5I944R3P/WBjhJBn4/9uevYlmWyauYYTtux60toq17WJdWYmIoBAZgFX8JJGZmh6+eiKW8CVuOBegldsAq7gX1dikm0dTEMAgewCriBe13sz+5aqtaRv9I9XnuszaRJk/u
57/v70kqevqZZa+tq6HLMGSKevAUtURWB0dDSoqgrg//a3v236+vrM4OBg9Ky/2xMMFKQ+NTWVuM44EF1B5lpH6/WJvkfRtPHxcTMyMmJc59N4Iwr+V+JJ1YsSbEKKRmXQPtBoIHKL39d61v+VCKDXZZhUXG4hx4W2p30Ub9+1LdVTCQ6hJWl/29aTNomjdR0aPSNtOXPmTNAivn2lhA+5aF8lJX9oY659FVfk+PHj8Z/eZ+0vve/ipJN4/+l9F9dJ77t4undlvIgAAggggAACCCCAAAIIIIAAAggggAACCCCAQEUFgkYAcX1Z7gtYrahH6mr7RgBpenB0akzHAhoBxFay/JhmWw/TEEDAL3Dp0iXrDK7AfOvMNZu4fud2NDKHq1ka2SNNH/DYwSPWUS7i9d+9srKRJDIS/7dRz1t377O213VtYp2ZiQggkFnAdb3V9OBfga5u3JXdVXRXd0rnAjh2bsgaEOhEwJUEuPeJ5p7jNCLH9Ztu1V+8GHSflQcrGNhuohE+tF5buR6NNtVcb/loJJT2ogBxjTZAQSCrgOtGF/H6FKiuQPKk+TS/jkc9OinanpJAql70vtRIDiGJG7JVokcnJd6ebR3Dw8O2yZumad8pgUH1HhoaikbG0Eyqn248EXocbFpxyglK4ghxa1+tjpssySOxXcg2i95XcZs0KszZs2cj+3ia71l1D6m/bx28hgACCCCAAAIIIIAAAggggAACCCCAAAIIIIBAVQWCEkBcgV/9f7Gjqu3Ord6uBBDd/d2VuJDbxhuyIpcjwb8NOQBoZmkFXIH5pa1wjhW7s/aRc206/w+8cMz5uu2F7cP7bZMfTPvm5mcbf488+H+T/nAlGnUaYNMkQ9qKQBECJIAY4wrWVbAqPvkddbK0BVurH1DgGgUBBIoTcH0X1ORRjt65bE/U0F5Q0lqW8/+rB/s2znN91h2ZZX3WFVV04tDTfebSFbd5RZtFtXMW0PWARinIsyiofnFxMRploOjvH1X/kBFH8mxfkevSKCDqP1x9SF7b1j7yjWKhpAiN5qLkhaSifaxRPnpZVF+1Ke3xliX5I25nWfZVXB89632n0TsoCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggg4BcISgBx/fCQ5u7m/mpU81XdAf5+UO7m+m992n7H8s1zMiVJYOvTz1hnKfqHROtGmYhAAwVcgfauwPwmEN1ZdSeA7HjurzMRDLzoThpxJcJl2lDFFmpy2yu2q6huTQVcfcDeXX01bXFYs3Q3cltSgpZWsColPwEda/fvgJ/fOlkTAgh0JrBzI9GtqeXSFXfLDw1nO/+/8ly25dw1qc8rrmNN3weRBFif/dxpS3QshAb6p9mWRpBYXl4uNAlE21DAe52Kkhhit6K+u423of3uKxpZZXJy0jdLIa+5fktJ2piSOdKOaKGRQ7KW2HFsbMz0el/FbdB7Qok9ee43tVPHSlFtjOvOMwIIIIAAAggggAACCCCAAAIIIIAAAggggAAC3RToT9oYX4y7hb7+9BPni9u+M+J8jRfSCehu+hQEECifQFMD85X850sA2bYv2/lfo4a4HlnXWb6jJluNXMeaKzA921ZYCgEE0gg0/Y7ka5+670Y++p2+NJTMm1Eg5G7OGVfNYggg8GcB17VWU5MAlfjnGv1JZPu5D0ju753hjVFVbCVrcLVtXUyrh4BvJIhOWhgngSiAPO9S5Lrzrmva9cWJBUUkasXrll9SmZiYMHrkWULWl/X3lLTJHLLoZAQQucSevd5XrftIxnm+p5VkpXZSEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBCok0BiAojrR9Vt+56tk0Omtty98rFzOdeoFc4FeMEr4EoCcQWkeFfGiwggECxAcOVmKt+5X3NvH96/eSGmdCTgSgDpaKUsjAACiQKuzwGJCzZghrUb7kYOPb35tUtX7pn3Vu+ZN96/Z965fM/o/74g4s1raO4UV0IN1yjNPSZoee8FmpoEmHTeHt1nT1bo/R6jBgjUX0DB6woYLyLIW4kGV69ezTWRYGpqyqysrBRS37Lsbe0LjQQyNzeXWzu1n+UWkvwRO+i4CEnaiOd3PWsECW1bo4qElCxJIErm0HZCS6fJH/F24n2ltunvPEqWfdW6Xe2zThM31BatQ3WhIIAAAggggAACCCCAAAIIIIAAAggggAACCCBQN4HEBJC6NTjP9nzz+WfO1W3d/cym176+8Ym5e2XlkYfuJE9JFtj6NLfSTFZiDgTyF3AFVzY5CdA3+pPt3J//XmGNCCCAQHcEXEFLQ467cXenVuXYii8IOPZRksexhXXzn6f/ZL7/5rr54dvr5pcX1qNp+v93T95/TdOVFEJBAAEEECi3wNqn7vrF5/7WOW7dMdH5Xcl/Ou/Hj9fOrUcJgeonKH6BnX9hf/369ev2F5jaaAEFjCtAX895BbHHoFqfEgmU0KD1Zy1aVskkSopoSlGyi9rcSXKBAvgVyC//NAkSsbH2nZbPsqyW0fJqgxJP9H89ksrZs2eTZrG+niapI+2IIdYNtkycmZnJZV9pP2XdVy3VMbLIeuzovabzQRrP1m3zNwIIIIAAAggggAACCCCAAAIIIIAAAggggAACZRfYklRBV/Cva0SGpPXV6XVXELBs4ruVK+Hj9u/+xdxZ+8i4kj00r+4Y/9jBIw+Wq5MTbUEAAQTqJHD39yvO5sTn/tYZdO7XqCHqM5Q4+M3NPxglisR9hUaMInGkVWzz31t2Pbl54sYUjQLFnRytNExEoFCBAUcwZqEbLdnKXUHAA9uNUbCqEj9CkjoUHKyRQfT45QVjTk/0G+4gX7KdTXUQaKgAo0Bt3vGXfu9O2GjtG3VuV5KHqx+4dEXrvr8u9RsvDfWZX7zYb5o6sspm6YdThnY//Lv1L9d3la3z8LeJArBDHJJGU1Agd1JJWodt+SLWq8B8BevroWRmHSuupGbVSfOn+UypefVQAsfS0pK5dOlStA3XCMWaV9sYHR2NAtGzJqZoe0nn5azrjveNAuZV3yKK6qbkAj1iN+0XPWztihMtYjcZdlqUCKBH6/Zt+03bat1ntmNbySS2erfWMev+UKJMaGJHEfurDPuq3VHHjRKJtO/Onz8fHTft/YDqrX0lO+1n7cPWEvIesu3r1nXwNwIIIIAAAggggAACCCCAAAIIIIAAAggggAACZRLInABCsKoxGtHDVjRahQJ+//j2T6OgX9s8rdO+ufmZ+erDd6OHEkEeP3KcRJBWIM/fST+2eRblJQQQQCCTgM7ZrtLaN2q+WxdOm9uXP9g0uxJCWosSR3Y8/wPz2PdejhJDWl/jb2O2PPEUDAgggECpBFwjgOzZ1We+/6t143rd14jrN010d3gFAf/8hT7frLy2IfDll1/igAACBQooKNdWSFKzqRgz+p37522N6vFfN0Z2UhJISIlHCXnn8p/Mqwf7zJtH+kMWYx4EggTyCg7Paz3tlS5qvfF2FMyth4LB8y4KNlfChB7dKN0ITFfAfHvQfBFt0/4oYp+E1jWP7Re5P3RsFf3e6KZV6
LaS5uv0PVfkPkuqO68jgAACCCCAAAIIIIAAAggggAACCCCAAAIIIFCEAL8sZ1TVyB6uoru6f/qzvwlK/mhfx53Vj8y/nTy6sax7/e3LNOH/rUHVre11BaW0zsPfCCCQXaD9jnrZ11SfJX0JIPEIIEr60Lnclvxhk4iTRdR3qB+gIIAAAgiUV0DBva6ixI8syR+t6/vlhfXozvGt05r89/599tbzOcDuwlQEEChOwHf+11Y14sf33wxP/miv6Vsf3jPfPZl9+fb18X8EEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAgXoKkACScb9+feOKc0kF72oEkKxFy/7hzR8TBNwCqKQaCgIIdF/AlQCy7Tsj3a9MCbboGvkprpoSQJT08fnCyUz9QDx6lJanIIAAAgiUU0AjdRRdFASsQGIKAggggEA1BHb+hckleU9JhBpJKnQEkWroUEsEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAgTwFSADJqPnNzT9kXDJ8sc/PnjS+O82Hr4k5EUAAAQTyEFj/D39yn5ID80jeUBLJF+dO5VFl1oEAAgggkLNAmhE+9uwyZnRfn3nluT7z0nBf9HdodV47RwBwqBXzIYAAAr0WeOP9/M7Z6mfUB1AQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBCwCWyxTWRaskDSXeBb16DRK7Y+ve/BJCV1hCR26E7wny/8g3nytf/+YFn+QAABBBAor8Ct90/nVrmvPnzXbN39jNnx3A9yWycrQgABBBDoXGDtRvI6lPDx6sE+M7S7zzqzRvf45YV14xtNRHd/VwDw6Qly9q2ITEQAAQRKJGAbsWNguzEvDd3vC4Z2m+icr+SO86v3vOd/NUv9xCvP30uVOFgiDqqCAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIFCgAAkgGXHvXvk4cUkF7T528OUogLd9ZiWA6A7vCvBVooeraDuajwBglxDTEUAAgfII2M7nSgLcPrTfbHniKbNl11NRAuA3n39m7qx95D3/q1VfnJuPltU6KAgggAAC5RC4dOWetyJK2FACiK/o9Vee+5Y5trAeBfm65lUA8JtHjFEQMQUBBBBAoDoCv3ix3/zd9/o2n783zv86r+v8njTSk/qI/+f//FZ1Gk1NEUAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQS6IkACSAbmkNE/nnzt12bbvhHn2hUEPPDCsSix449v/9T41qkkERJAnJS8gAACCJRWYODFY9G53lZBJYtoxBCd411F8ygJ5ImJ112zMB0BBBBAoIsCvhE7VI37iR3+5I/W6ipZ5PrNdeNLKlGQsEYTaWr56Iq95cPDw/YXmIoAAgj0WCA0EXDo6X7z/V+tG9voIWqC+pz3NkYLeWm4uX1Aj3clm0cAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQRKKdBfylqVvFJff/qJt4aPHznuTf5oXViJIH/5o380vru7KznElyDSuj7+RgABBPIU2Lt3r3V1nJOsLI9M/KvXzzqTPzSjzvvqL5KSOzQKlG1kkUc2xn8QQAABBLoicOvOPTO6r8/50B3f05Y3j/gDexX8S9kssHPnzs0TmYIAArkJuD4H+BLWctt4hVekhL2kUaDi5g3t7jM/f8Hfb5ynD4i5eEYAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQ+LMAI4BkPBS27XvWuqQCeh87+LL1NdfEeDSQL86dcs1i7qx9ZLbufsb5et1fuPv7FWsTXUEp1pmZiAACqQVc7zESEvyUSuwIPWdrhKc7qx9FD9dab1/+l9R9i2tdTEcAAQQ6Fbj1H52uobrLK1j3N6/5EzbStk7r1N3dXYkeBFunFWV+BBDIQ8D1OSCPddd1HQPbTWJCR3vblTDy1of3R/tof03/f2+t2UmAazdsKsYMDAzYX2AqAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggg0ACBxAQQ14+qTb77u4J19cizKGnk1vunnXd5jxIgXshzi/VYF0Ep9diPtAKBOgkoqS9tIqASRpQE4ip3r6ykXqdrXVWcTsJRFfcada6DwPDwsLUZazeaHYxqRelw4iFPAohWLXMlilAQQAABBMor8NJQn1ESSNry6sF+89q5detit+40uw/40pF0OjQ0ZPViIgIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIINAEgf6kRroCvwjGTJJL//r2of3Ohb7+9IrzNV5AAAEEECiPwMCLx1JXRkkj24fdfcDdKx+nXmedFnAlnbquUerUdtqCQC8FXIngvaxTXbc9us+f3NHkUVcu/d6ecEQieF3fDbSrCgLXb1ahlt2vo5L5spSk5ZrcB2TxZBkEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEECg7gKJCSB1ByhT+7Z951lndZqecPPNzc+cNryAAALFCbiCf5ualLZl15OJ2L5kPt/CvgQQ9QFN7wdsdq7j0zYv0xBAIF8Bgn/z9dyzy7++LzfuAE95VIAEkEc9+B8CRQgcOHDAutprN+2JWdaZazQxKVlvf0Iyn4siqQ/4qMH3A1l1jDrG5wDX0cR0BBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBogkDmBJCmBv8WeVDoDvC+4roDum+ZurzmSgBxBaTUpd20A4FeC7hGWGhqMkLSeVqv92/fkWm3Ja37608/ybTeOiy0/h//Xodm0AYEKing6geaGvzbq5249mmvttz77XKs9X4fUAME2gVISmsXuf//ge326SFTh3ZnGz0kZN1Vnsd1rLmuT6rcVuqOAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIhAokJoC4AuybGvwbClvEfOv/cbuI1bJOBBBAIJOAKzEr08oqtJAvwSMpicPXzG37RnwvN/o1VwIkgV+NPixofJcEXHfYdgVkdqlajdvMzr9oXJMfNNg12ozrc+qDBfkDAQQ6FnCNtNPUpLTR7xSXpDHQ4PO870Bd+7SZo834THgNAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQSEwA8RE1NfjXZ8Jr+QvcvbJiXakrGMU6MxMRQCCzgCvI/pubf8i8ziovuPXpfVWufuXq7ks4dQWmV66RVBiBEgu4+oCmBv/2alcN7e7Vlnu7XVfyR29rxdYRaI7Anj17rI29daeZQfl7dlk5cpnIaEd2xlt37NNJArS7MBUBBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBJohsCWkmfph9eLFi5tmVfBvJ3c737RCJngFtux60vt6XV9cv/Pv1qaRAGJlYSICuQu4guzvJwE2b9SKpp6Lcz+wAlf49aefWOd0BaVbZ2YiAghkFti5c6d12dUbCv4t7k7o1o32eKKCUP/z9J+ctfjNa/1m
dF82ExId7KyugGgCf+1eTEUgbwHX9dbajby3VI31DT3tr6fO5VmTRHz9QFNHgbp0xZ5o5Pp86t87vIoAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAgjUR6CjEUC+vnGlPhKBLdFoFNf+2/POR+BqrLO5Eh3imZuabOMK/iUBJD4yeEagWAFX4FdTR4HauvsZJ/jXn2bvF12jHTk31pAXXMcZgV8NOQBoZs8FXIH2vkDVnle6oAoMbPevuJOAaFeQa7zFrIkl8fJVff7I0a3SB1R1j1Lvqgm43mtrn9oD86vWvrT1HdrdZ3x9QdK53LW9pOUYBepROdfn00fn4n8IIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIBAfQWCEkBGR0etAhoBhPKoQCcBvL5lfQHHj9agfv/7+ob97u979uypX2NpEQIlFHC91+7+fqWEtS2+Stv2PevcyPqd28aVsOBc6M8vJCVVbn3anXiStO4qv+7ydF2bVLmt1B2BMgq4Em7XohFAyljjYuvku7N7UgCvr2aXfu8OplbAcVPLtc/tLkNDQ00lod0IdFXAlQSoEZGamAgo/P2ekZ7Or9rPWUk77b2E5fbuamY/4DrGSABJOqJ4HQEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEE6i4QlADiCvxyBebXGS0pADcpgNdnc2f1I+fLvoBj50I1ecEV/OsKRqlJs2kGAqURcAXYuN6bpal4QRVRQp5vRCbfudxXpTur/7fz5f7tO4weTSyuRCPXHambaESbEShSwPU5QNtsYhKILxlDAbyuYFXfPlIg9TuX3UHDrzzXzMDf6Bj71C7nujaxz81UBBDoRMDVDzSxD5DjoWH3OVn9QFqXpD5AiYe+5MNO9m3Zl3UlR7puUFD29lA/BBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAIC+BoAQQV4DN3Ssf51WPyqxHAbi+wN+vPjyXqS0a/cMXTL1t30im9dZhIVeikSsQpQ5tpg0IlEnA1QfonKURL5pYfOdk9QNpXXSe8/WpTU4C/PrTK9ZDzHVcWmdmIgIIdCTgSrq9ZH97drStsi/sC/xV3X9ybj11E954379M0jZTb7AiCygo2hVITR9QkZ1INWsh4Hq/rTkStGrRaE8jXhpyJ4BosWNn3Ql9ttWqD9D5zlWa2gfIY+1Tu6XrmHQZMh0BBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBCom0BHCSDCcAXn1w2qtT2+wF8FRH/14butswf9fevCaed8SjrZPrzf+XqdX1BijK3ozu8kgNhkmIZA/gK+99vXn36S/wYrsEbfOTnqB36brh/44twpb6t92/MuWPEXfUlGroD0ijeZ6iNQSgFXoKUrOL+UjcipUkmBv7r7u280j/ZqaN63PrQHuGrelzbuNN/UO7+7An991yXtvvwfAQQ6FxgaGrKuxDU6g3XmGk0c2G6Mb2Qm9Y3HFvyJfTFHUh+g+V49GPS1XbzK2jzL0ZUYw+eA2uxmGoIAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAhkFgn9Jdv3A2sTg36RA3Fvvn06VGKOEEd+d3x87+HLG3Vv9xVwurkDE6reYFiBQTgHXe871Hi1nK/KrlfoB32hQSuq7ffmDoA2qz/A5RkmAQyQBtmK6jsfWefgbAQTyE3AG/15xJy7kt/VyrSkp8Fe1VeDvD99eN9dvuuuuoNY33k8OEv7FC/47zbu3UP1XPnKMMOP6XFr9FtMCBMop4HrPXWpgHxDvoVee95+bldjh6wdC+4DRfU1OAoy1H33mc8CjHvwPAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQSaKbAltNn6kfXixYubZr/7+4/Njud+sGl6nSfEgb+6M7mtrN+5bf7wqx+bgReOGV/yhuZT4K9vxBAF/j72veYmgLhGmBkdHbXRMw0BBAoSUPDv0tLSprXf/f2KMS9smtyICY8dPGJ8I3d8vnDSqJ9QX2Ar6gO+ODefmCiifkR9QROLrjFshcAvmwrTEChOwBX8qwQHPZo2QsUvXuw37639yXlncu0JjQTy3uqfjIJ3R79zP1B4518Y8+V/GLO6cVfzjzYCp113No/3pO4wP7TbH2Qcz1vHZ9foAq6EpDoa0CYEyiDg6gNUNyWB6DzXtBKd2zfa7UuCifsBjeQ0vHEuj/uAa59v9A9ryX2ATE9PBN+zpXa7wNUH+I7H2iHQIAQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQMAhEJwA4gq0uXtlI/i3geXxI8fNH9/+qbPl9wN7T0UJHts37ty+dfczG4990fy607sSG+6sfuRcPn5BgcNNDfyVgeuu+PzoHx8hPCPQHQG952ZnZzdtzPUe3TRjDSfseO6vo3O8zveuopFAlOSnedUPaNSQr29cud8HrH1kfMtqnZq/yUmArmsMkgBdRxzTEShGYO/evUaPa9eubdqAgl+VqNCkooSXN4/0RyN9JLVbPr4AYdfyGmlE22hqUXKMy43PAU09Kmh3LwX0vrPdEEQj9Yze/5qjl9XrybaVnPHdk/5kQFXsfiJI+hGz1Ac0LcGydUcqScZW+BxgU2EaAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAgg0TSA4AcQVaKO7m+uhINUmFY0CopFPbl/+wNtsBfdG81z2zmZ9cdu+Z70jiFgXqtFEBf66gqNdx2ONmk9TECiVgO89p/fqtn0jpapvNyqj5Lwnjr7uTQZUPXQe84305KvrExOvNzYJML6+sPn4jkfb/ExDAIHOBfS+W1hY2LSi8xsjXTQtAUQI99sclgSyCS1hgpI/fvOTfqPnphaNkGIrAwMDhj7AJsM0BIoVUNC9LQFEfcDPX2hWEmAsnSYZMF4m9Fl9zKsHm+kqo7WNkbJco2TRB4QeRcyHAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAJ1Fgi+rWx8518bRshIFrblqj5Ngbm6o3sRJQosnvhZEauuzDpdIwvwg39ldiEVrZmA673X1D5AuzdOBixiV6uPaWJiTWzpGv3Ddz0SL8szAgjkL+C647YrUD//GpRvjQrQ1R3g8yxK+jh9tN8M7W5u4K88FVRuK65rEdu8TEMAgfwEXO89X6B+flsv75qK6AeKWGd5Be01e2/NPn14eNgoEZCCAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIINF0gVcTS4cOHrV6uIE3rzDWb+Fevn41GAsmzWUr+ePInv27cqCrthq6gclcAYvvy/B8BBPIVOHTokHWFrveqdeYaTlSihkaEyrMUsc4869eNdbmOK1cAYjfqxDYQaLKA63OA7tD9niNYvwleCtT919f7zei+zhM2lPShkT9eGu58XVW3f2/NngDC54Cq71nqX1UBXX+5Au9d79eqtjVtvdUP/Oa1fqMRQTopUQLgRlJh3om
FndSpV8uSBNgrebaLAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAJVEUiVAOIKuFGQ5vqd21Vpc+71VKDuX/7oH40SNzot2/Y9a57+h/9Z2MgindavW8t/c/Mz8/WNT6ybcwUgWmdmIgII5CbgCrzX+1WPJhf1AwMvHuuYQP2I+pO8E0o6rliXV6BrClcCiCsRqctVZHMINE5Agb+687atuAI1bfPWcVqUuLER/Kug3SyJIFpeyyqRpOkjf+j4uHTlnlFika3wOcCmwjQEuiPgev81vQ+Qvs79//r6t8wvXkyfCKLEDyWRfPIP34qeu7M3y7uV6zeN0cgytnL06FHbZKYhgAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACjRPYkqbFrh/8tY47ax81OmB1+/B+8/S+/2m++u275vbvPkgdDL119zPmsYMvN9qw9Vh0Bf76gg9bl+dvBBDIX0CBv3v37jXXrl3btHK9Z3UOa3IZeOFYdA6/deG0uX35g1QUSvyQ32PfezmXZMJUGy/hzHevfOyslSsRybkALyCAQG4CCrxcXV3dtL6m3/09BlEArx4KXlUSw6Xf34v+vnbz/rPmU6Dv0NN90Z3ilewxum/j/xvPlIcC7/zOHvgbX4c8nJO/EECgmwK6IcjCwsKmTWoUKCVt6fzW5KL2//yFvo3Ht6IEhvfWjFndSGT4csNm7dOHiW0aKWTvrr6Nc//9xJH9G8kjTbdrPW5cCUX6HOpKRG1dnr8RQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQACBJgikSgARiJJAlpaWNtko+LfpdyxXAK8CgPW4e2Vl4/Gxufv7lSgZpP3u+Er42LLrKbNt34hR8oj+pjwUcAVP+5KQHi7NXwggUJSA3oPz8/ObVq/3bNMTQISic7lGA3n8yFSUGKmRjKLHp1c2jZSlEZ+27Hoy6gPUD1AeCvj6ACUCUhBAoDcC6gOmp6c3bVyBvwoAfmmYRAbhKLg3TgbZhMWERAFXQhF3fk+kYwYEChVQHzA5OWndht63Ou9R7gsosU8JHsZgkvaYeOvDdesiJIFbWZiIAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAINFUidAHLo0CFnAsj6ndvcufzPB5ISO/QwLzT0yOqg2UqWUcC0rej4oyCAQO8EdOdfWwJInOig5DaKifrCKCnyOTTSCuhawjUKFH1AWk3mRyBfgfju27ZRQM5eJgEkX+1mri0eScDWehLBbSpMQ6B7AkrC1fvQdkOQtz4kAaR7e6K+W1rbGDFFo2jZCkmANpXqTbt161Y0mtzFixfNl19+aR1ZTiO97Ny50yjpR39zA4Dq7WdqjAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAgggULxA6gQQ310fb1/+F+4AX/w+q/0WXHd+jwNOag9AAxEosYD6AAUAX7t2bVMt9d59fPfxTdOZgEAagTtrHzln1/FHQQCB3gooANOWABIH7g9s72392Hq1BZRIZCsKANX1BwUBBHoroD7AlgASB+5rBCQKAlkFlEhkKzr/MwKITaYa0/Tdgc4bZ8+etV5DtrdCySEqs7Oz0bOuAXTumZiYIBkkEqnXP2NjY4kNmpubi5KBbDNqdELbZ5PWeePjp3VaN/4OqZuOb7WPggACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAgikFUidAKIgfP3otrCwsGlbX314jgSQTSpMSCtw+3cfWBch8NfKwkQEui6g96JtFBAlAT5+hASQru+Qmm3wqw/ftbZIx52uQSgIINBbAX0OUDCTrfzTb++Zn7/QZ3uJaQgkCuiu70okspXjx7m+sLkwDYFuC8TXY7qLf3v55YV1c3qiv30y/0cgSODWnY0+YM3eB/BdUBBh6WZSUP6pU6es3x+nqazWo4euP3UdeuLECZJC0wCWfN444cdXTVufE8+vYyNpHRrJthclpG69qBfbRAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAgXoIZPp1/tChQ9bWf3PzM3P3yor1NSYiECJwZ/Ujo+PIVgj8sqkwDYHuC+juibayfue2cY3gY5ufaQi0C3x94xOjh624rj1s8zINAQSKE4iTwW1bOPu7ddtkpiEQJPDWh+7jh+DfIEJmQqArAgrAthUF7yuIn4JAFoF3NkaAch0/fBeURbR3yyhYf3Jy0oyMjHSc/NHeCt2MSOudmZlpf4n/I4AAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIINAogUwJIArA2bt3rxXq9u/+xTqdiQiECGgUGVvR8TY8PGx7iWkIINBlAb0XXe9H1wg+Xa4im6uogGv0D1/AeUWbSrURqLSAKxFQIzgogJOCQFoBBf26jh0FmzMCVFpR5kegOAFXML7vfVxcbVhzXQRcSYAHDhxwfv9Yl7bXqR0a8WBwcDD3xI9WIyWYzM7ORokgvpEhWpfhbwRCBPr6+kzSI2m0kZDtMA8CCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAgggkIdApgQQbdgV+KW7v7tGcMijwqyjvgL3R5D52NpAV5CJdWYmIoBA4QKu9+TdKx87R3AovFJsoNICvhFkXHearnSDqTwCFRbwBWO+8zsSQCq8a3tWde783jN6NoxAagHdnEH9gK24gvht8zINgVhAfYCSSG3F9bnTNi/TeisQj87RraQMJZtoNBA9UxBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBomkDmBBBfMKZrFIem4dLedAK3Lpy2LsCd360sTESgpwIaCcp1N27XKA49rTAbL73AV79911lHAr+cNLyAQM8ETpw4Yd32pSv3jB4UBNIIuILGFWTuGnUszfqZFwEE8hVwXZsxElS+zk1Z21sf2q8blGykz52U8gso+WNycrLrFb127ZoZGxsz3Uo66XoD2SACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACDoHMCSD6IdaVBHL78r8Y3cmbgkCogEb/0OgxtuILNLfNzzQEEChewJeYxUhQxfvXbQu6ZnAlDqkP0DUHBQEEyiXguz5744I9kLNcLaA2ZRHw3fndNepkWepOPRBoqoDv+uyXF9abykK7MwgoaXTthv26wZVolGEzLFKgQK+SP+ImKflDSSAUBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBJokkDkBREiugJwokNNzJ+8mAdPWMAHX6B9a2nWH6bA1MxcCCBQl4AvI8b2ni6oP662ugEb/cCWO+o6z6raYmiNQfQElArren4wCUv39280WuILFfTcc6Gb92BYCCNgFXJ/TGQXE7sVUu8Br5+zJH74bDtjXxNReCKyurprp6elebPqRbZalHo9Uiv8ggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAgggUKBARwkgBw4cMHrYiu7k7QrmtM3PtOYK+Eb/0Cgz3Pm9uccGLS+3gC8wk1FAyr3vylQ73+gfvuuMMrWBui
DQVIGpqSmjAE1bcQV02uZlWnMF3nj/nlGwuK24gstt8zINAQS6L+D7rO5K7Op+LdlimQU0ApRv9A/XNUaZ29S0uk1OThqNwJGmxMk9i4uL5urVq+bevXsPHisrK+bMmTPO75p925mfnzfXrl3zzcJrNRRYXl5+cPy0Hkutf8/MzPSk5SF10zwUBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQCCLQEcJINqgKzBHAZ233j+dpU4s0zAB30gBrlFmGkZEcxEorYCrD1CFvzh3qrT1pmLlEWD0j/LsC2qCQFoBBfC5RgFRQKcCOykIuARu3THmrQ/XrS/7kkytCzARAQR6IuD6LKDELiV4URDwCbgShXR9oSRTSrkFFFSvkTfSFJ0zlPShJI/Dhw9vuuHL8PCwUXKZguL10P/TlDKMRpKmvsyLAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAQFaBjhNAfHfn1iggGt2BgoBL4OsbnxiNFGArvmPLNj/TEECg+wK+AM07qx+Zu1dWul8ptlgZAV0j6FrBVhTso6AgCgIIlFvANwqIAjsV5E9BwCbwxvvu48MVVG5bD9MQQKB3Ar5RQJTgRR/Qu31T9i37RoBScqmSQCjlFdCoH6dOhd/wQftTCR1KGgndt/pOUCOC6DwTWpaWlhgFJBSL+RBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBCot0HECiFrvC9DhDvCVPj4Kr7zv+PAdV4VXjA0ggECwgO+96hvhJ3gDzFhbAR0fGjHMVubm5myTmYYAAiUTUBCfaxQQ3QH+n37LHeBLtstKUR2NEPPWh/Zjw5dcWorKUwkEEHhEwPVZQMkfr52zj/LzyAr4T+MEdH3gGgFK1xWM/lH+Q2J+ft4oCSSkaJ8q+UMJHVmKRgtJkwSSJjElS31YBgEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAIEyCOSSAOIbqYE7wJdhN5ezDhr54+6Vj62V04+7WX8ctq6QiQggUJiAL1BT73HXKD+FVYgVV0JAo8O4jg3fdUUlGkclEWiYgAI11RfYikYBUaAnBYFWgdfO2ZM/NI8CPSkIIFAdAX1218httvLO5Xvm0hX3+922DNPqL/CTjcQg1+gwSgJXwgClvAJpR/9Qv+46R4S2Ms06Ll68GLpa53xqo0YT0YglY2Nj0aOvr8/Ej8HBwWja5OSkUTLM6uqqc13deEHbj+uqusX1/Pa3v/1IPa9du1ZIdWQ1PT0dbat1+7GTXuu1USENL9FKQ47X8fHxShyvcT2LOl5LtNuoCgIIIIAAAggggAACCCCAAAIIIIAAAggggAACHQnkkgCiGvju1P35wsmOKsnC9RPQHd+/ODfvbJjrLqLOBXgBAQR6KqD3rCtQR+911ygPPa00G++pAH1AT/nZOAK5Cuj877t2O7bAHeBzBa/4yjTyhy8gPPSO4hVnoPoI1EZAAZquzwFqJH1AbXZ1Lg15b/We0cNWlCSghCJKuQUUaB7aVx8+fNjokUfxfe/cun4lGoTWr3U5/a3kESV1KHFCQeizs7PRtPakEp33NG1hYSFKfBgZGTFKdlAySCfbjhM3XM9K8mgtqoMSVLT9uK6tQfOqS2s9VUe1q3We1vWl/Vv1ideptmtbrevW35qm12IjmeVZ1H6XVzy93U3/j19rfQ6pl2977cv75o23q3myFtnqeNW6Qo7XOFEn3hdyKPJ4bW+b6qtpScdrXM84gYjkoaxHCMshgAACCCCAAAIIIIAAAggggAACCCCAAAII1F2gP68G+n6o/ebmZ+bW+6fz2hTrqYGAjgdXQLgCCF13ka5B02kCArUU0Hv2+PHj1rYlJXxZF2JirQXUB3x94xNrGxX0pRFAKAggUC0B33tXwf4K+qcgoNFg3njfnxCkADYFh7UGECKHAALlFIiDehXU6Sr33/f0AS6fJk3XqB/Hzrr7gNAA/yaZlbGt58+fD65WnvtUnxFDvytMGzCuaw5de+iRNUFB69BIFwpa17mxyKKgfQX+q76+86+tDgquVx2ztlPrlK/WoaSTNNdrmjeud9bEA1ubmjZNjvH1ctb9qHVo/2k/tifIFOEZjxCT9njV/EoYydrOItrCOhFAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQKItAbgkgapB+2HPd+fHWBXewZ1kwqEd3BO5eWTFfffiudWM6fqampqyvMREBBMotoPeuKyDj9uUPjN77FASU+KFrAltRH+AbRcC2DNMQQKA8Ar4gPwX9KwiY0mwBjQSgAOCkEgd7dSMgLakuvI4AApsFFPyrgEwFdIYE8f7ywrp35J/NW2BKHQWU/OHqA3yJpHW0qHKblEAQUjTyh+v7gZDlbfOE3iggTQKIAst1PksbmG6rn6bpnBgHu4ecH13rcU3XOjtJVInXq0SMLEH1sVeaxI94m/GzrNWGInzibdT1We8/Ha+h78MkB+0DJYJonZ3sU992dKx1mhSV9Xj11YvXEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBCoukCuCSBJgZufnz1ZdS/q36GARgL4fMF9HJw5c8boOKIggED1BPTe9QX/6r3vGvmneq2lxlkFfNcCGkUm7yChrPVkOQQQSC+gEQFdSVwK+Pzh2+67fqffGktUTUCjwGg0mNDSGpCWV1Bm6LaZDwEE7AJ6XyqwWYGiaQKstbbQBDD7lpladYH3Vu8ZPWwl6XOkbRmm9UYgTX989OjR3Cup7wzv3buX+Ai9sYwSTRVYrnNb3kVWWc6VSfVQfdOef13r1Pk8TdC/kj+0/TyK2qBRLCjhAvKXWRHHq/ZHUcdrlkQjm0qex75t/UxDAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQKBqArkmgKjx+pFNwV+2ort+f3HulO0lpjVE4Itz8+abm59ZW6s7+ekOgRQEEKiugN7Drvex3vu+4P/qtpqahwrcet89GpiuHbjTe6gk8yFQXgHfaFBrN+6Z186RBFLevVdczTrZ9wpI012iiwrQLK7VrBmBegnEdx3PehdvjQJFH1CvYyK0Ndr3Gv3DVbgRiEumfNPTJICEjtbRq1YqKF0jHxRZlFyRZ8D+2bNncxv5Qe1WIkGoga7HlDCSZ0lzPOW53SquK8/kG1f7dTzomjtNUpBrXZquYyav5I94O6HHazw/zwgggAACCCCAAAIIIIAAAggggAACCCCAAAII1Fkg9wQQYenHW1f56sN3zd0rK66XmV5jgduXPzB62Iru+Og7bmzLMA0BBMop4AvgubP6kfM8UM7WUKu8BNT337pw2rk6+gAnDS8gUCmBpGs6jQLhugN4pRpKZYMF8hr9RQFkg4ODuQeSBTeEGRFoqEAcwKwg5k6DQt+5fM/oQWmWgEYAU19gK74bCNjmZ1pvBdbW1oIqoOQPXROWtSjxIK+RLJLaGJ9Dk+YLeb3Tc7BtG7q+UuB/UiERN0mouNeVSNGt41XHQl4js4QcV2nVlIxaxPsgbT2YHwEEEEAAAQQQQAABBBBAAAEEE
EAAAQQQQACBMghsKaISuov3iRMnnHcR++Pb/4f5q9fPmi27nipi86yzhAL3R3+Zd9ZMx8vevXudr/MCAghUR0CBHnNzc84fqDUS0NannzFbdz9TnUZR044E1u/cNur7XUV9gGv0MNcyTEcAgfIKKOjP91lAdwH/193fMnt2lbcN1Cw/Ae1v3f09j6JAMgXA6Q7YShzk80MeqqwDAbeARmc7deqUyTOI89jCuhl6ut8M7e5zb5hXaiOg/a1RoGwlKWnUtgzTeisQei4o82e7+FoiVFLXGkePHjWtI5ooAP3SpUvBSalKONHoSRopL+8ia9Vt586d0aqVpKPthe4rLaSg+omJiWh52z9KElESQmhRnWTWehxo+fPnz0d1C11PUfOpra37M96ORr9IKvquq7VdSfPn8Xqa5A/b8apjQfbazyHHhfaV+n898i6qn+z37NkTrfr69evB9YrronYU8V6K188zAggggAACCCCAAAIIIIAAAggggAACCCCAAAJVESgkAUSN148E+nHB9gPR/UDQn0ZJIFWBop7ZBbS/Pz970ujZVvTDDz/c2GSYhkB1BfSDevwDc3sr4nPCkz/5tenfvqP9Zf5fQ4E/vv1TZx+g4IkiAgtqyEiTEKiUgO+zQDwixG9+0m8GtleqWVQ2pcAb7/tHfFlcXIyCyxWomKZofo0GokQj+pA0csyLQJiA3mPT09PW73OS1hAngytRy/Xe/v6v1s0n//At+oAkzIq/njTii2/kyIo3vbbVt33Ha2tsnIxge63X05SIETqCgIL9Xd9X6jsPXYdotIQQl9nZWaNldI7Mo+hztOqn71Tbi4L8dQ5X4kZISRrZRXUPKXFSl0b2aS/xd7/qF2QWkojQvo68/q8kBD2yFLnbzLOsK2SZNMk3vuNV+0SvK5lECRRJRcmfOl6zOrWvX+vROd9mp+m6ng89zpR85Xpftm+X/yOAAAIIIIAAAggggAACCCCAAAIIIIAAAgggUGeB/iIbp4Ae1w9bGhHi84WTRW6edZdEQMkf2t+2ouNDxwkFAQTqJ+AL6In6gI1zA6X+Aurr71752NrQOEDE+iITEUCg8gK+zwK6G7hGhqDUV0CBv7+84N7HCkJTMNry8nL0eSBLgJkCxZQI4goyr68uLUOgGIE4aFh3QQ8JaG6vhYJFr169GgWN+voAJQIqCUTPlHoKXLqy0c9vjP7hKgreVR9AqZZAaNB+t0dISKOowPaQonNYUpC5rl1WVlZMSHtlp+STPIq2p+snWzC91h9/zg59j/nO90oWCEmY0TZVp6Rtqs6aT/NTkgVCkyL0/VvS8SpzHddJ+0i10vEa+l5JakX8PnEdr1peCSBJ9Y+3E3I8xvPyjAACCCCAAAIIIIAAAggggAACCCCAAAIIIIBAnQUKTQDRF/wK7HGV25c/MF99+K7rZabXQODW+6fNndWPnC3xBYU4F+IFBBCohED847Krsjo3fHEuLPjCtQ6ml1tA/bwerqJrhJBgGdfyTEcAgXIL6LOAgpFc5b1Vf3Coazmml18gSvDxBP4q8Kw1yEv/VwBl67TQVioITMHquqOxgtUoCCCQTUBBvkqoyhKgrPO9AnpbE8CTPgvoPPHaOXeCQLZWsFQZBLRv/+vb7n2r63/fd4VlaAN16EygrMH9Gk0h5FpBI3uEBMnHSqEJDRoZKY+i90+Icej7zJcAopFdQ4rMQj/baz7NT/ELhCbfKPlSj9Civlr9dlIJHUEmaT3a1yHHa+gx4Ttek+rC6wgggAACCCCAAAIIIIAAAggggAACCCCAAAII1Emg0AQQQSX9CKHgX19waJ2wm9YW7ddbF047m60fdnx3/3IuyAsIIFAZAb3HfT/iKgmQPqAyuzNVRZXg4xvpK+n6INXGmBkBBEor0B7o315RjRKhB6U+Agr81Z39XUVBf7bEIAWGKVAx9E7a7etXkFrW4PX2dfF/BJokECdRjY+PBwVGt9rofatrfY36Yftsr2m+AGSd/32jRLRui7+rIZA0uouOGQXLUxDohUBIMoOO0bQJqVrm+PHjiU3S+bbT4HVdR9nOt7aNK8g/ZF5fUoySEJKKtpPWTPOHJCEkbbvOr4ccr2q/7zs3m0/cd9tea52m4yJk/7cu0/639rG++wkpqleaxKuQdTIPAggggAACCCCAAAIIIIAAAggggAACCCCAAAJ1Fig8AUR4CvDRD1Su8sW5efP1jU9cLzO9ggJJgb/6AVLDu1MQQKD+Anqv+37EVZIASSD1Og7Up39+9qSzUbom8AUDOhfkBQQQqKSA3u++4DMF/5IEUsldu6nScfKHAoBtRYFdrSME2OZRH6EkEB03mj9NUaDa9PR0NCJIpwGWabbLvAhUVUDX6SMjI+bixYupm6Dzut6rSZ/rFeTrC/7U+f+tD0kETL0DSrhAUvKHqhw6UkIJm0eVaiAQcq7Tdxdprz9E4zvPtdKF1KF1/va/Dx061D7J+//R0VHv674XdS3lSw6Jlw1JfonnbX3OulzrOur8d8ixouM1SyKN7zu6VtNOr6dDtxNvc2hoKP6TZwQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEgQ6EoCiOrg+5F3/c5t84df/ZgkkISdVZWXQwJ/FxcXq9Ic6okAAjkIkAiYA2JFVqE+QH26+nZbUTCN+oAsQTW29TENAQSqIaD3vS8hnCSQauxHXy0V+Hvs7D3jSv7QsknHQev6FTSu4PK0gWNahwLmFNSuwPSQwMXW7fI3Ak0Q0HtEI+bMzs6mfo/E13L6jic06DTps8Br50gErPpxFyd/KBHQVZKOA9dyTK+eQBn73tBkhrQJFvHe0fnQd61sAd3cAADEoUlEQVQbz3fp0qX4z0zPvqRq2wo7+dwdGvyftk5xPbMuFy9f52e9hzRiTFLJerzquAjx7/R4TZvQEVKnJBNeRwABBBBAAAEEEEAAAQQQQAABBBBAAAEEEECgKQJdSwDRDwskgdT/sAoJ/E2662/9lWghAs0TUB/ge++TCFiPYyKpD1Ar0wQL1kOFViCAgATUDyQlf5EEUt1jJTTwN21Ql4IpddzoERpo3qqo4Pasoxu0roe/EaiLgAJKJycno1FyQgJL29utxKyrV69mSszSNaAvOJo+oF27Ov8P6QNOnDgRPEJCdVrevJrqei6khCYOhKwrr3lCk1J856mkuoRc52Q59yZt1/d6J+0JrWvWbWRdztfeurwW+h7Kcn0cG4WMDhNaj3id7c+d1K99XfwfAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEHhXoWgKINqsfdubm5h6tQcv/CABuwajgnyGBv2nu+ltBAqqMAAIeAfUBJAJ6gCr+UkgfwF1/K76TqT4CHQooAMjXD2j1BAB3iNyDxXW39+//at347vreaeCvRgHRaCBaT9qi4MWxsTEzPj5uQoM/026D+RGogsDCwkI06oee05b4Ol7f54QGgLdvQ8v5EsI1P31Au1r5/x+S/DExMRGNyFT+1lDDJAGdC0LK9evXQ2br6jwa+SikdBKwvnPnzsRNdBpQH5Jk
kliJwBm+/PLLxDk7rU/oMZVYkZrNEHrN2ol/yLEeWo+a8dMcBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQqIbCl27XUD78quuukrcRJIE/+5Ndm6+5nbLMwrYQCoYG/nfwwVcJmUyUEEEgpoB/3FThGH5ASruSzh/YB8TVAyZtD9RBAoECBOIhYozK4igKAjek3rzzX55qF6SURiJM/FADsKnkF/ip4fGZmJhp5YHp62oQGcsb1WlpaipZREolGMaAg0BQBJUHp2jvte0Y+et8dP348t+D9uA9QUpYrqFR9wNqNPvPmka7er6Qph0Ou7QxN/lDiD6UeAiEB42ppp0kOLi2dz0KS2FTPLJ89Q9vnql/dkhmK2o+tfupnKJsFumHf6fG+udZMQQABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAgW4KdD0BRI3Tj3D60W52dtbaVpJArCylnXj3yor549v/h9F+cxX94J/lx1fX+piOAALVFYjPBUlJIE8cfd1sH95f3YY2pOYhyR/a5/F+bwgLzUQAAY+AguN0bejqB7QoAcAewJK8FJr8kXfgbxxAPj8/H32edAWR25g0r5JHzp49Gx2DdQvUtLWZac0WUNKU63uXJBmNvKPE7bwDROP3sC8J5K0P75lbd9bN6QmSQJL2U69eD+kDdAOQvPuAXrWX7d4X2LNnTxCFgtfV5+Yd3K/1hpzTdOxl+fzZ6fku7/YGYTMTAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIBAIwV69mu6AhF8P8bFSSB3Vj9q5I6pSqNvX/7A/OHNH5P8UZUdRj0RKImAzv++YCD1AX98+6dG5xhKeQWUAPiHX/n7gKR9Xd7WUTMEEChSIOTcoADg+6OBFFkT1p1F4J3L98x3T65vBGi7lw7Zx+6lk1/RKB5Xr16NRgRJnvvRORRAqlFolAyiAFUKAnUT0Ggfg4ODQYHS7W1XAPTi4mL06DQYun3d8f/jJBBfsLTOM99/03+eidfHc3cFLl3Z2De/8u8b7WMdR5R6CaQZ1Vcjb+VdLl26FLTKrOcurgmCeJkJAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEECgBAI9SwBR2xX8q8AgVyEA2CVTjulfffiu+XzhpLcyCszy7WPvwryIAAK1FtC5wZcEosbrHHPr/dO1dqhq40ISAEP2cVXbT70RQKBzgZBzREiiQec1YQ1pBN54PzkxJ2Tfptmma14FjyvAeHl52WQJ9tQoIkoEKSJI1VVnpiNQpICCl8fHx41G19Coq2mLPr+vrKxkSqxKu62QJJA40UCjTVDKIRCSmKPRY3Re9iX4lKM11CKtgBJAQvfrqVOn0q4+cX4lt4WUoaGhkNk2zaME0U5KaP062UY3lx0dHe3m5thWi0CW69qWxfkTAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEGiDQ0wQQ+SYlgWgeBQAnJRpoPkr3BLQ/vjjn/zFX+3Zubq57lWJLCCBQOYGQANFbF05HfYCSAinlEND5P6lfDtm35WgNtUAAgV4K6FyRFCSqwN9nfvYnQwBwL/eUiUb7+OHb6+aXF9a9FenF+V8BqQpaP3HihLduthcVJK+AeT2yBMzb1sk0BHohoIQmjfqRJaFJyRh6D+nze2hwdx5tDEkC0blfo028t3ovj02yjowCGvFJo3IljcylPkCJed08jjI2icUyCijBJ6QomSLPhAitLzRBwzZSic43ZSh1C+zvdB93unwZ9mkRdQg9Tjq5dsW+iD3HOhFAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQ6J5AzxNA1FQlCuhOk75y/07jf2sIAPYpFf/aNzc/M/928qjR/vCVkMQe3/K8hgACzREICRSN+oBf/djoHETpnYD64D+8+bdGI0D5Ssg+9S3Pawgg0CwBBeklJYEo8PS7J9fNWx8SANyLo0MB2N89+afEAGwlYOhzQC+Kgo1nZmbM1atXjS3wM6lOCprXaCAKoqcgUCUBBURrxI/p6WmjEUDSFL1vlPSh5I9eBUeHJIGoD1ACmkYgonRfIE7C0egfvsJnAJ9OfV47dOhQcGN0XsqrhI4oovOa7XwWmpTUSVD8pUuXEpsbGtifuKIuzGBztG02NDGnfdlOkhfa11W3/4ceJ1nt5XX9+vVEtizX1IkrZQYEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBDIRaAUCSBqiYIOkoKF7l752Hz6s78xd6+s5NJ4VpJOQO5K/vj6xifeBUn+8PLwIgIIWARCgoV07tE56M7qR5Y1MKlogdhffbGvhPTnvuV5DQEEmikQBwAnBTu9dm49CgJWMDClOwJKulHyzfWb/u3pM4ASMHpddAwpoUj1CQ32jOus4HkFqyoRpJMA0Hh9PCNQpICOV73nsh6vuou/EqaSbsZRZBvidasPUF2Sgo01AtH330w+H8Xr5blzASV9aASWpFG4epkA2HkrWUMaAZ07kq7X4vUpOD2PJBD1yQsLC/Fqvc+uEUpCA9nPnz/vXb/vxZBrh9HRUd8qSvVa6H4OabetYVmXs62rbtNkH3Id28nxGjJiWOgxUDd/2oMAAggggAACCCCAAAIIIIAAAggggAACCCCAQBUESpMAIqw4ANj3A8f9u4//2Nx6/3QVfGtTxy/Ondq46/uPvSOwaL8tLi5G+7E2DachCCDQNQH1AUl3gFcf8Me3f2p0TqJ0T0B9rpJvkkZgUbBtGYIIuyfDlhBAIE8BBf6G3IX+vdV75pmfJY9GkWfdmrguJXwo0FpJN76izwDqv9WPl6moPgooz1KvTkZUKJMBdamvgIJmlfgxOzubupEK5tR7Vp/dfd+9pF5xhwvE5xJX8Ha8+ktX7o9IlDQaRTw/z9kE4lFXji2sG1/SpfZbWRIAs7WUpbIIHD9+PHgxjawVmrxhW6mS3cbHx20vWacdPXrUOl0TQ5JAQoLibRsIbWNSoptt3b2aprqG9BOho7O0t+Ps2bPtk/h/i0CRx6uOc723kkqVEpaS2sLrCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAgjUTaBUCSDCVYBOUgCw5rt14fRGQsLfJgajal5KdoH4ju9fffiudyVxEElSsIZ3JbyIAAKNF9AP3OoDku4yqHNSyIhEjQftEEAJH+pr1ef6ioJCFLSdJcjWt15eQwCB5gmEnk/iwNQfvu0PTG2eYD4tvj/qx5+MAq19RYGB6rdDAtR86ynqNR1PCkwOubaw1UFBq4ODgyZrMKhtnUxDoBOBa9euRYHQY2NjRn+nLRqlQddsZX7PKjElKaFYfYASE9QHJI1OlNaI+Y0JTbTUOVbnVz4DNO+o0Xs06TN7q8rk5GSmkUDihMyQQHVtT9clvvPboUOHWqtl/Vvn1tBkjtYVhCTk6T1Tte9NQ+orM10zpSlKZGQEEL9YyPGq90Zae201NGknZP/7W8GrCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAgggUJRA6RJA1FD9YBdy99+7Vz6OAoCTkhOKwqv7euM
7visJxFdC95dvHbyGAAIIxALxOcUXuKF54wQ1RoSK5fJ9jpNs1Nf6Sry/9ExBAAEE8hJQ0L4eSSUOUuVO8ElSYa+v3bj3YNQP3x3ftTYFhCnwtwrnf11TaDQQBb8r+DJNUWCd7jyeNeA+zbaYFwGfgAI8NepHloQkvQf0HcvMzEzq94CvTkW9Njc3FzRCifqA7578k1HSGqVzASXTKKkmJLkyPq9WoQ/oXIY12AT0Pk1TdA5TUmVIcoX6Xp2v1PcqCSS0qJ/3ldBg9unpaaM6hBbVNSQpL3T7odvtxnwhSQiqhxJgQveVbGVcxdLNpBUdLyHXrWnsZa73Ykg7Qrdfxf1InRFAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQqINAKRNABKs7yYXcSXD9zm3zxblT3Ak+x6Px7pWVyDPpju/apO76p0CSkB+kcqwiq0IAgZoL6JyiPkDnmKSic9Wnf/9Do3MXpXMBJdZo1A/1repjfUV3+9V+SnP3V9/6eA0BBBBoFdA5RteZSeeY+E7w339z3SiBgZJeQIavnVvfCKReTxz1Q2sPDc5OX5Nil1CApo4pBS6nLQqUU/C91kFBoJsCCqjVsZc2IFl11DW13q9VSdZqdVXgaUi9056/WrfB3/cFZPjG+/eTaZRUk1QUZK99o+OL0lwBvUf1SFOUJKHRQL797W9Hz+pT1b/GD/1fSZdKFFFQe5okDPXtSfXRNWXIiDXabmjyieocMvqHnJISVNJYdmtemSZdi6suoWah83WrfWm3o5EzQhNd0q67fX6dY48fP94+edP/05gq+SM0+SZk25sqwwQEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBDomkB/17aUYUP6oSO++2/SD8vxneBDAlYzVKURiyjQ9/OFkxuBvz+O7qzva3S8b9Le8c+3Tl5DAAEE2gVCA0y/uflZdO7SOUx/U9ILtCZUJo36obVr36iPTuqf09eEJRBAAIGHArqzuAL2kwL6tMSlKwpeXTfHFtaN7mJOCRPQnfOf+VnYHfQVAKj9EZKgGbb17s+lNihweXFxMXUfpgA7BXkqGF/BqhQEihTQ8aYgTR1vWYJNFeSskW+q/H6N+4CQNsQjGGn0CvqA8CNTI2hpFJVfXlg3SgTxFV336/ypgHcKAhLQ58GQ5IB2LZ3fNBKI+lQlWsQP/V+jHOn1NEXHZsjIcVpn6GhgOu+qXjoPt5+D4/rr9dDkD52Ts1ilcShq3tDEFbmoz1KST7uZkn+UfKDknvbXiqp3EeuN26h2xsetnpXUVERR/6fjO6nE9bLZ6zW9r+LjOWldel2fvbIkTIesm3kQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAgH4FSJ4DETdSPZCF3ftT8X334rvn0Z38TPcfL8+wXUNDvrfc37qC/4Xb78gf+mTdejYMwtF8oCCCAQNEC+uE59G7dOof928mj0TktafSKoutdpfWn6TvjPiAkEK9KBtQVAQTKK6CgJwXrhwbsx8Gsupt5UjBreVtdfM3k9F/+/k/RyB8hTjrvqz9WP1CHouuLrMHxClxUEJ2C7BRUR0EgbwEFaiq4VMGyaYsCjPX9SZ0SdeNRTEKCpzWChc5tJAP6jxwlTaZxis+ZBAT7XZv2anyNFhKgXqRNmkQUnUfSJDToPKzzcV9fX5S8oOd4BJPQZFD5VPkGOmmTV5TcE5vJKrbLMpJVkcdNJ+vWtaD2f/wo6now7bFjs9c+0Mg6qmtISbvNkHUyDwIIIIAAAggggAACCCCAAAIIIIAAAggggAACCOQvUIkEEDU7TcBpfBfzT//+h0EJDfmzVmeND4KlL5w2IcHS+pFUgV8hgRfVUaCmCCBQdgGdcxTIFhKoESW1bZzTSAZM3qvqA9RXho6epeDf0ITM5K0zBwIIIJBOIA4+1XNSUUKD7maukS2UCMLd4B+KxYkfocHRcR+swEUFhNWpxAFuWfs2BdnpTtZ6piCQh4DukK7kIgVq6u+0RdfKSmyqY5C+2pRmBKK057q01lWd/36S5Lr5/pthI6XoPJkmCbOqLtQ7u4C+r1U/2qtrBCV/hFwbtrZQn2vTLqPls5yXtVwvfbT9PIrOA1lKUYkRWeqStExZ+04l4OiRtmS1T5NQlbZOzI8AAggggAACCCCAAAIIIIAAAggggAACCCCAAAL5CVQmASRucpo7P35z8zPz+cLJKLg1ZGSLeBt1f1ZwdBz0Kx85JZU4AWdmZiZpVl5HAAEEChPQOSj07uOtyYAa4SIkya2wipdsxWn7gDoH/5Zs11AdBBBIEGgNRA1JSI4TQeK7nK/duJewhXq+LAclwsQOoQkxcfJ3WQPi8tpbcWC52ps2gFXBdRoJREH7WQND82oH66m2gK5zdcf00Dt0t7ZWx7ASP+r+eV3vzzTfCcmoNRFEo140sagPaHUI7QsVcKzjKkugfBOdm9zmOAlEz90sClTPEhivOmrZbtS3W9sp2l1WakvepUzXmEePHs27ebmtr5NjPU0ltB3O+WnEmBcBBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAgd4JVC4BRFRxcEPIneA1f2siyK33w0a60HJ1K9Fd8Tfar7vihyZ+KMAiDvzqxg+jdTOnPQggkL+AzkVKAgm9E7n6AI1woXOf+oCQpLf8a937NaoPUCKMRvwI7QNU67gPKFNgRu81qQECCPRaQIFJ6gtCPw+ovq13PdffTSgK8tVIH/95+k/RiCihiR8658tXweRpEyKq7Kr2qt1ZAt8UtK/RQLQOCgJpBHTsKPFjdnbWpL1bt96fCtbU3eVDkuLS1KvM87Z+JxR6jtJ5X6NefPfketQfKCmi7iXuAzQaVuioTzLR5y0dUzq2Qn3rbkn7kgXi4yZLH5q89kfn0HGpESmyJn9obVqHjvNO1vForR79Xx51fHSNvf+frHReyKtoJJbR0dG8VtfxetQ+HcdlLbIv8ngtcv1lNaVeCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAghUWaCSCSAxuIJrdDfC0KBUBf3eunDa/L/T/3sU/Pr1jU/iVdX6+e6Vlai9arfaH3oX/DiwjiCmWh8eNA6BygooWCBNkGaUBLdxDowTIHRubEJRXxeNhrWRAKNEmNAEmKYG/zbhmKCNCNRFQIF18eeBNMGGugN8nBSh59A7oVfFTUkeb314f7SPONA5tO4KIFdApQIiyxwAF9qeLPPFBnLIElCvIH4lgmQZxSFLfVmmugJK9pieno5Gj1ldXU3dEAWB6vuQooJBU1eoBwuoD9DngTQG7UkR763WKyFQiS3qA3T+j/uA0GQXnfMUACzT0O/ZerDb2WSJBeKkhyKT0vR+z2tkGtVXx3zozSVC6ePP0mmuT0PX3ev55C8z2XVS9H2O3MtWyn4NLHtdo3bq3+qu47XIZKjWbfE3AggggAACCCCAAAIIIIAAAggggAACCCCAAAII5CdQ6QQQMegHav1IkfbHxduXPzD/dvJoFAisO6KHBsTmR1/smtSe+E7vf3jzx0btDS0K9pJn1qCn0O0wHw
IIINCpgPqAOFA1TZCSzok6NyoZpI6jgsR9gPo5PdTe0OS/VtOmBv92elyyPAIIdFeg9byVpi9QQGw8Ksh/+fs/mdfOrRslh1SxxEkfCvaN2xI62ofaK0MFlOUVUFlFw/Y6x8nwClBMW65duxYF9Y+Pj6ce0SHttpi/mgJLS0tRotD8/HzqBsSf1/MIwE298RIu0Hr+ytIH/PDt+6MkKSGwqskgcR8Qt0X9WZrkRgUSx6P+pUmmKeHhQJVKIqD3oq4pdJ7K6zOljk19V1nEuU99veqr90EngfVqd5bvqEuy24KroX2RNvkuXnl83V7G5A/VUftf+7DTYyFubxHPukbN43jVe1PvJ7U3r/dpEe1lnQgggAACCCCAAAIIIIAAAggggAACCCCAAAIIIGAX2GKfXL2p8Y+LCwsLRnddVdBNSFGQrO6IrsfW3c+YHc/9wGwf3m+27HoqZPFSzaO23Fn9KAr0zTK6iX6E0w9c/OBfqt1KZRBAIEBAfYAeWfoAjYykR9P7AAU6KAiDPiDggGMWBBAopUDcF2jkBX0eSDMCQxw8q7umD2w3Zv++PnNouM+Mbjzv2VW+5ip55aONZBUlrJzfuHt9mmSP1tbo+v/o0aNGgY/qByiPCsR9o4wmJydN2lEaFOSv41CfsWRMQUDfU+hYSnN+itV0PB4/fjwa+SiexvNDAZ3PFMQq27R9QJwQqKRAlZf+fP4f3WfM0O6+hxsp0V9KVlEfcOmKSZXs0dqE+JiiD2hV4e88BfTZUg+d+9Qnnj9/Pvj8p+NT13ajo6NGAe96jxdZtD2NKqRHXFf1+76+X8socP7QoUMd11Hr0Tks71LUerU/lDyga5xTp05F+9VlFe9LOel4aC36v/azr6gNrqLvMDSilq+kPXZajwX1KXp8+eWXzmNBx6ithNRN28pSWusYerxqO63vKZ9rUp1Cj6u02whdb1L9eB0BBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAgSYI1CYBJN5Z+uFIj7RBwFpeSRNf3HiYDLJt37Nm276Rjcezpn/7jngTpXnW3dzvXvl447ESJX4oASRL0Q9hJH5kkWMZBBAom0BefYCSAJUMWOY+QPbx+V99QZbEP61DfQDBv5KgIIBAXQQU2KSHgrUUkKagqDRFgcAKrI3vBK8EEAUAD2889isY+Om+KEkkzTo7nVd3cl/79H6QbyfBvnE9uP6PJcKeFYymO11rtAYFlicFGrauVfNOT09HQa8KBEwbCNe6Lv6utoCCinX8ZCk6pynQVu9dil8g7gMUiKw+QN8NpS2tfUCcFFi3PkDJRPrslDX4OK0p8zdbQOcuJRrpoaKEED3UR7YmDaiP1DGp+fXoVVHCiR5xUR1V19aic02eRe3Oe52qX1Hrjduu/aTrm7i0W8X7NH69/bnTfV30dVXcp7TXO+T/RdctrkP78Rq/v+LX9Zy0H1rnDfm7qOOqqPWGtIl5EEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBComkDtEkDiHdBJELDWoUBaPb768N1olbozvBJB9Lz16Y3HxnO3S1SnT+/Xq5Ng37je+vEn/tE/nsYzAgggUAeB1j5AgV+tQSUh7VNCnc7/ZeoDVCf1A0r6uP/8cUhTnPMo0ILkPycPLyCAQA0E4oAtBUHFQcDtwXshzdToGtdvPkwI0TJKCtm7q28jMWQjsG77/cQQTe8kOUR3cldZu2HMl/9hzOpG0oe2reSPvIoCxJT01xrUmNe6m7AeBa7KTgkdaROLlJA0MjLyYDQQBbhRmiGgfa9RP3QuSlt0nCjxg/dsWrn7wa7x3emVBKJ+IEsf0J4UqJooKUTn+7gPGHramJ0b09QvZB01Suf6Wxvn/vt9TjF9gPpF9QH6rERBoJcCrUH/VTi/dSuQv5f7JK9tY5WXZPb1tL6/sq+FJRFAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQKDsArVNAInh4yBgBeicPXs2daBOvJ4o+WIj8La1KCFky64nzZYnnoqSQvq3/6f7/9+4c3zWogDfb27+YeOh5/vBvvFIH1nX2b6cflxV4od+/KcggAACdRaI+wAF3qkPyHIHYPnY+gAlAmqkkPhZf0d9Qg59wPqdfzdfbyT8ffP5/T5BSX95FZ371QdUIdAmrzazHgQQaLaAgqB0Z2IlvcWfCdQvdFLipBCNxmGMPUEjDhB2befaRlKJ1lN0UfvjgF/9TelMQIaLi4sma1C/RoDQNYkC0/k81tm+KPvSSjZQslDW608lHOm8RbJQZ3ta71mNvqKH9sX58+czfy8U10RJIUra8/UBmnd0X1+8yKZnJXvkmeC3aQN/nqDjR5+JdP1PH+BSYjoCCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCBQLYHaJ4DEu0OBrnrEdwBW8FeWO3DG69NzUkDu/WDg5GSQONmjdd15/60f+gn8yluV9SGAQFUEFGCph4LoFPilwMtO+4A4KeTO6kdWhv7tOzaSA/dZX2ud2K0+QH0ggV+t8vyNAAJNE4iDYBUIqz4gTgZJO0pUqFscIBw6f57zqa067x86dCh6znPdrOu+gK4rVlZWzPz8vFFSR5qi429sbCzaN0pOIig7jV415tX1ppI/sow4obun67jQMUbJVyBODu9GH6CaxyM75duK5LXRByQbMQcCCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCBQZYHGJIDEO0nBNQqm0ENBX/HdH7MEZsTrdD13I6jXtW1Nj3/0V+IHwSM+KV5DAIGmCKgPiO8ArIDfU6dORX1BEX1A3qM3pd1HcR9A8G9aOeZHAIEmCKg/0N319ehWIHDRrmqTkj5GR0dJ+iga+8/rV1+r6wq5K9g/7cgy+jyqZZSgqmORUn0BnU8mJydTHwtquY4njoXuHAO2PkDfDen9WNWiNul7H679q7oHqTcCCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCAQLtC4BJBWGgXq6HHmzJkHySD6wV9BG1UtBH5Vdc9RbwQQ6LaA7q6s878eOvfHCYFV7wMI/Or2kcT2EECg6gKtgcBKCIz7BCUKFjU6SB5mChbXOV8JH3pWv0bpjYDsl5eXH4wGkiaxVPMqeUSjk+mahP3Ym33Y6Va1H7OMBhNvV99LMBpMrNHdZ1sfcOnSpagvoA/o7r5gawgggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACYQKNTgBpJYqTQTRNP/Ir8Cv+0T9NAE/rOrvxd2vgl9qg4AUKAggggEA6AQXO6qHAOyWA6I7cVesDCP5Nt8+ZGwEEELAJ6Nq69XOBPgfoc4E+H6hfUB+hRy9KfJ4fGhqKkgRIFOjFXvBvU6N4TExMRCNA6FoiTdExNjIyEo0EolEgdCxSqiGgc4RG/chybtDnd11/6rxD6b2ArQ+Ivx9aW1uL+oIs+zmPlqkP0PGipD+d/+kD8lBlHQgggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggEA1BUgAs
ey3+Md0BfCotP/gr//3qhD41St5tosAAk0RUGCVzv+tfYDO+3HQl4L8elXi/knBv3F/0Ku6sF0EEECgCQLtwcBqs5JC1C8oCFiP69evR8/x9Kwu6n/0UFGAr7at837r9OhF/im1gPbb4uJilDiUJSlAo0goeYSkgFLv5qhyes9rH6dN9olbpmtNkn1ijXI+6/2sa249Wos+D+j8r0fcB+j1Tj4nxOd8rUd9gIq2Sx8QUfAPAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggECLAAkgLRiuP+OA29bXFfSlgA/9wP/ll19GQWB6vZMf/OP1x8EF2u7OnTujH/1bgwHi+XhGAAEEEChewNYHxAFf3egD4uBfPVMQQAABBHovEAcEJ9Uk7it888XX/b55eK2aAtq3V69eNTMzM2Z2djZVI3TsjI+PR6NCKBFEAeCUcgkoUUf7Vd8JpC26pjtz5ozh2i6tXHnmDzl30weUZ39REwQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQKBuAiSAZNyjcbCG74d/BQafPXvWLCwsOLcyMTFhjh8/TvCHU4gXEEAAgfIJxHfi9fUBShQ8depUYh9w9OhR41tP+VpPjRBAAAEEQgTiviJkXuapr4ASQPSZTyNFpL1ZgEaW0DL6vKj1UHovoOu76enp1PtSNVfymEb8iEeZ631rqEGRAvQBReqybgQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQKDZAv3Nbn6xrVdA7549e7wb0etxMol3Rl5EAAEEEKiUgM7tIX0AyR+V2q1UFgEEEEAAgdQCCgRfXl6ORn1QEkCaohEmNNLEyMhIpqSDNNtiXreA9oOScLLuh8OHD0cjwpD84TbmFQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBAIEyABJMyJuRBAAAEEEEAAAQQQQAABBBDILKCRQK5evRqNCJJ2JRp5YmxsLBp9QskIlO4JaCQWJX4oESdtiZN/FhcXoxFA0i7P/AgggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAQLsACSDtIvwfAQQQQAABBBBAAAEEEEAAgQIENALImTNnohFBlByQtszPz5vBwUGjpARKsQLXrl0z4+Pj0UN/py0nTpwwKysrhtHe0soxPwIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAgj4BEgA8enwGgIIIIAAAggggAACCCCAAAI5CygpQKOBKElASSFpikYAUWKCRgTJkpiQZltNnVeJNhr1I0uijfatEj9mZmZS79umetNuBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEAgXIAEk3Io5EUAAAQQQQAABBBBAAAEEEMhNQEkCWUeJuHjxYpSkoHVQ8hFYXV2NTKenp40SbdKU1tFdhoeH0yzKvAgggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAQLAACSDBVMyIAAIIIIAAAggggAACCCCAQL4Ce/fuNcvLy2ZxcTH1iBFKUpidnTWDg4NGCSGUbAJyVNKHRv1QEkjaMjExEY3oomcKAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCBQpAAJIEXqsm4EEEAAAQQQQAABBBBAAAEEAgQOHz4cJRFMTU0FzP3oLNeuXTNjY2NmcnIy9cgVj66pef9bWlqKEj/m5+dTNz5O3jlz5kzq5J3UG2MBBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEBgQ4AEEA4DBBBAAAEEEEAAAQQQQAABBEogMDAwYObm5qIRQYaHh1PXaGFhIRoNRM8Uv0CcNDM+Pm70d9py4sSJKGHnwIEDaRdlfgQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAILMACSCZ6VgQAQQQQAABBBBAAAEEEEAAgfwFlFSwsrISJYMoKSRNuXXrVjQSiEYEyZLYkGZbVZ13ZmYmGvXj4sWLqZugfXP16lWjdVAQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAIFuC5AA0m1xtocAAggggAACCCCAAAIIIIBAgMDU1FSUCHL48OGAuR+dRckNg4ODJCq0sMhkZGTEzM7OGiXKpClKxDlz5kw0OsvevXvTLMq8CCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIBAbgIkgORGyYoQQAABBBBAAAEEEEAAAQQQyFdAyQaLi4vRI0vigZIdlAiSZbSLfFvSu7Up2WN6etpoVJTV1dXUFVEijkb9mJiYSL0sCyCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAnkKkACSpybrQgABBBBAAAEEEEAAAQQQQKAAAY0CsrKyYpSMkLZcu3YtSn4YHx9PPfJF2m2Vbf6FhYUoAWZ+fj511YaHh6MRP+bm5oxGAKEggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAK9FiABpNd7gO0jgAACCCCAAAIIIIAAAgggECCgJAQlIygRRMkJacvS0lLmZIi02+r1/HHSy+TkZOqkFzmfOHEicj5w4ECvm8L2EUAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQACBBwIkgDyg4A8EEEAAAQQQQAABBBBAAAEEyi+g5A8lgWQZmeLWrVtmeno6GhFkdXW1/I3NUMOZmZko0eXixYupl1bCh2y1DgoCCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIFA2ARJAyrZHqA8CCCCAAAIIIIAAAggggAACAQJTU1Pm6tWr5vDhwwFzPzqLkiNGRkaiRAclhdShqE2Dg4NmdnY2dXM06sfi4qJZXl42e/fuTb08CyCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAt0QIAGkG8psAwEEEEAAAQQQQAABBBBAAIECBDpNXFCyhBJBlpaWCqhdd1apBJbJycloVJNr166l3mgniTSpN8YCCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIBABwIkgHSAx6IIIIAAAggggAACCCCAAAIIlEHgwIEDZmVlxZw4cSJ1dZQ0MT4+Hj2yJFCk3mCOCywsLESjfug5bRkeHo7M5ubmjBJpKAgggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAQNkFSAAp+x6ifggggAACCCCAAAIIIIAAAggECCiJYWZmJkpqUEJI2qJRQDQayPz8fNpFuz7/6upqNOKHRv7QCCBpipyU9KGEGSWBUBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAgaoIkABSlT1FPRFA
AAEEEEAAAQQQQAABBBAIEFBSw/LycpTkkHZkCyVTTE9PR4kgSrIoW1H9lOSiRJWLFy+mrt7hw4ejxI+pqanUy7IAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCDQawESQHq9B9g+AggggAACCCCAAAIIIIAAAgUIKMnh6tWrZmJiIvXalfyhJAslg6QdYSP1xgIXUMKH6jQ7Oxu4xMPZ9u7daxYXF6OH/qYggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAJVFCABpIp7jTojgAACCCCAAAIIIIAAAgggECCgEUDOnDkTjQiSJfFhfn4+SrpYWloK2FoxsygBZXx83IyNjZlr166l3ogSYVZWVoxG/6AggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAJVFiABpMp7j7ojgAACCCCAAAIIIIAAAgggECBw4MCBaDSQEydOBMz96CxKuugkAePRtaX7nxJQBgcHTZYEFLVZiR9zc3NGiTAUBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEECg6gIkgFR9D1J/BBBAAAEEEEAAAQQQQAABBAIFZmZmokQQJUekLRcvXoxGA9E6ii6rq6vRiB/T09NGI4CkKUr2UNLH8vKyGR4eTrMo8yKAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAqUW2FLq2lE5BBBAAAEEEEAAAQQQQAABBBDIVWDv3r1RcsTCwoJJm2ChZIzZ2Vlz/vz5KMkiSyKJrzHx+jXyR5Zy+PBhc+bMGUb8yILHMggggAACCCCAAAK5Cty6cNro0YuydfZWLzYbtbdXbf7+2ds9afPdKx+ba//t+Z5su4ltfuPiXaNHL0qv9rPaeunaN6aJ72va3L0jnXNZ96y1pV6dv+NW/uHNH8d/dvW5l+eyXrVZwJzLuneY9fL6u1fv616ev7Vnm/i+bmKbta85l0mhO6WJ57LuyBa3lSaeF3rZ5l6+R3r1vUATvwvp5TVOr67rspylGAEkixrLIIAAAggggAACCCCAAAIIIFBxgYmJiWg0ED2nLZ2M0OHa1tLSUjTCSJbkjzipZXFxkeQPFzDTEUAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQKDyAiSAVH4X0gAEEEAAAQQQQAABBBBAAAEEsgkMDAxEI2YsLy+b4eHh1CtRssbg4KBR8kbWcu3aNTM+Ph499HfacuLECbOysmLyHo0kbT2YHwEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQKFqABJCihVk/AggggAACCCCAAAIIIIAAAiUXUPKEkiiUTKGkkDTl1q1bUfLG2NiYSZvAoQSSkZGRTAkkcZ1nZmZS1zlN+5gXAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBAoiwAJIGXZE9QDAQQQQAABBBBAAAEEEEAAgR4LKJki62gaFy9ejEYD0TqSyurqapT4MT09bZRAkqZ0OmpJmm0xLwIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAgggUCYBEkDKtDeoCwIIIIAAAggggAACCCCAAAI9Fti7d69ZXl42i4uLmUbWmJ2djRJBlBDSXpTsoaQPjfqhJJC0ZWJiwly9etXomYIAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCDRNoO/eRmlao7vZXt35VMEvrnLixAkTcndU1/JMRwABBBAorwB9QHn3DTVDAAEEEEAAgTABJWzoM+38/HzYAm1zKVFjbm4uSiRZWloyk5OTqUf80CqVlHLmzBlz4MCBti3wXwQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAoDkCjADSnH1NSxFAAAEEEEAAAQQQQAABBBBIJTAwMBAlcGhEkOHh4VTLauaFhYVoNJCxsTEzPj6eKflDN07QqB8kf6TmZwEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQqJkACSA126E0BwEEEEAAAQQQQAABBBBAAIG8BZR8sbKy8mA0jzTr1ygiFy9eTLNINK+2qcQPRs1MTccCCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIFBTARJAarpjaRYCCCCAAAIIIIAAAggggAACeQtMTU1FiSCHDx/Oe9UP1qdRRxYXF41GHdm7d++D6fyBAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAgg0XYAEkKYfAbQfAQQQQAABBBBAAAEEEEAAgRQCSspQgoYeeSdoKMFEo34UmWCSoqnMigACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIlEqABJBS7Q4qgwACCCCAAAIIIIAAAggggEA1BJSksbKyYpS00WkZHh6ORvyYm5szGgGEggACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIILBZgASQzSZMQQABBBBAAAEEEEAAAQQQQACBAAElaxw/ftwogaOTcvToUXPgwIFOVsGyCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIBA7QVIAKn9LqaBCCCAAAIIIIAAAggggAACCBQjMDMzYwYHB83q6mpHG5ienjZjY2Mdr6ejSrAwAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCBQcgESQEq+g6geAggggAACCCCAAAIIIIAAAmUTuHjxYpT4MTs7m1vVtM6RkRGjpJJbt27ltl5WhAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAII1EWABJC67EnagQACCCCAAAIIIIAAAggggEDBAkrMmJycjEbruHbtWiFbU1KJEkGWlpYKWT8rRQABBBBAAAEEEEAAAQQQQAABBBBAAAEEEEAAAQQQQAABBBBAAAEEqirQd2+jVLXy3a637kaqR5py6dIl7zIHDhwwo6OjaVZpDh8+bIaHh1Mtw8wIIIAAAp0JrK6upg5CLKIPUL+hBwUBBBBAAAEEEOi2wPz8vFFyRjdH59Dn37m5ObN3795uN5ftIYAAAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACpRMgASTFLtHdTQcHB1Mskf+sAwMD5urVq0bPFAQQQACB7gko0FF9QDcDHm2tUx9AAKRNhmkIIIAAAgggUJSAEmGnp6e9NzdwbVufXU+cOBElsHa6jqmpKddmmI4AAggggAACCCCAAAIIIIAAAggggAACCCCAAAIIIIAAAggggAACCDRCoL8RrcypkQq4nZiYyGlt2VZz/Phxkj+y0bEUAggg0JGAghd1Du5lUR9E8kcv9wDbRgABBBBAoFkCSny
+       [... remainder of the base64-encoded PNG data for the csw.png image output omitted ...]",
+       "text/plain": [
+        ""
+       ]
+      },
+      "metadata": {},
+      "output_type": "display_data"
+     }
+    ],
+    "source": [
+     "csw_img = Image.open('csw.png')\n",
+     "display(csw_img)"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 114,
+    "metadata": {},
+    "outputs": [
+     {
+      "data": {
+       "text/plain": [
+        "Text(0, 0.5, 'Time')"
+       ]
+      },
+      "execution_count": 114,
+      "metadata": {},
+      "output_type": "execute_result"
+     },
+     {
+      "data": {
+       "image/png":
"iVBORw0KGgoAAAANSUhEUgAAAkUAAAG1CAYAAAD3BIBFAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjguNCwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8fJSN1AAAACXBIWXMAAA9hAAAPYQGoP6dpAABWiUlEQVR4nO3deXgUVfr//XcnZIcE2UKAEBBhkGUQWUR2RgVkmUFR3AF1dFR0ZBhUcFRgHEVx+fkouDAqqCjiiAiCjuB3WERQQAkCyqZhMRADCglJSGer549Dp9NJd5MOSS/x87quurq76q7quwvovjl16hybZVkWIiIiIr9xYYFOQERERCQYqCgSERERQUWRiIiICKCiSERERARQUSQiIiICqCgSERERAVQUiYiIiABQJ9AJhIqSkhIOHz5MvXr1sNlsgU5HREREKsGyLE6ePEmzZs0IC/PeFqSiqJIOHz5McnJyoNMQERGRKjh06BAtWrTwGqOiqJLq1asHwKHdu4k//dxFeDhERztf5+Z6PlhYGMTEVC02Lw88DUJus0FsbNViT52CkhLPecTFVS02Px+Ki6snNjbW5A1gt0NRUfXExsSY8wxQUACFhdUTGx1t/l74GltYaOI9iYqCOnV8jy0qMufCk8hIiIjwPba42PzZeRIRYeJ9jS0pMX/XqiO2Th1zLsD8m8jLq55YX/7d6zvCfexv9TsiLw/OOw+ApkAhEOH5qOQDjj+pcCDSS6wdKKlCbBgQ5SW2AHCc/axffgmZ74js7GySk5NLf8e9sWmaj8rJzs4mISGBrKws4uPjA52OiIiEstxcqFsXgDjAS+kdlEKpdPDl91sdrUVERERQUSQiIiICqCgSERHxvzp1YNw45gNeejOJn4VsUfTiiy/SunVroqOj6datG59//rnX+LVr19KtWzeio6M599xzefnll/2UqYiISDlRUTB/PjdjOjBLcAjJomjRokVMnDiRf/zjH2zdupV+/fpx+eWXc/DgQbfxaWlpDBs2jH79+rF161YefPBB/vrXv7J48WI/Zy4iIiLBKiTvPrvooou48MILeemll0rXnX/++YwaNYqZM2dWiH/ggQdYtmwZ33//fem6O+64g23btrFx40a372G327GXud3QcUuf7j4TEZGzdnq4h7i6dUPuzjPQ3WdBo6CggK+//prBgwe7rB88eDAbNmxwu8/GjRsrxA8ZMoQtW7ZQ6GEMiZkzZ5KQkFC6aOBGERGpNnl5ULcuuUDsGYPFX0KuKDp27BjFxcUkJia6rE9MTCQjI8PtPhkZGW7ji4qKOHbsmNt9pk6dSlZWVuly6NCh6vkAIiIiEpRCdkTr8vOPWZbldU4yd/Hu1jtERUURFeVtbE8RERGpTUKupahRo0aEh4dXaBXKzMys0Brk0LRpU7fxderUoWHDhjWWq4iEti++gNtvh/btISHB3DDUvDmMGAGvvup99g1/mD8fpk+H/fsDmwfAhx+aXFJTz+44GzfCn/4EjRubGTM6dIBHH/U+84NIdQm5oigyMpJu3bqxatUql/WrVq2id+/ebve5+OKLK8SvXLmS7t27ExHhbbYZEfktysuDa66Bvn3h3/+GAwcgORl+/3vTP3bFCrjtNmjbFrZvD1ye8+fDjBnBUxTNmHF2RdHbb0O/frBsmSlAzz8f9u2DRx6B/v29T0MnUh1CrigCmDRpEq+++iqvv/4633//PX/72984ePAgd9xxB2D6A40dO7Y0/o477uDAgQNMmjSJ77//ntdff53XXnuNyZMnB+ojiEiQKiyEwYPhvfegaVN44w349VfYsQM2b4bDh2HnTvjLX+DoUfjhh0BnXDvs3w+33mrmA501Cw4dgm++gb174Xe/M+f+/vsDnaXUdiHZp+iaa67hl19+4Z///CdHjhyhU6dOfPzxx6SkpABw5MgRlzGLWrduzccff8zf/vY35syZQ7NmzXj++ecZPXp0oD6CiASpGTPMZbPERHMpp1WrijEdOsDLL8ONNzonRJez89RTZtL1wYPhvvuc61NS4PXXoU8fmDsXHn7Y/NmI1AhLKiUrK8sCrKysrECnIiI15MQJy6pXz7LAshYurPpxli+3rCFDLKthQ8uKjLSsVq0s6847LevgQffxKSnmPdPSLGvjRssaOtSy6te3rNhYy+rb17L+7/9c41evNvGelnnzXONzcy3riScsq1s38/liYiyrSxfLmjXLsvLzXWMffdQco2NHyzp1qmKur71mticlWdaxYyZnb7lMm3bm81VSYo4HlrVokfuY9u3N9ldeOfPxQsKpU5Z11VXWe2BFgUWILaHEl99v/R9HROS0FSvg5EnTyfeqq6p2jKlTTUfsTz81HYU7d4bMTHjpJejSBbZs8bzv8uWm78zmzdCmDUREwPr1MGQIrFnjjEtIMC0njnHoOnUyrx1L2ZaU9HTo0QOmTIFt28y2Vq3MJcD774dLL4VTp1zzv/his33KFNf89u+HiRPN89deg4YNITravGeTJmZ927auubRseeZzdvAgHDlinvfp4z7Gsf6rr858vJAQHQ3/+Q9jAPsZg8Vv/FCk1QpqKfIfguB/Qb4uUjtMmGBaI0aNqtr+H31k9q9Tx7IWLHCuz8qyrCuuMNtatbKsvDzX/RwtRRERljVzpmUVFZn1BQWWdcMNZttFF1V8vwEDzLbVq93nU1xsWb17m5hrr7WsjAzntkOHLKtfP7Nt8mTX/fbts6y4OMuy2Sxr1SrnsRzxd95Z8b3GjXPfSlUZq1aZfaOiTKuRO489ZmL69fP9+PLbppYiEZEqSE83j61bV23/J54wjxMmwA03ONfHx8OCBdCokWltWbjQ/f5Dh5rWmfBw8zoiAp57ztyJ9dVXcPy4b/msWAEbNpiWorfecm1BatECFi2CunVN/6iyrUVt2sCzz5oLYOPHm/edNQs+/xzatYOnn/YtjzNxfK769cHTcHPnnOMaK1ITVBSJiJx28qR5jIvzfd+cHNMxG+Ceeypuj401t/EDrFzp/hh//nPFdY0aOTt7//ijbzl98IF5HD8e6ri5rSYpyRRMOTnw9deu226/3VwGTE+HK66AadPMMRYsMJ+lOjnGIIqM9BzjGEu3bPEW0nJzTQVoswV+wCspFZJ3n4mI1IR69cxjVX6j9u2DkhLz433uue5jOnY0j3v2uN/epo379U2awO7dpnjxhWMMpZdegnfecR/jyMXRSlbWq6+aPlFr15rX06ebIqq6RUebx4ICzzGO+bljYqr//UUcVBSJiJzWvLl5TEvzfV9HwdK4sedLQI7LV44WqfI8tVA5bvv3dWLyrCzzuGPHmWPdtcAkJppCbs0ak8P48b69f2U5Lo2dOGE+o7vz57hs5ogVqQm6fCYicppjUPwNG6CoyLd969Y1j0ePei5efv7ZPDpapGqaI6dVq7zdNO/sO1TenDnOgqikxFz+87Uwq4y2bc2j3W4Gx3THcenQEStSE1QUiYicNmyYKSQyM+H9933b97zzTPFgt3vu+7Nzp3ls1+7s8nTwMgc2YAaZhMq1FJW3Z4+5ZT8szE
y70bq1Ka5mz65aLt60bGlGDwczcKY7jvUXXVT19xE5ExVFIiKn1a/v7CQ9ceKZ5xT74gvTqgSmmHK0NL3wQsXYU6dMHx0w4w5VB0f/Gk+dj6+80jy+8opvE6oWFcFNN5m5xv7+dxg+HN580xRIDzxg+jf5mos3NpvpzA1m/KPyNmyAXbvM3Xh//KPvxxepLBVFIiJlTJ9uBi/8+Wfz+NZbFQuKPXvMbfcDB5pWJYcHHjCPL77o2rH55EkYO9ZcWmvVCq69tnpydXTodnSELu+KK6BXL1NQjBxpOoOXZbeb2/ZvucV1/b/+BZs2mU7Wjz5q1vXtC5Mnm6LnxhsrXl505LJuXdUusd13n7n7bOVKM+WH4xgHDjjz+/OfnS1KIjXCD+Mm1QoavNF/CILBGH1dpHY5edKyRo929riJibGsTp0sq0cPy2re3Lm+RQvL2r7ddd8pU5zbk5Mtq3t3MxAiWNY551jWpk0V36/sNB/ueBqkcd0653u1a2dZ/fub2E8+ccYcPmxZXbs64847zwwE2aGDmYIELCsx0Rn/1Vdm8MnISMtKTXV9P7vdTA8ClvXII67b9u1zHi8lxQyyOGCAb4M5vvGGZYWFmWM0b27yjogwr7t1s6ycnMofK+idOmVZw4aZxd18KlJtfPn91rd5Jako8p9AFzgqisRh3TrLuvVWU3DUrWt+9Js1s6zhw80cYOVHpnb46CPLuuwyUwRFRpoi4Y47Kjf3mTveRq5+5x3L6tnTWXjhZlTp/HzLevFFUzQ5ckpONvOqzZhhWd99Z+Jyc81nBTOytjvbt5uRp+vUMQVUWZ9+anKNjzejYVPJuc/K+uILyxoxwrIaNDDv87vfWdb06aobpOp8+f22WVZN3EtQ+2RnZ5OQkEBWVhbxjgmHpEbYzqbHZoDon5GISHDy5fdbfYpEREREUFEkIiLif7m5ZrTOuDhN8xFENKK1iIhIIOTlBToDKUctRSIiIiKoKBIREREBVBSJiIiIACqKRERERAB1tJYgpDF//EdjQomIOKkoEhER8bewMBgwwPlcgoKKIhEREX+LiYE1awKdhZSj8lREREQEFUUiIiIigIoiERER/8vNhcaNzaJpPoKG+hSJiIgEwrFjgc5AylFLkYiIiAgqikREREQAFUUiIiIigIoiEREREUBFkYiIiAigu89ERET8LywMund3PpegoKJIRETE32JiYPPmQGch5ag8FRERESEEi6KZM2fSo0cP6tWrR5MmTRg1ahS7d+/2us+aNWuw2WwVll27dvkpaxEREQl2IVcUrV27lgkTJvDll1+yatUqioqKGDx4MLmVGCZ99+7dHDlypHRp27atHzIWEREpJy8PWrUyS15eoLOR00KuT9F///tfl9fz5s2jSZMmfP311/Tv39/rvk2aNKF+/fo1mJ2IiEglWBYcOOB8LkEh5Iqi8rKysgBo0KDBGWO7du1Kfn4+HTp04KGHHmLQoEEeY+12O3a7vfR1dnb22ScrEmQsfRn7hc1mC3QKPtPfDfktCrnLZ2VZlsWkSZPo27cvnTp18hiXlJTE3LlzWbx4MR988AG/+93vuOSSS1i3bp3HfWbOnElCQkLpkpycXBMfQURERIKEzQrh/w5MmDCBFStWsH79elq0aOHTviNHjsRms7Fs2TK32921FCUnJ5OVlUV8fPxZ5S0ivy1qKZIKcnOhbl3zPCcH4uICm08tlp2dTUJCQqV+v0O2peiee+5h2bJlrF692ueCCKBXr17s3bvX4/aoqCji4+NdFhEREam9Qq5PkWVZ3HPPPSxZsoQ1a9bQunXrKh1n69atJCUlVXN2IiIiEqpCriiaMGEC77zzDkuXLqVevXpkZGQAkJCQQExMDABTp04lPT2dN998E4DnnnuOVq1a0bFjRwoKCliwYAGLFy9m8eLFAfscIiLyG2azQYcOzucSFEKuKHrppZcAGDhwoMv6efPmMX78eACOHDnCwYMHS7cVFBQwefJk0tPTiYmJoWPHjqxYsYJhw4b5K20RERGn2FjYuTPQWUg5Id3R2p986aglIlKWOlqLBM5voqO1iIiISHVSUSQiIuJveXnQsaNZNM1H0Ai5PkUiIiIhz7Lgu++czyUoqKVIREREBBVFIiIiIoCKIhERERFARZGIiIgIoKJIREREBNDdZyIiIv5ns0FKivO5BAUVRSIiIv4WGwv79wc6CylHl89EREREUEuRiEiN0zxi/qE55uRsqaVIRETEz6KBTQA9esCpUwHORhxUFImIiPhZGNADYMsWKCkJcDbioKJIREREBBVFIiIiIoCKIhERERFARZGIiIgIoKJIREREBFBRJCIiEhBHARo1CnQaUoYGbxQREfGzPKAJYB09GuhUpAy1FImIiIigokhEREQEUFEkIiLid9HAaoCBAzXNRxBRUSQiIuJnYcBAgLVrNc1HEFFRJCIiIoKKIhERERFARZGIiIgIoHGKfJebC+HhFdeHh0N0tGucJ2FhEBNTtdi8PLAs97E2G8TGVi321Cnv17Xj4qoWm58PxcXVExsba/IGsNuhqKh6YmNizHkGKCiAwsLqiY2Odv5d8SW2sNDEexIVBXXq+B5bVGTOhSeRkRAR4XtscbH5s/MkIsLE+xpbUuK9A6ovsXXqmHMB5t9EXl71xPry717fEe5jq/M7ooxIvP/Alf1TPVPsKcBxliJOL2cbG1t+hb4jfI/15TuisiyplKysLAuwssxXSMVl2DDXHWJj3ceBZQ0Y4BrbqJHn2O7dXWNTUjzHdujgGtuhg+fYlBTX2O7dPcc2auQaO2CA59jYWNfYYcM8x5b/63fVVd5jc3KcsePGeY/NzHTG3nWX99i0NGfs5MneY3fscMZOm+Y9dtMmZ+ysWd5jV692xs6e7T12+XJn7Lx53mPfe88Z+9573mPnzXPGLl/uPXb2bGfs6tXeY2fNcsZu2uQ9dto0Z+yOHd5jJ092xqaleY+96y5nbGam99hx45yxOTneY6+6ynLhLVbfEWbRd4T7vEPsO2LYGT7TXWBxehlwhtjJZWK7nyF2WpnYDmeInVUmFrCysrKsM1FLkYiISCBERztbniUo2CzLsgKdRCjIzs4mISGBrMOHiY+PrxigpnH3sbp85nusmsbNc10+q1qsviPMc31H+B7rw7/7OjYbUZ4jKQAcZzQMMy6TJ4WnF19jbUBMJWMBsrKy3P9+l6GiqJJKi6JKnFQREZHazOYoKENIZX6/1W4nIiIigooiERER/8vPh+HDzeLtkrL4VcgVRdOnT8dms7ksTZs29brP2rVr6datG9HR0Zx77rm8/PLLfspWRETEjeJi+Phjs1RymAGpeSF591nHjh357LPPSl+Huxs36LS0tDSGDRvGbbfdxoIFC/jiiy+46667aNy4MaNHj/ZHuiIiIhICQrIoqlOnzhlbhxxefvllWrZsyXPPPQfA+eefz5YtW3j66adVFImIiEipkLt8BrB3716aNWtG69atufbaa/nxxx89xm7cuJHBgwe7rBsyZAhbtmyh0Mvtj3a7nezsbJdFREREaq+QK4ouu
ugi3nzzTT799FP+/e9/k5GRQe/evfnll1/cxmdkZJCYmOiyLjExkaKiIo4dO+bxfWbOnElCQkLpkpycXK2fQ0RERIJLyBVFl19+OaNHj6Zz585ceumlrFixAoA33njD4z7lx1NwDM3kbZyFqVOnkpWVVbocOnSoGrIXERGRYBWSfYrKiouLo3Pnzuzdu9ft9qZNm5KRkeGyLjMzkzp16tCwYUOPx42KiiIqytt4nSJS66V/ATvfgJ/WQe4RKM6HmEbQpCu0GQXnXwcRcWc8TI3ZMR+y90PH8ZDQKnB5AOz9EI6mwnmjoMkFvu+fmwH7V0LGJrMc3QbFBdDpVhjyavXmKuJByBdFdrud77//nn79+rndfvHFF/PRRx+5rFu5ciXdu3cnIsLbXMci8ptVmAf/vRn2vGde14mGhDZQJwZy0uHHFWbZ8AiM/hQadw5Mnjvnw09rIXlg4IuiHz40BWR8q6oVRbvehTV/q+akglhcnOcpViRgQq4omjx5MiNHjqRly5ZkZmbyr3/9i+zsbMaNGweYy17p6em8+eabANxxxx3Mnj2bSZMmcdttt7Fx40Zee+01Fi5cGMiPISLBqrgQ3h8Mh7+AuKbQ70lodzVElJll6Zfv4JvnYcdrcOKHwBVFtUlkPKRcBk17muXgZ7D1hUBnJb8xIVcU/fTTT1x33XUcO3aMxo0b06tXL7788ktSUlIAOHLkCAcPHiyNb926NR9//DF/+9vfmDNnDs2aNeP555/X7fgi4t7GGaYgik2E6za6b4Fp2AEuexnOvxFsIdc1Mzh1vsUsDpnfBC4X+c3ShLCVpAlhRX4D7FkwNxkKTsLwhdD+2qod58cVppUjYwsUnoS4ZtD6cug5FeLd3Mn671aQfQD+nGb61mycAUe+NH1qEi+E3jOg5R+c8YfWwHuDPL//kHnQabzzdWGeyWfPf+D4HigpgnPawfk3QNe/Qp0y/Se//Bd88TA07Ag3bjGXDsva/jqsvBXikmDcdnOuXm3tOZeLp0Hv6Z63e7JhujkPtbVPUX4+3HSTef7WWxDtbW744KMJYUVEarsfV5gf+ZjG0O6qqh3j86mwZATs/9T0QWrUGfIyYdtL8FYXUyh5fP/lsKg/ZGyG+m0gPALS18PiIaYQcohMgGZ9zCUngEadzGvHEldmGJKT6fB2D/h8ium8HJto+v38shPW3Q/vXwqFp5zxPadC0sVm++dTXPPL2g9rJprnQ16DmIamaGrWB2KbmPXntHXNpV7LKp3GWq+4GN5/3yya5iNohNzlMxGRGnN4g3ls3gfCqvD1+MNy2PSE2XfofNMSA2DPhv+Oh31L4KOrYfx3rn2UHNZMgt7/hB73QVi46d/06c3w/dumQLn+SxOX2BWuWw+LBpqO1n94wXS2Ls8qgeVjTB+o310Lg55zFkwnf4IV10P656bD+ICnzPqwcBj2FrzZxfSbOncEpFxqjvXJWFM0drnTtHyB6Xd13Xrz+Xa+AT0fdG2lEgkhaimSoFN+wt9QWKSWyEk3jwleLgd5s+kJ83jBBGdBBBAVD8MWmNv5s/fDLg83erQaChdNMYUJmJaigc9BeBQc+Qryj/uWz48rTKHXtIcpdMq2INVrASMWQURd2Paya2tR/TYw8FnAMsVO/nHYNMsUUOe0gwFP+5aH1DqWZYXMkpWVVenPpaJIRMSh4KR5rMrYQwU5cGSjed71norbI2Kh823m+YGV7o/R+c8V18U2Mpe7ALI8T2nk1t4PzGPH8e5bvuommYKpMAd+/tp12+9vN61EOemw9ArYOM0cY9gC81lEaiFdPhMRcYisZx4Lc33f98Q+c4kpPAoSznUf06ijeTy+x/32+m3cr49tAsd3m8LLF8e2m8dtL8H377iPceTiaCUra/Cr8EZnc4kO4OLppogSqaVUFImIONRtbh6z0nzf11GwxDQGT5dUY09fvnK0SJXnqYWq9LZ/H28Wtp++bHBsx5lji05VXBeXaAq5Q2tMDuorJLWciiIREYdmvSF1jumHU1LkW2fryLrm8dRRM1Kxu8Io7+fTsfXOPtfKiDid01WrTGdpX22d4yyIrBJYeZsZwVv96KSWUp8iERGH1sNMIZGXCXve923f+ueZ4qHY7rnvz7Gd5vGcdmeXp8OZipOGHU6/byVaisr7dY+5Zd8WBqOWmc7nB1bB1tmekvH9PX7LYmMhJ8csseqjFSxUFImIOETXd3aSXj3RjMvjTfoXkH76Nv7IuqalCdxPT1F4CrafHoSw1ZBqSBYzDhK4v/QF0PZK8/jtK1CUX/njlhTBJzdBUR50+zucOxyGvmkKpM8fgF93+56LuLLZzPxncXFqeQsiKopERMrqPd0MXpj3Myy8GL57q2JB8ese+GwCvDfQtCo59HjAPKa+6NqxueAk/HesubQW38qMGVQdHB26D611v73tFZDUC37dBUtGwvF9rtuL7Oa2/f/e4rr+y3+ZmeobdYY+j5p1LfpC98mm6Pn4RlM4ucvlp3Wa6FRClqb5qCRN8+E/oTjuj/4Z1TIFOWZ8nr2Lzes6MebOsDoxkHPYeadW3RYw+hMzorTD51Od4xXVSzadq3/93tzRFn2O6ZNT/g6ustN8uJtrzTFI45jVroM0/vS5GQEbzCW5uKaADXpOgdZDzfqcI7BkOGRuNa/rn2dGoi44ae6YKy4wOd6ZYbYf2QTv9jGtQtdvgiZdnO9XXABv9zQjY/d6BPrMcG478QPM72Bi4lPMSNa2MDMcQGU6aGcfgre6Ol8X5ZkCLDzK2TcKYNRSM7hmqLPb4S9/Mc9feQWiorzHS5X58vutliIRkfIi68If34dr1pm5t+olm0tpR7cBlrmcNPg1uGWPa0EE0G8mjPrIzPhemAPHvjWDNna5A27aVr23tLfoB8PeMbPK56SbVpqf1pr50xzqJpmJbS95EVr0h/xfTIFUcNLs13uGKbbAzJH2yU2mFejiGa4FEUB4pBmnKDwKNj1uCiiH+m3M524xwAz2mL7e5JK9v3KfxSo2uTkWx2W4Yrvr+pLCKp+uoFJUBG+8YZaiojPHi1+opaiS1FLkP2opEpFaLzcX6p5uAcvJMX2LpEaopUhERETERyqKRERERFBRJCIiIgKoKBIREREBVBSJiIiIAJr7TERExP9iYyEz0/lcgoKKIhEREX+z2aBx40BnIeWoKJKgE4pj/oTi2EoQmudaRKSmqE+RiIiIv9ntMGGCWez2QGcjp6koEhER8beiInjxRbNomo+goaJIREREBBVFIiIiIoCKIhERERFARZGIiIgIoKJIREREBFBRJCIiIgJo8EYRERH/i4mBtDTncwkKKopERET8LSwMWrUKdBZSji6fiYiIiKCiSERExP8KCuC++8xSUBDobOQ0m6UZISslOzubhIQEsrKyiI+PD3Q6EmQ0IayI+CQ3F+rWNc9zciAuLrD51GK+/H6rpUhERESEEC2KWrVqhc1mq7BMmDDBbfyaNWvcxu/atcvPmYuIiEiwCsm7zzZv3kxxcXHp6x07dnDZZZdx9dVXe91v9+7dLk1njRs3rrEcRURE
JLSEZFFUvph54oknaNOmDQMGDPC6X5MmTahfv34NZiYiIiKhKiQvn5VVUFDAggULuOWWW87Y2bVr164kJSVxySWXsHr1aq+xdrud7Oxsl0VERERqr5BsKSrrww8/5MSJE4wfP95jTFJSEnPnzqVbt27Y7XbeeustLrnkEtasWUP//v3d7jNz5kxmzJhRQ1lLbaO7uMSbULw7UX+n5bco5G/JHzJkCJGRkXz00Uc+7Tdy5EhsNhvLli1zu91ut2O320tfZ2dnk5ycrFvyRcRnKoqkgpIS+P578/z8880I11IjfLklP6Rbig4cOMBnn33GBx984PO+vXr1YsGCBR63R0VFERUVdTbpiYiIuBcWBh07BjoLKSekS9N58+bRpEkThg8f7vO+W7duJSkpqQayEhERkVAUsi1FJSUlzJs3j3HjxlGnjuvHmDp1Kunp6bz55psAPPfcc7Rq1YqOHTuWdsxevHgxixcvDkTqIiLyW1dQAI8/bp4/+CBERgY2HwFCuCj67LPPOHjwILfcckuFbUeOHOHgwYOlrwsKCpg8eTLp6enExMTQsWNHVqxYwbBhw/yZsoiIiFFYCI6bee67T0VRkAj5jtb+ornPRKSq1NFaKtDcZ36juc9EREREfKSiSERERAQVRSIiIiKAiiIRERERQEWRiIiICBDCt+SLiIiErOho2LTJ+VyCgooiERERfwsPhx49Ap2FlKPLZyIiIiKopUhERMT/Cgrg//v/zPN779WI1kHirEa03rp1KwsXLmTXrl3k5eXx2WefAWb2+q+++opLL72UBg0aVFuygaQRrUWkqjSitVSgEa39xpff7yq3FN1///0888wzpf9wyv6jtyyL66+/nmeeeYZ77723qm8hIiIi4jdV6lM0b948nn76aUaMGMG3337L1KlTXba3atWKnj17smzZsmpJUkQklFmWFXKLzWYLuUXkbFWppejFF1/k/PPPZ/HixdSpU4dIN9dC27dvX3o5TURERCTYVaml6LvvvuOyyy6jTh3PNVViYiKZmZlVTkxERETEn6pUFNWpU4eCggKvMYcPH6auoxOZiIiISJCrUlHUuXNnVq9eTUlJidvtjjvRunXrdlbJiYiIiPhLlYqiW265hd27d3PnnXdWaDHKzs5m/PjxZGRkcNttt1VLkiIiIrVKdDSsXm0WTfMRNKo8TtENN9zAwoULqVu3LvXr1yc9PZ1u3brx/fffk5uby/jx43n99derO9+A0ThFIvJbEop3c2lsJXHHl9/vKk/z8fbbb/PKK6/QunVr0tPTsSyLLVu20LJlS1566aVaVRCJiIhI7XdWI1o7nDp1iuPHjxMfH19rO1erpUhEfkvUUlTDCgth7lzz/PbbISIisPnUYr78fldLUfRboKJIRH5LVBTVME3z4Td+mebDoaSkhJ9//pnCwkK321u2bHm2byEiIiJS46pcFC1cuJBZs2axc+dOiouL3cbYbDaKioqqnJyIiIiIv1SpKHrmmWe4//77iYiIoH///iQlJXkd3VpEREQk2FWpknn++edp3rw5GzZsoEWLFtWdk4iIiIjfVemW/KNHjzJ69GgVRCIiIlJrVKkoat++PcePH6/uXEREREQCpkpF0d///neWLl3KgQMHqjsfERGR2i8qCpYvN0tUVKCzkdOq1KfohhtuICMjg969e3PXXXfRpUsXj/f+9+/f/6wSDDq5uRAeXnF9eLjr/DW5uZ6PERYGMTFVi83LA09jcdhsEBtbtdhTp8DDBL+A6xgavsTm54OHuxN9jo2NNXkD2O3g7c5GX2JjYsx5BigoMIOqVUdsdLTz74ovsYWFJt6TqChw3NjgS2xRkTkXnkRGOgeQ8yW2uNj82XkSEWHifY0tKTF/16ojtk4d5w+PZZl/G9UR68u/+xD7jogByp7RaLz/L7rsWfIlNgpw841apVgsK7S+I4YO1XcE+Oc7orKsKnrooYesuLg4KywszOtSW2RlZVmAlWX+2VVchg1z3SE21n0cWNaAAa6xjRp5ju3e3TU2JcVzbIcOrrEdOniOTUlxje3e3XNso0ausQMGeI6NjXWNHTbMc2z5v35XXeU9NifHGTtunPfYzExn7F13eY9NS3PGTp7sPXbHDmfstGneYzdtcsbOmuU9dvVqZ+zs2d5jly93xs6b5z32vfecse+95z123jxn7PLl3mNnz3bGrl7tPXbWLGfspk3eY6dNc8bu2OE9dvJkZ2xamvfYu+5yxmZmeo8dN84Zm5PjPfaqqywX3mJD7DsiDSzKLJu8fLbMcrGrvcTmlItd7u2clYt97wyx+o44veg7wiynvyNKf7+zsqwzqVJL0SOPPMLjjz9O48aNufbaa3VLvohILdMqJQVr/37nih49YMsWt7GNGzXCOnrUuWLgQFi71m1sXGwsVtmWr+HD4eOPPeZhWZbzxdVXw/vvVyL7ELF8OeN79uRtYCLwlJfQgYMG4TijdwFzvMQOHzECxxkdB8z3Env1mDE4zuhVwH+8xI6/+WbeuPlmAKzly71Ehq4qTfPRokUL6tWrx+bNm2vtXGfllQ4Tfviw+0uFIdY0rstnZejymRHKTeO6fOZ8re8I8zzYvyNycyExEYA4oBDwNvtZPuA4o3UAbxeF7EBxFWLDMZcoPSkAHGfJKiwMme+IGp/7LC4ujjvvvJOnn37a111DluY+ExGRalNm7rM4XPtOhYIqlA4B48vvd5XuPuvcuTNHjhypUnIiIiIiwahKRdE//vEPPvzwQ7755pvqzkdEREQkIKrUO/r48eNcdtll9O7dmxtvvJELLrjAY5PU2LFjzypBEREREX+oUp+isLAwbDabyzVFm6PD2mmWZWGz2Sj21inOjXXr1vHUU0/x9ddfc+TIEZYsWcKoUaNcjjtjxgzmzp3L8ePHueiii5gzZw4dO3b0etzFixfz8MMP88MPP9CmTRsee+wxrrjiikrnpT5FIiJSbdSnyG98+f2uUkvRvHnzqpRYZeTm5tKlSxduvvlmRo8eXWH7rFmzePbZZ5k/fz7t2rXjX//6F5dddhm7d++mXr16bo+5ceNGrrnmGh599FGuuOIKlixZwpgxY1i/fj0XXXRRjX0WERERCR1VainyF5vN5tJSZFkWzZo1Y+LEiTzwwAMA2O12EhMTefLJJ/nLX/7i9jjXXHMN2dnZfPLJJ6Xrhg4dyjnnnMPChQvd7mO327GXud0wOzub5ORktRSJiMjZKyqCJUu4eswYluC8LT5UBHHpUEGN330WKGlpaWRkZDB48ODSdVFRUQwYMIANGzZ43G/jxo0u+wAMGTLE6z4zZ84kISGhdElOTj77DyAiIgJmPKyrr+Z9Qq8gqs1CqijKyMgAIPH0gFcOiYmJpds87efrPlOnTiUrK6t0OXTo0FlkLiIiIsGuUn2KwsLCCAsL47vvvqNdu3alHa3PxGazUeRtlNAq8tSpuzr3iYqKIkozF4uISE04ffnsKgjJy2e1VaWKov79+2Oz2Yg9PeS747W/NW3aFDAtP0lJSaXrMzMzK7QEld+vfKvQmfYREfniC3jjDVi
3Do4cMbMPNGoEXbvCqFFw3XWus1D42/z5sH8/jB8PrVoFLg+ADz+E1FRzXi64oOrH2bgRnngCNmyAnBxo3dqc5/vuc50lJeTZ7TBmDP8hNO8+q7XOOGVsAAHWkiVLSl+XlJRYTZs2tZ588snSdXa73UpISLBefvllj8cZM2aMdfnll7usGzp0qHXttddWOhdfZtkVkdCWm2tZY8Y4J9uOjrasjh3NhPRJSc71SUmW9e23gctzwICKE6gHimNS+rITqftqwQLLCg83x2ne3LK6drWsiAjzukcP8+dSa+TklP5FigWLEFtCiS+/35XuUxQeHs6jjz7qU8FVFTk5OaSmppKamgqYztWpqakcPHgQm83GxIkTefzxx1myZAk7duxg/PjxxMbGcv3115ceY+zYsUydOrX09b333svKlSt58skn2bVrF08++SSfffYZEydOrPHPIyKhpbAQBg+G996Dpk1NS9Gvv8KOHbB5Mxw+DDt3wl/+AkePwg8/BDrj2mH/frj1VjMf6KxZcOgQfPMN7N0Lv/udOff33x/oLKW2q/Q4RZZl+eUWvC1btjBo0KDS15MmTQJg3LhxzJ8/n/vvv59Tp05x1113lQ7euHLlSpcxig4ePEhYmLPe6927N++++y4PPfQQDz/8MG3atGHRokUao0hEKpgxw1w2S0w0l3LcXZbq0AFefhluvNE5IbqcnaeeMleUBg82l8ocUlLg9dehTx+YOxcefrh0cnmR6lfZ5iebzWbNmDHjLBqwQpsun4nUfidOWFa9euaqxsKFVT/O8uWWNWSIZTVsaFmRkZbVqpVl3XmnZR086D4+JcW8Z1qaZW3caFlDh1pW/fqWFRtrWX37Wtb//Z9r/OrVzkt47pbyl7Bycy3riScsq1s38/liYiyrSxfLmjXLsvLzXWMffdQco2NHyzp1qmKur73mvHR47JjJ2Vsu06ad+XyVlDgvSy5a5D6mfXuz/ZVXzny8kKDLZ35TI5fPRERquxUr4ORJaNwYrrqqaseYOhVGjIBPP4WYGOjcGTIz4aWXoEsX2LLF877Ll0P//uZSUZs2EBEB69fDkCGwZo0zLiHBtJw4xqHr1Mm8dixlW1LS06FHD5gyBbZtM9tatTKXAO+/Hy69FE6dcs3/4ovN9ilTXPPbvx8cvQ5eew0aNjSdn/v0gSZNzPq2bV1zadnyzOfs4EHTkR3MPu441n/11ZmPJ1Jlla20bDab9c9//vOsqrVQppYi/yEI/hfk6yK1w4QJ5j/vo0ZVbf+PPjL716ljOg07ZGVZ1hVXmG2tWllWXp7rfo6WoogIy5o507KKisz6ggLLuuEGs+2iiyq+35k6WhcXW1bv3ibm2mstKyPDue3QIcvq189smzzZdb99+ywrLs6ybDbLWrXKeSxH/J13Vnyvs+lovWqV2TcqyrQaufPYYyamXz/fjx+UyrQUWTk5gc6mVvPl99unuc/+3//7fz7Ne2az2fhBvRBFJESkp5vH1q2rtv8TT5jHCRPghhuc6+PjYcEC0z9m/35YuBBuuaXi/kOHurbORETAc8/B+++bFpLjx+Gccyqfz4oV5tb2Hj3grbfMIMoOLVrAokXQrp3pH/XPf5qWLTCtVM8+azqTjx8P27fDK6/A55+b+KefrnwOlXH8uHmsXx88jfbi+NyO2JAXGQmO39PIyMDmIqV8KopOnDjBiRMnaigVEZHAOnnSPFZl7KGcHNMxG+Ceeypuj42F226DmTNh5Ur3RdGf/1xxXaNG5nLX7t3w44/QrVvlc/rgA/M4frxrQeSQlGQKptWr4euvoW9f57bbb4ePPjKX9K64wny2OnVMcXd6yLpqk59vHr3VBo6xdMte6gtpERHmD0aCik9F0fTp03nkkUdqKhcRkYBy3MSam+v7vvv2QUmJ+fE+91z3MR07msc9e9xvb9PG/fomTUxRlJPjW07bt5vHl16Cd95xH+PIxdFKVtarr5o+UWvXmtfTp5siqro5BmUsKPAc45if29GaJVITfCqKRERqs+bNzWNamu/7OgqWxo09XwJydIB2tEiV56mFynHbv6+jomRlmccdO84c664FJjHRFHJr1pgcaqphw3Fp7MQJ8xndnT/HZTNfLh8GtaIi0xsfTE96d0154ne6+0xE5LTevc3jhg3mN8sXdeuax6NHPRcvP/9sHssMq1ajHDmtWuXtpnmzuCt45sxxFkQlJebyX00MV9e2rXm0283gmO78+KNrbMiz281tiiNGOJvBJOBUFImInDZsmCkkMjNN52ZfnHeeKR7sducPeHk7d5rHdu3OLk+HM01B2aGDeaxMS1F5e/aYW/bDwmDZMtP5fNUqmD27arl407KlGT0czMCZ7jjWa8xdqUkqikRETqtf39lJeuJEc6eYN198YVqVwBRTjpamF16oGHvqlOmjA+ZqSXVw9K/x1Pn4yivN4yuvODszV0ZREdx0E+Tlwd//DsOHw5tvmgLpgQdM/yZfc/HGZjOducGMf1Tehg2wa5fpm/zHP/p+fJHKqnRRVFJSok7WIlLrTZ9uBi/8+Wfz+NZbFQuKPXvMbfcDB5pWJYcHHjCPL77o2rH55EkYO9ZcWmvVCq69tnpydXTodnSELu+KK6BXL1NQjBxpOoOXZbeb2/bL3wn3r3/Bpk2mk7Vjysu+fWHyZFP03HhjxcuLjlzWravaJbb77jN3n61caab8cBzjwAFnfn/+s7NFSaRG+GHcpFpBgzf6D0EwGKOvi9QuJ09a1ujRzh43MTGW1amTmam9eXPn+hYtLGv7dtd9p0xxbk9Otqzu3c1AiGBZ55xjWZs2VXy/stN8uONpkMZ165zv1a6dZfXvb2I/+cQZc/iwmW3eEXfeeWYgyA4dzBQkYFmJic74r74yg09GRlpWaqrr+9ntZnoQsKxHHnHdtm+f83gpKWaQxQEDfBvM8Y03LCsszByjeXOTd0SEed2tWy0b41CDN/qNpvkQETkLdeuaPkXr1pmZ25OTzaW0bdvMr9jw4eYyz549ZoqNsmbONOP7XHaZuSPt22/NWEN33GH2r85b2vv1My1SPXuaW+rXrTOtRhkZzpikJDPG0IsvmilEfvkFtm41rVc9e5oJcFevNrF5eeayWVGRWd+li+v7RUaacYqiouDxx01rkkObNuZzDxhg7hRbv97kcqZLkGWNHWsGiBwxwrRIffedaYGaPt0cryrjR4n4wmZZNXEvQe2TnZ1NQkICWVlZxDsmHJIaYTubHpsBon9GIuKT3Fzn7YE5Oar4apAvv98aGEFERMTfIiOdt/Jpmo+goaJIRETE3yIiTG99CSrqUyQiIiKCWopERET8r7jY9CoH02M+PDyw+QigokhERMT/8vNh0CDzXB2tg4Yun4mIiIigokhEREQE0OUzCUIa88d/NCaUiIiTWopEREREUFEkIiIiAqgoEhEREQHUp0hERMT/IiJg1izncwkKKopERET8LTIS7rsv0FlIObp8JiIiIoJaikRERPyvuBi++cY8v/BCTfMRJFQUiYiI+Ft+PvTsaZ5rmo+goctnIiIiIqgoEhEREQ
FUFImIiIgAKopEREREABVFIiIiIoCKIhEREREgCIuidevWMXLkSJo1a4bNZuPDDz8s3VZYWMgDDzxA586diYuLo1mzZowdO5bDhw97Peb8+fOx2WwVlvz8/Br+NCIiIm5ERMC0aWbRNB9BI+jGKcrNzaVLly7cfPPNjB492mVbXl4e33zzDQ8//DBdunTh+PHjTJw4kT/+8Y9s2bLF63Hj4+PZvXu3y7ro6Ohqz19EROSMIiNh+vRAZyHlBF1RdPnll3P55Ze73ZaQkMCqVatc1r3wwgv07NmTgwcP0rJlS4/HtdlsNG3atNJ52O127HZ76evs7OxK7ysiIiKhJ+gun/kqKysLm81G/fr1vcbl5OSQkpJCixYtGDFiBFu3bvUaP3PmTBISEkqX5OTkasxaJDhYlhVySyhyd/k+2BepYSUlsHOnWUpKAp2NnBbSRVF+fj5Tpkzh+uuvJz4+3mNc+/btmT9/PsuWLWPhwoVER0fTp08f9u7d63GfqVOnkpWVVbocOnSoJj6CiIj8Fp06BZ06meXUqUBnI6cF3eWzyiosLOTaa6+lpKSEF1980Wtsr1696NWrV+nrPn36cOGFF/LCCy/w/PPPu90nKiqKqKioas1ZREREgldIFkWFhYWMGTOGtLQ0/ve//3ltJXInLCyMHj16eG0pEhERkd+WkLt85iiI9u7dy2effUbDhg19PoZlWaSmppKUlFQDGYqIiEgoCrqWopycHPbt21f6Oi0tjdTUVBo0aECzZs246qqr+Oabb1i+fDnFxcVkZGQA0KBBAyIjIwEYO3YszZs3Z+bMmQDMmDGDXr160bZtW7Kzs3n++edJTU1lzpw5/v+AIiIiEpSCrijasmULgwYNKn09adIkAMaNG8f06dNZtmwZABdccIHLfqtXr2bgwIEAHDx4kLAwZyPYiRMnuP3228nIyCAhIYGuXbuybt06evbsWbMfRkREREKGzQrVe1z9LDs7m4SEBLKysnzuwyQiv22heIu7fhpqWG4u1K1rnufkQFxcYPOpxXz5/Q66liIREZFaLyICJk92PpegoKJIRETE3yIj4amnAp2FlBNyd5+JiIiI1AS1FImIiPhbSQkcPGiet2wJYWqjCAYqikRERPzt1Clo3do8V0froKHSVERERAQVRSIiIiKAiiIRERERQEWRiIiICKCiSERERARQUSQiIiIC6JZ8EZEap3nE/COU5piLBJ4FJtx1F9TRT3GwUEuRiIiInxUAdwPMmQNRUQHORhxUFImIiIigokhERCQgGgEcPQq6vBo0VBSJiIj4WSxwFKBJE8jLC3A24qCiSERERAQVRSIiIiKAiiIRERERQEWRiIiICKCiSERERARQUSQiIiICqCgSERHxuyJgPsC4cZrmI4ioKBIREfGzAuBmgPnzNc1HEFFRJCIiIoKKIhERkYCIBcjN1TQfQURFkYiIiJ/FArkAdetqmo8got5dvsrNhfDwiuvDwyE62jXOk7AwiImpWmxenuf/VdhsEBtbtdhTp6CkxHMecXFVi83Ph+Li6omNjTV5A9jtUFRUPbExMeY8AxQUQGFh9cRGRzv/rvgSW1ho4j2JinJ2zPQltqjInAtPIiMhIsL32OJi82fnSUSEifc1tqTE/F2rjtg6dZz9NizL+4+QL7G+/LvXd4T72Or8jigjEu8/cGX/VM8UewpwnKWI08vZxsaWX6HvCN9jffmOqCxLKiUrK8sCrCzzFVJxGTbMdYfYWPdxYFkDBrjGNmrkObZ7d9fYlBTPsR06uMZ26OA5NiXFNbZ7d8+xjRq5xg4Y4Dk2NtY1dtgwz7Hl//pddZX32JwcZ+y4cd5jMzOdsXfd5T02Lc0ZO3my99gdO5yx06Z5j920yRk7a5b32NWrnbGzZ3uPXb7cGTtvnvfY995zxr73nvfYefOcscuXe4+dPdsZu3q199hZs5yxmzZ5j502zRm7Y4f32MmTnbFpad5j77rLGZuZ6T123DhnbE6O99irrrJceIvVd4RZavA7IhYsTi/zvB0TrEZlYmefITalTOysM8R2KBM77QyxpUtOjr4jHGrgO6L09zsryzoTXT4TEZFaITcnB8uysCyL8ePGeY09mplZGjvhrru8xu5PSyuNvW/yZK+xO3fsKI2dPm2az58hVAwfMcLr9gl3343NZsNmszFw0CCvsffdf39pbI+ePb3GTp8xozS2Y6dOXmOfevppbDYbCQkJXuPKslmWZVU6+jcsOzubhIQEsg4fJj4+vmKAmsbdx+ryme+xaho3z3X5rGqx+o4wz4P9OyI3FxITzfOcHPP3OYS+I+rYbHgbSKAAMxYTmM7L0V5iC08vvsbagJhKxgJkZWW5//0uQ0VRJZUWRZU4qSIiIl7l5ppO1mCKorLFXwiwOQrKEFKZ329dPhMRERFBd5+JiIj4X3g4XHWV87kEBRVFIiIi/hYdDf/5T6CzkHKC7vLZunXrGDlyJM2aNcNms/Hhhx+6bB8/fnxpz3PH0qtXrzMed/HixXTo0IGoqCg6dOjAkiVLaugTiIiISCgKuqIoNzeXLl26MHv2bI8xQ4cO5ciRI6XLxx9/7PWYGzdu5JprruGmm25i27Zt3HTTTYwZM4avvvqqutMXERGREBXUd5/ZbDaWLFnCqFGjSteNHz+eEydOVGhB8uaaa64hOzubTz75pHTd0KFDOeecc1i4cGGljqG7z0REpNro7jO/q7V3n61Zs4YmTZrQrl07brvtNjIzM73Gb9y4kcGDB7usGzJkCBs2bPC4j91uJzs722URERGR2ivkiqLLL7+ct99+m//9738888wzbN68mT/84Q/YvQw4lZGRQaJjkKzTEhMTycjI8LjPzJkzSUhIKF2Sk5Or7TOIiIhI8Am5u8+uueaa0uedOnWie/fupKSksGLFCq688kqP+5Vv6rMsy2vz39SpU5k0aVLp6+zsbBVGIiIitVjIFUXlJSUlkZKSwt69ez3GNG3atEKrUGZmZoXWo7KioqKIivI2iLmI1HrpX8DON+CndZB7BIrzIaYRNOkKbUbB+ddBRAD7guyYD9n7oeN4SGgVuDwA9n4IR1PhvFHQ5ALf98/NgP0rIWOTWY5ug+IC6HQrDHm1enMV8SDkLp+V98svv3Do0CGSkpI8xlx88cWsWrXKZd3KlSvp3bt3TacnIqGoMA8+ugbe7Qvb/w0nD0C9ZGj0ezNX2I8rYNVt8FpbOLo9cHnunA8bZ5jCKNB++NDkkplatf13vQv/HQepcyBjsymIRPws6FqKcnJy2LdvX+nrtLQ0UlNTadCgAQ0aNGD69OmMHj2apKQk9u/fz4MPPkijRo244oorSvcZO3YszZs3Z+bMmQDce++99O/fnyeffJI//elPLF26lM8++4z169f7/fOJSJArLoT3B8PhLyCuKfR7EtpdDRFlpp785Tv45nnY8Rqc+AEadw5cvrVFZDykXAZNe5rl4Gew9YVAZyW/MUFXFG3ZsoVBgwaVvnb06xk3bhwvvfQS27dv58033+TEiRMkJSUxaNAgFi1aRL169Ur3OXjwIGFhz
kaw3r178+677/LQQw/x8MMP06ZNGxYtWsRFF13kvw8mIqFh4wxTEMUmwnUb3V+WatgBLnsZzr8RbCHf4B4cOt9iFofMbwKXiz+Eh8OwYc7nEhSCepyiYKJxikR+A+xZMDcZCk7C8IXQ/tqqHefHFaaVI2MLFJ6EuGbQ+nLoORXi3dyw8e9WkH0A/pxm+tZsnAFHvjSXkBIvhN4zoOUfnPGH1sB7gyoex2HIPOg03vm6MM/ks+c/cHwPlBTBOe3g/Bug61+hTpn+k1/+C754GBp2hBu3QJ1o12Nvfx1W3gpxSTBuuzlXr7b2nMvF06D3dM/bPdkw3ZwH9SkKShqnSESktvtxhfmRj2kM7a6q2jE+nwpLRsD+T6FODDTqDHmZsO0leKuLKZQ8vv9yWNTf9Kmp3wbCIyB9PSweYgohh8gEaNbHXHICaNTJvHYscWVuIjmZDm/3gM+nmM7LsYkQ3wp+2Qnr7of3L4XCU874nlMh6WKz/fMprvll7Yc1E83zIa9BTENTNDXrA7FNzPpz2rrmUq9llU6jSCAE3eUzEZGAOXx6QNfmfSCsCl+PPyyHTU+YfYfONy0xAPZs+O942LcEProaxn/n2kfJYc0k6P1P6HEfhIWb/k2f3gzfv20KlOu/NHGJXeG69bBoIPy0Fv7wAiQPrHg8qwSWjzF9oH53LQx6zlkwnfwJVlwP6Z/DhkdgwFNmfVg4DHsL3uxi+k2dOwJSLjXH+mSsKRq73GlavsD0u7puvfl8O9+Ang+6tlKJhBC1FNVy5SfPDYVFJGBy0s1jgpfLQd5sesI8XjDBWRABRMXDsAXmdv7s/bDLw/RCrYbCRVNMYQKmpWjgcxAeBUe+gvzjvuXz4wpT6DXtYQqdsi1I9VrAiEUQURe2vezaWlS/DQx8FrBMsZN/HDbNMgXUOe1gwNO+5SEV5eaaqT3i4szzEGNZVsgsWVlZlf5cKopERBwKTprHqow9VJADRzaa513vqbg9IhY632aeH1jp/hid/1xxXWwjc7kLIOtH33La+4F57DjefctX3SRTMBXmwM9fu277/e2mlSgnHZZeARunmWMMW2A+i5y9vDyzSNDQ5TMREYfI03exFlbhf+4n9plLTOFRkHCu+5hGHc3j8T3ut9dv4359bBM4vtsUXr44dnoMpW0vwffvuI9x5OJoJStr8KvwRmdziQ7g4ummiBKppVQUiYg41G1uHrPSfN/XUbDENAZPl4FjT1++crRIleephar0tn8fbxa2n75scGzHmWOLTlVcF5doCrlDa0wO6isktZyKIhERh2a9zYjKhzeY29Z96WwdWdc8njpqRr12Vxjl/Xw6tl7FbTUh4nROV60ynaV9tXWOsyCySmDlbTD6U89Fn0iIU58iERGH1sNMIZGXCXve923f+ueZ4qHY7rnvz7Gd5vGcdmeXp8OZipOGHU6/byVaisr7dY+5Zd8WBqOWmc7nB1bB1tmekvH9PUSCjIoiERGH6PrOTtKrJ5pxebxJ/wLST9/GH1nXtDSB++kpCk/B9tODELYaUg3JYsZBAveXvgDaXmkev30FivIrf9ySIvjkJijKg25/h3OHw9A3TYH0+QPw627fcxEJASqKRETK6j3dDF6Y9zMsvBi+e6tiQfHrHvhsArw30LQqOfR4wDymvujasbngJPx3rLm0Ft/KjBlUHRwdug+tdb+97RWQ1At+3QVLRsLxfa7bi+zmtv3/3uK6/st/mZnqG3WGPo+adS36QvfJpuj5+EZTOLnL5ad15vKheBcWBgMGmCVMP8XBQtN8VFKoTvMRiuP+6K+kBFxBjhmfZ+9i87pOjLkzrE4M5Bx23qlVtwWM/sSMKO3w+VTneEX1kk3n6l+/N3e0RZ9j+uSUv4Or7DQf7uZacwzSOGa16yCNP31uRsAGc0kurilgg55ToPVQsz7nCCwZDplbzev655mRqAtOmjvmigtMjndmmO1HNsG7fUyr0PWboEkX5/sVF8DbPc3I2L0egT4znNtO/ADzO5iY+BQzkrUtzAwHUJkO2tmH4K2uztdFeaYAC49y9o0CGLXUDK4pUkm+/H6rPBURKS+yLvzxfbhmnZl7q16yuZR2dBtgmctJg1+DW/a4FkQA/WbCqI/MjO+FOXDsWzNoY5c74KZt1XtLe4t+MOwdM6t8TrpppflprZk/zaFukpnY9pIXoUV/yP/FFEgFJ81+vWeYYgvMHGmf3GRagS6e4VoQAYRHmnGKwqNg0+OmgHKo38Z87hYDzGCP6etNLtn7K/dZrGKTm2NxXIYrtruuLyms8ukSORO1FFWSWor8R38lRUSkuqilSEREJJjl5kLjxmYJwWk+aiuNUyQiIhIIx44FOgMpRy1FIiIiIqgoEhEREQFUFImIiIgAKopEREREABVFIiIiIoDuPqv1NOaPeKNxrEQCJCwMund3PpegoKJIRETE32JiYPPmQGch5ag8FREREUFFkYiIiAigokhERMT/8vKgVSuz5OUFOhs5TX2KRERE/M2y4MAB53MJCmopEhEREUFFkYiIiAigokhEREQEUFEkIiIiAqgoEhEREQF095mIiIj/2WzQoYPzuQQFFUUiIiL+FhsLO3cGOgspR5fPRERERFBRJCIiIgIEYVG0bt06Ro4cSbNmzbDZbHz44Ycu2202m9vlqaee8njM+fPnu90nPz+/hj+NiIiIG3l50LGjWTTNR9AIuj5Fubm5dOnShZtvvpnRo0dX2H7kyBGX15988gm33nqr29iy4uPj2b17t8u66Ojos09YRETEV5YF333nfC5BIeiKossvv5zLL7/c4/amTZu6vF66dCmDBg3i3HPP9Xpcm81WYV8RERERh6C7fOaLn3/+mRUrVnDrrbeeMTYnJ4eUlBRatGjBiBEj2Lp1q9d4u91Odna2yyIiIiK1V0gXRW+88Qb16tXjyiuv9BrXvn175s+fz7Jly1i4cCHR0dH06dOHvXv3etxn5syZJCQklC7JycnVnb5IwFmWFXKLp36FwbyISGiwWVbwXsy02WwsWbKEUaNGud3evn17LrvsMl544QWfjltSUsKFF15I//79ef75593G2O127HZ76evs7GySk5PJysoiPj7ep/cTkeoTikVGEH/NSqDk5kLduuZ5Tg7ExQU2n1osOzubhISESv1+B12fosr6/PPP2b17N4sWLfJ537CwMHr06OG1pSgqKoqoqKizSVFERERCSMgWRa+99hrdunWjS5cuPu9rWRapqal07ty5BjITERE5A5sNUlKczyUoBF1RlJOTw759+0pfp6WlkZqaSoMGDWjZsiVgmsL+85//8Mwzz7g9xtixY2nevDkzZ84EYMaMGfTq1Yu2bduSnZ3N888/T2pqKnPmzKn5DyQiIlJebCzs3x/oLKScoCuKtmzZwqBBg0pfT5o0CYBx48Yxf/58AN59910sy+K6665ze4yDBw8SFubsQ37ixAluv/12MjIySEhIoGvXrqxbt46ePXvW3AcRERGRkBLUHa2DiS8dtUSk5qijtYj4wpff75C+JV9ERCQknToFPXqY5dSpQGcj
pwXd5TMREZFar6QEtmxxPpegoJYiEREREVQUiYiIiAAqikREREQAFUUiIiIigIoiEREREUB3n4mIiARGo0aBzkDKUVEkIiLib3FxcPRooLOQcnT5TERERAQVRSIiIiKALp+JSIjRPGLiSSjNixcNfAIMHDAAPvkEYmICnZKgliIRERG/CwMGAqxdq2k+goiKIhERERFUFImIiIgAKopEREREABVFIiIiIoCKIhERERFARZGIiEhA5ALExgY6DSlD4xSJiIj4WR5QF7BycwOdipShliIRERERVBSJiIiIACqKRERE/C4KWA4wfDjk5wc4G3FQUSQiIuJn4cBwgI8/huLiAGcjDiqKRERERFBRJCIiIgKoKBIREREBVBSJiIiIACqKRERERACNaF1plmUBkJ2dHeBMREQk1FlA6a9JdrbuQKtBjt9tx++4NyqKKunkyZMAJCcnBzgTEREJdaeABMeLZs0CmMlvx8mTJ0lISPAaY7MqUzoJJSUlHD58mHr16mGz2ar12NnZ2SQnJ3Po0CHi4+Or9djipPPsHzrP/qHz7D861/5RU+fZsixOnjxJs2bNCAvz3mtILUWVFBYWRosWLWr0PeLj4/UPzg90nv1D59k/dJ79R+faP2riPJ+phchBHa1FREREUFEkIiIiAqgoCgpRUVFMmzaNqKioQKdSq+k8+4fOs3/oPPuPzrV/BMN5VkdrEREREdRSJCIiIgKoKBIREREBVBSJiIiIACqKRERERAAVRQH34osv0rp1a6Kjo+nWrRuff/55oFOqdWbOnEmPHj2oV68eTZo0YdSoUezevTvQadVqM2fOxGazMXHixECnUiulp6dz44030rBhQ2JjY7ngggv4+uuvA51WrVJUVMRDDz1E69atiYmJ4dxzz+Wf//wnJSUlgU4tpK1bt46RI0fSrFkzbDYbH374oct2y7KYPn06zZo1IyYmhoEDB7Jz506/5aeiKIAWLVrExIkT+cc//sHWrVvp168fl19+OQcPHgx0arXK2rVrmTBhAl9++SWrVq2iqKiIwYMHk5ubG+jUaqXNmzczd+5cfv/73wc6lVrp+PHj9OnTh4iICD755BO+++47nnnmGerXrx/o1GqVJ598kpdffpnZs2fz/fffM2vWLJ566ileeOGFQKcW0nJzc+nSpQuzZ892u33WrFk8++yzzJ49m82bN9O0aVMuu+yy0vlHa5wlAdOzZ0/rjjvucFnXvn17a8qUKQHK6LchMzPTAqy1a9cGOpVa5+TJk1bbtm2tVatWWQMGDLDuvffeQKdU6zzwwANW3759A51GrTd8+HDrlltucVl35ZVXWjfeeGOAMqp9AGvJkiWlr0tKSqymTZtaTzzxROm6/Px8KyEhwXr55Zf9kpNaigKkoKCAr7/+msGDB7usHzx4MBs2bAhQVr8NWVlZADRo0CDAmdQ+EyZMYPjw4Vx66aWBTqXWWrZsGd27d+fqq6+mSZMmdO3alX//+9+BTqvW6du3L//3f//Hnj17ANi2bRvr169n2LBhAc6s9kpLSyMjI8PldzEqKooBAwb47XdRE8IGyLFjxyguLiYxMdFlfWJiIhkZGQHKqvazLItJkybRt29fOnXqFOh0apV3332Xb775hs2bNwc6lVrtxx9/5KWXXmLSpEk8+OCDbNq0ib/+9a9ERUUxduzYQKdXazzwwANkZWXRvn17wsPDKS4u5rHHHuO6664LdGq1luO3z93v4oEDB/ySg4qiALPZbC6vLcuqsE6qz9133823337L+vXrA51KrXLo0CHuvfdeVq5cSXR0dKDTqdVKSkro3r07jz/+OABdu3Zl586dvPTSSyqKqtGiRYtYsGAB77zzDh07diQ1NZWJEyfSrFkzxo0bF+j0arVA/i6qKAqQRo0aER4eXqFVKDMzs0KVLNXjnnvuYdmyZaxbt44WLVoEOp1a5euvvyYzM5Nu3bqVrisuLmbdunXMnj0bu91OeHh4ADOsPZKSkujQoYPLuvPPP5/FixcHKKPa6b777mPKlClce+21AHTu3JkDBw4wc+ZMFUU1pGnTpoBpMUpKSipd78/fRfUpCpDIyEi6devGqlWrXNavWrWK3r17Byir2smyLO6++24++OAD/ve//9G6detAp1TrXHLJJWzfvp3U1NTSpXv37txwww2kpqaqIKpGffr0qTCkxJ49e0hJSQlQRrVTXl4eYWGuP5Hh4eG6Jb8GtW7dmqZNm7r8LhYUFLB27Vq//S6qpSiAJk2axE033UT37t25+OKLmTt3LgcPHuSOO+4IdGq1yoQJE3jnnXdYunQp9erVK22dS0hIICYmJsDZ1Q716tWr0EcrLi6Ohg0bqu9WNfvb3/5G7969efzxxxkzZgybNm1i7ty5zJ07N9Cp1SojR47kscceo2XLlnTs2JGtW7fy7LPPcssttwQ6tZCWk5PDvn37Sl+npaWRmppKgwYNaNmyJRMnTuTxxx+nbdu2tG3blscff5zY2Fiuv/56/yTol3vcxKM5c+ZYKSkpVmRkpHXhhRfqNvEaALhd5s2bF+jUajXdkl9zPvroI6tTp05WVFSU1b59e2vu3LmBTqnWyc7Otu69916rZcuWVnR0tHXuueda//jHPyy73R7o1ELa6tWr3X4fjxs3zrIsc1v+tGnTrKZNm1pRUVFW//79re3bt/stP5tlWZZ/yi8RERGR4KU+RSIiIiKoKBIREREBVBSJiIiIACqKRERERAAVRSIiIiKAiiIRERERQEWRiIiICKCiSERERARQUSQiIiICqCgSkRCSl5fH448/zoUXXkjdunWJjo6mRYsW9OvXj6lTp/LDDz+UxrZq1YpWrVpVy/vOnz8fm83G/Pnzq+V4IhKcNCGsiISEkydP0rdvX7799lvOO+88brzxRurXr8+hQ4fYuXMnTzzxBG3atKFNmzaBTlVEQpSKIhEJCc899xzffvstt956K//+97+x2Wwu29PS0rDb7QHKTkRqA10+E5GQsHHjRgDuvvvuCgURQOvWrWnfvj379+/HZrNx4MABDhw4gM1mK12mT58OQEFBAS+88AJDhgwhOTmZqKgomjRpwpVXXsnWrVtdjjt+/HhuvvlmAG6++WaX45V18uRJpk2bRseOHYmJiaF+/foMHTqU9evX18DZEJGaoJYiEQkJDRo0AGDfvn1ccMEFHuPq16/PtGnTeO655wCYOHFi6baBAwcC8OuvvzJx4kT69evHsGHDOOecc/jxxx9ZtmwZn3zyCevWraNHjx4AjBo1ihMnTrB06VL+9Kc/uX3vX3/9lf79+7Nz50769evHkCFDyMrKYunSpQwaNIj//Oc/jBo1qhrOgojUJJtlWVagkxAROZOlS5cyatQo4uPjufPOOxk8eDBdu3blnHPOcRvv6GS9f//+CtvsdjvHjh2jefPmLut37txJr1696NWrF6tWrSpdP3/+fG6++WbmzZvH+PHjKxzvhhtu4J133uH1118vbVUC+Pnnn+nRowf5+fkcPHiQ6Oho3z+4iPiNLp+JSEj405/+xKxZsygpKeHJJ5/kkks
uoUGDBpx33nncfffd7N27t9LHioqKqlAQAXTs2JFBgwaxbt06CgsLK3WsY8eOsWjRIi655BKXggggMTGR++67j6NHj/LZZ59VOj8RCQxdPhORkHHfffdxxx138N///pcNGzawZcsWvvrqK+bMmcNrr73GokWL+OMf/1ipY6WmpjJr1izWr19PRkZGhSLo2LFjJCUlnfE4mzdvpri4mPz8/NI+S2U5irVdu3YxYsSISuUmIoGhokhEQkq9evW4+uqrufrqqwHIysriwQcf5MUXX+TWW28lPT2dyMhIr8fYsGEDf/jDHwAYPHgwbdu2pW7duthsNj788EO2bdtW6TvZfv31VwC++OILvvjiC49xubm5lTqeiASOiiIRCWkJCQnMnj2bFStWcODAAbZv3063bt287vPYY49ht9tZv349ffr0cdn25Zdfsm3btkq/f3x8PAB///vfefrpp33/ACISNNSnSERCns1mIzY21mVdeHg4xcXFbuN/+OEHGjRoUKEgysvL45tvvqkQHx4eDuD2eD169MBms5UOGSAioUtFkYiEhFdeeYXNmze73fbBBx+wa9cu6tevT6dOnQBzC/+xY8fIz8+vEJ+SksLx48fZuXNn6bri4mImT57M0aNHK8Q7hgP46aefKmxr2rQpY8aMYcOGDTz11FO4u6H3q6++Ii8vr3IfVEQCRrfki0hIGDVqFEuXLuW8886jT58+NGvWjJycHFJTU/n8888JCwtjwYIFXHfddQA88MADzJo1i0svvZR+/foRGRlJ37596du3L8uXL2fkyJHUr1+fMWPGEB0dzZo1a0hPT6dz586sWbOGtLS00tv6f/31V1q0aEFUVBS33HILjRs3BmDKlCml2y+55BJSU1Pp3LkzF198MQkJCRw6dIivv/6avXv3cuTIEZo2bRqQcycilWSJiISAXbt2WbNmzbIuu+wyq3Xr1lZ0dLQVHR1ttWnTxho3bpy1ZcsWl/iTJ09at912m5WUlGSFhYVZgDVt2rTS7e+//7514YUXWrGxsVajRo2sMWPGWD/88IM1btw4C7DS0tJcjrdixQqrR48eVkxMjAVY5b8+8/LyrFmzZlndunWz4uLirJiYGKt169bWqFGjrDfffNMqLCysqVMjItVELUUiIiIiqE+RiIiICKCiSERERARQUSQiIiICqCgSERERAVQUiYiIiAAqikREREQAFUUiIiIigIoiEREREUBFkYiIiAigokhEREQEUFEkIiIiAqgoEhEREQHg/wfqaO+Mf+qPVAAAAABJRU5ErkJggg==", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "from random import randint\n", + "from datasets import CSWDataset\n", + "\n", + "probs = [1, 1, 1] # Probability of the context appropriate transition for the 2nd-4th state. Deterministic for now.\n", + "contexts_to_load = [0,1,0,1,0] # indices of the contexts for each trial (5 states per trial)\n", + "n_samples_per_context = [1,1,1,1,1] # number of times to visit each context before transitioning\n", + "ds = CSWDataset(n_samples_per_context, contexts_to_load, probs=probs)\n", + "\n", + "# Plot some example data from the CSW task.\n", + "plt.imshow(ds.xs[:20], cmap='Greys', aspect='auto')\n", + "for i, x in enumerate(np.linspace(0, 15, 4)):\n", + " plt.axhline(x-0.5, color='red', linestyle='--')\n", + " if i%2 == 0:\n", + " plt.text(5, x+1, 'Context 0', fontsize=16, color='blue')\n", + " else:\n", + " plt.text(5, x+1, 'Context 1', fontsize=16, color='darkorange')\n", + "plt.axvline(8.5, color='red', linestyle='--')\n", + "plt.xlabel('State', fontsize=14)\n", + "plt.ylabel('Time', fontsize=14)" + ] + }, + { + "cell_type": "code", + "execution_count": 98, + "metadata": {}, + "outputs": [], + "source": [ + "from run import gen_data_loader, gen_model\n", + "\n", + "def calc_prob(em_preds, test_ys):\n", + " '''Calculate the probability of the EM model predicting the correct state through EM retrieval.\n", + " '''\n", + " # Only consider the terminal three states (they are the only predictable transitions).\n", + " em_preds_new, test_ys_new = em_preds[:, 2:-1, :], test_ys[:, 2:-1, :]\n", + " em_probability = (em_preds_new*test_ys_new).sum(-1).mean(-1)\n", + " trial_probs = (em_preds*test_ys)\n", + " return em_probability, trial_probs\n", + "\n", + "def run_participant(params, training_paradigm):\n", + " performance_data = {'seed':[], 'paradigm':[], 'trial':[], 'probability':[]}\n", + " loss_fn = nn.BCELoss()\n", + " data_loader = gen_data_loader(training_paradigm, params['probs'])\n", + " context_module, em_module = gen_model(params)\n", + " optimizer = torch.optim.SGD(lr=params.episodic_lr, params=context_module.parameters())\n", + " em_preds = []\n", + " utils.set_random_seed(params.seed)\n", + " for trial, (x,_,y) in enumerate(data_loader):\n", + " for _ in range(params['n_optimization_steps']):\n", + " context = context_module(x)\n", + " if trial > 0:\n", + " optimizer.zero_grad()\n", + " pred_em = em_module(x,context)\n", + " loss = loss_fn(pred_em,y)\n", + " loss.backward()\n", + " optimizer.step()\n", + " else:\n", + " pred_em = torch.zeros([1,params.output_d]).float()\n", + " with torch.no_grad():\n", + " em_module.write(x,context,y)\n", + " em_preds.append(pred_em.cpu().detach().numpy())\n", + "\n", + " # Collect some metrics from the training run for analysis.\n", + " em_preds = np.stack(em_preds).squeeze()\n", + " em_preds = np.vstack([em_preds, np.zeros([1,11])]).reshape(-1,5,11)\n", + " test_ys = np.vstack([data_loader.dataset.ys.cpu().numpy(), np.zeros([1,11])]).reshape(-1,5,11)\n", + " correct_prob, _ = calc_prob(em_preds, test_ys)\n", + " performance_data['probability'].extend(correct_prob)\n", + " performance_data['seed'].extend([params.seed]*len(correct_prob))\n", + " performance_data['paradigm'].extend([training_paradigm]*len(correct_prob))\n", + " performance_data['trial'].extend(list(range(len(correct_prob))))\n", + " return pd.DataFrame(performance_data)" + ] + }, + { + "cell_type": "code", + "execution_count": 100, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "" + 
] + }, + "execution_count": 100, + "metadata": {}, + "output_type": "execute_result" + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAjcAAAHFCAYAAAAOmtghAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjguNCwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8fJSN1AAAACXBIWXMAAA9hAAAPYQGoP6dpAACWqUlEQVR4nOzdd3hT1f8H8PfNzezeEygbCsgqQ6hQ9t74AxHZqICKgAgCKkO2fhEXQ2U4QAG3iAxlCyqjUFlltLRQWkpbOrNzz++P0kDoIClNb5p8Xs/TB3Jzkrw7knxyzrnncIwxBkIIIYQQJyEROwAhhBBCSEWi4oYQQgghToWKG0IIIYQ4FSpuCCGEEOJUqLghhBBCiFOh4oYQQgghToWKG0IIIYQ4FSpuCCGEEOJUqLghhBBCiFOh4oa4rLi4OIwbNw61atWCUqmEh4cHWrZsiZUrVyIrK8vczmAwYP369WjdujX8/Pzg5uaGiIgIDBw4ED/++CMA4M6dO5BIJJg8eXKxx3n11VfBcRzmzJlT7LoJEyaA53ncvXu3xIwcx1n1dfDgwcf+eajVaixYsMDq+7p+/bpFBolEAn9/f/Tp0wfHjx8v1u6999577IyV6eGfsbe3Nzp16oTffvutwh9r27ZtaNy4MVQqFTiOw5kzZyr8MQhxJVKxAxAihs8++wxTpkxBgwYN8Prrr6NRo0YwGAw4efIk1q1bh+PHj5sLl1GjRuGHH37AtGnTsHDhQigUCiQkJGD37t3Ys2cPBg8ejMDAQDRu3BgHDhwo9lgHDx6Eu7t7qdc1b94cvr6+JeZ8sEgAgHfeeQcHDhzA/v37LY43atSovD8KM7VajYULFwIAOnXqZPXtXnnlFTz77LMwmUw4f/48Fi5ciM6dO+P48eNo0aLFY+cS09NPP43XXnsNgiAgISEBixcvRv/+/fHrr7+ib9++FfIYd+7cwahRo9CrVy+sWbMGCoUC9evXr5D7JsRlMUJczLFjxxjP86xXr15Mq9UWu16n07Gff/6ZMcZYQkICA8DefvvtEu/LZDKZ///KK68wACw1NdV8LDMzk3Ecx2bOnMmkUinLzc01X3fjxg0GgL322mtWZx8zZgxzd3e3ur0t7ty5wwCw+fPnW9U+MTGRAWDvvvuuxfE///yTAWATJ04ss52jA8Beeukli2NXr15lAFi3bt0e+/7VajUTBIEdPXqUAWDbtm177PssUlBQUGH3RUhVRMNSxOUsXboUHMfh008/hUKhKHa9XC7HgAEDAACZmZkAgNDQ0BLvSyK5/xTq3LkzAFgM6xw6dAhSqRQzZ84EABw5csR8XVFPTtHtykuv12Px4sVo2LAhFAoFAgMDMW7cONy5c8ei3f79+9GpUyf4+/tDpVKhRo0aGDp0KNRqNa5fv47AwEAAwMKFC81DMWPHjrU5z5NPPgkASEpKKnbdqlWrUKtWLXh4eKBdu3b4+++/La4/efIknnnmGdSsWRMqlQo1a9bEiBEjit2XWq3GzJkzzUOKfn5+aNWqFb755pti9zdgwAD4+flBqVSiRYsW2L59u83fU5E6deogMDDQIo81j7F582ZwHIe9e/di/PjxCAwMhJubG0aMGIGnnnoKADB8+HBwHGfRa/bLL7+gXbt2cHNzg6enJ7p3716sN2/BggXgOA6nT5/G008/DV9fX9SpUwcAULNmTfTr1w87d+5EixYtoFKpEBkZiZ07d5pzRUZGwt3dHW3atMHJkyeL/fys+X0UfX8HDhzA5MmTERAQAH9/fwwZMgS3bt0q9nPcunUr2rVrBw8PD3h4eKB58+bYsGGDRZs//vgDXbt2hZeXF9zc3BAdHY0///zTml8TITTnhrgWk8mE/fv3IyoqCtWrV39k+8jISPj4+GDhwoX49NNPcf369VLbxsTEQCKRWAw/HThwAK1atUJwcDCioqIsCp8DBw6A53l06NCh3N+PIAgYOHAgli9fjmeffRa//fYbli9fjn379qFTp07QaDQACue99O3bF3K5HBs3bsTu3buxfPlyuLu7Q6/XIzQ0FLt37wZQOA/o+PHjOH78ON566y2bM129ehUAzMVSkU8++QT79u3D6tWrsWXLFhQUFKBPnz7Iyckxt7l+/ToaNGiA1atXY8+ePVixYgVSU1PRunVrZGRkmNvNmDEDa9euxdSpU7F792589dVX+L//+z9zMQoU/nyjo6ORnZ2NdevW4eeff0bz5s0xfPhwbN682ebvCwDu3r2LzMxM8/dm62OMHz8eMpkMX331Fb777jssWbIEn3zyCYDCovv48eNYs2YNgMICYODAgfDy8sI333yDDRs24O7du+jUqROOHj1a7L6HDBmCunXrYseOHVi3bp35+NmzZzFnzhzMnj0bP/zwA7y9vTFkyBDMnz8fn3/+OZYuXYotW7YgJycH/fr1M//N2PL7KDJx4kTIZDJs3boVK1euxMGDB/Hcc89ZtHn77bcxcuRIhIWFYfPmzfjxxx8xZswYi4Lp66+/Ro8ePeDl5YUvvvgC27dvh5+fH3r27EkFDrGO2F1HhFSmtLQ0BoA988wzVt/mt99+YwEBAQwAA8D8/f3Z//3f/7FffvmlWNvmzZuz+vXrmy8/8cQT7I033mCMMTZr1izWqlUr83W1atVibdq0sSn/w8NS33zzDQPAvv/+e4t2J06cYADYmjVrGGOMfffddwwAO3PmTKn3Xd5hqRUrVjCDwcC0Wi07deoUa926NQPAfvvtN4t2TzzxBDMajebb//vvvwwA++abb0p9DKPRyPLz85m7uzv74IMPzMebNGnCBg0aVGa+hg0bshYtWjCDwWBxvF+/fiw0NNRiSLEkANiUKVOYwWBger2eXbx4kfXu3ZsBYJ988olNj7Fp0yYGgI0ePbrY4xw4cIABYDt27DAfM5lMLCwsjD3xxBMWOfPy8lhQUBBr3769+dj8+fNLHTqNiIhgKpWK3bx503zszJkzDAALDQ21GL766aefGIAS/66LlPb7KPr+pkyZYtF+5cqVFkO1CQkJjOd5NnLkyFIfo6CggPn5+bH+/ftbHDeZTKxZs2Y2P2eIa6KeG0IeoU+fPkhOTsaPP/6ImTNnonHjxvjpp58wYMAAvPzyyxZtO3fujMuXL+PWrVvIzMzEuXPnzMMMMTExiI2NRU5ODpKTk5GYmPjYQ1I7d+6Ej48P+vfvD6PRaP5q3rw5QkJCzD1FzZs3h1wuxwsvvIAvvvgCCQkJj/W4D5o9ezZkMhmUSiWioqKQnJyM9evXo0+fPhbt+vbtC57nzZebNm0KwHL4Kj8/H7Nnz0bdunUhlUohlUrh4eGBgoICXLx40dyuTZs2+P333/HGG2/g4MGDFr0NQGHv0aVLlzBy5EgAsPjZ9OnTB6mpqYiPj3/k97ZmzRrIZDLI5XJERkbi2LFjWLRoEaZMmVKux
xg6dKg1P1LEx8fj1q1bGDVqlMXQp4eHB4YOHYq///4barXaqvtu3rw5wsPDzZcjIyMBFE4ad3NzK3a8PL+PIkXDuUUe/h3v27cPJpMJL730Uqnf+7Fjx5CVlYUxY8ZY/EwFQUCvXr1w4sQJFBQUlHp7QgA6W4q4mICAALi5uSExMdGm26lUKgwaNAiDBg0CACQnJ6N379745JNPMHnyZDRu3BhAYXHz/vvv4+DBg1AoFOB5HtHR0QBgnltx5MgR8/DJ4xY3t2/fRnZ2NuRyeYnXFw0d1KlTB3/88QdWrlyJl156CQUFBahduzamTp2KV1999bEyvPrqq3juuecgkUjg4+ODWrVqgeO4Yu38/f0tLhfNd3qwMHn22Wfx559/4q233kLr1q3h5eUFjuPQp08fi3YffvghqlWrhm3btmHFihVQKpXo2bMn3n33XdSrVw+3b98GAMycOdM836m0n01Zhg0bhtdffx0cx8HT0xN16tQxF2jleYzS5m49rKy5XmFhYRAEAXfv3rUoTkq7bz8/P4vLRX8rpR3XarXmY9b+Poo86ndcNA+sWrVqJWYF7v9cn3766VLbZGVlwd3dvdTrCaHihrgUnufRtWtX/P7777h582aZL7JlqVGjBl544QVMmzYN58+fNxc3HTt2BM/z5uKmZcuW8PDwAAB4eXmhefPmOHDgALKysiCVSs2FT3kVTdwsmi/zME9PT/P/O3TogA4dOsBkMuHkyZP46KOPMG3aNAQHB+OZZ54pd4Zq1aqhVatW5b59kZycHOzcuRPz58/HG2+8YT6u0+ks1h0CAHd3dyxcuBALFy7E7du3zb04/fv3x6VLlxAQEAAAmDNnDoYMGVLi4zVo0OCRmQIDA0v93srzGCUVfSUpKhJSU1OLXXfr1i1IJJJiywdYe9/WsuX3Ya2iuUo3b94sdc5b0c/1o48+Mk9Of1hwcHC5Hp+4DipuiMuZM2cOdu3aheeffx4///xzsV4Pg8GA3bt3o3///sjLywPHceYC5UFF3fJhYWHmY97e3mjRooW5uHl4aCYmJgYHDhzA3bt30aZNmxLv1xb9+vXDt99+C5PJhLZt21p1G57n0bZtWzRs2BBbtmzB6dOn8cwzz5TYk1KZOI4DY6zYGWyff/45TCZTqbcLDg7G2LFjcfbsWaxevRpqtRoNGjRAvXr1cPbsWSxdutQuee35GA0aNEB4eDi2bt2KmTNnmguXgoICfP/99+YzqOypvL+PsvTo0QM8z2Pt2rVo165diW2io6Ph4+ODCxcuFBv2JcRaVNwQl9OuXTusXbsWU6ZMQVRUlHlYyWAwIDY2Fp9++imaNGmC/v37Iz4+Hj179sQzzzyDmJgYhIaG4u7du/jtt9/w6aefolOnTmjfvr3F/Xfu3BnvvvsuOI7DihUrLK6LiYnB+++/D8aYea7G43jmmWewZcsW9OnTB6+++iratGkDmUyGmzdv4sCBAxg4cCAGDx6MdevWYf/+/ejbty9q1KgBrVaLjRs3AgC6desGoLCXJyIiAj///DO6du0KPz8/BAQEoGbNmo+d0xpeXl7o2LEj3n33XfPjHjp0CBs2bICPj49F27Zt26Jfv35o2rQpfH19cfHiRXz11VcWb/rr169H79690bNnT4wdOxbh4eHIysrCxYsXcfr0aezYseOxM9vrMSQSCVauXImRI0eiX79+ePHFF6HT6fDuu+8iOzsby5cvf+zsj2LL78NaNWvWxNy5c/HOO+9Ao9FgxIgR8Pb2xoULF5CRkYGFCxfCw8MDH330EcaMGYOsrCw8/fTTCAoKwp07d3D27FncuXMHa9eurdhvljgfkSc0EyKaM2fOsDFjxrAaNWowuVzO3N3dWYsWLdjbb7/N0tPTGWOM3b17ly1evJh16dKFhYeHm9s1b96cLV68mKnV6mL3u2vXLgaA8TzPcnJyLK7LyspiEomEAWD79u2zOXNJi/gZDAb23nvvsWbNmjGlUsk8PDxYw4YN2YsvvsiuXLnCGGPs+PHjbPDgwSwiIoIpFArm7+/PYmJiip0Z88cff7AWLVowhULBALAxY8aUmsXaxfnKaoeHzs66efMmGzp0KPP19WWenp6sV69e7Ny5cywiIsIiyxtvvMFatWrFfH19mUKhYLVr12bTp09nGRkZFvd/9uxZNmzYMBYUFMRkMhkLCQlhXbp0YevWrSszc1G2hxfxK4k1j1F0NtGJEyeK3b6ks6WK/PTTT6xt27ZMqVQyd3d31rVrV/bXX39ZtCk6W+rOnTvFbh8REcH69u1r1fdW0u/J2t9Had9f0fd24MABi+Nffvkla926tfnvtUWLFmzTpk0WbQ4dOsT69u3L/Pz8mEwmY+Hh4axv374l/pwIeRjHGGNiFFWEEEIIIfZAp4ITQgghxKlQcUMIIYQQp0LFDSGEEEKciqjFzeHDh9G/f3+EhYWB4zj89NNPj7zNoUOHEBUVBaVSidq1a1vsoUIIIYQQImpxU1BQgGbNmuHjjz+2qn1iYiL69OmDDh06IDY2FnPnzsXUqVPx/fff2zkpIYQQQqoKhzlbiuM4/Pjjj+bl7Usye/Zs/PLLLxZ7mkyaNAlnz57F8ePHKyElIYQQQhxdlVrE7/jx4+jRo4fFsZ49e2LDhg0wGAyQyWTFbqPT6aDT6cyXBUFAVlYW/P39K3y5ckIIIYTYB2MMeXl5CAsLs9hQtiRVqrhJS0srtqdIcHAwjEYjMjIyStw4btmyZVi4cGFlRSQOpGnTpvDx8TEvI5+dnY24uDixYxFCCHkMN27ceOS+gFWquAGKbw5XNKpWWi/MnDlzMGPGDPPlnJwc1KhRAzdu3ICXl5f9gjoIQRCwc+dObNi4EQa9Hnr/utBXawXwxXu5nI133LfgcP/vw8fXD81ecY0J6Jw+H4rrxyHNT4O3jw9mTJ+O1q1bix2LEELKLTc3F9WrV7fYELg0Vaq4CQkJQVpamsWx9PR0SKVS8y66D1MoFMU2fgMK901x9uLmzp07WLFiBU6ePAkmU0LfsAtMvjXAix1MRLxCJXaEyqFQwdCkP3D7PApSTuGdd95B//79MXnyZLtvuEgIIfZkzZSSKrXOTbt27bBv3z6LY3v37kWrVq1KnG/jyvbv349x48bh5MmTMPjUQEHjwTD51hA7VqUyeQSjaLY8u3fZpXAcDCFNUNBoAExufvj1118xceJEnD9/XuxkhBBiV6IWN/n5+Thz5gzOnDkDoPBU7zNnziA5ORlA4ZDS6NGjze0nTZqEpKQkzJgxAxcvXsTGjRuxYcMGzJw5U4z4DkmtVmPZsmVYtGgR8jU6aGo+BW3drmAyF+mxeIC2bmeYvMIhSBUweYVDW7ez2JFEIah8oY7sD11oU9y6lYpXXnkFX331FUwmk9jRCCHELkQ9FfzgwYPo3Ln4G86YMWOwefNmjB07FtevX8fBgwfN1x06dAjTp0/H+fPnERYWhtmzZ2PSpElWP2Zubi68vb2Rk5PjdMNSly9fxsKFC5GSkgKTeyA0tWPAlM71PZLHw+elQZVwGJw+H82bN8e8efMQGBgo
dixCCHkkW96/HWadm8rijMWNIAj4/vvvsW7dephMRuhCm0If1hJ4xKlyxEUZdVBe/wuyu9fh6eWFOW+8gfbt24udihBCykTFTRmcrbi5e/culi9fjn/++QdM5gZN7Y4weYWJHYs4OsYgy7gMZfI/gGDEkCFD8OKLL5Y4+Z4QQhyBLe/f9NG+Cjtz5gzGj5+Af/75B0bv6ihoPJAKG2IdjoMhsAEKGvWHoPLFDz/8gClTpuDmzZtiJyOEkMdGxU0VxBjD9u3bMWPGDNzNzoa2elto6nVzyUnDZeEMGqji98D99Bao4veAM2jEjuRwBJUvChr1hz4oEteuXcMLL76IY8eOiR2LEEIeCxU3VYxarcaiRYuwZs0amKQqFDTsA0NIY4C2kihGmXAYfO4tSEw68Lm3oEw4LHYkxySRQhfRDpraMVBrdJg7dy42bdpEZ1MRQqosKm6qkOTkZEyePBkHDhyA0TMEBY0GQPAIEjuWw+IL0u+vUAwGvuCOyIkcm9G/Dgoi+0FQeOGLL77AnDlzkJubK3YsQgixGRU3VcTRo0fx4osvIikpCfrgxtA06EXDUI9iMlos4geTQcQwVYPg5oeCRv1h9K6Of//9Fy+88AKuXLkidixCCLEJFTcOjjGGbdu24c0334RGb4CmdifoarQFOPrVPRpD0WAdd+8ysYJUAU29btCFt0RaWhpefuUV/P3332KnIoQQq9E7pAMzmUz48MMPsXbtWjC5Owoa9oPRv7bYsaoMxissem4YT6c5W43joA9rDnW9btDpjZgzZy5++eUXsVMRQohVqLhxUBqNBm+//TZ+/PFHmNz8CudCuPmJHatKERReZV4mj2byqYGChn0gSBVYtWoVPv30UwiCIHYsQggpExU3DigrKwvTpk3HX3/9BaNXONQN+4LJ3cWOVeVItDkWw1ISXY6YcaoswT2gsLhWemPr1q1YvHgx9Hq92LEIIaRUVNw4mLS0NLz00kuIj78EfUB9aOp1B3ja8bxcOM5yQjHodPnyYgpPFET2g9EzBPv378fs2bOh0dC6QYQQx0TFjQO5ffs2pk2bhtTUVOjCW0JXM5r2h3oMHASLnhuOJhQ/HqkCmvo9YfCthdjYWMyZMwdarVbsVIQQUgy9czqI9PR0TJs2DWlpadCFR0Ef1pwW5ntMjFn23LjWLmp2IuGhrRMDg18tnDlzhgocQohDouLGAaSnp+PVB3ps9GHNxI7kFDjBYNlzI9A6NxWCk0BbOwYG35qIjY3FvHnzoNPpxE5FCCFmVNyILCMjA9OnT0fqrVvQhbUo7LEhFePhji/qCas4nATa2p1g8K2JU6dOUYFDCHEoVNyISKvVYu7cuUhJSYEurDn04S3EjuRUTB4hFsNSJo9gMeM4H8m9AscnAidPnsT//vc/MBr7I4Q4ACpuRMIYw8qVK3H58mXoAxtAH0aFTUXT1ukEk1c4BKkCJq9waOt0EjuS85FICn/OHkHYu3cvtm3bJnYiQgiBVOwArmrLli3Yv38/jJ4h0NV4koZM7IDJVNA06Cl2DOcn4aGp2xXuF37B+vXrUbNmTTz55JNipyKEuDDquRHB0aNH8fnnn4MpPKCt0xmQ8GJHIuSxMJkK6rrdwDgeixYtQlJSktiRCCEujIqbSpaSkoLFi5cAvLTwzYB29rYbzqCBKn4P3E9vgSp+DzgDLTpnT4K7PzS1OkCtVmPu3Ll0ijghRDRU3FQixhjee+89aLUaaCKeor2i7EyZcBh87i1ITDrwubegTDgsdiSnZ/SrBX3IE0hJScGmTZvEjkMIcVFU3FSi3377DbGxsTD4RMDoV0vsOE5PUpBhXpWYA4NEnSFyItegC28BQemN7dt34NKlS2LHIYS4ICpuKklGRgbWrF0L8HLoItrRBOJKILgHgN1b7IaBg+AWIHIiFyGRQlszGowJWLlyJYxGo9iJCCEuhoqbSvLBBx9AXVAAbfXWYHI3seO4BF311mC8vHDrBV4OXfXWYkdyGSbPEOgDGyIhIQHffPON2HEIIS6GiptKcP78eRw5cgRGzxAYAuqLHcdlKJL+BmfSFW69YNJBkfS32JFciq56KzC5G77esgW5ublixyGEuBAqbirB1q1bAQD68CgajqpEfP5ti72l+PzbYsZxPbwcupAnoNNq8eOPP4qdhhDiQqi4sbPExET89ddfMHoEw+RJy/8T12IIqA8mVeK777+HRkOn4hNCKgcVN3b27bffAgD0oU1FTuJ6iubbADDPuyGVjJdBH9wIebm52LVrl9hpCCEugoobO8rMzMQff/wBk8oXJu9qYsdxPYxZDEsBtKmjGPRBkQAvxfbt22ljTUJIpaDixo6OHz8Ok8kEQ2ADmmsjAsEj0PJUcPdAkRO5KKkCBt9auH37Ni5fvix2GkKIC6Dixo6OHTsGADD61BA5iWuiU8Edh9GnOoD7zwlCCLEnKm7sRKfT4eTJUzCpfMEUHmLHcUmKGyfAmfT3TgXXQ3HjhNiRXJbRKxzgJFTcEEIqBRU3dnLq1Cno9TrzJ1ZS+Wj7BQfCy2D0DMWVK1dw584dsdMQQpwcFTd2cu7cOQCAyStc5CSuS1B5W5wtJSi9xYzj8ozehc+F8+fPi5yEEOLsqLixk5s3bwIABJWPuEFcmESdXeZlUrmKngtFzw1CCLEXqdgBnNXNmzcLJ7NKlWJHcVmcoLc8FVzQi5iGCAovAFTcEELsj3pu7EAQBNy8eROC0otOARcZe+hfIh6m8AA4CRU3hBC7o+LGDrKysqDX682fVIl4uIf+JSLiJBAUHkhJSRE7CSHEyVFxYwc6nQ4AwHiZyElcG+MVD22/oBAzDgHAJDLo9TQ8SAixLypu7OD+EvPUXyAmTd3OwAMngxdeJqLiOJgEQewUhBAnR8WNHQhFL95U24hKkfofgPu/hqLLREwcGBU3hBA7o+LGDqjnxjHQIn6OiKPNMwkhdkfFjR3IZPfm2ghGcYO4OME9wHLjTLcAkRMRCMb7zw9CCLETKm7swN/fHwAg0atFTuLatLU7wuQVBkGqgMkrDNraHcWO5PIkhgIEBtLu7IQQ+6JF/OxAoVDA29sHWboCsaO4NCZTQdOgp9gxSBHBCM6oQ1BQkNhJCCFOjnpu7CQoKBC8gYobQopw+sLnA/XcEELsjYobOwkKCgJMBsCoEzsKIQ5BossHQMUNIcT+qLixkzp16gAA+AI6Q4cQAODVmQDuPzcIIcReqLixk4YNGwKg4oaQIpKCOwCAyMhIkZMQQpwdFTd20qBBAwD3X9AJcXV8QQYCAgIQEECn5BNC7IuKGzvx9/dHUFAQpAUZAC1aRlwcp1dDoi+gXhtCSKWg4saOmjRpAs6ghkSbI3YUQkTF594CADRu3FjkJIQQV0DFjR09+eSTAAA++4bISVwTZ9BAFb8H7qe3QBW/B5xBI3YklyXNKXwOFD0nCCHEnqi4saM2bdqA4zjzCzupXMqEw+Bzb0Fi0oHPvQVlwmGxI7kmJkCam4KQkBBERESInYYQ4gKouLEjHx8fNGrUCNL8dMCoFzuOy5Hk37HcOJMmd4u
Cz08HZ9TjySefBMfRZrKEEPuj4sbO2rVrV/jJlXpvKh9XVNrAXOKQyie9mwzg3nOBEEIqARU3dtapUycAgCzzmrhBXBBn3hO8sKy5X+qQSsMEyLIS4OXlhZYtW4qdhhDiIqi4sbNq1aqhcePGkOam0ITWSmZy87fouTG5+YsZxyXxuangDGp06dIFMplM7DiEEBdBxU0l6NGjB8AYpJkJYkdxLQ+vL0TrDVU6WeZVAPeeA4QQUkmouKkEnTp1Ai+Vml/oSeWQaO5aDEtJtHfFjON6TAbI7iYhPLwaLd5HCKlUVNxUAm9vbzwVHQ1enQlJPp2xU1kEla/FsJSg9BUzjsuRZV4FBCN69+5FZ0kRQioVFTeVZNCgQQAAefoFcYO4koffUOkNtvIwBnn6RUilUvTt21fsNIQQF0PFTSVp3rw5atWqBVlWIk0sriQSdZblsJQmS8w4LoXPS4VEk42uXbvC15d6zAghlUv04mbNmjWoVasWlEoloqKicOTIkTLbb9myBc2aNYObmxtCQ0Mxbtw4ZGZmVlLa8uM4DoMHDy48NfZOvNhxXILgHmA+GZyBg+BGu1FXFtntwh7KwYMHi5yEEOKKRC1utm3bhmnTpmHevHmIjY1Fhw4d0Lt3byQnJ5fY/ujRoxg9ejQmTJiA8+fPY8eOHThx4gQmTpxYycnLp3v37nD38IA8/SIgGMWO4/S0tTvC5BUGQaqAySsM2todxY7kEjhtDmTZNxAZGYmGDRuKHYcQ4oJELW5WrVqFCRMmYOLEiYiMjMTq1atRvXp1rF27tsT2f//9N2rWrImpU6eiVq1aeOqpp/Diiy/i5MmTlZy8fFQqFQYPGgTOoIEs44rYcZwek6mgadATBS1GQtOgJ5hMJXYklyBPjQPA8Oyzz4odhRDiokQrbvR6PU6dOlVs/YsePXrg2LFjJd6mffv2uHnzJnbt2gXGGG7fvo3vvvuuzAmLOp0Oubm5Fl9ievrpp6FQKKBI/Q8QBFGzEFLROF0+5JnXULNmTURHR4sdhxDiokQrbjIyMmAymRAcHGxxPDg4GGlpaSXepn379tiyZQuGDx8OuVyOkJAQ+Pj44KOPPir1cZYtWwZvb2/zV/Xq1Sv0+7CVj48PBgwYAE6fD2kWbclAnIs87T+ACRg5ciQkEtGn9BFCXJTorz4Pr3/BGCt1TYwLFy5g6tSpePvtt3Hq1Cns3r0biYmJmDRpUqn3P2fOHOTk5Ji/btwQfwPL4cOHQyqVQpF6FmDUe2MvnEEDVfweuJ/eAlX8HjpLzc44vRryjMsICwtD586dxY5DCHFhohU3AQEB4Hm+WC9Nenp6sd6cIsuWLUN0dDRef/11NG3aFD179sSaNWuwceNGpKamlngbhUIBLy8viy+xBQQEoF+/fpBoc2nujR0pEw6Dz70FiUkHPvcWlAmHxY7k1OS3YgHBhFGjRkEqlYodhxDiwsr1CvTnn3/izz//RHp6OoSH5o1s3LjRqvuQy+WIiorCvn37LE4X3bdvHwYOHFjibdRqdbEXTZ7nART2+FQlo0ePxu+7d4PdioXBrw7A05tBRZMUZJh3AufAIFFniJzIeXHaHMgzLiMiIoL2kSKEiM7mnpuFCxeiR48e+PPPP5GRkYG7d+9afNlixowZ+Pzzz7Fx40ZcvHgR06dPR3JysnmYac6cORg9erS5ff/+/fHDDz9g7dq1SEhIwF9//YWpU6eiTZs2CAsLs/VbEZWfnx+eGT68sCs//bzYcZwSrXNTeRQ3TwGM4YUXXjB/4CCEELHY3F2wbt06bN68GaNGjXrsBx8+fDgyMzOxaNEipKamokmTJti1axciIiIAAKmpqRZr3owdOxZ5eXn4+OOP8dprr8HHxwddunTBihUrHjuLGIYNG4affvoJOan/QR/YAJAqxY7kVLS1O0KZcBgSdQYEtwBa58ZOJPl3ILt7HU2aNEH79u3FjkMIIeCYjeM5/v7++Pfff1GnTh17ZbKr3NxceHt7IycnxyHm3/zwww/48MMPoQ9qCF0EvTGQKoYxuF38DXxBOj766CM88cQTYicihDgpW96/bR6WmjhxIrZu3VrucMTSgAEDULNmTcjT4yEpcPxtJAh5kDTzKviCdHTt2pUKG0KIw7B5WEqr1eLTTz/FH3/8gaZNm0Imk1lcv2rVqgoL5wqkUileffVVTJ8+Hcrk41A37Eu7V1cQiToLqku/gzPpwHgFNA17Q3DzEzuW8zDqoLx5AkqVCpMnTxY7DSGEmNlc3MTFxaF58+YAgHPnzllcV9r6NKRsLVq0QJcuXbB//35IM6/CGFBP7EhOoaiw4QDApIPq0u8oaDlS7FhOQ5ESC86gxdgJkxAQQJO1CSGOw+bi5sCBA/bI4fImT56MY8eOg908iXyf6jS5uAKYCxvAXOCQiiEpyIA8/SJq1IjA008/LXYcQgix8FiL+N28eRMpKSkVlcWlBQYGYsKE8eAMGiiT/xE7jlNgvAJFs+XZvcukAggmKBOPAmCYPn0aLdhHCHE4Nhc3giBg0aJF8Pb2RkREBGrUqAEfHx+88847xRb0I7YZMmQIGjVqDFnmNfDZyY++ASmToPAs8zIpH3nqWfCaLAwcOBAtWrQQOw4hhBRjc3Ezb948fPzxx1i+fDliY2Nx+vRpLF26FB999BHeeuste2R0GTzPY/bsWZDJZFBdPwYYaRjlcfCaTIthKV6TJWYcpyBRZ0KRGoeg4GC8+OKLYschhJAS2VzcfPHFF/j8888xefJkNG3aFM2aNcOUKVPw2WefYfPmzXaI6FoiIiIwfvx4cAY1DU89rodXcKpiW3Q4HEEoHI5iAma9/jrc3NzETkQIISWyubjJyspCw4YNix1v2LAhsrLok3FF+L//+z80bNgQssyrkGYmiB2nyjJ5BFvMuTF5lLwhK7GOPOUUeHUm+vXrh1atWokdhxBCSmVzcdOsWTN8/PHHxY5//PHHaNasWYWEcnVSqRRvvvkmlEoVVEnHwOnyxI5UJWnrdobJKxyCVAGTVzi0dTuLHanK4nNSoEj7D9WrV8eUKVPEjkMIIWWyefuFQ4cOoW/fvqhRowbatWsHjuNw7Ngx3LhxA7t27UKHDh3slbVCONr2C2XZu3cvli5dCpN7YOHifpLHOrmNkHLhDBq4n/8JMmbA2rVrUa8ercNECKl8dt1+ISYmBpcvX8bgwYORnZ2NrKwsDBkyBPHx8Q5f2FQ1PXr0QM+ePcEX3IH81mmx41Q5nEEDVfweuJ/eAlX8HnAGjdiRqh7GoEw4DM6gweTJk6mwIYRUCTb33FR1VannBgDUajWef/55pKSkQF2vO0w+1cWOVGWo4veAz70FDgwMHExeYdA06Cl2rCpFfussFCmn0L59eyxZsoRWISeEiMaW92+rVt+Ki4tDkyZNIJFIEBcXV2bbpk2bWp+UPJKbmxsWLFiAl156GUg4hPxG/cGU3mLHqhIkBRng7k0p5sAgUWeInKhq4bNvQJFyCkHBwZg1axYVNsRhZGdnY9OmTdBqtRbHAwICMG7cOFpYklhX3DRv3hxpaWkICg
pC8+bNwXEcSurw4TgOJpOpwkO6unr16mHWrNexePFiuF39EwWR/QFe9ugbujjBzQ9cXio4FJ4tJaho00xrcdocuCUcglyuwJLFi+Hj4yN2JELMtm3bhp9//rnE6+rWrYvOnenkAVdnVXGTmJiIwMBA8/9J5evWrRsuX76M7du3Q5lwGNq6XWj38Ed5uAB3rRHY8jMZ4Hb1T8Ckx+y5b9E8G+JQNBoNdv72G5hMWfhB795SnZxBDfeLO/HDDz9QcUOsm1AcERFh7pJOSkpCeHg4IiIiLL7Cw8ORlJRk17Cu7oUXXkBUVBRk2UmQ3zojdhyHJ1FnWaxQLKEVih/t3gRiiSYbw4cPR9euXcVORIiFn3/+GXm5udAHRoIpPMEUHmAKDwgeQTD41MB///2H2NhYsWMSkdl8tlTnzp1LXKwvJyeHqmU7k0qlePvttxESEgrFrVhIM6+JHcmxcZzFIn4A9XQ9iuLGCciykxAVFYXnn39e7DiEWMjOzsaWLVsAqRz64MbFrteHFe51tnbtWpoi4eJsLm4YYyVOLMzMzIS7u3uFhCKl8/b2xooVy+Hh4QFV4hHwualiR3JYHASLnhuu2H4M5EGy2xcgv30ONWvWxIIFC2hSJnE4n376KfLy8qANawlI5cWuF9z9YQioh8uXL+OXX34RISFxFFa/eg0ZMgRA4aThsWPHQqFQmK8zmUyIi4tD+/btKz4hKSYiIgJLly7FjBkz7k0w7gtB5St2LIfD7tXuRROKGfXclEp6NwnK5L/h7++PFStWwNOTdlAnjuXo0aPYtWsXTG7+MAQV3wKoiK5aK8iyk7Fu3TpERUWhRo0alZiSOAqre268vb3h7e0Nxhg8PT3Nl729vRESEoIXXngBX3/9tT2zkgc0bdoUc+fOBUx6uF3eC06vFjuS42HMouem+E6aBAAk+elQJRyEUqnC8uXLERxMe3ARx5KSkoIVK1YAEim0tWMArvS3LiZTQVMzGjqdDgsWLIBaTa+NrsjqnptNmzYBAGrWrInXaUdgh9ClSxekp6dj3bp1cLu8BwUN+wBSxaNv6CIEj0BwDyziJ7gHih3J4Ug0d+F+5Q9IwLBo0UI6M4o4nPz8fMydOxd5eXnQ1OoAQeXzyNsYfWtCH9QICQkXsGzZMixYsAA8z9s/LHEYNs+5GT16NFJSUoodv3LlCq5fv14RmYgNhg8fjqFDh0KiuQu3y3sBk0HsSA5DW7sjTF5h9zbODIO2dkexIzkUTpsLt/g9gFGLWbNmoU2bNmJHIsSCRqPBG2+8gaSkJOhDmsAYYH3xravRBkavMBw5cgSrVq0qcW024rxsLm7Gjh2LY8eOFTv+zz//YOzYsRWRidiA4zi89NJL6N27N/iCO1Bd+QMQjGLHcghMpoKmQU8UtBgJTYOeYDKV2JEcBqcvgPvl3eAMakydOhW9evUSOxIhFtRqNebOnYtz587B4F8HumqtbbsDTgJN3a4wuQfgt99+w+rVqyEIgn3CEodjc3ETGxuL6OjoYseffPJJnDlzpiIyERtJJBLMnDkTnTp1gjQvFaqrBwB6EpNScAYN3OJ3g9PlY+LEieaTBQhxFNnZ2Zg+fQZiY2Nh8K0Jba0O5Vu0lJdBXb8nTG7++Pnnn7FkyRIYDNS77QpsLm44jkNeXl6x4zk5ObSugIh4nse8efPw5JNPQppzA8qEQwCjAoc8xKiD6vJeSLQ5ePbZZ/Hcc8+JnYgQC4mJiZg8eTLi4y9BH1gf2jqdypxA/EhSBdQNesPoGYI///wTr732GrKzsysqLnFQNv/FdOjQAcuWLbMoZEwmE5YtW4annnqqQsMR28hkMixcuBAtWrSA7G4ilNdcu8DhDBqo4vfA/fQWqOL3gDNoxI4kLqMObvG7waszMWjQIFqkjzicI0eOYMqUKUhNTYUurAV0EdGPV9gUkcqhqd8DBr9aiIuLw6RJk3DlypXHv1/isDhm4yyrCxcuoGPHjvDx8UGHDh0AFP5B5ubmYv/+/WjSpIldglYUW7ZMr6o0Gg3mzp17v0u3didAUgEvEFWMKn4P+AfOljJ5hUHToKfYscRh1N4rbLIwcOBAvPrqq5C44N8EcUwGgwHr1q3D999/D0ik0NTqCKNfzYp/IMYgTz0LRcppSKVSvPzyyxg4cCDteF9F2PL+bfOrW6NGjRAXF4dhw4YhPT0deXl5GD16NC5duuTwhY2rUKlUWLZs2b0enOtQJhwABNcbMpQUZJhXJebAIFFniJxIHJxBC7dLhYXN4MGDMW3aNCpsiMO4du0aJk2ahO+//x6CyhcFjQbYp7ABAI6DPqw51PV7wsBJsXr1asybNw+ZmZn2eTwiGpt7bqo6V+i5KaLVajFv3jycOnUKBp8a0NbpDEhcZ60H6rm5P3lYormLoUOH4uWXX6ZPqcQhGI1GfPvtt9i0aTNMJiP0gQ2hq94G4Ctn2w9Or4Yy8QikuSnw9PTEtGnT0KVLF3p+ODBb3r+tKm7i4uLQpEkTSCQSxMXFldm2adOmtqWtZK5U3ACATqfDvHnzcPLkSRi9q0FTp0ulvXiIjTNoCne4VmdAcAuAtnZHlzodnNMXwC1+DyTabPzf//0fpkyZQi/cxCGcP38e7733PyQmJoDJ3aGpGQ2Td7XKD8IYZHfiobz5L2Ayok2bNpg+fTpCQ0MrPwt5pAovbiQSCdLS0hAUFASJRAKO40pcEInjOIc/Y8rVihsA5mXIjx8/DqNnCDT1ugF88U3nnI25uCnIgODuWsUNp82F++U94HR5GDFiBF544QUqbIjosrOzsWHDBuzcuROMscLemmpRoq+szunyoEw6DmnOTcjlCowa9RyGDRtmsYciEV+FFzdJSUmoUaMGOI5DUlJSmW0jIiJsS1vJXLG4AQq7gJcuXYr9+/fD5B4ATb0eYDKl2LHsSnXpd/B5qeaNM02eodA07C12LLuTqO/C7coecHo1nn/+eYwcOVLsSMTFGY1G/PLLL9iwYSMKCvIhqHyhiWgPwdOB9jFjDNKsRChv/APOoEFISCheemkKnnrqKfpg4CAqvLhxJq5a3ACFp+yvXr0av/76KwSVD9T1e4HJnXePMI+Tm8A98OfNOAnyW40VL1AlkOTfgfuVvYBRh2nTpmHQoEFiRyIujDGGo0ePYv369bh58yYgVUAb1qJwV++KOMXbHkwGyG+dgeL2eYAJaNq0KaZMmYKGDUvfiZxUjgovbn755RerH3zAgAFWtxWDKxc3QOGLzfr16/Htt9+CKTxRUL8nmNI5fw4eJzaZz5YCAAYO+a3HiZjIvvjcVLhd/QMSZsIbb7yBHj16iB2JuLC4uDh89tln+O+//wBOUjgEFd4ckFaNHmNOmwPFzZOQ3S0crejcuTPGjx+P6tWri5zMddllzo3FjR6ac/Nglx3NuXF8jDF8/fXX2LBhA5hMBXW9HhDc/cWOVeFUF3eBz0+7PyzlEQJNZB+xY9mFNCsRqoRDkPISzJ8/37wGFSGV7fLly9iwYQP++ecfAIDBNwK6aq3AlN4iJysfPi8NihsnwBfcgUQiQa9evTBmz
BgEBzvQkJqLqPB1bgRBMH/t3bsXzZs3x++//47s7Gzk5ORg165daNmyJXbv3l0h3wCxL47jMGrUKMyYMQMSow7u8bvA56aKHavCaet2hskr/N6u4OHQ1u0sdiS7kKVfguraAbiplHj33XepsCGiuHLlCt5880288MIL+Oeff2D0CkNBZH9o63atsoUNAJg8Q6CO7AdN3a4wKryxa9cujBw5EqtXr0Z6errY8UgpbJ5z06RJE6xbt67YVgtHjhzBCy+8gIsXL1ZowIpGPTeWDh8+jEWLFsFoEqCpHQOjXy2xIxFrMQb5rVgobp2Bj68v3l25EvXq1RM7FXExly9fxpdffomjR48CAIwewdCHt4TJywlPp2YCpFmJUNw6A4k2B1KpFH379sWIESMQEhIidjqnZ9cJxSqVCv/++y+eeOIJi+NxcXFo27YtNBrH3r+HipviYmNjMW/ePKjVamgj2sEQFCl2JPIoTIAi6W/I71xCaFgY/vfeewgLCxM7FXEhcXFx+Prrr/Hvv/8CKCpqWsDkGVq+HbyrkoeKHJ7n0b17d4wcOZLm5NiRXYubjh07QiaT4euvvzYvdJSWloZRo0ZBr9fj0KFD5U9eCai4KdnVq1fx+uuzcPduFnShzaAPb+n8L1BVlWCE8tohyLKTULduXaxcuRJ+fn5ipyIugDGGf/75B1u3bjUv6Gr0CoU+tDlMniGV+prhEOtYFRU5qXGQaO6C4zjExMRgxIgRaNCgQeVmcQF2LW6uXr2KwYMHIz4+HjVq1AAAJCcno379+vjpp59Qt27d8ievBFTclC41NRWvv/46bt68CUNAPWgjol1yw02HZtRBdWUfpPnpiIqKwqJFi+Du7i52KuLkjEYjDh48iK1btyIhIaHwmE916EKbQfAIEiWTQ22vwhik2cmQp54FX1C4h12rVq0wYsQItGzZktbJqSB2X+eGMYZ9+/bh0qVLYIyhUaNG6NatW5X4BVJxU7bs7GzMnTsXFy5cuLddQ2eAl4kdiwDgdPlwu7wXEm02unfvjlmzZkEmo98NsR+1Wo3ffvsNO3bsKJw8y0lg8KsNfcgTENx8Rc3mcforcCaD+TLj5chv+ZyIiQAwBj4vFfLU/yDNTQEA1K1bD888MxydOnWCVOoaW9/YS6Ut4qfVaqFQKKpEUVOEiptH02q1WLhwIY4fP35vNePuLrNtgaOSqLPgdmUvOL0aI0aMwPPPP087exO7uXPnDn744Qf8/MsvUBcUALwU+oAG0Ac3BlN4iB0PQOE6VoWrV+HealaOtY6VpCAT8rT/ILubCDCGoKAgPP300+jbty/1tpaTXYsbQRCwZMkSrFu3Drdv38bly5dRu3ZtvPXWW6hZsyYmTJjwWOHtjYob6xiNRqxevRo7d+6EoPCCun4Pp13sz9EVLs73JzjBgFdeeQVDhgwROxJxUleuXMH27dsLt2kxmcBkKuiDG0Ef2FD0/Z8e5nFiIx78WM0A5LceL1acUnG6fMhvn4c8Ix4wGeHm5oZ+/fph6NChtFaOjexa3CxatAhffPEFFi1ahOeffx7nzp1D7dq1sX37drz//vs4fvz4Y4W3NypurMcYw5dffolNmzaByZRQ1+0OwSNQ7FhWc4gJh49JmpkAVeJhyKQ85s2bh06dOokdiTgZQRDw999/Y8eOHYiNjQUAmFS+0Ic0gdGvNiDhRU5YMvfTW8CZdOaeG8YrUNDSgfdRM+ogvxMPefpFcPoCSCQSdOrUCcOGDaOtHaxk1+Kmbt26WL9+Pbp27QpPT0+cPXsWtWvXxqVLl9CuXTvcvXv3scLbGxU3tvvtt9/wv//9DwIkUNfpDJNP1TjV0aEmHJaDLO0clDf+hbu7B5YuXYJmzZqJHYk4EY1Gg71792LHjh2F+z4BMHqFQx/SBCavMIc/W1KizoLq0u/gTDowXgFNw94Q3KrAWYOCCdKsRMhvnwevzgRQuH7csGHDEB0dDZ53zGLSEdjy/m3z7KaUlJQSz4gSBAEGg6GEW5Cqrm/fvvDz88OCBQuAq39AExENY2B9sWM9kqQgw7y3FAcGiTpD5ERWYgyKG/9Cfvs8AgIC8e67K1GrFi2uSCrGnTt38OOPP+KXX35Bfn4+IOGhD6gPQ0hjCCpxJwnbQnDzc+yemtJIeBgD6sLoXwd8Xhpkt8/j3LlzOHfuHEJCQjF06BD06dOH5uU8JpuLm8aNG+PIkSOIiIiwOL5jxw60aNGiwoIRx9KuXTusXr0as2e/AVw/Cp1BDX1oM4f+dCeofME9sLeUoKwCL9yCCcrEw5BlJaJmrVpYuWIFgoLEOdWWOJf4+Hjs2LEDBw4cuD+f5t4O3VVtuNYpcBxMXqEweYVCp82B/PYFpKVfwSeffIKNGzehf/9+GDx4sHk9OWIbm4ub+fPnY9SoUUhJSYEgCPjhhx8QHx+PL7/8Ejt37rRHRuIgIiMjsWbNJ3j99deRmnIanF4NXcSTAOegZ+08XHg5cCEGADDpobryJ6R5qWjevDneeecdeHp6ip2KVGEmkwnHjx/H9u3bzYvuVYX5NK6GKb2hi2gHXXhLyO/Eg6VfwPbt27Fjx3fo2LEDhg0bhsaNG4sds0op16nge/bswdKlS3Hq1CkIgoCWLVvi7bffRo8ePeyRsULRnJvHl5WVhVmzZuHq1asw+NaEtnZHQOJ46ze4n94CiUlnvixIFSho4Zjd2JxBA9XlveDVmYiJicG8efMgl8vFjkWqKI1Gg927d2PHd9/hVkrheitG72qF82lcYXuEqk4QIL2bCHnaefD3htMbNWqEYcOGoUOHDi47L8duE4qNRiOWLFmC8ePHV9n9M6i4qRgFBQV48803ERsbC6NnCDR1uwFSx3ozrioTijltbuHifLpcDBo0CK+88orLvniRx5OZmYkff/wRP//8M/Ly8grn0/jXhSG4MQSVj9jxiK0YA59/G7K0c5BlJwMAQkJC8X//9zR69+4NNzc3kQNWLrueLeXh4YFz586hZs2aj5NRNFTcVBy9Xo+lS5fi4MGDMLn5QVO/B5jMcZ5s5lPB1RkQ3BzzVHBJQWbh4nwGDcaPH49Ro0ZVqUUxiWNISkrCtm3bsHfvXhiNxsL5NEGRMAQ2cLi/eVI+nDb33no5VwDBCA8PDwwaNAiDBw+Gv7+/2PEqhV2Lm0GDBmHQoEEYO3bs42QUDRU3FctkMuGjjz7CTz/9BKbwREH9nrTYn5UKF+f7A5xgwowZ09G/f3+xI5Eq5vz589i6dSv++usvAICg9IY+pAkM/nUccqiYVACjDvL0i4Xr5Rg0kMlk6NWrF4YPH45q1aqJnc6u7FrcrF+/HgsWLMDIkSMRFRVV7HS1AQMG2J64ElFxU/EYY/jqq6+wceNGMJkb1A16VqlTSsXAZ9+A27X9kEo4zJ8/Hx06dBA7EqkiGGM4ceIEtmzZgrNnzwIAjB5B0Ic2hcm7Os2ncRWCEbLMa5CnnYNEmwOOk6BTpxiMHDnS4TewLi+7Fjdl7WfDcRxMJpMtd1fpqLixnx9++AEffvghIFWioH4PCO4BYkdySEWrDivk
MixZsgStWrUSOxKpAgRBwLFjx/DVV18hPj4eAGD0rg596BMweYaInK7yVdlF/CoaEyC9e29H8nuLArZv3x6jRo1CZGSkyOEqVqVtnFkVUXFjX3v27MHy5SvAJDzU9bq75ItuWWR34qG8/hfc3T2wcuUKOr2TPBJjDEePHsWmTZuQkJAAADD41YI+tJlrvpnfU+W2X7A3xsDn3oI89SykeWkAgNatW2Ps2LFO8zpjtxWKk5KSzBPWYmJi0KhRo8cKSpxPz5494ebmhoULF8Lt8h6o63SpMts12Jss7T8ob5yAj48P3nvvPaftOiYVgzGG48ePY+PGTbh69QrAcTD414UutBmYylvseKIrKmwAFP77wLIPLonjYPIOh8Y7HHxeGuS3zuLEiRM4ceIE2rZti3HjxrnUHlZW99wcPnwYffr0gVqtBgBIpVJ88cUXGDFihF0DVjTquakcJ0+exLx586DTG6Gu2xkmnxpiRxKVPPUsFDdPITAwEKtWraqySymQynH27Fl8+umnOH/+PAAOBv860IU1p8n6D6Cem0eT5N2G4lYspLm3AAAxMTGYOHFilX39scuwVExMDLy8vLB+/XqoVCrMmTMHv/32G27cuFEhoSsLFTeVJy4uDq/PmgWdTg913S4uW+DIb52FIuUUgkNCsPr992k5dVKqpKQkrFu3DsePHwcAGHxrQhceRT01JVCd/xW8+o65uDG5BULTmM44LAmflwbFzZPg89MhkUjQt29fjB8/Hr6+VevED7sUN35+fjh8+DCaNGkCoHARNy8vL2RkZFSpHxAVN5UrLi4Os2bNhlanKxyi8nWtAkd+6wwUKacRHBKCD1avRkgIzUEixeXm5uKLL77Ajz/+CEEQYPQMha56a5qUXwaP01+DM+nNlxkvR37L50RM5OAYA599A8qUk5BosuHm5obRo0dj6NChkMlkYqezii3v31ZvCpSdnW2xgZ+7uzvc3NyQnZ1d7qDE+TVt2hTvvrsSSoWi8NTnu0liR6o0RYVNSEgoPvzgAypsSDGMMfz+++8Y+dxz+P7772GUe0Jdrzs0DXpRYfMIDByY+f+Fl0kZOA4m3xooaDwI2oj2KNALWLduHcaNG4fTp0+Lna7C2TSh+MKFC0hLSzNfZozh4sWLhct839O0adOKS0ecwhNPPIH33nsXr7/+OnDtANR1uzr9JGN5ahwUKacRGhqKDz74gHb2JsUkJSXhf//7X+GGlrwM2uptYAiKpM0srSUIFhOKGRPETFN1cBIYghrC4FcLiltncDPlAmbMmIHu3bvjpZdego+Pj9gJK4TVw1ISiQQcx6Gk5kXHaZ0bUpZz587htddeg85ghLp+T6c9TVyWfgnKpGMIDgnBRx9+SIUNsWAymfDdd9/h888/h8FgKJxXU6MtmNz90TcmZjShuGJI1JlQXj8GvuAOvL29MXPmTIddVNQuw1KJiYlISEhAYmJisa+i40VrMNhizZo1qFWrFpRKJaKionDkyJEy2+t0OsybNw8RERFQKBSoU6cONm7caPPjksrXpEkTLFmyBFIJB7crf0BSkCF2pAonzUyAMukYfHx98b/33qPChlhIT0/H9OnTsXbtWughhbpeN2jrdqHCphw4WPbc3B+kIrYQ3PyhjuwLbY22yMkrwFtvvYVly5aZz4yuqqweloqIiKjwB9+2bRumTZuGNWvWIDo6GuvXr0fv3r1x4cIF1KhR8sTTYcOG4fbt29iwYQPq1q2L9PR0GI3GCs9G7KNVq1Z4++23MX/+Arhd3gt1wz5Os1sxn30DqsTDcHf3wP/ee8/p93khtjl+/DiWLl2KvLw8GPxqQRvRDpAqxY5VZbF7n83NPTc056b8OAkMwY1h9K4GVcJh7NmzBxcuXsTCBQtQu3ZtsdOVi6grFLdt2xYtW7bE2rVrzcciIyMxaNAgLFu2rFj73bt345lnnkFCQgL8/Mq3MicNSzmGXbt2YeXKlWBydxQ07Aum8BA70mPh89LgdnkPFDIpVq1a5TQrgpLH9+Dea5Dw0NRoB2NAPdoD6jF5nPoKnGAwX2YSGfKjRomYyEkIAuQpp6BI+w9yuQJz585Bp06dxE4FwE7DUhVNr9fj1KlT6NGjh8XxHj164NixYyXe5pdffkGrVq2wcuVKhIeHo379+pg5cyY0Gk2pj6PT6ZCbm2vxRcTXp08fTJkyBZy+AG5X9gEmw6Nv5KA4bS7crv4JnuOwePFiKmyImV6vx+LFi7Fx40YICk8URPaHMbA+FTYVgHESy7OlONHezpyLRAJ99dZQ1+sGvUnAggUL8OWXX5Y439aRifbXkJGRAZPJhODgYIvjwcHBFmdkPSghIQFHjx7FuXPn8OOPP2L16tX47rvv8NJLL5X6OMuWLYO3t7f5q6quzOiMhg0bhiFDhkCiuQvVtYNAVTzbwagrLM6MOrz++ky0bt1a7ETEQWg0GsydOxd//vknjB7BUEf2d+m9oCoaxx6ac1MVXz8cmMmnBgoi+0FQeGLjxo34+OOPIQhV52cseqnLPfQJpuisq5IIggCO47Blyxa0adMGffr0wapVq7B58+ZSe2/mzJmDnJwc81dVW1HZ2U2ZMgVt27aFNOcGFDdOiB3HNoIA1dX9kGhzMHLkSPTq1UvsRMRBaDQazJ49GydPnoTBJwKaBr3AZDS/piJRz439CSpfqCP7waTyw/fff4/333+/yvTg2PzX0KVLlxIX7svNzUWXLl2svp+AgADwPF+slyY9Pb1Yb06R0NBQhIeHw9v7/lLkkZGRYIzh5s2bJd5GoVDAy8vL4os4DqlUirfffhs1a9WC/PZ5yNIviR3JOoxBkXQM0rxUxMTEYMKECWInIg5Cr9fjrbfeQlxcHAx+taGt25nWrrGHh9a5qZI9v1UAk6mgbtgbJrcA/Prrr1i7dm2VKHBsLm4OHjwIvV5f7LhWq33kadwPksvliIqKwr59+yyO79u3D+3bty/xNtHR0bh16xby8/PNxy5fvgyJREJnplRh7u7uWL5sGXx8faFMPg4+r+RhSUciS78IecZlNGjQEHPmzIFEQp8aSWHP8/vvv3+vx6YGtLU7AtSjYB8Sy54b+jnbkVQBdYMeEFQ+2L59O3766SexEz2S1X8NcXFxhStponCl4qLLcXFxiI2NxYYNGxAeHm7Tg8+YMQOff/45Nm7ciIsXL2L69OlITk7GpEmTABQOKY0ePdrc/tlnn4W/vz/GjRuHCxcu4PDhw3j99dcxfvx4qFQqmx6bOJaQkBAsfucd8BIJVAkHwRlKnyQuNklBBpQ3TsDH1xdLliyGUknDDaTQDz/8gN9//x0m90Bo63SiN1w74h44+ZvWuakEUiXU9XuCyVT46KOPEBsbK3aiMlm9zk3z5s3BcRw4jitx+EmlKvyGbTF8+HBkZmZi0aJFSE1NRZMmTbBr1y7zmjqpqalITk42t/fw8MC+ffvwyiuvoFWrVvD398ewYcOwePFimx6XOKYmTZrghRdewNq1a6FMOAxN/R6Od1aJUQfVtQPgIOCtN99EQADt/0MKXblyBWvWrAWTuUF
TtysgsWl3G2Ijk3sg+Nxb4MDAwMHkHih2JKfH5O5Q1+0K90u7sHjxYmzYsMFht2uwep2bpKQkMMZQu3Zt/PvvvwgMvP+HJJfLERQUBJ53/HFlWufGsTHGMG/ePBw7dgy68JbQhzUXO9J9jEF5bT9kd5MwZswYjBs3TuxExEHo9Xo8//zzSEpKgrpBL5i8wsSO5PQk6iyoLv0OzqQD4xXQNOxNZ6NVElnqf1DePIFOnTphwYIFlfa4trx/27xCcVU6FYxUPRzH4Y033sDE559HekosTJ4hDrMHlSz9ImR3k9CiRQuL4VJCvv32WyQlJUEf3IgKm0qiuHECnElfODRl0kNx4wQ0DXqKHcslGEKaQJpzAwcPHsTx48fRrl07sSMVY/OA8LJly0rcy2njxo1YsWJFhYQirs3LywsL5s+HRMJBmXjEIRb447S5UN48CR8fH7z55ptVopeSVI709HR89fXXYHJ36MKjxI7jMiT5d8zzbDgwSAruiJzIhXAcdBHtAU6Cjz/+2CG3QLK5uFm/fj0aNmxY7Hjjxo2xbt26CglFSKNGjTBixAhIdHlQpJwWNwxjUF4/CghGTJs2Df7+/uLmIQ7liy++gEGvhza8JcDLxI7jOjjO8mwp2luqUgkqH+gDGyIlJQW//fab2HGKsbm4SUtLQ2hoaLHjgYGBSE1NrZBQhADA6NGjUb1GDchvn4ckP120HLI78ZDmpaFjx44Os8cKcQy3b9/G7t27Iah8YPSvI3Ycl2IeksK9s6VMxZcoIfalD2sGSKTYunWrw/Xe2FzcVK9eHX/99Vex43/99RfCwmismVQchUKB2bNmgeM4qK4fBQRTpWfgdPlQ3jwBDw8PvPrqq5X++MSxff/99zCZTNCFNKXTvonLYTIV9IENcPv2bRw6dEjsOBZsfjZOnDgR06ZNw6ZNm5CUlISkpCRs3LgR06dPx/PPP2+PjMSFNWnS5N7+U9mQp52r9MdX3PgXMBnw8ssv03AUsaDT6fD777+DyVQw+tUSO47LMXkEWwxLmTxKXtme2Jc+KBIA8PPPP4ucxJLNCzHMmjULWVlZmDJlinmlYqVSidmzZ2POnDkVHpCQCRMm4M/9+5GdFgdDYH0wWeUs2Mjn3Ybs7nU0bdoUPXvSWRjE0vHjx5GXlwd9aFPaXkEE2rqdoUw4DIk6A4JbQOFq0KTSMaUXjF7hiIuLw61btxxmBMfmnhuO47BixQrcuXMHf//9N86ePYusrCy8/fbb9shHCNzc3DBu7FjAZID81pnKeVDGzBt5Tp48udTNXInr2r9/PwDA6F9X5CQujhYmFp3h3nyzoueEIyj3IHFaWhqysrJQp04dKBSKKrGRFqm6+vbtWzi5+E48OG2O3R9Pevc6+IJ0dO7cGZGRkXZ/PFK16PV6/PvvvxCU3hBUPmLHcUnKhMPgc29BYtKBz70FZcJhsSO5LKNPDYCT4NixY2JHMbO5uMnMzETXrl1Rv3599OnTx3yG1MSJE/Haa69VeEBCgMLdwye9+CLABChunrTvgwkCFCmnwEulNI+MlOj8+fPQarUwelcXO4rL4gvSLda54WmdG/FI5TB6BOPixYvIy8sTOw2AchQ306dPh0wmQ3JyMtzc3MzHhw8fjt27d1doOEIe1L59ezRp0gSyu0mQaLLt9jjSu4mQaHMxcMAAhxk/Jo6laBNho1fxZTFIJTEZLde5cYDFPl2ZyTMEjDH8999/YkcBUI7iZu/evVixYgWqVatmcbxevXpISkqqsGCEPIzjOIwYMQIAILttpzOnGIM87RwkEgmGDRtmn8cgVV58fDwAQKDNGkVkuSs4Tb4Rl8kjCMD954bYbC5uCgoKLHpsimRkZEChUFRIKEJK065dO1SrVg3yzGvgDJoKv38+Lw28OhOdOnVCSIhj7GlFHE9CYiKYzA1MphQ7istivMKi54bx9P4jpqJNSxMTE0VOUsjm4qZjx4748ssvzZc5joMgCHj33XfRuXPnCg1HyMPMPSqCCbL0ixV+//K0wi5V6rUhpTEajUi/fRsmZdm7EhP70jTsbS5winYFJ+JhUiUgkSItLU3sKADKsc7Nu+++i06dOuHkyZPQ6/WYNWsWzp8/j6ysrBJXLiakovXs2ROfb9iAnDuXoA9tDkgqZmVYTpsLac5NNG3atMT90wgBgJycHAiCACYr3oNNKo/g5oeCliPFjkGKcBwEuRsyMjPFTgKgHD03jRo1QlxcHNq0aYPu3bujoKAAQ4YMQWxsLOrUob1ViP0pFAr07NEDnEELPjelwu5XlnkNANC/f/8Ku0/ifNRqNQCA8XKRk7g2Se4teJzYBI8TG+FxYhMkubfEjuTyGC9HQX6+2DEA2NhzYzAY0KNHD6xfvx4LFy60VyZCHqlbt27YsWMHZJnXYPKpgNNxGYMsKwEKhQLR0dGPf3/EaQmCUPgfWthRVG7xe1A0qZiBwS1+D/JbjxM7lovjHGbNO5t6bmQyGc6dO0ertRLR1a9fHzVq1IAsO7lCTgGVFGRAos1Bhw4dSpwwT0gRqfTeZ0IRNnIlD6KzpRwOE+4/P0Rm87DU6NGjsWHDBntkIcRqHMehe/fugGCE9O7jL0Egy0oAgML7JKQM3t7eAADOqBU5iavjLNe5AX3oFhtn1MLHx0fsGADKMaFYr9fj888/x759+9CqVSu4u7tbXL9q1aoKC0dIWTp37owNGzZAmnMDxoDH299Hmn0D7h4eiIqKqqB0xFm5u7vDzc0NeTrHWInVVanrdILbtQPmAkddp5OYcYhghESvRlBQPbGTAChHcXPu3Dm0bNkSAHD58mWL62i4ilSm8PBwhISEIDXjFsAEgCvfWVOcNhcSXS6i2nZ0mC5V4rg4jkNERAQuxl8uHJqiHcFFoUi/BAD35twUXtb41RI1kyuTaHMAMNSsWVPsKABsLG5MJhMWLFiAJ554An5+fvbKRIhVOI5D69at8euvv0KizoLgHlCu+5HeO8uidevWFRmPOLFGjRrh4sWLkKgzIdxbmZVULj4/zWLODZ9/W8w4Lo/PK/z5O8pGwzZ91OV5Hj179kROjv13ZSbEGkXDSNKc8p8Szt8rblq1alUhmYjza968OQBAmnNT3CCu7OH5ww5ylo6rKnoNLnpuiM3mfvwnnngCCQkJ9shCiM2ioqLAcRz4vNTy3QFjkOalIjQ0FKGhtAkisU6rVq0gk8shvXud3lRFYvIItphQbPIIFjOOazPqIc1NQd269RAc7Bi/B5uLmyVLlmDmzJnYuXMnUlNTkZuba/FFSGXy9PREtWrVwKszy/Umw+nzwRl1aNy4sR3SEWelUqnwVHQ0eE02JAV3xI7jknThzXH/jCnu3mUiBlnWNYAJ6Natq9hRzGwubnr16oWzZ89iwIABqFatGnx9feHr6wsfHx/4+vraIyMhZapfvz44ow6cvsDm2/IFhUuF16vnGDP8SdXRr18/AID8dsXvcUYeTXX1AO6vdcPuXSaVjjHI0y+Cl0rRs2dPsdOY2XxqyIED9AdEHEu9evXw559/gldnwKjwsOm2EnVhcVO/fn
17RCNOrGXLlqhTpw6uJSRAp2sJpvAUO5JL4Uw6y0X8TDoR07guaXYyJJps9OzTx6E6OGwubmJiYuyRg5ByKypMJAWZgG9Nm27L3ytu6tZ9vHVyiOvhOA6jRo3CggULoEg5DW1tem2sTIyXAya9+VRw2utLBEyAIuU0OE6CZ555Ruw0Fsq1qEd2djY2bNiAixcvguM4NGrUCOPHjzev3FkVFBQUgOeLr0/B8zyUSqVFu9JIJBKoVKpytVWr1aXuwcFxnMUWALa01Wg09/e+KcGDiy7a0lar1cJkKn25eVvaurm5mddE0ul0MBqNj9U2ICAAJpMJnOb+WXyCyQhWRgaJTAaOk0CizYGvry8kEkmpvz+lUmn+W9Hr9TAYSt/u4cG2BoMBer2+1LYKhcK8ro4tbY1GI3S60j+lyuVyyGQym9uaTCZotaWvuiuTySCXy21uKwgCNBpNhbSVSqVQKBQAAMaYeRPLx21ry/P+wbYdO3ZE7dq1ceXKZTC/uhDcLJcj4DgOEtn9N12TvvTfxcNtBYOu1GlkHAdIZIpyttWXuf8PLy9nW6MeTKiYthKZ3Py8F4wGsBJep0wKL0jVGffvU+lTatv791v4vAesf42wqq1UBk5ie1tmMkIos60U3L11lGxqK5gglPG6KuF5cLy0HG0FCMb7r3+yjMtg+Rno3rs3/P39odfr7f4aYTVmoxMnTjA/Pz8WHh7OBg8ezAYNGsSqVavG/P392alTp2y9u0qXk5PDcK/QL+mrT58+Fu3d3NxKbRsTE2PRNiAgoNS2rVq1smgbERFRattGjRpZtG3UqFGpbSMiIizatmrVqtS2AQEBFm1jYmJKbevm5mbRtk+fPmX+3B709NNPl9k2Pz/f3HbMmDFltk1PTze3nTJlSpltW8X0Yi1nfsFazvyCBbXqXWbbyLFLWMvXNrGYTp1Z69aty2z777//mjOsXLmyzLYHDhwwt/3444/LbLtz505z202bNpXZdvv27ea227dvL7Ptpk2bzG137txZZtuPP/7Y3PbAgQNltl25cqW57b///ltm2/nz55vbnjt3rsy2M2fONLdNTEwss+2UKVPMbdPT08tsO2bMGHPb/Pz8Mts+/fTTFn/DZbV9+DVCqVSW2tajWkPz32TLmV8wqcqz1LZuwbUs2sq9Sn89UfqHWbRV+oeV2lbuFWDR1i24VqltpSpPi7Ye1RqW2lYilVu09arVrMyf24NtfeqX/ZxrNvVTc1u/xk+V2KZ9+/YsJibG/NWhaw8W0Lxrmffb+Pn3bHuNuNc2pN2gMts2GDnf3Da84/Ay29Yb9oa5bfWuo8psW2fwdHPbiF4Ty2xbq/9L5ra1+r9UZtuIXhPNbesMnl5m2+pdR5nb1hv2Rplt7f0aUfT+nZOTwx7F5p6b6dOnY8CAAfjss88sPklOnDgR06ZNw+HDh229S0IqhMRQgNI/sxXH6dUAE+DpSXMlSPlJJOVbGZs8nvz8fPj4+IDjCs+YElR+AOjMNVKIu/cpxWoqlQqxsbFo2LChxfELFy6gVatWZXb9OoLc3Fx4e3vj1q1b8PLyKnY9DUuV3NaRh6UA4O2338bJkyeRHzUK4OVWdTlL827DLf53jBw5Es8++2ypbWlYqhANS5XeNjc3Fy+88CKyc3NR0KAPBFXhxEoalipfW2uGpdyv7oO0IN0858bkEYKCet1pWAqw+7CU9O51qBIPo1GjRnj33XfNr3n2fo0oev/Oyckp8f37QTb33Hh5eSE5OblYcXPjxo0q9QnY3d292KafpbWz5T6t9WBBUpFtHyygKrLtgy/mFdlWoVCY34Aep22NGjVw6tQpSHQFENzkkPBSgC/7z5vT5wMAqlWrZvXvTi6XWz32K5PJzIVDRbaVSqVW74FlS1ue563+OdjSViKR2KUtx3F2aQvY/rx3d3fHvHlz8cYbb8DjxjEURA4o8e/vwTf4R3mwIKnYttbPXbCprdRebUt+XvDaHIuzpSTau6W2LfF+rXiNsHdbjpeCt0dbCQ9ebt2eZ7a1lUDK9PBIOQGluwfeeuutUosMe71GWMvm/tThw4djwoQJ2LZtG27cuIGbN2/i22+/xcSJEzFixIgKDUeItXx8fAAAnLH0TwoPK2pbdFtCHseTTz6Jp59+GhJNNpRJx0ArF9uXoPJF0U+YARCUjnMastMSjIXrCZn0mDFjOqpVqyZ2olLZ3HPz3nvvgeM4jB492jxEIJPJMHnyZCxfvrzCAxJijaIz9Wwqbgw6i9sS8rheeOEFXLhwARcuXIDJPQCG4EZiR3JeTCj7MqlYjEGZdBy8OgP9+vVDjx49xE5UJpt7buRyOT744APcvXsXZ86cQWxsLLKysvD+++9bPbxASEW7X9xYv5BXUSFExQ2pKHK5HAsXLoSPry+UN/4BTxtr2g1fcMdyV3DaBsOuZGnnIMu4goYNG2Lq1Klix3kkq4sbk8mEuLg486QfNzc3PPHEE2jatCk4jkNcXFyZk1MJsaeicd/yDEs9amIaIbYIDAzEsqVLIZPJ4HbtgHkVbFLBuIcvP3yAVBRpVgKUN08gMDAQS5YssX3NGRFYXdx89dVXGD9+fInflFwux/jx47F169YKDUeItcyTrk2ln8n0ME4wWt6WkAoSGRmJN+fNAycY4XZ5LzgtbSpc0UweIRZzbmhXcPvgc29BlXAEbu7uWL58Ofz9/cWOZBWri5sNGzZg5syZpa7qO2vWLHz66acVGo4QaxUNiXJC6adKFiMYwUulJf5NE/K4YmJiMG3aNHAGDdwv7wanyxc7klPR1WgLxisKV3njFdDVaCt2JKfD592G29U/IJNKsGzpUtSpU0fsSFazuriJj4/Hk08+Wer1rVu3xsWLtDsuEYe5R5GVvl7DwzjBBEUV6F4lVdfAgQPx/PPPg9Plw40KnAqlSP7HvHkmZ9JBkfyP2JGciiQ/HW5X9oEHsGjRIjRr1kzsSDaxurgpWqiqNHl5eQ6/gB9xXuXruTHRJHhidyNHjsTYsWMh0ebCPf53cLo8sSM5BT4/zXJCcf5tMeM4FT4vDe6X94CHCfPnv4127dqJHclmVhc39erVw7Fjx0q9/ujRo6hXr16FhCLEVuYF8GwobjhmsnrhPEIex9ixYzFx4kRwujy4X9oFieau2JGqvoeXEaJ1hSoEn30Dbpf3QMoxLFq0CB07dhQ7UrlYXdw8++yzePPNNxEXF1fsurNnz+Ltt98ucwl7QuyJK+eZEuW9HSG2eu655/DKK6+A0xfA/dIu8HlpYkeq0kwewTShuIJJM67A7eofUMhlWLZsGaKjo8WOVG5WL+I3ffp0/P7774iKikK3bt3QsGFDcByHixcv4o8//kB0dDSmT59uz6yElKp8RQqj4oZUqqFDh8LLywvLl6+AW/xuaGp1gNG/6kzSdCTaup2hTDgMiToDglsAtLWrZg+DQ2AM8pTTUKSehaeXF1YsX45Gjar2ApRWFzcymQx79+7F+++/j61bt+Lw4cNgjKF+/fpYsmQJpk2bRl38RDTlKlIY9dyQyte9e3cEBATgzTffAhIOQ
afJhj68Ja3TYiMmU0HToKfYMao+kwHKxCOQ3b2OatWqYfny5Q69rYK1bNp+QSaTYdasWZg1a5a98hDymGjcnTi+Fi1aYM2aTzB37lykpJwFr86CpnZHQEoT3K3FGTSFPTcFGRDcC3tumMz6zYAJwGlz4Xb1T0g0d9GiRQssXLjQaRY1tXn7BUIckclUNJHYhj9pjnvgdoRUroiICKxbtw5t27aFNOcG3C/8AklBhtixqgzltYPgc1MgMenA56ZAee2g2JGqFGnWdXhc+AUSzV08/fTTePfdd52msAGouCFOwlyk2NC1zzgJjFTcEBF5enpi6dKlGDduHHh9Ptwv7oTs9nk688cKfF6q5angNEHbOoIRiqS/obq2H0oZj7feegsvv/wypFKb99F2aFTcEKdQtEM9OFt6biT3b0eISHiex5gxY/Dee+/Bx8cbyuR/oLqyD5xBI3a0KoYKwkeRqLPgfuFXyNMvoFat2vj00/Xo2rWr2LHswqp3grIW7yPEERT13DAbixsaliKOIioqCps2bsSTTz4Jac5NuJ/7EdKsRLFjObBiO2eKkqJKYALkqXGFQ5+auxg6dCjWrVuLGjVqiJ3Mbqx6J/D19UV6ejoAoEuXLsjOzrZnJkJsZjDc2zDTpuKGg0Fv/UabhNibr68vli1bhunTp0PJM6iuHYDy2gHqxSkB4+UW69wwnrZSKYlEcxduF3+D4uZJ+Pv7YcWKFXjllVecfnV2q94JPDw8kJmZCQA4ePDg/TcSQhyEXq8v/I/E+k0wmUQKg9EARvMbiAPhOA4DBw7Exo0b0bRpU8iyEuFx7gdIM67SXJwHMWYx54aGpR4imCBPiYX7+Z/BF9xB9+7dsXnTJrRt6xobjFo1g6hbt27o3LkzIiMjAQCDBw++v1HhQ/bv319x6QixUlFxw2wobsDxYIIAo9FIazQRhxMeHo7Vq1fjl19+wbr164HEwzBmXoE2oj2Y0lvseKIT3PzA3dtfigEQVH5iR3IYfG4qlMnHIdFkIzAwEDNmzKiS+0M9DquKm6+//hpffPEFrl27hkOHDqFx48Zwc3OzdzZCrHa/58b6Gf9FhZBer6fihjgkiUSCQYMGoX379vjggw/w119/wePcj9CFNoU+pCnAO9cZLjZ5+MxIWgQRnEEDxY0TkGVeBcdJMHjIEEycONEl36+temaoVCpMmjQJAHDy5EmsWLECPj4+9sxFiE3MPTecDT0394obnU4Hd3d3e8QipEIEBQVhyZIl+Ouvv/DBBx8g/dYZyDOuQlu9DYy+ES75xi5RZ1kMS0k0WWLGEZcgQJZ+AcpbZwCTHg0aNMSMGdPRoEEDsZOJxuay/8CBA+b/F81VoCXsidh0Ol3hf2wZlrrXy2O+LSEOLjo6Gi1btsSWLVvw7bffgru2H0bPUOhqtIHg5i92vEoluAeAy70FDgwMHAS3ALEjiYLPvgHljROQaLPh6eWFF55/BX369AHP2/Ba6ITKtc7Nl19+iSeeeAIqlQoqlQpNmzbFV199VdHZCLGaVqsFUDhJ2FqMihtSBalUKkycOBFffPEFoqOjIc1Lhfv5n6FIPArOoBY7XqXR1u4Ik1cYBKkCJq8wl9s4U6K5C1X8Hrhd2QepPheDBw/G1i1b0L9/f5cvbIBy9NysWrXKvKJhdHQ0GGP466+/MGnSJGRkZNDO4EQU93tu7v9JP3LvmXttiwojQqqS8PBwLFmyBKdPn8bHH3+MhITLkN9NgC74CehDmgC8i8wjc7GTpDi9GvKU05BnXAHA0LZtW0yePBk1a9YUO5pDsbm4+eijj7B27VqMHj3afGzgwIFo3LgxFixYQMUNEYW55+aBCZbKhMPg73Vbc7m3oEw4bLGLcFHPjXkyMiFVUMuWLfHZZ59h9+7d2LBhA7JuxUJ+5xJ0YS1gCKgPSJxzIfpHPb+djkkPedo5KNLOAYIRtWrVxqRJL7rMqd22srm4SU1NRfv27Ysdb9++PVJTUyskFCG2KqnnRlKQAe7exzoODBK15aaERcWNRkMLpJGqjed59O3bF126dMGOHTuw9ZtvwCUdg/z2OejCo2D0rel0k44f9fx2GoIJsvRLUKSeBWfUwj8gABMnTECPHj1o+KkMNpf0devWxfbt24sd37ZtG+rVq1choQixVUlzbgT3ADDz8l4lTDh84GwpQpyBSqXC6NGj8c3WrRgyZAhkRjVU1w7A7cIv4HNuOtUigI98fld1TID0zmV4/Pc9lDf+gYdCihdffBFbt2xB7969qbB5BJt7bhYuXIjhw4fj8OHDiI6OBsdxOHr0KP78888Six5CKoN53swDxY22dsfCOTfqDAhuAcUmHDKac0OclK+vL6ZOnYr/+7//w8aNG/HHH3+Av7wXRo9g6KtFweQZInbEx/ao53eVxRikWYlQ3IqFRJsDuVyBoSNG4Nlnn4Wnp6fY6aoMm4uboUOH4p9//sH777+Pn376CYwxNGrUCP/++y9atGhhj4yEPFJR78uDPTdMpip7DJ6n4oY4t9DQUMybNw8jRozAxo0bcfToUUgv7YLRKxy68JYQPALFjvj4nKUzijHw2TegSDkFXnMXPC9F/0GDMGrUKPj7u9Zp/hWhXMtbRkVF4euvv66QAGvWrMG7776L1NRUNG7cGKtXr0aHDh0eebu//voLMTExaNKkCc6cOVMhWUjVdb/nxra9pQAaliLOr3bt2li8eDEuXryIjRs34sSJE5DmpsDoU72wyKmCa+Q4zYRixsDnpkCRchp8QQY4ToJevXtj9OjRCA0NFTtdlSXq2t3btm3DtGnTsGbNGkRHR2P9+vXo3bs3Lly4UOZW7Dk5ORg9ejS6du2K27dvV2Ji4qhK6rl5JCpuiIuJjIzEu+++i7i4OHz++eeIi4uDNPsGDL41oQ9vAUHlK3ZEq1X5CcWMgc9LLSxq8tPBcRw6d+mCsWPHlvn+R6wjanGzatUqTJgwARMnTgQArF69Gnv27MHatWuxbNmyUm/34osv4tlnnwXP8/jpp58qKS1xZCWdLfUoNOeGuKqmTZvigw8+wOnTp7Fx40acP38esrvXYfCrBX1YCwgqH7EjPlJVXqGYz02F/FYspHlpAICOHTti7NixqF27tsjJnIdoxY1er8epU6fwxhtvWBzv0aMHjh07VurtNm3ahGvXruHrr7/G4sWLH/k4Op3O4pN5bm5u+UMTh1VYoHAAZ8MJgA9snEmIq+E4DlFRUWjZsiX+/fdfbNy4CfHxlyDLug6Df23owpo79O7jVXFCMZ+XBnlKLKR5hcumREdHY+zYsXSmsR2IVtxkZGTAZDIhODjY4nhwcDDS0tJKvM2VK1fwxhtv4MiRI5BKrYu+bNkyLFy48LHzEsem1+sLJwjbsJYH9dwQUljktG3bFm3atMHx48exadMmXLlyBbLMBBj869wrcrzEjlnMI08YcCCS/HQoUk5DmnsLAPDkk09i3LhxLr2xpb3ZXNxs3rwZw4YNq7At1B/edJMxVuJGnCaTCc8++ywWLlyI+vXrW33/c+bMwYwZM8yXc3NzUb169fIHJg5J
<... remainder of base64-encoded PNG omitted: violin plot 'CSW Test Phase Performance' (y = Probability of Correct Prediction, x = Training Paradigm, dashed reference line at chance, 0.5) ...>",
+       "text/plain": [
+        ""
+       ]
+      },
+      "metadata": {},
+      "output_type": "display_data"
+     }
+    ],
+    "source": [
+     "# Set up the parameters for the experiment.\n",
+     "params = utils.Map(\n",
+     "    n_participants = 1,\n",
+     "    state_d = 11, # dimensionality of the state input\n",
+     "    context_d = 11, # dimensionality of the learned context representations\n",
+     "    output_d = 11, # dimensionality of the output layer\n",
+     "    episodic_lr = 1, # learning rate for the episodic pathway\n",
+     "    persistance = -0.8, # bias towards memory retention in the recurrent context module\n",
+     "    temperature = 0.1, # temperature for EM retrieval (lower is more argmax-like)\n",
+     "    n_optimization_steps = 10, # number of optimization steps to take for each state\n",
+     "    probs = [1, 1, 1], # probability of the context appropriate transition for the 2nd-4th state\n",
+     "    seed = 0 # random seed for reproducibility\n",
+     ")\n",
+     "\n",
+     "# Run a single participant through the CSW task under the Blocked and the Interleaved paradigms.\n",
+     "blocked_results = run_participant(params, 'Blocked')\n",
+     "interleaved_results = run_participant(params, 'Interleaved')\n",
+     "\n",
+     "# Plot the performance of the model on the CSW task during the test phase\n",
+     "results_df = pd.concat([blocked_results, interleaved_results])\n",
+     "test_phase = results_df[results_df.trial >=160]\n",
+     "sns.violinplot(data=test_phase, x='paradigm', y='probability', inner='point', scale='count')\n",
+     "plt.ylim(0,1)\n",
+     "plt.ylabel('Probability of Correct Prediction')\n",
+     "plt.xlabel('Training Paradigm')\n",
+     "plt.title('CSW Test Phase Performance')\n",
+     "plt.axhline(0.5, color='black', linestyle='--')"
+    ]
+   },
+   {
+    "cell_type": "markdown",
+    "metadata": {},
+    "source": [
+     "#### 4. Put it all together!"
+    ]
+   },
"iVBORw0KGgoAAAANSUhEUgAAAsAAAAGHCAYAAAC+muSmAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjguNCwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8fJSN1AAAACXBIWXMAAA9hAAAPYQGoP6dpAAEAAElEQVR4nOydd5hjV33+39t1VWek6bO9eYt3vet1wY11i7vj0GxKwA6YQHiIAw4E/AsBHDDGDh2MKSE2BELcsAEHg4278bpu7222TJ9Rr7f//rg6R9JImrKe3Z2ZPZ/n2WdnNNLVkXR173u+9z3vl3McxwGDwWAwGAwGg3GSwJ/oATAYDAaDwWAwGMcTJoAZDAaDwWAwGCcVTAAzGAwGg8FgME4qmABmMBgMBoPBYJxUMAHMYDAYDAaDwTipYAKYwWAwGAwGg3FSwQQwg8FgMBgMBuOkgglgBoPBYDAYDMZJBRPADAaDwWAwGIyTCiaAGQzGlOP+++8Hx3H0n8fjQVtbGy666CLceeedGBwcPOpt79ixA1/+8pdx8ODByRvwJPLlL38ZHMcd1WOfe+45cByH5557bkKP++EPf4j777+/6vaDBw+C47iafztaUqkU7rjjDpxxxhkIBoNQFAXz5s3Dhz/8YWzYsKHivq+++ire8Y53YM6cOVAUBa2trTjnnHPwz//8z/Q+11xzDQKBAEzTrHjsxo0bwXEc2tvbq8bw4osvguM4fO9735u018VgMKYXTAAzGIwpy3333Yf169fjqaeewj333IPVq1fjrrvuwrJly/DnP//5qLa5Y8cO3H777VNWAN98881Yv379UT329NNPx/r163H66adP6HH1BHB7ezvWr1+Pq6+++qjGM5L9+/djzZo1+PrXv46LLroIv/71r/Hkk0/i9ttvx8DAANauXYtkMgkA+L//+z+ce+65SKVSuPvuu/Hkk0/iu9/9Ls477zw88MADdJsXXXQRMpkM3njjjYrneu655+Dz+dDf349du3ZV/Y08lsFgnJyIJ3oADAaDUY9TTz0VZ5xxBv39Xe96Fz796U/j/PPPxzvf+U7s3bsXra2tJ3CEJXK5HLxe71vezqxZszBr1qyjemwwGMTb3va2tzwGgqIok7Y9y7Lwjne8A8PDw1i/fj1OPfVU+rd169bhxhtvxBNPPAFJkgAAd999N+bPn48//elPEMXSqeq9730v7r77bvo7EbHPPfdcxVife+45XHfddXj22Wfx7LPPYunSpRV/a2pqqhgDg8E4uWAVYAaDMa2YM2cOvvnNbyKdTuPHP/5xxd/eeOMN/PVf/zXC4TA8Hg/WrFmDBx98kP79/vvvx3ve8x4ArnAiFovy6uef//xnXHLJJQgGg/B6vTjvvPPw9NNPVzwPsSls2LAB7373u9HY2IiFCxcCAObNm4drrrkGjz/+ONasWQNVVbFs2TI8/vjjdAzLli2Dz+fDWWedVVW5rGWBINv84x//iNNPPx2qqmLp0qX4r//6r4r71bJAHDhwAO9973vR0dFBbQSXXHIJNm3aRLe9fft2PP/88/T9mDdvHoD6Fohdu3bhfe97H1pbW6EoCubMmYMPfehD0DStzqcGPPbYY9i6dStuu+22usLzyiuvpJOIaDSKpqamCvFL4PnSqWv16tVobGyseM22bePFF1/EhRdeiHXr1uHZZ5+lf9N1HevXr8eFF1541FYTBoMx/WECmMFgTDuuuuoqCIKAF154gd727LPP4rzzzkMikcCPfvQj/Pa3v8Xq1atxww03UAF39dVX42tf+xoA4J577sH69esrLvH/8pe/xGWXXYZgMIif//znePDBBxEOh3H55ZdXiWAAeOc734lFixbhoYcewo9+9CN6++bNm3Hbbbfhc5/7HH7zm98gFArhne98J770pS/hP//zP/G1r30Nv/rVr5BMJnHNNdcgn8+P+Zo3b96Mf/7nf8anP/1p/Pa3v8WqVavwkY98pOI9qPdevfnmm7j77rvx1FNP4d5778WaNWuQSCQAAI8++igWLFiANWvW0Pfj0UcfHXUcZ555Jl555RX8+7//O5544gnceeed0DQNuq7XfdyTTz4JAPibv/mbMV8rAJxzzjl49dVXccstt+DVV1+FYRg178fzPN7+9rfjpZdeoj7gTZs2IR6PY926dVi3bh2ef/55ev9XXnkF+Xye2R8YjJMdh8FgMKYY9913nwPAef311+vep7W11Vm2bBn9fenSpc6aNWscwzAq7nfNNdc47e3tjmVZjuM4zkMPPeQAcJ599tmK+2WzWSccDjvXXnttxe2WZTmnnXaac9ZZZ9HbvvSlLzkAnC9+8YtV45o7d66jqqrT3d1Nb9u0aZMDwGlvb3ey2Sy9/bHHHnMAOL/73e+qtj1ymx6Pxzl06BC9LZ/PO+Fw2PnYxz5Gb3v22WcrXtvw8LADwPnOd75TNc5yVqxY4axbt67q9q6uLgeAc99999HbLr74YqehocEZHBwcdZsjueKKKxwATqFQGNf9h4eHnfPPP98B4ABwJElyzj33XOfOO+900ul0xX2/853vOACcl19+2XEcx/nmN7/ptLe3O47jODt27HAAONu2bXMcx3Fuv/12B4CzY8eOCY2fwWDMLFgFmMFgTEscx6E/79u3D7t27cIHPvABAIBpmvTfVVddhb6+PuzevXvU7b388suIxWK48cYbKx5v2zauuOIKvP7668hmsxWPede73lVzW6tXr0ZnZyf9fdmyZQCACy+8sMInTG4/dOjQmK939erVmDNnDv3d4/FgyZIloz42HA5j4cKF+I//+A9861vfwsaNG2Hb9pjPVY9cLofnn38e119/PZqbm496O+MhEongxRdfxOuvv46vf/3ruO6667Bnzx7cdtttWLlyJYaHh+l9y33A5P9169YBcN/jlpYWaoN47rnn0NraSt97BoNxcsIEMIPBmHZks1lEo1F0dHQAAAYGBgAAn/nMZyBJUsW/T3ziEwBQIZhqQbbx7ne/u2obd911FxzHQSwWq3hMrYgtwBWe5ciyPOrthUJhzNcciUSqblMUZVT7BMdxePrpp3H55Zfj7rvvxumnn47m5mbccsstSKfTYz7nSOLxOCzLOqpFekS8d3V1TehxZ5xxBj73uc/hoYceQm9vLz796U/j4MGDFQvhVq5ciaamJjz77LPU/0sEMAC8/e1vx3PPPQdN07B+/Xpmf2AwGCwFgsFgTD/+7//+D5Zl4cILLwQANDU1AQBuu+02vPOd76z5mFNOOWXUbZJtfP/736+bfDAycWI6LKKaO3cufvaznwEA9uzZgwcffBBf/vKXoet6hW95PITDYQiCgO7u7gmP4/LLL8dPfvITPPbYY/j85z8/4ccDgCRJ+NKXvoRvf/vb2LZtG72d4zisW7cOf/zjH/Haa68hkUhUCOB169bhy1/+MtavX49CocAEMIPBYAKYwWBMLw4fPozPfOYzCIVC+NjHPgbAFbeLFy/G5s2b6SK3eiiKAgBVldPzzjsPDQ0N2LFjBz75yU8em8GfYJYsWYIvfOELeOSRRyqaTo
xVSSaoqop169bhoYcewh133EEnDePhuuuuw8qVK3HnnXfimmuuqZkE8ac//QkXXHABvF4v+vr6albYd+7cCQC0+k+46KKL8Mgjj+A//uM/0NLSUmFxWLduHaLRKL7//e/T+zIYjJMbJoAZDMaUZdu2bdSLOzg4iBdffBH33XcfBEHAo48+WuFD/fGPf4wrr7wSl19+OW666SZ0dnYiFoth586d2LBhAx566CEAoMLrJz/5CQKBADweD+bPn49IJILvf//7uPHGGxGLxfDud78bLS0tGBoawubNmzE0NIR77733hLwPR8uWLVvwyU9+Eu95z3uwePFiyLKMZ555Blu2bKmowq5cuRL/+7//iwceeAALFiyAx+PBypUra27zW9/6Fs4//3ycffbZ+PznP49FixZhYGAAv/vd7/DjH/8YgUCg5uPIZ3bZZZfhnHPOwT/8wz/goosugs/nw6FDh/Dwww/j97//PeLxOAC3Yjxr1ixce+21WLp0KWzbxqZNm/DNb34Tfr8f//RP/1SxfSJqH330Ubz73e+u+Nupp56KSCSCRx99FJ2dnVi8ePFRv6cMBmNmwAQwg8GYsvzd3/0dANcr29DQgGXLluFzn/scbr755qpFWBdddBFee+013HHHHfjUpz6FeDyOSCSC5cuX4/rrr6f3mz9/Pr7zne/gu9/9Li688EJYloX77rsPN910E/72b/8Wc+bMwd13342PfexjSKfTaGlpwerVq3HTTTcdz5c+KbS1tWHhwoX44Q9/iCNHjoDjOCxYsADf/OY38Y//+I/0frfffjv6+vrw0Y9+FOl0GnPnzq3bKe+0007Da6+9hi996Uu47bbbkE6n0dbWhosvvph6muuxcOFCbNiwAd///vfx6KOP4t5774WmaWhvb6dRZqFQCADwhS98Ab/97W/x7W9/G319ffR+l156KW677baqRWzLly9HW1sb+vv7K+wPgGuRuOCCC/DYY49R2wyDwTi54ZzypdQMBoPBYDAYDMYMh6VAMBgMBoPBYDBOKpgAZjAYDAaDwWCcVDABzGAwGAwGg8E4qWACmMFgMBgMBoNxUsEEMIPBYDAYDAbjpIIJYAaDwWAwGAzGScVJlwNs2zZ6e3sRCASmRRtTBoPBYDAYjJMNx3GQTqfR0dEBnp/8eu1JJ4B7e3sxe/bsEz0MBoPBYDAYDMYYHDlyBLNmzZr07Z50Api06XzxxSNoaAie4NEwjjWmYYDLH8KsTh6SdNLt7icdhm7i0CEOfHAWREk60cNhHGPY9/vkwjALOJQ6DN4/FyKvnOjhMI4xiWQCF6y6oG579bfKSXfEILaHhoYgwmEmgGc6hmbAEvwIhhRIMhNEMx1DM+D3W1AagpAU9nnPdNj3++TCMHLwO1EogRAkyXuih8M4ThwruypbBMdgMBgMBoPBOKlgApjBYDAYDAaDcVLBBDCDwWAwGAwG46SCCWAGg8FgMBgMxkkFE8AMBoPBYDAYjJMKJoAZDAaDwWAwGCcVTAAzGAwGg8FgME4qmABmMBgMBoPBYJxUMAHMYDAYDAaDwTipYAKYwWAwGAwGg3FSwQQwg8FgMBgMBuOkgglgBoPBYDAYDMZJBRPADAaDwWAwGIyTCiaAGQwGg8FgMBgnFUwAMxgMBoPBYDBOKpgAZjAYDAaDwWCcVDABzGAwGAwGg8E4qWACmMFgMBgMBoNxUsEEMIPBYDAYDAbjpIIJYAaDwWAwGAzGSQUTwAwGg8FgMBiMk4oTKoBfeOEFXHvttejo6ADHcXjsscfGfMzzzz+PtWvXwuPxYMGCBfjRj3507AfKYDAYDAaDwZgxnFABnM1mcdppp+EHP/jBuO7f1dWFq666ChdccAE2btyI//f//h9uueUWPPLII8d4pAwGg8FgMBiMmYJ4Ip/8yiuvxJVXXjnu+//oRz/CnDlz8J3vfAcAsGzZMrzxxhv4xje+gXe9613HaJTA/v170N19kP6+cuVahMORivvEYlFs3frmMRvDyQzHcRN+TGtrB045ZcUxGM30xnEc2LYN0zRhWTYsy6I/m6ZJ78dxHP1X+r30Wdi2A8ep/mc7dtnvqPybbdccU63Pd+RtY/3e2BBCMBiY+BvCYDAYjJOSEyqAJ8r69etx2WWXVdx2+eWX42c/+xkMw4AkSVWP0TQNmqbR31Op1ISe89ChA1i3bikcx6G3zZ27EL/73SsV93vHO87DgQN7JrRtxrHlwQefxVlnnDcp27JtG339A9h/4BD27T+I/V0Hkc5k0RAKIhQMQBRFDA4NY3AoinQmA7/Ph4DfB0VREE8kMByNIxaLo6BpME0LhmHAMEyYlgnTtMDzPERRgCiKsCwLum5A13XohgFDN6DrBizbqhpX+X451m1E8M5EFFnGKy88jhVLlhzT58nlsti3bxei0SH4fH74/UGIoohYbBjR6BBSqURxssCD4zhoWoH+4zgOgiBCEAQYho58Pod8Pgee5yHLCmRZgWVZyOezyOWyyOdzyGYz9Odczr1d1zXYtg3HsWHb7mfqODY4jkdzcxva22ehubkVhUIe6XQSyWQC6XQSqVQCmUwasqwgGGxAMBhCMNiAUKgBoVAjFMVT8zWT53InShZs24ZtW9A0DbHYMOLxYaRSyeIYOAAjJ07uP57n4fP5EQo1IhhsgK7rSCRiSCSiME0ToiiC5wV4vT5EIs0Ih5vg9wchSRJEUSq+bwYsy0RTUws+/OFbwGHik+OpTiaTRT5fQGNjCKLonqJ1Xcfg0DBSqQwCAR+CgQD8fh8EQah4rGEYOHjoCEzTgiC4+2A2m0MylUYqnYYoiAgE/Aj4fQgE/O5xKuBDPl/Ake5eHOnpRTKZBuB+dqIowOfzwquqEAQByVQK8XgSqXSG7g+OAwQCPjSEggj4/ShoGlKpNL0PmQy7+1GtibJDJ9SCwMPn9cLn84LjOESjcQxHY/D5vPjXf7kFPFu1xJhEppUA7u/vR2tra8Vtra2tME0Tw8PDaG9vr3rMnXfeidtvv/2on/PQof1wHAeyrKCzcy66uvbg0KH9eOmlF+HxqPQkR8Tv/PlLjqpiyZg8ensPo1DI4803X3lLAthxHHzqX76Ep555AQcPHkG+UJjEUU5NOI6rKaDH8zie51xBUlYt5opiiOPJz5WPG/lM4xL0I37VdB2aruOJP/15UgVwMpnAhg2vYNOm17Bx42vYtWsrenoOT9r2jwV9fd3YsuWNMe83MNB7HEZzbIlEWvE3f/3eEzoGx3FgmAaOdPfi4OFuGLru3l78m2Va0A0DXV2HsXPPXuzbdxD5gjsZ4jkOHM9D4HnwAo9kKo0jR3oRiyfo9v1+H0RBQCJZXbjheR5trc2YM6sDTU0RdB08jL37uqAbxnF69ceXzo5WfPhD7z7Rw2DMIKaVAAaqL32Sk2M90Xnbbbfh1ltvpb+nUinMnj173M9XKOQBAPPnL8bTT2/B4sX+YtXGQUdHBwBXcAGALHvwxz++Cb/fP/4XxJh03v/+y/H880++5Wrn/gMH8b0f/oz+7p5wmtDZ3obZs9oRCgWQTmeRTKVhG
AYaG0JoijQiGAwgny8gk81B0zSEgkE0NobQ2BCCx+OBLEtQZBmiKEKS3Iqgbdv0ZMkLPDyKAkVRoCiy+09WaEUHAEDsBUU1yHM8ON6tspVbF4CSDUEURciyDEkUIQoCBEmEwPN0HKTaVF6ZAcptDzb9XxAE8DxPn49Q7/tJ/saVVQdrit0R6nY8Fb53f+Dv8cSTz0Ivio+JsnPnVvT29mDWrDlobe3Axo2v4n/+56f4859/X3ObgUADGhsjKBTyKBTyME0Dfn8QwWADAoEgAK5YnXUgyzJk2f0s3SqYBcuyIEkyFEWBLHvgOA5M04Cua+B5Hh6PCo9HhaKoUFUvvF4ffD4/vF4fVNULWZbB8wKtNAuCQPehgYFe9PYeQSw2DEXxwO8PIBBoQCAQQGNjBIFACLZtIh6PIR6PIpGII5VKIptNwTBMVE9JyOSm9Hm7/9yrFQ0NYYTDTWhsDNMxlfYbgOP44tUHC5ZlIp1OIZGIIZVKQBRlhEKNaGhohKp6YZomTNNALpcpji2GbDZDb7dtC4Ig4o03/oKBgR4cOtR1VJ93PSzbQjKZhGEYtLqpaTre2LAZBw4epqI1ny/g0JEeHDrcjSPdfeju7YemHd2+NxaZTJb+LAg8VI8HhYIGs1iJ7+0bQG/fQMVjyPHFdmw4tgNFkeHzqlBVFbZtI18oIJ8vIJcvVIw7FPSjuSmCYMA9fzlwBbym68gXNFimBZ/Pi0DAB7/PC1EQwQtuSTaXyyOTzSGXy0OWJfi8KrxeFaIoVkyO6WR4xLGA49331rJtFPIF5AsaHMdBKBjAjl37cPBwN/bs3XdM3mPGycu0EsBtbW3o7++vuG1wcBCiKCISidR8jFIUEkdLPp8DAMiyAo7j0NbWia6uvRgeHsC8eYsAAMPD7gEoEmmuuiTFOP6Qz6Ce53S8xBNJAEC4sQEP/vKHWLxwAQIBPzweDzyKhx68DdO1KwiCAKW4nzCOH6qqAgAMfeKVry1b3sTVV58Nu2gvcQVbab9pamrDwoVLsXjxCixbthKnnroGs2bNgcfj2gVccWa6n72iQJZluv85jlO0PbjikTE5fPSj78Yf/vAING1yrsjoho7tO3bhP771Qxw4eAQ+rwc+rxdD0Rg2bd2JQkEbcxuCIKC5KQxRFOiVDq44ORR4Hk2RRsye1Y55czoRCoVgWzbsooWFTDa9Xi/mzu7EwoXz0BQJIxqLY3BwGNlMFq1tzehsb4fX61oD8loB0WgMB7oOYf+BQxgcGkZHeytOXX4Klp6yGKqqVthPBEGAwAsABzp5tmwLuq4jnc5AEHj4/X4osgJJluj+ynEctdnYlg2O5yCKIkRBrJhwlyY5FhzHqZgske2MXFdA1gVU3J/j6ftCJtqfve0r+M49/4n+gSGYllnvI2AwJsy0EsDnnHMOfv/731fc9uSTT+KMM86o6f+dDEgFWJbdE15Hxyx0de3F4GAfvc/QkCvKI5EWJoCnADxPBPBbqwCnUq4XriEUwIVvP989gdRAEiVI4rHZ/xhjo8gyAMAwJ35yvOeeu2HbFmRZgWHocBwbHo+Ks85ah0suuRbnn38xmpqa4PP5qNBmnFg8HvdzKF/bMREGhwbR19cHwzCQTmfxi/95BL9++Pd1q7iRcCNOWTIfjgNYluvX72hrQXtbM1qaIuhsb8XcuZ1obGyEz+ejjyMCz7ZsCKKAUCgEv98PVVVL4s92qJDkeb7iOLJo4eivY9GChTj7zDMBuCKe47gTexw6mqeucUgVOKHiWNvW2gIAGBwcRio9sTU8DMZonFABnMlksG9f6bJGV1cXNm3ahHA4jDlz5uC2225DT08PfvGLXwAAPv7xj+MHP/gBbr31Vnz0ox/F+vXr8bOf/Qy//vWvj9kYyyvAANDe7tonhocH6X3Iz+EwqwBPBUoV4PF7WU3LhKEbFSInXbz86PWq4DlWwZuqyLJ75tUnWAHu7e3Gn/70GADgc5+7G/Pnn4J4fBiBQAhz587D7NmzEQqFJnu4jLcI+Y5OtAJsmAa6urrwn/f9DzZv3Ymuw904dLiH7jcrV5yCyy65AIZuIl8owOfz4rRTl6IpEkIwGATP87TiL4kS/AE/wuEwAoGAO0HynLgJkizJJ+y5jzWtLU0AgGgs7h6TmcOQMUmcUAH8xhtv4KKLLqK/E6/ujTfeiPvvvx99fX04fLi06GT+/Pn4wx/+gE9/+tO455570NHRge9973vHNAKNVICJjaKjYxaAku2h/OemplZ2qXMKQD6DiVSAh4aGMDg4iNNWnUZvIxVgr1dltoYpjKIUK8DG+CvAjuPgpz/9LgxDx9y5i3DxxVego6MDlmWB4zgEAgH2mU9RShXg/Jj3dRwHhUIB6Uwa3d3duOtb9+LB3/yh4j5zZnfiHz7yAaw6dTF8Xp9rDdB02I4Nr9eL2bNno6OjA7IsQ9d1VwBLEjx1UjMYk0trSzMAIJFMY3hwEI0+NillTA4nVABfeOGFo644v//++6tuW7duHTZs2HAMR1VJqQJMLBBzAADRaKkCTCwQLS1t7KQ5BShZIMbvAc7n80ilUtANnVZTkkUB7PN6J3+QjElDLlogtAksguvp6cFvfvPfAICLL/5rRCIRtnh1mlASwKNXgOOJOA4cOIB0Oo1CvoCnnn2Jit8P3HAdTl+9EvPnzoLP6/r2Z8+ejdmzZ8NxHOTzeeiajlBDCH5fab84kVXekxVigUgkU0hn0lDyMjzBEzwoxoxgWnmATwSlCrArgNvb3QpwuQAmFeCWluoYNsbxp2SBGH8FOJPJQNM0FAoFKoBj8TgAwO9jAngqQz3A47RA9PX14YEHfo7h4QH4fAFceOEVCARYE43pgqq638exBHD3kW4MDAwgEolg996D+N69rpXu5htvwAdu+Gvk8/likkUD5s+fj0g4QgsY5aKXcWJpbXUtEMlUBrlcAdlsDuETPCbGzIAJ4DHI5VwfKFn13dHheoBjsSF6HyKA29o6j/PoGLWYaAqE7dgVAjgYcMsLJHvT52cCeCpDPcDG2BXgnp4e7Ny5E08//TsAwPnnX4bOzlm0isyY+oxnEVw2l0U0FkU4HMbWHXvwmf93B0zLwlWXXYh3/83l8Hg8mDdvHhoaGlx/L/P4T1mamyI0jSKfzyOZ5DH+IFMGoz5MAI9B9SI4twKcTieh6+4BOJl0K4VMAE8NiAXCssYngLWCBl3XYRgGCmXNLpJFARxiLXanNMSfP9YiuKGhIezcuROx2CA2b34NALBu3VVobGw85mNkTB5EAJOrc7WIx+LIZDJ45Ld/wg9/+kvYto21a07Fv3z6YzAMHUuWLEG4kdURpwOSJCHc2IBoLI6CZsJJJlDQNEgSK0ww3hpMAI9BLucKYHLQbWhwW4ZqWqEiCUKWFYTDTSdkjIxKJmqBKGgFaJoGVVWRTqfp7aQC7C+LNmJMPWSJVIBHF8DJZBKWZeE3v/k5AOD008/F3LkLWNLDNIMci0lL6JHYjo0DXV34yl334LU3twAArrvmr/C5Wz+OeCyGJYuZ+J1uNDdFEI3Fkc0X4BSAdDqNgJ9N
XBlvDXbdZwzyeRKF5YogjuPQ2up2gBse7qf2h8bGpmOWRcyYGCQFYrwtfTXNPZGqqopUKkUfl0y5AjgQYAJ4KjPeFIh4PI4DB3bhxRefBM/zeMc7bkRjYyPL951mlAvgWt0ek8kk/vfh3+O1N7dAUWTc/oVP49+/cCvyuRwaGxsxa/as4z1kxluERKENRxMA3DUbI8lms+jv70c2mz2qdu6Mkw9WAR6DfN69zOYrWxTR0tKGw4cPFMWvu2iCZQBPHSZcAS4UwHM8FEVBPp9HQStA9ag0Bi3IFkhNaeRxLIIrFArIZjP41a9+CAC44op3oq1tFpqa2FWb6UZ5CoRlW1VVnGg0ij17DwAAPvyh6/E311wGraDBMAysWLGCxZdNQ1poFnACi5QWJItdOssZGBjAzp074fV6EQgEMGvWLLS1tR3voTKmEUwAjwHxAKtqqQrY0kIqwKUs4MbGJpYBPEWYaAxaOp2BIAqQZRmJeAL5fB6yLCOddqv/wQBbET6VoRVgs74AzufzeOqp36Graw98vgBuuOGj4DgBwSDLU5pukBQIw9CrvuO6oaO/vx+Hu91OnacsXgDLsjA0PIQ5c+agpbnluI+X8dYhFeBYNA5JkpDJZqDresXi1Xg8Dq/XC5/Ph1gsBk3T0NzMClOM+jDFNgaFAhHAJcN9e7u72G1oaKDMAhFhX7QpAvkcal0eHYnjOMhk0pBlGYIgwHZsFAoF6JqObNb97ANMAE9pSh5gE7ZTe9IzNDSIRx65HwDw/vf/PURRRjAYZNm/05DRLBCJRALJRBIHD3cDABYvnIfBgUE0Nzdj0aJFLKd9mkKygKPRBBRZRkEr0PU5gHuFJ5PJwOPxwOPxIBwOI5/PI5vNnqghM6YBTACPAbFAkBg0oJT2MDw8wNogT0EmEoNGFsCRJAGe46kNIptjAng6QD47wzDqev9+97v/RTqdQGfnHFxzzXuRy+XQ3t7OBNE0ZDQBnE6n0d07AMMw4fWq8CgSVK+KU045hVkfpjFUAMcT4EURtmVXCOBcLodCoUDP07IswzCMaS+A9+3fh1gsdqKHMWNhAngMSLvNygqwu4hiaKifdoFrbGxiAniKULJAjF0B1jQ3Ao0sYJRkCclkErquI5d3I9GYB3hqQ3KADcOsO+nZu3cnAOCccy6GYRjwer0Ih1kSwHSk3AM88vPWNA2HjvQAAObPnQ3LsnDKKafQbG/G9KStzRXA8XjR+8uhSgDbtl1xDuZ5HqniQubpiG3biA5HK14nY3JhAngMShXg0kpxIoDdCrBrgQiHm1k1aYpADoLjWQlcKBRgWRZE0bXDK4qCbDaLVCoFTXMbK4RYBXhKU0qBMGoKYE3T0Nt7GIB79SaVSqGlpQVe1uJ6WlIRgzaiAqzrOg4d6QUAzJndjlmzZqG1pfW4j5ExuXS0uZ9hNJ4AAKgeD2KxGD3GJxKJqhQmVVURi8XqTooLhQK2bdtGz/GTheM4MMaIZBwPmqbRq5GMYwMTwGWkUikMDw9X3EbabVYKYLcPTTIZRyLhXp5obm5jFeApAlmMON4KMIfSxEWWZei6jt7efnqbz89i0KYypHW1XqcCnMvl0N/vVgWbm9125a2tTBRNV0oCWK+yQOiajq6DRwAA82Z3wscyvGcEbS3NAIBkMg3LsqHICrU9mKaJRCJRYVMEXAGcy+Xq2iASiQR6e3urzvlvlYGBAezatestb4dcncxlWQX4WMEEcBnJZBJ9fX0VtxEBXJ4VGok00c5wgNsEIxAIMQE8RZhIJ7hMJgNRKoWhyLIMQzeQKMbsSJIIj0ep93DGFIBUgE3DgGNXV/1zuRy9UuP3N6Chwf3HmJ4QAew4NnS9VB1zHAeGaaDrkCuA58+dxbLZZwjNzaQdsoNUyl3sRiqkI/2/juMgEU/QYkY9ATw8PIxsNovu7u5JqdgShoaGEI1G33JlmXQnzeVyLNf4GMFi0MowTRPpdBqO44DjOFiWBcNwL4OXV4AFQUA43Iz+fnelcSTiLoBjMWhTg/HmADuOg1QqVRGlQ2wsiaJ3zKuq7HOd4hAPcL0UiO7uw9B1DTzPw+v1o6Ojg01WpzHl6zHy+RzQ4B6bTctEKp1B/4Bb0Zs/bza9OsCY3oiiiIaGIOLxJBLxNMKtPBzHQS6XgyiKyGQyeOrxp7B9y3bs3bUXmUwG1737Olx3w3VIpVJVecD5fB7RaBQtLS1IJpMYHh5Ge3v7Wx5nPp9HLBZDLpdDOp0etcmObdujnls0zc2uJkK4/DxFxHWt7Y+1XUaJo3qXEokE/vM//xO33XYbXaG4YcMG9PT0TOrgjjeGYcAwDGiaBqBU/QVKneCAkgAmRCIt4HmenVSnCONNgdB0DbpWmSUJALzA0wqw16syb/cURylejTHN2haIPXt2AHB9+sFgCJFI5LiOjzG5yLJMv5Pl1T3TNLFv/0EAbm5sQ0MQkswqwDOFprDb+jgRdxsUCYKAVCqFTCaDl59/Gb/8r19i4xsbaZe4rZu21vUBJxJu3rvf74ckSejp6Rl3bvxoJJNJ5HK5MRfgpVIpbN68GYVCfX9vNpuFKIowTbOqQt3T04P9+/dXPaa/v/8t2S8KhcKkV5tt256yixEnLIC3bNmCJUuW4K677sI3vvENJBIJAMCjjz6K2267bbLHd1whMy1dd6u+mUya/s3jKVUdBEFAU1MpUJ0sgGMCeGow3hQITdOg6VqVAFZkBdHixM6rethseopTqgBXL4LTNA2HD3cBAJqa2tDUFGGL36Y5HMfRK3LZbKklrmVZ2N/lLnZcMH8ORFFkFogZRHOzO3FNxFwx5fF4kEwmEYvFEB2KAgDOueAc/MsX/wUAEB2OUh/wSDvC8PAwBEEAx3FoaGhANBadlLgxsl1VVTE8PFxTVJumiQMHDmB4eHhMAez1eiuKcoRUKoXBwcEKYWnbNrq7uxGPx49KzJumiZ07dyIej0/4saMxMDCA3bt3j/paTxQTPrPfeuutuOmmm7B3794K0/mVV16JF154YVIHd7ypJ4BFUaqKV4lESotoGhuZBWIqMd4KcKFQgG3ZNAGCEAwFIcvuvq2qHvAc+1ynMiUPcHUFOJfLoa/P9YQ2NbVWtDRnTF+UYqZvOl0SAKZpYv+BQwCABfNmQxTFqsktY/rS3FQUwAn3vOzxeKgHOBZ1xevpZ56O0886HQCQTCTBcRw0TaNVYaBkfyBNcMgkqb+//y1VP/P5PAYHB/Gn3/8J/b39yGazNf3H3d3d6Ovrg2maVcKWYJomBgcGsW/3Pti2XVEBNk3TTSpKpzA4NEhvj8ViiEajNQXzeEin07QyfjToul5V6dV1HQcPHkQsHpv0tI3JYMJn9tdffx0f+9jHqm7v7OxEf39/jUdMH0zThK7rdOch1QVZVioug3Mch5aWkqcoEmmGKIrsUvkUoZQCMboANvTaCx8EQYBZXEDn86rgePa5TmWIANYNs2oRnHtSche2Nje3sYrgDIFWgHMlgWGZFroOuesy5s3phKIoEHh2VW6mQAVw0QJBFrlpmobooFsBbmlrQTAUpAub47E
4eJ5HuuxqbjweRz6fr7gS1NjQiIGBgbd0qT6VSuG5p57D//78f3HfvfdB13Wk0+mK+8RiMXR1ddEW7KTYNhJN0/CT7/0Et3/uduzcurNCABNvcMAfQF9vH7UtEAFfrmEmQjqdRjKZPGqhGo1GsX379opKel9fn1uRtuyZIYA9Hk/NnWT37t1obm6u8YjpAcnusyxrTAEMAC0tJcN8Y2NTVRWRceIYbwXYQf3ZPmmDzDzAUx+y0Mk0TVg1LBCkWU1zczv7ns4QyNXH8pOqYRo4WBTAc2d3MKvLDKO1pQkAEC9aIMhx2bZtDA0OAQCaW107YqQolmPDMXg8HvT19uHIkSPIZDIYHh6GJEl45NeP4MFfPgjA3Z90XZ+QDSKXy1VooeHhYRw8cBAAcKjrEHiBRzKZpH/XdR0HDhyAaZoIBAIQBKFuQoWmaTjU5V7N6O3prbAPEAEcDoeRyWQQjUWRSqUwMDCAcDgM27arhHWhUMCePXvqvj7HcTA4OEgXhh8Nmqahr68Pu3fvRjqdRi6Xw6FDhxAIBMDz/JTsyjdhAXzdddfh3//93+mMhOM4HD58GJ///Ofxrne9a9IHeLwgC2gEQUCuWFUoCWC5SgS1t3fSn0OhMKssTSHG6wEe7XIXEcA+L0uBmOqQCrBb/ais6huGgaEhtwLc1NTKvqczBLImo1AmgA8f6UE2l4coCGhvbRl1BT5j+tHSXFkBBoBIJAK/z49U0hVtLcWOcUQADw8NIxQKwbIsbN26Fa+//ror9GwHP/vhz3Dfj+5DT7FzoM/nQ19f37gi0RzHwb59+/DGG29g3759SCQSiEaj6C5OwFLJFEzdRDQapdpi//79GBgYQFOTK+RlWa4rCvP5PKLDblU7nUxXdIPLF/JwHAeCIEBRFPT29KK/vx+GYdCJ4cgKcCaTwYEDB7Bhwwbs3r27qhpLmj8Fg0Fks1mYpjnmezCSbDaLQCCAVCqFXbt24dChQ8hmswiFQlAUha4Xm0pM+Mz+jW98A0NDQ2hpaUE+n8e6deuwaNEiBAIB3HHHHcdijMcFy7Jg2zY8Hg8yGXenJEK4VgW4tXUW/ZlVgKcW460Aj/b3DKkAsxi0KU+5z7NQqDzwZ7NZJBLuiaStrZMJ4BlCqR1ynk5kt+/YDcCNPxNFgaaDMGYGba2uuE0kShVKRVGQLCb2+Pw+2vikvALM8zzC4TBmzZoFr9cLWZaRKtvGxtc3AgACgQD1wY5FIpHA4OAgRFHEnj17sHXrVmQyGVq1BYB4NI5cLodMJoPe3l4cPnwYLS0t9PwkSRJyuVxNsdnf1w/TcG9PxpPIZrN0Px/oG8DTf3waqWQKoVAIsXgM/f391FbB8VyVwCUVZL/fj/3792Pjxo0Vvmi386mGYDB41B7ibDYLWZbR0tKC4eFhHD58GI2NbnKHopQal4yH45V7PGHVFgwG8dJLL+GZZ57Bhg0bYNs2Tj/9dFx66aXHYnzHDdu2YVkWFEWBrutuB5ZRBHBTUzOuuOJd8Hq98PkC7MQ6hRi3BaKY91wLMuP2+ZgFYqpDKsCAW/nwljWE6u09Asdx4PGoaGxsYt/TGQIRwAWtAKf4Pd+xay8AYPHCeQDAItBmGC3UAlHpqx3sdxeCtbSWJTM1hQGAVlEJqqpCVVVs27SN3rbh9Q245p3X0FSIgYGBMe2c/f39sCwLDQ0NCIVCSCaTSCVSKORLAq+vpw+Rlgj6+/vR398Pr9cLRSlvoCUjk8lA1/WqAlrX/i76czKRpBGtsizjV/f9Cv/7i/+FXtDxkU98BIBb4Z01yy3KyZJcIW7J390cdC9UVUVvby8OHTqE5cuXg+M4xGIxmppCBPBEuiiSx0iSGxjQ2trqHouLNiRFUZBKp5DP56s69o3ENE3s2rUL7e3tx3z9zVGXLS+++GJcfPHFADAlS9sThVym8Hq9yGaz0HXdDVkHIMvVUViiKOI977kZHR0d6OvrG3cFOJ1Og+M4ugKVMfmM1wIxso1qOaQC7PN5WQrEFIcksNi2jbxWqBDAPT1uRaalpQOiKLIrNTMEWgHWC7CL3+PefrfbX3tbCziOY5OdGUZrsR1yOpWBZVkQi+sbqQAu2h9s24ZS7N45UgATBvoG6M+b3twE0zQhiiICgQDtEFdPAGYyGQwMDJQqrsUotU2vb6q4X/fhbqw5aw0GBgaoZ7ccSZKg6zoKhUKFX91xnMpKcixO06kEQaB/21uc8LW2tMK2bVqoIZVly7IgCAL19RLxzXEcmpqa0NPTg5aWFgQCAcRiMfh8PnocnWhkGUnQIuK2/Fjbta8LXr8Xju0gn8/TqnA9enp60N/fj+bm5mM+iZ3wmf2uu+7CAw88QH+//vrrEYlE0NnZic2bN0/q4I4nlmXBsizIskzTIEoV4GoP8MjM3/FkABuGgVjMvVxRK5ybMTlMpAKMOhPMcg8wS4GY+ihFG8RIC0Rvr5sL29zcxgTwDIIK4EKBfs8zafd47VE9ECUWgTbTaG1pAs8X2yEnS1XgwYFSBVjTNPT29qKtw01pqieAyWMAIJ/LY9d2t3mEz+dDLpcbNQt3YGAAuVyuSiDv27MPAKj4PnLoCEKhEAzDqFlRJmJz5II1wzDQ19tHf49FYxUJVcODbqfD/Xv3w3Ec8DwPURThOA62bNgCraDR+wOgLaPLq8+KooDneTeirNi5johwnucnnNhAFuaNnHRGh6O45aO34LZ/ug0cx1VVpkeSSCTQ1dV13DKDJyyAf/zjH2P27NkAgKeeegpPPfUUnnjiCVx55ZX47Gc/O+kDPF7Ytg3HceiOpGkaNajXskCUV4TJTjgW5HKHLMuIRqMYHh4+KrM5Y3TGG4NmWRZ4nkcuX8DX/uMevLFhC/0brQB7vcwCMQ0gzTDKD5y2baOvz12U0tzcDo/Hwz7LGQJZ4KZpBZr8kUq7J1fVo0ASJVYBnmHIsoxgwL1ymoiVBCoRs6FG1w87f/58LFu2DIDrAa4FqQCTCfGG1zbQv3m9XvT29iKfzyORSGBgYACxmCtCC4UCent7EQgEqrZJBDDJIe4+3A1FUdDS0lJXH5Cc4nI0TauoUGczWSowC4UChoddAZxJZzA0METv9+arb+Jzt3wOP/neTyqi0AqFAjStuuFTJBLB0NAQBgYGwHEcHNtBOpWGJElV8W1joet6zRbMWzdthWmY6OvpAweuIhVjJIZhYP/+/dB07bgVKiYsgPv6+qgAfvzxx3H99dfjsssuw7/8y7/g9ddfn/QBHi9GClG3AuweUBWl+sQ50QqwaZpIpVKQZRmyLMPr9SKRSEzZFoHTmfFWgC3LAsdxeOrpF/HAI4/jnp/8N/1bLufOgL0+tghuOkAO7oWyk4lt2xgYcFd4NzW1juk9Y0wfKiwQtgXHcZApFiw8HgWiJEISmQCeSfAcj8YG13YQj5eEFBGB/qAfc2bPwdKlSzF3/lwAbgpELQaKdplz150LoFIAE0vAa6+9htdeew0bN27E62+8jldeeQV79uxBOp2m9geC4zjYv8dtTXzmuWcCAPp6S4kStm3jR9/9ER7+n4crHl
crCk3TNBrrRkjGk9B13V3UG0vQ2w/sO0B/fvO1NwEAPUfcts7lAti2qxs+CYKAQCCA/v5+qKqKO/7tDrz/uvcjEU8gm81O6Ap1vTzjXdtKbZlz2dyoC+GOHDni+q+bjl+c7oTP7I2NjThyxO2s9Mc//pEufnMcZ1RP5VSHiCGgtFOOlgM8UQGcTqdp9Zfcn3h1jteKx5OFicag7d1/EADQWzbrJhVgv8/LBPA0gFSAc2WX7izLok0wIpEWJoBnEKrqXn7WNA2OY8MyTWpbUmSJVftnILzAI9zYAABIRMsqwEUPcDgShqq6i5Zb291OrflcviJCDHCP+6TCesW1VwAA9uzaQ6PUJElCQ0MDFEVBU1MTOjo60NzUDMdx0Nffh1AoVLVvDfYPIpPOQBRFLF+5HB6PB7Zlo7/XzSDfvmU7fvvQb/GzH/4MLz77In0cWQhXjqZptLUzIZlMQtM0dB/urtAL5QJ459ad7ntTXJNFRGk+n6/7XQiFQggEAggEAti8YTNMw8SRg0cm3Ewjm83WrNru2LaD/pxOpVHQCjXtFT29Pdi3bx8aGxuPq01twmf2d77znXj/+9+Pv/qrv0I0GsWVV14JANi0aRMWLVo06QM8XhALBOB+AbLZLF0EN54K8GgiyTRNpNNpmKaG/fs3wzDcHZN0sqk3e2IcHROJQeM4DvuK4eWDQ1GYpiuas1QA+9iJdBpQywNcLoCbmtqYJ3QGQSwQuq7Bth2Ylols8aqNUrzCxphZCIKApiZ3AVV/UcBapkUtAeGmUh5/KBSCR3UnvCNtEJl0BvnivrLs1GWYt2AeHMfBpjc20ft4vV54vV4qxkRRRCgUQntb+6j2h7kL5kKSJLTPchtlHTnkFgvffPVNet/v/8f3aWVakiRqbyDk8jkqgAMh97nSyTQymQwOdh2seN4De10BXCgU6BiSiSQcOFRoptNpPP3E03jh6Rdqvq/BYBDpVBq54jkvHosXQwBq+4BjsVhVQ41MJlNlOdI0jY4PABLxBF0IV05fXx927tgJVVXh8/lQyBeOW1FwwgL429/+Nj75yU9i+fLleOqpp2iaQV9fHz7xiU9M+gCPF+UWCEmSUCiUZiq1KsAcx4HjOJimCY7jRhXA2azr4Tl4cCt27nwVW7e6M0BBEGBZFhPAk0xJAI9eASaepX0HDtHfh4ajcBynJID9PpYCMQ0gq4W1sstr0egQ8nn38mJTUytbADeDUFVX4OpaAZblwDQtaltSPQprgjEDEQQBy09ZAADYudW9tB4djsK2bIiSiFBDiB77BUFAOFI7Co3YHxoaGxCPx7H6jNUA3Di08WKZFrZv2U6vehP7w4JFC8DzPDpmdQBwfcBAyWLhUT1Ip9L41te+Bdu2aRJEebW1t7uX/j5/4XwAQCrhRoiR7ZHj3f697vPu2bmHjsW2bJi6W3SzLAt7d+/Fr/7rV7jr3++q8AyX09vdS3+ODkXpOqiRmKaJffv2oaenp+K2QqGAPTv3IJ0qeYf37tpb4QqIDkWrFsINDg5i165dkCQJoVAIAPDDb/8Qt/79rXjid0/UHOtkMuEzuyRJ+MxnPoPvfve7WLNmDb39U5/6FG6++eZJHdzxhAhZAGVJEO7JtF4FmAhY8nMtbNtGKpWCKIrIZhMAgJ6efRgednegWiZ4xluDWCAsa+wKcCaTw1DZ5aa+/kHougGz+MUN+Mefhcg4cZAKsFHWCe7QIbf6EA43QZYVtihqBkE8wLqhwbYt2Fa5APawz3oGwnM8TlvlLm7btWM3LNOiC+CaW5ohCAKd5I4qgIvV4+aWZmiahuWrlgNwRep4K4//8/P/wWc+8Rl8/Utfh23btPo6b+E8yLKM2XPddVLdh7uRiCfo32+/63YoioKNr2/E7x7+XUXqFOBWckk75cZwI412SyVTMAwD/X2upWLZqe770N/bj2w2ix1bS1YDwF04l8/nkc1maaKEbdn43SO/q/l6ygUwqU7X8uoODw8jGo0iHo9T3aLrOjZv2IzbP3c77vzinfS+O7ftrHhsdDjqNi5JJhGNRrFt2zZs3brVfa1l0WhbN21FKplCsKHSZ30sOKqSyJ49e/Dcc89hcHCw6jLzF7/4xUkZ2PGGZOwB7uUOd1bm7gCyrFRVeEn2qGEY4DiurgDO5/MoFArFeJXSzGfLlhexbt17qA+41gpKxtFB3kfHGXsR3MHijJrQPzCEuXNKXf58PnYpdTpA7A3lrZAPH3YFcGtrZ8XJkTH9oQJY0+DYFnTDQL5of/F6PczuMkNZvHAufH4V2Uwe+/buqxDA4FBZASbNMEb4acljmlqb4PV6sWDxAkiyhKHBIRzcfxDzF80fdQy6puPxRx8HALz03Ev4xU9/QQXunHlzIIoi5i9wt9F9uBub3tgEx3Ewb+E8rDp9FW7+5M2455v34P6f3I+rrrsKjuNQsTkwMIAjh13bRHNrM5qa3eYfiXjCbeteXBw3a84sHDl0BPFoHF37uqoEcD6Xh2EYSKVSFROAJ373BN530/uqLEIVAnhwGKIoVnmTLctCd3c3BEFAPp9HJpOBoijQNI1WwDe+sRHdh7sxa84sKoAjTRFEh6OIDkVpS+Q33nwDcFyrSvlYhoeG0d/bD47jcNqa00b9HCaDCSuun/70p1i+fDm++MUv4uGHH8ajjz5K/z322GPHYIjHB13XadmfhEeTnbJWBZjnefA8D9M0qRiuBWl84Tg2NM29rC5JCrLZJPbv3wxRFGnINWNyGHcOsO3g4KFqAZylbZBZJWm6QLrB6Ubpe9Td7VpbWls7aJcjxsygvAJsWQ5SqTSt3vkDPvZZz1BEWcaSU+YAAHZs2UEXwDW1NEHgKye5RDzWqwA3NbsCmOd5nHH2GQCAF56p7ZMt58VnX0QqkaI2mwf++wHEo3HX+jC7A6qqYsnSJQBcDzBJZ1h71loAwNV/czX8fj+0goaebvdKsKZp0DQNR44cQbqYcdzU0kSr2PFYHKZpUjEfbgpjzlz3fdi/Zz9dAOctFmwyqQx0Q0cymUQinqBjz2ayePLxJ6teU7kAHhoYgqIoSKfTFRXxaDSKaDSKSCQCx3FoVJqu6zSbGAD+9Pif4DgOzVY+78Lz6OO9Xi88Hg+aIk1ob2+vEuLbNrsd+ubMnwN/4Ng3C5uwAP7qV7+KO+64A/39/di0aRM2btxI/23YMH4PzVTDMIyi9zNLfSvEAlEvBYIIYJ7nwXEc+vv7KwzemqYhl8vB4/Egn88UHydi5crzAQB7925AoZCpGYbNOHrG3QnOttBVFMBkAlMhgL3quBqcME48JAVCL1tMMjzsnuhCoTCrAM8wiAfY0HVYtoVk2l3BL/A8/D4fZIlVgGcikiRh8SmuvWDb5m3U09rU0lTV6Ia0Rh65CI6I5khzhBa73n7J2wEAzz/9/Jg2iN//5vcAgOs/eD1u+OAN9PbOOZ3geR4+nw8LFi9w/a7pDNa/uB5ASQBzHIfO2Z0A3Mgy1x6ZxeDgIFKpFFIJd1+ONEXQRNo/R+NwHAexqPtawpEwZs+bTcecyWSgeBSsXL0SgGuZc
GwHuq7TdIuGYoLGYw89VpXY1dtT5gGORiEIAu1SB7jFpJ6eHvA8D0mSoCgKhoeH4Tjuc5THzf35iT+j50gP4rE4RFHEmW87k34OHMchFArVnaCSFtVLli0Z9TOYLCYsgOPxON7znvcci7GcUIiQJR86x3H0w/d41JoCmOM4aoEwTROZTIYGZgOgYloURSqAVTWAjo6FaGrqhG1bOHx4V8VzMd46E+kE11Vcpbvq1KUAXA8wiUBTVQ8V04ypDfUAFyeyACoWsbIK8MyiVAHWYdsmkkn3+Or1qe5nfYxbqDJODKIoYlGxArx9y3ZazY00R2hHNAIRwPUqwJGm0mNOP+t0KIqCvp4+7Nu9r+7z7921F7t37IYoirji2ivwoY9+CBdcdAEAYMXKFbBtG16vF36/H5GmCAC36qooClasWkG1wayiza77cDeNQjty5Aj8fj+1aISbwq61A654FAQB8WL8W6Q5gnkL5wEoeW2XLl+KSLP7nOVRaKSifM07rkEwFMRA3wAV5YB7HiyvANuWjWw6C13XkclkkMvlMDg4iMGhQerV9Xq9yGQyyOfzyOfzFe9xIp7Az3/ycwDAwsULEWx0vbzDQ8NjTi5IBXjKCuD3vOc9ePLJ6hL6dMayLLoIjlgfXB+w6ymrZ4EQBIFaIEzThOM4tI0iEcTkpJvLuTuhx+OFaZpoaZlDb5dlGfl8ns7KDMNAPp9n+cBHCRHAo+VS244N27bRddCtAF9QDC/vHxhCNldqgywIzJc9HSCeT003YFMB7H6OouhWLFic3cyBCGDD0KHrFpIptzGC16NCkRWIAqv2z0QkScLcuW6kYTKRpN7XcLj6Kk9rh5sFPFIAkwpwY6QRqqrStsBnn3c2ALeiWg/i/T3/ovPR0NgAnufx2S9+Fv/61X/F33387wCANrtq62yjj1u5eiVMy8ShQ4dgmiY655QqwCQJIpFIIBgMVuQaExtHJpOBV/XSlIXmlmYsXLywYmzLVy6nVd5EPEFbGhMB3NbRhqv/5moAwG/+9zf0calkCtmMm5YTanCTGEj75e3bt+PVV1/F9u3bIfACPc56PB4UCgWk02mk02lqzTjngnMAuN5oAFh66lLaNETXdPo8tUgmkjjU5drWFi9dXPd+k8mEz+6LFi3Cv/3bv+Gmm27CN7/5TXzve9+r+DcdIeKV2BosyyqG5rsVREWpDtDnOA6iKMI0TYiiiEKhAJ7n4fV6kUwmEY/HKxpf5PPuTijLXhQKBRrkns9nK3zA6XQafX196OnpweDgIKsMHwUlC0T9CrDjOIjFE0imXI/2OWe77Sv7yiwQquphFohpAvEAG4YB23YFcKFQEsCsCcbMolwA27ZJO4OpXg9UlgE8Y3Gv5IhYvMztOUCu8oSbw5BluWKS29buCtBoMdoScDOAyeKucCQMn8/nZs8WCtQG8cLTL9Q8d6RTaTz31HMAgGvfeS29XZIknH/h+fAH/OA4DrIsQxRFdHR20PucfvbpKBQK8Pv9yOVymDW7sgIcj8fh8/nA8zyNaQs3hdEYbqQaYs/OPQBc8RkIBtAxq4NmHQOuAFa97vciGU9ClmWk02nqAQ5HwrjmndeAF3js3LaTPg+p/ja3NNP4tuGhYbS2ttJqdjAYREtLC30uEgObSCQwODCIQt7VKR/8yAcr3rNlpy4DOMBXTFMauSCxnO1btgNwFxIGgtVZy8eCCU+Tf/KTn8Dv9+P555/H889XzpQ4jsMtt9wyaYM7Xti2TZsiAO6XjJjSgZLfbCSiKMKyLPA8j0KhAEmS6CyUtDgm3tJSBdhX/BK7O26hkAHP83AcB/F4HLlcDoIgQFVVpNNp5HI5hEIhhMPhY/oezCTIez6aB9hxHBzoOgwAmN3ZjrnFGXk6ncFg8UvqVVUIzAIxLShPgXBABLB7UBZFmQngGUa5ADYNC8nighw3A5h91jMVgXOPxytWLcf2zaXkg4ZGt3NbOa2tbgXYNEykkimEGkK0uhpsCELxuNaohoYGDA4O4sy3nQnVq2JocAi7tu/C8pXLMdg/iL88/xd0H+7Gnp17oOs6FixaQGPIyjEMA5JU6kJIqryA6//VdA3hxjCSyWSFBUIURbS1tcHj8SCbydIqabgpDK/Xi8ZwIwb6B+iiskhzBKIoQlEUzJ47G3t37QXHcVh26jL09bmRZ4l4Aj6fD6ZpUgHcGGlEOBLGkqVLsGv7LmzduBWtV7ZSAdwxq4MuPBseHIYkSaPaxlRVRSwWo93uGiONmL9oPlauXomtm9x4s2UrlsG0TTRGGpHNZBGNRjF3wdya2yP2h1NXn1r3OSebCQvgrq6uYzGOEwqxQJAZpCAIMAwDhYI7uyQH25FIkkRnlqZp0hWNHo8HmqZV7DwkAs3j8UFRFGSz7ltfKLitkEn3Oa/XS6uOfr8fmqYhmUzC7/ezaJ9xQt6/0SwkjuPgwEHX/7tw4Vz4fT4EAn6k0xnsL16GUVUPBJEJ4OmAp6IC7FZviAVCkiT23ZlhVAhg26ALfbyqyrzeMxiOd4tUK4rZvYCblysIQtV33KO6ldJ0Ko3YcAyhhhCtera2ueJYEAR6HpcVGedecC6e/tPTeO6p57B75278/Cc/h1aozOl/xw3vqGmnIgKYjIM0sWhtb8XsubPR19cHRVFgGAbtFJdJZ6g4B0r2jGAoCI/HA4/qQUOkAQP9A9i9czeAkgD2+/1UAM9dMBc+v48K2EQ8AUVR0NjYSL8bJFFi1ZpV2LV9F7Zs3IJLr7yULoDrmNUBrWj7JHFro6GqKpLJJM0ZJu/pFddega2btqKppQnNrc3o7e1FpCmC7kPdo1aAiQBeedrKMZ97snhLRikiMKa7t840zYoKMIAqD3Atyi+PO45TEYU2cjZKLBAejx+KokCW1eLjbOh6Hh6Pt2aVihjkNU1jJ/FxMp5OcOX+30UL5gEA2lubXQF8wK0Me1UPy2aeJtAKsGEADqkAF1vjKh6WADHDoIvgdA22ZSOVLi6CY9/ZGQ1Zk7F0xSngeR62baOltQWWZVWdcwVBQENjA9KpNIaHhjF/0XwqMIlYI5VUYkN8+6Vvx9N/epomPbjPtRSr165G5+xOzF80HwsWLag5NsMwKtonn3raqfjEP38Cy091xTrHcfB4PDRJoaW1BYMDg+g+3E0FMBHoLa0t4DgOqkdFJOIubNu9wxXA4Yjb8tnr9WLV2lV45k/P4O0XufYNYh0gVd9ELEFfZ76QRyAYwKo1q/DgLx/Elo1bAAC9R1wB3NbRhmzWrT6Xx5qV8/r61/GNr34Dn/nCZ3DmOWdWZBOTph3rLl2Hgf4BLF2+1O1vIPB0QeBIPzYhl8vRLOHlK5fDcqzjMpE9qiPFL37xC6xcuRKqqkJVVaxatQr//d//PdljO27Ytl0lYN0YkNEtEOSkS/zD9bdvoVDIFrflp33GiQgm7VprQdoskx2TMTblneDqVYHLEyAWFS/JtLW6K26JNcKrVi9+ZExNyMnPMAzYxQYoRABLksyqgjMMGoNm6DBNq+Tb93pY6/IZDDnPen1e2rCiuXjcHjnJFQQBjWE3
tYDEh1GBWRRroihCVVV61XbNGWuoiPT6vPjHz/4jvnnvN3Hj39+IS6+8FAsXL6x7TtB1HT5fqXOoqqp42/lvw8LFC2EYBgRBQCQSgaqqKBQKFQvhCGR8za3NtDpNkh1IJZcs3pMkCStXr8Svf/9rXP/B62GaJl0El81kYRgGfd2hxhAKhQLy+TyWr1wOQRAw0DeAgb4BWgFubW+l72W9CvDvHvkdUskU/vzEnwG4V9eIWC6vqr/vxvdhzZlr3CvrklwSwHUqwDu37oRt22jraEOoMUSj1o41Ez5SfOtb38I//MM/4KqrrsKDDz6IBx54AFdccQU+/vGP49vf/vaxGOMxx7IsOI5TIZYcx4FRDNUfzQPsOA7t6V0PInB5XoDH41ocAoEAFMUVwEQc10OWZRQKBRhlGaeM+pTHoNUTwG4FuGiBKArg9uJBsVD0fvu8XnYynSYo1ANsUguEppUsTKwCPLMgFWDHcWCYJtJF36RXVellcsbMQygrUp12utspjOThjixCCYKAhnADgFLlsbwNMklykmUZXq+X2hY/+2+fxbvf/278+Jc/xlXXXVX3ikL5OiHA1RHlAtjr9cK03NgzXdehKAqCwSBddFe+EI4w2Fdq7EFyjUmcG6GhsYEmTZDfiW2TJFMAbqpCPBan9/H7/cjn81C9Ko0Z27JxC/UAt7S10IWD5bm+BF3XsXWj6+3du3uvO86mJmRS7tWX1vbWmo8RRZGK43oVYOIZPvW0U6mV5HgI4AmfFb7//e/j3nvvxYc+9CF623XXXYcVK1bgy1/+Mj796U9P6gCPBySbz3EcOrszyjpK1RPAJDXCcZxRT7DE/qCqftpBzu2I4kM6HRtTAJOUCbLQjjE65RaIegL4yJFe5PIFCIKAecWZeGtx9kvweBR2Mp0mVKRAFD/z8kWs7Hszsyhfl6EXCsjl3MkOs0DMbDieAzjAsW184MMfQFt7Gy667CKk0qmq77ggCGiMuBVgUnkkFdamlqaK2LSGhgYMD7ui78xzzsSZ55w55liGhtwqaUdHB93nym2KsizDKSbS6LoOv98PSZIQDocxNDRUuwI8UMo1JuKciFJCOBKGoih00T1JotJ1HR6PB8FQEIl4Aol4gjYBCTWEoKoqzQdetWYVdm7biZeeewmZon0o0hShaQ2x4Rgs06pYA7N9y3Z6TO3r6UMmnYE/4KfNSIjILccwDPh8PrR3up7nWgI4n8vTXOJTTzuVVtKPR9FiwkeKvr4+nHvuuVW3n3vuuXQF4nRjpP8XADStFD9WzwNMOsCR/+tRngBB7i9JEvz+BgCgTTLqQSJHmA1ifJTHoNUTwDt2uZEyc+d00gNn+wgB7PWq7GQ6TSCd4AzDpFdzyHdYVb2sAjzDKI+8ymsFmt2tMgE8oxEEAYIgwLYdeL1eXPuua+Hz++ji9ZH3Hek9rWidXNwWAPh8vgnl7hcKBXg8HjQ0NCCRSNDiWXnVslyQa7pG83D9fnehWucsVwCXV4BJokKkKQJZliEIQpWwDDeFIYoiZNm1dpECHhXADe7zJOOlCnCoIUSbd9m2jZVr3IVmb7zyBn0/RElEpClSfH9tap8gbHitstPv/r374TgOnVQ0hBuQTCYr7kMEcHkkXTmapuH2227H4YOH4ff7cda5Z0HXdQQCxycG7ahygB988MGq2x944AEsXnx8wosnG2KBKIf4f0VRrOvvJQfasU6uIyvA5MDt97s76sgKcK3xKIrCbBDjhHwujlNfAO87cBAAaPUXKHmACT5vdQdAxtREkYseYNOEYzvF9uLud9jrZRXgmYYrNtzChKZpyNIKMJu0zmQE3hVxVtkCZxJFWssDTJIPXv3Lq3jfte+jjSSI0COPUVWVVlPHQzKZRFNTE+bNm0e9tUSUEsqPOY7tUHuEz+cmQREfcm9PLyzLwkDfAPbv2Q+O4zBr7iyoqnuVY6S1oDHSSJ+LWB8A90p2MBSkC+oS8UTJA9zg+mpVVXV9wKcup0IXADo6O2gSFvERj7RBbHjdFcAke3jvrr3IpDPIFf33Hq8HiUSiIkOZ2EJIU5J4LE4bVOm6jq/+61ex+c3NUFUVX/nmV9AYbqTd9I4HEy6L3H777bjhhhvwwgsv4LzzzgPHcXjppZfw9NNP1xTG0wHSzrjcAkFOnpKk1G2oQO4vj9F2k0SgKYqvQgB7ve4sp1wAp1IxvPjiI5g1awlOO20dvZ3ZIMZPqROcm+9c63SYLwZ3+3ylLxrxABNYNWn6UKoAG3BsG7puUBuTzxdgn+MMRFE8KBTy0LQC/T57vew7O5MhFkJnhMgiftlyBEHAkmVLEGmKIDocpckInbM6IStyRQWYLIQjzSpGw7btYjfXFjQ3N6O5uRnd3d1obGysEsDloppUh1VVpQJPkiUYuoHB/kE8+9SzAIBVp69CqDFEU6HKBTCJRyOFOY/Hg1zx6odt21BVtaIbHKkAB0IB+P1+twFULIbm5macsvwU2kmvY1YHLMuitpHocBRDA0M07zgei+PA3gMAgCv/+ko8+sCj2Ld7H1afsRpAyYfs9XqRz+crvNCyLKO5udmtPls2kvEkwk1hfPeu7+KNV96Aoii4/T9ux9IVSwG4uup4+H+Bo6gAv+td78Krr76KpqYmPPbYY/jNb36DpqYmvPbaa3jHO94x4QH88Ic/xPz58+HxeLB27Vq8+OKLo97/V7/6FU477TR4vV60t7fj7/7u7xCN1s+WGw+GYVQdNEsCWAZQ/9KIa6QfXwXY4/FVVJN9vmDx7yUBPDh4GLZt4/DhXRgaKl0aITaIXC7HWiSPQYUHuM5np+vurFkum0w0F3vDE5ifcPpAPcDFCnA2V7IVNRQrIoyZBfEBa7qOHBXArAI8k+F5HjzHVbS5J8KtlgWiMdyI79/3ffz697/GPffdg6984yu46wd3wbLcmC1SjJJlGY2NjbRL3Gik02n4/X7afnnevHlQVZVaEgiSJEEUReRyOdogA3DP5ZFIBLquUxvEkcNHaLLCpVdcCsd26P3LhXVTcxPtQgu4V7cMw6ApVh7FQ5MvRnqAJUmizwu47ZkJHbM66FomYhspT4LY+PpGAMCiJYuw9uy1AIB9u/dVxMo5jlvlJt35iLVUUZQKa8bw8DD6e/vxzJ+eAcdx+Lc7/42OxbIs8AJ/3CJfj+pIsXbtWvzyl7/Em2++iQ0bNuCXv/wl1qxZM+HtPPDAA/jUpz6Ff/3Xf8XGjRtxwQUX4Morr8Thw4dr3v+ll17Chz70IXzkIx/B9u3b8dBDD+H111/HzTfffDQvg0IEcLmwJP5BSZJhmrXzZImfdywLBPEAK4q34ktaboEgz51KlcT81q0vVXzRFUVBOp1GT08PYrEY3dEYlYzHA0wOAuUHLFEU0NxU6rjnVT3gwCwQ0wFZKgpg3YTt2MjnSpPKYLDxRA2LcQwhFgjdMGgF2O/zMgE8gyE+1vLjummaNc/D5UKxobEBCxYvwBlvOwORpghs266qMkYikZr2w5Fks1m0d7RTkRYOh9HR0QG/319hmSsXwLIsVzyf3++Hbdt0IdxT//c
U+nr6oKoqzr/w/Iqxi6JI0yxIE4xy64ZlWTRtQVEUKmDLUyBCja4H2O93bZiWZWHV6avoeEgLZI/qobFr5RaIN197EwBw+lmnY9EStw11T3cPuva5jdFIlVpRFHpuNQyDWjV4ni9F0g3H8MyTzwAAVq9djbVnraXPQ2LTjlfnzqM6UliWhYcffhhf+cpX8NWvfhWPPPLIuL0z5XzrW9/CRz7yEdx8881YtmwZvvOd72D27Nm49957a97/lVdewbx583DLLbdg/vz5OP/88/Gxj30Mb7zxxtG8DIqu6+B5HkeOHKjwpwDuzJDMsEZCjPejHXBt26YWh5EVYGKBsCyTXq5NpYjxnEM2m8T+/Zvp/YmHxzAMxGIxDAwMVMSwMFzG0whDL/qmRh4028siZ3xeL0uBmCaUV4BtB3TBqCiyLnAzFdoNTteRKxYDWAV4ZiPwAjieh2WVLBC2bdddqyPLck0Lo2VZVceFUChEM3rroWmaWyUNR+htHMdh0aJFmD9/fsV9iVDN5XNV6xDI7+0dbjrCS8+9BAA4/6Lz4VE9tDss2Q4RjyO9y4qiFCNbDSqyie85HosjFitVgHmeh8/nq/ABk+10zu6kjTeaWyqzgG3bphXgtWetRaghRKPZXn7hZQBubjFp9EEW2pmmSePMyl/D8NAwnvmjK4Avvvziqvf3eHbunPCRYtu2bViyZAluvPFGPProo/jNb36DG2+8EYsXL8bWrVvHvR1d1/Hmm2/isssuq7j9sssuw8svv1zzMeeeey66u7vxhz/8wV19ODCAhx9+GFdffXXd59E0DalUquLfSAzDwBtvvITPfe7DePjh/yqOr+QBJjtYLUiWYD1IdZfjeEhS5SV1SZIhijK9n2VZyGQSAIBly84CAOzduwHZbGnMgiBAVVX4/X4YhkGFOqPEeHKAdY1McCr91G1tpYVwXh87mU4XqAfYdBthkDbIsqywz3CGQirABU2jFgi/38eyu2cwbtFJhI1KD3A9wSRJUsVVVIJt21WP8Xq9CIVCo9ogUqkUGhoaEApV2qo8Hk+F75WMVVVVWKZVlWqgqioURala4HbpFZfCsixwPEcFsCAItCpL4tGIcCX3IQkQkiShqbkJgJsuYRpuYTIYCtIKcSgUQi6Xg0f14J8+90/44M0fxOy5s+m4mlrcxw8PuBXgg/sPIh6Lw6N6sGyl6wledIpbBT6wz/UFk8YdjY2N8Hg8yOfzMAyjIq6NCPOXX3gZPd09UDwKzlt3XsXrN00TXq93VE01mUz4SHHzzTdjxYoV6O7uxoYNG7BhwwYcOXIEq1atwt///d+PezvDw8OwLAutrZU7QGtrK/r7+2s+5txzz8WvfvUr3HDDDW4+XlsbGhoa8P3vf7/u89x5550IhUL03+zZsyv+TmYqvb2HAAAHD7oBz7ruHlBlWYFlWTUFMOkiM3oTjFICBLFMEHieh8fjfmkKhQwymQQcx4Yoyli48DQ0NXXAti3s2LG+5rY5jmM2iBpwxRPgaDnApAIsSZUV4LayrkKKIoOlQEwPSAXYNC3AsZErRgvKssI+wxkKqQDzkgCtOKEN+Hzsqs0MRxSqF8HVWzQlSVJFYgShvMJaTnNzc82rqrquo6+vDxzHobOzc9yTarJgbaQ4Jp7g5rLkodb2Vpy6+lQ311cQ6fgkScJ177kOl197OS64+AIqKMnfOJ6DpmlUZBMBTCLVAsEATYwAXKsH0TOXXnkp3n/T+6mPWpZlGrtGLBCvrX8NgJsdTMZEBDChqdlt3OH3+xEIBJDP5ys64wmCgHDRXkiqyee+/VyoXrViO5qujbkIcTKZsADevHkz7rzzTjQ2lnx1jY2NuOOOO7Bp06YJD2Dkyak8iWEkO3bswC233IIvfvGLePPNN/HHP/4RXV1d+PjHP153+7fddhuSyST9d+TIkYq/m6ZZtCm4QjIWc8v+pAJM8ibrVVo7OjpGna2QBAivN1BHALurQfP5LPX/BoNhcByHFSvc2dHAwCHa1aocSZKQz+drznBPZkoVYGcUD3BRAI+0QBSTIHzFS6msejg9oB5gw4Bt28gVLRCsAjxzIT7BXJnf2+dlHuCZjiBJVVFbiqe2AJZlue4a9lrn7WAwCFmWK0RwLBZDNBpFa2sr1qxZg/b29nGPlaRL1BLogUCAVnYB4JIrLgHP89Q6QKq8giBg4eKF+LuP/x18fl/FtmRZhiRKKBQKVGwSAUxoDDcCXCke1OfzgRf4CtsqSdIQBIE2rYhFY3jz1TfxP/f/DwDg7HPPBuDay0jFmBCOhEutmyMRaJoG0zTpmERRpBVg+novv6TqPbGt4xeBBhxFDNopp5yCgYEBrFixouL2wcFBLFq0qM6jqmlqcoOoR1Z7BwcHq6rChDvvvBPnnXcePvvZzwIAVq1aBZ/PhwsuuABf/epXa+6YiqKMGqlhWVZRALuXTKPRwWKIfskCIQhCTV+QZVljVpdIBZj4fcsPzq5nhlSAs7AsV5QFg5Hi/2GEQk1IJofR39+FuXOXV2xbkiTkcjnouk4zA8eDbdtIJpOQZblqZjoTKPcA14uwq7UIDihVgL3FDGB2Mp0elFeAbdtGrlCyQDBmJqQCHC92t5IkEYpHZt/ZGY40wgPsOA6dAI9EEIS6RZBaAtjv9yMYDCKbzUJRFOqhXblyJdra2ia8b5F1O7UWdfl8Pnh9XsyeNxuDfYO49IpLAbhFOSImARSjVl1RPrLaTYQysTcAtQWwwJdsE16vF6pHpX5moJSkIUkS2trbIIgCLNPClz/3ZZimiXMuOAeXXePaVROJBJpaK5+jIdJAvb5koZ1pmnRMgiBUjKsx0ojVa1dXvSfktR4vJnyk+NrXvoZbbrkFDz/8MLq7u9Hd3Y2HH34Yn/rUp3DXXXeN6rUtR5ZlrF27Fk899VTF7U899VTNTnMAkMvlqnZAshMfbTSYbduwLIt6BjWtgFwuA8MoVYBFUaSVpXLI6tPRKHWBc8v65YK50gKRpQvgiAAGgI6OhQCA3t4DVdsmyRUTWQhnGAaGhoYwODg4YzvLlR/Y6lXHR1og3MYJOlavWo72thasO+8sN7OZpUBMC0oeYBOW7aBAPcBMEM1URgpgr8omrScDXp+/atF9ecvecnih/r5QK72J53k0Nzcjn88jmUzCNE0sXbq0ot3xRJAkCT6fr6YAJgvYvv6dr+Oe+++hlVcicsufT1EUmKZZUwCTRWPkdn/AX3EfktFLzovkvuW2TtJOmdg6yRV+0zSx5sw1+Pztn4coivQKfSAYoF7hYIPrLyaVW7LQThAEOg6O4+jiOgC46NKLqj4zoqeOVwYwcBQV4GuuuQYAcP3111MxR8TntddeS3/nRmT11eLWW2/FBz/4QZxxxhk455xz8JOf/ASHDx+mlobbbrsNPT09+MUvfkG3/9GPfhT33nsvLr/8cvT19eFTn/oUzjrrLHR0dEz0pQAoWSCIAAZcGwSJQZNlhXZMIR1nyGss/70WjuMgmXR9NKpaLYDdCrB7u2uBIAK4dKmgvX0Bdu58FcPDvdC0HBSl8vKAIAjI5X
JoaGgY87Xm83kMDw+jUCjQ3uEzERKDBrifL2ocG0daIOLxONLpNObNm4cnHr0fWkFDLp9jfsJpAjlomqYJ2zTLBLCHeYBnKEQAJ5IJAKUECLYIbmaj+rwQRbcrKk1KqJPFX+t2ct6uZ10MhULgBR65fA4rlq9AW1vbUY81GAxi9uzZNcU28eUGG4LUHwu4x7CRV3RlWaZe5vIKKVlop+t6RbU1EApAG3QLY6HGUIWdj+M4+P1+pNNpuh3Lsqj3VpIkNLU2YXhoGMtXLscXv/ZF+py6rkOWZZimiYWLF2J4cBitba0wTZOKfI/HA7/fT+9LaOssvY8XX1GZ/gCAJlkczwrwhAXws88+O2lPfsMNNyAajeLf//3f0dfXh1NPPRV/+MMfMHfuXABAX19fRSbwTTfdhHQ6jR/84Af453/+ZzQ0NODiiy/GXXfdddRjIBaIXFlwfjQ6VHaJXAbPu2KehGeXP240/2802otUKgqeF9HU1Amg2lPq87nWiFQqCk1zT9qBQLjs70GEQs1IJofQ19eFefMqrSeyLEPX9YqDQS0Mw8Dw8DA0zTWZm6ZJ/42VYzzdqKwA147n04uxcyJZRavpkCSJTt5sxwYHVk2aLpCGJqZpwrZs5IrNZdzvL/sMZyIjK8DEt88mPDMbn+qF6skjl8shFApVZOaOpNb5mVzur/eYYDCIcGMY4XAYnZ2db2msiqKgpaWl7t9IzGr5WCzLqqoYK4oCx3YLjSNfE+m+RgQwz/MIhoIYHnSLbw2NDVU5yUQDEMoFrCAIuOGDN2D/7v34m+v/hrY+BoBCoQBFcYuCCxYvwKt/eRXtHe1V3dsikQiy2WyFmA2Hw7j+g9fD4/FgwaIFVe+Hrus0Nu14MWHls27durHvNAE+8YlP4BOf+ETNv91///1Vt/3jP/4j/vEf/3HSnp9EZZUvpIhGByti0ARBhG3bFRYI8vtoonPfPjfDd86cJZBlT03LBPEGE/Hr8wVhWTby+TRd1dnZuRDJ5BB6ew9UCWBBEJDP52l+Xi3cSnSStnkklwl1XT95BbBeaYFwHAeCINDFAI7jgOOZAJ4ulHuALdtGriwGjQmimcnICrCqeo5bfBLjxCGKbqRWb+8gfD4fzeOvBWmcYds2PZaTwlW9x4iiiFWrVtEF8McKj8dDC1jlFV/HcaqqoKIo0gYgI8/zqqoiGAzSsQqCgGAoSP8ebAhWvd6RAtu2bXqbKIqYM38O3nbe26rGrGkaWlpakM1mse7SdUglUrjyuivBcVzFuILBIBoaGipeh6IouOKvr6i7iNAwDITD4eN6zp3wM/3xj3/ESy+9RH+/5557sHr1arz//e9HPB6f1MEdD8gsqFwAx2JDZTFopS9BuaWDCOB6X5BUKoqhoSMAOCxYsIred+SHGwiERvwegWma4DiOjq293Z0tRaO9tKkGgbRIHi28O5fLIZFIQC165IDS4oCZmCDBlV0CLV8sUQ7JR5REkeYuiqIIq9j1j1SCmXiaHpCqgWGasByLdoJTFCaAZypUAKeSANzOjUwAnxw0NDTAtuyKxVu1IMJvZGrEWB1cj8dxg1gRalkRR74eUpzh+Gqx39LSgnnz5lXcN1TW/j0UClEBTSAeY6IxykU3uW+tBeSmaSIUCiEQCED1qvjkZz6JBYsWVIn2xsZGLF++vKoz3mhrtQzDOK4RaMBRCODPfvazdIHb1q1bceutt+Kqq67CgQMHcOutt076AI81JMkhn69XAa4vgIFSqkNv737s2fMm7ehGOri1t8+HzxeivbpHfqlIhZkQCkXgOE5FRxqvN4CGBvcySl9fV9VrEEUR+XyemuTLG0CYpol4PF5xmchxHMRi/bBt66g6+E11yg8Q9brBlS+CI/4pgReoz6rehIUxNSlZICzYloV8MdZQklgM2kxFVd31EOQY5vWqkMT6V+QYMwd/MQ4sn8+PWs0lnVrLz90kZWEqXPkcaUUg552RAliSJLfSzVeLfUVRKtKceJ6vEMBkkdrIxxD7BaG89TIR3LVQVRU+n4+Ou3wBXTkjP5OxJqdE9xxPJrwHdHV1YflyN47rkUcewbXXXouvfe1r2LBhA6666qpJH+CxhlRyKz3Ag/B6iSG8tIpx5I5KRKbj2Ni06TlYlolDh3Zi8eLT0dOzHwCwaNFp9P4jZ2EAiislvcjl3EkF8f+SCi2pRHZ0LEQiMYienn2YP//Uim2QOLTu7m66fWJ653ke+Xy+Yma1Z8+b2LPnTSxadDrCZS0dZwrlXzTTMABUm+qNslbImqZBURTq8wZKlgiWAjE9IBYI27ahGwbyuVzxdhaDNlMhApj+7lFGXfXPmDn4vD74fD4MDw8jGKwWeAQigEfaF6fKlSGPx1NRFc3lclRgliMIAniBr9vyeeR9Q40lARwIBqqOg0QAl9svygWwWLwyWi62dV2nKRHl4pjYBkezg5Jx1UPXdXAcd9yP1xM+WsiyjFzx5PLnP/+ZtjIOh8NjRp9NRSzLgq5rFR+oa4EoxaARIVk+Wyq3P2SzKeo1LRSy2Lr1RTiOjUikg1ZuSQV4JG7sR+lAHgy6FeByfxAAdHQsAMfxiMcHMDBwuGIbgiDQ9oGkykxivXI5tw85GattWzh4cAcAIJ2OTShCrV6m7lSj/H0eTwyarusIBoNu16AyCwRbUDN9KL/8Zug6bWzDKsAzF2KBIKiqZ0pU9RjHHo7jEGmK0Czb0SrAtSwQxzNpYDQ8Hg/19gJuk4lwOFzl0RVFEaIgjmr3KL8vSYWSZAmKqlQ9hud5ar8YaQkRRbGqag6AFopIG2eiMwzDoF3kRqPeZ6TrOgYHB9HZ2TmuNKvJZMJnhvPPPx+33norvvKVr+C1117D1VdfDQDYs2cPZs2aNekDPNbYtl3VZa3SA6xQk71pmnRHLd85yuPLFi1aQz2oixatpvdxHKdu7iARwIIg0Y5xpEkFEd2q6seCBW7ld9u2v1Qt7iJfjPJMQDKTLN/x+vsPQdfd16tpWRoDNxaapmFgYGBaWCbKrQv1FsEZNAZNgmEYCATcWTKxQDi2A4FnfsLpAqkAA4Cm68gXSAWYxaDNVKoEsId5gE8mQsEQVFUddbFaLQvEVBLApHkEKXSZpolwOFx1PyLka1kNRsLzPBojbo5vOBIGHNR8veVpUOXCmjzHyHM9SZASRZEW6AzDoA0vxvrulS/kIxiGgcHBQcyZMwennHLKcZ/ATlgA/+AHP4Aoinj44Ydx77330piQJ554AldcccWkD/BY4zgOzQAWi/6xfD6HRMIVtaSTFPkSkS9S+RcqnXbvGwo1Y9mys3Dhhdfj3HOvRUvL7IrnqbWDuGV/90AeDIbpzsFxHL0MQgTqkiVr4fG4dgniMZ4ohw/vpD/n81lY1vh8wIZhIJ/PI5+vbsk8FSHvtVm3AlxcBFdMgSBfaLooAA67nDqNKD9wmoZBr2woiodVgGcoIy0QXlYBPqnw+/3wer2jXjYneb+1LBBTAZJ7axgGjRgLBoNV95MkqaKxxFisWr0KZ557Jt77ofcCqF19JfYLYmEg3x3iQR5ZAdZ1H
aGQa61QFLeqrGkaDMMYV/tikshBtmvbNgYGBjBnzhwsWbJkzMr2sWDCR4s5c+bg8ccfr7r929/+9qQM6HhTLoCDwQZoWgHZbBoDA70AKgUwuVxAOsOREyupABP/rt8fgt8fGvlUNU/EHMchFGoBsB0tLbPppXee52kmnqZpUFUVoihj+fJzsGHD09i7dyM6OxfD56v+stQjl0tjaKib/q5pORiGUbGja5oGXddpBBvBNE1omoZ0Ok2j1KYypApv16sAUzuLU9Gu0i6mRtSbsDCmJhzHQZFlaLoO09BpI5up4vVjTD4jK8BeVWWTnZMIVVURCATGrObKslyRklQrSuxEIUkSPB4PstksNE1DMBis8v8CpQrweAWwz+/DLf9yC5qamtDb21uzmEPsF8TCUD55VFW1ytLqOA4VusR2mUwmYVnWuBavjbSj6LoOr9eLBQsWnLCK/FEdLfbv348vfOELeN/73ofBwUEAbjza9u3bJ3VwxwPbtlEoXi5VVR8iEdeza5quQJJlhQpSx3Hoh1feBY5UgMs7uNWinge4qakTf/VXH8TixafTvEJy6WbkKtGOjoVoauqAbVvYvv3lCb3Ww4d3AQCamjpp8kShkKnYfjabRTKZrIor0TSNZg6PFrk2VSDitZ69o9zPrSgKPB5PRUyLbdvMAjHNICc13TSgUwHMKsAzFa/XN+J3D+sCd5IxHt+oLMtV1cx6rZNPBIFAALquo1AooKWlpeaEnSwQUzzjE8CiKFac+2p1xCNV3Hw+T7vSEUa+ZyTpoVzoBoNB6LpeM7e4FiMFMKl4H+/kh3ImfLR4/vnnsXLlSrz66qv4zW9+g0zGTU/YsmULvvSlL036AI81juPQBTOq6kUk0lzxd+IBJt4VEjNGBLBpGshm3RzKsQRwrR27VO0tmcrL2xaSntpkZ+Q4Dqeeej4AYGDgUJV/uR62bePIEVcAz527rKIBBxGDlmUhk8nQznLl7xGJCnPbRk99GwRph2zW7QRXfH3FWa0kShUzYMdhFojphqy4AtgwTGh6yQLBKsAzk5EnTq+qstblJxmtra11O60RSKJBxW11WiefCHw+H3Rdd1sYj7jyWo7H44Esja9SWt46GahtgSgXwCMry7IsVwjo8gVwBFVVacFoPBX1kX7sQqGAhoaGE3p8nvAZ/vOf/zy++tWv4qmnnqpQ/RdddBHWr18/qYM7HrgWCDcD2OPxIhyuFMAjT6DlObs8zyOTSQAAZNlTkeZQ63nqWSDKQ6dHCmBFUagNghAINMLncy0WqVR0XK9zcPAwCoUcZNmD1tZ5UFX3i1YoZKkBv1AoQNd1urKTQIzuoihClmWk0+kpvxiOfOHrpUAYRQ+wbVkIBd33sjymjlkgph8kC1g3NGqBYJ3gZi6q6hvxO6v2M6qRZbnqiuZU8ooTm1YgEKjp/yW0t7fXXCBXC1EUAWf0PHtRFOH1eivaIBNGClpN0+D1eqs6uwGgi/bHM6ZyAWxZ1qiC/3gw4aPF1q1b8Y53vKPq9ubmZkSj4xNjUwnXAlFeAa6cTSpKaccgBm4igjmOowJ0tOovyfKtZ4EoXxk5Mi2CdIsZKeSCQTe/dzwCOJdLY9u2vwAAZs1aUlzx6b4uXXcrwI7jIJstNQMpF8DkNfM8T6PZpnoVmArgOkKdCGBBFKB61YrHkGxoJoCnF+QgbOgGDFYBnvGMtEAwAcyoRfn5tF43tRMJKXI1NzePOq5wODzuTmmkjwApqNUT/KFQqGb+7shxaJpWZTUhCRbjiWYDQMdBzq88z59Q+wNwFAK4oaEBfX19Vbdv3LiRJkJMJ8orwKrqRThcKYAlSaYHVY7jaIWUCGDi/yUL4Oo9B3n8SMr9xUCpYUY5nmK8T3nVlQjgZHJ0AZzPZ7B+/ePI5zPw+UI0mk1R3JNHoeAmQRQKBeRyOTobLRe4RCCTarVb+c6M2tZwNEhGsWVZR72NsSAWiFoVYMdxqMCXRQlqcTGNJEkQxKLdxBm7cw1jakEqwIZpVFggmCiamVQvgmOfNaMaIgYB93wg8PU7x50IFEVBKBSiCQuTAbHvkcJVvddLMn1rdZ4jEN0xslpLmmmMJ5qNQKwVuq5DluXpJ4Df//7343Of+xz6+/vppfu//OUv+MxnPoMPfehDx2KMx5TKFIggIpGmir+XZwwSEVreBnlkAkS95yi3NYxkZKeakTsT2VHKbRCk4jxaBZiI31wuBa83iHPOuRaK4nZxIZ3u8nl3EVw2m4VhGLSjCxGogDv7O3BgEw4edBc5ejwe5HK5o14Ml0gk0N3djSNHjqC7uxvxePyotjMaJQtEdQW4XBSrXpVe/hGFYgecYjMMdjKdXshyaRGcYRABrLIK8AylpgBmi+AYIygXf+QK61SyQHg8HsybN2/c9obxQDzOxMpX71xGfL212hgTu0IikUBjYyMikcqusbIsw+PxUGvkeCBRo4VCAR6Pp8p6cbyZ8F5wxx134KabbkJnZyccx8Hy5cthWRbe//734wtf+MKxGOMxxkGhkMOiRYuwdu0yCIKGFStWYMeOHXAcp+IESgRweSV2PAkQY3UVK5+h1vIKk0xgUnXlOI5WgDOZhDurpZfvLQwMHMaRI7sxOHgEjmNDVf0455xrqGfONE34/Q0AXAEMuCKXjJGkPZB4lOHhPhw+vAMAh/b2BVAUlS6GO5oZXD6fp19MXdeRTqcRCoUmVXCWGmFUV4B1vWTv8HpVeNSiAJaKHiXbquvZZkxdiAA2DYN2cmQxaDOXKgHsVdnCVUYV5Pza198Hy7QQCoWmlAAGXAvpZFK+BmY0CwQRwCMrwMTaYBgGdF3HrFmzam7D7/cjm82Ou6JOKsCapiESiZzwc+yE9gLHcdDb24uf/vSn+MpXvoINGzbAtm2sWbMGixcvPlZjPKbYtlsBbm1tLVa0dbznPe/B0NAQHnvstxUfOmmHTCwBmpanKQzjsUDU+7DLo0HqeYXJTMs0zWJurR+SJMMwdGQycYRCbuV606bn0NOzjz6usbEVa9ZcTFMfABQ7nzUAcFMgLMuiqzzJeIhNQBRFJBLD5JVgYOAQ5sxZShfDjdaHvRamacIwDMiyTMO9c7kczTqeLOgBwK4lgHX6c2O4kVaNSAWYTHBYNWl6QaoQpmVSAezxsGzYmQq5OucWKmRIssQ+a0YVqqqivb0dXq8XPp+vpuCbaZBilmmao1ogPB7PqAI4FoshFArVFejj9SQTJEmiWme0BX/HiwkL4MWLF2P79u1YvHgxFixYcKzGddwgHuCGBld8ybIXyWQUzc3NuPrqq6jvFUBF7+ty/6/XG6Rd5GoxMtlhJGTnJEK51s5KbBDZbBaSJNEqcDTah1QqilCoCZqWR0/PfgDAwoWrMHv2UgQCjTWfMxAIQRBEWJYJXc+B43wV3VxIQLYkSTTpAgD6+rqoAM5kMsjn8xNayUkSJYhYIf7no60m12O0FIiKCnDZc5IOOEQgs8rh9IJUgDVNowdZZoGYubiLdzwoFPLwedVRj7GMkxe/34/Vq1ef6GEcV4iFwTRNqKM0iJEkCUuXLq2y
IhABbJom5syZU3fC0NraWmWNGA1RFOnV1fF0jzvWTOhowfM8Fi9ePC3THurhXsrP0SqmzxfB448/AcD9cMliN6Ak1ogNYTz+X2BsC0R5k41ywT0Sn89HI9iA6iSIwcEjABwEgxEsX35OTfFLhIHb+cxfHJ9JkyZ27XodicQgtUG4/uAkffzwcDcMQ6eV6nQ6PaGFbCQ4u/wLKUkSstls3aYVR8Noi+B0wxW4oli9GMLjUUuZyyxTdFqhFCdVuVwpzeREL7JgHFtIp06vV6179YzBONkgApgUsUajlg+X+KTD4fCo9gxFUWp2rhttXOQK8FQ4Nk/4aHH33Xfjs5/9LLZt23YsxnPcIYvgyE7iNqVQkU6nix7VTJUgJQJuvB3gxorUKo9CG+tyhSRJ9BL9SAE8MHAIANDaOrfucxELhaIo1BaRz2fA8zwOH96JvXs3YNu2v9B2z246RKklom3bGBw8TMcz0cVwmqZVvZ+yLEPTtIpFfm+V0SrAmkYEsFhlc1AUuWSBYCfTaYVUrABni6kupELImLmQz9fn9boVYGZbYjBo1zUiNo8Gv9+P2bNnT2qb4vKWzid6ARxwFAL4b//2b/Haa6/htNNOg6qqCIfDFf+mGyMFMMcJCIdbcOTIEQCArmcq7k9sEOUV4LEE8FhNFYggHMsq4Xp/S2kQpSi0GGzbwtBQNwCgtXVO3eciXwi3l7crgHO5NACgv/9QcXtR6h8qFPLI510B3NHhWl76+roAlLzC5fnBo+FmLheqZqTlNojJYrQUCE0vVYBHVnkVRYFj14+tY0xdlGI1MFfcH2VZmXKLXRiTC60Aq27eM7tqw2CU4lVJwetoWLJkyaRH25JmGJO96P2oxzPRB3znO985BsM4cRAPMDlR8jyPSKQF3d07sXz58qoKcLmxfDwZwOQ5RjsRl1srxvKxeb1eajtwLQ4cDKOAnp79ME0dsqyioaF+a0jbtumlB7IQLp9PwzA0RKO9xftYyGaT4HkZmUyyKMwFLFiwCr29BzA4eBiWZUIQRCiKgkwmQz1DgiDA46mdx+lWlHOIx/vQ2bmowjctiiJyuRwaGxsnRXiOlgJBBbAgVo2zvEXmVPiCMsYP8QAX9FIXOPYZzmxIBdirMg8wg0EgXU3figAe2RxjMnCvsEtTYgEccBQC+MYbbzwW4zhhuB7gbEUFOBJpxiuvPAkA0LQ09fwCoLMqQeBoddHnG/vDHO3ATCwQlmVV5A7XYmQahN8fQiaTwL59GwG41d96jyevg1zS8Pvd4O1cLk0j0wjJ5BDC4VlIJKL0vg0NLfB4fCgUshga6kZb2zxIkoRcLoehoaHi+8chEomgsbHaf2yaJg4e3I5Dh7ZhYOAgzjzzCjpWWZaRz+dRKBQmxRs0mgdYK7gVdFEUqt4rUSoTwOxy6rRCUYqd4Ex3kWN5ExvGzMRTFMBqsQLMPm8GA9RKSRZ2TxVI++Wp4P8FjsICMdPQtAIsy6qoAJ9++rkIh9vhOA4sy0A+n6b3JysjSdC+KMpUbI3GaKK23AIx1iXbejYIktQwlv+XNLoAKivAxD9MTiCJxBAkSaJVbr/frcy2t88HAPT3d9Gx+3w+BAIBBAIBSJKEZDJZ08+raRoyGXd7AwOH0du7n/6N2CkKhcKkdIcrz0WuGode8gBXCWBBpAsRmQViekE6wWWLi+AUxTOlOj4xJh9aAfZ6WAWYwShChC+5KjtVEEURqqoyATxVyGZdj2+pAswjGGzAbbd9A7LspiTEYgP0/pIkwefzwTRdESXLYxu5x2qqQCoXbmV57J2VxIc4jkMFMOCK1+bmWXUfR6rGRGSTlIhCIYeBAXdh29y5ywEAyeQwFEWBZblC1ucLIZvNorV1HgDXL1xeMSYoigLDMJBIJKqE7MgFddu2/QW6XlpAJ0kSotEouru7EYvF3pIneNRFcEUBLInVHXKEMl8w8xNOL8glO/KdlmXWBGOmQ5ph+IgFgl21YTAAuOfTqTYpVFUVp5xyyoSSI44lU+edOUHkcqWTJeBaIAiy7H5I8XhJABMLgU59hqMLYNJNbbTLEOUpEOMRwOU2iHIBHIl01M0jtiwLhmFAVUu5qKrqg1D0vLr+YQ/mzVsBwE2WsG2LVpZVNVB8viYIggjD0JDNpmo+F/EpZzKlBYSmaSKfz9POc6oagK4XsH37+orXpapuDFksFkNvby/i8fhRVYRJVd6uFYNWTIEQBAEcKgUSmTVzPLucOt0gHuDSpFZmi+BmOKQC7PEoEMZxJY7BOFkgAniqHQOnQv4v4aQ/w5PM0PLGDARJqhbABGKBGEsAFwoFBIPBUSM/iAAe72yt3AYRCpUEcC37g23byGaztGFF+czLXbDmq3i8zxeCKMqwbQvpdJwKYI/HD4/HA8uy6KI/Er82EnLZJR6P06YShmFQO4UkyVi79hIAQHf3nmJ+cfmYPPD7/bQiHI1GaQOSbDaLRCIBwzCqn3jEGAC3K9hISCOMWhYIQRDo4jgmgKcXtAKcK6VAsM9wZtPZ6SbedHa0QRCZAGYwCORq71SyQEw1Jnx2+PCHP4x0Ol11ezabxYc//OFJGdTxwo3wGmmBKO0sRACnUlGYZqXgGk8FWNd1SJI0Zqc0IrZGywAeCZlFybIKv78RgiCirW1exX2IYPR4PGhvb0dbW1vFyk5BEKCqpbG1ts4Fx3G0rfLg4BEq9D0eP2RZhmVZNPatngB27++Bpmno7e3F8PAw8vk8bajh9zegsbEV8+efCgA4cGBrzW3IsgyPx4N4PI6+vj709PSgr68Pg4ODGBgYGNUiQYRPLQ8wEeWSJGJEAdidNQs88wBPQ5RiBTiXywFw9x8mgGc2f//RW/GVz38J579tLTvRMxhlkI6x7BhYnwm/Mz//+c9rCo98Po9f/OIXkzKo4wmxQEhSaREcgecliKICx3GQSAxVPE7X3fegngB2HAeapiEYDI4ZJ0J20omILkVRIAgCbNvGuedeiwsvfA9UtdSXmzSWiEQiaG9vh8/nq9o2z/P0MTwvUP9wQ4MrgHt69gIAvN4AeF6gYxyrAkxek9/vB8/zSCQSiMfj1P/r97ve4zlzlgIAotG+mnm9QGnVqK7rEASBLrjTNA0DAwNIpVI1LRKlFIhqn3IpBq06BULghdKBYww/YS6Xg13DB804MZAcYPKZSBKrAM90ZFnB/DlzwQFT7lIvg3EiIdZL9r2oz7jPDqlUCslkEo7jIJ1OI5VK0X/xeBx/+MMf0NJSP392KuI4DrVAkJ2EKxM9juPA52sAUG2DGKsCTKq/fr+/5t9HMtEKMLm0YVkWFEWF11uKYsvlcrAsCy0tLWhsbKwrAnieh9/fAABobp5F/cOhkNv6MJ2OA3ArthzHUe+x1+vGp5FGIPUgfmm/37VP6HqObg9w85MVxQvbNmvaTAhupVqlwpQkTwDA0NBQzU50pRSI+gJYkqotEECpcjjaIriCVkA0Fh13ExDGsUdWKv3vzAJx8jDe9RMMxsmCIAjMAjEG454aNDQ0UPGxZMmSqr9zHIfbb799Ugd3rCFNMADQxWAjT5g
+XyOSyYFxCeB8Pk9TBxzHQSQSGXcbQdK7e7wnbCKWyeV8gm3bsG0bra2tY1ovAGD+/BXQtAIWLFhRfF06tX4QyCSAWBI8nlILZcPQIEljB2YLgkD9xEQAcxyH5uZZ6O7eg6GhbjQ1TazrjMfjQSaTQTabrYpVGS0GTR8lBo1sN51Kj1qNz+VyyGTcFtI+n29arz53HAe2Y9MOh+XNQKYTI6+0sBSImQ+1OjljR0gyGCcTpALMBHB9xn3EePbZZ+E4Di6++GI88sgjFW2PZVnG3Llz0dHRcUwGeawgFWCO4yAI7oG03APsOA683gYA1dXOkQLYsixYloVQKERnXROJ+iACeCI7q6IoVXYUUgkZbxcXVfVi7twV1Aqh6zo8Hh9EUaZRbz5fiM4mvV4vMpkMVNWPfD6DVCqKSMT93Pfu3Yjh4R6cccZfVYlix3GoABZFD3K5HLxeLxXAg4PdWLbs7HG/doIsy8hkMgiFQhVJG6NVgEdbBAe47+to3inTMpFOp+HxeJDP5ZHP5+HzTo1Yl4mSzWURi8VgWRYcx4Esy+ho7zguwtEwDZiGOWmZkCQFovQ7qwDPfNz9lFWAGYxKmAAem3EL4HXr1gEAurq6MGdO/W5j041cLlNROYjH42hubqW/K4q72EzTshUd4UYKYNLEIhwOH9UOR5ITJvK+1qp4kCzh8VZDygUCacfstiqMIBbrA+BGlpHxkYp2IBAuCuAYIpEOmKaBPXvehG1bOHJkDxYsWFnxPPl8GrZtged5yLIXtm3DcRw0N7tV31RqGJqWh6JMTAxJkoRMJoN8Pl8hgEeLQSNNOkazQAjFjGDLtpDL5eD3+el98/k8dF2H3+9HNptFKpWCV/WO+dklkm42cmNDdZe8E4FhGohGo9B1HYqiwLZt6LoO0zIh1YnTm0zy+TzS6TQ6PJMjuD01KsCMmQ3ZbxwH7ETPYJQhSRJdK8SozYTLI8888wwefvjhqtsfeugh/PznP5+UQR0viAWiXCyWFwwdx6GCzI3hKnU3q1UBJlXco4H0yJ4IoihWLQCzbXtC4yAd2ADQjniiKNKFbkBJALviVYYsy9QWQRbCDQ11U7sBWTxXTjqdAODaKUg2oWEYUBQvzTIeGuoe/4sv4lbvBaTT6Yr3YjwVYKlOBVgQBMiSK/RjsRgGBgYQT8TpBCGdTtMFgapHRTabrelDHkk+n0cqlaqKZnMcB1YNq8axxF3YmYCmafD5fJAkCZIkwbIsmGbtBYmTjWmY0DQNuqGPfedxMNJuxCrAM5/KRcvss2YwCA0NDVi2bNmJHsaUZsJHjK9//etoamqqur2lpQVf+9rXJmVQxwtXAOfKhCdXNVsSBJGK3ELB9QvbtgXDqOwEZ1kWXaR1NHg8nnF5divHJtAGGgR3Udz4K1/lVWfDMCDLMhRFKfP9eiAIEt0mz/Pwer100R2xhvT3H6TbTCSGkMkkK56H2B98PncBnaIo1ItL0ieGhycugMm28vl8hQgtxaCNboGolSAhSRIEUUAqmUIikYAoiojFYkhn0sVudjl4igH8JHs0la6dRkGwHbe6qmlalW0lmUqiv7+/ZmbxsSKXzyGZTFZUrnmeL7b/Pj5iXNM1GIYxZqbzePF4mAf4pKP4+XIcprUPn8E4FrDq7+hM+Ihx6NAhzJ8/v+r2uXPn4vDhw5MyqOMFqQCXC2AiAghu+oFrgygU3BSD8kowucxK/JNHi6IoE/ZCkqpsucgjFobxQgSCbduwLAuqqkJRFITDHQiH26mVobxKXi6QUynXPzowcAgA6Hs1sgqcyZQSJXiep41BXBuEK4CHhrqPqusb+ZKTznOmaYJsplZllQhvjgNSyepudj6vDx6PB7F4DIqi0PSLaDSKZCoJOMBgNIH9B3vd16y4i/Ey2Uzd8RuGAdM0wXN8RXSbYRpIJpPIZDJV7aNtx0ZBG7uyXI9sLotkKol8Pg/TMmE7NkzLREErIBaLuZ7zEc0DOHCTJkhHw3bcxiaWZY2rej4eRn7/JInlAM90yic4rHU5g8GYCBNeNtvS0oItW7Zg3rx5Fbdv3rwZkUik9oOmMLlcriICjed5agVwb+OgKD4AMWiaK4CJ/UGSPBWxacd7tlUehVb+3BNZDT1SRBNhKkkyzj33WnAch3Q6XbFNN9osBJ4XYNsmurt3F9MgPFi69Cxs2vQcenr2YsmStfQERSrAXm+QVpGTySQMw0A43AaeF1Ao5JDJxMFxPLq796KxsRWtrXPG9TpkWUY2m4Vt29SjC7iX2UdiFJuacByQLxRgmEaF51UQBXi93opJjcfjQTabLb4XEv74zIvQDROKLGFWRzMM08DAwAAKoQIaGhqqPLSmacK2bXi9XlqtVlUVyWQSuq7D5/UhkUi4XfB8fpiWiVgsBk3T0NbWVrU9y7ZGbf2azWUxMDAAy7LcZIei3cNxHDrZqRXRx/FcVbLIscA0TViWBVmWkc/nK/z1R8tIDzBZzMiYuZR7gNlkh8FgTIQJHzHe+9734pZbbsGzzz5Lkw+eeeYZ/NM//RPe+973HosxHjNGVoCJn7T8EnBlBdi1QGhadQSa4zjHPYaHpEaQ8ZIYq4kIcSKANU2DKIruArAyUUyEyUiBLYoSAgF3MdfevRsBAG1tc9DevgA8LyKbTSGZLDUPIR5g4id2q8i+YoMLEZFIOwDgtdf+iGeffQB7927Aa689gSNHdo/rdUiSBNM0qT9XKnp4c/kcsplKf3A+Xyh7jEEXxRGy2SwMw6iqyPt8PnhVL6KJNPSisH5z8244jgNVVaF6VCQSCfT391dVbg3DAEcsNg6QyWZQ0Ao0TUKU3MlMLBZDLpfD4OAgkskktIJWJUgt23I74RVqd8LLF/IYGnLf+0AgAK/XS60uoiBCkRUqfvd19SCeLHV2FEURuq6PWonXdO2oKvUVr8F0jx0exQPDMCbFB6xUVYCZAJ7plF/BYgKYwWBMhAkfMb761a/i7LPPxiWXXOKe9FUVl112GS6++OJp6QHO5bIVFeBqAYwqC0StBIgT1XJQlmVavSVJFBMR4uV5woqiQBRF2j2GVC1HxrORRAifzxXA+bxrPWhrmw9RlNDWNhcA0N3t2iA0LQ/DcN8zj8dPJxyknbNrg5gNAMjlXDFGFuFt2vQcDh3aCQAwTQNDQ93o7d2PgYHDiEb76GdCOs/5/W7LZrF4ad92HMTjMfT191E7QCbjTmTkYkQMaZ0LuOIyk8nUtbMIooDu3pKw7x+Ko6dvmP7N7/dTn3A5hUKBXqJVPAqy2SwSiQQs06LvB2kfPTg0iGw2S2P0tEKlQNcKGnK5XM0mHLqhY3h4GKZp0veXLLCUZdkV2qIrhnfuPYynX9yAR37/AnbsOehGSfHu/l/Pj5wv5DE8NPyWrBmAGyUHBxAlEZZlTYrtopYHmImimQ3JpmctXxkMxkSZcMlSlmU88MAD+MpXvoLNmzdDVVWsXLkSc+fOPRbjO+bk81k0NJBLwa7QqzwZc/B4XCFCKsAjBTDJoDwRQeySJFWkOBBxOl7K2zCPFEz5fJ5Wf0duUxRF2tDCfY
xIG1nMmrUYvb370du7H8uXn0PtD6rqB8+XotSI4DYMA3PmnIJEYhCq6se8ecuhqgFs3/4yurq2YcuWF3Do0E6kUlE4I1oPC4KEiy66vqINtDsed7wc3Ba5mqZhcHAQPM/TRWii5Fa8c7kctUHk83nomg6vz1v3PTtSFMDBgA+pdBZvbt6NzvYmeiIWRRG5bA6NDY3gOI4ugCP7hyRJKBQKMM3KDFzyGRiGAb/fjV0TRFegNzY20moXeSzNPy7aI2zHRjQahVbQ4POPnkvsOA527D4IALBsGy++shW9/VGcf9apME0TplkdheY4jutXzmYQCAageo4+v7f8O8ZxHAqFAvy+sbsmmpYJga8dFyiP8L4rSu0ujYyZAzl28fzYrcsZDAajnKM+YsybNw+rVq3CFVdcMW3F78gUCCIGR1ogSBbwyAowOcFaljXhJhaTRXmSgWmaR5VEQSq+5VVPkgtbT1TLskxbIgOVrZSbm2dBkjzQtDxefvn3tILr9zdWBNaLokhtEJKkYO3aS7F8+dvg9QbBcRxWrDgXCxasAgAkk0NwHBsejw/hcBtCoSZIkgLLMtDVta3qNZVSICzwggivR0UgEIDH46EL5KRitdwwDBQKhWKzjgzAlRIRNm3bh31dPXS72VwBsbi7cO6yC8+AIPAVVWAAkCUZmq5B093KrWEY6BuI4vdPvoL+QTc1Q/WoNG2iHEEQ4PF46GcoSzJ0Q6cWAduxkc1l4fG41oHySnM2m0U2k4XqVcfcBwaG44jGUxAEHmesPgU8x2H/wV48v35L3SSIbC6LTDpDBetbIZ8vYMvOLgwNuykbxAdcD8dxkEqn0NPTg8GhQeQL1fev7gTHFsGNhuM4x8XvfSzhOA4gFWCBfdYMBmP8TPiIkcvl8JGPfARerxcrVqygyQ+33HILvv71r0/6AI8lmlaAZZljWCDKK8AjF8G5J1yyaO5EnGzLo9CONomC2B7KHyvLMl0wVWubgiBUVIDb2+fRn3lewNKlZ4LjOMTj/TQRwu9vqPITj5Z8wXEcli9/G04//RKcdto6XHzx+3DppR/Aeeddh7e//V1YvfpCAMDhwzthmpWX0OvFoAmCAKOYc0s+M47jkMvloOlaRcTZ9t0H8eqGnXjmpY1Ipt3qP7E/NEcaEGkMYtkSd/L3xubdsCz3uQRRoE0lAHdisn33IUTjKWzevs99bkmEoihIprL41SN/xhuba3udBdHdH8m2NE2Dpml00pJOpWm6QyKRgCCO7wrAjt1uasfCeZ1Yu2oJLr/4TABAb/8wHMepygI2LRPxeBy8wENRFBQKBdhOdcTceHAcB7v3HcbWXQfx3MubIInSqD5gy7YQjUUxODAI27aRTqfR29uLoeGhCuE8shOcxzP2ROBkJp/PY3h4+LjF3h0LqP2BG38beQaDwQCOQgDfdttt2Lx5M5577jmaGAAAl156KR544IFJHdyxJp12s2pLFWBXAJeLpvJFcJqWK1ZNqptgvJUItLcCEcBkzEdjwyAtjkcudOM4jlaVaz2vJCloauqAx+NDa6srBHO5HCzLwrx5y3HJJR/AkiVr6QSCxJ2VP48kSVVV93I4jkNn5yLMmbMUPl+wQtC0ts6FzxeEYegVi+U0LYdg0L2cXisHmFx+J53gSI5wOp12JzOSiFQ6h1c3uJVrx3GwcYsr4o/0DgIAZnc0AwBWr1gEQeAxMBTHLx9+Ei+9uhVD0QR4nqfVWU3TMTDkxsB191UKjl17DyOTzePNzXswFE3UfQ+IbUPTNKDY9YoIUdJgo1AoVHwn61Eo6DhQjHBbcYr7uXW0udneumHCMKyqhYFk+6qqQhRcf/h4fbtkckYwLRODw+77EUukkc1rsC275vZMy3QbkcTj8Hg8UFUVfr8fiqwglUyhp7cHvX29SKVTEKXKfV+WPUwUjQK58jGdq8BkAss8wAwGY6JM+Ijx2GOP4Qc/+AHOP//8CjGyfPly7N+/f1IHd6xJpVwBrKquwOW46m5srkByq5SOY0PXC1UCGEBNkXg8IPYEkthwNALY7/dXRdiRCiNQW1QTEXv22VfjkkveB1n2FLvlGfTyuKr6cMopZ+CSS96Pyy77EJqaOqusIrIsQ5Kko1oExXEc5s93c4q7urbCcWxks0k8//wjmD+/Heedd16VZxgAjGKCA/GMEhsEaafsOA5eWL8ZpmmhIeiK9z0HupFMZWgFeHZnCwDA5/Vg3TmnwasqKGgGtu8+iEf/8BJiSbc7nGEa6OsfoqkRpmmhb8C1QTiOg64jfXRcf3ltGxWKqXQOD/3uOTz9wgbqGbZsC9lsll7qJSf8VCqFVCo17tiv3fuPwLJtNIWDaI40uO+BIMDvc/fzbF6rSILQdK1i+4Io1BWsI3EcB0PDQ4jFYvQ20zQxHCs1SjnU3Q9wqLJVEE8zWRBYLnBFSYQ/4IeqqtA0DQMDA8jlKhcFKoqHVYBHwTAMekVhuuJWfzlwPM88wAwGY0JM+IgxNDSElpaWqtuz2ey0O9lkMm7iABHAPC/UFMA8L1ARXChk6y6COxEQAazr+lH7kHm++vKhW+GV6i7uI5UXx3HogjPbtql3eOR9FUWtmSjhVtg9R91+d/bsUyBJCrLZFLq6tmP9+sdpXvOFF14ISap+P3TDwJo1axBuW4AtO7toBalQKECWZezcexg9/cMQBR5XXHw2Znc0w3Ec/PnFDdB0A7IkojkScivGpoXFC2bhA++6FFddcjbaWsJwHAd7D3TTVr8Hy0QuABzucavIiWQGyVS22BpawMBQHHsOdCOTzePxp15GLJHGvoM9MAzXApHJZKBpGhS55HUl+cSki99YOI6DnXtc+8PyJfMqvrPBgCv2szmtIgkim6m9fV2rrBzajl3ly02lU3ThHNmeruuIJ0ti9eCRAXfhYC6HbC5LK8aJRAKpVAper7dudU8Q3Mxmr+pFPlcZC+fxzJwK8FuNnasFEb7ZbLayAYttI5PJ1Lx6MlXheZ41wmAwGBNiwmeHM888E//3f/9Hfycn0J/+9Kc455xzJm9kx4F02l3MRNIPSAW4ortQ8We3GYbrAy4XwLVyco8nHMfRCupkJ1GoqlpTHAMl4T2yDfNIS0Y55O8jt+cuTDu6E7woSpg71+13vn37y8jnM/D5QsjlXJ/s7DktVdv2eFRcffXV4DgO+w+64pSIqIJm4JU3dwAAzlyzDKGgD2tPOwUAMBx1q5ad7c2wbduNUMu7YpvneczubMHZp7tjOXhkgEaqkQVybS1utBuxUXQd6QcAzGpvwtpVSwAAr765A48/uR7pTEnM9Q/Fiws28+57WLZwTpREamEhmJaFv7y2Dff/7x/xzEsb0TcQLVZiE3hh/RYk01nIkohF8zsrFryFAu420tkcTYIwTAOpdKpK/AqiULEQzXZsDAwMYGh4iDYayRfyiMfjkCQJuq5TG8fAUMx9/4r7Qd9AFCh2oOvr60M8Hkc6k0YsFoPH4xnXd0sQBfj8lckdoigjkUjQDoHTlWw2i97eXqTT6bHvPE5I9BxpSV5ezc/n3RzpyXy+YwXP8QBJg
Zghkx0Gg3F8mLBauvPOO3HFFVdgx44dME0T3/3ud7F9+3asX78ezz///LEY4zEjk3EFMElz4HkesixTAUcqg4CbBZxKjawAq0fVfGKykWWZLsSbzHGQZIh6FeCRXfNIrJcoijQZoxxSIR55pYBMOkZ2tBsv8+adiv37t8BxbHi9QZxzzrX42c++jfb2EAIBLw73d6Ojcx4A1+O65vSz6JjTmRySqSxCQR94nseufUdgGCaawkGcutRt+d3a3IhZHc0l+0NHM0zThCzJMDgDhmHQKwetzY3weT3I5goYGE5BkmUMFr29Z5++DL/708tIJDNIpbM4eNgVwPNmt2HJwtnYte8wkqks8gUdfp+K1uZG7D/Yi57+YbS3NLjNNGjnK6ds3yxZceKJNP78wpuIJVzxsvdAN/Ye6IYiS9D0ksg5ddl8SJKIbDYL0zTh9/tpBTidzlNhXCgUaCwb4KZgmKYJn+qmUJiWSePjspksHDjQNA2NjY1IJpN025blTgb8Pj96+933sa01jFyugHgyg+6+YSya3wnDMBCNRemVgolYi/y+UvSbIAgoFAqIxaI0I/pYQCrVqqqOy389EWzbRiKRQDxemgCZpomGhoa3fLWNdOLzeDzI5XLQdZ0ufE2n09A0jfquR6ZrTCXcYzQPnmed4BgMxsSY8BHj3HPPxcsvv4xcLoeFCxfiySefRGtrK9avX4+1a9dOeAA//OEPMX/+fHg8HqxduxYvvvjiqPfXNA3/+q//irlz50JRFCxcuBD/9V//NeHnBUoCmJy4eF6kIpJcki8XwIDbqMGyXCFBfK8Tzd6dbMiYJ3shnizLUJTazQSIQCmv9DqOQ0+YtfyhRADXeh7Sye1oIF7jSKQD55xzDVTVB9sG/vznPwMAdnftxbY9B9E/GMOLr2yBPxBAMpmEY7tjJBVZANSTu3jBLPBll1TXnraE/jyroxmWacGjehAIBCq8qxzHYeG8DgDAoe4B9PYPw7JseFUFrc2NaGtxm4fs3HuYLnqbO7sNgsDj/LNWguM4+LweXHvZOVi8wF002NsfpXnFsizjT8++jgd++yzSmf/P3nmHx1Fd7/+d3Snbd7WralX33g0YG0xxAUIPSSD0llBCCJCQb2gxJaGkAAECgZCEkORHCISaEMA0G1Pcey9qVpdW2l5n5vfH3Xu1q2bJlmxZup/n8WN7y8zdmd2Zc899z3sym23sLa/BG/9dAW9bACaTjAUnTsP4McUQRSNi8QSMBgNGl43AWQtPwJxUVlvTNNhsNkTCEThTemdfgGRM47E4AoFA+wRF0/D2+yvx+rvLEYnFkUwkkYgnmD8wBKInj8fjKU1umGWmTYoJkUgEsXgM9SkNdI7HhdLifABARSobLkkSrFYrJEnq0SGkK6iUhGxHTrXctiIWix3yd+tghEIhtLS0wO/39/i6joWAB0PTNDQ2NqKlpSXVepw0eGlpaUFra+thf55kMsmkW+lFltFoFKFQCHa7HYlEAm1tbQMiv+gvSAAMCOAZ4IMRT8Th8/sO/kIOZ5jQpwxwIpHA97//fdx3333461//etg7f/XVV3Hbbbfh2Wefxfz58/H888/jrLPOwvbt21FSUtLle77zne+goaEBf/rTnzBmzBg0NjYe8s2AaoDllKZSEAysFXAymczIslAng0DAm3qtAEmSU618j24ATKUP/R0Am0ymHrNatDgrHWot1tWyc3daaYPBAJPJhGAweMjZprFjZ2Ls2JkZ21yzZg1OOukUOBxWrNqwkz2naRpef/11XH/1pdAhobqmEVMmjEQ0GmcFamXFBRnbz89xY8HcaRAEAXabBYFAAIqiQFGUdm1uauyjywqxeft+VNU0QkoFZCPySaOMksI81DV4sXn7vtR2s2Axk/cVjcjBd84/FRazCbIkQlFkCALgD4QQi5PObj5/mAWL7328ChecdRIUWcL+ylp8snI9dJ1IKk47aSYsZhMmji3FvDmT0eT1IdvthJJmFaYmSZaeBq0WE/n++PxEzx+NRRGLxVj2tOpAA5NmVFTXo6woh1mXtbS0IhCOYaTVCqvVimQiCSFtWdooGqFGSBa4ydsGAMjxOGG1mLFx615U1zRCVTUYjQa2EpOOruto8wURjcVhNikwm2TIcmfPa0kUkUyqkGUTJEmCJEmIxWJIJBL93qgmkUigtbUVmqZlZFE7Eo1G4ff7oes6srOze3WtiEajCAaDGe4sdHWKBtx0kmC32/tchJt+zZQkibm3hEIhNqm3WMj33GKxwG6392n7vaGv7Yu7ej39Py+COzjBQBDBUJAUlBqPfNMmzsATiUQQi8XgcrmO9lCOCfr0K5AkCW+++Sbuu+++ftn5448/juuuuw7XX389AODJJ5/EBx98gOeeew6PPPJIp9e///77WL58Ofbv3w+3m+gpy8rKDnn/1AaN3rQMBiML4KgusmMG2O9vAQBIkokt29Mb09GCBsBHOghPb8JB/273VBYylunTx9oVJpPpoFm0vmAwEH3yju37ccXFFyCa8KO51Y9wJIZ1a1ejuroaolFHAsT7NqmqqDzQAF3X4clywGHv3AmOev5S2YsoipAlGQ6HAy0tLczSLcfjhMNmgT8Yxp5yYjdWWEBsxooLc7Fq/Q5oGjleZSWZgbbL0b5Ur8gSst0uNLW0oba+GeNGF2PXvmr2fJsviA8/W4tpk0bh489J8Dt+dDFOmTc947jLsoTClM1ZOvEECdgcdgc0TUM01XI5nkgiqWqIRCLMDg8Adu5t33dFdQNGleQhGo0iGo1i5eptaGzx4eQTpmLS+DLm2KBpOhqbW5GbnQVJkuAPBNDWRiZHOR4XbFYzzCYZkWgcdQ0tKErZywGkQ92OXZWoPFCPxuY25qRBMZsUnDhnEsaMLGRjlCQJkWgMsqwwayxdJ7KMvmaUe4JKH6LRKOx2O4LBICKRSEYAnEwm4ff7mRSE0psgmE4siZQjhGQyAZvNxQpTqTc0DVizszuf347jTf9OJBIJBAJeSBKxMwyHwwgGgwgGg2zSSyf2VArRn0434XAYXq8XFosFVqv1oBPfZJLY4ZnN5oyuiKwLXKoot68kkgmEQiE4HI4hHUBTLT+1L+QB8NAkGAwiHA7DbrcfdjzQ1f17qNHnX/yFF16It95667B3HI/HsW7dOixZsiTj8SVLluDLL7/s8j3vvPMO5syZg1/96lcoLCzEuHHj8JOf/IQt33VFLBZjNlH0D6U9A9xuhyUIAmRZZhng9gCYZIAjkWDqPeQm0d2y/pGEaiWPdCvm9AA43eGB6obTb/oHKxZM114DYN62PZn0005WXS3R0v0kEgmMKRmFxSfPxuXfWoxrLz0LWzZvJvsUDbCYFSRVDfUNXpRXEflDWUk+204ikYDP58uQelDdMw12HA4HzCYzCSICJIgoLc5jxwUACvOzSYZbMsBqac+qlxXns+9oV59jRD6xp6utb4Gqqtiz/wAAoieWRCNq65vx/ieroWk6RpUWYMGJ03t90Uomk7BarRAEAQ6HAw6HHWYT+S5HIgmoqsq+26FwFNU1Dey9dQ0t
0HTy+6qpa0BjqkBw3eY9SCbbz9lXa7fh7fe/wOoNOyDLMpqaWqHpOkyKDJuVNKooLSLHe+feKkRTzhJNzW1487+f44s1W3GgrhnxRBKi0QCH3QI5FVxHojF8snIDPvxsLcIR2pyGtpsm54Z+53q6RqSj63qv7N1CoRB8Ph8sFgubDAUCAXa+NU1Dc3MzWlpaYDQaYbfbYbFY4Pf7D9p8Qtd1hMPhlLtLDCtWvIHly19jq0+0VTnVHQeDwW69fKPRKBobGzNs6ACgubkOa9e+j9Wr/8cmCeFwOEPPDpCJaSwW61MhYSwWO6i3cCgUQjgcRktLC2pra9Hc3Nyj1MLv9yMUCsHr9aKlpYUd5/b2432/2dNJTFNTE9pa2/r8/mMJem67anJzLBMIBNDS0nJUmrlomjaonFISCTKZSyQTh+3tres6Ghsb+zUpNRjpc8Q0ZswYPPTQQ/jyyy8xe/ZsWNMKTwDSEa430JtAXl5exuN5eXmor6/v8j379+/HypUrYTKZ8Oabb6K5uRk333wzvF5vtzrgRx55BA888ECXz1EbIBosGY3kwk/1qOlZBdoOmTIYLNAooijC4XAc8UA8/XPTAjaaiabNJejNlAbI3S150gCeVqfrug6Hw8GWYDseY1VVWZCgqmqn7F56IEIRBAGSKDKXAlkSkTMiB7v3HcD+ylocqCPFWSNTWVld11lziXg8zjJjiUSCuBOk7N9Eo4j8gnwSOOnkZlNUEMSWHeUAAKfdCotZYV3LigtzsXNPFdwuO5wOK4LBIGRZRiwW6yQ5GZGfjU3b9qG2oRmVBxoQiydgMZswbdJouLPseP+TNWybp580K0O33BOapkGAwLreGQ1GuFwu2G1mRKIxBMMRFOS3e0Pv3lcNXSdOFrF4Aq1tAdTWt2BEXhb2V7b/XsORKLbtqsD0yaNR3+jF1p3kGGzdUY4pE0aiJVWcl+NxIh6PIx6Po7Q4Dzv3VmFfRS32V9bCk+VES6sPuk6y4LOmjcWIvGy4s+zs+5NMqti0fR/Wb96Niup61DW04Pyz5rPvW7usSWAyiGQy2e0kkcoY/H4/NE1DXl5etxlPVVVJV7w0Sz9Zltnyo9lshs/n6/TdpbICv9/PXFvohDs9q0m9eU0mE7Zt+4LZ+u3cuQbHHXdGxlhkWUYgEEAoFOo2+0y/u06nk/3GmppqAOhoa2tCKOSDKBKNuSRJaGmpA6DD4xnBxuf3+2G32w86yY5EImhsbISiKMjPz+/yNfRGbTabmYNNW1sbrFZrl1n6SIS4iZjN5lR3SSI78Xg8rBUy/S32hXAkDJ/PB1EU4W31QjEpsFqsXb42qSZhNBgzJpdEltMGRVYyXFgOlYHKuKmaCr/f3359jffdc32wEgwG4ff7EYvF4PF4+nQPpEWmLpfrkPTjbW1tvVp9OVJEIhE2eY9EIoe14pVIJBAOh1nzo6PV52Cg6XMA/OKLL8LlcmHdunVYt25dxnOCIPQ6AE5/Tzo9XQTo0vM//vEPOJ1OAERG8a1vfQu///3vuzzhd911F+644w72f7/fj+LiYgCAmvIlpV/+9OX7dE0n0J4BptAA+Gg7QNAxDIRG72DQCYKmaUwKQo+F2WzOyBrRALi7G6goktbAgUAABoMBubm5LDvp9/thtRKXBk3TEIsRn1qn0wlJktDc3JyhDyQXAYHttyO0EYYoiSgpzMXufQewc281CbrtFrhd5FjG43FIkkS0t20+IBWbaqrWKVAVjSJbVjQYDch2O+F0WOHzh1BYkE2CEMWEWDyGiWNLUN/QgplTxwJoLx6kumL6/U8mkrBbSGY8EIxgfaob3bjRpECvpDAPZ55+HBqb2jB9ymgYjb2/iCeTSUiyBMXU/h0XRREOuwWNzW3w+dt9enVdx869pOX5hDEl8AVCaG0LoKqmEUUFHpRXkcxwaVEeKg80YOPWvRg/uhgrvtoEAKyAbv3m3Uz6keNxIR6PQxRFFOS6MXv6OJRX1sHbFmBNMsaMLMS8OZNhNne+qYmiEbOnjUNZcT4+WbkB3lY/vl63A1Lq+yWniuDo77MrHTD9LtEMZzQaZd/n9MlbR+iNwdrBdQIA+863trZCUZRO1wYaBFPrMeq5azab2fUrHo9D0zT4/c2orNzB3ltfX4G2tka4XJk+7JIkIRAIsAA1mUyisbERoVAIiqLA4XAgFAohFouxpi9+f3tGuKGhEqWlk1MTNBVfffUf6LqOk0++EC5XDmRZRjAYRCAQQFZWVpfHBGgPfmOxGFud6UoTTW/U9PpKawm6umnTyQZ9HUAsC2ngarfaYTQYYEhlgH1+HwwGA+y2nq+Hqqai1dsKQRBgNptZNlqSJMhS5pipxZ/ZlCm/CIVJAaQ7y90pAA6GgojH4kyqdzBox0Om6Rb7V25Cv6+xWAyRaO9WQwY7VAakKArLcOfk5HQ6F7quM117OrFYjBXq9tXBJX3FxGaz9bsDTF/RdR2BYKC9qDVlUXmoE6poNMpWCnw+36AJ8vubPk17dF3Hp59+im3btqG8vLzTn/379/d6W1QH1zHb29jY2CkrTCkoKEBhYSELfgFg4sSJ0HUdBw4c6PI99AaQ/odCTzAr1DGSYhBRFDu16O3YVYo6QJD3Hd0A+GhBPX2pZVb6zY5KGuiyZncewOlQ32G32800TB6PBzabDYFAAIFAgGV9c3NzkZOTA4fDwZZpgcysLUBudB1JpM67JIkozM+GILRrmMuKC9i44/E4nE4nbFYbIIB9F3ToPRYcKrICm82GqRPK4LRbMXFsKZEb2KwQRREuhxUXX3A6xowsZHZxDruDtTamxysSjcBkUpDjId/3llayHDV+TDHbV0lhHubMGM8Cv94Sj8dhNpszMmdGoxGOlJeuL9AeANc1tMAfCEOSRIwqLUBZyrmhuqYRDc1+RKIxmBQZCxfMgtNuRTQWx9vvr0SrLwizScaSU+cAIBri6pSVXLbHCQFCyjEkjjnTx+Pb552KS7+5EKfOn4HzzpiHhSfP6jL4TceT5cDiU2ZDEARUHWhAYRFxzpDk9olEug6YEg6HUVNTg5qaGjQ3NyORSDA9qtFoRCAQ6HJJXtd1+P1+tppRVbUT69Z9hGiUOHRQVwhN07r9jhiNRhbw0iA6fX+0hfbmzcQRp6hoHIqKiAvJjh1r2Hai0RBaWmqhKAq7mWtaZve89PqEUIicU2JJ1x4A19dXwmg0wmazoaZmT6p7oo5Nm1awpAMNsrtbPg+Hw2hsbEQikWAOEl11mNN1HcFgkN2oVTWJ8vKt8PkamJ45HSp9SA+M6QoTzfwJSE1y4sS6zdfmg9ZFB8h0fG0+RKLtAbfZbEY8FofX6+303ng8Tiz1Wr3wp7zjY/EYWlpakEwmuwwoo9EogqFgl9efrqDnr6WlBTU1NfC2etlK1eGQ/n2tqK5HIBRh9oUDha7riMVj8Pl9aPO1Ddh+4vE4goEgJJk4x9CJX7rcSdd1tLa2or6hvtN3Kx6PIxqLHpJ8gq5eES390ffLjkajZAWr1Q9d1RGLHlyGROnqNx0KkQZNdDWrtxKyY40+B8Djxo1
DTU3NYe9YlmXMnj0by5Yty3h82bJlmDdvXpfvmT9/PmprazMyi7t374bBYEBR6sbXF5KpC0x6UGYwGJCdnQ1RFNmSEUAcItJlELQJxqF2XxsK0ICWXljSM2YddcCapmUUVHWF2WxGdnZ2RpZFFEVkZ2cjLy8P+fn5KCwsRGFhIRwOB8vuOZ1OJJNJaJrGsrbUuaCrH3eSZoCNRkiiiLzs9qzWyJT+lwbRdrsdikmBSSEyCBqwHmxJyGa1obQwF985/1Q4HRY2po4WcXRZ3mQ2ZXwOWsigKApys13s9Xk5WRmFcocCteTqmG0zGoxwOcm2/WkBMM3+ji4bASnVBc9iNiGRJA03AGDsqELEIlFMnzwKANCWyiDPO24KyorzUVxIGpJQrW6W00baGdtsgN6eqbfbLBg/uhgFeZmtuXvC5bBhQmpSMHvOcQBIBjiRCGHjxk8RDLZl6IATiQRaWlqYXMFms7HJFwAm3+nocAKQ7GUkEoGiKNi1ay02bVqO2tp92LHja7acH4lEWGCr6zpqa/fhyy/fRU3N3i7HT7P/1HM5Go2ivn4f/P4WSJKCSZPmYvz4ORAEA5qbD6Cp6QDKy7fik09exZdfvovq6l0QRRF+vx9er7fL7nnURi+ZTKa6Crax57zeOsTjsVQiYTd73O9vRnn5VnZMYrEYC6IpqqrC6/WioaGBeT7T4sOudMM00yvLMurqyvHpp//C1q1fYMuWFYhGIxlBcywWg8/ngyzLnSbOtE4jEPCnfICNaGtrY0FNLNp9e+doLAqf3weTyQRvawD+AHE9MVvIqlXH99KMPJGHtLAmLfF4HFartVNAqes6opEo4vF4j+NIh35uevyoNtrn90HVVCSSCQSCAdQ31KOmtgb1DfVoam5CaxtpGhOJRLoMtsORMMnMN/uwbPk6fPrFJlYI118k1SSCoSD8AT/afG1kjDU1qK+v73Yi2R/s3LITX374Jbat2wYAJAhWk2hqamKrEK2trfB6vex8pBONRpFMJA9JE03vBWazmf12+0p/6odDoRDqq+qxZvkabF6zmWXHD4aqqmhobMj4XdMJn6IobMWMyo66en9fjp+qqp1qao4mfQqADQYDxo4di5aWln7Z+R133IEXX3wRf/7zn7Fjxw7cfvvtqKqqwo033giAyBeuvPJK9vpLL70UHo8H11xzDbZv344VK1bgzjvvxLXXXntIehd6EaC6SYOBZEGorVB6AAy0O0EA7RngnnStQx0a/NNjlL68LIoiTCYT8xVNJpMHtWlTFKVLk39Jkog+1W4nWcsOEw6r1cr2RbO2VAOqaxrQIaNDM8A6yA2iuJAsKVOvXk3TkEwmiWbSKMIgGGC1WVl3NGqv1RMms4llpmPRGPG2FSXS9U5rvyEkk0nitSwY2FKa3++H2WSGx+OByWRCrqd91WL86OKudtcn1KTKJCfpCIKALCfZFw2Ao7E49leS4sAJY0rY68pSRX40oB07shCCQUBRvodJSEoKc5kn8nEzxrP9mE0KRCNpgW2z2aCYlC6zhX1h9vTxEI0G5OTkYvz48SgbWYKamk2ort6NPXvWZ9ihtba2siXhriav9LGOWQ+aTdN1HTt3rsLu3e0SsAMH9sDvb2FZZEEQEAr5sWrV/7Bu3UdoaanF+vUfo7p6V6f90WJSaqcXiYSxd+96AMCECcdDUcywWOys4+GqVf/D1q1fMD/yXbvWQhRJ4w/alKPj56LBeTQaRWtrEzRNhcEgwmZzkS6BTdXw+ZoRCLTCYDBi0qS5qW2vQTgcYIV+Pp+PFbAFg0E0NDSwQr90SQhdzeh4XkkzjwTWrVuGtWs/RCRCsmfJZALhsD/jmFOttKIoCIV8aGioygimyG/FB13XoKpJBPwBWC1W1jSkO6gevKnFhzfeW4E3/vs5wpEY62wZjWUGM1QaQ+VJLS0tCAaD7PtDrwuUpJokf5LJTtvqCjpemiBQFLKCRAuRamtrWUAZCoWY+4fP50NLcwvq60nA2djYmBEEJ5IJeL1eCIKAHSkHlzZfEJFoz77YfQ1YvV4v6urq0NjQiKamJuJXLsmwWCxQVbXXWfCeiMainbZTW0Ucdir3VGL3FjJxs1qsiMfjaG5pJsFvqxeKorDkCIU2+aHyve6gRbEdj0k0GoVgICsjmqb1KQtMm83U1dUdUuDckWQyiWAwiOr95Bw315P6qt5kbWmHzjZfGwtK6SQ16CMTWLPZjFAohNbW1oxjFYvF2O+/N+i6zopY++Nz9wd9jtx+9atf4c4778TWrVsPe+cXX3wxnnzySTz44IOYMWMGVqxYgffeew+lpcRuqq6uDlVVVez1NpsNy5YtQ1tbG+bMmYPLLrsM5557Lp566qlD2j/VALcvlRpZIRf9kx6MdZUBptmO4Qi9KdIfRcebbk5ODgoKCmCxWAbUpYJmgROJBMxmMxwOR/tYBDBnAQq9+FtSRTVjRxUhPycLx82YwPTfZpM5o3sYzRBSXePBLJMMggF2B1kKhkAywgDJTOpob4qgae16YqOBfA6r1QpPtgeSKEFRFGS7nTCbFZhMMkalAsqDoSbVjKIICrE7I7P7rnSGHjeRW0RjCcRicezYXQlV1eDJcmRkomkDC4DIGWxWExRZgWAQcNr8GZgycSROmTeD/X5yPC6MKi1I/dvJMtA08D/cynSrxYQpE0n2+YILLsCMGZPYMW5srIbBYEAymURbWxvLkPa0GkGX/NMv+HQyV1m5hWVGp0yZj8LCMQCA7du/YhPi6upd+Oyzf6Gpiezb4yHnbePGz1BdvQvJZAJ1deXYuvULNDRUMv17MBhEY2MFsz0rLZ3IguPRo6fDYBCh6xpEUcKUKfNhMtkQjYZQVbWTNa6hvzNVTWLXrrX4+OP/h9rafRAEIbXMTmRnDocb+fllAIi+mGZ/8/PLMGrUNLjd+VDVJLZsWcl06vF4HHV1daitrUV9fT3C4TCTWqRDV3/SA2DaDbC1tRaNjVWphMpMpmkOhdqYDIK6wJjNZsTjMXzxxTtYvfp/OHBgT8Y+ACAWjyMWjcJgNMAoEheanuQHoVAIqqrh05UboOtALJ7Al2u2sm2GgqF26ZamZujBzWYzdF2HxWxh57qjs0IinkB9gxeiKJHPcxA5Bq3aT/89CgKZINKlferXTQsFrVYrbDYbbHaSsLFYLQgGg/B6vWyFh070VI3IlSje1kC3hXCqpqK+oR6xeO8mpPFEHOFwmExmU2OxWq0QpXY70b5IDLoKvlVNRUtzS0aQqes62lKNhABg99bdqNhTAUEQYLFYEA4Rmz1ZkpmckUqLABL4JZIJyIrc5eSbTiZr68jkI90NgdUIpM4X/e32JqjTNA2tra0s6+r1eg/bwSISiaChpgGh1KqbrusItKZWBQ6y7UQiAU3VEA6F2YpNOBzGns178PkHn6Omoob59Hu9XtTX15PgOBhkGX7aofJg0KJcOhEfDPQ5Irn88ssRDocxffp0yLLcKfPa0W7nYNx88824+eabu3zupZde6vTYhAkTOskmDpWOGWCjUcwonKHBMNWvphfCpUsghrpXXk
/QzJLJZOoU4FJdodVqZQVPA4XVaoXdbmfBLw2ABQjQtCToZVVVVVaI5XC0NxA4/6yT2LbUpAqnw5kR5CqywrKzvS14sFgsRApiFFmxmSilpDVJFUbRCAFCRtGN3WaHyWRij8myDJNJwQVnzIdRNGY0suiImlQRjUVZwYcsyUgkycXGYrZA00nw27GYJx2z2cR8eVt9QWzdWQEAmDZpFHt9NBpFQW4WJElEIpHEhNElJGPucEKHDlEUMf+4KZ22Pe+4KZAlCRPGFMNgMLAMtMViYcVZh1NtPGPKGKxev51dkzyeUvh8dYjHowgEWmA0kkxGbzyzaeFXJBKBzWZjxWqxWBSVlWTJdcaMU1FcPB55eaWoq9uP5uZaNDZWo62tkWWHs7NHYOrUk2G1OrF160pUVGzHxo2fYfPmFSzjUlW1CwsXfpdlberqiFSitHQim5DRQGv27IVoaanDqFFTYTbbYDSK2LRpOfbs2YCSkgmQJAm6rqO+vgLbtn3FMqx79mzA/PkXIBwOo62tGQDgcHiQl1eKvXs3skkCQDTHgiBg2rQFWLHidTQ2VsHrrYPHM6JPLaWNRiOCwSAr0PX5fIjFYmhqIvUao0ZNx4QJx0NVv0JbWyOCwVbEYrGMxiGiKGLjxpXMCWPbti+Rm1vEkhFmszkl6xDYeZckCeFQqvCrg6sDDTbXbtqDYDgKq8WEcCSGfRW1GDuqCEX52YjFY4gn4lBkBfF4HD5/EElVx4h8he0TAIKhCCqq6lGU7ybZxdSutu3ajw+Wr8OkcaWYOWUUm1B3Rzwex4qvNyMeVzFnxvgM+Q/VYR4Mg8EAi5kU61Ive7/PD4vZgnVbdme81tsW7LYQLhwOIxQkmmtFPrijApXtdOW+YDAYoGs61KQK9KJHUyJJpEkul4u509B9hCPh1MSDyNUi4SjCIfKdGDluJMp3l2PLmi1QTAoKigtgtVozHJpooSWVnCUSCTTWNBI7yyI5o2CMWhhGIhG2KuD3+2Gz2VhBbTwRh8Vsydi21+uF0+nMkFOlo6oqWlpa4PMR+Y3RaGSZ/O6uxweDTo4PpOwxqSTR2+RFVm4Wq/XojqryKrR6W1FUVoS2tjZIEpm0tTSSrO7OzTtRUFLA/MfDkTDq6uqg6RqMBmLvSJsApd/fyUpTK1udTSaTrMjUaDQiHA4f8mfuT/ockTz55JMDMIyjA9UA03NgNIqs4IMGUTTAIwFwewZYUUy90rUOdejyLZ0sdAVd1htIjEYj8vPz2YXHwIq7NIhGGeFIGGbBnDHbt9msTMNFx0czEOnuCPQz2Gw2ol+UetdxTxIl1sKWBtOSSDLhVDNoFI2Q5MzMT/r2RSP1G47DbG6/KWiaBk3VoOnEizKRSKRa/7Y3FpAlmbQ/9fng9/shCAI8bg8cTke3RviiKMJmNSMSjWPD1r0IR6IwmxQmZUgmiH5QVVXMmzMZtQ3NGD2yAMlEAmYLudB6vV7mmAG06xutFhNOmTcd0Wg0o3U3tZEK+AOsoJIGh/R8SJKUUVhJtayKrLCmG4osYdeOrcjNL0RbaxQXfWsJZNmAurpyNDRUoaxsKvM+TieZTCAYbEMo5EMo5EdWVi5ycoqI+0YgwGy7SFaoFpqmwW53o7iYyDosFjtGjpyCffs2Y+3aZdA0cm7HjJmBCROOZ9eHKVNOAiCgomIbNE2DxUIbkASxf/9mjBkzC/X1VUyGQAvf4vE4bDYbgsEg8vPLWNYWIMHqvn2bEAy2Yf/+zcjJKcKOHavh9RLZislkRTQaRiDgRTwegaYJCATIzc3p9CArKxeybEI8TjIyimJBTg6pp7Dbs1BUNA5VVTuxf/9WlsXuCU1TEQr5YbO5mAyCZoyIFZcRTU1kqbagYCQAEogDgM/XzIIN6ozR2FjNZCMWix3hcABbtnyBOXMWAyC/c8WkwGIxse+GwWAABJLp7RgAx2Ix7Nl/APsq6yAIAhafMgflVXXYtG0fPv96M75z/mlQVZW4AKUC4M++3IxWXxAnHT8VkyeQYx+LJ/CfD7+CLxDCnOlj4XS1y5QqUkvzeytqMXVi6UED4Pr6ZlRUkwztOx98iVGlBThh1qQum/H0hFE0QlZIu2xRFMl1xSBg5x5yvAsLslFT14zmVj/TLadfBzRdI7KTZAKhYAhOh7PHextdyqcFjR0RBAE69F5LIHw+H3w+4uJhymm3GaU64mgsyiQlLU3kO6yYFUyePRmapqFybyU2fLUBVrsVjrTzEWgLQDbJ7JohiiL8Pj+2r9sOAHB5XEzaBpDEWDweZxaGmqYhFAqxhinxeBw+rw+bdm7CpJmTkJWdBavVinAkjHA4DLPFDJfTlbHKRItTqXc4DcyJDp3INDpel3pDLBZDU0MTvE1eQAAmzpyIbeu2obm+GaMmjmI2Zl2RTCaxZvkaJJNJ5OTmwCgbyTnw+sikBUA4GMaB8gMoGV1CViGsNpY4TE9WRKPRDPcNaikZDAYhSRL7bSqKAlVTEY1Eu7T9PNL0OQC+6qqrBmIcRwVVzWx2QTPANENEZ9906aW7DPBwhh6nwTARSD8X9AKj6zrsDhtkmTTMiKQVpciSDJPJBJ/Pxx5LJpMQpa7bSptMJpKtlHr/s8n2ZNrHUNslarwviVKPXZkEQYDZZEYk3J6xUZMqwpEwu7AYjUY47A6YLWaYOriVKLKCnOwcYgsloMcbMUBkGHarGU0tPlQdIPZmkyeUseMZT8TJeKIRjBtdhAljS5ikQlEUpm+m+njqOyoYhIwmMzabLSPDbrPZEI1Goes6OSapJVSa0YhEIggF2ws1REkk9lWhMBQo7GLc6m3Gv998B2efdQkAIDe3BHV15WhsrMb48XM6TcQCgVZ8+eW7iMfbj6/BYMTChd+F2WxmF3FRJPurqyNON0VFYzO2M2bMLFRV7UIiEQMgYOrU+Sgrm9zpXE6ZMh8FBSOhKBbYbC40NlZh9er3UV6+FaNGTUNLCw0OR0GWTazokh6fjl7GBoMB48cfh3XrlmHPnvUs82wwGDF69DSMGTMTq1a9B6+3Ho2NVcjJKUMgQGzFHA4PBMGAvLxSFmQWFY3N+B2NGjUVVVU7UV9fgXA4AIula3uxQKAVVVU7ceDAHsTjEYwbNxvjx89hRXKRSAQWiwVNTdVQ1SRMJhucTvLbUJRU4aW/hWWHyA1Tw+bNKwAAI0dOQXHxeHz++Ruoq9uPurr9KCggkpfc7Fy4PRJ27z+AlV9vxqnzZ6J4RDaxqUomMqQFra0+rNlEZBSzpo1FXk4W3Fl2lFfWwR8MY82GnZgxeSTC4TAcdgcaGpvRmtJCfrlmK1wuG0bkefDpyg3MKaWlNUAK5XTird2YWpqPxxNobvHDbrN1G0zquo6KahIwK7KEeCKB/ZV1qKppxDcWntCpGDSpqhB7WL2QZZnpWq1WK/ZX1iISjcFiVnD8zIl4s+5zNLWQZeiOHeGikShb8YjGoojFYxmZ2I5EY+3WdZqm4UBtE8qr6xGPJ1MTWB2jSwt6ZaEViURYwSNdNTCbzIjFU1ZlZgsi0QjLXLc0kZVmu8OOeDyOKXOmI
BQIobmhGWtWrMHJZ54MURSxY8MO7N+1H+4cN6afOJ0VN9fX1LPJdcAfyAiAaSFyezKF1LrQLHA4HMaBfQfgbfJi+8btmL9oPgsOaXBXF6mDO8vNalq8Xm+n4BdoX0FtaWmBLMt9XgELh8Oo3FMJACgoKkDJqBJs37Ad4WAYsUgM4UgYTqezyzjF2+xl0oWm+iaUjClBKBSCP+U2RK+9u7fuRlFZEQwpm82OY+yY0aVSJ5PJxK73yWSSBchGg5FZUB5zATBAUvlvvfUWduzYAUEQMGnSJJx33nnHnBtCIpHMOJk0A0z1v3TGQvU/HTXAsVhiQJf1jwVopvxod8PrCP3Bq5oGm92BokIFRskIY1277Z6iyCyIpBc8KufoSh+ryApyc3MPu42oLBMdcDLRXjXfE4pJYbph6vFot9vh8XhIkCj0LMOhQXdvMKS6rVGMBgMmpVpAA+S3b3GR5+msP5FIwGEnrWQVkwJJltiyaCQSgdVGMtItLS0wplpUd7zwWcwWjBgxgkyoutBXJ9UkKyiUZFJMKBpFtLa1wttCdI+yLENJTVzklBY2J4foS9vaGhGLRaAo7cdB13Vs2rQc8XgEkqTAbs9CNBpCOBzA3r0bMWXKfOY2AgDhsB9eL/n+FBZmBsCyrGD69AXYs2cjxo2blZGl7XgusrML2f9zc0vgcuWgra0JO3euRm3tPgBgBW/0GNtsNpaFSr/mJJNJ5OWVwunMhs/XDEBAScl4jBs3G2YzCSzz8krh9dajoaES+fkjWbBvtToRiUQyAuDi4nHMSlBRFNjtbmRnF6G5mThPTJ58YqfPtHPnGuzZsz7jsb17N6KkZAJr8EJ9vOvrKwAABQVlEAQhld12QRQlJJMJxGIhyLIFZrMZ27d/hUgkCLPZjgkTjocoShg9egb27t2ALVtWwu0ugFEgmc5YLI6VX29GIqliy/Z9GFmSz7LPkj3VjEfXsLe8GolEEllOG2alfLglUcTJc6fhvx99jW27yjF5fCl5bzSCfRU1qfMGaLqOZZ+txeiRhag80N4V0dsWyHBWaG1r16oeqGvBiHwPaTsukWYpiST5vQiCgHgijtoGks2cPKEMo0pH4POvN6OhqRX/+2Q1zl40F3k5WWjzB/Hpyo1obG5levqRpQVw2Dpr2dN/W9t3k+Bo/JhiZLsdEEUjEokk/P5wJ91mINUZNd2XuacAOBKOIJlUsW7zHuzaW80KYtMJhaMYN6a0i3e3o+kaWtta0dTSBsCI7Cyy2kG7a6qqCtEiQkyKCIUi0Mw6vM1kEme2m5FIkhXaWfNn4fMPPkc4GMa6leuIh3QTeZ23yQtN1RCJRmCJW9gSPwCE/KEMrSw9j+nH1WQysaxmNBpl+mNvoxeBtgDsqcJfo8HInEFaWlpYzYi3xYtt67ZBMSmYdty0jCQK9cwPBAK99o0GSJLB2+JFQ6pD56gJo2AUjcjKzoK30Quf1weThXSKTLd/pTQ3NLN/N9U3YezksVBVFQEf+R6MnjgaVfuqEAlFUF1ejdJuzqMsy6zglRWjpyQidEW9q6CZZtSPZuKsz+nLvXv3YuLEibjyyivxxhtv4PXXX8fll1+OyZMnY9++fQMxxgEjmcwMYNM1wLTil/5NipVIBlgQDDAapUHRBe5oQ7O/g20iQM+Llrb8ZhAMTP9rNBqYvIB2/gNSAV4PXZ0ON/gF0B50671ro00lOZqqsaUph90BSZQ6dac6XIxGI5xpNmtjRxWx9sjUE9ZsMsPhdLACFwFCRiEfs4ZKkBUWl9MFl8sFp9OJYCiY0UaaQrr0Sd0WF4pGEVaLlXhE20ijAEEQkOXKgifbwzw5zztnCSaOn4wpU4kdmslkZUvsdOmdUlGxDa2tDRBFCaec8i3Mn38+pk1bAACorNyBSCSUcWxpAVZ2diHM5s7LlQUFo7BgwTe7DX67QhAEjBtHfJKrqnZCVZOw2bLgduczA3+73Q5BEGC1WqFpWoa3Ng1U5sxZjHHjZuPUU7+N6dNPYcEvQAJgAGhpqUVbG1lqt1od0DRys/d4RsDjGYHi4vGw293M4olWkY8aNYWNL9nBm/bAgT0s+M3LK8Nxx50BtzsfmqZi1661bGmXZpPq60lAlp9fxryZyXcjBwCxXiPfnxgqKsgS9fTpCyCmJqTjxs2CzeZCLBbBmjUfQFWT0HUdX67djkTqt1Hf1IpQmMhs/D4/kxvFYjHUN5LMYdGIHDZJ1jQNRSNykJ/rhq4DFdUNTPZSXUN8q2dPH48cjxOxeALbd1UAIO3IAcDnDyEWjxNnieZWqGp70VvlgfZtNTU3obauFo2NjcwfN5FIoDEVzBXkeuDJcuCcxSdiRL4HiUQS7330NdZs3Il/v7ucva6ppQ2r1u/AP9/8BC//60P8d9lXWL1hR8bqFkAC8Zo6EuRMHEu+A54sEgg1t/ozCuGiMVLgSQPejsWAHVE1FYFgAJu2l2PDlj0IR6IwKRKmTBiJ+cdPwaxpZHIRDEUQj/VsxxUMBuHz+fHJF5vxwWdrEAjFSHfDcIg0qIknsX13JURRQixOGtf4vCQpZXPYYLVYiTRNkTHn5DkwGA1orm9Ga1MrWc0zkWtNoC2AeIwU7fm87at+4WDmZMDX5kNbcxsi4Qj7/DQuCAaD8Hl9GZ+pYk9Fp88kSRIsVgtr1ezz+tBc34yaihp88dEXGSt66d0We7IuC4fDGc4OkUgE5bvLoWs6sjxZcOe4EQqH4EwVMrfUkxWV1tbWLrfb3NQeAHubSDbYbDazSUNOQQ7GTCIFvru37u62oI7KROg+qKVax6xzMpnEqk9XYfXy1ZDEdlceCrWuO5L0OQC+9dZbMXr0aFRXV2P9+vXYsGEDqqqqMHLkyD53gTvaJJPpRTdCRhBHA9/0AMluz0JOThHKyiZnmOwPZ6hOerAFwFQDnG45BrTrUUVRhGAgLVSpNy8N8HpT/HE4UI/knjrjpSOJEitoi0QjzLd2IDAajBk+w1MnjWL/TiQSJMtqUlhxQzAY7NRRzmw2AzoQiUZYoxKDQBqcUC/k/up0RQNsd5Yb0WgUJ8yZgVu+dztysvPZJCg3l9i3NTa2B8CRSBA7dqwGQKzGaMCYnV3IAri9ezew1xOPXBIAU21uMplEKBRirYi7g2oIg8Fglxf43NxiZGW1d3ejxW80e0TPtclkYtci2omKdn8zGhWMHz8HdnvnTm02m4vpjfftI935HA4Pm+ypqoZ5887FjBmnAiDnmRYRqaqK3NwSWK1OJJPxDBu3trYmbNq0HAAwZsxMHH/8GcjPL8PEiScAAKqrdyMQaC+K9nrrkUhEIUkmuN0FRC+Zcm1xu4mtns9HAs6amr3QdQ1OZzbTJAMkSTFnzmJIkozW1gZs2PQpahvrUF3XBIPBAKeDTEz2V9bBbCZSndbWVhZs0y6DeTkk0xaJREiDj0QSY0eRzPze8pqUY0YEDamgc1RpAc447ThYUo1ZJo0rxYwpY2C1kICxzRdCIpFAbX2q0YvbCZMiIRqLo9mb8mf2EXtDRVHg9XoRDAXR1OxFKByDIAjIzSHnThSNOPO0
45Gf60Y8kcT6zXuQVDUU5mfjorMX4OQTpqYa+AiIxuI4UNeMDVv24r2PVzGLx6Sq4pOVZGJSWpQHu82CSCQCd8rnu6UtgEg0gngiDlVTEQ6lMq2pzKQsyxma245EIhGEQhHsrSDyjZOOn4rLv7UY84+fgikTRmLqBHLdiETjiMVjGTpgTdeIl3KrF/UN9WhpaUFdUyvr0Lm3ogaaqrFs62dfbMLnX2/GvopaQCcBFrXosjtJ4bPZbEYoHILL7cL046cDAuBwObDgzAUoKCbuM61NraRxSTiCQFqWPhQMZfjVr1mxBhu+3ICP3voI77/+Pr765CvEIsQzPBqNEr0t2utEDpQfYN7y6RhTtqoWq4VlaQHA3+rHyg9XMqkBPd6JRKJL72yA3LcaGxvR0NDA3CwCgQBqK8nxHzl+JAtQcwvItaS5sRmSJBHNss+Xce3RdR2tqe82QO6TLQ0tRDoRJUW3Lo8LpWNLYTKbEA1HseJ/K1C1r4plifdu34u1K9eitbmVuWzQFsod76G6rmPz6s1orGtEQ00DWhpbMlxiAoEAc5U4kvQ5elu+fDl+9atfZaTqPR4PHn30USxfvrxfBzfQqKqa0f443dOXVmnS5X1aZDR37tmYMqW9UcfR1r0ebag28mAev0earjLAQHoA3N6VjmrYqL5SVgb2s9CgmxZ2HQxBEGAym9gMmy6fDgT0Rjxm5AjMmTGeefoCxN6JWpcZDUa2rGa1WjM6yikKKUyTJCljiUs0isjJyYHb7e7X8QuCAIu1vWCF0F7VnZtLvJMbG6uh6ySDumXLSqhqAllZeRlaXUEQMH48zcjuQCRCbkhtbY0IhXwwGkXk55chGAyyQpnc3Fymg+sIzf6ZTMQXOhTKzKpRazWaBTYYxIziN9oRESBZJbPZnPIKjsBkMsHtdiMrK4s1a+iOvDwyCWhtJTdihyObuSykZ7/o2Ox2O5xOJ/tMI0eSLPD+/Vtw4MAe1NdXYM2aD6BpKvLySjBhwnFsG253PvLzRwLQsWPHKvZ4fX05ACA/v4RZdWVlZaWa3RSkjjMJIKklGy00TMdud+P448+CwUAK6jbtJq4cs6aOxZQJpLBuf2Ut+V6knBECwQD8/gBafWSikpvtYsWcDocD0WgUo0pHwGAQ0NLqRzAUxf7KGmiaDofNApfDBqvFjAvOOgkLT56FeceT40FtA9t8pEiyIRUcuV02FBYQ7WtNg5fYKtptzKbNYDCgubkZldV1qe04IInp2lARZ51+PAry3BCNBsw7bjLOXjwX2R4nJo0vwzlLTsS13z0LF37jZJw8dxpMiozmFh8++2ITyYiv3opmrx8mRcZJJ0xlqwn5eeS+3eL1s26I1dXVaG1rTQW9cQRD7e4HXXnJhsNhtLa2ory6HsmkiiynDZPGl2ashiqKxD6Pv0OGNRYl/rHeFi/rOFZ5oIk9v7e8hljIBUNoavHBmwpW91XUQJRE+H3Exs5gMMDuJM2CPB4PDAKxqSwaWYQlFyzBgjMXwGq3IjuXnIeWxhYIBgEtTS0Zv5VIIMJs3yLhCJM3CIKAZCKJ5vpm7N+9nzVAooHrqAmjYLUTm7qayq6bgwmCAF3VWQA8+6TZsDlsiIaj+GLZF0yKQVebu8oCU+s0mqRpaGhAW1sbKvdVIh6NQzbJKCguIMkRqw3uXDdkRYaaVNHW3EZah/t9GdenRCIBfxv5HGYrmWA31Tex8bg8LkSixEZt2vFEshH0B7Fp1Sa8//r7+Oy/n2HHxh2oq6rDjo07mGwmGAwSr/Wm1gybuoo9FaipaD9GVXurIBgEpv2mLdT9fv8RbZLR5wCYet51JBgMDrog6GBkZoANGdWssixnVGp2dVJ4ERz54brd7kGXAW4PgDPPWyy1dEUzsAAphhMEAfFYnEx8DAMva1EUhXzHerkvKjEYyOwvRZZlnDBzPGZPG8ce03UdOjK7x1Ev0o7jEY0irFYrHA5Hp0yALMk96goPFeokkUhz+aDnNysrD5IkI5GIYceOVVix4nU0NFRCEAyYPv2UTsG4xzMCbncBNE3D1q1fpJppbARAlu6JBMqIgoIC5Ofnw+l0IieHLOHTQj6azSESAw/y8/ORk5PDsuaki1m75tLhyMGsWQtxwglnQpYVJJNJGI3GTnIcmpnVNA1ZWVmQJIl5r3YVgJOub0E4HLkZj9tsLubvSQtXAHJjpE1sSEMZouEtLh4PUZQRDvuxYcMnWLPmA0SjIdhsLsycubDTMZw4kbhfNDRUYd++TfD5mpn+Nz9/JNMF08+XnU0cJvx+L9ramuDzNUMQDBgxYjQA8jumjTEAEmTPnr0IANlvltOGGVPGYGQJCaQbmloRCIZhFI0QJRFerxe19U0pKy1i9ReNRuF2u8n1SxJhEEjzFgCoONCIA7VkibikKI99PrvNgjEjC2FMfbey3WQS2OonEyIqU3DYLSgeQb4TldUNEDrcJ6h9W0092Ud+dhZr+0wnIbIs4dwl83DVJWdi6sRRnY6xKBqRm+3CpHGlWHzqHBgMAvZX1uK9j77Gjj3EP//0k2fCZjWzIq+CPBIMtvoCkGTiFENX8ZKqjtffXY5X3/4UwRBpzBEOhZmPMdXq1jfUIxaLYW85Cd4njS/rNDZBEGBPtVUPBMLQ0mQh8QQpSLbZbanvs4ADKZ9iUgiYxIH6ZkiyhD2pfQBATX0LNA1oayNZaavDyuRhZrMZLpcLsVgMmqZBMRNPcgDwpAoJ/W2kTXBTKkvv8rgAkKA3FiXva2og3xHFrOAb3/kGph0/DQBQva+adQOkGeCc/Bymi63YU9Ht8n1DTQM0VYPFZoE71435S+bDk+tBMpnE159+jca6xtT57joL7Pf7EQgEYLFa2O+lpaUF1fvIilbJqBLo0CFAgNPphMVsgTu1wtFY10jiGggZUohQMIRomBzHklSDo6a6JvbZ3DluQCcJo7zCPCy6YBEmzZwEk9kELTX5oMfV2+SFAAHJZBLhCGnHvOqzVfj8g8/x5UdfonxXOevWVza2jByT2gaocRXhcBhNzU3EfShV6NvVdWyg6HP0ds455+D73/8+Vq1axWbxX3/9NW688Uacd955AzHGASPdd5Te1OhNM12nmG6/lM5wboIx2KESiE4BcOoCIBrbA2BJTnX903rW//YndrsdWVmdl6u7Q5IkWMyWAc3+UkRRBDpcy2n3uPRJrmgUkZuby/ww0/F4PMhy9f7zHS6CIDCtLFK/U0EgGYZ4PI6cHJIF3rdvM/x+LwwGEVOmzO9SMkCywLMBkAYRGzd+ioaGCgBE/hCPx6EoCgseARKYZmdns65MqqrC6XSioKAAbrebrSTl5ubCYrEgGo3C6XRixIgRcLlINnLEiNHIzi5kmTfaCjsd6rZBG6YAJNDPyspiy5CxWIwFvqqqwuVyweXKZTpaMl4Xu+nQBhcAWFabnuusrKzU6pcRs2adjoKCkcjOLoTTmY2srDwcd9yZkLqwBbTZXCgpIRrZ7du/xooV/0YkEoTRKCInp4i1kaXHz+HIgiQp0HUN27d/DYBkrWnRYjgcZseNkp9fhhn
TToHH5capc6fDaDTAajEx5wTawdBkMiGZSKI+dXPPzXEhEiHyHJfLBUVW4HA4EIvFMGYkkUHsK69BXSMJZkuKckk3yy6WubOcZIWktY1Maryp7KDLYUVpUT4kSUQ4EmOZ4XSsVitavGQSlO1xwGKxQJEVNnGi38WeXB8oI/I8OPkEEqwdSOl+Z08fh+IRJKCPxWJktSLHA4tZga4DLa1+YsMoSTCKIj5asQ6hcBTJpIpd+6pZcVNtTS0OHDiA2ppaNDc3QxRFtAUiaPMHIYlGjBtV1OWYaAAcCkczWkVHIpGM++b+ylpouo5stwOTxpOActfeKkTjSVZsaLUQ16XqmmaEg+TYWO1Wdm8GAJfLxYpF0+/VikmBLSX9CLQFWMOI/MJ8JvkIpAoZWXDsdsFgNKB4ZDFkk0yy1jUNaGtpg5pUISkSHFkOFI8qhsFogL/VnyEpSKemimQ+84ryEIsRucsJp56A3IJcaKqGNcvXoC61EkCzwD6fD5FIJOXb3ZaRLDGbzYhH42x/pWNL2fWCrjS5c0kAXFNZg3iM/KYjkQiampoQj8fR1EA+p2JWUFhKvvNBf5Blqp0eZ4YNrCRJGD1xNBaetxAnn3kyzrjoDMxbOA9WG5mQNzc0sxbgzXXt2uKWxhZsXbcVuq6joKQAU+ZMQXZqElZbVYukmoQik2sa1Vn7/EeuVXKfo7ennnoKo0ePxoknnsgO9vz58zFmzBj87ne/G4gxDhiqmsyQQKRngF0uF3JzycWD/li7muENdwnEYKW9wCVTAkELPySx3W1ANJK2wPQHfySQRKlPmVBZkuHJ9gx49hfo3NEPIFkbWZY7aXdpMVpHDuZMMRCYzWZIsgI9lW2Kx+MQBAGJRIItpVutTkyefCIWL74cZWWTut1WdnYhxo8/Djk5xcjJKUJOThFGjZrKgjfa7jgdh8OBvLw85OXlobCwEDk5OZ3OlyzLyM3NxYgRI5CTkwNZllkQSoO7aDQKWZa7bQuenZ3dyUTebDbD7XazG4mu67BarcjPz089boLHU5jahgJRVJgWnbaspQmN9EmgzWZjgWdeXinmzFmCE088BwsWXISTTroANpuz22M4adJcjBs3Gx7PCIgiCZILC8dCENoLjSmiKMJuJ4FrSwvRNaZLQYxGIrmhTQwoBQWjcMLU2RlSndGlJJu8P6WPBACrzYo2P8kseVxkYuF2u9k1gE428rKdkCURwXAU4UiUZFlTModINJJxY04mkrBZyOdq9QUQCIYRjpAVCLfLDpNJQVE++UxrN+1GQ1Nrxj0kHk+gNVVxT15vQn5BPjxuD3E86WN78AljSzAtpdkvGpGDWVPJ8aOFk1arFbIkIzv1eRqb2gO21et3sgJBgASgBoMBsiK3t3ZWk6y5z7ZUIeDYUUWQUz7mTOseIA1kqD46GGrvSKZqxGM5/Tq7p5wEiKNKR6Ag1wWABPGr1u0AABSPyGFONBXVjYhFyDmwOjI7EBoMpM6ABu6URCIBl5ts19voRZu3DQBgz7LDkgrSg34yWaSBIX29wWhAySiSIa3aW8WcE7JziQZbVmSMKCHfty8//hKrV6xGbVUty3gn4gk01ZJt5hTkwGq1IhaPwSgacdyC41BQTFaa1q1ch9qqWsiyDFVV0djYyNpfJ9Vkp4lwbUp7nTciD7JCJDU0ASDLMvKK8mC2mhENR7Hui3Uk456ycGtqakJzI/kcNocNJosJjlRxJC3uc7jIb81qtWZ8Dw1GA1xuF5s45KRWORrrGpkFHw2ipx0/jTlTOLOcmHEC6QxamirIrN5XDYvJkvFdMJlNiIQjvWrj3B/0ed3a5XLh7bffxt69e7Fjxw7ouo5JkyZhzJgxAzG+AYVUPbZngNMzuhZL+3JDestLCr0Q8gB4cEKDOLXDTDKaJoGgS2QACSDUpDro7Nwo1H3hSECdLtK7I3UX9A0mDAJpBKJpGrvpu1wutLW1wWTKx1lnXcusDtOhbgpU809XgsaNm9VpH9SXt7vvCe161hMdbYFoK++GhgbmrOHxeLqdjHW3SkEyvS4mkUjvUmk2m5GdXYSGhgq43XlkmVjJ7GwWj8chSVLGZ6M31nA4nPF96A2iKDE9ta7riMcjkGUz20/GaoIowunMhtdbmzpGJuTllTC/UI/Hw7K0tGNVV6hJFSWFuRAEoLG5DYFgmGUim5rbAADuLFsnq0NJlOB0OtHY2Iiyknzs3kc6axUV5CCpJlm2OhQMwWojbhyRaAR5udmQJRHxRBKVB8hStt1mhtlM2hiPLitAeXUDauub8db/VsKVsl8bO6oI9akA1Gm3wqyk2lgbRaKRNwhobm5mbXwptDtZd3LDubMnYdzoYmQ5bazDKbUkNJlIFjU324WqmkZU1zYhNzsLrb4ANm8nDk6nnzQTK1dtQSAYQW19C9MxpxMMRVBZTewAJ6c017S9rcVsgdlC/LnNJnJ8Q5H2av9wJIL3Pl4Fk6Lg5LnTSNfCVOBdWpQLi1lBXrYLDc1tKK8iWdEpE0fBYbdgzcZdqK33QpLJ8bA5bJ2+B1QPXN9Qz1r0iqIIZ7YTB8oPoKaqBvEomRhb7VbYnXb4W/0IBUkRI82qUnkEAJSMLsHe7XvRWNfIus9l52ezIHv8tPEI+ALweX1oONCAhgMNcLgcmHPyHGK/pmmwOWwwW81MK6tpGgxGA2bNn4VNqzbhQPkBrP9iPQwGA/KLSJv59N9xOmpSRXU5kT+Uji1lq0n0d0xXdqYePxXrPl+H5vpm7Ni4A5NnTWYNdVpbyOe02q2kQDnXzbTNDpcDgkFgk3Of38e64XYktyAXFbsr0FjbiGnHTUPQF0QsGoMoiSgeSbLjE2eQlSD6OfKL8qGYFMSiMdTX1LMJBEDqYyCAtasfaA5ZuDlmzJhjMuhNh/w4yJdGEAzMnqoj6Yb86Q0WuARi8NIugcjMANOlXmNaERzQ3o63Oxuu4YTBaIBgENj3nbljDNLJQToWs4X4OasaCwhjMWKr1F0bX2qOTzvMUe/dLjPhKWeG/j4WNpsNgUAAfr+fLeUeKnQ1Kx0SABfjuOPOgMuVg0RCa+/El5JVBINBuN3uTkGF2Wxm7hPdBZ5Ut0h91DsGbuT7Q25oiUSCtSynEBlHLspJnRyKisbAYCCtlK1WK5xOJztOfr+/yxuypmkIR0iAUpDnQW19C/aW12Dm1LEIhiIIR2IwCAKynLYuzx/ddllRLguAS4pySQCTytLTKndN0+BwOGCz2eB22VHf1IryapL5ynKSwkWr1YrSogKccZqI/ZX1KK+sRZsviE9WbkAgGEYiQa5NeblZgJDZYMDpdLIlcPpdCIVCMBqNpNFAIAizxdzpGAiCwKzOKIl4gshwUtfEEfkeYBNQ19CC/yz7ir1u2qTRJDBv9GL77krs3FvVKQCORuNYuWoLdJ0cY7fLjkiEZMY9bg+cLieMBiOyXFlo9AawbvNehMJRdt09UNOAxmbixPH6u8uZXGVEngeyRAL7UaX5aEhNVlwOK4pH5JBakywHvK1+xGPkuNld9i5/o1arFS6nC8FgEFlZpEsbXfmLR8
k4nG4nIADOLCdqKmoQDoQR8AcQDpLvj9lmZt8xq92K7LxsNDc0I+gn3/PsvGwkEglmT7jgzAXwt/lRU1GDqn1V8Lf5seL9FTCluneOKBkBg8EAq9WKeDxOugOmWibPOGEGdF1HTUUN1q5ci+NOPg55hXkZv+OGmgbs3LwTiXiCeE7HE7BYLfDkehCLxzpdLywWCyw2C2bMnYF1K9dh/879cGY5UTSyiMhEUjIQW+q3kJ2XjYpUVt+d42Y9EEwmE6wWK0LhEGzWztckTx7xoo+Gowj6gkzKkVeYxxpndIyRDAYDSkaXYM+2PdizdQ9am1oRDoVhNBox9bipMJuIo0d6Fn+g6PPd/lvf+hYeffTRTo//+te/xre//e1+GdSRoqsiuK5ID4C7epwz+EifqKRDNcCSmOmfK0tylz/w4QjNgtLveyxGOkIdCwGwLIkwiEYossJuCrRwrCsJE82yOBwOFBYWoqioCC6Xi2U8O0JbKfd3JtxgMMDpdMJut8PlcvX7dYUW9ebkFEOSTKyine7barV2WXQHkMCs41JoOrRVvMfjYcuwoVAoQ6qQjq7rXUp5cnPbG4QUFY0jHtOCgKysrIyCZNqSvCO0Y5ggCEwGsWnbPkQiMTSksq0etxOSsWvfcups4smyI8tlhyxLKMrLZkWBsiQj20OWvi0WCzweD2RZhidVCNeWsuZypZbmDYIBVpsVHpcNp580E1d8ewmmTyZFfWs27sLWnaSrYF62i00aKNQ20Gg0IhaLseA3NycXBQUFsNltiIQjCAaCiEajzB+8I6qqkg6QlvbjXVpciLKiPLhddrhd5LNOHl+GE2ZNAACMTxVFlVfWsaJhgGiq//XOZ6g80ABBIK4bmka8yam7Cw2ySSBOJi3BEOlgqOka6urb3R5i8QQqUpnkMSOJ9t2kmDCqdASk1BL7lAmk+C8YCKK0qL2QUzGRLp5dnUdamD1ixAhkZ2cTqz2nA1Z7u3d3VjbR/ttTGu5wMMzaK5utZuJQEI2w6wAtFAMAk8UEk9UEwSBkuKg4XA5MnDERp5x1CrJyspBMJFnAnFeUxzL3NpstwxVDMAiYMXcGRpSMgK7pWPv5WjTXt+toA20BrF25Fv5WPyKhCBIx8rsaNXEUovEos6RMhzqN5BXlYexk4sm8ec1mxFJe0XRcNgcJgN057YXs7lwSANOEoN1uB/TO9TQAyTbTYriG2gbUp84ntZ7rCC0QLhldQjK9bX7s37Uf9QfqUVNZg02rN5HVdk3v1ne4P+lzBnj58uVYunRpp8fPPPNM/OY3v+mXQR0p0m3QaLvDrqCBbvqXlmbFBvOS8HCGuUCoXWeAB0Pr5sGK0WBkEz6aFU3XSw5mBEGASTEhKbW7uFDLua4ymPQxWtAmiiKysrJY0JGeWaG/+YFq32m1ksBpINx06HZp1qqjDMNkIkv23U1yrFYrfD5fpyVZ2jXO4/Gwok5VVeHz+eD1tpvrU6iEpKtMst3uwsiR0yHLEpzObNaNLv140xtyIBDIuEFG43GIEpEPNLc0o6w4F9t3O9DS6seXa7fBlLI2zM12scClK6jG9RsLj081n0my4Jc+n5eXR7zPjaRbaI7bBaCSbcPpsLBzaDKRyYaaVCHLEubOngSrxYQv12xDMqUTdWfZ2fbSMSkmuFwuNDU1Ee14Ti6boOTl5rGJQDRK2harEbVTZ0naqji93sCkKJh/3CRYbV1P5HI8TrhddnjbAthbUYviEbn4at02VFSR4CbLacMp82YgLyeL2fF1tWLhTmWio7E44vEE4vE4m4hMnzwagiBg49a9kCURJUW50DUVFqsF8UQcJ86egFZfCOPHFpMiTKMBhXlubNhCtm1zkglbd+fRYDBk/I7MZjNc2S6EUu2rXR4XjEYj3NmkWCwcDDMHBGeWE6JIvku0FXpOQQ5kRUY8Fkd2XjY5n5LMXLEyfksWE+YtnIcdG3dg/879yPJkwWQxsRoTutqY3tbcYDBg5ryZpFnMgXqsXrEa8xbNg81uw9qVa6GpGrLzszFh2gTyuWURJrOJeYF3PI/s9x5PYPy08WiobYC/1Y99O/ahdGwp1CT5HVpsFjbBGz15NAKtAeQU5DA3GIB856l7De2LkB4r5RbkoqmuCeW7yhGNRGEUjcyLuCPhcBg6SAOxqbOnoqmhCRarBbIiY9fmXairqkNlXiU8+Z4u39/f9DkA7s7uTJIk1jL4WCE9A2wwGHvs6mY0GjP8+bgEYnDTnQtEPLUUlm6DxsmEBkjRaJRVAB8pd4zDJnUfSNer0vH7/f5OgVcsFoPD4ch4XBRFeDwe1NXVMckD0O4aM1CZcFrAMlDbtlqt8Hq97N/p33+6HNvd/mnBM126pcRiMciynKF9NhqNzKKtpaUFwWCQBUismUoXx1AURZSWTmbbok4aHW/uZrMZVquVNBZRVZiSScTjCWTnFLDnWlpasODE6Xjrf59jb3kNlFShFmkH3H3nSkmU4LA70OJtgcliRjAYZ24bFKul/f+CIGBEQU7G81nOdm2qSTFBkRXEE3GYU3K7qRNHwWxS8OkXG+CwW2E1Kd0WtzqdTrLqYLFm/AYFQYDNaoPNaiOTkFgUzc3NGZM2qmt3Z2X6bkuyBFESibOL1HUGdfyYEny1dhvWbdqNr9Zsg5qa/M2YMhqzp41jPsHJZJJ58HbEarVAkkTSejlAlrRbWon8IcfjwuiyERg/uhiCAIhGA4SUR7LFYkFRQTYmjiP64kBKBqJpGrKcNrT6grDYLd1KFrtClmW4sl2oSRXc2V1k0uHJ9sBgNEBTNdSlNMc2pw2STDzMLRYLaWIS8GPMpDHYsXEHikcVI5FMwOkguttAINBJH28wGDB51mSUjC6ByWxiMgUqJTObzQiGgrCJtoz3zJo/C6s+W4WWhhas+nQVsrKzEPQHYTKbMGverIyGQ+FwGCaTqcvvjsFggNlsRpuvDSbBhAnTJmD18tUo313OCv+ohloUSRH4iNIRsE22IR6PZ/xGDAYDPB4PTCYTIpEIYrEYW50RBIFlgKOpVti5I3JhFI2dJsu0KNnldKG1tRXFY4pRNq4sY8zbN2zHtvXbMOukWUDXSeR+pc8RwJQpU/Dqq692evyf//wnJk3qvqp6MJKeETpYACxJUkYwRT2AeRZxcJLe5jQdOomRJBEC+LnrDkmSoKoq4rE4bDZbv3VuG2gEQWBBcPpv02KxdJJB0H93DHAAEmTRTHAkQqrYE4lEt9rgYwFZlpnko2MW+2CZbVoM13EVLJFIwOVydZpY0EwtzZZSb8+eJCTUhSe93XNXgbIgCMjNzUV+fj6xhErEYLfZWGMWs4kUrGW7HawxRiw18fVkpTrn9eC/TZuqRKNRlh3riZzsLBhTekdJEmGztmtzBUGAzW5j3fsoY0YW4tJvLsQFZ50ECOh24mEQDMjJzulxAkoLZLM92WzciUSCBadd+XRLkoREsmuJCgCMHVUIg0FAJBqDqpEudN86ZwGOnzmRfbZYLAZFUbodm0EwwJ5qshAIRRCJRNGW0p5S2YjLaYPTYUMikWAdI6kjj5pUW
abS6SCe1NMml8LuVJBbmN2niagkScjJy4FiVuDJ9cAoGZn2nUojmCzAaWMFx9QJQdd0jBw/Emdfcjay87KhazqbFHZ0JknH7rRDSk2+0s+xzWYDdHRa5jcaiTuEI8uBeCxOHBUEYNb8zOCXdml0OBzdJnJoR05VVZE7IhdZnixoqoYdG4i7hs1hS3WRJJlp+v2kcVH6dY7UEGSjqKgIhYWFTF5is9kgGAUWVANE/hCLxRAMBhEMBYlMRlURj8eRlZWFrKwssnoRzpQxjZowCrkjiDXc9nXbu7Qd7G/6nAG+7777cNFFF2Hfvn04/fTTAQAff/wxXnnlFbz22mv9PsCBJJlMpM1yjD0Gs3TGS9E0jS+jD2K67QTHNMD83PWEKIrtRSCWzgHiYIVmJTquztA2wjQDCXTtepAOLbwKhUKIRCLdalePFajtWU/FbD1Bi+GCwSCTyJjN5h4L9uiNs6GhAbFYjBXXdAXVntObO81Mdfdau90ORVIQMfiQ7bGwLKSstMs9jpsxAeVV9QiGIjCbFJhk6aBuKoqssCyy2+1m8oeeXp/ltKHZ64c7lVlMzzBbrVYEA8FOkhqrhTjPqP1kv2g2m+HxeNDY2EgasLg9sNs6u5LQgLljAEIneclEEmaLGXOmj0flgQZMnzwaZcX5GddLquXMzs7uJN1Ix2G3wtsWQDgcQ5svAFXVIBqNMBoyV2l0XWcTDSotiMfjUFUVdrsdZrOZyE+yHZh7cgmiurlPzZdEUYTFZsG8xfNgtVgRDAWZZaDD6chojWx32jOCVZPJxL5PiqJA1dplPDRA9vl93U5iNE3rVORosVjgcDhI9hg6zKb2SZMkSZh76lysXLYS4WAYE6ZNgCc3UxJAV156mhjR5iCtra0wmUwYP308vv7kazaJtaRJdajzjaqp0PTOE2QKzWDT3yWViuXk56BybyXRHY/IQzQWZR7itGiUFiULggCXy8U82ukYBIFooVf8bwXCwTC2bdiGiTMndvv5+oM+B8DnnXce3nrrLTz88MN4/fXXYTabMW3aNHz00Uc45ZRTBmKMA4aqqhkZ4J6WxDs+x7vADW66s0GLx9olEDwB3D30+FH917GCIAgks9+paxbxu/X5fEz+QrOXPekIs7Ky4HQ6mS/rsRwAU+sxWuDSV2RZRnZ2Nssw0iYbB8uIW61WeDweNDU1dSr2Sodmo5LJJMsUHyzIEQQBitkEMd1WzkCK+dra2mCzKVgwdxre/3QNRpYWkEBE7vmzky5mxN2gq9WBTuOWROR4nCwANhqNGUGhJErMmotmTSnJlBd9f/mP220kS59IJODK6uwjTVFMCnToxL1DB3ToTAIjSRIikQhmTh2LmVPHdvl+Onk8mFuJy0GeD4YjqE8VmWW5bBBA9MnUg5rKroB2uU4wGCQTHRvRuJrNZujQoaUSUX3tPmo2EXs2cnlolxu53C7Wythqt0KSO9sUWq1WtLW1QVEUJBPJDBs/el3pyrIMSNUaGTsUORoMyMnJgd1uZx0A6WolnWgvOHMBAr4AK9ij0PObnZ3d4zGg0gWDwQBvqxeOLAc8eR60NJDzYLW1+yjTjK+matC13l8fqNY4Z0QOKvdVorCsEEbRCD1K/MTNZjNLILjd7VIc2mmypaUlQ46omBTMmj8L+3fvx+SZk6FhYBtiHJIN2tlnn42zzz67v8dyRCFZhnYRelf+oOl0lDvouj7o2v9y2knXAGeYzydoEVzPGf/hjtFgZNrOY+k4kQC46y6Ndrsd8XgcoVCIXeB7o22merpjOfgF0B5EHGIADJCl20OxaHM4HGwZtLt900LkeDzOssuHitlsRltrG3RdR3FhLq78zhJIohHhcLjHjCXFZDZleKv2hGgUMWPKWIiiiPGjCjO6k6WPx+12o6mxCaJRhFEk1ydqCddfBabELsx9UM9m2k3QYDCwAESWZSiyglA4hEg40m1ARzsNetyeg0qjnKkObOFIjPkwu1ISg0gkAjWpIqkmO+nCqQyCBlH0MVmWEffFYDD1LFns7jMLgsCKz+j9mza9AABHlqPLCQmdUFHZDz12QObqUlcrFslkErIid4oX6O+RauupPIl2f7Pb7aylcfq2IpEIPB4PXC4XDka6i0pTcxPGThnLAmCb08aOodFohCRLiEaiPRaJdrV9i8UCu8uOxRcshizLbIWJZtjtdnuX/ug0sUDryuix8+R6IJtliJLI7tcDxbCN4OgyAP2iG409F0V1/LHpun7MagGHA+02aBprjQu0F8FxCUTPiJLIlh2PJeg57aoTndlsRkFBAWs12tFhYDhgtVr7VDzUX9AbcW8Cs1Ao1GNBXm9QFAWiJLYX3ckSkokkyc52UfjVEYNg6HUrb1LYY8e0iWU9Sjwcdgfi8XiqMYuJ6S4HoqjyYOdXNIrIzem6Up+u+nRc8UgmkohEI6wbIdVc94QrZTMWCscQT2k6XQ5i2yXJEvw+PyCg0yRAUYgMxeFob/0uGkVYzGbEG2OwWgx9TkDRLCf9XMz2Ky3IpLZgHWMBGnwnEokMuQbQniFuaWlhwV96VlPV1B5lNx3196IosnbY6YE49Sinjiu9/Q0LgsDkFkaDEdOOn0Yy4KbMoNxsMsPv82ccm97ApCupjnSRaKRXqzdGoxF5eXkwmUzw+XwIBlPe1j3o8/ubYRsAa1rfAuB0rRK7yXIJxKClXQPcIQPMbdB6hSzJyMvNO+aOUXcaYAp1KKAd44bbbzhdv3ek6Y1tJC2+NJkOz3daNIrM+YMG0qpGdMW9yQD3FUVRAB0Q0H32jPrTCoIAn8+HWCxGApGDaIyPNAbBALvDjsbGRna/SyZI8OVyueB0Ons95iwnCZIDoTCMqd+ay0FkBjaFtOaNxWKdJqIGwYDc3NxOmXGzyQSD0QixQ5FWb6DWfx2X46lURNd12FMtqTtiNBphs9m6lfE4ncQ6jbokhEIhFjT3RVIAkO+S3W5Ha2sre5+qqizz25fglyIIAht/6ZhSqCppSZ1+DGnQ3pNLSnfjpTaTtB1ybxMnVGZmNpvR2trKJr9HimEbANOqzb5KINK7wR1rwcFwIqMTXBcZYC6BODjH4vFpD4B7Hv+x0NRjOEIL4SwWy2HfCC0WC/y+9q5xyWSyk09ufyGJEnSQYLFHO02DEdmebFgtVrS2tZI6lINoko8G1JOaWmJFohF43H0PvrJcJAMci7W7JLicVohGkRQPZmV129q6q0wgDbYOZXWAZlrTJVAA0Z2OmzoOoUAIjixHj1aAxpRVW8fXSJLEirxUVYXf70drayviiTgEg9BnyRH1uqZNZsLhcEYR2aFgMplYJ0H6m0gPdGmGnEpFeovRaITZbIY/4Ge/375eX00mE/Lz8xEMBonURB9Y7S+lVwGw3+/v1XLHsURXEoiDBcDpThC8CG5ww2zQVC3DCo1ngIc2AnrOAHMGN7RbVn9IU6wWK6w2K3Nf0LWBkRsARDJEK+l7E+yYzWYoJuJ0MBAZ6cNFNIqw2+xo8bYgHo/D6XT2WFTXHRaLGYosMRs6h90CRZaZBtput8NgMGQ06ugJSSQu
HoeqYacrIOnvFwQB46aMg9/v77G5hslEPJ2pZ3Z30FUmRVHQ0tJySJr79CwwLQp0u92HdU2jkq9oNEpWHlKShfRxi6J4SJMLs9kMn8/HXB0OdYJCHT9CoRCZgA2wBrhXRzMrKwuNjY0AgNNPPx1tbW0DOaYjQscM8MEConSPSqDrIhvO4KF7G7T2884D4KEH8QEWuEf3MQrtytcfgSo13adL+DoOvfjvYEiixFwseqthNAi9D/yOBlablbXBPtROkEaDETZru/6VuWRQ+1HBwFweeovT6YT9EAoxgfYgsGOAll681VORpsvl6pUzCNDeNTA7O/uQ6oXsdjsrJjvUbaRDZRCqqkLVVChy5m+M7utQHWJoAavFYjmsa68oikRmM0BNgTL21ZsX2Ww2tLS0IDc3F5999lm3ps/HElQD3O6DJx1UA0y9L9Mf4wxO0m3Q0jXANPMvGrkEYkiSdkr5+T32EEWRaD/7KblgMpngcDjQ2trKlmcHAuqP2hud87GCIpPAS1GUQ85SE0s5C1paSZdYl9MG0SgeluuF0Wg45O+HLMvIy+tc20ADONoSvTu6cjM42P4ONZBTFAUej+eQJR/dbVMURcRisS5dKQ5mrdYd9HNq+uG5txxpevVJFy1ahNNOOw0TJxJT4gsvvLDbE/LJJ5/03+gGkGSqCw6d7Yhiz5XRNABOJpMZWWDO4IRqgHW9YxEcPe9G3gluCMJs0HgG+JilP1fWaAV8KEQ6kA1UBhjAIdnDDXZs1sP/TE5H+zZcDitkpbNN3JGkuy6EA9nm/FDpa8B9MKi8KJFIdBnoHurvg9qhaZp2RDK3/UWvAuC///3v+Otf/4p9+/Zh+fLlmDx58jFnj9SRRMqSJV0CcbALryRJiEajrDKWSyAGL5kuEO2PJ5gPsATBwAOkocbBXCA4ww9FVuBwOEjV+wBaLB1LHROPJC5nWgBstw7KAEkURSimQ1v+P5agMoiODhD9AbU+O5bsYXsVAJvNZtx4440AgLVr1+Kxxx7rlQnzYKZjBliSDj4rFUWRZRMNhkNfhuEMPBkBMLryAeYSiKEIywBj6CxFcw4fWp3POfJQL2BFlmAxK4MyQJIkCVmurEGXAR4ITCYT86HuT46mxeKh0mexx6effsr+TYPBY/FGo6qZGWCj8eDm8AaDgXVrGUpar6GIkNKYaZraoRNcqghO4kVwQxF6Tvnvk5OOQTDAIPKExdGgtCgfOR4nyooLSJexQeh6AfSuK+RQQJKkLnXQw5FDuiK8/PLLmDp1KmsPOm3aNPztb3/r77ENKDQDTAPg3sxcaGU5l0AMftIzwOmegu0aYB4AD0WoCwT/fXI4gwOTScGSU2Zh6sQyYicqDr4M8HCD3/sIfZ6KPf7447jvvvtwyy23YP78+dB1HV988QVuvPFGNDc34/bbbx+IcfY7iUSSefsCgCwfPABub6+rcwnEICdDApHuA5ygEogj3w6WM/C0d2nkGWAOZzBA3TdoU4dDcRngcAaCPn8Tn376aTz33HO48sor2WPnn38+Jk+ejPvvv/+YCYBVNZnxQzT10KubwporaBr/EQ9y0nVm6dq/BM8AD2nSfYA5HM7RhzYIicViUBTlsCzQOJz+pM/fxLq6OsybN6/T4/PmzUNdXV2/DOpIkEwmMkTgvckA04BJVVV+gx3kGNKqvTMkEAneCGMoQ4vfDIOw0IbDGY4YDSQATiQSx1yRFGdo0+cobsyYMfjXv/7V6fFXX30VY8eO7ZdBHQmSyXQfPKFXGV0qmeAZ4MFPRgY4mZYBTvAM8FCGFr8NpN0Vh8PpG9RmdKjbjHGOLfocxT3wwAO4+OKLsWLFCsyfPx+CIGDlypX4+OOPuwyMByu07SFAHAN6k9Gl7ZBVVR2UVi6cdrqTQLQXwUm8EcYQRQDRAHM4nMGBJElc/8sZdPQ5A3zRRRdh1apVyM7OxltvvYU33ngD2dnZWL16NS688MKBGOOAkB4A97agjb6OFsFxBi9Cms5M1doDYNoKWZZ4EdxQRTAYeJMTDmcQIYriMdckgTP0OaTp2OzZs/H3v/+9v8dyREmXQAhC75oiUGslA2+zOuhJv9Amu5BAiKIRPAE8NEl3d+FwOEcfo9EIWZJ5BpgzqBi2aUxVTaRlgI29zujS4imeAR7cZEog0ovgSAZYkiR+DocoxAiCz244nMGCaBQhSiLX5nMGFcN2OpZMqmw22peMLg+Ajw3Sz0+6BphLIIY+giCQDD+HwxkUmEwmyIrMr7mcQcWwjeISiXhaBrj3jgDU05AHwIOb9EmKpnUhgeAuEEMWPkHlcAYX3JmFMxgZtneJdB9gg6F3GmDyWh78HitQGUQyvRFGgmaAeQA8VBEMBhgNw3Zxi8PhcDi9oM+R3EsvvYRwODwQYzmiJJPtneBoVrc30LaOPAge/FAnCC09AE5JIHgjjKGLAbxIlcPhcDg90+co7q677kJ+fj6uu+46fPnllwMxpiNCxwxwbwNaWmHOA+DBD80A0wBY1/X2DLDMNcBDFaPBwH2AORwOh9MjfY7iDhw4gL///e9obW3FaaedhgkTJuCxxx5DfX39QIxvwEgkEmlFcL2XQAiCwBpicAY3tB1yQiVBr6qprCCON8IYuhiMPAPM4XA4nJ7pcwBsNBpx3nnn4Y033kB1dTW+//3v4x//+AdKSkpw3nnn4e2334amaQff0FEmMwPc++VwRVFgs9m4z+gxAD1HtBVyPB5nz3EXiKGLyWyCopiO9jA4HA6HM4g5rEqR3NxczJ8/H7t27cLu3buxZcsWXH311XC5XPjLX/6CU089tZ+G2f+kN8LoS0bXYDDAbrcP5NA4/QSTQGg6ACAWaw+AJZn3pB+qFOTlQ3HnHO1hcDicPqCqGhLJnpNniaQKNWlEMq5C0JNHaGScgcQoHb0V9UMKgBsaGvC3v/0Nf/nLX7B//35ccMEF+M9//oNFixYhEong3nvvxVVXXYXKysqDbuvZZ5/Fr3/9a9TV1WHy5Ml48skncfLJJx/0fV988QVOOeUUTJkyBRs3buzzZ+hYBMcZelAJBJU9xNIywIokH5UxcTgcDqcdXddR3xJEW4Ben7sPhnRoSGpuJKMxCEgcmQFyBgwdOiAArnwXjNKRj8P6HACfe+65+OCDDzBu3Dh873vfw5VXXgm3282eN5vN+PGPf4wnnnjioNt69dVXcdttt+HZZ5/F/Pnz8fzzz+Oss87C9u3bUVJS0u37fD4frrzySixcuBANDQ19/QgAiAaYBr4G7k84JGESCBoApzLABoMAUeI2WRwOh3O0qW8Joi2YQG5uLixmM3pKBmq6joQWh2CQ0eMLOccEuqajvr4eQW8QjlzHEc8E9zkKyM3NxfLly3HiiSd2+5qCggKUl5cfdFuPP/44rrvuOlx//fUAgCeffBIffPABnnvuOTzyyCPdvu+GG27ApZdeCqPRiLfeequvHwEAkUDwAHhoQ506VC0zAyyKInfx4HA4nKOMqmpoC8SRm5sLjzvroK/XdA0GFRCMCrO55BzbZGdno7a2FpqqwXiEO3j2+Rt0yim
nYNasWZ0ej8fjePnllwEQp4TS0tIetxOPx7Fu3TosWbIk4/ElS5b0aK/2l7/8Bfv27cPSpUt7Nd5YLAa/35/xB+ASiOFAx0YY8VgMAPcA5nA4nMEA1fxazOajPBLO0YI6MumpWp0jSZ8D4GuuuQY+n6/T44FAANdcc02vt9Pc3AxVVZGXl5fxeF5eXreWanv27MHPfvYz/OMf/2DB68F45JFH4HQ62Z/i4mIAHTPAfDl8KMIkEBoplojHU22QRe7jzOFwOIMDgasZhjNH8dz3OQrQdb3L7NmBAwfgdDr7PICO2+pu+6qq4tJLL8UDDzyAcePG9Xr7d911F3w+H/tTXV0NINMFggdDQxMqbdGYBphkgCVR5H3pORwOh8MZxvQ69Tlz5kwIggBBELBw4cKMDKyqqigvL8eZZ57Z6x1nZ2fDaDR2yvY2NjZ2ygoDJMO8du1abNiwAbfccgsAQNM06LoOURTx4Ycf4vTTT+/0PkVRoChKp8eTSRVGI7HCMhp5BngoQjPAuk6W2WIsAyxC4J3COBwOhzNAVFRUYPyY8Vi9djWmz5g+6LZHUUQF//r3v3D++ef32zaPFXod+V1wwQUAgI0bN+KMM86AzWZjz8myjLKyMlx00UW93rEsy5g9ezaWLVuGCy+8kD2+bNmyLk+Ew+HAli1bMh579tln8cknn+D111/HyJEje71vgGaAyWfgRXBDk44uELQRhsg7+XE4HA7nMLj+2uvxt5f/xv7vdrsxe85sPPLoI5g6bepRHBmnt/Q6AKZFZ2VlZbj44othMh1+p6U77rgDV1xxBebMmYMTTzwRL7zwAqqqqnDjjTcCIPKFmpoavPzyyzAYDJgyZUrG+3Nzc2EymTo93htUtV0DzIvghibMBUKlGWDqAsE1wBwOh8M5PJacsQR//NMfAQAN9Q1Y+vOluPD8C7G3fO9RHhmnN/Q5Crjqqqv6JfgFgIsvvhhPPvkkHnzwQcyYMQMrVqzAe++9xxwk6urqUFVV1S/76ki6D7Ao8q5gQ5H2RhgkAI7H2m3Q+KSHw+FwOIeDoijIz89Hfn4+ps+Yjp/c+RNUV1ejqampy9evWL4C8+fOh91iR2lRKe656x4kk+0d7TRNw29+9RtMHD8RdosdY0aOwaMPP9rltjRNw0033ITJEyezpmP/efc/mHv8XDisDowfOx6/ePAXGdvfs2cPFp66EA6rA9OnTsdHyz7qx6Nx7NGrDLDb7cbu3buRnZ2NrKysHpePvV5vnwZw88034+abb+7yuZdeeqnH995///24//77+7Q/iqqqaTZoXAM8FOnUCCOVAZZEEQbuIcnhcDiDDl3XEQ6Hu3xO0zXE1RgEY3JAfIAtFsshy+OCwSBeeeUVjB4zGh6PB6FQKOP5mpoanH/u+bjiqivwp5f+hF27duHmG26GyWTCfUvvAwDce/e9+POf/oxf//bXmDd/Hurr6rFr165O+4rH47jy8iuxf/9+fLr8U+Tm5uLDDz7ENVddg8effBzzT5qP/fv24+abSGx178/vhaZpuPjbFyPbk43Pv/gcfr8fP/nxTw7psw4VehX5PfHEE7Db7ezfQ0E/mW6DxgPgoUlnH+BUBljiRXAcDoczGAmHw7A53Qd/4QDg9XlhtVp7/fr3/vse3KmxhkIhFBQU4M233+xSYvf8c8+jqLgIv3vqdxAEARMmTEBdbR3uuese3HPfPQiFQnjm6Wfw5FNP4oorrwAAjB49GvNPmp+xnWAwiPPPPR/RSBTLPl7G3Lcee+Qx3PnTO9l7R40ahfsfuB93/+xu3Pvze/HxRx9j546d2L1vN4qKigAADz70IM4757y+H6ghQq8iv6uuuor9++qrrx6osRxR0m3Q+HL40KSjDVoiQZaCuAaYw+FwOIfLKaeegqd//zQAoNXbiuf/8DzOO+c8rPxqZafX7ty5E3Pnzs1IIM6bNw/BYBAHDhxAQ30DYrEYTjv9tB73eeXlV6KwsBDvL3sfFouFPb5+/XqsXbsWjz7SLplQVRXRaBThcBg7d+5EcUkxC34BYO6Jcw/5sw8FehUA0+5pvcHhcBzyYI4k6RlgSZKP8mg4AwE9v1rKBi2eJoEYCqsYHA6HM9SwWCwI+rqWUrZLIAamFXJ6QNkbrFYrxowZw/4/a/Ys5Lhz8OcX/4xrrstsDNZVjwNdJ93PBEGAuZfd8M446wy88o9XsOrrVRnBsqZpuG/pfbjgwgs6vcdkMrF9pTPci66A9AAAOWxJREFU74O9CoBdLtdBDxQ9uVRvOdhRVS2tCI5LIIYiNMuraakAOJHyAeY2aBwOhzMoEQShWxmCpmuQVHHAAuDDRRAEGAwGRCKRTs9NnDgRb775ZkYg/NVXX8Fut6OwsBA5OTkwm8349JNPMfK67m1db7jhBkyePBkXXXgR3nrnLSw4ZQEA0qthz+49GQF5x/1XV1WjtrYWI0aMAAB8/dXXh/uRj2l6Ffl9+umnAz2OI04i0S6B4C4QQ5N2F4jMIjiRZ4A5HA6Hc5jEYjHWzKu1tRXPPfscgsEgzj7n7E6vveGmG/D0U0/jth/dhptuvgm7d+/GQw88hB/d9iMYDAaYTCb85M6f4O6f3Q1ZlnHivBPR3NSM7du345prM7PJP7jlB1BVFReefyHe+c87mH/SfNx979248PwLUVRUhIu+dREEg4CtW7Zi65ateOChB7Bw0UKMGz8O1159LX7161/B7/dj6c+XHpHjNFjpVQB8yimnDPQ4jjjpEggeAA9NmAuE1qERhsRdIDgcDodzeHz4wYcoLSK2rXa7HePHj8crr76CU049BRUVFRmvLSwsxNvvvo27/u8uHPficXC73bj6mqtx1z13sdfcfe/dEEURD97/IGpra1FQUIDvff97Xe771h/dCk3TcP655+Pd/76LJWcswZtvv4mHf/Ewfvub30KSJIwfP55JMQwGA/71+r9w4/duxPwT56O0rBSPP/E4zj373IE5OMcAvQqAN2/ejClTpsBgMGDz5s09vnbatGn9MrCBRtfbpRpcAjE0YRpgKoFgrZC5BILD4XA4h86Lf34RL/75xW6fLysrQywZy3hswSkL8MXXX3T7HoPBgJ/d/TP87O6f9Wp7t91+G267/Tb2/yVnLMGSM5Z0u/1x48bhk+WfZDzWcZvDiV5FfjNmzEB9fT1yc3MxY8YMCILQraD6WNEAA+3j50VwQ5NuA2CjCPD4l8PhcDicYUuvAuDy8nLk5OSwfw8F9JQzAMAD4KEKs0FjRXDtrZB5BpjD4XA4nOFLrwJg2pq447+PZWgGW9fBPWGHKN1ngHkAzOFwOBzOcOaQxK+7du3C008/jR07drCOJj/84Q8xfvz4/h7fgEEVHIIg8EYYQ5SONmixKNE6ybLMA2AOh8PhcIYxfU59vv7665gyZQrWrVuH6dOnY9q0aVi/fj2mTJmC1157bSDGOODwYGho0lECEU25QMiyxM85h8PhcDjDmD5ngH/605/irrvuwoMPPpjx+NKlS/F///d/+Pa3v91vgxtYqAbYwIOhIUpHCQTNACs8A8zhcDgczr
Cmzxng+vp6XHnllZ0ev/zyy5kh9LGEIAg8GBqitPsApzLAMSqB4BlgDofD4XCGM30OgE899VR8/vnnnR5fuXIlTj755H4Z1JGAxj+DsZ0ip3/oJIFIZYAliXeC43A4HA5nONMrCcQ777zD/n3eeefh//7v/7Bu3TrMnTsXAPD111/jtddewwMPPDAwoxxADAYugRiq0AwwdfyIphfBcSNgDofD4XCGLb0KgC+44IJOjz377LN49tlnMx77wQ9+gBtvvLFfBjbQ0OBIEHgAPFRpd4EgzVliMaoBliAY+DnncDgczrHH8s+WY8miJWhoboDL5Traw+mRhx54CO+88w7WrFtztIfSiV6t/2ua1qs/x04XuPYAmC6Tc4YeHTXAEW6DxuFwOJx+4Pprr8e3vvmtXr9eERW8/fbbAzgiTl8ZtgLY9gCYZ4CHKh01wDFug8bhcDicY5hEInG0hzBkOKQAOBQK4b333sMf/vAHPPXUUxl/jhVEkag/DIZD6gXCOQboaINGNcCKwjPAHA6Hw+kfFp++GLffdjvu+r+7kJ+Tj5LCEjz0wEPs+XGjxwEAvnPRd6CICvs/APzn3f9g7vFz4bA6MH7sePziwV8gmUyy5xVRwQvPv4CLLrwIWY4sPPLLR7ocw1dffoWFpy6E0+bE6LLRuP222xEKhdjz/+8f/w8nnnAiPC4PSgpLcOXlV6KxsREAuUeOKh2FF55/IWObG9ZvgCIq2L9/PwDA5/PhphtvQlFBEbKzsnHGojOwedPmjPf8+rFfo3hEMTwuD2743g2IRqOHckiPCH0OgDds2IAxY8bgu9/9Lm655Rb84he/wG233Ya7774bTz755AAMcWCgwRHvAjd06dwKmWSAeQDM4XA4gxNd1xGPx3v4kzjI84f+hxZMHwp/f/nvsFqt+PzLz/Hwow/jl7/4JT5a9hEA4IuvvwAA/PFPf0TlgUr2/w8/+BDXXHUNfnDLD7Bxy0b8/tnf4+WXX8ajDz+ase2HHngI5553LtZtXIerrrmq0763btmKc75xDs6/8Hys3bAWf/9/f8eXX3yJ2269jb0mHo9j6f1LsWb9Grz279dQUV6B66+9HgBZCf/2d76Nf/6/f2Zs95+v/BNz587FqFGjoOs6Ljj3AjTUN+Dtd9/GV6u/woyZM3DmkjPh9XoBAK+/9joefOBBPPDQA/hy1ZfIz8/H8394/pCP6UDT5/Tn7bffjnPPPRfPPfccXC4Xvv76a0iShMsvvxw/+tGPBmKMAwLXAA996LnVtZQLRIxngDkcDmcwk0gk8MivfntU9v3jn/4Ysiwf0nunTp2Ke39+LwBg7NixeO73z+HTTz7FosWLkJOTAwBwupzIz89n73nskcdw50/vxBVXXgEAGDVqFO5/4H7c/bO72bYA4OJLLsbV11zN/l9RXpGx78d/+zgu/u7FuPVHt7L9P/7E41h0+iI8/funYTKZMt4/atQoPP7k45h/4nwEg0HYbDZ899Lv4ndP/g6VlZUoLS2Fpml47V+v4ac/+ykA4LNPP8PWrVtxoO4AFEUh4//1Y3jnnXfwxr/fwPXfux5P/+5pXHXNVbj2umsBAA889AA++fgTRGODMwvc5wzwxo0b8eMf/xhGoxFGoxGxWAzFxcX41a9+hbvvvnsgxjggpGuAOUOTjkVw8RjRTskS1wBzOBwOp/+YMm1Kxv/zC/LR1NTU43vWr1+PX/7il3A73ezPTTfchLq6OoTDYfa62XNmH3Q7f/vr3zK2c843zoGmaSgvLwcAbNywERddeBHGjhoLj8uDxQsXAwCqq6oBADNmzsD4CePxr3/+CwCwYvkKNDY24lvf/hbbRzAYREFuQcZ+KsorsH8fkUjs3LmT2eNSTph7Qo9jP5r0OQMspQUPeXl5qKqqwsSJE+F0OlFVVdXvAxwoeAZ46NNug6ZB13WWATaZTDwA5nA4nEGIJEm466c/7vI5TdcQV+MQjPKANLGSJKnf3isIApPfdYemabhv6X244MILOj1nMpnYvy1Wy0G3c/33r8cPbvlBp+dKSkoQCoVw9llnY9HiRfjLX/+C7JxsVFdV45xvnMOkgQDw3e9+F//85z9x5//diVf/+SoWL1mM7OxsAGQltaCgAB9+/GGnfQx2K7bu6HMAPHPmTKxduxbjxo3Daaedhp///Odobm7G3/72N0ydOnUgxjgg0CI43glu6JLuApH+I1cU3giDw+FwBiOCIHQrQ9B0DVD1AQuABxJJkqCpmQHxzJkzsWf3HowZM+awtj1z5kxs37a92+1s3bIVzc3N+MXDv0BxcTEAYP269Z1ed/F3L8bSny/F+nXr8ca/38DTv3+aPTdj5gzU19dDFEWUlZV1uZ8JEyZg1apVuPyKy9ljq1etPoxPNrD0+Rv08MMPo6CgAADw0EMPwePx4KabbkJjYyNeeOGFg7x78MAzwEOf9iI4lTlAAIBJUXgGmMPhcDhHjNKyUnzyySeor69Ha2srAODue+/G3//2dzz0wEPYvm07duzYgdf+9RqW3re0T9v+yZ0/waqvV+HWH96KTRs3Yc+ePXj33Xdx249uAwAUlxRDlmU8+8yz2L9/P9599108/MuHO21n5MiROPHEE3HD929AMpnEueedy55buGgh5s6di29f9G18+MGHqKiowFdffoWl9y3FurXrAAC33HoL/vqXv+Klv7yE3bt348H7H8T27dsP8YgNPH0OgOfMmYPTTjsNAJCTk4P33nsPfr8f69evx/Tp0/t9gAMFzQBzF4ihS7oLRCRlxSIIAkTRCJ4A5nA4HM6R4rFfPYaPP/oYo8tG44Q5RBe75IwlePPtN/HxRx9j3tx5WDB/AX73xO9QUlrSp21PnTYVH33yEfbu2YvTTz0dJ8w5AQ8sfQAF+SRZmZOTgxf//CL+/e9/Y8bUGfjNY7/Bo4892uW2Lrn0EmzetBkXXHgBzGYze1wQBLz9n7dx0skn4Ybv3YApE6fgisuuQGVlJXLzcgEA3/7Ot3HPvffgnrvuwYnHn4iqqip8/4bvH8rhOiII+iH6fjQ2NmLXrl0QBAHjx49nVY6DHb/fD6fTiSVLlmDevHkoLJyAb3zj4qM9LM4A8MQTD+E3v/k5zlq4BL97/D6Mm34yFFnGB2+9hBPmngCTYjr4RjjHFIlYAuX7VSjuYkjKoev5OMcGiVgCaqAcxSUKJJmf72ONaCyJ8lo/RpaVZGheu4NogGMQjMoxJ4HgdE00GkVVRRUc+Q6IcqYq19vqxdTSqfD5fHA4HP2+7z5/g/x+P6644goUFhbilFNOwYIFCzBixAhcfvnl8Pl8/T7AgYL7AA99WAZY1xEOkwywrJAiTu7+weFwOBzO8KXPUcD111+PVatW4T//+Q/a2trg8/nwn//8B2vXrsX3vve9gRjjgNDeCY4HwEOVdBcI2o1GliRAANcAczgcDoczjOmzC8R///tffPDBBzjppJPYY2eccQb++Mc/4swzz+zXwQ0kvAhu6JOuAQ6HIwAAWSZNMHgAzOFwOBzO8KXPGWCPxwOn09npcafTiaysrH4Z1JGAZ
4CHPuk2aCwDnNIJ8gCYw+FwOJzhS58D4HvvvRd33HEH6urq2GP19fW48847cd999/Xr4AYSujzOtaBDlwwXiAjJACuyxDPAHA6Hw+EMc3olgZg5c2ZGwLBnzx6UlpaipIRYdVRVVUFRFDQ1NeGGG24YmJH2M+0SiD6rQDjHCO2tkFVEUl3gaCdD3giDw+FwOJzhS6+ivwsuuGCAh3Hk4T7AQ590CQTVACsK1wBzOBwOhzPc6VUAvHRp37qSHAtwG7ShDz23uq6zTnCyJHHZC4fD4XA4w5xDXv9ft24dduzYAUEQMGnSJMycObM/xzXg0AywKHLz9KFKhg1aSgKhyDIM3ECdw+FwOJxhTZ8D4MbGRlxyySX47LPP4HK5oOs6fD4fTjvtNPzzn/88ZjrCtWeAuQZ4qJJZBEdcICRZgmDg8gcOh8PhcIYzfY7+fvjDH8Lv92Pbtm2YOHEiAGD79u246qqrcOutt+KVV17p90EOBDQ4oplgztCDaoBVTWMSCEXmEggOh8MZ9Ox7sfNjug6jlgQMYr/XcWijr+/ze+rr6/HoI4/if+/9D7U1tcjNzcW06dPww1t/iNMXnt5vY1t8+mJMmzENv338t/22zb5s960338IfX/gjNqzfgJaWFqxeuxrTZ0zv17EcDfoc/b3//vv46KOPWPALAJMmTcLvf/97LFmypF8HN5DwDPDQJyMDHKWNMCReAMfhcDicw6KiogKnLTgNTpcTjzz6CKZMnYJkIokPP/wQP7r1R9iybcvRHmK/EQqFMG/ePFz0rYtw0w03He3h9Bt9ToVpmgZJ6qyblSQJmqb1y6COBO0ZYK4BHqqkB8C8CI7D4XA4/cWtt9wKQRDwxVdf4JsXfRPjxo3DpMmTcNvtt+HzLz5nr6uqqsJFF14Et9ON7KxsXHrJpWhoaGDPP/TAQzhu9nH4x9//gXGjxyHHnYPLL70cgUAAAHD9tddjxYoVeOapZ6CIChRRQUVFBQBgx/YdOO+c8+B2ulE8ohjXXHUNmpubAQDLP1sOm9mGlZ+vZPt64vEnMCJvBOrq6nrcbkcuu/wy3HPfPf2a1R4M9DkSOP300/GjH/0ItbW17LGamhrcfvvtWLhwYb8ObiBpL4LjGeChCpVA6OkBsCLDyLv/cTgcDucQ8Xq9+PCDD3HjTTfCarV2et7lcgEgDkTfvujbaPW24qNPPsJ777+H/fv34/LvXp7x+v379uOdt9/Bm2+/iTfffhOfr/gcv37s1wCA3z7xW8ydOxfXXn8tKg9UovJAJYqLi1FXV4dFpy/C9OnT8eWqL/Huf99FQ0MDLrvkMgDAKaeegh/e+kNce/W18Pl82LxpM5betxTPPf8cCgoKut3ucKLP0d8zzzyD888/H2VlZSguLoYgCKiqqsLUqVPx97//fSDGOCC0SyB4BnioQjO9apoLhCzxIjgOh8PhHDr79u6DrusYP2F8j6/7+KOPsWXzFuzau4sFl3956S+YMW0G1q5ZiznHzQFAVilf/POLsNvtAIBLL7sUn37yKQDA6XRClmVYLBbk5+ezbb/whxcwY+YMPPTLh9ofe/EFjC4bjd27d2PcuHF44KEH8Mknn+DmG2/G9u3bcdnll+H8C87vcbvDiT4HwMXFxVi/fj2WLVuGnTt3Qtd1TJo0CYsWLRqI8Q0YVAfalZyDMzTI1AATFwjaCIPD4XA4nENB13Xyj4PcSnbu3Imi4qKMzOrESRPhcrmwc+dOFgCXlpWy4BcA8gvy0djU2OO2169fj+WfLYfb6e703P59+zFu3DjIsoyX/voSZs+cjZLSEvzm8d/08hMOD/oUACeTSZhMJmzcuBGLFy/G4sWLB2pcRwwugRi6pHeCoxIIiWuAORwOh3MYjBk7BoIgYNeOXcD53b9O1/UuEy4dH++YiBMEAbqm9zgGTdNw9jln45eP/LLTcwUFBezfX331FQCg1dsKr9fbpWRjuNKnSEAURZSWlkJV1X4bwLPPPouRI0fCZDJh9uzZ+Pzzz7t97RtvvIHFixcjJycHDocDJ554Ij744IPD2j8vghu68CI4DofD4fQ3brcbi5csxh+e+wNCoVCn59va2gAAEydORHVVNaqrq9lzO7bvgM/nw4QJE3q9P0mWOsVdM2fOxPbt21FWVoYxY8Zk/KFB7r59+3Dnj+/Ec88/h+NPOB7XXX1dhllBV9sdTvQ5Erj33ntx1113wev1HvbOX331Vdx222245557sGHDBpx88sk466yzUFVV1eXrV6xYgcWLF+O9997DunXrcNppp+Hcc8/Fhg0bDmn/uq5DkuTD+QicQUx6AByL0QywyANgDofD4RwWTz3zFFRVxfwT5+PNN97Enj17sGPHDjzz9DNYcNICAMDCRQsxddpUXH3F1diwfgPWrF6Da6+5FgsWLMDsObN7va/SslKsWbUGFRUVaG5uhqZpuPHmG9HqbcUVl12BNavXYP/+/Vj24TJ8//rvQ1VVqKqKa6+6FosWL8JVV1+FP/7pj9i2bRueePyJHrfbFV6vF5s2bsKO7TsAALt378amjZtQX19/GEfw6NPn9f+nnnoKe/fuxYgRI1BaWtopnb5+/fpeb+vxxx/Hddddh+uvJwbUTz75JD744AM899xzeOSRRzq9/sknn8z4/8MPP4y3334b77777iG1YtZ1nQVJnKFHhgQiFgfAG2FwOBzOMUFXjSl0Daoag2BUIBzllvYjR47E12u+xqOPPIr/u/P/UFdXh5ycHMycNRNPP/M0ACJleO3fr+H2H92OhacthMFgwJIzluCJ3z1xkK1ncvsdt+P6a67HjKkzEIlEsGvvLpSVleHTFZ/inrvuwTnfOAexWAwlpSVYsmQJDAYDHv7Fw6isrMQbb78BAMjPz8dzLzyHyy65DIsWLcL0GdO73W5H/vPuf/C9677H/n/5pcTF4t777sV9S+87xCN49BF0pubuHffff3+PRURLly7t1Xbi8TgsFgtee+01XHjhhezxH/3oR9i4cSOWL19+0G1omoaysjL89Kc/xS233NLla2KxGMv+AYDf70dxcTF+9rOfQZJkXH31XTCZTL0aM+fYYuXKT3DxxQtRUlQMi0XBzt178fDSH+OS71yIkWUjj/bwOANAIpZA+X4VirsYksLlTUOdRCwBNVCO4hIFkszP97FGNJZEea0fI8tKenUf1nQN8UESAHP6h2g0iqqKKjjyHRDlzJyst9WLqaVT4fP54HA4+n3ffc4A33///f2y4+bmZqiqiry8vIzH8/Lyep1W/+1vf4tQKITvfOc73b7mkUcewQMPPNDlczwDPLTpSgIhyxI/5xwOh8PhDHN6PYUKh8P4wQ9+gMLCQuTm5uLSSy9lHUcOh47Z5O6qJjvyyiuv4P7778err76K3Nzcbl931113wefzsT/pYnRd1/ly+BAmowguzQfYwDMHHA6Hw+EMa3qdAV66dCleeuklXHbZZTCZTHjllVdw00034bXXXjukHWdnZ8NoNHbK9jY2NnbKCnfk1VdfxXXXXYfXXnvtoP7DiqJAUZQun9P1zgE4Z+iQrgGO
[... base64-encoded PNG image data omitted: output of utils.plot_results(df, 'Deterministic CSW') ...]",
+      "text/plain": [
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "# Set up the parameters for the experiment.\n", + "params.n_participants = 10\n", + "params.paradigms = ['Blocked', 'Interleaved']\n", + "params.sim_thresh = 0.1 # filtering criterion that will be useful later.\n", + "df, _, context_reps, _ = run.run_experiment(params)\n", + "fig = utils.plot_results(df, 'Deterministic CSW')\n", + "fig.show()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "analysis", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.14" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/Scripts/Models (Under Development)/EGO/Using EMComposition/DeclanParams.py b/Scripts/Models (Under Development)/EGO/Using EMComposition/DeclanParams.py new file mode 100644 index 00000000000..4707ad758ef --- /dev/null +++ b/Scripts/Models (Under Development)/EGO/Using EMComposition/DeclanParams.py @@ -0,0 +1,93 @@ +""" +DECLAN Params: ************************************************************************** +√ episodic_lr = 1 # learning rate for the episodic pathway +√ temperature = 0.1 # temperature for EM retrieval (lower is more argmax-like) +√ n_optimization_steps = 10 # number of update steps +sim_thresh = 0.8 # threshold for discarding bad seeds -- can probably ignore this for now +Filter runs whose context representations are too uniform (i.e. not similar to "checkerboard" foil) + +May need to pad the context reps because there will be 999 reps +def filter_run(run_em, thresh=0.8): + foil = np.zeros([4,4]) + foil[::2, ::2] = 1 + foil[1::2, 1::2] = 1 + run_em = run_em.reshape(200, 5, 11).mean(axis=1) + mat = cosine_similarity(run_em, run_em) + vec = mat[:160, :160].reshape(4, 40, 4, 40).mean(axis=(1, 3)).ravel() + return cosine_similarity(foil.reshape(1, -1), vec.reshape(1, -1))[0][0] + +# Stack the model predictions (should be 999x11), pad with zeros, and reshape into trials for averaging. +em_preds = np.vstack([em_preds, np.zeros([1,11])]).reshape(-1,5,11) + +# Stack the ground truth states (should be 999x11), pad with zeros, and reshape into trials for averaging. 
+ys = np.vstack([data_loader.dataset.ys.cpu().numpy(), np.zeros([1,11])]).reshape(-1,5,11) + +# compute the probability as a performance metric +def calc_prob(em_preds, test_ys): + em_preds, test_ys = em_preds[:, 2:-1, :], test_ys[:, 2:-1, :] + em_probability = (em_preds*test_ys).sum(-1).mean(-1) + trial_probs = (em_preds*test_ys) + return em_probability, trial_probs + +Calculate the retrieval probability of the correct response as a performance metric (probs) +probs, trial_probs = calc_prob(em_preds, test_ys) +""" +from psyneulink.core.llvm import ExecutionMode +from psyneulink.core.globals.keywords import ALL, ADAPTIVE, CONTROL, CPU, Loss, MPS, OPTIMIZATION_STEP, RUN, TRIAL + +model_params = dict( + + # Names: + name = "EGO Model CSW", + state_input_layer_name = "STATE", + previous_state_layer_name = "PREVIOUS STATE", + context_layer_name = 'CONTEXT', + em_name = "EM", + prediction_layer_name = "PREDICTION", + + # Structural + state_d = 11, # length of state vector + previous_state_d = 11, # length of state vector + context_d = 11, # length of context vector + memory_capacity = ALL, # number of entries in EM memory; ALL=> match to number of stims + memory_init = (0,.0001), # Initialize memory with random values in interval + # memory_init = None, # Initialize with zeros + concatenate_keys = False, + # concatenate_keys = True, + + # environment + # curriculum_type = 'Interleaved', + curriculum_type = 'Blocked', + # num_stims = 100, # Integer or ALL + num_stims = ALL, # Integer or ALL + + # Processing + integration_rate = .69, # rate at which state is integrated into new context + # state_weight = 1, # weight of the state used during memory retrieval + # context_weight = 1, # weight of the context used during memory retrieval + state_weight = .5, # weight of the state used during memory retrieval + context_weight = .5, # weight of the context used during memory retrieval + normalize_field_weights = False, # whether to normalize the field weights during memory retrieval + # normalize_field_weights = True, # whether to normalize the field weights during memory retrieval + # softmax_temperature = None, # temperature of the softmax used during memory retrieval (smaller means more argmax-like + softmax_temperature = .1, # temperature of the softmax used during memory retrieval (smaller means more argmax-like + # softmax_temperature = ADAPTIVE, # temperature of the softmax used during memory retrieval (smaller means more argmax-like + # softmax_temperature = CONTROL, # temperature of the softmax used during memory retrieval (smaller means more argmax-like + # softmax_threshold = None, # threshold used to mask out small values in softmax + softmax_threshold = .001, # threshold used to mask out small values in softmax + enable_learning=[True, False, False], # Enable learning for PREDICTION (STATE) but not CONTEXT or PREVIOUS STATE + learn_field_weights = False, + loss_spec = Loss.BINARY_CROSS_ENTROPY, + # loss_spec = Loss.MSE, + learning_rate = .5, + # num_optimization_steps = 1, + num_optimization_steps = 10, + synch_weights = RUN, + synch_values = RUN, + synch_results = RUN, + # execution_mode = ExecutionMode.Python, + execution_mode = ExecutionMode.PyTorch, + device = CPU, + # device = MPS, +) +#endregion \ No newline at end of file diff --git a/Scripts/Models (Under Development)/EGO/EGO Model (sim 2) - CSW using EMComposition with WM.py b/Scripts/Models (Under Development)/EGO/Using EMComposition/EGO Model - CSW with RNN.py similarity index 98% rename from Scripts/Models (Under 
Development)/EGO/EGO Model (sim 2) - CSW using EMComposition with WM.py rename to Scripts/Models (Under Development)/EGO/Using EMComposition/EGO Model - CSW with RNN.py index 00fe97f5e74..8a45bb6ab14 100644 --- a/Scripts/Models (Under Development)/EGO/EGO Model (sim 2) - CSW using EMComposition with WM.py +++ b/Scripts/Models (Under Development)/EGO/Using EMComposition/EGO Model - CSW with RNN.py @@ -147,8 +147,8 @@ MEMORY_CAPACITY = 5 CONSTRUCT_MODEL = True # THIS MUST BE SET TO True to run the script DISPLAY_MODEL = ( # Only one of the following can be uncommented: - None # suppress display of model - # {} # show simple visual display of model + # None # suppress display of model + {} # show simple visual display of model # {'show_node_structure': True} # show detailed view of node structures and projections ) RUN_MODEL = True # True => run the model @@ -404,7 +404,7 @@ def construct_model(model_name:str=MODEL_NAME, model = construct_model() assert 'DEBUGGING BREAK POINT' # print(model.scheduler.consideration_queue) - # gs.output_graph_image(model.scheduler.graph, 'EGO_comp-scheduler.png') + # gs.output_graph_image(model.scheduler.graph, 'show_graph OUTPUT/EGO_comp-scheduler.png') if DISPLAY_MODEL is not None: if model: diff --git a/Scripts/Models (Under Development)/EGO/EGO Model (sim 2) - CSW with Learning.py b/Scripts/Models (Under Development)/EGO/Using EMComposition/EGO Model - CSW with Simple Integrator.py similarity index 65% rename from Scripts/Models (Under Development)/EGO/EGO Model (sim 2) - CSW with Learning.py rename to Scripts/Models (Under Development)/EGO/Using EMComposition/EGO Model - CSW with Simple Integrator.py index b62564dd1ea..5cb51d00181 100644 --- a/Scripts/Models (Under Development)/EGO/EGO Model (sim 2) - CSW with Learning.py +++ b/Scripts/Models (Under Development)/EGO/Using EMComposition/EGO Model - CSW with Simple Integrator.py @@ -3,37 +3,36 @@ # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed # on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and limitations under the License. - -# CONTROL FLOW: -# - EM EXECUTES FIRST: -# - RETRIEVES USING PREVIOUS STATE NODE AND CONTEXT (PRE-INTEGRATION) TO RETRIEVE PREDICTED CURRENT STATE -# - STORES VALUES OF PREVIOUS STATE, CURRENT STATE (INPUT) AND CONTEXT (PRE-INTEGRATION) INTO EM -# - THEN: -# - PREVIOUS_STATE EXECUTES TO GET CURRENT_STATE_INPUT (FOR RETRIEVAL ON NEXT TRIAL) -# - INTEGRATOR LAYER EXECUTES, INTEGRATING CURRENT_STATE_INPUT INTO MEMORY -# - CONTEXT LAYER EXECUTES TO GET LEARNED CONTEXT (FOR RETRIEVAL ON NEXT TRIAL) -# - PREDICTED CURRENT STATE IS COMPARED WITH ACTUAL CURRENT STATE (TARGET) TO UPDATE INTEGRATOR -> CONTEXT WEIGHTS - -# ISSUES: -# * Using TransferMechanism (to avoid recurrent in PyTorch): -# -> input is always just linearly integrated, and the integral is tanh'd -# (not sure tanh is even necessary, since integral is always between 0 and 1) -# -> how is recurrence implemented in PyTorch? -# * ??Possible bug: for nodes in nested composition (such as EMComposition): calling of execute_node on the -# nested Composition rather than the outer one to which they now belong in -# PytorchCompositionWrapper - -# TODO: -# -# SCRIPT STUFF: -# √ REPLACE INTEGRATOR RECURRENTTRANSFERMECHANISM WITH TRANSFERMECHANISM IN INTEGRATOR MODE -# OR TRY USING LCA with DECAY? 
-# - CHECK THAT VERSION WITH TRANSFERMECHANISM FOR CONTEXT PRODUCES CORRECT EM ENTRIES PER PREVOUS BENCHMARKING -# - DEBUG LEARNING -# """ + +CONTROL FLOW: + - EM EXECUTES FIRST: + - RETRIEVES USING PREVIOUS STATE NODE AND CONTEXT (PRE-INTEGRATION) TO RETRIEVE PREDICTED CURRENT STATE + - STORES VALUES OF PREVIOUS STATE, CURRENT STATE (INPUT) AND CONTEXT (PRE-INTEGRATION) INTO EM + - THEN: + - PREVIOUS_STATE EXECUTES TO GET CURRENT_STATE_INPUT (FOR RETRIEVAL ON NEXT TRIAL) + - INTEGRATOR LAYER EXECUTES, INTEGRATING CURRENT_STATE_INPUT INTO MEMORY + - CONTEXT LAYER EXECUTES TO GET LEARNED CONTEXT (FOR RETRIEVAL ON NEXT TRIAL) + - PREDICTED CURRENT STATE IS COMPARED WITH ACTUAL CURRENT STATE (TARGET) TO UPDATE INTEGRATOR -> CONTEXT WEIGHTS + +ISSUES: + * Using TransferMechanism (to avoid recurrent in PyTorch): + -> input is always just linearly integrated, and the integral is tanh'd + (not sure tanh is even necessary, since integral is always between 0 and 1) + -> how is recurrence implemented in PyTorch? + * ??Possible bug: for nodes in nested composition (such as EMComposition): calling of execute_node on the + nested Composition rather than the outer one to which they now belong in + PytorchCompositionWrapper + +TODO: + +SCRIPT STUFF: +√ REPLACE INTEGRATOR RECURRENTTRANSFERMECHANISM WITH TRANSFERMECHANISM IN INTEGRATOR MODE + OR TRY USING LCA with DECAY? +- CHECK THAT VERSION WITH TRANSFERMECHANISM FOR CONTEXT PRODUCES CORRECT EM ENTRIES PER PREVOUS BENCHMARKING +- DEBUG LEARNING + QUESTIONS: NOTES: @@ -131,134 +130,63 @@ """ -import matplotlib.pyplot as plt + import numpy as np import graph_scheduler as gs +from importlib import import_module from enum import IntEnum - +import matplotlib.pyplot as plt import torch torch.manual_seed(0) - from psyneulink import * from psyneulink._typing import Union, Literal -#region SCRIPT SETTINGS -# ====================================================================================================================== -# SCRIPT SETTINGS -# ====================================================================================================================== -# Settings for running script: - -CONSTRUCT_MODEL = True # THIS MUST BE SET TO True to run the script -DISPLAY_MODEL = ( # Only one of the following can be uncommented: - None # suppress display of model - # { # show simple visual display of model - # 'show_pytorch': True, # show pytorch graph of model - # 'show_learning': True - # # 'show_projections_not_in_composition': True, - # # 'exclude_from_gradient_calc_style': 'dashed'# show target mechanisms for learning - # # {'show_node_structure': True # show detailed view of node structures and projections - # } -) -RUN_MODEL = True, # True => run the model -# RUN_MODEL = False # False => don't run the model -# EXECUTION_MODE = ExecutionMode.Python -EXECUTION_MODE = ExecutionMode.PyTorch -# REPORT_OUTPUT = ReportOutput.FULL # Sets console output during run [ReportOutput.ON, .TERSE OR .FULL] -REPORT_OUTPUT = ReportOutput.OFF # Sets console output during run [ReportOutput.ON, .TERSE OR .FULL] -REPORT_PROGRESS = ReportProgress.OFF # Sets console progress bar during run -PRINT_RESULTS = True # print model.results to console after execution -SAVE_RESULTS = False # save model.results to disk -# PLOT_RESULTS = True # plot results (PREDICTIONS) vs. TARGETS -PLOT_RESULTS = False # plot results (PREDICTIONS) vs. 
TARGETS -ANIMATE = False # {UNIT:EXECUTION_SET} # Specifies whether to generate animation of execution -#endregion +from ScriptControl import (MODEL_PARAMS, CONSTRUCT_MODEL, DISPLAY_MODEL, RUN_MODEL, + REPORT_OUTPUT, REPORT_PROGRESS, PRINT_RESULTS, SAVE_RESULTS, PLOT_RESULTS) +import Environment +import_module(MODEL_PARAMS) +model_params = import_module(MODEL_PARAMS).model_params + -#region ENVIRONMENT +#region TASK ENVIRONMENT # ====================================================================================================================== -# ENVIRONMENT +# TASK ENVIRONMENT # ====================================================================================================================== -# Task environment: -import Environment - -# CURRICULUM_TYPE = 'Blocked' # 'Blocked' or 'Interleaved' -CURRICULUM_TYPE = 'Interleaved' # 'Blocked' or 'Interleaved' - -NUM_STIMS = 7 # Integer or ALL -dataset = Environment.generate_dataset(condition=CURRICULUM_TYPE) -if NUM_STIMS is ALL: +dataset = Environment.generate_dataset(condition=model_params['curriculum_type'],) +if model_params['num_stims'] is ALL: INPUTS = dataset.xs.numpy() TARGETS = dataset.ys.numpy() else: - INPUTS = dataset.xs.numpy()[:NUM_STIMS] - TARGETS = dataset.ys.numpy()[:NUM_STIMS] + INPUTS = dataset.xs.numpy()[:model_params['num_stims']] + TARGETS = dataset.ys.numpy()[:model_params['num_stims']] TOTAL_NUM_STIMS = len(INPUTS) #endregion -#region PARAMETERS +#region MODEL # ====================================================================================================================== -# MODEL PARAMETERS +# MODEL # ====================================================================================================================== -model_params = dict( - - # Names: - name = "EGO Model CSW", - state_input_layer_name = "STATE", - previous_state_layer_name = "PREVIOUS STATE", - context_layer_name = 'CONTEXT', - em_name = "EM", - prediction_layer_name = "PREDICTION", - - # Structral - state_d = 11, # length of state vector - previous_state_d = 11, # length of state vector - context_d = 11, # length of context vector - memory_capacity = TOTAL_NUM_STIMS, # number of entries in EM memory - memory_init = (0,.001), # Initialize memory with random values in interval - # memory_init = None, # Initialize with zeros - concatenate_keys = False, - - # Processing - integration_rate = .69, # rate at which state is integrated into new context - state_weight = 1, # weight of the state used during memory retrieval - context_weight = 1, # weight of the context used during memory retrieval - normalize_field_weights = True, # whether to normalize the field weights during memory retrieval - # softmax_temperature = None, # temperature of the softmax used during memory retrieval (smaller means more argmax-like - softmax_temperature = .1, # temperature of the softmax used during memory retrieval (smaller means more argmax-like - # softmax_temperature = ADAPTIVE, # temperature of the softmax used during memory retrieval (smaller means more argmax-like - # softmax_temperature = CONTROL, # temperature of the softmax used during memory retrieval (smaller means more argmax-like - # softmax_threshold = None, # threshold used to mask out small values in softmax - softmax_threshold = .001, # threshold used to mask out small values in softmax - enable_learning=[True, False, False], # Enable learning for PREDICTION (STATE) but not CONTEXT or PREVIOUS STATE - learn_field_weights = False, - loss_spec = Loss.BINARY_CROSS_ENTROPY, - # loss_spec = Loss.MSE, - 
learning_rate = .5, - device = CPU, - # device = MPS, -) - -# EM structdural params: +# EM structural params: EMFieldsIndex = IntEnum('EMFields', ['STATE', 'CONTEXT', 'PREVIOUS_STATE'], start=0) -STATE_RETRIEVAL_WEIGHT = 0 +state_retrieval_weight = 0 RANDOM_WEIGHTS_INITIALIZATION=RandomMatrix(center=0.0, range=0.1) # Matrix spec used to initialize all Projections if is_numeric_scalar(model_params['softmax_temperature']): # translate to gain of softmax retrieval function - RETRIEVAL_SOFTMAX_GAIN = 1/model_params['softmax_temperature'] + retrieval_softmax_gain = 1/model_params['softmax_temperature'] else: # pass along ADAPTIVE or CONTROL spec - RETRIEVAL_SOFTMAX_GAIN = model_params['softmax_temperature'] -#endregion + retrieval_softmax_gain = model_params['softmax_temperature'] -#region MODEL -# ====================================================================================================================== -# MODEL -# ====================================================================================================================== +if model_params['memory_capacity'] is ALL: + memory_capacity = TOTAL_NUM_STIMS +elif not isinstance(model_params['memory_capacity'], int): + raise ValueError(f"memory_capacity must be an integer or ALL; got {model_params['memory_capacity']}") def construct_model(model_name:str=model_params['name'], @@ -276,15 +204,15 @@ def construct_model(model_name:str=model_params['name'], # EM: em_name:str=model_params['em_name'], - retrieval_softmax_gain=RETRIEVAL_SOFTMAX_GAIN, + retrieval_softmax_gain=retrieval_softmax_gain, retrieval_softmax_threshold=model_params['softmax_threshold'], - state_retrieval_weight:Union[float,int]=STATE_RETRIEVAL_WEIGHT, + state_retrieval_weight:Union[float,int]=state_retrieval_weight, previous_state_retrieval_weight:Union[float,int]=model_params['state_weight'], context_retrieval_weight:Union[float,int]=model_params['context_weight'], normalize_field_weights = model_params['normalize_field_weights'], concatenate_keys = model_params['concatenate_keys'], learn_field_weights = model_params['learn_field_weights'], - memory_capacity = model_params['memory_capacity'], + memory_capacity = memory_capacity, memory_init=model_params['memory_init'], # Output: @@ -421,7 +349,7 @@ def construct_model(model_name:str=model_params['name'], model = None if CONSTRUCT_MODEL: - print(f'Constructing {model_params["name"]}') + print(f"Constructing '{model_params['name']}'...") model = construct_model() assert 'DEBUGGING BREAK POINT' # print(model.scheduler.consideration_queue) @@ -445,12 +373,21 @@ def print_stuff(**kwargs): print('\nPrediction: \n', model.nodes['PREDICTION'].parameters.value.get(kwargs['context'])) # print('\nLoss: \n', - # model.parameters.tracked_loss._get(kwargs['context'])) + # model.parameters.minibatch_loss._get(kwargs['context'])) print('\nProjections from context to EM: \n', model.projections[7].parameters.matrix.get(kwargs['context'])) print('\nEM Memory: \n', model.nodes['EM'].parameters.memory.get(model.name)) - # print("MODEL NOT YET FULLY EXECUTABLE") - print(f"Running {model_params['name']}") + if INPUTS[0][9]: + sequence_context = 'context 1' + else: + sequence_context = 'context 2' + if INPUTS[1][1]: + sequence_state = 'state 1' + else: + sequence_state = 'state 2' + + print(f"Running '{model_params['name']}' with {MODEL_PARAMS} for {model_params['num_stims']} stims " + f"using {model_params['curriculum_type']} training starting with {sequence_context}, {sequence_state}...") context = model_params['name'] start_time = 
timeit.default_timer() model.learn(inputs={model_params['state_input_layer_name']:INPUTS}, @@ -460,10 +397,14 @@ def print_stuff(**kwargs): # model.projections[7].parameters.matrix.get(context)), # # model.projections[7].matrix) # call_after_minibatch=print_stuff, - optimizations_per_minibatch=1, + # optimizations_per_minibatch=model_params['num_optimization_steps'], + synch_projection_matrices_with_torch=model_params['synch_weights'], + synch_node_values_with_torch=model_params['synch_values'], + synch_results_with_torch=model_params['synch_results'], learning_rate=model_params['learning_rate'], - execution_mode=ExecutionMode.PyTorch, - # minibatch_size=3, + execution_mode= model_params['execution_mode'], + # minibatch_size=1, + # epochs=1 ) stop_time = timeit.default_timer() print(f"Elapsed time: {stop_time - start_time}") @@ -471,27 +412,30 @@ def print_stuff(**kwargs): model.show_graph(**DISPLAY_MODEL) if PRINT_RESULTS: print("MEMORY:") - print(model.nodes['EM'].parameters.memory.get(model.name)) - model.run(inputs={model_params["state_input_layer_name"]:INPUTS[4]}, - # report_output=REPORT_OUTPUT, - # report_progress=REPORT_PROGRESS - ) + print(np.round(model.nodes['EM'].parameters.memory.get(model.name),3)) + # model.run(inputs={model_params["state_input_layer_name"]:INPUTS[TOTAL_NUM_STIMS-1]}, + # # report_output=REPORT_OUTPUT, + # # report_progress=REPORT_PROGRESS + # ) print("CONTEXT INPUT:") - print(model.nodes['CONTEXT'].parameters.variable.get(model.name)) + print(np.round(model.nodes['CONTEXT'].parameters.variable.get(model.name),3)) print("CONTEXT OUTPUT:") - print(model.nodes['CONTEXT'].parameters.value.get(model.name)) - print("PREDICTION OUTPUT:") - print(model.nodes['PREDICTION'].parameters.value.get(model.name)) - print("CONTEXT WEIGHTS:") - print(model.projections[7].parameters.matrix.get(model.name)) - plt.imshow(model.projections[7].parameters.matrix.get(model.name)) - def test_weights(weight_mat): + print(np.round(model.nodes['CONTEXT'].parameters.value.get(model.name),3)) + print("STATE:") + print(np.round(model.nodes['STATE'].parameters.value.get(model.name),3)) + print("PREDICTION:") + print(np.round(model.nodes['PREDICTION'].parameters.value.get(model.name),3)) + # print("CONTEXT WEIGHTS:") + # print(model.projections[7].parameters.matrix.get(model.name)) + + + def eval_weights(weight_mat): # checks whether only 5 weights are updated. 
weight_mat -= np.eye(11) col_sum = weight_mat.sum(1) row_sum = weight_mat.sum(0) return np.max([(row_sum != 0).sum(), (col_sum != 0).sum()]) - print(test_weights(model.projections[7].parameters.matrix.get(model.name))) + print(eval_weights(model.projections[7].parameters.matrix.get(model.name))) if SAVE_RESULTS: np.save('EGO PREDICTIONS', model.results) @@ -499,8 +443,18 @@ def test_weights(weight_mat): np.save('EGO TARGETS', TARGETS) if PLOT_RESULTS: - plt.plot(1 - np.abs(model.results[2:TOTAL_NUM_STIMS,2]-TARGETS[:TOTAL_NUM_STIMS-2])) + fig, axes = plt.subplots(3, 1, figsize=(5, 12)) + # Weight matrix + axes[0].imshow(model.projections[7].parameters.matrix.get(model.name), interpolation=None) + # L1 of loss + axes[1].plot((1 - np.abs(model.results[1:TOTAL_NUM_STIMS,2]-TARGETS[:TOTAL_NUM_STIMS-1])).sum(-1)) + axes[1].set_xlabel('Stimuli') + axes[1].set_ylabel(model_params['loss_spec']) + # Logit of loss + axes[2].plot( (model.results[1:TOTAL_NUM_STIMS,2]*TARGETS[:TOTAL_NUM_STIMS-1]).sum(-1) ) + axes[2].set_xlabel('Stimuli') + axes[2].set_ylabel('Correct Logit') + plt.suptitle(f"{model_params['curriculum_type']} Training") plt.show() - plt.savefig('EGO PLOT.png') - + # plt.savefig('../show_graph OUTPUT/EGO PLOT.png') #endregion diff --git a/Scripts/Models (Under Development)/EGO/EGO Model (sim 1) - MDP using EMComposition.py b/Scripts/Models (Under Development)/EGO/Using EMComposition/EGO Model - Revaluation.py similarity index 99% rename from Scripts/Models (Under Development)/EGO/EGO Model (sim 1) - MDP using EMComposition.py rename to Scripts/Models (Under Development)/EGO/Using EMComposition/EGO Model - Revaluation.py index 3420f1191d8..c9e827cf197 100644 --- a/Scripts/Models (Under Development)/EGO/EGO Model (sim 1) - MDP using EMComposition.py +++ b/Scripts/Models (Under Development)/EGO/Using EMComposition/EGO Model - Revaluation.py @@ -125,8 +125,8 @@ CONSTRUCT_MODEL = True # THIS MUST BE SET TO True to run the script DISPLAY_MODEL = ( # Only one of the following can be uncommented: - None # suppress display of model - # {} # show simple visual display of model + # None # suppress display of model + {} # show simple visual display of model # {'show_node_structure': True} # show detailed view of node structures and projections ) RUN_MODEL = True # True => run the model diff --git a/Scripts/Models (Under Development)/EGO/Environment.py b/Scripts/Models (Under Development)/EGO/Using EMComposition/Environment.py similarity index 100% rename from Scripts/Models (Under Development)/EGO/Environment.py rename to Scripts/Models (Under Development)/EGO/Using EMComposition/Environment.py diff --git a/Scripts/Models (Under Development)/EGO/Using EMComposition/ScriptControl.py b/Scripts/Models (Under Development)/EGO/Using EMComposition/ScriptControl.py new file mode 100644 index 00000000000..8b40d9403ca --- /dev/null +++ b/Scripts/Models (Under Development)/EGO/Using EMComposition/ScriptControl.py @@ -0,0 +1,29 @@ +from psyneulink.core.compositions.report import ReportOutput, ReportProgress + +# Settings for running script: + +# MODEL_PARAMS = 'TestParams' +MODEL_PARAMS = 'DeclanParams' + +CONSTRUCT_MODEL = True # THIS MUST BE SET TO True to run the script +DISPLAY_MODEL = ( # Only one of the following can be uncommented: + None # suppress display of model + # { # show simple visual display of model + # 'show_pytorch': True, # show pytorch graph of model + # 'show_learning': True + # # 'show_projections_not_in_composition': True, + # # 'exclude_from_gradient_calc_style': 'dashed'# show target 
mechanisms for learning + # # {'show_node_structure': True # show detailed view of node structures and projections + # } +) +# RUN_MODEL = False # False => don't run the model +RUN_MODEL = True, # True => run the model +# REPORT_OUTPUT = ReportOutput.FULL # Sets console output during run [ReportOutput.ON, .TERSE OR .FULL] +REPORT_OUTPUT = ReportOutput.OFF # Sets console output during run [ReportOutput.ON, .TERSE OR .FULL] +REPORT_PROGRESS = ReportProgress.OFF # Sets console progress bar during run +# PRINT_RESULTS = False # don't print model.results to console after execution +PRINT_RESULTS = True # print model.results to console after execution +SAVE_RESULTS = False # save model.results to disk +# PLOT_RESULTS = False # don't plot results (PREDICTIONS) vs. TARGETS +PLOT_RESULTS = True # plot results (PREDICTIONS) vs. TARGETS +ANIMATE = False # {UNIT:EXECUTION_SET} # Specifies whether to generate animation of execution diff --git a/Scripts/Models (Under Development)/EGO/Using EMComposition/TestParams.py b/Scripts/Models (Under Development)/EGO/Using EMComposition/TestParams.py new file mode 100644 index 00000000000..39a4c9ccbc3 --- /dev/null +++ b/Scripts/Models (Under Development)/EGO/Using EMComposition/TestParams.py @@ -0,0 +1,56 @@ +from psyneulink.core.llvm import ExecutionMode +from psyneulink.core.globals.keywords import ALL, ADAPTIVE, CONTROL, CPU, Loss, MPS, OPTIMIZATION_STEP, RUN, TRIAL + +model_params = dict( + + # Names: + name = "EGO Model CSW", + state_input_layer_name = "STATE", + previous_state_layer_name = "PREVIOUS STATE", + context_layer_name = 'CONTEXT', + em_name = "EM", + prediction_layer_name = "PREDICTION", + + # Structural + state_d = 11, # length of state vector + previous_state_d = 11, # length of state vector + context_d = 11, # length of context vector + memory_capacity = ALL, # number of entries in EM memory; ALL=> match to number of stims + memory_init = (0,.0001), # Initialize memory with random values in interval + # memory_init = None, # Initialize with zeros + # concatenate_keys = False, + concatenate_keys = True, + + # environment + # curriculum_type = 'Interleaved', + curriculum_type = 'Blocked', + num_stims = 7, # Integer or ALL + # num_stims = ALL, # Integer or ALL + + # Processing + integration_rate = .69, # rate at which state is integrated into new context + state_weight = 1, # weight of the state used during memory retrieval + context_weight = 1, # weight of the context used during memory retrieval + normalize_field_weights = False, # whether to normalize the field weights during memory retrieval + # normalize_field_weights = True, # whether to normalize the field weights during memory retrieval + # softmax_temperature = None, # temperature of the softmax used during memory retrieval (smaller means more argmax-like + softmax_temperature = .1, # temperature of the softmax used during memory retrieval (smaller means more argmax-like + # softmax_temperature = ADAPTIVE, # temperature of the softmax used during memory retrieval (smaller means more argmax-like + # softmax_temperature = CONTROL, # temperature of the softmax used during memory retrieval (smaller means more argmax-like + # softmax_threshold = None, # threshold used to mask out small values in softmax + softmax_threshold = .001, # threshold used to mask out small values in softmax + enable_learning=[True, False, False], # Enable learning for PREDICTION (STATE) but not CONTEXT or PREVIOUS STATE + learn_field_weights = False, + loss_spec = Loss.BINARY_CROSS_ENTROPY, + # loss_spec = Loss.MSE, 
+ learning_rate = .5, + num_optimization_steps = 10, + # execution_mode = ExecutionMode.Python, + synch_weights = RUN, + synch_values = RUN, + synch_results = RUN, + execution_mode = ExecutionMode.PyTorch, + device = CPU, + # device = MPS, +) +#endregion \ No newline at end of file diff --git a/Scripts/Models (Under Development)/EGO/Using EMComposition/__init__.py b/Scripts/Models (Under Development)/EGO/Using EMComposition/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/Scripts/Models (Under Development)/EGO/EGO Model - MDP.py b/Scripts/Models (Under Development)/EGO/Using EpisodicMemoryMechanism/EGO Model - MDP.py similarity index 100% rename from Scripts/Models (Under Development)/EGO/EGO Model - MDP.py rename to Scripts/Models (Under Development)/EGO/Using EpisodicMemoryMechanism/EGO Model - MDP.py diff --git a/Scripts/Models (Under Development)/EGO/Using EpisodicMemoryMechanism/__init__.py b/Scripts/Models (Under Development)/EGO/Using EpisodicMemoryMechanism/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/Scripts/Models (Under Development)/EGO/__init__.py b/Scripts/Models (Under Development)/EGO/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/Scripts/Models (Under Development)/nback/nback.py b/Scripts/Models (Under Development)/nback/nback.py index 5930c3ef6c0..c1513137d36 100644 --- a/Scripts/Models (Under Development)/nback/nback.py +++ b/Scripts/Models (Under Development)/nback/nback.py @@ -890,7 +890,7 @@ def network_test(network:AutodiffComposition, coded_responses, stats = analyze_results([network.results,conditions], test=True) import torch cross_entropy_loss = \ - [network.loss(torch.Tensor(output[0]),torch.Tensor(np.array(target))).detach().numpy().tolist() + [network.loss_function(torch.Tensor(output[0]),torch.Tensor(np.array(target))).detach().numpy().tolist() for output, target in zip(network.results, targets)] coded_responses_flat = [] diff --git a/Scripts/Models (Under Development)/nback/nback_og_pnl.py b/Scripts/Models (Under Development)/nback/nback_og_pnl.py index aa1b9f0fc30..fcbab06dc66 100644 --- a/Scripts/Models (Under Development)/nback/nback_og_pnl.py +++ b/Scripts/Models (Under Development)/nback/nback_og_pnl.py @@ -882,7 +882,7 @@ def network_test(network:AutodiffComposition, coded_responses, stats = analyze_results([network.results,conditions], test=True) import torch cross_entropy_loss = \ - [network.loss(torch.Tensor(output[0]),torch.Tensor(np.array(target))).detach().numpy().tolist() + [network.loss_function(torch.Tensor(output[0]),torch.Tensor(np.array(target))).detach().numpy().tolist() for output, target in zip(network.results, targets)] coded_responses_flat = [] for nback_level in nback_levels: diff --git a/psyneulink/core/components/component.py b/psyneulink/core/components/component.py index 305226c7712..ba8f0755585 100644 --- a/psyneulink/core/components/component.py +++ b/psyneulink/core/components/component.py @@ -1463,7 +1463,7 @@ def _get_compilation_params(self): "objective_mechanism", "agent_rep", "projections", "outcome_input_ports", "state_input_ports", # autodiff specific types - "pytorch_representation", "optimizer", + "pytorch_representation", "optimizer", "synch_projection_matrices_with_torch", # duplicate "allocation_samples", "control_allocation_search_space", # not used in computation @@ -1490,10 +1490,14 @@ def _get_compilation_params(self): "error_matrix", "error_signal", "activation_input", "activation_output", "error_sources", 
"covariates_sources", "target", "sample", "learning_function", - "device", + "minibatch_size", "optimizations_per_minibatch", "device", + "retain_torch_trained_outputs", "retain_torch_targets", "retain_torch_losses" + "torch_trained_outputs", "torch_targets", "torch_losses", # should be added to relevant _gen_llvm_function... when aug: # SoftMax: - 'mask_threshold', 'adapt_scale', 'adapt_base', 'adapt_entropy_weighting' + 'mask_threshold', 'adapt_scale', 'adapt_base', 'adapt_entropy_weighting', + # LCAMechanism + "mask" } # Mechanism's need few extra entries: # * matrix -- is never used directly, and is flatened below diff --git a/psyneulink/core/compositions/composition.py b/psyneulink/core/compositions/composition.py index 79177e1c3b2..c2216c625a4 100644 --- a/psyneulink/core/compositions/composition.py +++ b/psyneulink/core/compositions/composition.py @@ -3504,6 +3504,8 @@ class Composition(Composition_Base, metaclass=ComponentsMeta): include_probes_in_output=False \ disable_learning=False, \ learning_rate=None, \ + minibatch_size=1, \ + optimizations_per_minibatch=1, \ controller=None, \ enable_controller=None, \ controller_mode=AFTER, \ @@ -3571,6 +3573,19 @@ class Composition(Composition_Base, metaclass=ComponentsMeta): that do not have their own `learning_rate ` otherwise specified (see `Composition_Learning_Rate` for additional details). + minibatch_size : int : default 1 + specifies the default for the Composition for the number of distinct inputs from the training set used to + compute the `error_signal ` in one step of learning; it can be overridden by + specifying the **minibatch_size** argument in the `learn ` method (see `minibatch_size + ` for additional details. + + optimizations_per_minibatch : int : default 1 + specifies the default for the Composition for the number of repetitions of each stimulus in the training set + is used to compute the `error_signal ` for a given `minibatch + `; it can be overridden by specifying the **minibatch_size** argument in the `learn + ` method (see `optimizations_per_minibatch ` for + additional details. + controller : `OptimizationControlMechanism` : default None specifies the `OptimizationControlMechanism` to use as the `Composition's controller `. @@ -3842,6 +3857,16 @@ class Composition(Composition_Base, metaclass=ComponentsMeta): ` Parameter of a LearningMechanism (see `Composition_Learning_Rate` for additional details). + minibatch_size : int + determines the number of input stimuli from the training set used to compute the `error_signal + ` in one gradient step of learning if this is not specified in the call to + `learn ` (see `minibatch ` for additional details). + + optimizations_per_minibatch : int + determines the number of repetitions of each stimulus in the training set used to compute an `error_signal + ` for single gradient step in learning if this is not specified in the call + to `learn ` (see `minibatch ` for additional details). 
+ learning_components : list[list] a list of the learning-related components in the Composition, all or many of which may have been created automatically in a call to one of its `add_<*learning_type*>_pathway' methods (see @@ -3936,6 +3961,18 @@ class Parameters(Composition_Base.Parameters): :default value: [] :type: ``list`` + minibatch_size + see `minibatch_size ` + + :default value: 1 + :type: ``int`` + + optimizations_per_minibatch + see `optimizations_per_minibatch ` + + :default value: 1 + :type: ``int`` + results see `results ` @@ -3954,6 +3991,8 @@ class Parameters(Composition_Base.Parameters): :default value: [] :type: ``list`` """ + minibatch_size = Parameter(1, modulable=True, pnl_internal=True) + optimizations_per_minibatch = Parameter(1, modulable=True, pnl_internal=True) results = Parameter([], loggable=False, pnl_internal=True) learning_results = Parameter([], loggable=False, pnl_internal=True) simulation_results = Parameter([], loggable=False, pnl_internal=True) @@ -3961,6 +4000,15 @@ class Parameters(Composition_Base.Parameters): input_specification = Parameter(None, stateful=False, loggable=False, pnl_internal=True) value = Parameter(NotImplemented, read_only=True) # replaces deletion in constructor below + def _validate_minibatch_size(self, minibatch_size): + if minibatch_size < 1: + raise CompositionError(f"`minibatch_size` ({minibatch_size}) must an int greater than or equal to 1.") + + def _validate_optimizations_per_minibatch(self, optimizations_per_minibatch): + if optimizations_per_minibatch < 1: + raise CompositionError(f"`optimizations_per_minibatch` ({optimizations_per_minibatch}) " + f"must an int greater than or equal to 1.") + class _CompilationData(ParametersBase): execution = None @@ -3974,6 +4022,8 @@ def __init__( include_probes_in_output: bool = False, disable_learning: bool = False, learning_rate:Optional[Union[float, int]] = None, + minibatch_size:int = 1, + optimizations_per_minibatch:int = 1, controller: ControlMechanism = None, enable_controller=None, controller_mode: Literal['before', 'after'] = 'after', @@ -4059,6 +4109,8 @@ def __init__( self._initialize_parameters( **param_defaults, + minibatch_size=minibatch_size, + optimizations_per_minibatch=optimizations_per_minibatch, retain_old_simulation_data=retain_old_simulation_data, context=context ) @@ -9917,7 +9969,7 @@ def _infer_target_nodes(self, targets: dict, execution_mode): if execution_mode is pnlvm.ExecutionMode.PyTorch: # Reassign target inputs from output Nodes to target mechanisms constructed for PyTorch execution - return {target: value for target, value in zip(self.target_output_map.keys(), targets.values())} + return {target: value for target, value in zip(self.targets_from_outputs_map.keys(), targets.values())} ret = {} for node, values in targets.items(): @@ -10143,7 +10195,7 @@ def _parse_input_dict(self, inputs, context=None): # If Composition is in learning mode, not called from COMMAND_LINE, and not still preparing, # presumably inputs have already been parsed so shouldn't do it again - # FIX: 11/3/23 - NOTE: This circumvents parsing of inputs when they are a func and called from autodiff_training + # FIX: 11/3/23 - NOTE: This circumvents parsing of inputs when they are a func and called from autodiff_forward if (context and (context.runmode & ContextFlags.LEARNING_MODE) and (context.source & ContextFlags.COMPOSITION) and not (context.execution_phase & ContextFlags.PREPARING)): @@ -10862,7 +10914,7 @@ def run( context=None, base_context=Context(execution_id=None), **kwargs - ): 
+ )->list: """Pass inputs to Composition, then execute sets of nodes that are eligible to run until termination conditions are met. @@ -11326,6 +11378,8 @@ def run( content='run_start', context=context) + self.TRIAL_NUM = -1 + # Loop over the length of the list of inputs - each input represents a TRIAL for trial_num in range(num_trials): @@ -11353,7 +11407,7 @@ def run( break # execute processing, passing stimuli for this trial - # IMPLEMENTATION NOTE: for autdoiff, the following is the forward pass for the current trial + # IMPLEMENTATION NOTE: for autodiff, the following executes the forward pass for a single input trial_output = self.execute(inputs=execution_stimuli, scheduler=scheduler, termination_processing=termination_processing, @@ -11369,20 +11423,20 @@ def run( skip_initialization=True, execution_mode=execution_mode, report=report, - report_num=report_num + report_num=report_num, + **kwargs ) # --------------------------------------------------------------------------------- # store the result of this execution in case it will be the final result - - assert "AFFTER FOWARD PASS" - - # object.results.append(result) trial_output = copy_parameter_value(trial_output) - results.append(trial_output) - self.parameters.results._set(convert_to_np_array(results), context) + self._update_results(results, + trial_output, + execution_mode, + kwargs['synch_with_pnl_options'] if 'synch_with_pnl_options' in kwargs else None, + context) if not self.parameters.retain_old_simulation_data._get(): if self.controller is not None: @@ -11464,19 +11518,18 @@ def learn( num_trials: Optional[int] = None, epochs: int = 1, learning_rate: Optional[Union[int,float]]=None, - minibatch_size: int = 1, - optimizations_per_minibatch: int = 1, + minibatch_size:Optional[int]=None, + optimizations_per_minibatch:Optional[int]=None, patience: Optional[int] = None, min_delta: int = 0, - synchronize_pnl_values: bool = True, - context: Optional[Context] = None, execution_mode: pnlvm.ExecutionMode = pnlvm.ExecutionMode.Python, randomize_minibatches=False, call_before_minibatch=None, call_after_minibatch=None, + context: Optional[Context] = None, *args, **kwargs - ): + )->list: """ Runs the composition in learning mode - that is, any components with disable_learning False will be executed in learning mode. See `Composition_Learning` for details. @@ -11521,12 +11574,15 @@ def learn( the learn method (see `Composition_Learning_Rate` for additional details). minibatch_size : int (default=1) - specifies the size of the minibatches to use. The input trials will be batched and run, after which - learning mechanisms with learning mode TRIAL will update weights + specifies the number of inputs used to calculate the `error_signal ` + for one step (gradient update) of learning, after which LearningMechanisms with learning mode TRIAL + will update the `matrix ` parameter of the `MappingProjection` for which + they are responsible; this overrides the Composition's default value. optimizations_per_minibatch : int (default=1) - specified the number of executions and weight updates of learnable pathways are carried out for - each set of stimuli in a minibatch. + specifies the number of executions and weight updates of learnable pathways that are carried out for + each set of stimuli in a `minibatch `; this overrides the Composition's + default value. .. hint:: This can be used to implement the `backprop-to-activation proceedure @@ -11536,7 +11592,7 @@ def learn( downstream purpose. 
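A minimal sketch of how the new minibatch_size and optimizations_per_minibatch arguments fit together, combining the constructor-level defaults documented above with a per-call override in learn(). The mechanisms, sizes, and training data are hypothetical, and it assumes AutodiffComposition's dict-style inputs/targets format for supervised learning; treat it as an illustration under those assumptions:

    import numpy as np
    import psyneulink as pnl

    # Hypothetical two-layer network; names, sizes, and data are illustrative only.
    in_mech = pnl.ProcessingMechanism(size=2, name='IN')
    out_mech = pnl.ProcessingMechanism(size=1, name='OUT')

    model = pnl.AutodiffComposition(
        pathways=[in_mech, out_mech],
        minibatch_size=4,                # Composition-level default: 4 inputs per gradient step
        optimizations_per_minibatch=2,   # Composition-level default: 2 updates per set of inputs
    )

    train_inputs = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
    train_targets = np.array([[0], [1], [1], [0]])

    # Arguments passed to learn() override the Composition-level defaults for this call only.
    model.learn(
        inputs={'inputs': {in_mech: train_inputs},
                'targets': {out_mech: train_targets}},
        epochs=10,
        minibatch_size=2,
        optimizations_per_minibatch=1,
        execution_mode=pnl.ExecutionMode.PyTorch,
    )

Setting optimizations_per_minibatch above 1 is the intended use in the EGO scripts earlier in this series: DeclanParams defines num_optimization_steps = 10 for this purpose, although the corresponding optimizations_per_minibatch argument is still commented out in that script's learn() call.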
randomize_minibatch: bool (default=False) - specifies whether the order of the input trials should be randomized on each epoch + specifies whether the order of the input trials should be randomized in each epoch patience : int or None (default=None) used for early stopping of training; If a model has more than `patience` bad consecutive epochs, @@ -11547,19 +11603,10 @@ def learn( Any reduction less than this value is considered to be a bad epoch. Used for early stopping of training, in combination with `patience`. - synchronize_pnl_values : bool : default True - specifies whether to synchronize the `values ` of the `Mechanisms ` - in the PsyNeuLink Composition with the corresponding modules of the PyTorch implementation after each - forward pass when an `AutodiffComposition` is used is executed in ``PyTorch mode - `. - scheduler : Scheduler the scheduler object that owns the conditions that will instruct the execution of the Composition If not specified, the Composition will use its automatically generated scheduler. - context - context will be set to self.default_execution_id if unspecified - call_before_minibatch : callable called before each minibatch is executed @@ -11587,6 +11634,9 @@ def learn( specifies where output and progress should be reported; see `Report_To_Device` for additional details and `ReportDevices` for options. + context + context will be set to self.default_execution_id if unspecified + Returns --------- @@ -11609,14 +11659,12 @@ def learn( warnings.warn(f"learn() method called on '{self.name}', but it has no learning components; " f"it will be run but no learning will occur.") + # Prepare graph and context for learning context.add_flag(ContextFlags.LEARNING_MODE) - execution_phase_at_entry = context.execution_phase context.execution_phase=ContextFlags.PREPARING - self._analyze_graph() self._check_nested_target_mechs() - context.execution_phase = execution_phase_at_entry result = runner.run_learning( @@ -11625,11 +11673,14 @@ def learn( num_trials=num_trials, epochs=epochs, learning_rate=learning_rate, - minibatch_size=minibatch_size, - optimizations_per_minibatch=optimizations_per_minibatch, + minibatch_size=minibatch_size + or self.parameters.minibatch_size._get(context) + or self.parameters.minibatch_size.default_value, + optimizations_per_minibatch=optimizations_per_minibatch + or self.parameters.optimizations_per_minibatch._get(context) + or self.parameters.optimizations_per_minibatch.default_value, patience=patience, min_delta=min_delta, - synchronize_pnl_values=synchronize_pnl_values, randomize_minibatches=randomize_minibatches, call_before_minibatch=call_before_minibatch, call_after_minibatch=call_after_minibatch, @@ -11727,7 +11778,8 @@ def execute( report_to_devices:ReportDevices=None, report=None, report_num=None, - ): + **kwargs + )->np.ndarray: """ Passes inputs to any `Nodes ` receiving inputs directly from the user (via the "inputs" argument) then coordinates with the `Scheduler` to execute sets of Nodes that are eligible to execute until @@ -11809,7 +11861,7 @@ def execute( Returns --------- - output_values : List + output_values : np.ndarray These are the values of the Composition's output_CIM.output_ports, excluding those the source of which are from a (potentially nested) Node with NodeRole.PROBE in its enclosing Composition. 
""" @@ -12812,7 +12864,15 @@ def get_results_by_nodes(self, else: return {k:np.array(v).tolist() for k,v in result_set} - def _update_learning_parameters(self, context): + def _update_results(self, results, trial_output, execution_mode, synch_with_pnl_options, context): + """Update results by appending most recent trial_output + This is included as a helper so it can be overriden by subclasses (such as AutodiffComposition) + that may need to do this less frequently for scallable exeuction + """ + results.append(trial_output) + self.parameters.results._set(convert_to_np_array(results), context) + + def do_gradient_optimization(self, retain_in_pnl_options, context, optimization_num=None): pass @handle_external_context(fallback_most_recent=True) diff --git a/psyneulink/core/compositions/showgraph.py b/psyneulink/core/compositions/showgraph.py index feb22cc7cde..29177bd5398 100644 --- a/psyneulink/core/compositions/showgraph.py +++ b/psyneulink/core/compositions/showgraph.py @@ -826,13 +826,6 @@ def show_graph(self, rcvrs = list(processing_graph.keys()) for rcvr in rcvrs: - # # MODIFIED 7/10 NEW: - # # FIX: NOT SURE WHAT THE PURPOSE OF THIS WAS, AND DOESN'T EVER SEEM TO GET CALLED: - # if any(n is rcvr for nested_comp in self._get_nodes(composition, context) - # if isinstance(nested_comp, Composition) for n in self._get_nodes(nested_comp, context)): - # continue - # # MODIFIED 7/10 END - # If show_controller is true, objective mechanism is handled in _assign_controller_components if (show_controller and composition.controller diff --git a/psyneulink/core/globals/keywords.py b/psyneulink/core/globals/keywords.py index c688746286c..4ed8c8335a9 100644 --- a/psyneulink/core/globals/keywords.py +++ b/psyneulink/core/globals/keywords.py @@ -28,8 +28,8 @@ 'ADAPTIVE', 'ADAPTIVE_INTEGRATOR_FUNCTION', 'ADAPTIVE_MECHANISM', 'ADD_INPUT_PORT', 'ADD_OUTPUT_PORT', 'ADDITIVE', 'ADDITIVE_PARAM', 'AFTER', 'ALL', 'ALLOCATION_SAMPLES', 'ALLOW_PROBES', 'ANGLE', 'ANGLE_FUNCTION', 'ANY', 'ARGUMENT_THERAPY_FUNCTION', 'ARRANGEMENT', 'ASSERT', 'ASSIGN', 'ASSIGN_VALUE', 'AUTO','AUTO_ASSIGN_MATRIX', - 'AUTO_ASSOCIATIVE_PROJECTION', 'HAS_INITIALIZERS', 'AUTOASSOCIATIVE_LEARNING_MECHANISM', 'AUTODIFF_COMPOSITION', - 'BACKPROPAGATION_FUNCTION', 'BINOMIAL_DISTORT_FUNCTION', + 'AUTO_ASSOCIATIVE_PROJECTION', 'HAS_INITIALIZERS', 'AUTOASSOCIATIVE_LEARNING_MECHANISM', + 'AUTODIFF_COMPOSITION', 'AUTODIFF_RESULTS', 'BACKPROPAGATION_FUNCTION', 'BINOMIAL_DISTORT_FUNCTION', 'BEFORE', 'BETA', 'BIAS', 'BOLD', 'BOTH', 'BOUNDS', 'BUFFER_FUNCTION', 'CHANGED', 'CLAMP_INPUT', 'COMBINATION_FUNCTION_TYPE', 'COMBINE', 'COMBINE_MEANS_FUNCTION', 'COMBINE_OUTCOME_AND_COST_FUNCTION', 'COMMAND_LINE', 'comparison_operators', 'COMPARATOR_MECHANISM', 'COMPONENT', @@ -48,9 +48,10 @@ 'DRIFT_DIFFUSION_INTEGRATOR_FUNCTION', 'DRIFT_ON_A_SPHERE_INTEGRATOR_FUNCTION', 'DROPOUT_FUNCTION', 'DUAL_ADAPTIVE_INTEGRATOR_FUNCTION', 'EFFERENTS', 'EID_SIMULATION', 'EID_FROZEN', 'EITHER', 'ENABLE_CONTROLLER', 'ENABLED', 'ENERGY', 'ENTROPY', - 'EM_COMPOSITION', 'EM_STORAGE_FUNCTION', 'EM_STORAGE_MECHANISM', 'EPISODIC_MEMORY_MECHANISM', 'EPOCHS', 'EQUAL', - 'ERROR_DERIVATIVE_FUNCTION', 'EUCLIDEAN', 'EVC_MECHANISM', 'EVC_SIMULATION', 'EXAMPLE_FUNCTION_TYPE', - 'EXECUTE_UNTIL_FINISHED', 'EXECUTING', 'EXECUTION', 'EXECUTION_COUNT', 'EXECUTION_ID', 'EXECUTION_PHASE', + 'EM_COMPOSITION', 'EM_STORAGE_FUNCTION', 'EM_STORAGE_MECHANISM', 'EPISODIC_MEMORY_MECHANISM', 'EPOCH', 'EPOCHS', + 'EQUAL', 'ERROR_DERIVATIVE_FUNCTION', 'EUCLIDEAN', 'EVC_MECHANISM', 'EVC_SIMULATION', 
'EXAMPLE_FUNCTION_TYPE', + 'EXECUTE_UNTIL_FINISHED', 'EXECUTING', + 'EXECUTION', 'EXECUTION_COUNT', 'EXECUTION_ID', 'EXECUTION_MODE', 'EXECUTION_PHASE', 'EXPONENTIAL', 'EXPONENT', 'EXPONENTIAL_DIST_FUNCTION', 'EXPONENTIAL_FUNCTION', 'EXPONENTS', 'FEEDBACK', 'FITZHUGHNAGUMO_INTEGRATOR_FUNCTION', 'FINAL', 'FLAGS', 'FULL', 'FULL_CONNECTIVITY_MATRIX', 'FUNCTION', 'FUNCTIONS', 'FUNCTION_COMPONENT_CATEGORY','FUNCTION_CHECK_ARGS', @@ -60,26 +61,27 @@ 'GAUSSIAN', 'GAUSSIAN_FUNCTION', 'GILZENRAT_INTEGRATOR_FUNCTION', 'GREATER_THAN', 'GREATER_THAN_OR_EQUAL', 'GRADIENT_OPTIMIZATION_FUNCTION', 'GRID_SEARCH_FUNCTION', 'HARD_CLAMP', 'HEBBIAN_FUNCTION', 'HETERO', 'HIGH', 'HOLLOW_MATRIX', 'IDENTITY_MATRIX', 'INCREMENT', 'INDEX', - 'INIT_EXECUTE_METHOD_ONLY', 'INIT_FULL_EXECUTE_METHOD', 'INIT_FUNCTION_METHOD_ONLY', 'INITIALIZE_CYCLE_VALUES', - 'INITIALIZE_CYCLE', 'INITIALIZATION', 'INITIALIZED', 'INITIALIZER', 'INITIALIZING', 'INITIALIZATION_STATUS', + 'INIT_EXECUTE_METHOD_ONLY', 'INIT_FULL_EXECUTE_METHOD', 'INIT_FUNCTION_METHOD_ONLY', + 'INITIALIZE', 'INITIALIZED', 'INITIALIZER', 'INITIALIZE_CYCLE', 'INITIALIZE_CYCLE_VALUES', + 'INITIALIZING', 'INITIALIZATION', 'INITIALIZATION_STATUS', 'INPUT', 'INPUTS', 'INPUT_CIM_NAME', 'INPUT_LABELS_DICT', 'INPUT_PORT', 'INPUT_PORTS', 'INPUT_PORT_PARAMS', 'INPUT_PORT_VARIABLES', 'INPUTS_DIM', 'INSET', 'CURRENT_VALUE', 'INTEGRATION_TYPE', 'INTEGRATOR_FUNCTION','INTEGRATOR_FUNCTION', 'INTEGRATOR_FUNCTION_TYPE', 'INTEGRATOR_MECHANISM', 'LAST_INTEGRATED_VALUE', 'INTERCEPT', 'INTERNAL', 'INTERNAL_ONLY', 'K_VALUE', 'KOHONEN_FUNCTION', 'KOHONEN_MECHANISM', 'KOHONEN_LEARNING_MECHANISM', 'KWTA_MECHANISM', - 'LABELS', 'LCA_MECHANISM', 'LEAKY_COMPETING_INTEGRATOR_FUNCTION', 'LEAK', 'LEARNABLE', - 'LEARNED_PROJECTIONS', 'LEARNING', 'LEARNING_FUNCTION', 'LEARNING_FUNCTION_TYPE', - 'LEARNING_OBJECTIVE', 'LEARNING_MECHANISM', 'LEARNING_MECHANISMS', 'LEARNING_PATHWAY', 'LEARNING_PROJECTION', - 'LEARNING_PROJECTION_PARAMS', 'LEARNING_RATE', 'LEARNING_SIGNAL', 'LEARNING_SIGNAL_SPECS', 'LEARNING_SIGNALS', - 'LESS_THAN', 'LESS_THAN_OR_EQUAL', 'LINEAR', 'LINEAR_COMBINATION_FUNCTION', 'LINEAR_FUNCTION', - 'LINEAR_MATRIX_FUNCTION', 'LOG_ENTRIES', 'LOGISTIC_FUNCTION', 'Loss', 'LOW', 'LVOC_CONTROL_MECHANISM', + 'LABELS', 'LCA_MECHANISM', 'LEAKY_COMPETING_INTEGRATOR_FUNCTION', 'LEAK', 'LEARNABLE', 'LEARNED_PROJECTIONS', + 'LEARNING', 'LEARNING_FUNCTION', 'LEARNING_FUNCTION_TYPE', 'LEARNING_OBJECTIVE', 'LEARNING_MECHANISM', + 'LEARNING_MECHANISMS', 'LEARNING_PATHWAY', 'LEARNING_PROJECTION', 'LEARNING_PROJECTION_PARAMS', 'LEARNING_RATE', + 'LEARNING_SCALE', 'LEARNING_SCALE_LITERALS', 'LEARNING_SCALE_NAMES', 'LEARNING_SIGNAL', 'LEARNING_SIGNAL_SPECS', + 'LEARNING_SIGNALS', 'LESS_THAN', 'LESS_THAN_OR_EQUAL', 'LINEAR', 'LINEAR_COMBINATION_FUNCTION', 'LINEAR_FUNCTION', + 'LINEAR_MATRIX_FUNCTION', 'LOG_ENTRIES', 'LOGISTIC_FUNCTION', 'Loss', 'LOSSES', 'LOW', 'LVOC_CONTROL_MECHANISM', 'MAPPING_PROJECTION', 'MAPPING_PROJECTION_PARAMS', 'MASKED_MAPPING_PROJECTION', 'MATRIX', 'MATRIX_KEYWORD_NAMES', 'MATRIX_KEYWORD_SET', 'MATRIX_KEYWORD_VALUES', 'MATRIX_KEYWORDS','MatrixKeywords', - 'MAX_ABS_DIFF', 'MAX_ABS_INDICATOR', 'MAX_ONE_HOT', 'MAX_ABS_ONE_HOT', 'MAX_ABS_VAL', + 'MATRIX_WEIGHTS', 'MAX_ABS_DIFF', 'MAX_ABS_INDICATOR', 'MAX_ONE_HOT', 'MAX_ABS_ONE_HOT', 'MAX_ABS_VAL', 'MAX_EXECUTIONS_BEFORE_FINISHED', 'MAX_INDICATOR', 'MAX_VAL', 'MAYBE', 'MEAN', 'MECHANISM', 'MECHANISM_COMPONENT_CATEGORY', 'MECHANISM_DEFAULT', 'MECHANISM_DEFAULT_INPUT_VALUE', 'MECHANISM_DEFAULTParams', 'MECHANISM_EXECUTED_LOG_ENTRY', 
'MECHANISM_NAME', 'MECHANISM_PARAM_VALUE', - 'MECHANISM_TYPE', 'MECHANISM_VALUE', 'MEDIAN', 'METRIC', 'MIN_VAL', 'MIN_ABS_VAL', 'MIN_ABS_INDICATOR', + 'MECHANISM_TYPE', 'MECHANISM_VALUE', 'MEDIAN', 'METRIC', 'MIN_VAL', 'MIN_ABS_VAL', 'MIN_ABS_INDICATOR', 'MINIBATCH', 'MOD_AFFERENTS', 'MODE', 'MODULATES','MODULATION', 'MODULATORY_PROJECTION', 'MODULATORY_SIGNAL', 'MODULATORY_SIGNALS', 'MONITOR', 'MONITOR_FOR_CONTROL', 'MONITOR_FOR_LEARNING', 'MONITOR_FOR_MODULATION', 'MODEL_SPEC_ID_GENERIC', 'MODEL_SPEC_ID_INPUT_PORTS', 'MODEL_SPEC_ID_OUTPUT_PORTS', @@ -88,11 +90,13 @@ 'MODEL_SPEC_ID_PARAMETER_INITIAL_VALUE', 'MODEL_SPEC_ID_PARAMETER_SOURCE', 'MPS', 'MODEL_SPEC_ID_PARAMETER_VALUE', 'MODEL_SPEC_ID_TYPE', 'MULTIPLICATIVE', 'MULTIPLICATIVE_PARAM', 'MUTUAL_ENTROPY', - 'NAME', 'NESTED', 'NEWEST', 'NODE', 'NODES', 'NOISE', 'NORMAL_DIST_FUNCTION', 'NORMALIZE', 'NORMED_L0_SIMILARITY', + 'NAME', 'NESTED', 'NEWEST', 'NODE', 'NODES', 'NODE_VALUES', 'NODE_VARIABLES', 'NOISE', + 'NORMAL_DIST_FUNCTION', 'NORMALIZE', 'NORMED_L0_SIMILARITY', 'NOT_EQUAL', 'NUM_EXECUTIONS_BEFORE_FINISHED', 'OBJECTIVE_FUNCTION_TYPE', 'OBJECTIVE_MECHANISM', 'OBJECTIVE_MECHANISM_OBJECT', 'OFF', 'OFFSET', 'OLDEST', 'ON', - 'ONLINE', 'ONLY', 'OPERATION', 'OPTIMIZATION_FUNCTION_TYPE', 'ORIGIN','ORNSTEIN_UHLENBECK_INTEGRATOR_FUNCTION', - 'OUTCOME', 'OUTCOME_FUNCTION', 'OUTPUT', 'OUTPUT_CIM_NAME', 'OUTPUT_LABELS_DICT', 'OUTPUT_MECHANISM', + 'ONLINE', 'ONLY', 'OPERATION', 'OPTIMIZATION_FUNCTION_TYPE', 'OPTIMIZATION_STEP', 'ORIGIN', + 'ORNSTEIN_UHLENBECK_INTEGRATOR_FUNCTION', + 'OUTCOME', 'OUTCOME_FUNCTION', 'OUTPUT', 'OUTPUTS', 'OUTPUT_CIM_NAME', 'OUTPUT_LABELS_DICT', 'OUTPUT_MECHANISM', 'OUTPUT_PORT', 'OUTPUT_PORT_PARAMS', 'output_port_spec_to_parameter_name', 'OUTPUT_PORTS', 'OUTPUT_TYPE', 'OVERRIDE', 'OVERRIDE_PARAM', 'OVERWRITE', 'OWNER', 'OWNER_EXECUTION_COUNT', 'OWNER_EXECUTION_TIME', 'OWNER_VALUE', 'OWNER_VARIABLE', @@ -113,13 +117,13 @@ 'SAMPLE', 'SAVE_ALL_VALUES_AND_POLICIES', 'SCALAR', 'SCALE', 'SCHEDULER', 'SELF', 'SENDER', 'SEPARATE', 'SEPARATOR_BAR', 'SHADOW_INPUT_NAME', 'SHADOW_INPUTS', 'SIMPLE', 'SIMPLE_INTEGRATOR_FUNCTION', 'SIMULATIONS', 'SINGLE', 'SINGLETON', 'SIZE', 'SLOPE', 'SOFT_CLAMP', 'SOFTMAX_FUNCTION', 'SOURCE', 'STABILITY_FUNCTION', - 'STANDARD_ARGS', 'STANDARD_DEVIATION', 'STANDARD_OUTPUT_PORTS', 'SUBTRACTION', 'SUM', + 'STANDARD_ARGS', 'STANDARD_DEVIATION', 'STANDARD_OUTPUT_PORTS', 'STORE', 'SUBTRACTION', 'SUM', 'TARGET', 'TARGET_MECHANISM', 'TARGET_LABELS_DICT', 'TERMINAL', 'TARGETS', 'TERMINATION_MEASURE', 'TERMINATION_THRESHOLD', 'TERMINATION_COMPARISION_OP', 'TERSE', 'TEXT', 'THRESHOLD', - 'TIME', 'TIME_STEP_SIZE', 'TIME_STEPS_DIM', 'TRAINING_SET', + 'TIME', 'TIME_STEP_SIZE', 'TIME_STEPS_DIM', 'TRAINED_OUTPUTS', 'TRAINING_SET', 'TRANSFER_FUNCTION_TYPE', 'TRANSFER_MECHANISM', 'TRANSFER_WITH_COSTS_FUNCTION', 'TRIAL', 'TRIALS_DIM', - 'UNCHANGED', 'UNIFORM_DIST_FUNCTION', 'USER_DEFINED_FUNCTION', 'USER_DEFINED_FUNCTION_TYPE', + 'UNCHANGED', 'UNIFORM_DIST_FUNCTION', 'UPDATE', 'USER_DEFINED_FUNCTION', 'USER_DEFINED_FUNCTION_TYPE', 'VALUES', 'VALIDATE', 'VALIDATION', 'VALUE', 'VALUE_ASSIGNMENT', 'VALUE_FUNCTION', 'VARIABLE', 'VARIANCE', 'VECTOR', 'WALD_DIST_FUNCTION', 'WEIGHT', 'WEIGHTS', 'X_0', 'ZEROS_MATRIX', 'SHARED_COMPONENT_TYPES', ] @@ -133,6 +137,8 @@ from psyneulink._typing import Literal +#region ----------------------------------------- MATRICES ----------------------------------------------------------- + class MatrixKeywords: """ Attributes @@ -206,7 +212,9 @@ def _names(self): MATRIX_KEYWORD_SET 
= MATRIX_KEYWORDS._set() MATRIX_KEYWORD_VALUES = MATRIX_KEYWORDS._values() MATRIX_KEYWORD_NAMES = MATRIX_KEYWORDS._names() +#endregion +#region ---------------------------------------- DISTANCE METRICS ---------------------------------------------------- class DistanceMetrics: """Distance between two arrays. @@ -305,6 +313,72 @@ def _is_metric(metric): # ENTROPY = 'entropy' CONVERGENCE = 'CONVERGENCE' +#endregion + +#region ------------------------------------------- LEARNING ----------------------------------------------------- + + +class LearningScale: + """Scales at which `learning ` occurs + + Used to specify the scales over which learning-related events occur when `learning ` is + executed in a `Composition`. + + Attributes + ---------- + + OPTIMIZATION_STEP + a single step of gradient calculation, of which there can be one or more in a `minibatch + `, based on a Composition's `mini_batch_size ` + Parameter. + + TRIAL + identical to MINIBACH when `minibatch_size `= 1; otherwise a warning is raised, + and unanticipated results can occur. + + MINIBATCH + a subset of the training set used to calculate an `error_signal ` + (i.e. one step along the gradient) used to and update the weights of a MappingProjection's + `matrix ` Parameter. + + EPOCH + a complete pass through the training set; the number of gradient calculations and weight updates that occur + in an epoch depends on the `mini_batch_size ` and `optimizations_per_minibatch + ` Parameters of the Composition. + + RUN + a complete execution of the `learn ` method of the Composition, involving + `num_epochs ` epochs. + + """ + def __init__(self): + self.OPTIMIZATION_STEP = OPTIMIZATION_STEP + self.TRIAL = MINIBATCH + self.MINIBATCH = MINIBATCH + self.EPOCH = EPOCH + self.RUN = RUN + + def _values(self): + return list(self.__dict__.values()) + + def _set(self): + return set(self.__dict__.values()) + + def _names(self): + return list(self.__dict__) + + +OPTIMIZATION_STEP = 'optimization_step' +# TRIAL = 'trial' # defined below in section on Composition +MINIBATCH = 'minibatch' +EPOCH = 'epoch' +RUN = 'run' + +LEARNING_SCALE = LearningScale() +LEARNING_SCALE_SET = LEARNING_SCALE._set() +LEARNING_SCALE_VALUES = LEARNING_SCALE._values() +LEARNING_SCALE_NAMES = LEARNING_SCALE._names() +LEARNING_SCALE_LITERALS = Literal[tuple(LEARNING_SCALE_VALUES)] # Used for type hinting class Loss(Enum): @@ -363,6 +437,10 @@ class Loss(Enum): SUM = L0 +LOSSES = 'losses' + +#endregion + # ********************************************************************************************************************** # ****************************************** CONSTANTS ************************************************************* # ********************************************************************************************************************** @@ -404,11 +482,15 @@ class Loss(Enum): FLAGS = 'flags' INITIALIZATION_STATUS = 'initialization_status' EXECUTION_PHASE = 'execution_phase' +EXECUTION_MODE = 'execution_mode' SOURCE = 'source' +INITIALIZE = "initialize" # Used as instruction to some methods INITIALIZING = " INITIALIZING " # Used as status and context for Log INITIALIZED = " INITIALIZED " # Used as status EXECUTING = " EXECUTING " # Used in context for Log and ReportOutput pref ASSIGN_VALUE = ': Assign value' +UPDATE = 'update' +STORE = 'store' VALIDATE = 'Validate' COMMAND_LINE = "COMMAND_LINE" CHANGED = 'CHANGED' @@ -420,6 +502,7 @@ class Loss(Enum): COUNT = 'COUNT' INPUT = 'input' OUTPUT = 'output' +OUTPUTS = 'outputs' PARAMETER = 'parameter' RANDOM = 
'random' BEFORE = 'before' @@ -432,6 +515,8 @@ class Loss(Enum): DICT = 'dict' TEXT = 'text' +ADD = 'add' +SUBTRACT = 'subtract' LESS_THAN = '<' LESS_THAN_OR_EQUAL = '<=' EQUAL = '==' @@ -485,6 +570,7 @@ class Loss(Enum): # Composition Categories COMPOSITION = 'Composition' AUTODIFF_COMPOSITION = 'AutodiffComposition' +AUTODIFF_RESULTS = 'AutodiffResults' COMPOSITION_FUNCTION_APPROXIMATOR = 'CompositionFunctionApproximator' EM_COMPOSITION = 'EMComposition' @@ -499,6 +585,8 @@ class Loss(Enum): LEARNING_PATHWAY = "learning_pathway" NODE = 'NODE' NODES = 'NODES' +NODE_VARIABLES = 'node_variables' +NODE_VALUES = 'node_values' INPUTS = 'inputs' TARGETS = 'targets' EPOCHS = 'epochs' @@ -749,8 +837,9 @@ class Loss(Enum): #region ------------------------------------------ AUTODIFF COMPOSITION ---------------------------------------------- -TRAINING_SET = 'training set' LEARNING_RATE = "learning_rate" +TRAINING_SET = 'training set' +TRAINED_OUTPUTS = 'trained_outputs' #endregion @@ -967,6 +1056,7 @@ class Loss(Enum): FEEDBACK = 'feedback' MONITOR_FOR_LEARNING = 'monitor_for_learning' LEARNABLE = 'learnable' +MATRIX_WEIGHTS = 'matrix_weights' AUTO = 'auto' HETERO = 'hetero' diff --git a/psyneulink/core/globals/parameters.py b/psyneulink/core/globals/parameters.py index d4dea623080..e2204c6ecdd 100644 --- a/psyneulink/core/globals/parameters.py +++ b/psyneulink/core/globals/parameters.py @@ -1672,7 +1672,7 @@ def _log_value(self, value, context=None): context_str = ContextFlags._get_context_string(ContextFlags.COMMAND_LINE) log_condition_satisfied = True - # standard loggingd + # standard logging else: if self.log_condition is None or self.log_condition is LogCondition.OFF: return diff --git a/psyneulink/library/components/mechanisms/modulatory/learning/EMstoragemechanism.py b/psyneulink/library/components/mechanisms/modulatory/learning/EMstoragemechanism.py index 869d86b7051..0e1c814782f 100644 --- a/psyneulink/library/components/mechanisms/modulatory/learning/EMstoragemechanism.py +++ b/psyneulink/library/components/mechanisms/modulatory/learning/EMstoragemechanism.py @@ -642,7 +642,7 @@ def _validate_params(self, request_set, target_set=None, context=None): f"a list or 2d np.array containing entries that have the same shape " f"({memory_matrix.shape}) as an entry (row) in 'memory_matrix' arg.") - # Ensure the number of fields is equal to the number of items in variable + # Ensure the number of fields is equal to the numbder of items in variable if FIELDS in request_set: fields = request_set[FIELDS] if len(fields) != len(self.variable): diff --git a/psyneulink/library/components/projections/pathway/autoassociativeprojection.py b/psyneulink/library/components/projections/pathway/autoassociativeprojection.py index 2b281fc3dab..dbf0b5ef076 100644 --- a/psyneulink/library/components/projections/pathway/autoassociativeprojection.py +++ b/psyneulink/library/components/projections/pathway/autoassociativeprojection.py @@ -108,6 +108,7 @@ from psyneulink.core.components.functions.nonstateful.transferfunctions import LinearMatrix from psyneulink.core.components.functions.function import get_matrix from psyneulink.core.components.projections.pathway.mappingprojection import MappingError, MappingProjection +from psyneulink.library.components.projections.pathway.maskedmappingprojection import MaskedMappingProjection from psyneulink.core.components.projections.projection import projection_keywords from psyneulink.core.components.shellclasses import Mechanism from psyneulink.core.components.ports.outputport 
import OutputPort @@ -129,7 +130,7 @@ class AutoAssociativeError(MappingError): pass -class AutoAssociativeProjection(MappingProjection): +class AutoAssociativeProjection(MaskedMappingProjection): """ AutoAssociativeProjection( ) diff --git a/psyneulink/library/compositions/autodiffcomposition.py b/psyneulink/library/compositions/autodiffcomposition.py index a93e94f23f8..c851c32afd2 100644 --- a/psyneulink/library/compositions/autodiffcomposition.py +++ b/psyneulink/library/compositions/autodiffcomposition.py @@ -166,6 +166,13 @@ *PyTorch mode* ~~~~~~~~~~~~~~ +# 7/10/24 - FIX: +.. _AutodiffComposition_PyTorch_LearningScale: + ADD DESCRIPTION OF HOW LearningScale SPECIFICATIONS MAP TO EXECUTOIN OF pytorch_rep: + OPTIMIZATION STEP: + for AutodiffCompositions, this corresponds to a single call to `foward()` and `backward()` + methods of the Pytorch model + This is the default for an AutodiffComposition, but, can be specified explicitly by setting **execution_mode** = `ExecutionMode.PyTorch` in the `learn ` method (see `example ` in `BasicsAndPrimer`). In this mode, the AutodiffComposition is automatically translated to a `PyTorch @@ -323,6 +330,7 @@ import collections from packaging import version from pathlib import Path, PosixPath +from typing import Optional try: import torch @@ -335,10 +343,8 @@ from psyneulink.library.compositions.pytorchwrappers import PytorchCompositionWrapper from psyneulink.library.compositions.pytorchshowgraph import PytorchShowGraph -from psyneulink.core.components.functions.stateful.statefulfunction import StatefulFunction from psyneulink.core.components.mechanisms.processing.processingmechanism import ProcessingMechanism from psyneulink.core.components.mechanisms.processing.compositioninterfacemechanism import CompositionInterfaceMechanism -from psyneulink.core.components.mechanisms.processing.transfermechanism import TransferMechanism from psyneulink.core.components.mechanisms.modulatory.modulatorymechanism import ModulatoryMechanism_Base from psyneulink.core.components.projections.modulatory.modulatoryprojection import ModulatoryProjection_Base from psyneulink.core.components.ports.inputport import InputPort @@ -346,8 +352,12 @@ from psyneulink.core.compositions.report import (ReportOutput, ReportParams, ReportProgress, ReportSimulations, ReportDevices, EXECUTE_REPORT, LEARN_REPORT, PROGRESS_REPORT) from psyneulink.core.globals.context import Context, ContextFlags, handle_external_context, CONTEXT -from psyneulink.core.globals.keywords import AUTODIFF_COMPOSITION, CPU, CUDA, Loss, MPS, SOFT_CLAMP -from psyneulink.core.globals.utilities import is_numeric_scalar, get_torch_tensor +from psyneulink.core.globals.keywords import (AUTODIFF_COMPOSITION, CPU, CUDA, EXECUTION_MODE, + LEARNING_SCALE_LITERALS, LEARNING_SCALE_NAMES, LEARNING_SCALE_VALUES, + Loss, LOSSES, MATRIX_WEIGHTS, MINIBATCH, MPS, NODE_VALUES, NODE_VARIABLES, + OPTIMIZATION_STEP, RESULTS, RUN, SOFT_CLAMP, + TARGETS, TRAINED_OUTPUTS, TRIAL) +from psyneulink.core.globals.utilities import is_numeric_scalar from psyneulink.core.scheduling.scheduler import Scheduler from psyneulink.core.globals.parameters import Parameter, check_user_specified from psyneulink.core.scheduling.time import TimeScale @@ -361,6 +371,30 @@ 'AutodiffComposition' ] +def _get_torch_trained_outputs(owning_component=None, context=None): + if not context.execution_id: + return None + pytorch_rep = owning_component.parameters.pytorch_representation._get(context) + if not pytorch_rep: + return None + return 
np.array(pytorch_rep.retained_trained_outputs) + +def _get_torch_targets(owning_component=None, context=None): + if not context.execution_id: + return None + pytorch_rep = owning_component.parameters.pytorch_representation._get(context) + if not pytorch_rep: + return None + return np.array(pytorch_rep.retained_targets) + +def _get_torch_losses(owning_component, context): + if not context.execution_id: + return None + pytorch_rep = owning_component.parameters.pytorch_representation._get(context) + if not pytorch_rep: + return None + return np.array(pytorch_rep.retained_losses) + class AutodiffCompositionError(CompositionError): def __init__(self, error_value): @@ -372,8 +406,25 @@ def __str__(self): class AutodiffComposition(Composition): """ + AutodiffComposition( \ + optimizer_type='sgd', + loss_spec=Loss.MSE, + weight_decay=0, + learning_rate=0.001, + disable_learning=False, + synch_projection_matrices_with_torch=RUN, + synch_node_variables_with_torch=None, + synch_node_values_with_torch=RUN, + synch_results_with_torch=RUN, + retain_torch_trained_outputs=MINIBATCH, + retain_torch_targets=MINIBATCH, + retain_torch_losses=MINIBATCH, + device=CPU + ) + Subclass of `Composition` that trains models using either LLVM compilation or `PyTorch `_; - see and `Composition ` for additional arguments and attributes. + see and `Composition ` for additional arguments and attributes. See `Composition` + for additional arguments to constructor. Arguments --------- @@ -389,18 +440,69 @@ class AutodiffComposition(Composition): learning_rate : float : default 0.001 specifies the learning rate passed to the optimizer if none is specified in the `learn - ` method of the AutodiffComposition - (see `learning_rate ` for additional details). + ` method of the AutodiffComposition; + see `learning_rate ` for additional details. disable_learning : bool: default False specifies whether the AutodiffComposition should disable learning when run in `learning mode `. - device : torch.device : default device-dependnet + synch_projection_matrices_with_torch : `LearningScale` : default RUN + specifies the default for the AutodiffComposition for when to copy Pytorch parameters to PsyNeuLink + `Projection matrices ` (connection weights), which can be overridden by specifying + the **synch_projection_matrices_with_torch** argument in the `learn ` method; + see `synch_projection_matrices_with_torch ` + for additional details. + + synch_node_variables_with_torch : `LearningScale` : default None + specifies the default for the AutodiffComposition for when to copy the current input to Pytorch nodes + to the PsyNeuLink `variable ` attribute of the corresponding PsyNeuLink `nodes + `, which can be overridden by specifying the **synch_node_variables_with_torch** argument + in the `learn ` method; see `synch_node_variables_with_torch + ` for additional details. + + synch_node_values_with_torch : `LearningScale` : default RUN + specifies the default for the AutodiffComposition for when to copy the current output of Pytorch nodes to the + PsyNeuLink `value ` attribute of the corresponding PsyNeuLink `nodes `, + which can be overridden by specifying the **synch_node_values_with_torch** argument in the `learn + ` method; see `synch_node_values_with_torch + ` for additional details. 
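
    A minimal usage sketch of these options (a hypothetical two-node model, shown for illustration
    only and not tested here; the constructor and learn() arguments are those introduced in this
    patch, the scale values are the lowercase LearningScale keywords defined in keywords.py, and the
    nested 'inputs'/'targets'/'epochs' dict is assumed to be the usual learn() input format):

        import psyneulink as pnl
        from psyneulink.core.globals.keywords import EPOCH, MINIBATCH, RUN

        in_mech = pnl.ProcessingMechanism(name='in', default_variable=[0, 0])
        out_mech = pnl.ProcessingMechanism(name='out', default_variable=[0, 0])

        autodiff = pnl.AutodiffComposition(
            pathways=[in_mech, out_mech],
            synch_projection_matrices_with_torch=RUN,  # copy weights back to PNL Projections at end of run
            synch_node_values_with_torch=EPOCH,        # copy node values back to PNL once per epoch
            retain_torch_losses=MINIBATCH,             # keep one averaged loss per minibatch
        )

        # The constructor defaults can be overridden per call in learn():
        autodiff.learn(inputs={'inputs': {in_mech: [[0, 0], [0, 1], [1, 0], [1, 1]]},
                               'targets': {out_mech: [[0, 0], [0, 1], [1, 0], [1, 1]]},
                               'epochs': 2},
                       synch_node_values_with_torch=MINIBATCH)
        # Retained losses (one per minibatch here) are then available via the torch_losses Parameter.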
+ + synch_results_with_torch : `LearningScale` : default RUN + specifies the default for the AutodiffComposition for when to copy the outputs of the Pytorch model + to the AutodiffComposition's `results ` attribute, which can be overridden by + specifying the **synch_results_with_torch** argument in the `learn ` method. + Note that this differs from **retain_torch_trained_outputs**, which specifies the frequency at which + the outputs of the PyTorch model are tracked, all of which are stored in the AutodiffComposition's + `torch_trained_outputs ` attribute at the end of the run; + see `synch_results_with_torch ` for + additional details. + + retain_torch_trained_outputs : `LearningScale` : default MINIBATCH + specifies the default for the AutodiffComposition for scale at which the outputs of the Pytorch + model are tracked, all of which are stored in the AutodiffComposition's `torch_trained_outputs + ` attribute at the end of the run; this can be overridden + by specifying the **retain_torch_trained_outputs** argument in the `learn ` method. + Note that this differs from **synch_results_with_torch**, which specifies the frequency with + which values are called to the AutodiffComposition's `results` attribute; see `retain_torch_trained_outputs + ` for additional details. + + retain_torch_targets : `LearningScale` : default MINIBATCH + specifies the default for the AutodiffComposition for when to copy the targets used for training the + Pytorch model to the AutodiffComposition's `torch_targets ` attribute, which can be + overridden by specifying the **retain_torch_targets** argument in the `learn ` method; + see `retain_torch_targets ` for additional details. + + retain_torch_losses : `LearningScale` : default MINIBATCH + specifies the default for the AutodiffComposition for the scale at which the losses of the Pytorch model + are tracked, all of which are stored in the AutodiffComposition's `torch_losses ` + attribute at the end of the run; see `retain_torch_losses ` for + additional details. + + device : torch.device : default device-dependent specifies the device on which the model is run. If None, the device is set to 'cuda' if available, then 'mps`, otherwise 'cpu'. - Attributes ---------- @@ -430,17 +532,77 @@ class AutodiffComposition(Composition): **learnable** parameter of its constructor as `False`; this applies to MappingProjections at any level of `nesting `. - device : torch.device - the device on which the model is run. - - losses : list of floats - tracks the average loss after each weight update (i.e. each minibatch) during learning. - + synch_projection_matrices_with_torch : OPTIMIZATION_STEP, MINIBATCH, EPOCH or RUN + determines when to copy PyTorch parameters to PsyNeuLink `Projection matrices ` + (connection weights) if this is not specified in the call to `learn `. Copying more + frequently keeps the PsyNeuLink representation more closely synchronized with parameter updates in Pytorch, + but slows performance (see `AutodiffComposition_PyTorch_LearningScale` for information about settings). + + synch_node_variables_with_torch : OPTIMIZATION_STEP, TRIAL, MINIBATCH, EPOCH, RUN or None + determines when to copy the current input to Pytorch nodes (modules) to the PsyNeuLink `variable + ` attribute of the corresponding PsyNeuLink `nodes `, if this is not + specified in the call to `learn `. 
+ COMMENT: + 8/8/24 - FIX: ADD EXPLANATION OF WHY THIS IS NOT GENERALLY USEFUL ALONG THE LINES OF THE FOLLOWING + This is supported for inspection and debugging, but is not generally useful, as PsyNeuLink uses `Lazy + Evaluation `, in which the variable of a node is determined by the input it receives + during execution. + COMMENT + Copying more frequently keeps the PsyNeuLink + representation more closely copying more frequently keeps them synchronized with parameter updates in Pytorch, + but slows performance (see `AutodiffComposition_PyTorch_LearningScale` for information about settings). + + synch_node_values_with_torch : OPTIMIZATION_STEP, MINIBATCH, EPOCH or RUN + determines when to copy the current output of Pytorch nodes (modules) to the PsyNeuLink `value + ` attribute of the corresponding PsyNeuLink `nodes `, if this is not + specified in the call to `learn `. Copying more frequently keeps the PsyNeuLink + representation more closely copying more frequently keeps them synchronized with parameter updates in Pytorch, + but slows performance (see `AutodiffComposition_PyTorch_LearningScale` for information about settings). + + synch_results_with_torch : OPTIMIZATION_STEP, TRIAL, MINIBATCH, EPOCH or RUN + determines when to copy the current outputs of Pytorch nodes to the PsyNeuLink `results + ` attribute of the AutodiffComposition if this is not specified in + the call to `learn `. Copying more frequently keeps the PsyNeuLink + representation more closely synchronized with parameter updates in Pytorch, but slows performance + (see `AutodiffComposition_PyTorch_LearningScale` for information about settings). + + retain_torch_trained_outputs : OPTIMIZATION_STEP, MINIBATCH, EPOCH, RUN or None + determines the scale at which the outputs of the Pytorch model are tracked, all of which are stored in the + AutodiffComposition's `results ` attribute at the end of the run if this is not specified + in the call to `learn `(see `AutodiffComposition_PyTorch_LearningScale` for + information about settings) + + retain_torch_targets : OPTIMIZATION_STEP, TRIAL, MINIBATCH, EPOCH, RUN or None + determines the scale at which the targets used for training the Pytorch model are tracked, all of which + are stored in the AutodiffComposition's `targets ` attribute at the end of the run + if this is not specified in the call to `learn ` + (see `AutodiffComposition_PyTorch_LearningScale` for information about settings). + + retain_torch_losses : OPTIMIZATION_STEP, MINIBATCH, EPOCH, RUN or None + determines the scale at which the losses of the Pytorch model are tracked, all of which are stored in + the AutodiffComposition's `torch_losses ` attribute at the end of the run + if this is nota specified in the call to `learn ` + (see `AutodiffComposition_PyTorch_LearningScale` for information about settings). + + torch_trained_outputs : List[ndarray] + stores the outputs (converted to np arrays) of the Pytorch model trained during learning, at the frequency + specified by `retain_torch_trained_outputs ` if it is set + to *MINIBATCH*, *EPOCH*, or *RUN*; see `retain_torch_trained_outputs + ` for additional details. + + torch_targets : List[ndarray] + stores the targets used for training the Pytorch model during learning at the frequency specified by + `retain_torch_targets ` if it is set to *MINIBATCH*, *EPOCH*, + or *RUN*; see `retain_torch_targets ` for additional details. + + torch_losses : list of floats + stores the average loss after each weight update (i.e. 
each minibatch) during learning, at the frequency + specified by `retain_torch_trained_outputs ` if it is set to *MINIBATCH*, + *EPOCH*, or *RUN*; see `retain_torch_losses ` for additonal details. + + COMMENT: FIX: NOT CURRENTLY BEING POPULTED, BUT SEEMS TO BE USED BY _get_total_loss() and early_stopper trial_losses = Parameter([]) - - tracked_loss = Parameter(None, pnl_internal=True) - - tracked_loss_count = Parameter(0, pnl_internal=True) + COMMENT last_saved_weights : path path for file to which weights were last saved. @@ -448,6 +610,8 @@ class AutodiffComposition(Composition): last_loaded_weights : path path for file from which weights were last loaded. + device : torch.device + the device on which the model is run. """ componentCategory = AUTODIFF_COMPOSITION @@ -459,15 +623,71 @@ class Parameters(Composition.Parameters): pytorch_representation = None optimizer = None learning_rate = Parameter(.001, fallback_default=True) - losses = Parameter([]) - trial_losses = Parameter([]) - tracked_loss = Parameter(None, pnl_internal=True) - tracked_loss_count = Parameter(0, pnl_internal=True) + synch_projection_matrices_with_torch = Parameter(RUN, fallback_default=True) + synch_node_variables_with_torch = Parameter(None, fallback_default=True) + synch_node_values_with_torch = Parameter(RUN, fallback_default=True) + synch_results_with_torch = Parameter(RUN, fallback_default=True) + retain_torch_trained_outputs = Parameter(MINIBATCH, fallback_default=True) + retain_torch_targets = Parameter(MINIBATCH, fallback_default=True) + retain_torch_losses = Parameter(MINIBATCH, fallback_default=True) + torch_trained_outputs = Parameter([], getter=_get_torch_trained_outputs) + torch_targets = Parameter([], getter=_get_torch_targets) + torch_losses = Parameter([], getter=_get_torch_losses) + trial_losses = Parameter([]) # FIX <- related to early_stopper, but not getting assigned anywhere device = None - def _validate_memory_template(self, device): - if isinstance(device, str) and device not in [CPU, CUDA, MPS]: - raise AutodiffCompositionError(f"Device must be one of {CPU}, {CUDA}, or {MPS}") + # def _validate_memory_template(self, device): + # if isinstance(device, str) and device not in [CPU, CUDA, MPS]: + # raise AutodiffCompositionError(f"Device must be one of {CPU}, {CUDA}, or {MPS}") + # + def _validate_synch_projection_matrices_with_torch(self, spec): + if spec is not None and spec not in LEARNING_SCALE_VALUES: + raise AutodiffCompositionError(f"Value of 'synch_projection_matrices_with_torch' arg " + f"must be one of the following keywords: " + f"{', '.join(LEARNING_SCALE_NAMES)}") + + def _validate_synch_node_variables_with_torch(self, spec): + if spec is not None and spec not in LEARNING_SCALE_VALUES: + raise AutodiffCompositionError(f"Value of 'synch_node_variables_with_torch' arg " + f"must be one of the following keywords: " + f"{', '.join(LEARNING_SCALE_NAMES)}") + + def _validate_synch_node_values_with_torch(self, spec): + if spec is not None and spec not in LEARNING_SCALE_VALUES: + raise AutodiffCompositionError(f"Value of 'synch_node_values_with_torch' arg " + f"must be one of the following keywords: " + f"{', '.join(LEARNING_SCALE_NAMES)}") + + def _validate_synch_results_with_torch(self, spec): + if spec is not None and spec not in LEARNING_SCALE_VALUES: + raise AutodiffCompositionError(f"Value of 'synch_results_with_torch' arg " + f"must be one of the following keywords: " + f"{', '.join(LEARNING_SCALE_NAMES)}") + if spec is OPTIMIZATION_STEP: + arg_vals = LEARNING_SCALE_NAMES.copy() + 
arg_vals.remove('OPTIMIZATION_STEP') + raise AutodiffCompositionError(f"'OPTIMIZATION_STEP can't be used with 'synch_results_with_torch';" + f"use another value of {', '.arg_vals}") + + + def _validate_retain_torch_trained_outputs(self, spec): + if spec is not None and spec not in LEARNING_SCALE_VALUES: + raise AutodiffCompositionError(f"Value of `retain_torch_trained_outputs` arg " + f"must be one of the following keywords: " + f"{', '.join(LEARNING_SCALE_NAMES)}") + + def _validate_retain_torch_targets(self, spec): + if spec is not None and spec not in LEARNING_SCALE_VALUES: + raise AutodiffCompositionError(f"Value of `retain_torch_targets` arg " + f"must be one of the following keywords: " + f"{', '.join(LEARNING_SCALE_NAMES)}") + + def _validate_retain_torch_losses(self, spec): + if spec is not None and spec not in LEARNING_SCALE_VALUES: + raise AutodiffCompositionError(f"Value of `retain_torch_losses` arg " + f"must be one of the following keywords: " + f"{', '.join(LEARNING_SCALE_NAMES)}") + # TODO (CW 9/28/18): add compositions to registry so default arg for name is no longer needed @check_user_specified @@ -480,6 +700,13 @@ def __init__(self, disable_learning=False, force_no_retain_graph=False, refresh_losses=False, + synch_projection_matrices_with_torch:Optional[str]=RUN, + synch_node_variables_with_torch:Optional[str]=None, + synch_node_values_with_torch:Optional[str]=RUN, + synch_results_with_torch:Optional[str]=RUN, + retain_torch_trained_outputs:Optional[str]=MINIBATCH, + retain_torch_targets:Optional[str]=MINIBATCH, + retain_torch_losses:Optional[str]=MINIBATCH, device=None, disable_cuda=True, cuda_index=None, @@ -492,16 +719,25 @@ def __init__(self, show_graph_attributes = kwargs.pop('show_graph_attributes', {}) - super(AutodiffComposition, self).__init__(name = name, - pathways=pathways, - optimizer_type = optimizer_type, - loss_spec = loss_spec, - learning_rate = learning_rate, - weight_decay = weight_decay, - **kwargs) + super(AutodiffComposition, self).__init__( + name = name, + pathways=pathways, + optimizer_type = optimizer_type, + loss_spec = loss_spec, + learning_rate = learning_rate, + weight_decay = weight_decay, + synch_projection_matrices_with_torch = synch_projection_matrices_with_torch, + synch_node_variables_with_torch = synch_node_variables_with_torch, + synch_node_values_with_torch = synch_node_values_with_torch, + synch_results_with_torch = synch_results_with_torch, + retain_torch_trained_outputs = retain_torch_trained_outputs, + retain_torch_targets = retain_torch_targets, + retain_torch_losses = retain_torch_losses, + **kwargs) self._built_pathways = False - self.target_output_map = {} + self.targets_from_outputs_map = {} # Map from TARGETS nodes to any OUTPUT nodes from which they receive input + self.outputs_to_targets_map = {} # Map from trained OUTPUT nodes to their TARGETS self.optimizer_type = optimizer_type self.loss_spec = loss_spec self._runtime_learning_rate = None @@ -509,7 +745,7 @@ def __init__(self, self.refresh_losses = refresh_losses self.weight_decay = weight_decay self.disable_learning = disable_learning - self.loss = None + self.loss_function = None self.last_saved_weights = None self.last_loaded_weights = None @@ -520,7 +756,6 @@ def __init__(self, self.execution_sets = None # # MODIFIED 7/10/24 OLD: - # FIX: REMOVE WHEN SUPPORT FOR MPS ADDED BELOW if not disable_cuda and torch.cuda.is_available(): if cuda_index is None: self.device = torch.device('cuda') @@ -530,7 +765,7 @@ def __init__(self, self.device = torch.device('cpu') else: 
self.device = device - # # MODIFIED 7/10/24 NEW: + # # MODIFIED 7/10/24 NEW: NEEDED FOR torch MPS SUPPORT # FIX: ADD AFTER USE OF utilities.get_torch_tensor() AND COMPATIBLITY WITH MPS IS VALIDATED # if device is None: # # Try setting device by default @@ -739,7 +974,7 @@ def create_pathway(node)->list: for value in mech.value], dtype=object), name= 'TARGET for ' + mech.name) - for mech in output_mechs_for_learning if mech not in self.target_output_map.values()] + for mech in output_mechs_for_learning if mech not in self.targets_from_outputs_map.values()] # Suppress warnings about role assignments context = Context(source=ContextFlags.METHOD) self.add_nodes(target_mechs, required_roles=[NodeRole.TARGET, NodeRole.LEARNING], context=context) @@ -747,7 +982,7 @@ def create_pathway(node)->list: self.exclude_node_roles(target_mech, NodeRole.OUTPUT, context) for output_port in target_mech.output_ports: output_port.parameters.require_projection_in_composition.set(False, override=True) - self.target_output_map.update({target: output for target, output + self.targets_from_outputs_map.update({target: output for target, output in zip(target_mechs, output_mechs_for_learning)}) else: # Construct entire PNL backpropagation learning pathways for each INPUT Node @@ -755,6 +990,7 @@ def create_pathway(node)->list: self.add_backpropagation_learning_pathway(pathway=pathway, loss_spec=self.loss_spec) + self.outputs_to_targets_map = {output: target for target, output in self.targets_from_outputs_map.items()} self._analyze_graph() return self.learning_components @@ -768,7 +1004,6 @@ def _build_pytorch_representation(self, context=None, refresh=False): model = self.pytorch_composition_wrapper_type(composition=self, device=self.device, context=context) - self.parameters.pytorch_representation._set(model, context, skip_history=True, skip_log=True) # Set up optimizer function @@ -777,15 +1012,16 @@ def _build_pytorch_representation(self, context=None, refresh=False): if old_opt is None or refresh: opt = self._make_optimizer(self.optimizer_type, learning_rate, self.weight_decay, context) self.parameters.optimizer._set(opt, context, skip_history=True, skip_log=True) + self.parameters.pytorch_representation._get(context).optimizer = opt # Set up loss function - if self.loss is not None: - logger.warning("Overwriting loss function for AutodiffComposition {}! Old loss function: {}".format( - self, self.loss)) + if self.loss_function is not None: + logger.warning("Overwriting 'loss_function' for AutodiffComposition {}! Old loss function: {}".format( + self, self.loss_function)) if callable(self.loss_spec): - self.loss = self.loss_spec + self.loss_function = self.loss_spec else: - self.loss = self._get_loss(self.loss_spec) + self.loss_function = self._get_loss(self.loss_spec) return self.parameters.pytorch_representation._get(context) @@ -830,132 +1066,136 @@ def _get_loss(self, loss_spec): elif loss_spec == Loss.KL_DIV: return nn.KLDivLoss(reduction='sum') else: - raise AutodiffCompositionError(f"Loss type {loss_spec} not recognized. Loss argument must be a " + raise AutodiffCompositionError(f"Loss type {loss_spec} not recognized. 'loss_function' argument must be a " f"Loss enum or function. 
Currently, the recognized loss types are: " f"L1 (Mean), SSE (sum squared error), CROSS_ENTROPY, NLL (negative log " f"likelihood), POISSONNLL (Poisson negative log likelihood, " f"and KL_DIV (KL divergence.") - def autodiff_training(self, inputs, targets, synchronize_pnl_values:bool=True, context=None, scheduler=None): - """Perform learning/training on all input-target pairs received for given number of epochs""" + def autodiff_forward(self, inputs, targets, + synch_with_pnl_options, retain_in_pnl_options, + execution_mode, scheduler, context): + """Perform forward pass of model and compute loss for a single trial (i.e., a single input) in Pytorch mode. + Losses are accumulated in pytorch_rep.track_losses, over calls to this method within a minibatch; + at the end of a minibatch, they are averaged and backpropagated by compositionrunner.run_learning() + before the next time it calls run(), in a call to backward() by do_gradient_optimization() + in _batch_inputs() or _batch_function_inputs(), + """ + assert execution_mode == pnlvm.ExecutionMode.PyTorch + pytorch_rep = self.parameters.pytorch_representation._get(context) - # Compute total loss over OUTPUT nodes for current trial - tracked_loss = self.parameters.tracked_loss._get(context) - if tracked_loss is None: - self.parameters.tracked_loss._set(torch.zeros(1, device=self.device).double(), - context=context, - skip_history=True, - skip_log=True) - tracked_loss = self.parameters.tracked_loss._get(context) + # --------- Do forward computation on current inputs ------------------------------------------------- + # should return 2d values for each component - curr_tensor_inputs = {} - curr_tensor_targets = {} + # Get value of INPUT nodes for current trial + curr_tensors_for_inputs = {} for component in inputs.keys(): - curr_tensor_inputs[component] = torch.tensor(inputs[component], device=self.device).double() + curr_tensors_for_inputs[component] = torch.tensor(inputs[component], device=self.device).double() + + # Get value of all OUTPUT nodes for current trial + curr_tensors_for_outputs = pytorch_rep.forward(curr_tensors_for_inputs, None, context) + + # --------- Compute the loss (TARGET-OUTPUT) for each trained OUTPUT node --------------------------- + + # Get value of OUTPUT nodes that are being trained (i.e., for which there are TARGET nodes) + curr_tensors_for_trained_outputs = {k:v for k,v in curr_tensors_for_outputs.items() + if k in self.outputs_to_targets_map} # Get value of TARGET nodes for current trial + curr_tensors_for_targets = {} for component in targets.keys(): - curr_tensor_targets[self.target_output_map[component]] = [torch.tensor(np.atleast_1d(target), - device=self.device).double() - for target in targets[component]] - - # Do forward computation on current inputs - # should return 2d values for each component - pytorch_rep = self.parameters.pytorch_representation._get(context) - curr_tensor_outputs = pytorch_rep.forward(curr_tensor_inputs, context) - - # Update values of all PNL nodes executed in forward pass (if specified) - if synchronize_pnl_values: - pytorch_node_values = {} - for pnl_node, pytorch_node in pytorch_rep.nodes_map.items(): - if pytorch_node.value is None: - assert pytorch_node.exclude_from_gradient_calc, \ - (f"PROGRAM ERROR: Value of PyTorch wrapper for {pnl_node.name} is None " - f"but it is not excluded from gradient calculation.") - continue - if isinstance(pytorch_node.value, list): - value = np.array([val.detach().cpu().numpy() for val in pytorch_node.value], dtype=object) - else: - value = 
pytorch_node.value.detach().cpu().numpy() - pnl_node.parameters.value._set(value, context) - if isinstance(pnl_node.function, StatefulFunction): - pnl_node.function.parameters.previous_value._set(value, context) - # 7/10/24 - FIX: THIS NEEDS TO BE ALIGNED WITH HANDLING OF INTEGRATION BEFORE NONLINEARITY IN PYTORCH - # HANDLED IN forward() METHOD OF PytorchMechanismWrapper?? - # if isinstance(pnl_node, TransferMechanism) and pnl_node.integrator_mode: - # pnl_node.integrator_function.parameters.previous_value._set(value, context) - pytorch_node_values[pnl_node] = value - - # Compute the loss (TARGET-OUTPUT) for each trained OUTPUT node - outputs_for_targets = {k:v for k,v in curr_tensor_outputs.items() if k in self.target_output_map.values()} - for component in outputs_for_targets.keys(): - # possibly add custom loss option, which is a loss function that takes many args - # (outputs, targets, weights, and more) and returns a scalar - new_loss = 0 - for i in range(len(outputs_for_targets[component])): - new_loss += self.loss(outputs_for_targets[component][i], - curr_tensor_targets[component][i]) - tracked_loss += new_loss + curr_tensors_for_targets[component] = [torch.tensor(np.atleast_1d(target), + device=self.device).double() + for target in targets[component]] + + # Get value of TARGET nodes for trained OUTPUT nodes + curr_target_tensors_for_trained_outputs = {} + for trained_output, target in self.outputs_to_targets_map.items(): + curr_target_tensors_for_trained_outputs[trained_output] = curr_tensors_for_targets[target] + + # Calculate and track the loss over the trained OUTPUT nodes + for component in curr_tensors_for_trained_outputs.keys(): + trial_loss = 0 + for i in range(len(curr_tensors_for_trained_outputs[component])): + trial_loss += self.loss_function(curr_tensors_for_trained_outputs[component][i], + curr_target_tensors_for_trained_outputs[component][i]) + pytorch_rep.minibatch_loss += trial_loss + pytorch_rep.minibatch_loss_count += 1 + + # --------- Return the values of OUTPUT of trained nodes and all nodes --------------------------------------- # Get values of trained OUTPUT nodes - trained_outputs = [] + trained_output_values = [] trained_outputs_CIM_input_ports = [port for port in self.output_CIM.input_ports - if port.path_afferents[0].sender.owner in self.target_output_map.values()] + if port.path_afferents[0].sender.owner in self.targets_from_outputs_map.values()] for input_port in trained_outputs_CIM_input_ports: assert (len(input_port.all_afferents) == 1), \ f"PROGRAM ERROR: {input_port.name} of ouput_CIM for '{self.name}' has more than one afferent." port, source, _ = self.output_CIM._get_source_info_from_output_CIM(input_port) idx = source.output_ports.index(port) - trained_outputs += [outputs_for_targets[source][idx].detach().cpu().numpy().copy().tolist()] + trained_output_values += [curr_tensors_for_trained_outputs[source][idx].detach().cpu().numpy().copy().tolist()] # Get values of all OUTPUT nodes - all_outputs = [] + all_output_values = [] for input_port in self.output_CIM.input_ports: assert (len(input_port.all_afferents) == 1), \ f"PROGRAM ERROR: {input_port.name} of ouput_CIM for '{self.name}' has more than one afferent." 
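+            # Map each of these output_CIM input_ports back to the trained OUTPUT node (and the index of
+            # its output_port) that projects to it, then pull that entry out of the tensors returned by the
+            # forward pass, detach it from the autograd graph, and convert it to a plain list for PsyNeuLink.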
port, component, _ = self.output_CIM._get_source_info_from_output_CIM(input_port) idx = component.output_ports.index(port) - all_outputs += [curr_tensor_outputs[component][idx].detach().cpu().numpy().copy().tolist()] - - # Update tracked loss and loss count - self.parameters.tracked_loss_count._set(np.array(self.parameters.tracked_loss_count._get(context=context) + 1), - context=context, - skip_history=True, - skip_log=True) - - return trained_outputs, all_outputs + all_output_values += [curr_tensors_for_outputs[component][idx].detach().cpu().numpy().copy().tolist()] + pytorch_rep.all_output_values = all_output_values + + # Get values of TARGET nodes + target_values = [value[0].detach().cpu().numpy().copy().tolist() + for value in list(curr_tensors_for_targets.values())] + pytorch_rep.target_values = target_values + + # Synchronize outcomes after every trial if specified + # IMPLEMENTATION NOTE: RESULTS is not included here as it is handled in call to autodiff._update_results() + pytorch_rep.synch_with_psyneulink(synch_with_pnl_options, + [OPTIMIZATION_STEP, TRIAL], + context, + [NODE_VARIABLES, NODE_VALUES]) + pytorch_rep.retain_for_psyneulink({TRAINED_OUTPUTS: trained_output_values, + TARGETS: target_values}, + retain_in_pnl_options, + context) + + return trained_output_values, all_output_values def clear_losses(self, context=None): self.losses = [] - self.parameters.losses.set([], context=context) + if self.pytorch_representation: + self.pytorch_representation.retained_losses = [] - def _update_learning_parameters(self, context): - """Carry out backpropagation learning (backward computation) for one or more trials. - Update parameters (weights) based on trials run since last update, - using Pytorch backward method to compute gradients and update weights - Then execute (i.e., do forward computation for) nodes in pytorch_rep._nodes_to_execute_after_gradient_calc + def do_gradient_optimization(self, retain_in_pnl_options, context, optimization_num=None): + """Compute loss and use in call to autodiff_backward() to compute gradients and update PyTorch parameters. 
+ Update parameters (weights) based on trial(s) executed since last optimization, + Reinitizalize minibatch_loss and minibatch_loss_count """ - optimizer = self.parameters.optimizer._get(context=context) pytorch_rep = self.parameters.pytorch_representation._get(context=context) + minibatch_loss = pytorch_rep.minibatch_loss / pytorch_rep.minibatch_loss_count - optimizer.zero_grad() + self.autodiff_backward(minibatch_loss, context) - # Compute and log average loss over all trials since last update - tracked_loss = self.parameters.tracked_loss._get(context=context) / int(self.parameters.tracked_loss_count._get(context=context)) - tracked_loss.backward(retain_graph=not self.force_no_retain_graph) - self.parameters.losses._get(context=context).append(tracked_loss.detach().cpu().numpy()[0]) - self.parameters.tracked_loss._set(torch.zeros(1, device=self.device).double(), context=context, skip_history=True, skip_log=True) - self.parameters.tracked_loss_count._set(np.array(0), context=context, skip_history=True, skip_log=True) + # # Save loss for current round of optimization + pytorch_rep.retain_for_psyneulink({LOSSES: minibatch_loss}, retain_in_pnl_options, context) + # Reset minibatch_loss for next round of optimization + pytorch_rep.minibatch_loss = torch.zeros(1, device=self.device).double() + pytorch_rep.minibatch_loss_count = 0 + + def autodiff_backward(self, minibatch_loss, context): + """Calculate gradients and apply to PyTorch model parameters (weights)""" + pytorch_rep = self.parameters.pytorch_representation._get(context=context) + optimizer = pytorch_rep.optimizer + + # Gradient updates + optimizer.zero_grad() + # Compute and log average loss over all trials since last update + minibatch_loss.backward(retain_graph=not self.force_no_retain_graph) # Update weights and copy to PNL optimizer.step() - pytorch_rep.detach_all() - pytorch_rep.copy_weights_to_psyneulink(context) - - # do forward computation on nodes that should be executed after gradient calculation - with torch.no_grad(): - for node, variable in pytorch_rep._nodes_to_execute_after_gradient_calc.items(): - node.wrapper_type.execute_node(node, variable, context) def _gen_llvm_function(self, *, ctx:pnlvm.LLVMBuilderContext, tags:frozenset): if "run" in tags: @@ -987,7 +1227,7 @@ def _get_autodiff_inputs_values(self, input_dict: dict): def _get_autodiff_targets_values(self, input_dict): """Return dict with values for TARGET Nodes - Get Inputs to TARGET Nodes used for computation of loss in autodiff_training(). + Get Inputs to TARGET Nodes used for computation of loss in autodiff_forward(). Uses input_dict to get values for TARGET Nodes that are INPUT Nodes of the AutodiffComposition, If a TARGET Node is not an INPUT Node, it is assumed to be the target of a projection from an INPUT Node and the value is determined by searching recursively for the input Node that projects to the TARGET Node. 
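
For reference, the accumulate-average-backpropagate pattern that autodiff_forward(), do_gradient_optimization() and autodiff_backward() implement across a minibatch can be sketched in plain PyTorch as follows (a standalone illustration, not the PsyNeuLink API; the variable names mirror the pytorch_rep attributes used above):

    import torch

    model = torch.nn.Linear(3, 1).double()
    optimizer = torch.optim.SGD(model.parameters(), lr=0.001)
    loss_fn = torch.nn.MSELoss()

    minibatch_loss = torch.zeros(1).double()   # analogous to pytorch_rep.minibatch_loss
    minibatch_loss_count = 0                   # analogous to pytorch_rep.minibatch_loss_count

    # "autodiff_forward": accumulate the loss for each trial (input/target pair) in the minibatch
    inputs = torch.rand(4, 3).double()
    targets = torch.rand(4, 1).double()
    for x, t in zip(inputs, targets):
        minibatch_loss = minibatch_loss + loss_fn(model(x), t)
        minibatch_loss_count += 1

    # "do_gradient_optimization": average over the minibatch, then backpropagate and step once
    mean_loss = minibatch_loss / minibatch_loss_count
    optimizer.zero_grad()
    mean_loss.backward()
    optimizer.step()

    # reset the accumulators for the next round of optimization
    minibatch_loss = torch.zeros(1).double()
    minibatch_loss_count = 0
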
@@ -1006,7 +1246,7 @@ def get_target_value(target): target = target.path_afferents[0].sender.owner return get_target_value(target) - for target in self.target_output_map: + for target in self.targets_from_outputs_map: target_values[target] = get_target_value(target) return target_values @@ -1044,12 +1284,29 @@ def _identify_target_nodes(self, context): return target_nodes @handle_external_context() - def learn(self, *args, synchronize_pnl_values:bool = True, **kwargs): - execution_phase_at_entry = kwargs[CONTEXT].execution_phase - kwargs[CONTEXT].execution_phase = ContextFlags.PREPARING + def learn(self, + *args, + synch_projection_matrices_with_torch:Optional[LEARNING_SCALE_LITERALS]=NotImplemented, + synch_node_variables_with_torch:Optional[LEARNING_SCALE_LITERALS]=NotImplemented, + synch_node_values_with_torch:Optional[LEARNING_SCALE_LITERALS]=NotImplemented, + synch_results_with_torch:Optional[LEARNING_SCALE_LITERALS]=NotImplemented, + retain_torch_trained_outputs:Optional[LEARNING_SCALE_LITERALS]=NotImplemented, + retain_torch_targets:Optional[LEARNING_SCALE_LITERALS]=NotImplemented, + retain_torch_losses:Optional[LEARNING_SCALE_LITERALS]=NotImplemented, + **kwargs)->list: + """Override to handle synch and retain args + Note: defaults for synch and retain args are set to NotImplemented, so that the user can specify None if + they want to locally override the default values for the AutodiffComposition (see docstrings for run() + and _parse_synch_and_retain_args() for additonal details). + """ + + context = kwargs[CONTEXT] + + execution_phase_at_entry = context.execution_phase + context.execution_phase = ContextFlags.PREPARING execution_mode = self._get_execution_mode(kwargs.pop('execution_mode', None)) - kwargs[CONTEXT].execution_phase = execution_phase_at_entry + context.execution_phase = execution_phase_at_entry any_nested_comps = [node for node in self.nodes if isinstance(node, Composition)] if any_nested_comps: @@ -1068,10 +1325,100 @@ def learn(self, *args, synchronize_pnl_values:bool = True, **kwargs): f"that are not AutodiffCompositions: {' ,'.join(nested_comps)}.") if self._built_pathways is False: - self.infer_backpropagation_learning_pathways(execution_mode, context=kwargs[CONTEXT]) + self.infer_backpropagation_learning_pathways(execution_mode, context=context) self._built_pathways = True - return super().learn(*args, execution_mode=execution_mode, **kwargs) + synch_with_pnl_options, retain_in_pnl_options = ( + self._parse_synch_and_retain_args(synch_projection_matrices_with_torch, + synch_node_variables_with_torch, + synch_node_values_with_torch, + synch_results_with_torch, + retain_torch_trained_outputs, + retain_torch_targets, + retain_torch_losses, + **kwargs)) + + return super().learn(*args, + synch_with_pnl_options=synch_with_pnl_options, + retain_in_pnl_options=retain_in_pnl_options, + execution_mode=execution_mode, + **kwargs) + + def _parse_synch_and_retain_args(self, + synch_projection_matrices_with_torch:Optional[LEARNING_SCALE_LITERALS], + synch_node_variables_with_torch:Optional[LEARNING_SCALE_LITERALS], + synch_node_values_with_torch:Optional[LEARNING_SCALE_LITERALS], + synch_results_with_torch:Optional[LEARNING_SCALE_LITERALS], + retain_torch_trained_outputs:Optional[LEARNING_SCALE_LITERALS], + retain_torch_targets:Optional[LEARNING_SCALE_LITERALS], + retain_torch_losses:Optional[LEARNING_SCALE_LITERALS], + **kwargs + ): + # Remove args from kwargs in case called from run() (won't be there if called from learn() + if synch_projection_matrices_with_torch == 
NotImplemented: + synch_projection_matrices_with_torch = kwargs.pop('synch_projection_matrices_with_torch', NotImplemented) + if synch_projection_matrices_with_torch == NotImplemented: + synch_projection_matrices_with_torch = self.parameters.synch_projection_matrices_with_torch.default_value + if synch_node_variables_with_torch == NotImplemented: + synch_node_variables_with_torch = kwargs.pop('synch_node_variables_with_torch', NotImplemented) + if synch_node_variables_with_torch == NotImplemented: + synch_node_variables_with_torch = self.parameters.synch_node_variables_with_torch.default_value + if synch_node_values_with_torch == NotImplemented: + synch_node_values_with_torch = kwargs.pop('synch_node_values_with_torch', NotImplemented) + if synch_node_values_with_torch == NotImplemented: + synch_node_values_with_torch = self.parameters.synch_node_values_with_torch.default_value + if synch_results_with_torch == NotImplemented: + synch_results_with_torch = kwargs.pop('synch_results_with_torch', NotImplemented) + if synch_results_with_torch == NotImplemented: + synch_results_with_torch = self.parameters.synch_results_with_torch.default_value + if retain_torch_trained_outputs == NotImplemented: + retain_torch_trained_outputs = kwargs.pop('retain_torch_trained_outputs', NotImplemented) + if retain_torch_trained_outputs == NotImplemented: + retain_torch_trained_outputs = self.parameters.retain_torch_trained_outputs.default_value + if retain_torch_targets == NotImplemented: + retain_torch_targets = kwargs.pop('retain_torch_targets', NotImplemented) + if retain_torch_targets == NotImplemented: + retain_torch_targets = self.parameters.retain_torch_targets.default_value + if retain_torch_losses == NotImplemented: + retain_torch_losses = kwargs.pop('retain_torch_losses', NotImplemented) + if retain_torch_losses == NotImplemented: + retain_torch_losses = self.parameters.retain_torch_losses.default_value + + if self.minibatch_size > 1: + args_str = [] + if retain_torch_trained_outputs in {OPTIMIZATION_STEP, TRIAL}: + args_str.append('retain_torch_trained_outputs') + if retain_torch_losses in {OPTIMIZATION_STEP,TRIAL}: + args_str.append('retain_torch_losses') + if retain_torch_targets in {OPTIMIZATION_STEP,TRIAL}: + args_str.append('retain_torch_targets') + if args_str: + arg_args = 'args' if len(args_str) == 1 else 'arg' + is_are = 'is' if len(args_str) == 1 else 'are' + raise AutodiffCompositionError(f"The {' ,'.join(args_str)} {arg_args} in the learn() method for " + f"'{self.name}' {is_are} specifed as 'OPTIMIZATION' or 'TRIAL', but " + f"'minibatch_size` ({self.minibatch_size}) != 1, so " + f"{', '.join([arg.split('_')[-1] for arg in args_str])} " + f"will be updated only at the end of a minibatch; " + f"use 'MINIBATCH' for the {arg_args} to avoid this warning.") + + # Package options for synching and tracking into dictionaries as arguments to learning and exec methods + context = kwargs[CONTEXT] + synch_with_pnl_options = {MATRIX_WEIGHTS: synch_projection_matrices_with_torch + or self.parameters.synch_projection_matrices_with_torch._get(context), + NODE_VARIABLES: synch_node_variables_with_torch + or self.parameters.synch_node_variables_with_torch._get(context), + NODE_VALUES: synch_node_values_with_torch + or self.parameters.synch_node_values_with_torch._get(context), + RESULTS: synch_results_with_torch + or self.parameters.synch_results_with_torch._get(context)} + + retain_in_pnl_options = {TRAINED_OUTPUTS: retain_torch_trained_outputs + or 
self.parameters.retain_torch_trained_outputs._get(context), + TARGETS: retain_torch_targets or self.parameters.retain_torch_targets._get(context), + LOSSES: retain_torch_losses or self.parameters.retain_torch_losses._get(context)} + + return synch_with_pnl_options, retain_in_pnl_options def _get_execution_mode(self, execution_mode): """Parse execution_mode argument and return a valid execution mode for the learn() method @@ -1091,6 +1438,7 @@ def execute(self, inputs=None, num_trials=None, minibatch_size=1, + optimizations_per_minibatch=1, do_logging=False, scheduler=None, termination_processing=None, @@ -1108,7 +1456,8 @@ def execute(self, runtime_params=None, execution_mode:pnlvm.ExecutionMode = pnlvm.ExecutionMode.PyTorch, skip_initialization=False, - synchronize_pnl_values=True, + synch_with_pnl_options:Optional[dict]=None, + retain_in_pnl_options:Optional[dict]=None, report_output:ReportOutput=ReportOutput.OFF, report_params:ReportOutput=ReportParams.OFF, report_progress:ReportProgress=ReportProgress.OFF, @@ -1116,8 +1465,8 @@ def execute(self, report_to_devices:ReportDevices=None, report=None, report_num=None, - ): - """Override to execute autodiff_training() in learning mode if execute_mode is not Python""" + )->np.ndarray: + """Override to execute autodiff_forward() in learning mode if execute_mode is not Python""" if (self._is_learning(context) and execution_mode is not pnlvm.ExecutionMode.PyTorch and any([isinstance(node, Composition) for node in self.nodes])): @@ -1146,6 +1495,7 @@ def execute(self, autodiff_inputs = self._get_autodiff_inputs_values(inputs) autodiff_targets = self._get_autodiff_targets_values(inputs) + # Begin reporting of learning TRIAL: report(self, LEARN_REPORT, # EXECUTE_REPORT, @@ -1155,18 +1505,19 @@ def execute(self, context=context) self._build_pytorch_representation(context) - trained_outputs, all_outputs = self.autodiff_training(inputs=autodiff_inputs, + trained_output_values, all_output_values = \ + self.autodiff_forward(inputs=autodiff_inputs, targets=autodiff_targets, - synchronize_pnl_values=True, - context=context, - scheduler=scheduler) - + synch_with_pnl_options=synch_with_pnl_options, + retain_in_pnl_options=retain_in_pnl_options, + execution_mode=execution_mode, + scheduler=scheduler, + context=context) execution_phase = context.execution_phase context.execution_phase = ContextFlags.PROCESSING - - self.output_CIM.execute(all_outputs, context=context) context.execution_phase = execution_phase + # Complete TRIAL Panel for output report, and report progress report(self, # [LEARN_REPORT], [EXECUTE_REPORT, PROGRESS_REPORT], @@ -1177,7 +1528,7 @@ def execute(self, scheduler.get_clock(context)._increment_time(TimeScale.TRIAL) - return all_outputs + return all_output_values # Call Composition execute in Python mode return super(AutodiffComposition, self).execute(inputs=inputs, @@ -1197,6 +1548,73 @@ def execute(self, report_num=report_num ) + @handle_external_context() + def run(self, *args, + synch_projection_matrices_with_torch:Optional[LEARNING_SCALE_LITERALS]=NotImplemented, + synch_node_variables_with_torch:Optional[LEARNING_SCALE_LITERALS]=NotImplemented, + synch_node_values_with_torch:Optional[LEARNING_SCALE_LITERALS]=NotImplemented, + synch_results_with_torch:Optional[LEARNING_SCALE_LITERALS]=NotImplemented, + retain_torch_trained_outputs:Optional[LEARNING_SCALE_LITERALS]=NotImplemented, + retain_torch_targets:Optional[LEARNING_SCALE_LITERALS]=NotImplemented, + retain_torch_losses:Optional[LEARNING_SCALE_LITERALS]=NotImplemented, + **kwargs): 
+ """Override to handle synch and retain args if run called directly from run() rather than learn() + Note: defaults for synch and retain args are NotImplemented, so that the user can specify None if they want + to locally override the default values for the AutodiffComposition (see _parse_synch_and_retain_args() + for details). This is distinct from the user assigning the Parameter default_values(s), which is done + in the AutodiffComposition constructor and handled by the Parameter._specify_none attribute. + """ + + if not ('synch_with_pnl_options' in kwargs and 'retain_in_pnl_options' in kwargs): + # No synch_with_pnl_options and retain_in_pnl_options dicts: + # - so must have been called from run directly rather than learn + # - therefore, must validate, parse and package options into those dicts + if synch_results_with_torch is NotImplemented: + # IMPLEMENTATION NOTE: + # If synch_results_with_torch is not specified by the user in call from run(), set it to + # MINIBATCH (rather than RUN, which is the default_value for calls from AutodiffComposition); + # this is required for calling _update_results() from Composition.run(), which does not itself + # know about synch and retain options, and the expected default behavior of which is to update + # results on every try in a call to run(). + synch_results_with_torch = MINIBATCH + synch_with_pnl_options, retain_in_pnl_options = ( + self._parse_synch_and_retain_args(synch_projection_matrices_with_torch, + synch_node_variables_with_torch, + synch_node_values_with_torch, + synch_results_with_torch, + retain_torch_trained_outputs, + retain_torch_targets, + retain_torch_losses, + **kwargs)) + kwargs['synch_with_pnl_options'] = synch_with_pnl_options + kwargs['retain_in_pnl_options'] = retain_in_pnl_options + + # If called from AutodiffComposition in Pytorch mode, provide chance to update results after run() + results = super(AutodiffComposition, self).run(*args, **kwargs) + if EXECUTION_MODE in kwargs and kwargs[EXECUTION_MODE] is pnlvm.ExecutionMode.PyTorch: + # Synchronize specified outcomes at end of learning run + context = kwargs[CONTEXT] + pytorch_rep = self.parameters.pytorch_representation.get(context) + if pytorch_rep: + pytorch_rep.synch_with_psyneulink(kwargs['synch_with_pnl_options'], RUN,context) + return results + + def _update_results(self, results, trial_output, execution_mode, synch_with_pnl_options, context): + if execution_mode is pnlvm.ExecutionMode.PyTorch: + # FIX: FOR NOW, USE THIS FOR BOTH TRIAL AND MINIBATCH, SINCE CURRENTLY NO DIFFERENCE; + # NEED TO FIGURE OUT WHAT TO DO ABOUT UPDATING RESULTS ONCE TRUE BATCHING IS IMPLEMENTED + if (RESULTS in synch_with_pnl_options + and synch_with_pnl_options[RESULTS] in {TRIAL, MINIBATCH}): + # Use Composition's own _update_results method since no savings when done trial-by-trial + super()._update_results(results, trial_output, execution_mode, synch_with_pnl_options, context) + elif (RESULTS in synch_with_pnl_options + and synch_with_pnl_options[RESULTS] == RUN): + # Use pytorch_reps method to keep a local list of results that are copied to autodiff.results after run + self.parameters.pytorch_representation._get(context).retain_results(trial_output) + else: + super()._update_results(results, trial_output, execution_mode, synch_with_pnl_options, context) + + @handle_external_context(fallback_most_recent=True) def save(self, path:PosixPath=None, directory:str=None, filename:str=None, context=None): """Saves all weight matrices for all MappingProjections in the AutodiffComposition 
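The synch/retain plumbing above (learn(), run(), and _parse_synch_and_retain_args()) relies on NotImplemented, rather than None, as the sentinel default: None is a meaningful user value that locally suppresses the corresponding behavior, so it cannot double as "not specified". A minimal sketch of that resolution order, using a hypothetical helper name and simplified defaults (not code from this patch):

    def resolve_option(value, kwargs, name, composition_default):
        """Resolve one synch/retain option.

        NotImplemented -> fall back to kwargs (call may have come via run()),
                          then to the AutodiffComposition's default
        None           -> explicit local override for this call
        anything else  -> use as given
        """
        if value is NotImplemented:
            value = kwargs.pop(name, NotImplemented)
        if value is NotImplemented:
            value = composition_default
        return value

    # e.g. resolve_option(NotImplemented, {}, 'synch_node_values_with_torch', 'run') -> 'run'
    #      resolve_option(None, {}, 'synch_node_values_with_torch', 'run') -> None (suppressed)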
diff --git a/psyneulink/library/compositions/compositionrunner.py b/psyneulink/library/compositions/compositionrunner.py index 108e732b267..6888977be4f 100644 --- a/psyneulink/library/compositions/compositionrunner.py +++ b/psyneulink/library/compositions/compositionrunner.py @@ -9,12 +9,16 @@ # ********************************************* AutodiffComposition ************************************************* import numpy as np +from typing import Optional +from types import GeneratorType from psyneulink.core.llvm import ExecutionMode from psyneulink.core.compositions.composition import Composition from psyneulink.core.compositions.report import Report, ReportProgress, ReportDevices, LEARN_REPORT, PROGRESS_REPORT from psyneulink.core.components.mechanisms.modulatory.learning.learningmechanism import LearningMechanism -from psyneulink.core.globals.keywords import OBJECTIVE_MECHANISM, TRAINING_SET +from psyneulink.core.globals.keywords import (EPOCH, MATRIX_WEIGHTS, MINIBATCH, OBJECTIVE_MECHANISM, OPTIMIZATION_STEP, + RUN, TRAINING_SET, TRIAL, NODE_VALUES, NODE_VARIABLES) +from psyneulink.core.globals.context import Context from psyneulink.core.globals.parameters import copy_parameter_value from inspect import isgeneratorfunction @@ -48,19 +52,22 @@ def _batch_inputs(self, inputs: dict, epochs: int, num_trials: int, - batch_size: int = 1, + minibatch_size: int = 1, optimizations_per_minibatch: int = 1, randomize: bool = True, + synch_with_pnl_options:Optional[dict] = None, + retain_in_pnl_options:Optional[dict] = None, call_before_minibatch=None, call_after_minibatch=None, early_stopper=None, execution_mode:ExecutionMode=ExecutionMode.Python, - context=None): + context=None)->GeneratorType: + """Execute inputs and update pytorch parameters for one minibatch at a time. + Partition inputs dict into ones of length minibatch_size (or, for the last set, the remainder) + Execute all inputs in that dict and then update weights (parameters), and repeat for all batches + within an epoch Synchronize weights, values and results with PsyNeuLink as specified in + synch_with_pnl_options and retain_in_pnl_options dicts. """ - Chunks input dict into pieces where each chunk is a dict with values of length batch_size - (or for the last chunk, the remainder) - """ - assert early_stopper is None or not self._is_llvm_mode, "Early stopper doesn't work in compiled mode" assert call_before_minibatch is None or not self._is_llvm_mode, "minibatch calls don't work in compiled mode" assert call_after_minibatch is None or not self._is_llvm_mode, "minibatch calls don't work in compiled mode" @@ -68,36 +75,71 @@ def _batch_inputs(self, #This is a generator for performance reasons, # since we don't want to copy any data (especially for very large inputs or epoch counts!) 
for epoch in range(epochs): - indices = list(range(0, num_trials)) + indices_of_all_trials = list(range(0, num_trials)) if randomize: - np.random.shuffle(indices) - for i in range(0, num_trials, batch_size): + np.random.shuffle(indices_of_all_trials) + + # Cycle over minibatches + for i in range(0, num_trials, minibatch_size): if call_before_minibatch: call_before_minibatch() - curr_indices = indices[i:i + batch_size] - for idx in curr_indices: - chunk = {} + + # Cycle over trials (stimui) within a minibatch + indices_of_trials_in_batch = indices_of_all_trials[i:i + minibatch_size] + + # FIX: IMPLEMENT PARALLELIZATION FOR minibatch_size > 1 + # # assert IF MINIBATCH > 1 THEN OPTIMIZATIONS_PER_STIMULUS == 1 + # if minibatch_size > 1 and optimizations_per_minibatch == 1: + # yield DICT WITH STIMULI FOR BATCH RUN THROUGH copy_parameter_value(stim) + # FIX: _gen_pytorch_fct's need to be refactored to handle batch dimension + + for trial_idx in indices_of_trials_in_batch: + inputs_for_minibatch = {} + # Get inputs for the current minibatch for k, v in inputs.items(): - chunk[k] = v[idx % len(v)] - for rep_idx in range(optimizations_per_minibatch): - # Return current stimulus - yield copy_parameter_value(chunk) + inputs_for_minibatch[k] = v[trial_idx % len(v)] + + # Cycle over optimizations per trial (stimulus + for optimization_num in range(optimizations_per_minibatch): + # Return current set of stimuli for minibatch + yield copy_parameter_value(inputs_for_minibatch) # Update weights if in PyTorch execution_mode; # handled by Composition.execute in Python mode and in compiled version in LLVM mode if execution_mode is ExecutionMode.PyTorch: - self._composition._update_learning_parameters(context) + self._composition.do_gradient_optimization(retain_in_pnl_options, context, optimization_num) + from torch import no_grad + pytorch_rep = self._composition.parameters.pytorch_representation.get(context) + with no_grad(): + for node, variable in pytorch_rep._nodes_to_execute_after_gradient_calc.items(): + node._composition_wrapper_owner.execute_node(node, variable, + optimization_num, context) + + # Synchronize after every optimization step for a given stimulus (i.e., trial) if specified + pytorch_rep.synch_with_psyneulink(synch_with_pnl_options, OPTIMIZATION_STEP, context, + [MATRIX_WEIGHTS, NODE_VARIABLES, NODE_VALUES]) - if call_after_minibatch: - try: - # Try with the hope that the function uses **kwargs (or these args) - call_after_minibatch(epoch=epoch, - batch=i // batch_size, - num_batches=num_trials // batch_size, - context=context) - except TypeError: - # If not, try without the args - call_after_minibatch() + if execution_mode is ExecutionMode.PyTorch: + # Synchronize specified outcomes after every stimulus (i.e., trial) + pytorch_rep.synch_with_psyneulink(synch_with_pnl_options, TRIAL, context) + + if execution_mode is ExecutionMode.PyTorch: + # Synchronize specified outcomes after every minibatch + pytorch_rep.synch_with_psyneulink(synch_with_pnl_options, MINIBATCH, context) + + if call_after_minibatch: + try: + # Try with the hope that the function uses **kwargs (or these args) + call_after_minibatch(epoch=epoch, + minibatch = i // minibatch_size, + num_minibatches = num_trials // minibatch_size, + context = context) + except TypeError: + # If not, try without the args + call_after_minibatch() + + if execution_mode is ExecutionMode.PyTorch: + pytorch_rep.synch_with_psyneulink(synch_with_pnl_options, EPOCH, context) # Compiled mode does not need more identical inputs. 
# number_of_runs will be set appropriately to cycle over the set @@ -108,16 +150,24 @@ def _batch_inputs(self, # end early if patience exceeded pass + if execution_mode is ExecutionMode.PyTorch: + # Synchronize specified outcomes at end of learning run + pytorch_rep.synch_with_psyneulink(synch_with_pnl_options, RUN, context) + + # 8/8/24 - FIX: THIS NEEDS TO BE BROUGHT INTO ALINGMENT WITH REFACTORING OF _batch_inputs ABOVE def _batch_function_inputs(self, inputs: dict, epochs: int, num_trials: int, batch_size: int = 1, + optimizations_per_minibatch: int = 1, + synch_with_pnl_options:Optional[dict] = None, + retain_in_pnl_options:Optional[dict] = None, call_before_minibatch=None, call_after_minibatch=None, early_stopper=None, execution_mode:ExecutionMode=ExecutionMode.Python, - context=None): + context=None)->GeneratorType: assert early_stopper is None or not self._is_llvm_mode, "Early stopper doesn't work in compiled mode" assert call_before_minibatch is None or not self._is_llvm_mode, "minibatch calls don't work in compiled mode" @@ -147,10 +197,12 @@ def _batch_function_inputs(self, if call_after_minibatch: call_after_minibatch() + # 7/10/24 - FIX: REVISE TO ACCOMODATE optimizations_per_minibatch + # AND ADD HANDLING OF synch_with_pnl_options AND retain_in_pnl_options # Update weights if in PyTorch execution_mode; # handled by Composition.execute in Python mode and in compiled version in LLVM mode if execution_mode is ExecutionMode.PyTorch: - self._composition._update_learning_parameters(context) + self._composition.do_gradient_optimization(retain_in_pnl_options, context) else: break @@ -171,11 +223,13 @@ def run_learning(self, patience: int = None, min_delta: int = 0, randomize_minibatches: bool = True, + synch_with_pnl_options:Optional[dict] = None, + retain_in_pnl_options:Optional[dict] = None, call_before_minibatch = None, call_after_minibatch = None, context=None, execution_mode:ExecutionMode = ExecutionMode.Python, - **kwargs): + **kwargs)->np.ndarray: """ Runs the composition repeatedly with the specified parameters. 
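For reference, the control flow that _batch_inputs() implements above: each yield hands one trial's worth of inputs to the composition's run() loop, and the code after the yield executes when the next item is requested, which is where the gradient step (do_gradient_optimization) and the OPTIMIZATION_STEP / TRIAL / MINIBATCH / EPOCH / RUN synchronization hooks fire. A condensed sketch of that structure, with the synch calls reduced to comments and hypothetical simplified arguments (not the actual method):

    import numpy as np

    def batch_inputs_sketch(inputs, epochs, num_trials, minibatch_size=1,
                            optimizations_per_minibatch=1, randomize=True):
        for epoch in range(epochs):
            order = list(range(num_trials))
            if randomize:
                np.random.shuffle(order)
            for start in range(0, num_trials, minibatch_size):          # one minibatch
                for trial_idx in order[start:start + minibatch_size]:   # one trial (stimulus)
                    trial_inputs = {node: vals[trial_idx % len(vals)]
                                    for node, vals in inputs.items()}
                    for _ in range(optimizations_per_minibatch):
                        yield dict(trial_inputs)   # composition executes a forward pass
                        # resumes here on the next request: gradient step +
                        # OPTIMIZATION_STEP-level synchronization
                    # TRIAL-level synchronization
                # MINIBATCH-level synchronization (and call_after_minibatch)
            # EPOCH-level synchronization
        # RUN-level synchronization after all epochs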
@@ -258,6 +312,9 @@ def run_learning(self, stim_epoch, num_trials, minibatch_size, + optimizations_per_minibatch=optimizations_per_minibatch, + synch_with_pnl_options=synch_with_pnl_options, + retain_in_pnl_options=retain_in_pnl_options, call_before_minibatch=call_before_minibatch, call_after_minibatch=call_after_minibatch, early_stopper=early_stopper, @@ -267,9 +324,11 @@ def run_learning(self, minibatched_input = self._batch_inputs(inputs=stim_input, epochs=stim_epoch, num_trials=num_trials, - batch_size=minibatch_size, + minibatch_size=minibatch_size, optimizations_per_minibatch=optimizations_per_minibatch, randomize=randomize_minibatches, + synch_with_pnl_options=synch_with_pnl_options, + retain_in_pnl_options=retain_in_pnl_options, call_before_minibatch=call_before_minibatch, call_after_minibatch=call_after_minibatch, early_stopper=early_stopper, @@ -285,21 +344,36 @@ def run_learning(self, # (Passing num_trials * stim_epoch + 1 works) run_trials = num_trials * stim_epoch if self._is_llvm_mode else None + # IMPLEMENTATION NOTE: for autodiff composition, the following executes an MINIBATCH's worth of training self._composition.run(inputs=minibatched_input, num_trials=run_trials, skip_initialization=skip_initialization, skip_analyze_graph=True, + optimizations_per_minibatch=optimizations_per_minibatch, + synch_with_pnl_options=synch_with_pnl_options, + retain_in_pnl_options=retain_in_pnl_options, execution_mode=execution_mode, context=context, **kwargs) skip_initialization = True + if execution_mode == ExecutionMode.PyTorch: + pytorch_rep = (self._composition.parameters.pytorch_representation._get(context). + copy_weights_to_psyneulink(context)) + if pytorch_rep and synch_with_pnl_options[MATRIX_WEIGHTS] == MINIBATCH: + pytorch_rep.copy_weights_to_psyneulink(context) + num_epoch_results = num_trials // minibatch_size # number of results expected from final epoch # return self._composition.parameters.results.get(context)[-1 * num_epoch_results:] # assign results from last *epoch* to learning_results self._composition.parameters.learning_results._set( self._composition.parameters.results.get(context)[-1 * num_epoch_results:], context) # return result of last *trial* (as usual for a call to run) + + if execution_mode == ExecutionMode.PyTorch and synch_with_pnl_options[MATRIX_WEIGHTS] == EPOCH: + # Copy weights at end of learning run + pytorch_rep.copy_weights_to_psyneulink(context) + return self._composition.parameters.results.get(context)[-1] class EarlyStopping(object): diff --git a/psyneulink/library/compositions/emcomposition.py b/psyneulink/library/compositions/emcomposition.py index d5cf790d205..a6da921c761 100644 --- a/psyneulink/library/compositions/emcomposition.py +++ b/psyneulink/library/compositions/emcomposition.py @@ -2433,7 +2433,8 @@ def _encode_memory(self, context=None): # Assign updated matrix to Projection self.retrieved_nodes[i].path_afferents[0].parameters.matrix.set(field_memories, context) - def learn(self, *args, **kwargs): + # 7/10/24 - FIX: WHY BOTHER WITH OVERRIDE IF NOTHING IS DONE: + def learn(self, *args, **kwargs)->list: return super().learn(*args, **kwargs) def _get_execution_mode(self, execution_mode): @@ -2469,5 +2470,6 @@ def infer_backpropagation_learning_pathways(self, execution_mode, context=None): raise EMCompositionError(f"EMComposition does not support learning with 'concatenate_keys'=True.") super().infer_backpropagation_learning_pathways(execution_mode, context=context) - def _update_learning_parameters(self, context): + def 
do_gradient_optimization(self, retain_in_pnl_options, context, optimization_num=None): + # 7/10/24 - MAKE THIS CONTEXT DEPENDENT: CALL super() IF BEING EXECUTED ON ITS OWN? pass diff --git a/psyneulink/library/compositions/pytorchEMcompositionwrapper.py b/psyneulink/library/compositions/pytorchEMcompositionwrapper.py index b2e3b915cf6..38c67017cac 100644 --- a/psyneulink/library/compositions/pytorchEMcompositionwrapper.py +++ b/psyneulink/library/compositions/pytorchEMcompositionwrapper.py @@ -57,12 +57,15 @@ def __init__(self, *args, **kwargs): self.retrieve_projection_wrappers = [self.projections_map[pnl_retrieve_proj] for pnl_retrieve_proj in pnl_retrieve_projs] - def execute_node(self, node, variable, context): + def execute_node(self, node, variable, optimization_num, context): """Override to handle storage of entry to memory_matrix by EMStorage Function""" if node is self.storage_node: - self.store_memory(variable, context) + # Only execute store after last optimization repetition for current mini-batch + # 7/10/24: FIX: MOVE PASSING OF THESE PARAMETERS TO context + if not (optimization_num + 1) % context.composition.parameters.optimizations_per_minibatch.get(context): + self.store_memory(variable, context) else: - super().execute_node(node, variable, context) + super().execute_node(node, variable, optimization_num, context) @property def memory(self)->Optional[torch.Tensor]: @@ -77,6 +80,9 @@ def memory(self)->Optional[torch.Tensor]: for j in range(num_fields)]) for i in range(memory_capacity)])) + # # MODIFIED 7/29/24 NEW: NEEDED FOR torch MPS SUPPORT + # @torch.jit.script_method + # MODIFIED 7/29/24 END def store_memory(self, memory_to_store, context): """Store variable in memory_matrix (parallel EMStorageMechanism._execute) @@ -108,13 +114,20 @@ def store_memory(self, memory_to_store, context): storage_prob = mech.parameters.storage_prob._get(context) # modulable, so use getter field_weights = mech.parameters.field_weights.get(context) # modulable, so use getter concatenation_node = mech.concatenation_node + # MODIFIED 7/29/24 OLD: num_match_fields = 1 if concatenation_node else len([i for i in mech.field_types if i==1]) + # # MODIFIED 7/29/24 NEW: NEEDED FOR torch MPS SUPPORT + # if concatenation_node: + # num_match_fields = 1 + # else: + # num_match_fields = 0 + # for i in mech.field_types: + # if i==1: + # num_match_fields += 1 + # MODIFIED 7/29/24 END # Find weakest memory (i.e., with lowest norm) - field_norms = torch.empty((len(memory),len(memory[0]))) - for row in range(len(memory)): - for col in range(len(memory[0])): - field_norms[row][col] = torch.linalg.norm(memory[row][col]) + field_norms = torch.linalg.norm(memory, dim=2) if field_weights is not None: field_norms *= field_weights row_norms = torch.sum(field_norms, axis=1) @@ -126,7 +139,7 @@ def store_memory(self, memory_to_store, context): # For match projections, get entry to store from value of sender of Projection matrix # (this is to accomodate concatenation_node) axis = 0 - entry_to_store = field_projection.sender.value + entry_to_store = field_projection.sender.output if concatenation_node is None: assert (entry_to_store == memory_to_store[i]).all(), \ f"PROGRAM ERROR: misalignment between inputs and fields for storing them" diff --git a/psyneulink/library/compositions/pytorchshowgraph.py b/psyneulink/library/compositions/pytorchshowgraph.py index 46d8ebbc6c2..6452ccf9f6a 100644 --- a/psyneulink/library/compositions/pytorchshowgraph.py +++ b/psyneulink/library/compositions/pytorchshowgraph.py @@ -36,9 +36,9 @@ 
class PytorchShowGraph(ShowGraph): in `PyTorch mode ` (also see `AutodiffComposition_PyTorch`). In this mode, any `nested Compositions ` are "flattened" (i.e., incorporated into the outermost Composition); also, any `Nodes `` designated as `exclude_from_gradient_calc - ` will be moved to the end of the graph (as they are executed + ` are moved to the end of the graph (as they are executed after the gradient calculation), and any Projections designated as `exclude_in_autodiff - ` will not be shown as they are not used in the gradient calculations at all. + ` are not shown as they are not used in the gradient calculations at all. Arguments --------- diff --git a/psyneulink/library/compositions/pytorchwrappers.py b/psyneulink/library/compositions/pytorchwrappers.py index f739cfc259c..48737faa556 100644 --- a/psyneulink/library/compositions/pytorchwrappers.py +++ b/psyneulink/library/compositions/pytorchwrappers.py @@ -8,50 +8,154 @@ # ********************************************* PytorchComponent ************************************************* """PyTorch wrappers for Composition, Mechanism, Projection, and Functions for use in AutodiffComposition""" +from psyneulink._typing import Optional, Literal, Union import graph_scheduler import torch import torch.nn as nn +import numpy as np + +from enum import Enum, auto from psyneulink.core.components.functions.nonstateful.combinationfunctions import LinearCombination, PRODUCT, SUM from psyneulink.core.components.functions.stateful.integratorfunctions import IntegratorFunction +from psyneulink.core.components.functions.stateful import StatefulFunction +from psyneulink.core.components.mechanisms.processing.transfermechanism import TransferMechanism from psyneulink.core.compositions.composition import NodeRole, CompositionInterfaceMechanism from psyneulink.library.compositions.pytorchllvmhelper import * from psyneulink.library.compositions.compiledoptimizer import AdamOptimizer, SGDOptimizer from psyneulink.library.compositions.compiledloss import MSELoss, CROSS_ENTROPYLoss -from psyneulink.core.globals.keywords import AFTER, BEFORE, DEFAULT_VARIABLE, Loss, NODE, TARGET_MECHANISM +from psyneulink.core.globals.keywords import (ADD, AFTER, ALL, BEFORE, DEFAULT_VARIABLE, EPOCH, INPUTS, + LEARNING_SCALE_LITERALS, Loss, LOSSES, MATRIX_WEIGHTS, + NODE, NODE_VALUES, NODE_VARIABLES, OUTPUTS, RESULTS, RUN, + TARGETS, TARGET_MECHANISM, ) from psyneulink.core.globals.context import Context, ContextFlags, handle_external_context -from psyneulink.core.globals.utilities import get_deepcopy_with_shared +from psyneulink.core.globals.utilities import convert_to_np_array, get_deepcopy_with_shared, convert_to_list from psyneulink.core.globals.log import LogCondition from psyneulink.core import llvm as pnlvm __all__ = ['PytorchCompositionWrapper', 'PytorchMechanismWrapper', 'PytorchProjectionWrapper'] +class DataTypeEnum(Enum): + + TRAINED_OUTPUTS = 0 + TARGETS = auto() + LOSSES = auto() + +# # MODIFIED 7/29/24 OLD: class PytorchCompositionWrapper(torch.nn.Module): +# # MODIFIED 7/29/24 NEW: NEEDED FOR torch MPS SUPPORT +# class PytorchCompositionWrapper(torch.jit.ScriptModule): +# MODIFIED 7/29/24 END """Wrapper for a Composition as a Pytorch Module - Set up parameters of PyTorch model & information required for forward computation + Class that wraps a `Composition ` as a PyTorch module. 
+ + Two main responsibilities: + + 1) Set up parameters of PyTorch model & information required for forward computation: + Handle nested compositions (flattened in infer_backpropagation_learning_pathways): + Deal with Projections into and/or out of a nested Composition as shown in figure below: + (note: Projections in outer Composition to/from a nested Composition's CIMs are learnable, + and ones in a nested Composition from/to its CIMs are not) + [ OUTER ][ NESTED ][ OUTER ] + \\learnable// \\not learnable// \\not learnable// \\learnable// + ---> [Node] ----> [input_CIM] ~~~> [INPUT Node] ----> [OUTPUT Node] ~~~> [output_CIM] ----> [Node] ---> + sndr rcvr nested_rcvr nested_sndr sndr rcvr + ^--projection-->^ ^---projection-->^ + ^----PytorchProjectionWrapper---->^ ^----PytorchProjectionWrapper---->^ + ENTRY EXIT + + 2) Handle coordination of passing data and outcomes back to PsyNeuLink objects, handled by two main methods: + + - synch_with_psyneulink() + Copies matrix weights, node variables, node values, and/or autoutdiff results + at user-specified intervals (LearningScale: OPTIMIZATION_STEP, TRIAL, MINIBATCH, EPOCH, RUN); + these are specified by the user in the following arguments to run() or learn(): + synch_projection_matrices_with_torch=RUN, + synch_node_variables_with_torch=None, + synch_node_values_with_torch=RUN, + synch_results_with_torch=RUN, + and consolidated in the synch_with_pnl_options dict used by synch_with_psyneulink + + - retain_for_psyneulink() + Retains learning-specific data used and outcomes generated during execution of PyTorch model + (TRAINED_OUTPUT_VALUES, corresponding TARGETS and LOSSES), that are copied to PsyNeuLink + at the end of a call to learn(); these are specified by the user in the following arguments + to learn(): + retain_torch_trained_outputs=MINIBATCH, + retain_torch_targets=MINIBATCH, + retain_torch_losses=MINIBATCH, + and consolidated in the retain_in_pnl_options dict used by retain_for_psyneulink + + - Note: RESULTS is handled in an idiosyncratic way: it is specified along with the synchronization + parameters, since it is a value ordinarily generated in the execution of a Composition; + however it's helper parallels the retain_for_psyneulink helper methods, and it is called + from _update_results if TRIAL is specified, in order to integrate with the standard execution + of a Composition. + + Arguments + --------- - Handle nested compositions (flattened in infer_backpropagation_learning_pathways): - Deal with Projections into or out of a nested Composition as follows: - - [ OUTER ][ NESTED ][ OUTER ] - \\learnable// \\not learnable// \\not learnable// \\learnable// - ---> [Node] ----> [input_CIM] ~~~> [INPUT Node] ----> [OUTPUT Node] ~~~> [output_CIM] ----> [Node] ---> - sndr rcvr nested_rcvr nested_sndr sndr rcvr - ^--projection-->^ ^---projection-->^ - ^----PytorchProjectionWrapper---->^ ^----PytorchProjectionWrapper---->^ - ENTRY EXIT Attributes ---------- - nodes : List[PytorchMechanismWrapper] + _composition: Composition + `AutodiffComposition` being wrapped. + + wrapped_nodes : List[PytorchMechanismWrapper] + list of nodes in the PytorchCompositionWrapper corresponding to PyTorch modules. Generally these are + `Mechanisms ` wrapped in a `PytorchMechanismWrapper`, however, if the `AutodiffComposition` + being wrapped is itself a nested Composition, then the wrapped nodes are `PytorchCompositionWrapper` objects. 
+ When the PyTorch model is executed these are "flattened" into a single PyTorch module, which can be visualized + using the AutodiffComposition's `show_graph ` method and setting its *show_pytorch* + argument to True (see `PytorchShowGraph` for additional information). + + nodes_map : Dict[Node: PytorchMechanismWrapper or PytorchCompositionWrapper] + maps psyneulink `Nodes ` to PytorchCompositionWrapper nodes. + + projection_wrappers = List[PytorchProjectionWrapper] + list of PytorchCompositionWrappers in the PytorchCompositionWrapper, each of which wraps a `Projection` + in the AutodiffComposition being wrapped. + + projections_map : Dict[Projection: PytorchProjectionWrapper] + maps `Projections ` in the AutodiffComposition being wrapped to `PytorchProjectionWrappers` in + the PytorchCompositionWrapper. + + _nodes_to_execute_after_gradient_calc : Dict[node : torch.Tensor] + contains nodes specified as `exclude_from_gradient_calc` as keys, and their current variable as values - projections_map : Dict[Projection, PytorchProjectionWrapper] - keys are Projections in the Composition being wrapped, and keys are the ProjectionWrappers to which they - are mapped (see above). + optimizer : torch + assigned by AutodffComposition after the wrapper is created, which passes the parameters to the optimizer + device : torch.device + device used to process torch Tensors in PyTorch modules + + params : nn.ParameterList() + list of PyTorch parameters (connection weight matrices) in the PyTorch model. + + minibatch_loss : torch.Tensor + accumulated loss over all trials (stimuli) within a batch. + + minibatch_loss_count : int + count of losses (trials) within batch, used to calculate average loss per batch. + + retained_results : List[ndarray] + list of the `output_values ` of the AutodiffComposition for ever trial executed + in a call to `run ` or `learn `. + + retained_trained_outputs : List[ndarray] + values of the trained `OUTPUT ` Node (i.e., ones associated with `TARGET `. + + retained_targets : List[ndarray] + values of the `TARGET `. 
+ + retained_losses : List[ndarray] + losses per batch, epoch or run accumulated over a call to learn() """ + def __init__(self, composition, device, @@ -62,19 +166,38 @@ def __init__(self, from psyneulink.library.compositions.autodiffcomposition import AutodiffComposition + # Assign attributes self.name = f"PytorchCompositionWrapper[{composition.name}]" + self._composition = composition + self.device = device + self.optimizer = None # This gets assigned by self._composition after the wrapper is created, + # as the latter is needed to pass the parameters to the optimizer self.wrapped_nodes = [] # can be PytorchMechanismWrapper or PytorchCompositionWrapper - self.nodes_map = {} # maps Node (Mech or nested Comp) -> PytorchMechanismWrapper or PytorchCompositionWrapper + self.nodes_map = {} # maps Node (Mech or nested Comp) -> PytorchMechanismWrapper or PytorchCompositionWrapper + self._nodes_to_execute_after_gradient_calc = {} # Nodes requiring execution after Pytorch forward/backward pass self.projection_wrappers = [] # PytorchProjectionWrappers self.projections_map = {} # maps Projections -> PytorchProjectionWrappers self.params = nn.ParameterList() - self.device = device - self._composition = composition - self._nodes_to_execute_after_gradient_calc = {} # Nodes requiring execution after Pytorch forward/backward pass + self.minibatch_loss = torch.zeros(1, device=self.device).double() # Accumulated losses within a batch + self.minibatch_loss_count = 0 # Count of losses within batch + + # Data retained by the wrapper during execution and copied to pnl as specified by retain_for_psyneulink + self.retained_results = [] # Values of all output NODES + self.retained_trained_outputs = [] # Values of trained output NODES (i.e. associated with TARGETS) + self.retained_targets = [] # # Values of targets for all trials + self.retained_losses = [] # Losses per trial or batch accumulated over a run + + # The following is a list of methods called in retain_for_psyneulink, indexed by keywords using DataTypeEnum + # (this is constructed as a form of hash table for efficiency since that method can be called alot; + # it is constructed here to avoid doing so in the retain_for_psyneulink method itself) + self.retain_method = [None] * len(DataTypeEnum) + self.retain_method[DataTypeEnum.TRAINED_OUTPUTS.value] = self.retain_trained_outputs + self.retain_method[DataTypeEnum.TARGETS.value] = self.retain_targets + self.retain_method[DataTypeEnum.LOSSES.value] = self.retain_losses # Instantiate pytorch Mechanisms nodes = list(set(composition.nodes) - set(composition.get_nodes_by_role(NodeRole.LEARNING))) @@ -205,7 +328,7 @@ def _assign_input_nodes(nodes): self.execution_sets = [x for x in self.execution_sets if len(x) > 0] - # Flattening for forward() and AutodiffComposition._update_learning_parameters + # Flattening for forward() and AutodiffComposition.do_gradient_optimization # Flatten nested execution sets: nested_execution_sets = {} @@ -223,7 +346,7 @@ def _assign_input_nodes(nodes): # Flatten maps for node_wrapper in self.wrapped_nodes: if isinstance(node_wrapper, PytorchCompositionWrapper): - # For copying weights back to PNL in AutodiffComposition._update_learning_parameters + # For copying weights back to PNL in AutodiffComposition.do_gradient_optimization self.projections_map.update(node_wrapper.projections_map) # Not sure if this is needed, but just to be safe self.nodes_map.update(node_wrapper.nodes_map) @@ -231,7 +354,7 @@ def _assign_input_nodes(nodes): self.nodes_map = {k: v for k, v in 
self.nodes_map.items() if not isinstance(v, PytorchCompositionWrapper)} # Flatten projections so that they are all in the outer Composition and visible by _regenerate_paramlist - # needed for call to backward() in AutodiffComposition._update_learning_parameters + # needed for call to backward() in AutodiffComposition.do_gradient_optimization # FIX: MAYBE SHOULD DO THIS AS LIST IS CREATED ABOVE? self.projection_wrappers = list(self.projections_map.values()) @@ -336,7 +459,8 @@ def _gen_llvm_training_backprop(self, ctx, optimizer, loss): if node._mechanism in input_nodes: continue node_z_value = z_values[node] - activation_func_derivative = node._gen_llvm_execute_derivative_func(ctx, builder, state, params, node_z_value) + activation_func_derivative = node._gen_llvm_execute_derivative_func(ctx, builder, + state, params, node_z_value) error_val = builder.alloca(z_values[node].type.pointee) error_dict[node] = error_val @@ -351,7 +475,9 @@ def _gen_llvm_training_backprop(self, ctx, optimizer, loss): node_target = builder.gep(model_input, [ctx.int32_ty(0), ctx.int32_ty(target_idx)]) # 2) Lookup desired output value - node_output = builder.gep(model_output, [ctx.int32_ty(0), ctx.int32_ty(0), ctx.int32_ty(node._idx), ctx.int32_ty(0)]) + node_output = builder.gep(model_output, [ctx.int32_ty(0), ctx.int32_ty(0), + ctx.int32_ty(node._idx), + ctx.int32_ty(0)]) tmp_loss = loss.gen_inject_lossfunc_call( ctx, builder, loss_fn, node_output, node_target) @@ -404,17 +530,24 @@ def _gen_llvm_training_backprop(self, ctx, optimizer, loss): continue for proj in node.afferents: # get a_(l-1) - afferent_node_activation = builder.gep(model_output, [ctx.int32_ty(0), ctx.int32_ty(0), ctx.int32_ty(proj.sender._idx), ctx.int32_ty(0)]) + afferent_node_activation = builder.gep(model_output, [ctx.int32_ty(0), + ctx.int32_ty(0), + ctx.int32_ty(proj.sender._idx), + ctx.int32_ty(0)]) # get dimensions of weight matrix weights_llvmlite = proj._extract_llvm_matrix(ctx, builder, state, params) - pnlvm.helpers.printf_float_matrix(builder, weights_llvmlite, prefix= f"{proj.sender._mechanism} -> {proj.receiver._mechanism}\n", override_debug=False) + pnlvm.helpers.printf_float_matrix(builder, weights_llvmlite, + prefix= f"{proj.sender._mechanism} -> {proj.receiver._mechanism}\n", + override_debug=False) # update delta_W node_delta_w = builder.gep(delta_w, [ctx.int32_ty(0), ctx.int32_ty(proj._idx)]) dim_x, dim_y = proj.matrix.shape - with pnlvm.helpers.for_loop_zero_inc(builder, ctx.int32_ty(dim_x), "weight_update_loop_outer") as (b1, weight_row): - with pnlvm.helpers.for_loop_zero_inc(b1, ctx.int32_ty(dim_y), "weight_update_loop_inner") as (b2, weight_column): + with pnlvm.helpers.for_loop_zero_inc(builder, ctx.int32_ty(dim_x), + "weight_update_loop_outer") as (b1, weight_row): + with pnlvm.helpers.for_loop_zero_inc(b1, ctx.int32_ty(dim_y), + "weight_update_loop_inner") as (b2, weight_column): a_val = b2.load(b2.gep(afferent_node_activation, [ctx.int32_ty(0), weight_row])) d_val = b2.load(b2.gep(err_val, @@ -469,7 +602,7 @@ def _get_compiled_optimizer(self): return optimizer @handle_external_context() - def forward(self, inputs, context=None)->dict: + def forward(self, inputs, optimization_rep, context=None)->dict: """Forward method of the model for PyTorch and LLVM modes Returns a dictionary {output_node:value} of output values for the model """ @@ -480,7 +613,7 @@ def forward(self, inputs, context=None)->dict: # If node is nested Composition (wrapped in PytorchCompositionWrapper), # calls its forward method recursively if 
isinstance(node, PytorchCompositionWrapper): - node.forward(inputs=None) + node.forward(inputs=None, optimization_rep=optimization_rep, context=context) continue elif node._is_input or node._is_bias: @@ -524,6 +657,7 @@ def forward(self, inputs, context=None)->dict: if node.exclude_from_gradient_calc: if node.exclude_from_gradient_calc == AFTER: + # Cache variable for later exce execution self._nodes_to_execute_after_gradient_calc[node] = variable continue elif node.exclude_from_gradient_calc == BEFORE: @@ -533,14 +667,15 @@ def forward(self, inputs, context=None)->dict: (f'PROGRAM ERROR: Bad assignment to {node.name}.exclude_from_gradient_calc: ' f'{node.exclude_from_gradient_calc}; only {AFTER} is currently supported') - # Execute the node using wrapper_type for Composition to which it belongs + # Execute the node using composition_wrapper_owner for Composition wrapper to which it belongs # Note: this is to support overrides of execute_node method by subclasses (such as in EMComposition) - node.wrapper_type.execute_node(node, variable, context) + node._composition_wrapper_owner.execute_node(node, variable, optimization_rep, context) + # 7/20/24 FIX: CACHE get_nested_output_nodes_at_all_levels() IN composition # Add entry to outputs dict for OUTPUT Nodes of pytorch representation # note: these may be different than for actual Composition, as they are flattened if (node._mechanism in self._composition.get_nested_output_nodes_at_all_levels()): - outputs[node._mechanism] = node.value + outputs[node._mechanism] = node.output # NOTE: Context source needs to be set to COMMAND_LINE to force logs to update independently of timesteps # if not self._composition.is_nested: @@ -552,44 +687,193 @@ def forward(self, inputs, context=None)->dict: return outputs - def execute_node(self, node, variable, context=None): + def execute_node(self, node, variable, optimization_num, context=None): """Execute node and store the result in the node's value attribute - Implemented as method (and includes context as arg) so that it can be overridden - by subclasses of PytorchCompositionWrapper + Implemented as method (and includes optimization_rep and context as args) + so that it can be overridden by subclasses of PytorchCompositionWrapper """ value = node.execute(variable, context) - assert 'DEBUGGING BREAK POINT' - def detach_all(self): - for projection in self.projections_map.values(): - projection.matrix.detach() + def synch_with_psyneulink(self, + synch_with_pnl_options:dict, + current_condition:LEARNING_SCALE_LITERALS, + context:Context, + params:Optional[list]=None): + """Copy weights, values, and/or results from Pytorch to PsyNeuLink at specified junctures + params can be used to restrict copy to a specific (set of) param(s). 
If params is not specified, all are copied; + """ + # 8/7/24: FIX - THIS COULD BE MADE TO BE MORE EFFICIENT ALONG THE LINES OF retain_for_psyneulink() + # AND REFACTORED TO USE DICT WITH DATATYPES AS KEYS AND PARAMS AS VALUES; + all = [MATRIX_WEIGHTS, NODE_VARIABLES, NODE_VALUES, RESULTS] + params = convert_to_list(params) or all + illegal_params = [param for param in params if param not in all] + assert not illegal_params, \ + f"PROGRAM ERROR: Illegal attributes ({' ,'.join(illegal_params)}) specified in call to synch_with_psyneulink" + + if MATRIX_WEIGHTS in params and synch_with_pnl_options[MATRIX_WEIGHTS] == current_condition: + self.copy_weights_to_psyneulink(context) + + if NODE_VARIABLES in params and synch_with_pnl_options[NODE_VARIABLES] == current_condition: + self.copy_node_variables_to_psyneulink(ALL, context) + + if NODE_VALUES in params and synch_with_pnl_options[NODE_VALUES] == current_condition: + self.copy_node_values_to_psyneulink(ALL, context) + + if RESULTS in params and synch_with_pnl_options[RESULTS] == current_condition: + self.copy_results_to_psyneulink(current_condition, context) def copy_weights_to_psyneulink(self, context=None): for projection, pytorch_rep in self.projections_map.items(): - projection.parameters.matrix._set( - pytorch_rep.matrix.detach().cpu().numpy(), context) - projection.parameters.matrix._set( - pytorch_rep.matrix.detach().cpu().numpy(), context) - projection.parameter_ports['matrix'].parameters.value._set( - pytorch_rep.matrix.detach().cpu().numpy(), context) + matrix = pytorch_rep.matrix.detach().cpu().numpy() + projection.parameters.matrix._set(matrix, context) + projection.parameters.matrix._set(matrix, context) + projection.parameter_ports['matrix'].parameters.value._set(matrix, context) def log_weights(self): for proj_wrapper in self.projection_wrappers: proj_wrapper.log_matrix() + def copy_node_variables_to_psyneulink(self, nodes:Optional[Union[list,Literal[ALL, INPUTS]]]=ALL, context=None): + """Copy input to Pytorch nodes to variable of AutodiffComposition nodes. + IMPLEMENTATION NOTE: list included in nodes arg to allow for future specification of specific nodes to copy + """ + if nodes == ALL: + nodes = self.nodes_map.items() + for pnl_node, pytorch_node in nodes: + # First get variable in numpy format + if isinstance(pytorch_node.input, list): + variable = np.array([val.detach().cpu().numpy() for val in pytorch_node.input], dtype=object) + else: + variable = pytorch_node.input.detach().cpu().numpy() + # Set pnl_node's value to value + pnl_node.parameters.variable._set(variable, context) + + def copy_node_values_to_psyneulink(self, nodes:Optional[Union[list,Literal[ALL, OUTPUTS]]]=ALL, context=None): + """Copy output of Pytorch nodes to value of AutodiffComposition nodes. 
+ IMPLEMENTATION NOTE: list included in nodes arg to allow for future specification of specific nodes to copy + """ + if nodes == ALL: + nodes = self.nodes_map.items() + # elif nodes == OUTPUTS: + # nodes = [(node, self.nodes_map[node]) for node in self._composition.get_output_nodes()] + + def update_autodiff_all_output_values(): + """Update autodiff's output_values by executing its output_CIM's with pytorch_rep all_output_values""" + if self.all_output_values: + self._composition.output_CIM.execute(self.all_output_values, context=context) + + # Allow selective updating of just autodiff.output_values if specified + if nodes == OUTPUTS: + update_autodiff_all_output_values() + return + + for pnl_node, pytorch_node in nodes: + # Update each node's value with the output of the corresponding wrappter in the PyTorch representation + if pytorch_node.output is None: + assert pytorch_node.exclude_from_gradient_calc, \ + (f"PROGRAM ERROR: Value of PyTorch wrapper for {pnl_node.name} is None during forward pass, " + f"but it is not excluded from gradient calculation.") + continue + # First get value in numpy format + if isinstance(pytorch_node.output, list): + value = np.array([val.detach().cpu().numpy() for val in pytorch_node.output], dtype=object) + else: + value = pytorch_node.output.detach().cpu().numpy() + + # Set pnl_node's value to value + pnl_node.parameters.value._set(value, context) + + # If pnl_node's function is Stateful, assign value to its previous_value parameter + # so that if Python implementation is run it picks up where PyTorch execution left off + if isinstance(pnl_node.function, StatefulFunction): + pnl_node.function.parameters.previous_value._set(value, context) + # Do same for integrator_function of TransferMechanism if it is in integrator_mode + if isinstance(pnl_node, TransferMechanism) and pnl_node.integrator_mode: + pnl_node.integrator_function.parameters.previous_value._set(pytorch_node.integrator_previous_value, + context) + # Finally, update the output_values of the autodiff Composition by executing its output_CIM + update_autodiff_all_output_values() + def log_values(self): for node_wrapper in [n for n in self.wrapped_nodes if not isinstance(n, PytorchCompositionWrapper)]: node_wrapper.log_value() + def copy_results_to_psyneulink(self, current_condition, context=None): + """Copy outputs of Pytorch forward() to AutodiffComposition.results attribute.""" + # IMPLEMENTATION NOTE: no need to do amything for TRIAL or MINIBATCH, + # as Composition's _update_results() method is getting called to do that locally + if current_condition in {EPOCH, RUN}: + self._composition.parameters.results._set(convert_to_np_array(self.retained_results), context) + + def retain_for_psyneulink(self, + data:dict, + retain_in_pnl_options:dict, + context): + """Store outputs, targets, and losses from Pytorch execution for copying to PsyNeuLink at end of learn(). 
+ Arguments + --------- + data : dict + specifies local data available to retain (for copying to pnl at end of run; + keys must be one or more of the keywords OUTPUTS, TARGETS, or LOSSES; value must be a torch.Tensor + retain_in_pnl_options : dict + specifies which data the user has requested be retained (and copied to pnl at end of run) + keys must be OUTPUTS, TARGETS, or LOSSES; value must be a LearningScale.name or None (which suppresses copy) + Note: does not actually copy data to pnl; that is done by _getter methods for the relevant autodiff Parameters + """ + try: + for data_type, data_val in data.items(): + try: + if retain_in_pnl_options[data_type]: + retain_method_idx = DataTypeEnum._member_map_[data_type.upper()].value + self.retain_method[retain_method_idx](data_val) + except KeyError: + assert False, \ + (f"PROGRAM ERROR: No entry for {data_type} found in retain_in_pnl_options " + f"in call to retain_for_psyneulink()") + except KeyError: + assert False, \ + (f"PROGRAM ERROR: Invalid key(s) specified in call to retain_for_psyneulink: {list(data.keys())}") + + def retain_results(self, results:list): + """Track outputs and copy to AutodiffComposition.pytorch_outputs at end of learn().""" + if len(results): + self.retained_results.append(results) + + def retain_trained_outputs(self, trained_outputs:list): + """Track outputs and copy to AutodiffComposition.pytorch_outputs at end of learn().""" + self.retained_trained_outputs.append(trained_outputs) + + def retain_targets(self, targets:list): + """Track targets and copy to AutodiffComposition.pytorch_targets at end of learn().""" + self.retained_targets.append(targets) + + def retain_losses(self, loss:torch.Tensor): + """Track losses and copy to AutodiffComposition.pytorch_targets at end of learn().""" + self.retained_losses.append(loss.detach().cpu().numpy().copy().tolist()) + + def detach_all(self): + for projection in self.projections_map.values(): + projection.matrix.detach() + class PytorchMechanismWrapper(): """Wrapper for a Mechanism in a PytorchCompositionWrapper + These comprise nodes of the PytorchCompositionWrapper, and generally correspond to modules of a Pytorch model. Attributes ---------- + _mechanism : Mechanism + the PsyNeuLink `Mechanism` being wrapped. + + afferents : List[PytorchProjectionWrapper] + list of `PytorchProjectionWrapper` objects that project to the PytorchMechanismWrapper. + + input : torch.Tensor + most recent input to the PytorchMechanismWrapper. + function : _gen_pytorch_fct - Pytorch version of the Mechanism's function assigned in __init__ + Pytorch version of the Mechanism's function assigned in __init__. integrator_function : _gen_pytorch_fct Pytorch version of the Mechanism's integrator_function assigned in __init__ if mechanism @@ -597,17 +881,27 @@ class PytorchMechanismWrapper(): that is used to determine whether to execute the integrator_function first, and use its result as the input to its function. + output : torch.Tensor + most recent output of the PytorchMechanismWrapper. + + efferents : List[PytorchProjectionWrapper] + list of `PytorchProjectionWrapper` objects that project from the PytorchMechanismWrapper. + exclude_from_gradient_calc : bool or str[BEFORE | AFTER]: False used to prevent a node from being included in the Pytorch gradient calculation by excluding it in calls to the forward() and backward(). If AFTER is specified, the node is executed after at the end of the `update_learning_parameters` method. 
BEFORE is not currently supported """ def __init__(self, - mechanism, # Mechanism to be wrapped - composition, # Composition to which node belongs (used for execution of nested Compositions) - component_idx, # index of the Mechanism in the Composition - device, # needed for Pytorch + mechanism, # Mechanism to be wrapped + composition_wrapper, # Composition wrapper to which node belongs (for executing nested Compositions) + component_idx, # index of the Mechanism in the Composition + device, # needed for Pytorch context=None): + # # MODIFIED 7/10/24 NEW: NEEDED FOR torch MPS SUPPORT + # super().__init__() + # MODIFIED 7/10/24 END + self.name = f"PytorchMechanismWrapper[{mechanism.name}]" self._mechanism = mechanism self._idx = component_idx self._context = context @@ -615,15 +909,17 @@ def __init__(self, self._is_bias = False self._curr_sender_value = None # Used to assign initializer or default if value == None (i.e., not yet executed) self.exclude_from_gradient_calc = False # Used to execute node before or after forward/backward pass methods - self.wrapper_type = composition + self._composition_wrapper_owner = composition_wrapper + + self.input = None + self.output = None - self.name = f"PytorchMechanismWrapper[{mechanism.name}]" - self.afferents = [] - self.efferents = [] if mechanism.parameters.has_initializers._get(context) and mechanism.parameters.value.initializer: - self.default_value = mechanism.parameters.value.initializer.get(context) + self.default_output = mechanism.parameters.value.initializer.get(context) else: - self.default_value = mechanism.defaults.value + self.default_output = mechanism.defaults.value + self.afferents = [] + self.efferents = [] from psyneulink.core.components.functions.function import FunctionError from psyneulink.library.compositions.autodiffcomposition import AutodiffCompositionError @@ -640,8 +936,6 @@ def __init__(self, except: raise AutodiffCompositionError(f"Function {pnl_fct} is not currently supported by AutodiffComposition") - self.value = None - self._target_mechanism = None def add_efferent(self, efferent): """Add ProjectionWrapper for efferent from MechanismWrapper. @@ -667,9 +961,9 @@ def aggregate_afferents(self, port=None): f"PROGRAM ERROR: No afferents found for '{self._mechanism.name}' in AutodiffComposition" for proj_wrapper in self.afferents: - curr_val = proj_wrapper.sender.value + curr_val = proj_wrapper.sender.output if curr_val is not None: - proj_wrapper._curr_sender_value = proj_wrapper.sender.value[proj_wrapper._value_idx] + proj_wrapper._curr_sender_value = proj_wrapper.sender.output[proj_wrapper._value_idx] else: proj_wrapper._curr_sender_value = torch.tensor(proj_wrapper.default_value) @@ -695,7 +989,7 @@ def aggregate_afferents(self, port=None): def execute(self, variable, context): """Execute Mechanism's _gen_pytorch version of function on variable. 
- Enforce result to be 2d, and assign to self.value + Enforce result to be 2d, and assign to self.output """ def execute_function(function, variable, fct_has_mult_args=False, is_combination_fct=False): """Execute _gen_pytorch_fct on variable, enforce result to be 2d, and return it @@ -705,8 +999,8 @@ def execute_function(function, variable, fct_has_mult_args=False, is_combination if ((isinstance(variable, list) and len(variable) == 1) or (isinstance(variable, torch.Tensor) and len(variable.squeeze(0).shape) == 1) or isinstance(self._mechanism.function, LinearCombination)): - # Enforce 2d on value of MechanismWrapper (using unsqueeze) - # for single InputPort or if CombinationFunction (which reduces output to single item from multi-item input) + # Enforce 2d on value of MechanismWrapper (using unsqueeze) for single InputPort + # or if CombinationFunction (which reduces output to single item from multi-item input) if isinstance(variable, torch.Tensor): variable = variable.squeeze(0) return function(variable).unsqueeze(0) @@ -730,16 +1024,14 @@ def execute_function(function, variable, fct_has_mult_args=False, is_combination fct_has_mult_args=True) # Keep track of previous value in Pytorch node for use in next forward pass self.integrator_previous_value = variable + + self.input = variable + # Compute main function of mechanism and return result from psyneulink.core.components.functions.nonstateful.combinationfunctions import CombinationFunction - self.value = execute_function(self.function, variable, + self.output = execute_function(self.function, variable, is_combination_fct=isinstance(self._mechanism.function, CombinationFunction)) - # Assign previous_value back to integrator_function of pnl node - # so that if Python implementation is run it picks up where PyTorch execution left off - if isinstance(self._mechanism.function, IntegratorFunction): - self._mechanism.integrator_function.parameters.previous_value._set(self.value, context) - - return self.value + return self.output def _gen_llvm_execute(self, ctx, builder, state, params, mech_input, data): mech_func = ctx.import_llvm_function(self._mechanism) @@ -761,13 +1053,16 @@ def _gen_llvm_execute(self, ctx, builder, state, params, mech_input, data): mech_input, mech_output]) - pnlvm.helpers.printf_float_array(builder, builder.gep(mech_output, [ctx.int32_ty(0), ctx.int32_ty(0)]), prefix=f"{self} output:\n", override_debug=False) + pnlvm.helpers.printf_float_array(builder, + builder.gep(mech_output, [ctx.int32_ty(0), ctx.int32_ty(0)]), + prefix=f"{self} output:\n", + override_debug=False) return mech_output def log_value(self): if self._mechanism.parameters.value.log_condition != LogCondition.OFF: - detached_value = self.value.detach().cpu().numpy() + detached_value = self.output.detach().cpu().numpy() self._mechanism.output_port.parameters.value._set(detached_value, self._context) self._mechanism.parameters.value._set(detached_value, self._context) @@ -825,6 +1120,19 @@ class PytorchProjectionWrapper(): actually being learned, and that projection will be referenced in the `PytorchCompositionWrapper.projections_map` (see `PytorchCompositionWrapper` for descriptive figure and additional details); the actual projection is stored in pnl_proj. + + Attributes + ---------- + + _projection : Projection + PsyNeuLink `Projection` being wrapped. + + sender : PytorchMechanismWrapper + the PytorchMechanismWrapper node from which the PytorchProjectionWrapper receives its variable. 
+ + receiver : PytorchMechanismWrapper + the PytorchMechanismWrapper node from which the PytorchProjectionWrapper sends it value. + """ def __init__(self, projection, @@ -914,9 +1222,15 @@ def _gen_llvm_execute(self, ctx, builder, state, params, data): output_vec = gen_inject_vxm(ctx, builder, input_vec, proj_matrix) - pnlvm.helpers.printf_float_array(builder, input_vec, prefix=f"{self.sender._mechanism} -> {self.receiver._mechanism} input:\n", override_debug=False) - pnlvm.helpers.printf_float_matrix(builder, proj_matrix, prefix=f"{self.sender._mechanism} -> {self.receiver._mechanism} mat:\n", override_debug=False) - pnlvm.helpers.printf_float_array(builder, output_vec, prefix=f"{self.sender._mechanism} -> {self.receiver._mechanism} output:\n", override_debug=False) + pnlvm.helpers.printf_float_array(builder, input_vec, + prefix=f"{self.sender._mechanism} -> {self.receiver._mechanism} input:\n", + override_debug=False) + pnlvm.helpers.printf_float_matrix(builder, proj_matrix, + prefix=f"{self.sender._mechanism} -> {self.receiver._mechanism} mat:\n", + override_debug=False) + pnlvm.helpers.printf_float_array(builder, output_vec, + prefix=f"{self.sender._mechanism} -> {self.receiver._mechanism} output:\n", + override_debug=False) return output_vec diff --git a/tests/composition/test_autodiffcomposition.py b/tests/composition/test_autodiffcomposition.py index 2c561936bd0..858390f6581 100644 --- a/tests/composition/test_autodiffcomposition.py +++ b/tests/composition/test_autodiffcomposition.py @@ -27,6 +27,7 @@ # or override functions in Composition def _single_learn_results(composition, *args, **kwargs): + kwargs['synch_results_with_torch'] = 'run' composition.learn(*args, **kwargs) return composition.learning_results @@ -607,6 +608,8 @@ def test_pytorch_equivalence_with_autodiff_composition(self, autodiff_mode): D_h = nh D_o = nf * nd + np.random.seed(0) + wih = np.random.rand(D_i, D_h) * 0.02 - 0.01 wch = np.random.rand(D_c, D_h) * 0.02 - 0.01 wco = np.random.rand(D_c, D_o) * 0.02 - 0.01 @@ -617,7 +620,7 @@ def test_pytorch_equivalence_with_autodiff_composition(self, autodiff_mode): learning_rate = 100 il = TransferMechanism(size=D_i, name='input') - cl = TransferMechanism(size=D_c, name='control') + cl = TransferMechanism(size=D_c, name='task') hl = TransferMechanism(size=D_h, name='hidden', function=Logistic(bias=-2)) ol = TransferMechanism(size=D_o, name='output', @@ -710,7 +713,7 @@ def test_pytorch_equivalence_with_autodiff_composition(self, autodiff_mode): np.testing.assert_allclose(output,comparator, atol=1e-6) - def test_pytorch_equivalence_with_autodiff_training_disabled_on_proj(self): + def test_pytorch_equivalence_with_autodiff_forward_disabled_on_proj(self): iSs = np.array( [np.array([0.47360805, 0.8009108, 0.5204775, 0.53737324, 0.7586156, 0.1059076, 0.9025985, 0.44994998, 0.61306345, 0.75068617, @@ -831,7 +834,7 @@ def test_pytorch_equivalence_with_autodiff_training_disabled_on_proj(self): learning_rate = 100 il = TransferMechanism(size=D_i, name='input') - cl = TransferMechanism(size=D_c, name='control') + cl = TransferMechanism(size=D_c, name='task') hl = TransferMechanism(size=D_h, name='hidden', function=Logistic(bias=-2)) ol = TransferMechanism(size=D_o, name='output', @@ -871,10 +874,8 @@ def test_pytorch_equivalence_with_autodiff_training_disabled_on_proj(self): min_delta=min_delt, execution_mode=pnl.ExecutionMode.PyTorch, ) - - print(mnet.parameters.results.get(mnet)) mnet.run( - inputs=input_set['inputs'], + inputs=input_set['inputs'] ) output = 
np.array(mnet.parameters.results.get(mnet)[-15:]).reshape(225) @@ -3578,6 +3579,9 @@ def test_autodiff_logging(self): xor.learn(inputs={"inputs": {xor_in: xor_inputs}, "targets": {xor_out: xor_targets}, "epochs": num_epochs}, + synch_projection_matrices_with_torch=pnl.MINIBATCH, + synch_results_with_torch=pnl.MINIBATCH, + # synch_results_with_torch=pnl.RUN, execution_mode=pnl.ExecutionMode.PyTorch) exec_id = xor.default_execution_id @@ -3661,7 +3665,7 @@ def test_autodiff_loss_tracking(self): # and minibatch_size is 1, then there should be num_epochs * num_minibatches = num_epochs * 4 # total entries expected_loss_length = num_epochs * len(xor_inputs) - assert len(losses) == expected_loss_length + assert len(xor.torch_losses) == expected_loss_length # test clearing ad losses xor.clear_losses(context=xor) @@ -3934,8 +3938,8 @@ def test_cross_entropy_loss(self): # classes = torch.Tensor([2, 1]) # target = torch.Tensor([1]) # # Equation for loss taken from https://pytorch.org/docs/stable/nn.html#torch.nn.CrossEntropyLoss - # assert np.allclose(adc.loss(classes, target).detach().numpy(), -1 + np.log(np.exp(2) + np.exp(1))) - # assert np.allclose(adc.loss(output, target).detach().numpy(), -1 + np.log(np.exp(2) + np.exp(1))) + # assert np.allclose(adc.loss_function(classes, target).detach().numpy(), -1 + np.log(np.exp(2) + np.exp(1))) + # assert np.allclose(adc.loss_function(output, target).detach().numpy(), -1 + np.log(np.exp(2) + np.exp(1))) # Current implementation uses one-hot target specification: output = [2,1] @@ -3957,6 +3961,6 @@ def test_cross_entropy_loss(self): output = torch.Tensor(output) target = torch.Tensor(target) - ce_torch = adc.loss(output, target).detach().numpy() + ce_torch = adc.loss_function(output, target).detach().numpy() np.testing.assert_allclose(ce_numpy, ce_torch) diff --git a/tests/composition/test_emcomposition.py b/tests/composition/test_emcomposition.py index b42a8eab28b..ecfc2f2ef19 100644 --- a/tests/composition/test_emcomposition.py +++ b/tests/composition/test_emcomposition.py @@ -480,11 +480,11 @@ def test_multiple_trials_concatenation_and_storage_node(self, exec_mode, concate assert "EMComposition does not support learning with 'concatenate_keys'=True." in str(error.value) else: - if exec_mode == pnl.ExecutionMode.Python: - # FIX: Not sure why Python mode reverses last two rows/entries (dict issue?) - expected_memory = [[[0.15625, 0.3125, 0.46875], [0.171875, 0.328125, 0.484375]], - [[400., 500., 600.], [444., 555., 666.]], - [[25., 50., 75.], [27.75, 55.5, 83.25]], - [[2.5, 3.125, 3.75 ], [2.5625, 3.1875, 3.8125]]] + # if exec_mode == pnl.ExecutionMode.Python: + # # FIX: Not sure why Python mode reverses last two rows/entries (dict issue?) 
+ expected_memory = [[[0.15625, 0.3125, 0.46875], [0.171875, 0.328125, 0.484375]], + [[400., 500., 600.], [444., 555., 666.]], + [[25., 50., 75.], [27.75, 55.5, 83.25]], + [[2.5, 3.125, 3.75 ], [2.5625, 3.1875, 3.8125]]] em.learn(inputs=inputs, execution_mode=exec_mode) np.testing.assert_equal(em.memory, expected_memory) diff --git a/tests/composition/test_report.py b/tests/composition/test_report.py index b6a9ecccb54..cdf4cc36e28 100644 --- a/tests/composition/test_report.py +++ b/tests/composition/test_report.py @@ -588,6 +588,8 @@ def test_autodiff_report(self): xor.learn(inputs= training_inputs, + synch_node_variables_with_torch=pnl.TRIAL, + synch_node_values_with_torch=pnl.TRIAL, report_output=ReportOutput.OFF, report_progress=ReportProgress.ON, report_to_devices=ReportDevices.DIVERT, @@ -627,9 +629,10 @@ def test_autodiff_report(self): report_output=ReportOutput.FULL, report_progress=ReportProgress.ON, report_to_devices=ReportDevices.DIVERT, + synch_node_values_with_torch='trial', execution_mode=pnl.ExecutionMode.PyTorch) actual_report = xor.rich_diverted_reports - expected_report = '\n ╔══ EXECUTION OF autodiff_composition ═══╗\n ║ ║\n ║ ║\n ║ ┏━ autodiff_composition: Trial 0 ━┓ ║\n ║ ┃ ┃ ║\n ║ ┃ input: [[1.0, 1.0], [0.0]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┃ result: [[0.9933057795354014]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛ ║\n ║ ║\n ║ ┏━ autodiff_composition: Trial 1 ━┓ ║\n ║ ┃ ┃ ║\n ║ ┃ input: [[1.0, 1.0], [0.0]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┃ result: [[0.999331787548446]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛ ║\n ║ ║\n ║ ┏━ autodiff_composition: Trial 2 ━┓ ║\n ║ ┃ ┃ ║\n ║ ┃ input: [[1.0, 1.0], [0.0]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┃ result: [[0.9993317875516309]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛ ║\n ║ ║\n ║ ┏━ autodiff_composition: Trial 3 ━┓ ║\n ║ ┃ ┃ ║\n ║ ┃ input: [[1.0, 1.0], [0.0]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┃ result: [[0.9998504229552773]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛ ║\n ║ ║\n ║ ┏━ autodiff_composition: Trial 4 ━┓ ║\n ║ ┃ ┃ ║\n ║ ┃ input: [[1.0, 1.0], [0.0]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┃ result: [[0.9933055512239266]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛ ║\n ║ ║\n ║ ┏━ autodiff_composition: Trial 5 ━┓ ║\n ║ ┃ ┃ ║\n ║ ┃ input: [[1.0, 1.0], [0.0]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┃ result: [[0.9993317539824547]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛ ║\n ║ ║\n ║ ┏━ autodiff_composition: Trial 6 ━┓ ║\n ║ ┃ ┃ ║\n ║ ┃ input: [[1.0, 1.0], [0.0]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┃ result: [[0.9993317539856401]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛ ║\n ║ ║\n ║ ┏━ autodiff_composition: Trial 7 ━┓ ║\n ║ ┃ ┃ ║\n ║ ┃ input: [[1.0, 1.0], [0.0]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┃ result: [[0.9998504138991838]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛ ║\n ║ ║\n ║ ┏━ autodiff_composition: Trial 8 ━┓ ║\n ║ ┃ ┃ ║\n ║ ┃ input: [[1.0, 1.0], [0.0]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┃ result: [[0.9933053228968025]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛ ║\n ║ ║\n ║ ┏━ autodiff_composition: Trial 9 ━┓ ║\n ║ ┃ ┃ ║\n ║ ┃ input: [[1.0, 1.0], [0.0]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┃ result: [[0.9993317204136144]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛ ║\n ║ ║\n ║ ┏━ autodiff_composition: Trial 10 ━┓ ║\n ║ ┃ ┃ ║\n ║ ┃ input: [[1.0, 1.0], [0.0]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┃ result: [[0.9993317204168003]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛ ║\n ║ ║\n ║ ┏━ autodiff_composition: Trial 11 ━┓ ║\n ║ ┃ ┃ ║\n ║ ┃ input: [[1.0, 1.0], [0.0]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┃ result: [[0.999850404842228]] ┃ ║\n ║ ┃ ┃ ║\n ║ 
┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛ ║\n ║ ║\n ╚═════════════════════════════════════════╝\n\nautodiff_composition: Trained 12 trials\n' + expected_report = '\n ╔══ EXECUTION OF autodiff_composition ═══╗\n ║ ║\n ║ ║\n ║ ┏━ autodiff_composition: Trial 0 ━┓ ║\n ║ ┃ ┃ ║\n ║ ┃ input: [[1.0, 1.0], [0.0]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┃ result: [[0.9998504316537016]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛ ║\n ║ ║\n ║ ┏━ autodiff_composition: Trial 1 ━┓ ║\n ║ ┃ ┃ ║\n ║ ┃ input: [[1.0, 1.0], [0.0]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┃ result: [[0.9933057795354014]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛ ║\n ║ ║\n ║ ┏━ autodiff_composition: Trial 2 ━┓ ║\n ║ ┃ ┃ ║\n ║ ┃ input: [[1.0, 1.0], [0.0]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┃ result: [[0.999331787548446]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛ ║\n ║ ║\n ║ ┏━ autodiff_composition: Trial 3 ━┓ ║\n ║ ┃ ┃ ║\n ║ ┃ input: [[1.0, 1.0], [0.0]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┃ result: [[0.9993317875516309]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛ ║\n ║ ║\n ║ ┏━ autodiff_composition: Trial 4 ━┓ ║\n ║ ┃ ┃ ║\n ║ ┃ input: [[1.0, 1.0], [0.0]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┃ result: [[0.9998504229552773]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛ ║\n ║ ║\n ║ ┏━ autodiff_composition: Trial 5 ━┓ ║\n ║ ┃ ┃ ║\n ║ ┃ input: [[1.0, 1.0], [0.0]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┃ result: [[0.9933055512239266]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛ ║\n ║ ║\n ║ ┏━ autodiff_composition: Trial 6 ━┓ ║\n ║ ┃ ┃ ║\n ║ ┃ input: [[1.0, 1.0], [0.0]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┃ result: [[0.9993317539824547]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛ ║\n ║ ║\n ║ ┏━ autodiff_composition: Trial 7 ━┓ ║\n ║ ┃ ┃ ║\n ║ ┃ input: [[1.0, 1.0], [0.0]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┃ result: [[0.9993317539856401]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛ ║\n ║ ║\n ║ ┏━ autodiff_composition: Trial 8 ━┓ ║\n ║ ┃ ┃ ║\n ║ ┃ input: [[1.0, 1.0], [0.0]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┃ result: [[0.9998504138991838]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛ ║\n ║ ║\n ║ ┏━ autodiff_composition: Trial 9 ━┓ ║\n ║ ┃ ┃ ║\n ║ ┃ input: [[1.0, 1.0], [0.0]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┃ result: [[0.9933053228968025]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛ ║\n ║ ║\n ║ ┏━ autodiff_composition: Trial 10 ━┓ ║\n ║ ┃ ┃ ║\n ║ ┃ input: [[1.0, 1.0], [0.0]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┃ result: [[0.9993317204136144]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛ ║\n ║ ║\n ║ ┏━ autodiff_composition: Trial 11 ━┓ ║\n ║ ┃ ┃ ║\n ║ ┃ input: [[1.0, 1.0], [0.0]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┃ result: [[0.9993317204168003]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛ ║\n ║ ║\n ╚═════════════════════════════════════════╝\n\nautodiff_composition: Trained 12 trials\n' assert actual_report == expected_report xor.run(inputs={xor_in:xor_inputs}, @@ -667,7 +670,7 @@ def test_autodiff_report(self): execution_mode=pnl.ExecutionMode.PyTorch) actual_report = xor.rich_diverted_reports # expected_report = '\n ╔══ EXECUTION OF autodiff_composition ═══╗\n ║ ║\n ║ ║\n ║ ┏━ autodiff_composition: Trial 0 ━┓ ║\n ║ ┃ ┃ ║\n ║ ┃ input: [[1.0, 1.0], [0.5]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┃ result: [[0.9933044094317858]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛ ║\n ║ ║\n ║ ┏━ autodiff_composition: Trial 1 ━┓ ║\n ║ ┃ ┃ ║\n ║ ┃ input: [[1.0, 1.0], [0.5]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┃ result: [[0.9993315861097587]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛ ║\n ║ ║\n ║ ┏━ autodiff_composition: Trial 2 ━┓ ║\n ║ ┃ ┃ ║\n ║ ┃ input: [[1.0, 1.0], [0.5]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┃ result: [[0.9993315861129465]] ┃ ║\n ║ ┃ ┃ 
║\n ║ ┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛ ║\n ║ ║\n ║ ┏━ autodiff_composition: Trial 3 ━┓ ║\n ║ ┃ ┃ ║\n ║ ┃ input: [[1.0, 1.0], [0.5]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┃ result: [[0.9998503686057807]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛ ║\n ║ ║\n ║ ┏━ autodiff_composition: Trial 4 ━┓ ║\n ║ ┃ ┃ ║\n ║ ┃ input: [[1.0, 1.0], [0.5]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┃ result: [[0.9933041810263933]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛ ║\n ║ ║\n ║ ┏━ autodiff_composition: Trial 5 ━┓ ║\n ║ ┃ ┃ ║\n ║ ┃ input: [[1.0, 1.0], [0.5]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┃ result: [[0.999331552526669]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛ ║\n ║ ║\n ║ ┏━ autodiff_composition: Trial 6 ━┓ ║\n ║ ┃ ┃ ║\n ║ ┃ input: [[1.0, 1.0], [0.5]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┃ result: [[0.9993315525298574]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛ ║\n ║ ║\n ║ ┏━ autodiff_composition: Trial 7 ━┓ ║\n ║ ┃ ┃ ║\n ║ ┃ input: [[1.0, 1.0], [0.5]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┃ result: [[0.9998503595445122]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛ ║\n ║ ║\n ║ ┏━ autodiff_composition: Trial 8 ━┓ ║\n ║ ┃ ┃ ║\n ║ ┃ input: [[1.0, 1.0], [0.5]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┃ result: [[0.9933039526053421]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛ ║\n ║ ║\n ║ ┏━ autodiff_composition: Trial 9 ━┓ ║\n ║ ┃ ┃ ║\n ║ ┃ input: [[1.0, 1.0], [0.5]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┃ result: [[0.9993315189407287]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛ ║\n ║ ║\n ║ ┏━ autodiff_composition: Trial 10 ━┓ ║\n ║ ┃ ┃ ║\n ║ ┃ input: [[1.0, 1.0], [0.5]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┃ result: [[0.9993315189439175]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛ ║\n ║ ║\n ║ ┏━ autodiff_composition: Trial 11 ━┓ ║\n ║ ┃ ┃ ║\n ║ ┃ input: [[1.0, 1.0], [0.5]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┃ result: [[0.9998503504823807]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛ ║\n ║ ║\n ╚═════════════════════════════════════════╝\n\n' - expected_report = '\n ╔══ EXECUTION OF autodiff_composition ═══╗\n ║ ║\n ║ ║\n ║ ┏━ autodiff_composition: Trial 0 ━┓ ║\n ║ ┃ ┃ ║\n ║ ┃ input: [[1.0, 1.0], [0.0]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┃ result: [[0.9933044094317858]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛ ║\n ║ ║\n ║ ┏━ autodiff_composition: Trial 1 ━┓ ║\n ║ ┃ ┃ ║\n ║ ┃ input: [[1.0, 1.0], [0.0]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┃ result: [[0.9993315861097587]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛ ║\n ║ ║\n ║ ┏━ autodiff_composition: Trial 2 ━┓ ║\n ║ ┃ ┃ ║\n ║ ┃ input: [[1.0, 1.0], [0.0]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┃ result: [[0.9993315861129465]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛ ║\n ║ ║\n ║ ┏━ autodiff_composition: Trial 3 ━┓ ║\n ║ ┃ ┃ ║\n ║ ┃ input: [[1.0, 1.0], [0.0]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┃ result: [[0.9998503686057807]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛ ║\n ║ ║\n ║ ┏━ autodiff_composition: Trial 4 ━┓ ║\n ║ ┃ ┃ ║\n ║ ┃ input: [[1.0, 1.0], [0.0]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┃ result: [[0.9933041810263933]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛ ║\n ║ ║\n ║ ┏━ autodiff_composition: Trial 5 ━┓ ║\n ║ ┃ ┃ ║\n ║ ┃ input: [[1.0, 1.0], [0.0]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┃ result: [[0.999331552526669]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛ ║\n ║ ║\n ║ ┏━ autodiff_composition: Trial 6 ━┓ ║\n ║ ┃ ┃ ║\n ║ ┃ input: [[1.0, 1.0], [0.0]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┃ result: [[0.9993315525298574]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛ ║\n ║ ║\n ║ ┏━ autodiff_composition: Trial 7 ━┓ ║\n ║ ┃ ┃ ║\n ║ ┃ input: [[1.0, 1.0], [0.0]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┃ result: [[0.9998503595445122]] ┃ ║\n ║ ┃ ┃ ║\n ║ 
┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛ ║\n ║ ║\n ║ ┏━ autodiff_composition: Trial 8 ━┓ ║\n ║ ┃ ┃ ║\n ║ ┃ input: [[1.0, 1.0], [0.0]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┃ result: [[0.9933039526053421]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛ ║\n ║ ║\n ║ ┏━ autodiff_composition: Trial 9 ━┓ ║\n ║ ┃ ┃ ║\n ║ ┃ input: [[1.0, 1.0], [0.0]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┃ result: [[0.9993315189407287]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛ ║\n ║ ║\n ║ ┏━ autodiff_composition: Trial 10 ━┓ ║\n ║ ┃ ┃ ║\n ║ ┃ input: [[1.0, 1.0], [0.0]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┃ result: [[0.9993315189439175]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛ ║\n ║ ║\n ║ ┏━ autodiff_composition: Trial 11 ━┓ ║\n ║ ┃ ┃ ║\n ║ ┃ input: [[1.0, 1.0], [0.0]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┃ result: [[0.9998503504823807]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛ ║\n ║ ║\n ╚═════════════════════════════════════════╝\n\n' + expected_report = '\n ╔══ EXECUTION OF autodiff_composition ═══╗\n ║ ║\n ║ ║\n ║ ┏━ autodiff_composition: Trial 0 ━┓ ║\n ║ ┃ ┃ ║\n ║ ┃ input: [[1.0, 1.0], [0.0]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┃ result: [[0.9998503773091209]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛ ║\n ║ ║\n ║ ┏━ autodiff_composition: Trial 1 ━┓ ║\n ║ ┃ ┃ ║\n ║ ┃ input: [[1.0, 1.0], [0.0]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┃ result: [[0.9998503773091209]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛ ║\n ║ ║\n ║ ┏━ autodiff_composition: Trial 2 ━┓ ║\n ║ ┃ ┃ ║\n ║ ┃ input: [[1.0, 1.0], [0.0]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┃ result: [[0.9998503773091209]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛ ║\n ║ ║\n ║ ┏━ autodiff_composition: Trial 3 ━┓ ║\n ║ ┃ ┃ ║\n ║ ┃ input: [[1.0, 1.0], [0.0]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┃ result: [[0.9998503773091209]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛ ║\n ║ ║\n ║ ┏━ autodiff_composition: Trial 4 ━┓ ║\n ║ ┃ ┃ ║\n ║ ┃ input: [[1.0, 1.0], [0.0]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┃ result: [[0.9998503773091209]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛ ║\n ║ ║\n ║ ┏━ autodiff_composition: Trial 5 ━┓ ║\n ║ ┃ ┃ ║\n ║ ┃ input: [[1.0, 1.0], [0.0]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┃ result: [[0.9998503773091209]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛ ║\n ║ ║\n ║ ┏━ autodiff_composition: Trial 6 ━┓ ║\n ║ ┃ ┃ ║\n ║ ┃ input: [[1.0, 1.0], [0.0]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┃ result: [[0.9998503773091209]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛ ║\n ║ ║\n ║ ┏━ autodiff_composition: Trial 7 ━┓ ║\n ║ ┃ ┃ ║\n ║ ┃ input: [[1.0, 1.0], [0.0]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┃ result: [[0.9998503773091209]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛ ║\n ║ ║\n ║ ┏━ autodiff_composition: Trial 8 ━┓ ║\n ║ ┃ ┃ ║\n ║ ┃ input: [[1.0, 1.0], [0.0]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┃ result: [[0.9998503773091209]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛ ║\n ║ ║\n ║ ┏━ autodiff_composition: Trial 9 ━┓ ║\n ║ ┃ ┃ ║\n ║ ┃ input: [[1.0, 1.0], [0.0]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┃ result: [[0.9998503773091209]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛ ║\n ║ ║\n ║ ┏━ autodiff_composition: Trial 10 ━┓ ║\n ║ ┃ ┃ ║\n ║ ┃ input: [[1.0, 1.0], [0.0]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┃ result: [[0.9998503773091209]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛ ║\n ║ ║\n ║ ┏━ autodiff_composition: Trial 11 ━┓ ║\n ║ ┃ ┃ ║\n ║ ┃ input: [[1.0, 1.0], [0.0]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┃ result: [[0.9998503773091209]] ┃ ║\n ║ ┃ ┃ ║\n ║ ┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛ ║\n ║ ║\n ╚═════════════════════════════════════════╝\n\n' assert actual_report == expected_report xor.run(inputs={xor_in:xor_inputs}, From 
c52c3d1754aa1e5cb413bd03723e01996de8ca9e Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Mon, 19 Aug 2024 18:07:38 -0400 Subject: [PATCH 310/410] llvm/LLVMBinaryFunction: Enforce contiguous arrays as Numpy inputs Signed-off-by: Jan Vesely --- psyneulink/core/llvm/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/psyneulink/core/llvm/__init__.py b/psyneulink/core/llvm/__init__.py index 5a9788102f4..b4e082febaf 100644 --- a/psyneulink/core/llvm/__init__.py +++ b/psyneulink/core/llvm/__init__.py @@ -154,7 +154,7 @@ def __init__(self, name: str, *, ctype_ptr_args=()): for i, arg in enumerate(self.np_arg_dtypes): if i not in ctype_ptr_args and self.byref_arg_types[i] is not None: - args[i] = np.ctypeslib.ndpointer(dtype=arg.base, shape=arg.shape) + args[i] = np.ctypeslib.ndpointer(dtype=arg.base, shape=arg.shape, flags='C_CONTIGUOUS') middle = time.perf_counter() self.__c_func_type = ctypes.CFUNCTYPE(return_type, *args) From bcbc500e456c5e14dc6254c216aae221d99e4bc3 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Tue, 20 Aug 2024 13:02:14 -0400 Subject: [PATCH 311/410] llvm/LLVMBinaryFunction: Add support for dynamically sized inputs Numpy ndarray only tests exact match for provided attributes. Replace shape check with ndim for dynamically sized arguments. Signed-off-by: Jan Vesely --- psyneulink/core/llvm/__init__.py | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/psyneulink/core/llvm/__init__.py b/psyneulink/core/llvm/__init__.py index b4e082febaf..34e4ade75cf 100644 --- a/psyneulink/core/llvm/__init__.py +++ b/psyneulink/core/llvm/__init__.py @@ -123,7 +123,7 @@ def _llvm_build(target_generation=_binary_generation + 1): class LLVMBinaryFunction: - def __init__(self, name: str, *, ctype_ptr_args=()): + def __init__(self, name: str, *, ctype_ptr_args:tuple=(), dynamic_size_args:tuple=()): self.name = name self.__c_func = None @@ -154,7 +154,10 @@ def __init__(self, name: str, *, ctype_ptr_args=()): for i, arg in enumerate(self.np_arg_dtypes): if i not in ctype_ptr_args and self.byref_arg_types[i] is not None: - args[i] = np.ctypeslib.ndpointer(dtype=arg.base, shape=arg.shape, flags='C_CONTIGUOUS') + if i in dynamic_size_args: + args[i] = np.ctypeslib.ndpointer(dtype=arg.base, ndim=len(arg.shape) + 1, flags='C_CONTIGUOUS') + else: + args[i] = np.ctypeslib.ndpointer(dtype=arg.base, shape=arg.shape, flags='C_CONTIGUOUS') middle = time.perf_counter() self.__c_func_type = ctypes.CFUNCTYPE(return_type, *args) @@ -233,14 +236,14 @@ def np_buffer_for_arg(self, arg_num, *, extra_dimensions=(), fill_value=np.nan): @staticmethod @functools.lru_cache(maxsize=32) - def from_obj(obj, *, tags:frozenset=frozenset(), ctype_ptr_args:tuple=()): + def from_obj(obj, *, tags:frozenset=frozenset(), ctype_ptr_args:tuple=(), dynamic_size_args:tuple=()): name = LLVMBuilderContext.get_current().gen_llvm_function(obj, tags=tags).name - return LLVMBinaryFunction.get(name, ctype_ptr_args=ctype_ptr_args) + return LLVMBinaryFunction.get(name, ctype_ptr_args=ctype_ptr_args, dynamic_size_args=dynamic_size_args) @staticmethod @functools.lru_cache(maxsize=32) - def get(name: str, *, ctype_ptr_args:tuple=()): - return LLVMBinaryFunction(name, ctype_ptr_args=ctype_ptr_args) + def get(name: str, *, ctype_ptr_args:tuple=(), dynamic_size_args:tuple=()): + return LLVMBinaryFunction(name, ctype_ptr_args=ctype_ptr_args, dynamic_size_args=dynamic_size_args) _cpu_engine = None From 7a01378cee2257da029af54570197fe8e5b3ab4d Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Tue, 13 
Aug 2024 22:01:26 -0400 Subject: [PATCH 312/410] llvm/execution: Use numpy arrays to return results of LLVM/PTX Run Signed-off-by: Jan Vesely --- psyneulink/core/llvm/execution.py | 34 ++++++++++--------------------- 1 file changed, 11 insertions(+), 23 deletions(-) diff --git a/psyneulink/core/llvm/execution.py b/psyneulink/core/llvm/execution.py index c49c801f0b0..2fec47fc47b 100644 --- a/psyneulink/core/llvm/execution.py +++ b/psyneulink/core/llvm/execution.py @@ -29,19 +29,6 @@ __all__ = ['CompExecution', 'FuncExecution', 'MechExecution'] -def _convert_ctype_to_python(x): - if isinstance(x, ctypes.Structure): - return [_convert_ctype_to_python(getattr(x, field_name)) for field_name, _ in x._fields_] - if isinstance(x, ctypes.Array): - return [_convert_ctype_to_python(el) for el in x] - if isinstance(x, (ctypes.c_double, ctypes.c_float)): - return x.value - if isinstance(x, (float, int)): - return x - - assert False, "Don't know how to convert: {}".format(x) - - def _tupleize(x): try: return tuple(_tupleize(y) for y in x) @@ -557,7 +544,8 @@ def _bin_run_func(self): if self.__bin_run_func is None: self.__bin_run_func = pnlvm.LLVMBinaryFunction.from_obj(self._composition, tags=self.__tags.union({"run"}), - ctype_ptr_args=(3, 4)) + ctype_ptr_args=(3,), + dynamic_size_args=(4,)) return self.__bin_run_func @@ -572,11 +560,11 @@ def _prepare_run(self, inputs, runs, num_input_sets): inputs = self._get_run_input_struct(inputs, num_input_sets) # Create output buffer - outputs = (self._bin_run_func.byref_arg_types[4] * runs)() + outputs = self._bin_func.np_buffer_for_arg(4, extra_dimensions=(runs,)) + assert ctypes.sizeof(self._bin_run_func.byref_arg_types[4]) * runs == outputs.nbytes if "stat" in self._debug_env: - print("Output struct size:", _pretty_size(ctypes.sizeof(outputs)), - "for", self._composition.name) + print("Output struct size:", _pretty_size(outputs.nbytes), "for", self._composition.name) runs_count = np.asarray(runs, dtype=np.uint32).copy() input_count = np.asarray(num_input_sets, dtype=np.uint32) @@ -584,34 +572,34 @@ def _prepare_run(self, inputs, runs, num_input_sets): return inputs, outputs, runs_count, input_count def run(self, inputs, runs, num_input_sets): - ct_inputs, ct_outputs, runs_count, input_count = self._prepare_run(inputs, runs, num_input_sets) + ct_inputs, outputs, runs_count, input_count = self._prepare_run(inputs, runs, num_input_sets) self._bin_run_func(self._state_struct, self._param_struct, self._data_struct, ct_inputs, - ct_outputs, + outputs, runs_count, input_count) # Extract only #trials elements in case the run exited early assert runs_count <= runs, "Composition ran more times than allowed!" 
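# Illustrative sketch (plain numpy/ctypes, not PsyNeuLink API; names below are assumed): the two
# preceding patches rely on np.ctypeslib.ndpointer, whose from_param() hook validates dtype,
# dimensionality and C-contiguity of an argument, which is what allows a preallocated numpy
# buffer (like the `outputs` array above) to be handed to a compiled function and filled in place.
import numpy as np

arg_type = np.ctypeslib.ndpointer(dtype=np.float64, ndim=2, flags='C_CONTIGUOUS')

buf = np.empty((5, 3))          # C-contiguous float64 buffer: accepted
arg_type.from_param(buf)

try:
    arg_type.from_param(buf.T)  # a transposed view is not C-contiguous: rejected
except TypeError as err:
    print("rejected:", err)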
- return _convert_ctype_to_python(ct_outputs)[0:runs_count] + return self._get_indexable(outputs[0:runs_count]) def cuda_run(self, inputs, runs, num_input_sets): - ct_inputs, ct_outputs, runs_count, input_count = self._prepare_run(inputs, runs, num_input_sets) + ct_inputs, outputs, runs_count, input_count = self._prepare_run(inputs, runs, num_input_sets) self._bin_run_func.cuda_call(self._cuda_state_struct, self._cuda_param_struct, self._cuda_data_struct, jit_engine.pycuda.driver.In(np.ctypeslib.as_array(ct_inputs)), - jit_engine.pycuda.driver.Out(np.ctypeslib.as_array(ct_outputs)), + jit_engine.pycuda.driver.Out(outputs), jit_engine.pycuda.driver.InOut(runs_count), jit_engine.pycuda.driver.In(input_count)) # Extract only #trials elements in case the run exited early assert runs_count <= runs, "Composition ran more times than allowed: {}".format(runs) - return _convert_ctype_to_python(ct_outputs)[0:runs_count] + return self._get_indexable(outputs[0:runs_count]) def _prepare_evaluate(self, inputs, num_input_sets, num_evaluations, all_results:bool): ocm = self._composition.controller From 7267a1a8cfc159fca8e8fc02838d49d88ff9aaf7 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Tue, 20 Aug 2024 22:08:15 -0400 Subject: [PATCH 313/410] llvm/execution: Use numpy arrays to return result of compiled evaluate Simplify optimization step of grid evaluate. Signed-off-by: Jan Vesely --- .../nonstateful/optimizationfunctions.py | 18 +++------ psyneulink/core/llvm/execution.py | 38 ++++++++++--------- 2 files changed, 26 insertions(+), 30 deletions(-) diff --git a/psyneulink/core/components/functions/nonstateful/optimizationfunctions.py b/psyneulink/core/components/functions/nonstateful/optimizationfunctions.py index bc4d323c606..649be335df9 100644 --- a/psyneulink/core/components/functions/nonstateful/optimizationfunctions.py +++ b/psyneulink/core/components/functions/nonstateful/optimizationfunctions.py @@ -2096,14 +2096,11 @@ def _function(self, # if ocm is not None and ocm.parameters.comp_execution_mode._get(context) in {"PTX", "LLVM"}: if ocm is not None and ocm.parameters.comp_execution_mode._get(context) in {"PTX", "LLVM"}: - ct_values = all_values - num_values = len(ct_values) - # Reduce array of values to min/max # select_min params are: - # params, state, min_sample_ptr, sample_ptr, min_value_ptr, value_ptr, opt_count_ptr, count + # params, state, min_sample_ptr, sample_ptr, min_value_ptr, value_ptr, opt_count_ptr, start, stop min_tags = frozenset({"select_min", "evaluate_type_objective"}) - bin_func = pnlvm.LLVMBinaryFunction.from_obj(self, tags=min_tags, ctype_ptr_args=(0, 1, 3, 5)) + bin_func = pnlvm.LLVMBinaryFunction.from_obj(self, tags=min_tags, ctype_ptr_args=(0, 1, 3), dynamic_size_args=(5,)) ct_param = bin_func.byref_arg_types[0](*self._get_param_initializer(context)) ct_state = bin_func.byref_arg_types[1](*self._get_state_initializer(context)) @@ -2114,15 +2111,12 @@ def _function(self, bin_func(ct_param, ct_state, optimal_sample, - None, # samples. NULL, it's generated by the function. + None, # samples. NULL, it's generated by the function. 
optimal_value, - ct_values, + all_values, number_of_optimal_values, - bin_func.c_func.argtypes[7](0), # start - bin_func.c_func.argtypes[8](num_values)) # stop - - # Convert outputs to Numpy/Python - all_values = np.ctypeslib.as_array(ct_values) + 0, # start + len(all_values)) # stop # Python version else: diff --git a/psyneulink/core/llvm/execution.py b/psyneulink/core/llvm/execution.py index 2fec47fc47b..e877bc0a5a7 100644 --- a/psyneulink/core/llvm/execution.py +++ b/psyneulink/core/llvm/execution.py @@ -606,7 +606,7 @@ def _prepare_evaluate(self, inputs, num_input_sets, num_evaluations, all_results eval_type = "evaluate_type_all_results" if all_results else "evaluate_type_objective" tags = {"evaluate", "alloc_range", eval_type} - bin_func = pnlvm.LLVMBinaryFunction.from_obj(ocm, tags=frozenset(tags), ctype_ptr_args=(4, 5)) + bin_func = pnlvm.LLVMBinaryFunction.from_obj(ocm, tags=frozenset(tags), ctype_ptr_args=(5,), dynamic_size_args=(4,)) self.__bin_func = bin_func # There are 8 arguments to evaluate_alloc_range: @@ -623,31 +623,31 @@ def _prepare_evaluate(self, inputs, num_input_sets, num_evaluations, all_results # Construct input variable, the 5th parameter of the evaluate function ct_inputs = self._get_run_input_struct(inputs, num_input_sets, 5) - # Output ctype - out_el_ty = bin_func.byref_arg_types[4] + # Output buffer + extra_dims = (num_evaluations,) if all_results: num_trials = ocm.parameters.num_trials_per_estimate.get(self._execution_context) - if num_trials is None: - num_trials = num_input_sets - out_el_ty *= num_trials - out_ty = out_el_ty * num_evaluations + assert num_trials is not None + extra_dims = extra_dims + (num_trials,) + + outputs = self._bin_func.np_buffer_for_arg(4, extra_dimensions=extra_dims) num_inputs = np.asarray(num_input_sets, dtype=np.uint32) if "stat" in self._debug_env: print("Evaluate result struct type size:", - _pretty_size(ctypes.sizeof(out_ty)), + _pretty_size(ctypes.sizeof(outputs.nbytes)), "( evaluations:", num_evaluations, "element size:", ctypes.sizeof(out_el_ty), ")", "for", self._obj.name) - return comp_params, comp_state, comp_data, ct_inputs, out_ty(), num_inputs + return comp_params, comp_state, comp_data, ct_inputs, outputs, num_inputs def cuda_evaluate(self, inputs, num_input_sets, num_evaluations, all_results:bool=False): - comp_params, comp_state, comp_data, ct_inputs, ct_results, num_inputs = \ + comp_params, comp_state, comp_data, ct_inputs, results, num_inputs = \ self._prepare_evaluate(inputs, num_input_sets, num_evaluations, all_results) cuda_args = (jit_engine.pycuda.driver.In(comp_params), jit_engine.pycuda.driver.In(comp_state), - jit_engine.pycuda.driver.Out(np.ctypeslib.as_array(ct_results)), # results + jit_engine.pycuda.driver.Out(results), # results jit_engine.pycuda.driver.In(np.ctypeslib.as_array(ct_inputs)), # inputs jit_engine.pycuda.driver.In(comp_data), # composition data jit_engine.pycuda.driver.In(num_inputs), # number of inputs @@ -655,10 +655,10 @@ def cuda_evaluate(self, inputs, num_input_sets, num_evaluations, all_results:boo self.__bin_func.cuda_call(*cuda_args, threads=int(num_evaluations)) - return ct_results + return results def thread_evaluate(self, inputs, num_input_sets, num_evaluations, all_results:bool=False): - comp_params, comp_state, comp_data, ct_inputs, ct_results, num_inputs = \ + comp_params, comp_state, comp_data, ct_inputs, outputs, num_inputs = \ self._prepare_evaluate(inputs, num_input_sets, num_evaluations, all_results) jobs = min(os.cpu_count(), num_evaluations) @@ -667,11 +667,13 @@ 
def thread_evaluate(self, inputs, num_input_sets, num_evaluations, all_results:b parallel_start = time.time() with concurrent.futures.ThreadPoolExecutor(max_workers=jobs) as ex: - # Create input and result typed casts once, they are the same - # for every submitted job. - results_arg = ctypes.cast(ct_results, self.__bin_func.c_func.argtypes[4]) + # Create input typed cast once, it is the same for every submitted job. input_arg = ctypes.cast(ct_inputs, self.__bin_func.c_func.argtypes[5]) + # numpy dynamic args expect only one extra dimension + output_arg = outputs.reshape(-1, *self.__bin_func.np_arg_dtypes[4].shape) + assert output_arg.base is outputs + # There are 8 arguments to evaluate_alloc_range: # comp_param, comp_state, from, to, results, input, comp_data, input length results = [ex.submit(self.__bin_func, @@ -679,7 +681,7 @@ def thread_evaluate(self, inputs, num_input_sets, num_evaluations, all_results:b comp_state, int(i * evals_per_job), min((i + 1) * evals_per_job, num_evaluations), - results_arg, + output_arg, input_arg, comp_data, num_inputs) @@ -695,4 +697,4 @@ def thread_evaluate(self, inputs, num_input_sets, num_evaluations, all_results:b exceptions = [r.exception() for r in results] assert all(e is None for e in exceptions), "Not all jobs finished sucessfully: {}".format(exceptions) - return ct_results + return outputs From 42f881f684185cbab27f1f46c014f38829461361 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 22 Aug 2024 22:59:19 -0400 Subject: [PATCH 314/410] requirements: update grpcio requirement from <1.66.0 to <1.67.0 (#3034) Updates the requirements on [grpcio](https://github.com/grpc/grpc) to permit the latest version. - [Release notes](https://github.com/grpc/grpc/releases) - [Changelog](https://github.com/grpc/grpc/blob/master/doc/grpc_release_schedule.md) - [Commits](https://github.com/grpc/grpc/compare/v0.65.0...v1.66.0) --- updated-dependencies: - dependency-name: grpcio dependency-type: direct:production ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 8966c89ce8d..229aa73383c 100644 --- a/requirements.txt +++ b/requirements.txt @@ -3,7 +3,7 @@ dill<0.3.9 fastkde>=1.0.24, <1.0.31 graph-scheduler>=1.2.1, <1.3.0 graphviz<0.21.0 -grpcio<1.66.0 +grpcio<1.67.0 leabra-psyneulink<0.3.3 llvmlite<0.44 matplotlib<3.7.6 From 1bea5a094cd328cc2902b12afc890b3e57eb190b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 30 Aug 2024 15:42:22 -0400 Subject: [PATCH 315/410] requirements: update jupyter requirement from <1.0.1 to <1.1.1 (#3039) Updates the requirements on [jupyter](http://jupyter.org) to permit the latest version. --- updated-dependencies: - dependency-name: jupyter dependency-type: direct:production ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- dev_requirements.txt | 2 +- tutorial_requirements.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/dev_requirements.txt b/dev_requirements.txt index afc8bbefd42..63d3c972956 100644 --- a/dev_requirements.txt +++ b/dev_requirements.txt @@ -1,4 +1,4 @@ -jupyter<1.0.1 +jupyter<1.1.1 packaging<25.0 pytest<8.3.3 pytest-benchmark<4.0.1 diff --git a/tutorial_requirements.txt b/tutorial_requirements.txt index 8f7bd2eaa14..20971d675e1 100644 --- a/tutorial_requirements.txt +++ b/tutorial_requirements.txt @@ -1,3 +1,3 @@ graphviz<0.21.0 -jupyter<1.0.1 +jupyter<1.1.1 matplotlib<3.7.6 From 9d1a34118fe6644d67a3b827c2809ed91fac34d8 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Fri, 30 Aug 2024 22:19:24 -0400 Subject: [PATCH 316/410] requirements: update torch requirement from >=1.8.0,<2.4.0 to >=1.8.0,<2.5.0 (#3040) Updates the requirements on [pytorch](https://github.com/pytorch/pytorch) to permit the latest version. - [Release notes](https://github.com/pytorch/pytorch/releases) - [Commits](https://github.com/pytorch/pytorch/compare/v2.4.0...v2.5.0) Signed-off-by: Jan Vesely --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 229aa73383c..58951c0a317 100644 --- a/requirements.txt +++ b/requirements.txt @@ -19,4 +19,4 @@ protobuf<3.20.4 rich>=10.1, <10.13 scipy>=1.7.3, <1.15 toposort<1.11 -torch>=1.10.0, <2.4.0; (platform_machine == 'AMD64' or platform_machine == 'x86_64' or platform_machine == 'arm64' or platform_machine == 'aarch64') and platform_python_implementation == 'CPython' and implementation_name == 'cpython' +torch>=1.10.0, <2.5.0; (platform_machine == 'AMD64' or platform_machine == 'x86_64' or platform_machine == 'arm64' or platform_machine == 'aarch64') and platform_python_implementation == 'CPython' and implementation_name == 'cpython' From 85333ef3cea57ec2b45b763d55fb39bef8e528f6 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 31 Aug 2024 00:54:29 -0400 Subject: [PATCH 317/410] requirements: update jupyter requirement from <1.1.1 to <1.1.2 (#3041) Updates the requirements on [jupyter](https://jupyter.org) to permit the latest version. --- updated-dependencies: - dependency-name: jupyter dependency-type: direct:production ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- dev_requirements.txt | 2 +- tutorial_requirements.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/dev_requirements.txt b/dev_requirements.txt index 63d3c972956..7ff58b0c3c8 100644 --- a/dev_requirements.txt +++ b/dev_requirements.txt @@ -1,4 +1,4 @@ -jupyter<1.1.1 +jupyter<1.1.2 packaging<25.0 pytest<8.3.3 pytest-benchmark<4.0.1 diff --git a/tutorial_requirements.txt b/tutorial_requirements.txt index 20971d675e1..7f6ba148e14 100644 --- a/tutorial_requirements.txt +++ b/tutorial_requirements.txt @@ -1,3 +1,3 @@ graphviz<0.21.0 -jupyter<1.1.1 +jupyter<1.1.2 matplotlib<3.7.6 From 0f01946e7e878adc99c07671e96b6bc41a5557b4 Mon Sep 17 00:00:00 2001 From: kmantel <1592123+kmantel@users.noreply.github.com> Date: Tue, 10 Sep 2024 21:17:22 -0400 Subject: [PATCH 318/410] IntegratorMechanism: correct broken size argument (#3046) size is used only as fallback for default_variable, but IntegratorMechanism always explicitly passed up a default_variable, even if not passed in during construction --- .../processing/integratormechanism.py | 4 ++++ tests/components/test_component.py | 22 +++++++++++-------- 2 files changed, 17 insertions(+), 9 deletions(-) diff --git a/psyneulink/core/components/mechanisms/processing/integratormechanism.py b/psyneulink/core/components/mechanisms/processing/integratormechanism.py index 47bc4580822..e6cf9a5e20c 100644 --- a/psyneulink/core/components/mechanisms/processing/integratormechanism.py +++ b/psyneulink/core/components/mechanisms/processing/integratormechanism.py @@ -271,6 +271,10 @@ def _handle_default_variable(self, default_variable=None, size=None, input_ports variable_shape[-1] = function_variable.shape[-1] # self.parameters.variable.default_value = np.zeros(tuple(variable_shape)) variable = np.zeros(tuple(variable_shape)) + else: + variable = default_variable + else: + variable = default_variable # IMPLEMENTATON NOTE: # Don't worry about case in which length of function's variable is 1 and Mechanism's is > 1 diff --git a/tests/components/test_component.py b/tests/components/test_component.py index d8a152e7a96..ffb328c6705 100644 --- a/tests/components/test_component.py +++ b/tests/components/test_component.py @@ -139,19 +139,23 @@ def __init__(self, default_variable=None, **kwargs): super().__init__(default_variable=default_variable, **kwargs) @pytest.mark.parametrize( - 'cls_', + 'cls_', [pnl.ProcessingMechanism, pnl.TransferMechanism, pnl.IntegratorMechanism] + ) + @pytest.mark.parametrize( + 'size, expected_variable', [ - pnl.ProcessingMechanism, - pytest.param( - pnl.IntegratorMechanism, - marks=pytest.mark.xfail(reason='size currently unsupported at all on IntegratorMechanism') - ) + (1, [[0]]), + (2, [[0, 0]]), + (3, [[0, 0, 0]]), + ((1, 1), [[0], [0]]), + ((2, 2), [[0, 0], [0, 0]]), + ((3, 3), [[0, 0, 0], [0, 0, 0]]), ] ) @pytest.mark.parametrize('params_dict_entry', [NotImplemented, 'params']) - def test_size(self, cls_, params_dict_entry): - c = cls_(**nest_dictionary({'size': 5}, params_dict_entry)) - assert len(c.defaults.variable[-1]) == 5 + def test_size(self, cls_, params_dict_entry, size, expected_variable): + c = cls_(**nest_dictionary({'size': size}, params_dict_entry)) + np.testing.assert_array_equal(c.defaults.variable, expected_variable) @pytest.mark.parametrize( 'cls_, function_params, expected_values', From 486f6cc3835279de40f1c1c43597e7783d7c57e8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" 
<49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 12 Sep 2024 20:36:32 +0000 Subject: [PATCH 319/410] requirements: update pytest requirement from <8.3.3 to <8.3.4 (#3045) --- dev_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev_requirements.txt b/dev_requirements.txt index 7ff58b0c3c8..f9b148b20d2 100644 --- a/dev_requirements.txt +++ b/dev_requirements.txt @@ -1,6 +1,6 @@ jupyter<1.1.2 packaging<25.0 -pytest<8.3.3 +pytest<8.3.4 pytest-benchmark<4.0.1 pytest-cov<5.0.1 pytest-forked<1.7.0 From 923c227782f9b5aa9e020846282a912045a0733e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 20 Sep 2024 21:12:54 -0400 Subject: [PATCH 320/410] requirements: update pandas requirement from <2.2.3 to <2.2.4 (#3049) Updates the requirements on [pandas](https://github.com/pandas-dev/pandas) to permit the latest version. - [Release notes](https://github.com/pandas-dev/pandas/releases) - [Commits](https://github.com/pandas-dev/pandas/compare/0.2.2...v2.2.3) --- updated-dependencies: - dependency-name: pandas dependency-type: direct:production ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 58951c0a317..bb53237fa0a 100644 --- a/requirements.txt +++ b/requirements.txt @@ -12,7 +12,7 @@ networkx<3.4 numpy>=1.21.0, <1.26.5 optuna<3.4.0 packaging<25.0 -pandas<2.2.3 +pandas<2.2.4 pillow<10.5.0 pint<0.22.0 protobuf<3.20.4 From 5fad4168ed2eb33195511acc2dbdec4fa92922ab Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 29 Sep 2024 20:42:26 -0400 Subject: [PATCH 321/410] requirements: update beartype requirement from <0.19.0 to <0.20.0 (#3050) Updates the requirements on [beartype](https://github.com/beartype/beartype) to permit the latest version. - [Release notes](https://github.com/beartype/beartype/releases) - [Changelog](https://github.com/beartype/beartype/blob/main/doc/RELEASE.rst) - [Commits](https://github.com/beartype/beartype/compare/v0.18.0...v0.19.0) --- updated-dependencies: - dependency-name: beartype dependency-type: direct:production ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index bb53237fa0a..e1932ce1f4c 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,4 @@ -beartype<0.19.0 +beartype<0.20.0 dill<0.3.9 fastkde>=1.0.24, <1.0.31 graph-scheduler>=1.2.1, <1.3.0 From f35f6c950c4269acef13b210f891bd326c37669e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 1 Oct 2024 14:27:28 -0400 Subject: [PATCH 322/410] requirements: update dill requirement from <0.3.9 to <0.3.10 (#3054) Updates the requirements on [dill](https://github.com/uqfoundation/dill) to permit the latest version. - [Release notes](https://github.com/uqfoundation/dill/releases) - [Commits](https://github.com/uqfoundation/dill/compare/dill-0.3.8...0.3.9) --- updated-dependencies: - dependency-name: dill dependency-type: direct:production ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index e1932ce1f4c..7bfd6563c49 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ beartype<0.20.0 -dill<0.3.9 +dill<0.3.10 fastkde>=1.0.24, <1.0.31 graph-scheduler>=1.2.1, <1.3.0 graphviz<0.21.0 From 9677c8d89b0c29499a46ef6c808017e2303373eb Mon Sep 17 00:00:00 2001 From: Younes Strittmatter Date: Wed, 2 Oct 2024 22:14:29 -0400 Subject: [PATCH 323/410] docs: add quotation to pip install (#3051) --- README.rst | 4 ++-- docs/source/index.rst | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/README.rst b/README.rst index a534c0043b8..5bd44d8454d 100644 --- a/README.rst +++ b/README.rst @@ -172,13 +172,13 @@ To run the tutorial locally, you must run python 3.5 and install additional pack :: - pip install psyneulink[tutorial] + pip install "psyneulink[tutorial]" or if you downloaded the source: :: - pip install .[tutorial] + pip install ".[tutorial]" To access the tutorial, make sure you fulfill the requirements mentioned above, download the tutorial notebook (/tutorial/PsyNeuLink Tutorial.ipynb), then run the terminal command diff --git a/docs/source/index.rst b/docs/source/index.rst index 9bc213049ea..cacebb5ea38 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -211,13 +211,13 @@ To run the tutorial locally, you must run python 3.6 and install additional pack :: - pip install psyneulink[tutorial] + pip install "psyneulink[tutorial]" or if you downloaded the source: :: - pip install .[tutorial] + pip install ".[tutorial]" To access the tutorial, make sure you fulfill the requirements From 0e5a1256704fc2767a0975704996257f99df57f9 Mon Sep 17 00:00:00 2001 From: jdcpni Date: Mon, 7 Oct 2024 15:24:28 -0400 Subject: [PATCH 324/410] Feat/emcomposition/add soft max args (#3058) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * • emcomposition.py - implement softmax_output_format Parameter - docstring edits - ALL -> WEIGHTED - MAX_INDICATOR -> ARG_MAX - more docstring edits - add check that ARG_MAX is not used with enable_learning • test_emcomposition.py - add test_softmax_choice * • project: 3D -> 3d in docstrings and comments Axis -> axis in docstrings and comments * • readme: update of What PsyNeuLink is not --- .../stability_flexibility_pec_optimize.py | 2 +- .../EGO/Using EMComposition/ScriptControl.py | 26 +- docs/source/BasicsAndPrimer.rst | 3 +- docs/source/index.rst | 9 +- psyneulink/core/components/component.py | 2 +- .../functions/nonstateful/fitfunctions.py | 6 +- .../nonstateful/transferfunctions.py | 22 +- .../functions/stateful/memoryfunctions.py | 2 +- psyneulink/core/compositions/composition.py | 2 +- .../parameterestimationcomposition.py | 2 +- .../mechanisms/processing/leabramechanism.py | 2 +- .../library/compositions/emcomposition.py | 332 +++++++++++------- tests/composition/test_emcomposition.py | 19 +- .../test_parameterestimationcomposition.py | 2 +- tests/mechanisms/test_control_mechanism.py | 2 +- tests/mechanisms/test_integrator_mechanism.py | 2 +- 16 files changed, 270 insertions(+), 165 deletions(-) diff --git a/Scripts/Debug/stability_flexibility/stability_flexibility_pec_optimize.py b/Scripts/Debug/stability_flexibility/stability_flexibility_pec_optimize.py index 46a6a523b2e..a4d30c288e0 100644 --- 
a/Scripts/Debug/stability_flexibility/stability_flexibility_pec_optimize.py +++ b/Scripts/Debug/stability_flexibility/stability_flexibility_pec_optimize.py @@ -99,7 +99,7 @@ def reward_rate(sim_data): """ Objective function for PEC to optimize. This function takes in the simulation data, - a 3D array of shape (num_trials, num_estimates, num_outcome_vars), and returns a + a 3d array of shape (num_trials, num_estimates, num_outcome_vars), and returns a scalar value that is the reward rate. """ return np.mean(sim_data[:, :, 0][:] / sim_data[:, :, 1][:]) diff --git a/Scripts/Models (Under Development)/EGO/Using EMComposition/ScriptControl.py b/Scripts/Models (Under Development)/EGO/Using EMComposition/ScriptControl.py index 8b40d9403ca..1d5a22bb892 100644 --- a/Scripts/Models (Under Development)/EGO/Using EMComposition/ScriptControl.py +++ b/Scripts/Models (Under Development)/EGO/Using EMComposition/ScriptControl.py @@ -7,23 +7,23 @@ CONSTRUCT_MODEL = True # THIS MUST BE SET TO True to run the script DISPLAY_MODEL = ( # Only one of the following can be uncommented: - None # suppress display of model - # { # show simple visual display of model + # None # suppress display of model + { # show simple visual display of model # 'show_pytorch': True, # show pytorch graph of model - # 'show_learning': True - # # 'show_projections_not_in_composition': True, - # # 'exclude_from_gradient_calc_style': 'dashed'# show target mechanisms for learning - # # {'show_node_structure': True # show detailed view of node structures and projections - # } + 'show_learning': True + # 'show_projections_not_in_composition': True, + # 'exclude_from_gradient_calc_style': 'dashed'# show target mechanisms for learning + # {'show_node_structure': True # show detailed view of node structures and projections + } ) -# RUN_MODEL = False # False => don't run the model -RUN_MODEL = True, # True => run the model +RUN_MODEL = False # False => don't run the model +# RUN_MODEL = True, # True => run the model # REPORT_OUTPUT = ReportOutput.FULL # Sets console output during run [ReportOutput.ON, .TERSE OR .FULL] REPORT_OUTPUT = ReportOutput.OFF # Sets console output during run [ReportOutput.ON, .TERSE OR .FULL] REPORT_PROGRESS = ReportProgress.OFF # Sets console progress bar during run -# PRINT_RESULTS = False # don't print model.results to console after execution -PRINT_RESULTS = True # print model.results to console after execution +PRINT_RESULTS = False # don't print model.results to console after execution +# PRINT_RESULTS = True # print model.results to console after execution SAVE_RESULTS = False # save model.results to disk -# PLOT_RESULTS = False # don't plot results (PREDICTIONS) vs. TARGETS -PLOT_RESULTS = True # plot results (PREDICTIONS) vs. TARGETS +PLOT_RESULTS = False # don't plot results (PREDICTIONS) vs. TARGETS +# PLOT_RESULTS = True # plot results (PREDICTIONS) vs. TARGETS ANIMATE = False # {UNIT:EXECUTION_SET} # Specifies whether to generate animation of execution diff --git a/docs/source/BasicsAndPrimer.rst b/docs/source/BasicsAndPrimer.rst index 445148d20ba..6faba06536e 100644 --- a/docs/source/BasicsAndPrimer.rst +++ b/docs/source/BasicsAndPrimer.rst @@ -143,7 +143,8 @@ of the examples further below. PsyNeuLink picks sensible defaults when necessary Components are not specified. In the example above no `Projections ` were actually specified, so PsyNeuLink automatically created the appropriate types (in this case, `MappingProjections`), and sized them appropriately to connect each pair of Mechanisms. 
Each -Projection has a `matrix ` parameter that weights the connections between the elements of the output +Projection has a `matrix ` parameter that weights the connections between the elements of the +output of its `sender ` and those of the input to its `receiver `. Here, the default is to use a `FULL_CONNECTIVITY_MATRIX`, that connects every element of the sender's array to every element of the receiver's array with a weight of 1. However, it is easy to specify a Projection explicitly, including its diff --git a/docs/source/index.rst b/docs/source/index.rst index cacebb5ea38..894afab26e6 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -55,6 +55,8 @@ combine these components to implement published models. As an open source proje to be enhanced and extended, and its library is meant to provide an expanding repository of models, written in a concise, executable, and easy to interpret form, that can be shared, compared, and extended by the scientific community. +*(Note: the PsyNeuLink development effort was initiated and named in 2016, entirely independently and without +awareness of Neuralink, with which it bears no association nor any intentional relationship.)* .. _What_PsyNeuLink_IS: @@ -102,11 +104,12 @@ The longterm goal of PsyNeuLink is to provide an environment that integrates com and behavior at all levels of analysis. While it is designed to be fully general, and can in principle be used to implement models at any level, it is still under development, and current efficiency considerations make it more suitable for some of forms of modeling than others. In its present form, it is well suited to the creation of -simple to moderately complex models, and for the integration of disparate models into a single environment, while in +simple to moderately complex models, and for the integration of disparate models into a single environment, and the +creation of systems-level neuroscientific models, as well as cognitive neuroscientific and modestly scaled machine +learning-style models, while it is presently less well suited to efforts involving massively large computations, such as: - - extensive model fitting - - large scale simulations + - large scale machine learning simulations - highly detailed biophysical models of neurons or neuronal populations Other packages currently better suited to such applications are: diff --git a/psyneulink/core/components/component.py b/psyneulink/core/components/component.py index ba8f0755585..b08c15dab5c 100644 --- a/psyneulink/core/components/component.py +++ b/psyneulink/core/components/component.py @@ -1704,7 +1704,7 @@ def checkAndCastInt(x): # CAVEAT: assuming here that object dtype implies there are list objects (i.e. array with # different sized arrays/lists inside like [[0, 1], [2, 3, 4]]), even though putting a None # value in the array will give object dtype.
This case doesn't really make sense in our - # context though, so ignoring this case in the interest of quickly fixing 3D variable behavior + # context though, so ignoring this case in the interest of quickly fixing 3d variable behavior variable = np.atleast_1d(variable) else: variable = np.atleast_2d(variable) diff --git a/psyneulink/core/components/functions/nonstateful/fitfunctions.py b/psyneulink/core/components/functions/nonstateful/fitfunctions.py index df0dd449825..ae4ade2a2f3 100644 --- a/psyneulink/core/components/functions/nonstateful/fitfunctions.py +++ b/psyneulink/core/components/functions/nonstateful/fitfunctions.py @@ -113,9 +113,9 @@ def simulation_likelihood( Parameters ---------- - sim_data: Data collected over many simulations. This must be either a 2D or 3D numpy array. + sim_data: Data collected over many simulations. This must be either a 2d or 3d numpy array. If 2D, the first dimension is the simulation number and the second dimension is data points. That is, - each row is a simulation. If 3D, the first dimension is the trial, the second dimension is the + each row is a simulation. If 3d, the first dimension is the trial, the second dimension is the simulation number, and the final dimension is data points. exp_data: This must be a numpy array with identical format as the simulation data, with the exception @@ -275,7 +275,7 @@ class PECOptimizationFunction(OptimizationFunction): PEC is trying to solve. The function is used to evaluate the `values ` of the `outcome_variables `, according to which combinations of `parameters ` are assessed; this must be an `Callable` - that takes a 3D array as its only argument, the shape of which must be (**num_estimates**, **num_trials**, + that takes a 3d array as its only argument, the shape of which must be (**num_estimates**, **num_trials**, number of **outcome_variables**). The function should specify how to aggregate the value of each **outcome_variable** over **num_estimates** and/or **num_trials** if either is greater than 1. diff --git a/psyneulink/core/components/functions/nonstateful/transferfunctions.py b/psyneulink/core/components/functions/nonstateful/transferfunctions.py index 66ef69aadfc..dc035390ce7 100644 --- a/psyneulink/core/components/functions/nonstateful/transferfunctions.py +++ b/psyneulink/core/components/functions/nonstateful/transferfunctions.py @@ -2931,8 +2931,8 @@ class SoftMax(TransferFunction): *Thresholding and Adaptive Gain* - For cases in which SoftMax is used with vector that sparse (e.g., one-hots), the value(s) of the (most( significant - entries (e.g., the one's in a one-hot) can be sensitive to (diminished by) the number of other values in the vector + For cases in which SoftMax is used with sparse vectors (e.g., one-hots), the value(s) of the most significant + entries (e.g., the 1s in a one-hot) can be sensitive to (diminished by) the number of other values in the vector (i.e., its length). For example, whereas for ``[1 0]`` the SoftMax is ``[0.73105858 0.26894142]``, for ``[1 0 0 0]`` it is ``[0.47536689 0.1748777 0.1748777 0.1748777]``. This can be addressed in one of two ways: either by thresholding `variable ` before applying the SoftMax function, or by adapting the `gain @@ -2955,7 +2955,7 @@ class SoftMax(TransferFunction): .. _SoftMax_Derivative: - *Derivatve* + *Derivative* `derivative ` returns the derivative of the SoftMax. 
If *OUTPUT_TYPE* for the SoftMax is *ALL*, returns Jacobian matrix (derivative for each element of the output array with respect to each of the @@ -2978,12 +2978,12 @@ class SoftMax(TransferFunction): specifies the value by which to multiply `variable ` before SoftMax transformation, which functions as the inverse "temperature" of the function. If it is a scalar, it must be greater than zero. If *ADAPTIVE* is specified, the value is determined dynamically based on the `variable - ` `SoftMax_AdaptGain` for details). + `; see `Thresholding and Adaptive Gain ` for details). mask_threshold : scalar : default None specifies whether to mask_threshold the `variable ` before applying the SoftMax function; this only applies if `gain ` is specified as a scalar; otherwise it is ignored - (see `SoftMax_AdaptGain` for details). + (see `Thresholding and Adaptive Gain ` for details). adapt_scale : scalar : default 1 specifies the *scale* parameter using by the `adapt_gain ` method (see method for details). @@ -3027,14 +3027,14 @@ class SoftMax(TransferFunction): determines how `variable ` is scaled before the SoftMax transformation, determining the "sharpness" of the distribution (it is equivalent to the inverse of the temperature of the SoftMax function); if it is 'ADAPTIVE', it is determined dynamically adjusted using the `adapt_gain ` method - (see `SoftMax_AdaptGain` for additional details). + (see `Thresholding and Adaptive Gain ` for additional details). mask_threshold : scalar or None determines whether the `variable ` is thresholded before applying the SoftMax function; if it is a scalar, only elements of `variable ` with an absolute value greater than that value are considered when applying the SoftMax function (which are then scaled by the `gain ` parameter; all other elements are assigned 0. This only applies if `gain ` is specified as a - scalar; otherwise it is ignored (see `SoftMax_AdaptGain` for details). + scalar; otherwise it is ignored (see `Thresholding and Adaptive Gain ` for details). adapt_scale : scalar determined the *scale* parameter using by the `adapt_gain ` method (see method for details). @@ -3049,10 +3049,10 @@ class SoftMax(TransferFunction): output : ALL, MAX_VAL, MAX_INDICATOR, or PROB determines how the SoftMax-transformed values of the elements in `variable ` are reported in the array returned by `function `: - * **ALL**: array of all SoftMax-transformed values (the default); - * **MAX_VAL**: SoftMax-transformed value for the element with the maximum such value, 0 for all others; - * **MAX_INDICATOR**: 1 for the element with the maximum SoftMax-transformed value, 0 for all others; - * **PROB**: probabilistically chosen element based on SoftMax-transformed values after setting the + * *ALL*: array of all SoftMax-transformed values (the default); + * *MAX_VAL*: SoftMax-transformed value for the element with the maximum such value, 0 for all others; + * *MAX_INDICATOR*: 1 for the element with the maximum SoftMax-transformed value, 0 for all others; + * *PROB*: probabilistically chosen element based on SoftMax-transformed values after setting the sum of values to 1 (i.e., their `Luce Ratio `_), 0 for all others. 
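As an illustration of the dilution effect and the mask_threshold behavior described in the docstring changes above, the idea can be sketched in plain numpy; this is a simplified stand-in, not the PsyNeuLink SoftMax implementation (gain handling and the output options are omitted):

import numpy as np

def softmax(x):
    ex = np.exp(x - np.max(x))        # subtract max for numerical stability
    return ex / ex.sum()

print(softmax(np.array([1., 0.])))           # ~[0.731, 0.269]
print(softmax(np.array([1., 0., 0., 0.])))   # ~[0.475, 0.175, 0.175, 0.175]; the 1 is diluted

def masked_softmax(x, mask_threshold):
    # apply SoftMax only to elements with |x| > mask_threshold; all other elements are assigned 0
    mask = np.abs(x) > mask_threshold
    out = np.zeros_like(x)
    out[mask] = softmax(x[mask])
    return out

print(masked_softmax(np.array([1., 0., 0., 0.]), mask_threshold=0.5))   # [1., 0., 0., 0.]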
diff --git a/psyneulink/core/components/functions/stateful/memoryfunctions.py b/psyneulink/core/components/functions/stateful/memoryfunctions.py index e421f55f386..9ca229f552e 100644 --- a/psyneulink/core/components/functions/stateful/memoryfunctions.py +++ b/psyneulink/core/components/functions/stateful/memoryfunctions.py @@ -916,7 +916,7 @@ class ContentAddressableMemory(MemoryFunction): # ------------------------------ value added to `variable `) before storing in `memory ` (see `noise ` for additional details). If a 2d array (or `Function` that returns one), its shape must be the same as `variable - `; that is, each array in the outer dimension (Axis 0) must have the + `; that is, each array in the outer dimension (axis 0) must have the same length as the corresponding one in `variable `, so that it can be added Hadamard style to `variable ` before storing it in `memory `. diff --git a/psyneulink/core/compositions/composition.py b/psyneulink/core/compositions/composition.py index c2216c625a4..455178747d3 100644 --- a/psyneulink/core/compositions/composition.py +++ b/psyneulink/core/compositions/composition.py @@ -10481,7 +10481,7 @@ def _instantiate_input_dict(self, inputs): # shapes of entries will be validated in _validate_input_shapes_and_expand_for_all_trials()) else: - # 3D ragged array or 2d array + # 3d ragged array or 2d array entry = convert_to_np_array(_inputs) ragged_array = entry.dtype == object if ragged_array: diff --git a/psyneulink/core/compositions/parameterestimationcomposition.py b/psyneulink/core/compositions/parameterestimationcomposition.py index 0616bb8c900..1b39557d5bc 100644 --- a/psyneulink/core/compositions/parameterestimationcomposition.py +++ b/psyneulink/core/compositions/parameterestimationcomposition.py @@ -128,7 +128,7 @@ * **objective_function** - specifies a function used to evaluate the `values ` of the `outcome_variables `, according to which combinations of `parameters ` are assessed; this must be an `Callable` - that takes a 3D array as its only argument, the shape of which will be (**num_estimates**, **num_trials**, + that takes a 3d array as its only argument, the shape of which will be (**num_estimates**, **num_trials**, number of **outcome_variables**). The function should specify how to aggregate the value of each **outcome_variable** over **num_estimates** and/or **num_trials** if either is greater than 1. 
diff --git a/psyneulink/library/components/mechanisms/processing/leabramechanism.py b/psyneulink/library/components/mechanisms/processing/leabramechanism.py index 84c8576d96a..9d988c5c6e2 100644 --- a/psyneulink/library/components/mechanisms/processing/leabramechanism.py +++ b/psyneulink/library/components/mechanisms/processing/leabramechanism.py @@ -551,7 +551,7 @@ def convert_to_2d_input(array_like): if isinstance(array_like, (np.ndarray, list)): if isinstance(array_like[0], (np.ndarray, list)): if isinstance(array_like[0][0], (np.ndarray, list)): - print("array_like ({}) is at least 3D, which may cause conversion errors".format(array_like)) + print("array_like ({}) is at least 3d, which may cause conversion errors".format(array_like)) out = [] for a in array_like: out.append(np.array(a)) diff --git a/psyneulink/library/compositions/emcomposition.py b/psyneulink/library/compositions/emcomposition.py index a6da921c761..6a264a55b3f 100644 --- a/psyneulink/library/compositions/emcomposition.py +++ b/psyneulink/library/compositions/emcomposition.py @@ -273,18 +273,25 @@ The EMComposition implements a configurable, content-addressable form of episodic, or eternal memory, that emulates an `EpisodicMemoryMechanism` -- reproducing all of the functionality of its `ContentAddressableMemory` `Function` -- in the form of an `AutodiffComposition` that is capable of learning how to differentially weight different cues used -for retrieval,, and that adds the capability for `memory_decay `. Its `memory -` is configured using the ``memory_template`` argument of its constructor, which defines how -each entry in `memory ` is structured (the number of fields in each entry and the length of -each field), and its ``field_weights`` argument that defines which fields are used as cues for retrieval -- "keys" -- -and whether and how they are differentially weighted in the match process used for retrieval, and which are treated -as "values" that are retrieved but not used by the match process. The inputs corresponding to each key (i.e., used -as "queries") and value are represented as `INPUT ` `Nodes ` of the EMComposition -(listed in its `query_input_nodes ` and `value_input_nodes +for retrieval,, and that adds the capability for `memory_decay `. Its `memory +` is configured using two arguments of its constructor: **memory_template** argument, that defines +how each entry in `memory ` is structured (the number of fields in each entry and the length +of each field); and **field_weights** argument, that defines which fields are used as cues for retrieval, i.e., "keys", +including whether and how they are differentially weighted in the match process used for retrieval); and which +fields are treated as "values" that are stored retrieved, but not used by the match process. The inputs to an +EMComposition, corresponding to each key ("query") and value field are assigned to each of its `INPUT ` +`Nodes ` (listed in its `query_input_nodes ` and `value_input_nodes ` attributes, respectively), and the retrieved values are represented as `OUTPUT ` `Nodes ` of the EMComposition. The `memory ` can be accessed using its `memory ` attribute. + .. technical_note:: + The memories of an EMComposition are actually stored in the `matrix ` attribute of a + set of `MappingProjections ` (see `note below `). 
The `memory + ` attribute compiles and formats these as a single 3d array, the rows of which (axis 0) + are each entry, the columns of which (axis 1) are the fields of each entry, and the items of which (axis 2) + are the values of each field (see `EMComposition_Memory` for additional details). + .. _EMComposition_Organization: **Organization** @@ -294,27 +301,29 @@ *Entries and Fields*. Each entry in memory can have an arbitrary number of fields, and each field can have an arbitrary length. However, all entries must have the same number of fields, and the corresponding fields must all have the same length across entries. Each field is treated as a separate "channel" for storage and retrieval, and is associated with -its own corresponding input (key or value) and output (retrieved value) `Node ` some or all of +its own corresponding input (key or value) and output (retrieved value) `Node ` some or all of which can be used to compute the similarity of the input (key) to entries in memory, that is used for retreieval. -Fields can be differentially weighted to determine the influence they have on retrieval, using the -`field_weights ` parameter (see `retrieval ` below). -The number and shape of the fields in each entry is specified in the ``memory_template`` argument of the EMComposition's -constructor (see `memory_template `). Which fields treated as keys (i.e., matched against -queries during retrieval) and which are treated as values (i.e., retrieved but not used for matching retrieval) is -specified in the ``field_weights`` argument of the EMComposition's constructor (see `field_weights -`). +Fields can be differentially weighted to determine the influence they have on retrieval, using the `field_weights +` parameter (see `retrieval ` below). The number and +shape of the fields in each entry is specified in the **memory_template** argument of the EMComposition's constructor +(see `memory_template `). Which fields treated as keys (i.e., matched against queries during +retrieval) and which are treated as values (i.e., retrieved but not used for matching retrieval) is specified in the +**field_weights** argument of the EMComposition's constructor (see `field_weights `). .. _EMComposition_Operation: **Operation** -*Retrieval.* The values retrieved from `memory ` (one for each field) are based on -the relative similarity of the keys to the entries in memory, computed as the dot product of each key and the +*Retrieval.* The values retrieved from `memory ` (one for each field) are based +on the relative similarity of the keys to the entries in memory, computed as the dot product of each key and the values in the corresponding field for each entry in memory. These dot products are then softmaxed, and those softmax distributions are weighted by the corresponding `field_weights ` for each field -and then combined, to produce a single softmax distribution over the entries in memory, that is used to generate a -weighted average as the retrieved value across all fields, and returned as the `result ` of the -EMComposition's `execution `. +and then combined, to produce a single softmax distribution over the entries in memory. That is then used to generate +a weighted average of the retrieved values across all fields, which is returned as the `result ` +of the EMComposition's `execution ` (an EMComposition can also be configured to return the +entry with the highest dot product weighted by field, however then it is not compatible with learning; +see `softmax_choice `). 
+ COMMENT: TBD DISTANCE ATTRIBUTES: The distances used for the last retrieval is stored in XXXX and the distances of each of their corresponding fields @@ -324,7 +333,7 @@ *Storage.* The `inputs ` to the EMComposition's fields are stored in `memory ` after each execution, with a probability determined by `storage_prob -`. If `memory_decay ` is specified, then the `memory +`. If `memory_decay_rate ` is specified, then the `memory ` is decayed by that amount after each execution. If `memory_capacity ` has been reached, then each new memory replaces the weakest entry (i.e., the one with the smallest norm across all of its fields) in `memory `. @@ -344,7 +353,7 @@ * **memory_template**: This specifies the shape of the entries to be stored in the EMComposition's `memory `, and can be used to initialize `memory ` with pre-specified entries. - The ``memory_template`` argument can be specified in one of three ways (see `EMComposition_Examples` for + The **memory_template** argument can be specified in one of three ways (see `EMComposition_Examples` for representative use cases): * **tuple**: interpreted as an np.array shape specification, that must be of length 2 or 3. If it is a 3-item tuple, @@ -354,8 +363,8 @@ filled with zeros or the value specified by `memory_fill `. .. warning:: - If the ``memory_template`` is specified with a 3-item tuple and `memory_capacity ` - is also specified with a value that does not match the first item of ``memory_template``, and error is + If **memory_template** is specified with a 3-item tuple and `memory_capacity ` + is also specified with a value that does not match the first item of **memory_template**, and error is generated indicating the conflict in the number of entries specified. .. hint:: @@ -363,7 +372,7 @@ specifying the shape of an entry, and so it can't be used to specify the number of entries each of which has a single field. - * **2d list or array**: interpreted as a template for memory entries. This can be used to specify fields of + * **2d list or array**: interpreted as a template for memory entries. This can be used to specify fields of different lengths (i.e., entries that are ragged arrays), with each item in the list (axis 0 of the array) used to specify the length of the corresponding field. The template is then used to initialze all entries in `memory `. If the template includes any non-zero elements, then the array is replicated for all @@ -372,15 +381,15 @@ .. hint:: To specify a single entry, with all other entries filled with zeros - or the value specified in ``memory_fill``, use a 3d array as described below. + or the value specified in **memory_fill**, use a 3d array as described below. - * **3d list or array**: used to initialize `memory ` directly with the entries specified in + * **3d list or array**: used to initialize `memory ` directly with the entries specified in the outer dimension (axis 0) of the list or array. If `memory_capacity ` is not - specified, then it is set to the number of entries in the list or array. If ``memory_capacity`` *is* specified, - then the number of entries specified in ``memory_template`` must be less than or equal to ``memory_capacity``. 
If - is less than ``memory_capacity``, then the remaining entries in `memory ` are filled with - zeros or the value specified in ``memory_fill`` (see below): if all of the entries specified contain only - zeros, and ``memory_fill`` is specified, then the matrix is filled with the value specified in ``memory_fill``; + specified, then it is set to the number of entries in the list or array. If **memory_capacity** *is* specified, + then the number of entries specified in **memory_template** must be less than or equal to **memory_capacity**. If + is less than **memory_capacity**, then the remaining entries in `memory ` are filled with + zeros or the value specified in **memory_fill** (see below): if all of the entries specified contain only + zeros, and **memory_fill** is specified, then the matrix is filled with the value specified in **memory_fill**; otherwise, zeros are used to fill all entries. .. _EMComposition_Memory_Capacity: @@ -390,7 +399,7 @@ * **memory_capacity**: specifies the number of items that can be stored in the EMComposition's memory; when `memory_capacity ` is reached, each new entry overwrites the weakest entry (i.e., the one with the smallest norm across all of its fields) in `memory `. If `memory_template - EMComposition_Memory_Template>` is specified as a 3-item tuple or 3d list or array (see above), then that is used + ` is specified as a 3-item tuple or 3d list or array (see above), then that is used to determine `memory_capacity ` (if it is specified and conflicts with either of those an error is generated). Otherwise, it can be specified using a numerical value, with a default of 1000. The `memory_capacity ` cannot be modified once the EMComposition has been constructed. @@ -398,13 +407,13 @@ .. _EMComposition_Memory_Fill: * **memory_fill**: specifies the value used to fill the `memory `, based on the shape specified - in the ``memory_template`` (see above). The value can be a scalar, or a tuple to specify an interval over which + in the **memory_template** (see above). The value can be a scalar, or a tuple to specify an interval over which to draw random values to fill `memory ` --- both should be scalars, with the first specifying - the lower bound and the second the upper bound. If ``memory_fill`` is not specified, and no entries are specified - in ``memory_template``, then `memory ` is filled with zeros. + the lower bound and the second the upper bound. If **memory_fill** is not specified, and no entries are specified + in **memory_template**, then `memory ` is filled with zeros. .. hint:: - If memory is initialized with all zeros and ``normalize_memories`` set to ``True`` (see `below + If memory is initialized with all zeros and **normalize_memories** set to ``True`` (see `below `) then a numpy.linalg warning is issued about divide by zero. This can be ignored, as it does not affect the results of execution, but it can be averted by specifying `memory_fill ` to use small random values (e.g., ``memory_fill=(0,.001)``). @@ -412,7 +421,7 @@ .. _EMComposition_Field_Weights: * **field_weights**: specifies which fields are used as keys, and how they are weighted during retrieval. The - number of entries specified must match the number of fields specified in ``memory_template`` (i.e., the size of + number of entries specified must match the number of fields specified in **memory_template** (i.e., the size of of its first dimension (axis 0)). 
All non-zero entries must be positive; these designate *keys* -- fields that are used to match queries against entries in memory for retrieval (see `Match memories by field `). Entries of 0 designate *values* -- fields that are ignored during the matching @@ -420,11 +429,11 @@ corresponding `retrieved_node `. This distinction between keys and value corresponds to the format of a standard "dictionary," though in that case only a single key and value are allowed, whereas here there can be one or more keys and any number of values; if all fields are keys, this implements a full - form of content-addressable memory. If ``learn_field_weight`` is True (and `enable_learning + form of content-addressable memory. If **learn_field_weight** is True (and `enable_learning ` is either True or a list), then the field_weights can be modified during training (this functions similarly to the attention head of a Transformer model, although at present the - field can only be scalar values rather than vecdtors); if ``learn_field_weight`` is False, then the field_weights are - fixed. The following options can be used to specify ``field_weights``: + field can only be scalar values rather than vecdtors); if **learn_field_weight** is False, then the field_weights are + fixed. The following options can be used to specify **field_weights**: * *None* (the default): all fields except the last are treated as keys, and are weighted equally for retrieval, while the last field is treated as a value field; @@ -462,8 +471,8 @@ not all equal (i.e., all non-zero weights are not equal -- see `field_weights `) and/or `normalize_memories ` is set to False. Setting concatenate_keys to True in either of those cases issues a warning, and the setting is ignored. If the key `field_weights ` - (i.e., all non-zero values) are all equal *and* ``normalize_memories`` is set to True, then setting - ``concatenate_keys`` causes a concatenate_keys_node ` to be created that + (i.e., all non-zero values) are all equal *and* **normalize_memories** is set to True, then setting + **concatenate_keys** causes a `concatenate_keys_node ` to be created that receives input from all of the `query_input_nodes ` and passes them as a single vector to the `mactch_node `. @@ -483,7 +492,7 @@ * **memory_decay_rate**: specifies the rate at which items in the EMComposition's memory decay; the default rate is *AUTO*, which sets it to 1 / `memory_capacity `, such that the oldest memories are the most likely to be replaced when `memory_capacity ` is reached. If - ``memory_decay_rate`` is set to 0 None or False, then memories do not decay and, when `memory_capacity + **memory_decay_rate** is set to 0 None or False, then memories do not decay and, when `memory_capacity ` is reached, the weakest memories are replaced, irrespective of order of entry. .. _EMComposition_Retrieval_Storage: @@ -498,14 +507,50 @@ .. _EMComposition_Softmax_Gain: -* **softmax_gain** : specifies the gain (inverse temperature) used for softmax normalizing the dot products of queries - and keys in memory (see `EMComposition_Execution` below). If a value is specified, that is used. 
If the keyword - *ADAPTIVE* is specified, then the `Softmax.adapt_gain ` function is used to adaptively set the - `softmax_gain ` based on the entropy of the dot products in order to preserve the - the distribution over non-(or near) zero entries irrespective of how many (near) zero entries there are (see - `SoftMax_AdaptGain` for additional details), If *CONTROL* is specified, this feature is implemented by creaeting a - `ContrlMechanism`, the `ControlSignal` of which is used to modulate the `softmax_gain ` - parameter of the `Softmax` function. If None is specified, the the default value of the `Softmax` function is used. +* **softmax_gain** : specifies the gain (inverse temperature) used for softmax normalizing the dot products of + queries and keys in memory (see `EMComposition_Execution` below). The following options can be used: + + * numeric value: the value is used as the gain of the `SoftMax` Function for the EMComposition's + `softmax_nodes `. + + * *ADAPTIVE*: the `adapt_gain ` method of the `SoftMax` Function is used to adaptively set + the `softmax_gain ` based on the entropy of the dot products, in order to preserve + the distribution over non- (or near) zero entries irrespective of how many (near) zero entries there are + (see `Thresholding and Adaptive Gain ` for additional details). + + * *CONTROL*: a `ControlMechanism` is created, and its `ControlSignal` is used to modulate the `softmax_gain + ` parameter of the `SoftMax` function of the EMComposition's `softmax_nodes + `. + + If *None* is specified, the default value for the `SoftMax` function is used. + +.. _EMComposition_Softmax_Threshold: + +* **softmax_threshold**: if this is specified, and **softmax_gain** is specified with a numeric value, + then any values below the specified threshold are set to 0 before the dot products are softmaxed + (see *mask_threhold* under `Thresholding and Adaptive Gain ` for additional details). + +.. _EMComposition_Softmax_Choice: + +* **softmax_choice** : specifies how the `SoftMax` Function of each of the EMComposition's `softmax_nodes + ` is used, with the dot products of queries and keys, to generate a retrieved item; + the following are the options that can be used and the retrieved value they produce: + + * *WEIGHTED*: softmax-weighted average of entries, based on their dot products with the key(s); this is the default; + + * *ARG_MAX*: entry with the largest dot product. + + .. warning:: + Use of the *ARG_MAX* option is not compatible with learning, as it implements a discrete choice and thus is not + differentiable; use of this with `enable_learning ` set to ``True`` will generate + an error. + + .. technical_note:: + The *WEIGHTED* option is passed as *ALL* to the **output** argument of the `SoftMax` Function, and + *ARG_MAX* is passed as *MAX_INDICATOR*; the *MAX_VAL* and *PROB* arguments are not currently. + COMMENT: + * *PROB*: probabilistically-chosen entry, based on the softmax transformation of thee dot products. + COMMENT .. _EMComposition_Learning: @@ -516,7 +561,7 @@ * **enable_learning** : specifies whether learning is enabled for the EMComposition and, if so, which `retrieved_nodes ` are used to compute errors, and propagate these back through the network. If - ``enable_learning`` is False, then no learning occurs, including of `field_weights `). + **enable_learning** is False, then no learning occurs, including of `field_weights `). 
If it is True, then all of the `retrieved_nodes ` participate in learning: For those that do not project to an outer Composition (i.e., one in which the EMComposition is `nested `), a `TARGET ` node is constructed for each, and used to compute errors that @@ -524,19 +569,19 @@ `value_input_nodes `, and on to any nodes that project to it from a composition in which the EMComposition is `nested `; retrieved_nodes that *do* project to an outer Composition receive their errors from those nodes, which are also backpropagated through the EMComposition. - If ``enable_learning`` is a list, then only the `retrieved_nodes ` specified in the + If **enable_learning** is a list, then only the `retrieved_nodes ` specified in the list participate in learning, and errors are computed only for those nodes. The list must contain the same number of entries as there are `fields ` and corresponding `retreived_nodes `, and each entry must be a boolean that specifies whether the corresponding `retrieved_node ` is used for learning. * **learn_field_weight** : specifies whether `field_weights ` are modifiable during - learning (see `field_weights ` and `EMComposition_Learning` for additional - information. For learning of `field_weights ` to occur, ``enable_learning`` must + learning (see `field_weights ` and `Learning ` for additional + information. For learning of `field_weights ` to occur, **enable_learning** must also be True, or it must be a list with at least one True entry. * **learning_rate** : specifies the rate at which `field_weights ` are learned if - ``learn_field_weight`` is True; see `EMComposition_Learning` for additional information. + **learn_field_weight** is True; see `Learning ` for additional information. .. _EMComposition_Structure: @@ -557,27 +602,29 @@ *Memory* ~~~~~~~~ -The `memory ` attribute contains a record of the entries in the EMComposition's memory. This is -in the form of a 2d array, in which rows (axis 0) are entries and columns (axis 1) are fields. The number of fields -is determined by the `memory_template ` argument of the EMComposition's constructor, -and the number of entries is determined by the `memory_capacity ` argument. +The `memory ` attribute contains a record of the entries in the EMComposition's memory. This +is in the form of a 3d array, in which rows (axis 0) are entries, columns (axis 1) are fields, and items (axis 2) are +the values of an entry in a given field. The number of fields is determined by the `memory_template +` argument of the EMComposition's constructor, and the number of entries is determined +by the `memory_capacity ` argument. .. _EMComposition_Memory_Storage: .. technical_note:: - The memories are actually stored in the `matrix ` parameters of the `MappingProjections` + The memories are actually stored in the `matrix ` parameters of the`MappingProjections` from the `combined_softmax_node ` to each of the `retrieved_nodes - `. Memories associated with each key are also stored (in inverted form) in the - `matrix ` parameters of the `MappingProjections` from the `query_input_nodes - ` to each of the corresponding `match_nodes `. 
- This is done so that the match of each query to the keys in memory for the corresponding field can be computed - simply by passing the input for each query through the Projection (which computes the dot product of the input with - the Projection's `matrix ` parameter) to the corresponding match_node; and, similarly, - retrieivals can be computed by passing the softmax distributions and weighting for each field computed - in the `combined_softmax_node ` through its Projection to each - `retrieved_node ` (which are inverted versions of the matrices of the - `MappingProjections` from the `query_input_nodes ` to each of the corresponding - `match_nodes `), to compute the dot product of the weighted softmax over - entries with the corresponding field of each entry that yields the retreieved value for each field. + `. Memories associated with each key are also stored (in inverted form) + in the `matrix ` parameters of the `MappingProjection ` + from the `query_input_nodes ` to each of the corresponding `match_nodes + `. This is done so that the match of each query to the keys in memory for the + corresponding field can be computed simply by passing the input for each query through the Projection (which + computes the dot product of the input with the Projection's `matrix ` parameter) to + the corresponding match_node; and, similarly, retrieivals can be computed by passing the softmax distributions + and weighting for each field computed in the `combined_softmax_node ` + through its Projection to each `retrieved_node ` (which are inverted versions + of the matrices of the `MappingProjections ` from the `query_input_nodes + ` to each of the corresponding `match_nodes `), + to compute the dot product of the weighted softmax over entries with the corresponding field of each entry + that yields the retreieved value for each field. .. _EMComposition_Output: @@ -638,13 +685,13 @@ * **Softmax normalize matches over fields**. The dot product for each key field is passed from the `match_node ` to the corresponding `softmax_node `, which applies - the `SoftMax` function to normalize the dot products for each key field. If a numerical value is specified for - `softmax_gain `, that is used as the gain (inverse temperature) for the SoftMax function; - if *ADAPTIVE* is specified, then the `SoftMax.adapt_gain` function is used to adaptively set the gain based on - the dot products in each field (see `Softmax_AdaptGain` for additional details); if *CONTROL* is specified, then the - dot products are monitored by a `ControlMechanism` that uses the `adapt_gain ` method of the - `SoftMax` function to modulate the `gain ` parameter of the Softmax function; if None is specified, - the default value of the `Softmax` function is used as the `gain ` parameter. + the `SoftMax` Function to normalize the dot products for each key field. If a numerical value is specified for + `softmax_gain `, that is used as the gain (inverse temperature) for the SoftMax Function; + if *ADAPTIVE* is specified, then the `SoftMax.adapt_gain` function is used to adaptively set the gain based on the + dot products in each field; if *CONTROL* is specified, then the dot products are monitored by a `ControlMechanism` + that uses the `adapt_gain ` method of the SoftMax Function to modulate its `gain ` + parameter; if None is specified, the default value of the `Softmax` Function is used as the `gain ` + parameter (see `Softmax_Gain ` for additional details). * **Weight fields**. 
If `field weights ` are specified, then the softmax normalized dot product for each key field is passed to the corresponding `field_weight_node ` @@ -659,7 +706,7 @@ ` to compute the retrieved value for each field. * **Decay memories**. If `memory_decay ` is True, then each of the memories is decayed - by the amount specified in `memory_decay `. + by the amount specified in `memory_decay_rate `. .. technical_note:: This is done by multiplying the `matrix ` parameter of the `MappingProjection` from @@ -701,15 +748,16 @@ *Training* ~~~~~~~~~~ -If `learn ` is called, ``enable_learning`` is True or a list with at least one True entry, -then errors will be computed for each of the `retrieved_nodes ` that is specified for -learning (see `EMComposition_Learning` for details about specification). These errors are derived either from -any errors backprpated to the EMComposition from an outer Composition in which it is `nested `, or -locally by the difference between the `retrieved_nodes ` and the `target_nodes -` that are created for each of the `retrieved_nodes ` that -do not project to an outer Composition. These errors are then backpropagated through the EMComposition to the -`query_input_nodes ` and `value_input_nodes `, and -on to any nodes that project to it from a composition in which the EMComposition is `nested `. +If `learn ` is called, `enable_learning ` is True or a list with at +least one True entry, then errors will be computed for each of the `retrieved_nodes ` +that is specified for learning (see `Learning ` for details about specification). These errors +are derived either from any errors backprpated to the EMComposition from an outer Composition in which it is `nested +`, or locally by the difference between the `retrieved_nodes ` +and the `target_nodes ` that are created for each of the `retrieved_nodes +` that do not project to an outer Composition. These errors are then backpropagated +through the EMComposition to the `query_input_nodes ` and `value_input_nodes +`, and on to any nodes that project to it from a composition in which the +EMComposition is `nested `. If `learn_field_weights ` is also True, then the `field_weights ` are modified to minimize the error passed to the EMComposition retrieved nodes, using the @@ -814,7 +862,7 @@ **List or array specification** Note that in the example above the two fields have the same length (5). This is always the case when a tuple is used, -as it generates a regular array. A list or numpy array can also be used to specify the ``memory_template`` argument. +as it generates a regular array. A list or numpy array can also be used to specify the **memory_template** argument. For example, the following is equivalent to the examples above:: >>> em = EMComposition(memory_template=[[0,0,0],[0,0,0]], memory_capacity=4) @@ -834,10 +882,10 @@ **Memory fill** Note that the examples above generate a warning about the use of zeros to initialize the memory. This is -because the default value for ``memory_fill`` is ``0``, and the default value for `normalize_memories +because the default value for **memory_fill** is ``0``, and the default value for `normalize_memories ` is True, which will cause a divide by zero warning when memories are normalized. While this doesn't crash, it will result in nan's that are likely to cauase problems elsewhere. 
-This can be avoided by specifying a non-zero value for ``memory_fill``, such as small number:: +This can be avoided by specifying a non-zero value for **memory_fill**, such as small number:: >>> em = EMComposition(memory_template=[[0,0,0],[0]], memory_capacity=4, memory_fill=.001) >>> em.memory @@ -846,7 +894,7 @@ [[array([0.001, 0.001, 0.001]), array([0.001])]], [[array([0.001, 0.001, 0.001]), array([0.001])]]] -Here, a single value was specified for ``memory_fill`` (which can be a float or int), that is used to fill all values. +Here, a single value was specified for **memory_fill** (which can be a float or int), that is used to fill all values. Random values can be assigned using a tuple to specify and internval between the first and second elements. For example, the following uses random values between 0 and 0.01 to fill all entries:: @@ -873,8 +921,8 @@ [[array([0., 0., 0.]), array([0.])]]] Note that the two entries must have exactly the same shapes. If they do not, an error is generated. -Also note that the remaining entries are filled with zeros (the default value for ``memory_fill``). -Here again, ``memory_fill`` can be used to specify a different value:: +Also note that the remaining entries are filled with zeros (the default value for **memory_fill**). +Here again, **memory_fill** can be used to specify a different value:: >>> em = EMComposition(memory_template=[[[7],[24,5]],[[100],[3,106]]], memory_capacity=4, memory_fill=(0,.01)) >>> em.memory @@ -891,9 +939,9 @@ By default, all of the fields specified are treated as keys except the last, which is treated as a "value" field -- that is, one that is not included in the matching process, but for which a value is retrieved along with the key fields. For example, in the `figure ` above, the first field specified was used as a key field, -and the last as a value field. However, the ``field_weights`` argument can be used to modify this, specifying which +and the last as a value field. However, the **field_weights** argument can be used to modify this, specifying which fields should be used as keys fields -- including the relative contribution that each makes to the matching process --- and which should be used as value fields. Non-zero elements in the ``field_weights`` argument designate key fields, +-- and which should be used as value fields. Non-zero elements in the **field_weights** argument designate key fields, and zeros specify value fields. 
For example, the following specifies that the first two fields should be used as keys while the last two should be used as values:: @@ -961,18 +1009,19 @@ from psyneulink.core.components.projections.pathway.mappingprojection import MappingProjection from psyneulink.core.globals.parameters import Parameter, check_user_specified from psyneulink.core.globals.keywords import \ - (ADAPTIVE, AUTO, CONTROL, DEFAULT_INPUT, DEFAULT_VARIABLE, EM_COMPOSITION, FULL_CONNECTIVITY_MATRIX, - GAIN, IDENTITY_MATRIX, MULTIPLICATIVE_PARAM, NAME, PARAMS, PRODUCT, PROJECTIONS, RANDOM, SIZE, VARIABLE) + (ADAPTIVE, ALL, AUTO, CONTROL, DEFAULT_INPUT, DEFAULT_VARIABLE, EM_COMPOSITION, FULL_CONNECTIVITY_MATRIX, + GAIN, IDENTITY_MATRIX, MAX_INDICATOR, MULTIPLICATIVE_PARAM, NAME, PARAMS, PRODUCT, PROJECTIONS, + RANDOM, SIZE, VARIABLE) from psyneulink.core.globals.utilities import convert_all_elements_to_np_array, is_numeric_scalar from psyneulink.core.globals.context import ContextFlags from psyneulink.core.llvm import ExecutionMode -__all__ = [ - 'EMComposition' -] +__all__ = ['EMComposition', 'WEIGHTED', 'ARG_MAX'] STORAGE_PROB = 'storage_prob' +WEIGHTED = ALL +ARG_MAX = MAX_INDICATOR QUERY_AFFIX = ' [QUERY]' VALUE_AFFIX = ' [VALUE]' @@ -1054,7 +1103,7 @@ class EMComposition(AutodiffComposition): --------- memory_template : tuple, list, 2d or 3d array : default [[0],[0]] - specifies the shape of an items to be stored in the EMComposition's memory; + specifies the shape of an item to be stored in the EMComposition's memory; see `memory_template ` for details. memory_fill : scalar or tuple : default 0 @@ -1091,8 +1140,12 @@ class EMComposition(AutodiffComposition): see `Softmax normalize matches over fields ` for additional details. softmax_threshold : float : default .0001 - specifies the temperature used for softmax normalizing the dot products of keys and memories; assign ``None`` - to disable; see `Softmax normalize matches over fields ` for additional details. + specifies the threshold used to mask out small values in the softmax calculation; + see *mask_threshold* under `Thresholding and Adaptive Gain ` for details). + + softmax_choice : WEIGHTED, ARG_MAX : default WEIGHTED + specifies how the softmax over dot products of keys and memories is used for retrieval; see `Softmax + normalize matches over fields ` description of each option. storage_prob : float : default 1.0 specifies the probability that an item will be stored in `memory ` @@ -1116,7 +1169,7 @@ class EMComposition(AutodiffComposition): learning_rate : float : default .01 specifies rate at which `field_weights ` are learned - if ``learn_field_weights`` is True. + if `learn_field_weights ` is True. # 7/10/24 FIX: STILL TRUE? DOES IT PRECLUDE USE OF EMComposition as a nested Composition?? .. technical_note:: @@ -1139,9 +1192,9 @@ class EMComposition(AutodiffComposition): Attributes ---------- - memory : list[list[list[float]]] - list of entries in memory, in which each row (outer dimensions) is an entry and each item in the row is the - value for the corresponding field; see `EMComposition_Memory` for additional details. + memory : ndarray + 3d array of entries in memory, in which each row (axis 0) is an entry, each column (axis 1) is a field, and + each item (axis 2) is the value for the corresponding field; see `EMComposition_Memory` for additional details. .. 
note:: This is a read-only attribute; memories can be added to the EMComposition's memory either by @@ -1185,8 +1238,12 @@ class EMComposition(AutodiffComposition): over fields ` for additional details. softmax_threshold : float - determines the threshold used to mask out small values in the softmax calculation; see `_SoftMax_AdaptGain` - for details). + determines the threshold used to mask out small values in the softmax calculation; + see *mask_threshold* under `Thresholding and Adaptive Gain ` for details). + + softmax_choice : WEIGHTED or ARG_MAX + determines how the softmax over dot products of keys and memories is used for retrieval; see `Softmax + normalize matches over fields ` description of each option. storage_prob : float determines the probability that an item will be stored in `memory ` @@ -1213,7 +1270,7 @@ class EMComposition(AutodiffComposition): learning_rate : float determines whether the rate at which `field_weights ` are learned - if `learn_field_weights` is True; see `EMComposition_Learning>` for additional details. + if `learn_field_weights` is True; see `Learning ` for additional details. .. _EMComposition_Nodes: @@ -1243,7 +1300,7 @@ class EMComposition(AutodiffComposition): concatenate_keys_node : TransferMechanism `TransferMechanism` that concatenates the inputs to `query_input_nodes ` into a single vector used for the matching processing if `concatenate keys ` is True. - This is not created if the ``concatenate_keys`` argument to the EMComposition's constructor is False or is + This is not created if the **concatenate_keys** argument to the EMComposition's constructor is False or is overridden (see `concatenate_keys `), or there is only one query_input_node. match_nodes : list[TransferMechanism] @@ -1412,6 +1469,11 @@ class Parameters(AutodiffComposition.Parameters): :default value: 1.0 :type: ``float, ADAPTIVE or CONTROL`` + softmax_choice + see `softmax_choice ` + :default value: WEIGHTED + :type: ``keyword`` + softmax_threshold see `softmax_threshold ` :default value: .001 @@ -1433,6 +1495,7 @@ class Parameters(AutodiffComposition.Parameters): normalize_memories = Parameter(True) softmax_gain = Parameter(1.0, modulable=True) softmax_threshold = Parameter(.001, modulable=True, specify_none=True) + softmax_choice = Parameter(WEIGHTED, modulable=False, specify_none=True) storage_prob = Parameter(1.0, modulable=True, aliases=[MULTIPLICATIVE_PARAM]) memory_decay_rate = Parameter(AUTO, modulable=True) enable_learning = Parameter(True, structural=True) @@ -1510,6 +1573,7 @@ def __init__(self, normalize_memories:bool=True, softmax_gain:Union[float, ADAPTIVE, CONTROL]=1.0, softmax_threshold:Optional[float]=.001, + softmax_choice:Optional[Union[WEIGHTED, ARG_MAX]]=WEIGHTED, storage_prob:float=1.0, memory_decay_rate:Union[float,AUTO]=AUTO, enable_learning:Union[bool,list]=True, @@ -1545,7 +1609,6 @@ def __init__(self, if softmax_gain == CONTROL: self.parameters.softmax_gain.modulable = False - # Instantiate Composition ------------------------------------------------------------------------- super().__init__(name=name, @@ -1556,6 +1619,7 @@ def __init__(self, concatenate_keys = concatenate_keys, softmax_gain = softmax_gain, softmax_threshold = softmax_threshold, + softmax_choice = softmax_choice, storage_prob = storage_prob, memory_decay_rate = memory_decay_rate, normalize_memories = normalize_memories, @@ -1567,6 +1631,8 @@ def __init__(self, **kwargs ) + self._validate_softmax_choice(softmax_choice, enable_learning) + self._construct_pathways(self.memory_template, 
self.memory_capacity, self.field_weights, @@ -1574,6 +1640,7 @@ def __init__(self, self.normalize_memories, self.softmax_gain, self.softmax_threshold, + self.softmax_choice, self.storage_prob, self.memory_decay_rate, self.use_storage_node, @@ -1656,7 +1723,7 @@ def __init__(self, # ***************************************************************************************************************** # *********************************** Memory Construction Methods *********************************************** # ***************************************************************************************************************** - + #region def _validate_memory_specs(self, memory_template, memory_capacity, memory_fill, field_weights, field_names, name): """Validate the memory_template, field_weights, and field_names arguments """ @@ -1892,10 +1959,12 @@ def _parse_memory_shape(self, memory_template): num_fields = len(memory_template) if single_entry else len(memory_template[0]) return num_entries, num_fields + #endregion + # ***************************************************************************************************************** # ****************************** Nodes and Pathway Construction Methods ***************************************** # ***************************************************************************************************************** - + #region def _construct_pathways(self, memory_template, memory_capacity, @@ -1904,6 +1973,7 @@ def _construct_pathways(self, normalize_memories, softmax_gain, softmax_threshold, + softmax_choice, storage_prob, memory_decay_rate, use_storage_node, @@ -1931,7 +2001,8 @@ def _construct_pathways(self, self.softmax_nodes = self._construct_softmax_nodes(memory_capacity, field_weights, softmax_gain, - softmax_threshold) + softmax_threshold, + softmax_choice) self.field_weight_nodes = self._construct_field_weight_nodes(field_weights, concatenate_keys, use_gating_for_weighting) @@ -2131,7 +2202,13 @@ def _construct_match_nodes(self, memory_template, memory_capacity, concatenate_k return match_nodes - def _construct_softmax_nodes(self, memory_capacity, field_weights, softmax_gain, softmax_threshold)->list: + def _validate_softmax_choice(self, softmax_choice, enable_learning): + if softmax_choice == ARG_MAX and enable_learning: + raise EMCompositionError(f"The ARG_MAX option for the 'softmax_choice' arg of '{self.name}' " + f"can not be used when 'enable_learning' is set to True; " + f"use WEIGHTED or set 'enable_learning' to False.") + def _construct_softmax_nodes(self, memory_capacity, field_weights, + softmax_gain, softmax_threshold, softmax_choice)->list: """Create nodes that, for each key field, compute the softmax over the similarities between the input and the memories in the corresponding match_node. 
""" @@ -2151,6 +2228,7 @@ def _construct_softmax_nodes(self, memory_capacity, field_weights, softmax_gain, name=f'MATCH to SOFTMAX for {self.key_names[i]}')}, function=SoftMax(gain=softmax_gain, mask_threshold=softmax_threshold, + output=softmax_choice, adapt_entropy_weighting=.95), name='SOFTMAX' if len(self.match_nodes) == 1 else f'{self.key_names[i]} [SOFTMAX]') @@ -2275,8 +2353,11 @@ def _construct_retrieved_nodes(self, memory_template)->list: retrieved_nodes = self.retrieved_key_nodes + self.retrieved_value_nodes # Return nodes in order sorted by self.field_names + # IMPLEMENTATION NOTE: + # "in" is used below instead of "==" in case more than one EMComposition is created, + # in which case retrieved_nodes will have "-" appended to their name return [node for name in self.field_names for node in retrieved_nodes - if node in retrieved_nodes if (name + RETRIEVED_AFFIX) == node.name] + if node in retrieved_nodes if (name + RETRIEVED_AFFIX) in node.name] def _construct_storage_node(self, memory_template, @@ -2320,7 +2401,7 @@ def _construct_storage_node(self, for i in range(self.num_fields)], fields=[self.input_nodes[i] for i in range(self.num_fields)], field_types=[0 if weight == 0 else 1 for weight in field_weights], - concatenation_node=self.concatenate_keys_node, + concatenation_node=concatenate_keys_node, memory_matrix=memory_template, learning_signals=learning_signals, storage_prob=storage_prob, @@ -2342,11 +2423,12 @@ def _set_learning_attributes(self): else: projection.learnable = False + #endregion # ***************************************************************************************************************** # *********************************** Execution Methods ********************************************************** # ***************************************************************************************************************** - + # region def execute(self, inputs=None, context=None, @@ -2473,3 +2555,5 @@ def infer_backpropagation_learning_pathways(self, execution_mode, context=None): def do_gradient_optimization(self, retain_in_pnl_options, context, optimization_num=None): # 7/10/24 - MAKE THIS CONTEXT DEPENDENT: CALL super() IF BEING EXECUTED ON ITS OWN? 
pass + + #endregion diff --git a/tests/composition/test_emcomposition.py b/tests/composition/test_emcomposition.py index ecfc2f2ef19..73807aa442f 100644 --- a/tests/composition/test_emcomposition.py +++ b/tests/composition/test_emcomposition.py @@ -7,7 +7,7 @@ import psyneulink as pnl -from psyneulink.core.globals.keywords import AUTO, CONTROL +from psyneulink.core.globals.keywords import AUTO, CONTROL, ALL, MAX_VAL, MAX_INDICATOR, PROB from psyneulink.core.components.mechanisms.mechanism import Mechanism from psyneulink.library.compositions.emcomposition import EMComposition, EMCompositionError @@ -232,6 +232,23 @@ def test_memory_fill(start, memory_fill): elif repeat and repeat < memory_capacity: # Multi-entry specification and repeat = number entries; remainder test_memory_fill(start=repeat, memory_fill=memory_fill) + def test_softmax_choice(self): + for softmax_choice in [pnl.WEIGHTED, pnl.ARG_MAX]: + em = EMComposition(memory_template=[[[1,.1,.1]], [[.1,1,.1]], [[.1,.1,1]]], + softmax_choice=softmax_choice, + enable_learning=False) + result = em.run(inputs={em.query_input_nodes[0]:[[0,1,0]]}) + if softmax_choice == pnl.WEIGHTED: + np.testing.assert_allclose(result, [[0.21330295, 0.77339411, 0.21330295]]) + if softmax_choice == pnl.ARG_MAX: + np.testing.assert_allclose(result, [[.1, 1, .1]]) + + with pytest.raises(pnl.ComponentError) as error_text: + em = EMComposition(memory_template=[[[1,.1,.1]], [[.1,1,.1]], [[.1,.1,1]]], + softmax_choice=pnl.ARG_MAX) + assert ("The ARG_MAX option for the 'softmax_choice' arg of 'EM_Composition-2' can not be used " + "when 'enable_learning' is set to True; use WEIGHTED or set 'enable_learning' to False." + in str(error_text.value)) @pytest.mark.pytorch class TestExecution: diff --git a/tests/composition/test_parameterestimationcomposition.py b/tests/composition/test_parameterestimationcomposition.py index 12c39a64a3a..d6938818461 100644 --- a/tests/composition/test_parameterestimationcomposition.py +++ b/tests/composition/test_parameterestimationcomposition.py @@ -187,7 +187,7 @@ def test_parameter_optimization_ddm(func_mode, opt_method, expected_result): def reward_rate(sim_data): """ Objective function for PEC to optimize. This function takes in the simulation data, - a 3D array of shape (num_trials, num_estimates, num_outcome_vars), and returns a + a 3d array of shape (num_trials, num_estimates, num_outcome_vars), and returns a scalar value that is the reward rate. """ return np.mean(sim_data[:, :, 0][:] / sim_data[:, :, 1][:]) diff --git a/tests/mechanisms/test_control_mechanism.py b/tests/mechanisms/test_control_mechanism.py index ef622863db3..b67e32cd9dd 100644 --- a/tests/mechanisms/test_control_mechanism.py +++ b/tests/mechanisms/test_control_mechanism.py @@ -88,7 +88,7 @@ def test_lc_control_mech_basic(self, benchmark, mech_mode): expected = [[3.001397762387422]] # The difference in result shape is caused by shape mismatch in output port values. # The default shape is 1D, giving 2D overall result in compiled mode. - # The true results are 2D per port, giving 3D overall result in Python mode. + # The true results are 2D per port, giving 3d overall result in Python mode. 
if mech_mode == 'Python': expected = [[ex] for ex in expected] np.testing.assert_allclose(val, expected) diff --git a/tests/mechanisms/test_integrator_mechanism.py b/tests/mechanisms/test_integrator_mechanism.py index 22c950bfdea..934f6b4db4b 100644 --- a/tests/mechanisms/test_integrator_mechanism.py +++ b/tests/mechanisms/test_integrator_mechanism.py @@ -461,7 +461,7 @@ def test_FitzHughNagumo_simple_scalar(self, benchmark, mech_mode): expected = [[0.10501801629915011], [0.10501801629915011], [0.10501801629915011]] # The difference in results is caused by a shape mismatch; # default output port values are 1D, giving 2D results in compiled mode - # in reality Python returns 2D value per output port for a 3D result + # in reality Python returns 2D value per output port for a 3d result if mech_mode == 'Python': expected = [[e] for e in expected] From 8b3250f4e1e99c4d16fe8b1f0c5501cd8e9e564d Mon Sep 17 00:00:00 2001 From: jdcpni Date: Tue, 8 Oct 2024 07:58:46 -0400 Subject: [PATCH 325/410] Feat/emcomposition/softmax choice prob (#3060) * emcomposition.py - add PROBABILISTIC option for softmax_choice option - move validation of softmax_choice to learn() - add warning at construction if enable_learning==True and softmax_choice is ARG_MAX or PROBABILISTIC --- .../nonstateful/transferfunctions.py | 5 +- .../library/compositions/emcomposition.py | 56 +++++++++++-------- tests/composition/test_emcomposition.py | 18 +++--- 3 files changed, 45 insertions(+), 34 deletions(-) diff --git a/psyneulink/core/components/functions/nonstateful/transferfunctions.py b/psyneulink/core/components/functions/nonstateful/transferfunctions.py index dc035390ce7..1eac3390527 100644 --- a/psyneulink/core/components/functions/nonstateful/transferfunctions.py +++ b/psyneulink/core/components/functions/nonstateful/transferfunctions.py @@ -3188,7 +3188,7 @@ def _validate_adapt_entropy_weighting(self, adapt_entropy_weighting): return f'must be a scalar greater than 0' def _validate_output(self, output): - options = {ALL, MAX_VAL, MAX_INDICATOR, PROB} + options = {ALL, MAX_VAL, MAX_INDICATOR, PROB, PROB_INDICATOR} if output in options: return None else: @@ -3277,7 +3277,6 @@ def apply_softmax(self, input_value, gain, mask_threshold, output_type): sm = v / np.sum(v, axis=0) # Generate one-hot encoding based on selected output_type - if output_type in {MAX_VAL, MAX_INDICATOR}: return self.one_hot_function(sm) elif output_type in {PROB, PROB_INDICATOR}: @@ -3316,8 +3315,8 @@ def _function(self, if isinstance(gain, str) and gain == ADAPTIVE: gain = self.adapt_gain(variable, context) per_item = self._get_current_parameter_value(PER_ITEM, context) - # Compute softmax and assign to sm + # Compute softmax and assign to sm if per_item and len(np.shape(variable)) > 1: output = [] for item in variable: diff --git a/psyneulink/library/compositions/emcomposition.py b/psyneulink/library/compositions/emcomposition.py index 6a264a55b3f..880ea4e80f6 100644 --- a/psyneulink/library/compositions/emcomposition.py +++ b/psyneulink/library/compositions/emcomposition.py @@ -55,7 +55,7 @@ # - define "key weights" explicitly as field_weights for all non-zero values # - make it clear that full size of memory is initialized (rather than "filling up" w/ use) # - write examples for run() -# - FIX: ADD NOISE (AND/OR SOFTMAX PROBABILISTIC RETRIEVAL MODE) +# - FIX: ADD NOISE # - FIX: ?ADD add_memory() METHOD FOR STORING W/O RETRIEVAL, OR JUST ADD retrieval_prob AS modulable Parameter # - FIX: CONFIDENCE COMPUTATION (USING SIGMOID ON DOT PRODUCTS) AND REPORT 
THAT (EVEN ON FIRST CALL) # - FIX: ALLOW SOFTMAX SPEC TO BE A DICT WITH PARAMETERS FOR _get_softmax_gain() FUNCTION @@ -322,7 +322,7 @@ a weighted average of the retrieved values across all fields, which is returned as the `result ` of the EMComposition's `execution ` (an EMComposition can also be configured to return the entry with the highest dot product weighted by field, however then it is not compatible with learning; -see `softmax_choice `). +see `softmax_choice `). COMMENT: TBD DISTANCE ATTRIBUTES: @@ -540,17 +540,17 @@ * *ARG_MAX*: entry with the largest dot product. + * *PROBABISTIC*: probabilistically chosen entry based on softmax-transformed distribution of dot products. + .. warning:: - Use of the *ARG_MAX* option is not compatible with learning, as it implements a discrete choice and thus is not - differentiable; use of this with `enable_learning ` set to ``True`` will generate - an error. + Use of the *ARG_MAX* and *PROBABILISTIC* options is not compatible with learning, as these implement a discrete + choice and thus are not differentiable. Constructing an EMComposition with **softmax_choice** set to either of + these options and **enable_learning** set to True will generate a warning, and calling the EMComposition's + `learn ` method will generate an error; it must be changed to *WEIGHTED* to execute learning. .. technical_note:: - The *WEIGHTED* option is passed as *ALL* to the **output** argument of the `SoftMax` Function, and - *ARG_MAX* is passed as *MAX_INDICATOR*; the *MAX_VAL* and *PROB* arguments are not currently. - COMMENT: - * *PROB*: probabilistically-chosen entry, based on the softmax transformation of thee dot products. - COMMENT + The *WEIGHTED* option is passed as *ALL* to the **output** argument of the `SoftMax` Function, *ARG_MAX* is + passed as *MAX_INDICATOR*; *PROBALISTIC* is passed as *PROB_INDICATOR*; and *MAX_VAL* is not currently supported. .. _EMComposition_Learning: @@ -1008,20 +1008,22 @@ from psyneulink.core.components.mechanisms.modulatory.control.gating.gatingmechanism import GatingMechanism from psyneulink.core.components.projections.pathway.mappingprojection import MappingProjection from psyneulink.core.globals.parameters import Parameter, check_user_specified +from psyneulink.core.globals.context import handle_external_context from psyneulink.core.globals.keywords import \ - (ADAPTIVE, ALL, AUTO, CONTROL, DEFAULT_INPUT, DEFAULT_VARIABLE, EM_COMPOSITION, FULL_CONNECTIVITY_MATRIX, - GAIN, IDENTITY_MATRIX, MAX_INDICATOR, MULTIPLICATIVE_PARAM, NAME, PARAMS, PRODUCT, PROJECTIONS, + (ADAPTIVE, ALL, AUTO, CONTEXT, CONTROL, DEFAULT_INPUT, DEFAULT_VARIABLE, EM_COMPOSITION, FULL_CONNECTIVITY_MATRIX, + GAIN, IDENTITY_MATRIX, MAX_INDICATOR, MULTIPLICATIVE_PARAM, NAME, PARAMS, PROB_INDICATOR, PRODUCT, PROJECTIONS, RANDOM, SIZE, VARIABLE) from psyneulink.core.globals.utilities import convert_all_elements_to_np_array, is_numeric_scalar from psyneulink.core.globals.context import ContextFlags from psyneulink.core.llvm import ExecutionMode -__all__ = ['EMComposition', 'WEIGHTED', 'ARG_MAX'] +__all__ = ['EMComposition', 'WEIGHTED', 'ARG_MAX', 'PROBABILISTIC'] STORAGE_PROB = 'storage_prob' WEIGHTED = ALL ARG_MAX = MAX_INDICATOR +PROBABILISTIC = PROB_INDICATOR QUERY_AFFIX = ' [QUERY]' VALUE_AFFIX = ' [VALUE]' @@ -1143,9 +1145,9 @@ class EMComposition(AutodiffComposition): specifies the threshold used to mask out small values in the softmax calculation; see *mask_threshold* under `Thresholding and Adaptive Gain ` for details). 
- softmax_choice : WEIGHTED, ARG_MAX : default WEIGHTED - specifies how the softmax over dot products of keys and memories is used for retrieval; see `Softmax - normalize matches over fields ` description of each option. + softmax_choice : WEIGHTED, ARG_MAX, PROBABILISTIC : default WEIGHTED + specifies how the softmax over dot products of keys and memories is used for retrieval; + see `softmax_choice ` for a description of each option. storage_prob : float : default 1.0 specifies the probability that an item will be stored in `memory ` @@ -1241,9 +1243,9 @@ class EMComposition(AutodiffComposition): determines the threshold used to mask out small values in the softmax calculation; see *mask_threshold* under `Thresholding and Adaptive Gain ` for details). - softmax_choice : WEIGHTED or ARG_MAX - determines how the softmax over dot products of keys and memories is used for retrieval; see `Softmax - normalize matches over fields ` description of each option. + softmax_choice : WEIGHTED, ARG_MAX or PROBABILISTIC + determines how the softmax over dot products of keys and memories is used for retrieval; + see `softmax_choice ` for a description of each option. storage_prob : float determines the probability that an item will be stored in `memory ` @@ -1573,7 +1575,7 @@ def __init__(self, normalize_memories:bool=True, softmax_gain:Union[float, ADAPTIVE, CONTROL]=1.0, softmax_threshold:Optional[float]=.001, - softmax_choice:Optional[Union[WEIGHTED, ARG_MAX]]=WEIGHTED, + softmax_choice:Optional[Union[WEIGHTED, ARG_MAX, PROBABILISTIC]]=WEIGHTED, storage_prob:float=1.0, memory_decay_rate:Union[float,AUTO]=AUTO, enable_learning:Union[bool,list]=True, @@ -2203,10 +2205,11 @@ def _construct_match_nodes(self, memory_template, memory_capacity, concatenate_k return match_nodes def _validate_softmax_choice(self, softmax_choice, enable_learning): - if softmax_choice == ARG_MAX and enable_learning: - raise EMCompositionError(f"The ARG_MAX option for the 'softmax_choice' arg of '{self.name}' " - f"can not be used when 'enable_learning' is set to True; " - f"use WEIGHTED or set 'enable_learning' to False.") + if softmax_choice in {ARG_MAX, PROBABILISTIC} and enable_learning: + warnings.warn(f"The 'softmax_choice' arg of '{self.name}' is set to {softmax_choice} with " + f"'enable_learning' set to True; this will generate an error if its 'learn' " + f"method is called; set 'softmax_choice' to WEIGHTED to use learning.") + def _construct_softmax_nodes(self, memory_capacity, field_weights, softmax_gain, softmax_threshold, softmax_choice)->list: """Create nodes that, for each key field, compute the softmax over the similarities between the input and the @@ -2516,7 +2519,12 @@ def _encode_memory(self, context=None): self.retrieved_nodes[i].path_afferents[0].parameters.matrix.set(field_memories, context) # 7/10/24 - FIX: WHY BOTHER WITH OVERRIDE IF NOTHING IS DONE: + @handle_external_context() def learn(self, *args, **kwargs)->list: + arg = self.parameters.softmax_choice.get(kwargs[CONTEXT]) + if arg in {ARG_MAX, PROBABILISTIC}: + raise EMCompositionError(f"The ARG_MAX and PROBABILISTIC options for the 'softmax_choice' arg " + f"of '{self.name}' cannot be used during learning; change to WEIGHTED.") return super().learn(*args, **kwargs) def _get_execution_mode(self, execution_mode): diff --git a/tests/composition/test_emcomposition.py b/tests/composition/test_emcomposition.py index 73807aa442f..d37a8455b14 100644 --- a/tests/composition/test_emcomposition.py +++ b/tests/composition/test_emcomposition.py @@ -233,7 +233,7 
@@ def test_memory_fill(start, memory_fill): test_memory_fill(start=repeat, memory_fill=memory_fill) def test_softmax_choice(self): - for softmax_choice in [pnl.WEIGHTED, pnl.ARG_MAX]: + for softmax_choice in [pnl.WEIGHTED, pnl.ARG_MAX, pnl.PROBABILISTIC]: em = EMComposition(memory_template=[[[1,.1,.1]], [[.1,1,.1]], [[.1,.1,1]]], softmax_choice=softmax_choice, enable_learning=False) @@ -242,13 +242,17 @@ def test_softmax_choice(self): np.testing.assert_allclose(result, [[0.21330295, 0.77339411, 0.21330295]]) if softmax_choice == pnl.ARG_MAX: np.testing.assert_allclose(result, [[.1, 1, .1]]) + if softmax_choice == pnl.PROBABILISTIC: # NOTE: actual stochasticity not tested here + np.testing.assert_allclose(result, [[.1, 1, .1]]) + + em = EMComposition(memory_template=[[[1,.1,.1]], [[.1,1,.1]], [[.1,.1,1]]]) + for softmax_choice in [pnl.ARG_MAX, pnl.PROBABILISTIC]: + with pytest.raises(pnl.ComponentError) as error_text: + em.parameters.softmax_choice.set(softmax_choice) + em.learn() + assert (f"The ARG_MAX and PROBABILISTIC options for the 'softmax_choice' arg " + f"of '{em.name}' cannot be used during learning; change to WEIGHTED." in str(error_text.value)) - with pytest.raises(pnl.ComponentError) as error_text: - em = EMComposition(memory_template=[[[1,.1,.1]], [[.1,1,.1]], [[.1,.1,1]]], - softmax_choice=pnl.ARG_MAX) - assert ("The ARG_MAX option for the 'softmax_choice' arg of 'EM_Composition-2' can not be used " - "when 'enable_learning' is set to True; use WEIGHTED or set 'enable_learning' to False." - in str(error_text.value)) @pytest.mark.pytorch class TestExecution: From 253f8ca17c3c21d6f8e65dc199a1dae53615d963 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 10 Oct 2024 11:49:14 -0400 Subject: [PATCH 326/410] requirements: update pytest-pycodestyle requirement (#3061) Updates the requirements on [pytest-pycodestyle](https://github.com/henry0312/pytest-pycodestyle) to permit the latest version. - [Release notes](https://github.com/henry0312/pytest-pycodestyle/releases) - [Commits](https://github.com/henry0312/pytest-pycodestyle/compare/v2.3.0...v2.4.0) --- updated-dependencies: - dependency-name: pytest-pycodestyle dependency-type: direct:development ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- dev_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev_requirements.txt b/dev_requirements.txt index f9b148b20d2..1fd284fccfc 100644 --- a/dev_requirements.txt +++ b/dev_requirements.txt @@ -6,6 +6,6 @@ pytest-cov<5.0.1 pytest-forked<1.7.0 pytest-helpers-namespace<2021.12.30 pytest-profiling<1.7.1 -pytest-pycodestyle<2.4.0 +pytest-pycodestyle<2.5.0 pytest-pydocstyle<2.4.0 pytest-xdist>=3.2.0, <3.7.0 From 9534a9d9900aa22756f7dbb6d2b177c90bfbcd20 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 10 Oct 2024 19:58:02 -0400 Subject: [PATCH 327/410] requirements: update pytest-pydocstyle requirement from <2.4.0 to <2.5.0 (#3064) Updates the requirements on [pytest-pydocstyle](https://github.com/henry0312/pytest-pydocstyle) to permit the latest version. - [Release notes](https://github.com/henry0312/pytest-pydocstyle/releases) - [Commits](https://github.com/henry0312/pytest-pydocstyle/compare/v2.3.0...v2.4.0) --- updated-dependencies: - dependency-name: pytest-pydocstyle dependency-type: direct:development ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- dev_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev_requirements.txt b/dev_requirements.txt index 1fd284fccfc..d474ca88134 100644 --- a/dev_requirements.txt +++ b/dev_requirements.txt @@ -7,5 +7,5 @@ pytest-forked<1.7.0 pytest-helpers-namespace<2021.12.30 pytest-profiling<1.7.1 pytest-pycodestyle<2.5.0 -pytest-pydocstyle<2.4.0 +pytest-pydocstyle<2.5.0 pytest-xdist>=3.2.0, <3.7.0 From 962807bbf58833d756b46d35c1d54f0e4b45db41 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Fri, 11 Oct 2024 10:07:22 -0400 Subject: [PATCH 328/410] treewide: Use ndarray.item() to convert single element numpy arrays to scalars (#3063) Reduces number of warnings from ~84000 -> ~11000 (~87%) Signed-off-by: Jan Vesely --- psyneulink/core/components/functions/function.py | 4 ++-- .../functions/nonstateful/combinationfunctions.py | 2 +- .../functions/nonstateful/distributionfunctions.py | 10 +++++----- .../components/functions/stateful/memoryfunctions.py | 12 ++++++------ .../ports/modulatorysignals/controlsignal.py | 2 +- psyneulink/core/compositions/report.py | 4 ++-- psyneulink/core/scheduling/condition.py | 2 +- .../mechanisms/processing/integrator/ddm.py | 2 +- psyneulink/library/compositions/regressioncfa.py | 2 +- 9 files changed, 20 insertions(+), 20 deletions(-) diff --git a/psyneulink/core/components/functions/function.py b/psyneulink/core/components/functions/function.py index d9be6640493..a63a5a23976 100644 --- a/psyneulink/core/components/functions/function.py +++ b/psyneulink/core/components/functions/function.py @@ -375,7 +375,7 @@ def _random_state_getter(self, owning_component, context, modulated=False): # 'has_modulation' indicates that seed has an active modulatory projection # 'modulated' indicates that the modulated value is requested if has_modulation and modulated: - seed_value = [int(owning_component._get_current_parameter_value(seed_param, context))] + seed_value = [int(owning_component._get_current_parameter_value(seed_param, context).item())] else: seed_value = [int(seed_param._get(context=context))] @@ -832,7 +832,7 @@ def convert_output_type(self, value, output_type=None): # Note: if 2D or 1D array has more than two items, generate exception elif output_type is FunctionOutputType.NP_0D_ARRAY: if object_has_single_value(value): - value = np.array(float(value)) + value = np.asfarray(value) else: raise FunctionError(f"Can't convert value ({value}) with more than a single number to a raw number.") diff --git a/psyneulink/core/components/functions/nonstateful/combinationfunctions.py b/psyneulink/core/components/functions/nonstateful/combinationfunctions.py index e4011db2eab..1831048c769 100644 --- a/psyneulink/core/components/functions/nonstateful/combinationfunctions.py +++ b/psyneulink/core/components/functions/nonstateful/combinationfunctions.py @@ -2191,7 +2191,7 @@ def _function(self, delta values : 1d np.array """ - gamma = self._get_current_parameter_value(GAMMA, context) + gamma = self._get_current_parameter_value(GAMMA, context).item() sample = variable[0] reward = variable[1] delta = np.zeros(sample.shape) diff --git a/psyneulink/core/components/functions/nonstateful/distributionfunctions.py b/psyneulink/core/components/functions/nonstateful/distributionfunctions.py index b5126da2946..838f340dc93 100644 --- a/psyneulink/core/components/functions/nonstateful/distributionfunctions.py +++ 
b/psyneulink/core/components/functions/nonstateful/distributionfunctions.py @@ -1225,13 +1225,13 @@ def _function(self, """ - attentional_drift_rate = float(self._get_current_parameter_value(DRIFT_RATE, context)) - stimulus_drift_rate = float(variable) + attentional_drift_rate = self._get_current_parameter_value(DRIFT_RATE, context).item() + stimulus_drift_rate = variable.item() drift_rate = attentional_drift_rate * stimulus_drift_rate threshold = self._get_current_parameter_value(THRESHOLD, context) - starting_value = float(self._get_current_parameter_value(STARTING_VALUE, context)) - noise = float(self._get_current_parameter_value(NOISE, context)) - non_decision_time = float(self._get_current_parameter_value(NON_DECISION_TIME, context)) + starting_value = self._get_current_parameter_value(STARTING_VALUE, context).item() + noise = self._get_current_parameter_value(NOISE, context).item() + non_decision_time = self._get_current_parameter_value(NON_DECISION_TIME, context).item() # drift_rate = float(self.drift_rate) * float(variable) # threshold = float(self.threshold) diff --git a/psyneulink/core/components/functions/stateful/memoryfunctions.py b/psyneulink/core/components/functions/stateful/memoryfunctions.py index 9ca229f552e..38a07307527 100644 --- a/psyneulink/core/components/functions/stateful/memoryfunctions.py +++ b/psyneulink/core/components/functions/stateful/memoryfunctions.py @@ -396,10 +396,10 @@ def _distance_field_weights_setter(value, owning_component=None, context=None): # NOTE: need the following to accommodate various forms of specification (single value, None's, etc) # that are resolved elsewhere # FIX: STANDARDIZE FORMAT FOR FIELDWEIGHTS HERE (AS LIST OF INTS) AND GET RID OF THE FOLLOWING - test_val = np.array([int(val) if val else 0 for val in value]) - test_val = np.full(len(variable),test_val) if len(test_val) == 1 else test_val - test_curr_field_weights = np.array([int(val) if val else 0 for val in current_field_weights]) - test_curr_field_weights = (np.full(len(variable),test_curr_field_weights) if len(variable) == 1 + test_val = np.array([int(np.array(val).item()) if val else 0 for val in value]) + test_val = np.full(len(variable), test_val) if len(test_val) == 1 else test_val + test_curr_field_weights = np.array([int(np.array(val).item()) if val else 0 for val in current_field_weights]) + test_curr_field_weights = (np.full(len(variable), test_curr_field_weights) if len(variable) == 1 else test_curr_field_weights) if np.all(test_curr_field_weights == test_val) and not owning_component.is_initializing: pass @@ -2777,8 +2777,8 @@ def get_memory(self, query_key:Union[list, np.ndarray], context=None): indices_of_selected_items = np.flatnonzero(selection_array) # Single key identified - if len(indices_of_selected_items)==1: - index_of_selected_item = int(np.flatnonzero(selection_array)) + if len(indices_of_selected_items) == 1: + index_of_selected_item = int(np.flatnonzero(selection_array).item()) # More than one key identified else: selected_keys = _memory[KEYS] diff --git a/psyneulink/core/components/ports/modulatorysignals/controlsignal.py b/psyneulink/core/components/ports/modulatorysignals/controlsignal.py index 6a6513a9901..423f1e5662a 100644 --- a/psyneulink/core/components/ports/modulatorysignals/controlsignal.py +++ b/psyneulink/core/components/ports/modulatorysignals/controlsignal.py @@ -1112,6 +1112,6 @@ def compute_costs(self, intensity, context=None): all_costs = [[intensity_cost, adjustment_cost, duration_cost]] # Combine the costs. 
Convert to a float because reRedcu - combined_cost = float(self.combine_costs_function(all_costs, context=context)) + combined_cost = float(self.combine_costs_function(all_costs, context=context).item()) return max(0.0, combined_cost) diff --git a/psyneulink/core/compositions/report.py b/psyneulink/core/compositions/report.py index 25b7784b2ca..aadb87b3825 100644 --- a/psyneulink/core/compositions/report.py +++ b/psyneulink/core/compositions/report.py @@ -1380,9 +1380,9 @@ def node_execution_report(self, # FIX: kmantel: previous version would fail on anything but iterables of things that can be cast to floats # if you want more specific output, you can add conditional tests here try: - input_string = [float("{:0.3}".format(float(i))) for i in input_val].__str__().strip("[]") + input_string = ", ".join([np.format_float_positional(i.item(), precision=2, trim='0') for i in input_val]) # input_string = re.sub(r'[\[,\],\n]', '', str([float("{:0.3}".format(float(i))) for i in input_val])) - except TypeError: + except ValueError: input_string = node.parameters.variable.get(context) input_report = f"input: {input_string}" diff --git a/psyneulink/core/scheduling/condition.py b/psyneulink/core/scheduling/condition.py index bc617f5fa01..34e051157bd 100644 --- a/psyneulink/core/scheduling/condition.py +++ b/psyneulink/core/scheduling/condition.py @@ -324,7 +324,7 @@ def func(threshold, comparator, indices, atol, rtol, execution_id): for i in indices: param_value = param_value[i] - param_value = float(param_value) + param_value = float(np.array(param_value).item()) if comparator == '==': return np.isclose(param_value, threshold, atol=atol, rtol=rtol) diff --git a/psyneulink/library/components/mechanisms/processing/integrator/ddm.py b/psyneulink/library/components/mechanisms/processing/integrator/ddm.py index de0f9e40642..b210657a8d8 100644 --- a/psyneulink/library/components/mechanisms/processing/integrator/ddm.py +++ b/psyneulink/library/components/mechanisms/processing/integrator/ddm.py @@ -1110,7 +1110,7 @@ def _execute( format(self.function.name, self.name)) # Convert ER to decision variable: - threshold = float(self.function._get_current_parameter_value(THRESHOLD, context)) + threshold = float(self.function._get_current_parameter_value(THRESHOLD, context).item()) random_state = self._get_current_parameter_value(self.parameters.random_state, context) if random_state.uniform() < return_value[self.PROBABILITY_LOWER_THRESHOLD_INDEX]: return_value[self.DECISION_VARIABLE_INDEX] = np.atleast_1d(-1 * threshold) diff --git a/psyneulink/library/compositions/regressioncfa.py b/psyneulink/library/compositions/regressioncfa.py index be8cbd4dc63..345ca573940 100644 --- a/psyneulink/library/compositions/regressioncfa.py +++ b/psyneulink/library/compositions/regressioncfa.py @@ -638,7 +638,7 @@ def compute_terms(self, control_allocation, context=None): # Compute value of each control_signal from its variable c = np.zeros((len(control_allocation), )) for i, var in enumerate(control_allocation): - c[i] = self.control_signal_functions[i](var, context=context) + c[i] = self.control_signal_functions[i](var, context=context).item() computed_terms[PV.C] = c # Compute costs for new control_signal values From 6e6a78d5348c4fd4f25c28044fac48fc78d049c4 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Fri, 11 Oct 2024 10:59:17 -0400 Subject: [PATCH 329/410] llvm/OneHot: Codestyle cleanup Signed-off-by: Jan Vesely --- .../nonstateful/selectionfunctions.py | 72 +++++++++++-------- 1 file changed, 43 insertions(+), 29 
deletions(-) diff --git a/psyneulink/core/components/functions/nonstateful/selectionfunctions.py b/psyneulink/core/components/functions/nonstateful/selectionfunctions.py index 164801fa377..8209ef60bf8 100644 --- a/psyneulink/core/components/functions/nonstateful/selectionfunctions.py +++ b/psyneulink/core/components/functions/nonstateful/selectionfunctions.py @@ -259,91 +259,105 @@ def _validate_params(self, request_set, target_set=None, context=None): format(MODE, self.__class__.__name__, Function.__name__, PROB, prob_dist)) def _gen_llvm_function_body(self, ctx, builder, params, state, arg_in, arg_out, *, tags:frozenset): - idx_ptr = builder.alloca(ctx.int32_ty) - builder.store(ctx.int32_ty(0), idx_ptr) + best_idx_ptr = builder.alloca(ctx.int32_ty) + builder.store(best_idx_ptr.type.pointee(0), best_idx_ptr) if self.mode in {PROB, PROB_INDICATOR}: - dice_ptr = builder.alloca(ctx.float_ty) + sum_ptr = builder.alloca(ctx.float_ty) + builder.store(sum_ptr.type.pointee(-0.0), sum_ptr) + + random_draw_ptr = builder.alloca(ctx.float_ty) rand_state_ptr = ctx.get_random_state_ptr(builder, self, state, params) rng_f = ctx.get_uniform_dist_function_by_state(rand_state_ptr) - builder.call(rng_f, [rand_state_ptr, dice_ptr]) - dice = builder.load(dice_ptr) - sum_ptr = builder.alloca(ctx.float_ty) - builder.store(ctx.float_ty(-0.0), sum_ptr) + builder.call(rng_f, [rand_state_ptr, random_draw_ptr]) + random_draw = builder.load(random_draw_ptr) + prob_in = builder.gep(arg_in, [ctx.int32_ty(0), ctx.int32_ty(1)]) arg_in = builder.gep(arg_in, [ctx.int32_ty(0), ctx.int32_ty(0)]) - with pnlvm.helpers.array_ptr_loop(builder, arg_in, "search") as (b1, index): - idx = b1.load(idx_ptr) - prev_ptr = b1.gep(arg_in, [ctx.int32_ty(0), idx]) - current_ptr = b1.gep(arg_in, [ctx.int32_ty(0), index]) - prev = b1.load(prev_ptr) + with pnlvm.helpers.array_ptr_loop(builder, arg_in, "search") as (b1, idx): + best_idx = b1.load(best_idx_ptr) + best_ptr = b1.gep(arg_in, [ctx.int32_ty(0), best_idx]) + + current_ptr = b1.gep(arg_in, [ctx.int32_ty(0), idx]) current = b1.load(current_ptr) - prev_res_ptr = b1.gep(arg_out, [ctx.int32_ty(0), idx]) - cur_res_ptr = b1.gep(arg_out, [ctx.int32_ty(0), index]) if self.mode not in {PROB, PROB_INDICATOR}: fabs = ctx.get_builtin("fabs", [current.type]) + prev_best = b1.load(best_ptr) + if self.mode == MAX_VAL: cmp_op = ">=" - cmp_prev = prev + cmp_prev = prev_best cmp_curr = current val = current + elif self.mode == MAX_ABS_VAL: cmp_op = ">=" - cmp_prev = b1.call(fabs, [prev]) + cmp_prev = b1.call(fabs, [prev_best]) cmp_curr = b1.call(fabs, [current]) val = b1.call(fabs, [current]) + elif self.mode == MAX_INDICATOR: cmp_op = ">=" - cmp_prev = prev + cmp_prev = prev_best cmp_curr = current val = current.type(1.0) + elif self.mode == MAX_ABS_INDICATOR: cmp_op = ">=" - cmp_prev = b1.call(fabs, [prev]) + cmp_prev = b1.call(fabs, [prev_best]) cmp_curr = b1.call(fabs, [current]) val = current.type(1.0) + elif self.mode == MIN_VAL: cmp_op = "<=" - cmp_prev = prev + cmp_prev = prev_best cmp_curr = current val = current + elif self.mode == MIN_ABS_VAL: cmp_op = "<=" - cmp_prev = b1.call(fabs, [prev]) + cmp_prev = b1.call(fabs, [prev_best]) cmp_curr = b1.call(fabs, [current]) val = current + elif self.mode == MIN_INDICATOR: cmp_op = "<=" - cmp_prev = prev + cmp_prev = prev_best cmp_curr = current val = current.type(1.0) + elif self.mode == MIN_ABS_INDICATOR: cmp_op = "<=" - cmp_prev = b1.call(fabs, [prev]) + cmp_prev = b1.call(fabs, [prev_best]) cmp_curr = b1.call(fabs, [current]) val = 
current.type(1.0) + elif self.mode in {PROB, PROB_INDICATOR}: # Update prefix sum - current_prob_ptr = b1.gep(prob_in, [ctx.int32_ty(0), index]) + current_prob_ptr = b1.gep(prob_in, [ctx.int32_ty(0), idx]) sum_old = b1.load(sum_ptr) sum_new = b1.fadd(sum_old, b1.load(current_prob_ptr)) b1.store(sum_new, sum_ptr) - old_below = b1.fcmp_ordered("<=", sum_old, dice) - new_above = b1.fcmp_ordered("<", dice, sum_new) + old_below = b1.fcmp_ordered("<=", sum_old, random_draw) + new_above = b1.fcmp_ordered("<", random_draw, sum_new) cond = b1.and_(new_above, old_below) - cmp_prev = ctx.float_ty(1.0) - cmp_curr = b1.select(cond, cmp_prev, ctx.float_ty(0.0)) + + cmp_prev = current.type(1.0) + cmp_curr = b1.select(cond, cmp_prev, cmp_prev.type(0.0)) cmp_op = "==" if self.mode == PROB: val = current else: - val = ctx.float_ty(1.0) + val = current.type(1.0) else: assert False, "Unsupported mode: {}".format(self.mode) + prev_res_ptr = b1.gep(arg_out, [ctx.int32_ty(0), best_idx]) + cur_res_ptr = b1.gep(arg_out, [ctx.int32_ty(0), idx]) + # Make sure other elements are zeroed builder.store(cur_res_ptr.type.pointee(0), cur_res_ptr) @@ -351,7 +365,7 @@ def _gen_llvm_function_body(self, ctx, builder, params, state, arg_in, arg_out, with builder.if_then(cmp_res): builder.store(prev_res_ptr.type.pointee(0), prev_res_ptr) builder.store(val, cur_res_ptr) - builder.store(index, idx_ptr) + builder.store(idx, best_idx_ptr) return builder From 67317050dc14400e9d08f3bc6c3baca6a8f10407 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Fri, 11 Oct 2024 12:58:36 -0400 Subject: [PATCH 330/410] llvm/OneHot: Use sharp inequalities and unordered comparison Use NaN as the initial best value. This combines to returning the first, rather than the last of multiple extreme elements. Signed-off-by: Jan Vesely --- .../nonstateful/selectionfunctions.py | 24 +++++++++++-------- 1 file changed, 14 insertions(+), 10 deletions(-) diff --git a/psyneulink/core/components/functions/nonstateful/selectionfunctions.py b/psyneulink/core/components/functions/nonstateful/selectionfunctions.py index 8209ef60bf8..2fb47c144cb 100644 --- a/psyneulink/core/components/functions/nonstateful/selectionfunctions.py +++ b/psyneulink/core/components/functions/nonstateful/selectionfunctions.py @@ -284,52 +284,56 @@ def _gen_llvm_function_body(self, ctx, builder, params, state, arg_in, arg_out, if self.mode not in {PROB, PROB_INDICATOR}: fabs = ctx.get_builtin("fabs", [current.type]) - prev_best = b1.load(best_ptr) + + is_first = b1.icmp_unsigned("==", idx, idx.type(0)) + + # Allow the first element to win the comparison + prev_best = b1.select(is_first, best_ptr.type.pointee(float("NaN")), b1.load(best_ptr)) if self.mode == MAX_VAL: - cmp_op = ">=" + cmp_op = ">" cmp_prev = prev_best cmp_curr = current val = current elif self.mode == MAX_ABS_VAL: - cmp_op = ">=" + cmp_op = ">" cmp_prev = b1.call(fabs, [prev_best]) cmp_curr = b1.call(fabs, [current]) val = b1.call(fabs, [current]) elif self.mode == MAX_INDICATOR: - cmp_op = ">=" + cmp_op = ">" cmp_prev = prev_best cmp_curr = current val = current.type(1.0) elif self.mode == MAX_ABS_INDICATOR: - cmp_op = ">=" + cmp_op = ">" cmp_prev = b1.call(fabs, [prev_best]) cmp_curr = b1.call(fabs, [current]) val = current.type(1.0) elif self.mode == MIN_VAL: - cmp_op = "<=" + cmp_op = "<" cmp_prev = prev_best cmp_curr = current val = current elif self.mode == MIN_ABS_VAL: - cmp_op = "<=" + cmp_op = "<" cmp_prev = b1.call(fabs, [prev_best]) cmp_curr = b1.call(fabs, [current]) val = current elif self.mode == MIN_INDICATOR: - 
cmp_op = "<=" + cmp_op = "<" cmp_prev = prev_best cmp_curr = current val = current.type(1.0) elif self.mode == MIN_ABS_INDICATOR: - cmp_op = "<=" + cmp_op = "<" cmp_prev = b1.call(fabs, [prev_best]) cmp_curr = b1.call(fabs, [current]) val = current.type(1.0) @@ -361,7 +365,7 @@ def _gen_llvm_function_body(self, ctx, builder, params, state, arg_in, arg_out, # Make sure other elements are zeroed builder.store(cur_res_ptr.type.pointee(0), cur_res_ptr) - cmp_res = builder.fcmp_ordered(cmp_op, cmp_curr, cmp_prev) + cmp_res = builder.fcmp_unordered(cmp_op, cmp_curr, cmp_prev) with builder.if_then(cmp_res): builder.store(prev_res_ptr.type.pointee(0), prev_res_ptr) builder.store(val, cur_res_ptr) From 9d5cc1d7030990d94c55ed8f668835096c79980a Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Fri, 11 Oct 2024 13:36:18 -0400 Subject: [PATCH 331/410] llvm/OneHot: Return absolute value in MIN_ABS_VAL mode Add MIN_ABS_VAL Neg test. Refactor tests to include names on the same line instead of an external list. Signed-off-by: Jan Vesely --- .../nonstateful/selectionfunctions.py | 2 +- tests/functions/test_selection.py | 48 +++++++------------ 2 files changed, 18 insertions(+), 32 deletions(-) diff --git a/psyneulink/core/components/functions/nonstateful/selectionfunctions.py b/psyneulink/core/components/functions/nonstateful/selectionfunctions.py index 2fb47c144cb..55ac699e7c9 100644 --- a/psyneulink/core/components/functions/nonstateful/selectionfunctions.py +++ b/psyneulink/core/components/functions/nonstateful/selectionfunctions.py @@ -324,7 +324,7 @@ def _gen_llvm_function_body(self, ctx, builder, params, state, arg_in, arg_out, cmp_op = "<" cmp_prev = b1.call(fabs, [prev_best]) cmp_curr = b1.call(fabs, [current]) - val = current + val = b1.call(fabs, [current]) elif self.mode == MIN_INDICATOR: cmp_op = "<" diff --git a/tests/functions/test_selection.py b/tests/functions/test_selection.py index 7d71976f990..75deaad78b1 100644 --- a/tests/functions/test_selection.py +++ b/tests/functions/test_selection.py @@ -24,36 +24,22 @@ llvm_res['fp32'][expected_philox_ind] = (1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0) test_data = [ - (Functions.OneHot, test_var, {'mode':kw.MAX_VAL}, (0., 0., 0., 0., 0., 0., 0., 0., 0.92732552, 0.)), - (Functions.OneHot, test_var, {'mode':kw.MAX_ABS_VAL}, (0., 0., 0., 0., 0., 0., 0., 0., 0.92732552, 0.)), - (Functions.OneHot, -test_var, {'mode':kw.MAX_ABS_VAL}, (0., 0., 0., 0., 0., 0., 0., 0., 0.92732552, 0.)), - (Functions.OneHot, test_var, {'mode':kw.MAX_INDICATOR}, (0., 0., 0., 0., 0., 0., 0., 0., 1., 0.)), - (Functions.OneHot, test_var, {'mode':kw.MAX_ABS_INDICATOR}, (0., 0., 0., 0., 0., 0., 0., 0., 1., 0.)), - (Functions.OneHot, test_var, {'mode':kw.MIN_VAL}, (0., 0., 0., 0., 0., 0., 0., 0., 0., -0.23311696)), - (Functions.OneHot, test_var, {'mode':kw.MIN_ABS_VAL}, (0., 0., 0., 0.08976637, 0., 0., 0., 0., 0., 0.)), - (Functions.OneHot, test_var, {'mode':kw.MIN_INDICATOR}, (0., 0., 0., 0., 0., 0., 0., 0., 0., 1.)), - (Functions.OneHot, test_var, {'mode':kw.MIN_ABS_INDICATOR}, (0., 0., 0., 1.,0., 0., 0., 0., 0., 0.)), - (Functions.OneHot, [test_var, test_prob], {'mode':kw.PROB}, (0., 0., 0., 0.08976636599379373, 0., 0., 0., 0., 0., 0.)), - (Functions.OneHot, [test_var, test_prob], {'mode':kw.PROB_INDICATOR}, (0., 0., 0., 1., 0., 0., 0., 0., 0., 0.)), - (Functions.OneHot, [test_var, test_philox], {'mode':kw.PROB}, expected_philox_prob), - (Functions.OneHot, [test_var, test_philox], {'mode':kw.PROB_INDICATOR}, expected_philox_ind), -] - -# use list, naming function produces 
ugly names -names = [ - "OneHot MAX_VAL", - "OneHot MAX_ABS_VAL", - "OneHot MAX_ABS_VAL_NEG", - "OneHot MAX_INDICATOR", - "OneHot MAX_ABS_INDICATOR", - "OneHot MIN_VAL", - "OneHot MIN_ABS_VAL", - "OneHot MIN_INDICATOR", - "OneHot MIN_ABS_INDICATOR", - "OneHot PROB", - "OneHot PROB_INDICATOR", - "OneHot PROB Philox", - "OneHot PROB_INDICATOR Philox", + pytest.param(Functions.OneHot, test_var, {'mode':kw.MAX_VAL}, (0., 0., 0., 0., 0., 0., 0., 0., 0.92732552, 0.), id="OneHot MAX_VAL"), + pytest.param(Functions.OneHot, test_var, {'mode':kw.MAX_ABS_VAL}, (0., 0., 0., 0., 0., 0., 0., 0., 0.92732552, 0.), id="OneHot MAX_ABS_VAL"), + pytest.param(Functions.OneHot, -test_var, {'mode':kw.MAX_ABS_VAL}, (0., 0., 0., 0., 0., 0., 0., 0., 0.92732552, 0.), id="OneHot MAX_ABS_VAL Neg"), + pytest.param(Functions.OneHot, test_var, {'mode':kw.MAX_INDICATOR}, (0., 0., 0., 0., 0., 0., 0., 0., 1., 0.), id="OneHot MAX_INDICATOR"), + pytest.param(Functions.OneHot, test_var, {'mode':kw.MAX_ABS_INDICATOR}, (0., 0., 0., 0., 0., 0., 0., 0., 1., 0.), id="OneHot MAX_ABS_INDICATOR"), + pytest.param(Functions.OneHot, -test_var, {'mode':kw.MAX_ABS_INDICATOR}, (0., 0., 0., 0., 0., 0., 0., 0., 1., 0.), id="OneHot MAX_ABS_INDICATOR Neg"), + pytest.param(Functions.OneHot, test_var, {'mode':kw.MIN_VAL}, (0., 0., 0., 0., 0., 0., 0., 0., 0., -0.23311696), id="OneHot MIN_VAL"), + pytest.param(Functions.OneHot, test_var, {'mode':kw.MIN_ABS_VAL}, (0., 0., 0., 0.08976637, 0., 0., 0., 0., 0., 0.), id="OneHot MIN_ABS_VAL"), + pytest.param(Functions.OneHot, -test_var, {'mode':kw.MIN_ABS_VAL}, (0., 0., 0., 0.08976637, 0., 0., 0., 0., 0., 0.), id="OneHot MIN_ABS_VAL Neg"), + pytest.param(Functions.OneHot, test_var, {'mode':kw.MIN_INDICATOR}, (0., 0., 0., 0., 0., 0., 0., 0., 0., 1.), id="OneHot MIN_INDICATOR"), + pytest.param(Functions.OneHot, test_var, {'mode':kw.MIN_ABS_INDICATOR}, (0., 0., 0., 1.,0., 0., 0., 0., 0., 0.), id="OneHot MIN_ABS_INDICATOR"), + pytest.param(Functions.OneHot, -test_var, {'mode':kw.MIN_ABS_INDICATOR}, (0., 0., 0., 1.,0., 0., 0., 0., 0., 0.), id="OneHot MIN_ABS_INDICATOR Neg"), + pytest.param(Functions.OneHot, [test_var, test_prob], {'mode':kw.PROB}, (0., 0., 0., 0.08976636599379373, 0., 0., 0., 0., 0., 0.), id="OneHot PROB"), + pytest.param(Functions.OneHot, [test_var, test_prob], {'mode':kw.PROB_INDICATOR}, (0., 0., 0., 1., 0., 0., 0., 0., 0., 0.), id="OneHot PROB_INDICATOR"), + pytest.param(Functions.OneHot, [test_var, test_philox], {'mode':kw.PROB}, expected_philox_prob, id="OneHot PROB Philox"), + pytest.param(Functions.OneHot, [test_var, test_philox], {'mode':kw.PROB_INDICATOR}, expected_philox_ind, id="OneHot PROB_INDICATOR Philox"), ] GROUP_PREFIX="SelectionFunction " @@ -61,7 +47,7 @@ @pytest.mark.function @pytest.mark.integrator_function @pytest.mark.benchmark -@pytest.mark.parametrize("func, variable, params, expected", test_data, ids=names) +@pytest.mark.parametrize("func, variable, params, expected", test_data) def test_basic(func, variable, params, expected, benchmark, func_mode): benchmark.group = GROUP_PREFIX + func.componentName + params['mode'] From 77b7777ea624587c68cd8120afa2f1e130ca600e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 11 Oct 2024 20:33:49 -0400 Subject: [PATCH 332/410] requirements: update networkx requirement from <3.4 to <3.5 (#3067) Updates the requirements on [networkx](https://github.com/networkx/networkx) to permit the latest version. 
- [Release notes](https://github.com/networkx/networkx/releases) - [Commits](https://github.com/networkx/networkx/compare/networkx-3.0...networkx-3.4.1) --- updated-dependencies: - dependency-name: networkx dependency-type: direct:production ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 7bfd6563c49..1e9b795ab6a 100644 --- a/requirements.txt +++ b/requirements.txt @@ -8,7 +8,7 @@ leabra-psyneulink<0.3.3 llvmlite<0.44 matplotlib<3.7.6 modeci_mdf<0.5, >=0.4.3; (platform_machine == 'AMD64' or platform_machine == 'x86_64' or platform_machine == 'arm64' or platform_machine == 'aarch64') and platform_python_implementation == 'CPython' and implementation_name == 'cpython' -networkx<3.4 +networkx<3.5 numpy>=1.21.0, <1.26.5 optuna<3.4.0 packaging<25.0 From ecfb6f2c51ee8c00b808f9755f4a6aaab23199b3 Mon Sep 17 00:00:00 2001 From: David Turner Date: Tue, 15 Oct 2024 15:26:18 -0400 Subject: [PATCH 333/410] Add a PEC stability flexibility test. Since Bryant is a main user of the PEC. Add his composition --- .../test_parameterestimationcomposition.py | 4 +- .../composition/pec/test_stab_flex_pec_fit.py | 497 ++++++++++++++++++ 2 files changed, 499 insertions(+), 2 deletions(-) rename tests/composition/{ => pec}/test_parameterestimationcomposition.py (99%) create mode 100644 tests/composition/pec/test_stab_flex_pec_fit.py diff --git a/tests/composition/test_parameterestimationcomposition.py b/tests/composition/pec/test_parameterestimationcomposition.py similarity index 99% rename from tests/composition/test_parameterestimationcomposition.py rename to tests/composition/pec/test_parameterestimationcomposition.py index aa24010d9ca..4a6aa2a3a5d 100644 --- a/tests/composition/test_parameterestimationcomposition.py +++ b/tests/composition/pec/test_parameterestimationcomposition.py @@ -185,7 +185,7 @@ def test_pec_run_input_formats(inputs_dict, error_msg): @pytest.mark.parametrize( "opt_method, optuna_kwargs, expected_result", [ - ("differential_evolution", expected_differential_evolution), + ("differential_evolution", None, expected_differential_evolution), (optuna.samplers.RandomSampler(seed=0), None, [0.01]), (optuna.samplers.QMCSampler(seed=0), None, [0.01]), (optuna.samplers.RandomSampler, {'seed': 0}, [0.01]), @@ -289,7 +289,7 @@ def reward_rate(sim_data): pec.run(inputs={comp: trial_inputs}) if expected_result is not None: - np.testing.assert_allclose(list(pec.optimized_parameter_values.values()), result) + np.testing.assert_allclose(list(pec.optimized_parameter_values.values()), expected_result) def test_parameter_estimation_ddm_cond(func_mode): diff --git a/tests/composition/pec/test_stab_flex_pec_fit.py b/tests/composition/pec/test_stab_flex_pec_fit.py new file mode 100644 index 00000000000..152c25719bc --- /dev/null +++ b/tests/composition/pec/test_stab_flex_pec_fit.py @@ -0,0 +1,497 @@ +import psyneulink as pnl + +from psyneulink.core.components.functions.nonstateful.fitfunctions import ( + PECOptimizationFunction, +) + +import numpy as np +import pandas as pd + + +# Define function to generate a counterbalanced trial sequence with a specified switch trial frequency +def generate_trial_sequence(n, frequency, seed: int = None): + + # Compute trial number + nTotalTrials = n + switchFrequency = frequency + + nSwitchTrials = int(nTotalTrials * switchFrequency) + nRepeatTrials = int(nTotalTrials - 
nSwitchTrials) + + # Determine task transitions + transitions = [1] * nSwitchTrials + [0] * nRepeatTrials + rng = np.random.RandomState(seed) + order = rng.permutation(list(range(nTotalTrials))) + transitions[:] = [transitions[i] for i in order] + + # Determine stimuli with 50% congruent trials + stimuli = ( + [[1, 1]] * int(nSwitchTrials / 4) + + [[1, -1]] * int(nSwitchTrials / 4) + + [[-1, -1]] * int(nSwitchTrials / 4) + + [[-1, 1]] * int(nSwitchTrials / 4) + + [[1, 1]] * int(nRepeatTrials / 4) + + [[1, -1]] * int(nRepeatTrials / 4) + + [[-1, -1]] * int(nRepeatTrials / 4) + + [[-1, 1]] * int(nRepeatTrials / 4) + ) + stimuli[:] = [stimuli[i] for i in order] + + # stimuli[:] = [[1, 1]] * nTotalTrials + + # Determine cue-stimulus intervals + CSI = ( + [1200] * int(nSwitchTrials / 8) + + [1200] * int(nSwitchTrials / 8) + + [1200] * int(nSwitchTrials / 8) + + [1200] * int(nSwitchTrials / 8) + + [1200] * int(nSwitchTrials / 8) + + [1200] * int(nSwitchTrials / 8) + + [1200] * int(nSwitchTrials / 8) + + [1200] * int(nSwitchTrials / 8) + + [1200] * int(nRepeatTrials / 8) + + [1200] * int(nRepeatTrials / 8) + + [1200] * int(nRepeatTrials / 8) + + [1200] * int(nRepeatTrials / 8) + + [1200] * int(nRepeatTrials / 8) + + [1200] * int(nRepeatTrials / 8) + + [1200] * int(nRepeatTrials / 8) + + [1200] * int(nRepeatTrials / 8) + ) + CSI[:] = [CSI[i] for i in order] + + # Set the task order + tasks = [[1, 0]] * (nTotalTrials + 1) + for i in list(range(nTotalTrials)): + if transitions[i] == 0: + tasks[i + 1] = tasks[i] + if transitions[i] == 1: + if tasks[i] == [1, 0]: + tasks[i + 1] = [0, 1] + if tasks[i] == [0, 1]: + tasks[i + 1] = [1, 0] + tasks = tasks[1:] + + # Determine correct response based on stimulus and task input + correctResponse = np.sum(np.multiply(tasks, stimuli), axis=1) + + return tasks, stimuli, CSI, correctResponse + + +# Stability-Flexibility Model +def make_stab_flex( + gain=3.0, + leak=3.0, + competition=2.0, + lca_time_step_size=0.01, + non_decision_time=0.2, + automaticity=0.01, + starting_value=0.0, + threshold=0.1, + ddm_noise=0.1, + lca_noise=0.0, + scale=0.2, + ddm_time_step_size=0.01, + rng_seed=None, +): + + GAIN = gain + LEAK = leak + COMP = competition + AUTOMATICITY = automaticity # Automaticity Weight + + STARTING_POINT = starting_value # Starting Point + THRESHOLD = threshold # Threshold + NOISE = ddm_noise # Noise + SCALE = scale # Scales DDM inputs so threshold can be set to 1 + NON_DECISION_TIME = non_decision_time + + # Task Layer: [Color, Motion] {0, 1} Mutually Exclusive + # Origin Node + taskLayer = pnl.TransferMechanism( + size=2, + function=pnl.Linear(slope=1, intercept=0), + output_ports=[pnl.RESULT], + name="Task Input [I1, I2]", + ) + + # Stimulus Layer: [Color Stimulus, Motion Stimulus] + # Origin Node + stimulusInfo = pnl.TransferMechanism( + size=2, + function=pnl.Linear(slope=1, intercept=0), + output_ports=[pnl.RESULT], + name="Stimulus Input [S1, S2]", + ) + + # Cue-To-Stimulus Interval Layer + # Origin Node + cueInterval = pnl.TransferMechanism( + size=1, + function=pnl.Linear(slope=1, intercept=0), + output_ports=[pnl.RESULT], + name="Cue-Stimulus Interval", + ) + + # Correct Response Info + # Origin Node + correctResponseInfo = pnl.TransferMechanism( + size=1, + function=pnl.Linear(slope=1, intercept=0), + output_ports=[pnl.RESULT], + name="Correct Response Info", + ) + + # Control Module Layer: [Color Activation, Motion Activation] + controlModule = pnl.LCAMechanism( + size=2, + function=pnl.Logistic(gain=GAIN), + leak=LEAK, + competition=COMP, + 
self_excitation=0, + noise=lca_noise, + termination_measure=pnl.TimeScale.TRIAL, + termination_threshold=1200, + time_step_size=lca_time_step_size, + name="Task Activations [Act1, Act2]", + ) + + # Control Mechanism Setting Cue-To-Stimulus Interval + csiController = pnl.ControlMechanism( + monitor_for_control=cueInterval, + control_signals=[(pnl.TERMINATION_THRESHOLD, controlModule)], + modulation=pnl.OVERRIDE, + ) + + # Hadamard product of controlModule and Stimulus Information + nonAutomaticComponent = pnl.TransferMechanism( + size=2, + function=pnl.Linear(slope=1, intercept=0), + input_ports=pnl.InputPort(combine=pnl.PRODUCT), + output_ports=[pnl.RESULT], + name="Non-Automatic Component [S1*Act1, S2*Act2]", + ) + + # Multiply Stimulus Input by the automaticity weight + congruenceWeighting = pnl.TransferMechanism( + size=2, + function=pnl.Linear(slope=AUTOMATICITY, intercept=0), + output_ports=[pnl.RESULT], + name="Automaticity-weighted Stimulus Input [w*S1, w*S2]", + ) + + # Summation of nonAutomatic and Automatic Components + ddmCombination = pnl.TransferMechanism( + size=1, + function=pnl.Linear(slope=1, intercept=0), + input_ports=pnl.InputPort(combine=pnl.SUM), + output_ports=[pnl.RESULT], + name="Drift = (w*S1 + w*S2) + (S1*Act1 + S2*Act2)", + ) + + # Ensure upper boundary of DDM is always correct response by multiplying DDM input by correctResponseInfo + ddmRecodeDrift = pnl.TransferMechanism( + size=1, + function=pnl.Linear(slope=1, intercept=0), + input_ports=pnl.InputPort(combine=pnl.PRODUCT), + output_ports=[pnl.RESULT], + name="Recoded Drift = Drift * correctResponseInfo", + ) + + # Scale DDM inputs + ddmInputScale = pnl.TransferMechanism( + size=1, + function=pnl.Linear(slope=SCALE, intercept=0), + output_ports=[pnl.RESULT], + name="Scaled DDM Input", + ) + + # Decision Module + decisionMaker = pnl.DDM( + function=pnl.DriftDiffusionIntegrator( + starting_value=STARTING_POINT, + threshold=THRESHOLD, + noise=NOISE, + time_step_size=ddm_time_step_size, + non_decision_time=NON_DECISION_TIME, + ), + reset_stateful_function_when=pnl.AtTrialStart(), + output_ports=[pnl.DECISION_OUTCOME, pnl.RESPONSE_TIME], + name="DDM", + ) + + taskLayer.set_log_conditions([pnl.RESULT]) + stimulusInfo.set_log_conditions([pnl.RESULT]) + cueInterval.set_log_conditions([pnl.RESULT]) + correctResponseInfo.set_log_conditions([pnl.RESULT]) + controlModule.set_log_conditions([pnl.RESULT, "termination_threshold"]) + nonAutomaticComponent.set_log_conditions([pnl.RESULT]) + congruenceWeighting.set_log_conditions([pnl.RESULT]) + ddmCombination.set_log_conditions([pnl.RESULT]) + ddmRecodeDrift.set_log_conditions([pnl.RESULT]) + ddmInputScale.set_log_conditions([pnl.RESULT]) + decisionMaker.set_log_conditions([pnl.DECISION_OUTCOME, pnl.RESPONSE_TIME]) + + # Composition Creation + stabilityFlexibility = pnl.Composition() + + # Node Creation + stabilityFlexibility.add_node(taskLayer) + stabilityFlexibility.add_node(stimulusInfo) + stabilityFlexibility.add_node(cueInterval) + stabilityFlexibility.add_node(correctResponseInfo) + stabilityFlexibility.add_node(controlModule) + stabilityFlexibility.add_node(csiController) + stabilityFlexibility.add_node(nonAutomaticComponent) + stabilityFlexibility.add_node(congruenceWeighting) + stabilityFlexibility.add_node(ddmCombination) + stabilityFlexibility.add_node(ddmRecodeDrift) + stabilityFlexibility.add_node(ddmInputScale) + stabilityFlexibility.add_node(decisionMaker) + + # Projection Creation + stabilityFlexibility.add_projection(sender=taskLayer, receiver=controlModule) 
+ stabilityFlexibility.add_projection( + sender=controlModule, receiver=nonAutomaticComponent + ) + stabilityFlexibility.add_projection( + sender=stimulusInfo, receiver=nonAutomaticComponent + ) + stabilityFlexibility.add_projection( + sender=stimulusInfo, receiver=congruenceWeighting + ) + stabilityFlexibility.add_projection( + sender=nonAutomaticComponent, receiver=ddmCombination + ) + stabilityFlexibility.add_projection( + sender=congruenceWeighting, receiver=ddmCombination + ) + stabilityFlexibility.add_projection(sender=ddmCombination, receiver=ddmRecodeDrift) + stabilityFlexibility.add_projection( + sender=correctResponseInfo, receiver=ddmRecodeDrift + ) + stabilityFlexibility.add_projection(sender=ddmRecodeDrift, receiver=ddmInputScale) + stabilityFlexibility.add_projection(sender=ddmInputScale, receiver=decisionMaker) + + # Hot-fix currently necessary to allow control module and DDM to execute in parallel in compiled mode + # We need two gates in order to output both values (decision and response) from the ddm + decisionGate = pnl.ProcessingMechanism(size=1, name="DECISION_GATE") + stabilityFlexibility.add_node(decisionGate) + + responseGate = pnl.ProcessingMechanism(size=1, name="RESPONSE_GATE") + stabilityFlexibility.add_node(responseGate) + + stabilityFlexibility.add_projection( + sender=decisionMaker.output_ports[0], receiver=decisionGate + ) + stabilityFlexibility.add_projection( + sender=decisionMaker.output_ports[1], receiver=responseGate + ) + + # Sets scheduler conditions, so that the gates are not executed (and hence the composition doesn't finish) until decisionMaker is finished + stabilityFlexibility.scheduler.add_condition( + decisionGate, pnl.WhenFinished(decisionMaker) + ) + stabilityFlexibility.scheduler.add_condition( + responseGate, pnl.WhenFinished(decisionMaker) + ) + + return stabilityFlexibility + +def get_node(comp, name): + """ + Get the node from the composition with the given name. The name needs to match from the beginning, but it + can have any numeric suffix after the name. + """ + for node in comp.nodes: + if node.name.startswith(name): + return node + return None + + +def make_input_dict(stab_flex_comp, taskTrain, stimulusTrain, cueTrain, correctResponse): + inputs = { + get_node(stab_flex_comp, "Task Input [I1, I2]"): [[np.array(v)] for v in taskTrain], + get_node(stab_flex_comp, "Stimulus Input [S1, S2]"): [[np.array(v)] for v in stimulusTrain], + get_node(stab_flex_comp, "Cue-Stimulus Interval"): [[np.array(v)] for v in cueTrain], + get_node(stab_flex_comp, "Correct Response Info"): [[np.array(v)] for v in correctResponse] + } + + return inputs + +def run_stab_flex_cond( + taskTrain, + stimulusTrain, + cueTrain, + correctResponse, + num_trials, + **kwargs): + """ + Create a stability flexibility composition and run it with the given parameters. Return the composition and the + results as a pandas DataFrame. If any of the parameters are a list, then that parameter is assumed to be trial-wise + and the length of the list should be the number of trials. A control mechanism will be added to the composition to + override the parameter with the value from the input. + """ + + # Remove any parameters that are trial-wise from the kwargs, these values will be passed in as inputs + # to the composition. 
+ cond_params = {name: value for name, value in kwargs.items() + if isinstance(value, list) or isinstance(value, np.ndarray)} + + # Remove the trial-wise parameters from the kwargs + kwargs = {name: value for name, value in kwargs.items() if name not in cond_params} + + # Make a stability flexibility composition + comp = make_stab_flex(**kwargs) + + inputs = make_input_dict(comp, taskTrain, stimulusTrain, cueTrain, correctResponse) + + # A dict to map keyword arg name to the corresponding mechanism in the composition + param_map = { + "gain": ("gain", comp.nodes["Task Activations [Act1, Act2]"]), # Gain + "automaticity": ("slope", comp.nodes["Automaticity-weighted Stimulus Input [w*S1, w*S2]"]), # Automaticity + "threshold": ("threshold", comp.nodes["DDM"]), # Threshold + "non_decision_time": ("non_decision_time", comp.nodes["DECISION_GATE"]), # Non-decision time + } + # Go through the parameters and check if any are trial-wise, if so, add a control mechanism to override the value on + # trial-by-trial basis with the value from the input. + pec_mechs = {} + for (name, value) in cond_params.items(): + + if len(value) != num_trials: + raise ValueError("Length of trial-wise parameter must be equal to the number of trials.") + + pec_mechs[name] = pnl.ControlMechanism(name=f"{name}_control", + control_signals=param_map[name], + modulation=pnl.OVERRIDE) + comp.add_node(pec_mechs[name]) + inputs[pec_mechs[name]] = [[np.array([value[i]])] for i in range(num_trials)] + + comp.run(inputs, execution_mode=pnl.ExecutionMode.LLVMRun) + + df = pd.DataFrame( + np.squeeze(np.array(comp.results))[:, 1:], columns=["decision", "response_time"] + ) + df["decision"] = df["decision"].astype("category") + + # Add the trial-wise parameters to the DataFrame as well. + for name in pec_mechs.keys(): + df[name] = cond_params[name] + + assert len(comp.input_ports) > 0 + + return comp, df + +def test_stab_flex_cond_fit(): + from psyneulink.core.globals.utilities import set_global_seed + + # Let's make things reproducible + pnl_seed = 0 + set_global_seed(pnl_seed) + trial_seq_seed = 0 + + # High-level parameters the impact performance of the test + num_trials = 75 + time_step_size = 0.01 + num_estimates = 100 + + sf_params = dict( + gain=3.0, + leak=3.0, + competition=2.0, + lca_time_step_size=time_step_size, + non_decision_time=0.2, + automaticity=0.01, + starting_value=0.0, + threshold=0.1, + ddm_noise=0.1, + lca_noise=0.0, + scale=0.2, + ddm_time_step_size=time_step_size, + ) + + # Generate some sample data to run the model on + taskTrain, stimulusTrain, cueTrain, correctResponse = generate_trial_sequence(240, 0.5, seed=trial_seq_seed) + taskTrain = taskTrain[0:num_trials] + stimulusTrain = stimulusTrain[0:num_trials] + cueTrain = cueTrain[0:num_trials] + correctResponse = correctResponse[0:num_trials] + + # CSI is in terms of time steps, we need to scale by ten because original code + # was set to run with timestep size of 0.001 + cueTrain = [c / 10.0 for c in cueTrain] + + # We will generate a dataset that comprises two different conditions. Each condition will have a different threshold. + # Randomly select which trials will be in each condition uniformly. 
+ rng = np.random.default_rng(pnl_seed) + threshold = rng.choice([0.3, 0.7], size=num_trials, replace=True) + + # Run + _, data_to_fit = run_stab_flex_cond( + taskTrain, + stimulusTrain, + cueTrain, + correctResponse, + num_trials, + **{**sf_params, 'threshold': threshold} + ) + + # Turn our trial-wise threshold into a condition + data_to_fit['condition'] = np.where(data_to_fit['threshold'] == 0.3, 'threshold=0.3', 'threshold=0.7') + data_to_fit.drop(columns=['threshold'], inplace=True) + + # %% + # Create a parameter estimation composition to fit the data we just generated and hopefully recover the + # parameters of the composition. + comp = make_stab_flex(**sf_params) + + controlModule = get_node(comp, "Task Activations [Act1, Act2]") + congruenceWeighting = get_node(comp, "Automaticity-weighted Stimulus Input [w*S1, w*S2]") + decisionMaker = get_node(comp, "DDM") + decisionGate = get_node(comp, "DECISION_GATE") + responseGate = get_node(comp, "RESPONSE_GATE") + + fit_parameters = { + ("gain", controlModule): np.linspace(1.0, 10.0, 1000), # Gain + ("slope", congruenceWeighting): np.linspace(0.0, 0.1, 1000), # Automaticity + ("threshold", decisionMaker): np.linspace(0.01, 0.5, 1000), # Threshold + ("non_decision_time", decisionMaker): np.linspace(0.1, 0.4, 1000), # Threshold + } + + pec = pnl.ParameterEstimationComposition( + name="pec", + nodes=comp, + parameters=fit_parameters, + depends_on={("threshold", decisionMaker): 'condition'}, + outcome_variables=[ + decisionGate.output_ports[0], + responseGate.output_ports[0], + ], + data=data_to_fit, + optimization_function=PECOptimizationFunction( + method="differential_evolution", max_iterations=1 + ), + num_estimates=num_estimates, + initial_seed=42, + ) + + pec.controller.parameters.comp_execution_mode.set("LLVM") + pec.controller.function.parameters.save_values.set(True) + + inputs = make_input_dict(comp, taskTrain, stimulusTrain, cueTrain, correctResponse) + + ret = pec.run(inputs=inputs) + optimal_parameters = pec.optimized_parameter_values + + # These aren't the recovered parameters, we are doing too few trials and too few estimates to get the correct + # results. 
+ expected_results = { + 'Task Activations [Act1, Act2]-1.gain': 1.3820085411824117, + 'Automaticity-weighted Stimulus Input [w*S1, w*S2]-1.slope': 0.014784656669402561, + 'DDM-1.threshold[threshold=0.7]': 0.48339391888454464, + 'DDM-1.threshold[threshold=0.3]': 0.3098280374938238, + 'DDM-1.non_decision_time': 0.1278625281322982 + } + + for key, value in expected_results.items(): + np.testing.assert_allclose(optimal_parameters[key], value, rtol=1e-6) From 3a0cdbe1c05b836111378d095fbab27277f4f654 Mon Sep 17 00:00:00 2001 From: David Turner Date: Tue, 15 Oct 2024 16:02:06 -0400 Subject: [PATCH 334/410] Add a tolerance to differential_evolution --- .../composition/pec/test_parameterestimationcomposition.py | 7 ++++++- tests/composition/pec/test_stab_flex_pec_fit.py | 2 +- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/tests/composition/pec/test_parameterestimationcomposition.py b/tests/composition/pec/test_parameterestimationcomposition.py index 4a6aa2a3a5d..47979ad5082 100644 --- a/tests/composition/pec/test_parameterestimationcomposition.py +++ b/tests/composition/pec/test_parameterestimationcomposition.py @@ -289,7 +289,12 @@ def reward_rate(sim_data): pec.run(inputs={comp: trial_inputs}) if expected_result is not None: - np.testing.assert_allclose(list(pec.optimized_parameter_values.values()), expected_result) + if opt_method == "differential_evolution": + np.testing.assert_allclose( + list(pec.optimized_parameter_values.values()), expected_result, rtol=1e-3 + ) + else: + np.testing.assert_allclose(list(pec.optimized_parameter_values.values()), expected_result) def test_parameter_estimation_ddm_cond(func_mode): diff --git a/tests/composition/pec/test_stab_flex_pec_fit.py b/tests/composition/pec/test_stab_flex_pec_fit.py index 152c25719bc..bf1ace8a012 100644 --- a/tests/composition/pec/test_stab_flex_pec_fit.py +++ b/tests/composition/pec/test_stab_flex_pec_fit.py @@ -480,7 +480,7 @@ def test_stab_flex_cond_fit(): inputs = make_input_dict(comp, taskTrain, stimulusTrain, cueTrain, correctResponse) - ret = pec.run(inputs=inputs) + pec.run(inputs=inputs) optimal_parameters = pec.optimized_parameter_values # These aren't the recovered parameters, we are doing too few trials and too few estimates to get the correct From 9ec8c1d5e0a225113d4ce4f8d7a45e4962cf9bba Mon Sep 17 00:00:00 2001 From: David Turner Date: Tue, 15 Oct 2024 16:32:47 -0400 Subject: [PATCH 335/410] Add a tolerance to differential_evolution --- tests/composition/pec/test_parameterestimationcomposition.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/composition/pec/test_parameterestimationcomposition.py b/tests/composition/pec/test_parameterestimationcomposition.py index 47979ad5082..8ae98129f14 100644 --- a/tests/composition/pec/test_parameterestimationcomposition.py +++ b/tests/composition/pec/test_parameterestimationcomposition.py @@ -291,7 +291,7 @@ def reward_rate(sim_data): if expected_result is not None: if opt_method == "differential_evolution": np.testing.assert_allclose( - list(pec.optimized_parameter_values.values()), expected_result, rtol=1e-3 + list(pec.optimized_parameter_values.values()), expected_result, atol=1e-2 ) else: np.testing.assert_allclose(list(pec.optimized_parameter_values.values()), expected_result) From 2ef2b2566f625220339620215b1543653fce2d8c Mon Sep 17 00:00:00 2001 From: David Turner Date: Wed, 16 Oct 2024 11:20:21 -0400 Subject: [PATCH 336/410] Fix test_stab_flex_pec_fit for fp32 Strange that the results are so different for fp64 vs fp32. 
I need to investigate further.
---
 .../composition/pec/test_stab_flex_pec_fit.py | 27 +++++++++++++------
 1 file changed, 19 insertions(+), 8 deletions(-)

diff --git a/tests/composition/pec/test_stab_flex_pec_fit.py b/tests/composition/pec/test_stab_flex_pec_fit.py
index bf1ace8a012..9ade30f11ac 100644
--- a/tests/composition/pec/test_stab_flex_pec_fit.py
+++ b/tests/composition/pec/test_stab_flex_pec_fit.py
@@ -1,4 +1,5 @@
 import psyneulink as pnl
+import pytest
 from psyneulink.core.components.functions.nonstateful.fitfunctions import (
     PECOptimizationFunction,
 )
@@ -484,14 +485,24 @@ def test_stab_flex_cond_fit():
     optimal_parameters = pec.optimized_parameter_values
     # These aren't the recovered parameters, we are doing too few trials and too few estimates to get the correct
-    # results.
+    # results. FP32 results are very different from FP64 results, this seems strange, we will need to investigate
+    # what is going on here. Maybe it effects the randomization somehow?
+    if pytest.helpers.llvm_current_fp_precision() == 'fp32':
+        expected_results = {
+            'Task Activations [Act1, Act2]-1.gain': 2.316350155331747,
+            'Automaticity-weighted Stimulus Input [w*S1, w*S2]-1.slope': 0.04222951406320785,
+            'DDM-1.threshold[threshold=0.7]': 0.49284829086077964,
+            'DDM-1.threshold[threshold=0.3]': 0.3469243996445839,
+            'DDM-1.non_decision_time': 0.38630189671150356
+        }
+    else:
+        expected_results = {
+            'Task Activations [Act1, Act2]-1.gain': 1.3820085411824117,
+            'Automaticity-weighted Stimulus Input [w*S1, w*S2]-1.slope': 0.014784656669402561,
+            'DDM-1.threshold[threshold=0.7]': 0.48339391888454464,
+            'DDM-1.threshold[threshold=0.3]': 0.3098280374938238,
+            'DDM-1.non_decision_time': 0.1278625281322982
+        }
     for key, value in expected_results.items():
         np.testing.assert_allclose(optimal_parameters[key], value, rtol=1e-6)

From bf95aff9c57ed3ef3f6137372606372b27a56bb5 Mon Sep 17 00:00:00 2001
From: David Turner
Date: Wed, 16 Oct 2024 11:29:32 -0400
Subject: [PATCH 337/410] Swap out optimizer for CmaEsSampler in stab flex test

It runs faster, max_iterations controls the number of evaluations. It also
seems to get rid of the issue of differences between fp32 and fp64 results.
Will need to look into what is going on with differential evolution.
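For context, a minimal sketch of how the two optimizers referenced in this series plug into a
ParameterEstimationComposition. It assumes the make_stab_flex and get_node helpers, and the
sf_params and data_to_fit objects, defined by the test added in PATCH 333 are in scope; with an
Optuna sampler, max_iterations bounds the number of objective evaluations.

    import numpy as np
    import optuna
    import psyneulink as pnl
    from psyneulink.core.components.functions.nonstateful.fitfunctions import (
        PECOptimizationFunction,
    )

    def make_pec(method, max_iterations):
        # Build a fresh stability-flexibility model for each PEC so the two
        # configurations stay independent (make_stab_flex is the PATCH 333 helper).
        comp = make_stab_flex(**sf_params)
        controlModule = get_node(comp, "Task Activations [Act1, Act2]")
        decisionMaker = get_node(comp, "DDM")
        fit_parameters = {
            ("gain", controlModule): np.linspace(1.0, 10.0, 1000),
            ("threshold", decisionMaker): np.linspace(0.01, 0.5, 1000),
        }
        return pnl.ParameterEstimationComposition(
            name="pec",
            nodes=comp,
            parameters=fit_parameters,
            outcome_variables=[get_node(comp, "DECISION_GATE").output_ports[0],
                               get_node(comp, "RESPONSE_GATE").output_ports[0]],
            data=data_to_fit,                     # observed decisions / response times (assumed)
            optimization_function=PECOptimizationFunction(method=method,
                                                          max_iterations=max_iterations),
            num_estimates=100,
            initial_seed=42,
        )

    pec_de = make_pec("differential_evolution", max_iterations=1)                # SciPy global optimizer
    pec_cma = make_pec(optuna.samplers.CmaEsSampler(seed=0), max_iterations=10)  # seeded Optuna CMA-ES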
--- .../composition/pec/test_stab_flex_pec_fit.py | 30 +++++++------------ 1 file changed, 11 insertions(+), 19 deletions(-) diff --git a/tests/composition/pec/test_stab_flex_pec_fit.py b/tests/composition/pec/test_stab_flex_pec_fit.py index 9ade30f11ac..220caa304c6 100644 --- a/tests/composition/pec/test_stab_flex_pec_fit.py +++ b/tests/composition/pec/test_stab_flex_pec_fit.py @@ -1,4 +1,6 @@ import psyneulink as pnl + +import optuna import pytest from psyneulink.core.components.functions.nonstateful.fitfunctions import ( @@ -470,7 +472,7 @@ def test_stab_flex_cond_fit(): ], data=data_to_fit, optimization_function=PECOptimizationFunction( - method="differential_evolution", max_iterations=1 + method=optuna.samplers.CmaEsSampler(seed=0), max_iterations=10 ), num_estimates=num_estimates, initial_seed=42, @@ -485,24 +487,14 @@ def test_stab_flex_cond_fit(): optimal_parameters = pec.optimized_parameter_values # These aren't the recovered parameters, we are doing too few trials and too few estimates to get the correct - # results. FP32 results are very different from FP64 results, this seems strange, we will need to investigate - # what is going on here. Maybe it effects the randomization somehow? - if pytest.helpers.llvm_current_fp_precision() == 'fp32': - expected_results = { - 'Task Activations [Act1, Act2]-1.gain': 2.316350155331747, - 'Automaticity-weighted Stimulus Input [w*S1, w*S2]-1.slope': 0.04222951406320785, - 'DDM-1.threshold[threshold=0.7]': 0.49284829086077964, - 'DDM-1.threshold[threshold=0.3]': 0.3469243996445839, - 'DDM-1.non_decision_time': 0.38630189671150356 - } - else: - expected_results = { - 'Task Activations [Act1, Act2]-1.gain': 1.3820085411824117, - 'Automaticity-weighted Stimulus Input [w*S1, w*S2]-1.slope': 0.014784656669402561, - 'DDM-1.threshold[threshold=0.7]': 0.48339391888454464, - 'DDM-1.threshold[threshold=0.3]': 0.3098280374938238, - 'DDM-1.non_decision_time': 0.1278625281322982 - } + # results. + expected_results = { + 'Task Activations [Act1, Act2]-1.gain': 3.87419, + 'Automaticity-weighted Stimulus Input [w*S1, w*S2]-1.slope': 0.0125, + 'DDM-1.threshold[threshold=0.7]': 0.30939, + 'DDM-1.threshold[threshold=0.3]': 0.22168, + 'DDM-1.non_decision_time': 0.28659999999999997 + } for key, value in expected_results.items(): np.testing.assert_allclose(optimal_parameters[key], value, rtol=1e-6) From 2e58bc1fa43b9b26cd5a646bf1597bf1a648d48b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 16 Oct 2024 23:54:58 -0400 Subject: [PATCH 338/410] requirements: update pillow requirement from <10.5.0 to <11.1.0 (#3070) Updates the requirements on [pillow](https://github.com/python-pillow/Pillow) to permit the latest version. - [Release notes](https://github.com/python-pillow/Pillow/releases) - [Changelog](https://github.com/python-pillow/Pillow/blob/main/CHANGES.rst) - [Commits](https://github.com/python-pillow/Pillow/compare/10.4.0...11.0.0) --- updated-dependencies: - dependency-name: pillow dependency-type: direct:production ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 1e9b795ab6a..c9cfa33cb14 100644 --- a/requirements.txt +++ b/requirements.txt @@ -13,7 +13,7 @@ numpy>=1.21.0, <1.26.5 optuna<3.4.0 packaging<25.0 pandas<2.2.4 -pillow<10.5.0 +pillow<11.1.0 pint<0.22.0 protobuf<3.20.4 rich>=10.1, <10.13 From ebfdb55d5caf22e08f134c15e1e4137a4476c555 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 17 Oct 2024 22:57:12 -0400 Subject: [PATCH 339/410] requirements: update grpcio requirement from <1.67.0 to <1.68.0 (#3072) Updates the requirements on [grpcio](https://github.com/grpc/grpc) to permit the latest version. - [Release notes](https://github.com/grpc/grpc/releases) - [Changelog](https://github.com/grpc/grpc/blob/master/doc/grpc_release_schedule.md) - [Commits](https://github.com/grpc/grpc/compare/v1.66.0...v1.67.0) --- updated-dependencies: - dependency-name: grpcio dependency-type: direct:production ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index c9cfa33cb14..c07a079587d 100644 --- a/requirements.txt +++ b/requirements.txt @@ -3,7 +3,7 @@ dill<0.3.10 fastkde>=1.0.24, <1.0.31 graph-scheduler>=1.2.1, <1.3.0 graphviz<0.21.0 -grpcio<1.67.0 +grpcio<1.68.0 leabra-psyneulink<0.3.3 llvmlite<0.44 matplotlib<3.7.6 From 97c8ed4785d99e49a9ceadc4bdac3a58c5a898d7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sun, 20 Oct 2024 00:58:33 -0400 Subject: [PATCH 340/410] requirements: update pytest-profiling requirement from <1.7.1 to <1.8.1 (#3074) Updates the requirements on [pytest-profiling](https://github.com/man-group/pytest-plugins) to permit the latest version. - [Release notes](https://github.com/man-group/pytest-plugins/releases) - [Changelog](https://github.com/man-group/pytest-plugins/blob/master/CHANGES.md) - [Commits](https://github.com/man-group/pytest-plugins/compare/v1.7.0...1.8.0) --- updated-dependencies: - dependency-name: pytest-profiling dependency-type: direct:development ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- dev_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev_requirements.txt b/dev_requirements.txt index d474ca88134..d82bd9d7ca6 100644 --- a/dev_requirements.txt +++ b/dev_requirements.txt @@ -5,7 +5,7 @@ pytest-benchmark<4.0.1 pytest-cov<5.0.1 pytest-forked<1.7.0 pytest-helpers-namespace<2021.12.30 -pytest-profiling<1.7.1 +pytest-profiling<1.8.1 pytest-pycodestyle<2.5.0 pytest-pydocstyle<2.5.0 pytest-xdist>=3.2.0, <3.7.0 From ac83b2afb7835f8a3b6993a5fb7f2d0410fdb45b Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Sat, 19 Oct 2024 19:39:11 -0400 Subject: [PATCH 341/410] tests/IntegratorFunctions: Cleanup Consolidate parameter overrides. Create of copy of the params dict argument instead of modifying it. Add testing of DriftOnASphereIntegrator. Do not duplicate function instantiation. Drop redundant import. 
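The copy-instead-of-mutate change called out above matters because pytest hands the same params dict object to every test case that shares a parametrize entry, so mutating it in one case leaks overrides into later ones. A minimal sketch of the idiom (the names are illustrative, not the test's actual fixtures):

    shared_params = {"rate": 0.5, "offset": 1.0}   # stands in for a parametrize value

    def with_mutation(params):
        params["dimension"] = 11               # modifies the caller's (shared) dict
        return params

    def with_copy(params):
        params = {**params, "dimension": 11}   # rebind to a shallow copy
        params.pop("offset", None)             # only the copy is changed
        return params

    with_copy(shared_params)
    assert "dimension" not in shared_params and "offset" in shared_params

    with_mutation(shared_params)
    assert "dimension" in shared_params        # shared state is now polluted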
Signed-off-by: Jan Vesely --- tests/functions/test_integrator.py | 100 ++++++++++++++++++++--------- 1 file changed, 69 insertions(+), 31 deletions(-) diff --git a/tests/functions/test_integrator.py b/tests/functions/test_integrator.py index 0577cf2f6e4..ce17b411cf7 100644 --- a/tests/functions/test_integrator.py +++ b/tests/functions/test_integrator.py @@ -1,9 +1,7 @@ - import numpy as np import pytest import psyneulink as pnl -import psyneulink.core.llvm as pnlvm import psyneulink.core.components.functions.stateful.integratorfunctions as Functions from psyneulink.core.components.functions.function import FunctionError from psyneulink.core.components.functions.nonstateful.transferfunctions import Angle @@ -25,6 +23,7 @@ def SimpleIntFun(init, value, iterations, noise, rate, offset, **kwargs): if "initializer" in kwargs: return [4.91845218, 4.78766907, 4.73758993, 5.04920442, 4.09842889, 4.2909061, 4.05866892, 5.23154257, 5.23413599, 4.86548903] + else: return [4.12672714, 4.25877415, 4.16954537, 4.12360778, 4.02739283, 4.2037768, 4.03845052, 4.39892272, 4.45597924, 3.99547688] @@ -32,6 +31,7 @@ def SimpleIntFun(init, value, iterations, noise, rate, offset, **kwargs): if "initializer" in kwargs: return [6.07047464, 1.45183492, 2.13615798, 3.22296925, 3.29867927, 0.9734048, 2.54011924, 3.21213761, 1.54651058, 2.7026355, ] + else: return [5.2787496, 0.92294, 1.56811342, 2.29737262, 3.22764321, 0.8862755, 2.51990084, 2.37951776, 0.76835383, 1.83262335] @@ -39,12 +39,14 @@ def SimpleIntFun(init, value, iterations, noise, rate, offset, **kwargs): if "initializer" in kwargs: return [5.53160614, 4.86244369, 3.79932695, 5.06809088, 2.1305511, 3.8879681, 2.16602771, 5.74284825, 4.47697989, 3.78677378] + else: return [4.7398811, 4.33354877, 3.23128239, 4.14249424, 2.05951504, 3.8008388, 2.14580932, 4.9102284, 3.69882314, 2.91676163] def AdaptiveIntFun(init, value, iterations, noise, rate, offset, **kwargs): assert iterations == 3 + if np.isscalar(noise): if "initializer" in kwargs: return [3.44619156, 3.44183529, 3.38970396, 3.49707692, 3.08413924, @@ -52,10 +54,12 @@ def AdaptiveIntFun(init, value, iterations, noise, rate, offset, **kwargs): else: return [3.13125441, 3.23144828, 3.16374378, 3.12888752, 3.05588209, 3.18971771, 3.06427238, 3.33778941, 3.38108243, 3.03166509] + elif isinstance(noise, pnl.DistributionFunction): if "initializer" in kwargs: return [4.18870661, 1.3561085, 1.69287182, 1.94643064, 2.12581409, 1.05242466, 2.05628752, 1.90164378, 1.18394637, 1.39578569] + else: return [3.87376946, 1.14572149, 1.46691163, 1.57824123, 2.09755694, 1.01776584, 2.04824492, 1.57043925, 0.8744065, 1.04970702] @@ -63,26 +67,31 @@ def AdaptiveIntFun(init, value, iterations, noise, rate, offset, **kwargs): if "initializer" in kwargs: return [3.91143701, 3.49857235, 2.67777415, 3.51140748, 1.59096419, 2.91863753, 1.63622751, 4.05695955, 3.11611173, 2.55924237] + else: return [3.59649986, 3.28818534, 2.45181396, 3.14321808, 1.56270704, 2.88397872, 1.62818492, 3.72575501, 2.80657186, 2.2131637] def DriftIntFun(init, value, iterations, noise, **kwargs): assert iterations == 3 + if np.isscalar(noise): if "initializer" not in kwargs: return ([0.35782281, 4.03326927, 4.90427264, 0.90944534, 1.45943493, 2.31791882, 3.05580281, 1.20089146, 2.8408554 , 1.93964773], [3., 3., 3., 3., 3., 3., 3., 3., 3., 3.]) + else: return ([1.14954785, 4.56216419, 5.4723172 , 1.83504198, 1.53047099, 2.40504812, 3.07602121, 2.0335113 , 3.61901215, 2.80965988], [3., 3., 3., 3., 3., 3., 3., 3., 3., 3.]) + else: if "initializer" not 
in kwargs: return ([0.17810305, 4.06675934, 4.20730295, 0.90582833, 1.60883329, 2.27822395, 2.2923697 , 1.10933472, 2.71418965, 1.86808107], [3., 3., 3., 3., 3., 3., 3., 3., 3., 3.]) + else: return ([0.96982809, 4.59565426, 4.77534751, 1.83142497, 1.67986935, 2.36535325, 2.3125881 , 1.94195457, 3.4923464 , 2.73809322], @@ -96,11 +105,13 @@ def LeakyFun(init, value, iterations, noise, **kwargs): return [2.20813608, 2.25674001, 2.22389663, 2.2069879, 2.17157305, 2.23649656, 2.17564317, 2.30832598, 2.32932737, 2.15982541] else: return [2.93867224, 2.74475902, 2.74803958, 3.06104933, 2.23711905, 2.31689203, 2.19429898, 3.07659637, 3.04734388, 2.96259823] + elif isinstance(noise, pnl.DistributionFunction): if "initializer" not in kwargs: return [2.55912037, 1.24455938, 1.43417309, 1.638423, 1.91298882, 1.22700281, 1.71226825, 1.67794471, 1.20395947, 1.48326449] else: return [3.28965653, 1.73257839, 1.95831604, 2.49248443, 1.97853482, 1.30739828, 1.73092406, 2.4462151, 1.92197598, 2.28603731] + else: if "initializer" not in kwargs: return [2.39694798, 2.27976578, 1.9349721, 2.21280371, 1.5655935, 2.11241762, 1.59283164, 2.46577518, 2.09617208, 1.82765063] @@ -115,27 +126,56 @@ def AccumulatorFun(init, value, iterations, noise, **kwargs): # variable is not used in Accumulator return [[1.38631136, 1.38631136, 1.38631136, 1.38631136, 1.38631136, 1.38631136, 1.38631136, 1.38631136, 1.38631136, 1.38631136]] + else: return [[1.40097107, 1.39610447, 1.39682937, 1.40344986, 1.38762668, 1.38792466, 1.38668573, 1.40172829, 1.40071984, 1.40242065]] + elif isinstance(noise, pnl.DistributionFunction): if "initializer" not in kwargs: return [[1.46381634, 0.97440038, 0.54931704, 0.28681701, 0.26162584, 0.66800459, 1.1010486, 0.02587729, 0.38761176, -0.56452977]] + else: return [[1.47847605, 0.98419348, 0.55983505, 0.30395551, 0.26294116, 0.66961789, 1.10142297, 0.04129421, 0.40202024, -0.54842049]] + else: if "initializer" not in kwargs: return [[1.65907194, 1.41957474, 0.96892655, 1.39471298, 0.51090402, 1.20706503, 0.5443729, 1.61376489, 1.04949166, 0.90644658]] + else: return [[1.67373165, 1.42936784, 0.97944456, 1.41185147, 0.51221934, 1.20867833, 0.54474727, 1.62918182, 1.06390014, 0.92255587]] +def DriftOnASphereFun(init, value, iterations, noise, **kwargs): + assert iterations == 3 -GROUP_PREFIX="IntegratorFunction " + if np.isscalar(noise): + if "initializer" not in kwargs: + return [1.10030505e-01, 6.77893188e-06, 4.36876221e-06, -4.83568579e-06, + 4.55349584e-05, 1.77044532e-04, 1.27797893e-03, -1.92233627e-02, + 9.74815346e-01, -2.22179738e-01, -6.97708243e-06] + + else: + return [-1.32269048e-01, 4.35051787e-05, 3.87398441e-05, -3.95620568e-06, + 1.27324586e-04, -5.01625256e-04, -8.37794371e-04, 1.25048720e-01, + 7.47570336e-01, -6.52303943e-01, -6.57270465e-05] + else: + if "initializer" not in kwargs: + return [ 0.23690849, 0.00140115, 0.0020072, -0.00128063, + -0.00096267, -0.01620475, -0.02644836, 0.46090672, + 0.82875571, -0.31584261, -0.00132534] + + else: + return [-3.72900858e-03, -3.38148799e-04, -6.43154678e-04, 4.36274120e-05, + 6.67038983e-04, -2.87440868e-03, -2.08163440e-03, 4.41976901e-01, + 5.31162110e-01, -7.22848147e-01, 4.66808385e-04] + + +GROUP_PREFIX="IntegratorFunction " @pytest.mark.function @pytest.mark.integrator_function @@ -146,53 +186,51 @@ def AccumulatorFun(init, value, iterations, noise, **kwargs): @pytest.mark.parametrize("noise", [RAND2, test_noise_arr, pnl.NormalDist], ids=["SNOISE", "VNOISE", "FNOISE"]) @pytest.mark.parametrize("func", [ - 
(Functions.AdaptiveIntegrator, AdaptiveIntFun), - (Functions.SimpleIntegrator, SimpleIntFun), - (Functions.DriftDiffusionIntegrator, DriftIntFun), - (Functions.LeakyCompetingIntegrator, LeakyFun), - (Functions.AccumulatorIntegrator, AccumulatorFun), + (pnl.AdaptiveIntegrator, AdaptiveIntFun), + (pnl.SimpleIntegrator, SimpleIntFun), + (pnl.DriftDiffusionIntegrator, DriftIntFun), + (pnl.LeakyCompetingIntegrator, LeakyFun), + (pnl.AccumulatorIntegrator, AccumulatorFun), + (pnl.DriftOnASphereIntegrator, DriftOnASphereFun), ], ids=lambda x: x[0]) @pytest.mark.benchmark def test_execute(func, func_mode, variable, noise, params, benchmark): - benchmark.group = GROUP_PREFIX + func[0].componentName + func_class, func_res = func + benchmark.group = GROUP_PREFIX + func_class.componentName + try: noise = noise() except TypeError as e: if "object is not callable" not in str(e): raise e from None else: - assert isinstance(noise, pnl.DistributionFunction) - if func[1] == DriftIntFun: - pytest.skip("DriftDiffusionIntegrator doesn't support functional noise") + if issubclass(func_class, (pnl.DriftDiffusionIntegrator, pnl.DriftOnASphereIntegrator)): + pytest.skip("{} doesn't support functional noise".format(func_class.componentName)) if 'DriftOnASphereIntegrator' in func[0].componentName: + params = {**params, 'dimension': len(variable) + 1} if func_mode != 'Python': pytest.skip("DriftOnASphereIntegrator not yet compiled") - params.update({'dimension':len(variable) + 1}) - else: - if 'dimension' in params: - params.pop('dimension') - - if 'AccumulatorIntegrator' in func[0].componentName: - params = { - **params, - 'increment': RAND0_1, - } - params.pop('offset') - - # If we are dealing with a DriftDiffusionIntegrator, noise and time_step_size defaults - # have changed since this test was created. Hard code their old values. - if 'DriftDiffusionIntegrator' in str(func[0]): - f = func[0](default_variable=variable, noise=np.sqrt(noise), time_step_size=1.0, **params) - else: - f = func[0](default_variable=variable, noise=noise, **params) + elif issubclass(func_class, pnl.AccumulatorIntegrator): + params = {**params, 'increment': RAND0_1} + params.pop('offset', None) + + elif issubclass(func_class, pnl.DriftDiffusionIntegrator): + # If we are dealing with a DriftDiffusionIntegrator, noise and + # time_step_size defaults have changed since this test was created. + # Hard code their old values. + params = {**params, 'time_step_size': 1.0} + noise = np.sqrt(noise) + + f = func_class(default_variable=variable, noise=noise, **params) ex = pytest.helpers.get_func_execution(f, func_mode) ex(variable) ex(variable) res = benchmark(ex, variable) - expected = func[1](f.initializer, variable, 3, noise, **params) + + expected = func_res(f.initializer, variable, 3, noise, **params) np.testing.assert_allclose(res, expected, rtol=1e-5, atol=1e-8) From caa2a67e21db275135ee26d2c017afb94fc5cb27 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Sat, 19 Oct 2024 21:32:28 -0400 Subject: [PATCH 342/410] tests/AutodiffComposition: Cleanup use of ExecutionMode.Python Most tests checking for ExecutionMode.Python and waiving all instances can be re-enabled with ExecutionMode.PyTorch. 
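Concretely, the explicit-mode call that the re-enabled tests standardize on looks like the following minimal sketch; the two mechanisms are placeholders rather than any particular test's network, and torch is assumed to be installed.

    import numpy as np
    import psyneulink as pnl

    inp = pnl.TransferMechanism(name="in", default_variable=np.zeros(2))
    out = pnl.TransferMechanism(name="out", default_variable=np.zeros(1),
                                function=pnl.Logistic())
    autodiff = pnl.AutodiffComposition([inp, out], learning_rate=0.01)

    # Passing execution_mode explicitly selects the PyTorch backend and avoids
    # the warnings emitted when AutodiffComposition.learn() is called without a mode.
    autodiff.learn(
        inputs={"inputs": {inp: [[0, 0], [0, 1], [1, 0], [1, 1]]},
                "targets": {out: [[0], [1], [1], [0]]},
                "epochs": 2},
        execution_mode=pnl.ExecutionMode.PyTorch,
    )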
Re-enabled tests: * test_loss_specs[ExecutionMode.PyTorch-Loss.L1-expected1] - add current results as expected results * test_loss_specs[ExecutionMode.PyTorch-Loss.POISSON_NLL-expected3] - add current results as expected results * test_pytorch_loss_spec[ExecutionMode.PyTorch] * test_xor_nested_no_train_then_train[ExecutionMode.PyTorch-400-4-10-1e-05] * test_params_stay_separate[ExecutionMode.PyTorch] Provide execution_mode=pnl.ExecutionMode.PyTorch explicitly to AutodiffComposition.learn to avoid warnings. Improves pytest tests/composition/test_autodiffcomposition.py -n0 from: = 65 passed, 26 skipped, 25 warnings in 154.82s (0:02:34) = to: = 70 passed, 21 skipped, 7 warnings in 159.48s (0:02:39) = Signed-off-by: Jan Vesely --- tests/composition/test_autodiffcomposition.py | 378 ++++++------------ 1 file changed, 129 insertions(+), 249 deletions(-) diff --git a/tests/composition/test_autodiffcomposition.py b/tests/composition/test_autodiffcomposition.py index 858390f6581..80242efee9d 100644 --- a/tests/composition/test_autodiffcomposition.py +++ b/tests/composition/test_autodiffcomposition.py @@ -78,13 +78,12 @@ def test_autodiff_without_torch(): mech_B = TransferMechanism() comp = AutodiffComposition([mech_A, mech_B]) comp.run() - result = comp.learn(inputs={mech_A:[[1], [2]]}, - epochs=3, - execution_mode = pnl.ExecutionMode.Python) + result = comp.learn(inputs={mech_A:[[1], [2]]}, epochs=3, execution_mode=pnl.ExecutionMode.Python) + assert comp.pytorch_representation is None np.testing.assert_allclose(result,[[1.95634283]], atol=1e-08, rtol=1e-08) - # If torch is not installed, should raise exception for learn() with ExecutionMode.Python + # If torch is not installed, should raise exception for learn() with ExecutionMode.PyTorch from psyneulink.library.compositions.autodiffcomposition import torch_available if not torch_available: mech_A = TransferMechanism() @@ -94,7 +93,7 @@ def test_autodiff_without_torch(): with pytest.raises(AutodiffCompositionError) as error: result = comp.learn(inputs={mech_A:[[1], [2]]}, epochs=3, - execution_mode = pnl.ExecutionMode.PyTorch) + execution_mode=pnl.ExecutionMode.PyTorch) np.testing.assert_allclose(result,[[1.95634283]], atol=1e-08, rtol=1e-08) assert (f"'autodiff_composition-1.learn()' has been called with ExecutionMode.Pytorch, " f"but Pytorch module ('torch') is not installed. 
" @@ -488,7 +487,7 @@ def test_semantic_net_training_correctness(self, eps, opt, autodiff_mode, benchm 0.26739429, 0.25464059, 0.25453138, 0.49761396]]] - if pytest.helpers.llvm_current_fp_precision() == 'fp32' and autodiff_mode != pnl.ExecutionMode.Python: + if pytest.helpers.llvm_current_fp_precision() == 'fp32' and autodiff_mode != pnl.ExecutionMode.PyTorch: accuracy_args = {"atol": 1e-8, "rtol": 1e-6} else: accuracy_args = {} @@ -1332,28 +1331,18 @@ def test_xor_nested_train_then_no_train(self, num_epochs, learning_rate, (400, 4, 10, .00001), ] ) - def test_xor_nested_no_train_then_train(self, num_epochs, learning_rate, - patience, min_delta, autodiff_mode): - if autodiff_mode is not pnl.ExecutionMode.Python: - pytest.skip("") - # the inputs we will provide to the model - xor_inputs = np.array([[0, 0], [0, 1], [1, 0], [1, 1]]) + def test_xor_nested_no_train_then_train(self, num_epochs, learning_rate, patience, min_delta, autodiff_mode): + if autodiff_mode != pnl.ExecutionMode.PyTorch: + pytest.skip("LLVM not available") - # the outputs we wish to see from the model + xor_inputs = np.array([[0, 0], [0, 1], [1, 0], [1, 1]]) xor_targets = np.array([[0], [1], [1], [0]]) # ----------------------------------------------------------------- - xor_in = pnl.TransferMechanism(name='xor_in', - default_variable=np.zeros(2)) - - xor_hid = pnl.TransferMechanism(name='xor_hid', - default_variable=np.zeros(10), - function=Logistic()) - - xor_out = pnl.TransferMechanism(name='xor_out', - default_variable=np.zeros(1), - function=Logistic()) + xor_in = pnl.TransferMechanism(name='xor_in', default_variable=np.zeros(2)) + xor_hid = pnl.TransferMechanism(name='xor_hid', default_variable=np.zeros(10), function=Logistic()) + xor_out = pnl.TransferMechanism(name='xor_out', default_variable=np.zeros(1), function=Logistic()) hid_map = pnl.MappingProjection(name='input_to_hidden', matrix=np.random.randn(2, 10) * 0.1, @@ -1367,9 +1356,7 @@ def test_xor_nested_no_train_then_train(self, num_epochs, learning_rate, # ----------------------------------------------------------------- - xor_autodiff = AutodiffComposition( - learning_rate=learning_rate, - ) + xor_autodiff = AutodiffComposition(learning_rate=learning_rate) xor_autodiff.add_node(xor_in) xor_autodiff.add_node(xor_hid) @@ -1387,7 +1374,9 @@ def test_xor_nested_no_train_then_train(self, num_epochs, learning_rate, parentComposition.add_node(xor_autodiff) input = {xor_autodiff: input_dict} no_training_input = {xor_autodiff: no_training_input_dict} + learning_context = Context() + result1 = xor_autodiff.run(inputs=input[xor_autodiff]['inputs'], execution_mode=autodiff_mode, context=learning_context) xor_autodiff.learn(inputs=input_dict, execution_mode=autodiff_mode, context=learning_context, patience=patience, min_delta=min_delta) result2 = parentComposition.run(inputs=no_training_input, execution_mode=autodiff_mode, context=learning_context) @@ -1723,10 +1712,6 @@ def test_semantic_net_nested(self, eps, opt, autodiff_mode): sem_net.learn(inputs=input_dict, execution_mode=autodiff_mode) - if autodiff_mode is not pnl.ExecutionMode.Python: - #FIXME: Enable the rest of the test when recompilation is supported - return - parentComposition.run(inputs=no_training_input) @@ -2313,12 +2298,14 @@ def test_nested_autodiff_learning_with_input_func(self): def get_inputs_auto_diff(idx): return {"inputs": {xor_in_func: xor_inputs_func[idx]}, "targets": {xor_out_func: xor_targets_func[idx]}} + def get_inputs_comp(idx): return {xor_in_func: xor_inputs_func[idx]} + def 
get_targets_comp(idx): return {xor_out_func: xor_targets_func[idx]} - xor_func.learn(inputs=get_inputs_auto_diff) + xor_func.learn(inputs=get_inputs_auto_diff, execution_mode=pnl.ExecutionMode.PyTorch) results = xor_func.results np.testing.assert_allclose(results, [[[0.62245933]],[[0.62813197]],[[0.6282438]],[[0.6341436]]]) @@ -2353,21 +2340,25 @@ def test_error_for_running_nested_learning_in_Python_mode(self): # Test for error on learning if nested is Composition nested = pnl.Composition(name='nested', nodes=[hidden_mech]) autodiff = pnl.AutodiffComposition([input_mech, nested, output_mech], name='comp') - autodiff.run() + autodiff.run([0, 0]) + error_msg = (f"Unable execute learning for 'comp' because it contains nested Composition(s) " f"that are not AutodiffCompositions: 'nested'.") with pytest.raises(AutodiffCompositionError) as error: - autodiff.learn(inputs={input_mech: [[0, 0]]}) + autodiff.learn(inputs={input_mech: [[0, 0]]}, execution_mode=pnl.ExecutionMode.PyTorch) + assert error_msg in str(error.value) # Test for error on learning if nested is AutodiffComposition but execution_mode is Python nested = pnl.AutodiffComposition(name='nested', nodes=[hidden_mech]) autodiff = pnl.AutodiffComposition([input_mech, nested, output_mech], name='comp') - autodiff.run() + autodiff.run([0, 0]) + error_msg = ("Unable to execute learning in Python mode for 'comp-1' " "because it contains one or more nested Compositions: 'nested-1'.") with pytest.raises(AutodiffCompositionError) as error: autodiff.learn(inputs={input_mech: [[0, 0]]}, execution_mode=pnl.ExecutionMode.Python) + assert error_msg in str(error.value) @@ -2420,7 +2411,9 @@ def test_parallel_inputs_to_output_ports_converge_internal(self): # autodiff.show_graph(show_all=True) result_autodiff_ports = autodiff.learn(inputs={input_A: [[0, 0], [0, 1], [1, 0], [1, 1]], input_B: [[1, 2], [1, 2], [1, 2], [1, 2]]}, - learning_rate = .01, epochs=3) + learning_rate=.01, + epochs=3, + execution_mode=pnl.ExecutionMode.PyTorch) # # Autodiff: PARALLEL PATHWAYS USING SEPARATE HIDDEN NODES (hidden_1 VS. hidden_2) sizes = {HIDDEN_B: 3} @@ -2431,19 +2424,15 @@ def test_parallel_inputs_to_output_ports_converge_internal(self): hidden_B = nodes[HIDDEN_B] hidden_C = nodes[HIDDEN_C] output = nodes[OUTPUT_A] - autodiff = pnl.AutodiffComposition(pathways=[[input_A, - hidden_A, - hidden_C, - output], - [input_B, - hidden_B, - hidden_C, - output]], - name='autodiff') + autodiff = pnl.AutodiffComposition(pathways=[[input_A, hidden_A, hidden_C, output], + [input_B, hidden_B, hidden_C, output]], + name='autodiff') result_autodiff_nodes = autodiff.learn(inputs={input_A: [[0, 0], [0, 1], [1, 0], [1, 1]], - input_B: [[1, 2], [1, 2], [1, 2], [1, 2]]}, - learning_rate = .01, epochs=3) + input_B: [[1, 2], [1, 2], [1, 2], [1, 2]]}, + learning_rate=.01, + epochs=3, + execution_mode=pnl.ExecutionMode.PyTorch) # # Composition: PARALLEL PATHWAYS USING SEPARATE INPUT AND OUTPUT PORTS ON hidden_1 sizes = {HIDDEN_A: (2,3)} @@ -2467,8 +2456,9 @@ def test_parallel_inputs_to_output_ports_converge_internal(self): hidden_B, output]) result_comp_ports = comp.learn(inputs={input_A: [[0, 0], [0, 1], [1, 0], [1, 1]], - input_B: [[1, 2], [1, 2], [1, 2], [1, 2]]}, - learning_rate = .01, epochs=3) + input_B: [[1, 2], [1, 2], [1, 2], [1, 2]]}, + learning_rate=.01, + epochs=3) # # Comp: PARALLEL PATHWAYS USING SEPARATE HIDDEN NODES (hidden_1 VS. 
hidden_2) sizes = {HIDDEN_B: 3} @@ -2489,8 +2479,9 @@ def test_parallel_inputs_to_output_ports_converge_internal(self): hidden_C, output]) result_comp_nodes = comp.learn(inputs={input_A: [[0, 0], [0, 1], [1, 0], [1, 1]], - input_B: [[1, 2], [1, 2], [1, 2], [1, 2]]}, - learning_rate = .01, epochs=3) + input_B: [[1, 2], [1, 2], [1, 2], [1, 2]]}, + learning_rate=.01, + epochs=3) expected = [[-0.14091702170015408, 0.11156579308015635]] np.testing.assert_allclose(result_autodiff_ports, expected, rtol=1e-8, atol=1e-8) @@ -2501,7 +2492,7 @@ def test_parallel_inputs_to_output_ports_converge_internal(self): def test_single_input_to_multiple_output_ports_converge_internal(self): # Autodiff: DIVERGENT PATHWAY USING SEPARATE INPUT AND OUTPUT PORTS ON hidden_1 - sizes = {HIDDEN_A: (2,3)} + sizes = {HIDDEN_A: (2, 3)} nodes = nodes_for_testing_nested_comps(sizes) input = nodes[INPUT_A] hidden_A = nodes[HIDDEN_A] @@ -2521,7 +2512,9 @@ def test_single_input_to_multiple_output_ports_converge_internal(self): output]], name='autodiff') result_autodiff_ports = autodiff.learn(inputs={input: [[1, 2], [1, 2], [1, 2], [1, 2]]}, - learning_rate = .01, epochs=3) + learning_rate=.01, + epochs=3, + execution_mode=pnl.ExecutionMode.PyTorch) # # Autodiff: DIVERGENT PATHWAY USING SEPARATE HIDDEN NODES (hidden_1 VS. hidden_2) sizes = {HIDDEN_B: 3} @@ -2535,10 +2528,12 @@ def test_single_input_to_multiple_output_ports_converge_internal(self): [input, hidden_B, hidden_C, output]], name='autodiff') result_autodiff_nodes = autodiff.learn(inputs={input: [[1, 2], [1, 2], [1, 2], [1, 2]]}, - learning_rate = .01, epochs=3) + learning_rate=.01, + epochs=3, + execution_mode=pnl.ExecutionMode.PyTorch) # Composition: DIVERGENT PATHWAY USING SEPARATE INPUT AND OUTPUT PORTS ON hidden_1 - sizes = {HIDDEN_A: (2,3)} + sizes = {HIDDEN_A: (2, 3)} nodes = nodes_for_testing_nested_comps(sizes) input = nodes[INPUT_A] hidden_A = nodes[HIDDEN_A] @@ -2558,7 +2553,8 @@ def test_single_input_to_multiple_output_ports_converge_internal(self): hidden_B, output]) result_comp_ports = comp.learn(inputs={input: [[1, 2], [1, 2], [1, 2], [1, 2]]}, - learning_rate = .01, epochs=3) + learning_rate=.01, + epochs=3) # Comp: DIVERGENT PATHWAY USING SEPARATE HIDDEN NODES (hidden_1 VS. hidden_2) sizes = {HIDDEN_B: 3} @@ -2572,7 +2568,8 @@ def test_single_input_to_multiple_output_ports_converge_internal(self): comp.add_backpropagation_learning_pathway([input, hidden_A, hidden_C, output]) comp.add_backpropagation_learning_pathway([input, hidden_B, hidden_C, output]) result_comp_nodes = comp.learn(inputs={input: [[1, 2], [1, 2], [1, 2], [1, 2]]}, - learning_rate = .01, epochs=3) + learning_rate=.01, + epochs=3) expected = [[-0.5448706019245989, -0.45743872720005285]] np.testing.assert_allclose(result_autodiff_ports, expected, rtol=1e-8, atol=1e-8) @@ -2600,7 +2597,9 @@ def test_single_input_to_multiple_output_ports_converge_on_OUTPUT_Node(self): output] ], name='autodiff') result_autodiff_ports = autodiff.learn(inputs={input: [[0, 0], [0, 1], [1, 0], [1, 1]]}, - learning_rate = .01, epochs=3) + learning_rate=.01, + epochs=3, + execution_mode=pnl.ExecutionMode.PyTorch) # Autodiff: PARALLEL PATHWAYS USE SEPARATE HIDDEN NODES (hidden_1 VS. 
hidden_2) sizes = {HIDDEN_B: 3} @@ -2609,11 +2608,11 @@ def test_single_input_to_multiple_output_ports_converge_on_OUTPUT_Node(self): hidden_A = nodes[HIDDEN_A] hidden_B = nodes[HIDDEN_B] output = nodes[OUTPUT_A] - autodiff = pnl.AutodiffComposition(pathways=[[input, hidden_A, output], - [input, hidden_B, output] ], - name='autodiff') + autodiff = pnl.AutodiffComposition(pathways=[[input, hidden_A, output], [input, hidden_B, output]], name='autodiff') result_autodiff_nodes = autodiff.learn(inputs={input: [[0, 0], [0, 1], [1, 0], [1, 1]]}, - learning_rate = .01, epochs=3) + learning_rate=.01, + epochs=3, + execution_mode=pnl.ExecutionMode.PyTorch) # Comp: PARALLEL PATHWAYS USE SEPARATE HIDDEN NODES (hidden_1 VS. hidden_2) sizes = {HIDDEN_B: 3} @@ -2626,7 +2625,8 @@ def test_single_input_to_multiple_output_ports_converge_on_OUTPUT_Node(self): comp.add_backpropagation_learning_pathway([input, hidden_A, output]) comp.add_backpropagation_learning_pathway([input, hidden_B, output]) result_comp_nodes = comp.learn(inputs={input: [[0, 0], [0, 1], [1, 0], [1, 1]]}, - learning_rate = .01, epochs=3) + learning_rate=.01, + epochs=3) # Comp: PARALLEL PATHWAYS USE SEPARATE INPUT AND OUTPUT PORTS ON hidden_c sizes = {HIDDEN_A: (2,3)} @@ -2646,7 +2646,8 @@ def test_single_input_to_multiple_output_ports_converge_on_OUTPUT_Node(self): pnl.MappingProjection(hidden.output_ports[1],output), output]) result_comp_ports = comp.learn(inputs={input: [[0, 0], [0, 1], [1, 0], [1, 1]]}, - learning_rate = .01, epochs=3) + learning_rate=.01, + epochs=3) expected = [[3.3178720430554267, 3.3245710462077773]] np.testing.assert_allclose(result_autodiff_ports, expected, rtol=1e-8, atol=1e-8) @@ -2662,20 +2663,17 @@ def test_two_output_ports_on_OUTPUT_Node(self): input_A = nodes[INPUT_A] input_B = nodes[INPUT_B] output = nodes[OUTPUT_A] - autodiff = AutodiffComposition(pathways=[[input_A, - MappingProjection(input_A, output.input_ports[0]), - output], - [input_B, - MappingProjection(input_B, output.input_ports[1]), - output]], + autodiff = AutodiffComposition(pathways=[[input_A, MappingProjection(input_A, output.input_ports[0]), output], + [input_B, MappingProjection(input_B, output.input_ports[1]), output]], name='autodiff') result_autodiff_ports = autodiff.learn(inputs={input_A: [[0, 0], [0, 1], [1, 0], [1, 1]], input_B: [[1, 2], [1, 2], [1, 2], [1, 2]]}, - learning_rate = .01, epochs=3) + learning_rate=.01, + epochs=3, + execution_mode=pnl.ExecutionMode.PyTorch) # Autodiff: SEPARATE INPUT NODES TO SEPARATE OUTPUT NODES WITH ONE OUTPUTPORT EACH - sizes = {OUTPUT_A: 2, - OUTPUT_B: 3} + sizes = {OUTPUT_A: 2, OUTPUT_B: 3} nodes = nodes_for_testing_nested_comps(sizes) input_A = nodes[INPUT_A] input_B = nodes[INPUT_B] @@ -2686,7 +2684,9 @@ def test_two_output_ports_on_OUTPUT_Node(self): name='autodiff') result_autodiff_nodes = autodiff.learn(inputs={input_A: [[0, 0], [0, 1], [1, 0], [1, 1]], input_B: [[1, 2], [1, 2], [1, 2], [1, 2]]}, - learning_rate = .01, epochs=3) + learning_rate=.01, + epochs=3, + execution_mode=pnl.ExecutionMode.PyTorch) for port, node in zip(result_autodiff_ports, result_autodiff_nodes): np.testing.assert_allclose(port, node) @@ -2799,26 +2799,19 @@ def test_training_then_processing(self, autodiff_mode): @pytest.mark.parametrize( 'loss, expected', [ + (Loss.CROSS_ENTROPY, [[[0.99330715]], [[0.99933202]], [[0.99933202]], [[0.99985049]]]), + (Loss.L1, [[[0.99330641]], [[0.9993319 ]], [[0.9993319 ]], [[0.99985045]]]), (Loss.MSE, [[[0.99330509]], [[0.99933169]], [[0.99933169]], [[0.9998504]]]), - (Loss.L1, []), 
- (Loss.POISSON_NLL, []), - (Loss.CROSS_ENTROPY, [[[0.99330715]], [[0.99933202]], [[0.99933202]], [[0.99985049]]]) + (Loss.POISSON_NLL, [[[0.99330385]], [[0.99933149]], [[0.99933149]], [[0.99985034]]]), ] ) def test_loss_specs(self, loss, expected, autodiff_mode): - if autodiff_mode is not pnl.ExecutionMode.Python and loss in [Loss.POISSON_NLL, Loss.L1]: + if autodiff_mode is not pnl.ExecutionMode.PyTorch and loss in [Loss.POISSON_NLL, Loss.L1]: pytest.skip("Loss spec not yet implemented!") - xor_in = TransferMechanism(name='xor_in', - default_variable=np.zeros(2)) - - xor_hid = TransferMechanism(name='xor_hid', - default_variable=np.zeros(10), - function=Logistic()) - - xor_out = TransferMechanism(name='xor_out', - default_variable=np.zeros(1), - function=Logistic()) + xor_in = TransferMechanism(name='xor_in', default_variable=np.zeros(2)) + xor_hid = TransferMechanism(name='xor_hid', default_variable=np.zeros(10), function=Logistic()) + xor_out = TransferMechanism(name='xor_out', default_variable=np.zeros(1), function=Logistic()) hid_map = MappingProjection() out_map = MappingProjection() @@ -2833,34 +2826,24 @@ def test_loss_specs(self, loss, expected, autodiff_mode): xor.add_projection(sender=xor_hid, projection=out_map, receiver=xor_out) xor_inputs = np.array([[0, 0], [0, 1], [1, 0], [1, 1]]) - xor_targets = np.array([[0], [1], [1], [0]]) - xor.learn(inputs = {"inputs": {xor_in:xor_inputs}, - "targets": {xor_out:xor_targets}, - "epochs": 10}, execution_mode=autodiff_mode) + xor.learn(inputs={"inputs": {xor_in: xor_inputs}, "targets": {xor_out: xor_targets}, "epochs": 10}, + execution_mode=autodiff_mode) - tol = {'atol': 2e-6, 'rtol': 2e-6} if autodiff_mode != pnl.ExecutionMode.Python and loss == Loss.CROSS_ENTROPY else {} + tol = {'atol': 2e-6, 'rtol': 2e-6} if loss == Loss.CROSS_ENTROPY else {} np.testing.assert_allclose(xor.learning_results, expected, **tol) def test_pytorch_loss_spec(self, autodiff_mode): - - if autodiff_mode is not pnl.ExecutionMode.Python: + if autodiff_mode is not pnl.ExecutionMode.PyTorch: pytest.skip("Loss spec not yet implemented!") import torch ls = torch.nn.SoftMarginLoss(reduction='sum') - xor_in = TransferMechanism(name='xor_in', - default_variable=np.zeros(2)) - - xor_hid = TransferMechanism(name='xor_hid', - default_variable=np.zeros(10), - function=Logistic()) - - xor_out = TransferMechanism(name='xor_out', - default_variable=np.zeros(1), - function=Logistic()) + xor_in = TransferMechanism(name='xor_in', default_variable=np.zeros(2)) + xor_hid = TransferMechanism(name='xor_hid', default_variable=np.zeros(10), function=Logistic()) + xor_out = TransferMechanism(name='xor_out', default_variable=np.zeros(1), function=Logistic()) hid_map = MappingProjection() out_map = MappingProjection() @@ -2873,18 +2856,14 @@ def test_pytorch_loss_spec(self, autodiff_mode): xor.add_projection(sender=xor_in, projection=hid_map, receiver=xor_hid) xor.add_projection(sender=xor_hid, projection=out_map, receiver=xor_out) - xor_inputs = np.array( # the inputs we will provide to the model - [[0, 0], [0, 1], [1, 0], [1, 1]]) - xor_targets = np.array( # the outputs we wish to see from the model - [[0], [1], [1], [0]]) + xor_inputs = np.array([[0, 0], [0, 1], [1, 0], [1, 1]]) + xor_targets = np.array([[0], [1], [1], [0]]) - xor.learn(inputs={"inputs": {xor_in:xor_inputs}, - "targets": {xor_out:xor_targets}, - "epochs": 10}, execution_mode=autodiff_mode) - xor.learn(inputs={"inputs": {xor_in: xor_inputs}, - "targets": {xor_out: xor_targets}, - "epochs": 10}, 
execution_mode=autodiff_mode) + xor.learn(inputs={"inputs": {xor_in: xor_inputs}, "targets": {xor_out: xor_targets}, "epochs": 10}, + execution_mode=autodiff_mode) + xor.learn(inputs={"inputs": {xor_in: xor_inputs}, "targets": {xor_out: xor_targets}, "epochs": 10}, + execution_mode=autodiff_mode) @pytest.mark.benchmark(group="Optimizer specs") @@ -2942,19 +2921,12 @@ def test_optimizer_specs(self, learning_rate, weight_decay, optimizer_type, expe # test whether pytorch parameters and projections are kept separate (at diff. places in memory) def test_params_stay_separate(self, autodiff_mode): - if autodiff_mode is not pnl.ExecutionMode.Python: + if autodiff_mode is not pnl.ExecutionMode.PyTorch: pytest.skip("Compiled weights are always copied back!") - xor_in = TransferMechanism(name='xor_in', - default_variable=np.zeros(2)) - - xor_hid = TransferMechanism(name='xor_hid', - default_variable=np.zeros(10), - function=Logistic()) - - xor_out = TransferMechanism(name='xor_out', - default_variable=np.zeros(1), - function=Logistic()) + xor_in = TransferMechanism(name='xor_in', default_variable=np.zeros(2)) + xor_hid = TransferMechanism(name='xor_hid', default_variable=np.zeros(10), function=Logistic()) + xor_out = TransferMechanism(name='xor_out', default_variable=np.zeros(1), function=Logistic()) hid_m = np.random.rand(2,10) out_m = np.random.rand(10,1) @@ -2979,16 +2951,13 @@ def test_params_stay_separate(self, autodiff_mode): xor.add_projection(sender=xor_in, projection=hid_map, receiver=xor_hid) xor.add_projection(sender=xor_hid, projection=out_map, receiver=xor_out) - xor_inputs = np.array( # the inputs we will provide to the model - [[0, 0], [0, 1], [1, 0], [1, 1]]) + xor_inputs = np.array([[0, 0], [0, 1], [1, 0], [1, 1]]) - xor_targets = np.array( # the outputs we wish to see from the model - [[0], [1], [1], [0]]) + xor_targets = np.array([[0], [1], [1], [0]]) # train the model for a few epochs - result = xor.learn(inputs={"inputs": {xor_in:xor_inputs}, - "targets": {xor_out:xor_targets}, - "epochs": 10}, execution_mode=autodiff_mode) + xor.learn(inputs={"inputs": {xor_in:xor_inputs}, "targets": {xor_out:xor_targets},"epochs": 10}, + execution_mode=autodiff_mode) # get weight parameters from pytorch pt_weights_hid = xor.parameters.pytorch_representation.get(xor).params[0].detach().numpy().copy() @@ -3676,20 +3645,11 @@ def test_autodiff_loss_tracking(self): class TestBatching: def test_call_before_minibatch(self): # SET UP MECHANISMS FOR COMPOSITION - - xor_in = TransferMechanism(name='xor_in', - default_variable=np.zeros(2)) - - xor_hid = TransferMechanism(name='xor_hid', - default_variable=np.zeros(10), - function=Logistic()) - - xor_out = TransferMechanism(name='xor_out', - default_variable=np.zeros(1), - function=Logistic()) + xor_in = TransferMechanism(name='xor_in', default_variable=np.zeros(2)) + xor_hid = TransferMechanism(name='xor_hid', default_variable=np.zeros(10), function=Logistic()) + xor_out = TransferMechanism(name='xor_out', default_variable=np.zeros(1), function=Logistic()) # SET UP PROJECTIONS FOR COMPOSITION - hid_map = MappingProjection(name='hid_map', matrix=np.random.rand(2, 10), sender=xor_in, @@ -3701,7 +3661,6 @@ def test_call_before_minibatch(self): receiver=xor_out) # SET UP COMPOSITION - xor = AutodiffComposition(learning_rate=10) xor.add_node(xor_in) @@ -3710,19 +3669,10 @@ def test_call_before_minibatch(self): xor.add_projection(sender=xor_in, projection=hid_map, receiver=xor_hid) xor.add_projection(sender=xor_hid, projection=out_map, receiver=xor_out) - # 
SET UP INPUTS AND TARGETS - - xor_inputs_1 = np.array( # the inputs we will provide to the model - [[0, 0], - [0, 1], - [1, 0], - [1, 1]]) - xor_targets_1 = np.array( # the outputs we wish to see from the model - [[0], - [1], - [1], - [0]]) + # SET UP INPUTS AND TARGETS + xor_inputs_1 = np.array([[0, 0], [0, 1], [1, 0], [1, 1]]) + xor_targets_1 = np.array([[0], [1], [1], [0]]) # TRAIN COMPOSITION inputs_dict_1 = {"inputs": {xor_in: xor_inputs_1}, @@ -3731,32 +3681,20 @@ def test_call_before_minibatch(self): a = [0] - def cbm(a): + def cbm(): a[0] += 1 - xor.learn( - inputs=inputs_dict_1, - call_before_minibatch=lambda: cbm(a) - ) + xor.learn(inputs=inputs_dict_1, call_before_minibatch=cbm, execution_mode=pnl.ExecutionMode.PyTorch) assert a[0] == 4 def test_call_after_minibatch(self): # SET UP MECHANISMS FOR COMPOSITION - - xor_in = TransferMechanism(name='xor_in', - default_variable=np.zeros(2)) - - xor_hid = TransferMechanism(name='xor_hid', - default_variable=np.zeros(10), - function=Logistic()) - - xor_out = TransferMechanism(name='xor_out', - default_variable=np.zeros(1), - function=Logistic()) + xor_in = TransferMechanism(name='xor_in', default_variable=np.zeros(2)) + xor_hid = TransferMechanism(name='xor_hid', default_variable=np.zeros(10), function=Logistic()) + xor_out = TransferMechanism(name='xor_out', default_variable=np.zeros(1), function=Logistic()) # SET UP PROJECTIONS FOR COMPOSITION - hid_map = MappingProjection(name='hid_map', matrix=np.random.rand(2, 10), sender=xor_in, @@ -3768,7 +3706,6 @@ def test_call_after_minibatch(self): receiver=xor_out) # SET UP COMPOSITION - xor = AutodiffComposition(learning_rate=10) xor.add_node(xor_in) @@ -3779,18 +3716,8 @@ def test_call_after_minibatch(self): xor.add_projection(sender=xor_hid, projection=out_map, receiver=xor_out) # SET UP INPUTS AND TARGETS - - xor_inputs_1 = np.array( # the inputs we will provide to the model - [[0, 0], - [0, 1], - [1, 0], - [1, 1]]) - - xor_targets_1 = np.array( # the outputs we wish to see from the model - [[0], - [1], - [1], - [0]]) + xor_inputs_1 = np.array([[0, 0], [0, 1], [1, 0],[1, 1]]) + xor_targets_1 = np.array([[0], [1], [1], [0]]) # TRAIN COMPOSITION inputs_dict_1 = {"inputs": {xor_in: xor_inputs_1}, @@ -3799,14 +3726,10 @@ def test_call_after_minibatch(self): a = [0] - def cam(a): + def cam(): a[0] += 1 - xor.learn( - inputs=inputs_dict_1, - call_after_minibatch=lambda: cam(a) - ) - + xor.learn(inputs=inputs_dict_1, call_after_minibatch=cam, execution_mode=pnl.ExecutionMode.PyTorch) assert a[0] == 4 @pytest.mark.parametrize( @@ -3814,20 +3737,11 @@ def cam(a): ) def test_batching_with_epochs_specified(self, eps): # SET UP MECHANISMS FOR COMPOSITION - - xor_in = TransferMechanism(name='xor_in', - default_variable=np.zeros(2)) - - xor_hid = TransferMechanism(name='xor_hid', - default_variable=np.zeros(10), - function=Logistic()) - - xor_out = TransferMechanism(name='xor_out', - default_variable=np.zeros(1), - function=Logistic()) + xor_in = TransferMechanism(name='xor_in', default_variable=np.zeros(2)) + xor_hid = TransferMechanism(name='xor_hid', default_variable=np.zeros(10), function=Logistic()) + xor_out = TransferMechanism(name='xor_out', default_variable=np.zeros(1), function=Logistic()) # SET UP PROJECTIONS FOR COMPOSITION - hid_map = MappingProjection(name='hid_map', matrix=np.random.rand(2, 10), sender=xor_in, @@ -3839,7 +3753,6 @@ def test_batching_with_epochs_specified(self, eps): receiver=xor_out) # SET UP COMPOSITION - xor = AutodiffComposition(learning_rate=10, # optimizer_type=opt 
) @@ -3850,19 +3763,10 @@ def test_batching_with_epochs_specified(self, eps): xor.add_projection(sender=xor_in, projection=hid_map, receiver=xor_hid) xor.add_projection(sender=xor_hid, projection=out_map, receiver=xor_out) - # SET UP INPUTS AND TARGETS - - xor_inputs_1 = np.array( # the inputs we will provide to the model - [[0, 0], - [0, 1], - [1, 0], - [1, 1]]) - xor_targets_1 = np.array( # the outputs we wish to see from the model - [[0], - [1], - [1], - [0]]) + # SET UP INPUTS AND TARGETS + xor_inputs_1 = np.array([[0, 0], [0, 1], [1, 0], [1, 1]]) + xor_targets_1 = np.array([[0], [1], [1], [0]]) c1 = Context(execution_id='context1') @@ -3871,50 +3775,26 @@ def test_batching_with_epochs_specified(self, eps): "targets": {xor_out: xor_targets_1}, "epochs": eps} - xor.learn( - inputs=inputs_dict_1, - context=c1, - minibatch_size=2 - ) + xor.learn(inputs=inputs_dict_1, context=c1, minibatch_size=2, execution_mode=pnl.ExecutionMode.PyTorch) c2 = Context(execution_id='context2') - xor_inputs_2 = np.array( # the inputs we will provide to the model - [[0, 0], - [0, 1]]) - - xor_targets_2 = np.array( # the outputs we wish to see from the model - [[0], - [1]]) + xor_inputs_2 = np.array([[0, 0], [0, 1]]) + xor_targets_2 = np.array([[0], [1]]) inputs_dict_2 = {"inputs": {xor_in: xor_inputs_2}, "targets": {xor_out: xor_targets_2}, "epochs": 1} - xor_inputs_3 = np.array( - [[1, 0], - [1, 1]] - ) - - xor_targets_3 = np.array( - [[1], - [0]] - ) + xor_inputs_3 = np.array([[1, 0], [1, 1]]) + xor_targets_3 = np.array([[1],[0]]) inputs_dict_3 = {"inputs": {xor_in: xor_inputs_3}, "targets": {xor_out: xor_targets_3}, "epochs": 1} for _ in range(eps): - xor.learn( - inputs=inputs_dict_2, - context=c2, - minibatch_size=TRAINING_SET - ) - xor.learn( - inputs=inputs_dict_3, - context=c2, - minibatch_size=TRAINING_SET - ) + xor.learn(inputs=inputs_dict_2, context=c2, minibatch_size=TRAINING_SET, execution_mode=pnl.ExecutionMode.PyTorch) + xor.learn(inputs=inputs_dict_3, context=c2, minibatch_size=TRAINING_SET, execution_mode=pnl.ExecutionMode.PyTorch) c1_results = xor.parameters.results._get(c1) c2_results = xor.parameters.results._get(c2) From 1239e4a7f6590a8d17bff633025fc2f20a8fdc46 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Sun, 20 Oct 2024 00:45:09 -0400 Subject: [PATCH 343/410] tests/TransferFunction: Cleanup Use top level pnl names for functions. Codestyle. 
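For the codestyle change, the two spellings resolve to the same class; the deep import is what the test previously used, and the top-level alias is what it moves to (a trivial sketch):

    import psyneulink as pnl
    import psyneulink.core.components.functions.nonstateful.transferfunctions as Functions

    f_old = Functions.Linear(slope=2.0)   # deep module path (old style)
    f_new = pnl.Linear(slope=2.0)         # top-level re-export (new style)
    assert type(f_old) is type(f_new)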
Signed-off-by: Jan Vesely --- tests/functions/test_transfer.py | 133 +++++++++++++++++-------------- 1 file changed, 72 insertions(+), 61 deletions(-) diff --git a/tests/functions/test_transfer.py b/tests/functions/test_transfer.py index b98792c10de..0b5d15d3f52 100644 --- a/tests/functions/test_transfer.py +++ b/tests/functions/test_transfer.py @@ -3,7 +3,6 @@ import pytest import psyneulink as pnl -import psyneulink.core.components.functions.nonstateful.transferfunctions as Functions import psyneulink.core.globals.keywords as kw import psyneulink.core.llvm as pnlvm @@ -48,55 +47,57 @@ def binomial_distort_helper(seed): test_data = [ - pytest.param(Functions.Linear, test_var, {kw.SLOPE:RAND1, kw.INTERCEPT:RAND2}, test_var * RAND1 + RAND2, id="LINEAR"), - pytest.param(Functions.Exponential, test_var, {kw.SCALE:RAND1, kw.RATE:RAND2}, RAND1 * np.exp(RAND2 * test_var), id="EXPONENTIAL"), - pytest.param(Functions.Logistic, test_var, {kw.GAIN:RAND1, kw.X_0:RAND2, kw.OFFSET:RAND3, kw.SCALE:RAND4}, logistic_helper, id="LOGISTIC"), - pytest.param(Functions.Tanh, test_var, {kw.GAIN:RAND1, kw.BIAS:RAND2, kw.X_0:RAND3, kw.OFFSET:RAND4}, tanh_helper, id="TANH"), - pytest.param(Functions.ReLU, test_var, {kw.GAIN:RAND1, kw.BIAS:RAND2, kw.LEAK:RAND3}, relu_helper, id="RELU"), - - # Angle doesn't have a helper using 'test_var', hardcode the input as well - pytest.param(Functions.Angle, [0.5488135, 0.71518937, 0.60276338, 0.54488318, 0.4236548, - 0.64589411, 0.43758721, 0.891773, 0.96366276, 0.38344152], {}, + pytest.param(pnl.Linear, test_var, {kw.SLOPE:RAND1, kw.INTERCEPT:RAND2}, test_var * RAND1 + RAND2, id="LINEAR"), + pytest.param(pnl.Exponential, test_var, {kw.SCALE:RAND1, kw.RATE:RAND2}, RAND1 * np.exp(RAND2 * test_var), id="EXPONENTIAL"), + pytest.param(pnl.Logistic, test_var, {kw.GAIN:RAND1, kw.X_0:RAND2, kw.OFFSET:RAND3, kw.SCALE:RAND4}, logistic_helper, id="LOGISTIC"), + pytest.param(pnl.Tanh, test_var, {kw.GAIN:RAND1, kw.BIAS:RAND2, kw.X_0:RAND3, kw.OFFSET:RAND4}, tanh_helper, id="TANH"), + pytest.param(pnl.ReLU, test_var, {kw.GAIN:RAND1, kw.BIAS:RAND2, kw.LEAK:RAND3}, relu_helper, id="RELU"), + + # Angle doesn't have a helper using 'test_var', hardcode bopth the input and output + pytest.param(pnl.Angle, + [0.5488135, 0.71518937, 0.60276338, 0.54488318, 0.4236548, + 0.64589411, 0.43758721, 0.891773, 0.96366276, 0.38344152], + {}, [0.85314409, 0.00556188, 0.01070476, 0.0214405, 0.05559454, 0.08091079, 0.21657281, 0.19296643, 0.21343805, 0.92738261, 0.00483101], id="ANGLE"), # Distort - pytest.param(Functions.Gaussian, test_var, {kw.STANDARD_DEVIATION:RAND1, kw.BIAS:RAND2, kw.SCALE:RAND3, kw.OFFSET:RAND4}, gaussian_helper, id="GAUSSIAN"), - pytest.param(Functions.GaussianDistort, test_var, {kw.BIAS: RAND1, kw.VARIANCE:RAND2, kw.OFFSET:RAND3, kw.SCALE:RAND4 }, gaussian_distort_helper(0), id="GAUSSIAN DISTORT GLOBAL SEED"), - pytest.param(Functions.GaussianDistort, test_var, {kw.BIAS: RAND1, kw.VARIANCE:RAND2, kw.OFFSET:RAND3, kw.SCALE:RAND4, 'seed':0 }, gaussian_distort_helper(0), id="GAUSSIAN DISTORT"), - pytest.param(Functions.BinomialDistort, test_var, {'seed':0, 'p':RAND1 }, binomial_distort_helper(0), id="BINOMIAL DISTORT"), + pytest.param(pnl.Gaussian, test_var, {kw.STANDARD_DEVIATION:RAND1, kw.BIAS:RAND2, kw.SCALE:RAND3, kw.OFFSET:RAND4}, gaussian_helper, id="GAUSSIAN"), + pytest.param(pnl.GaussianDistort, test_var, {kw.BIAS: RAND1, kw.VARIANCE:RAND2, kw.OFFSET:RAND3, kw.SCALE:RAND4 }, gaussian_distort_helper(0), id="GAUSSIAN DISTORT GLOBAL SEED"), + pytest.param(pnl.GaussianDistort, 
test_var, {kw.BIAS: RAND1, kw.VARIANCE:RAND2, kw.OFFSET:RAND3, kw.SCALE:RAND4, 'seed':0 }, gaussian_distort_helper(0), id="GAUSSIAN DISTORT"), + pytest.param(pnl.BinomialDistort, test_var, {'seed':0, 'p':RAND1 }, binomial_distort_helper(0), id="BINOMIAL DISTORT"), # SoftMax 1D input - pytest.param(Functions.SoftMax, test_var, {kw.GAIN:RAND1, kw.PER_ITEM:False}, softmax_helper, id="SOFT_MAX ALL"), - pytest.param(Functions.SoftMax, test_var, {kw.GAIN:RAND1, kw.OUTPUT_TYPE:kw.MAX_VAL, kw.PER_ITEM:False}, np.where(softmax_helper == np.max(softmax_helper), softmax_helper, 0), id="SOFT_MAX MAX_VAL"), - pytest.param(Functions.SoftMax, test_var, {kw.GAIN:RAND1, kw.OUTPUT_TYPE:kw.MAX_INDICATOR, kw.PER_ITEM:False}, np.where(softmax_helper == np.max(softmax_helper), 1, 0), id="SOFT_MAX MAX_INDICATOR"), - pytest.param(Functions.SoftMax, test_var, {kw.GAIN:RAND1, kw.OUTPUT_TYPE:kw.PROB, kw.PER_ITEM:False}, + pytest.param(pnl.SoftMax, test_var, {kw.GAIN:RAND1, kw.PER_ITEM:False}, softmax_helper, id="SOFT_MAX ALL"), + pytest.param(pnl.SoftMax, test_var, {kw.GAIN:RAND1, kw.OUTPUT_TYPE:kw.MAX_VAL, kw.PER_ITEM:False}, np.where(softmax_helper == np.max(softmax_helper), softmax_helper, 0), id="SOFT_MAX MAX_VAL"), + pytest.param(pnl.SoftMax, test_var, {kw.GAIN:RAND1, kw.OUTPUT_TYPE:kw.MAX_INDICATOR, kw.PER_ITEM:False}, np.where(softmax_helper == np.max(softmax_helper), 1, 0), id="SOFT_MAX MAX_INDICATOR"), + pytest.param(pnl.SoftMax, test_var, {kw.GAIN:RAND1, kw.OUTPUT_TYPE:kw.PROB, kw.PER_ITEM:False}, [0.0, 0.0, 0.0, 0.0, test_var[4], 0.0, 0.0, 0.0, 0.0, 0.0], id="SOFT_MAX PROB"), # SoftMax 2D testing per-item - pytest.param(Functions.SoftMax, [test_var], {kw.GAIN:RAND1, kw.PER_ITEM:True}, [softmax_helper], id="SOFT_MAX ALL 2D"), - pytest.param(Functions.SoftMax, [test_var], {kw.GAIN:RAND1, kw.OUTPUT_TYPE:kw.MAX_VAL, kw.PER_ITEM:True}, + pytest.param(pnl.SoftMax, [test_var], {kw.GAIN:RAND1, kw.PER_ITEM:True}, [softmax_helper], id="SOFT_MAX ALL 2D"), + pytest.param(pnl.SoftMax, [test_var], {kw.GAIN:RAND1, kw.OUTPUT_TYPE:kw.MAX_VAL, kw.PER_ITEM:True}, [np.where(softmax_helper == np.max(softmax_helper), softmax_helper, 0)], id="SOFT_MAX MAX_VAL 2D"), - pytest.param(Functions.SoftMax, [test_var], {kw.GAIN:RAND1, kw.OUTPUT_TYPE:kw.MAX_INDICATOR, kw.PER_ITEM:True}, + pytest.param(pnl.SoftMax, [test_var], {kw.GAIN:RAND1, kw.OUTPUT_TYPE:kw.MAX_INDICATOR, kw.PER_ITEM:True}, [np.where(softmax_helper == np.max(softmax_helper), 1, 0)], id="SOFT_MAX MAX_INDICATOR 2D"), - pytest.param(Functions.SoftMax, [test_var], {kw.GAIN:RAND1, kw.OUTPUT_TYPE:kw.PROB, kw.PER_ITEM:True}, + pytest.param(pnl.SoftMax, [test_var], {kw.GAIN:RAND1, kw.OUTPUT_TYPE:kw.PROB, kw.PER_ITEM:True}, [[0.0, 0.0, 0.0, 0.0, test_var[4], 0.0, 0.0, 0.0, 0.0, 0.0]], id="SOFT_MAX PROB 2D"), # SoftMax per-item with 2 elements in input - pytest.param(Functions.SoftMax, [test_var, test_var], {kw.GAIN:RAND1, kw.PER_ITEM: True}, softmax_helper2, id="SOFT_MAX ALL PER_ITEM"), - pytest.param(Functions.SoftMax, [test_var, test_var], {kw.GAIN:RAND1, kw.OUTPUT_TYPE:kw.MAX_VAL, kw.PER_ITEM: True}, + pytest.param(pnl.SoftMax, [test_var, test_var], {kw.GAIN:RAND1, kw.PER_ITEM: True}, softmax_helper2, id="SOFT_MAX ALL PER_ITEM"), + pytest.param(pnl.SoftMax, [test_var, test_var], {kw.GAIN:RAND1, kw.OUTPUT_TYPE:kw.MAX_VAL, kw.PER_ITEM: True}, np.where(softmax_helper2 == np.max(softmax_helper2), softmax_helper2, 0), id="SOFT_MAX MAX_VAL PER_ITEM"), - pytest.param(Functions.SoftMax, [test_var, test_var], {kw.GAIN:RAND1, kw.OUTPUT_TYPE:kw.MAX_INDICATOR, kw.PER_ITEM: True}, + 
pytest.param(pnl.SoftMax, [test_var, test_var], {kw.GAIN:RAND1, kw.OUTPUT_TYPE:kw.MAX_INDICATOR, kw.PER_ITEM: True}, np.where(softmax_helper2 == np.max(softmax_helper2), 1, 0), id="SOFT_MAX MAX_INDICATOR PER_ITEM"), # Linear Matrix - pytest.param(Functions.LinearMatrix, test_var, {kw.MATRIX:test_matrix}, np.dot(test_var, test_matrix), id="LINEAR_MATRIX SQUARE"), - pytest.param(Functions.LinearMatrix, test_var, {kw.MATRIX:test_matrix_l}, np.dot(test_var, test_matrix_l), id="LINEAR_MATRIX WIDE"), - pytest.param(Functions.LinearMatrix, test_var, {kw.MATRIX:test_matrix_s}, np.dot(test_var, test_matrix_s), id="LINEAR_MATRIX TALL"), + pytest.param(pnl.LinearMatrix, test_var, {kw.MATRIX:test_matrix}, np.dot(test_var, test_matrix), id="LINEAR_MATRIX SQUARE"), + pytest.param(pnl.LinearMatrix, test_var, {kw.MATRIX:test_matrix_l}, np.dot(test_var, test_matrix_l), id="LINEAR_MATRIX WIDE"), + pytest.param(pnl.LinearMatrix, test_var, {kw.MATRIX:test_matrix_s}, np.dot(test_var, test_matrix_s), id="LINEAR_MATRIX TALL"), # Dropout is just identity in non-learning mode - pytest.param(Functions.Dropout, test_var, {}, test_var, id="DROPOUT"), + pytest.param(pnl.Dropout, test_var, {}, test_var, id="DROPOUT"), ] @pytest.mark.function @@ -117,20 +118,20 @@ def test_execute(func, variable, params, expected, benchmark, func_mode): derivative_test_data = [ - (Functions.Linear, test_var, {kw.SLOPE:RAND1, kw.INTERCEPT:RAND2}, RAND1), - (Functions.Exponential, test_var, {kw.SCALE:RAND1, kw.RATE:RAND2}, RAND1 * RAND2 * np.exp(RAND2 * test_var)), - (Functions.Logistic, test_var, {kw.GAIN:RAND1, kw.X_0:RAND2, kw.OFFSET:RAND3, kw.SCALE:RAND4}, RAND1 * RAND4 * logistic_helper * (1 - logistic_helper)), - (Functions.ReLU, test_var, {kw.GAIN:RAND1, kw.BIAS:RAND2, kw.LEAK:RAND3}, np.where((test_var - RAND2) > 0, RAND1, RAND1 * RAND3)), - (Functions.Tanh, test_var, {kw.GAIN:RAND1, kw.BIAS:RAND2, kw.OFFSET:RAND3, kw.SCALE:RAND4}, tanh_derivative_helper), + (pnl.Linear, test_var, {kw.SLOPE:RAND1, kw.INTERCEPT:RAND2}, RAND1), + (pnl.Exponential, test_var, {kw.SCALE:RAND1, kw.RATE:RAND2}, RAND1 * RAND2 * np.exp(RAND2 * test_var)), + (pnl.Logistic, test_var, {kw.GAIN:RAND1, kw.X_0:RAND2, kw.OFFSET:RAND3, kw.SCALE:RAND4}, RAND1 * RAND4 * logistic_helper * (1 - logistic_helper)), + (pnl.ReLU, test_var, {kw.GAIN:RAND1, kw.BIAS:RAND2, kw.LEAK:RAND3}, np.where((test_var - RAND2) > 0, RAND1, RAND1 * RAND3)), + (pnl.Tanh, test_var, {kw.GAIN:RAND1, kw.BIAS:RAND2, kw.OFFSET:RAND3, kw.SCALE:RAND4}, tanh_derivative_helper), # SoftMax per-item=False - (Functions.SoftMax, test_var, {kw.GAIN:RAND1, kw.OUTPUT_TYPE:kw.MAX_VAL, kw.PER_ITEM:False}, + (pnl.SoftMax, test_var, {kw.GAIN:RAND1, kw.OUTPUT_TYPE:kw.MAX_VAL, kw.PER_ITEM:False}, [-0.010680386821751537, -0.011118109698906909, -0.01082040340318878, -0.010670257514724047, -0.010362498859374309, -0.010933660158663306, -0.010397412260182806, -0.011602329078808718, 0.09684744183944892, -0.010262384043848513]), - (Functions.SoftMax, test_var, {kw.GAIN:RAND1, kw.OUTPUT_TYPE:kw.MAX_INDICATOR, kw.PER_ITEM:False}, + (pnl.SoftMax, test_var, {kw.GAIN:RAND1, kw.OUTPUT_TYPE:kw.MAX_INDICATOR, kw.PER_ITEM:False}, [-0.010680386821751537, -0.011118109698906909, -0.01082040340318878, -0.010670257514724047, -0.010362498859374309, -0.010933660158663306, -0.010397412260182806, -0.011602329078808718, 0.09684744183944892, -0.010262384043848513]), - (Functions.SoftMax, test_var, {kw.GAIN:RAND1, kw.OUTPUT_TYPE:kw.ALL, kw.PER_ITEM:False}, + (pnl.SoftMax, test_var, {kw.GAIN:RAND1, kw.OUTPUT_TYPE:kw.ALL, 
kw.PER_ITEM:False}, [[ 0.088635686173821480, -0.010058549286956951, -0.009789214523259433, -0.009653377599514660, -0.009374948470179183, -0.009891677863509920, -0.009406534609578588, -0.010496622361458180, -0.010680386821751540, -0.009284374637613039], [-0.010058549286956951, 0.091856076128865180, -0.010190413769852785, -0.010049009732287338, -0.009759169518165271, @@ -151,16 +152,17 @@ def test_execute(func, variable, params, expected, benchmark, func_mode): -0.010933660158663310, -0.010397412260182810, -0.011602329078808723, 0.096847441839448930, -0.010262384043848514], [-0.009284374637613039, -0.009664883625423075, -0.009406089929318929, -0.009275569312222474, -0.009008037180482098, -0.009504543118853646, -0.009038387119898062, -0.010085811650299970, -0.010262384043848514, 0.08553008061795979]]), - # SoftMax per-tem=True - (Functions.SoftMax, [test_var], {kw.GAIN:RAND1, kw.OUTPUT_TYPE:kw.MAX_VAL, kw.PER_ITEM:True}, + + # SoftMax per-tem=True + (pnl.SoftMax, [test_var], {kw.GAIN:RAND1, kw.OUTPUT_TYPE:kw.MAX_VAL, kw.PER_ITEM:True}, [[-0.010680386821751537, -0.011118109698906909, -0.01082040340318878, -0.010670257514724047, -0.010362498859374309, -0.010933660158663306, -0.010397412260182806, -0.011602329078808718, 0.09684744183944892, -0.010262384043848513]]), - (Functions.SoftMax, [test_var, test_var], {kw.GAIN:RAND1, kw.OUTPUT_TYPE:kw.MAX_INDICATOR, kw.PER_ITEM:True}, + (pnl.SoftMax, [test_var, test_var], {kw.GAIN:RAND1, kw.OUTPUT_TYPE:kw.MAX_INDICATOR, kw.PER_ITEM:True}, [[-0.010680386821751537, -0.011118109698906909, -0.01082040340318878, -0.010670257514724047, -0.010362498859374309, -0.010933660158663306, -0.010397412260182806, -0.011602329078808718, 0.09684744183944892, -0.010262384043848513], [-0.010680386821751537, -0.011118109698906909, -0.01082040340318878, -0.010670257514724047, -0.010362498859374309, -0.010933660158663306, -0.010397412260182806, -0.011602329078808718, 0.09684744183944892, -0.010262384043848513]]), - (Functions.SoftMax, [test_var], {kw.GAIN:RAND1, kw.OUTPUT_TYPE:kw.ALL, kw.PER_ITEM:True}, + (pnl.SoftMax, [test_var], {kw.GAIN:RAND1, kw.OUTPUT_TYPE:kw.ALL, kw.PER_ITEM:True}, [[ 0.088635686173821480, -0.010058549286956951, -0.009789214523259433, -0.009653377599514660, -0.009374948470179183, -0.009891677863509920, -0.009406534609578588, -0.010496622361458180, -0.010680386821751540, -0.009284374637613039], [-0.010058549286956951, 0.091856076128865180, -0.010190413769852785, -0.010049009732287338, -0.009759169518165271, @@ -186,26 +188,32 @@ def test_execute(func, variable, params, expected, benchmark, func_mode): @pytest.mark.function @pytest.mark.transfer_function @pytest.mark.benchmark -@pytest.mark.parametrize("func, variable, params, expected", derivative_test_data, ids=lambda x: getattr(x, 'name', None) or getattr(x, 'get', lambda p, q: None)(kw.OUTPUT_TYPE, None)) +@pytest.mark.parametrize("func, variable, params, expected", + derivative_test_data, + ids=lambda x: getattr(x, 'name', None) or getattr(x, 'get', lambda p, q: None)(kw.OUTPUT_TYPE, None)) def test_transfer_derivative(func, variable, params, expected, benchmark, func_mode): - if func == Functions.SoftMax and params[kw.OUTPUT_TYPE] == kw.ALL and func_mode != "Python": + benchmark.group = "TransferFunction " + func.componentName + " Derivative" + if func == pnl.SoftMax and params[kw.OUTPUT_TYPE] == kw.ALL and func_mode != "Python": pytest.skip("Compiled derivative using 'ALL' is not implemented") f = func(default_variable=variable, **params) - benchmark.group = "TransferFunction " + func.componentName 
+ " Derivative" + if func_mode == 'Python': ex = f.derivative + elif func_mode == 'LLVM': ex = pnlvm.execution.FuncExecution(f, tags=frozenset({"derivative"})).execute + elif func_mode == 'PTX': ex = pnlvm.execution.FuncExecution(f, tags=frozenset({"derivative"})).cuda_execute + else: assert False, "unknown function mode: {}".format(func_mode) res = benchmark(ex, variable) # Tanh and Logistic need reduced accuracy in single precision mode - if func_mode != 'Python' and func in {Functions.Tanh, Functions.Logistic} and pytest.helpers.llvm_current_fp_precision() == 'fp32': + if func_mode != 'Python' and pytest.helpers.llvm_current_fp_precision() == 'fp32' and func in {pnl.Tanh, pnl.Logistic}: tolerance = {'rtol': 5e-7, 'atol': 1e-8} else: tolerance = {} @@ -214,12 +222,12 @@ def test_transfer_derivative(func, variable, params, expected, benchmark, func_m derivative_out_test_data = [ - (Functions.Logistic, logistic_helper, {kw.GAIN:RAND1, kw.X_0:RAND2, kw.OFFSET:RAND3, kw.SCALE:RAND4}, RAND1 * RAND4 * logistic_helper * (1 - logistic_helper)), - (Functions.ReLU, relu_helper, {kw.GAIN:RAND1, kw.BIAS:RAND2, kw.LEAK:RAND3}, np.where((test_var - RAND2) > 0, RAND1, RAND1 * RAND3)), - (Functions.SoftMax, softmax_helper, {kw.GAIN:RAND1, kw.OUTPUT_TYPE:kw.MAX_VAL, kw.PER_ITEM:False}, + (pnl.Logistic, logistic_helper, {kw.GAIN:RAND1, kw.X_0:RAND2, kw.OFFSET:RAND3, kw.SCALE:RAND4}, RAND1 * RAND4 * logistic_helper * (1 - logistic_helper)), + (pnl.ReLU, relu_helper, {kw.GAIN:RAND1, kw.BIAS:RAND2, kw.LEAK:RAND3}, np.where((test_var - RAND2) > 0, RAND1, RAND1 * RAND3)), + (pnl.SoftMax, softmax_helper, {kw.GAIN:RAND1, kw.OUTPUT_TYPE:kw.MAX_VAL, kw.PER_ITEM:False}, [-0.010680386821751537, -0.011118109698906909, -0.01082040340318878, -0.010670257514724047, -0.010362498859374309, -0.010933660158663306, -0.010397412260182806, -0.011602329078808718, 0.09684744183944892, -0.010262384043848513]), - (Functions.SoftMax, [softmax_helper, softmax_helper], {kw.GAIN:RAND1, kw.OUTPUT_TYPE:kw.MAX_VAL, kw.PER_ITEM:True}, + (pnl.SoftMax, [softmax_helper, softmax_helper], {kw.GAIN:RAND1, kw.OUTPUT_TYPE:kw.MAX_VAL, kw.PER_ITEM:True}, [[-0.010680386821751537, -0.011118109698906909, -0.01082040340318878, -0.010670257514724047, -0.010362498859374309, -0.010933660158663306, -0.010397412260182806, -0.011602329078808718, 0.09684744183944892, -0.010262384043848513], [-0.010680386821751537, -0.011118109698906909, -0.01082040340318878, -0.010670257514724047, -0.010362498859374309, @@ -230,25 +238,27 @@ def test_transfer_derivative(func, variable, params, expected, benchmark, func_m @pytest.mark.benchmark @pytest.mark.parametrize("func, variable, params, expected", derivative_out_test_data, ids=lambda x: getattr(x, 'name', None) or getattr(x, 'get', lambda p, q: None)(kw.OUTPUT_TYPE, None)) def test_transfer_derivative_out(func, variable, params, expected, benchmark, func_mode): - if func == Functions.SoftMax and params[kw.OUTPUT_TYPE] == kw.ALL and func_mode != "Python": - pytest.skip("Compiled SoftMax derivative using 'ALL' is not implemented") + benchmark.group = "TransferFunction " + func.componentName + " Derivative" f = func(default_variable=variable, **params) - benchmark.group = "TransferFunction " + func.componentName + " Derivative" + if func_mode == 'Python': def ex(x): return f.derivative(input=None, output=x) + elif func_mode == 'LLVM': ex = pnlvm.execution.FuncExecution(f, tags=frozenset({"derivative_out"})).execute + elif func_mode == 'PTX': ex = pnlvm.execution.FuncExecution(f, 
tags=frozenset({"derivative_out"})).cuda_execute + else: assert False, "unknown function mode: {}".format(func_mode) res = benchmark(ex, variable) # Logistic needs reduced accuracy in single precision mode because it uses exp() - if func_mode != 'Python' and func is Functions.Logistic and pytest.helpers.llvm_current_fp_precision() == 'fp32': + if func_mode != 'Python' and func is pnl.Logistic and pytest.helpers.llvm_current_fp_precision() == 'fp32' and func is pnl.Logistic: tolerance = {'rtol': 1e-7, 'atol': 1e-8} else: tolerance = {} @@ -259,12 +269,13 @@ def ex(x): def combine_costs(costs): return functools.reduce(lambda x, y: x | y, costs, pnl.CostFunctions.NONE) -@pytest.mark.parametrize("cost_functions", map(combine_costs, pytest.helpers.power_set(cf for cf in pnl.CostFunctions if cf != pnl.CostFunctions.NONE and cf != pnl.CostFunctions.ALL))) +@pytest.mark.parametrize("cost_functions", + map(combine_costs, pytest.helpers.power_set(cf for cf in pnl.CostFunctions if cf != pnl.CostFunctions.NONE and cf != pnl.CostFunctions.ALL))) @pytest.mark.benchmark @pytest.mark.function def test_transfer_with_costs(cost_functions, func_mode, benchmark): - f = Functions.TransferWithCosts(enabled_cost_functions=cost_functions) + f = pnl.TransferWithCosts(enabled_cost_functions=cost_functions) def check(cost_function, if_enabled, if_disabled, observed): if cost_function in cost_functions: @@ -343,12 +354,12 @@ def check(cost_function, if_enabled, if_disabled, observed): def test_transfer_with_costs_toggle(): - f = Functions.TransferWithCosts() + f = pnl.TransferWithCosts() result = f(1) np.testing.assert_allclose(result, 1) - f.toggle_cost(Functions.CostFunctions.INTENSITY) + f.toggle_cost(pnl.CostFunctions.INTENSITY) - f = Functions.TransferWithCosts(enabled_cost_functions=Functions.CostFunctions.INTENSITY) + f = pnl.TransferWithCosts(enabled_cost_functions=pnl.CostFunctions.INTENSITY) result = f(2) np.testing.assert_allclose(result, 2) np.testing.assert_allclose(f.intensity_cost, 7.38905609893065) @@ -356,7 +367,7 @@ def test_transfer_with_costs_toggle(): assert f.duration_cost is None np.testing.assert_allclose(f.combined_costs, 7.38905609893065) - f.toggle_cost(Functions.CostFunctions.ADJUSTMENT) + f.toggle_cost(pnl.CostFunctions.ADJUSTMENT) result = f(3) np.testing.assert_allclose(result, 3) np.testing.assert_allclose(f.intensity_cost, 20.085536923187668) @@ -364,7 +375,7 @@ def test_transfer_with_costs_toggle(): assert f.duration_cost is None np.testing.assert_allclose(f.combined_costs, 21.085536923187668) - f.toggle_cost(Functions.CostFunctions.DURATION) + f.toggle_cost(pnl.CostFunctions.DURATION) result = f(5) np.testing.assert_allclose(result, 5) np.testing.assert_allclose(f.intensity_cost, 148.413159102576603) @@ -391,7 +402,7 @@ def test_transfer_with_costs_shapes( expected_func_variable, expected_func_value ): - twc = Functions.TransferWithCosts(default_variable=default_variable) + twc = pnl.TransferWithCosts(default_variable=default_variable) np.testing.assert_array_equal( getattr(twc.parameters, func_name).get().defaults.variable, From 719978f5022a28487bade8b50dae5704d5da144f Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Sat, 19 Oct 2024 20:50:06 -0400 Subject: [PATCH 344/410] tests/llvm: Add llvm_not_implemented mark. Combination of llvm and llvm_not_implemented marks is automatically skipped. 
Signed-off-by: Jan Vesely --- conftest.py | 3 + setup.cfg | 1 + tests/composition/test_autodiffcomposition.py | 15 +--- tests/composition/test_composition.py | 4 +- tests/functions/test_integrator.py | 4 +- tests/functions/test_memory.py | 18 ++-- tests/functions/test_transfer.py | 89 +++++++++---------- tests/mechanisms/test_episodic_memory.py | 5 +- 8 files changed, 67 insertions(+), 72 deletions(-) diff --git a/conftest.py b/conftest.py index a23219aac1d..e486c1cbe2a 100644 --- a/conftest.py +++ b/conftest.py @@ -50,6 +50,9 @@ def pytest_runtest_setup(item): if m in item.keywords and not item.config.getvalue(m): pytest.skip('{0} tests not requested'.format(m)) + if 'llvm' in item.keywords and 'llvm_not_implemented' in item.keywords: + pytest.skip('LLVM implementation not available') + if 'cuda' in item.keywords and not pnlvm.ptx_enabled: pytest.skip('PTX engine not enabled/available') diff --git a/setup.cfg b/setup.cfg index 911094866c0..b1a5abf0a3e 100644 --- a/setup.cfg +++ b/setup.cfg @@ -32,6 +32,7 @@ markers = acnested composition: PsyNeuLink Composition tests llvm: Tests using LLVM runtime compiler + llvm_not_implemented: Tests that should use LLVM runtime compiler but the functionality is not yet implemented cuda: Tests using LLVM runtime compiler and CUDA GPGPU backend control: Tests including control mechanism and/or control projection state_features: Tests for OptimizationControlMechanism state_features specifications diff --git a/tests/composition/test_autodiffcomposition.py b/tests/composition/test_autodiffcomposition.py index 80242efee9d..988f5f46a1f 100644 --- a/tests/composition/test_autodiffcomposition.py +++ b/tests/composition/test_autodiffcomposition.py @@ -1331,10 +1331,8 @@ def test_xor_nested_train_then_no_train(self, num_epochs, learning_rate, (400, 4, 10, .00001), ] ) + @pytest.mark.llvm_not_implemented def test_xor_nested_no_train_then_train(self, num_epochs, learning_rate, patience, min_delta, autodiff_mode): - if autodiff_mode != pnl.ExecutionMode.PyTorch: - pytest.skip("LLVM not available") - xor_inputs = np.array([[0, 0], [0, 1], [1, 0], [1, 1]]) xor_targets = np.array([[0], [1], [1], [0]]) @@ -2800,15 +2798,12 @@ def test_training_then_processing(self, autodiff_mode): @pytest.mark.parametrize( 'loss, expected', [ (Loss.CROSS_ENTROPY, [[[0.99330715]], [[0.99933202]], [[0.99933202]], [[0.99985049]]]), - (Loss.L1, [[[0.99330641]], [[0.9993319 ]], [[0.9993319 ]], [[0.99985045]]]), + pytest.param(Loss.L1, [[[0.99330641]], [[0.9993319 ]], [[0.9993319 ]], [[0.99985045]]], marks=pytest.mark.llvm_not_implemented), (Loss.MSE, [[[0.99330509]], [[0.99933169]], [[0.99933169]], [[0.9998504]]]), - (Loss.POISSON_NLL, [[[0.99330385]], [[0.99933149]], [[0.99933149]], [[0.99985034]]]), + pytest.param(Loss.POISSON_NLL, [[[0.99330385]], [[0.99933149]], [[0.99933149]], [[0.99985034]]], marks=pytest.mark.llvm_not_implemented), ] ) def test_loss_specs(self, loss, expected, autodiff_mode): - if autodiff_mode is not pnl.ExecutionMode.PyTorch and loss in [Loss.POISSON_NLL, Loss.L1]: - pytest.skip("Loss spec not yet implemented!") - xor_in = TransferMechanism(name='xor_in', default_variable=np.zeros(2)) xor_hid = TransferMechanism(name='xor_hid', default_variable=np.zeros(10), function=Logistic()) xor_out = TransferMechanism(name='xor_out', default_variable=np.zeros(1), function=Logistic()) @@ -2834,10 +2829,8 @@ def test_loss_specs(self, loss, expected, autodiff_mode): tol = {'atol': 2e-6, 'rtol': 2e-6} if loss == Loss.CROSS_ENTROPY else {} 
np.testing.assert_allclose(xor.learning_results, expected, **tol) + @pytest.mark.llvm_not_implemented def test_pytorch_loss_spec(self, autodiff_mode): - if autodiff_mode is not pnl.ExecutionMode.PyTorch: - pytest.skip("Loss spec not yet implemented!") - import torch ls = torch.nn.SoftMarginLoss(reduction='sum') diff --git a/tests/composition/test_composition.py b/tests/composition/test_composition.py index 81c13b223a2..b9525494c24 100644 --- a/tests/composition/test_composition.py +++ b/tests/composition/test_composition.py @@ -5569,11 +5569,9 @@ def test_partially_overlapping_local_and_control_mech_control_specs_in_unnested_ class TestImportComposition: @pytest.mark.pytorch @pytest.mark.composition + @pytest.mark.llvm_not_implemented def test_import_composition(self, comp_mode): - if comp_mode != pnl.ExecutionMode.Python: - pytest.skip('Compilation not yet support for Composition.import.') - em = EMComposition(memory_template=(2,5), memory_capacity=4) i1 = ProcessingMechanism() diff --git a/tests/functions/test_integrator.py b/tests/functions/test_integrator.py index ce17b411cf7..3eb57d17f75 100644 --- a/tests/functions/test_integrator.py +++ b/tests/functions/test_integrator.py @@ -191,7 +191,7 @@ def DriftOnASphereFun(init, value, iterations, noise, **kwargs): (pnl.DriftDiffusionIntegrator, DriftIntFun), (pnl.LeakyCompetingIntegrator, LeakyFun), (pnl.AccumulatorIntegrator, AccumulatorFun), - (pnl.DriftOnASphereIntegrator, DriftOnASphereFun), + pytest.param((pnl.DriftOnASphereIntegrator, DriftOnASphereFun), marks=pytest.mark.llvm_not_implemented), ], ids=lambda x: x[0]) @pytest.mark.benchmark def test_execute(func, func_mode, variable, noise, params, benchmark): @@ -209,8 +209,6 @@ def test_execute(func, func_mode, variable, noise, params, benchmark): if 'DriftOnASphereIntegrator' in func[0].componentName: params = {**params, 'dimension': len(variable) + 1} - if func_mode != 'Python': - pytest.skip("DriftOnASphereIntegrator not yet compiled") elif issubclass(func_class, pnl.AccumulatorIntegrator): params = {**params, 'increment': RAND0_1} diff --git a/tests/functions/test_memory.py b/tests/functions/test_memory.py index 8654833f94b..2173cb3cb2c 100644 --- a/tests/functions/test_memory.py +++ b/tests/functions/test_memory.py @@ -34,7 +34,9 @@ # (Functions.Buffer, test_var, {'rate':RAND1}, [[0.0],[0.0]]), pytest.param(Functions.Buffer, test_var[0], {'history':512, 'rate':RAND1, 'initializer':[test_var[0]]}, # TODO: Why is the first result using rate^2 ? 
- [test_var[0] * RAND1 * RAND1, test_var[0] * RAND1], id="Buffer"), + [test_var[0] * RAND1 * RAND1, test_var[0] * RAND1], + marks=pytest.mark.llvm_not_implemented, + id="Buffer"), # Tests using Mersenne-Twister as function PRNG pytest.param(Functions.DictionaryMemory, test_var, {'seed': module_seed}, @@ -71,15 +73,19 @@ # ContentAddressableMemory pytest.param(Functions.ContentAddressableMemory, test_var, {'rate':RAND1, 'retrieval_prob':0.1, 'seed': module_seed}, np.zeros_like(test_var), + marks=pytest.mark.llvm_not_implemented, id="ContentAddressableMemory Low Retrieval"), pytest.param(Functions.ContentAddressableMemory, test_var, {'rate':RAND1, 'storage_prob':0.1, 'seed': module_seed}, np.zeros_like(test_var), + marks=pytest.mark.llvm_not_implemented, id="ContentAddressableMemory Low Storage"), pytest.param(Functions.ContentAddressableMemory, test_var, {'rate':RAND1, 'retrieval_prob':0.9, 'storage_prob':0.9, 'seed': module_seed}, [test_var[0], test_var[1]], + marks=pytest.mark.llvm_not_implemented, id="ContentAddressableMemory High Storage/Retrieval"), pytest.param(Functions.ContentAddressableMemory, test_var, {'initializer':test_initializer, 'rate':RAND1, 'seed': module_seed}, [test_var[0], test_var[1]], + marks=pytest.mark.llvm_not_implemented, id="ContentAddressableMemory Initializer"), # Tests using philox var @@ -117,15 +123,19 @@ # ContentAddressableMemory pytest.param(Functions.ContentAddressableMemory, philox_var, {'rate':RAND1, 'retrieval_prob':0.1, 'seed': module_seed}, np.zeros_like(philox_var), + marks=pytest.mark.llvm_not_implemented, id="ContentAddressableMemory Low Retrieval Philox"), pytest.param(Functions.ContentAddressableMemory, philox_var, {'rate':RAND1, 'storage_prob':0.01, 'seed': module_seed}, np.zeros_like(philox_var), + marks=pytest.mark.llvm_not_implemented, id="ContentAddressableMemory Low Storage Philox"), pytest.param(Functions.ContentAddressableMemory, philox_var, {'rate':RAND1, 'retrieval_prob':0.98, 'storage_prob':0.98, 'seed': module_seed}, [philox_var[0], philox_var[1]], + marks=pytest.mark.llvm_not_implemented, id="ContentAddressableMemory High Storage/Retrieval Philox"), pytest.param(Functions.ContentAddressableMemory, philox_var, {'initializer':philox_initializer, 'rate':RAND1, 'seed': module_seed}, [philox_var[0], philox_var[1]], + marks=pytest.mark.llvm_not_implemented, id="ContentAddressableMemory Initializer Philox"), ] @@ -134,11 +144,6 @@ @pytest.mark.benchmark @pytest.mark.parametrize("func, variable, params, expected", test_data) def test_basic(func, variable, params, expected, benchmark, func_mode): - if func is Functions.Buffer and func_mode != 'Python': - pytest.skip("Not implemented") - if func is Functions.ContentAddressableMemory and func_mode != 'Python': - pytest.skip("Not implemented") - benchmark.group = func.componentName f = func(default_variable=variable, **params) if variable is philox_var: @@ -153,6 +158,7 @@ def test_basic(func, variable, params, expected, benchmark, func_mode): # "duplicate_keys" if len(variable) == 2: EX([variable[0], variable[1] * 4]) + res = benchmark(EX, variable) # This still needs to use "allclose" as the key gets manipulated before diff --git a/tests/functions/test_transfer.py b/tests/functions/test_transfer.py index 0b5d15d3f52..238b3c5c352 100644 --- a/tests/functions/test_transfer.py +++ b/tests/functions/test_transfer.py @@ -131,28 +131,28 @@ def test_execute(func, variable, params, expected, benchmark, func_mode): (pnl.SoftMax, test_var, {kw.GAIN:RAND1, kw.OUTPUT_TYPE:kw.MAX_INDICATOR, 
kw.PER_ITEM:False}, [-0.010680386821751537, -0.011118109698906909, -0.01082040340318878, -0.010670257514724047, -0.010362498859374309, -0.010933660158663306, -0.010397412260182806, -0.011602329078808718, 0.09684744183944892, -0.010262384043848513]), - (pnl.SoftMax, test_var, {kw.GAIN:RAND1, kw.OUTPUT_TYPE:kw.ALL, kw.PER_ITEM:False}, - [[ 0.088635686173821480, -0.010058549286956951, -0.009789214523259433, -0.009653377599514660, -0.009374948470179183, - -0.009891677863509920, -0.009406534609578588, -0.010496622361458180, -0.010680386821751540, -0.009284374637613039], - [-0.010058549286956951, 0.091856076128865180, -0.010190413769852785, -0.010049009732287338, -0.009759169518165271, - -0.010297076447528582, -0.009792050177702091, -0.010926813872042194, -0.011118109698906910, -0.009664883625423075], - [-0.009789214523259433, -0.010190413769852785, 0.089669339130699100, -0.009779930406389987, -0.009497851156931268, - -0.010021354713444461, -0.009529851380888969, -0.010634229847424508, -0.010820403403188785, -0.009406089929318929], - [-0.009653377599514660, -0.010049009732287338, -0.009779930406389987, 0.088560779144081720, -0.009366057244326959, - -0.009882296570138368, -0.009397613427348460, -0.010486667337129447, -0.010670257514724050, -0.009275569312222474], - [-0.009374948470179183, -0.009759169518165271, -0.009497851156931268, -0.009366057244326959, 0.08627659236704915, - -0.009597264807784339, -0.009126561218167337, -0.010184203911638403, -0.010362498859374313, -0.009008037180482098], - [-0.009891677863509920, -0.010297076447528582, -0.010021354713444461, -0.009882296570138368, -0.009597264807784339, - 0.090503011588098000, -0.009629599976882700, -0.010745537931292683, -0.010933660158663310, -0.009504543118853646], - [-0.009406534609578588, -0.009792050177702091, -0.009529851380888969, -0.009397613427348460, -0.009126561218167337, - -0.009629599976882700, 0.086536526770559590, -0.010218516599910580, -0.010397412260182810, -0.009038387119898062], - [-0.010496622361458180, -0.010926813872042194, -0.010634229847424508, -0.010486667337129447, -0.010184203911638403, - -0.010745537931292683, -0.010218516599910580, 0.095380732590004670, -0.011602329078808723, -0.01008581165029997], - [-0.010680386821751540, -0.011118109698906910, -0.010820403403188785, -0.010670257514724050, -0.010362498859374313, - -0.010933660158663310, -0.010397412260182810, -0.011602329078808723, 0.096847441839448930, -0.010262384043848514], - [-0.009284374637613039, -0.009664883625423075, -0.009406089929318929, -0.009275569312222474, -0.009008037180482098, - -0.009504543118853646, -0.009038387119898062, -0.010085811650299970, -0.010262384043848514, 0.08553008061795979]]), - + pytest.param(pnl.SoftMax, test_var, {kw.GAIN:RAND1, kw.OUTPUT_TYPE:kw.ALL, kw.PER_ITEM:False}, + [[ 0.088635686173821480, -0.010058549286956951, -0.009789214523259433, -0.009653377599514660, -0.009374948470179183, + -0.009891677863509920, -0.009406534609578588, -0.010496622361458180, -0.010680386821751540, -0.009284374637613039], + [-0.010058549286956951, 0.091856076128865180, -0.010190413769852785, -0.010049009732287338, -0.009759169518165271, + -0.010297076447528582, -0.009792050177702091, -0.010926813872042194, -0.011118109698906910, -0.009664883625423075], + [-0.009789214523259433, -0.010190413769852785, 0.089669339130699100, -0.009779930406389987, -0.009497851156931268, + -0.010021354713444461, -0.009529851380888969, -0.010634229847424508, -0.010820403403188785, -0.009406089929318929], + [-0.009653377599514660, -0.010049009732287338, 
-0.009779930406389987, 0.088560779144081720, -0.009366057244326959, + -0.009882296570138368, -0.009397613427348460, -0.010486667337129447, -0.010670257514724050, -0.009275569312222474], + [-0.009374948470179183, -0.009759169518165271, -0.009497851156931268, -0.009366057244326959, 0.08627659236704915, + -0.009597264807784339, -0.009126561218167337, -0.010184203911638403, -0.010362498859374313, -0.009008037180482098], + [-0.009891677863509920, -0.010297076447528582, -0.010021354713444461, -0.009882296570138368, -0.009597264807784339, + 0.090503011588098000, -0.009629599976882700, -0.010745537931292683, -0.010933660158663310, -0.009504543118853646], + [-0.009406534609578588, -0.009792050177702091, -0.009529851380888969, -0.009397613427348460, -0.009126561218167337, + -0.009629599976882700, 0.086536526770559590, -0.010218516599910580, -0.010397412260182810, -0.009038387119898062], + [-0.010496622361458180, -0.010926813872042194, -0.010634229847424508, -0.010486667337129447, -0.010184203911638403, + -0.010745537931292683, -0.010218516599910580, 0.095380732590004670, -0.011602329078808723, -0.01008581165029997], + [-0.010680386821751540, -0.011118109698906910, -0.010820403403188785, -0.010670257514724050, -0.010362498859374313, + -0.010933660158663310, -0.010397412260182810, -0.011602329078808723, 0.096847441839448930, -0.010262384043848514], + [-0.009284374637613039, -0.009664883625423075, -0.009406089929318929, -0.009275569312222474, -0.009008037180482098, + -0.009504543118853646, -0.009038387119898062, -0.010085811650299970, -0.010262384043848514, 0.08553008061795979]], + marks=pytest.mark.llvm_not_implemented), # SoftMax per-tem=True (pnl.SoftMax, [test_var], {kw.GAIN:RAND1, kw.OUTPUT_TYPE:kw.MAX_VAL, kw.PER_ITEM:True}, [[-0.010680386821751537, -0.011118109698906909, -0.01082040340318878, -0.010670257514724047, -0.010362498859374309, @@ -162,27 +162,28 @@ def test_execute(func, variable, params, expected, benchmark, func_mode): -0.010933660158663306, -0.010397412260182806, -0.011602329078808718, 0.09684744183944892, -0.010262384043848513], [-0.010680386821751537, -0.011118109698906909, -0.01082040340318878, -0.010670257514724047, -0.010362498859374309, -0.010933660158663306, -0.010397412260182806, -0.011602329078808718, 0.09684744183944892, -0.010262384043848513]]), - (pnl.SoftMax, [test_var], {kw.GAIN:RAND1, kw.OUTPUT_TYPE:kw.ALL, kw.PER_ITEM:True}, - [[ 0.088635686173821480, -0.010058549286956951, -0.009789214523259433, -0.009653377599514660, -0.009374948470179183, - -0.009891677863509920, -0.009406534609578588, -0.010496622361458180, -0.010680386821751540, -0.009284374637613039], - [-0.010058549286956951, 0.091856076128865180, -0.010190413769852785, -0.010049009732287338, -0.009759169518165271, - -0.010297076447528582, -0.009792050177702091, -0.010926813872042194, -0.011118109698906910, -0.009664883625423075], - [-0.009789214523259433, -0.010190413769852785, 0.089669339130699100, -0.009779930406389987, -0.009497851156931268, - -0.010021354713444461, -0.009529851380888969, -0.010634229847424508, -0.010820403403188785, -0.009406089929318929], - [-0.009653377599514660, -0.010049009732287338, -0.009779930406389987, 0.088560779144081720, -0.009366057244326959, - -0.009882296570138368, -0.009397613427348460, -0.010486667337129447, -0.010670257514724050, -0.009275569312222474], - [-0.009374948470179183, -0.009759169518165271, -0.009497851156931268, -0.009366057244326959, 0.08627659236704915, - -0.009597264807784339, -0.009126561218167337, -0.010184203911638403, -0.010362498859374313, 
-0.009008037180482098], - [-0.009891677863509920, -0.010297076447528582, -0.010021354713444461, -0.009882296570138368, -0.009597264807784339, - 0.090503011588098000, -0.009629599976882700, -0.010745537931292683, -0.010933660158663310, -0.009504543118853646], - [-0.009406534609578588, -0.009792050177702091, -0.009529851380888969, -0.009397613427348460, -0.009126561218167337, - -0.009629599976882700, 0.086536526770559590, -0.010218516599910580, -0.010397412260182810, -0.009038387119898062], - [-0.010496622361458180, -0.010926813872042194, -0.010634229847424508, -0.010486667337129447, -0.010184203911638403, - -0.010745537931292683, -0.010218516599910580, 0.095380732590004670, -0.011602329078808723, -0.01008581165029997], - [-0.010680386821751540, -0.011118109698906910, -0.010820403403188785, -0.010670257514724050, -0.010362498859374313, - -0.010933660158663310, -0.010397412260182810, -0.011602329078808723, 0.096847441839448930, -0.010262384043848514], - [-0.009284374637613039, -0.009664883625423075, -0.009406089929318929, -0.009275569312222474, -0.009008037180482098, - -0.009504543118853646, -0.009038387119898062, -0.010085811650299970, -0.010262384043848514, 0.08553008061795979]]), + pytest.param(pnl.SoftMax, [test_var], {kw.GAIN:RAND1, kw.OUTPUT_TYPE:kw.ALL, kw.PER_ITEM:True}, + [[ 0.088635686173821480, -0.010058549286956951, -0.009789214523259433, -0.009653377599514660, -0.009374948470179183, + -0.009891677863509920, -0.009406534609578588, -0.010496622361458180, -0.010680386821751540, -0.009284374637613039], + [-0.010058549286956951, 0.091856076128865180, -0.010190413769852785, -0.010049009732287338, -0.009759169518165271, + -0.010297076447528582, -0.009792050177702091, -0.010926813872042194, -0.011118109698906910, -0.009664883625423075], + [-0.009789214523259433, -0.010190413769852785, 0.089669339130699100, -0.009779930406389987, -0.009497851156931268, + -0.010021354713444461, -0.009529851380888969, -0.010634229847424508, -0.010820403403188785, -0.009406089929318929], + [-0.009653377599514660, -0.010049009732287338, -0.009779930406389987, 0.088560779144081720, -0.009366057244326959, + -0.009882296570138368, -0.009397613427348460, -0.010486667337129447, -0.010670257514724050, -0.009275569312222474], + [-0.009374948470179183, -0.009759169518165271, -0.009497851156931268, -0.009366057244326959, 0.08627659236704915, + -0.009597264807784339, -0.009126561218167337, -0.010184203911638403, -0.010362498859374313, -0.009008037180482098], + [-0.009891677863509920, -0.010297076447528582, -0.010021354713444461, -0.009882296570138368, -0.009597264807784339, + 0.090503011588098000, -0.009629599976882700, -0.010745537931292683, -0.010933660158663310, -0.009504543118853646], + [-0.009406534609578588, -0.009792050177702091, -0.009529851380888969, -0.009397613427348460, -0.009126561218167337, + -0.009629599976882700, 0.086536526770559590, -0.010218516599910580, -0.010397412260182810, -0.009038387119898062], + [-0.010496622361458180, -0.010926813872042194, -0.010634229847424508, -0.010486667337129447, -0.010184203911638403, + -0.010745537931292683, -0.010218516599910580, 0.095380732590004670, -0.011602329078808723, -0.01008581165029997], + [-0.010680386821751540, -0.011118109698906910, -0.010820403403188785, -0.010670257514724050, -0.010362498859374313, + -0.010933660158663310, -0.010397412260182810, -0.011602329078808723, 0.096847441839448930, -0.010262384043848514], + [-0.009284374637613039, -0.009664883625423075, -0.009406089929318929, -0.009275569312222474, -0.009008037180482098, + -0.009504543118853646, 
-0.009038387119898062, -0.010085811650299970, -0.010262384043848514, 0.08553008061795979]], + marks=pytest.mark.llvm_not_implemented), ] @pytest.mark.function @@ -193,8 +194,6 @@ def test_execute(func, variable, params, expected, benchmark, func_mode): ids=lambda x: getattr(x, 'name', None) or getattr(x, 'get', lambda p, q: None)(kw.OUTPUT_TYPE, None)) def test_transfer_derivative(func, variable, params, expected, benchmark, func_mode): benchmark.group = "TransferFunction " + func.componentName + " Derivative" - if func == pnl.SoftMax and params[kw.OUTPUT_TYPE] == kw.ALL and func_mode != "Python": - pytest.skip("Compiled derivative using 'ALL' is not implemented") f = func(default_variable=variable, **params) diff --git a/tests/mechanisms/test_episodic_memory.py b/tests/mechanisms/test_episodic_memory.py index 68066e6e15b..26a93fac68e 100644 --- a/tests/mechanisms/test_episodic_memory.py +++ b/tests/mechanisms/test_episodic_memory.py @@ -199,11 +199,9 @@ def test_with_dictionary_memory(variable, func, params, expected, benchmark, mec @pytest.mark.parametrize('name, func, func_params, mech_params, test_var,' 'input_port_names, output_port_names, expected_output', test_data, ids=names) +@pytest.mark.llvm_not_implemented def test_with_contentaddressablememory(name, func, func_params, mech_params, test_var, input_port_names, output_port_names, expected_output, mech_mode): - if mech_mode != 'Python': - pytest.skip("Compiled execution not yet implemented for ContentAddressableMemory") - f = func(seed=0, **func_params) # EpisodicMemoryMechanism(function=f, **mech_params) em = EpisodicMemoryMechanism(function=f, **mech_params) @@ -212,7 +210,6 @@ def test_with_contentaddressablememory(name, func, func_params, mech_params, tes EX = pytest.helpers.get_mech_execution(em, mech_mode) - # EX(test_var) actual_output = EX(test_var) for i,j in zip(actual_output,expected_output): From 8926b285f7eda4ca4e4b23b0dbaf89b2aa956154 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Mon, 21 Oct 2024 13:12:00 -0400 Subject: [PATCH 345/410] test/IntegratorFunctions: Use issubclass to check Function class Fixes: ac83b2afb7835f8a3b6993a5fb7f2d0410fdb45b ("tests/IntegratorFunctions: Cleanup") Signed-off-by: Jan Vesely --- tests/functions/test_integrator.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/functions/test_integrator.py b/tests/functions/test_integrator.py index 3eb57d17f75..7c853f1dcd9 100644 --- a/tests/functions/test_integrator.py +++ b/tests/functions/test_integrator.py @@ -207,7 +207,7 @@ def test_execute(func, func_mode, variable, noise, params, benchmark): if issubclass(func_class, (pnl.DriftDiffusionIntegrator, pnl.DriftOnASphereIntegrator)): pytest.skip("{} doesn't support functional noise".format(func_class.componentName)) - if 'DriftOnASphereIntegrator' in func[0].componentName: + if issubclass(func_class, pnl.DriftOnASphereIntegrator): params = {**params, 'dimension': len(variable) + 1} elif issubclass(func_class, pnl.AccumulatorIntegrator): From 37a4a97d9d2370bbb3356f9e1b28e5d2c77cd0e2 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Mon, 21 Oct 2024 13:13:14 -0400 Subject: [PATCH 346/410] treewide: Allow Mapping type for 'params' in Component construction Signed-off-by: Jan Vesely --- psyneulink/_typing.py | 1 + .../nonstateful/transferfunctions.py | 4 ++-- .../functions/stateful/integratorfunctions.py | 24 +++++++++---------- .../functions/stateful/memoryfunctions.py | 9 +++---- .../functions/stateful/statefulfunction.py | 4 ++-- .../modulatory/controlprojection.py | 4 ++-- 
.../modulatory/gatingprojection.py | 4 ++-- .../modulatory/learningprojection.py | 6 ++--- psyneulink/core/compositions/composition.py | 4 ++-- .../compositions/autodiffcomposition.py | 6 ++--- .../library/compositions/compositionrunner.py | 14 +++++------ 11 files changed, 39 insertions(+), 41 deletions(-) diff --git a/psyneulink/_typing.py b/psyneulink/_typing.py index adbda772047..3a6fc21131a 100644 --- a/psyneulink/_typing.py +++ b/psyneulink/_typing.py @@ -16,4 +16,5 @@ Dict as Dict, Iterable as Iterable, Set as Set, + Mapping as Mapping, ) diff --git a/psyneulink/core/components/functions/nonstateful/transferfunctions.py b/psyneulink/core/components/functions/nonstateful/transferfunctions.py index 1eac3390527..5018ef1bccb 100644 --- a/psyneulink/core/components/functions/nonstateful/transferfunctions.py +++ b/psyneulink/core/components/functions/nonstateful/transferfunctions.py @@ -76,7 +76,7 @@ torch = None from beartype import beartype -from psyneulink._typing import Optional, Union, Callable +from psyneulink._typing import Callable, Mapping, Optional, Union from psyneulink.core import llvm as pnlvm from psyneulink.core.components.component import parameter_keywords @@ -3205,7 +3205,7 @@ def __init__(self, adapt_entropy_weighting: Optional[ValidParamSpecType] = None, output=None, per_item=None, - params: Optional[dict] = None, + params: Optional[Mapping] = None, owner=None, prefs: Optional[ValidPrefSet] = None): diff --git a/psyneulink/core/components/functions/stateful/integratorfunctions.py b/psyneulink/core/components/functions/stateful/integratorfunctions.py index 306fdaab016..eab72c5cdf4 100644 --- a/psyneulink/core/components/functions/stateful/integratorfunctions.py +++ b/psyneulink/core/components/functions/stateful/integratorfunctions.py @@ -31,7 +31,7 @@ import numpy as np from beartype import beartype -from psyneulink._typing import Optional, Union, Callable +from psyneulink._typing import Callable, Mapping, Optional, Union from psyneulink.core import llvm as pnlvm from psyneulink.core.components.component import DefaultsFlexibility @@ -230,7 +230,7 @@ def __init__(self, rate=None, noise=None, initializer=None, - params: Optional[dict] = None, + params: Optional[Mapping] = None, owner=None, prefs: Optional[ValidPrefSet] = None, context=None, @@ -559,7 +559,7 @@ def __init__(self, increment=None, noise=None, initializer=None, - params: Optional[dict] = None, + params: Optional[Mapping] = None, owner=None, prefs: Optional[ValidPrefSet] = None): @@ -836,7 +836,7 @@ def __init__(self, noise=None, offset=None, initializer=None, - params: Optional[dict] = None, + params: Optional[Mapping] = None, owner=None, prefs: Optional[ValidPrefSet] = None): super().__init__( @@ -1072,7 +1072,7 @@ def __init__(self, noise=None, offset=None, initializer=None, - params: Optional[dict] = None, + params: Optional[Mapping] = None, owner=None, prefs: Optional[ValidPrefSet] = None): @@ -1592,7 +1592,7 @@ def __init__(self, long_term_rate=None, operation=None, offset=None, - params: Optional[dict] = None, + params: Optional[Mapping] = None, owner=None, prefs: Optional[ValidPrefSet] = None): @@ -2028,7 +2028,7 @@ def __init__(self, min_val: Optional[ValidParamSpecType] = None, noise=None, initializer=None, - params: Optional[dict] = None, + params: Optional[Mapping] = None, owner=None, prefs: Optional[ValidPrefSet] = None, # **kwargs @@ -2451,7 +2451,7 @@ def __init__( threshold=None, time_step_size=None, seed=None, - params: Optional[dict] = None, + params: Optional[Mapping] = None, owner=None, 
prefs: Optional[ValidPrefSet] = None, **kwargs @@ -3010,7 +3010,7 @@ def __init__(self, initializer=None, angle_function=None, seed=None, - params: Optional[dict] = None, + params: Optional[Mapping] = None, owner=None, prefs: Optional[ValidPrefSet] = None, **kwargs): @@ -3466,7 +3466,7 @@ def __init__( non_decision_time=None, time_step_size=None, starting_value=None, - params: Optional[dict] = None, + params: Optional[Mapping] = None, seed=None, owner=None, prefs: Optional[ValidPrefSet] = None, @@ -3761,7 +3761,7 @@ def __init__(self, offset=None, time_step_size=None, initializer=None, - params: Optional[dict] = None, + params: Optional[Mapping] = None, owner=None, prefs: Optional[ValidPrefSet] = None, **kwargs): @@ -4466,7 +4466,7 @@ def __init__(self, mode=None, uncorrelated_activity=None, integration_method=None, - params: Optional[dict] = None, + params: Optional[Mapping] = None, owner=None, prefs: Optional[ValidPrefSet] = None, **kwargs): diff --git a/psyneulink/core/components/functions/stateful/memoryfunctions.py b/psyneulink/core/components/functions/stateful/memoryfunctions.py index 38a07307527..47edfa92195 100644 --- a/psyneulink/core/components/functions/stateful/memoryfunctions.py +++ b/psyneulink/core/components/functions/stateful/memoryfunctions.py @@ -29,14 +29,11 @@ import warnings from collections import deque -from psyneulink._typing import Callable, List, Literal +from psyneulink._typing import Callable, List, Literal, Mapping, Optional, Union import numpy as np from beartype import beartype -from typing import Optional, Union -# from psyneulink._typing import - from psyneulink.core import llvm as pnlvm from psyneulink.core.components.functions.function import ( DEFAULT_SEED, FunctionError, _random_state_getter, _seed_setter, EPSILON, _noise_setter @@ -253,8 +250,8 @@ def __init__(self, history:Optional[int]=None, # history: Optional[int] = None, initializer=None, - params: Optional[dict] = None, - # params: Optional[dict] = None, + params: Optional[Mapping] = None, + # params: Optional[Mapping] = None, owner=None, prefs: Optional[ValidPrefSet] = None ): diff --git a/psyneulink/core/components/functions/stateful/statefulfunction.py b/psyneulink/core/components/functions/stateful/statefulfunction.py index 3e25d51284d..2aeed80c09e 100644 --- a/psyneulink/core/components/functions/stateful/statefulfunction.py +++ b/psyneulink/core/components/functions/stateful/statefulfunction.py @@ -25,7 +25,7 @@ import numpy as np from beartype import beartype -from psyneulink._typing import Optional +from psyneulink._typing import Mapping, Optional from psyneulink.core import llvm as pnlvm from psyneulink.core.components.component import DefaultsFlexibility, _has_initializers_setter, ComponentsMeta @@ -235,7 +235,7 @@ def __init__(self, rate=None, noise=None, initializer=None, - params: Optional[dict] = None, + params: Optional[Mapping] = None, owner=None, prefs: Optional[ValidPrefSet] = None, context=None, diff --git a/psyneulink/core/components/projections/modulatory/controlprojection.py b/psyneulink/core/components/projections/modulatory/controlprojection.py index 71f6ff36fe4..6c20811ad52 100644 --- a/psyneulink/core/components/projections/modulatory/controlprojection.py +++ b/psyneulink/core/components/projections/modulatory/controlprojection.py @@ -111,7 +111,7 @@ from beartype import beartype -from psyneulink._typing import Optional +from psyneulink._typing import Mapping, Optional from psyneulink.core.components.component import parameter_keywords from 
psyneulink.core.components.functions.nonstateful.transferfunctions import Linear @@ -243,7 +243,7 @@ def __init__(self, weight=None, exponent=None, function=None, - control_signal_params:Optional[dict]=None, + control_signal_params:Optional[Mapping]=None, params=None, name=None, prefs: Optional[ValidPrefSet] = None, diff --git a/psyneulink/core/components/projections/modulatory/gatingprojection.py b/psyneulink/core/components/projections/modulatory/gatingprojection.py index 746f861bee4..37ca2c4959d 100644 --- a/psyneulink/core/components/projections/modulatory/gatingprojection.py +++ b/psyneulink/core/components/projections/modulatory/gatingprojection.py @@ -101,7 +101,7 @@ """ from beartype import beartype -from psyneulink._typing import Optional +from psyneulink._typing import Optional, Mapping from psyneulink.core.components.component import parameter_keywords from psyneulink.core.components.functions.function import FunctionOutputType @@ -244,7 +244,7 @@ def __init__(self, function=None, weight=None, exponent=None, - gating_signal_params:Optional[dict]=None, + gating_signal_params:Optional[Mapping]=None, params=None, name=None, prefs: Optional[ValidPrefSet] = None, diff --git a/psyneulink/core/components/projections/modulatory/learningprojection.py b/psyneulink/core/components/projections/modulatory/learningprojection.py index 1d7f38d38f4..8381376f12b 100644 --- a/psyneulink/core/components/projections/modulatory/learningprojection.py +++ b/psyneulink/core/components/projections/modulatory/learningprojection.py @@ -204,7 +204,7 @@ import numpy as np from beartype import beartype -from psyneulink._typing import Optional, Union, Callable, Literal +from psyneulink._typing import Callable, Literal, Mapping, Optional, Union from psyneulink.core.components.component import parameter_keywords from psyneulink.core.components.functions.nonstateful.combinationfunctions import LinearCombination @@ -481,12 +481,12 @@ def __init__(self, error_function: Optional[Callable] = None, learning_function: Optional[Callable] = None, # FIX: 10/3/17 - TEST IF THIS OK AND REINSTATE IF SO - # learning_signal_params:Optional[dict]=None, + # learning_signal_params:Optional[Mapping]=None, # learning_rate: Optional[ValidParamSpecType] = None, learning_enabled: Optional[Union[bool, Literal['online', 'after']]] = None, weight=None, exponent=None, - params: Optional[dict] = None, + params: Optional[Mapping] = None, name=None, prefs: Optional[ValidPrefSet] = None, **kwargs diff --git a/psyneulink/core/compositions/composition.py b/psyneulink/core/compositions/composition.py index 455178747d3..c10706330d0 100644 --- a/psyneulink/core/compositions/composition.py +++ b/psyneulink/core/compositions/composition.py @@ -2897,7 +2897,7 @@ def input_function(env, result): from PIL import Image from beartype import beartype -from psyneulink._typing import Optional, Union, Literal, Type, Callable, List, Set +from psyneulink._typing import Callable, Literal, List, Mapping, Optional, Set, Type, Union from psyneulink.core import llvm as pnlvm from psyneulink.core.components.component import Component, ComponentError, ComponentsMeta @@ -11514,7 +11514,7 @@ def run( def learn( self, inputs: dict, - targets: Optional[dict] = None, + targets: Optional[Mapping] = None, num_trials: Optional[int] = None, epochs: int = 1, learning_rate: Optional[Union[int,float]]=None, diff --git a/psyneulink/library/compositions/autodiffcomposition.py b/psyneulink/library/compositions/autodiffcomposition.py index c851c32afd2..27f2e1ac1bc 100644 --- 
a/psyneulink/library/compositions/autodiffcomposition.py +++ b/psyneulink/library/compositions/autodiffcomposition.py @@ -330,7 +330,6 @@ import collections from packaging import version from pathlib import Path, PosixPath -from typing import Optional try: import torch @@ -343,6 +342,7 @@ from psyneulink.library.compositions.pytorchwrappers import PytorchCompositionWrapper from psyneulink.library.compositions.pytorchshowgraph import PytorchShowGraph +from psyneulink._typing import Mapping, Optional from psyneulink.core.components.mechanisms.processing.processingmechanism import ProcessingMechanism from psyneulink.core.components.mechanisms.processing.compositioninterfacemechanism import CompositionInterfaceMechanism from psyneulink.core.components.mechanisms.modulatory.modulatorymechanism import ModulatoryMechanism_Base @@ -1456,8 +1456,8 @@ def execute(self, runtime_params=None, execution_mode:pnlvm.ExecutionMode = pnlvm.ExecutionMode.PyTorch, skip_initialization=False, - synch_with_pnl_options:Optional[dict]=None, - retain_in_pnl_options:Optional[dict]=None, + synch_with_pnl_options:Optional[Mapping]=None, + retain_in_pnl_options:Optional[Mapping]=None, report_output:ReportOutput=ReportOutput.OFF, report_params:ReportOutput=ReportParams.OFF, report_progress:ReportProgress=ReportProgress.OFF, diff --git a/psyneulink/library/compositions/compositionrunner.py b/psyneulink/library/compositions/compositionrunner.py index 6888977be4f..4d45ccc6d06 100644 --- a/psyneulink/library/compositions/compositionrunner.py +++ b/psyneulink/library/compositions/compositionrunner.py @@ -9,9 +9,9 @@ # ********************************************* AutodiffComposition ************************************************* import numpy as np -from typing import Optional from types import GeneratorType +from psyneulink._typing import Mapping, Optional from psyneulink.core.llvm import ExecutionMode from psyneulink.core.compositions.composition import Composition from psyneulink.core.compositions.report import Report, ReportProgress, ReportDevices, LEARN_REPORT, PROGRESS_REPORT @@ -55,8 +55,8 @@ def _batch_inputs(self, minibatch_size: int = 1, optimizations_per_minibatch: int = 1, randomize: bool = True, - synch_with_pnl_options:Optional[dict] = None, - retain_in_pnl_options:Optional[dict] = None, + synch_with_pnl_options:Optional[Mapping] = None, + retain_in_pnl_options:Optional[Mapping] = None, call_before_minibatch=None, call_after_minibatch=None, early_stopper=None, @@ -161,8 +161,8 @@ def _batch_function_inputs(self, num_trials: int, batch_size: int = 1, optimizations_per_minibatch: int = 1, - synch_with_pnl_options:Optional[dict] = None, - retain_in_pnl_options:Optional[dict] = None, + synch_with_pnl_options:Optional[Mapping] = None, + retain_in_pnl_options:Optional[Mapping] = None, call_before_minibatch=None, call_after_minibatch=None, early_stopper=None, @@ -223,8 +223,8 @@ def run_learning(self, patience: int = None, min_delta: int = 0, randomize_minibatches: bool = True, - synch_with_pnl_options:Optional[dict] = None, - retain_in_pnl_options:Optional[dict] = None, + synch_with_pnl_options:Optional[Mapping] = None, + retain_in_pnl_options:Optional[Mapping] = None, call_before_minibatch = None, call_after_minibatch = None, context=None, From 4cfd3d837ff1118060815eded4a3b2fe972815b1 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Mon, 21 Oct 2024 13:16:12 -0400 Subject: [PATCH 347/410] tests: Convert dict test arguments to an immutable MappingProxyType Signed-off-by: Jan Vesely --- conftest.py | 8 ++++++++ 1 
file changed, 8 insertions(+) diff --git a/conftest.py b/conftest.py index e486c1cbe2a..d473349c7e6 100644 --- a/conftest.py +++ b/conftest.py @@ -6,6 +6,7 @@ import pytest import re import sys +import types import graph_scheduler as gs import psyneulink @@ -46,6 +47,13 @@ def pytest_runtest_setup(item): # Check that all 'cuda' tests are also marked 'llvm' assert 'llvm' in item.keywords or 'cuda' not in item.keywords + # It the item is a parametrized function. It has a 'callspec' attribute. + # Convert any dict arguments to an unmutable MappingProxyType. + if hasattr(item, 'callspec'): + for k, v in item.callspec.params.items(): + if isinstance(v, dict): + item.callspec.params[k] = types.MappingProxyType(v) + for m in marks_default_skip: if m in item.keywords and not item.config.getvalue(m): pytest.skip('{0} tests not requested'.format(m)) From c838f803a151412ceebd3b6d976afe94a75e9332 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Tue, 22 Oct 2024 22:44:55 -0400 Subject: [PATCH 348/410] requirements: update torch requirement from >=1.10.0,<2.5.0 to >=1.10.0,<2.6.0 (#3077) Updates the requirements on [pytorch](https://github.com/pytorch/pytorch) to permit the latest version. - [Release notes](https://github.com/pytorch/pytorch/releases) - [Commits](https://github.com/pytorch/pytorch/compare/v2.4.0...v2.5.0) Signed-off-by: Jan Vesely --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index c07a079587d..bdfac55c50b 100644 --- a/requirements.txt +++ b/requirements.txt @@ -19,4 +19,4 @@ protobuf<3.20.4 rich>=10.1, <10.13 scipy>=1.7.3, <1.15 toposort<1.11 -torch>=1.10.0, <2.5.0; (platform_machine == 'AMD64' or platform_machine == 'x86_64' or platform_machine == 'arm64' or platform_machine == 'aarch64') and platform_python_implementation == 'CPython' and implementation_name == 'cpython' +torch>=1.10.0, <2.6.0; (platform_machine == 'AMD64' or platform_machine == 'x86_64' or platform_machine == 'arm64' or platform_machine == 'aarch64') and platform_python_implementation == 'CPython' and implementation_name == 'cpython' From f6883db92fadc201c992b2f7b37ae6243a455de7 Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Tue, 8 Oct 2024 23:48:46 +0000 Subject: [PATCH 349/410] conditions: add When alias for Condition --- psyneulink/core/scheduling/condition.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/psyneulink/core/scheduling/condition.py b/psyneulink/core/scheduling/condition.py index 34e051157bd..d6a5d9b0b48 100644 --- a/psyneulink/core/scheduling/condition.py +++ b/psyneulink/core/scheduling/condition.py @@ -351,3 +351,6 @@ def as_mdf_model(self): m.kwargs['parameter'] = f'{self.dependency.name}_OutputPort_0' return m + + +When = Condition From 14fc2cb8cdfd5a162f685c326a389093a0f58e81 Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Tue, 8 Oct 2024 23:15:27 +0000 Subject: [PATCH 350/410] conditions: explicitly specify pnl Conditions in __all__ psyneulink-wrapped versions of graph_scheduler Conditions are dynamically generated. This prevented some IDEs from recognizing their names. 
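Simplified sketch of the underlying issue (the _wrap helper below is a placeholder, not the actual wrapping code in condition.py): names bound by iterating over graph_scheduler.condition.__all__ at import time exist at runtime but are invisible to static analyzers, so the wrapped names are now listed explicitly in __all__ with a noqa: F822, since the linter likewise cannot see the dynamically created definitions:

    import graph_scheduler as gs

    def _wrap(cls):
        # placeholder for PsyNeuLink's real wrapping logic
        return type(cls.__name__, (cls,), {})

    # runtime-only binding; IDEs cannot infer the resulting module names
    for _name in gs.condition.__all__:
        _obj = getattr(gs.condition, _name)
        if isinstance(_obj, type):
            globals()[_name] = _wrap(_obj)

    __all__ = ['EveryNCalls', 'WhenFinished', 'AtPass']  # noqa: F822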
--- psyneulink/core/scheduling/condition.py | 28 ++++++++++++++++++++++--- tests/scheduling/test_condition.py | 7 +++++++ 2 files changed, 32 insertions(+), 3 deletions(-) diff --git a/psyneulink/core/scheduling/condition.py b/psyneulink/core/scheduling/condition.py index d6a5d9b0b48..632cd9fa752 100644 --- a/psyneulink/core/scheduling/condition.py +++ b/psyneulink/core/scheduling/condition.py @@ -9,7 +9,6 @@ # ********************************************* Condition ************************************************************** import collections -import copy import inspect import numbers import warnings @@ -24,8 +23,31 @@ from psyneulink.core.globals.parameters import parse_context from psyneulink.core.globals.utilities import parse_valid_identifier, toposort_key -__all__ = copy.copy(graph_scheduler.condition.__all__) -__all__.extend(['Threshold']) + +__all__ = [ # noqa: F822 (dynamically generated) + 'AbsoluteCondition', 'AddEdgeTo', 'AfterCall', + 'AfterConsiderationSetExecution', 'AfterEnvironmentSequence', + 'AfterEnvironmentStateUpdate', 'AfterNCalls', 'AfterNCallsCombined', + 'AfterNConsiderationSetExecutions', 'AfterNEnvironmentSequences', + 'AfterNEnvironmentStateUpdates', 'AfterNPasses', 'AfterNRuns', + 'AfterNTimeSteps', 'AfterNTrials', 'AfterNode', 'AfterNodes', + 'AfterPass', 'AfterRun', 'AfterTimeStep', 'AfterTrial', 'All', + 'AllHaveRun', 'Always', 'And', 'Any', 'AtConsiderationSetExecution', + 'AtEnvironmentSequence', 'AtEnvironmentSequenceNStart', + 'AtEnvironmentSequenceStart', 'AtEnvironmentStateUpdate', + 'AtEnvironmentStateUpdateNStart', 'AtEnvironmentStateUpdateStart', + 'AtNCalls', 'AtPass', 'AtRun', 'AtRunNStart', 'AtRunStart', + 'AtTimeStep', 'AtTrial', 'AtTrialNStart', 'AtTrialStart', + 'BeforeConsiderationSetExecution', 'BeforeEnvironmentStateUpdate', + 'BeforeNCalls', 'BeforeNode', 'BeforeNodes', 'BeforePass', + 'BeforeTimeStep', 'BeforeTrial', 'CompositeCondition', 'Condition', + 'ConditionBase', 'ConditionError', 'ConditionSet', + 'CustomGraphStructureCondition', 'EveryNCalls', 'EveryNPasses', + 'GraphStructureCondition', 'JustRan', 'NWhen', 'Never', 'Not', + 'Operation', 'Or', 'RemoveEdgeFrom', 'Threshold', 'TimeInterval', + 'TimeTermination', 'WhenFinished', 'WhenFinishedAll', + 'WhenFinishedAny', 'When', 'While', 'WhileNot', 'WithNode', +] # avoid restricting graph_scheduler versions for this code diff --git a/tests/scheduling/test_condition.py b/tests/scheduling/test_condition.py index aedb26b04b2..182d85bfd7a 100644 --- a/tests/scheduling/test_condition.py +++ b/tests/scheduling/test_condition.py @@ -1,5 +1,6 @@ import logging +import graph_scheduler as gs import numpy as np import psyneulink as pnl import pytest @@ -22,6 +23,12 @@ logger = logging.getLogger(__name__) +class TestModule: + def test_all_attr_parity(self): + missing = set(gs.condition.__all__) - set(pnl.core.scheduling.condition.__all__) + assert len(missing) == 0, (f'Conditions in graph_scheduler must be added to psyneulink condition.py: {missing}') + + class TestCondition: def test_invalid_input_WhenFinished(self): From c7b8cc729ce10d0eedeb4597681e9fcd92c4c005 Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Thu, 10 Oct 2024 03:40:06 +0000 Subject: [PATCH 351/410] condition: add python interface from graph_scheduler provides IDE argument hints and class tooltips --- psyneulink/core/scheduling/condition.pyi | 1849 ++++++++++++++++++++++ 1 file changed, 1849 insertions(+) create mode 100644 psyneulink/core/scheduling/condition.pyi diff --git a/psyneulink/core/scheduling/condition.pyi 
b/psyneulink/core/scheduling/condition.pyi new file mode 100644 index 00000000000..1b32f3554c2 --- /dev/null +++ b/psyneulink/core/scheduling/condition.pyi @@ -0,0 +1,1849 @@ +import enum +from typing import Callable, ClassVar, Dict, Hashable, Iterable, Set, Union + +import _abc +import graph_scheduler.time +import pint +from _typeshed import Incomplete + +__all__ = ['Operation', 'ConditionError', 'ConditionSet', 'ConditionBase', 'Condition', 'AbsoluteCondition', 'While', 'When', 'WhileNot', 'Always', 'Never', 'CompositeCondition', 'All', 'Any', 'And', 'Or', 'Not', 'NWhen', 'TimeInterval', 'TimeTermination', 'BeforeConsiderationSetExecution', 'AtConsiderationSetExecution', 'AfterConsiderationSetExecution', 'AfterNConsiderationSetExecutions', 'BeforePass', 'AtPass', 'AfterPass', 'AfterNPasses', 'EveryNPasses', 'BeforeEnvironmentStateUpdate', 'AtEnvironmentStateUpdate', 'AfterEnvironmentStateUpdate', 'AfterNEnvironmentStateUpdates', 'AtEnvironmentSequence', 'AfterEnvironmentSequence', 'AfterNEnvironmentSequences', 'BeforeNCalls', 'AtNCalls', 'AfterCall', 'AfterNCalls', 'AfterNCallsCombined', 'EveryNCalls', 'JustRan', 'AllHaveRun', 'WhenFinished', 'WhenFinishedAny', 'WhenFinishedAll', 'AtEnvironmentStateUpdateStart', 'AtEnvironmentStateUpdateNStart', 'AtEnvironmentSequenceStart', 'AtEnvironmentSequenceNStart', 'Threshold', 'GraphStructureCondition', 'CustomGraphStructureCondition', 'BeforeNodes', 'BeforeNode', 'WithNode', 'AfterNodes', 'AfterNode', 'AddEdgeTo', 'RemoveEdgeFrom'] + + +SubjectOperation = Union['Operation', str, Dict[Hashable, Union['Operation', str]]] +ConditionSetDict = Dict[Hashable, Union['ConditionBase', Iterable['ConditionBase']]] +GraphDependencyDict = Dict[Hashable, Set[Hashable]] + + +class Operation(enum.Enum): + + """ + Used in conjunction with `GraphStructureCondition` to indicate how a + set of source nodes (**S** below) should be combined with a set of + comparison nodes (**C** below) to produce a result set. Many + Operations correspond to standard set operations. + + Each enum item can be called with a source set and comparison set as + arguments to produce the result set. + + Attributes: + KEEP: Returns **S** + + REPLACE: Returns **C** + + DISCARD: Returns the empty set + + INTERSECTION: Returns the set of items that are in both **S** + and **C** + + UNION: Returns the set of items in either **S** or **C** + + MERGE: Returns the set of items in either **S** or **C** + + DIFFERENCE: Returns the set of items in **S** but not **C** + + INVERSE_DIFFERENCE: Returns the set of items in **C** but not + **S** + + SYMMETRIC_DIFFERENCE: Returns the set of items that are in one + of **S** or **C** but not both + + """ + _member_names_: ClassVar[list] = ... + _member_map_: ClassVar[dict] = ... + _member_type_: ClassVar[type[object]] = ... + _value2member_map_: ClassVar[dict] = ... + KEEP: ClassVar[Operation] = ... + REPLACE: ClassVar[Operation] = ... + DISCARD: ClassVar[Operation] = ... + INTERSECTION: ClassVar[Operation] = ... + UNION: ClassVar[Operation] = ... + MERGE: ClassVar[Operation] = ... + DIFFERENCE: ClassVar[Operation] = ... + INVERSE_DIFFERENCE: ClassVar[Operation] = ... + SYMMETRIC_DIFFERENCE: ClassVar[Operation] = ... 
+ def __call__(self, source_neighbors: set[Hashable], comparison_neighbors: set[Hashable]) -> set[Hashable]: + """ + Returns the set resulting from applying an `Operation` on + **source_neighbors** and **comparison_neighbors** + + Args: + source_neighbors (Set[Hashable]) + comparison_neighbors (Set[Hashable]) + + Returns: + Set[Hashable] + """ + @classmethod + def __init__(cls, value) -> None: ... + +class ConditionError(Exception): + __init__: ClassVar[wrapper_descriptor] = ... + +class ConditionSet: + + """Used in conjunction with a `Scheduler ` to store the `Conditions ` associated with a node. + + Arguments + --------- + + *condition_sets + each item is a dict or ConditionSet mapping nodes to one or more + conditions to be added via `ConditionSet.add_condition_set` + + conditions + a dict or ConditionSet mapping nodes to one or more conditions + to be added via `ConditionSet.add_condition_set`. Maintained for + backwards compatibility with versions 1.x + + Attributes + ---------- + + conditions : Dict[Hashable: Union[ConditionBase, Iterable[ConditionBase]]] + the key of each entry is a node, and its value is a condition + associated + with that node. Conditions can be added to the + ConditionSet using the ConditionSet's `add_condition` method. + + conditions_basic : Dict[Hashable: `Condition `] + a dict mapping nodes to their single `basic Conditions + ` + + conditions_structural : Dict[Hashable: List[`GraphStructureCondition`]] + a dict mapping nodes to their `graph structure Conditions + ` + + structural_condition_order : List[`GraphStructureCondition`] + a list storing all `GraphStructureCondition` s in this + ConditionSet in the order in which they were added (and will be + applied to a `Scheduler`) + + """ + def __init__(self, *condition_sets: ConditionSetDict, conditions: ConditionSetDict = ...) -> None: ... + def __contains__(self, item) -> bool: ... + def __iter__(self): ... + def __getitem__(self, key): ... + def __setitem__(self, key, value) -> None: ... + def add_condition(self, owner: Hashable, condition: ConditionBase): + """ + Adds a `basic ` or `graph + structure ` Condition to the + ConditionSet. + + If **condition** is basic, it will overwrite the current basic + Condition for **owner**, if present. If you want to add multiple + basic Conditions to a single owner, instead add a single + `Composite Condition ` to accurately + specify the desired behavior. + + Arguments + --------- + + owner : node + specifies the node with which the **condition** should be associated. **condition** + will govern the execution behavior of **owner** + + condition : ConditionBase + specifies the condition associated with **owner** to be + added to the ConditionSet. + """ + def remove_condition(self, owner_or_condition: Hashable | ConditionBase) -> ConditionBase | None: + """ + Removes the condition specified as or owned by + **owner_or_condition**. + + Args: + owner_or_condition (Union[Hashable, `ConditionBase`]): + Either a condition or the owner of a condition + + Returns: + The condition removed, or None if no condition removed + + Raises: + ConditionError: + - when **owner_or_condition** is an owner and it owns + multiple conditions + - when **owner_or_condition** is a condition and its + owner is None + """ + def add_condition_set(self, conditions: ConditionSet | ConditionSetDict): + """ + Adds a set of `basic ` or + `graph structure ` Conditions (in the + form of a dict or another ConditionSet) to the ConditionSet. 
+ + Any basic Condition added here will overwrite the current basic + Condition for a given owner, if present. If you want to add + multiple basic Conditions to a single owner, instead add a + single `Composite Condition ` to + accurately specify the desired behavior. + + Arguments + --------- + + conditions + specifies collection of Conditions to be added to this ConditionSet, + + if a dict is provided: + each entry should map an owner node (the node whose + execution behavior will be governed) to a `Condition + ` or + `GraphStructureCondition`, or an iterable of them. + + """ + @property + def conditions(self): ... + +class ConditionBase: + + """ + Abstract base class for `basic conditions + ` and `graph structure + conditions ` + + Attributes: + owner (Hashable): + the node with which the Condition is associated, and the + execution of which it determines. + + """ + owner: Incomplete + def __init__(self, _owner: Hashable = ..., **kwargs) -> None: ... + +class Condition(ConditionBase): + + """ + Used in conjunction with a :class:`Scheduler` to specify the condition under which a node should be + allowed to execute. + + Arguments + --------- + + func : callable + specifies function to be called when the Condition is evaluated, to determine whether it is currently satisfied. + + args : *args + specifies formal arguments to pass to `func` when the Condition is evaluated. + + kwargs : **kwargs + specifies keyword arguments to pass to `func` when the Condition is evaluated. + """ + def __init__(self, func, *args, **kwargs) -> None: ... + def is_satisfied(self, *args, execution_id: Incomplete | None = ..., **kwargs): + """ + the function called to determine satisfaction of this Condition. + + Arguments + --------- + args : *args + specifies additional formal arguments to pass to `func` when the Condition is evaluated. + these are appended to the **args** specified at instantiation of this Condition + + kwargs : **kwargs + specifies additional keyword arguments to pass to `func` when the Condition is evaluated. + these are added to the **kwargs** specified at instantiation of this Condition + + Returns + ------- + True - if the Condition is satisfied + False - if the Condition is not satisfied + """ + @property + def absolute_intervals(self): ... + @property + def absolute_fixed_points(self): ... + @property + def is_absolute(self): ... + +class AbsoluteCondition(Condition): + def __init__(self, func, *args, **kwargs) -> None: ... + @property + def is_absolute(self): ... + +class _DependencyValidation: ... +class While(ConditionBase): + + """ + Used in conjunction with a :class:`Scheduler` to specify the condition under which a node should be + allowed to execute. + + Arguments + --------- + + func : callable + specifies function to be called when the Condition is evaluated, to determine whether it is currently satisfied. + + args : *args + specifies formal arguments to pass to `func` when the Condition is evaluated. + + kwargs : **kwargs + specifies keyword arguments to pass to `func` when the Condition is evaluated. + """ + def __init__(self, func, *args, **kwargs) -> None: ... + def is_satisfied(self, *args, execution_id: Incomplete | None = ..., **kwargs): + """ + the function called to determine satisfaction of this Condition. + + Arguments + --------- + args : *args + specifies additional formal arguments to pass to `func` when the Condition is evaluated. 
+ these are appended to the **args** specified at instantiation of this Condition + + kwargs : **kwargs + specifies additional keyword arguments to pass to `func` when the Condition is evaluated. + these are added to the **kwargs** specified at instantiation of this Condition + + Returns + ------- + True - if the Condition is satisfied + False - if the Condition is not satisfied + """ + @property + def absolute_intervals(self): ... + @property + def absolute_fixed_points(self): ... + @property + def is_absolute(self): ... + +class When(ConditionBase): + + """ + Used in conjunction with a :class:`Scheduler` to specify the condition under which a node should be + allowed to execute. + + Arguments + --------- + + func : callable + specifies function to be called when the Condition is evaluated, to determine whether it is currently satisfied. + + args : *args + specifies formal arguments to pass to `func` when the Condition is evaluated. + + kwargs : **kwargs + specifies keyword arguments to pass to `func` when the Condition is evaluated. + """ + def __init__(self, func, *args, **kwargs) -> None: ... + def is_satisfied(self, *args, execution_id: Incomplete | None = ..., **kwargs): + """ + the function called to determine satisfaction of this Condition. + + Arguments + --------- + args : *args + specifies additional formal arguments to pass to `func` when the Condition is evaluated. + these are appended to the **args** specified at instantiation of this Condition + + kwargs : **kwargs + specifies additional keyword arguments to pass to `func` when the Condition is evaluated. + these are added to the **kwargs** specified at instantiation of this Condition + + Returns + ------- + True - if the Condition is satisfied + False - if the Condition is not satisfied + """ + @property + def absolute_intervals(self): ... + @property + def absolute_fixed_points(self): ... + @property + def is_absolute(self): ... + +class WhileNot(Condition): + + """ + WhileNot + + Parameters: + + func : callable + specifies function to be called when the Condition is evaluated, to determine whether it is currently satisfied. + + args : *args + specifies formal arguments to pass to `func` when the Condition is evaluated. + + kwargs : **kwargs + specifies keyword arguments to pass to `func` when the Condition is evaluated. + + Satisfied when: + + - **func** is False + + """ + def __init__(self, func, *args, **kwargs) -> None: ... + +class Always(Condition): + + """Always + + Parameters: + + none + + Satisfied when: + + - always satisfied. + + """ + def __init__(self) -> None: ... + +class Never(Condition): + + """Never + + Parameters: + + none + + Satisfied when: + + - never satisfied. + """ + def __init__(self) -> None: ... + +class CompositeCondition(Condition): + owner: Incomplete + def __init__(self, func, *args, **kwargs) -> None: ... + @property + def absolute_intervals(self): ... + @property + def absolute_fixed_points(self): ... + @property + def is_absolute(self): ... + +class All(CompositeCondition): + + """All + + Parameters: + + args: one or more `Conditions ` + + Satisfied when: + + - all of the Conditions in args are satisfied. + + Notes: + + - To initialize with a list (for example):: + + conditions = [AfterNCalls(node, 5) for node in node_list] + + unpack the list to supply its members as args:: + + composite_condition = All(*conditions) + + """ + def __init__(self, *args, **dependencies) -> None: ... + def satis(self, *conds, **kwargs): ... 
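# A minimal usage sketch (assumed PsyNeuLink public API; the mechanisms, the
# pathway layout, and the call counts below are made up for illustration):
# a composite Condition such as All is attached to a node via the scheduler
# of the Composition that owns it.
import psyneulink as pnl

A = pnl.TransferMechanism(name='A')
B = pnl.TransferMechanism(name='B')
C = pnl.TransferMechanism(name='C')
comp = pnl.Composition(pathways=[[A, C], [B, C]])

# C may execute only once A and B have each executed at least twice in the
# current trial; All requires every constituent Condition to be satisfied.
comp.scheduler.add_condition(C, pnl.All(pnl.AfterNCalls(A, 2), pnl.AfterNCalls(B, 2)))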
+ +class Any(CompositeCondition): + + """Any + + Parameters: + + args: one or more `Conditions ` + + Satisfied when: + + - one or more of the Conditions in **args** is satisfied. + + Notes: + + - To initialize with a list (for example):: + + conditions = [AfterNCalls(node, 5) for node in node_list] + + unpack the list to supply its members as args:: + + composite_condition = Any(*conditions) + + """ + def __init__(self, *args, **dependencies) -> None: ... + def satis(self, *conds, **kwargs): ... + +class And(CompositeCondition): + + """All + + Parameters: + + args: one or more `Conditions ` + + Satisfied when: + + - all of the Conditions in args are satisfied. + + Notes: + + - To initialize with a list (for example):: + + conditions = [AfterNCalls(node, 5) for node in node_list] + + unpack the list to supply its members as args:: + + composite_condition = All(*conditions) + + """ + def __init__(self, *args, **dependencies) -> None: ... + def satis(self, *conds, **kwargs): ... + +class Or(CompositeCondition): + + """Any + + Parameters: + + args: one or more `Conditions ` + + Satisfied when: + + - one or more of the Conditions in **args** is satisfied. + + Notes: + + - To initialize with a list (for example):: + + conditions = [AfterNCalls(node, 5) for node in node_list] + + unpack the list to supply its members as args:: + + composite_condition = Any(*conditions) + + """ + def __init__(self, *args, **dependencies) -> None: ... + def satis(self, *conds, **kwargs): ... + +class Not(Condition): + + """Not + + Parameters: + + condition(Condition): a `Condition` + + Satisfied when: + + - **condition** is not satisfied. + + """ + owner: Incomplete + def __init__(self, condition) -> None: ... + +class NWhen(Condition): + + """NWhen + + Parameters: + + condition(Condition): a `Condition` + + n(int): the maximum number of times this condition will be satisfied + + Satisfied when: + + - the first **n** times **condition** is satisfied upon evaluation + + """ + owner: Incomplete + def __init__(self, condition, n: int = ...) -> None: ... + def satis(self, condition, n, *args, scheduler: Incomplete | None = ..., execution_id: Incomplete | None = ..., **kwargs): ... + +class TimeInterval(AbsoluteCondition): + + """TimeInterval + + Attributes: + + repeat + the interval between *unit*s where this condition can be + satisfied + + start + the time at/after which this condition can be + satisfied + + end + the time at/fter which this condition can be + satisfied + + unit + the `pint.Unit` to use for scalar values of *repeat*, + *start*, and *end* + + start_inclusive + if True, *start* allows satisfaction exactly at the time + corresponding to *start*. if False, satisfaction can occur + only after *start* + + end_inclusive + if True, *end* allows satisfaction exactly until the time + corresponding to *end*. if False, satisfaction can occur + only before *end* + + + Satisfied when: + + Every *repeat* units of time at/after *start* and before/through + *end* + + Notes: + + Using a `TimeInterval` as a + `termination Condition ` may + result in unexpected behavior. The user may be inclined to + create **TimeInterval(end=x)** to terminate at time **x**, but + this will do the opposite and be true only and always until time + **x**, terminating at any time before **x**. If in doubt, use + `TimeTermination` instead. + + If the scheduler is not set to `exact_time_mode = True`, + *start_inclusive* and *end_inclusive* may not behave as + expected. See `Scheduler_Exact_Time` for more info. 
+ """ + def __init__(self, repeat: int | str | pint.Quantity = ..., start: int | str | pint.Quantity = ..., end: int | str | pint.Quantity = ..., unit: str | pint.Unit = ..., start_inclusive: bool = ..., end_inclusive: bool = ...) -> None: ... + @property + def absolute_intervals(self): ... + @property + def absolute_fixed_points(self): ... + +class TimeTermination(AbsoluteCondition): + + """TimeTermination + + Attributes: + + t + the time at/after which this condition is satisfied + + unit + the `pint.Unit` to use for scalar values of *t*, *start*, + and *end* + + start_inclusive + if True, the condition is satisfied exactly at the time + corresponding to *t*. if False, satisfaction can occur + only after *t* + + Satisfied when: + + At/After time *t* + """ + def __init__(self, t: int | str | pint.Quantity, inclusive: bool = ..., unit: str | pint.Unit = ...) -> None: ... + @property + def absolute_fixed_points(self): ... + +class BeforeConsiderationSetExecution(Condition): + + """BeforeConsiderationSetExecution + + Parameters: + + n(int): the 'CONSIDERATION_SET_EXECUTION' before which the Condition is satisfied + + time_scale(TimeScale): the TimeScale used as basis for counting `CONSIDERATION_SET_EXECUTION`\\ s (default: TimeScale.ENVIRONMENT_STATE_UPDATE) + + Satisfied when: + + - at most n-1 `CONSIDERATION_SET_EXECUTION`\\ s have occurred within one unit of time at the `TimeScale` specified by **time_scale**. + + Notes: + + - Counts of TimeScales are zero-indexed (that is, the first `CONSIDERATION_SET_EXECUTION` is 0, the second `CONSIDERATION_SET_EXECUTION` is 1, etc.); + so, `BeforeConsiderationSetExecution(2)` is satisfied at `CONSIDERATION_SET_EXECUTION` 0 and `CONSIDERATION_SET_EXECUTION` 1. + + """ + def __init__(self, n, time_scale: graph_scheduler.time.TimeScale = ...) -> None: ... + +class AtConsiderationSetExecution(Condition): + + """AtConsiderationSetExecution + + Parameters: + + n(int): the `CONSIDERATION_SET_EXECUTION` at which the Condition is satisfied + + time_scale(TimeScale): the TimeScale used as basis for counting `CONSIDERATION_SET_EXECUTION`\\ s (default: TimeScale.ENVIRONMENT_STATE_UPDATE) + + Satisfied when: + + - exactly n `CONSIDERATION_SET_EXECUTION`\\ s have occurred within one unit of time at the `TimeScale` specified by **time_scale**. + + Notes: + + - Counts of TimeScales are zero-indexed (that is, the first 'CONSIDERATION_SET_EXECUTION' is pass 0, the second 'CONSIDERATION_SET_EXECUTION' is 1, etc.); + so, `AtConsiderationSetExecution(1)` is satisfied when a single `CONSIDERATION_SET_EXECUTION` (`CONSIDERATION_SET_EXECUTION` 0) has occurred, and `AtConsiderationSetExecution(2)` is satisfied + when two `CONSIDERATION_SET_EXECUTION`\\ s have occurred (`CONSIDERATION_SET_EXECUTION` 0 and `CONSIDERATION_SET_EXECUTION` 1), etc.. + + """ + def __init__(self, n, time_scale: graph_scheduler.time.TimeScale = ...) -> None: ... + +class AfterConsiderationSetExecution(Condition): + + """AfterConsiderationSetExecution + + Parameters: + + n(int): the `CONSIDERATION_SET_EXECUTION` after which the Condition is satisfied + + time_scale(TimeScale): the TimeScale used as basis for counting `CONSIDERATION_SET_EXECUTION`\\ s (default: TimeScale.ENVIRONMENT_STATE_UPDATE) + + Satisfied when: + + - at least n+1 `CONSIDERATION_SET_EXECUTION`\\ s have occurred within one unit of time at the `TimeScale` specified by **time_scale**. 
+ + Notes: + + - Counts of TimeScals are zero-indexed (that is, the first `CONSIDERATION_SET_EXECUTION` is 0, the second `CONSIDERATION_SET_EXECUTION` is 1, etc.); so, + `AfterConsiderationSetExecution(1)` is satisfied after `CONSIDERATION_SET_EXECUTION` 1 has occurred and thereafter (i.e., in `CONSIDERATION_SET_EXECUTION`\\ s 2, 3, 4, etc.). + + """ + def __init__(self, n, time_scale: graph_scheduler.time.TimeScale = ...) -> None: ... + +class AfterNConsiderationSetExecutions(Condition): + + """AfterNConsiderationSetExecutions + + Parameters: + + n(int): the number of `CONSIDERATION_SET_EXECUTION`\\ s after which the Condition is satisfied + + time_scale(TimeScale): the TimeScale used as basis for counting `CONSIDERATION_SET_EXECUTION`\\ s (default: TimeScale.ENVIRONMENT_STATE_UPDATE) + + + Satisfied when: + + - at least n `CONSIDERATION_SET_EXECUTION`\\ s have occurred within one unit of time at the `TimeScale` specified by **time_scale**. + + """ + def __init__(self, n, time_scale: graph_scheduler.time.TimeScale = ...) -> None: ... + +class BeforePass(Condition): + + """BeforePass + + Parameters: + + n(int): the 'PASS' before which the Condition is satisfied + + time_scale(TimeScale): the TimeScale used as basis for counting `PASS`\\ es (default: TimeScale.ENVIRONMENT_STATE_UPDATE) + + Satisfied when: + + - at most n-1 `PASS`\\ es have occurred within one unit of time at the `TimeScale` specified by **time_scale**. + + Notes: + + - Counts of TimeScales are zero-indexed (that is, the first `PASS` is 0, the second `PASS` is 1, etc.); + so, `BeforePass(2)` is satisfied at `PASS` 0 and `PASS` 1. + + """ + def __init__(self, n, time_scale: graph_scheduler.time.TimeScale = ...) -> None: ... + +class AtPass(Condition): + + """AtPass + + Parameters: + + n(int): the `PASS` at which the Condition is satisfied + + time_scale(TimeScale): the TimeScale used as basis for counting `PASS`\\ es (default: TimeScale.ENVIRONMENT_STATE_UPDATE) + + Satisfied when: + + - exactly n `PASS`\\ es have occurred within one unit of time at the `TimeScale` specified by **time_scale**. + + Notes: + + - Counts of TimeScales are zero-indexed (that is, the first 'PASS' is pass 0, the second 'PASS' is 1, etc.); + so, `AtPass(1)` is satisfied when a single `PASS` (`PASS` 0) has occurred, and `AtPass(2)` is satisfied + when two `PASS`\\ es have occurred (`PASS` 0 and `PASS` 1), etc.. + + """ + def __init__(self, n, time_scale: graph_scheduler.time.TimeScale = ...) -> None: ... + +class AfterPass(Condition): + + """AfterPass + + Parameters: + + n(int): the `PASS` after which the Condition is satisfied + + time_scale(TimeScale): the TimeScale used as basis for counting `PASS`\\ es (default: TimeScale.ENVIRONMENT_STATE_UPDATE) + + Satisfied when: + + - at least n+1 `PASS`\\ es have occurred within one unit of time at the `TimeScale` specified by **time_scale**. + + Notes: + + - Counts of TimeScales are zero-indexed (that is, the first `PASS` is 0, the second `PASS` is 1, etc.); so, + `AfterPass(1)` is satisfied after `PASS` 1 has occurred and thereafter (i.e., in `PASS`\\ es 2, 3, 4, etc.). + + """ + def __init__(self, n, time_scale: graph_scheduler.time.TimeScale = ...) -> None: ... 
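# A minimal sketch of pass-based Conditions (assumed PsyNeuLink public API;
# mechanism names and pass counts are made up for illustration): AfterPass can
# gate when a node begins executing, and a pass-based Condition such as AtPass
# can also serve as a trial termination condition.
import psyneulink as pnl

A = pnl.TransferMechanism(name='A')
B = pnl.TransferMechanism(name='B')
comp = pnl.Composition(pathways=[A, B])

comp.scheduler.add_condition(B, pnl.AfterPass(0))  # B starts executing at PASS 1
comp.run(inputs={A: [[1.0]]},
         termination_processing={pnl.TimeScale.TRIAL: pnl.AtPass(3)})  # end each trial once 3 passes have occurred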
+ +class AfterNPasses(Condition): + + """AfterNPasses + + Parameters: + + n(int): the number of `PASS`\\ es after which the Condition is satisfied + + time_scale(TimeScale): the TimeScale used as basis for counting `PASS`\\ es (default: TimeScale.ENVIRONMENT_STATE_UPDATE) + + + Satisfied when: + + - at least n `PASS`\\ es have occurred within one unit of time at the `TimeScale` specified by **time_scale**. + + """ + def __init__(self, n, time_scale: graph_scheduler.time.TimeScale = ...) -> None: ... + +class EveryNPasses(Condition): + + """EveryNPasses + + Parameters: + + n(int): the frequency of passes with which this condition is satisfied + + time_scale(TimeScale): the TimeScale used as basis for counting `PASS`\\ es (default: TimeScale.ENVIRONMENT_STATE_UPDATE) + + Satisfied when: + + - `PASS` 0 + + - the specified number of `PASS`\\ es that has occurred within a unit of time (at the `TimeScale` specified by + **time_scale**) is evenly divisible by n. + + """ + def __init__(self, n, time_scale: graph_scheduler.time.TimeScale = ...) -> None: ... + +class BeforeEnvironmentStateUpdate(Condition): + + """BeforeEnvironmentStateUpdate + + Parameters: + + n(int): the `ENVIRONMENT_STATE_UPDATE ` before which the Condition is satisfied + + time_scale(TimeScale): the TimeScale used as basis for counting `ENVIRONMENT_STATE_UPDATE `\\ s + (default: TimeScale.ENVIRONMENT_SEQUENCE) + + Satisfied when: + + - at most n-1 `ENVIRONMENT_STATE_UPDATE `\\ s have occurred within one unit of time at the `TimeScale` + specified by **time_scale**. + + Notes: + + - Counts of TimeScales are zero-indexed (that is, the first `ENVIRONMENT_STATE_UPDATE ` is 0, the second + `ENVIRONMENT_STATE_UPDATE ` is 1, etc.); so, `BeforeEnvironmentStateUpdate(2)` is satisfied at `ENVIRONMENT_STATE_UPDATE ` 0 + and `ENVIRONMENT_STATE_UPDATE ` 1. + + """ + def __init__(self, n, time_scale: graph_scheduler.time.TimeScale = ...) -> None: ... + +class AtEnvironmentStateUpdate(Condition): + + """AtEnvironmentStateUpdate + + Parameters: + + n(int): the `ENVIRONMENT_STATE_UPDATE ` at which the Condition is satisfied + + time_scale(TimeScale): the TimeScale used as basis for counting `ENVIRONMENT_STATE_UPDATE `\\ s + (default: TimeScale.ENVIRONMENT_SEQUENCE) + + Satisfied when: + + - exactly n `ENVIRONMENT_STATE_UPDATE `\\ s have occurred within one unit of time at the `TimeScale` + specified by **time_scale**. + + Notes: + + - Counts of TimeScales are zero-indexed (that is, the first `ENVIRONMENT_STATE_UPDATE ` is 0, + the second `ENVIRONMENT_STATE_UPDATE ` is 1, etc.); so, `AtEnvironmentStateUpdate(1)` is satisfied when one + `ENVIRONMENT_STATE_UPDATE ` (`ENVIRONMENT_STATE_UPDATE ` 0) has already occurred. + + """ + def __init__(self, n, time_scale: graph_scheduler.time.TimeScale = ...) -> None: ... + +class AfterEnvironmentStateUpdate(Condition): + + """AfterEnvironmentStateUpdate + + Parameters: + + n(int): the `ENVIRONMENT_STATE_UPDATE ` after which the Condition is satisfied + + time_scale(TimeScale): the TimeScale used as basis for counting `ENVIRONMENT_STATE_UPDATE `\\ s. + (default: TimeScale.ENVIRONMENT_SEQUENCE) + + Satisfied when: + + - at least n+1 `ENVIRONMENT_STATE_UPDATE `\\ s have occurred within one unit of time at the `TimeScale` + specified by **time_scale**. 
+ + Notes: + + - Counts of TimeScales are zero-indexed (that is, the first `ENVIRONMENT_STATE_UPDATE ` is 0, the second + `ENVIRONMENT_STATE_UPDATE ` is 1, etc.); so, `AfterPass(1)` is satisfied after `ENVIRONMENT_STATE_UPDATE ` 1 + has occurred and thereafter (i.e., in `ENVIRONMENT_STATE_UPDATE `\\ s 2, 3, 4, etc.). + + """ + def __init__(self, n, time_scale: graph_scheduler.time.TimeScale = ...) -> None: ... + +class AfterNEnvironmentStateUpdates(Condition): + + """AfterNEnvironmentStateUpdates + + Parameters: + + n(int): the number of `ENVIRONMENT_STATE_UPDATE `\\ s after which the Condition is satisfied + + time_scale(TimeScale): the TimeScale used as basis for counting `ENVIRONMENT_STATE_UPDATE `\\ s + (default: TimeScale.ENVIRONMENT_SEQUENCE) + + Satisfied when: + + - at least n `ENVIRONMENT_STATE_UPDATE `\\ s have occured within one unit of time at the `TimeScale` + specified by **time_scale**. + + """ + def __init__(self, n, time_scale: graph_scheduler.time.TimeScale = ...) -> None: ... + +class AtEnvironmentSequence(Condition): + + """AtEnvironmentSequence + + Parameters: + + n(int): the `ENVIRONMENT_SEQUENCE` at which the Condition is satisfied + + Satisfied when: + + - exactly n `ENVIRONMENT_SEQUENCE`\\ s have occurred. + + Notes: + - `ENVIRONMENT_SEQUENCE`\\ s are managed by the environment using the Scheduler (e.g. `end_environment_sequence ` ) and are not automatically updated by this package. + + """ + def __init__(self, n) -> None: ... + +class AfterEnvironmentSequence(Condition): + + """AfterEnvironmentSequence + + Parameters: + + n(int): the `ENVIRONMENT_SEQUENCE` after which the Condition is satisfied + + Satisfied when: + + - at least n+1 `ENVIRONMENT_SEQUENCE`\\ s have occurred. + + Notes: + - `ENVIRONMENT_SEQUENCE`\\ s are managed by the environment using the Scheduler (e.g. `end_environment_sequence ` ) and are not automatically updated by this package. + + """ + def __init__(self, n) -> None: ... + +class AfterNEnvironmentSequences(Condition): + + """AfterNEnvironmentSequences + + Parameters: + + n(int): the number of `ENVIRONMENT_SEQUENCE`\\ s after which the Condition is satisfied + + Satisfied when: + + - at least n `ENVIRONMENT_SEQUENCE`\\ s have occured. + + Notes: + - `ENVIRONMENT_SEQUENCE`\\ s are managed by the environment using the Scheduler (e.g. `end_environment_sequence ` ) and are not automatically updated by this package. + + """ + def __init__(self, n) -> None: ... + +class BeforeNCalls(_DependencyValidation, Condition): + + """BeforeNCalls + + Parameters: + + dependency (Hashable): the node on which the Condition depends + + n(int): the number of executions of **dependency** before which the Condition is satisfied + + time_scale(TimeScale): the TimeScale used as basis for counting executions of **dependency** + (default: TimeScale.ENVIRONMENT_STATE_UPDATE) + + Satisfied when: + + - the node specified in **dependency** has executed at most n-1 times + within one unit of time at the `TimeScale` specified by **time_scale**. + + """ + def __init__(self, dependency, n, time_scale: graph_scheduler.time.TimeScale = ...) -> None: ... 
+ +class AtNCalls(_DependencyValidation, Condition): + + """AtNCalls + + Parameters: + + dependency (Hashable): the node on which the Condition depends + + n(int): the number of executions of **dependency** at which the Condition is satisfied + + time_scale(TimeScale): the TimeScale used as basis for counting executions of **dependency** + (default: TimeScale.ENVIRONMENT_STATE_UPDATE) + + Satisfied when: + + - the node specified in **dependency** has executed exactly n times + within one unit of time at the `TimeScale` specified by **time_scale**. + + """ + def __init__(self, dependency, n, time_scale: graph_scheduler.time.TimeScale = ...) -> None: ... + +class AfterCall(_DependencyValidation, Condition): + + """AfterCall + + Parameters: + + dependency (Hashable): the node on which the Condition depends + + n(int): the number of executions of **dependency** after which the Condition is satisfied + + time_scale(TimeScale): the TimeScale used as basis for counting executions of **dependency** + (default: TimeScale.ENVIRONMENT_STATE_UPDATE) + + Satisfied when: + + - the node specified in **dependency** has executed at least n+1 times + within one unit of time at the `TimeScale` specified by **time_scale**. + + """ + def __init__(self, dependency, n, time_scale: graph_scheduler.time.TimeScale = ...) -> None: ... + +class AfterNCalls(_DependencyValidation, Condition): + + """AfterNCalls + + Parameters: + + dependency (Hashable): the node on which the Condition depends + + n(int): the number of executions of **dependency** after which the Condition is satisfied + + time_scale(TimeScale): the TimeScale used as basis for counting executions of **dependency** + (default: TimeScale.ENVIRONMENT_STATE_UPDATE) + + Satisfied when: + + - the node specified in **dependency** has executed at least n times + within one unit of time at the `TimeScale` specified by **time_scale**. + + """ + def __init__(self, dependency, n, time_scale: graph_scheduler.time.TimeScale = ...) -> None: ... + +class AfterNCallsCombined(_DependencyValidation, Condition): + + """AfterNCallsCombined + + Parameters: + + *dependencies (Hashable): one or more nodes on which the Condition depends + + n(int): the number of combined executions of all nodes specified in **dependencies** after which the + Condition is satisfied (default: None) + + time_scale(TimeScale): the TimeScale used as basis for counting executions of **dependency** + (default: TimeScale.ENVIRONMENT_STATE_UPDATE) + + + Satisfied when: + + - there have been at least n+1 executions among all of the nodes specified in **dependencies** + within one unit of time at the `TimeScale` specified by **time_scale**. + + """ + def __init__(self, *dependencies, n: Incomplete | None = ..., time_scale: graph_scheduler.time.TimeScale = ...) -> None: ... + +class EveryNCalls(_DependencyValidation, Condition): + + '''EveryNCalls + + Parameters: + + dependency (Hashable): the node on which the Condition depends + + n(int): the frequency of executions of **dependency** at which the Condition is satisfied + + + Satisfied when: + + - the node specified in **dependency** has executed at least n times since the last time the + Condition\'s owner executed. + + + Notes: + + - scheduler\'s count of each other node that is "useable" by the node is reset to 0 when the + node runs + + ''' + def __init__(self, dependency, n) -> None: ... 
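# A minimal sketch of EveryNCalls (assumed PsyNeuLink public API; the names and
# the 2:1 rate are made up for illustration): it is the usual way to run one
# node at a lower rate than the node it depends on.
import psyneulink as pnl

fast = pnl.TransferMechanism(name='fast')
slow = pnl.TransferMechanism(name='slow')
comp = pnl.Composition(pathways=[fast, slow])

# slow executes only after every second execution of fast
comp.scheduler.add_condition(slow, pnl.EveryNCalls(fast, 2))
comp.run(inputs={fast: [[1.0]]},
         termination_processing={pnl.TimeScale.TRIAL: pnl.AfterNCalls(slow, 2)})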
+ +class JustRan(_DependencyValidation, Condition): + + """JustRan + + Parameters: + + dependency (Hashable): the node on which the Condition depends + + Satisfied when: + + - the node specified in **dependency** executed in the previous `CONSIDERATION_SET_EXECUTION`. + + Notes: + + - This Condition can transcend divisions between `TimeScales `. + For example, if A runs in the final `CONSIDERATION_SET_EXECUTION` of an `ENVIRONMENT_STATE_UPDATE `, + JustRan(A) is satisfied at the beginning of the next `ENVIRONMENT_STATE_UPDATE `. + + """ + def __init__(self, dependency) -> None: ... + +class AllHaveRun(_DependencyValidation, Condition): + + """AllHaveRun + + Parameters: + + *dependencies (Hashable): an iterable of nodes on which the Condition depends + + time_scale(TimeScale): the TimeScale used as basis for counting executions of **dependency** + (default: TimeScale.ENVIRONMENT_STATE_UPDATE) + + Satisfied when: + + - all of the nodes specified in **dependencies** have executed at least once + within one unit of time at the `TimeScale` specified by **time_scale**. + + """ + def __init__(self, *dependencies, time_scale: graph_scheduler.time.TimeScale = ...) -> None: ... + +class WhenFinished(_DependencyValidation, Condition): + + """WhenFinished + + Parameters: + + dependency (Hashable): the node on which the Condition depends + + Satisfied when: + + - the `is_finished` methods of the node specified in **dependencies** returns `True`. + + Notes: + + - This is a dynamic Condition: Each node is responsible for managing its finished status on its + own, which can occur independently of the execution of other nodes. Therefore the satisfaction of + this Condition) can vary arbitrarily in time. + + - The is_finished method is called with `execution_id` as its sole positional argument + + """ + def __init__(self, dependency) -> None: ... + +class WhenFinishedAny(_DependencyValidation, Condition): + + """WhenFinishedAny + + Parameters: + + *dependencies (Hashable): zero or more nodes on which the Condition depends + + Satisfied when: + + - the `is_finished` methods of any nodes specified in **dependencies** returns `True`. + + Notes: + + - This is a convenience class; WhenFinishedAny(A, B, C) is equivalent to + Any(WhenFinished(A), WhenFinished(B), WhenFinished(C)). + If no nodes are specified, the condition will default to checking all of scheduler's nodes. + + - This is a dynamic Condition: Each node is responsible for managing its finished status on its + own, which can occur independently of the execution of other nodes. Therefore the satisfaction of + this Condition) can vary arbitrarily in time. + + - The is_finished method is called with `execution_id` as its sole positional argument + + """ + def __init__(self, *dependencies) -> None: ... + +class WhenFinishedAll(_DependencyValidation, Condition): + + """WhenFinishedAll + + Parameters: + + *dependencies (Hashable): zero or more nodes on which the Condition depends + + Satisfied when: + + - the `is_finished` methods of all nodes specified in **dependencies** return `True`. + + Notes: + + - This is a convenience class; WhenFinishedAny(A, B, C) is equivalent to + All(WhenFinished(A), WhenFinished(B), WhenFinished(C)). + If no nodes are specified, the condition will default to checking all of scheduler's nodes. + + - This is a dynamic Condition: Each node is responsible for managing its finished status on its + own, which can occur independently of the execution of other nodes. 
Therefore the satisfaction of + this Condition) can vary arbitrarily in time. + + - The is_finished method is called with `execution_id` as its sole positional argument + + """ + def __init__(self, *dependencies) -> None: ... + +class AtEnvironmentStateUpdateStart(AtPass): + + """AtEnvironmentStateUpdateStart + + Satisfied when: + + - at the beginning of an `ENVIRONMENT_STATE_UPDATE ` + + Notes: + + - identical to `AtPass(0) ` + """ + def __init__(self) -> None: ... + +class AtEnvironmentStateUpdateNStart(All): + + """AtEnvironmentStateUpdateNStart + + Parameters: + + n(int): the `ENVIRONMENT_STATE_UPDATE ` on which the Condition is satisfied + + time_scale(TimeScale): the TimeScale used as basis for counting `ENVIRONMENT_STATE_UPDATE `\\ s + (default: TimeScale.ENVIRONMENT_SEQUENCE) + + Satisfied when: + + - on `PASS` 0 of the specified `ENVIRONMENT_STATE_UPDATE ` counted using 'TimeScale` + + Notes: + + - identical to All(AtPass(0), AtEnvironmentStateUpdate(n, time_scale)) + + """ + def __init__(self, n, time_scale: graph_scheduler.time.TimeScale = ...) -> None: ... + +class AtEnvironmentSequenceStart(AtEnvironmentStateUpdate): + + """AtEnvironmentSequenceStart + + Satisfied when: + + - at the beginning of an `ENVIRONMENT_SEQUENCE` + + Notes: + + - identical to `AtEnvironmentStateUpdate(0) ` + """ + def __init__(self) -> None: ... + +class AtEnvironmentSequenceNStart(All): + + """AtEnvironmentSequenceNStart + + Parameters: + + n(int): the `ENVIRONMENT_SEQUENCE` on which the Condition is satisfied + + Satisfied when: + + - on `ENVIRONMENT_STATE_UPDATE ` 0 of the specified `ENVIRONMENT_SEQUENCE` counted using 'TimeScale` + + Notes: + + - identical to `All(AtEnvironmentStateUpdate(0), AtEnvironmentSequence(n))` + + """ + def __init__(self, n) -> None: ... + +class Threshold(_DependencyValidation, Condition): + + """Threshold + + Attributes: + + dependency + the node on which the Condition depends + + parameter + the name of the parameter of **dependency** whose value is + to be compared to **threshold** + + threshold + the fixed value compared to the value of the **parameter** + + comparator + the string comparison operator determining the direction or + type of comparison of the value of the **parameter** + relative to **threshold** + + indices + if specified, a series of indices that reach the desired + number given an iterable value for **parameter** + + atol + absolute tolerance for the comparison + + rtol + relative tolerance (to **threshold**) for the comparison + + custom_parameter_getter + if specified, a function that returns the value of + **parameter** for **dependency**; to support class + structures other than <**dependency**>.<**parameter**> + without subclassing + + custom_parameter_validator + if specified, a function that throws an exception if there + is no **parameter** for **dependency**; to support class + structures other than <**dependency**>.<**parameter**> + without subclassing + + Satisfied when: + + The comparison between the value of the **parameter** and + **threshold** using **comparator** is true. If **comparator** is + an equality (==, !=), the comparison will be considered equal + within tolerances **atol** and **rtol**. + + Notes: + + The comparison must be done with scalars. If the value of + **parameter** contains more than one item, **indices** must be + specified. 
+ """ + def __init__(self, dependency, parameter, threshold, comparator, indices: Incomplete | None = ..., atol: int = ..., rtol: int = ..., custom_parameter_getter: Incomplete | None = ..., custom_parameter_validator: Incomplete | None = ...) -> None: ... + def get_parameter_value(self, execution_id: Incomplete | None = ...): ... + def validate_parameter(self, dependency, parameter, custom_parameter_validator: Incomplete | None = ...): ... + +class GraphStructureCondition(ConditionBase): + + """ + Abstract base class for `graph structure conditions + ` + + Subclasses must implement: + `_process` + """ + __abstractmethods__: ClassVar[frozenset] = ... + _abc_impl: ClassVar[_abc._abc_data] = ... + def modify_graph(self, graph: GraphDependencyDict) -> GraphDependencyDict: + """ + Modifies **graph** based on the transformation specified by this + condition + + Args: + graph: a graph dependency dictionary + + Raises: + ConditionError + + Returns: + A copy of **graph** with modifications applied + """ + def __init__(self, _owner: Hashable = ..., **kwargs) -> None: ... + +class CustomGraphStructureCondition(GraphStructureCondition): + + """ + Applies a user-defined function to a graph + + Args: + process_graph_function (Callable): a function taking an optional + 'self' argument (as the first argument, if present), and a + graph dependency dictionary + kwargs (**kwargs): optional arguments to be stored as attributes + """ + __abstractmethods__: ClassVar[frozenset] = ... + _abc_impl: ClassVar[_abc._abc_data] = ... + def __init__(self, process_graph_function: Callable, **kwargs) -> None: ... + +class _GSCUsingNodes(GraphStructureCondition): + + """ + Attributes: + nodes: the subject nodes + """ + __abstractmethods__: ClassVar[frozenset] = ... + _abc_impl: ClassVar[_abc._abc_data] = ... + def __init__(self, *nodes: Hashable, **kwargs) -> None: ... + +class _GSCSingleNode(_GSCUsingNodes): + + """ + Attributes: + node: the subject node + """ + __abstractmethods__: ClassVar[frozenset] = ... + _abc_impl: ClassVar[_abc._abc_data] = ... + def __init__(self, node: Hashable, **kwargs) -> None: ... + @property + def node(self): ... + +class _GSCWithOperations(_GSCUsingNodes): + + """ + Args: + owner_senders: `Operation` that determines how the original + senders of `owner ` (the Operation + source) combine with the union of all original senders of + all subject `nodes <_GSCUsingNodes.nodes>` (the + Operation comparison) to produce the new set of senders of + `owner ` after `modify_graph` + owner_receivers: `Operation` that determines how the + original receivers of `owner ` (the + Operation source) combine with the union of all original + receivers of all subject `nodes + <_GSCUsingNodes.nodes>` (the Operation comparison) + to produce the new set of receivers of `owner + ` after `modify_graph` + subject_senders: `Operation` that determines how the + original senders for each of the subject `nodes + <_GSCUsingNodes.nodes>` (the Operation source) combine with + the original senders of `owner ` (the + Operation comparison) to produce the new set of senders for + the subject `nodes <_GSCUsingNodes.nodes>` after + `modify_graph`. Operations are applied individually to each + subject node, and this argument may also be specified as a + dictionary mapping nodes to separate operations. 
+ subject_receivers: `Operation` that determines how the + original receivers for each of the subject `nodes + <_GSCUsingNodes.nodes>` (the Operation source) combine with + the original receivers of `owner ` (the + Operation comparison) to produce the new set of receivers + for the subject `nodes <_GSCUsingNodes.nodes>` after + `modify_graph`. Operations are applied individually to each + subject node, and this argument may also be specified as a + dictionary mapping nodes to separate operations. + reconnect_non_subject_receivers: If True, `modify_graph` + will create an edge from all prior senders of `owner` to + all receivers of `owner` that are not in `nodes`, if + there is no longer a path from that sender to that + receiver. + Defaults to True. + remove_new_self_referential_edges: If True, `modify_graph` + will remove any newly-created edges from a node to + itself. + Defaults to True. + prune_cycles: If True, `modify_graph` will attempt to prune + any newly-created cycles, preferring to remove edges + adjacent to `owner` that affect the placement of `owner` + more than any subject `node `. + Defaults to True. + ignore_conflicts: If True, when any two operations give + different results for the new senders and receivers of a + node in `modify_graph`, an error will not be raised. + Defaults to False. + + Attributes: + nodes: the subject nodes + """ + __abstractmethods__: ClassVar[frozenset] = ... + _abc_impl: ClassVar[_abc._abc_data] = ... + def __init__(self, *nodes: Hashable, owner_senders: Operation | str = ..., owner_receivers: Operation | str = ..., subject_senders: SubjectOperation = ..., subject_receivers: SubjectOperation = ..., reconnect_non_subject_receivers: bool = ..., remove_new_self_referential_edges: bool = ..., prune_cycles: bool = ..., ignore_conflicts: bool = ..., **kwargs) -> None: ... + +class _GSCReposition(_GSCUsingNodes): + _already_valid_message: ClassVar[str] = ... + __abstractmethods__: ClassVar[frozenset] = ... + _abc_impl: ClassVar[_abc._abc_data] = ... + def __init__(self, *nodes: Hashable, **kwargs) -> None: ... + +class BeforeNodes(_GSCReposition, _GSCWithOperations): + + """ + Adds a dependency from the owner to each of the specified nodes and + optionally modifies the senders and receivers of all affected nodes + + Args: + owner_senders: `Operation` that determines how the original + senders of `owner ` (the Operation + source) combine with the union of all original senders of + all subject `nodes <_GSCUsingNodes.nodes>` (the + Operation comparison) to produce the new set of senders of + `owner ` after `modify_graph` + owner_receivers: `Operation` that determines how the + original receivers of `owner ` (the + Operation source) combine with the union of all original + receivers of all subject `nodes + <_GSCUsingNodes.nodes>` (the Operation comparison) + to produce the new set of receivers of `owner + ` after `modify_graph` + subject_senders: `Operation` that determines how the + original senders for each of the subject `nodes + <_GSCUsingNodes.nodes>` (the Operation source) combine with + the original senders of `owner ` (the + Operation comparison) to produce the new set of senders for + the subject `nodes <_GSCUsingNodes.nodes>` after + `modify_graph`. Operations are applied individually to each + subject node, and this argument may also be specified as a + dictionary mapping nodes to separate operations. 
+ subject_receivers: `Operation` that determines how the + original receivers for each of the subject `nodes + <_GSCUsingNodes.nodes>` (the Operation source) combine with + the original receivers of `owner ` (the + Operation comparison) to produce the new set of receivers + for the subject `nodes <_GSCUsingNodes.nodes>` after + `modify_graph`. Operations are applied individually to each + subject node, and this argument may also be specified as a + dictionary mapping nodes to separate operations. + reconnect_non_subject_receivers: If True, `modify_graph` + will create an edge from all prior senders of `owner` to + all receivers of `owner` that are not in `nodes`, if + there is no longer a path from that sender to that + receiver. + Defaults to True. + remove_new_self_referential_edges: If True, `modify_graph` + will remove any newly-created edges from a node to + itself. + Defaults to True. + prune_cycles: If True, `modify_graph` will attempt to prune + any newly-created cycles, preferring to remove edges + adjacent to `owner` that affect the placement of `owner` + more than any subject `node `. + Defaults to True. + ignore_conflicts: If True, when any two operations give + different results for the new senders and receivers of a + node in `modify_graph`, an error will not be raised. + Defaults to False. + + Attributes: + nodes: the subject nodes + """ + _already_valid_message: ClassVar[str] = ... + __abstractmethods__: ClassVar[frozenset] = ... + _abc_impl: ClassVar[_abc._abc_data] = ... + def __init__(self, *nodes, owner_senders: Operation | str = ..., owner_receivers: Operation | str = ..., subject_senders: SubjectOperation = ..., subject_receivers: SubjectOperation = ..., reconnect_non_subject_receivers: bool = ..., remove_new_self_referential_edges: bool = ..., prune_cycles: bool = ..., ignore_conflicts: bool = ...) -> None: ... + +class BeforeNode(BeforeNodes, _GSCSingleNode): + + """ + Adds a dependency from the owner to the specified node and + optionally modifies the senders and receivers of both + + Args: + owner_senders: `Operation` that determines how the original + senders of `owner ` (the Operation + source) combine with the union of all original senders of + all subject `nodes <_GSCUsingNodes.nodes>` (the + Operation comparison) to produce the new set of senders of + `owner ` after `modify_graph` + owner_receivers: `Operation` that determines how the + original receivers of `owner ` (the + Operation source) combine with the union of all original + receivers of all subject `nodes + <_GSCUsingNodes.nodes>` (the Operation comparison) + to produce the new set of receivers of `owner + ` after `modify_graph` + subject_senders: `Operation` that determines how the + original senders for each of the subject `nodes + <_GSCUsingNodes.nodes>` (the Operation source) combine with + the original senders of `owner ` (the + Operation comparison) to produce the new set of senders for + the subject `nodes <_GSCUsingNodes.nodes>` after + `modify_graph`. Operations are applied individually to each + subject node, and this argument may also be specified as a + dictionary mapping nodes to separate operations. + subject_receivers: `Operation` that determines how the + original receivers for each of the subject `nodes + <_GSCUsingNodes.nodes>` (the Operation source) combine with + the original receivers of `owner ` (the + Operation comparison) to produce the new set of receivers + for the subject `nodes <_GSCUsingNodes.nodes>` after + `modify_graph`. 
Operations are applied individually to each + subject node, and this argument may also be specified as a + dictionary mapping nodes to separate operations. + reconnect_non_subject_receivers: If True, `modify_graph` + will create an edge from all prior senders of `owner` to + all receivers of `owner` that are not in `nodes`, if + there is no longer a path from that sender to that + receiver. + Defaults to True. + remove_new_self_referential_edges: If True, `modify_graph` + will remove any newly-created edges from a node to + itself. + Defaults to True. + prune_cycles: If True, `modify_graph` will attempt to prune + any newly-created cycles, preferring to remove edges + adjacent to `owner` that affect the placement of `owner` + more than any subject `node `. + Defaults to True. + ignore_conflicts: If True, when any two operations give + different results for the new senders and receivers of a + node in `modify_graph`, an error will not be raised. + Defaults to False. + + Attributes: + nodes: the subject nodes + + Attributes: + node: the subject node + """ + __abstractmethods__: ClassVar[frozenset] = ... + _abc_impl: ClassVar[_abc._abc_data] = ... + def __init__(self, *nodes, owner_senders: Operation | str = ..., owner_receivers: Operation | str = ..., subject_senders: SubjectOperation = ..., subject_receivers: SubjectOperation = ..., reconnect_non_subject_receivers: bool = ..., remove_new_self_referential_edges: bool = ..., prune_cycles: bool = ..., ignore_conflicts: bool = ...) -> None: ... + +class WithNode(_GSCReposition, _GSCWithOperations, _GSCSingleNode): + + """ + Adds a dependency from each of the senders of both the owner and the + specified node to both the owner and the specified node, and + optionally modifies the receivers of both + + Args: + owner_senders: `Operation` that determines how the original + senders of `owner ` (the Operation + source) combine with the union of all original senders of + all subject `nodes <_GSCUsingNodes.nodes>` (the + Operation comparison) to produce the new set of senders of + `owner ` after `modify_graph` + owner_receivers: `Operation` that determines how the + original receivers of `owner ` (the + Operation source) combine with the union of all original + receivers of all subject `nodes + <_GSCUsingNodes.nodes>` (the Operation comparison) + to produce the new set of receivers of `owner + ` after `modify_graph` + subject_senders: `Operation` that determines how the + original senders for each of the subject `nodes + <_GSCUsingNodes.nodes>` (the Operation source) combine with + the original senders of `owner ` (the + Operation comparison) to produce the new set of senders for + the subject `nodes <_GSCUsingNodes.nodes>` after + `modify_graph`. Operations are applied individually to each + subject node, and this argument may also be specified as a + dictionary mapping nodes to separate operations. + subject_receivers: `Operation` that determines how the + original receivers for each of the subject `nodes + <_GSCUsingNodes.nodes>` (the Operation source) combine with + the original receivers of `owner ` (the + Operation comparison) to produce the new set of receivers + for the subject `nodes <_GSCUsingNodes.nodes>` after + `modify_graph`. Operations are applied individually to each + subject node, and this argument may also be specified as a + dictionary mapping nodes to separate operations. 
+ reconnect_non_subject_receivers: If True, `modify_graph` + will create an edge from all prior senders of `owner` to + all receivers of `owner` that are not in `nodes`, if + there is no longer a path from that sender to that + receiver. + Defaults to True. + remove_new_self_referential_edges: If True, `modify_graph` + will remove any newly-created edges from a node to + itself. + Defaults to True. + prune_cycles: If True, `modify_graph` will attempt to prune + any newly-created cycles, preferring to remove edges + adjacent to `owner` that affect the placement of `owner` + more than any subject `node `. + Defaults to True. + ignore_conflicts: If True, when any two operations give + different results for the new senders and receivers of a + node in `modify_graph`, an error will not be raised. + Defaults to False. + + Attributes: + nodes: the subject nodes + + Attributes: + node: the subject node + """ + _already_valid_message: ClassVar[str] = ... + __abstractmethods__: ClassVar[frozenset] = ... + _abc_impl: ClassVar[_abc._abc_data] = ... + def __init__(self, node, owner_receivers: Operation | str = ..., subject_receivers: SubjectOperation = ..., reconnect_non_subject_receivers: bool = ..., remove_new_self_referential_edges: bool = ..., prune_cycles: bool = ..., ignore_conflicts: bool = ...) -> None: ... + +class AfterNodes(_GSCReposition, _GSCWithOperations): + + """ + Adds a dependency from each of the specified nodes to the owner + and optionally modifies the senders and receivers of all + affected nodes + + Args: + owner_senders: `Operation` that determines how the original + senders of `owner ` (the Operation + source) combine with the union of all original senders of + all subject `nodes <_GSCUsingNodes.nodes>` (the + Operation comparison) to produce the new set of senders of + `owner ` after `modify_graph` + owner_receivers: `Operation` that determines how the + original receivers of `owner ` (the + Operation source) combine with the union of all original + receivers of all subject `nodes + <_GSCUsingNodes.nodes>` (the Operation comparison) + to produce the new set of receivers of `owner + ` after `modify_graph` + subject_senders: `Operation` that determines how the + original senders for each of the subject `nodes + <_GSCUsingNodes.nodes>` (the Operation source) combine with + the original senders of `owner ` (the + Operation comparison) to produce the new set of senders for + the subject `nodes <_GSCUsingNodes.nodes>` after + `modify_graph`. Operations are applied individually to each + subject node, and this argument may also be specified as a + dictionary mapping nodes to separate operations. + subject_receivers: `Operation` that determines how the + original receivers for each of the subject `nodes + <_GSCUsingNodes.nodes>` (the Operation source) combine with + the original receivers of `owner ` (the + Operation comparison) to produce the new set of receivers + for the subject `nodes <_GSCUsingNodes.nodes>` after + `modify_graph`. Operations are applied individually to each + subject node, and this argument may also be specified as a + dictionary mapping nodes to separate operations. + reconnect_non_subject_receivers: If True, `modify_graph` + will create an edge from all prior senders of `owner` to + all receivers of `owner` that are not in `nodes`, if + there is no longer a path from that sender to that + receiver. + Defaults to True. + remove_new_self_referential_edges: If True, `modify_graph` + will remove any newly-created edges from a node to + itself. + Defaults to True. 
+ prune_cycles: If True, `modify_graph` will attempt to prune + any newly-created cycles, preferring to remove edges + adjacent to `owner` that affect the placement of `owner` + more than any subject `node `. + Defaults to True. + ignore_conflicts: If True, when any two operations give + different results for the new senders and receivers of a + node in `modify_graph`, an error will not be raised. + Defaults to False. + + Attributes: + nodes: the subject nodes + """ + _already_valid_message: ClassVar[str] = ... + __abstractmethods__: ClassVar[frozenset] = ... + _abc_impl: ClassVar[_abc._abc_data] = ... + def __init__(self, *nodes, owner_senders: Operation | str = ..., owner_receivers: Operation | str = ..., subject_senders: SubjectOperation = ..., subject_receivers: SubjectOperation = ..., reconnect_non_subject_receivers: bool = ..., remove_new_self_referential_edges: bool = ..., prune_cycles: bool = ..., ignore_conflicts: bool = ...) -> None: ... + +class AfterNode(AfterNodes, _GSCSingleNode): + + """ + Adds a dependency from the specified node to the owner and + optionally modifies the senders and receivers of both + + Args: + owner_senders: `Operation` that determines how the original + senders of `owner ` (the Operation + source) combine with the union of all original senders of + all subject `nodes <_GSCUsingNodes.nodes>` (the + Operation comparison) to produce the new set of senders of + `owner ` after `modify_graph` + owner_receivers: `Operation` that determines how the + original receivers of `owner ` (the + Operation source) combine with the union of all original + receivers of all subject `nodes + <_GSCUsingNodes.nodes>` (the Operation comparison) + to produce the new set of receivers of `owner + ` after `modify_graph` + subject_senders: `Operation` that determines how the + original senders for each of the subject `nodes + <_GSCUsingNodes.nodes>` (the Operation source) combine with + the original senders of `owner ` (the + Operation comparison) to produce the new set of senders for + the subject `nodes <_GSCUsingNodes.nodes>` after + `modify_graph`. Operations are applied individually to each + subject node, and this argument may also be specified as a + dictionary mapping nodes to separate operations. + subject_receivers: `Operation` that determines how the + original receivers for each of the subject `nodes + <_GSCUsingNodes.nodes>` (the Operation source) combine with + the original receivers of `owner ` (the + Operation comparison) to produce the new set of receivers + for the subject `nodes <_GSCUsingNodes.nodes>` after + `modify_graph`. Operations are applied individually to each + subject node, and this argument may also be specified as a + dictionary mapping nodes to separate operations. + reconnect_non_subject_receivers: If True, `modify_graph` + will create an edge from all prior senders of `owner` to + all receivers of `owner` that are not in `nodes`, if + there is no longer a path from that sender to that + receiver. + Defaults to True. + remove_new_self_referential_edges: If True, `modify_graph` + will remove any newly-created edges from a node to + itself. + Defaults to True. + prune_cycles: If True, `modify_graph` will attempt to prune + any newly-created cycles, preferring to remove edges + adjacent to `owner` that affect the placement of `owner` + more than any subject `node `. + Defaults to True. 
+ ignore_conflicts: If True, when any two operations give + different results for the new senders and receivers of a + node in `modify_graph`, an error will not be raised. + Defaults to False. + + Attributes: + nodes: the subject nodes + + Attributes: + node: the subject node + """ + __abstractmethods__: ClassVar[frozenset] = ... + _abc_impl: ClassVar[_abc._abc_data] = ... + def __init__(self, *nodes, owner_senders: Operation | str = ..., owner_receivers: Operation | str = ..., subject_senders: SubjectOperation = ..., subject_receivers: SubjectOperation = ..., reconnect_non_subject_receivers: bool = ..., remove_new_self_referential_edges: bool = ..., prune_cycles: bool = ..., ignore_conflicts: bool = ...) -> None: ... + +class AddEdgeTo(_GSCSingleNode): + + """ + Adds an edge from `AddEdgeTo.owner ` to + `AddEdgeTo.node` + + Attributes: + node: the subject node + """ + __abstractmethods__: ClassVar[frozenset] = ... + _abc_impl: ClassVar[_abc._abc_data] = ... + def __init__(self, node: Hashable) -> None: ... + +class RemoveEdgeFrom(_GSCSingleNode): + + """ + Removes an edge from `RemoveEdgeFrom.node` to `RemoveEdgeFrom.owner + ` + + Attributes: + node: the subject node + """ + __abstractmethods__: ClassVar[frozenset] = ... + _abc_impl: ClassVar[_abc._abc_data] = ... + def __init__(self, node: Hashable) -> None: ... From 24f682304040171a9b110d533cb2deb1efd8ce30 Mon Sep 17 00:00:00 2001 From: jdcpni Date: Fri, 25 Oct 2024 14:34:54 -0400 Subject: [PATCH 352/410] Fix/one hot max indicator (#3079) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit • emcomposition.py - field_inputs_nodes -> input_nodes_by_fields - refactor naming of retrieval nodes to use input_nodes_by_fields - revise naming of retrieval nodes to use name_without_suffix for reference - WEIGHTED -> WEIGHTED_AVG * • port.py: _parse_port_spec(): enhance error message for bad MECHANISM entry in modulatory param specification * • lcamechanism.py: make matrix, auto, hetero and competition params non-modulable until that is implemented * • selectionfunctions.py - OneHot._function: add ARG_MAX, ARG_MAX_INDICATOR, ARG_MIN, ARG_MIN_INDICATOR * • selectionfunctions.py - OneHot: add arg_max, arg_max_indicator, arg_min, and arg_min_indicator options - move ARG_XXX keywords to keywords.py * • transferfunctions.py - OneHot: - add ARG_MAX_ABS, ARG_MAX_ABS_INDICATOR, ARG_MIN_ABS, ARG_MIN_ABS_INDICATOR - change default mode to ARG_MAX - relabel options for _gen_llvm from XXX_VAL to ARG_XXX • memoryfunctions.py - DictionaryMemory._gen_llvm_function_body: replace MIN_VAL with ARG_MIN in selection.function.mode * • test_emcomposition.py test warning for ARG_MAX or PROBABILISTIC with enable_learning = True • test_selection.py - test_basic: add tests of OneHot for ARG_XXX - exclude LLVM tests for XXX_VAL - test now use llvm_not_implemented for OneHot modes not supported by LLVM • test_transfer.py - test_basic: add tests of SoftMax for ARG_XXX - exclude LLVM tests for XXX__VAL --- .../EGO Model - CSW with Simple Integrator.py | 3 +- .../EGO/Using EMComposition/ScriptControl.py | 4 +- .../EGO/Using EMComposition/TestParams.py | 6 +- .../nonstateful/selectionfunctions.py | 179 +++++++++++++----- .../nonstateful/transferfunctions.py | 52 ++--- .../functions/stateful/memoryfunctions.py | 25 ++- .../processing/processingmechanism.py | 11 +- psyneulink/core/globals/keywords.py | 23 ++- psyneulink/core/globals/registry.py | 9 + .../processing/transfer/lcamechanism.py | 7 +- .../library/compositions/emcomposition.py | 100 
+++++----- tests/composition/test_emcomposition.py | 35 ++-- tests/functions/test_selection.py | 37 ++-- tests/functions/test_transfer.py | 25 ++- tests/mechanisms/test_episodic_memory.py | 1 + tests/mechanisms/test_processing_mechanism.py | 2 + 16 files changed, 356 insertions(+), 163 deletions(-) diff --git a/Scripts/Models (Under Development)/EGO/Using EMComposition/EGO Model - CSW with Simple Integrator.py b/Scripts/Models (Under Development)/EGO/Using EMComposition/EGO Model - CSW with Simple Integrator.py index 5cb51d00181..acb3189d810 100644 --- a/Scripts/Models (Under Development)/EGO/Using EMComposition/EGO Model - CSW with Simple Integrator.py +++ b/Scripts/Models (Under Development)/EGO/Using EMComposition/EGO Model - CSW with Simple Integrator.py @@ -147,7 +147,6 @@ import_module(MODEL_PARAMS) model_params = import_module(MODEL_PARAMS).model_params - #region TASK ENVIRONMENT # ====================================================================================================================== # TASK ENVIRONMENT @@ -173,7 +172,7 @@ EMFieldsIndex = IntEnum('EMFields', ['STATE', 'CONTEXT', - 'PREVIOUS_STATE'], + 'PREVIOUS STATE'], start=0) state_retrieval_weight = 0 RANDOM_WEIGHTS_INITIALIZATION=RandomMatrix(center=0.0, range=0.1) # Matrix spec used to initialize all Projections diff --git a/Scripts/Models (Under Development)/EGO/Using EMComposition/ScriptControl.py b/Scripts/Models (Under Development)/EGO/Using EMComposition/ScriptControl.py index 1d5a22bb892..d448f79c29e 100644 --- a/Scripts/Models (Under Development)/EGO/Using EMComposition/ScriptControl.py +++ b/Scripts/Models (Under Development)/EGO/Using EMComposition/ScriptControl.py @@ -2,8 +2,8 @@ # Settings for running script: -# MODEL_PARAMS = 'TestParams' -MODEL_PARAMS = 'DeclanParams' +MODEL_PARAMS = 'TestParams' +# MODEL_PARAMS = 'DeclanParams' CONSTRUCT_MODEL = True # THIS MUST BE SET TO True to run the script DISPLAY_MODEL = ( # Only one of the following can be uncommented: diff --git a/Scripts/Models (Under Development)/EGO/Using EMComposition/TestParams.py b/Scripts/Models (Under Development)/EGO/Using EMComposition/TestParams.py index 39a4c9ccbc3..ae259795c1d 100644 --- a/Scripts/Models (Under Development)/EGO/Using EMComposition/TestParams.py +++ b/Scripts/Models (Under Development)/EGO/Using EMComposition/TestParams.py @@ -18,8 +18,8 @@ memory_capacity = ALL, # number of entries in EM memory; ALL=> match to number of stims memory_init = (0,.0001), # Initialize memory with random values in interval # memory_init = None, # Initialize with zeros - # concatenate_keys = False, - concatenate_keys = True, + concatenate_keys = False, + # concatenate_keys = True, # environment # curriculum_type = 'Interleaved', @@ -40,6 +40,8 @@ # softmax_threshold = None, # threshold used to mask out small values in softmax softmax_threshold = .001, # threshold used to mask out small values in softmax enable_learning=[True, False, False], # Enable learning for PREDICTION (STATE) but not CONTEXT or PREVIOUS STATE + # enable_learning=[True, True, True], # Enable learning for PREDICTION (STATE) but not CONTEXT or PREVIOUS STATE + # enable_learning=False, learn_field_weights = False, loss_spec = Loss.BINARY_CROSS_ENTROPY, # loss_spec = Loss.MSE, diff --git a/psyneulink/core/components/functions/nonstateful/selectionfunctions.py b/psyneulink/core/components/functions/nonstateful/selectionfunctions.py index 55ac699e7c9..50d10e80ab7 100644 --- a/psyneulink/core/components/functions/nonstateful/selectionfunctions.py +++ 
b/psyneulink/core/components/functions/nonstateful/selectionfunctions.py @@ -14,7 +14,6 @@ COMMENT: * TBI Threshold -* TBI MaxVal * `KWTA` COMMENT @@ -22,7 +21,7 @@ """ -__all__ = ['SelectionFunction', 'OneHot', 'max_vs_avg', 'max_vs_next', 'MAX_VS_NEXT', 'MAX_VS_AVG'] +__all__ = ['SelectionFunction', 'OneHot', 'max_vs_avg', 'max_vs_next'] import numpy as np from beartype import beartype @@ -36,15 +35,22 @@ _random_state_getter, _seed_setter, ) from psyneulink.core.globals.keywords import \ - MAX_VAL, MAX_ABS_VAL, MAX_INDICATOR, MAX_ABS_INDICATOR, MIN_VAL, MIN_ABS_VAL, MIN_INDICATOR, MIN_ABS_INDICATOR, \ - MODE, ONE_HOT_FUNCTION, PROB, PROB_INDICATOR, SELECTION_FUNCTION_TYPE, PREFERENCE_SET_NAME + (ARG_MAX, ARG_MAX_ABS, ARG_MAX_ABS_INDICATOR, ARG_MAX_INDICATOR, + ARG_MIN, ARG_MIN_ABS, ARG_MIN_ABS_INDICATOR, ARG_MIN_INDICATOR, + MAX_ABS_INDICATOR, MAX_ABS_VAL, MAX_INDICATOR, MAX_VAL, + MIN_ABS_INDICATOR, MIN_ABS_VAL, MIN_INDICATOR, MIN_VAL, + MODE, ONE_HOT_FUNCTION, PREFERENCE_SET_NAME, PROB, PROB_INDICATOR, + SELECTION_FUNCTION_TYPE) + from psyneulink.core.globals.parameters import Parameter, check_user_specified from psyneulink.core.globals.preferences.basepreferenceset import \ REPORT_OUTPUT_PREF, PreferenceEntry, PreferenceLevel, ValidPrefSet - -MAX_VS_NEXT = 'max_vs_next' -MAX_VS_AVG = 'max_vs_avg' +options = [ ARG_MAX, ARG_MAX_ABS, ARG_MAX_INDICATOR, ARG_MAX_ABS_INDICATOR, + MAX_VAL, MAX_ABS_VAL, MAX_INDICATOR, MAX_ABS_INDICATOR, + ARG_MIN, ARG_MIN_ABS, ARG_MIN_INDICATOR, ARG_MIN_ABS_INDICATOR, + MIN_VAL, MIN_ABS_VAL, MIN_INDICATOR, MIN_ABS_INDICATOR, + PROB, PROB_INDICATOR] # FIX: IMPLEMENT AS Functions def max_vs_next(x): @@ -79,32 +85,71 @@ class OneHot(SelectionFunction): ) Return an array with one non-zero value. + COMMENT: + TBI: + refactor to have four parameters: (can continue to use KEYWORDS INTERNALLY and for LLVM) + extremum: max/min + value: scalar/indicator + ties: lowest/highest/all (re: indices) + prob: True/False (if True, ties are resolved probabilistically) + COMMENT .. _OneHot: `function ` returns an array the same length as the first item in `variable `, - with all of its values zeroed except one identified in first item `variable ` as specified by - `mode `: + with all of its values zeroed except one, unless there are ties, which are handled according to the choice of + `mode `, as follows: + + * *ARG_MAX*: signed value of a single element with the maximum signed value, + or the one with lowest index if there are ties. + + * *ARG_MAX_ABS*: absolute value of a single element with the maximum absolute value, + or the one with lowest index if there are ties. + + * *ARG_MAX_INDICATOR*: 1 in place of single element with maximum signed value, + or the one with lowest index if there are ties. + + * *ARG_MAX_ABS_INDICATOR*: 1 in place of single element with maximum absolute value, + or the one with lowest index if there are ties. + + * *MAX_VAL*: signed value of the element with the maximum signed value, + or all elements with the maximum value if there are ties. + + * *MAX_ABS_VAL*: absolute value of the element with the maximum absolute value, + or all elements with the maximum value if there are ties. - * *MAX_VAL*: signed value of the element with the maximum signed value; + * *MAX_INDICATOR*: 1 in place of the element with the maximum signed value, + or all elements with the maximum value if there are ties. 
- * *MAX_ABS_VAL*: absolute value of the element with the maximum absolute value; + * *MAX_ABS_INDICATOR*: 1 in place of the element(s) with the maximum absolute value, + or all elements with the maximum value if there are ties. - * *MAX_INDICATOR*: 1 in place of the element with the maximum signed value; + * *ARG_MIN*: signed value of a single element with the minium signed value, + or the one with lowest index if there are ties. - * *MAX_ABS_INDICATOR*: 1 in place of the element with the maximum absolute value; + * *ARG_MIN_ABS*: absolute value of a single element with the minium absolute value, + or the one with lowest index if there are ties. - * *MIN_VAL*: signed value of the element with the minimum signed value; + * *ARG_MIN_INDICATOR*: 1 in place of single element with minimum signed value, + or the one with lowest index if there are ties. - * *MIN_ABS_VAL*: absolute value of element with the minimum absolute value; + * *MIN_VAL*: signed value of the element with the minimum signed value, + or all elements with the minimum value if there are ties. - * *MIN_INDICATOR*: 1 in place of the element with the minimum signed value; + * *MIN_ABS_VAL*: absolute value of element with the minimum absolute value, + or all elements with the minimum value if there are ties. - * *MIN_ABS_INDICATOR*: 1 in place of the element with the minimum absolute value; + * *MIN_INDICATOR*: 1 in place of the element with the minimum signed value, + or all elements with the minimum value if there are ties. + + * *MIN_ABS_INDICATOR*: 1 in place of the element with the minimum absolute value, + or all elements with the minimum value if there are ties. * *PROB*: value of probabilistically chosen element based on probabilities passed in second item of variable; + if there are ties, a single element is chosen probabilistically. - * *PROB_INDICATOR*: same as *PROB* but chosen item is assigned a value of 1. + * *PROB_INDICATOR*: same as *PROB* but chosen item is assigned a value of 1; + if there are ties, a single element is chosen probabilistically. Arguments @@ -114,9 +159,12 @@ class OneHot(SelectionFunction): First (possibly only) item specifies a template for the array to be transformed; if `mode ` is *PROB* then a 2nd item must be included that is a probability distribution with same length as 1st item. - mode : MAX_VAL, MAX_ABS_VAL, MAX_INDICATOR, MAX_ABS_INDICATOR, MIN_VAL, MIN_ABS_VAL, MIN_INDICATOR, - MIN_ABS_INDICATOR, PROB or PROB_INDICATOR : default MAX_VAL - specifies the nature of the single non-zero value in the array returned by `function ` + mode : ARG_MAX, ARG_MAX_ABS, ARG_MAX_INDICATOR, ARG_MAX_ABS_INDICATOR, + MAX_VAL, MAX_ABS_VAL, MAX_INDICATOR, MAX_ABS_INDICATOR, + ARG_MIN, ARG_MIN_ABS, ARG_MIN_INDICATOR, ARG_MIN_ABS_INDICATOR, + MIN_VAL, MIN_ABS_VAL, MIN_INDICATOR, MIN_ABS_INDICATOR, + PROB or PROB_INDICATOR : default ARG_MAX + specifies how the single non-zero value in the array returned by `function ` is determined (see `mode ` for details). params : Dict[param keyword: param value] : default None @@ -143,9 +191,12 @@ class OneHot(SelectionFunction): distribution, each element of which specifies the probability for selecting the corresponding element of the 1st item. 
- mode : MAX_VAL, MAX_ABS_VAL, MAX_INDICATOR, MAX_ABS_INDICATOR, MIN_VAL, MIN_ABS_VAL, MIN_INDICATOR, - MIN_ABS_INDICATOR, PROB or PROB_INDICATOR - determines the nature of the single non-zero value in the array returned by `function ` + mode : ARG_MAX, ARG_MAX_ABS, ARG_MAX_INDICATOR, ARG_MAX_ABS_INDICATOR, + MAX_VAL, MAX_ABS_VAL, MAX_INDICATOR, MAX_ABS_INDICATOR, + ARG_MIN, ARG_MIN_ABS, ARG_MIN_INDICATOR, ARG_MIN_ABS_INDICATOR, + MIN_VAL, MIN_ABS_VAL, MIN_INDICATOR, MIN_ABS_INDICATOR, + PROB or PROB_INDICATOR + determines how the single non-zero value in the array returned by `function ` is determined (see `above ` for options). random_state : numpy.RandomState @@ -193,13 +244,7 @@ class Parameters(SelectionFunction.Parameters): seed = Parameter(DEFAULT_SEED(), modulable=True, fallback_default=True, setter=_seed_setter) def _validate_mode(self, mode): - options = {MAX_VAL, MAX_ABS_VAL, MAX_INDICATOR, MAX_ABS_INDICATOR, - MIN_VAL, MIN_ABS_VAL, MIN_INDICATOR, MIN_ABS_INDICATOR, - PROB, PROB_INDICATOR} - if mode in options: - # returns None indicating no error message (this is a valid assignment) - return None - else: + if mode not in options: # returns error message return 'not one of {0}'.format(options) @@ -207,9 +252,12 @@ def _validate_mode(self, mode): @beartype def __init__(self, default_variable=None, - mode: Optional[Literal['MAX_VAL', 'MAX_ABS_VAL', 'MAX_INDICATOR', 'MAX_ABS_INDICATOR', - 'MIN_VAL', 'MIN_ABS_VAL', 'MIN_INDICATOR', 'MIN_ABS_INDICATOR', - 'PROB', 'PROB_INDICATOR']] = None, + mode: Optional[Literal[ + ARG_MAX, ARG_MAX_ABS, ARG_MAX_INDICATOR, ARG_MAX_ABS_INDICATOR, + MAX_VAL, MAX_ABS_VAL, MAX_INDICATOR, MAX_ABS_INDICATOR, + ARG_MIN, ARG_MIN_ABS, ARG_MIN_INDICATOR, ARG_MIN_ABS_INDICATOR, + MIN_VAL, MIN_ABS_VAL, MIN_INDICATOR, MIN_ABS_INDICATOR, + PROB, PROB_INDICATOR]] = None, seed=None, params=None, owner=None, @@ -290,49 +338,49 @@ def _gen_llvm_function_body(self, ctx, builder, params, state, arg_in, arg_out, # Allow the first element to win the comparison prev_best = b1.select(is_first, best_ptr.type.pointee(float("NaN")), b1.load(best_ptr)) - if self.mode == MAX_VAL: + if self.mode == ARG_MAX: cmp_op = ">" cmp_prev = prev_best cmp_curr = current val = current - elif self.mode == MAX_ABS_VAL: + elif self.mode == ARG_MAX_ABS: cmp_op = ">" cmp_prev = b1.call(fabs, [prev_best]) cmp_curr = b1.call(fabs, [current]) val = b1.call(fabs, [current]) - elif self.mode == MAX_INDICATOR: + elif self.mode == ARG_MAX_INDICATOR: cmp_op = ">" cmp_prev = prev_best cmp_curr = current val = current.type(1.0) - elif self.mode == MAX_ABS_INDICATOR: + elif self.mode == ARG_MAX_ABS_INDICATOR: cmp_op = ">" cmp_prev = b1.call(fabs, [prev_best]) cmp_curr = b1.call(fabs, [current]) val = current.type(1.0) - elif self.mode == MIN_VAL: + elif self.mode == ARG_MIN: cmp_op = "<" cmp_prev = prev_best cmp_curr = current val = current - elif self.mode == MIN_ABS_VAL: + elif self.mode == ARG_MIN_ABS: cmp_op = "<" cmp_prev = b1.call(fabs, [prev_best]) cmp_curr = b1.call(fabs, [current]) val = b1.call(fabs, [current]) - elif self.mode == MIN_INDICATOR: + elif self.mode == ARG_MIN_INDICATOR: cmp_op = "<" cmp_prev = prev_best cmp_curr = current val = current.type(1.0) - elif self.mode == MIN_ABS_INDICATOR: + elif self.mode == ARG_MIN_ABS_INDICATOR: cmp_op = "<" cmp_prev = b1.call(fabs, [prev_best]) cmp_curr = b1.call(fabs, [current]) @@ -357,7 +405,7 @@ def _gen_llvm_function_body(self, ctx, builder, params, state, arg_in, arg_out, else: val = current.type(1.0) else: - assert False, "Unsupported mode: 
{}".format(self.mode) + assert False, "Unsupported mode in LLVM: {} for OneHot Function".format(self.mode) prev_res_ptr = b1.gep(arg_out, [ctx.int32_ty(0), best_idx]) cur_res_ptr = b1.gep(arg_out, [ctx.int32_ty(0), idx]) @@ -400,8 +448,27 @@ def _function(self, """ - - if self.mode == MAX_VAL: + if self.mode == ARG_MAX: + max_idx = np.argmax(variable) + result = np.zeros_like(variable) + result[max_idx] = variable[max_idx] + + elif self.mode == ARG_MAX_ABS: + max_idx = np.argmax(np.absolute(variable)) + result = np.zeros_like(variable) + result[max_idx] = np.absolute(variable[max_idx]) + + elif self.mode == ARG_MAX_INDICATOR: + max_idx = np.argmax(variable) + result = np.zeros_like(variable) + result[max_idx] = 1 + + elif self.mode == ARG_MAX_ABS_INDICATOR: + max_idx = np.argmax(np.absolute(variable)) + result = np.zeros_like(variable) + result[max_idx] = 1 + + elif self.mode == MAX_VAL: max_value = np.max(variable) result = np.where(variable == max_value, variable, 0) @@ -417,7 +484,27 @@ def _function(self, max_value = np.max(np.absolute(variable)) result = np.where(np.absolute(variable) == max_value, 1, 0) - if self.mode == MIN_VAL: + elif self.mode == ARG_MIN: + max_idx = np.argmin(variable) + result = np.zeros_like(variable) + result[max_idx] = variable[max_idx] + + elif self.mode == ARG_MIN_ABS: + max_idx = np.argmin(np.absolute(variable)) + result = np.zeros_like(variable) + result[max_idx] = np.absolute(variable[max_idx]) + + elif self.mode == ARG_MIN_INDICATOR: + max_idx = np.argmin(variable) + result = np.zeros_like(variable) + result[max_idx] = 1 + + elif self.mode == ARG_MIN_ABS_INDICATOR: + max_idx = np.argmin(np.absolute(variable)) + result = np.zeros_like(variable) + result[max_idx] = 1 + + elif self.mode == MIN_VAL: min_value = np.min(variable) result = np.where(variable == min_value, min_value, 0) diff --git a/psyneulink/core/components/functions/nonstateful/transferfunctions.py b/psyneulink/core/components/functions/nonstateful/transferfunctions.py index 5018ef1bccb..c6b60e192ce 100644 --- a/psyneulink/core/components/functions/nonstateful/transferfunctions.py +++ b/psyneulink/core/components/functions/nonstateful/transferfunctions.py @@ -85,7 +85,7 @@ get_matrix, is_function_type, ) from psyneulink.core.components.functions.nonstateful.combinationfunctions import LinearCombination -from psyneulink.core.components.functions.nonstateful.selectionfunctions import OneHot +from psyneulink.core.components.functions.nonstateful.selectionfunctions import OneHot, ARG_MAX, ARG_MAX_INDICATOR from psyneulink.core.components.functions.stateful.integratorfunctions import SimpleIntegrator from psyneulink.core.components.shellclasses import Projection from psyneulink.core.globals.context import ContextFlags, handle_external_context @@ -2895,6 +2895,9 @@ def _gen_pytorch_fct(self, device, context=None): # SoftMax # ********************************************************************************************************************** +softmax_modes = {ALL, ARG_MAX, ARG_MAX_INDICATOR, MAX_VAL, MAX_INDICATOR, PROB, PROB_INDICATOR} + + class SoftMax(TransferFunction): """ SoftMax( \ @@ -2964,9 +2967,9 @@ class SoftMax(TransferFunction): .. math:: D_jS_i = S_i(\\delta_{i,j} - S_j),\\ where\\ \\delta_{i,j}=1\\ if\\ i=j\\ and\\ \\delta_{i,j}=0\\ if\\ i≠j. - If *OUTPUT_TYPE* is *MAX_VAL* or *MAX_INDICATOR*, returns 1d array of the derivatives of the maximum - value with respect to the others (calculated as above). 
If *OUTPUT_TYPE* is *PROB*, raises an exception - (since it is ambiguous as to which element would have been chosen by the SoftMax function) + If *OUTPUT_TYPE* is *ARG_MAX*, *ARG_MAX_INDICATOR*, *MAX_VAL*, *MAX_INDICATOR*, returns 1d array of the + derivatives of the maximum value(s) with respect to the others (calculated as above). If *OUTPUT_TYPE* is *PROB*, + raises an exception (since it is ambiguous as to which element would have been chosen by the SoftMax function) Arguments --------- @@ -2995,7 +2998,7 @@ class SoftMax(TransferFunction): specifies the *entropy_weighting* parameter using by the `adapt_gain ` method (see method for details). - output : ALL, MAX_VAL, MAX_INDICATOR, or PROB : default ALL + output : ALL, ARG_MAX, ARG_MAX_INDICATOR, MAX_VAL, MAX_INDICATOR, or PROB : default ALL specifies the format of array returned by `function ` (see `output ` for details). @@ -3046,12 +3049,16 @@ class SoftMax(TransferFunction): determines the *entropy_weighting* parameter using by the `adapt_gain ` method (see method for details). - output : ALL, MAX_VAL, MAX_INDICATOR, or PROB + output : ALL, ARG_MAX, ARG_MAX_INDICATOR, MAX_VAL, MAX_INDICATOR, or PROB determines how the SoftMax-transformed values of the elements in `variable ` are reported in the array returned by `function `: * *ALL*: array of all SoftMax-transformed values (the default); - * *MAX_VAL*: SoftMax-transformed value for the element with the maximum such value, 0 for all others; - * *MAX_INDICATOR*: 1 for the element with the maximum SoftMax-transformed value, 0 for all others; + * *ARG_MAX*: 1 for single element with the maximum SoftMax-transformed value, 0 for all others; + (one with lowest index of there are multiple maximum values); + * *ARG_MAX_INDICATOR*: 1 for a single element with the maximum SoftMax-transformed value, 0 for all others; + (one with lowest index of there are multiple maximum values); + * *MAX_VAL*: SoftMax-transformed value for the element(s) with the maximum such value, 0 for all others; + * *MAX_INDICATOR*: 1 for the element(s) with the maximum SoftMax-transformed value, 0 for all others; * *PROB*: probabilistically chosen element based on SoftMax-transformed values after setting the sum of values to 1 (i.e., their `Luce Ratio `_), 0 for all others. @@ -3060,7 +3067,7 @@ class SoftMax(TransferFunction): for 2d variables, determines whether the SoftMax function is applied to the entire variable (per_item = False), or applied to each item in the variable separately (per_item = True). - bounds : None if `output ` == MAX_VAL, else (0,1) : default (0,1) + bounds : None if `output ` in {ARG_MAX, MAX_VAL}, else (0,1) : default (0,1) owner : Component `component ` to which the Function has been assigned. 
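A minimal usage sketch of the distinction documented above (an editorial example, not part of the patch; it assumes the new ARG_MAX keywords are re-exported at the top level of the psyneulink package, as the tests below use them, and that Function instances can be called directly; exact values depend on the gain):

import psyneulink as pnl

# ARG_MAX keeps the softmax-transformed value of a single winning element
# (lowest index on a tie); ARG_MAX_INDICATOR replaces that value with a 1.
sm_val = pnl.SoftMax(output=pnl.ARG_MAX, per_item=False)
sm_ind = pnl.SoftMax(output=pnl.ARG_MAX_INDICATOR, per_item=False)

x = [0.1, 0.5, 2.0, 0.5]
print(sm_val(x))   # single non-zero entry holding the softmax value of x[2]
print(sm_ind(x))   # one-hot: [0., 0., 1., 0.]
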
@@ -3188,11 +3195,8 @@ def _validate_adapt_entropy_weighting(self, adapt_entropy_weighting): return f'must be a scalar greater than 0' def _validate_output(self, output): - options = {ALL, MAX_VAL, MAX_INDICATOR, PROB, PROB_INDICATOR} - if output in options: - return None - else: - return 'not one of {0}'.format(options) + if output not in softmax_modes: + return 'not one of {0}'.format(softmax_modes) @check_user_specified @beartype @@ -3277,7 +3281,7 @@ def apply_softmax(self, input_value, gain, mask_threshold, output_type): sm = v / np.sum(v, axis=0) # Generate one-hot encoding based on selected output_type - if output_type in {MAX_VAL, MAX_INDICATOR}: + if output_type in {ARG_MAX, ARG_MAX_INDICATOR, MAX_VAL, MAX_INDICATOR}: return self.one_hot_function(sm) elif output_type in {PROB, PROB_INDICATOR}: return self.one_hot_function([input_value, sm]) @@ -3352,8 +3356,9 @@ def derivative(self, input=None, output=None, context=None): derivative(output) .. technical note:: - If MAX_VAL is specified for the `output ` parameter, and there is a tie for the maximum - value, the element with the lower index is used to compute the derivative (see IMPLEMENTATION NOTE below). + If ARG_MAX or MAX_VAL is specified for the `output ` parameter, and there is more than one + equivalent maximum value, the element with the lowest index is used to compute the derivative (see + IMPLEMENTATION NOTE below). Returns ------- @@ -3386,11 +3391,11 @@ def derivative(self, input=None, output=None, context=None): else: d = 0 derivative[j, i] = sm[i] * (d - sm[j]) - elif output_type in {MAX_VAL, MAX_INDICATOR}: + elif output_type in {ARG_MAX, ARG_MAX_INDICATOR, MAX_VAL, MAX_INDICATOR}: # Return 1d array of derivatives for max element (i.e., the one chosen by SoftMax) derivative = np.empty(size) # Get the element of output returned as non-zero (max val) when output_type is not ALL - # IMPLEMENTATION NOTES: + # IMPLEMENTATION NOTE: # if there is a tie for max, this chooses the item in sm with the lowest index in sm: index_of_max = int(np.where(sm == np.max(sm))[-1][0]) # the following would randomly choose a value in case of a tie, @@ -3468,7 +3473,7 @@ def __gen_llvm_apply(self, ctx, builder, params, state, arg_in, arg_out, output_ one_hot_out = arg_out one_hot_in = builder.alloca(one_hot_f.args[2].type.pointee) - if output_type in {MAX_VAL, MAX_INDICATOR}: + if output_type in {ARG_MAX, ARG_MAX_INDICATOR}: with pnlvm.helpers.array_ptr_loop(builder, arg_in, "exp_div") as (b, i): self.__gen_llvm_exp_div(ctx=ctx, vi=arg_in, vo=one_hot_in, gain=gain, exp_sum=exp_sum, builder=b, index=i) @@ -3490,7 +3495,7 @@ def __gen_llvm_apply(self, ctx, builder, params, state, arg_in, arg_out, output_ builder.call(one_hot_f, [one_hot_p, one_hot_s, one_hot_in, one_hot_out]) else: - assert False, "Unsupported output in {}: {}".format(self, output_type) + assert False, "Unsupported output in {} for LLVM execution mode: {}".format(self, output_type) return builder @@ -3519,8 +3524,9 @@ def _gen_llvm_function_derivative_body(self, ctx, builder, params, state, arg_in def __gen_llvm_apply_derivative(self, ctx, builder, params, state, all_out, arg_out, *, tags:frozenset): - assert self.output in {MAX_VAL, MAX_INDICATOR}, \ - "Derivative of SoftMax is only implemented for MAX_VAL and MAX_INDICATOR! 
({})".format(self.output) + assert self.output in {ARG_MAX, ARG_MAX_INDICATOR, MAX_VAL, MAX_INDICATOR}, ( + "Derivative of SoftMax is only implemented for ARG_MAX and ARG_MAX_INDICATOR " + "in LLVM execution mode ({})".format(self.output)) max_pos_ptr = builder.alloca(ctx.int32_ty) builder.store(max_pos_ptr.type.pointee(-1), max_pos_ptr) diff --git a/psyneulink/core/components/functions/stateful/memoryfunctions.py b/psyneulink/core/components/functions/stateful/memoryfunctions.py index 47edfa92195..cd97cdb4189 100644 --- a/psyneulink/core/components/functions/stateful/memoryfunctions.py +++ b/psyneulink/core/components/functions/stateful/memoryfunctions.py @@ -39,14 +39,14 @@ DEFAULT_SEED, FunctionError, _random_state_getter, _seed_setter, EPSILON, _noise_setter ) from psyneulink.core.components.functions.nonstateful.objectivefunctions import Distance -from psyneulink.core.components.functions.nonstateful.selectionfunctions import OneHot +from psyneulink.core.components.functions.nonstateful.selectionfunctions import OneHot, ARG_MIN, ARG_MIN_INDICATOR from psyneulink.core.components.functions.nonstateful.transferfunctions import SoftMax from psyneulink.core.components.functions.stateful.integratorfunctions import StatefulFunction from psyneulink.core.globals.context import handle_external_context from psyneulink.core.globals.keywords import \ ADDITIVE_PARAM, BUFFER_FUNCTION, MEMORY_FUNCTION, COSINE, \ ContentAddressableMemory_FUNCTION, DictionaryMemory_FUNCTION, \ - MIN_INDICATOR, MULTIPLICATIVE_PARAM, NEWEST, NOISE, OLDEST, OVERWRITE, RATE, RANDOM, SINGLE, WEIGHTED + MIN_INDICATOR, MIN_VAL, MULTIPLICATIVE_PARAM, NEWEST, NOISE, OLDEST, OVERWRITE, RATE, RANDOM, SINGLE, WEIGHTED from psyneulink.core.globals.parameters import Parameter, check_user_specified, copy_parameter_value from psyneulink.core.globals.preferences.basepreferenceset import ValidPrefSet from psyneulink.core.globals.utilities import \ @@ -2032,7 +2032,7 @@ class DictionaryMemory(MemoryFunction): # ------------------------------------- specifies the function used during retrieval to compare the first item in `variable ` with keys in `memory `. - selection_function : OneHot or function : default OneHot(mode=MIN_VAL) + selection_function : OneHot or function : default OneHot(mode=ARG_MIN_VAL) specifies the function used during retrieval to evaluate the distances returned by `distance_function ` and select the item to return. @@ -2102,7 +2102,7 @@ class DictionaryMemory(MemoryFunction): # ------------------------------------- function used during retrieval to compare the first item in `variable ` with keys in `memory `. - selection_function : OneHot or function : default OneHot(mode=MIN_VAL) + selection_function : OneHot or function : default OneHot(mode=ARG_MIN_VAL) function used during retrieval to evaluate the distances returned by `distance_function ` and select the item(s) to return. 
@@ -2221,7 +2221,7 @@ class Parameters(StatefulFunction.Parameters): selection_function see `selection_function ` - :default value: `OneHot`(mode=MIN_INDICATOR) + :default value: `OneHot`(mode=ARG_MIN_INDICATOR) :type: `Function` storage_prob @@ -2284,6 +2284,8 @@ def __init__(self, retrieval_prob=retrieval_prob, storage_prob=storage_prob, initializer=initializer, + distance_function=distance_function, + selection_function=selection_function, duplicate_keys=duplicate_keys, equidistant_keys_select=equidistant_keys_select, rate=rate, @@ -2397,6 +2399,19 @@ def _gen_llvm_function_body(self, ctx, builder, params, state, arg_in, arg_out, b.call(distance_f, [distance_params, distance_state, distance_arg_in, distance_arg_out]) + # MODIFIED 10/13/24 NEW: + # IMPLEMENTATION NOTE: + # REPLACE MIN_VAL with ARG_MIN and MIN_INDICATOR with ARG_MIN_INDICATOR + # until the MIN_XXX args are implemented in LLVM + # since, at present, the tests don't seem to distinguish between these (i.e., return of multiple values; + # should add tests that do so once MIN_VAL and related args are implemented in LLVM) + if isinstance(self.selection_function, OneHot): + mode = self.selection_function.mode + if mode == MIN_VAL: + self.selection_function.mode = ARG_MIN + elif mode == MIN_INDICATOR: + self.selection_function.mode = ARG_MIN_INDICATOR + # MODIFIED 10/13/24 END selection_f = ctx.import_llvm_function(self.selection_function) selection_params, selection_state = ctx.get_param_or_state_ptr(builder, self, "selection_function", param_struct_ptr=params, state_struct_ptr=state) selection_arg_out = builder.alloca(selection_f.args[3].type.pointee) diff --git a/psyneulink/core/components/mechanisms/processing/processingmechanism.py b/psyneulink/core/components/mechanisms/processing/processingmechanism.py index c2ccbcecab4..bceadd645b3 100644 --- a/psyneulink/core/components/mechanisms/processing/processingmechanism.py +++ b/psyneulink/core/components/mechanisms/processing/processingmechanism.py @@ -188,7 +188,8 @@ import numpy as np from psyneulink.core.components.functions.nonstateful.transferfunctions import SoftMax -from psyneulink.core.components.functions.nonstateful.selectionfunctions import OneHot +from psyneulink.core.components.functions.nonstateful.selectionfunctions import OneHot, ARG_MAX, ARG_MAX_ABS, \ + ARG_MAX_INDICATOR, ARG_MAX_ABS_INDICATOR from psyneulink.core.components.mechanisms.mechanism import Mechanism_Base, Mechanism, MechanismError from psyneulink.core.components.ports.inputport import InputPort from psyneulink.core.components.ports.outputport import OutputPort @@ -254,13 +255,13 @@ class ProcessingMechanism_Base(Mechanism_Base): {NAME: MAX_ABS_VAL, FUNCTION:lambda x: np.max(np.absolute(x))}, {NAME: MAX_ONE_HOT, - FUNCTION: OneHot(mode=MAX_VAL)}, + FUNCTION: OneHot(mode=ARG_MAX)}, {NAME: MAX_ABS_ONE_HOT, - FUNCTION: OneHot(mode=MAX_ABS_VAL)}, + FUNCTION: OneHot(mode=ARG_MAX_ABS)}, {NAME: MAX_INDICATOR, - FUNCTION: OneHot(mode=MAX_INDICATOR)}, + FUNCTION: OneHot(mode=ARG_MAX_INDICATOR)}, {NAME: MAX_ABS_INDICATOR, - FUNCTION: OneHot(mode=MAX_ABS_INDICATOR)}, + FUNCTION: OneHot(mode=ARG_MAX_ABS_INDICATOR)}, {NAME: PROB, VARIABLE: OWNER_VALUE, FUNCTION: SoftMax(output=PROB)}]) diff --git a/psyneulink/core/globals/keywords.py b/psyneulink/core/globals/keywords.py index 4ed8c8335a9..56616e4c1a4 100644 --- a/psyneulink/core/globals/keywords.py +++ b/psyneulink/core/globals/keywords.py @@ -25,9 +25,11 @@ __all__ = [ 'ACCUMULATOR_INTEGRATOR', 'ACCUMULATOR_INTEGRATOR_FUNCTION', - 'ADAPTIVE', 
'ADAPTIVE_INTEGRATOR_FUNCTION', 'ADAPTIVE_MECHANISM', 'ADD_INPUT_PORT', 'ADD_OUTPUT_PORT', - 'ADDITIVE', 'ADDITIVE_PARAM', 'AFTER', 'ALL', 'ALLOCATION_SAMPLES', 'ALLOW_PROBES', 'ANGLE', 'ANGLE_FUNCTION', - 'ANY', 'ARGUMENT_THERAPY_FUNCTION', 'ARRANGEMENT', 'ASSERT', 'ASSIGN', 'ASSIGN_VALUE', 'AUTO','AUTO_ASSIGN_MATRIX', + 'ADAPTIVE', 'ADAPTIVE_INTEGRATOR_FUNCTION', 'ADAPTIVE_MECHANISM', 'ADD_INPUT_PORT', 'ADD_OUTPUT_PORT', 'ADDITIVE', + 'ADDITIVE_PARAM', 'AFTER', 'ALL', 'ALLOCATION_SAMPLES', 'ALLOW_PROBES', 'ANGLE', 'ANGLE_FUNCTION','ANY', + 'ARG_MAX', 'ARG_MAX_ABS', 'ARG_MAX_INDICATOR', 'ARG_MAX_ABS_INDICATOR', + 'ARG_MIN', 'ARG_MIN_ABS', 'ARG_MIN_INDICATOR', 'ARG_MIN_ABS_INDICATOR', + 'ARGUMENT_THERAPY_FUNCTION', 'ARRANGEMENT', 'ASSERT', 'ASSIGN', 'ASSIGN_VALUE', 'AUTO','AUTO_ASSIGN_MATRIX', 'AUTO_ASSOCIATIVE_PROJECTION', 'HAS_INITIALIZERS', 'AUTOASSOCIATIVE_LEARNING_MECHANISM', 'AUTODIFF_COMPOSITION', 'AUTODIFF_RESULTS', 'BACKPROPAGATION_FUNCTION', 'BINOMIAL_DISTORT_FUNCTION', 'BEFORE', 'BETA', 'BIAS', 'BOLD', 'BOTH', 'BOUNDS', 'BUFFER_FUNCTION', @@ -78,7 +80,7 @@ 'MAPPING_PROJECTION', 'MAPPING_PROJECTION_PARAMS', 'MASKED_MAPPING_PROJECTION', 'MATRIX', 'MATRIX_KEYWORD_NAMES', 'MATRIX_KEYWORD_SET', 'MATRIX_KEYWORD_VALUES', 'MATRIX_KEYWORDS','MatrixKeywords', 'MATRIX_WEIGHTS', 'MAX_ABS_DIFF', 'MAX_ABS_INDICATOR', 'MAX_ONE_HOT', 'MAX_ABS_ONE_HOT', 'MAX_ABS_VAL', - 'MAX_EXECUTIONS_BEFORE_FINISHED', 'MAX_INDICATOR', 'MAX_VAL', 'MAYBE', 'MEAN', + 'MAX_VS_NEXT', 'MAX_VS_AVG', 'MAX_EXECUTIONS_BEFORE_FINISHED', 'MAX_INDICATOR', 'MAX_VAL', 'MAYBE', 'MEAN', 'MECHANISM', 'MECHANISM_COMPONENT_CATEGORY', 'MECHANISM_DEFAULT', 'MECHANISM_DEFAULT_INPUT_VALUE', 'MECHANISM_DEFAULTParams', 'MECHANISM_EXECUTED_LOG_ENTRY', 'MECHANISM_NAME', 'MECHANISM_PARAM_VALUE', 'MECHANISM_TYPE', 'MECHANISM_VALUE', 'MEDIAN', 'METRIC', 'MIN_VAL', 'MIN_ABS_VAL', 'MIN_ABS_INDICATOR', 'MINIBATCH', @@ -1135,8 +1137,19 @@ class Loss(Enum): STANDARD_DEVIATION = 'standard_deviation' VARIANCE = 'variance' -# Note: These are used only as names of StandardOutputPorts (hence upper case) +ARG_MAX = 'arg_max' +ARG_MAX_ABS = 'arg_max_abs' +ARG_MAX_INDICATOR = 'arg_max_indicator' +ARG_MAX_ABS_INDICATOR = 'arg_max_abs_indicator' +ARG_MIN = 'arg_min' +ARG_MIN_ABS = 'arg_min_abs' +ARG_MIN_INDICATOR = 'arg_min_indicator' +ARG_MIN_ABS_INDICATOR = 'arg_min_abs_indicator' +MAX_VS_NEXT = 'max_vs_next' +MAX_VS_AVG = 'max_vs_avg' + +# Note: These are used only as names of StandardOutputPorts (hence upper case) MAX_VAL = 'MAX_VAL' MAX_ABS_VAL = 'MAX_ABS_VAL' MAX_ONE_HOT = 'MAX_ONE_HOT' diff --git a/psyneulink/core/globals/registry.py b/psyneulink/core/globals/registry.py index a849dab40a7..8f02ab8d41c 100644 --- a/psyneulink/core/globals/registry.py +++ b/psyneulink/core/globals/registry.py @@ -50,6 +50,15 @@ numeric_suffix_pat = re.compile(r'(.*)-\d+$') +def name_without_suffix(name): + """Return name with any numeric suffix removed""" + match = numeric_suffix_pat.match(name) + if match: + name_stripped_of_suffix = match.groups()[0] + return name_stripped_of_suffix + else: + return name + class RegistryError(Exception): def __init__(self, error_value): diff --git a/psyneulink/library/components/mechanisms/processing/transfer/lcamechanism.py b/psyneulink/library/components/mechanisms/processing/transfer/lcamechanism.py index 48e5292a9d7..c3c04072f1b 100644 --- a/psyneulink/library/components/mechanisms/processing/transfer/lcamechanism.py +++ b/psyneulink/library/components/mechanisms/processing/transfer/lcamechanism.py @@ -196,14 +196,15 @@ from 
psyneulink._typing import Optional, Union from psyneulink.core.components.functions.nonstateful.objectivefunctions import Distance, MAX_ABS_DIFF -from psyneulink.core.components.functions.nonstateful.selectionfunctions import max_vs_avg, max_vs_next, MAX_VS_NEXT, MAX_VS_AVG +from psyneulink.core.components.functions.nonstateful.selectionfunctions import max_vs_avg, max_vs_next from psyneulink.core.components.functions.stateful.integratorfunctions import LeakyCompetingIntegrator from psyneulink.core.components.functions.nonstateful.transferfunctions import Logistic from psyneulink.core.components.mechanisms.mechanism import MechanismError from psyneulink.core.components.mechanisms.processing.transfermechanism import _integrator_mode_setter from psyneulink.core.globals.keywords import \ - CONVERGENCE, FUNCTION, GREATER_THAN_OR_EQUAL, LCA_MECHANISM, LESS_THAN_OR_EQUAL, MATRIX, NAME, \ - RESULT, TERMINATION_THRESHOLD, TERMINATION_MEASURE, TERMINATION_COMPARISION_OP, VALUE, INVERSE_HOLLOW_MATRIX, AUTO + (CONVERGENCE, FUNCTION, GREATER_THAN_OR_EQUAL, LCA_MECHANISM, LESS_THAN_OR_EQUAL, + MATRIX, MAX_VS_NEXT, MAX_VS_AVG, NAME, RESULT, TERMINATION_THRESHOLD, TERMINATION_MEASURE, + TERMINATION_COMPARISION_OP, VALUE, INVERSE_HOLLOW_MATRIX, AUTO) from psyneulink.core.globals.parameters import FunctionParameter, Parameter, check_user_specified from psyneulink.core.globals.preferences.basepreferenceset import ValidPrefSet from psyneulink.library.components.mechanisms.processing.transfer.recurrenttransfermechanism import \ diff --git a/psyneulink/library/compositions/emcomposition.py b/psyneulink/library/compositions/emcomposition.py index 880ea4e80f6..4f9ff7c0b7f 100644 --- a/psyneulink/library/compositions/emcomposition.py +++ b/psyneulink/library/compositions/emcomposition.py @@ -428,12 +428,12 @@ process, but the values of which are retrieved and assigned as the `value ` of the corresponding `retrieved_node `. This distinction between keys and value corresponds to the format of a standard "dictionary," though in that case only a single key and value are allowed, whereas - here there can be one or more keys and any number of values; if all fields are keys, this implements a full - form of content-addressable memory. If **learn_field_weight** is True (and `enable_learning - ` is either True or a list), then the field_weights can be modified - during training (this functions similarly to the attention head of a Transformer model, although at present the - field can only be scalar values rather than vecdtors); if **learn_field_weight** is False, then the field_weights are - fixed. The following options can be used to specify **field_weights**: + here there can be one or more keys and any number of values; if all fields are keys, this implements a full form of + content-addressable memory. If **learn_field_weight** is True (and `enable_learning` + is either True or a list with True for at least one entry), then the field_weights can be modified during training + (this functions similarly to the attention head of a Transformer model, although at present the field can only be + scalar values rather than vecdtors); if **learn_field_weight** is False, then the field_weights are fixed. 
+ The following options can be used to specify **field_weights**: * *None* (the default): all fields except the last are treated as keys, and are weighted equally for retrieval, while the last field is treated as a value field; @@ -536,21 +536,24 @@ ` is used, with the dot products of queries and keys, to generate a retrieved item; the following are the options that can be used and the retrieved value they produce: - * *WEIGHTED*: softmax-weighted average of entries, based on their dot products with the key(s); this is the default; + * *WEIGHTED_AVG* (default): softmax-weighted average of entries, based on their dot products with the key(s). - * *ARG_MAX*: entry with the largest dot product. + * *ARG_MAX*: entry with the largest dot product (one with lowest index in `memory `)\ + if there are identical ones). * *PROBABISTIC*: probabilistically chosen entry based on softmax-transformed distribution of dot products. .. warning:: Use of the *ARG_MAX* and *PROBABILISTIC* options is not compatible with learning, as these implement a discrete choice and thus are not differentiable. Constructing an EMComposition with **softmax_choice** set to either of - these options and **enable_learning** set to True will generate a warning, and calling the EMComposition's - `learn ` method will generate an error; it must be changed to *WEIGHTED* to execute learning. + these options and **enable_learning** set to True (or a list with any True entries) will generate a warning, and + calling the EMComposition's `learn ` method will generate an error; it must be changed to + *WEIGHTED_AVG* to execute learning. .. technical_note:: - The *WEIGHTED* option is passed as *ALL* to the **output** argument of the `SoftMax` Function, *ARG_MAX* is - passed as *MAX_INDICATOR*; *PROBALISTIC* is passed as *PROB_INDICATOR*; and *MAX_VAL* is not currently supported. + The *WEIGHTED_AVG* option is passed as *ALL* to the **output** argument of the `SoftMax` Function, *ARG_MAX* is + passed as *ARG_MAX_INDICATOR*; and *PROBALISTIC* is passed as *PROB_INDICATOR*; the other SoftMax options are + not currently supported. .. _EMComposition_Learning: @@ -748,8 +751,8 @@ *Training* ~~~~~~~~~~ -If `learn ` is called, `enable_learning ` is True or a list with at -least one True entry, then errors will be computed for each of the `retrieved_nodes ` +If `learn ` is called, `enable_learning ` is True or a list with +any True entries, then errors will be computed for each of the `retrieved_nodes ` that is specified for learning (see `Learning ` for details about specification). 
These errors are derived either from any errors backprpated to the EMComposition from an outer Composition in which it is `nested `, or locally by the difference between the `retrieved_nodes ` @@ -997,6 +1000,7 @@ # from psyneulink.library.compositions import torch_available from psyneulink.core.components.functions.nonstateful.transferfunctions import SoftMax, LinearMatrix from psyneulink.core.components.functions.nonstateful.combinationfunctions import Concatenate, LinearCombination +from psyneulink.core.components.functions.nonstateful.selectionfunctions import ARG_MAX, ARG_MAX_INDICATOR from psyneulink.core.components.functions.function import \ DEFAULT_SEED, _random_state_getter, _seed_setter from psyneulink.core.compositions.composition import CompositionError, NodeRole @@ -1014,15 +1018,14 @@ GAIN, IDENTITY_MATRIX, MAX_INDICATOR, MULTIPLICATIVE_PARAM, NAME, PARAMS, PROB_INDICATOR, PRODUCT, PROJECTIONS, RANDOM, SIZE, VARIABLE) from psyneulink.core.globals.utilities import convert_all_elements_to_np_array, is_numeric_scalar -from psyneulink.core.globals.context import ContextFlags +from psyneulink.core.globals.registry import name_without_suffix from psyneulink.core.llvm import ExecutionMode -__all__ = ['EMComposition', 'WEIGHTED', 'ARG_MAX', 'PROBABILISTIC'] +__all__ = ['EMComposition', 'WEIGHTED_AVG', 'PROBABILISTIC'] STORAGE_PROB = 'storage_prob' -WEIGHTED = ALL -ARG_MAX = MAX_INDICATOR +WEIGHTED_AVG = ALL PROBABILISTIC = PROB_INDICATOR QUERY_AFFIX = ' [QUERY]' @@ -1145,7 +1148,7 @@ class EMComposition(AutodiffComposition): specifies the threshold used to mask out small values in the softmax calculation; see *mask_threshold* under `Thresholding and Adaptive Gain ` for details). - softmax_choice : WEIGHTED, ARG_MAX, PROBABILISTIC : default WEIGHTED + softmax_choice : WEIGHTED_AVG, ARG_MAX, PROBABILISTIC : default WEIGHTED_AVG specifies how the softmax over dot products of keys and memories is used for retrieval; see `softmax_choice ` for a description of each option. @@ -1243,7 +1246,7 @@ class EMComposition(AutodiffComposition): determines the threshold used to mask out small values in the softmax calculation; see *mask_threshold* under `Thresholding and Adaptive Gain ` for details). - softmax_choice : WEIGHTED, ARG_MAX or PROBABILISTIC + softmax_choice : WEIGHTED_AVG, ARG_MAX or PROBABILISTIC determines how the softmax over dot products of keys and memories is used for retrieval; see `softmax_choice ` for a description of each option. @@ -1267,8 +1270,8 @@ class EMComposition(AutodiffComposition): learn_field_weights : bool determines whether `field_weights ` are learnable during training; - requires `enable_learning ` to be True for the corresponding field; - see `Learning ` for additional details. + requires `enable_learning ` to be True or a list with at least one True + entry for the corresponding field; see `Learning ` for additional details. learning_rate : float determines whether the rate at which `field_weights ` are learned @@ -1295,7 +1298,7 @@ class EMComposition(AutodiffComposition): Full list of `INPUT ` `Nodes ` ordered with query_input_nodes first followed by value_input_nodes; used primarily for internal computations - field_input_nodes : list[TransferMechanism] + input_nodes_by_fields : list[TransferMechanism] Full list of `INPUT ` `Nodes ` in the same order specified in the **field_names** argument of the constructor and in `self.field_names `. 
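A minimal sketch of the renamed softmax_choice options, modeled on the test updated later in this patch (an editorial example, not part of the patch; it assumes EMComposition and the ARG_MAX / WEIGHTED_AVG keywords are importable as shown there, and learning is disabled because ARG_MAX is not differentiable):

import psyneulink as pnl
from psyneulink.library.compositions.emcomposition import EMComposition

em = EMComposition(memory_template=[[[1, .1, .1]], [[.1, 1, .1]], [[.1, .1, 1]]],
                   softmax_choice=pnl.ARG_MAX,   # mapped internally to ARG_MAX_INDICATOR
                   enable_learning=False)
result = em.run(inputs={em.query_input_nodes[0]: [[1, 0, 0]]})
# result is approximately [[1, .1, .1]]: the stored entry that best matches the query,
# returned unmodified; WEIGHTED_AVG would instead return a softmax-weighted blend of entries.
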
@@ -1361,8 +1364,8 @@ class EMComposition(AutodiffComposition): ` (see `Retrieve values by field ` for additional details); these are assigned the same names as the `query_input_nodes ` and `value_input_nodes ` to which they correspond appended with the suffix - * [RETRIEVED]*, and are in the same order as `field_input_nodes ` to which - to which they correspond. + * [RETRIEVED]*, and are in the same order as `input_nodes_by_fields ` + to which to which they correspond. storage_node : EMStorageMechanism `EMStorageMechanism` that receives inputs from the `query_input_nodes ` and @@ -1473,7 +1476,7 @@ class Parameters(AutodiffComposition.Parameters): softmax_choice see `softmax_choice ` - :default value: WEIGHTED + :default value: WEIGHTED_AVG :type: ``keyword`` softmax_threshold @@ -1497,7 +1500,7 @@ class Parameters(AutodiffComposition.Parameters): normalize_memories = Parameter(True) softmax_gain = Parameter(1.0, modulable=True) softmax_threshold = Parameter(.001, modulable=True, specify_none=True) - softmax_choice = Parameter(WEIGHTED, modulable=False, specify_none=True) + softmax_choice = Parameter(WEIGHTED_AVG, modulable=False, specify_none=True) storage_prob = Parameter(1.0, modulable=True, aliases=[MULTIPLICATIVE_PARAM]) memory_decay_rate = Parameter(AUTO, modulable=True) enable_learning = Parameter(True, structural=True) @@ -1575,7 +1578,7 @@ def __init__(self, normalize_memories:bool=True, softmax_gain:Union[float, ADAPTIVE, CONTROL]=1.0, softmax_threshold:Optional[float]=.001, - softmax_choice:Optional[Union[WEIGHTED, ARG_MAX, PROBABILISTIC]]=WEIGHTED, + softmax_choice:Optional[Union[WEIGHTED_AVG, ARG_MAX, PROBABILISTIC]]=WEIGHTED_AVG, storage_prob:float=1.0, memory_decay_rate:Union[float,AUTO]=AUTO, enable_learning:Union[bool,list]=True, @@ -1906,8 +1909,9 @@ def _parse_fields(self, self.num_fields = len(self.entry_template) keys_weights = [i for i in parsed_field_weights if i != 0] self.num_keys = len(keys_weights) - # Get indices of field_weights that specify keys: - self.key_indices = np.nonzero(parsed_field_weights)[0] + # Get indices of field_weights that specify keys and values: + self.key_indices = np.flatnonzero(parsed_field_weights) + self.value_indices = np.where(parsed_field_weights==0)[0] self.num_values = self.num_fields - self.num_keys if parsed_field_names: @@ -1993,10 +1997,15 @@ def _construct_pathways(self, self.query_input_nodes = self._construct_query_input_nodes(field_weights) self.value_input_nodes = self._construct_value_input_nodes(field_weights) self.input_nodes = self.query_input_nodes + self.value_input_nodes - # Order input_nodes according to self.field_names - self.field_input_nodes = [node for name in self.field_names for node in self.input_nodes - if node in self.input_nodes - if (node.name in {name + QUERY_AFFIX, name + VALUE_AFFIX})] + + # Get list of nodes in order specified in self.field_names + self.input_nodes_by_fields = [None] * len(field_weights) + for i in range(self.num_keys): + self.input_nodes_by_fields[self.key_indices[i]] = self.query_input_nodes[i] + for i in range(self.num_values): + self.input_nodes_by_fields[self.value_indices[i]] = self.value_input_nodes[i] + assert all(self.input_nodes_by_fields), "PROGRAM ERROR: input_nodes_by_fields not fully populated." 
+ self.concatenate_keys_node = self._construct_concatenate_keys_node(concatenate_keys) self.match_nodes = self._construct_match_nodes(memory_template, memory_capacity, concatenate_keys,normalize_memories) @@ -2206,9 +2215,9 @@ def _construct_match_nodes(self, memory_template, memory_capacity, concatenate_k def _validate_softmax_choice(self, softmax_choice, enable_learning): if softmax_choice in {ARG_MAX, PROBABILISTIC} and enable_learning: - warnings.warn(f"The 'softmax_choice' arg of '{self.name}' is set to {softmax_choice} with " - f"'enable_learning' set to True; this will generate an error if its 'learn' " - f"method is called; set 'softmax_choice' to WEIGHTED to use learning.") + warnings.warn(f"The 'softmax_choice' arg of '{self.name}' is set to '{softmax_choice}' with " + f"'enable_learning' set to True (or a list); this will generate an error if its " + f"'learn' method is called. Set 'softmax_choice' to WEIGHTED_AVG before learning.") def _construct_softmax_nodes(self, memory_capacity, field_weights, softmax_gain, softmax_threshold, softmax_choice)->list: @@ -2224,6 +2233,11 @@ def _construct_softmax_nodes(self, memory_capacity, field_weights, f"PROGRAM ERROR: number of keys ({self.num_keys}) does not match number of " \ f"non-zero values in field_weights ({len(key_indices)})." + if softmax_choice == ARG_MAX: + # ARG_MAX would return entry multiplied by its dot product + # ARG_MAX_INDICATOR returns the entry unmodified + softmax_choice = ARG_MAX_INDICATOR + softmax_nodes = [TransferMechanism(input_ports={SIZE:memory_capacity, PROJECTIONS: MappingProjection( sender=match_node.output_port, @@ -2355,12 +2369,12 @@ def _construct_retrieved_nodes(self, memory_template)->list: for i in range(self.num_values)] retrieved_nodes = self.retrieved_key_nodes + self.retrieved_value_nodes + # Return nodes in order sorted by self.field_names - # IMPLEMENTATION NOTE: - # "in" is used below instead of "==" in case more than one EMComposition is created, - # in which case retrieved_nodes will have "-" appended to their name + # (use name_without_suffix as reference in case more than one EMComposition is created, + # in which case retrieved_nodes will have "-" appended to their name) return [node for name in self.field_names for node in retrieved_nodes - if node in retrieved_nodes if (name + RETRIEVED_AFFIX) in node.name] + if node in retrieved_nodes if (name + RETRIEVED_AFFIX) == name_without_suffix(node.name)] def _construct_storage_node(self, memory_template, @@ -2518,13 +2532,13 @@ def _encode_memory(self, context=None): # Assign updated matrix to Projection self.retrieved_nodes[i].path_afferents[0].parameters.matrix.set(field_memories, context) - # 7/10/24 - FIX: WHY BOTHER WITH OVERRIDE IF NOTHING IS DONE: @handle_external_context() def learn(self, *args, **kwargs)->list: + """Override to check for inappropriate use of ARG_MAX or PROBABILISTIC options for retrieval with learning""" arg = self.parameters.softmax_choice.get(kwargs[CONTEXT]) if arg in {ARG_MAX, PROBABILISTIC}: raise EMCompositionError(f"The ARG_MAX and PROBABILISTIC options for the 'softmax_choice' arg " - f"of '{self.name}' cannot be used during learning; change to WEIGHTED.") + f"of '{self.name}' cannot be used during learning; change to WEIGHTED_AVG.") return super().learn(*args, **kwargs) def _get_execution_mode(self, execution_mode): diff --git a/tests/composition/test_emcomposition.py b/tests/composition/test_emcomposition.py index d37a8455b14..996c0af2760 100644 --- a/tests/composition/test_emcomposition.py +++ 
b/tests/composition/test_emcomposition.py @@ -34,11 +34,11 @@ @pytest.mark.autodiff_constructor class TestConstruction: - # def test_two_calls_no_args(self): - # comp = EMComposition() - # comp_2 = EMComposition() - # assert isinstance(comp, EMComposition) - # assert isinstance(comp_2, EMComposition) + def test_two_calls_no_args(self): + comp = EMComposition() + comp_2 = EMComposition() + assert isinstance(comp, EMComposition) + assert isinstance(comp_2, EMComposition) # def test_pytorch_representation(self): # comp = EMComposition() @@ -233,17 +233,17 @@ def test_memory_fill(start, memory_fill): test_memory_fill(start=repeat, memory_fill=memory_fill) def test_softmax_choice(self): - for softmax_choice in [pnl.WEIGHTED, pnl.ARG_MAX, pnl.PROBABILISTIC]: - em = EMComposition(memory_template=[[[1,.1,.1]], [[.1,1,.1]], [[.1,.1,1]]], + for softmax_choice in [pnl.WEIGHTED_AVG, pnl.ARG_MAX, pnl.PROBABILISTIC]: + em = EMComposition(memory_template=[[[1,.1,.1]], [[1,.1,.1]], [[.1,.1,1]]], softmax_choice=softmax_choice, enable_learning=False) - result = em.run(inputs={em.query_input_nodes[0]:[[0,1,0]]}) - if softmax_choice == pnl.WEIGHTED: - np.testing.assert_allclose(result, [[0.21330295, 0.77339411, 0.21330295]]) + result = em.run(inputs={em.query_input_nodes[0]:[[1,0,0]]}) + if softmax_choice == pnl.WEIGHTED_AVG: + np.testing.assert_allclose(result, [[0.93016008, 0.1, 0.16983992]]) if softmax_choice == pnl.ARG_MAX: - np.testing.assert_allclose(result, [[.1, 1, .1]]) + np.testing.assert_allclose(result, [[1, .1, .1]]) if softmax_choice == pnl.PROBABILISTIC: # NOTE: actual stochasticity not tested here - np.testing.assert_allclose(result, [[.1, 1, .1]]) + np.testing.assert_allclose(result, [[1, .1, .1]]) em = EMComposition(memory_template=[[[1,.1,.1]], [[.1,1,.1]], [[.1,.1,1]]]) for softmax_choice in [pnl.ARG_MAX, pnl.PROBABILISTIC]: @@ -251,7 +251,16 @@ def test_softmax_choice(self): em.parameters.softmax_choice.set(softmax_choice) em.learn() assert (f"The ARG_MAX and PROBABILISTIC options for the 'softmax_choice' arg " - f"of '{em.name}' cannot be used during learning; change to WEIGHTED." in str(error_text.value)) + f"of '{em.name}' cannot be used during learning; change to WEIGHTED_AVG." in str(error_text.value)) + + for softmax_choice in [pnl.ARG_MAX, pnl.PROBABILISTIC]: + with pytest.warns(UserWarning) as warning: + em = EMComposition(softmax_choice=softmax_choice, enable_learning=True) + warning_msg = (f"The 'softmax_choice' arg of '{em.name}' is set to '{softmax_choice}' with " + f"'enable_learning' set to True (or a list); this will generate an error if its " + f"'learn' method is called. 
Set 'softmax_choice' to WEIGHTED_AVG before learning.") + assert warning_msg in str(warning[0].message) + @pytest.mark.pytorch diff --git a/tests/functions/test_selection.py b/tests/functions/test_selection.py index 75deaad78b1..06e81f38802 100644 --- a/tests/functions/test_selection.py +++ b/tests/functions/test_selection.py @@ -24,22 +24,35 @@ llvm_res['fp32'][expected_philox_ind] = (1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0) test_data = [ - pytest.param(Functions.OneHot, test_var, {'mode':kw.MAX_VAL}, (0., 0., 0., 0., 0., 0., 0., 0., 0.92732552, 0.), id="OneHot MAX_VAL"), - pytest.param(Functions.OneHot, test_var, {'mode':kw.MAX_ABS_VAL}, (0., 0., 0., 0., 0., 0., 0., 0., 0.92732552, 0.), id="OneHot MAX_ABS_VAL"), - pytest.param(Functions.OneHot, -test_var, {'mode':kw.MAX_ABS_VAL}, (0., 0., 0., 0., 0., 0., 0., 0., 0.92732552, 0.), id="OneHot MAX_ABS_VAL Neg"), - pytest.param(Functions.OneHot, test_var, {'mode':kw.MAX_INDICATOR}, (0., 0., 0., 0., 0., 0., 0., 0., 1., 0.), id="OneHot MAX_INDICATOR"), - pytest.param(Functions.OneHot, test_var, {'mode':kw.MAX_ABS_INDICATOR}, (0., 0., 0., 0., 0., 0., 0., 0., 1., 0.), id="OneHot MAX_ABS_INDICATOR"), - pytest.param(Functions.OneHot, -test_var, {'mode':kw.MAX_ABS_INDICATOR}, (0., 0., 0., 0., 0., 0., 0., 0., 1., 0.), id="OneHot MAX_ABS_INDICATOR Neg"), - pytest.param(Functions.OneHot, test_var, {'mode':kw.MIN_VAL}, (0., 0., 0., 0., 0., 0., 0., 0., 0., -0.23311696), id="OneHot MIN_VAL"), - pytest.param(Functions.OneHot, test_var, {'mode':kw.MIN_ABS_VAL}, (0., 0., 0., 0.08976637, 0., 0., 0., 0., 0., 0.), id="OneHot MIN_ABS_VAL"), - pytest.param(Functions.OneHot, -test_var, {'mode':kw.MIN_ABS_VAL}, (0., 0., 0., 0.08976637, 0., 0., 0., 0., 0., 0.), id="OneHot MIN_ABS_VAL Neg"), - pytest.param(Functions.OneHot, test_var, {'mode':kw.MIN_INDICATOR}, (0., 0., 0., 0., 0., 0., 0., 0., 0., 1.), id="OneHot MIN_INDICATOR"), - pytest.param(Functions.OneHot, test_var, {'mode':kw.MIN_ABS_INDICATOR}, (0., 0., 0., 1.,0., 0., 0., 0., 0., 0.), id="OneHot MIN_ABS_INDICATOR"), - pytest.param(Functions.OneHot, -test_var, {'mode':kw.MIN_ABS_INDICATOR}, (0., 0., 0., 1.,0., 0., 0., 0., 0., 0.), id="OneHot MIN_ABS_INDICATOR Neg"), + pytest.param(Functions.OneHot, test_var, {'mode':kw.ARG_MAX}, (0., 0., 0., 0., 0., 0., 0., 0., 0.92732552, 0.), id="OneHot ARG_MAX"), + pytest.param(Functions.OneHot, test_var, {'mode':kw.ARG_MAX_ABS}, (0., 0., 0., 0., 0., 0., 0., 0., 0.92732552, 0.), id="OneHot ARG MAX_ABS"), + pytest.param(Functions.OneHot, -test_var, {'mode':kw.ARG_MAX_ABS}, (0., 0., 0., 0., 0., 0., 0., 0., 0.92732552, 0.), id="OneHot ARG MAX_ABS Neg"), + pytest.param(Functions.OneHot, test_var, {'mode':kw.ARG_MAX_INDICATOR}, (0., 0., 0., 0., 0., 0., 0., 0., 1., 0.), id="OneHot ARG_MAX_INDICATOR"), + pytest.param(Functions.OneHot, test_var, {'mode':kw.ARG_MAX_ABS_INDICATOR}, (0., 0., 0., 0., 0., 0., 0., 0., 1., 0.), id="OneHot ARG_MAX_ABS_INDICATOR"), + pytest.param(Functions.OneHot, -test_var, {'mode':kw.ARG_MAX_ABS_INDICATOR}, (0., 0., 0., 0., 0., 0., 0., 0., 1., 0.), id="OneHot ARG_MAX_ABS_INDICATOR Neg"), + pytest.param(Functions.OneHot, test_var, {'mode':kw.ARG_MIN}, (0., 0., 0., 0., 0., 0., 0., 0., 0, -0.23311696), id="OneHot ARG_MIN"), + pytest.param(Functions.OneHot, test_var, {'mode':kw.ARG_MIN_ABS}, (0., 0., 0., 0.08976637, 0., 0., 0., 0., 0., 0.), id="OneHot ARG_MIN_ABS"), + pytest.param(Functions.OneHot, -test_var, {'mode':kw.ARG_MIN_ABS}, (0., 0., 0., 0.08976637, 0., 0., 0., 0., 0., 0.), id="OneHot ARG_MIN_ABS Neg"), + pytest.param(Functions.OneHot, 
test_var, {'mode':kw.ARG_MIN_INDICATOR}, (0., 0., 0., 0., 0., 0., 0., 0., 0., 1.), id="OneHot ARG_MIN_INDICATOR"), + pytest.param(Functions.OneHot, test_var, {'mode':kw.ARG_MIN_ABS_INDICATOR}, (0., 0., 0., 1.,0., 0., 0., 0., 0., 0.), id="OneHot ARG_MIN_ABS_INDICATOR"), + pytest.param(Functions.OneHot, -test_var, {'mode':kw.ARG_MIN_ABS_INDICATOR}, (0., 0., 0., 1.,0., 0., 0., 0., 0., 0.), id="OneHot ARG_MIN_ABS_INDICATOR Neg"), + pytest.param(Functions.OneHot, test_var, {'mode':kw.MAX_VAL}, (0., 0., 0., 0., 0., 0., 0., 0., 0.92732552, 0.), marks=pytest.mark.llvm_not_implemented, id="OneHot MAX_VAL"), + pytest.param(Functions.OneHot, test_var, {'mode':kw.MAX_ABS_VAL}, (0., 0., 0., 0., 0., 0., 0., 0., 0.92732552, 0.), marks=pytest.mark.llvm_not_implemented, id="OneHot MAX_ABS_VAL"), + pytest.param(Functions.OneHot, -test_var, {'mode':kw.MAX_ABS_VAL}, (0., 0., 0., 0., 0., 0., 0., 0., 0.92732552, 0.), marks=pytest.mark.llvm_not_implemented, id="OneHot MAX_ABS_VAL Neg"), + pytest.param(Functions.OneHot, test_var, {'mode':kw.MAX_INDICATOR}, (0., 0., 0., 0., 0., 0., 0., 0., 1., 0.), marks=pytest.mark.llvm_not_implemented, id="OneHot MAX_INDICATOR"), + pytest.param(Functions.OneHot, test_var, {'mode':kw.MAX_ABS_INDICATOR}, (0., 0., 0., 0., 0., 0., 0., 0., 1., 0.), marks=pytest.mark.llvm_not_implemented, id="OneHot MAX_ABS_INDICATOR"), + pytest.param(Functions.OneHot, -test_var, {'mode':kw.MAX_ABS_INDICATOR}, (0., 0., 0., 0., 0., 0., 0., 0., 1., 0.), marks=pytest.mark.llvm_not_implemented, id="OneHot MAX_ABS_INDICATOR Neg"), + pytest.param(Functions.OneHot, test_var, {'mode':kw.MIN_VAL}, (0., 0., 0., 0., 0., 0., 0., 0., 0., -0.23311696), marks=pytest.mark.llvm_not_implemented, id="OneHot MIN_VAL"), + pytest.param(Functions.OneHot, test_var, {'mode':kw.MIN_ABS_VAL}, (0., 0., 0., 0.08976637, 0., 0., 0., 0., 0., 0.), marks=pytest.mark.llvm_not_implemented, id="OneHot MIN_ABS_VAL"), + pytest.param(Functions.OneHot, -test_var, {'mode':kw.MIN_ABS_VAL}, (0., 0., 0., 0.08976637, 0., 0., 0., 0., 0., 0.), marks=pytest.mark.llvm_not_implemented, id="OneHot MIN_ABS_VAL Neg"), + pytest.param(Functions.OneHot, test_var, {'mode':kw.MIN_INDICATOR}, (0., 0., 0., 0., 0., 0., 0., 0., 0., 1.), marks=pytest.mark.llvm_not_implemented, id="OneHot MIN_INDICATOR"), + pytest.param(Functions.OneHot, test_var, {'mode':kw.MIN_ABS_INDICATOR}, (0., 0., 0., 1.,0., 0., 0., 0., 0., 0.), marks=pytest.mark.llvm_not_implemented, id="OneHot MIN_ABS_INDICATOR"), + pytest.param(Functions.OneHot, -test_var, {'mode':kw.MIN_ABS_INDICATOR}, (0., 0., 0., 1.,0., 0., 0., 0., 0., 0.), marks=pytest.mark.llvm_not_implemented, id="OneHot MIN_ABS_INDICATOR Neg"), pytest.param(Functions.OneHot, [test_var, test_prob], {'mode':kw.PROB}, (0., 0., 0., 0.08976636599379373, 0., 0., 0., 0., 0., 0.), id="OneHot PROB"), pytest.param(Functions.OneHot, [test_var, test_prob], {'mode':kw.PROB_INDICATOR}, (0., 0., 0., 1., 0., 0., 0., 0., 0., 0.), id="OneHot PROB_INDICATOR"), pytest.param(Functions.OneHot, [test_var, test_philox], {'mode':kw.PROB}, expected_philox_prob, id="OneHot PROB Philox"), pytest.param(Functions.OneHot, [test_var, test_philox], {'mode':kw.PROB_INDICATOR}, expected_philox_ind, id="OneHot PROB_INDICATOR Philox"), + ] GROUP_PREFIX="SelectionFunction " diff --git a/tests/functions/test_transfer.py b/tests/functions/test_transfer.py index 238b3c5c352..fd7b8bce527 100644 --- a/tests/functions/test_transfer.py +++ b/tests/functions/test_transfer.py @@ -70,13 +70,23 @@ def binomial_distort_helper(seed): # SoftMax 1D input pytest.param(pnl.SoftMax, 
test_var, {kw.GAIN:RAND1, kw.PER_ITEM:False}, softmax_helper, id="SOFT_MAX ALL"), - pytest.param(pnl.SoftMax, test_var, {kw.GAIN:RAND1, kw.OUTPUT_TYPE:kw.MAX_VAL, kw.PER_ITEM:False}, np.where(softmax_helper == np.max(softmax_helper), softmax_helper, 0), id="SOFT_MAX MAX_VAL"), - pytest.param(pnl.SoftMax, test_var, {kw.GAIN:RAND1, kw.OUTPUT_TYPE:kw.MAX_INDICATOR, kw.PER_ITEM:False}, np.where(softmax_helper == np.max(softmax_helper), 1, 0), id="SOFT_MAX MAX_INDICATOR"), + pytest.param(pnl.SoftMax, test_var, {kw.GAIN:RAND1, kw.OUTPUT_TYPE:pnl.ARG_MAX, kw.PER_ITEM:False}, + np.where(softmax_helper == np.max(softmax_helper), softmax_helper, 0), id="SOFT_MAX ARG_MAX"), + pytest.param(pnl.SoftMax, test_var, {kw.GAIN:RAND1, kw.OUTPUT_TYPE:pnl.ARG_MAX_INDICATOR, kw.PER_ITEM:False}, + np.where(softmax_helper == np.max(softmax_helper), 1, 0), id="SOFT_MAX ARG_MAX_INDICATOR"), + pytest.param(pnl.SoftMax, test_var, {kw.GAIN:RAND1, kw.OUTPUT_TYPE:kw.MAX_VAL, kw.PER_ITEM:False}, + np.where(softmax_helper == np.max(softmax_helper), softmax_helper, 0), id="SOFT_MAX MAX_VAL"), + pytest.param(pnl.SoftMax, test_var, {kw.GAIN:RAND1, kw.OUTPUT_TYPE:kw.MAX_INDICATOR, kw.PER_ITEM:False}, + np.where(softmax_helper == np.max(softmax_helper), 1, 0), id="SOFT_MAX MAX_INDICATOR"), pytest.param(pnl.SoftMax, test_var, {kw.GAIN:RAND1, kw.OUTPUT_TYPE:kw.PROB, kw.PER_ITEM:False}, [0.0, 0.0, 0.0, 0.0, test_var[4], 0.0, 0.0, 0.0, 0.0, 0.0], id="SOFT_MAX PROB"), # SoftMax 2D testing per-item pytest.param(pnl.SoftMax, [test_var], {kw.GAIN:RAND1, kw.PER_ITEM:True}, [softmax_helper], id="SOFT_MAX ALL 2D"), + pytest.param(pnl.SoftMax, [test_var], {kw.GAIN:RAND1, kw.OUTPUT_TYPE:pnl.ARG_MAX, kw.PER_ITEM:True}, + [np.where(softmax_helper == np.max(softmax_helper), softmax_helper, 0)], id="SOFT_MAX ARG_MAX 2D"), + pytest.param(pnl.SoftMax, [test_var], {kw.GAIN:RAND1, kw.OUTPUT_TYPE:pnl.ARG_MAX_INDICATOR, kw.PER_ITEM:True}, + [np.where(softmax_helper == np.max(softmax_helper), 1, 0)], id="SOFT_MAX ARG_MAX_INDICATOR 2D"), pytest.param(pnl.SoftMax, [test_var], {kw.GAIN:RAND1, kw.OUTPUT_TYPE:kw.MAX_VAL, kw.PER_ITEM:True}, [np.where(softmax_helper == np.max(softmax_helper), softmax_helper, 0)], id="SOFT_MAX MAX_VAL 2D"), pytest.param(pnl.SoftMax, [test_var], {kw.GAIN:RAND1, kw.OUTPUT_TYPE:kw.MAX_INDICATOR, kw.PER_ITEM:True}, @@ -86,6 +96,10 @@ def binomial_distort_helper(seed): # SoftMax per-item with 2 elements in input pytest.param(pnl.SoftMax, [test_var, test_var], {kw.GAIN:RAND1, kw.PER_ITEM: True}, softmax_helper2, id="SOFT_MAX ALL PER_ITEM"), + pytest.param(pnl.SoftMax, [test_var, test_var], {kw.GAIN:RAND1, kw.OUTPUT_TYPE:pnl.ARG_MAX, kw.PER_ITEM: True}, + np.where(softmax_helper2 == np.max(softmax_helper2), softmax_helper2, 0), id="SOFT_MAX ARG_MAX PER_ITEM"), + pytest.param(pnl.SoftMax, [test_var, test_var], {kw.GAIN:RAND1, kw.OUTPUT_TYPE:pnl.ARG_MAX_INDICATOR, kw.PER_ITEM: True}, + np.where(softmax_helper2 == np.max(softmax_helper2), 1, 0), id="SOFT_MAX ARG_MAX_INDICATOR PER_ITEM"), pytest.param(pnl.SoftMax, [test_var, test_var], {kw.GAIN:RAND1, kw.OUTPUT_TYPE:kw.MAX_VAL, kw.PER_ITEM: True}, np.where(softmax_helper2 == np.max(softmax_helper2), softmax_helper2, 0), id="SOFT_MAX MAX_VAL PER_ITEM"), pytest.param(pnl.SoftMax, [test_var, test_var], {kw.GAIN:RAND1, kw.OUTPUT_TYPE:kw.MAX_INDICATOR, kw.PER_ITEM: True}, @@ -106,6 +120,13 @@ def binomial_distort_helper(seed): @pytest.mark.parametrize("func, variable, params, expected", test_data) def test_execute(func, variable, params, expected, benchmark, func_mode): benchmark.group = 
"TransferFunction " + func.componentName + + if func_mode != 'Python': + if ('output' in params + and params['output'] in {kw.MAX_VAL, kw.MAX_ABS_VAL, kw.MAX_INDICATOR, kw.MAX_ABS_INDICATOR, + kw.MIN_VAL, kw.MIN_ABS_VAL, kw.MIN_INDICATOR, kw.MIN_ABS_INDICATOR}): + pytest.skip("{params['mode']} is not supported in {func_mode}") + f = func(default_variable=variable, **params) ex = pytest.helpers.get_func_execution(f, func_mode) diff --git a/tests/mechanisms/test_episodic_memory.py b/tests/mechanisms/test_episodic_memory.py index 26a93fac68e..79ace446800 100644 --- a/tests/mechanisms/test_episodic_memory.py +++ b/tests/mechanisms/test_episodic_memory.py @@ -5,6 +5,7 @@ from psyneulink.core.components.functions.function import FunctionError from psyneulink.core.components.functions.stateful.memoryfunctions import DictionaryMemory, \ ContentAddressableMemory +from psyneulink.core.components.functions.nonstateful.selectionfunctions import OneHot, ARG_MIN from psyneulink.library.components.mechanisms.processing.integrator.episodicmemorymechanism import \ EpisodicMemoryMechanism, EpisodicMemoryMechanismError diff --git a/tests/mechanisms/test_processing_mechanism.py b/tests/mechanisms/test_processing_mechanism.py index 54c9a123cc7..b9a78c5abb2 100644 --- a/tests/mechanisms/test_processing_mechanism.py +++ b/tests/mechanisms/test_processing_mechanism.py @@ -3,6 +3,8 @@ from psyneulink.core import llvm as pnlvm from psyneulink.core.components.functions.function import FunctionError +from psyneulink.core.components.functions.nonstateful.selectionfunctions import ( + ARG_MAX, ARG_MAX_ABS_INDICATOR, ARG_MAX_INDICATOR) from psyneulink.core.components.functions.nonstateful.learningfunctions import Hebbian, Reinforcement, TDLearning from psyneulink.core.components.functions.nonstateful.objectivefunctions import Distance from psyneulink.core.components.functions.nonstateful.distributionfunctions import NormalDist, ExponentialDist, \ From 2fdfc610301a207989682784dde40df6f4402a8e Mon Sep 17 00:00:00 2001 From: David Turner Date: Fri, 25 Oct 2024 14:42:04 -0400 Subject: [PATCH 353/410] Swap out optimizer for optuna.RandomSampler I think CmaES has some issues with seeding. --- .../composition/pec/test_stab_flex_pec_fit.py | 20 +++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/tests/composition/pec/test_stab_flex_pec_fit.py b/tests/composition/pec/test_stab_flex_pec_fit.py index 220caa304c6..508974e1f09 100644 --- a/tests/composition/pec/test_stab_flex_pec_fit.py +++ b/tests/composition/pec/test_stab_flex_pec_fit.py @@ -388,10 +388,10 @@ def run_stab_flex_cond( def test_stab_flex_cond_fit(): from psyneulink.core.globals.utilities import set_global_seed - # Let's make things reproducible - pnl_seed = 0 + # # Let's make things reproducible + pnl_seed = 42 set_global_seed(pnl_seed) - trial_seq_seed = 0 + trial_seq_seed = 43 # High-level parameters the impact performance of the test num_trials = 75 @@ -426,7 +426,7 @@ def test_stab_flex_cond_fit(): # We will generate a dataset that comprises two different conditions. Each condition will have a different threshold. # Randomly select which trials will be in each condition uniformly. 
- rng = np.random.default_rng(pnl_seed) + rng = np.random.default_rng(12345) threshold = rng.choice([0.3, 0.7], size=num_trials, replace=True) # Run @@ -472,7 +472,7 @@ def test_stab_flex_cond_fit(): ], data=data_to_fit, optimization_function=PECOptimizationFunction( - method=optuna.samplers.CmaEsSampler(seed=0), max_iterations=10 + method=optuna.samplers.RandomSampler, max_iterations=10 ), num_estimates=num_estimates, initial_seed=42, @@ -489,11 +489,11 @@ def test_stab_flex_cond_fit(): # These aren't the recovered parameters, we are doing too few trials and too few estimates to get the correct # results. expected_results = { - 'Task Activations [Act1, Act2]-1.gain': 3.87419, - 'Automaticity-weighted Stimulus Input [w*S1, w*S2]-1.slope': 0.0125, - 'DDM-1.threshold[threshold=0.7]': 0.30939, - 'DDM-1.threshold[threshold=0.3]': 0.22168, - 'DDM-1.non_decision_time': 0.28659999999999997 + 'Task Activations [Act1, Act2]-1.gain': 2.3965500000000004, + 'Automaticity-weighted Stimulus Input [w*S1, w*S2]-1.slope': 0.0058000000000000005, + 'DDM-1.threshold[threshold=0.7]': 0.43483, + 'DDM-1.threshold[threshold=0.3]': 0.30449, + 'DDM-1.non_decision_time': 0.3124 } for key, value in expected_results.items(): From d4f2411d4069667032f1c14338e492c101127a09 Mon Sep 17 00:00:00 2001 From: David Turner Date: Fri, 25 Oct 2024 14:43:31 -0400 Subject: [PATCH 354/410] Get rid of unused pytest import. --- tests/composition/pec/test_stab_flex_pec_fit.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/composition/pec/test_stab_flex_pec_fit.py b/tests/composition/pec/test_stab_flex_pec_fit.py index 508974e1f09..67c7e29cde5 100644 --- a/tests/composition/pec/test_stab_flex_pec_fit.py +++ b/tests/composition/pec/test_stab_flex_pec_fit.py @@ -1,7 +1,6 @@ import psyneulink as pnl import optuna -import pytest from psyneulink.core.components.functions.nonstateful.fitfunctions import ( PECOptimizationFunction, From 65763b15197994aeb7bd74cb68c247885f4ad220 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Fri, 25 Oct 2024 17:26:35 -0400 Subject: [PATCH 355/410] tests/llvm: Drop unused ctypes import Signed-off-by: Jan Vesely --- tests/llvm/test_builtins_matrix.py | 1 - tests/llvm/test_builtins_mt_random.py | 1 - tests/llvm/test_builtins_philox_random.py | 1 - tests/llvm/test_builtins_vector.py | 1 - tests/llvm/test_compile.py | 1 - tests/llvm/test_custom_func.py | 1 - 6 files changed, 6 deletions(-) diff --git a/tests/llvm/test_builtins_matrix.py b/tests/llvm/test_builtins_matrix.py index 9280eb0db98..8628f0d0ff9 100644 --- a/tests/llvm/test_builtins_matrix.py +++ b/tests/llvm/test_builtins_matrix.py @@ -1,4 +1,3 @@ -import ctypes import numpy as np import pytest diff --git a/tests/llvm/test_builtins_mt_random.py b/tests/llvm/test_builtins_mt_random.py index d8c0f51d1ce..f19f01e78e2 100644 --- a/tests/llvm/test_builtins_mt_random.py +++ b/tests/llvm/test_builtins_mt_random.py @@ -1,4 +1,3 @@ -import ctypes import numpy as np import pytest import random diff --git a/tests/llvm/test_builtins_philox_random.py b/tests/llvm/test_builtins_philox_random.py index af9f4228d71..56b6485b751 100644 --- a/tests/llvm/test_builtins_philox_random.py +++ b/tests/llvm/test_builtins_philox_random.py @@ -1,4 +1,3 @@ -import ctypes import numpy as np import pytest diff --git a/tests/llvm/test_builtins_vector.py b/tests/llvm/test_builtins_vector.py index 70ced0e8864..131514fdacc 100644 --- a/tests/llvm/test_builtins_vector.py +++ b/tests/llvm/test_builtins_vector.py @@ -1,4 +1,3 @@ -import ctypes import numpy as np import pytest diff --git 
a/tests/llvm/test_compile.py b/tests/llvm/test_compile.py index 4a1cff96317..a058f47b1c7 100644 --- a/tests/llvm/test_compile.py +++ b/tests/llvm/test_compile.py @@ -1,4 +1,3 @@ -import ctypes import numpy as np import pytest diff --git a/tests/llvm/test_custom_func.py b/tests/llvm/test_custom_func.py index 87936eb54e0..ede4323473a 100644 --- a/tests/llvm/test_custom_func.py +++ b/tests/llvm/test_custom_func.py @@ -1,4 +1,3 @@ -import ctypes import numpy as np import pytest From 499d8716e3aa70f86a5b0e322ad9a480f4ab65e8 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Fri, 25 Oct 2024 17:50:30 -0400 Subject: [PATCH 356/410] tests: Use top level module import of ExecutionMode Signed-off-by: Jan Vesely --- tests/composition/test_interfaces.py | 8 ++++---- tests/composition/test_learning.py | 6 +++--- tests/mechanisms/test_transfer_mechanism.py | 4 ++-- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/tests/composition/test_interfaces.py b/tests/composition/test_interfaces.py index a84c7603346..3d2815ecd35 100644 --- a/tests/composition/test_interfaces.py +++ b/tests/composition/test_interfaces.py @@ -1,7 +1,7 @@ import numpy as np import pytest -import psyneulink.core.llvm as pnlvm +import psyneulink as pnl from psyneulink.core.components.functions.nonstateful.transferfunctions import Identity, Linear from psyneulink.core.components.mechanisms.processing.compositioninterfacemechanism import CompositionInterfaceMechanism @@ -179,7 +179,7 @@ def test_connect_compositions_with_simple_states(self, comp_mode): # output = 180.0 comp3.run(inputs={comp1: [[5.]]}, execution_mode=comp_mode) np.testing.assert_allclose(comp3.results, [[[180.0]]]) - if comp_mode is pnlvm.ExecutionMode.Python: + if comp_mode is pnl.ExecutionMode.Python: np.testing.assert_allclose(comp1.output_port.parameters.value.get(comp3), [30.0]) np.testing.assert_allclose(comp2.output_port.parameters.value.get(comp3), [180.0]) np.testing.assert_allclose(comp3.output_port.parameters.value.get(comp3), [180.0]) @@ -242,7 +242,7 @@ def test_connect_compositions_with_complicated_states(self, comp_mode): ) np.testing.assert_allclose(output, [[180.], [1800.]]) - if comp_mode is pnlvm.ExecutionMode.Python: + if comp_mode is pnl.ExecutionMode.Python: np.testing.assert_allclose(inner_composition_1.get_output_values(outer_composition), [[30.], [300.]]) np.testing.assert_allclose(inner_composition_2.get_output_values(outer_composition), [[180.], [1800.]]) np.testing.assert_allclose(outer_composition.get_output_values(outer_composition), [[180.], [1800.]]) @@ -312,7 +312,7 @@ def test_compositions_as_origin_nodes(self, comp_mode): ) np.testing.assert_allclose(output, [[36.]]) - if comp_mode is pnlvm.ExecutionMode.Python: + if comp_mode is pnl.ExecutionMode.Python: np.testing.assert_allclose(A.get_output_values(outer_composition), [[1.0]]) np.testing.assert_allclose(B.get_output_values(outer_composition), [[2.0]]) np.testing.assert_allclose(C.get_output_values(outer_composition), [[9.0]]) diff --git a/tests/composition/test_learning.py b/tests/composition/test_learning.py index 77e94d70d4a..02b912193dc 100644 --- a/tests/composition/test_learning.py +++ b/tests/composition/test_learning.py @@ -8,7 +8,6 @@ from psyneulink.core.compositions.composition import Composition, CompositionError, RunError from psyneulink.core.components.mechanisms.processing.transfermechanism import TransferMechanism from psyneulink.core.components.functions.nonstateful.learningfunctions import BackPropagation -import psyneulink.core.llvm as pnlvm from 
psyneulink.core.globals.keywords import Loss # from psyneulink.library.components.mechanisms.processing.objective.comparatormechanism import SSE, MSE, L0 @@ -509,8 +508,9 @@ def test_indepedence_of_learning_pathways_using_same_mechs_in_different_comps(se num_trials=2) np.testing.assert_allclose(comp2.results, comp1.results) - @pytest.mark.parametrize('execution_mode', - [pnlvm.ExecutionMode.LLVM, pnlvm.ExecutionMode.PyTorch]) + # Use explicit parametrize instead of the autodiff_mode fixture to avoid + # applying marks. This test doesn't execute pytorch or compiled mode + @pytest.mark.parametrize('execution_mode', [pnl.ExecutionMode.LLVM, pnl.ExecutionMode.PyTorch]) def test_execution_mode_pytorch_and_LLVM_errors(self, execution_mode): A = TransferMechanism(name="learning-process-mech-A") B = TransferMechanism(name="learning-process-mech-B") diff --git a/tests/mechanisms/test_transfer_mechanism.py b/tests/mechanisms/test_transfer_mechanism.py index 2578f122ee8..05477e0e06a 100644 --- a/tests/mechanisms/test_transfer_mechanism.py +++ b/tests/mechanisms/test_transfer_mechanism.py @@ -1,7 +1,7 @@ import numpy as np import pytest -import psyneulink.core.llvm as pnlvm +import psyneulink as pnl from psyneulink.core.components.component import ComponentError from psyneulink.core.components.functions.nonstateful.learningfunctions import Reinforcement from psyneulink.core.components.functions.stateful.integratorfunctions import AccumulatorIntegrator, AdaptiveIntegrator @@ -1745,7 +1745,7 @@ def test_termination_measures(self, comp_mode): result = comp.run(inputs=inputs, execution_mode=comp_mode) np.testing.assert_allclose(result, [[0.43636140750487973, 0.47074475219780554]]) - if comp_mode is pnlvm.ExecutionMode.Python: + if comp_mode is pnl.ExecutionMode.Python: assert decision.num_executions.time_step == 1 assert decision.num_executions.pass_ == 2 assert decision.num_executions.trial== 1 From 3f51efb545c0d599a0024dd7137ef5803f11a80b Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Fri, 25 Oct 2024 19:00:13 -0400 Subject: [PATCH 357/410] tests: Drop unused import of core.llvm Signed-off-by: Jan Vesely --- tests/functions/test_combination.py | 4 +--- tests/functions/test_distance.py | 1 - tests/functions/test_distribution.py | 1 - tests/functions/test_fhn_integrator.py | 1 - tests/functions/test_identity.py | 1 - tests/functions/test_memory.py | 1 - tests/functions/test_optimization.py | 1 - tests/functions/test_selection.py | 1 - tests/functions/test_stability.py | 1 - tests/functions/test_user_defined_func.py | 2 -- tests/mechanisms/test_control_mechanism.py | 1 - tests/mechanisms/test_ddm_mechanism.py | 1 - tests/mechanisms/test_episodic_memory.py | 1 - tests/mechanisms/test_integrator_mechanism.py | 1 - tests/mechanisms/test_objective_mechanism.py | 1 - tests/mechanisms/test_processing_mechanism.py | 1 - tests/mechanisms/test_recurrent_transfer_mechanism.py | 1 - tests/ports/test_output_ports.py | 1 - 18 files changed, 1 insertion(+), 21 deletions(-) diff --git a/tests/functions/test_combination.py b/tests/functions/test_combination.py index cf83c64580e..3cccac7336b 100644 --- a/tests/functions/test_combination.py +++ b/tests/functions/test_combination.py @@ -2,8 +2,6 @@ import pytest import psyneulink as pnl -import psyneulink.core.llvm as pnlvm -from psyneulink import ParameterError class TestRearrange: @@ -210,7 +208,7 @@ def test_reduce_function(variable, operation, exponents, weights, scale, offset, weights=weights, scale=scale, offset=offset) - except ParameterError as e: + except 
pnl.ParameterError as e: if not np.isscalar(scale) and "scale must be a scalar" in str(e): pytest.xfail("vector scale is not supported") if not np.isscalar(offset) and "vector offset is not supported" in str(e): diff --git a/tests/functions/test_distance.py b/tests/functions/test_distance.py index e9af47d3444..c2b4e9d1b0d 100644 --- a/tests/functions/test_distance.py +++ b/tests/functions/test_distance.py @@ -1,5 +1,4 @@ import numpy as np -import psyneulink.core.llvm as pnlvm import psyneulink.core.components.functions as Functions import psyneulink.core.globals.keywords as kw import pytest diff --git a/tests/functions/test_distribution.py b/tests/functions/test_distribution.py index ce91bec9850..d677757f8b7 100644 --- a/tests/functions/test_distribution.py +++ b/tests/functions/test_distribution.py @@ -4,7 +4,6 @@ from packaging import version as pversion -import psyneulink.core.llvm as pnlvm import psyneulink.core.components.functions.nonstateful.distributionfunctions as Functions from psyneulink.core.globals.utilities import _SeededPhilox diff --git a/tests/functions/test_fhn_integrator.py b/tests/functions/test_fhn_integrator.py index e31c671e33f..2285638ba72 100644 --- a/tests/functions/test_fhn_integrator.py +++ b/tests/functions/test_fhn_integrator.py @@ -1,5 +1,4 @@ import numpy as np -import psyneulink.core.llvm as pnlvm import psyneulink.core.components.functions.stateful.integratorfunctions import pytest diff --git a/tests/functions/test_identity.py b/tests/functions/test_identity.py index 4884103a9cd..6830d3104dc 100644 --- a/tests/functions/test_identity.py +++ b/tests/functions/test_identity.py @@ -2,7 +2,6 @@ import pytest import psyneulink.core.components.functions.nonstateful.transferfunctions as Functions -import psyneulink.core.llvm as pnlvm @pytest.mark.function @pytest.mark.identity_function diff --git a/tests/functions/test_memory.py b/tests/functions/test_memory.py index 2173cb3cb2c..4b2ae3fabcc 100644 --- a/tests/functions/test_memory.py +++ b/tests/functions/test_memory.py @@ -4,7 +4,6 @@ import pytest import psyneulink.core.components.functions.stateful.memoryfunctions as Functions -import psyneulink.core.llvm as pnlvm from psyneulink import * from psyneulink.core.globals.utilities import _SeededPhilox, convert_all_elements_to_np_array diff --git a/tests/functions/test_optimization.py b/tests/functions/test_optimization.py index 6fa7ef51850..3cee6f2e059 100644 --- a/tests/functions/test_optimization.py +++ b/tests/functions/test_optimization.py @@ -1,5 +1,4 @@ import numpy as np -import psyneulink.core.llvm as pnlvm import psyneulink.core.components.functions.function as Function import psyneulink.core.components.functions.nonstateful.objectivefunctions as Functions import psyneulink.core.components.functions.nonstateful.optimizationfunctions as OPTFunctions diff --git a/tests/functions/test_selection.py b/tests/functions/test_selection.py index 06e81f38802..aa238af2bf1 100644 --- a/tests/functions/test_selection.py +++ b/tests/functions/test_selection.py @@ -3,7 +3,6 @@ import psyneulink.core.components.functions.nonstateful.selectionfunctions as Functions import psyneulink.core.globals.keywords as kw -import psyneulink.core.llvm as pnlvm from psyneulink.core.globals.utilities import _SeededPhilox np.random.seed(0) diff --git a/tests/functions/test_stability.py b/tests/functions/test_stability.py index 0f38e738d38..b69a601f1a4 100644 --- a/tests/functions/test_stability.py +++ b/tests/functions/test_stability.py @@ -1,6 +1,5 @@ import numpy as np -import 
psyneulink.core.llvm as pnlvm import psyneulink.core.components.functions.function as Function import psyneulink.core.components.functions.nonstateful.objectivefunctions as Functions import psyneulink.core.globals.keywords as kw diff --git a/tests/functions/test_user_defined_func.py b/tests/functions/test_user_defined_func.py index ac1b4d1fa74..8b5e10dd88a 100644 --- a/tests/functions/test_user_defined_func.py +++ b/tests/functions/test_user_defined_func.py @@ -9,8 +9,6 @@ from psyneulink.core.components.mechanisms.processing import TransferMechanism from psyneulink.core.compositions.composition import Composition -import psyneulink.core.llvm as pnlvm - # default val is same shape as expected output # we only use param1 and param2 to avoid automatic shape changes of the variable diff --git a/tests/mechanisms/test_control_mechanism.py b/tests/mechanisms/test_control_mechanism.py index b67e32cd9dd..340dae76db1 100644 --- a/tests/mechanisms/test_control_mechanism.py +++ b/tests/mechanisms/test_control_mechanism.py @@ -4,7 +4,6 @@ import pytest import psyneulink.core.components.functions.nonstateful.transferfunctions -import psyneulink.core.llvm as pnlvm class TestLCControlMechanism: diff --git a/tests/mechanisms/test_ddm_mechanism.py b/tests/mechanisms/test_ddm_mechanism.py index 5959fd96fd4..c8af9e9bf06 100644 --- a/tests/mechanisms/test_ddm_mechanism.py +++ b/tests/mechanisms/test_ddm_mechanism.py @@ -2,7 +2,6 @@ import pytest import psyneulink as pnl -import psyneulink.core.llvm as pnlvm from psyneulink.core.components.component import ComponentError from psyneulink.core.components.functions.nonstateful.distributionfunctions import DriftDiffusionAnalytical, NormalDist diff --git a/tests/mechanisms/test_episodic_memory.py b/tests/mechanisms/test_episodic_memory.py index 79ace446800..d96a918ffc8 100644 --- a/tests/mechanisms/test_episodic_memory.py +++ b/tests/mechanisms/test_episodic_memory.py @@ -1,7 +1,6 @@ import numpy as np import pytest -import psyneulink.core.llvm as pnlvm from psyneulink.core.components.functions.function import FunctionError from psyneulink.core.components.functions.stateful.memoryfunctions import DictionaryMemory, \ ContentAddressableMemory diff --git a/tests/mechanisms/test_integrator_mechanism.py b/tests/mechanisms/test_integrator_mechanism.py index 934f6b4db4b..414c6837f13 100644 --- a/tests/mechanisms/test_integrator_mechanism.py +++ b/tests/mechanisms/test_integrator_mechanism.py @@ -2,7 +2,6 @@ import pytest import psyneulink as pnl -import psyneulink.core.llvm as pnlvm from psyneulink.core.compositions.composition import Composition from psyneulink.core.components.functions.function import FunctionError diff --git a/tests/mechanisms/test_objective_mechanism.py b/tests/mechanisms/test_objective_mechanism.py index 4b49963856e..079f5191de2 100644 --- a/tests/mechanisms/test_objective_mechanism.py +++ b/tests/mechanisms/test_objective_mechanism.py @@ -1,7 +1,6 @@ import numpy as np import pytest -import psyneulink.core.llvm as pnlvm from psyneulink.core.components.mechanisms.processing.objectivemechanism import ObjectiveMechanism VECTOR_SIZE=4 diff --git a/tests/mechanisms/test_processing_mechanism.py b/tests/mechanisms/test_processing_mechanism.py index b9a78c5abb2..e7716c5b870 100644 --- a/tests/mechanisms/test_processing_mechanism.py +++ b/tests/mechanisms/test_processing_mechanism.py @@ -1,7 +1,6 @@ import numpy as np import pytest -from psyneulink.core import llvm as pnlvm from psyneulink.core.components.functions.function import FunctionError from 
psyneulink.core.components.functions.nonstateful.selectionfunctions import ( ARG_MAX, ARG_MAX_ABS_INDICATOR, ARG_MAX_INDICATOR) diff --git a/tests/mechanisms/test_recurrent_transfer_mechanism.py b/tests/mechanisms/test_recurrent_transfer_mechanism.py index 469fed705a6..5dc162a19cb 100644 --- a/tests/mechanisms/test_recurrent_transfer_mechanism.py +++ b/tests/mechanisms/test_recurrent_transfer_mechanism.py @@ -2,7 +2,6 @@ import pytest import psyneulink as pnl -import psyneulink.core.llvm as pnlvm from psyneulink.core.compositions.composition import Composition from psyneulink.core.components.functions.nonstateful.combinationfunctions import Reduce diff --git a/tests/ports/test_output_ports.py b/tests/ports/test_output_ports.py index c330035353c..397d77b23c8 100644 --- a/tests/ports/test_output_ports.py +++ b/tests/ports/test_output_ports.py @@ -1,6 +1,5 @@ import numpy as np import psyneulink as pnl -import psyneulink.core.llvm as pnlvm import pytest From 89976749391e02926c347fff6e1aa3ad302169c4 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Fri, 25 Oct 2024 19:07:38 -0400 Subject: [PATCH 358/410] tests: Comment out imports only used in commented out code Signed-off-by: Jan Vesely --- tests/composition/test_composition.py | 2 +- tests/mechanisms/test_input_output_labels.py | 12 ++++++------ tests/models/test_greedy_agent.py | 2 +- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/tests/composition/test_composition.py b/tests/composition/test_composition.py index b9525494c24..3569f1386cd 100644 --- a/tests/composition/test_composition.py +++ b/tests/composition/test_composition.py @@ -45,7 +45,7 @@ from psyneulink.core.scheduling.scheduler import Scheduler, SchedulingMode from psyneulink.core.scheduling.time import TimeScale from psyneulink.core.globals.preferences.preferenceset import PreferenceEntry, PreferenceLevel -from psyneulink.library.components.mechanisms.processing.objective.comparatormechanism import ComparatorMechanism +#from psyneulink.library.components.mechanisms.processing.objective.comparatormechanism import ComparatorMechanism from psyneulink.library.components.mechanisms.modulatory.control.agt.lccontrolmechanism import LCControlMechanism from psyneulink.library.components.mechanisms.processing.transfer.recurrenttransfermechanism import \ RecurrentTransferMechanism diff --git a/tests/mechanisms/test_input_output_labels.py b/tests/mechanisms/test_input_output_labels.py index 714bf1d43d8..79cbb9f9584 100644 --- a/tests/mechanisms/test_input_output_labels.py +++ b/tests/mechanisms/test_input_output_labels.py @@ -1,10 +1,10 @@ -import numpy as np -import pytest +#import numpy as np +#import pytest -from psyneulink.core.compositions.composition import Composition -from psyneulink.core.components.mechanisms.processing.processingmechanism import ProcessingMechanism -from psyneulink.core.components.mechanisms.processing.transfermechanism import TransferMechanism -from psyneulink.core.globals.keywords import ENABLED, INPUT_LABELS_DICT, OUTPUT_LABELS_DICT +#from psyneulink.core.compositions.composition import Composition +#from psyneulink.core.components.mechanisms.processing.processingmechanism import ProcessingMechanism +#from psyneulink.core.components.mechanisms.processing.transfermechanism import TransferMechanism +#from psyneulink.core.globals.keywords import ENABLED, INPUT_LABELS_DICT, OUTPUT_LABELS_DICT # FIX 5/8/20 ELIMINATE SYSTEM [JDC] -- CONVERTED TO COMPOSITION, BUT REQUIRE REFACTORING OF LABEL HANDLING # class TestMechanismInputLabels: diff --git 
a/tests/models/test_greedy_agent.py b/tests/models/test_greedy_agent.py index f283bc21a43..45049892aa3 100644 --- a/tests/models/test_greedy_agent.py +++ b/tests/models/test_greedy_agent.py @@ -9,7 +9,7 @@ from psyneulink.core.components.mechanisms.processing.objectivemechanism import ObjectiveMechanism from psyneulink.core.components.mechanisms.processing.processingmechanism import ProcessingMechanism from psyneulink.core.components.mechanisms.processing.transfermechanism import TransferMechanism -from psyneulink.core.components.ports.inputport import SHADOW_INPUTS +#from psyneulink.core.components.ports.inputport import SHADOW_INPUTS from psyneulink.core.components.ports.modulatorysignals.controlsignal import ControlSignal from psyneulink.core.compositions.composition import Composition, NodeRole from psyneulink.core.globals.keywords import VARIANCE, NORMED_L0_SIMILARITY From 5981a01a5e3444f9682ad6764c05e33b9c266c6c Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Sat, 26 Oct 2024 09:17:44 -0400 Subject: [PATCH 359/410] tests: Drop unused imports Signed-off-by: Jan Vesely --- conftest.py | 1 - tests/composition/test_composition.py | 5 +---- tests/composition/test_control.py | 1 - tests/composition/test_emcomposition.py | 11 +---------- tests/composition/test_graph.py | 7 +------ tests/mechanisms/test_episodic_memory.py | 1 - tests/mechanisms/test_processing_mechanism.py | 2 -- 7 files changed, 3 insertions(+), 25 deletions(-) diff --git a/conftest.py b/conftest.py index d473349c7e6..22050caa6b5 100644 --- a/conftest.py +++ b/conftest.py @@ -5,7 +5,6 @@ import numpy as np import pytest import re -import sys import types import graph_scheduler as gs diff --git a/tests/composition/test_composition.py b/tests/composition/test_composition.py index 3569f1386cd..1b9b188133c 100644 --- a/tests/composition/test_composition.py +++ b/tests/composition/test_composition.py @@ -9,8 +9,7 @@ import psyneulink as pnl from psyneulink.core.components.functions.nonstateful.combinationfunctions import LinearCombination -from psyneulink.core.components.functions.nonstateful.learningfunctions import \ - LearningFunction, Reinforcement, BackPropagation, TDLearning +from psyneulink.core.components.functions.nonstateful.learningfunctions import Reinforcement, BackPropagation, TDLearning from psyneulink.core.components.functions.nonstateful.optimizationfunctions import GridSearch from psyneulink.core.components.functions.nonstateful.transferfunctions import \ Linear, Logistic, INTENSITY_COST_FCT_MULTIPLICATIVE_PARAM @@ -49,8 +48,6 @@ from psyneulink.library.components.mechanisms.modulatory.control.agt.lccontrolmechanism import LCControlMechanism from psyneulink.library.components.mechanisms.processing.transfer.recurrenttransfermechanism import \ RecurrentTransferMechanism -from psyneulink.library.components.mechanisms.processing.integrator.episodicmemorymechanism import \ - EpisodicMemoryMechanism from psyneulink.library.compositions.emcomposition import EMComposition logger = logging.getLogger(__name__) diff --git a/tests/composition/test_control.py b/tests/composition/test_control.py index 04a512b15ee..44898e5db5d 100644 --- a/tests/composition/test_control.py +++ b/tests/composition/test_control.py @@ -5,7 +5,6 @@ import psyneulink as pnl from psyneulink.core.globals.keywords import ALLOCATION_SAMPLES, CONTROL, PROJECTIONS -from psyneulink.core.globals.log import LogCondition from psyneulink.core.globals.sampleiterator import SampleIterator, SampleIteratorError, SampleSpec from psyneulink.core.globals.utilities 
import _SeededPhilox from psyneulink.core.components.mechanisms.modulatory.control.optimizationcontrolmechanism import \ diff --git a/tests/composition/test_emcomposition.py b/tests/composition/test_emcomposition.py index 996c0af2760..f748cfd8c1b 100644 --- a/tests/composition/test_emcomposition.py +++ b/tests/composition/test_emcomposition.py @@ -1,22 +1,13 @@ -import logging -import timeit as timeit -import os import numpy as np import pytest import psyneulink as pnl -from psyneulink.core.globals.keywords import AUTO, CONTROL, ALL, MAX_VAL, MAX_INDICATOR, PROB +from psyneulink.core.globals.keywords import AUTO, CONTROL from psyneulink.core.components.mechanisms.mechanism import Mechanism from psyneulink.library.compositions.emcomposition import EMComposition, EMCompositionError -module_seed = 0 -np.random.seed(0) - -logger = logging.getLogger(__name__) - - # All tests are set to run. If you need to skip certain tests, # see http://doc.pytest.org/en/latest/skipping.html diff --git a/tests/composition/test_graph.py b/tests/composition/test_graph.py index e4386f8b71e..f6c3b7134cb 100644 --- a/tests/composition/test_graph.py +++ b/tests/composition/test_graph.py @@ -1,9 +1,4 @@ -import numpy as np -import pytest - -from psyneulink.core.components.mechanisms.processing.processingmechanism import ProcessingMechanism -from psyneulink.core.components.projections.pathway.mappingprojection import MappingProjection -from psyneulink.core.compositions.composition import Composition, Graph, Vertex +from psyneulink.core.compositions.composition import Graph, Vertex class TestGraph: diff --git a/tests/mechanisms/test_episodic_memory.py b/tests/mechanisms/test_episodic_memory.py index d96a918ffc8..63ecb5f409b 100644 --- a/tests/mechanisms/test_episodic_memory.py +++ b/tests/mechanisms/test_episodic_memory.py @@ -4,7 +4,6 @@ from psyneulink.core.components.functions.function import FunctionError from psyneulink.core.components.functions.stateful.memoryfunctions import DictionaryMemory, \ ContentAddressableMemory -from psyneulink.core.components.functions.nonstateful.selectionfunctions import OneHot, ARG_MIN from psyneulink.library.components.mechanisms.processing.integrator.episodicmemorymechanism import \ EpisodicMemoryMechanism, EpisodicMemoryMechanismError diff --git a/tests/mechanisms/test_processing_mechanism.py b/tests/mechanisms/test_processing_mechanism.py index e7716c5b870..5c2c206eb1f 100644 --- a/tests/mechanisms/test_processing_mechanism.py +++ b/tests/mechanisms/test_processing_mechanism.py @@ -2,8 +2,6 @@ import pytest from psyneulink.core.components.functions.function import FunctionError -from psyneulink.core.components.functions.nonstateful.selectionfunctions import ( - ARG_MAX, ARG_MAX_ABS_INDICATOR, ARG_MAX_INDICATOR) from psyneulink.core.components.functions.nonstateful.learningfunctions import Hebbian, Reinforcement, TDLearning from psyneulink.core.components.functions.nonstateful.objectivefunctions import Distance from psyneulink.core.components.functions.nonstateful.distributionfunctions import NormalDist, ExponentialDist, \ From 04e9a595019ce54da6d667dacf52849c20be9a03 Mon Sep 17 00:00:00 2001 From: jdcpni Date: Sun, 27 Oct 2024 06:11:17 -0400 Subject: [PATCH 360/410] Refactor/emcomposition mechs and concatenate (#3084) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * • emcomposition.py - TransferMechanism -> ProcessingMechanism * • emcomposition.py concatenate_keys -> concatenate_queries --- .../EGO/Using EMComposition/DeclanParams.py | 4 
+- .../EGO Model - CSW with Simple Integrator.py | 4 +- .../EGO/Using EMComposition/TestParams.py | 4 +- .../modulatory/learning/EMstoragemechanism.py | 8 +- .../library/compositions/emcomposition.py | 233 +++++++++--------- tests/composition/test_emcomposition.py | 26 +- 6 files changed, 137 insertions(+), 142 deletions(-) diff --git a/Scripts/Models (Under Development)/EGO/Using EMComposition/DeclanParams.py b/Scripts/Models (Under Development)/EGO/Using EMComposition/DeclanParams.py index 4707ad758ef..16f881f7c06 100644 --- a/Scripts/Models (Under Development)/EGO/Using EMComposition/DeclanParams.py +++ b/Scripts/Models (Under Development)/EGO/Using EMComposition/DeclanParams.py @@ -52,8 +52,8 @@ def calc_prob(em_preds, test_ys): memory_capacity = ALL, # number of entries in EM memory; ALL=> match to number of stims memory_init = (0,.0001), # Initialize memory with random values in interval # memory_init = None, # Initialize with zeros - concatenate_keys = False, - # concatenate_keys = True, + concatenate_queries = False, + # concatenate_queries = True, # environment # curriculum_type = 'Interleaved', diff --git a/Scripts/Models (Under Development)/EGO/Using EMComposition/EGO Model - CSW with Simple Integrator.py b/Scripts/Models (Under Development)/EGO/Using EMComposition/EGO Model - CSW with Simple Integrator.py index acb3189d810..aabdecfd655 100644 --- a/Scripts/Models (Under Development)/EGO/Using EMComposition/EGO Model - CSW with Simple Integrator.py +++ b/Scripts/Models (Under Development)/EGO/Using EMComposition/EGO Model - CSW with Simple Integrator.py @@ -209,7 +209,7 @@ def construct_model(model_name:str=model_params['name'], previous_state_retrieval_weight:Union[float,int]=model_params['state_weight'], context_retrieval_weight:Union[float,int]=model_params['context_weight'], normalize_field_weights = model_params['normalize_field_weights'], - concatenate_keys = model_params['concatenate_keys'], + concatenate_queries = model_params['concatenate_queries'], learn_field_weights = model_params['learn_field_weights'], memory_capacity = memory_capacity, memory_init=model_params['memory_init'], @@ -260,7 +260,7 @@ def construct_model(model_name:str=model_params['name'], context_retrieval_weight ), normalize_field_weights=normalize_field_weights, - concatenate_keys=concatenate_keys, + concatenate_queries=concatenate_queries, learn_field_weights=learn_field_weights, learning_rate=learning_rate, enable_learning=enable_learning, diff --git a/Scripts/Models (Under Development)/EGO/Using EMComposition/TestParams.py b/Scripts/Models (Under Development)/EGO/Using EMComposition/TestParams.py index ae259795c1d..3b8caf6c0bc 100644 --- a/Scripts/Models (Under Development)/EGO/Using EMComposition/TestParams.py +++ b/Scripts/Models (Under Development)/EGO/Using EMComposition/TestParams.py @@ -18,8 +18,8 @@ memory_capacity = ALL, # number of entries in EM memory; ALL=> match to number of stims memory_init = (0,.0001), # Initialize memory with random values in interval # memory_init = None, # Initialize with zeros - concatenate_keys = False, - # concatenate_keys = True, + concatenate_queries = False, + # concatenate_queries = True, # environment # curriculum_type = 'Interleaved', diff --git a/psyneulink/library/components/mechanisms/modulatory/learning/EMstoragemechanism.py b/psyneulink/library/components/mechanisms/modulatory/learning/EMstoragemechanism.py index 0e1c814782f..f9d296eef87 100644 --- a/psyneulink/library/components/mechanisms/modulatory/learning/EMstoragemechanism.py +++ 
b/psyneulink/library/components/mechanisms/modulatory/learning/EMstoragemechanism.py @@ -282,7 +282,7 @@ class EMStorageMechanism(LearningMechanism): concatenation_node : OutputPort or Mechanism : default None specifies the `OutputPort` or `Mechanism` in which the `value ` of the `key fields - ` are concatenated (see `concatenate keys ` + ` are concatenated (see `concatenate keys ` for additional details). memory_matrix : List or 2d np.array : default None @@ -657,12 +657,12 @@ def _validate_params(self, request_set, target_set=None, context=None): f"the same number of items as its 'fields' arg ({len(fields)}).") num_keys = len([i for i in field_types if i==1]) - concatenate_keys = 'concatenation_node' in request_set and request_set['concatenation_node'] is not None + concatenate_queries = 'concatenation_node' in request_set and request_set['concatenation_node'] is not None # Ensure the number of learning_signals is equal to the number of fields + number of keys if LEARNING_SIGNALS in request_set: learning_signals = request_set[LEARNING_SIGNALS] - if concatenate_keys: + if concatenate_queries: num_match_fields = 1 else: num_match_fields = num_keys @@ -674,7 +674,7 @@ def _validate_params(self, request_set, target_set=None, context=None): # Ensure shape of learning_signals matches shapes of matrices for match nodes (i.e., either keys or concatenate) for i, learning_signal in enumerate(learning_signals[:num_match_fields]): learning_signal_shape = learning_signal.parameters.matrix._get(context).shape - if concatenate_keys: + if concatenate_queries: memory_matrix_field_shape = np.array([np.concatenate(row, dtype=object).flatten() for row in memory_matrix[:,0:num_keys]]).T.shape else: diff --git a/psyneulink/library/compositions/emcomposition.py b/psyneulink/library/compositions/emcomposition.py index 4f9ff7c0b7f..e0ef65c2443 100644 --- a/psyneulink/library/compositions/emcomposition.py +++ b/psyneulink/library/compositions/emcomposition.py @@ -7,12 +7,12 @@ # ********************************************* EMComposition ************************************************* +# # TODO: # - QUESTION: # - SHOULD differential of SoftmaxGainControl Node be included in learning? # - SHOULD MEMORY DECAY OCCUR IF STORAGE DOES NOT? CURRENTLY IT DOES NOT (SEE EMStorage Function) -# - FIX: NAMING # - FIX: Concatenation: # - LLVM for function and derivative # - Add Concatenate to pytorchcreator_function @@ -456,7 +456,7 @@ they sum to 1.0, and are used to weight the corresponding fields during retrieval (see `Weight fields `). If False, the raw values of the `field_weights ` are used to weight (i.e., multiply) the retrieved value of each field. This setting is ignored if **field_weights** - is None or `concatenate_keys ` is in effect. + is None or `concatenate_queries ` is in effect. .. _EMComposition_Field_Names: @@ -464,16 +464,16 @@ match the number of fields specified in the memory_template. If specified, the names are used to label the nodes of the EMComposition. If not specified, the fields are labeled generically as "Key 0", "Key 1", etc.. -.. _EMComposition_Concatenate_Keys: +.. _EMComposition_Concatenate_Queries: -* **concatenate_keys**: specifies whether keys are concatenated before a match is made to items in memory. +* **concatenate_queries**: specifies whether keys are concatenated before a match is made to items in memory. This is False by default. 
It is also ignored if the `field_weights ` for all keys are not all equal (i.e., all non-zero weights are not equal -- see `field_weights `) and/or - `normalize_memories ` is set to False. Setting concatenate_keys to True in either + `normalize_memories ` is set to False. Setting concatenate_queries to True in either of those cases issues a warning, and the setting is ignored. If the key `field_weights ` (i.e., all non-zero values) are all equal *and* **normalize_memories** is set to True, then setting - **concatenate_keys** causes a `concatenate_keys_node ` to be created that - receives input from all of the `query_input_nodes ` and passes them as a single + **concatenate_queries** causes a `concatenate_queries_node ` to be created + that receives input from all of the `query_input_nodes ` and passes them as a single vector to the `mactch_node `. .. note:: @@ -484,8 +484,8 @@ .. note:: All `query_input_nodes ` and `retrieved_nodes ` - are always preserved, even when `concatenate_keys ` is True, so that separate - inputs can be provided for each key, and the value of each key can be retrieved separately. + are always preserved, even when `concatenate_queries ` is True, so that + separate inputs can be provided for each key, and the value of each key can be retrieved separately. .. _EMComposition_Memory_Decay_Rate @@ -667,7 +667,7 @@ `field_weights ` are the same for all `keys ` and `normalize_memories ` is True, then the inputs provided to the `query_input_nodes ` are concatenated into a single vector (in the - `concatenate_keys_node `), which is passed to a single `match_node + `concatenate_queries_node `), which is passed to a single `match_node `. This may be more computationally efficient than passing each query through its own `match_node `, COMMENT: @@ -676,15 +676,13 @@ the same stimulus). COMMENT however it will not necessarily produce the same results as passing each query through its own `match_node - ` (see `concatenate keys <`concatenate_keys_node - >` for additional - information). + ` (see `concatenate keys <`concatenate_queries_node>` for additional information). * **Match memories by field**. The values of each `query_input_node ` (or the - `concatenate_keys_node ` if `concatenate_keys ` - attribute is True) are passed through a `MappingProjection` that computes the dot product of the input with each - memory for the corresponding field, the result of which is passed to the corresponding `match_node - `. + `concatenate_queries_node ` if `concatenate_queries + ` attribute is True) are passed through a `MappingProjection` that computes + the dot product of the input with each memory for the corresponding field, the result of which is passed to the + corresponding `match_node `. * **Softmax normalize matches over fields**. The dot product for each key field is passed from the `match_node ` to the corresponding `softmax_node `, which applies @@ -977,12 +975,12 @@ **Use of field_weights to specify relative contribution of fields to matching process.** -Note that in this case, the `concatenate_keys_node ` has been replaced by a -pair of `retreival_weighting_nodes `, one for each key field. This is because +Note that in this case, the `concatenate_queries_node ` has been replaced by +a pair of `retreival_weighting_nodes `, one for each key field. 
This is because the keys were assigned different weights; when they are assigned equal weights, or if no weights are specified, and `normalize_memories ` is `True`, then the keys are concatenated and are -concatenated for efficiency of processing. This can be suppressed by specifying `concatenate_keys` as `False` -(see `concatenate_keys ` for additional details). +concatenated for efficiency of processing. This can be suppressed by specifying `concatenate_queries` as `False` +(see `concatenate_queries ` for additional details). COMMENT .. _EMComposition_Class_Reference: @@ -993,11 +991,10 @@ import numpy as np import graph_scheduler as gs import warnings + import psyneulink.core.scheduling.condition as conditions from psyneulink._typing import Optional, Union - -# from psyneulink.library.compositions import torch_available from psyneulink.core.components.functions.nonstateful.transferfunctions import SoftMax, LinearMatrix from psyneulink.core.components.functions.nonstateful.combinationfunctions import Concatenate, LinearCombination from psyneulink.core.components.functions.nonstateful.selectionfunctions import ARG_MAX, ARG_MAX_INDICATOR @@ -1007,7 +1004,6 @@ from psyneulink.library.compositions.autodiffcomposition import AutodiffComposition, torch_available from psyneulink.library.components.mechanisms.modulatory.learning.EMstoragemechanism import EMStorageMechanism from psyneulink.core.components.mechanisms.processing.processingmechanism import ProcessingMechanism -from psyneulink.core.components.mechanisms.processing.transfermechanism import TransferMechanism from psyneulink.core.components.mechanisms.modulatory.control.controlmechanism import ControlMechanism from psyneulink.core.components.mechanisms.modulatory.control.gating.gatingmechanism import GatingMechanism from psyneulink.core.components.projections.pathway.mappingprojection import MappingProjection @@ -1087,7 +1083,7 @@ class EMComposition(AutodiffComposition): field_weights=None, \ normalize_field_weights=True, \ field_names=None, \ - concatenate_keys=False, \ + concatenate_queries=False, \ normalize_memories=True, \ softmax_gain=THRESHOLD, \ storage_prob=1.0, \ @@ -1132,9 +1128,9 @@ class EMComposition(AutodiffComposition): specifies the optional names assigned to each field in the memory_template; see `field names ` for details. - concatenate_keys : bool : default False + concatenate_queries : bool : default False specifies whether to concatenate the keys into a single field before matching them to items in - the corresponding fields in memory; see `concatenate keys ` for details. + the corresponding fields in memory; see `concatenate keys ` for details. normalize_memories : bool : default True specifies whether keys and memories are normalized before computing their dot product (similarity); @@ -1229,9 +1225,9 @@ class EMComposition(AutodiffComposition): determines which names that can be used to label fields in `memory `; see `field_names ` for additional details. - concatenate_keys : bool + concatenate_queries : bool determines whether keys are concatenated into a single field before matching them to items in `memory - `; see `concatenate keys ` for additional details. + `; see `concatenate keys ` for additional details. normalize_memories : bool determines whether keys and memories are normalized before computing their dot product (similarity); @@ -1279,7 +1275,7 @@ class EMComposition(AutodiffComposition): .. 
_EMComposition_Nodes: - query_input_nodes : list[TransferMechanism] + query_input_nodes : list[ProcessingMechanism] `INPUT ` `Nodes ` that receive keys used to determine the item to be retrieved from `memory `, and then themselves stored in `memory ` (see `Match memories by field ` for additional details). @@ -1287,29 +1283,30 @@ class EMComposition(AutodiffComposition): however, if `field_names ` is specified, then the name of each query_input_node is assigned the corresponding field name appended with * [QUERY]*. - value_input_nodes : list[TransferMechanism] + value_input_nodes : list[ProcessingMechanism] `INPUT ` `Nodes ` that receive values to be stored in `memory `; these are not used in the matching process used for retrieval. By default these are assigned the name *VALUE_n_INPUT* where n is the field number (starting from 0); however, if `field_names ` is specified, then the name of each value_input_node is assigned the corresponding field name appended with * [VALUE]*. - input_nodes : list[TransferMechanism] + input_nodes : list[ProcessingMechanism] Full list of `INPUT ` `Nodes ` ordered with query_input_nodes first followed by value_input_nodes; used primarily for internal computations - input_nodes_by_fields : list[TransferMechanism] + input_nodes_by_fields : list[ProcessingMechanism] Full list of `INPUT ` `Nodes ` in the same order specified in the **field_names** argument of the constructor and in `self.field_names `. - concatenate_keys_node : TransferMechanism - `TransferMechanism` that concatenates the inputs to `query_input_nodes ` into a - single vector used for the matching processing if `concatenate keys ` is True. - This is not created if the **concatenate_keys** argument to the EMComposition's constructor is False or is - overridden (see `concatenate_keys `), or there is only one query_input_node. + concatenate_queries_node : ProcessingMechanism + `ProcessingMechanism` that concatenates the inputs to `query_input_nodes ` + into a single vector used for the matching processing if `concatenate keys ` + is True. This is not created if the **concatenate_queries** argument to the EMComposition's constructor is + False or is overridden (see `concatenate_queries `), or there is only one + query_input_node. - match_nodes : list[TransferMechanism] - `TransferMechanisms ` that receive the dot product of each key and those stored in + match_nodes : list[ProcessingMechanism] + `ProcessingMechanisms ` that receive the dot product of each key and those stored in the corresponding field of `memory ` (see `Match memories by field ` for additional details). These are assigned names that prepend *MATCH_n* to the name of the corresponding `query_input_nodes `. @@ -1320,8 +1317,8 @@ class EMComposition(AutodiffComposition): `softmax_gain ` is specified as *CONTROL* (see `softmax_gain ` for details). - softmax_nodes : list[TransferMechanism] - `TransferMechanisms ` that compute the softmax over the vectors received + softmax_nodes : list[ProcessingMechanism] + `ProcessingMechanisms ` that compute the softmax over the vectors received from the corresponding `match_nodes ` (see `Softmax normalize matches over fields ` for additional details). @@ -1350,8 +1347,8 @@ class EMComposition(AutodiffComposition): only if `use_gating_for_weighting ` is True and more than one `key field ` is specified (see `Fields ` for additional details). 
- combined_softmax_node : TransferMechanism - `TransferMechanism` that receives the softmax normalized dot products of the keys and memories + combined_softmax_node : ProcessingMechanism + `ProcessingMechanism` that receives the softmax normalized dot products of the keys and memories from the `softmax_nodes `, weighted by the `field_weights_nodes ` if more than one `key field ` is specified (or `retrieval_gating_nodes ` if `use_gating_for_weighting @@ -1359,8 +1356,8 @@ class EMComposition(AutodiffComposition): retrieve the corresponding memory for each field from `memory ` (see `Retrieve values by field ` for additional details). - retrieved_nodes : list[TransferMechanism] - `TransferMechanisms ` that receive the vector retrieved for each field in `memory + retrieved_nodes : list[ProcessingMechanism] + `ProcessingMechanisms ` that receive the vector retrieved for each field in `memory ` (see `Retrieve values by field ` for additional details); these are assigned the same names as the `query_input_nodes ` and `value_input_nodes ` to which they correspond appended with the suffix @@ -1391,8 +1388,8 @@ class Parameters(AutodiffComposition.Parameters): Attributes ---------- - concatenate_keys - see `concatenate_keys ` + concatenate_queries + see `concatenate_queries ` :default value: False :type: ``bool`` @@ -1496,7 +1493,7 @@ class Parameters(AutodiffComposition.Parameters): field_weights = Parameter(None) normalize_field_weights = Parameter(True) field_names = Parameter(None, structural=True) - concatenate_keys = Parameter(False, structural=True) + concatenate_queries = Parameter(False, structural=True) normalize_memories = Parameter(True) softmax_gain = Parameter(1.0, modulable=True) softmax_threshold = Parameter(.001, modulable=True, specify_none=True) @@ -1574,7 +1571,7 @@ def __init__(self, field_names:Optional[list]=None, field_weights:tuple=None, normalize_field_weights:bool=True, - concatenate_keys:bool=False, + concatenate_queries:bool=False, normalize_memories:bool=True, softmax_gain:Union[float, ADAPTIVE, CONTROL]=1.0, softmax_threshold:Optional[float]=.001, @@ -1599,13 +1596,13 @@ def __init__(self, memory_capacity, memory_fill, field_weights) - field_weights, field_names, concatenate_keys = self._parse_fields(field_weights, - normalize_field_weights, - field_names, - concatenate_keys, - normalize_memories, - learning_rate, - name) + field_weights, field_names, concatenate_queries = self._parse_fields(field_weights, + normalize_field_weights, + field_names, + concatenate_queries, + normalize_memories, + learning_rate, + name) if memory_decay_rate is AUTO: memory_decay_rate = 1 / memory_capacity @@ -1621,7 +1618,7 @@ def __init__(self, memory_capacity = memory_capacity, field_weights = field_weights, field_names = field_names, - concatenate_keys = concatenate_keys, + concatenate_queries = concatenate_queries, softmax_gain = softmax_gain, softmax_threshold = softmax_threshold, softmax_choice = softmax_choice, @@ -1641,7 +1638,7 @@ def __init__(self, self._construct_pathways(self.memory_template, self.memory_capacity, self.field_weights, - self.concatenate_keys, + self.concatenate_queries, self.normalize_memories, self.softmax_gain, self.softmax_threshold, @@ -1701,10 +1698,8 @@ def __init__(self, # Suppress warnings for no efferent Projections for node in self.value_input_nodes: - node.output_ports['RESULT'].parameters.require_projection_in_composition.set(False, override=True) - for port in self.combined_softmax_node.output_ports: - if 'RESULT' in port.name: - 
port.parameters.require_projection_in_composition.set(False, override=True) + node.output_port.parameters.require_projection_in_composition.set(False, override=True) + self.combined_softmax_node.output_port.parameters.require_projection_in_composition.set(False, override=True) # Suppress field_weight_nodes as INPUT nodes of the Composition for node in self.field_weight_nodes: @@ -1873,7 +1868,7 @@ def _parse_fields(self, field_weights, normalize_field_weights, field_names, - concatenate_keys, + concatenate_queries, normalize_memories, learning_rate, name): @@ -1923,14 +1918,14 @@ def _parse_fields(self, self.value_names = [f'{i} [VALUE]' for i in range(self.num_values)] if self.num_values > 1 else ['VALUE'] parsed_field_names = self.key_names + self.value_names - user_specified_concatenate_keys = concatenate_keys or False - parsed_concatenate_keys = (user_specified_concatenate_keys + user_specified_concatenate_queries = concatenate_queries or False + parsed_concatenate_queries = (user_specified_concatenate_queries and self.num_keys > 1 and np.all(keys_weights == keys_weights[0]) and normalize_memories) - # if concatenate_keys was forced to be False when user specified it as True, issue warning - if user_specified_concatenate_keys and not parsed_concatenate_keys: - # Issue warning if concatenate_keys is True but either + # if concatenate_queries was forced to be False when user specified it as True, issue warning + if user_specified_concatenate_queries and not parsed_concatenate_queries: + # Issue warning if concatenate_queries is True but either # field weights are not all equal and/or normalize_memories is False fw_error_msg = nm_error_msg = fw_correction_msg = nm_correction_msg = None if not all(np.all(keys_weights[i] == keys_weights[0] for i in range(len(keys_weights)))): @@ -1945,11 +1940,11 @@ def _parse_fields(self, else: error_msg = fw_error_msg or nm_error_msg correction_msg = fw_correction_msg or nm_correction_msg - warnings.warn(f"The 'concatenate_keys' arg for '{name}' is True but {error_msg}; " + warnings.warn(f"The 'concatenate_queries' arg for '{name}' is True but {error_msg}; " f"concatenation will be ignored. To use concatenation, {correction_msg}.") self.learning_rate = learning_rate - return parsed_field_weights, parsed_field_names, parsed_concatenate_keys + return parsed_field_weights, parsed_field_names, parsed_concatenate_queries def _parse_memory_shape(self, memory_template): """Parse shape of memory_template to determine number of entries and fields""" @@ -1975,7 +1970,7 @@ def _construct_pathways(self, memory_template, memory_capacity, field_weights, - concatenate_keys, + concatenate_queries, normalize_memories, softmax_gain, softmax_threshold, @@ -1991,7 +1986,7 @@ def _construct_pathways(self, # Construct Nodes -------------------------------------------------------------------------------- - field_weighting = len([weight for weight in field_weights if weight]) > 1 and not concatenate_keys + field_weighting = len([weight for weight in field_weights if weight]) > 1 and not concatenate_queries # First, construct Nodes of Composition with their Projections self.query_input_nodes = self._construct_query_input_nodes(field_weights) @@ -2006,16 +2001,16 @@ def _construct_pathways(self, self.input_nodes_by_fields[self.value_indices[i]] = self.value_input_nodes[i] assert all(self.input_nodes_by_fields), "PROGRAM ERROR: input_nodes_by_fields not fully populated." 
- self.concatenate_keys_node = self._construct_concatenate_keys_node(concatenate_keys) + self.concatenate_queries_node = self._construct_concatenate_queries_node(concatenate_queries) self.match_nodes = self._construct_match_nodes(memory_template, memory_capacity, - concatenate_keys,normalize_memories) + concatenate_queries,normalize_memories) self.softmax_nodes = self._construct_softmax_nodes(memory_capacity, field_weights, softmax_gain, softmax_threshold, softmax_choice) self.field_weight_nodes = self._construct_field_weight_nodes(field_weights, - concatenate_keys, + concatenate_queries, use_gating_for_weighting) self.weighted_softmax_nodes = self._construct_weighted_softmax_nodes(memory_capacity, use_gating_for_weighting) self.softmax_gain_control_nodes = self._construct_softmax_gain_control_nodes(softmax_gain) @@ -2026,30 +2021,30 @@ def _construct_pathways(self, if use_storage_node: self.storage_node = self._construct_storage_node(memory_template, field_weights, - self.concatenate_keys_node, + self.concatenate_queries_node, memory_decay_rate, storage_prob) - # Do some validation and get singleton Nodes for concatenated keys - if self.concatenate_keys: + # Do some validation and get singleton softmax and match Nodes for concatenated queries + if self.concatenate_queries: softmax_node = self.softmax_nodes.pop() assert not self.softmax_nodes, \ - f"PROGRAM ERROR: Too many softmax_nodes ({len(self.softmax_nodes)}) for concatenated keys." + f"PROGRAM ERROR: Too many softmax_nodes ({len(self.softmax_nodes)}) for concatenated queries." assert len(self.softmax_gain_control_nodes) <= 1, \ (f"PROGRAM ERROR: Too many softmax_gain_control_nodes " - f"{len(self.softmax_gain_control_nodes)}) for concatenated keys.") + f"{len(self.softmax_gain_control_nodes)}) for concatenated queries.") match_node = self.match_nodes.pop() - assert not self.softmax_nodes, \ - f"PROGRAM ERROR: Too many match_nodes ({len(self.match_nodes)}) for concatenated keys." + assert not self.match_nodes, \ + f"PROGRAM ERROR: Too many match_nodes ({len(self.match_nodes)}) for concatenated queries." assert not self.field_weight_nodes, \ - f"PROGRAM ERROR: There should be no field_weight_nodes for concatenated keys." + f"PROGRAM ERROR: There should be no field_weight_nodes for concatenated queries." # Construct Pathways -------------------------------------------------------------------------------- # Set up pathways WITHOUT PsyNeuLink learning pathways if not self.enable_learning: self.add_nodes(self.query_input_nodes + self.value_input_nodes) - if self.concatenate_keys: - self.add_nodes([self.concatenate_keys_node, match_node, softmax_node]) + if self.concatenate_queries: + self.add_nodes([self.concatenate_queries_node, match_node, softmax_node]) else: self.add_nodes(self.match_nodes + self.softmax_nodes + @@ -2068,7 +2063,7 @@ def _construct_pathways(self, # Key pathways for i in range(self.num_keys): # Regular pathways - if not self.concatenate_keys: + if not self.concatenate_queries: pathway = [self.query_input_nodes[i], self.match_nodes[i], self.softmax_nodes[i], @@ -2080,7 +2075,7 @@ def _construct_pathways(self, # Key-concatenated pathways else: pathway = [self.query_input_nodes[i], - self.concatenate_keys_node, + self.concatenate_queries_node, match_node, softmax_node, self.combined_softmax_node] @@ -2122,10 +2117,10 @@ def _construct_query_input_nodes(self, field_weights)->list: f"PROGRAM ERROR: number of keys ({self.num_keys}) does not match number of " \ f"non-zero values in field_weights ({len(self.key_indices)})." 
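
The pathway assembled above (query_input -> match -> softmax -> field weighting -> combined softmax -> retrieved values) amounts to a soft, content-addressable read of memory. The following numpy sketch captures only that computation, with illustrative names and shapes; it is not the PsyNeuLink node API:

    import numpy as np

    def softmax(x, gain=1.0):
        e = np.exp(gain * (x - np.max(x)))
        return e / e.sum()

    def em_read(queries, memory_keys, memory_values, field_weights, gain=1.0):
        """queries:       one 1d array per key field
        memory_keys:   one (capacity x key_dim) array per key field
        memory_values: (capacity x value_dim) array for a value field
        field_weights: one scalar weight per key field
        """
        # Match memories by field: dot product of each query with the stored keys
        matches = [keys @ q for q, keys in zip(queries, memory_keys)]
        # Softmax-normalize each match vector, then scale it by its field weight
        weighted = [w * softmax(m, gain) for m, w in zip(matches, field_weights)]
        # Sum the weighted softmaxes into a single weighting over memory entries
        combined = np.sum(weighted, axis=0)
        # Retrieve the value field as the weighted combination of stored entries
        return combined @ memory_values

When there is only one key, or when queries are concatenated, the explicit field weighting drops out, which is why field_weight_nodes and weighted_softmax_nodes are only constructed for multiple, separately weighted keys.
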
- # query_input_nodes = [TransferMechanism(size=len(self.entry_template[self.key_indices[i]]), + # query_input_nodes = [ProcessingMechanism(size=len(self.entry_template[self.key_indices[i]]), # name=f'{self.key_names[self.key_indices[i]]} [QUERY]') # for i in range(self.num_keys)] - query_input_nodes = [TransferMechanism(size=len(self.entry_template[self.key_indices[i]]), + query_input_nodes = [ProcessingMechanism(size=len(self.entry_template[self.key_indices[i]]), name=f'{self.key_names[i]} [QUERY]') for i in range(self.num_keys)] @@ -2144,22 +2139,22 @@ def _construct_value_input_nodes(self, field_weights)->list: f"PROGRAM ERROR: number of values ({self.num_values}) does not match number of " \ f"non-zero values in field_weights ({len(value_indices)})." - value_input_nodes = [TransferMechanism(size=len(self.entry_template[value_indices[i]]), + value_input_nodes = [ProcessingMechanism(size=len(self.entry_template[value_indices[i]]), name= f'{self.value_names[i]} [VALUE]') for i in range(self.num_values)] return value_input_nodes - def _construct_concatenate_keys_node(self, concatenate_keys)->ProcessingMechanism: + def _construct_concatenate_queries_node(self, concatenate_queries)->ProcessingMechanism: """Create node that concatenates the inputs for all keys into a single vector Used to create a matrix for Projectoin from match / memory weights from concatenate_node -> match_node """ # One node that concatenates inputs from all keys - if not concatenate_keys: + if not concatenate_queries: return None else: return ProcessingMechanism(function=Concatenate, - input_ports=[{NAME: 'CONCATENATE_KEYS', + input_ports=[{NAME: 'CONCATENATE_QUERIES', SIZE: len(self.query_input_nodes[i].output_port.value), PROJECTIONS: MappingProjection( name=f'{self.key_names[i]} to CONCATENATE', @@ -2168,17 +2163,17 @@ def _construct_concatenate_keys_node(self, concatenate_keys)->ProcessingMechanis for i in range(self.num_keys)], name='CONCATENATE KEYS') - def _construct_match_nodes(self, memory_template, memory_capacity, concatenate_keys, normalize_memories)->list: + def _construct_match_nodes(self, memory_template, memory_capacity, concatenate_queries, normalize_memories)->list: """Create nodes that, for each key field, compute the similarity between the input and each item in memory. - - If self.concatenate_keys is True, then all inputs for keys from concatenated_keys_node are assigned a single - match_node, and weights from memory_template are assigned to a Projection from concatenated_keys_node to - that match_node. + - If self.concatenate_queries is True, then all inputs for keys from concatenated_keys_node are + assigned a single match_node, and weights from memory_template are assigned to a Projection + from concatenated_keys_node to that match_node. - Otherwise, each key has its own match_node, and weights from memory_template are assigned to a Projection from each query_input_node[i] to each match_node[i]. - Each element of the output represents the similarity between the query_input and one key in memory. 
""" - if concatenate_keys: + if concatenate_queries: # Get fields of memory structure corresponding to the keys # Number of rows should total number of elements over all keys, # and columns should number of items in memory @@ -2186,10 +2181,10 @@ def _construct_match_nodes(self, memory_template, memory_capacity, concatenate_k for i in range(memory_capacity)]).transpose() matrix = np.array(matrix.tolist()) match_nodes = [ - TransferMechanism( + ProcessingMechanism( input_ports={NAME: 'CONCATENATED_INPUTS', SIZE: memory_capacity, - PROJECTIONS: MappingProjection(sender=self.concatenate_keys_node, + PROJECTIONS: MappingProjection(sender=self.concatenate_queries_node, matrix=matrix, function=LinearMatrix( normalize=normalize_memories), @@ -2199,7 +2194,7 @@ def _construct_match_nodes(self, memory_template, memory_capacity, concatenate_k # One node for each key else: match_nodes = [ - TransferMechanism( + ProcessingMechanism( input_ports= { SIZE:memory_capacity, PROJECTIONS: MappingProjection(sender=self.query_input_nodes[i].output_port, @@ -2238,7 +2233,7 @@ def _construct_softmax_nodes(self, memory_capacity, field_weights, # ARG_MAX_INDICATOR returns the entry unmodified softmax_choice = ARG_MAX_INDICATOR - softmax_nodes = [TransferMechanism(input_ports={SIZE:memory_capacity, + softmax_nodes = [ProcessingMechanism(input_ports={SIZE:memory_capacity, PROJECTIONS: MappingProjection( sender=match_node.output_port, matrix=IDENTITY_MATRIX, @@ -2267,12 +2262,12 @@ def _construct_softmax_gain_control_nodes(self, softmax_gain)->list: return softmax_gain_control_nodes - def _construct_field_weight_nodes(self, field_weights, concatenate_keys, use_gating_for_weighting)->list: + def _construct_field_weight_nodes(self, field_weights, concatenate_queries, use_gating_for_weighting)->list: """Create ProcessingMechanisms that weight each key's softmax contribution to the retrieved values.""" field_weight_nodes = [] - if not concatenate_keys and self.num_keys > 1: + if not concatenate_queries and self.num_keys > 1: if use_gating_for_weighting: field_weight_nodes = [GatingMechanism(input_ports={VARIABLE: np.array(field_weights[i]), PARAMS:{DEFAULT_INPUT: DEFAULT_VARIABLE}, @@ -2347,7 +2342,7 @@ def _construct_retrieved_nodes(self, memory_template)->list: """Create nodes that report the value field(s) for the item(s) matched in memory. """ self.retrieved_key_nodes = \ - [TransferMechanism(input_ports={SIZE: len(self.query_input_nodes[i].variable[0]), + [ProcessingMechanism(input_ports={SIZE: len(self.query_input_nodes[i].variable[0]), PROJECTIONS: MappingProjection( sender=self.combined_softmax_node, @@ -2358,7 +2353,7 @@ def _construct_retrieved_nodes(self, memory_template)->list: for i in range(self.num_keys)] self.retrieved_value_nodes = \ - [TransferMechanism(input_ports={SIZE: len(self.value_input_nodes[i].variable[0]), + [ProcessingMechanism(input_ports={SIZE: len(self.value_input_nodes[i].variable[0]), PROJECTIONS: MappingProjection( sender=self.combined_softmax_node, @@ -2379,7 +2374,7 @@ def _construct_retrieved_nodes(self, memory_template)->list: def _construct_storage_node(self, memory_template, field_weights, - concatenate_keys_node, + concatenate_queries_node, memory_decay_rate, storage_prob)->list: """Create EMStorageMechanism that stores the key and value inputs in memory. 
@@ -2396,8 +2391,8 @@ def _construct_storage_node(self, - **field_types** -- a list of the same length as ``fields``, containing 1's for key fields and 0's for value fields; - - **concatenate_keys_node** -- node used to concatenate keys (if `concatenate_keys - ` is `True`) or None; + - **concatenate_queries_node** -- node used to concatenate keys + (if `concatenate_queries ` is `True`) or None; - **memory_matrix** -- `memory_template `); @@ -2418,7 +2413,7 @@ def _construct_storage_node(self, for i in range(self.num_fields)], fields=[self.input_nodes[i] for i in range(self.num_fields)], field_types=[0 if weight == 0 else 1 for weight in field_weights], - concatenation_node=concatenate_keys_node, + concatenation_node=concatenate_queries_node, memory_matrix=memory_template, learning_signals=learning_signals, storage_prob=storage_prob, @@ -2489,19 +2484,19 @@ def _encode_memory(self, context=None): row_norms = np.sum(field_norms, axis=1) idx_of_min = np.argmin(row_norms) - # If concatenate_keys is True, assign entry to col of matrix for Projection from concatenate_node to match_node - if self.concatenate_keys_node: - # Get entry to store from concatenate_keys_node - entry_to_store = self.concatenate_keys_node.value[0] + # If concatenate_queries=True, assign entry to col of matrix for Projection from concatenate_node to match_node + if self.concatenate_queries_node: + # Get entry to store from concatenate_queries_node + entry_to_store = self.concatenate_queries_node.value[0] # Get matrix of weights for Projection from concatenate_node to match_node - field_memories = self.concatenate_keys_node.efferents[0].parameters.matrix.get(context) + field_memories = self.concatenate_queries_node.efferents[0].parameters.matrix.get(context) # Decay existing memories before storage if memory_decay_rate is specified if self.memory_decay_rate: field_memories *= self.parameters.memory_decay_rate._get(context) # Assign input vector to col of matrix that has lowest norm (i.e., weakest memory) field_memories[:,idx_of_min] = np.array(entry_to_store) # Assign updated matrix to Projection - self.concatenate_keys_node.efferents[0].parameters.matrix.set(field_memories, context) + self.concatenate_queries_node.efferents[0].parameters.matrix.set(field_memories, context) # Otherwise, assign input for each key field to col of matrix for Projection from query_input_node to match_node else: @@ -2570,8 +2565,8 @@ def _identify_target_nodes(self, context)->list: return target_nodes def infer_backpropagation_learning_pathways(self, execution_mode, context=None): - if self.concatenate_keys: - raise EMCompositionError(f"EMComposition does not support learning with 'concatenate_keys'=True.") + if self.concatenate_queries: + raise EMCompositionError(f"EMComposition does not support learning with 'concatenate_queries'=True.") super().infer_backpropagation_learning_pathways(execution_mode, context=context) def do_gradient_optimization(self, retain_in_pnl_options, context, optimization_num=None): diff --git a/tests/composition/test_emcomposition.py b/tests/composition/test_emcomposition.py index f748cfd8c1b..3c90676b990 100644 --- a/tests/composition/test_emcomposition.py +++ b/tests/composition/test_emcomposition.py @@ -75,7 +75,7 @@ def test_two_calls_no_args(self): [[0,2],[0,0,0],[0,0]]], .1, [1,1,0], None, None, None, 2, 3, 2, 1, False,), (12.3, [[[0,1],[0,0,0],[0,0]], # two entries specified, fields have same weights, but concatenate is False [[0,2],[0,0,0],[0,0]]], .1, [1,1,0], None, None, None, 2, 3, 2, 1, False), - (13, 
[[[0,1],[0,0,0],[0,0]], # two entries specified, fields have same weights, and concatenate_keys is True + (13, [[[0,1],[0,0,0],[0,0]], # two entries specified, fields have same weights, and concatenate_queries is True [[0,2],[0,0,0],[0,0]]], .1, [1,1,0], True, None, None, 2, 3, 2, 1, True), (14, [[[0,1],[0,0,0],[0,0]], # two entries specified, all fields are keys [[0,2],[0,0,0],[0,0]]], .1, [1,1,1], None, None, None, 2, 3, 3, 0, False), @@ -91,7 +91,7 @@ def test_two_calls_no_args(self): [[0,3],[0,0,0],[0,0]], [[0,4],[0,0,0],[0,0]]], .1, [1,2,0], None, None, None, 4, 3, 2, 1, False), ] - args_names = "test_num, memory_template, memory_fill, field_weights, concatenate_keys, normalize_memories, " \ + args_names = "test_num, memory_template, memory_fill, field_weights, concatenate_queries, normalize_memories, " \ "softmax_gain, repeat, num_fields, num_keys, num_values, concatenate_node" @pytest.mark.parametrize(args_names, test_structure_data, @@ -104,7 +104,7 @@ def test_structure(self, memory_template, memory_fill, field_weights, - concatenate_keys, + concatenate_queries, normalize_memories, softmax_gain, repeat, @@ -133,8 +133,8 @@ def test_structure(self, params.update({'memory_fill': memory_fill}) if field_weights is not None: params.update({'field_weights': field_weights}) - if concatenate_keys is not None: - params.update({'concatenate_keys': concatenate_keys}) + if concatenate_queries is not None: + params.update({'concatenate_queries': concatenate_queries}) # FIX: DELETE THE FOLLOWING ONCE CONCATENATION IS IMPLEMENTED FOR LEARNING params.update({'enable_learning': False}) if normalize_memories is not None: @@ -182,8 +182,8 @@ def test_structure(self, # Validate node structure assert len(em.query_input_nodes) == num_keys assert len(em.value_input_nodes) == num_values - assert isinstance(em.concatenate_keys_node, Mechanism) == concatenate_node - if em.concatenate_keys: + assert isinstance(em.concatenate_queries_node, Mechanism) == concatenate_node + if em.concatenate_queries: assert em.field_weight_nodes == [] assert bool(softmax_gain == CONTROL) == bool(len(em.softmax_gain_control_nodes)) else: @@ -353,7 +353,7 @@ class TestExecution: ] args_names = "test_num, memory_template, memory_fill, memory_capacity, memory_decay_rate, field_weights, " \ - "concatenate_keys, normalize_memories, softmax_gain, storage_prob, inputs, expected_retrieval" + "concatenate_queries, normalize_memories, softmax_gain, storage_prob, inputs, expected_retrieval" @pytest.mark.parametrize(args_names, test_execution_data, ids=[x[0] for x in test_execution_data]) @@ -369,7 +369,7 @@ def test_simple_execution_witemhout_learning(self, memory_fill, memory_decay_rate, field_weights, - concatenate_keys, + concatenate_queries, normalize_memories, softmax_gain, storage_prob, @@ -395,8 +395,8 @@ def test_simple_execution_witemhout_learning(self, params.update({'memory_decay_rate': memory_decay_rate}) if field_weights is not None: params.update({'field_weights': field_weights}) - if concatenate_keys is not None: - params.update({'concatenate_keys': concatenate_keys}) + if concatenate_queries is not None: + params.update({'concatenate_queries': concatenate_queries}) # FIX: DELETE THE FOLLOWING ONCE CONCATENATION IS IMPLEMENTED FOR LEARNING params.update({'enable_learning': False}) if normalize_memories is not None: @@ -474,7 +474,7 @@ def test_multiple_trials_concatenation_and_storage_node(self, exec_mode, concate memory_capacity=4, softmax_gain=100, memory_fill=(0,.001), - concatenate_keys=concatenate, + 
concatenate_queries=concatenate, enable_learning=learning, use_storage_node=use_storage_node) @@ -498,7 +498,7 @@ def test_multiple_trials_concatenation_and_storage_node(self, exec_mode, concate if concatenate: with pytest.raises(EMCompositionError) as error: em.learn(inputs=inputs, execution_mode=exec_mode) - assert "EMComposition does not support learning with 'concatenate_keys'=True." in str(error.value) + assert "EMComposition does not support learning with 'concatenate_queries'=True." in str(error.value) else: # if exec_mode == pnl.ExecutionMode.Python: From 529c967cbce7755597265290858f520a0f179829 Mon Sep 17 00:00:00 2001 From: jdcpni Date: Mon, 28 Oct 2024 10:28:17 -0400 Subject: [PATCH 361/410] Fix/emcomposition fieldweights assignment (#3086) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * • emcomposition.py - _parse_fields: fix bug to properly assign field_weights using self.key_indices - add _validate_options_with_learning() that checks compatibility of softmax_choice and normalize_field_weights with learning - learn(): raise exception if normalize_fields_weights is False and loss_spec is BINARY_CROSS_ENTROPY - docstring mods • test_emcomposition.py - test_normalize_field_weights_with_learning_enabled() --- .../EGO/Using EMComposition/DeclanParams.py | 4 +- .../EGO/Using EMComposition/ScriptControl.py | 24 ++-- .../EGO/Using EMComposition/TestParams.py | 11 +- .../nonstateful/learningfunctions.py | 2 +- psyneulink/core/compositions/composition.py | 4 +- .../parameterestimationcomposition.py | 2 +- .../library/compositions/emcomposition.py | 122 +++++++++++------- tests/composition/test_emcomposition.py | 18 ++- 8 files changed, 114 insertions(+), 73 deletions(-) diff --git a/Scripts/Models (Under Development)/EGO/Using EMComposition/DeclanParams.py b/Scripts/Models (Under Development)/EGO/Using EMComposition/DeclanParams.py index 16f881f7c06..9f5f652b28a 100644 --- a/Scripts/Models (Under Development)/EGO/Using EMComposition/DeclanParams.py +++ b/Scripts/Models (Under Development)/EGO/Using EMComposition/DeclanParams.py @@ -67,8 +67,8 @@ def calc_prob(em_preds, test_ys): # context_weight = 1, # weight of the context used during memory retrieval state_weight = .5, # weight of the state used during memory retrieval context_weight = .5, # weight of the context used during memory retrieval - normalize_field_weights = False, # whether to normalize the field weights during memory retrieval - # normalize_field_weights = True, # whether to normalize the field weights during memory retrieval + # normalize_field_weights = False, # whether to normalize the field weights during memory retrieval + normalize_field_weights = True, # whether to normalize the field weights during memory retrieval # softmax_temperature = None, # temperature of the softmax used during memory retrieval (smaller means more argmax-like softmax_temperature = .1, # temperature of the softmax used during memory retrieval (smaller means more argmax-like # softmax_temperature = ADAPTIVE, # temperature of the softmax used during memory retrieval (smaller means more argmax-like diff --git a/Scripts/Models (Under Development)/EGO/Using EMComposition/ScriptControl.py b/Scripts/Models (Under Development)/EGO/Using EMComposition/ScriptControl.py index d448f79c29e..04027649aa3 100644 --- a/Scripts/Models (Under Development)/EGO/Using EMComposition/ScriptControl.py +++ b/Scripts/Models (Under Development)/EGO/Using EMComposition/ScriptControl.py @@ -2,22 +2,22 @@ # Settings for running 
script: -MODEL_PARAMS = 'TestParams' -# MODEL_PARAMS = 'DeclanParams' +# MODEL_PARAMS = 'TestParams' +MODEL_PARAMS = 'DeclanParams' CONSTRUCT_MODEL = True # THIS MUST BE SET TO True to run the script DISPLAY_MODEL = ( # Only one of the following can be uncommented: - # None # suppress display of model - { # show simple visual display of model - # 'show_pytorch': True, # show pytorch graph of model - 'show_learning': True - # 'show_projections_not_in_composition': True, - # 'exclude_from_gradient_calc_style': 'dashed'# show target mechanisms for learning - # {'show_node_structure': True # show detailed view of node structures and projections - } + None # suppress display of model + # { # show simple visual display of model + # # 'show_pytorch': True, # show pytorch graph of model + # 'show_learning': True + # # 'show_projections_not_in_composition': True, + # # 'exclude_from_gradient_calc_style': 'dashed'# show target mechanisms for learning + # # {'show_node_structure': True # show detailed view of node structures and projections + # } ) -RUN_MODEL = False # False => don't run the model -# RUN_MODEL = True, # True => run the model +# RUN_MODEL = False # False => don't run the model +RUN_MODEL = True, # True => run the model # REPORT_OUTPUT = ReportOutput.FULL # Sets console output during run [ReportOutput.ON, .TERSE OR .FULL] REPORT_OUTPUT = ReportOutput.OFF # Sets console output during run [ReportOutput.ON, .TERSE OR .FULL] REPORT_PROGRESS = ReportProgress.OFF # Sets console progress bar during run diff --git a/Scripts/Models (Under Development)/EGO/Using EMComposition/TestParams.py b/Scripts/Models (Under Development)/EGO/Using EMComposition/TestParams.py index 3b8caf6c0bc..2c9bb768d2a 100644 --- a/Scripts/Models (Under Development)/EGO/Using EMComposition/TestParams.py +++ b/Scripts/Models (Under Development)/EGO/Using EMComposition/TestParams.py @@ -31,8 +31,8 @@ integration_rate = .69, # rate at which state is integrated into new context state_weight = 1, # weight of the state used during memory retrieval context_weight = 1, # weight of the context used during memory retrieval - normalize_field_weights = False, # whether to normalize the field weights during memory retrieval - # normalize_field_weights = True, # whether to normalize the field weights during memory retrieval + # normalize_field_weights = False, # whether to normalize the field weights during memory retrieval + normalize_field_weights = True, # whether to normalize the field weights during memory retrieval # softmax_temperature = None, # temperature of the softmax used during memory retrieval (smaller means more argmax-like softmax_temperature = .1, # temperature of the softmax used during memory retrieval (smaller means more argmax-like # softmax_temperature = ADAPTIVE, # temperature of the softmax used during memory retrieval (smaller means more argmax-like @@ -40,10 +40,13 @@ # softmax_threshold = None, # threshold used to mask out small values in softmax softmax_threshold = .001, # threshold used to mask out small values in softmax enable_learning=[True, False, False], # Enable learning for PREDICTION (STATE) but not CONTEXT or PREVIOUS STATE - # enable_learning=[True, True, True], # Enable learning for PREDICTION (STATE) but not CONTEXT or PREVIOUS STATE + # enable_learning=[True, True, True] + # enable_learning=True, # enable_learning=False, - learn_field_weights = False, + learn_field_weights = True, + # learn_field_weights = False, loss_spec = Loss.BINARY_CROSS_ENTROPY, + # loss_spec = Loss.CROSS_ENTROPY, # 
loss_spec = Loss.MSE, learning_rate = .5, num_optimization_steps = 10, diff --git a/psyneulink/core/components/functions/nonstateful/learningfunctions.py b/psyneulink/core/components/functions/nonstateful/learningfunctions.py index 96b4b3c3085..c8f4d6ea349 100644 --- a/psyneulink/core/components/functions/nonstateful/learningfunctions.py +++ b/psyneulink/core/components/functions/nonstateful/learningfunctions.py @@ -1014,7 +1014,7 @@ class Kohonen(LearningFunction): # -------------------------------------------- and :math:`w_j` is the column of the matrix in `variable `\\[2] that corresponds to the jth element of the activity array in `variable `\\[1]. - .. _note:: + .. note:: the array of activities in `variable `\\[1] is assumed to have been generated by the dot product of the input pattern in `variable `\\[0] and the matrix in `variable `\\[2], and thus the element with the greatest value in `variable `\\[1] diff --git a/psyneulink/core/compositions/composition.py b/psyneulink/core/compositions/composition.py index c10706330d0..98116b80c70 100644 --- a/psyneulink/core/compositions/composition.py +++ b/psyneulink/core/compositions/composition.py @@ -5297,7 +5297,7 @@ def _determine_node_roles(self, context=None): ORIGIN: - all Nodes that are in first consideration_set (i.e., self.scheduler.consideration_queue[0]). - .. _note:: + .. note:: - this takes account of any Projections designated as feedback by graph_processing (i.e., self.graph.comp_to_vertex[efferent].feedback == EdgeType.FEEDBACK) - these will all be assigined afferent Projections from Composition.input_CIM @@ -5390,7 +5390,7 @@ def _determine_node_roles(self, context=None): - or for which any efferent projections are either: - to output_CIM OR - assigned as feedback (i.e., self.graph.comp_to_vertex[efferent].feedback == EdgeType.FEEDBACK - .. _note:: + .. note:: - this insures that for cases in which there are nested CYCLES (e.g., LearningMechanisms for a `learning Pathway `), only the Node in the *outermost* CYCLE that is specified as a FEEDBACK_SENDER diff --git a/psyneulink/core/compositions/parameterestimationcomposition.py b/psyneulink/core/compositions/parameterestimationcomposition.py index 1b39557d5bc..024f9152da4 100644 --- a/psyneulink/core/compositions/parameterestimationcomposition.py +++ b/psyneulink/core/compositions/parameterestimationcomposition.py @@ -363,7 +363,7 @@ class ParameterEstimationComposition(Composition): number of trials executed (see `number of trials ` for additional information). - .. _note:: + .. note:: The **num_trials_per_estimate** is distinct from the **num_trials** argument of the ParameterEstimationComposition's `run ` method. The latter determines how many full fits of the `model ` are carried out (that is, how many times the diff --git a/psyneulink/library/compositions/emcomposition.py b/psyneulink/library/compositions/emcomposition.py index e0ef65c2443..807f316fe55 100644 --- a/psyneulink/library/compositions/emcomposition.py +++ b/psyneulink/library/compositions/emcomposition.py @@ -453,11 +453,18 @@ * **normalize_field_weights**: specifies whether the `field_weights ` are normalized or their raw values are used. If True, the `field_weights ` are normalized so that - they sum to 1.0, and are used to weight the corresponding fields during retrieval (see `Weight fields - `). If False, the raw values of the `field_weights ` are - used to weight (i.e., multiply) the retrieved value of each field. 
This setting is ignored if **field_weights** + they sum to 1.0, and are used to weight (i.e., multiply) the corresponding fields during retrieval (see `Weight + fields `). If False, the raw values of the `field_weights ` + are used to weight the retrieved value of each field. This setting is ignored if **field_weights** is None or `concatenate_queries ` is in effect. + .. warning:: + If **normalize_field_weights** is False and **enable_learning** is True, a warning is issued indicating that + this may produce an error if the `loss_spec ` for the EMComposition (or an + `AutodiffComposition` that contains it) requires all values to be between 0 and 1, and calling the + EMComposition's `learn ` method will generate an error if the loss_spec is specified is + one known to be incompatible (e.g., `BINARY_CROSS_ENTROPY `). + .. _EMComposition_Field_Names: * **field_names**: specifies names that can be assigned to the fields. The number of names specified must @@ -698,7 +705,7 @@ product for each key field is passed to the corresponding `field_weight_node ` where it is multiplied by the corresponding `field_weight ` (if `use_gating_for_weighting ` is True, this is done by using the `field_weight - ` to output gate the `softmax_node `). The weighted softamx + ` to output gate the `softmax_node `). The weighted softmax vectors for all key fields are then passed to the `combined_softmax_node `, where they are haddamard summed to produce a single weighting for each memory. @@ -997,9 +1004,7 @@ from psyneulink._typing import Optional, Union from psyneulink.core.components.functions.nonstateful.transferfunctions import SoftMax, LinearMatrix from psyneulink.core.components.functions.nonstateful.combinationfunctions import Concatenate, LinearCombination -from psyneulink.core.components.functions.nonstateful.selectionfunctions import ARG_MAX, ARG_MAX_INDICATOR -from psyneulink.core.components.functions.function import \ - DEFAULT_SEED, _random_state_getter, _seed_setter +from psyneulink.core.components.functions.function import DEFAULT_SEED, _random_state_getter, _seed_setter from psyneulink.core.compositions.composition import CompositionError, NodeRole from psyneulink.library.compositions.autodiffcomposition import AutodiffComposition, torch_available from psyneulink.library.components.mechanisms.modulatory.learning.EMstoragemechanism import EMStorageMechanism @@ -1010,15 +1015,15 @@ from psyneulink.core.globals.parameters import Parameter, check_user_specified from psyneulink.core.globals.context import handle_external_context from psyneulink.core.globals.keywords import \ - (ADAPTIVE, ALL, AUTO, CONTEXT, CONTROL, DEFAULT_INPUT, DEFAULT_VARIABLE, EM_COMPOSITION, FULL_CONNECTIVITY_MATRIX, - GAIN, IDENTITY_MATRIX, MAX_INDICATOR, MULTIPLICATIVE_PARAM, NAME, PARAMS, PROB_INDICATOR, PRODUCT, PROJECTIONS, - RANDOM, SIZE, VARIABLE) + (ADAPTIVE, ALL, ARG_MAX, ARG_MAX_INDICATOR, AUTO, CONTEXT, CONTROL, DEFAULT_INPUT, DEFAULT_VARIABLE, + EM_COMPOSITION, FULL_CONNECTIVITY_MATRIX, GAIN, IDENTITY_MATRIX, MULTIPLICATIVE_PARAM, NAME, + PARAMS, PROB_INDICATOR, PRODUCT, PROJECTIONS, RANDOM, SIZE, VARIABLE, Loss) from psyneulink.core.globals.utilities import convert_all_elements_to_np_array, is_numeric_scalar from psyneulink.core.globals.registry import name_without_suffix from psyneulink.core.llvm import ExecutionMode -__all__ = ['EMComposition', 'WEIGHTED_AVG', 'PROBABILISTIC'] +__all__ = ['EMComposition', 'EMCompositionError', 'WEIGHTED_AVG', 'PROBABILISTIC'] STORAGE_PROB = 'storage_prob' WEIGHTED_AVG = ALL @@ -1029,7 
+1034,7 @@ MATCH_TO_KEYS_AFFIX = ' [MATCH to KEYS]' RETRIEVED_AFFIX = ' [RETRIEVED]' WEIGHTED_SOFTMAX_AFFIX = ' [WEIGHTED SOFTMAX]' -RETRIEVE_NODE_NAME = 'RETRIEVE' +COMBINED_SOFTMAX_NODE_NAME = 'RETRIEVE' STORE_NODE_NAME = 'STORE' @@ -1290,26 +1295,18 @@ class EMComposition(AutodiffComposition): `field_names ` is specified, then the name of each value_input_node is assigned the corresponding field name appended with * [VALUE]*. - input_nodes : list[ProcessingMechanism] - Full list of `INPUT ` `Nodes ` ordered with query_input_nodes first - followed by value_input_nodes; used primarily for internal computations - - input_nodes_by_fields : list[ProcessingMechanism] - Full list of `INPUT ` `Nodes ` in the same order specified in the - **field_names** argument of the constructor and in `self.field_names `. - concatenate_queries_node : ProcessingMechanism `ProcessingMechanism` that concatenates the inputs to `query_input_nodes ` into a single vector used for the matching processing if `concatenate keys ` is True. This is not created if the **concatenate_queries** argument to the EMComposition's constructor is False or is overridden (see `concatenate_queries `), or there is only one - query_input_node. + query_input_node. This node is named *CONCATENATE_KEYS* match_nodes : list[ProcessingMechanism] `ProcessingMechanisms ` that receive the dot product of each key and those stored in the corresponding field of `memory ` (see `Match memories by field - ` for additional details). These are assigned names that prepend *MATCH_n* to the - name of the corresponding `query_input_nodes `. + ` for additional details). These are named the same as the corresponding + `query_input_nodes ` appended with the suffix *[MATCH to KEYS]*. softmax_gain_control_nodes : list[ControlMechanism] `ControlMechanisms ` that adaptively control the `softmax_gain ` @@ -1320,7 +1317,8 @@ class EMComposition(AutodiffComposition): softmax_nodes : list[ProcessingMechanism] `ProcessingMechanisms ` that compute the softmax over the vectors received from the corresponding `match_nodes ` (see `Softmax normalize matches over fields - ` for additional details). + ` for additional details). These are named the same as the corresponding + `query_input_nodes ` appended with the suffix *[SOFTMAX]*. field_weight_nodes : list[ProcessingMechanism] `ProcessingMechanisms `, each of which use the `field weight ` @@ -1328,7 +1326,8 @@ class EMComposition(AutodiffComposition): `weighted_softmax_node `. These are implemented only if more than one `key field ` is specified (see `Fields ` for additional details), and are replaced with `retrieval_gating_nodes ` if - `use_gating_for_weighting ` is True. + `use_gating_for_weighting ` is True. These are named the same as the + corresponding `query_input_nodes ` appended with the suffix *[WEIGHT]*. weighted_softmax_nodes : list[ProcessingMechanism] `ProcessingMechanisms `, each of which receives the output of the corresponding @@ -1336,9 +1335,10 @@ class EMComposition(AutodiffComposition): for a given `field `, and multiplies them to produce the weighted softmax for that field; these are implemented only if more than one `key field ` is specified (see `Fields ` for additional details) and `use_gating_for_weighting - ` is False (in which case, `field_weights ` + ` is False (otherwise, `field_weights ` are applied through output gating of the `softmax_nodes ` by the - `retrieval_gating_nodes `). + `retrieval_gating_nodes `). 
These are named the same as the corresponding + `query_input_nodes ` appended with the suffix *[WEIGHTED SOFTMAX]*. retrieval_gating_nodes : list[GatingMechanism] `GatingMechanisms ` that uses the `field weight ` for each @@ -1348,32 +1348,41 @@ class EMComposition(AutodiffComposition): `key field ` is specified (see `Fields ` for additional details). combined_softmax_node : ProcessingMechanism - `ProcessingMechanism` that receives the softmax normalized dot products of the keys and memories - from the `softmax_nodes `, weighted by the `field_weights_nodes + `ProcessingMechanism` that receives the softmax normalized dot products of the keys and memories from the + `softmax_nodes `, weighted by the `field_weights_nodes ` if more than one `key field ` is specified - (or `retrieval_gating_nodes ` if `use_gating_for_weighting + (or by `retrieval_gating_nodes ` if `use_gating_for_weighting ` is True), and combines them into a single vector that is used to retrieve the corresponding memory for each field from `memory ` (see `Retrieve values by - field ` for additional details). + field ` for additional details). This node is named *RETRIEVE*. retrieved_nodes : list[ProcessingMechanism] `ProcessingMechanisms ` that receive the vector retrieved for each field in `memory - ` (see `Retrieve values by field ` for additional details); - these are assigned the same names as the `query_input_nodes ` and + ` (see `Retrieve values by field ` for additional details). + These are assigned the same names as the `query_input_nodes ` and `value_input_nodes ` to which they correspond appended with the suffix * [RETRIEVED]*, and are in the same order as `input_nodes_by_fields ` to which to which they correspond. storage_node : EMStorageMechanism `EMStorageMechanism` that receives inputs from the `query_input_nodes ` and - `value_input_nodes `, and stores these in the corresponding field of - `memory ` with probability `storage_prob ` after a retrieval - has been made (see `Retrieval and Storage ` for additional details). + `value_input_nodes `, and stores these in the corresponding field of`memory + ` with probability `storage_prob ` after a retrieval has been + made (see `Retrieval and Storage ` for additional details). This node is named *STORE*. .. technical_note:: The `storage_node ` is assigned a Condition to execute after the `retrieved_nodes ` have executed, to ensure that storage occurs after retrieval, but before any subequent processing is done (i.e., in a composition in which the EMComposition may be embededded. + + input_nodes : list[ProcessingMechanism] + Full list of `INPUT ` `Nodes ` ordered with query_input_nodes first + followed by value_input_nodes; used primarily for internal computations + + input_nodes_by_fields : list[ProcessingMechanism] + Full list of `INPUT ` `Nodes ` in the same order specified in the + **field_names** argument of the constructor and in `self.field_names `. 
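
Related to the normalize_field_weights behavior documented above: normalization simply rescales the raw weights so they sum to 1.0, and the reason it matters for learning is that a loss such as binary cross-entropy is only defined for values in [0, 1]. A small generic sketch (plain numpy, not the EMComposition or Loss implementation):

    import numpy as np

    def normalized(field_weights):
        """What normalize_field_weights=True amounts to: rescale to sum to 1.0."""
        fw = np.asarray(field_weights, dtype=float)
        return fw / fw.sum()

    def binary_cross_entropy(target, prediction, eps=1e-12):
        t, p = np.asarray(target, dtype=float), np.asarray(prediction, dtype=float)
        return float(-np.mean(t * np.log(p + eps) + (1 - t) * np.log(1 - p + eps)))

    print(normalized([1, 1, 2]))                      # weights now sum to 1.0
    print(binary_cross_entropy([1, 0], [0.9, 0.1]))   # well-defined
    print(binary_cross_entropy([1, 0], [1.7, 0.1]))   # nan: log(1 - 1.7) is undefined
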
+ """ componentCategory = EM_COMPOSITION @@ -1618,6 +1627,7 @@ def __init__(self, memory_capacity = memory_capacity, field_weights = field_weights, field_names = field_names, + normalize_field_weights = normalize_field_weights, concatenate_queries = concatenate_queries, softmax_gain = softmax_gain, softmax_threshold = softmax_threshold, @@ -1633,7 +1643,7 @@ def __init__(self, **kwargs ) - self._validate_softmax_choice(softmax_choice, enable_learning) + self._validate_options_with_learning(softmax_choice, normalize_field_weights, enable_learning) self._construct_pathways(self.memory_template, self.memory_capacity, @@ -1666,7 +1676,7 @@ def __init__(self, self.scheduler.add_condition(self.storage_node, conditions.AllHaveRun(*self.retrieved_nodes)) # # Generates expected results, but execution_sets has a second set for INPUT nodes - # and the the match_nodes again with storage_node + # and the match_nodes again with storage_node # # --------------------------------------- # @@ -1904,11 +1914,18 @@ def _parse_fields(self, self.num_fields = len(self.entry_template) keys_weights = [i for i in parsed_field_weights if i != 0] self.num_keys = len(keys_weights) + # Get indices of field_weights that specify keys and values: self.key_indices = np.flatnonzero(parsed_field_weights) + assert len(self.key_indices) == self.num_keys, \ + f"PROGRAM ERROR: number of keys ({self.num_keys}) does not match number of " \ + f"non-zero values in field_weights ({len(self.key_indices)})." self.value_indices = np.where(parsed_field_weights==0)[0] - self.num_values = self.num_fields - self.num_keys + assert len(self.value_indices) == self.num_values, \ + f"PROGRAM ERROR: number of values ({self.num_values}) does not match number of " \ + f"zero values in field_weights ({len(self.value_indices)})." + if parsed_field_names: self.key_names = [parsed_field_names[i] for i in self.key_indices] # self.value_names = parsed_field_names[self.num_keys:] @@ -2208,12 +2225,17 @@ def _construct_match_nodes(self, memory_template, memory_capacity, concatenate_q return match_nodes - def _validate_softmax_choice(self, softmax_choice, enable_learning): + def _validate_options_with_learning(self, softmax_choice, normalize_field_weights, enable_learning): if softmax_choice in {ARG_MAX, PROBABILISTIC} and enable_learning: warnings.warn(f"The 'softmax_choice' arg of '{self.name}' is set to '{softmax_choice}' with " f"'enable_learning' set to True (or a list); this will generate an error if its " f"'learn' method is called. 
Set 'softmax_choice' to WEIGHTED_AVG before learning.") + if enable_learning and not normalize_field_weights: + warnings.warn(f"The 'normalize_field_weights' arg of '{self.name}' is set to False with " + f"'enable_learning' set to True (or a list); this may generate an error if " + f"the 'loss_spec' used for learning requires values to be between 0 and 1.") + def _construct_softmax_nodes(self, memory_capacity, field_weights, softmax_gain, softmax_threshold, softmax_choice)->list: """Create nodes that, for each key field, compute the softmax over the similarities between the input and the @@ -2221,12 +2243,7 @@ def _construct_softmax_nodes(self, memory_capacity, field_weights, """ # Get indices of field_weights that specify keys: - key_indices = np.where(np.array(field_weights) != 0) - key_weights = [field_weights[i] for i in key_indices[0]] - - assert len(key_indices[0]) == self.num_keys, \ - f"PROGRAM ERROR: number of keys ({self.num_keys}) does not match number of " \ - f"non-zero values in field_weights ({len(key_indices)})." + key_weights = [field_weights[i] for i in self.key_indices] if softmax_choice == ARG_MAX: # ARG_MAX would return entry multiplied by its dot product @@ -2269,7 +2286,8 @@ def _construct_field_weight_nodes(self, field_weights, concatenate_queries, use_ if not concatenate_queries and self.num_keys > 1: if use_gating_for_weighting: - field_weight_nodes = [GatingMechanism(input_ports={VARIABLE: np.array(field_weights[i]), + field_weight_nodes = [GatingMechanism(input_ports={VARIABLE: + np.array(field_weights[self.key_indices[i]]), PARAMS:{DEFAULT_INPUT: DEFAULT_VARIABLE}, NAME: 'OUTCOME'}, gate=[key_match_pair[1].output_ports[0]], @@ -2278,8 +2296,9 @@ def _construct_field_weight_nodes(self, field_weights, concatenate_queries, use_ for i, key_match_pair in enumerate(zip(self.query_input_nodes, self.softmax_nodes))] else: - field_weight_nodes = [ProcessingMechanism(input_ports={VARIABLE: np.array(field_weights[i]), - PARAMS:{DEFAULT_INPUT: DEFAULT_VARIABLE}, + field_weight_nodes = [ProcessingMechanism(input_ports={VARIABLE: + np.array(field_weights[self.key_indices[i]]), + PARAMS: {DEFAULT_INPUT: DEFAULT_VARIABLE}, NAME: 'FIELD_WEIGHT'}, name= 'WEIGHT' if self.num_keys == 1 else f'{self.key_names[i]} [WEIGHT]') @@ -2330,7 +2349,7 @@ def _construct_combined_softmax_node(self, name=f'WEIGHTED SOFTMAX to RETRIEVAL for ' f'{self.key_names[i]}') for i, s in enumerate(input_source)]}], - name=RETRIEVE_NODE_NAME)) + name=COMBINED_SOFTMAX_NODE_NAME)) assert len(combined_softmax_node.output_port.value) == memory_capacity, \ 'PROGRAM ERROR: number of items in combined_softmax_node ' \ @@ -2534,6 +2553,9 @@ def learn(self, *args, **kwargs)->list: if arg in {ARG_MAX, PROBABILISTIC}: raise EMCompositionError(f"The ARG_MAX and PROBABILISTIC options for the 'softmax_choice' arg " f"of '{self.name}' cannot be used during learning; change to WEIGHTED_AVG.") + if self.loss_spec in {Loss.BINARY_CROSS_ENTROPY} and not self.normalize_field_weights: + raise EMCompositionError(f"The 'loss_spec' arg of '{self.name}' is set to '{self.loss_spec.name}' with " + f"'normalize_field_weights' set to False; this must be True to use this loss_spec.") return super().learn(*args, **kwargs) def _get_execution_mode(self, execution_mode): diff --git a/tests/composition/test_emcomposition.py b/tests/composition/test_emcomposition.py index 3c90676b990..d0206a020a4 100644 --- a/tests/composition/test_emcomposition.py +++ b/tests/composition/test_emcomposition.py @@ -238,7 +238,7 @@ def 
test_softmax_choice(self): em = EMComposition(memory_template=[[[1,.1,.1]], [[.1,1,.1]], [[.1,.1,1]]]) for softmax_choice in [pnl.ARG_MAX, pnl.PROBABILISTIC]: - with pytest.raises(pnl.ComponentError) as error_text: + with pytest.raises(EMCompositionError) as error_text: em.parameters.softmax_choice.set(softmax_choice) em.learn() assert (f"The ARG_MAX and PROBABILISTIC options for the 'softmax_choice' arg " @@ -252,6 +252,22 @@ def test_softmax_choice(self): f"'learn' method is called. Set 'softmax_choice' to WEIGHTED_AVG before learning.") assert warning_msg in str(warning[0].message) + def test_normalize_field_weights_with_learning_enabled(self): + with pytest.warns(UserWarning) as warning: + em = EMComposition(normalize_field_weights=False, + enable_learning=True, + memory_fill=(0,.1), + loss_spec=pnl.Loss.BINARY_CROSS_ENTROPY) + warning_msg = (f"The 'normalize_field_weights' arg of 'EM_Composition' is set to False with " + f"'enable_learning' set to True (or a list); this may generate an error if the " + f"'loss_spec' used for learning requires values to be between 0 and 1.") + assert warning_msg in str(warning[0].message) + + with pytest.raises(EMCompositionError) as error_text: + em.learn() + assert (f"The 'loss_spec' arg of 'EM_Composition' is set to 'BINARY_CROSS_ENTROPY' with " + f"'normalize_field_weights' set to False; this must be True to use this loss_spec." + in str(error_text.value)) @pytest.mark.pytorch From 14a2986f8c89cf24764422fe42c7b436bfe73ab2 Mon Sep 17 00:00:00 2001 From: David Turner Date: Tue, 29 Oct 2024 13:53:03 -0400 Subject: [PATCH 362/410] Convert mappingproxy to dict --- .../pec/test_parameterestimationcomposition.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/tests/composition/pec/test_parameterestimationcomposition.py b/tests/composition/pec/test_parameterestimationcomposition.py index 8ae98129f14..00c5da7623d 100644 --- a/tests/composition/pec/test_parameterestimationcomposition.py +++ b/tests/composition/pec/test_parameterestimationcomposition.py @@ -158,6 +158,11 @@ def _run_ddm_with_params( @pytest.mark.composition @pytest.mark.parametrize("inputs_dict, error_msg", run_input_test_args) def test_pec_run_input_formats(inputs_dict, error_msg): + + # Need to convert from mapping proxy + # to dict to make the test pass. + inputs_dict = {k:v for k,v in inputs_dict.items()} + if error_msg: with pytest.raises(pnl.ParameterEstimationCompositionError) as error: pec.run(inputs=inputs_dict) @@ -202,6 +207,10 @@ def test_pec_run_input_formats(inputs_dict, error_msg): def test_parameter_optimization_ddm(func_mode, opt_method, optuna_kwargs, expected_result): """Test parameter optimization of a DDM in integrator mode""" + # Dicts are being converted to mappingproxy objects, need to convert to dict. + if optuna_kwargs: + optuna_kwargs = {k: v for k, v in optuna_kwargs.items()} + if func_mode == "Python": pytest.skip( "Test not yet implemented for Python. Parameter estimation is too slow." From bfe44611cb77e5ecc7d2a5b17e5cc0bf278564cb Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Tue, 29 Oct 2024 23:25:22 -0400 Subject: [PATCH 363/410] docs: Use pattern rule to execute generator scripts (#3087) find -exec does not propagate errors. 
Signed-off-by: Jan Vesely --- docs/Makefile | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/docs/Makefile b/docs/Makefile index 99feb08f21c..12f8ebbd8bd 100644 --- a/docs/Makefile +++ b/docs/Makefile @@ -240,6 +240,10 @@ gh-pages: git ci -m "Generated gh-pages for `git log devel -1 --pretty=short --abbrev-commit`" && git push origin gh-pages ; git checkout devel make clean +source/generator_scripts/%: source/generator_scripts/%.py + python $< + +GENERATED=$(patsubst %.py, %, $(shell find source/generator_scripts -name "*.py")) + .PHONY: generated -generated: - find source/generator_scripts -name "*.py" -exec python {} \; +generated: $(GENERATED) From 5a0f4e7cc71874db2112d6b154e624aa51a349d2 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Wed, 30 Oct 2024 12:52:01 -0400 Subject: [PATCH 364/410] llvm: Use per jit-engine printf pointer Allows printf to be called without crash (or output) on GPUs. Signed-off-by: Jan Vesely --- psyneulink/core/llvm/builder_context.py | 6 +- psyneulink/core/llvm/builtins.py | 39 +++++++++--- psyneulink/core/llvm/helpers.py | 60 +++++++++---------- psyneulink/core/llvm/jit_engine.py | 2 + .../library/compositions/compiledoptimizer.py | 35 ++++++----- .../library/compositions/pytorchwrappers.py | 42 ++++++------- tests/llvm/test_helpers.py | 2 +- 7 files changed, 107 insertions(+), 79 deletions(-) diff --git a/psyneulink/core/llvm/builder_context.py b/psyneulink/core/llvm/builder_context.py index edc77fddad9..0dcb6bae853 100644 --- a/psyneulink/core/llvm/builder_context.py +++ b/psyneulink/core/llvm/builder_context.py @@ -46,10 +46,10 @@ def module_count(): _BUILTIN_PREFIX = "__pnl_builtin_" -_builtin_intrinsics = frozenset(('pow', 'log', 'exp', 'tanh', 'coth', 'csch', - 'sin', 'cos', +_builtin_intrinsics = frozenset(('pow', 'log', 'exp', 'tanh', 'coth', 'csch', 'sin', 'cos', 'is_close_float', 'is_close_double', - 'mt_rand_init', 'philox_rand_init')) + 'mt_rand_init', 'philox_rand_init', + 'get_printf_address')) class _node_assembly(): diff --git a/psyneulink/core/llvm/builtins.py b/psyneulink/core/llvm/builtins.py index ede1ef10c7c..20920ccf59e 100644 --- a/psyneulink/core/llvm/builtins.py +++ b/psyneulink/core/llvm/builtins.py @@ -8,8 +8,9 @@ # ********************************************* PNL LLVM builtins ************************************************************** -from llvmlite import ir - +from ctypes import util +from llvmlite import ir, binding +import sys from . import helpers from .builder_context import LLVMBuilderContext, _BUILTIN_PREFIX @@ -469,18 +470,39 @@ def setup_pnl_intrinsics(ctx): ir.Function(ctx.module, single_intr_ty, name=_BUILTIN_PREFIX + "log") ir.Function(ctx.module, double_intr_ty, name=_BUILTIN_PREFIX + "pow") + # printf address + ir.Function(ctx.module, ir.FunctionType(ir.IntType(64), []), name=_BUILTIN_PREFIX + "get_printf_address") - -def _generate_intrinsic_wrapper(module, name, ret, args): - intrinsic = module.declare_intrinsic("llvm." 
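
The get_printf_address builtin added in this patch resolves printf from the process's C runtime so that compiled code can fetch the pointer at run time, while the GPU build simply returns 0 and callers skip printing. The same lookup can be sketched on its own, reusing the ctypes.util and llvmlite.binding calls that appear in the patch; the helper name below is illustrative:

    import sys
    from ctypes import util
    from llvmlite import binding

    def host_printf_address():
        """Return the address of the C runtime's printf, or 0 if unavailable."""
        libc_name = "msvcrt" if sys.platform == "win32" else "c"
        libc = util.find_library(libc_name)
        if libc is None:
            return 0
        # Make the library's symbols visible to JIT-compiled code
        binding.load_library_permanently(libc)
        # address_of_symbol returns None when the symbol is not found
        return binding.address_of_symbol("printf") or 0

    print(hex(host_printf_address()))

The updated helpers.printf guards on a non-null address before calling through the pointer, so a zero return simply disables printing rather than crashing.
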
+ name, list(set(args))) - +def _generate_new_function(module, name, ret, args): func_ty = ir.FunctionType(ret, args) - function = ir.Function(module, func_ty, name=_BUILTIN_PREFIX + name) + function = ir.Function(module, func_ty, name=name) function.attributes.add('alwaysinline') block = function.append_basic_block(name="entry") builder = ir.IRBuilder(block) builder.debug_metadata = LLVMBuilderContext.get_debug_location(function, None) - builder.ret(builder.call(intrinsic, function.args)) + + return builder + +def _generate_intrinsic_wrapper(module, name, ret, args): + intrinsic = module.declare_intrinsic("llvm." + name, list(set(args))) + + builder = _generate_new_function(module, _BUILTIN_PREFIX + name, ret, args) + intrinsic_result = builder.call(intrinsic, builder.block.function.args) + builder.ret(intrinsic_result) + +def _generate_get_printf_address(module): + builder = _generate_new_function(module, _BUILTIN_PREFIX + "get_printf_address", ir.IntType(64), []) + + libc_name = "msvcrt" if sys.platform == "win32" else "c" + libc = util.find_library(libc_name) + assert libc is not None, "Standard libc library not found" + + binding.load_library_permanently(libc) + # Address will be none if the symbol is not found + printf_address = binding.address_of_symbol("printf") + assert printf_address is not None, "'printf' symbol not found in {}".format(libc) + + builder.ret(ir.IntType(64)(printf_address)) def _generate_cpu_builtins_module(_float_ty): """Generate function wrappers for log, exp, and pow intrinsics.""" @@ -489,6 +511,7 @@ def _generate_cpu_builtins_module(_float_ty): _generate_intrinsic_wrapper(module, intrinsic, _float_ty, [_float_ty]) _generate_intrinsic_wrapper(module, "pow", _float_ty, [_float_ty, _float_ty]) + _generate_get_printf_address(module) return module diff --git a/psyneulink/core/llvm/helpers.py b/psyneulink/core/llvm/helpers.py index d4fcc8cd2f5..b1542e0438f 100644 --- a/psyneulink/core/llvm/helpers.py +++ b/psyneulink/core/llvm/helpers.py @@ -9,12 +9,9 @@ # ********************************************* PNL LLVM helpers ************************************************************** from contextlib import contextmanager -from ctypes import util import warnings -import sys from llvmlite import ir -import llvmlite.binding as llvm from .debug import debug_env @@ -401,55 +398,52 @@ def call_elementwise_operation(ctx, builder, x, operation, output_ptr): for (inp_ptr, out_ptr) in recursive_iterate_arrays(ctx, builder, x, output_ptr): builder.store(operation(ctx, builder, builder.load(inp_ptr)), out_ptr) -def printf(builder, fmt, *args, override_debug=False): +def printf(ctx, builder, fmt, *args, override_debug=False): if "print_values" not in debug_env and not override_debug: return - #FIXME: Fix builtin printf and use that instead of this - libc_name = "msvcrt" if sys.platform == "win32" else "c" - libc = util.find_library(libc_name) - assert libc is not None, "Standard libc library not found" - - llvm.load_library_permanently(libc) - # Address will be none if the symbol is not found - printf_address = llvm.address_of_symbol("printf") - assert printf_address is not None, "'printf' symbol not found in {}".format(libc) - - # Direct pointer constants don't work - printf_ty = ir.FunctionType(ir.IntType(32), [ir.IntType(8).as_pointer()], var_arg=True) - printf = builder.inttoptr(ir.IntType(64)(printf_address), printf_ty.as_pointer()) - ir_module = builder.function.module - fmt += "\0" - + # Set up the formatting string as global symbol int8 = ir.IntType(8) - fmt_data = 
bytearray(fmt.encode("utf8")) + fmt_data = bytearray((fmt + "\0").encode("utf8")) fmt_ty = ir.ArrayType(int8, len(fmt_data)) - global_fmt = ir.GlobalVariable(ir_module, fmt_ty, + + ir_module = builder.function.module + global_fmt = ir.GlobalVariable(ir_module, + fmt_ty, name="printf_fmt_" + str(len(ir_module.globals))) global_fmt.linkage = "internal" global_fmt.global_constant = True global_fmt.initializer = fmt_ty(fmt_data) - fmt_ptr = builder.gep(global_fmt, [ir.IntType(32)(0), ir.IntType(32)(0)]) - conv_args = [builder.fpext(a, ir.DoubleType()) if is_floating_point(a) else a for a in args] - builder.call(printf, [fmt_ptr] + conv_args) + printf_ty = ir.FunctionType(ir.IntType(32), [ir.IntType(8).as_pointer()], var_arg=True) + get_printf_addr_f = ctx.get_builtin("get_printf_address", []) + printf_address = builder.call(get_printf_addr_f, []) + + printf_is_not_null = builder.icmp_unsigned("!=", printf_address, printf_address.type(0)) + with builder.if_then(printf_is_not_null, likely=True): + printf_f = builder.inttoptr(printf_address, printf_ty.as_pointer()) + fmt_ptr = builder.gep(global_fmt, [ir.IntType(32)(0), ir.IntType(32)(0)]) + conv_args = [builder.fpext(a, ir.DoubleType()) if is_floating_point(a) else a for a in args] + builder.call(printf_f, [fmt_ptr] + conv_args) -def printf_float_array(builder, array, prefix="", suffix="\n", override_debug=False): - printf(builder, prefix, override_debug=override_debug) + +def printf_float_array(ctx, builder, array, prefix="", suffix="\n", override_debug=False): + printf(ctx, builder, prefix, override_debug=override_debug) with array_ptr_loop(builder, array, "print_array_loop") as (b1, i): - printf(b1, "%lf ", b1.load(b1.gep(array, [i.type(0), i])), override_debug=override_debug) + printf(ctx, b1, "%lf ", b1.load(b1.gep(array, [i.type(0), i])), override_debug=override_debug) - printf(builder, suffix, override_debug=override_debug) + printf(ctx, builder, suffix, override_debug=override_debug) -def printf_float_matrix(builder, matrix, prefix="", suffix="\n", override_debug=False): - printf(builder, prefix, override_debug=override_debug) +def printf_float_matrix(ctx, builder, matrix, prefix="", suffix="\n", override_debug=False): + printf(ctx, builder, prefix, override_debug=override_debug) with array_ptr_loop(builder, matrix, "print_row_loop") as (b1, i): row = b1.gep(matrix, [i.type(0), i]) - printf_float_array(b1, row, suffix="\n", override_debug=override_debug) - printf(builder, suffix, override_debug=override_debug) + printf_float_array(ctx, b1, row, suffix="\n", override_debug=override_debug) + + printf(ctx, builder, suffix, override_debug=override_debug) class ConditionGenerator: diff --git a/psyneulink/core/llvm/jit_engine.py b/psyneulink/core/llvm/jit_engine.py index 73fbf36683b..5f9be454ff7 100644 --- a/psyneulink/core/llvm/jit_engine.py +++ b/psyneulink/core/llvm/jit_engine.py @@ -280,12 +280,14 @@ def _init(self): self._jit_engine.set_object_cache(self._object_cache) +# FIXME: Get device side printf pointer _ptx_builtin_source = """ __device__ {type} __pnl_builtin_sin({type} a) {{ return sin(a); }} __device__ {type} __pnl_builtin_cos({type} a) {{ return cos(a); }} __device__ {type} __pnl_builtin_log({type} a) {{ return log(a); }} __device__ {type} __pnl_builtin_exp({type} a) {{ return exp(a); }} __device__ {type} __pnl_builtin_pow({type} a, {type} b) {{ return pow(a, b); }} +__device__ int64_t __pnl_builtin_get_printf_address() {{ return 0; }} """ diff --git a/psyneulink/library/compositions/compiledoptimizer.py 
b/psyneulink/library/compositions/compiledoptimizer.py index 20aa5e673ca..db8527c7f29 100644 --- a/psyneulink/library/compositions/compiledoptimizer.py +++ b/psyneulink/library/compositions/compiledoptimizer.py @@ -106,8 +106,8 @@ def step(self, ctx): t = builder.gep(optim_struct, [zero, ctx.int32_ty(self._T_NUM)]) # get methods needed - pow = ctx.import_llvm_function("__pnl_builtin_pow") - sqrt = ctx.get_builtin("sqrt", [ctx.float_ty]) + pow_f = ctx.import_llvm_function("__pnl_builtin_pow") + sqrt_f = ctx.get_builtin("sqrt", [ctx.float_ty]) lr = ctx.float_ty(self.lr) eps = ctx.float_ty(self.eps) @@ -120,13 +120,12 @@ def step(self, ctx): builder.store(builder.fadd(builder.load(t), one_float), t) t_val = builder.load(t) # 1.5) calculate values to be used later (based on incremented t) - b1_pow = builder.call(pow, [b1, t_val]) - b2_pow = builder.call(pow, [b2, t_val]) + b1_pow = builder.call(pow_f, [b1, t_val]) + b2_pow = builder.call(pow_f, [b2, t_val]) one_minus_b1_pow = builder.fsub(one_float, b1_pow) one_minus_b2_pow = builder.fsub(one_float, b2_pow) - pnlvm.helpers.printf( - builder, f"%f b1_pow_sub %f\nb2 pow sub %f\n",t_val, one_minus_b1_pow, one_minus_b2_pow) + pnlvm.helpers.printf(ctx, builder, f"%f b1_pow_sub %f\nb2 pow sub %f\n",t_val, one_minus_b1_pow, one_minus_b2_pow) # 2) update first moments for idx, proj in enumerate(self._pytorch_model.projection_wrappers): @@ -144,7 +143,11 @@ def step(self, ctx): # m_t = m_t + (1-b1)*g_t gen_inject_mat_add(ctx, builder, m_t_ptr, tmp_val, m_t_ptr) - pnlvm.helpers.printf_float_matrix(builder, m_t_ptr, prefix=f"mt val: {proj.sender._mechanism} -> {proj.receiver._mechanism}\n", override_debug=False) + pnlvm.helpers.printf_float_matrix(ctx, + builder, + m_t_ptr, + prefix=f"mt val: {proj.sender._mechanism} -> {proj.receiver._mechanism}\n", + override_debug=False) # 3) update second moments for idx, proj in enumerate(self._pytorch_model.projection_wrappers): proj_idx_ir = ctx.int32_ty(idx) @@ -180,24 +183,28 @@ def step(self, ctx): delta_w_ptr = builder.gep( delta_w, [zero, proj_idx_ir]) - pnlvm.helpers.printf_float_matrix(builder, delta_w_ptr, prefix=f"grad val: {proj.sender._mechanism} -> {proj.receiver._mechanism}\n", override_debug=False) + pnlvm.helpers.printf_float_matrix(ctx, + builder, + delta_w_ptr, + prefix=f"grad val: {proj.sender._mechanism} -> {proj.receiver._mechanism}\n", + override_debug=False) # this is messy - #TODO - cleanup this weights_llvmlite = proj._extract_llvm_matrix(ctx, builder, state, params) dim_x, dim_y = proj.matrix.shape weight_row = None - pnlvm.helpers.printf(builder, "biascorr2 %.20f\n", one_minus_b2_pow, override_debug=False) + pnlvm.helpers.printf(ctx, builder, "biascorr2 %.20f\n", one_minus_b2_pow, override_debug=False) with pnlvm.helpers.for_loop_zero_inc(builder, ctx.int32_ty(dim_x), "optimizer_w_upd_outer") as (b1, weight_row): weight_column = None with pnlvm.helpers.for_loop_zero_inc(b1, ctx.int32_ty(dim_y), "optimizer_w_upd_inner") as (b2, weight_column): # sqrt(v_t) + eps v_t_value = b2.load(b2.gep(v_t_ptr, [zero, weight_row, weight_column])) - value = b2.call(sqrt, [v_t_value]) - denom = b2.call(sqrt, [one_minus_b2_pow]) + value = b2.call(sqrt_f, [v_t_value]) + denom = b2.call(sqrt_f, [one_minus_b2_pow]) value = b2.fdiv(value, denom) value = b2.fadd(value, eps) - pnlvm.helpers.printf(builder, "val %.20f\n", value, override_debug=False) + pnlvm.helpers.printf(ctx, builder, "val %.20f\n", value, override_debug=False) # alpha_t * m_t m_t_value = b2.load(b2.gep( m_t_ptr, [zero, weight_row, 
weight_column])) @@ -213,9 +220,9 @@ def step(self, ctx): value = b2.fadd(b2.load(old_weight_ptr), value) b2.store(value, old_weight_ptr) - pnlvm.helpers.printf(b1, "\n", override_debug=False) + pnlvm.helpers.printf(ctx, b1, "\n", override_debug=False) - pnlvm.helpers.printf(builder, f"\t\t\tOPTIM DONE UPDATE\n",override_debug=False) + pnlvm.helpers.printf(ctx, builder, f"\t\t\tOPTIM DONE UPDATE\n",override_debug=False) builder.ret_void() diff --git a/psyneulink/library/compositions/pytorchwrappers.py b/psyneulink/library/compositions/pytorchwrappers.py index 48737faa556..2a5cdb056bf 100644 --- a/psyneulink/library/compositions/pytorchwrappers.py +++ b/psyneulink/library/compositions/pytorchwrappers.py @@ -475,20 +475,16 @@ def _gen_llvm_training_backprop(self, ctx, optimizer, loss): node_target = builder.gep(model_input, [ctx.int32_ty(0), ctx.int32_ty(target_idx)]) # 2) Lookup desired output value - node_output = builder.gep(model_output, [ctx.int32_ty(0), ctx.int32_ty(0), - ctx.int32_ty(node._idx), - ctx.int32_ty(0)]) + node_output = builder.gep(model_output, + [ctx.int32_ty(0), ctx.int32_ty(0), ctx.int32_ty(node._idx), ctx.int32_ty(0)]) tmp_loss = loss.gen_inject_lossfunc_call( ctx, builder, loss_fn, node_output, node_target) - pnlvm.helpers.printf_float_array( - builder, node_target, prefix=f"{node}\ttarget:\t") - pnlvm.helpers.printf_float_array( - builder, node_output, prefix=f"{node}\tvalue:\t") + pnlvm.helpers.printf_float_array(ctx, builder, node_target, prefix=f"{node}\ttarget:\t") + pnlvm.helpers.printf_float_array(ctx, builder, node_output, prefix=f"{node}\tvalue:\t") - pnlvm.helpers.printf( - builder, f"{node}\tloss:\t%f\n", tmp_loss, override_debug=False) + pnlvm.helpers.printf(ctx, builder, f"{node}\tloss:\t%f\n", tmp_loss, override_debug=False) builder.store(builder.fadd(builder.load( total_loss), tmp_loss), total_loss) loss_derivative = loss._gen_inject_loss_differential( @@ -519,10 +515,8 @@ def _gen_llvm_training_backprop(self, ctx, optimizer, loss): gen_inject_vec_hadamard( ctx, builder, activation_func_derivative, error_val, error_val) - pnlvm.helpers.printf_float_array( - builder, activation_func_derivative, prefix=f"{node}\tdSigma:\t") - pnlvm.helpers.printf_float_array( - builder, error_val, prefix=f"{node}\terror:\t") + pnlvm.helpers.printf_float_array(ctx, builder, activation_func_derivative, prefix=f"{node}\tdSigma:\t") + pnlvm.helpers.printf_float_array(ctx, builder, error_val, prefix=f"{node}\terror:\t") # 4) compute weight gradients for (node, err_val) in error_dict.items(): @@ -537,7 +531,9 @@ def _gen_llvm_training_backprop(self, ctx, optimizer, loss): # get dimensions of weight matrix weights_llvmlite = proj._extract_llvm_matrix(ctx, builder, state, params) - pnlvm.helpers.printf_float_matrix(builder, weights_llvmlite, + pnlvm.helpers.printf_float_matrix(ctx, + builder, + weights_llvmlite, prefix= f"{proj.sender._mechanism} -> {proj.receiver._mechanism}\n", override_debug=False) # update delta_W @@ -558,8 +554,7 @@ def _gen_llvm_training_backprop(self, ctx, optimizer, loss): b2.store(new_val, b2.gep(node_delta_w, [ctx.int32_ty(0), weight_row, weight_column])) - pnlvm.helpers.printf(builder, "TOTAL LOSS:\t%.20f\n", - builder.load(total_loss), override_debug=False) + pnlvm.helpers.printf(ctx, builder, "TOTAL LOSS:\t%.20f\n", builder.load(total_loss), override_debug=False) builder.ret_void() return builder.function @@ -1053,7 +1048,8 @@ def _gen_llvm_execute(self, ctx, builder, state, params, mech_input, data): mech_input, mech_output]) - 
pnlvm.helpers.printf_float_array(builder, + pnlvm.helpers.printf_float_array(ctx, + builder, builder.gep(mech_output, [ctx.int32_ty(0), ctx.int32_ty(0)]), prefix=f"{self} output:\n", override_debug=False) @@ -1222,13 +1218,19 @@ def _gen_llvm_execute(self, ctx, builder, state, params, data): output_vec = gen_inject_vxm(ctx, builder, input_vec, proj_matrix) - pnlvm.helpers.printf_float_array(builder, input_vec, + pnlvm.helpers.printf_float_array(ctx, + builder, + input_vec, prefix=f"{self.sender._mechanism} -> {self.receiver._mechanism} input:\n", override_debug=False) - pnlvm.helpers.printf_float_matrix(builder, proj_matrix, + pnlvm.helpers.printf_float_matrix(ctx, + builder, + proj_matrix, prefix=f"{self.sender._mechanism} -> {self.receiver._mechanism} mat:\n", override_debug=False) - pnlvm.helpers.printf_float_array(builder, output_vec, + pnlvm.helpers.printf_float_array(ctx, + builder, + output_vec, prefix=f"{self.sender._mechanism} -> {self.receiver._mechanism} output:\n", override_debug=False) diff --git a/tests/llvm/test_helpers.py b/tests/llvm/test_helpers.py index cd2227ded05..34c4cde90b3 100644 --- a/tests/llvm/test_helpers.py +++ b/tests/llvm/test_helpers.py @@ -224,7 +224,7 @@ def test_helper_printf(capfd, ir_argtype, format_spec, values_to_check): block = function.append_basic_block(name="entry") builder = ir.IRBuilder(block) - pnlvm.helpers.printf(builder, format_str, *ir_values_to_check, override_debug=True) + pnlvm.helpers.printf(ctx, builder, format_str, *ir_values_to_check, override_debug=True) builder.ret_void() bin_f = pnlvm.LLVMBinaryFunction.get(custom_name) From 5c7352d0eef9e798f0a4b772bcb736b83cfe5d84 Mon Sep 17 00:00:00 2001 From: David Turner Date: Wed, 30 Oct 2024 13:30:57 -0400 Subject: [PATCH 365/410] Handle MappingProxyType in check_user_specified If a user passes a MappingProxyType as argument to a constructor, it gets cached in _user_specified_args. Later, if the object is deepcopied, the MappingProxyType can't be pickled so we get an error. I am handling this by converting the mapping proxy type to a normal dict. This was uncovered by Jan's changes to tests that always passes dictionary --- psyneulink/core/globals/parameters.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/psyneulink/core/globals/parameters.py b/psyneulink/core/globals/parameters.py index e2204c6ecdd..5e9d96bd4d0 100644 --- a/psyneulink/core/globals/parameters.py +++ b/psyneulink/core/globals/parameters.py @@ -315,6 +315,7 @@ def _recurrent_transfer_mechanism_matrix_setter(value, owning_component=None, co import types import typing import weakref +from types import MappingProxyType import toposort @@ -337,6 +338,7 @@ def _recurrent_transfer_mechanism_matrix_setter(value, owning_component=None, co update_array_in_place, ) from psyneulink.core.rpc.graph_pb2 import Entry, ndArray +from types import MappingProxyType __all__ = [ 'Defaults', 'get_validator_by_function', 'Parameter', 'ParameterAlias', 'ParameterError', @@ -464,6 +466,12 @@ def check_user_specified_wrapper(self, *args, **kwargs): except AttributeError: self._prev_constructor = constructor if '__init__' in type(self).__dict__ else None self._user_specified_args = copy.copy(kwargs) + + # If any of the kwargs are MappingProxyType, convert them to dict + for k, v in self._user_specified_args.items(): + if isinstance(v, MappingProxyType): + self._user_specified_args[k] = v.copy() + else: # add args determined in constructor to user_specifed. 
# since some args are set by the values of other From 551240e607984c97f95e374ae3f9ecd34d32a39f Mon Sep 17 00:00:00 2001 From: David Turner Date: Wed, 30 Oct 2024 14:09:15 -0400 Subject: [PATCH 366/410] Handle MappingProxyType in PEC. --- .../components/functions/nonstateful/fitfunctions.py | 5 +++-- .../core/compositions/parameterestimationcomposition.py | 4 ++++ .../pec/test_parameterestimationcomposition.py | 9 --------- 3 files changed, 7 insertions(+), 11 deletions(-) diff --git a/psyneulink/core/components/functions/nonstateful/fitfunctions.py b/psyneulink/core/components/functions/nonstateful/fitfunctions.py index 43ce2dff813..ca146716070 100644 --- a/psyneulink/core/components/functions/nonstateful/fitfunctions.py +++ b/psyneulink/core/components/functions/nonstateful/fitfunctions.py @@ -25,6 +25,7 @@ Union, Type, Literal, + Mapping, ) @@ -308,7 +309,7 @@ class Parameters(OptimizationFunction.Parameters): def __init__( self, method: Union[Literal["differential_evolution"], optuna.samplers.BaseSampler, Type[optuna.samplers.BaseSampler]], - optuna_kwargs: Optional[Dict] = None, + optuna_kwargs: Optional[Mapping] = None, objective_function: Optional[Callable] = None, search_space=None, save_samples: Optional[bool] = None, @@ -318,7 +319,7 @@ def __init__( **kwargs, ): self.method = method - self._optuna_kwargs = {} if optuna_kwargs is None else optuna_kwargs + self._optuna_kwargs = {} if optuna_kwargs is None else optuna_kwargs.copy() self.direction = direction diff --git a/psyneulink/core/compositions/parameterestimationcomposition.py b/psyneulink/core/compositions/parameterestimationcomposition.py index df075990b1d..c85687b05f4 100644 --- a/psyneulink/core/compositions/parameterestimationcomposition.py +++ b/psyneulink/core/compositions/parameterestimationcomposition.py @@ -915,6 +915,10 @@ def run(self, *args, **kwargs): if ContextFlags.PROCESSING not in context.flags: self.controller.check_pec_inputs(inputs) + # Copy the inputs so we don't modify the original dict, note, we can't copy the keys because they + # are object\mechanisms that are in the underlying composition. + inputs = {k: v.copy() for k, v in inputs.items()} + # Run parse input dict on the inputs, this will fill in missing input ports with default values. There # will be missing input ports because the user doesn't know about the control mechanism's input ports that # have been added by the PEC for the fitting parameters. diff --git a/tests/composition/pec/test_parameterestimationcomposition.py b/tests/composition/pec/test_parameterestimationcomposition.py index 00c5da7623d..8ae98129f14 100644 --- a/tests/composition/pec/test_parameterestimationcomposition.py +++ b/tests/composition/pec/test_parameterestimationcomposition.py @@ -158,11 +158,6 @@ def _run_ddm_with_params( @pytest.mark.composition @pytest.mark.parametrize("inputs_dict, error_msg", run_input_test_args) def test_pec_run_input_formats(inputs_dict, error_msg): - - # Need to convert from mapping proxy - # to dict to make the test pass. - inputs_dict = {k:v for k,v in inputs_dict.items()} - if error_msg: with pytest.raises(pnl.ParameterEstimationCompositionError) as error: pec.run(inputs=inputs_dict) @@ -207,10 +202,6 @@ def test_pec_run_input_formats(inputs_dict, error_msg): def test_parameter_optimization_ddm(func_mode, opt_method, optuna_kwargs, expected_result): """Test parameter optimization of a DDM in integrator mode""" - # Dicts are being converted to mappingproxy objects, need to convert to dict. 
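# A minimal sketch (not part of these patches) of the problem the MappingProxyType
# changes work around: deepcopy falls back to the pickle protocol, which rejects
# mappingproxy objects, while MappingProxyType.copy() returns an ordinary dict that
# copies, caches, and pickles without complaint.
import copy
import pickle
from types import MappingProxyType

frozen_kwargs = MappingProxyType({"seed": 0})
try:
    copy.deepcopy(frozen_kwargs)
except TypeError as err:
    print(err)                           # cannot pickle 'mappingproxy' object
plain_kwargs = frozen_kwargs.copy()      # plain dict copy, safe to cache or deepcopy
assert pickle.loads(pickle.dumps(plain_kwargs)) == {"seed": 0}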
- if optuna_kwargs: - optuna_kwargs = {k: v for k, v in optuna_kwargs.items()} - if func_mode == "Python": pytest.skip( "Test not yet implemented for Python. Parameter estimation is too slow." From 29983890eba300d61cc4ad918f2ca82ec246cb8e Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Wed, 30 Oct 2024 20:38:07 -0400 Subject: [PATCH 367/410] llvm: Add env variable control over debugging output Convert printf override_debug into tags. Setting 'printf_tags' in the PNL_LLVM_DEBUG environment variable enables prints of statements with overlapping tags. Two special tag values are supported: * tags={"always"} prints a statement irrespective of PNL_LLVM_DEBUG setting. * PNL_LLVM_DEBUG='printf_tags={"all"}' enables all print statements irrespective of their tags. Convert all printf statements in compiled torch code to use tags={"torch"}. Signed-off-by: Jan Vesely --- psyneulink/core/llvm/debug.py | 2 +- psyneulink/core/llvm/helpers.py | 24 ++++---- .../library/compositions/compiledoptimizer.py | 15 ++--- .../library/compositions/pytorchwrappers.py | 56 ++++++++----------- tests/llvm/test_debug_composition.py | 3 +- tests/llvm/test_helpers.py | 2 +- 6 files changed, 49 insertions(+), 53 deletions(-) diff --git a/psyneulink/core/llvm/debug.py b/psyneulink/core/llvm/debug.py index 3d22caca13e..aa7e8c736b3 100644 --- a/psyneulink/core/llvm/debug.py +++ b/psyneulink/core/llvm/debug.py @@ -20,7 +20,7 @@ * "stat" -- prints code generation and compilation statistics * "time_stat" -- print compilation and code generation times * "comp_node_debug" -- print intermediate results after execution composition node wrapper. - * "print_values" -- Enabled printfs in llvm code (from ctx printf helper) + * "printf_tags" -- Enabled printfs in compiled code with the specified tags Compilation modifiers: * "const_data" -- hardcode initial output values into generated code, diff --git a/psyneulink/core/llvm/helpers.py b/psyneulink/core/llvm/helpers.py index b1542e0438f..9581b62bebc 100644 --- a/psyneulink/core/llvm/helpers.py +++ b/psyneulink/core/llvm/helpers.py @@ -8,6 +8,7 @@ # ********************************************* PNL LLVM helpers ************************************************************** +import ast from contextlib import contextmanager import warnings @@ -398,8 +399,11 @@ def call_elementwise_operation(ctx, builder, x, operation, output_ptr): for (inp_ptr, out_ptr) in recursive_iterate_arrays(ctx, builder, x, output_ptr): builder.store(operation(ctx, builder, builder.load(inp_ptr)), out_ptr) -def printf(ctx, builder, fmt, *args, override_debug=False): - if "print_values" not in debug_env and not override_debug: +def printf(ctx, builder, fmt, *args, tags:set): + + tags = frozenset(tags) + user_tags = frozenset(ast.literal_eval(debug_env.get("printf_tags", "[]"))) + if "all" not in user_tags and "always" not in tags and not tags.intersection(user_tags): return # Set up the formatting string as global symbol @@ -428,22 +432,22 @@ def printf(ctx, builder, fmt, *args, override_debug=False): builder.call(printf_f, [fmt_ptr] + conv_args) -def printf_float_array(ctx, builder, array, prefix="", suffix="\n", override_debug=False): - printf(ctx, builder, prefix, override_debug=override_debug) +def printf_float_array(ctx, builder, array, prefix="", suffix="\n", *, tags:set): + printf(ctx, builder, prefix, tags=tags) with array_ptr_loop(builder, array, "print_array_loop") as (b1, i): - printf(ctx, b1, "%lf ", 
b1.load(b1.gep(array, [i.type(0), i])), tags=tags) - printf(ctx, builder, suffix, override_debug=override_debug) + printf(ctx, builder, suffix, tags=tags) -def printf_float_matrix(ctx, builder, matrix, prefix="", suffix="\n", override_debug=False): - printf(ctx, builder, prefix, override_debug=override_debug) +def printf_float_matrix(ctx, builder, matrix, prefix="", suffix="\n", *, tags:set): + printf(ctx, builder, prefix, tags=tags) with array_ptr_loop(builder, matrix, "print_row_loop") as (b1, i): row = b1.gep(matrix, [i.type(0), i]) - printf_float_array(ctx, b1, row, suffix="\n", override_debug=override_debug) + printf_float_array(ctx, b1, row, suffix="\n", tags=tags) - printf(ctx, builder, suffix, override_debug=override_debug) + printf(ctx, builder, suffix, tags=tags) class ConditionGenerator: diff --git a/psyneulink/library/compositions/compiledoptimizer.py b/psyneulink/library/compositions/compiledoptimizer.py index db8527c7f29..9f65a875085 100644 --- a/psyneulink/library/compositions/compiledoptimizer.py +++ b/psyneulink/library/compositions/compiledoptimizer.py @@ -125,7 +125,7 @@ def step(self, ctx): one_minus_b1_pow = builder.fsub(one_float, b1_pow) one_minus_b2_pow = builder.fsub(one_float, b2_pow) - pnlvm.helpers.printf(ctx, builder, f"%f b1_pow_sub %f\nb2 pow sub %f\n",t_val, one_minus_b1_pow, one_minus_b2_pow) + pnlvm.helpers.printf(ctx, builder, f"%f b1_pow_sub %f\nb2 pow sub %f\n",t_val, one_minus_b1_pow, one_minus_b2_pow, tags={"torch"}) # 2) update first moments for idx, proj in enumerate(self._pytorch_model.projection_wrappers): @@ -147,7 +147,7 @@ def step(self, ctx): builder, m_t_ptr, prefix=f"mt val: {proj.sender._mechanism} -> {proj.receiver._mechanism}\n", - override_debug=False) + tags={"torch"}) # 3) update second moments for idx, proj in enumerate(self._pytorch_model.projection_wrappers): proj_idx_ir = ctx.int32_ty(idx) @@ -187,14 +187,14 @@ def step(self, ctx): builder, delta_w_ptr, prefix=f"grad val: {proj.sender._mechanism} -> {proj.receiver._mechanism}\n", - override_debug=False) + tags={"torch"}) # this is messy - #TODO - cleanup this weights_llvmlite = proj._extract_llvm_matrix(ctx, builder, state, params) dim_x, dim_y = proj.matrix.shape weight_row = None - pnlvm.helpers.printf(ctx, builder, "biascorr2 %.20f\n", one_minus_b2_pow, override_debug=False) + pnlvm.helpers.printf(ctx, builder, "biascorr2 %.20f\n", one_minus_b2_pow, tags={"torch"}) with pnlvm.helpers.for_loop_zero_inc(builder, ctx.int32_ty(dim_x), "optimizer_w_upd_outer") as (b1, weight_row): weight_column = None with pnlvm.helpers.for_loop_zero_inc(b1, ctx.int32_ty(dim_y), "optimizer_w_upd_inner") as (b2, weight_column): @@ -204,7 +204,8 @@ def step(self, ctx): denom = b2.call(sqrt_f, [one_minus_b2_pow]) value = b2.fdiv(value, denom) value = b2.fadd(value, eps) - pnlvm.helpers.printf(ctx, builder, "val %.20f\n", value, override_debug=False) + pnlvm.helpers.printf(ctx, builder, "val %.20f\n", value, tags={"torch"}) + # alpha_t * m_t m_t_value = b2.load(b2.gep( m_t_ptr, [zero, weight_row, weight_column])) @@ -220,9 +221,9 @@ def step(self, ctx): value = b2.fadd(b2.load(old_weight_ptr), value) b2.store(value, old_weight_ptr) - pnlvm.helpers.printf(ctx, b1, "\n", override_debug=False) + pnlvm.helpers.printf(ctx, b1, "\n", tags={"torch"}) - pnlvm.helpers.printf(ctx, builder, f"\t\t\tOPTIM DONE UPDATE\n",override_debug=False) + pnlvm.helpers.printf(ctx, builder, f"\t\t\tOPTIM DONE UPDATE\n", tags={"torch"}) builder.ret_void() diff --git a/psyneulink/library/compositions/pytorchwrappers.py 
b/psyneulink/library/compositions/pytorchwrappers.py index 2a5cdb056bf..69a4c9a74dc 100644 --- a/psyneulink/library/compositions/pytorchwrappers.py +++ b/psyneulink/library/compositions/pytorchwrappers.py @@ -458,9 +458,9 @@ def _gen_llvm_training_backprop(self, ctx, optimizer, loss): for node in exec_set: if node._mechanism in input_nodes: continue + node_z_value = z_values[node] - activation_func_derivative = node._gen_llvm_execute_derivative_func(ctx, builder, - state, params, node_z_value) + activation_func_derivative = node._gen_llvm_execute_derivative_func(ctx, builder, state, params, node_z_value) error_val = builder.alloca(z_values[node].type.pointee) error_dict[node] = error_val @@ -470,29 +470,24 @@ def _gen_llvm_training_backprop(self, ctx, optimizer, loss): # 1) Lookup desired target value terminal_sequence = self._composition._terminal_backprop_sequences[node._mechanism] - target_idx = self._composition.get_nodes_by_role( - NodeRole.INPUT).index(terminal_sequence[TARGET_MECHANISM]) + target_idx = self._composition.get_nodes_by_role(NodeRole.INPUT).index(terminal_sequence[TARGET_MECHANISM]) node_target = builder.gep(model_input, [ctx.int32_ty(0), ctx.int32_ty(target_idx)]) # 2) Lookup desired output value node_output = builder.gep(model_output, [ctx.int32_ty(0), ctx.int32_ty(0), ctx.int32_ty(node._idx), ctx.int32_ty(0)]) - tmp_loss = loss.gen_inject_lossfunc_call( - ctx, builder, loss_fn, node_output, node_target) + tmp_loss = loss.gen_inject_lossfunc_call(ctx, builder, loss_fn, node_output, node_target) - pnlvm.helpers.printf_float_array(ctx, builder, node_target, prefix=f"{node}\ttarget:\t") - pnlvm.helpers.printf_float_array(ctx, builder, node_output, prefix=f"{node}\tvalue:\t") + pnlvm.helpers.printf_float_array(ctx, builder, node_target, prefix=f"{node}\ttarget:\t", tags={"torch"}) + pnlvm.helpers.printf_float_array(ctx, builder, node_output, prefix=f"{node}\tvalue:\t", tags={"torch"}) - pnlvm.helpers.printf(ctx, builder, f"{node}\tloss:\t%f\n", tmp_loss, override_debug=False) - builder.store(builder.fadd(builder.load( - total_loss), tmp_loss), total_loss) - loss_derivative = loss._gen_inject_loss_differential( - ctx, builder, node_output, node_target) - # compute δ_l = dσ/da ⊙ σ'(z) + pnlvm.helpers.printf(ctx, builder, f"{node}\tloss:\t%f\n", tmp_loss, tags={"torch"}) + builder.store(builder.fadd(builder.load(total_loss), tmp_loss), total_loss) + loss_derivative = loss._gen_inject_loss_differential(ctx, builder, node_output, node_target) - gen_inject_vec_hadamard( - ctx, builder, activation_func_derivative, loss_derivative, error_val) + # compute δ_l = dσ/da ⊙ σ'(z) + gen_inject_vec_hadamard(ctx, builder, activation_func_derivative, loss_derivative, error_val) else: # We propagate error backwards from next layer @@ -503,25 +498,22 @@ def _gen_llvm_training_backprop(self, ctx, optimizer, loss): weights_llvmlite = proj._extract_llvm_matrix(ctx, builder, state, params) if proj_idx == 0: - gen_inject_vxm_transposed( - ctx, builder, efferent_node_error, weights_llvmlite, error_val) + gen_inject_vxm_transposed(ctx, builder, efferent_node_error, weights_llvmlite, error_val) else: - new_val = gen_inject_vxm_transposed( - ctx, builder, efferent_node_error, weights_llvmlite) + new_val = gen_inject_vxm_transposed(ctx, builder, efferent_node_error, weights_llvmlite) - gen_inject_vec_add( - ctx, builder, new_val, error_val, error_val) + gen_inject_vec_add(ctx, builder, new_val, error_val, error_val) - gen_inject_vec_hadamard( - ctx, builder, activation_func_derivative, error_val, 
error_val) + gen_inject_vec_hadamard(ctx, builder, activation_func_derivative, error_val, error_val) - pnlvm.helpers.printf_float_array(ctx, builder, activation_func_derivative, prefix=f"{node}\tdSigma:\t") - pnlvm.helpers.printf_float_array(ctx, builder, error_val, prefix=f"{node}\terror:\t") + pnlvm.helpers.printf_float_array(ctx, builder, activation_func_derivative, prefix=f"{node}\tdSigma:\t", tags={"torch"}) + pnlvm.helpers.printf_float_array(ctx, builder, error_val, prefix=f"{node}\terror:\t", tags={"torch"}) # 4) compute weight gradients for (node, err_val) in error_dict.items(): if node in input_nodes: continue + for proj in node.afferents: # get a_(l-1) afferent_node_activation = builder.gep(model_output, [ctx.int32_ty(0), @@ -535,7 +527,7 @@ def _gen_llvm_training_backprop(self, ctx, optimizer, loss): builder, weights_llvmlite, prefix= f"{proj.sender._mechanism} -> {proj.receiver._mechanism}\n", - override_debug=False) + tags={"torch"}) # update delta_W node_delta_w = builder.gep(delta_w, [ctx.int32_ty(0), ctx.int32_ty(proj._idx)]) @@ -554,7 +546,7 @@ def _gen_llvm_training_backprop(self, ctx, optimizer, loss): b2.store(new_val, b2.gep(node_delta_w, [ctx.int32_ty(0), weight_row, weight_column])) - pnlvm.helpers.printf(ctx, builder, "TOTAL LOSS:\t%.20f\n", builder.load(total_loss), override_debug=False) + pnlvm.helpers.printf(ctx, builder, "TOTAL LOSS:\t%.20f\n", builder.load(total_loss), tags={"torch"}) builder.ret_void() return builder.function @@ -1052,7 +1044,7 @@ def _gen_llvm_execute(self, ctx, builder, state, params, mech_input, data): builder, builder.gep(mech_output, [ctx.int32_ty(0), ctx.int32_ty(0)]), prefix=f"{self} output:\n", - override_debug=False) + tags={"torch"}) return mech_output @@ -1222,17 +1214,17 @@ def _gen_llvm_execute(self, ctx, builder, state, params, data): builder, input_vec, prefix=f"{self.sender._mechanism} -> {self.receiver._mechanism} input:\n", - override_debug=False) + tags={"torch"}) pnlvm.helpers.printf_float_matrix(ctx, builder, proj_matrix, prefix=f"{self.sender._mechanism} -> {self.receiver._mechanism} mat:\n", - override_debug=False) + tags={"torch"}) pnlvm.helpers.printf_float_array(ctx, builder, output_vec, prefix=f"{self.sender._mechanism} -> {self.receiver._mechanism} output:\n", - override_debug=False) + tags={"torch"}) return output_vec diff --git a/tests/llvm/test_debug_composition.py b/tests/llvm/test_debug_composition.py index ffc6d83df0b..c2143d4feda 100644 --- a/tests/llvm/test_debug_composition.py +++ b/tests/llvm/test_debug_composition.py @@ -26,7 +26,7 @@ def preserve_env(): debug_options = ["const_input=[[[7]]]", "const_input", "const_params", "const_data", "const_state", - "stat", "time_stat", "unaligned_copy"] + "stat", "time_stat", "unaligned_copy", "printf_tags={'always'}"] options_combinations = (";".join(c) for c in pytest.helpers.power_set(debug_options)) @pytest.mark.composition @@ -63,6 +63,5 @@ def test_debug_comp(mode, debug_env): if "const_state" in debug_env: expected2 = expected1 - np.testing.assert_allclose(expected1, output1[0][0]) np.testing.assert_allclose(expected2, output2[0][0]) diff --git a/tests/llvm/test_helpers.py b/tests/llvm/test_helpers.py index 34c4cde90b3..fa43c7fd31d 100644 --- a/tests/llvm/test_helpers.py +++ b/tests/llvm/test_helpers.py @@ -224,7 +224,7 @@ def test_helper_printf(capfd, ir_argtype, format_spec, values_to_check): block = function.append_basic_block(name="entry") builder = ir.IRBuilder(block) - pnlvm.helpers.printf(ctx, builder, format_str, *ir_values_to_check, 
override_debug=True) + pnlvm.helpers.printf(ctx, builder, format_str, *ir_values_to_check, tags={"always"}) builder.ret_void() bin_f = pnlvm.LLVMBinaryFunction.get(custom_name) From 6db642fb375482b8f4c949afa694712eff93e2db Mon Sep 17 00:00:00 2001 From: David Turner Date: Thu, 31 Oct 2024 10:14:39 -0400 Subject: [PATCH 368/410] Delete _user_specified_args Katherine suggested deleting _user_specified_args at the end of Component.__init__ to get around the issue with pickling mapping proxy type, it seems to work. --- psyneulink/core/components/component.py | 3 +++ psyneulink/core/globals/parameters.py | 6 ------ 2 files changed, 3 insertions(+), 6 deletions(-) diff --git a/psyneulink/core/components/component.py b/psyneulink/core/components/component.py index b08c15dab5c..92d7b980db4 100644 --- a/psyneulink/core/components/component.py +++ b/psyneulink/core/components/component.py @@ -1269,6 +1269,9 @@ def __init__(self, self.compositions = weakref.WeakSet() + # Delete the _user_specified_args attribute, we don't need it anymore + del self._user_specified_args + def __repr__(self): return '({0} {1})'.format(type(self).__name__, self.name) #return '{1}'.format(type(self).__name__, self.name) diff --git a/psyneulink/core/globals/parameters.py b/psyneulink/core/globals/parameters.py index 5e9d96bd4d0..458b27d95f4 100644 --- a/psyneulink/core/globals/parameters.py +++ b/psyneulink/core/globals/parameters.py @@ -466,12 +466,6 @@ def check_user_specified_wrapper(self, *args, **kwargs): except AttributeError: self._prev_constructor = constructor if '__init__' in type(self).__dict__ else None self._user_specified_args = copy.copy(kwargs) - - # If any of the kwargs are MappingProxyType, convert them to dict - for k, v in self._user_specified_args.items(): - if isinstance(v, MappingProxyType): - self._user_specified_args[k] = v.copy() - else: # add args determined in constructor to user_specifed. 
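# A pure-Python mirror (a sketch, not code from these patches) of the tag filter that
# PATCH 367 above compiles into printf(): a statement is emitted if the user enabled
# "all", if the statement is tagged "always", or if the two tag sets overlap.
import ast

def printf_enabled(stmt_tags, printf_tags_value="[]"):
    # printf_tags_value stands in for the 'printf_tags' option parsed out of the
    # PNL_LLVM_DEBUG environment variable, e.g. '{"torch"}' or '{"scheduler"}'
    user_tags = frozenset(ast.literal_eval(printf_tags_value))
    stmt_tags = frozenset(stmt_tags)
    return "all" in user_tags or "always" in stmt_tags or bool(stmt_tags & user_tags)

assert printf_enabled({"always"})                      # printed regardless of settings
assert printf_enabled({"torch"}, '{"torch"}')          # overlapping tag
assert not printf_enabled({"torch"}, '{"scheduler"}')  # no overlap, suppressed
assert printf_enabled({"torch"}, '{"all"}')            # user enabled everything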
# since some args are set by the values of other From 093405096093edfdc16a5f660c9e3364f5c91992 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Wed, 30 Oct 2024 22:23:26 -0400 Subject: [PATCH 369/410] llvm/ConditionGenerator: Use IntEnum to index the timestamp structure Signed-off-by: Jan Vesely --- psyneulink/core/llvm/helpers.py | 66 ++++++++++++++++++--------------- 1 file changed, 36 insertions(+), 30 deletions(-) diff --git a/psyneulink/core/llvm/helpers.py b/psyneulink/core/llvm/helpers.py index 9581b62bebc..07506bd29f3 100644 --- a/psyneulink/core/llvm/helpers.py +++ b/psyneulink/core/llvm/helpers.py @@ -9,6 +9,7 @@ # ********************************************* PNL LLVM helpers ************************************************************** import ast +from enum import IntEnum from contextlib import contextmanager import warnings @@ -451,6 +452,11 @@ def printf_float_matrix(ctx, builder, matrix, prefix="", suffix="\n", *, tags:se class ConditionGenerator: + class TimeIndex(IntEnum): + TRIAL = 0, + PASS = 1, + STEP = 2, + def __init__(self, ctx, composition): self.ctx = ctx self.composition = composition @@ -461,6 +467,8 @@ def get_private_condition_struct_type(self, composition): self.ctx.int32_ty, # Pass self.ctx.int32_ty]) # Step + assert len(time_stamp_struct) == len(self.TimeIndex) + status_struct = ir.LiteralStructType([ self.ctx.int32_ty, # number of executions in this run time_stamp_struct # time stamp of last execution @@ -510,6 +518,7 @@ def bump_ts(self, builder, cond_ptr, count=(0, 0, 1)): ts = builder.load(ts_ptr) assert len(ts.type) == len(count) + # Update run, pass, step of ts for idx in range(len(ts.type)): if all(v == 0 for v in count[:idx]): @@ -517,6 +526,7 @@ def bump_ts(self, builder, cond_ptr, count=(0, 0, 1)): el = builder.add(el, el.type(count[idx])) else: el = self.ctx.int32_ty(0) + ts = builder.insert_value(ts, el, idx) builder.store(ts, ts_ptr) @@ -552,19 +562,28 @@ def __get_node_status_ptr(self, builder, cond_ptr, node): def __get_node_ts(self, builder, cond_ptr, node): status_ptr = self.__get_node_status_ptr(builder, cond_ptr, node) - ts_ptr = builder.gep(status_ptr, [self.ctx.int32_ty(0), - self.ctx.int32_ty(1)]) + ts_ptr = builder.gep(status_ptr, [self.ctx.int32_ty(0), self.ctx.int32_ty(1)]) return builder.load(ts_ptr) def get_global_ts(self, builder, cond_ptr): ts_ptr = builder.gep(cond_ptr, [self._zero, self._zero, self._zero]) return builder.load(ts_ptr) + def _extract_global_time(self, builder, cond_ptr, time_index): + global_ts = self.get_global_ts(builder, cond_ptr) + return builder.extract_value(global_ts, time_index.value) + + def get_global_trial(self, builder, cond_ptr): + return self._extract_global_time(builder, cond_ptr, self.TimeIndex.TRIAL) + + def get_global_pass(self, builder, cond_ptr): + return self._extract_global_time(builder, cond_ptr, self.TimeIndex.PASS) + def generate_update_after_run(self, builder, cond_ptr, node): status_ptr = self.__get_node_status_ptr(builder, cond_ptr, node) status = builder.load(status_ptr) - # Update number of runs + # Update total number of runs runs = builder.extract_value(status, 0) runs = builder.add(runs, runs.type(1)) status = builder.insert_value(status, runs, 0) @@ -576,31 +595,27 @@ def generate_update_after_run(self, builder, cond_ptr, node): builder.store(status, status_ptr) def generate_ran_this_pass(self, builder, cond_ptr, node): - global_ts = self.get_global_ts(builder, cond_ptr) - global_trial = builder.extract_value(global_ts, 0) - global_pass = builder.extract_value(global_ts, 1) + 
global_trial = self.get_global_trial(builder, cond_ptr) + global_pass = self.get_global_pass(builder, cond_ptr) node_ts = self.__get_node_ts(builder, cond_ptr, node) - node_trial = builder.extract_value(node_ts, 0) - node_pass = builder.extract_value(node_ts, 1) + node_trial = builder.extract_value(node_ts, self.TimeIndex.TRIAL.value) + node_pass = builder.extract_value(node_ts, self.TimeIndex.PASS.value) pass_eq = builder.icmp_signed("==", node_pass, global_pass) trial_eq = builder.icmp_signed("==", node_trial, global_trial) return builder.and_(pass_eq, trial_eq) def generate_ran_this_trial(self, builder, cond_ptr, node): - global_ts = self.get_global_ts(builder, cond_ptr) - global_trial = builder.extract_value(global_ts, 0) + global_trial = self.get_global_trial(builder, cond_ptr) node_ts = self.__get_node_ts(builder, cond_ptr, node) - node_trial = builder.extract_value(node_ts, 0) + node_trial = builder.extract_value(node_ts, self.TimeIndex.TRIAL.value) return builder.icmp_signed("==", node_trial, global_trial) # TODO: replace num_exec_locs use with equivalent from nodes_states - def generate_sched_condition(self, builder, condition, cond_ptr, node, - is_finished_callbacks, num_exec_locs, - nodes_states): + def generate_sched_condition(self, builder, condition, cond_ptr, node, is_finished_callbacks, num_exec_locs, nodes_states): if isinstance(condition, Always): @@ -646,16 +661,13 @@ def generate_sched_condition(self, builder, condition, cond_ptr, node, elif isinstance(condition, AtTrial): trial_num = condition.args[0] - global_ts = self.get_global_ts(builder, cond_ptr) - trial = builder.extract_value(global_ts, 0) - return builder.icmp_unsigned("==", trial, trial.type(trial_num)) + current_trial = self.get_global_trial(builder, cond_ptr) + return builder.icmp_unsigned("==", current_trial, current_trial.type(trial_num)) elif isinstance(condition, AtPass): pass_num = condition.args[0] - global_ts = self.get_global_ts(builder, cond_ptr) - current_pass = builder.extract_value(global_ts, 1) - return builder.icmp_unsigned("==", current_pass, - current_pass.type(pass_num)) + current_pass = self.get_global_pass(builder, cond_ptr) + return builder.icmp_unsigned("==", current_pass, current_pass.type(pass_num)) elif isinstance(condition, EveryNCalls): target, count = condition.args @@ -670,9 +682,7 @@ def generate_sched_condition(self, builder, condition, cond_ptr, node, elif isinstance(condition, BeforeNCalls): target, count = condition.args scale = condition.time_scale.value - target_num_execs_in_scale = builder.gep(num_exec_locs[target], - [self.ctx.int32_ty(0), - self.ctx.int32_ty(scale)]) + target_num_execs_in_scale = builder.gep(num_exec_locs[target], [self.ctx.int32_ty(0), self.ctx.int32_ty(scale)]) num_execs = builder.load(target_num_execs_in_scale) return builder.icmp_unsigned('<', num_execs, num_execs.type(count)) @@ -680,18 +690,14 @@ def generate_sched_condition(self, builder, condition, cond_ptr, node, elif isinstance(condition, AtNCalls): target, count = condition.args scale = condition.time_scale.value - target_num_execs_in_scale = builder.gep(num_exec_locs[target], - [self.ctx.int32_ty(0), - self.ctx.int32_ty(scale)]) + target_num_execs_in_scale = builder.gep(num_exec_locs[target], [self.ctx.int32_ty(0), self.ctx.int32_ty(scale)]) num_execs = builder.load(target_num_execs_in_scale) return builder.icmp_unsigned('==', num_execs, num_execs.type(count)) elif isinstance(condition, AfterNCalls): target, count = condition.args scale = condition.time_scale.value - 
target_num_execs_in_scale = builder.gep(num_exec_locs[target], - [self.ctx.int32_ty(0), - self.ctx.int32_ty(scale)]) + target_num_execs_in_scale = builder.gep(num_exec_locs[target], [self.ctx.int32_ty(0), self.ctx.int32_ty(scale)]) num_execs = builder.load(target_num_execs_in_scale) return builder.icmp_unsigned('>=', num_execs, num_execs.type(count)) From fd8ca8ae21066a6e295f692faa91f9b1a58d0c9f Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Wed, 30 Oct 2024 23:12:24 -0400 Subject: [PATCH 370/410] llvm/Composition: Add debugging output to scheduled node executions Signed-off-by: Jan Vesely --- psyneulink/core/llvm/codegen.py | 55 +++++++++++++++++++++++++++++---- psyneulink/core/llvm/helpers.py | 3 ++ 2 files changed, 52 insertions(+), 6 deletions(-) diff --git a/psyneulink/core/llvm/codegen.py b/psyneulink/core/llvm/codegen.py index cd14fadc52e..b29f30efe8d 100644 --- a/psyneulink/core/llvm/codegen.py +++ b/psyneulink/core/llvm/codegen.py @@ -802,7 +802,7 @@ def _gen_composition_exec_context(ctx, composition, *, tags:frozenset, suffix="" def gen_composition_exec(ctx, composition, *, tags:frozenset): - simulation = "simulation" in tags + is_simulation = "simulation" in tags node_tags = tags.union({"node_assembly"}) with _gen_composition_exec_context(ctx, composition, tags=tags) as (builder, data, params, cond_gen): @@ -856,8 +856,17 @@ def gen_composition_exec(ctx, composition, *, tags:frozenset): builder.call(node_reinit_f, [state, params, comp_in, data, data]) # Run controller if it's enabled in 'BEFORE' mode - if simulation is False and composition.enable_controller and composition.controller_mode == BEFORE: + if is_simulation is False and composition.enable_controller and composition.controller_mode == BEFORE: assert composition.controller is not None + + helpers.printf(ctx, + builder, + "<%u/%u/%u> Executing: {}/{}\n".format(composition.name, composition.controller.name), + cond_gen.get_global_trial(builder, cond), + cond_gen.get_global_pass(builder, cond), + cond_gen.get_global_step(builder, cond), + tags={"scheduler"}) + controller_w = ctx.get_node_assembly(composition, composition.controller) controller_f = ctx.import_llvm_function(controller_w, tags=node_tags) builder.call(controller_f, [state, params, comp_in, data, data]) @@ -896,9 +905,9 @@ def gen_composition_exec(ctx, composition, *, tags:frozenset): builder.position_at_end(loop_body) previous_step = builder.load(run_set_ptr) - zero = ctx.int32_ty(0) any_cond = ctx.bool_ty(0) + # Calculate execution set before running the mechanisms for idx, node in enumerate(composition.nodes): run_set_node_ptr = builder.gep(run_set_ptr, [zero, ctx.int32_ty(idx)], name="run_cond_ptr_" + node.name) @@ -914,6 +923,17 @@ def gen_composition_exec(ctx, composition, *, tags:frozenset): any_cond = builder.or_(any_cond, node_cond, name="any_ran_cond") builder.store(node_cond, run_set_node_ptr) + prefix = "[SIMULATION] " if is_simulation else "" + helpers.printf(ctx, + builder, + "{}<%u/%u/%u> Considered: {}/{}: %d\n".format(prefix, composition.name, node.name), + cond_gen.get_global_trial(builder, cond), + cond_gen.get_global_pass(builder, cond), + cond_gen.get_global_step(builder, cond), + builder.select(node_cond, zero.type(1), zero), + tags={"scheduler" if not is_simulation else "simulation_scheduler"}) + + # Reset internal TIME_STEP clock for each node # NOTE: This is done _after_ condition evaluation, otherwise # TIME_STEP related conditions will only see 0 executions @@ -932,14 +952,25 @@ def gen_composition_exec(ctx, composition, *, 
tags:frozenset): node_w = ctx.get_node_assembly(composition, node) node_f = ctx.import_llvm_function(node_w, tags=node_tags) builder.block.name = "invoke_" + node_f.name + + prefix = "[SIMULATION] " if is_simulation else "" + helpers.printf(ctx, + builder, + "{}<%u/%u/%u> Executing: {}/{}\n".format(prefix, composition.name, node.name), + cond_gen.get_global_trial(builder, cond), + cond_gen.get_global_pass(builder, cond), + cond_gen.get_global_step(builder, cond), + tags={"scheduler" if not is_simulation else "simulation_scheduler"}) + # Wrappers do proper indexing of all structures # Mechanisms have only 5 args args = [state, params, comp_in, data, output_storage] if len(node_f.args) >= 6: # Composition wrappers have 6 args args.append(cond) - builder.call(node_f, args) + builder.call(node_f, args) cond_gen.generate_update_after_run(builder, cond, node) + builder.block.name = "post_invoke_" + node_f.name # Writeback results @@ -959,6 +990,7 @@ def gen_composition_exec(ctx, composition, *, tags:frozenset): cond_gen.bump_ts(builder, cond) builder.block.name = "update_iter_count" + # Increment number of iterations iters = builder.load(iter_ptr, name="iterw") iters = builder.add(iters, iters.type(1), name="iterw_inc") @@ -966,12 +998,15 @@ def gen_composition_exec(ctx, composition, *, tags:frozenset): max_iters = len(composition.scheduler.consideration_queue) completed_pass = builder.icmp_unsigned("==", iters, iters.type(max_iters), name="completed_pass") + # Increment pass and reset time step with builder.if_then(completed_pass): builder.block.name = "inc_pass" builder.store(zero, iter_ptr) + # Bumping automatically zeros lower elements cond_gen.bump_ts(builder, cond, (0, 1, 0)) + # Reset internal PASS clock for each node for time_loc in num_exec_locs.values(): num_exec_time_ptr = builder.gep(time_loc, [zero, ctx.int32_ty(TimeScale.PASS.value)]) @@ -981,9 +1016,17 @@ def gen_composition_exec(ctx, composition, *, tags:frozenset): builder.position_at_end(exit_block) - if simulation is False and composition.enable_controller and \ - composition.controller_mode == AFTER: + if is_simulation is False and composition.enable_controller and composition.controller_mode == AFTER: assert composition.controller is not None + + helpers.printf(ctx, + builder, + "<%u/%u/%u> Executing: {}/{}\n".format(composition.name, composition.controller.name), + cond_gen.get_global_trial(builder, cond), + cond_gen.get_global_pass(builder, cond), + cond_gen.get_global_step(builder, cond), + tags={"scheduler"}) + controller_w = ctx.get_node_assembly(composition, composition.controller) controller_f = ctx.import_llvm_function(controller_w, tags=node_tags) builder.call(controller_f, [state, params, comp_in, data, data]) diff --git a/psyneulink/core/llvm/helpers.py b/psyneulink/core/llvm/helpers.py index 07506bd29f3..1d04264975c 100644 --- a/psyneulink/core/llvm/helpers.py +++ b/psyneulink/core/llvm/helpers.py @@ -579,6 +579,9 @@ def get_global_trial(self, builder, cond_ptr): def get_global_pass(self, builder, cond_ptr): return self._extract_global_time(builder, cond_ptr, self.TimeIndex.PASS) + def get_global_step(self, builder, cond_ptr): + return self._extract_global_time(builder, cond_ptr, self.TimeIndex.STEP) + def generate_update_after_run(self, builder, cond_ptr, node): status_ptr = self.__get_node_status_ptr(builder, cond_ptr, node) status = builder.load(status_ptr) From 2af563fcd286ee7933692e6bdc88447ac4b9ff5f Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Thu, 31 Oct 2024 08:42:00 -0400 Subject: [PATCH 371/410] 
tests/Scheduler: Move condition construction args to test parametrization Signed-off-by: Jan Vesely --- tests/scheduling/test_scheduler.py | 96 +++++++++++------------------- 1 file changed, 34 insertions(+), 62 deletions(-) diff --git a/tests/scheduling/test_scheduler.py b/tests/scheduling/test_scheduler.py index 07feba1fcad..585dd8d7aaf 100644 --- a/tests/scheduling/test_scheduler.py +++ b/tests/scheduling/test_scheduler.py @@ -1574,80 +1574,52 @@ def test_time_termination_measures(self, comp_mode, timescale, expected): np.testing.assert_allclose(result, expected) @pytest.mark.composition - @pytest.mark.parametrize("condition,scale,expected_result", - [(pnl.BeforeNCalls, TimeScale.TRIAL, [[.05, .05]]), - (pnl.BeforeNCalls, TimeScale.PASS, [[.05, .05]]), - (pnl.EveryNCalls, None, [[0.05, .05]]), - (pnl.AtNCalls, TimeScale.TRIAL, [[.25, .25]]), - (pnl.AtNCalls, TimeScale.RUN, [[.25, .25]]), - (pnl.AfterNCalls, TimeScale.TRIAL, [[.25, .25]]), - (pnl.AfterNCalls, TimeScale.PASS, [[.05, .05]]), - (pnl.WhenFinished, None, [[1.0, 1.0]]), - (pnl.WhenFinishedAny, None, [[1.0, 1.0]]), - (pnl.WhenFinishedAll, None, [[1.0, 1.0]]), - (pnl.All, None, [[1.0, 1.0]]), - (pnl.Any, None, [[1.0, 1.0]]), - (pnl.Not, None, [[.05, .05]]), - (pnl.AllHaveRun, None, [[.05, .05]]), - (pnl.Always, None, [[0.05, 0.05]]), - (pnl.AtPass, None, [[.3, .3]]), - (pnl.AtTrial, None, [[0.05, 0.05]]), + @pytest.mark.parametrize("condition,condition_params,expected_result", + [(pnl.BeforeNCalls, {"time_scale": TimeScale.TRIAL, "n": 5}, [[.05, .05]]), + (pnl.BeforeNCalls, {"time_scale": TimeScale.PASS, "n": 5}, [[.05, .05]]), + (pnl.EveryNCalls, {"n": 1}, [[0.05, .05]]), + (pnl.AtNCalls, {"time_scale": TimeScale.TRIAL, "n": 5}, [[.25, .25]]), + (pnl.AtNCalls, {"time_scale": TimeScale.RUN, "n": 5}, [[.25, .25]]), + (pnl.AfterNCalls, {"time_scale": TimeScale.TRIAL, "n": 5}, [[.25, .25]]), + # Mechanisms run only once per PASS unless they are in 'run_until_finished' mode. + (pnl.AfterNCalls, {"time_scale": TimeScale.PASS, "n": 1}, [[.05, .05]]), + (pnl.WhenFinished, {}, [[1.0, 1.0]]), + (pnl.WhenFinishedAny, {}, [[1.0, 1.0]]), + (pnl.WhenFinishedAll, {}, [[1.0, 1.0]]), + (pnl.All, {}, [[1.0, 1.0]]), + (pnl.Any, {}, [[1.0, 1.0]]), + (pnl.Not, {}, [[.05, .05]]), + (pnl.AllHaveRun, {}, [[.05, .05]]), + (pnl.Always, {}, [[0.05, 0.05]]), + (pnl.AtPass, {"n": 5}, [[.3, .3]]), + (pnl.AtTrial, {"n": 0}, [[0.05, 0.05]]), #(pnl.Never), #TODO: Find a good test case for this! ]) # 'LLVM' mode is not supported, because synchronization of compiler and # python values during execution is not implemented. 
@pytest.mark.usefixtures("comp_mode_no_llvm") - def test_scheduler_conditions(self, comp_mode, condition, scale, expected_result): - decisionMaker = pnl.DDM( - function=pnl.DriftDiffusionIntegrator(starting_value=0, - threshold=1, - noise=0.0, - time_step_size=1.0), - reset_stateful_function_when=pnl.AtTrialStart(), - execute_until_finished=False, - # Use only the decision variable in this test - output_ports=[pnl.DECISION_VARIABLE], - name='DDM') + def test_scheduler_conditions(self, comp_mode, condition, condition_params, expected_result): + decisionMaker = pnl.DDM(function=pnl.DriftDiffusionIntegrator(starting_value=0, + threshold=1, + noise=0.0, + time_step_size=1.0), + reset_stateful_function_when=pnl.AtTrialStart(), + execute_until_finished=False, + # Use only the decision variable in this test + output_ports=[pnl.DECISION_VARIABLE], + name='DDM') response = pnl.ProcessingMechanism(size=2, name="GATE") comp = pnl.Composition() comp.add_linear_processing_pathway([decisionMaker, response]) - if condition is pnl.BeforeNCalls: - comp.scheduler.add_condition(response, condition(decisionMaker, 5, - time_scale=scale)) - elif condition is pnl.AtNCalls: - comp.scheduler.add_condition(response, condition(decisionMaker, 5, - time_scale=scale)) - elif condition is pnl.AfterNCalls: - # Mechanisms run only once per PASS unless they are in - # 'run_until_finished' mode. - c = 1 if scale is TimeScale.PASS else 5 - comp.scheduler.add_condition(response, condition(decisionMaker, c, - time_scale=scale)) - elif condition is pnl.EveryNCalls: - comp.scheduler.add_condition(response, condition(decisionMaker, 1)) - elif condition is pnl.WhenFinished: - comp.scheduler.add_condition(response, condition(decisionMaker)) - elif condition is pnl.WhenFinishedAny: - comp.scheduler.add_condition(response, condition(decisionMaker)) - elif condition is pnl.WhenFinishedAll: - comp.scheduler.add_condition(response, condition(decisionMaker)) - elif condition is pnl.All: - comp.scheduler.add_condition(response, condition(pnl.WhenFinished(decisionMaker))) - elif condition is pnl.Any: - comp.scheduler.add_condition(response, condition(pnl.WhenFinished(decisionMaker))) - elif condition is pnl.Not: + if condition in {pnl.All, pnl.Any, pnl.Not}: comp.scheduler.add_condition(response, condition(pnl.WhenFinished(decisionMaker))) - elif condition is pnl.AllHaveRun: - comp.scheduler.add_condition(response, condition(decisionMaker)) - elif condition is pnl.Always: - comp.scheduler.add_condition(response, condition()) - elif condition is pnl.AtPass: - comp.scheduler.add_condition(response, condition(5)) - elif condition is pnl.AtTrial: - comp.scheduler.add_condition(response, condition(0)) + elif condition in {pnl.Always, pnl.Never, pnl.AtPass, pnl.AtTrial}: + comp.scheduler.add_condition(response, condition(**condition_params)) + else: + comp.scheduler.add_condition(response, condition(decisionMaker, **condition_params)) result = comp.run([0.05], execution_mode=comp_mode) np.testing.assert_allclose(result, expected_result) From 375b89d3e84bd6257bc798c04bea2ec3023f2132 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Thu, 31 Oct 2024 08:56:23 -0400 Subject: [PATCH 372/410] tests/Scheduler: Enable testing in per-node compiled mode Cleanup. 
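With the comp_mode_no_llvm fixture gone, the parametrized condition tests above are also exercised in the per-node compiled mode; as a rough sketch, each case now additionally runs the equivalent of:

    comp.run([0.05], execution_mode=pnl.ExecutionMode.LLVM)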
Signed-off-by: Jan Vesely --- tests/scheduling/test_scheduler.py | 17 +++++------------ 1 file changed, 5 insertions(+), 12 deletions(-) diff --git a/tests/scheduling/test_scheduler.py b/tests/scheduling/test_scheduler.py index 585dd8d7aaf..7b768c7849d 100644 --- a/tests/scheduling/test_scheduler.py +++ b/tests/scheduling/test_scheduler.py @@ -1595,9 +1595,6 @@ def test_time_termination_measures(self, comp_mode, timescale, expected): (pnl.AtTrial, {"n": 0}, [[0.05, 0.05]]), #(pnl.Never), #TODO: Find a good test case for this! ]) - # 'LLVM' mode is not supported, because synchronization of compiler and - # python values during execution is not implemented. - @pytest.mark.usefixtures("comp_mode_no_llvm") def test_scheduler_conditions(self, comp_mode, condition, condition_params, expected_result): decisionMaker = pnl.DDM(function=pnl.DriftDiffusionIntegrator(starting_value=0, threshold=1, @@ -1634,16 +1631,12 @@ def test_scheduler_conditions(self, comp_mode, condition, condition_params, expe [(pnl.AtTrial, None, [[[1.0]], [[2.0]]]), ]) def test_run_term_conditions(self, mode, condition, scale, expected_result): - incrementing_mechanism = pnl.ProcessingMechanism( - function=pnl.SimpleIntegrator - ) - comp = pnl.Composition( - pathways=[incrementing_mechanism] - ) - comp.scheduler.termination_conds = { - pnl.TimeScale.RUN: condition(2) - } + incrementing_mechanism = pnl.ProcessingMechanism(function=pnl.SimpleIntegrator) + comp = pnl.Composition(pathways=[incrementing_mechanism]) + + comp.scheduler.termination_conds = {pnl.TimeScale.RUN: condition(2)} r = comp.run(inputs=[1], num_trials=5, execution_mode=mode) + np.testing.assert_allclose(r, expected_result[-1]) np.testing.assert_allclose(comp.results, expected_result) From e5461ab89bcc90da0c891980461fd0cf3e87255d Mon Sep 17 00:00:00 2001 From: David Turner Date: Thu, 31 Oct 2024 15:27:26 -0400 Subject: [PATCH 373/410] Remove unused MappingProxyType import. --- psyneulink/core/globals/parameters.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/psyneulink/core/globals/parameters.py b/psyneulink/core/globals/parameters.py index 458b27d95f4..e2204c6ecdd 100644 --- a/psyneulink/core/globals/parameters.py +++ b/psyneulink/core/globals/parameters.py @@ -315,7 +315,6 @@ def _recurrent_transfer_mechanism_matrix_setter(value, owning_component=None, co import types import typing import weakref -from types import MappingProxyType import toposort @@ -338,7 +337,6 @@ def _recurrent_transfer_mechanism_matrix_setter(value, owning_component=None, co update_array_in_place, ) from psyneulink.core.rpc.graph_pb2 import Entry, ndArray -from types import MappingProxyType __all__ = [ 'Defaults', 'get_validator_by_function', 'Parameter', 'ParameterAlias', 'ParameterError', From 18cbb10b86d874afba124e844f17d0d6cfe2fa16 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Thu, 31 Oct 2024 10:15:49 -0400 Subject: [PATCH 374/410] llvm/Scheduler: Do not use precomputed locations of execution counts The information can be extracted from nodes_states. 
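Roughly, the lookup is done by the helper added below, which walks the existing composition state directly (comments added here for orientation):

    def _node_executions_for_scale(self, builder, node, node_states, time_scale:TimeScale):
        # index this node's entry in the per-node state array
        node_idx = self.composition._get_node_index(node)
        node_state = builder.gep(node_states, [self._zero, self.ctx.int32_ty(node_idx)])
        # its "num_executions" counters, one slot per TimeScale
        num_exec_ptr = get_state_ptr(builder, node, node_state, "num_executions")
        # load the counter for the requested scale (RUN/TRIAL/PASS/TIME_STEP)
        count_ptr = builder.gep(num_exec_ptr, [self._zero, self.ctx.int32_ty(time_scale.value)])
        return builder.load(count_ptr)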
Signed-off-by: Jan Vesely --- psyneulink/core/llvm/codegen.py | 12 +++++----- psyneulink/core/llvm/helpers.py | 40 +++++++++++++++++---------------- 2 files changed, 27 insertions(+), 25 deletions(-) diff --git a/psyneulink/core/llvm/codegen.py b/psyneulink/core/llvm/codegen.py index b29f30efe8d..c15fc1b6358 100644 --- a/psyneulink/core/llvm/codegen.py +++ b/psyneulink/core/llvm/codegen.py @@ -848,7 +848,6 @@ def gen_composition_exec(ctx, composition, *, tags:frozenset): cond, node, is_finished_callbacks, - num_exec_locs, nodes_states) with builder.if_then(reinit_cond): node_w = ctx.get_node_assembly(composition, node) @@ -893,7 +892,6 @@ def gen_composition_exec(ctx, composition, *, tags:frozenset): cond, None, is_finished_callbacks, - num_exec_locs, nodes_states) trial_cond = builder.not_(trial_term_cond, name="not_trial_term_cond") @@ -916,7 +914,6 @@ def gen_composition_exec(ctx, composition, *, tags:frozenset): cond, node, is_finished_callbacks, - num_exec_locs, nodes_states) ran = cond_gen.generate_ran_this_pass(builder, cond, node) node_cond = builder.and_(node_cond, builder.not_(ran), name="run_cond_" + node.name) @@ -1112,9 +1109,12 @@ def gen_composition_run(ctx, composition, *, tags:frozenset): # Generate a while not 'end condition' loop builder.position_at_end(loop_condition) - run_term_cond = cond_gen.generate_sched_condition( - builder, composition.termination_processing[TimeScale.RUN], - cond, None, None, None, nodes_states) + run_term_cond = cond_gen.generate_sched_condition(builder, + composition.termination_processing[TimeScale.RUN], + cond, + None, + None, + nodes_states) run_cond = builder.not_(run_term_cond, name="not_run_term_cond") # Iter cond diff --git a/psyneulink/core/llvm/helpers.py b/psyneulink/core/llvm/helpers.py index 1d04264975c..7d4ef3836e7 100644 --- a/psyneulink/core/llvm/helpers.py +++ b/psyneulink/core/llvm/helpers.py @@ -617,9 +617,15 @@ def generate_ran_this_trial(self, builder, cond_ptr, node): return builder.icmp_signed("==", node_trial, global_trial) - # TODO: replace num_exec_locs use with equivalent from nodes_states - def generate_sched_condition(self, builder, condition, cond_ptr, node, is_finished_callbacks, num_exec_locs, nodes_states): + def _node_executions_for_scale(self, builder, node, node_states, time_scale:TimeScale): + node_idx = self.composition._get_node_index(node) + node_state = builder.gep(node_states, [self._zero, self.ctx.int32_ty(node_idx)]) + num_exec_ptr = get_state_ptr(builder, node, node_state, "num_executions") + count_ptr = builder.gep(num_exec_ptr, [self._zero, self.ctx.int32_ty(time_scale.value)]) + return builder.load(count_ptr) + + def generate_sched_condition(self, builder, condition, cond_ptr, self_node, is_finished_callbacks, nodes_states): if isinstance(condition, Always): return self.ctx.bool_ty(1) @@ -628,13 +634,13 @@ def generate_sched_condition(self, builder, condition, cond_ptr, node, is_finish return self.ctx.bool_ty(0) elif isinstance(condition, Not): - orig_condition = self.generate_sched_condition(builder, condition.condition, cond_ptr, node, is_finished_callbacks, num_exec_locs, nodes_states) + orig_condition = self.generate_sched_condition(builder, condition.condition, cond_ptr, self_node, is_finished_callbacks, nodes_states) return builder.not_(orig_condition) elif isinstance(condition, All): agg_cond = self.ctx.bool_ty(1) for cond in condition.args: - cond_res = self.generate_sched_condition(builder, cond, cond_ptr, node, is_finished_callbacks, num_exec_locs, nodes_states) + cond_res = 
self.generate_sched_condition(builder, cond, cond_ptr, self_node, is_finished_callbacks, nodes_states) agg_cond = builder.and_(agg_cond, cond_res) return agg_cond @@ -658,7 +664,7 @@ def generate_sched_condition(self, builder, condition, cond_ptr, node, is_finish elif isinstance(condition, Any): agg_cond = self.ctx.bool_ty(0) for cond in condition.args: - cond_res = self.generate_sched_condition(builder, cond, cond_ptr, node, is_finished_callbacks, num_exec_locs, nodes_states) + cond_res = self.generate_sched_condition(builder, cond, cond_ptr, self_node, is_finished_callbacks, nodes_states) agg_cond = builder.or_(agg_cond, cond_res) return agg_cond @@ -674,34 +680,30 @@ def generate_sched_condition(self, builder, condition, cond_ptr, node, is_finish elif isinstance(condition, EveryNCalls): target, count = condition.args - assert count == 1, "EveryNCalls isonly supprted with count == 1" + assert count == 1, "EveryNCalls is only supported with count == 1 (count: {})".format(count) target_ts = self.__get_node_ts(builder, cond_ptr, target) - node_ts = self.__get_node_ts(builder, cond_ptr, node) + node_ts = self.__get_node_ts(builder, cond_ptr, self_node) # If target ran after node did its TS will be greater node's return self.ts_compare(builder, node_ts, target_ts, '<') elif isinstance(condition, BeforeNCalls): - target, count = condition.args - scale = condition.time_scale.value - target_num_execs_in_scale = builder.gep(num_exec_locs[target], [self.ctx.int32_ty(0), self.ctx.int32_ty(scale)]) - num_execs = builder.load(target_num_execs_in_scale) + node, count = condition.args + num_execs = self._node_executions_for_scale(builder, node, nodes_states, condition.time_scale) return builder.icmp_unsigned('<', num_execs, num_execs.type(count)) elif isinstance(condition, AtNCalls): - target, count = condition.args - scale = condition.time_scale.value - target_num_execs_in_scale = builder.gep(num_exec_locs[target], [self.ctx.int32_ty(0), self.ctx.int32_ty(scale)]) - num_execs = builder.load(target_num_execs_in_scale) + node, count = condition.args + num_execs = self._node_executions_for_scale(builder, node, nodes_states, condition.time_scale) + return builder.icmp_unsigned('==', num_execs, num_execs.type(count)) elif isinstance(condition, AfterNCalls): - target, count = condition.args - scale = condition.time_scale.value - target_num_execs_in_scale = builder.gep(num_exec_locs[target], [self.ctx.int32_ty(0), self.ctx.int32_ty(scale)]) - num_execs = builder.load(target_num_execs_in_scale) + node, count = condition.args + num_execs = self._node_executions_for_scale(builder, node, nodes_states, condition.time_scale) + return builder.icmp_unsigned('>=', num_execs, num_execs.type(count)) elif isinstance(condition, WhenFinished): From 68b75f6d307db95bdeff66fadcddaff14ab8800e Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Thu, 31 Oct 2024 11:10:59 -0400 Subject: [PATCH 375/410] llvm/codegen: Factor out resetting of execution counts to a helper function With the exception of TIME_STEP reset which uses a bitmask of executed nodes to eliminate unnecessary writes. 
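The generated code then clears the counters through one helper; a sketch of the call sites in the hunks below:

    # trial start: reset TRIAL/PASS/TIME_STEP counters for every node
    _reset_composition_nodes_exec_counts(ctx, builder, composition, state,
                                         [TimeScale.TRIAL, TimeScale.PASS, TimeScale.TIME_STEP])
    # after a completed pass: reset only the PASS counters
    _reset_composition_nodes_exec_counts(ctx, builder, composition, state, [TimeScale.PASS])
    # start of a run: reset the RUN counters
    _reset_composition_nodes_exec_counts(ctx, builder, composition, state, [TimeScale.RUN])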
Signed-off-by: Jan Vesely --- psyneulink/core/llvm/codegen.py | 42 ++++++++++++++++----------------- 1 file changed, 21 insertions(+), 21 deletions(-) diff --git a/psyneulink/core/llvm/codegen.py b/psyneulink/core/llvm/codegen.py index c15fc1b6358..0ef30af9ffd 100644 --- a/psyneulink/core/llvm/codegen.py +++ b/psyneulink/core/llvm/codegen.py @@ -801,6 +801,17 @@ def _gen_composition_exec_context(ctx, composition, *, tags:frozenset, suffix="" builder.ret_void() +def _reset_composition_nodes_exec_counts(ctx, builder, composition, comp_state, time_scales): + nodes_states = helpers.get_state_ptr(builder, composition, comp_state, "nodes") + for idx, node in enumerate(composition._all_nodes): + node_state = builder.gep(nodes_states, [ctx.int32_ty(0), ctx.int32_ty(idx)]) + num_exec_vec_ptr = helpers.get_state_ptr(builder, node, node_state, "num_executions") + + for scale in time_scales: + num_exec_time_ptr = builder.gep(num_exec_vec_ptr, [ctx.int32_ty(0), ctx.int32_ty(scale.value)]) + builder.store(num_exec_time_ptr.type.pointee(0), num_exec_time_ptr) + + def gen_composition_exec(ctx, composition, *, tags:frozenset): is_simulation = "simulation" in tags node_tags = tags.union({"node_assembly"}) @@ -827,19 +838,16 @@ def gen_composition_exec(ctx, composition, *, tags:frozenset): is_finished_callbacks[node] = (wrapper, args) - # Reset internal TRIAL/PASS/TIME_STEP clock for each node - # This also resets TIME_STEP counter for input_CIM and parameter_CIM - # executed above - for time_loc in num_exec_locs.values(): - for scale in (TimeScale.TRIAL, TimeScale.PASS, TimeScale.TIME_STEP): - num_exec_time_ptr = builder.gep(time_loc, [ctx.int32_ty(0), ctx.int32_ty(scale.value)]) - builder.store(num_exec_time_ptr.type.pointee(0), num_exec_time_ptr) + # Resetting internal TRIAL/PASS/TIME_STEP clock for each node + # also resets TIME_STEP counter for input_CIM and parameter_CIM + # executed when setting up the context + _reset_composition_nodes_exec_counts(ctx, builder, composition, state, [TimeScale.TRIAL, TimeScale.PASS, TimeScale.TIME_STEP]) - # Check if there's anything to reset + # Check if there's any stateful node to to reset for node in composition._all_nodes: - # FIXME: This should not be necessary. The code gets DCE'd, - # but there are still some problems with generation - # 'reset' function + # FIXME: This should not be necessary. The code gets DCE'd, but + # there are still some issues with generating the 'reset' + # function. 
if node is composition.controller: continue @@ -1004,10 +1012,7 @@ def gen_composition_exec(ctx, composition, *, tags:frozenset): # Bumping automatically zeros lower elements cond_gen.bump_ts(builder, cond, (0, 1, 0)) - # Reset internal PASS clock for each node - for time_loc in num_exec_locs.values(): - num_exec_time_ptr = builder.gep(time_loc, [zero, ctx.int32_ty(TimeScale.PASS.value)]) - builder.store(num_exec_time_ptr.type.pointee(0), num_exec_time_ptr) + _reset_composition_nodes_exec_counts(ctx, builder, composition, state, [TimeScale.PASS]) builder.branch(loop_condition) @@ -1084,12 +1089,7 @@ def gen_composition_run(ctx, composition, *, tags:frozenset): builder.store(data_in.type.pointee(input_init), data_in) builder.store(inputs_ptr.type.pointee(1), inputs_ptr) - # Reset internal 'RUN' clocks of each node - for idx, node in enumerate(composition._all_nodes): - node_state = builder.gep(state, [ctx.int32_ty(0), ctx.int32_ty(0), ctx.int32_ty(idx)]) - num_executions_ptr = helpers.get_state_ptr(builder, node, node_state, "num_executions") - num_exec_time_ptr = builder.gep(num_executions_ptr, [ctx.int32_ty(0), ctx.int32_ty(TimeScale.RUN.value)]) - builder.store(num_exec_time_ptr.type.pointee(None), num_exec_time_ptr) + _reset_composition_nodes_exec_counts(ctx, builder, composition, state, [TimeScale.RUN]) # Allocate and initialize condition structure cond_gen = helpers.ConditionGenerator(ctx, composition) From a3200879ba2d9a9e6728b49a170bb39d7d11e88c Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Thu, 31 Oct 2024 17:12:23 -0400 Subject: [PATCH 376/410] llvm/scheduler: Drop tracking of the number of node executions Not used anywhere. Signed-off-by: Jan Vesely --- psyneulink/core/llvm/codegen.py | 2 +- psyneulink/core/llvm/helpers.py | 51 ++++++++++++++------------------- 2 files changed, 23 insertions(+), 30 deletions(-) diff --git a/psyneulink/core/llvm/codegen.py b/psyneulink/core/llvm/codegen.py index 0ef30af9ffd..e340bfd4fb2 100644 --- a/psyneulink/core/llvm/codegen.py +++ b/psyneulink/core/llvm/codegen.py @@ -974,7 +974,7 @@ def gen_composition_exec(ctx, composition, *, tags:frozenset): args.append(cond) builder.call(node_f, args) - cond_gen.generate_update_after_run(builder, cond, node) + cond_gen.generate_update_after_node_execution(builder, cond, node) builder.block.name = "post_invoke_" + node_f.name diff --git a/psyneulink/core/llvm/helpers.py b/psyneulink/core/llvm/helpers.py index 7d4ef3836e7..e5dc9f612df 100644 --- a/psyneulink/core/llvm/helpers.py +++ b/psyneulink/core/llvm/helpers.py @@ -469,19 +469,12 @@ def get_private_condition_struct_type(self, composition): assert len(time_stamp_struct) == len(self.TimeIndex) - status_struct = ir.LiteralStructType([ - self.ctx.int32_ty, # number of executions in this run - time_stamp_struct # time stamp of last execution - ]) - structure = ir.LiteralStructType([ - time_stamp_struct, # current time stamp - ir.ArrayType(status_struct, len(composition.nodes)) # for each node - ]) - return structure + nodes_time_stamps_array = ir.ArrayType(time_stamp_struct, len(composition.nodes)) + + return ir.LiteralStructType((time_stamp_struct, nodes_time_stamps_array)) def get_private_condition_initializer(self, composition): - return ((0, 0, 0), - tuple((0, (-1, -1, -1)) for _ in composition.nodes)) + return ((0, 0, 0), tuple((-1, -1, -1) for _ in composition.nodes)) def get_condition_struct_type(self, node=None): node = self.composition if node is None else node @@ -507,14 +500,14 @@ def bump_ts(self, builder, cond_ptr, count=(0, 0, 1)): """ 
Increments the time structure of the composition. Count should be a tuple where there is a number in only one spot, and zeroes elsewhere. - Indices greater than that of the one are zeroed. + Indices greater than the incremented one are zeroed. """ # Only one element should be non-zero assert count.count(0) == len(count) - 1 # Get timestruct pointer - ts_ptr = builder.gep(cond_ptr, [self._zero, self._zero, self._zero]) + ts_ptr = self.__get_global_ts_ptr(builder, cond_ptr) ts = builder.load(ts_ptr) assert len(ts.type) == len(count) @@ -556,13 +549,20 @@ def ts_compare(self, builder, ts1, ts2, comp): return result - def __get_node_status_ptr(self, builder, cond_ptr, node): + def __get_global_ts_ptr(self, builder, cond_ptr): + # derefence the structure, the first element (private structure), + # and the first element of the private strucutre is the global ts. + return builder.gep(cond_ptr, [self._zero, self._zero, self._zero]) + + def __get_node_ts_ptr(self, builder, cond_ptr, node): node_idx = self.ctx.int32_ty(self.composition.nodes.index(node)) + + # derefence the structure, the first element (private structure), the + # second element is the node time stamp array, use index in the array return builder.gep(cond_ptr, [self._zero, self._zero, self.ctx.int32_ty(1), node_idx]) def __get_node_ts(self, builder, cond_ptr, node): - status_ptr = self.__get_node_status_ptr(builder, cond_ptr, node) - ts_ptr = builder.gep(status_ptr, [self.ctx.int32_ty(0), self.ctx.int32_ty(1)]) + ts_ptr = self.__get_node_ts_ptr(builder, cond_ptr, node) return builder.load(ts_ptr) def get_global_ts(self, builder, cond_ptr): @@ -582,20 +582,13 @@ def get_global_pass(self, builder, cond_ptr): def get_global_step(self, builder, cond_ptr): return self._extract_global_time(builder, cond_ptr, self.TimeIndex.STEP) - def generate_update_after_run(self, builder, cond_ptr, node): - status_ptr = self.__get_node_status_ptr(builder, cond_ptr, node) - status = builder.load(status_ptr) - - # Update total number of runs - runs = builder.extract_value(status, 0) - runs = builder.add(runs, runs.type(1)) - status = builder.insert_value(status, runs, 0) - - # Update time stamp - ts = self.get_global_ts(builder, cond_ptr) - status = builder.insert_value(status, ts, 1) + def generate_update_after_node_execution(self, builder, cond_ptr, node): + # Update time stamp of the last execution + global_ts_ptr = self.__get_global_ts_ptr(builder, cond_ptr) + global_ts = builder.load(global_ts_ptr) - builder.store(status, status_ptr) + node_ts_ptr = self.__get_node_ts_ptr(builder, cond_ptr, node) + builder.store(global_ts, node_ts_ptr) def generate_ran_this_pass(self, builder, cond_ptr, node): global_trial = self.get_global_trial(builder, cond_ptr) From efa28752d0db751d28edcd068c91b8f8f18a7733 Mon Sep 17 00:00:00 2001 From: David Turner Date: Fri, 1 Nov 2024 15:09:33 -0400 Subject: [PATCH 377/410] Fixes for Jan's PR review. 
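One of the review changes below folds the per-branch pytest.warns checks into a parametrized execution_context, so the test body becomes a single with-block. A minimal standalone sketch of that pattern follows; the toy run() function and its warning text are invented for illustration and are not part of the PEC API.

    import contextlib
    import warnings

    import pytest

    def run(seed=None):
        # Toy stand-in for the composition run; warns when no seed is given.
        if seed is None:
            warnings.warn("no seed supplied", UserWarning)
        return 0.01

    @pytest.mark.parametrize(
        "seed, execution_context",
        [
            (0, contextlib.nullcontext()),
            (None, pytest.warns(UserWarning, match="no seed supplied")),
        ],
    )
    def test_run(seed, execution_context):
        with execution_context:
            assert run(seed) == 0.01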
--- .../functions/nonstateful/fitfunctions.py | 2 +- .../parameterestimationcomposition.py | 6 +- .../test_parameterestimationcomposition.py | 84 ++++--------------- 3 files changed, 21 insertions(+), 71 deletions(-) diff --git a/psyneulink/core/components/functions/nonstateful/fitfunctions.py b/psyneulink/core/components/functions/nonstateful/fitfunctions.py index ca146716070..5a962a51af4 100644 --- a/psyneulink/core/components/functions/nonstateful/fitfunctions.py +++ b/psyneulink/core/components/functions/nonstateful/fitfunctions.py @@ -319,7 +319,7 @@ def __init__( **kwargs, ): self.method = method - self._optuna_kwargs = {} if optuna_kwargs is None else optuna_kwargs.copy() + self._optuna_kwargs = {} if optuna_kwargs is None else {**optuna_kwargs} self.direction = direction diff --git a/psyneulink/core/compositions/parameterestimationcomposition.py b/psyneulink/core/compositions/parameterestimationcomposition.py index c85687b05f4..6ed2dd2ad60 100644 --- a/psyneulink/core/compositions/parameterestimationcomposition.py +++ b/psyneulink/core/compositions/parameterestimationcomposition.py @@ -165,7 +165,7 @@ from beartype import beartype -from psyneulink._typing import Optional, Union, Dict, List, Callable, Literal +from psyneulink._typing import Optional, Union, Dict, List, Callable, Literal, Mapping import psyneulink.core.llvm as pnllvm from psyneulink.core.globals.utilities import ContentAddressableList, convert_to_np_array @@ -479,7 +479,7 @@ def __init__( num_trials_per_estimate: Optional[int] = None, initial_seed: Optional[int] = None, same_seed_for_all_parameter_combinations: Optional[bool] = None, - depends_on: Optional[Dict] = None, + depends_on: Optional[Mapping] = None, name: Optional[str] = None, context: Optional[Context] = None, **kwargs, @@ -1240,7 +1240,7 @@ def set_parameters_in_inputs(self, parameters, inputs): if in_arr.dtype != object: in_arr = np.atleast_3d(in_arr) - # If the inputs don't have columns for the fitting parameters, then we need to add them + # If the inputs don't have columns for the fitting parameters, then we need to add them if in_arr.shape[1] != len(self.composition.input_ports): num_missing = len(self.composition.input_ports) - in_arr.shape[1] if in_arr.ndim == 3: diff --git a/tests/composition/pec/test_parameterestimationcomposition.py b/tests/composition/pec/test_parameterestimationcomposition.py index 8ae98129f14..a4e52eea79b 100644 --- a/tests/composition/pec/test_parameterestimationcomposition.py +++ b/tests/composition/pec/test_parameterestimationcomposition.py @@ -3,6 +3,7 @@ import pandas as pd import pytest import scipy +import contextlib from packaging import version as pversion @@ -183,13 +184,15 @@ def test_pec_run_input_formats(inputs_dict, error_msg): @pytest.mark.composition @pytest.mark.parametrize( - "opt_method, optuna_kwargs, expected_result", + "opt_method, optuna_kwargs, expected_result, execution_context", [ - ("differential_evolution", None, expected_differential_evolution), - (optuna.samplers.RandomSampler(seed=0), None, [0.01]), - (optuna.samplers.QMCSampler(seed=0), None, [0.01]), - (optuna.samplers.RandomSampler, {'seed': 0}, [0.01]), - (optuna.samplers.RandomSampler(), None, None) + ("differential_evolution", None, expected_differential_evolution, contextlib.nullcontext()), + (optuna.samplers.RandomSampler(seed=0), None, [0.01], contextlib.nullcontext()), + (optuna.samplers.QMCSampler(seed=0), None, [0.01], contextlib.nullcontext()), + (optuna.samplers.RandomSampler, {'seed': 0}, [0.01], + pytest.warns(UserWarning, 
match="Overriding seed passed to optuna sampler with seed passed to PEC.")), + (optuna.samplers.RandomSampler(), None, None, + pytest.warns(UserWarning, match="initial_seed on PEC is not None, but instantiated optuna sampler is being used.")) ], ids=[ "differential_evolution", @@ -199,7 +202,7 @@ def test_pec_run_input_formats(inputs_dict, error_msg): "optuna_random_sampler_no_seed" ], ) -def test_parameter_optimization_ddm(func_mode, opt_method, optuna_kwargs, expected_result): +def test_parameter_optimization_ddm(func_mode, opt_method, optuna_kwargs, expected_result, execution_context): """Test parameter optimization of a DDM in integrator mode""" if func_mode == "Python": @@ -273,28 +276,14 @@ def reward_rate(sim_data): trial_inputs[0] = np.abs(trial_inputs[0]) trial_inputs[-1] = np.abs(trial_inputs[-1]) - inputs_dict = {decision: trial_inputs} - - # If we are testing an instantiated optuna sampler, make sure the warning is generated about - # random seeds - if isinstance(opt_method, optuna.samplers.RandomSampler): - with pytest.warns(UserWarning, match="initial_seed on PEC is not None, but instantiated optuna sampler is being used."): - pec.run(inputs=inputs_dict) - - elif isinstance(opt_method, type) and issubclass(opt_method, optuna.samplers.BaseSampler): - with pytest.warns(UserWarning, match="Overriding seed passed to optuna sampler with seed passed to PEC."): - pec.run(inputs=inputs_dict) - - else: + with execution_context: pec.run(inputs={comp: trial_inputs}) if expected_result is not None: - if opt_method == "differential_evolution": - np.testing.assert_allclose( - list(pec.optimized_parameter_values.values()), expected_result, atol=1e-2 - ) - else: - np.testing.assert_allclose(list(pec.optimized_parameter_values.values()), expected_result) + tolerance_args = {"atol": 1e-2} if opt_method == "differential_evolution" else {} + np.testing.assert_allclose( + list(pec.optimized_parameter_values.values()), expected_result, **tolerance_args + ) def test_parameter_estimation_ddm_cond(func_mode): @@ -304,44 +293,6 @@ def test_parameter_estimation_ddm_cond(func_mode): "Test not yet implemented for Python. Parameter estimate is too slow." ) - def _run_ddm_with_params( - starting_value, - rate, - noise, - threshold, - non_decision_time, - time_step_size, - trial_inputs, - ): - """Create a composition with DDM and run it with the given parameters.""" - - # Create a simple one mechanism composition containing a DDM in integrator mode. 
- decision = pnl.DDM( - function=pnl.DriftDiffusionIntegrator( - starting_value=starting_value, - rate=rate, - noise=noise, - threshold=threshold, - non_decision_time=non_decision_time, - time_step_size=time_step_size, - ), - output_ports=[pnl.DECISION_OUTCOME, pnl.RESPONSE_TIME], - name="DDM", - ) - - comp = pnl.Composition(pathways=decision) - - # Run the composition to generate some data to fit - comp.run(inputs={decision: trial_inputs}) - results = comp.results - - data_to_fit = pd.DataFrame( - np.squeeze(np.array(results)), columns=["decision", "response_time"] - ) - data_to_fit["decision"] = data_to_fit["decision"].astype("category") - - return comp, data_to_fit - # High-level parameters the impact performance of the test num_trials = 50 time_step_size = 0.01 @@ -365,7 +316,6 @@ def _run_ddm_with_params( starting_value=0.0, rate=0.3, noise=1.0, - threshold=0.6, non_decision_time=0.15, time_step_size=time_step_size, ) @@ -380,8 +330,8 @@ def _run_ddm_with_params( threshold=0.3, ) - comp, data_cond1 = _run_ddm_with_params(**{**ddm_params, **params_cond1}, trial_inputs=trial_inputs) - _, data_cond2 = _run_ddm_with_params(**{**ddm_params, **params_cond2}, trial_inputs=trial_inputs) + comp, data_cond1 = _run_ddm_with_params(**ddm_params, **params_cond1, trial_inputs=trial_inputs) + _, data_cond2 = _run_ddm_with_params(**ddm_params, **params_cond2, trial_inputs=trial_inputs) # Combine the data from the two conditions data_cond1['condition'] = 'cond_t=0.7' From 839b7385da357314c524803ba7e9bb1188ffd535 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Fri, 1 Nov 2024 15:52:15 -0400 Subject: [PATCH 378/410] llvm/scheduler: Assert that enabled controller uses TRIAL time scale Signed-off-by: Jan Vesely --- psyneulink/core/llvm/codegen.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/psyneulink/core/llvm/codegen.py b/psyneulink/core/llvm/codegen.py index e340bfd4fb2..55043a18bb0 100644 --- a/psyneulink/core/llvm/codegen.py +++ b/psyneulink/core/llvm/codegen.py @@ -865,6 +865,7 @@ def gen_composition_exec(ctx, composition, *, tags:frozenset): # Run controller if it's enabled in 'BEFORE' mode if is_simulation is False and composition.enable_controller and composition.controller_mode == BEFORE: assert composition.controller is not None + assert composition.controller_time_scale == TimeScale.TRIAL helpers.printf(ctx, builder, @@ -1020,6 +1021,7 @@ def gen_composition_exec(ctx, composition, *, tags:frozenset): if is_simulation is False and composition.enable_controller and composition.controller_mode == AFTER: assert composition.controller is not None + assert composition.controller_time_scale == TimeScale.TRIAL helpers.printf(ctx, builder, From 19481241a85a2d69b11e1ada96afa388e3274679 Mon Sep 17 00:00:00 2001 From: kmantel <1592123+kmantel@users.noreply.github.com> Date: Fri, 1 Nov 2024 23:10:11 -0400 Subject: [PATCH 379/410] condition: fix pnl-specific names in condition.py stub and __all__ (#3089) --- psyneulink/core/scheduling/condition.py | 37 ++-- psyneulink/core/scheduling/condition.pyi | 218 +++++++++++------------ tests/scheduling/test_condition.py | 10 +- 3 files changed, 133 insertions(+), 132 deletions(-) diff --git a/psyneulink/core/scheduling/condition.py b/psyneulink/core/scheduling/condition.py index 632cd9fa752..f3b9d20fcac 100644 --- a/psyneulink/core/scheduling/condition.py +++ b/psyneulink/core/scheduling/condition.py @@ -25,28 +25,21 @@ __all__ = [ # noqa: F822 (dynamically generated) - 'AbsoluteCondition', 'AddEdgeTo', 'AfterCall', - 'AfterConsiderationSetExecution', 
'AfterEnvironmentSequence', - 'AfterEnvironmentStateUpdate', 'AfterNCalls', 'AfterNCallsCombined', - 'AfterNConsiderationSetExecutions', 'AfterNEnvironmentSequences', - 'AfterNEnvironmentStateUpdates', 'AfterNPasses', 'AfterNRuns', - 'AfterNTimeSteps', 'AfterNTrials', 'AfterNode', 'AfterNodes', - 'AfterPass', 'AfterRun', 'AfterTimeStep', 'AfterTrial', 'All', - 'AllHaveRun', 'Always', 'And', 'Any', 'AtConsiderationSetExecution', - 'AtEnvironmentSequence', 'AtEnvironmentSequenceNStart', - 'AtEnvironmentSequenceStart', 'AtEnvironmentStateUpdate', - 'AtEnvironmentStateUpdateNStart', 'AtEnvironmentStateUpdateStart', - 'AtNCalls', 'AtPass', 'AtRun', 'AtRunNStart', 'AtRunStart', - 'AtTimeStep', 'AtTrial', 'AtTrialNStart', 'AtTrialStart', - 'BeforeConsiderationSetExecution', 'BeforeEnvironmentStateUpdate', - 'BeforeNCalls', 'BeforeNode', 'BeforeNodes', 'BeforePass', - 'BeforeTimeStep', 'BeforeTrial', 'CompositeCondition', 'Condition', - 'ConditionBase', 'ConditionError', 'ConditionSet', - 'CustomGraphStructureCondition', 'EveryNCalls', 'EveryNPasses', - 'GraphStructureCondition', 'JustRan', 'NWhen', 'Never', 'Not', - 'Operation', 'Or', 'RemoveEdgeFrom', 'Threshold', 'TimeInterval', - 'TimeTermination', 'WhenFinished', 'WhenFinishedAll', - 'WhenFinishedAny', 'When', 'While', 'WhileNot', 'WithNode', + 'AbsoluteCondition', 'AddEdgeTo', 'AfterCall', 'AfterNCalls', + 'AfterNCallsCombined', 'AfterNode', 'AfterNodes', 'AfterNPasses', + 'AfterNRuns', 'AfterNTimeSteps', 'AfterNTrials', 'AfterPass', + 'AfterRun', 'AfterTimeStep', 'AfterTrial', 'All', 'AllHaveRun', + 'Always', 'And', 'Any', 'AtNCalls', 'AtPass', 'AtRun', + 'AtRunNStart', 'AtRunStart', 'AtTimeStep', 'AtTrial', + 'AtTrialNStart', 'AtTrialStart', 'BeforeNCalls', 'BeforeNode', + 'BeforeNodes', 'BeforePass', 'BeforeTimeStep', 'BeforeTrial', + 'CompositeCondition', 'Condition', 'ConditionBase', + 'ConditionError', 'ConditionSet', 'CustomGraphStructureCondition', + 'EveryNCalls', 'EveryNPasses', 'GraphStructureCondition', 'JustRan', + 'Never', 'Not', 'NWhen', 'Operation', 'Or', 'RemoveEdgeFrom', + 'Threshold', 'TimeInterval', 'TimeTermination', 'When', + 'WhenFinished', 'WhenFinishedAll', 'WhenFinishedAny', 'While', + 'WhileNot', 'WithNode', ] diff --git a/psyneulink/core/scheduling/condition.pyi b/psyneulink/core/scheduling/condition.pyi index 1b32f3554c2..6dc7fda5d3c 100644 --- a/psyneulink/core/scheduling/condition.pyi +++ b/psyneulink/core/scheduling/condition.pyi @@ -6,7 +6,7 @@ import graph_scheduler.time import pint from _typeshed import Incomplete -__all__ = ['Operation', 'ConditionError', 'ConditionSet', 'ConditionBase', 'Condition', 'AbsoluteCondition', 'While', 'When', 'WhileNot', 'Always', 'Never', 'CompositeCondition', 'All', 'Any', 'And', 'Or', 'Not', 'NWhen', 'TimeInterval', 'TimeTermination', 'BeforeConsiderationSetExecution', 'AtConsiderationSetExecution', 'AfterConsiderationSetExecution', 'AfterNConsiderationSetExecutions', 'BeforePass', 'AtPass', 'AfterPass', 'AfterNPasses', 'EveryNPasses', 'BeforeEnvironmentStateUpdate', 'AtEnvironmentStateUpdate', 'AfterEnvironmentStateUpdate', 'AfterNEnvironmentStateUpdates', 'AtEnvironmentSequence', 'AfterEnvironmentSequence', 'AfterNEnvironmentSequences', 'BeforeNCalls', 'AtNCalls', 'AfterCall', 'AfterNCalls', 'AfterNCallsCombined', 'EveryNCalls', 'JustRan', 'AllHaveRun', 'WhenFinished', 'WhenFinishedAny', 'WhenFinishedAll', 'AtEnvironmentStateUpdateStart', 'AtEnvironmentStateUpdateNStart', 'AtEnvironmentSequenceStart', 'AtEnvironmentSequenceNStart', 'Threshold', 'GraphStructureCondition', 
'CustomGraphStructureCondition', 'BeforeNodes', 'BeforeNode', 'WithNode', 'AfterNodes', 'AfterNode', 'AddEdgeTo', 'RemoveEdgeFrom'] +__all__ = ['Operation', 'ConditionError', 'ConditionSet', 'ConditionBase', 'Condition', 'AbsoluteCondition', 'While', 'When', 'WhileNot', 'Always', 'Never', 'CompositeCondition', 'All', 'Any', 'And', 'Or', 'Not', 'NWhen', 'TimeInterval', 'TimeTermination', 'BeforeTimeStep', 'AtTimeStep', 'AfterTimeStep', 'AfterNTimeSteps', 'BeforePass', 'AtPass', 'AfterPass', 'AfterNPasses', 'EveryNPasses', 'BeforeTrial', 'AtTrial', 'AfterTrial', 'AfterNTrials', 'AtRun', 'AfterRun', 'AfterNRuns', 'BeforeNCalls', 'AtNCalls', 'AfterCall', 'AfterNCalls', 'AfterNCallsCombined', 'EveryNCalls', 'JustRan', 'AllHaveRun', 'WhenFinished', 'WhenFinishedAny', 'WhenFinishedAll', 'AtTrialStart', 'AtTrialNStart', 'AtRunStart', 'AtRunNStart', 'Threshold', 'GraphStructureCondition', 'CustomGraphStructureCondition', 'BeforeNodes', 'BeforeNode', 'WithNode', 'AfterNodes', 'AfterNode', 'AddEdgeTo', 'RemoveEdgeFrom'] SubjectOperation = Union['Operation', str, Dict[Hashable, Union['Operation', str]]] @@ -635,87 +635,87 @@ class TimeTermination(AbsoluteCondition): @property def absolute_fixed_points(self): ... -class BeforeConsiderationSetExecution(Condition): +class BeforeTimeStep(Condition): - """BeforeConsiderationSetExecution + """BeforeTimeStep Parameters: - n(int): the 'CONSIDERATION_SET_EXECUTION' before which the Condition is satisfied + n(int): the 'TIME_STEP' before which the Condition is satisfied - time_scale(TimeScale): the TimeScale used as basis for counting `CONSIDERATION_SET_EXECUTION`\\ s (default: TimeScale.ENVIRONMENT_STATE_UPDATE) + time_scale(TimeScale): the TimeScale used as basis for counting `TIME_STEP`\\ s (default: TimeScale.TRIAL) Satisfied when: - - at most n-1 `CONSIDERATION_SET_EXECUTION`\\ s have occurred within one unit of time at the `TimeScale` specified by **time_scale**. + - at most n-1 `TIME_STEP`\\ s have occurred within one unit of time at the `TimeScale` specified by **time_scale**. Notes: - - Counts of TimeScales are zero-indexed (that is, the first `CONSIDERATION_SET_EXECUTION` is 0, the second `CONSIDERATION_SET_EXECUTION` is 1, etc.); - so, `BeforeConsiderationSetExecution(2)` is satisfied at `CONSIDERATION_SET_EXECUTION` 0 and `CONSIDERATION_SET_EXECUTION` 1. + - Counts of TimeScales are zero-indexed (that is, the first `TIME_STEP` is 0, the second `TIME_STEP` is 1, etc.); + so, `BeforeTimeStep(2)` is satisfied at `TIME_STEP` 0 and `TIME_STEP` 1. """ def __init__(self, n, time_scale: graph_scheduler.time.TimeScale = ...) -> None: ... -class AtConsiderationSetExecution(Condition): +class AtTimeStep(Condition): - """AtConsiderationSetExecution + """AtTimeStep Parameters: - n(int): the `CONSIDERATION_SET_EXECUTION` at which the Condition is satisfied + n(int): the `TIME_STEP` at which the Condition is satisfied - time_scale(TimeScale): the TimeScale used as basis for counting `CONSIDERATION_SET_EXECUTION`\\ s (default: TimeScale.ENVIRONMENT_STATE_UPDATE) + time_scale(TimeScale): the TimeScale used as basis for counting `TIME_STEP`\\ s (default: TimeScale.TRIAL) Satisfied when: - - exactly n `CONSIDERATION_SET_EXECUTION`\\ s have occurred within one unit of time at the `TimeScale` specified by **time_scale**. + - exactly n `TIME_STEP`\\ s have occurred within one unit of time at the `TimeScale` specified by **time_scale**. 
Notes: - - Counts of TimeScales are zero-indexed (that is, the first 'CONSIDERATION_SET_EXECUTION' is pass 0, the second 'CONSIDERATION_SET_EXECUTION' is 1, etc.); - so, `AtConsiderationSetExecution(1)` is satisfied when a single `CONSIDERATION_SET_EXECUTION` (`CONSIDERATION_SET_EXECUTION` 0) has occurred, and `AtConsiderationSetExecution(2)` is satisfied - when two `CONSIDERATION_SET_EXECUTION`\\ s have occurred (`CONSIDERATION_SET_EXECUTION` 0 and `CONSIDERATION_SET_EXECUTION` 1), etc.. + - Counts of TimeScales are zero-indexed (that is, the first 'TIME_STEP' is pass 0, the second 'TIME_STEP' is 1, etc.); + so, `AtTimeStep(1)` is satisfied when a single `TIME_STEP` (`TIME_STEP` 0) has occurred, and `AtTimeStep(2)` is satisfied + when two `TIME_STEP`\\ s have occurred (`TIME_STEP` 0 and `TIME_STEP` 1), etc.. """ def __init__(self, n, time_scale: graph_scheduler.time.TimeScale = ...) -> None: ... -class AfterConsiderationSetExecution(Condition): +class AfterTimeStep(Condition): - """AfterConsiderationSetExecution + """AfterTimeStep Parameters: - n(int): the `CONSIDERATION_SET_EXECUTION` after which the Condition is satisfied + n(int): the `TIME_STEP` after which the Condition is satisfied - time_scale(TimeScale): the TimeScale used as basis for counting `CONSIDERATION_SET_EXECUTION`\\ s (default: TimeScale.ENVIRONMENT_STATE_UPDATE) + time_scale(TimeScale): the TimeScale used as basis for counting `TIME_STEP`\\ s (default: TimeScale.TRIAL) Satisfied when: - - at least n+1 `CONSIDERATION_SET_EXECUTION`\\ s have occurred within one unit of time at the `TimeScale` specified by **time_scale**. + - at least n+1 `TIME_STEP`\\ s have occurred within one unit of time at the `TimeScale` specified by **time_scale**. Notes: - - Counts of TimeScals are zero-indexed (that is, the first `CONSIDERATION_SET_EXECUTION` is 0, the second `CONSIDERATION_SET_EXECUTION` is 1, etc.); so, - `AfterConsiderationSetExecution(1)` is satisfied after `CONSIDERATION_SET_EXECUTION` 1 has occurred and thereafter (i.e., in `CONSIDERATION_SET_EXECUTION`\\ s 2, 3, 4, etc.). + - Counts of TimeScals are zero-indexed (that is, the first `TIME_STEP` is 0, the second `TIME_STEP` is 1, etc.); so, + `AfterTimeStep(1)` is satisfied after `TIME_STEP` 1 has occurred and thereafter (i.e., in `TIME_STEP`\\ s 2, 3, 4, etc.). """ def __init__(self, n, time_scale: graph_scheduler.time.TimeScale = ...) -> None: ... -class AfterNConsiderationSetExecutions(Condition): +class AfterNTimeSteps(Condition): - """AfterNConsiderationSetExecutions + """AfterNTimeSteps Parameters: - n(int): the number of `CONSIDERATION_SET_EXECUTION`\\ s after which the Condition is satisfied + n(int): the number of `TIME_STEP`\\ s after which the Condition is satisfied - time_scale(TimeScale): the TimeScale used as basis for counting `CONSIDERATION_SET_EXECUTION`\\ s (default: TimeScale.ENVIRONMENT_STATE_UPDATE) + time_scale(TimeScale): the TimeScale used as basis for counting `TIME_STEP`\\ s (default: TimeScale.TRIAL) Satisfied when: - - at least n `CONSIDERATION_SET_EXECUTION`\\ s have occurred within one unit of time at the `TimeScale` specified by **time_scale**. + - at least n `TIME_STEP`\\ s have occurred within one unit of time at the `TimeScale` specified by **time_scale**. """ def __init__(self, n, time_scale: graph_scheduler.time.TimeScale = ...) -> None: ... 
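For orientation, the renamed stubs above correspond to condition classes exported from psyneulink that can be attached to a Composition's scheduler. A minimal usage sketch follows; the two-node Composition is invented for illustration and is not taken from the test suite.

    import psyneulink as pnl

    A = pnl.ProcessingMechanism(name="A")
    B = pnl.ProcessingMechanism(name="B")
    comp = pnl.Composition(pathways=[A, B], name="comp")

    # B is scheduled only once A has executed twice since B last ran,
    # and only after the first TRIAL of the RUN has completed.
    comp.scheduler.add_condition(B, pnl.All(pnl.EveryNCalls(A, 2), pnl.AfterNTrials(1)))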
@@ -728,7 +728,7 @@ class BeforePass(Condition): n(int): the 'PASS' before which the Condition is satisfied - time_scale(TimeScale): the TimeScale used as basis for counting `PASS`\\ es (default: TimeScale.ENVIRONMENT_STATE_UPDATE) + time_scale(TimeScale): the TimeScale used as basis for counting `PASS`\\ es (default: TimeScale.TRIAL) Satisfied when: @@ -750,7 +750,7 @@ class AtPass(Condition): n(int): the `PASS` at which the Condition is satisfied - time_scale(TimeScale): the TimeScale used as basis for counting `PASS`\\ es (default: TimeScale.ENVIRONMENT_STATE_UPDATE) + time_scale(TimeScale): the TimeScale used as basis for counting `PASS`\\ es (default: TimeScale.TRIAL) Satisfied when: @@ -773,7 +773,7 @@ class AfterPass(Condition): n(int): the `PASS` after which the Condition is satisfied - time_scale(TimeScale): the TimeScale used as basis for counting `PASS`\\ es (default: TimeScale.ENVIRONMENT_STATE_UPDATE) + time_scale(TimeScale): the TimeScale used as basis for counting `PASS`\\ es (default: TimeScale.TRIAL) Satisfied when: @@ -795,7 +795,7 @@ class AfterNPasses(Condition): n(int): the number of `PASS`\\ es after which the Condition is satisfied - time_scale(TimeScale): the TimeScale used as basis for counting `PASS`\\ es (default: TimeScale.ENVIRONMENT_STATE_UPDATE) + time_scale(TimeScale): the TimeScale used as basis for counting `PASS`\\ es (default: TimeScale.TRIAL) Satisfied when: @@ -813,7 +813,7 @@ class EveryNPasses(Condition): n(int): the frequency of passes with which this condition is satisfied - time_scale(TimeScale): the TimeScale used as basis for counting `PASS`\\ es (default: TimeScale.ENVIRONMENT_STATE_UPDATE) + time_scale(TimeScale): the TimeScale used as basis for counting `PASS`\\ es (default: TimeScale.TRIAL) Satisfied when: @@ -825,150 +825,150 @@ class EveryNPasses(Condition): """ def __init__(self, n, time_scale: graph_scheduler.time.TimeScale = ...) -> None: ... -class BeforeEnvironmentStateUpdate(Condition): +class BeforeTrial(Condition): - """BeforeEnvironmentStateUpdate + """BeforeTrial Parameters: - n(int): the `ENVIRONMENT_STATE_UPDATE ` before which the Condition is satisfied + n(int): the `TRIAL ` before which the Condition is satisfied - time_scale(TimeScale): the TimeScale used as basis for counting `ENVIRONMENT_STATE_UPDATE `\\ s - (default: TimeScale.ENVIRONMENT_SEQUENCE) + time_scale(TimeScale): the TimeScale used as basis for counting `TRIAL `\\ s + (default: TimeScale.RUN) Satisfied when: - - at most n-1 `ENVIRONMENT_STATE_UPDATE `\\ s have occurred within one unit of time at the `TimeScale` + - at most n-1 `TRIAL `\\ s have occurred within one unit of time at the `TimeScale` specified by **time_scale**. Notes: - - Counts of TimeScales are zero-indexed (that is, the first `ENVIRONMENT_STATE_UPDATE ` is 0, the second - `ENVIRONMENT_STATE_UPDATE ` is 1, etc.); so, `BeforeEnvironmentStateUpdate(2)` is satisfied at `ENVIRONMENT_STATE_UPDATE ` 0 - and `ENVIRONMENT_STATE_UPDATE ` 1. + - Counts of TimeScales are zero-indexed (that is, the first `TRIAL ` is 0, the second + `TRIAL ` is 1, etc.); so, `BeforeTrial(2)` is satisfied at `TRIAL ` 0 + and `TRIAL ` 1. """ def __init__(self, n, time_scale: graph_scheduler.time.TimeScale = ...) -> None: ... 
-class AtEnvironmentStateUpdate(Condition): +class AtTrial(Condition): - """AtEnvironmentStateUpdate + """AtTrial Parameters: - n(int): the `ENVIRONMENT_STATE_UPDATE ` at which the Condition is satisfied + n(int): the `TRIAL ` at which the Condition is satisfied - time_scale(TimeScale): the TimeScale used as basis for counting `ENVIRONMENT_STATE_UPDATE `\\ s - (default: TimeScale.ENVIRONMENT_SEQUENCE) + time_scale(TimeScale): the TimeScale used as basis for counting `TRIAL `\\ s + (default: TimeScale.RUN) Satisfied when: - - exactly n `ENVIRONMENT_STATE_UPDATE `\\ s have occurred within one unit of time at the `TimeScale` + - exactly n `TRIAL `\\ s have occurred within one unit of time at the `TimeScale` specified by **time_scale**. Notes: - - Counts of TimeScales are zero-indexed (that is, the first `ENVIRONMENT_STATE_UPDATE ` is 0, - the second `ENVIRONMENT_STATE_UPDATE ` is 1, etc.); so, `AtEnvironmentStateUpdate(1)` is satisfied when one - `ENVIRONMENT_STATE_UPDATE ` (`ENVIRONMENT_STATE_UPDATE ` 0) has already occurred. + - Counts of TimeScales are zero-indexed (that is, the first `TRIAL ` is 0, + the second `TRIAL ` is 1, etc.); so, `AtTrial(1)` is satisfied when one + `TRIAL ` (`TRIAL ` 0) has already occurred. """ def __init__(self, n, time_scale: graph_scheduler.time.TimeScale = ...) -> None: ... -class AfterEnvironmentStateUpdate(Condition): +class AfterTrial(Condition): - """AfterEnvironmentStateUpdate + """AfterTrial Parameters: - n(int): the `ENVIRONMENT_STATE_UPDATE ` after which the Condition is satisfied + n(int): the `TRIAL ` after which the Condition is satisfied - time_scale(TimeScale): the TimeScale used as basis for counting `ENVIRONMENT_STATE_UPDATE `\\ s. - (default: TimeScale.ENVIRONMENT_SEQUENCE) + time_scale(TimeScale): the TimeScale used as basis for counting `TRIAL `\\ s. + (default: TimeScale.RUN) Satisfied when: - - at least n+1 `ENVIRONMENT_STATE_UPDATE `\\ s have occurred within one unit of time at the `TimeScale` + - at least n+1 `TRIAL `\\ s have occurred within one unit of time at the `TimeScale` specified by **time_scale**. Notes: - - Counts of TimeScales are zero-indexed (that is, the first `ENVIRONMENT_STATE_UPDATE ` is 0, the second - `ENVIRONMENT_STATE_UPDATE ` is 1, etc.); so, `AfterPass(1)` is satisfied after `ENVIRONMENT_STATE_UPDATE ` 1 - has occurred and thereafter (i.e., in `ENVIRONMENT_STATE_UPDATE `\\ s 2, 3, 4, etc.). + - Counts of TimeScales are zero-indexed (that is, the first `TRIAL ` is 0, the second + `TRIAL ` is 1, etc.); so, `AfterPass(1)` is satisfied after `TRIAL ` 1 + has occurred and thereafter (i.e., in `TRIAL `\\ s 2, 3, 4, etc.). """ def __init__(self, n, time_scale: graph_scheduler.time.TimeScale = ...) -> None: ... -class AfterNEnvironmentStateUpdates(Condition): +class AfterNTrials(Condition): - """AfterNEnvironmentStateUpdates + """AfterNTrials Parameters: - n(int): the number of `ENVIRONMENT_STATE_UPDATE `\\ s after which the Condition is satisfied + n(int): the number of `TRIAL `\\ s after which the Condition is satisfied - time_scale(TimeScale): the TimeScale used as basis for counting `ENVIRONMENT_STATE_UPDATE `\\ s - (default: TimeScale.ENVIRONMENT_SEQUENCE) + time_scale(TimeScale): the TimeScale used as basis for counting `TRIAL `\\ s + (default: TimeScale.RUN) Satisfied when: - - at least n `ENVIRONMENT_STATE_UPDATE `\\ s have occured within one unit of time at the `TimeScale` + - at least n `TRIAL `\\ s have occured within one unit of time at the `TimeScale` specified by **time_scale**. 
""" def __init__(self, n, time_scale: graph_scheduler.time.TimeScale = ...) -> None: ... -class AtEnvironmentSequence(Condition): +class AtRun(Condition): - """AtEnvironmentSequence + """AtRun Parameters: - n(int): the `ENVIRONMENT_SEQUENCE` at which the Condition is satisfied + n(int): the `RUN` at which the Condition is satisfied Satisfied when: - - exactly n `ENVIRONMENT_SEQUENCE`\\ s have occurred. + - exactly n `RUN`\\ s have occurred. Notes: - - `ENVIRONMENT_SEQUENCE`\\ s are managed by the environment using the Scheduler (e.g. `end_environment_sequence ` ) and are not automatically updated by this package. + - `RUN`\\ s are managed by the environment using the Scheduler (e.g. `end_environment_sequence ` ) and are not automatically updated by this package. """ def __init__(self, n) -> None: ... -class AfterEnvironmentSequence(Condition): +class AfterRun(Condition): - """AfterEnvironmentSequence + """AfterRun Parameters: - n(int): the `ENVIRONMENT_SEQUENCE` after which the Condition is satisfied + n(int): the `RUN` after which the Condition is satisfied Satisfied when: - - at least n+1 `ENVIRONMENT_SEQUENCE`\\ s have occurred. + - at least n+1 `RUN`\\ s have occurred. Notes: - - `ENVIRONMENT_SEQUENCE`\\ s are managed by the environment using the Scheduler (e.g. `end_environment_sequence ` ) and are not automatically updated by this package. + - `RUN`\\ s are managed by the environment using the Scheduler (e.g. `end_environment_sequence ` ) and are not automatically updated by this package. """ def __init__(self, n) -> None: ... -class AfterNEnvironmentSequences(Condition): +class AfterNRuns(Condition): - """AfterNEnvironmentSequences + """AfterNRuns Parameters: - n(int): the number of `ENVIRONMENT_SEQUENCE`\\ s after which the Condition is satisfied + n(int): the number of `RUN`\\ s after which the Condition is satisfied Satisfied when: - - at least n `ENVIRONMENT_SEQUENCE`\\ s have occured. + - at least n `RUN`\\ s have occured. Notes: - - `ENVIRONMENT_SEQUENCE`\\ s are managed by the environment using the Scheduler (e.g. `end_environment_sequence ` ) and are not automatically updated by this package. + - `RUN`\\ s are managed by the environment using the Scheduler (e.g. `end_environment_sequence ` ) and are not automatically updated by this package. """ def __init__(self, n) -> None: ... 
@@ -984,7 +984,7 @@ class BeforeNCalls(_DependencyValidation, Condition): n(int): the number of executions of **dependency** before which the Condition is satisfied time_scale(TimeScale): the TimeScale used as basis for counting executions of **dependency** - (default: TimeScale.ENVIRONMENT_STATE_UPDATE) + (default: TimeScale.TRIAL) Satisfied when: @@ -1005,7 +1005,7 @@ class AtNCalls(_DependencyValidation, Condition): n(int): the number of executions of **dependency** at which the Condition is satisfied time_scale(TimeScale): the TimeScale used as basis for counting executions of **dependency** - (default: TimeScale.ENVIRONMENT_STATE_UPDATE) + (default: TimeScale.TRIAL) Satisfied when: @@ -1026,7 +1026,7 @@ class AfterCall(_DependencyValidation, Condition): n(int): the number of executions of **dependency** after which the Condition is satisfied time_scale(TimeScale): the TimeScale used as basis for counting executions of **dependency** - (default: TimeScale.ENVIRONMENT_STATE_UPDATE) + (default: TimeScale.TRIAL) Satisfied when: @@ -1047,7 +1047,7 @@ class AfterNCalls(_DependencyValidation, Condition): n(int): the number of executions of **dependency** after which the Condition is satisfied time_scale(TimeScale): the TimeScale used as basis for counting executions of **dependency** - (default: TimeScale.ENVIRONMENT_STATE_UPDATE) + (default: TimeScale.TRIAL) Satisfied when: @@ -1069,7 +1069,7 @@ class AfterNCallsCombined(_DependencyValidation, Condition): Condition is satisfied (default: None) time_scale(TimeScale): the TimeScale used as basis for counting executions of **dependency** - (default: TimeScale.ENVIRONMENT_STATE_UPDATE) + (default: TimeScale.TRIAL) Satisfied when: @@ -1115,13 +1115,13 @@ class JustRan(_DependencyValidation, Condition): Satisfied when: - - the node specified in **dependency** executed in the previous `CONSIDERATION_SET_EXECUTION`. + - the node specified in **dependency** executed in the previous `TIME_STEP`. Notes: - This Condition can transcend divisions between `TimeScales `. - For example, if A runs in the final `CONSIDERATION_SET_EXECUTION` of an `ENVIRONMENT_STATE_UPDATE `, - JustRan(A) is satisfied at the beginning of the next `ENVIRONMENT_STATE_UPDATE `. + For example, if A runs in the final `TIME_STEP` of an `TRIAL `, + JustRan(A) is satisfied at the beginning of the next `TRIAL `. """ def __init__(self, dependency) -> None: ... @@ -1135,7 +1135,7 @@ class AllHaveRun(_DependencyValidation, Condition): *dependencies (Hashable): an iterable of nodes on which the Condition depends time_scale(TimeScale): the TimeScale used as basis for counting executions of **dependency** - (default: TimeScale.ENVIRONMENT_STATE_UPDATE) + (default: TimeScale.TRIAL) Satisfied when: @@ -1222,13 +1222,13 @@ class WhenFinishedAll(_DependencyValidation, Condition): """ def __init__(self, *dependencies) -> None: ... -class AtEnvironmentStateUpdateStart(AtPass): +class AtTrialStart(AtPass): - """AtEnvironmentStateUpdateStart + """AtTrialStart Satisfied when: - - at the beginning of an `ENVIRONMENT_STATE_UPDATE ` + - at the beginning of an `TRIAL ` Notes: @@ -1236,57 +1236,57 @@ class AtEnvironmentStateUpdateStart(AtPass): """ def __init__(self) -> None: ... 
-class AtEnvironmentStateUpdateNStart(All): +class AtTrialNStart(All): - """AtEnvironmentStateUpdateNStart + """AtTrialNStart Parameters: - n(int): the `ENVIRONMENT_STATE_UPDATE ` on which the Condition is satisfied + n(int): the `TRIAL ` on which the Condition is satisfied - time_scale(TimeScale): the TimeScale used as basis for counting `ENVIRONMENT_STATE_UPDATE `\\ s - (default: TimeScale.ENVIRONMENT_SEQUENCE) + time_scale(TimeScale): the TimeScale used as basis for counting `TRIAL `\\ s + (default: TimeScale.RUN) Satisfied when: - - on `PASS` 0 of the specified `ENVIRONMENT_STATE_UPDATE ` counted using 'TimeScale` + - on `PASS` 0 of the specified `TRIAL ` counted using 'TimeScale` Notes: - - identical to All(AtPass(0), AtEnvironmentStateUpdate(n, time_scale)) + - identical to All(AtPass(0), AtTrial(n, time_scale)) """ def __init__(self, n, time_scale: graph_scheduler.time.TimeScale = ...) -> None: ... -class AtEnvironmentSequenceStart(AtEnvironmentStateUpdate): +class AtRunStart(AtTrial): - """AtEnvironmentSequenceStart + """AtRunStart Satisfied when: - - at the beginning of an `ENVIRONMENT_SEQUENCE` + - at the beginning of an `RUN` Notes: - - identical to `AtEnvironmentStateUpdate(0) ` + - identical to `AtTrial(0) ` """ def __init__(self) -> None: ... -class AtEnvironmentSequenceNStart(All): +class AtRunNStart(All): - """AtEnvironmentSequenceNStart + """AtRunNStart Parameters: - n(int): the `ENVIRONMENT_SEQUENCE` on which the Condition is satisfied + n(int): the `RUN` on which the Condition is satisfied Satisfied when: - - on `ENVIRONMENT_STATE_UPDATE ` 0 of the specified `ENVIRONMENT_SEQUENCE` counted using 'TimeScale` + - on `TRIAL ` 0 of the specified `RUN` counted using 'TimeScale` Notes: - - identical to `All(AtEnvironmentStateUpdate(0), AtEnvironmentSequence(n))` + - identical to `All(AtTrial(0), AtRun(n))` """ def __init__(self, n) -> None: ... diff --git a/tests/scheduling/test_condition.py b/tests/scheduling/test_condition.py index 182d85bfd7a..a823f534f29 100644 --- a/tests/scheduling/test_condition.py +++ b/tests/scheduling/test_condition.py @@ -25,7 +25,15 @@ class TestModule: def test_all_attr_parity(self): - missing = set(gs.condition.__all__) - set(pnl.core.scheduling.condition.__all__) + missing = { + c for c + in set(gs.condition.__all__) - set(pnl.core.scheduling.condition.__all__) + if ( + 'ConsiderationSetExecution' not in c + and 'EnvironmentStateUpdate' not in c + and 'EnvironmentSequence' not in c + ) + } assert len(missing) == 0, (f'Conditions in graph_scheduler must be added to psyneulink condition.py: {missing}') From 04a671611e2c9d65953c60528651c6cff704d8ed Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Sat, 2 Nov 2024 19:56:51 -0400 Subject: [PATCH 380/410] llvm/scheduler: Use position in consideration queue to honor data dependencies It's not required that any node in earlier consideration queue slot executes before any node in later consideration slot. Using Any(AllHaveRun()) to represent consideratio queue ordering is thus both expensive and inaccurate. The compiled compiler already uses 'iteration count' per pass that is equivalent to consideration queue slots. It can thus be used directly to filter nodes that should be considered at each step. This more closely follows the Python implementation. Rename iter_count -> consideration_index. 
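In plain Python, the rule implemented here is roughly: a node is considered only when the current consideration index equals the index of the consideration set that contains it, and it runs only if its own condition is also satisfied. The sketch below is a simplified stand-in using sets and callables, not the actual compiled structures; only the name _get_processing_condition_set mirrors the real method.

    def get_processing_condition(node, consideration_queue, conditions):
        # Analogue of _get_processing_condition_set(): return the node's slot in
        # the consideration queue together with its (default: always true) condition.
        for index, consideration_set in enumerate(consideration_queue):
            if node in consideration_set:
                return index, conditions.get(node, lambda: True)
        raise ValueError(f"{node} is not in the consideration queue")

    def execution_set(consideration_index, nodes, consideration_queue, conditions):
        run_set = set()
        for node in nodes:
            node_index, node_condition = get_processing_condition(node, consideration_queue, conditions)
            if node_index == consideration_index and node_condition():
                run_set.add(node)
        return run_set

    # B sits in the second consideration set, so it is not even considered
    # while the consideration index is 0, regardless of its condition.
    queue = [{"A"}, {"B"}]
    assert execution_set(0, {"A", "B"}, queue, {}) == {"A"}
    assert execution_set(1, {"A", "B"}, queue, {}) == {"B"}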
Signed-off-by: Jan Vesely --- psyneulink/core/compositions/composition.py | 18 ++++-------- psyneulink/core/llvm/codegen.py | 32 +++++++++------------ 2 files changed, 20 insertions(+), 30 deletions(-) diff --git a/psyneulink/core/compositions/composition.py b/psyneulink/core/compositions/composition.py index 98116b80c70..83629dfd19b 100644 --- a/psyneulink/core/compositions/composition.py +++ b/psyneulink/core/compositions/composition.py @@ -12946,22 +12946,16 @@ def disable_all_history(self): self._set_all_parameter_properties_recursively(history_max_length=0) def _get_processing_condition_set(self, node): - dep_group = [] - for group in self.scheduler.consideration_queue: + for index, group in enumerate(self.scheduler.consideration_queue): if node in group: break - dep_group = group - - # This condition is used to check of the step has passed. - # Not all nodes in the previous step need to execute - # (they might have other conditions), but if any one does we're good - # FIXME: This will fail if none of the previously considered - # nodes executes in this pass, but that is unlikely. - conds = [Any(*(AllHaveRun(dep, time_scale=TimeScale.PASS) for dep in dep_group))] if len(dep_group) else [] + + assert index is not None + if node in self.scheduler.conditions: - conds.append(self.scheduler.conditions[node]) + return index, self.scheduler.conditions[node] - return All(*conds) + return index, Always() def _input_matches_variable(self, input_value, var): var_shape = convert_to_np_array(var).shape diff --git a/psyneulink/core/llvm/codegen.py b/psyneulink/core/llvm/codegen.py index 55043a18bb0..26a5dfd14af 100644 --- a/psyneulink/core/llvm/codegen.py +++ b/psyneulink/core/llvm/codegen.py @@ -879,15 +879,13 @@ def gen_composition_exec(ctx, composition, *, tags:frozenset): controller_f = ctx.import_llvm_function(controller_w, tags=node_tags) builder.call(controller_f, [state, params, comp_in, data, data]) - # Allocate run set structure run_set_type = ir.ArrayType(ctx.bool_ty, len(composition.nodes)) run_set_ptr = builder.alloca(run_set_type, name="run_set") builder.store(run_set_type(None), run_set_ptr) - - iter_ptr = builder.alloca(ctx.int32_ty, name="iter_counter") - builder.store(iter_ptr.type.pointee(0), iter_ptr) + consideration_index_ptr = builder.alloca(ctx.int32_ty, name="consideration_index_loc") + builder.store(consideration_index_ptr.type.pointee(0), consideration_index_ptr) # Start the main loop structure loop_condition = builder.append_basic_block(name="scheduling_loop_condition") @@ -914,18 +912,17 @@ def gen_composition_exec(ctx, composition, *, tags:frozenset): previous_step = builder.load(run_set_ptr) zero = ctx.int32_ty(0) any_cond = ctx.bool_ty(0) + consideration_index = builder.load(consideration_index_ptr) # Calculate execution set before running the mechanisms for idx, node in enumerate(composition.nodes): run_set_node_ptr = builder.gep(run_set_ptr, [zero, ctx.int32_ty(idx)], name="run_cond_ptr_" + node.name) - node_cond = cond_gen.generate_sched_condition(builder, - composition._get_processing_condition_set(node), - cond, - node, - is_finished_callbacks, - nodes_states) - ran = cond_gen.generate_ran_this_pass(builder, cond, node) - node_cond = builder.and_(node_cond, builder.not_(ran), name="run_cond_" + node.name) + node_consideration_index, node_condition = composition._get_processing_condition_set(node) + + is_consideration_turn = builder.icmp_unsigned("==", consideration_index, consideration_index.type(node_consideration_index)) + node_cond = 
cond_gen.generate_sched_condition(builder, node_condition, cond, node, is_finished_callbacks, nodes_states) + node_cond = builder.and_(node_cond, is_consideration_turn, name="run_cond_" + node.name) + any_cond = builder.or_(any_cond, node_cond, name="any_ran_cond") builder.store(node_cond, run_set_node_ptr) @@ -998,17 +995,16 @@ def gen_composition_exec(ctx, composition, *, tags:frozenset): builder.block.name = "update_iter_count" # Increment number of iterations - iters = builder.load(iter_ptr, name="iterw") - iters = builder.add(iters, iters.type(1), name="iterw_inc") - builder.store(iters, iter_ptr) + consideration_index = builder.add(consideration_index, consideration_index.type(1), name="consideration_index_inc") + builder.store(consideration_index, consideration_index_ptr) - max_iters = len(composition.scheduler.consideration_queue) - completed_pass = builder.icmp_unsigned("==", iters, iters.type(max_iters), name="completed_pass") + max_considerations = consideration_index.type(len(composition.scheduler.consideration_queue)) + completed_pass = builder.icmp_unsigned("==", consideration_index, max_considerations, name="completed_pass") # Increment pass and reset time step with builder.if_then(completed_pass): builder.block.name = "inc_pass" - builder.store(zero, iter_ptr) + builder.store(consideration_index_ptr.type.pointee(0), consideration_index_ptr) # Bumping automatically zeros lower elements cond_gen.bump_ts(builder, cond, (0, 1, 0)) From 1efa9251e1e9c504d0c480ed41ecbebdea4b90e0 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Sat, 2 Nov 2024 20:53:10 -0400 Subject: [PATCH 381/410] conftest: Inherit the original class of the benchmark fixture (#3093) Needed to pass fixture 'isinstance' checks introduced in pytest-benchmark-5.0.0. Signed-off-by: Jan Vesely --- conftest.py | 30 +++++++++++++----------------- 1 file changed, 13 insertions(+), 17 deletions(-) diff --git a/conftest.py b/conftest.py index 22050caa6b5..ea4a1b2f206 100644 --- a/conftest.py +++ b/conftest.py @@ -155,28 +155,24 @@ def comp_mode_no_llvm(): # dummy fixture to allow 'comp_mode' filtering pass -class FirstBench(): - def __init__(self, benchmark): - super().__setattr__("benchmark", benchmark) +@pytest.fixture +def benchmark(benchmark): - def __call__(self, f, *args, **kwargs): - res = [] - # Compute the first result if benchmark is enabled - if self.benchmark.enabled: - res.append(f(*args, **kwargs)) + orig_class = type(benchmark) - res.append(self.benchmark(f, *args, **kwargs)) - return res[0] + class _FirstBench(orig_class): + def __call__(self, f, *args, **kwargs): + res = [] + # Compute the first result if benchmark is enabled + if self.enabled: + res.append(f(*args, **kwargs)) - def __getattr__(self, attr): - return getattr(self.benchmark, attr) + res.append(orig_class.__call__(self, f, *args, **kwargs)) + return res[0] - def __setattr__(self, attr, val): - return setattr(self.benchmark, attr, val) + benchmark.__class__ = _FirstBench -@pytest.fixture -def benchmark(benchmark): - return FirstBench(benchmark) + return benchmark @pytest.helpers.register def llvm_current_fp_precision(): From b9eb21fd51686f19e09e1a9c67aa87d36bb61b8a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 2 Nov 2024 22:51:01 -0400 Subject: [PATCH 382/410] requirements: update pytest-benchmark requirement from <4.0.1 to <5.1.1 (#3091) Updates the requirements on [pytest-benchmark](https://github.com/ionelmc/pytest-benchmark) to permit the latest version. 
- [Changelog](https://github.com/ionelmc/pytest-benchmark/blob/master/CHANGELOG.rst) - [Commits](https://github.com/ionelmc/pytest-benchmark/compare/v4.0.0...v5.1.0) --- updated-dependencies: - dependency-name: pytest-benchmark dependency-type: direct:development ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- dev_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev_requirements.txt b/dev_requirements.txt index d82bd9d7ca6..9f06766b7a6 100644 --- a/dev_requirements.txt +++ b/dev_requirements.txt @@ -1,7 +1,7 @@ jupyter<1.1.2 packaging<25.0 pytest<8.3.4 -pytest-benchmark<4.0.1 +pytest-benchmark<5.1.1 pytest-cov<5.0.1 pytest-forked<1.7.0 pytest-helpers-namespace<2021.12.30 From 4fd0948b1c73512962a9cd28c0b5dbabb382123e Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Sun, 3 Nov 2024 01:24:00 -0400 Subject: [PATCH 383/410] llvm/compiler: Use node num_executions to implement AllHaveRun condition Signed-off-by: Jan Vesely --- psyneulink/core/llvm/codegen.py | 8 ++++++++ psyneulink/core/llvm/helpers.py | 30 ++++-------------------------- 2 files changed, 12 insertions(+), 26 deletions(-) diff --git a/psyneulink/core/llvm/codegen.py b/psyneulink/core/llvm/codegen.py index 26a5dfd14af..259c00316cb 100644 --- a/psyneulink/core/llvm/codegen.py +++ b/psyneulink/core/llvm/codegen.py @@ -782,7 +782,15 @@ def _gen_composition_exec_context(ctx, composition, *, tags:frozenset, suffix="" params = builder.alloca(const_params.type, name="const_params_loc") builder.store(const_params, params) + for scale in TimeScale: + num_executions_ptr = helpers.get_state_ptr(builder, composition, state, "num_executions") + num_exec_time_ptr = builder.gep(num_executions_ptr, [ctx.int32_ty(0), ctx.int32_ty(scale.value)]) + num_exec = builder.load(num_exec_time_ptr) + num_exec = builder.add(num_exec, num_exec.type(1)) + builder.store(num_exec, num_exec_time_ptr) + node_tags = tags.union({"node_assembly"}) + # Call input CIM input_cim_w = ctx.get_node_assembly(composition, composition.input_CIM) input_cim_f = ctx.import_llvm_function(input_cim_w, tags=node_tags) diff --git a/psyneulink/core/llvm/helpers.py b/psyneulink/core/llvm/helpers.py index e5dc9f612df..c6a2c991122 100644 --- a/psyneulink/core/llvm/helpers.py +++ b/psyneulink/core/llvm/helpers.py @@ -590,26 +590,6 @@ def generate_update_after_node_execution(self, builder, cond_ptr, node): node_ts_ptr = self.__get_node_ts_ptr(builder, cond_ptr, node) builder.store(global_ts, node_ts_ptr) - def generate_ran_this_pass(self, builder, cond_ptr, node): - global_trial = self.get_global_trial(builder, cond_ptr) - global_pass = self.get_global_pass(builder, cond_ptr) - - node_ts = self.__get_node_ts(builder, cond_ptr, node) - node_trial = builder.extract_value(node_ts, self.TimeIndex.TRIAL.value) - node_pass = builder.extract_value(node_ts, self.TimeIndex.PASS.value) - - pass_eq = builder.icmp_signed("==", node_pass, global_pass) - trial_eq = builder.icmp_signed("==", node_trial, global_trial) - return builder.and_(pass_eq, trial_eq) - - def generate_ran_this_trial(self, builder, cond_ptr, node): - global_trial = self.get_global_trial(builder, cond_ptr) - - node_ts = self.__get_node_ts(builder, cond_ptr, node) - node_trial = builder.extract_value(node_ts, self.TimeIndex.TRIAL.value) - - return builder.icmp_signed("==", node_trial, global_trial) - def _node_executions_for_scale(self, builder, node, node_states, time_scale:TimeScale): node_idx = 
self.composition._get_node_index(node) node_state = builder.gep(node_states, [self._zero, self.ctx.int32_ty(node_idx)]) @@ -645,13 +625,11 @@ def generate_sched_condition(self, builder, condition, cond_ptr, self_node, is_f run_cond = self.ctx.bool_ty(1) for node in dependencies: - if condition.time_scale == TimeScale.TRIAL: - node_ran = self.generate_ran_this_trial(builder, cond_ptr, node) - elif condition.time_scale == TimeScale.PASS: - node_ran = self.generate_ran_this_pass(builder, cond_ptr, node) - else: - assert False, "Unsupported 'AllHaveRun' time scale: {}".format(condition.time_scale) + count = self._node_executions_for_scale(builder, node, nodes_states, condition.time_scale) + + node_ran = builder.icmp_unsigned(">", count, count.type(0)) run_cond = builder.and_(run_cond, node_ran) + return run_cond elif isinstance(condition, Any): From 538a3242a3294dbf2c1532613f5a1eb906a58a18 Mon Sep 17 00:00:00 2001 From: jdcpni Date: Sun, 3 Nov 2024 05:57:17 -0500 Subject: [PATCH 384/410] refactor/emcomposition_softmax_after_dot (#3095) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * • emcomposition.py - move softmax operation to after combining weighted dot products - update use_gating_for_weighting and softmax CONTROL option to work with refactoring - validate that use_gating_for_weighting is False when learn_field_weights is True - docstring updates * Update Environment.py • test_emcomposition.py - update test_execution --------- Co-authored-by: Younes Strittmatter --- .../EGO/Using EMComposition/DeclanParams.py | 8 +- .../EGO Model - CSW with Simple Integrator.py | 8 +- .../EGO/Using EMComposition/Environment.py | 2 +- .../EGO/Using EMComposition/ScriptControl.py | 4 +- .../control/gating/gatingmechanism.py | 4 +- .../library/compositions/emcomposition.py | 703 +++++++++--------- .../library/compositions/pytorchwrappers.py | 5 +- tests/composition/test_emcomposition.py | 93 +-- 8 files changed, 417 insertions(+), 410 deletions(-) diff --git a/Scripts/Models (Under Development)/EGO/Using EMComposition/DeclanParams.py b/Scripts/Models (Under Development)/EGO/Using EMComposition/DeclanParams.py index 9f5f652b28a..ddc95037997 100644 --- a/Scripts/Models (Under Development)/EGO/Using EMComposition/DeclanParams.py +++ b/Scripts/Models (Under Development)/EGO/Using EMComposition/DeclanParams.py @@ -50,14 +50,14 @@ def calc_prob(em_preds, test_ys): previous_state_d = 11, # length of state vector context_d = 11, # length of context vector memory_capacity = ALL, # number of entries in EM memory; ALL=> match to number of stims - memory_init = (0,.0001), # Initialize memory with random values in interval - # memory_init = None, # Initialize with zeros + # memory_init = (0,.0001), # Initialize memory with random values in interval + memory_init = None, # Initialize with zeros concatenate_queries = False, # concatenate_queries = True, # environment - # curriculum_type = 'Interleaved', - curriculum_type = 'Blocked', + curriculum_type = 'Interleaved', + # curriculum_type = 'Blocked', # num_stims = 100, # Integer or ALL num_stims = ALL, # Integer or ALL diff --git a/Scripts/Models (Under Development)/EGO/Using EMComposition/EGO Model - CSW with Simple Integrator.py b/Scripts/Models (Under Development)/EGO/Using EMComposition/EGO Model - CSW with Simple Integrator.py index aabdecfd655..432bacf4c3e 100644 --- a/Scripts/Models (Under Development)/EGO/Using EMComposition/EGO Model - CSW with Simple Integrator.py +++ b/Scripts/Models (Under Development)/EGO/Using 
EMComposition/EGO Model - CSW with Simple Integrator.py @@ -363,7 +363,7 @@ def construct_model(model_name:str=model_params['name'], if RUN_MODEL: import timeit def print_stuff(**kwargs): - print(f"\n**************\n BATCH: {kwargs['batch']}\n**************\n") + print(f"\n**************\n BATCH: {kwargs['minibatch']}\n**************\n") print(kwargs) print('\nContext internal: \n', model.nodes['CONTEXT'].function.parameters.value.get(kwargs['context'])) print('\nContext hidden: \n', model.nodes['CONTEXT'].parameters.value.get(kwargs['context'])) @@ -407,8 +407,8 @@ def print_stuff(**kwargs): ) stop_time = timeit.default_timer() print(f"Elapsed time: {stop_time - start_time}") - if DISPLAY_MODEL is not None: - model.show_graph(**DISPLAY_MODEL) + # if DISPLAY_MODEL is not None: + # model.show_graph(**DISPLAY_MODEL) if PRINT_RESULTS: print("MEMORY:") print(np.round(model.nodes['EM'].parameters.memory.get(model.name),3)) @@ -450,7 +450,7 @@ def eval_weights(weight_mat): axes[1].set_xlabel('Stimuli') axes[1].set_ylabel(model_params['loss_spec']) # Logit of loss - axes[2].plot( (model.results[1:TOTAL_NUM_STIMS,2]*TARGETS[:TOTAL_NUM_STIMS-1]).sum(-1) ) + axes[2].plot( (model.results[2:TOTAL_NUM_STIMS,2]*TARGETS[:TOTAL_NUM_STIMS-2]).sum(-1) ) axes[2].set_xlabel('Stimuli') axes[2].set_ylabel('Correct Logit') plt.suptitle(f"{model_params['curriculum_type']} Training") diff --git a/Scripts/Models (Under Development)/EGO/Using EMComposition/Environment.py b/Scripts/Models (Under Development)/EGO/Using EMComposition/Environment.py index 0ce08fafaaf..78aca55b459 100644 --- a/Scripts/Models (Under Development)/EGO/Using EMComposition/Environment.py +++ b/Scripts/Models (Under Development)/EGO/Using EMComposition/Environment.py @@ -2,7 +2,7 @@ import torch from torch.utils.data import dataset from torch import utils -from numpy.random import randint +from random import randint def one_hot_encode(labels, num_classes): """ diff --git a/Scripts/Models (Under Development)/EGO/Using EMComposition/ScriptControl.py b/Scripts/Models (Under Development)/EGO/Using EMComposition/ScriptControl.py index 04027649aa3..a06c4a95058 100644 --- a/Scripts/Models (Under Development)/EGO/Using EMComposition/ScriptControl.py +++ b/Scripts/Models (Under Development)/EGO/Using EMComposition/ScriptControl.py @@ -24,6 +24,6 @@ PRINT_RESULTS = False # don't print model.results to console after execution # PRINT_RESULTS = True # print model.results to console after execution SAVE_RESULTS = False # save model.results to disk -PLOT_RESULTS = False # don't plot results (PREDICTIONS) vs. TARGETS -# PLOT_RESULTS = True # plot results (PREDICTIONS) vs. TARGETS +# PLOT_RESULTS = False # don't plot results (PREDICTIONS) vs. TARGETS +PLOT_RESULTS = True # plot results (PREDICTIONS) vs. 
TARGETS ANIMATE = False # {UNIT:EXECUTION_SET} # Specifies whether to generate animation of execution diff --git a/psyneulink/core/components/mechanisms/modulatory/control/gating/gatingmechanism.py b/psyneulink/core/components/mechanisms/modulatory/control/gating/gatingmechanism.py index f76fbb90981..6df5e05f0be 100644 --- a/psyneulink/core/components/mechanisms/modulatory/control/gating/gatingmechanism.py +++ b/psyneulink/core/components/mechanisms/modulatory/control/gating/gatingmechanism.py @@ -28,7 +28,7 @@ -------- A GatingMechanism is a subclass of `ControlMechanism` that is restricted to using only `GatingSignals `, -which modulate the `input ` or `output ` of a `Mechanism `, +which modulate the `input ` or `output ` of a `Mechanism `, but not the paramaters of its `function `. Accordingly, its constructor has a **gate** argument in place of a **control** argument. It also lacks several attributes related to control, including those related to costs and net_outcome. In all other respects it is identical to its parent class, ControlMechanism. @@ -58,7 +58,7 @@ *Specifying gating* ~~~~~~~~~~~~~~~~~~~ -A GatingMechanism is used to modulate the value of an `InputPort` or `OutputPort`. An InputPort or OutputPort can +A GatingMechanism is used to modulate the value of an `InputPort` or `InputPort`. An InputPort or OutputPort can be specified for gating by assigning it a `GatingProjection` or `GatingSignal` anywhere that the Projections to a Port or its `ModulatorySignals can be specified `. A `Mechanism ` can also be specified for gating, in which case the `primary InputPort ` of the specified Mechanism is used. Ports diff --git a/psyneulink/library/compositions/emcomposition.py b/psyneulink/library/compositions/emcomposition.py index 807f316fe55..11eac6e1d20 100644 --- a/psyneulink/library/compositions/emcomposition.py +++ b/psyneulink/library/compositions/emcomposition.py @@ -264,7 +264,6 @@ - `Field Weights ` * `EMComposition_Class_Reference` - .. _EMComposition_Overview: Overview @@ -315,14 +314,15 @@ **Operation** *Retrieval.* The values retrieved from `memory ` (one for each field) are based -on the relative similarity of the keys to the entries in memory, computed as the dot product of each key and the -values in the corresponding field for each entry in memory. These dot products are then softmaxed, and those -softmax distributions are weighted by the corresponding `field_weights ` for each field -and then combined, to produce a single softmax distribution over the entries in memory. That is then used to generate -a weighted average of the retrieved values across all fields, which is returned as the `result ` -of the EMComposition's `execution ` (an EMComposition can also be configured to return the -entry with the highest dot product weighted by field, however then it is not compatible with learning; -see `softmax_choice `). +on the relative similarity of the keys to the entries in memory, computed as the distance of each key and the +values in the corresponding field for each entry in memory. By default, normalized dot products (comparable to cosine +similarity) are used to compute the similarity of each query to each key in memory. These distances are then +weighted by the corresponding `field_weights ` for each field (if specified) and then +summed, and the sum is softmaxed to produce a softmax distribution over the entries in memory. 
That is then used to +generate a softmax-weighted average of the retrieved values across all fields, which is returned as the `result +` of the EMComposition's `execution ` (an EMComposition can also be +configured to return the entry with the lowest distance weighted by field, however then it is not compatible +with learning; see `softmax_choice `). COMMENT: TBD DISTANCE ATTRIBUTES: @@ -428,11 +428,11 @@ process, but the values of which are retrieved and assigned as the `value ` of the corresponding `retrieved_node `. This distinction between keys and value corresponds to the format of a standard "dictionary," though in that case only a single key and value are allowed, whereas - here there can be one or more keys and any number of values; if all fields are keys, this implements a full form of - content-addressable memory. If **learn_field_weight** is True (and `enable_learning` + here there can be one or more keys and any number of values; if all fields are keys, this implements a full form of + content-addressable memory. If **learn_field_weights** is True (and `enable_learning` is either True or a list with True for at least one entry), then the field_weights can be modified during training (this functions similarly to the attention head of a Transformer model, although at present the field can only be - scalar values rather than vecdtors); if **learn_field_weight** is False, then the field_weights are fixed. + scalar values rather than vecdtors); if **learn_field_weights** is False, then the field_weights are fixed. The following options can be used to specify **field_weights**: * *None* (the default): all fields except the last are treated as keys, and are weighted equally for retrieval, @@ -458,13 +458,6 @@ are used to weight the retrieved value of each field. This setting is ignored if **field_weights** is None or `concatenate_queries ` is in effect. - .. warning:: - If **normalize_field_weights** is False and **enable_learning** is True, a warning is issued indicating that - this may produce an error if the `loss_spec ` for the EMComposition (or an - `AutodiffComposition` that contains it) requires all values to be between 0 and 1, and calling the - EMComposition's `learn ` method will generate an error if the loss_spec is specified is - one known to be incompatible (e.g., `BINARY_CROSS_ENTROPY `). - .. _EMComposition_Field_Names: * **field_names**: specifies names that can be assigned to the fields. The number of names specified must @@ -485,9 +478,8 @@ .. note:: While this is computationally more efficient, it can affect the outcome of the `matching process - `, since computing the normalized dot product of a single vector comprised of the - concatentated inputs is not identical to computing the normalized dot product of each field independently and - then combining the results. + `, since computing the distance of a single vector comprised of the concatentated + inputs is not identical to computing the distance of each field independently and then combining the results. .. note:: All `query_input_nodes ` and `retrieved_nodes ` @@ -514,41 +506,41 @@ .. _EMComposition_Softmax_Gain: -* **softmax_gain** : specifies the gain (inverse temperature) used for softmax normalizing the dot products of - queries and keys in memory (see `EMComposition_Execution` below). The following options can be used: +* **softmax_gain** : specifies the gain (inverse temperature) used for softmax normalizing the combined distances + used for retrieval (see `EMComposition_Execution` below). 
The following options can be used: * numeric value: the value is used as the gain of the `SoftMax` Function for the EMComposition's - `softmax_nodes `. + `softmax_node `. * *ADAPTIVE*: the `adapt_gain ` method of the `SoftMax` Function is used to adaptively set - the `softmax_gain ` based on the entropy of the dot products, in order to preserve + the `softmax_gain ` based on the entropy of the distances, in order to preserve the distribution over non- (or near) zero entries irrespective of how many (near) zero entries there are (see `Thresholding and Adaptive Gain ` for additional details). * *CONTROL*: a `ControlMechanism` is created, and its `ControlSignal` is used to modulate the `softmax_gain - ` parameter of the `SoftMax` function of the EMComposition's `softmax_nodes - `. + ` parameter of the `SoftMax` function of the EMComposition's `softmax_node + `. If *None* is specified, the default value for the `SoftMax` function is used. .. _EMComposition_Softmax_Threshold: * **softmax_threshold**: if this is specified, and **softmax_gain** is specified with a numeric value, - then any values below the specified threshold are set to 0 before the dot products are softmaxed + then any values below the specified threshold are set to 0 before the distances are softmaxed (see *mask_threhold* under `Thresholding and Adaptive Gain ` for additional details). .. _EMComposition_Softmax_Choice: -* **softmax_choice** : specifies how the `SoftMax` Function of each of the EMComposition's `softmax_nodes - ` is used, with the dot products of queries and keys, to generate a retrieved item; +* **softmax_choice** : specifies how the `SoftMax` Function of the EMComposition's `softmax_node + ` is used, with the combined distances, to generate a retrieved item; the following are the options that can be used and the retrieved value they produce: - * *WEIGHTED_AVG* (default): softmax-weighted average of entries, based on their dot products with the key(s). + * *WEIGHTED_AVG* (default): softmax-weighted average based on combined distances of queries and keys in memory. - * *ARG_MAX*: entry with the largest dot product (one with lowest index in `memory `)\ + * *ARG_MAX*: entry with the smallest distance (one with lowest index in `memory `)\ if there are identical ones). - * *PROBABISTIC*: probabilistically chosen entry based on softmax-transformed distribution of dot products. + * *PROBABISTIC*: probabilistically chosen entry based on softmax-transformed distribution of combined distance. .. warning:: Use of the *ARG_MAX* and *PROBABILISTIC* options is not compatible with learning, as these implement a discrete @@ -585,13 +577,14 @@ `, and each entry must be a boolean that specifies whether the corresponding `retrieved_node ` is used for learning. -* **learn_field_weight** : specifies whether `field_weights ` are modifiable during +* **learn_field_weights** : specifies whether `field_weights ` are modifiable during learning (see `field_weights ` and `Learning ` for additional information. For learning of `field_weights ` to occur, **enable_learning** must - also be True, or it must be a list with at least one True entry. + also be True, or it must be a list with at least one True entry. If **learn_field_weights** is True, + **use_gating_for_weighting** must be False (see `note `). * **learning_rate** : specifies the rate at which `field_weights ` are learned if - **learn_field_weight** is True; see `Learning ` for additional information. + **learn_field_weights** is True; see `Learning ` for additional information. .. 
_EMComposition_Structure: @@ -621,20 +614,19 @@ .. _EMComposition_Memory_Storage: .. technical_note:: The memories are actually stored in the `matrix ` parameters of the`MappingProjections` - from the `combined_softmax_node ` to each of the `retrieved_nodes - `. Memories associated with each key are also stored (in inverted form) - in the `matrix ` parameters of the `MappingProjection ` - from the `query_input_nodes ` to each of the corresponding `match_nodes + from the `combined_matches_node ` to each of the `retrieved_nodes + `. Memories associated with each key are also stored (in inverted form) in the + `matrix ` parameters of the `MappingProjection ` from the + `query_input_nodes ` to each of the corresponding `match_nodes `. This is done so that the match of each query to the keys in memory for the corresponding field can be computed simply by passing the input for each query through the Projection (which - computes the dot product of the input with the Projection's `matrix ` parameter) to - the corresponding match_node; and, similarly, retrieivals can be computed by passing the softmax distributions - and weighting for each field computed in the `combined_softmax_node ` - through its Projection to each `retrieved_node ` (which are inverted versions - of the matrices of the `MappingProjections ` from the `query_input_nodes - ` to each of the corresponding `match_nodes `), - to compute the dot product of the weighted softmax over entries with the corresponding field of each entry - that yields the retreieved value for each field. + computes the distance of the input with the Projection's `matrix ` parameter) to the + corresponding match_node; and, similarly, retrieivals can be computed by passing the softmax distributions for + each field computed in the `combined_matches_node ` through its Projection + to each `retrieved_node ` (which are inverted versions of the matrices of the + `MappingProjections ` from the `query_input_nodes ` to each + of the corresponding `match_nodes `), to compute the distance of the weighted + softmax over entries with the corresponding field of each entry that yields the retreieved value for each field. .. _EMComposition_Output: @@ -668,7 +660,7 @@ * **Concatenation**. By default, the input to every `query_input_node ` is passed to a to its own `match_node ` through a `MappingProjection` that computes its - dot product with the corresponding field of each entry in `memory `. In this way, each + distance with the corresponding field of each entry in `memory `. In this way, each match is normalized so that, absent `field_weighting `, all keys contribute equally to retrieval irrespective of relative differences in the norms of the queries or the keys in memory. However, if the `field_weights ` are the same for all `keys ` and @@ -685,32 +677,65 @@ however it will not necessarily produce the same results as passing each query through its own `match_node ` (see `concatenate keys <`concatenate_queries_node>` for additional information). -* **Match memories by field**. The values of each `query_input_node ` (or the - `concatenate_queries_node ` if `concatenate_queries - ` attribute is True) are passed through a `MappingProjection` that computes - the dot product of the input with each memory for the corresponding field, the result of which is passed to the - corresponding `match_node `. - -* **Softmax normalize matches over fields**. 
The dot product for each key field is passed from the `match_node - ` to the corresponding `softmax_node `, which applies - the `SoftMax` Function to normalize the dot products for each key field. If a numerical value is specified for - `softmax_gain `, that is used as the gain (inverse temperature) for the SoftMax Function; - if *ADAPTIVE* is specified, then the `SoftMax.adapt_gain` function is used to adaptively set the gain based on the - dot products in each field; if *CONTROL* is specified, then the dot products are monitored by a `ControlMechanism` - that uses the `adapt_gain ` method of the SoftMax Function to modulate its `gain ` - parameter; if None is specified, the default value of the `Softmax` Function is used as the `gain ` - parameter (see `Softmax_Gain ` for additional details). - -* **Weight fields**. If `field weights ` are specified, then the softmax normalized dot - product for each key field is passed to the corresponding `field_weight_node ` - where it is multiplied by the corresponding `field_weight ` (if - `use_gating_for_weighting ` is True, this is done by using the `field_weight - ` to output gate the `softmax_node `). The weighted softmax - vectors for all key fields are then passed to the `combined_softmax_node `, - where they are haddamard summed to produce a single weighting for each memory. - -* **Retrieve values by field**. The vector of softmax weights for each memory generated by the `combined_softmax_node - ` is passed through the Projections to the each of the `retrieved_nodes +.. _EMComposition_Distance_Computation: + +* **Match memories by field**. The values of each `query_input_node ` + (or the `concatenate_queries_node ` if `concatenate_queries + ` attribute is True) are passed through a `MappingProjection` that + computes the distance between the corresponding input (query) and each memory (key) for the corresponding field, + the result of which is possed to the corresponding `match_node `. By default, the + distance is computed as the normalized dot product (i.e., between the normalized query vector and the normalized + key for the corresponding `field `, that is comparable to using cosine similarity). However, + if `normalize_memories ` is set to False, just the raw dot product is computed. + The distance can also be customized by specifying a different `function ` for the + `MappingProjection` to the `match_node `. The result is assigned as the `value + ` of the corresponding `match_node `. + +.. _EMComposition_Field_Weighting: + +* **Weight distances**. If `field weights ` are specified, then the distance computed + by the `MappingProjection` to each `match_node ` is multiplied by the corresponding + `field_weight ` using the `field_weight_node `. + By default (if `use_gating_for_weighting ` is False), this is done using + the `weighted_match_nodes `, each of which receives a Projection from a + `match_node ` and the corresponding `field_weight_node ` + and multiplies them to produce the weighted distance for that field as its output. However, if + `use_gating_for_weighting ` is True, the `field_weight_nodes` are implemented + as `GatingMechanisms `, each of which uses its `field weight ` as a + `GatingSignal ` to output gate (i.e., multiplicatively modulate the output of) the corresponding + `match_node `. In this case, the `weighted_match_nodes` are not implemented, + and the output of the `match_node ` is passed directly to the `combined_matches_node + `. + + + .. _EMComposition_Gating_For_Weighting: + .. 
note::
+      Setting `use_gating_for_weighting ` to True reduces the size and
+      complexity of the EMComposition, by eliminating the `weighted_match_nodes `.
+      However, doing so precludes the ability to learn the `field_weights `,
+      since `GatingSignals ` are `ModulatorySignals ` that cannot be learned. If learning is required,
+      then `use_gating_for_weighting` should be set to False.
+
+* **Combine distances**. If `field weights ` are used to specify more than one `key field
+  `, then the (weighted) distances computed for each field (see above) are summed across fields
+  by the `combined_matches_node `, before being passed to the `softmax_node
+  `. If only one key field is specified, then the output of the `match_node
+  ` is passed directly to the `softmax_node `.
+
+* **Softmax normalize distances**. The distances, passed either from the `combined_matches_node
+  `, or directly from the `match_node ` if there is
+  only one key field, are passed to the `softmax_node `, which applies the `SoftMax`
+  Function to generate the softmax distribution used to retrieve entries from `memory `.
+  If a numerical value is specified for `softmax_gain `, that is used as the gain (inverse
+  temperature) for the SoftMax Function; if *ADAPTIVE* is specified, then the `SoftMax.adapt_gain` function is used
+  to adaptively set the gain based on the summed distance (i.e., the output of the `combined_matches_node
+  `); if *CONTROL* is specified, then the summed distance is monitored by a
+  `ControlMechanism` that uses the `adapt_gain ` method of the `SoftMax` Function to modulate its
+  `gain ` parameter; if None is specified, the default value of the `Softmax` Function is used as the
+  `gain ` parameter (see `Softmax_Gain ` for additional details).
+
+* **Retrieve values by field**. The vector of softmax weights for each memory generated by the `softmax_node
+  ` is passed through the Projections to each of the `retrieved_nodes
   ` to compute the retrieved value for each field.

 * **Decay memories**. If `memory_decay ` is True, then each of the memories is decayed
@@ -718,7 +743,7 @@

    .. technical_note::
       This is done by multiplying the `matrix ` parameter of the `MappingProjection` from
-      the `combined_softmax_node ` to each of the `retrieved_nodes
+      the `combined_matches_node ` to each of the `retrieved_nodes
      `, as well as the `matrix ` parameter of the `MappingProjection` from each
      `query_input_node ` to the corresponding `match_node `
      by `memory_decay `,
@@ -733,7 +758,7 @@

    .. technical_note::
       This is done by adding the input vectors to the corresponding rows of the `matrix `
-      of the `MappingProjection` from the `retreival_weighting_node ` to each
+      of the `MappingProjection` from the `combined_matches_node ` to each
      of the `retrieved_nodes `, as well as the `matrix ` parameter of the
      `MappingProjection` from each `query_input_node ` to the corresponding `match_node
      ` (see note `above ` for
@@ -964,7 +989,7 @@

 **Use of field_weights to specify keys and values.**

-Note that the figure now shows `RETRIEVAL WEIGHTING ` `nodes `,
+Note that the figure now shows ` [WEIGHT] ` `nodes `,
 that are used to implement the relative contribution that each key field makes to the matching process specified in
 `field_weights ` argument. By default, these are equal (all assigned a value of 1), but different
 values can be used to weight the relative contribution of each key field. The values are normalized so
The values are normalized so @@ -983,7 +1008,7 @@ **Use of field_weights to specify relative contribution of fields to matching process.** Note that in this case, the `concatenate_queries_node ` has been replaced by -a pair of `retreival_weighting_nodes `, one for each key field. This is because +a pair of `weighted_match_node `, one for each key field. This is because the keys were assigned different weights; when they are assigned equal weights, or if no weights are specified, and `normalize_memories ` is `True`, then the keys are concatenated and are concatenated for efficiency of processing. This can be suppressed by specifying `concatenate_queries` as `False` @@ -1029,25 +1054,37 @@ WEIGHTED_AVG = ALL PROBABILISTIC = PROB_INDICATOR -QUERY_AFFIX = ' [QUERY]' -VALUE_AFFIX = ' [VALUE]' -MATCH_TO_KEYS_AFFIX = ' [MATCH to KEYS]' +QUERY_NODE_NAME = 'QUERY' +QUERY_AFFIX = f' [{QUERY_NODE_NAME}]' +VALUE_NODE_NAME = 'VALUE' +VALUE_AFFIX = f' [{VALUE_NODE_NAME}]' +MATCH = 'MATCH' +MATCH_AFFIX = f' [{MATCH}]' +MATCH_TO_KEYS_NODE_NAME = f'{MATCH} to KEYS' +WEIGHT = 'WEIGHT' +WEIGHT_AFFIX = f' [{WEIGHT}]' +MATCH_TO_KEYS_AFFIX = f' [{MATCH_TO_KEYS_NODE_NAME}]' +WEIGHTED_MATCH_NODE_NAME = 'WEIGHTED MATCH' +WEIGHTED_MATCH_AFFIX = f' [{WEIGHTED_MATCH_NODE_NAME}]' +CONCATENATE_QUERIES_NAME = 'CONCATENATE QUERIES' +COMBINE_MATCHES_NODE_NAME = 'COMBINE MATCHES' +COMBINE_MATCHES_AFFIX = f' [{COMBINE_MATCHES_NODE_NAME}]' +SOFTMAX_NODE_NAME = 'RETRIEVE' +SOFTMAX_AFFIX = f' [{SOFTMAX_NODE_NAME}]' +RETRIEVED_NODE_NAME = 'RETRIEVED' RETRIEVED_AFFIX = ' [RETRIEVED]' -WEIGHTED_SOFTMAX_AFFIX = ' [WEIGHTED SOFTMAX]' -COMBINED_SOFTMAX_NODE_NAME = 'RETRIEVE' STORE_NODE_NAME = 'STORE' - def _memory_getter(owning_component=None, context=None)->list: """Return list of memories in which rows (outer dimension) are memories for each field. These are derived from `matrix ` parameter of the `afferent - ` MappingProjections to each of the `retrieved_nodes `. + ` MappingProjections to each of the `2472s `. """ # If storage_node (EMstoragemechanism) is implemented, get memory from that if owning_component.is_initializing: return None - if owning_component.use_storage_node: + if owning_component._use_storage_node: return owning_component.storage_node.parameters.memory_matrix.get(context) # Otherwise, get memory from Projection(s) to each retrieved_node @@ -1142,7 +1179,7 @@ class EMComposition(AutodiffComposition): see `Match memories by field ` for additional details. softmax_gain : float, ADAPTIVE or CONTROL : default 1.0 - specifies the temperature used for softmax normalizing the dot products of keys and memories; + specifies the temperature used for softmax normalizing the distance of queries and keys in memory; see `Softmax normalize matches over fields ` for additional details. softmax_threshold : float : default .0001 @@ -1150,7 +1187,7 @@ class EMComposition(AutodiffComposition): see *mask_threshold* under `Thresholding and Adaptive Gain ` for details). softmax_choice : WEIGHTED_AVG, ARG_MAX, PROBABILISTIC : default WEIGHTED_AVG - specifies how the softmax over dot products of keys and memories is used for retrieval; + specifies how the softmax over distances of queries and keys in memory is used for retrieval; see `softmax_choice ` for a description of each option. 
storage_prob : float : default 1.0 @@ -1170,8 +1207,8 @@ class EMComposition(AutodiffComposition): learn_field_weights : bool : default True specifies whether `field_weights ` are learnable during training; - requires **enable_learning** to be True to have any effect; see `learn_field_weights - ` for additional details. + requires **enable_learning** to be True to have any effect, and **use_gating_for_weighting** must be False; + see `learn_field_weights ` for additional details. learning_rate : float : default .01 specifies rate at which `field_weights ` are learned @@ -1186,14 +1223,8 @@ class EMComposition(AutodiffComposition): the EMComposition into another Composition; to do so, use_storage_node must be True (default). use_gating_for_weighting : bool : default False - specifies whether to use a `GatingMechanism` to modulate the `combined_softmax_node - ` instead of a standard ProcessingMechanism. If True, then - a GatingMechanism is constructed and used to gate the `OutputPort` of each `field_weight_node - EMComposition.field_weight_nodes`; otherwise, the output of each `field_weight_node - EMComposition.field_weight_nodes` projects to the `InputPort` of the `combined_softmax_node - EMComposition.combined_softmax_node` that receives a Projection from the corresponding - `field_weight_node `, and multiplies its `value - `. + specifies whether to use output gating to weight the `match_nodes ` instead of + a standard input (see `Weight distances ` for additional details). Attributes ---------- @@ -1217,9 +1248,10 @@ class EMComposition(AutodiffComposition): field_weights : tuple[float] determines which fields of the input are treated as "keys" (non-zero values) that are used to match entries in - `memory ` for retrieval, and which are used as "values" (zero values), that are stored - and retrieved from memory, but not used in the match process (see `Match memories by field - `. see `field_weights ` additional details. + `memory ` for retrieval, and which are used as "values" (zero values) that are stored + and retrieved from memory but not used in the match process (see `Match memories by field + `; also determines the relative contribution of each key field to the match process; + see `field_weights ` additional details. normalize_field_weights : bool : default True determines whether `fields_weights ` are normalized over the number of keys, or @@ -1239,16 +1271,16 @@ class EMComposition(AutodiffComposition): see `Match memories by field ` for additional details. softmax_gain : float, ADAPTIVE or CONTROL - determines gain (inverse temperature) used for softmax normalizing the dot products of keys and memories - by the `softmax` function of the `softmax_nodes `; see `Softmax normalize matches - over fields ` for additional details. + determines gain (inverse temperature) used for softmax normalizing the summed distances of queries and keys in + memory by the `SoftMax` Function of the `softmax_node `; see `Softmax normalize + distances ` for additional details. softmax_threshold : float determines the threshold used to mask out small values in the softmax calculation; see *mask_threshold* under `Thresholding and Adaptive Gain ` for details). softmax_choice : WEIGHTED_AVG, ARG_MAX or PROBABILISTIC - determines how the softmax over dot products of keys and memories is used for retrieval; + determines how the softmax over distances of queries and keys in memory is used for retrieval; see `softmax_choice ` for a description of each option. 
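    .. technical_note::
       For illustration only, a minimal NumPy sketch of how gain, threshold and choice mode plausibly combine in
       the retrieval softmax just described (this is not the PsyNeuLink implementation; the function and argument
       names below are illustrative assumptions, not part of the API)::

           import numpy as np

           def retrieval_softmax(combined_matches, gain=1.0, mask_threshold=None, choice='WEIGHTED_AVG'):
               # combined_matches: one value per entry in memory (larger = better match) -- an assumption
               d = np.asarray(combined_matches, dtype=float)
               mask = np.ones(len(d), dtype=bool)
               if mask_threshold is not None:
                   # assumption: entries below the threshold are excluded from the softmax
                   # (at least one entry is assumed to survive the mask)
                   mask = np.abs(d) >= mask_threshold
               weights = np.zeros(len(d))
               if mask.any():
                   e = np.exp(gain * (d[mask] - d[mask].max()))   # gain acts as inverse temperature
                   weights[mask] = e / e.sum()
               if choice == 'WEIGHTED_AVG':
                   return weights                                  # weights for a softmax-weighted average
               selected = np.zeros(len(d))
               if choice == 'ARG_MAX':
                   selected[np.argmax(weights)] = 1.0              # one-hot choice of the best-matching entry
               elif choice == 'PROBABILISTIC':
                   selected[np.random.choice(len(d), p=weights)] = 1.0  # sample one entry from the distribution
               return selected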
storage_prob : float @@ -1300,61 +1332,47 @@ class EMComposition(AutodiffComposition): into a single vector used for the matching processing if `concatenate keys ` is True. This is not created if the **concatenate_queries** argument to the EMComposition's constructor is False or is overridden (see `concatenate_queries `), or there is only one - query_input_node. This node is named *CONCATENATE_KEYS* + query_input_node. This node is named *CONCATENATE_QUERIES* match_nodes : list[ProcessingMechanism] - `ProcessingMechanisms ` that receive the dot product of each key and those stored in + `ProcessingMechanisms ` that compute the dot product of each query and the key stored in the corresponding field of `memory ` (see `Match memories by field ` for additional details). These are named the same as the corresponding `query_input_nodes ` appended with the suffix *[MATCH to KEYS]*. - softmax_gain_control_nodes : list[ControlMechanism] + field_weight_nodes : list[ProcessingMechanism or GatingMechanism] + Nodes used to weight the distances computed by the `match_nodes ` with the + `field weight ` for the corresponding `key field ` + (see `Weight distances ` for implementation). These are named the same + as the corresponding `query_input_nodes `. + + weighted_match_nodes : list[ProcessingMechanism] + `ProcessingMechanisms ` that combine the `field weight ` + for each `key field ` with the dot product computed by the corresponding the + `match_node `. These are only implemented if `use_gating_for_weighting + ` is False (see `Weight distances ` + for details), and are named the same as the corresponding `query_input_nodes ` + appended with the suffix *[WEIGHTED MATCH]*. + + combined_matches_node : ProcessingMechanism + `ProcessingMechanism` that receives the weighted distances from the `weighted_match_nodes + ` if more than one `key field ` is specified + (or directly from `match_nodes ` if `use_gating_for_weighting + ` is True), and combines them into a single vector that is passed + to the `softmax_node ` for retrieval. This node is named *COMBINE MATCHES*. + + softmax_node : list[ProcessingMechanism] + `ProcessingMechanisms ` that computes the softmax over the summed distances of keys + and memories (output of the `combined_match_node `) + from the corresponding `match_nodes ` (see `Softmax over summed distances + ` for additional details). This is named *RETRIEVE* (as it yields the + softmax-weighted average over the keys in `memory `). + + softmax_gain_control_node : list[ControlMechanism] `ControlMechanisms ` that adaptively control the `softmax_gain ` - for the corresponding `softmax_nodes `. These are implemented only if - `softmax_gain ` is specified as *CONTROL* (see `softmax_gain - ` for details). - - softmax_nodes : list[ProcessingMechanism] - `ProcessingMechanisms ` that compute the softmax over the vectors received - from the corresponding `match_nodes ` (see `Softmax normalize matches over fields - ` for additional details). These are named the same as the corresponding - `query_input_nodes ` appended with the suffix *[SOFTMAX]*. - - field_weight_nodes : list[ProcessingMechanism] - `ProcessingMechanisms `, each of which use the `field weight ` - for a given `field ` as its (fixed) input and provides this to the corresponding - `weighted_softmax_node `. These are implemented only if more than one - `key field ` is specified (see `Fields ` for additional details), - and are replaced with `retrieval_gating_nodes ` if - `use_gating_for_weighting ` is True. 
These are named the same as the - corresponding `query_input_nodes ` appended with the suffix *[WEIGHT]*. - - weighted_softmax_nodes : list[ProcessingMechanism] - `ProcessingMechanisms `, each of which receives the output of the corresponding - `softmax_node ` and `field_weight_node ` - for a given `field `, and multiplies them to produce the weighted softmax for that field; - these are implemented only if more than one `key field ` is specified (see `Fields - ` for additional details) and `use_gating_for_weighting - ` is False (otherwise, `field_weights ` - are applied through output gating of the `softmax_nodes ` by the - `retrieval_gating_nodes `). These are named the same as the corresponding - `query_input_nodes ` appended with the suffix *[WEIGHTED SOFTMAX]*. - - retrieval_gating_nodes : list[GatingMechanism] - `GatingMechanisms ` that uses the `field weight ` for each - field to modulate the output of the corresponding `softmax_node ` before it - is passed to the `combined_softmax_node `. These are implemented - only if `use_gating_for_weighting ` is True and more than one - `key field ` is specified (see `Fields ` for additional details). - - combined_softmax_node : ProcessingMechanism - `ProcessingMechanism` that receives the softmax normalized dot products of the keys and memories from the - `softmax_nodes `, weighted by the `field_weights_nodes - ` if more than one `key field ` is specified - (or by `retrieval_gating_nodes ` if `use_gating_for_weighting - ` is True), and combines them into a single vector that is used to - retrieve the corresponding memory for each field from `memory ` (see `Retrieve values by - field ` for additional details). This node is named *RETRIEVE*. + of the `softmax_node `. This is implemented only if `softmax_gain + ` is specified as *CONTROL* (see `softmax_gain ` for + details). 
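    .. technical_note::
       As a concrete illustration of the flow described above (weighted matches -> combined matches -> softmax ->
       retrieved value), a NumPy sketch follows; the array values and variable names are made up for the example,
       and the entries for a field are assumed to be held as rows of the Projection matrix to its retrieved_node,
       as described under memory storage above::

           import numpy as np

           # per-field match vectors (one value per entry in memory) for two key fields, and their field_weights
           matches_by_field = [np.array([0.2, 0.9, 0.4]), np.array([0.7, 0.1, 0.5])]
           field_weights = np.array([9.0, 1.0])
           w = field_weights / field_weights.sum()          # cf. normalize_field_weights

           # weighted_match_nodes -> combined_matches_node: weight each field's matches and sum across fields
           combined = sum(wi * m for wi, m in zip(w, matches_by_field))

           # softmax_node: softmax over the combined matches (gain = 1 here)
           e = np.exp(combined - combined.max())
           softmax_weights = e / e.sum()

           # retrieved_node for one field: softmax-weighted average of that field's stored entries,
           # computed by passing the weights through the Projection matrix that holds the memories
           memory_for_field = np.array([[1., 2., 3.],
                                        [1., 2., 5.],
                                        [1., 2., 10.]])
           retrieved_value = softmax_weights @ memory_for_field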
retrieved_nodes : list[ProcessingMechanism] `ProcessingMechanisms ` that receive the vector retrieved for each field in `memory @@ -1615,7 +1633,8 @@ def __init__(self, if memory_decay_rate is AUTO: memory_decay_rate = 1 / memory_capacity - self.use_storage_node = use_storage_node + self._use_storage_node = use_storage_node + self._use_gating_for_weighting = use_gating_for_weighting if softmax_gain == CONTROL: self.parameters.softmax_gain.modulable = False @@ -1643,7 +1662,10 @@ def __init__(self, **kwargs ) - self._validate_options_with_learning(softmax_choice, normalize_field_weights, enable_learning) + self._validate_options_with_learning(enable_learning, + use_gating_for_weighting, + learn_field_weights, + softmax_choice) self._construct_pathways(self.memory_template, self.memory_capacity, @@ -1655,10 +1677,10 @@ def __init__(self, self.softmax_choice, self.storage_prob, self.memory_decay_rate, - self.use_storage_node, + self._use_storage_node, self.enable_learning, self.learn_field_weights, - use_gating_for_weighting) + self._use_gating_for_weighting) # if torch_available: # from psyneulink.library.compositions.pytorchEMcompositionwrapper import PytorchEMCompositionWrapper @@ -1669,7 +1691,7 @@ def __init__(self, # Assign learning-related attributes self._set_learning_attributes() - if self.use_storage_node: + if self._use_storage_node: # --------------------------------------- # # CONDITION: @@ -1709,7 +1731,7 @@ def __init__(self, # Suppress warnings for no efferent Projections for node in self.value_input_nodes: node.output_port.parameters.require_projection_in_composition.set(False, override=True) - self.combined_softmax_node.output_port.parameters.require_projection_in_composition.set(False, override=True) + self.softmax_node.output_port.parameters.require_projection_in_composition.set(False, override=True) # Suppress field_weight_nodes as INPUT nodes of the Composition for node in self.field_weight_nodes: @@ -1861,7 +1883,7 @@ def _construct_entries(entry_template, num_entries, memory_fill=None)->np.ndarra # Get remaining entries populated with memory_fill remaining_entries = _construct_entries(memory_template[0], num_entries_needed, memory_fill) assert bool(num_entries_needed == len(remaining_entries)) - # I any remaining entries, concatenate them with the entries that were specified + # If any remaining entries, concatenate them with the entries that were specified if num_entries_needed: memory = np.concatenate((np.array(memory_template, dtype=object), np.array(remaining_entries, dtype=object))) @@ -1942,23 +1964,23 @@ def _parse_fields(self, and normalize_memories) # if concatenate_queries was forced to be False when user specified it as True, issue warning if user_specified_concatenate_queries and not parsed_concatenate_queries: - # Issue warning if concatenate_queries is True but either - # field weights are not all equal and/or normalize_memories is False + # Issue warning if concatenate_queries is True but: + # field weights are not all equal and/or + # normalize_memories is False and/or + # there is only one key fw_error_msg = nm_error_msg = fw_correction_msg = nm_correction_msg = None - if not all(np.all(keys_weights[i] == keys_weights[0] for i in range(len(keys_weights)))): - fw_error_msg = f" field weights ({field_weights}) are not all equal" - fw_correction_msg = f"remove `field_weights` specification or make them all the same." 
- if not normalize_memories: - nm_error_msg = f" normalize_memories is False" - nm_correction_msg = f" or set normalize_memories to True" - if fw_error_msg and nm_error_msg: - error_msg = f"{fw_error_msg} and {nm_error_msg}" - correction_msg = f"{fw_correction_msg} and/or {nm_correction_msg}" - else: - error_msg = fw_error_msg or nm_error_msg - correction_msg = fw_correction_msg or nm_correction_msg + if self.num_keys == 1: + error_msg = f"there is only one key" + correction_msg = "" + elif not all(np.all(keys_weights[i] == keys_weights[0] for i in range(len(keys_weights)))): + error_msg = f" field weights ({field_weights}) are not all equal" + correction_msg = (f" To use concatenation, remove `field_weights` " + f"specification or make them all the same.") + elif not normalize_memories: + error_msg = f" normalize_memories is False" + correction_msg = f" To use concatenation, set normalize_memories to True." warnings.warn(f"The 'concatenate_queries' arg for '{name}' is True but {error_msg}; " - f"concatenation will be ignored. To use concatenation, {correction_msg}.") + f"concatenation will be ignored.{correction_msg}") self.learning_rate = learning_rate return parsed_field_weights, parsed_field_names, parsed_concatenate_queries @@ -2021,19 +2043,21 @@ def _construct_pathways(self, self.concatenate_queries_node = self._construct_concatenate_queries_node(concatenate_queries) self.match_nodes = self._construct_match_nodes(memory_template, memory_capacity, concatenate_queries,normalize_memories) - self.softmax_nodes = self._construct_softmax_nodes(memory_capacity, - field_weights, - softmax_gain, - softmax_threshold, - softmax_choice) self.field_weight_nodes = self._construct_field_weight_nodes(field_weights, concatenate_queries, use_gating_for_weighting) - self.weighted_softmax_nodes = self._construct_weighted_softmax_nodes(memory_capacity, use_gating_for_weighting) - self.softmax_gain_control_nodes = self._construct_softmax_gain_control_nodes(softmax_gain) - self.combined_softmax_node = self._construct_combined_softmax_node(memory_capacity, + self.weighted_match_nodes = self._construct_weighted_match_nodes(memory_capacity, field_weights) + + self.combined_matches_node = self._construct_combined_matches_node(memory_capacity, field_weighting, use_gating_for_weighting) + self.softmax_node = self._construct_softmax_node(memory_capacity, + softmax_gain, + softmax_threshold, + softmax_choice) + + self.softmax_gain_control_node = self._construct_softmax_gain_control_node(softmax_gain) + self.retrieved_nodes = self._construct_retrieved_nodes(memory_template) if use_storage_node: @@ -2043,86 +2067,73 @@ def _construct_pathways(self, # Do some validation and get singleton softmax and match Nodes for concatenated queries if self.concatenate_queries: - softmax_node = self.softmax_nodes.pop() - assert not self.softmax_nodes, \ - f"PROGRAM ERROR: Too many softmax_nodes ({len(self.softmax_nodes)}) for concatenated queries." - assert len(self.softmax_gain_control_nodes) <= 1, \ - (f"PROGRAM ERROR: Too many softmax_gain_control_nodes " - f"{len(self.softmax_gain_control_nodes)}) for concatenated queries.") - match_node = self.match_nodes.pop() - assert not self.match_nodes, \ + assert len(self.match_nodes) == 1, \ f"PROGRAM ERROR: Too many match_nodes ({len(self.match_nodes)}) for concatenated queries." assert not self.field_weight_nodes, \ f"PROGRAM ERROR: There should be no field_weight_nodes for concatenated queries." 
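        # Overview of the pathway construction below:
        #  - if enable_learning is False, the Nodes (query/value inputs, optional concatenate and storage Nodes,
        #    match, field-weight, weighted-match, combined-matches, softmax, gain-control and retrieved Nodes)
        #    are added directly, without learning pathways;
        #  - if enable_learning is True, the same topology is assembled with add_linear_processing_pathway,
        #    linking each query input through its match (and weighted-match) Node to the combined-matches Node
        #    (or, for a single key or concatenated queries, directly to the softmax Node), then to the softmax
        #    Node and on to the retrieved Nodes.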
# Construct Pathways -------------------------------------------------------------------------------- + # LEARNING NOT ENABLED -------------------------------------------------- # Set up pathways WITHOUT PsyNeuLink learning pathways if not self.enable_learning: self.add_nodes(self.query_input_nodes + self.value_input_nodes) - if self.concatenate_queries: - self.add_nodes([self.concatenate_queries_node, match_node, softmax_node]) - else: - self.add_nodes(self.match_nodes + - self.softmax_nodes + - self.field_weight_nodes + - self.weighted_softmax_nodes) - self.add_nodes(self.softmax_gain_control_nodes + - [self.combined_softmax_node] + - self.retrieved_nodes) if use_storage_node: self.add_node(self.storage_node) - # self.add_projections(proj for proj in self.storage_node.efferents) - + if self.concatenate_queries_node: + self.add_node(self.concatenate_queries_node) + self.add_nodes(self.match_nodes + self.field_weight_nodes + self.weighted_match_nodes) + if self.combined_matches_node: + self.add_node(self.combined_matches_node) + self.add_nodes([self.softmax_node] + self.retrieved_nodes) + if self.softmax_gain_control_node: + self.add_node(self.softmax_gain_control_node) + + # LEARNING ENABLED ----------------------------------------------------- # Set up pathways WITH psyneulink backpropagation learning field weights else: - - # Key pathways - for i in range(self.num_keys): - # Regular pathways - if not self.concatenate_queries: - pathway = [self.query_input_nodes[i], - self.match_nodes[i], - self.softmax_nodes[i], - self.combined_softmax_node] - if self.weighted_softmax_nodes: - pathway.insert(3, self.weighted_softmax_nodes[i]) - # if self.softmax_gain_control_nodes: - # pathway.insert(4, self.softmax_gain_control_nodes[i]) - # Key-concatenated pathways + # Query-specific pathways + if not self.concatenate_queries: + if self.num_keys == 1: + self.add_linear_processing_pathway([self.query_input_nodes[i], + self.match_nodes[i], + self.softmax_node]) else: + for i in range(self.num_keys): + pathway = [self.query_input_nodes[i], + self.match_nodes[i], + self.combined_matches_node] + if self.weighted_match_nodes: + pathway.insert(2, self.weighted_match_nodes[i]) + self.add_linear_processing_pathway(pathway) + self.add_linear_processing_pathway([self.combined_matches_node, self.softmax_node]) + # Query-concatenated pathways + else: + for i in range(self.num_keys): pathway = [self.query_input_nodes[i], self.concatenate_queries_node, - match_node, - softmax_node, - self.combined_softmax_node] - # if self.softmax_gain_control_nodes: - # pathway.insert(4, self.softmax_gain_control_nodes[0]) # Only one, ensured above - # self.add_backpropagation_learning_pathway(pathway) - self.add_linear_processing_pathway(pathway) + self.match_nodes[0]] + self.add_linear_processing_pathway(pathway) + self.add_linear_processing_pathway([self.match_nodes[0], self.softmax_node]) # softmax gain control is specified: - for gain_control_node in self.softmax_gain_control_nodes: - self.add_node(gain_control_node) + if self.softmax_gain_control_node: + self.add_node(self.softmax_gain_control_node) # field_weights -> weighted_softmax pathways if self.field_weight_nodes: for i in range(self.num_keys): - # self.add_backpropagation_learning_pathway([self.field_weight_nodes[i], - # self.weighted_softmax_nodes[i]]) - self.add_linear_processing_pathway([self.field_weight_nodes[i], self.weighted_softmax_nodes[i]]) + self.add_linear_processing_pathway([self.field_weight_nodes[i], self.weighted_match_nodes[i]]) 
self.add_nodes(self.value_input_nodes) # Retrieval pathways for i in range(len(self.retrieved_nodes)): - # self.add_backpropagation_learning_pathway([self.combined_softmax_node, self.retrieved_nodes[i]]) - self.add_linear_processing_pathway([self.combined_softmax_node, self.retrieved_nodes[i]]) + self.add_linear_processing_pathway([self.softmax_node, self.retrieved_nodes[i]]) # Storage Nodes if use_storage_node: self.add_node(self.storage_node) - # self.add_projections(proj for proj in self.storage_node.efferents) def _construct_query_input_nodes(self, field_weights)->list: """Create one node for each key to be used as cue for retrieval (and then stored) in memory. @@ -2145,7 +2156,7 @@ def _construct_query_input_nodes(self, field_weights)->list: def _construct_value_input_nodes(self, field_weights)->list: """Create one input node for each value to be stored in memory. - Used to assign new set of weights for Projection for combined_softmax_node -> retrieved_node[i] + Used to assign new set of weights for Projection for combined_matches_node -> retrieved_node[i] where i is selected randomly without replacement from (0->memory_capacity) """ @@ -2171,14 +2182,14 @@ def _construct_concatenate_queries_node(self, concatenate_queries)->ProcessingMe return None else: return ProcessingMechanism(function=Concatenate, - input_ports=[{NAME: 'CONCATENATE_QUERIES', + input_ports=[{NAME: 'CONCATENATE', SIZE: len(self.query_input_nodes[i].output_port.value), PROJECTIONS: MappingProjection( name=f'{self.key_names[i]} to CONCATENATE', sender=self.query_input_nodes[i].output_port, matrix=IDENTITY_MATRIX)} for i in range(self.num_keys)], - name='CONCATENATE KEYS') + name=CONCATENATE_QUERIES_NAME) def _construct_match_nodes(self, memory_template, memory_capacity, concatenate_queries, normalize_memories)->list: """Create nodes that, for each key field, compute the similarity between the input and each item in memory. @@ -2225,60 +2236,7 @@ def _construct_match_nodes(self, memory_template, memory_capacity, concatenate_q return match_nodes - def _validate_options_with_learning(self, softmax_choice, normalize_field_weights, enable_learning): - if softmax_choice in {ARG_MAX, PROBABILISTIC} and enable_learning: - warnings.warn(f"The 'softmax_choice' arg of '{self.name}' is set to '{softmax_choice}' with " - f"'enable_learning' set to True (or a list); this will generate an error if its " - f"'learn' method is called. Set 'softmax_choice' to WEIGHTED_AVG before learning.") - - if enable_learning and not normalize_field_weights: - warnings.warn(f"The 'normalize_field_weights' arg of '{self.name}' is set to False with " - f"'enable_learning' set to True (or a list); this may generate an error if " - f"the 'loss_spec' used for learning requires values to be between 0 and 1.") - - def _construct_softmax_nodes(self, memory_capacity, field_weights, - softmax_gain, softmax_threshold, softmax_choice)->list: - """Create nodes that, for each key field, compute the softmax over the similarities between the input and the - memories in the corresponding match_node. 
- """ - - # Get indices of field_weights that specify keys: - key_weights = [field_weights[i] for i in self.key_indices] - - if softmax_choice == ARG_MAX: - # ARG_MAX would return entry multiplied by its dot product - # ARG_MAX_INDICATOR returns the entry unmodified - softmax_choice = ARG_MAX_INDICATOR - - softmax_nodes = [ProcessingMechanism(input_ports={SIZE:memory_capacity, - PROJECTIONS: MappingProjection( - sender=match_node.output_port, - matrix=IDENTITY_MATRIX, - name=f'MATCH to SOFTMAX for {self.key_names[i]}')}, - function=SoftMax(gain=softmax_gain, - mask_threshold=softmax_threshold, - output=softmax_choice, - adapt_entropy_weighting=.95), - name='SOFTMAX' if len(self.match_nodes) == 1 - else f'{self.key_names[i]} [SOFTMAX]') - for i, match_node in enumerate(self.match_nodes)] - - return softmax_nodes - - def _construct_softmax_gain_control_nodes(self, softmax_gain)->list: - """Create nodes that set the softmax gain (inverse temperature) for each softmax_node.""" - - softmax_gain_control_nodes = [] - if softmax_gain == CONTROL: - softmax_gain_control_nodes = [ControlMechanism(monitor_for_control=match_node, - control_signals=[(GAIN, self.softmax_nodes[i])], - function=get_softmax_gain, - name='SOFTMAX GAIN CONTROL' if len(self.softmax_nodes) == 1 - else f'SOFTMAX GAIN CONTROL {self.key_names[i]}') - for i, match_node in enumerate(self.match_nodes)] - - return softmax_gain_control_nodes - + # FIX: CONVERT TO _construct_weight_control_nodes def _construct_field_weight_nodes(self, field_weights, concatenate_queries, use_gating_for_weighting)->list: """Create ProcessingMechanisms that weight each key's softmax contribution to the retrieved values.""" @@ -2291,71 +2249,131 @@ def _construct_field_weight_nodes(self, field_weights, concatenate_queries, use_ PARAMS:{DEFAULT_INPUT: DEFAULT_VARIABLE}, NAME: 'OUTCOME'}, gate=[key_match_pair[1].output_ports[0]], - name= 'RETRIEVAL WEIGHTING' if self.num_keys == 1 - else f'RETRIEVAL WEIGHTING {i}') + name= 'WEIGHT' if self.num_keys == 1 + else f'{self.key_names[i]}{WEIGHT_AFFIX}') for i, key_match_pair in enumerate(zip(self.query_input_nodes, - self.softmax_nodes))] + self.match_nodes))] else: field_weight_nodes = [ProcessingMechanism(input_ports={VARIABLE: np.array(field_weights[self.key_indices[i]]), PARAMS: {DEFAULT_INPUT: DEFAULT_VARIABLE}, NAME: 'FIELD_WEIGHT'}, - name= 'WEIGHT' if self.num_keys == 1 - else f'{self.key_names[i]} [WEIGHT]') + name= WEIGHT if self.num_keys == 1 + else f'{self.key_names[i]}{WEIGHT_AFFIX}') for i in range(self.num_keys)] return field_weight_nodes - def _construct_weighted_softmax_nodes(self, memory_capacity, use_gating_for_weighting)->list: - - if use_gating_for_weighting: - return [] - - weighted_softmax_nodes = \ - [ProcessingMechanism( - default_variable=[self.softmax_nodes[i].output_port.value, - self.softmax_nodes[i].output_port.value], - input_ports=[ - {PROJECTIONS: MappingProjection(sender=sm_fw_pair[0], - matrix=IDENTITY_MATRIX, - name=f'SOFTMAX to WEIGHTED SOFTMAX for {self.key_names[i]}')}, - {PROJECTIONS: MappingProjection(sender=sm_fw_pair[1], - matrix=FULL_CONNECTIVITY_MATRIX, - name=f'WEIGHT to WEIGHTED SOFTMAX for {self.key_names[i]}')}], - function=LinearCombination(operation=PRODUCT), - name=self.key_names[i] + WEIGHTED_SOFTMAX_AFFIX) - for i, sm_fw_pair in enumerate(zip(self.softmax_nodes, + def _construct_weighted_match_nodes(self, memory_capacity, field_weights)->list: + """Create nodes that weight the output of the match node for each key.""" + + weighted_match_nodes = \ + 
[ProcessingMechanism(default_variable=[self.match_nodes[i].output_port.value, + self.match_nodes[i].output_port.value], + input_ports=[{PROJECTIONS: + MappingProjection(sender=match_fw_pair[0], + matrix=IDENTITY_MATRIX, + name=f'{MATCH} to {WEIGHTED_MATCH_NODE_NAME} ' + f'for {self.key_names[i]}')}, + {PROJECTIONS: + MappingProjection(sender=match_fw_pair[1], + matrix=FULL_CONNECTIVITY_MATRIX, + name=f'{WEIGHT} to {WEIGHTED_MATCH_NODE_NAME} ' + f'for {self.key_names[i]}')}], + function=LinearCombination(operation=PRODUCT), + name=self.key_names[i] + WEIGHTED_MATCH_AFFIX) + for i, match_fw_pair in enumerate(zip(self.match_nodes, self.field_weight_nodes))] - return weighted_softmax_nodes - def _construct_combined_softmax_node(self, + return weighted_match_nodes + + def _construct_softmax_gain_control_node(self, softmax_gain)->Optional[ControlMechanism]: + """Create nodes that set the softmax gain (inverse temperature) for each softmax_node.""" + + if softmax_gain == CONTROL: + return ControlMechanism(monitor_for_control=self.combined_matches_node, + control_signals=[(GAIN, self.softmax_node)], + function=get_softmax_gain, + name='SOFTMAX GAIN CONTROL') + else: + return None + + def _construct_combined_matches_node(self, memory_capacity, field_weighting, use_gating_for_weighting )->ProcessingMechanism: - """Create nodes that compute the weighting of each item in memory. - """ + """Create node that combines weighted matches for all keys into one match vector.""" + + if self.num_keys == 1 or self.concatenate_queries_node: + return if not field_weighting or use_gating_for_weighting: - # If use_gating_for_weighting, then softmax_nodes are output gated by gating nodes - input_source = self.softmax_nodes + input_source = self.match_nodes else: - input_source = self.weighted_softmax_nodes + input_source = self.weighted_match_nodes - combined_softmax_node = ( + combined_matches_node = ( ProcessingMechanism(input_ports=[{SIZE:memory_capacity, - # PROJECTIONS:[s for s in input_source]}], PROJECTIONS:[MappingProjection(sender=s, matrix=IDENTITY_MATRIX, - name=f'WEIGHTED SOFTMAX to RETRIEVAL for ' - f'{self.key_names[i]}') + name=f'{WEIGHTED_MATCH_NODE_NAME} ' + f'for {self.key_names[i]} to ' + f'{COMBINE_MATCHES_NODE_NAME}') for i, s in enumerate(input_source)]}], - name=COMBINED_SOFTMAX_NODE_NAME)) + name=COMBINE_MATCHES_NODE_NAME)) + + assert len(combined_matches_node.output_port.value) == memory_capacity, \ + 'PROGRAM ERROR: number of items in combined_matches_node ' \ + f'({len(combined_matches_node.output_port)}) does not match memory_capacity ({self.memory_capacity})' - assert len(combined_softmax_node.output_port.value) == memory_capacity, \ - 'PROGRAM ERROR: number of items in combined_softmax_node ' \ - '({len(combined_softmax_node.output_port)}) does not match memory_capacity ({self.memory_capacity})' + return combined_matches_node - return combined_softmax_node + def _construct_softmax_node(self, memory_capacity, softmax_gain, softmax_threshold, softmax_choice)->list: + """Create node that applies softmax to output of combined_matches_node.""" + + if self.num_keys == 1 or self.concatenate_queries_node: + input_source = self.match_nodes[0] + proj_name =f'{MATCH} to {SOFTMAX_NODE_NAME}' + # elif self.concatenate_queries_node: + # input_source = self.concatenate_queries_node + # proj_name =f'{CONCATENATE_QUERIES_NAME} to {SOFTMAX_NODE_NAME}' + else: + input_source = self.combined_matches_node + proj_name =f'{COMBINE_MATCHES_NODE_NAME} to {SOFTMAX_NODE_NAME}' + + if softmax_choice == ARG_MAX: + # 
ARG_MAX would return entry multiplied by its dot product
+            # ARG_MAX_INDICATOR returns the entry unmodified
+            softmax_choice = ARG_MAX_INDICATOR
+
+        softmax_node = ProcessingMechanism(input_ports={SIZE:memory_capacity,
+                                                        PROJECTIONS: MappingProjection(
+                                                            sender=input_source,
+                                                            matrix=IDENTITY_MATRIX,
+                                                            name=proj_name)},
+                                           function=SoftMax(gain=softmax_gain,
+                                                            mask_threshold=softmax_threshold,
+                                                            output=softmax_choice,
+                                                            adapt_entropy_weighting=.95),
+                                           name=SOFTMAX_NODE_NAME)
+
+        return softmax_node
+
+    def _validate_options_with_learning(self,
+                                        enable_learning,
+                                        use_gating_for_weighting,
+                                        learn_field_weights,
+                                        softmax_choice):
+        if use_gating_for_weighting and learn_field_weights:
+            warnings.warn(f"The 'learn_field_weights' option for '{self.name}' cannot be used with "
+                          f"'use_gating_for_weighting' set to True; this will generate an error if its "
+                          f"'learn' method is called. Set 'use_gating_for_weighting' to False in order "
+                          f"to enable learning of field weights.")
+
+        if softmax_choice in {ARG_MAX, PROBABILISTIC} and enable_learning:
+            warnings.warn(f"The 'softmax_choice' arg of '{self.name}' is set to '{softmax_choice}' with "
+                          f"'enable_learning' set to True (or a list); this will generate an error if its "
+                          f"'learn' method is called. Set 'softmax_choice' to WEIGHTED_AVG before learning.")

     def _construct_retrieved_nodes(self, memory_template)->list:
         """Create nodes that report the value field(s) for the item(s) matched in memory.
@@ -2364,7 +2382,7 @@ def _construct_retrieved_nodes(self, memory_template)->list:
             [ProcessingMechanism(input_ports={SIZE: len(self.query_input_nodes[i].variable[0]),
                                               PROJECTIONS: MappingProjection(
-                                                  sender=self.combined_softmax_node,
+                                                  sender=self.softmax_node,
                                                   matrix=memory_template[:,i],
                                                   name=f'MEMORY FOR {self.key_names[i]} [RETRIEVE KEY]')
                                               },
@@ -2375,7 +2393,7 @@ def _construct_retrieved_nodes(self, memory_template)->list:
             [ProcessingMechanism(input_ports={SIZE: len(self.value_input_nodes[i].variable[0]),
                                               PROJECTIONS: MappingProjection(
-                                                  sender=self.combined_softmax_node,
+                                                  sender=self.softmax_node,
                                                   matrix=memory_template[:, i + self.num_keys],
                                                   name=f'MEMORY FOR {self.value_names[i]} [RETRIEVE VALUE]')},
@@ -2466,13 +2484,13 @@ def execute(self, **kwargs):
         """Set input to weights of Projections to match_nodes and retrieved_nodes if not use_storage_node."""
         results = super().execute(inputs=inputs, context=context, **kwargs)
-        if not self.use_storage_node:
+        if not self._use_storage_node:
             self._store_memory(inputs, context)
         return results

     def _store_memory(self, inputs, context):
         """Store inputs to query and value nodes in memory
-        Store memories in weights of Projections to softmax_nodes (queries) and retrieved_nodes (values).
+        Store memories in weights of Projections to match_nodes (queries) and retrieved_nodes (values).
Note: inputs argument is ignored (included for compatibility with function of MemoryFunctions class; storage is handled by call to EMComopsition._encode_memory """ @@ -2549,13 +2567,18 @@ def _encode_memory(self, context=None): @handle_external_context() def learn(self, *args, **kwargs)->list: """Override to check for inappropriate use of ARG_MAX or PROBABILISTIC options for retrieval with learning""" - arg = self.parameters.softmax_choice.get(kwargs[CONTEXT]) - if arg in {ARG_MAX, PROBABILISTIC}: + softmax_choice = self.parameters.softmax_choice.get(kwargs[CONTEXT]) + use_gating_for_weighting = self._use_gating_for_weighting + learn_field_weights = self.parameters.learn_field_weights.get(kwargs[CONTEXT]) + + if use_gating_for_weighting and learn_field_weights: + raise EMCompositionError(f"Field weights cannot be learned when 'use_gating_for_weighting' is True; " + f"Construct '{self.name}' with the 'learn_field_weights' arg set to False.") + + if softmax_choice in {ARG_MAX, PROBABILISTIC}: raise EMCompositionError(f"The ARG_MAX and PROBABILISTIC options for the 'softmax_choice' arg " f"of '{self.name}' cannot be used during learning; change to WEIGHTED_AVG.") - if self.loss_spec in {Loss.BINARY_CROSS_ENTROPY} and not self.normalize_field_weights: - raise EMCompositionError(f"The 'loss_spec' arg of '{self.name}' is set to '{self.loss_spec.name}' with " - f"'normalize_field_weights' set to False; this must be True to use this loss_spec.") + return super().learn(*args, **kwargs) def _get_execution_mode(self, execution_mode): diff --git a/psyneulink/library/compositions/pytorchwrappers.py b/psyneulink/library/compositions/pytorchwrappers.py index 69a4c9a74dc..cdf13733d80 100644 --- a/psyneulink/library/compositions/pytorchwrappers.py +++ b/psyneulink/library/compositions/pytorchwrappers.py @@ -1123,7 +1123,8 @@ class PytorchProjectionWrapper(): """ - def __init__(self, projection, + def __init__(self, + projection, pnl_proj, component_idx, port_idx, device, @@ -1131,7 +1132,7 @@ def __init__(self, projection, receiver=None, context=None): self._projection = projection # Projection being wrapped (may *not* be the one being learned; see note above) - self._pnl_proj = pnl_proj # Projection that directly projects to/from sender/receiver (see above) + self._pnl_proj = pnl_proj # Projection that directly projects to/from sender/receiver (see above) self._idx = component_idx # Index of Projection in Composition's list of projections self._port_idx = port_idx # Index of sender's port (used by LLVM) self._value_idx = 0 # Index of value in sender's value (used in aggregate_afferents) diff --git a/tests/composition/test_emcomposition.py b/tests/composition/test_emcomposition.py index d0206a020a4..55c01ad7b51 100644 --- a/tests/composition/test_emcomposition.py +++ b/tests/composition/test_emcomposition.py @@ -185,16 +185,14 @@ def test_structure(self, assert isinstance(em.concatenate_queries_node, Mechanism) == concatenate_node if em.concatenate_queries: assert em.field_weight_nodes == [] - assert bool(softmax_gain == CONTROL) == bool(len(em.softmax_gain_control_nodes)) + assert bool(softmax_gain == CONTROL) == bool(em.softmax_gain_control_node) else: if num_keys > 1: assert len(em.field_weight_nodes) == num_keys else: assert em.field_weight_nodes == [] if softmax_gain == CONTROL: - assert len(em.softmax_gain_control_nodes) == num_keys - else: - assert em.softmax_gain_control_nodes == [] + assert em.softmax_gain_control_node assert len(em.retrieved_nodes) == num_fields def test_memory_fill(start, 
memory_fill): @@ -252,23 +250,6 @@ def test_softmax_choice(self): f"'learn' method is called. Set 'softmax_choice' to WEIGHTED_AVG before learning.") assert warning_msg in str(warning[0].message) - def test_normalize_field_weights_with_learning_enabled(self): - with pytest.warns(UserWarning) as warning: - em = EMComposition(normalize_field_weights=False, - enable_learning=True, - memory_fill=(0,.1), - loss_spec=pnl.Loss.BINARY_CROSS_ENTROPY) - warning_msg = (f"The 'normalize_field_weights' arg of 'EM_Composition' is set to False with " - f"'enable_learning' set to True (or a list); this may generate an error if the " - f"'loss_spec' used for learning requires values to be between 0 and 1.") - assert warning_msg in str(warning[0].message) - - with pytest.raises(EMCompositionError) as error_text: - em.learn() - assert (f"The 'loss_spec' arg of 'EM_Composition' is set to 'BINARY_CROSS_ENTROPY' with " - f"'normalize_field_weights' set to False; this must be True to use this loss_spec." - in str(error_text.value)) - @pytest.mark.pytorch class TestExecution: @@ -304,11 +285,11 @@ class TestExecution: (4, [[[1,2,3],[4,6]], # Equal field_weights (but not concatenated) [[1,2,5],[4,6]], [[1,2,10],[4,6]]], (0,.01), 4, 0, [1,1], None, None, 100, 0, [[[1, 2, 3]], - [[4, 6]]], [[0.90323092, - 1.80586151, - 4.00008914], - [3.61161172, - 5.41731422]] + [[4, 6]]], [[0.99750462, + 1.99499376, + 3.51623568], + [3.98998465, + 5.9849743]] ), (5, [[[1,2,3],[4,6]], # Equal field_weights with concatenation [[1,2,5],[4,8]], @@ -321,44 +302,44 @@ class TestExecution: (6, [[[1,2,3],[4,6]], # Unequal field_weights [[1,2,5],[4,8]], [[1,2,10],[4,10]]], (0,.01), 4, 0, [9,1], None, None, 100, 0, [[[1, 2, 3]], - [[4, 6]]], [[0.96869477, - 1.93719534, - 3.1307577], - [3.87435467, - 6.02081578]]), + [[4, 6]]], [[0.99996025, + 1.99992024, + 3.19317783], + [3.99984044, + 6.19219795]]), (7, [[[1,2,3],[4,6]], # Store + no decay [[1,2,5],[4,8]], [[1,2,10],[4,10]]], (0,.01), 4, 0, [9,1], None, None, 100, 1, [[[1, 2, 3]], - [[4, 6]]], [[0.96869477, - 1.93719534, - 3.1307577], - [3.87435467, - 6.02081578]]), + [[4, 6]]], [[0.99996025, + 1.99992024, + 3.19317783], + [3.99984044, + 6.19219795]]), (8, [[[1,2,3],[4,6]], # Store + default decay (should be AUTO) [[1,2,5],[4,8]], [[1,2,10],[4,10]]], (0,.01), 4, None, [9,1], None, None, 100, 1, [[[1, 2, 3]], - [[4, 6]]], [[0.96869477, - 1.93719534, - 3.1307577 ], - [3.87435467, - 6.02081578]]), + [[4, 6]]], [[0.99996025, + 1.99992024, + 3.19317783], + [3.99984044, + 6.19219795]]), (9, [[[1,2,3],[4,6]], # Store + explicit AUTO decay [[1,2,5],[4,8]], [[1,2,10],[4,10]]], (0,.01), 4, AUTO, [9,1], None, None, 100, 1, [[[1, 2, 3]], - [[4, 6]]], [[0.96869477, - 1.93719534, - 3.1307577 ], - [3.87435467, - 6.02081578]]), + [[4, 6]]], [[0.99996025, + 1.99992024, + 3.19317783], + [3.99984044, + 6.19219795]]), (10, [[[1,2,3],[4,6]], # Store + numerical decay [[1,2,5],[4,8]], [[1,2,10],[4,10]]], (0,.01), 4, .1, [9,1], None, None, 100, 1, [[[1, 2, 3]], - [[4, 6]]], [[0.96869477, - 1.93719534, - 3.1307577 ], - [3.87435467, - 6.02081578]]), - (11, [[[1,2,3],[4,6]], # Same as 10, but with equal weights and concatenate keysdd + [[4, 6]]], [[0.99996025, + 1.99992024, + 3.19317783], + [3.99984044, + 6.19219795]]), + (11, [[[1,2,3],[4,6]], # Same as 10, but with equal weights and concatenate keys [[1,2,5],[4,8]], [[1,2,10],[4,10]]], (0,.01), 4, .1, [1,1], True, None, 100, 1, [[[1, 2, 3]], [[4, 6]]], [[0.99922544, @@ -366,6 +347,8 @@ class TestExecution: 3.38989346], [3.99689126, 6.38682264]]), +# [3.99984044, 
+# 6.19219795]]), ] args_names = "test_num, memory_template, memory_fill, memory_capacity, memory_decay_rate, field_weights, " \ @@ -376,7 +359,7 @@ class TestExecution: @pytest.mark.parametrize('enable_learning', [False, True], ids=['no_learning','learning']) @pytest.mark.composition @pytest.mark.parametrize('exec_mode', [pnl.ExecutionMode.Python, pnl.ExecutionMode.PyTorch]) - def test_simple_execution_witemhout_learning(self, + def test_simple_execution_without_learning(self, exec_mode, enable_learning, test_num, @@ -441,10 +424,10 @@ def test_simple_execution_witemhout_learning(self, np.testing.assert_allclose(retrieved, expected) # Validate that sum of weighted softmax distributions in field_weight_node itself sums to 1 - np.testing.assert_allclose(np.sum(em.combined_softmax_node.value), 1.0, atol=1e-15) + np.testing.assert_allclose(np.sum(em.softmax_node.value), 1.0, atol=1e-15) # Validate that sum of its output ports also sums to 1 - np.testing.assert_allclose(np.sum([port.value for port in em.combined_softmax_node.output_ports]), + np.testing.assert_allclose(np.sum([port.value for port in em.softmax_node.output_ports]), 1.0, atol=1e-15) # Validate storage From 1a90de7c72973af9de26c10aadc398552ff54393 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Sun, 3 Nov 2024 13:28:23 -0500 Subject: [PATCH 385/410] llvm/scheduler: Move compiled scheduler to its own file Signed-off-by: Jan Vesely --- psyneulink/core/llvm/codegen.py | 8 +- psyneulink/core/llvm/execution.py | 4 +- psyneulink/core/llvm/helpers.py | 312 ----------------------------- psyneulink/core/llvm/scheduler.py | 313 ++++++++++++++++++++++++++++++ 4 files changed, 319 insertions(+), 318 deletions(-) create mode 100644 psyneulink/core/llvm/scheduler.py diff --git a/psyneulink/core/llvm/codegen.py b/psyneulink/core/llvm/codegen.py index 259c00316cb..c476f5280d5 100644 --- a/psyneulink/core/llvm/codegen.py +++ b/psyneulink/core/llvm/codegen.py @@ -18,7 +18,7 @@ from psyneulink.core.globals.keywords import AFTER, BEFORE from psyneulink.core.scheduling.condition import Never from psyneulink.core.scheduling.time import TimeScale -from . import helpers +from . 
import helpers, scheduler from .debug import debug_env from .warnings import PNLCompilerWarning @@ -604,7 +604,7 @@ def gen_node_assembly(ctx, composition, node, *, tags:frozenset): if not is_mech and "reset" not in tags: # Add condition struct of the parent composition # This includes structures of all nested compositions - cond_gen = helpers.ConditionGenerator(ctx, composition) + cond_gen = scheduler.ConditionGenerator(ctx, composition) cond_ty = cond_gen.get_condition_struct_type().as_pointer() args.append(cond_ty) @@ -762,7 +762,7 @@ def gen_node_assembly(ctx, composition, node, *, tags:frozenset): @contextmanager def _gen_composition_exec_context(ctx, composition, *, tags:frozenset, suffix="", extra_args=[]): - cond_gen = helpers.ConditionGenerator(ctx, composition) + cond_gen = scheduler.ConditionGenerator(ctx, composition) name = "_".join(("wrap_exec", *tags, composition.name + suffix)) args = [ctx.get_state_struct_type(composition).as_pointer(), @@ -1098,7 +1098,7 @@ def gen_composition_run(ctx, composition, *, tags:frozenset): _reset_composition_nodes_exec_counts(ctx, builder, composition, state, [TimeScale.RUN]) # Allocate and initialize condition structure - cond_gen = helpers.ConditionGenerator(ctx, composition) + cond_gen = scheduler.ConditionGenerator(ctx, composition) cond_type = cond_gen.get_condition_struct_type() cond = builder.alloca(cond_type, name="scheduler_metadata") cond_init = cond_type(cond_gen.get_condition_initializer()) diff --git a/psyneulink/core/llvm/execution.py b/psyneulink/core/llvm/execution.py index e877bc0a5a7..02a7cecad09 100644 --- a/psyneulink/core/llvm/execution.py +++ b/psyneulink/core/llvm/execution.py @@ -23,7 +23,7 @@ from psyneulink.core import llvm as pnlvm from psyneulink.core.globals.context import Context -from . import helpers, jit_engine, builder_context +from . 
import builder_context, helpers, jit_engine, scheduler from .debug import debug_env __all__ = ['CompExecution', 'FuncExecution', 'MechExecution'] @@ -347,7 +347,7 @@ def _set_bin_node(self, node): @property def _conditions(self): if self.__conditions is None: - gen = helpers.ConditionGenerator(None, self._composition) + gen = scheduler.ConditionGenerator(None, self._composition) conditions_ctype = self._bin_func.byref_arg_types[4] conditions_initializer = gen.get_condition_initializer() diff --git a/psyneulink/core/llvm/helpers.py b/psyneulink/core/llvm/helpers.py index c6a2c991122..c3dc3336bfe 100644 --- a/psyneulink/core/llvm/helpers.py +++ b/psyneulink/core/llvm/helpers.py @@ -9,17 +9,12 @@ # ********************************************* PNL LLVM helpers ************************************************************** import ast -from enum import IntEnum from contextlib import contextmanager import warnings from llvmlite import ir - from .debug import debug_env -from psyneulink.core.scheduling.condition import All, AllHaveRun, Always, Any, AtPass, AtTrial, BeforeNCalls, AtNCalls, AfterNCalls, \ - EveryNCalls, Never, Not, WhenFinished, WhenFinishedAny, WhenFinishedAll, Threshold -from psyneulink.core.scheduling.time import TimeScale @contextmanager @@ -449,310 +444,3 @@ def printf_float_matrix(ctx, builder, matrix, prefix="", suffix="\n", *, tags:se printf_float_array(ctx, b1, row, suffix="\n", tags=tags) printf(ctx, builder, suffix, tags=tags) - - -class ConditionGenerator: - class TimeIndex(IntEnum): - TRIAL = 0, - PASS = 1, - STEP = 2, - - def __init__(self, ctx, composition): - self.ctx = ctx - self.composition = composition - self._zero = ctx.int32_ty(0) if ctx is not None else None - - def get_private_condition_struct_type(self, composition): - time_stamp_struct = ir.LiteralStructType([self.ctx.int32_ty, # Trial - self.ctx.int32_ty, # Pass - self.ctx.int32_ty]) # Step - - assert len(time_stamp_struct) == len(self.TimeIndex) - - nodes_time_stamps_array = ir.ArrayType(time_stamp_struct, len(composition.nodes)) - - return ir.LiteralStructType((time_stamp_struct, nodes_time_stamps_array)) - - def get_private_condition_initializer(self, composition): - return ((0, 0, 0), tuple((-1, -1, -1) for _ in composition.nodes)) - - def get_condition_struct_type(self, node=None): - node = self.composition if node is None else node - - subnodes = getattr(node, 'nodes', []) - structs = [self.get_condition_struct_type(n) for n in subnodes] - if len(structs) != 0: - structs.insert(0, self.get_private_condition_struct_type(node)) - - return ir.LiteralStructType(structs) - - def get_condition_initializer(self, node=None): - node = self.composition if node is None else node - - subnodes = getattr(node, 'nodes', []) - data = [self.get_condition_initializer(n) for n in subnodes] - if len(data) != 0: - data.insert(0, self.get_private_condition_initializer(node)) - - return tuple(data) - - def bump_ts(self, builder, cond_ptr, count=(0, 0, 1)): - """ - Increments the time structure of the composition. - Count should be a tuple where there is a number in only one spot, and zeroes elsewhere. - Indices greater than the incremented one are zeroed. 
- """ - - # Only one element should be non-zero - assert count.count(0) == len(count) - 1 - - # Get timestruct pointer - ts_ptr = self.__get_global_ts_ptr(builder, cond_ptr) - ts = builder.load(ts_ptr) - - assert len(ts.type) == len(count) - - # Update run, pass, step of ts - for idx in range(len(ts.type)): - if all(v == 0 for v in count[:idx]): - el = builder.extract_value(ts, idx) - el = builder.add(el, el.type(count[idx])) - else: - el = self.ctx.int32_ty(0) - - ts = builder.insert_value(ts, el, idx) - - builder.store(ts, ts_ptr) - return builder - - def ts_compare(self, builder, ts1, ts2, comp): - assert comp == '<' - - # True if all elements to the left of the current one are equal - prefix_eq = self.ctx.bool_ty(1) - result = self.ctx.bool_ty(0) - - assert ts1.type == ts2.type - for element in range(len(ts1.type)): - a = builder.extract_value(ts1, element) - b = builder.extract_value(ts2, element) - - # Use existing prefix_eq to construct expression - # for the current element - element_comp = builder.icmp_signed(comp, a, b) - current_comp = builder.and_(prefix_eq, element_comp) - result = builder.or_(result, current_comp) - - # Update prefix_eq - element_eq = builder.icmp_signed('==', a, b) - prefix_eq = builder.and_(prefix_eq, element_eq) - - return result - - def __get_global_ts_ptr(self, builder, cond_ptr): - # derefence the structure, the first element (private structure), - # and the first element of the private strucutre is the global ts. - return builder.gep(cond_ptr, [self._zero, self._zero, self._zero]) - - def __get_node_ts_ptr(self, builder, cond_ptr, node): - node_idx = self.ctx.int32_ty(self.composition.nodes.index(node)) - - # derefence the structure, the first element (private structure), the - # second element is the node time stamp array, use index in the array - return builder.gep(cond_ptr, [self._zero, self._zero, self.ctx.int32_ty(1), node_idx]) - - def __get_node_ts(self, builder, cond_ptr, node): - ts_ptr = self.__get_node_ts_ptr(builder, cond_ptr, node) - return builder.load(ts_ptr) - - def get_global_ts(self, builder, cond_ptr): - ts_ptr = builder.gep(cond_ptr, [self._zero, self._zero, self._zero]) - return builder.load(ts_ptr) - - def _extract_global_time(self, builder, cond_ptr, time_index): - global_ts = self.get_global_ts(builder, cond_ptr) - return builder.extract_value(global_ts, time_index.value) - - def get_global_trial(self, builder, cond_ptr): - return self._extract_global_time(builder, cond_ptr, self.TimeIndex.TRIAL) - - def get_global_pass(self, builder, cond_ptr): - return self._extract_global_time(builder, cond_ptr, self.TimeIndex.PASS) - - def get_global_step(self, builder, cond_ptr): - return self._extract_global_time(builder, cond_ptr, self.TimeIndex.STEP) - - def generate_update_after_node_execution(self, builder, cond_ptr, node): - # Update time stamp of the last execution - global_ts_ptr = self.__get_global_ts_ptr(builder, cond_ptr) - global_ts = builder.load(global_ts_ptr) - - node_ts_ptr = self.__get_node_ts_ptr(builder, cond_ptr, node) - builder.store(global_ts, node_ts_ptr) - - def _node_executions_for_scale(self, builder, node, node_states, time_scale:TimeScale): - node_idx = self.composition._get_node_index(node) - node_state = builder.gep(node_states, [self._zero, self.ctx.int32_ty(node_idx)]) - num_exec_ptr = get_state_ptr(builder, node, node_state, "num_executions") - - count_ptr = builder.gep(num_exec_ptr, [self._zero, self.ctx.int32_ty(time_scale.value)]) - return builder.load(count_ptr) - - def generate_sched_condition(self, 
builder, condition, cond_ptr, self_node, is_finished_callbacks, nodes_states): - - if isinstance(condition, Always): - return self.ctx.bool_ty(1) - - if isinstance(condition, Never): - return self.ctx.bool_ty(0) - - elif isinstance(condition, Not): - orig_condition = self.generate_sched_condition(builder, condition.condition, cond_ptr, self_node, is_finished_callbacks, nodes_states) - return builder.not_(orig_condition) - - elif isinstance(condition, All): - agg_cond = self.ctx.bool_ty(1) - for cond in condition.args: - cond_res = self.generate_sched_condition(builder, cond, cond_ptr, self_node, is_finished_callbacks, nodes_states) - agg_cond = builder.and_(agg_cond, cond_res) - return agg_cond - - elif isinstance(condition, AllHaveRun): - # Extract dependencies - dependencies = self.composition.nodes - if len(condition.args) > 0: - dependencies = condition.args - - run_cond = self.ctx.bool_ty(1) - for node in dependencies: - count = self._node_executions_for_scale(builder, node, nodes_states, condition.time_scale) - - node_ran = builder.icmp_unsigned(">", count, count.type(0)) - run_cond = builder.and_(run_cond, node_ran) - - return run_cond - - elif isinstance(condition, Any): - agg_cond = self.ctx.bool_ty(0) - for cond in condition.args: - cond_res = self.generate_sched_condition(builder, cond, cond_ptr, self_node, is_finished_callbacks, nodes_states) - agg_cond = builder.or_(agg_cond, cond_res) - return agg_cond - - elif isinstance(condition, AtTrial): - trial_num = condition.args[0] - current_trial = self.get_global_trial(builder, cond_ptr) - return builder.icmp_unsigned("==", current_trial, current_trial.type(trial_num)) - - elif isinstance(condition, AtPass): - pass_num = condition.args[0] - current_pass = self.get_global_pass(builder, cond_ptr) - return builder.icmp_unsigned("==", current_pass, current_pass.type(pass_num)) - - elif isinstance(condition, EveryNCalls): - target, count = condition.args - assert count == 1, "EveryNCalls is only supported with count == 1 (count: {})".format(count) - - target_ts = self.__get_node_ts(builder, cond_ptr, target) - node_ts = self.__get_node_ts(builder, cond_ptr, self_node) - - # If target ran after node did its TS will be greater node's - return self.ts_compare(builder, node_ts, target_ts, '<') - - elif isinstance(condition, BeforeNCalls): - node, count = condition.args - num_execs = self._node_executions_for_scale(builder, node, nodes_states, condition.time_scale) - - return builder.icmp_unsigned('<', num_execs, num_execs.type(count)) - - elif isinstance(condition, AtNCalls): - node, count = condition.args - num_execs = self._node_executions_for_scale(builder, node, nodes_states, condition.time_scale) - - return builder.icmp_unsigned('==', num_execs, num_execs.type(count)) - - elif isinstance(condition, AfterNCalls): - node, count = condition.args - num_execs = self._node_executions_for_scale(builder, node, nodes_states, condition.time_scale) - - return builder.icmp_unsigned('>=', num_execs, num_execs.type(count)) - - elif isinstance(condition, WhenFinished): - # The first argument is the target node - assert len(condition.args) == 1 - target = is_finished_callbacks[condition.args[0]] - is_finished_f = self.ctx.import_llvm_function(target[0], tags=frozenset({"is_finished", "node_assembly"})) - return builder.call(is_finished_f, target[1]) - - elif isinstance(condition, WhenFinishedAny): - assert len(condition.args) > 0 - - run_cond = self.ctx.bool_ty(0) - for node in condition.args: - target = is_finished_callbacks[node] - is_finished_f = 
self.ctx.import_llvm_function(target[0], tags=frozenset({"is_finished", "node_assembly"})) - node_is_finished = builder.call(is_finished_f, target[1]) - - run_cond = builder.or_(run_cond, node_is_finished) - - return run_cond - - elif isinstance(condition, WhenFinishedAll): - assert len(condition.args) > 0 - - run_cond = self.ctx.bool_ty(1) - for node in condition.args: - target = is_finished_callbacks[node] - is_finished_f = self.ctx.import_llvm_function(target[0], tags=frozenset({"is_finished", "node_assembly"})) - node_is_finished = builder.call(is_finished_f, target[1]) - - run_cond = builder.and_(run_cond, node_is_finished) - - return run_cond - - elif isinstance(condition, Threshold): - target = condition.dependency - param = condition.parameter - threshold = condition.threshold - comparator = condition.comparator - indices = condition.indices - - # Convert execution_count to ('num_executions', TimeScale.LIFE). - # These two are identical in compiled semantics. - if param == 'execution_count': - assert indices is None - param = 'num_executions' - indices = TimeScale.LIFE - - assert param in target.llvm_state_ids, ( - f"Threshold for {target} only supports items in llvm_state_ids" - f" ({target.llvm_state_ids})" - ) - - node_idx = self.composition._get_node_index(target) - node_state = builder.gep(nodes_states, [self.ctx.int32_ty(0), self.ctx.int32_ty(node_idx)]) - param_ptr = get_state_ptr(builder, target, node_state, param) - - # parameters in state include history of at least one element - # so they are always arrays. - assert isinstance(param_ptr.type.pointee, ir.ArrayType) - - if indices is None: - indices = [0, 0] - elif isinstance(indices, TimeScale): - indices = [indices.value] - - param_ptr = builder.gep(param_ptr, [self.ctx.int32_ty(x) for x in [0] + list(indices)]) - - val = builder.load(param_ptr) - val = convert_type(builder, val, ir.DoubleType()) - threshold = val.type(threshold) - - if comparator == '==': - return is_close(self.ctx, builder, val, threshold, condition.rtol, condition.atol) - elif comparator == '!=': - return builder.not_(is_close(self.ctx, builder, val, threshold, condition.rtol, condition.atol)) - else: - return builder.fcmp_ordered(comparator, val, threshold) - - assert False, "Unsupported scheduling condition: {}".format(condition) diff --git a/psyneulink/core/llvm/scheduler.py b/psyneulink/core/llvm/scheduler.py new file mode 100644 index 00000000000..bb2a1d5300a --- /dev/null +++ b/psyneulink/core/llvm/scheduler.py @@ -0,0 +1,313 @@ +from enum import IntEnum +from llvmlite import ir + +from . 
import helpers +from psyneulink.core.scheduling.time import TimeScale +from psyneulink.core.scheduling.condition import All, AllHaveRun, Always, Any, AtPass, AtTrial, BeforeNCalls, AtNCalls, AfterNCalls, \ + EveryNCalls, Never, Not, WhenFinished, WhenFinishedAny, WhenFinishedAll, Threshold + +class ConditionGenerator: + class TimeIndex(IntEnum): + TRIAL = 0, + PASS = 1, + STEP = 2, + + def __init__(self, ctx, composition): + self.ctx = ctx + self.composition = composition + self._zero = ctx.int32_ty(0) if ctx is not None else None + + def get_private_condition_struct_type(self, composition): + time_stamp_struct = ir.LiteralStructType([self.ctx.int32_ty, # Trial + self.ctx.int32_ty, # Pass + self.ctx.int32_ty]) # Step + + assert len(time_stamp_struct) == len(self.TimeIndex) + + nodes_time_stamps_array = ir.ArrayType(time_stamp_struct, len(composition.nodes)) + + return ir.LiteralStructType((time_stamp_struct, nodes_time_stamps_array)) + + def get_private_condition_initializer(self, composition): + return ((0, 0, 0), tuple((-1, -1, -1) for _ in composition.nodes)) + + def get_condition_struct_type(self, node=None): + node = self.composition if node is None else node + + subnodes = getattr(node, 'nodes', []) + structs = [self.get_condition_struct_type(n) for n in subnodes] + if len(structs) != 0: + structs.insert(0, self.get_private_condition_struct_type(node)) + + return ir.LiteralStructType(structs) + + def get_condition_initializer(self, node=None): + node = self.composition if node is None else node + + subnodes = getattr(node, 'nodes', []) + data = [self.get_condition_initializer(n) for n in subnodes] + if len(data) != 0: + data.insert(0, self.get_private_condition_initializer(node)) + + return tuple(data) + + def bump_ts(self, builder, cond_ptr, count=(0, 0, 1)): + """ + Increments the time structure of the composition. + Count should be a tuple where there is a number in only one spot, and zeroes elsewhere. + Indices greater than the incremented one are zeroed. + """ + + # Only one element should be non-zero + assert count.count(0) == len(count) - 1 + + # Get timestruct pointer + ts_ptr = self.__get_global_ts_ptr(builder, cond_ptr) + ts = builder.load(ts_ptr) + + assert len(ts.type) == len(count) + + # Update run, pass, step of ts + for idx in range(len(ts.type)): + if all(v == 0 for v in count[:idx]): + el = builder.extract_value(ts, idx) + el = builder.add(el, el.type(count[idx])) + else: + el = self.ctx.int32_ty(0) + + ts = builder.insert_value(ts, el, idx) + + builder.store(ts, ts_ptr) + return builder + + def ts_compare(self, builder, ts1, ts2, comp): + assert comp == '<' + + # True if all elements to the left of the current one are equal + prefix_eq = self.ctx.bool_ty(1) + result = self.ctx.bool_ty(0) + + assert ts1.type == ts2.type + for element in range(len(ts1.type)): + a = builder.extract_value(ts1, element) + b = builder.extract_value(ts2, element) + + # Use existing prefix_eq to construct expression + # for the current element + element_comp = builder.icmp_signed(comp, a, b) + current_comp = builder.and_(prefix_eq, element_comp) + result = builder.or_(result, current_comp) + + # Update prefix_eq + element_eq = builder.icmp_signed('==', a, b) + prefix_eq = builder.and_(prefix_eq, element_eq) + + return result + + def __get_global_ts_ptr(self, builder, cond_ptr): + # derefence the structure, the first element (private structure), + # and the first element of the private strucutre is the global ts. 
+ return builder.gep(cond_ptr, [self._zero, self._zero, self._zero]) + + def __get_node_ts_ptr(self, builder, cond_ptr, node): + node_idx = self.ctx.int32_ty(self.composition.nodes.index(node)) + + # derefence the structure, the first element (private structure), the + # second element is the node time stamp array, use index in the array + return builder.gep(cond_ptr, [self._zero, self._zero, self.ctx.int32_ty(1), node_idx]) + + def __get_node_ts(self, builder, cond_ptr, node): + ts_ptr = self.__get_node_ts_ptr(builder, cond_ptr, node) + return builder.load(ts_ptr) + + def get_global_ts(self, builder, cond_ptr): + ts_ptr = builder.gep(cond_ptr, [self._zero, self._zero, self._zero]) + return builder.load(ts_ptr) + + def _extract_global_time(self, builder, cond_ptr, time_index): + global_ts = self.get_global_ts(builder, cond_ptr) + return builder.extract_value(global_ts, time_index.value) + + def get_global_trial(self, builder, cond_ptr): + return self._extract_global_time(builder, cond_ptr, self.TimeIndex.TRIAL) + + def get_global_pass(self, builder, cond_ptr): + return self._extract_global_time(builder, cond_ptr, self.TimeIndex.PASS) + + def get_global_step(self, builder, cond_ptr): + return self._extract_global_time(builder, cond_ptr, self.TimeIndex.STEP) + + def generate_update_after_node_execution(self, builder, cond_ptr, node): + # Update time stamp of the last execution + global_ts_ptr = self.__get_global_ts_ptr(builder, cond_ptr) + global_ts = builder.load(global_ts_ptr) + + node_ts_ptr = self.__get_node_ts_ptr(builder, cond_ptr, node) + builder.store(global_ts, node_ts_ptr) + + def _node_executions_for_scale(self, builder, node, node_states, time_scale:TimeScale): + node_idx = self.composition._get_node_index(node) + node_state = builder.gep(node_states, [self._zero, self.ctx.int32_ty(node_idx)]) + num_exec_ptr = helpers.get_state_ptr(builder, node, node_state, "num_executions") + + count_ptr = builder.gep(num_exec_ptr, [self._zero, self.ctx.int32_ty(time_scale.value)]) + return builder.load(count_ptr) + + def generate_sched_condition(self, builder, condition, cond_ptr, self_node, is_finished_callbacks, nodes_states): + + if isinstance(condition, Always): + return self.ctx.bool_ty(1) + + if isinstance(condition, Never): + return self.ctx.bool_ty(0) + + elif isinstance(condition, Not): + orig_condition = self.generate_sched_condition(builder, condition.condition, cond_ptr, self_node, is_finished_callbacks, nodes_states) + return builder.not_(orig_condition) + + elif isinstance(condition, All): + agg_cond = self.ctx.bool_ty(1) + for cond in condition.args: + cond_res = self.generate_sched_condition(builder, cond, cond_ptr, self_node, is_finished_callbacks, nodes_states) + agg_cond = builder.and_(agg_cond, cond_res) + return agg_cond + + elif isinstance(condition, AllHaveRun): + # Extract dependencies + dependencies = self.composition.nodes + if len(condition.args) > 0: + dependencies = condition.args + + run_cond = self.ctx.bool_ty(1) + for node in dependencies: + count = self._node_executions_for_scale(builder, node, nodes_states, condition.time_scale) + + node_ran = builder.icmp_unsigned(">", count, count.type(0)) + run_cond = builder.and_(run_cond, node_ran) + + return run_cond + + elif isinstance(condition, Any): + agg_cond = self.ctx.bool_ty(0) + for cond in condition.args: + cond_res = self.generate_sched_condition(builder, cond, cond_ptr, self_node, is_finished_callbacks, nodes_states) + agg_cond = builder.or_(agg_cond, cond_res) + return agg_cond + + elif isinstance(condition, 
AtTrial): + trial_num = condition.args[0] + current_trial = self.get_global_trial(builder, cond_ptr) + return builder.icmp_unsigned("==", current_trial, current_trial.type(trial_num)) + + elif isinstance(condition, AtPass): + pass_num = condition.args[0] + current_pass = self.get_global_pass(builder, cond_ptr) + return builder.icmp_unsigned("==", current_pass, current_pass.type(pass_num)) + + elif isinstance(condition, EveryNCalls): + target, count = condition.args + assert count == 1, "EveryNCalls is only supported with count == 1 (count: {})".format(count) + + target_ts = self.__get_node_ts(builder, cond_ptr, target) + node_ts = self.__get_node_ts(builder, cond_ptr, self_node) + + # If target ran after node did its TS will be greater node's + return self.ts_compare(builder, node_ts, target_ts, '<') + + elif isinstance(condition, BeforeNCalls): + node, count = condition.args + num_execs = self._node_executions_for_scale(builder, node, nodes_states, condition.time_scale) + + return builder.icmp_unsigned('<', num_execs, num_execs.type(count)) + + elif isinstance(condition, AtNCalls): + node, count = condition.args + num_execs = self._node_executions_for_scale(builder, node, nodes_states, condition.time_scale) + + return builder.icmp_unsigned('==', num_execs, num_execs.type(count)) + + elif isinstance(condition, AfterNCalls): + node, count = condition.args + num_execs = self._node_executions_for_scale(builder, node, nodes_states, condition.time_scale) + + return builder.icmp_unsigned('>=', num_execs, num_execs.type(count)) + + elif isinstance(condition, WhenFinished): + # The first argument is the target node + assert len(condition.args) == 1 + target = is_finished_callbacks[condition.args[0]] + is_finished_f = self.ctx.import_llvm_function(target[0], tags=frozenset({"is_finished", "node_assembly"})) + return builder.call(is_finished_f, target[1]) + + elif isinstance(condition, WhenFinishedAny): + assert len(condition.args) > 0 + + run_cond = self.ctx.bool_ty(0) + for node in condition.args: + target = is_finished_callbacks[node] + is_finished_f = self.ctx.import_llvm_function(target[0], tags=frozenset({"is_finished", "node_assembly"})) + node_is_finished = builder.call(is_finished_f, target[1]) + + run_cond = builder.or_(run_cond, node_is_finished) + + return run_cond + + elif isinstance(condition, WhenFinishedAll): + assert len(condition.args) > 0 + + run_cond = self.ctx.bool_ty(1) + for node in condition.args: + target = is_finished_callbacks[node] + is_finished_f = self.ctx.import_llvm_function(target[0], tags=frozenset({"is_finished", "node_assembly"})) + node_is_finished = builder.call(is_finished_f, target[1]) + + run_cond = builder.and_(run_cond, node_is_finished) + + return run_cond + + elif isinstance(condition, Threshold): + target = condition.dependency + param = condition.parameter + threshold = condition.threshold + comparator = condition.comparator + indices = condition.indices + + # Convert execution_count to ('num_executions', TimeScale.LIFE). + # These two are identical in compiled semantics. 
+ if param == 'execution_count': + assert indices is None + param = 'num_executions' + indices = TimeScale.LIFE + + assert param in target.llvm_state_ids, ( + f"Threshold for {target} only supports items in llvm_state_ids" + f" ({target.llvm_state_ids})" + ) + + node_idx = self.composition._get_node_index(target) + node_state = builder.gep(nodes_states, [self.ctx.int32_ty(0), self.ctx.int32_ty(node_idx)]) + param_ptr = helpers.get_state_ptr(builder, target, node_state, param) + + # parameters in state include history of at least one element + # so they are always arrays. + assert isinstance(param_ptr.type.pointee, ir.ArrayType) + + if indices is None: + indices = [0, 0] + elif isinstance(indices, TimeScale): + indices = [indices.value] + + param_ptr = builder.gep(param_ptr, [self.ctx.int32_ty(x) for x in [0] + list(indices)]) + + val = builder.load(param_ptr) + val = helpers.convert_type(builder, val, ir.DoubleType()) + threshold = val.type(threshold) + + if comparator == '==': + return helpers.is_close(self.ctx, builder, val, threshold, condition.rtol, condition.atol) + elif comparator == '!=': + return builder.not_(helpers.is_close(self.ctx, builder, val, threshold, condition.rtol, condition.atol)) + else: + return builder.fcmp_ordered(comparator, val, threshold) + + assert False, "Unsupported scheduling condition: {}".format(condition) From 587706e0c4bcbddb066812864a23e5a16760609f Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Sun, 3 Nov 2024 19:58:39 -0500 Subject: [PATCH 386/410] llvm/execution: Drop unused import Signed-off-by: Jan Vesely --- psyneulink/core/llvm/execution.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/psyneulink/core/llvm/execution.py b/psyneulink/core/llvm/execution.py index 02a7cecad09..5a172a83eec 100644 --- a/psyneulink/core/llvm/execution.py +++ b/psyneulink/core/llvm/execution.py @@ -23,7 +23,7 @@ from psyneulink.core import llvm as pnlvm from psyneulink.core.globals.context import Context -from . import builder_context, helpers, jit_engine, scheduler +from . import builder_context, jit_engine, scheduler from .debug import debug_env __all__ = ['CompExecution', 'FuncExecution', 'MechExecution'] From a57eaad50e9b2b4f0db760f5442bcd3c53b38782 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Sun, 3 Nov 2024 22:38:20 -0500 Subject: [PATCH 387/410] llvm/compiler: Use TimeIndex enum to determine the size of the timestamp structure Use Enum instead of IntEnum. Signed-off-by: Jan Vesely --- psyneulink/core/llvm/scheduler.py | 20 +++++++++----------- 1 file changed, 9 insertions(+), 11 deletions(-) diff --git a/psyneulink/core/llvm/scheduler.py b/psyneulink/core/llvm/scheduler.py index bb2a1d5300a..e98bafd1423 100644 --- a/psyneulink/core/llvm/scheduler.py +++ b/psyneulink/core/llvm/scheduler.py @@ -1,4 +1,4 @@ -from enum import IntEnum +from enum import Enum from llvmlite import ir from . 
import helpers @@ -7,7 +7,7 @@ EveryNCalls, Never, Not, WhenFinished, WhenFinishedAny, WhenFinishedAll, Threshold class ConditionGenerator: - class TimeIndex(IntEnum): + class TimeIndex(Enum): TRIAL = 0, PASS = 1, STEP = 2, @@ -18,18 +18,16 @@ def __init__(self, ctx, composition): self._zero = ctx.int32_ty(0) if ctx is not None else None def get_private_condition_struct_type(self, composition): - time_stamp_struct = ir.LiteralStructType([self.ctx.int32_ty, # Trial - self.ctx.int32_ty, # Pass - self.ctx.int32_ty]) # Step - - assert len(time_stamp_struct) == len(self.TimeIndex) - + time_stamp_struct = ir.LiteralStructType([self.ctx.int32_ty for _ in self.TimeIndex]) nodes_time_stamps_array = ir.ArrayType(time_stamp_struct, len(composition.nodes)) return ir.LiteralStructType((time_stamp_struct, nodes_time_stamps_array)) def get_private_condition_initializer(self, composition): - return ((0, 0, 0), tuple((-1, -1, -1) for _ in composition.nodes)) + init_global = tuple(0 for _ in self.TimeIndex) + init_node = tuple(-1 for _ in self.TimeIndex) + + return (init_global, tuple(init_node for _ in composition.nodes)) def get_condition_struct_type(self, node=None): node = self.composition if node is None else node @@ -140,9 +138,9 @@ def get_global_step(self, builder, cond_ptr): def generate_update_after_node_execution(self, builder, cond_ptr, node): # Update time stamp of the last execution global_ts_ptr = self.__get_global_ts_ptr(builder, cond_ptr) - global_ts = builder.load(global_ts_ptr) - node_ts_ptr = self.__get_node_ts_ptr(builder, cond_ptr, node) + + global_ts = builder.load(global_ts_ptr) builder.store(global_ts, node_ts_ptr) def _node_executions_for_scale(self, builder, node, node_states, time_scale:TimeScale): From 191389df8713bd3b9f852a4fe10d2bc2e640e5de Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 4 Nov 2024 22:37:56 +0000 Subject: [PATCH 388/410] requirements: update optuna requirement from <3.4.0 to <4.1.0 (#3042) --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index bdfac55c50b..d77c65770b0 100644 --- a/requirements.txt +++ b/requirements.txt @@ -10,7 +10,7 @@ matplotlib<3.7.6 modeci_mdf<0.5, >=0.4.3; (platform_machine == 'AMD64' or platform_machine == 'x86_64' or platform_machine == 'arm64' or platform_machine == 'aarch64') and platform_python_implementation == 'CPython' and implementation_name == 'cpython' networkx<3.5 numpy>=1.21.0, <1.26.5 -optuna<3.4.0 +optuna<4.1.0 packaging<25.0 pandas<2.2.4 pillow<11.1.0 From b3ddf2fea0c531b30f5b3cead9b83eb26e33f79f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 4 Nov 2024 19:17:40 -0500 Subject: [PATCH 389/410] requirements: update pytest-cov requirement from <5.0.1 to <6.0.1 (#3097) Updates the requirements on [pytest-cov](https://github.com/pytest-dev/pytest-cov) to permit the latest version. - [Changelog](https://github.com/pytest-dev/pytest-cov/blob/master/CHANGELOG.rst) - [Commits](https://github.com/pytest-dev/pytest-cov/compare/v5.0.0...v6.0.0) --- updated-dependencies: - dependency-name: pytest-cov dependency-type: direct:development ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- dev_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev_requirements.txt b/dev_requirements.txt index 9f06766b7a6..15e000ee8dd 100644 --- a/dev_requirements.txt +++ b/dev_requirements.txt @@ -2,7 +2,7 @@ jupyter<1.1.2 packaging<25.0 pytest<8.3.4 pytest-benchmark<5.1.1 -pytest-cov<5.0.1 +pytest-cov<6.0.1 pytest-forked<1.7.0 pytest-helpers-namespace<2021.12.30 pytest-profiling<1.8.1 From a761476aae28d51e18105913a4b364ee008888f7 Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Thu, 17 Oct 2024 03:13:22 +0000 Subject: [PATCH 390/410] ci: test-release: add --verbose to twine upload --- .github/workflows/test-release.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/test-release.yml b/.github/workflows/test-release.yml index 8df8c2bfef2..db32a9303b6 100644 --- a/.github/workflows/test-release.yml +++ b/.github/workflows/test-release.yml @@ -55,7 +55,7 @@ jobs: # It's not possible to condition steps on env or secrets, # We need an explicit check here if [ -n "$TWINE_USERNAME" -a -n "$TWINE_PASSWORD" ]; then - twine upload dist/* + twine upload --verbose dist/* else echo "::warning::Not uploading to test PyPI, no credentials available!" fi @@ -157,7 +157,7 @@ jobs: # It's not possible to condition steps on env or secrets, # We need an explicit check here if [ -n "$TWINE_USERNAME" -a -n "$TWINE_PASSWORD" ]; then - twine upload dist/* + twine upload --verbose dist/* else echo "::warning::Not uploading to PyPI, no credentials available!" fi From c2914e8202ea5b3793964743bdaa094e7bcd3515 Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Thu, 17 Oct 2024 03:14:18 +0000 Subject: [PATCH 391/410] ci: test-release: use github cli to create github release --- .github/workflows/test-release.yml | 56 ++++-------------------------- 1 file changed, 6 insertions(+), 50 deletions(-) diff --git a/.github/workflows/test-release.yml b/.github/workflows/test-release.yml index db32a9303b6..14f4fb47483 100644 --- a/.github/workflows/test-release.yml +++ b/.github/workflows/test-release.yml @@ -181,53 +181,9 @@ jobs: path: dist/ - name: Upload dist files to release - uses: actions/github-script@v7 - with: - script: | - const fs = require('fs') - tag = context.ref.split('/').pop() - console.log('running on:' + context.ref); - console.log('Looking for release for tag:' + tag); - - var release - try { - release = await github.rest.repos.getReleaseByTag({ - owner: context.repo.owner, - repo: context.repo.repo, - tag: tag - }); - console.log('Release found at: ' + release.data.html_url); // ' - } catch (err) { - if (err.status == 404) { - console.log('Release not found, creating a new one'); - release = await github.rest.repos.createRelease({ - owner: context.repo.owner, - repo: context.repo.repo, - tag_name: tag, - name: 'Release ' + tag, - body: 'New features and fixed bugs' - }); - } else { - throw err; - } - } - - console.log('Using release upload url: ' + release['data']['upload_url']); - // Determine content-length for header to upload asset - for (asset of ['${{ needs.create-python-dist.outputs.wheel }}', '${{ needs.create-python-dist.outputs.sdist }}']) { - const file_path = 'dist/' + asset; - const file_size = file_path => fs.statSync(file_path).size; - console.log('Uploading: ' + file_path); - - // Setup headers for API call, see Octokit Documentation: - // 
https://octokit.github.io/rest.js/#octokit-routes-repos-upload-release-asset for more information - const headers = { 'content-type': 'application/zip', 'content-length': file_size(file_path) }; - - // Upload a release asset - const uploadAssetResponse = await github.rest.repos.uploadReleaseAsset({ - url: release.data.upload_url, - headers, - name: asset, - file: fs.readFileSync(file_path) - }); - } + shell: bash + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + previous_release=$(gh --repo ${{ github.repository }} release list | head -n 1 | awk '{print $1}') + gh --repo ${{ github.repository }} release create --generate-notes --notes-start-tag "$previous_release" ${{ github.ref }} dist/* From 44368fca720b5910dfaaa5397b690d0f6e979c92 Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Thu, 31 Oct 2024 04:25:42 +0000 Subject: [PATCH 392/410] ci: test-release: include python 3.10, 3.11, 3.12 --- .github/workflows/test-release.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/test-release.yml b/.github/workflows/test-release.yml index 14f4fb47483..a0a74cb200c 100644 --- a/.github/workflows/test-release.yml +++ b/.github/workflows/test-release.yml @@ -69,7 +69,7 @@ jobs: strategy: fail-fast: false matrix: - python-version: [3.7, 3.8, 3.9] + python-version: [3.7, 3.8, 3.9, '3.10', 3.11, 3.12] os: [ubuntu-latest, macos-latest, windows-latest] dist: [wheel, sdist] From f9f7763f8329f6efbc8b0f0329dca75d7162d1bc Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Thu, 12 Sep 2024 02:59:21 +0000 Subject: [PATCH 393/410] Component: rework size argument to accept and validate numpy shapes - size=int - single input item, with numpy shape (x,) - size=iterable - one or more input items, each with respective numpy shapes - size containing float - no longer supported, because numpy rejects as shape (prior behavior casted to int, because size as float isn't defined) ComponentError thrown if default_variable and size arguments are both provided and conflict --- psyneulink/core/components/component.py | 230 ++++++++---------- .../core/components/mechanisms/mechanism.py | 7 +- psyneulink/core/components/ports/port.py | 52 ---- .../transfer/contrastivehebbianmechanism.py | 2 - tests/composition/test_interfaces.py | 4 +- tests/mechanisms/test_ddm_mechanism.py | 25 +- tests/mechanisms/test_transfer_mechanism.py | 153 ++++-------- 7 files changed, 161 insertions(+), 312 deletions(-) diff --git a/psyneulink/core/components/component.py b/psyneulink/core/components/component.py index 92d7b980db4..92ac1bf5a70 100644 --- a/psyneulink/core/components/component.py +++ b/psyneulink/core/components/component.py @@ -94,8 +94,8 @@ * **variable** - used as the input to its `function `. Specification of the **default_variable** argument in the constructor for a Component determines both its format (e.g., whether its value is numeric, its dimensionality and shape if it is an array, etc.) as well as its `default_value ` (the value - used when the Component is executed and no input is provided), and takes precedence over the specification of `size - `. + used when the Component is executed and no input is provided). + It may alternatively be specified by `size `. .. technical_note:: Internally, the attribute **variable** is not directly used as input to functions, to allow for parallelization. @@ -105,10 +105,17 @@ .. _Component_Size: -* **size** - the dimension of the `variable ` attribute. 
The **size** argument of the +* **size** - the numpy shape or iterable of shapes matching the + `variable ` attribute. The **size** argument of + the constructor for a Component can be used as a convenient method for specifying the `variable `, - attribute in which case it will be assigned as an array of zeros of the specified size. For example, - setting **size** = 3 is equivalent to setting **variable** = [0, 0, 0] and setting **size** = [4, 3] is equivalent + attribute in which case it will be assigned as an array of zeros of + the specified shape. When **size** is an iterable, each item in the + iterable is treated as a single shape, and the entire iterable is then + assigned as an array. When **size** is an integer, it is treated the + same as a one-item iterable containing that integer. For example, + setting **size** = 3 is equivalent to setting + **variable** = [[0, 0, 0]] and setting **size** = [4, 3] is equivalent to setting **variable** = [[0, 0, 0, 0], [0, 0, 0]]. .. note:: @@ -324,10 +331,9 @@ _instantiate_function method checks that the input of the Component's `function ` is compatible with its `variable `). - * `_handle_size ` converts the `variable ` and `size ` - arguments to the correct dimensions (for `Mechanism `, this is a 2D array and 1D - array, respectively). If **variable** is not passed as an argument, this method attempts to infer `variable - ` from the **size** argument, and vice versa if the **size** argument is missing. + * `_handle_size ` attempts to infer + `variable ` from the **size** argument if + **variable** is not passed as an argument. The _handle_size method then checks that the **size** and **variable** arguments are compatible. * `_instantiate_defaults ` first calls the validation methods, and then @@ -516,6 +522,7 @@ import graph_scheduler import numpy as np +from psyneulink._typing import Iterable, Union from psyneulink.core import llvm as pnlvm from psyneulink.core.globals.context import \ Context, ContextError, ContextFlags, INITIALIZATION_STATUS_FLAGS, _get_time, handle_external_context @@ -540,7 +547,7 @@ from psyneulink.core.globals.utilities import \ ContentAddressableList, convert_all_elements_to_np_array, convert_to_np_array, get_deepcopy_with_shared, \ is_instance_or_subclass, is_matrix, iscompatible, kwCompatibilityLength, \ - get_all_explicit_arguments, is_numeric, call_with_pruned_args, safe_equals, safe_len, parse_valid_identifier, try_extract_0d_array_item, contains_type + get_all_explicit_arguments, is_numeric, call_with_pruned_args, safe_equals, safe_len, parse_valid_identifier, try_extract_0d_array_item, contains_type, is_iterable from psyneulink.core.scheduling.condition import Never from psyneulink.core.scheduling.time import Time, TimeScale @@ -808,9 +815,10 @@ class Component(MDFSerializable, metaclass=ComponentsMeta): specifies template for the input to the Component's `function `, and the value used as the input to the Component if none is provided on execution (see `Component_Variable` for additional information). - size : int, list or np.ndarray of ints : default None + size : int, or Iterable of tuple or int : default None specifies default_variable as array(s) of zeros if **default_variable** is not passed as an argument; - if **default_variable** is specified, it takes precedence over the specification of **size** (see + if **default_variable** is specified, it is checked for + compatibility against **size** (see `size ` for additonal details). 
COMMENT: @@ -839,7 +847,7 @@ class Component(MDFSerializable, metaclass=ComponentsMeta): variable : 2d np.array see `variable ` - size : int or array of ints + size : Union[int, Iterable[Union[int, tuple]]] see `size ` function : Function, function or method @@ -1105,7 +1113,7 @@ def _parse_modulable(self, param_name, param_value): def __init__(self, default_variable, param_defaults, - size=NotImplemented, # 7/5/17 CW: this is a hack to check whether the user has passed in a size arg + size=None, function=None, name=None, reset_stateful_function_when=None, @@ -1649,33 +1657,68 @@ def _handle_default_variable(self, default_variable=None, size=None): None otherwise """ default_variable = self._parse_arg_variable(default_variable) + default_variable = self._handle_size(size, default_variable) - if default_variable is None: - default_variable = self._handle_size(size, default_variable) - - if default_variable is None or default_variable is NotImplemented: - return None - else: - self._variable_shape_flexibility = self._specified_variable_shape_flexibility + if default_variable is None or default_variable is NotImplemented: + return None else: self._variable_shape_flexibility = self._specified_variable_shape_flexibility return convert_to_np_array(default_variable, dimension=1) + def _parse_size( + self, size: Union[int, Iterable[Union[int, tuple]]] + ) -> np.ndarray: + """ + Returns the equivalent 'variable' array specified by **size** + + Args: + size (Union[int, Iterable[Union[int, tuple]]]) + + Returns: + np.ndarray + """ + def get_size_elem(s, idx=None): + try: + return np.zeros(s) + except (TypeError, ValueError) as e: + if idx is not None: + idx_str = f' at index {idx}' + else: + idx_str = '' + + raise ComponentError( + f'Invalid size argument of {self}{idx_str}. size must be a' + ' valid numpy shape or a list of shapes for use with' + f' numpy.zeros: {e}' + ) from e + + if not is_iterable(size, exclude_str=True): + variable_from_size = np.asarray([get_size_elem(size)]) + else: + if len(size) == 0: + raise ComponentError( + f'Invalid size argument of {self}. size must not be an empty list' + ) + variable_from_size = [] + for i, s in enumerate(size): + variable_from_size.append(get_size_elem(s, i)) + variable_from_size = convert_all_elements_to_np_array(variable_from_size) + + return variable_from_size + # ELIMINATE SYSTEM # IMPLEMENTATION NOTE: (7/7/17 CW) Due to System and Process being initialized with size at the moment (which will # be removed later), I’m keeping _handle_size in Component.py. I’ll move the bulk of the function to Mechanism # through an override, when Composition is done. For now, only Port.py overwrites _handle_size(). def _handle_size(self, size, variable): """If variable is None, _handle_size tries to infer variable based on the **size** argument to the - __init__() function. This method is overwritten in subclasses like Mechanism and Port. - If self is a Mechanism, it converts variable to a 2D array, (for a Mechanism, variable[i] represents - the input from the i-th InputPort). If self is a Port, variable is a 1D array and size is a length-1 1D - array. It performs some validations on size and variable as well. This function is overridden in Port.py. - If size is NotImplemented (usually in the case of Projections/Functions), then this function passes without - doing anything. Be aware that if size is NotImplemented, then variable is never cast to a particular shape. + __init__() function. 
If size is None (usually in the case of + Projections/Functions), then this function passes without + doing anything. If both size and variable are not None, a + ComponentError is thrown if they are not compatible. """ - if size is not NotImplemented: + if size is not None: self._variable_shape_flexibility = self._specified_variable_shape_flexibility # region Fill in and infer variable and size if they aren't specified in args # if variable is None and size is None: @@ -1683,109 +1726,52 @@ def _handle_size(self, size, variable): # 6/30/17 now handled in the individual subclasses' __init__() methods because each subclass has different # expected behavior when variable is None and size is None. - def checkAndCastInt(x): - if not isinstance(x, numbers.Number): - raise ComponentError("An element ({}) in size is not a number.".format(x)) - if x < 1: - raise ComponentError("An element ({}) in size is not a positive number.".format(x)) - try: - int_x = int(x) - except: - raise ComponentError( - "Failed to convert an element ({}) in size argument for {} {} to an integer. size " - "should be a number, or iterable of numbers, which are integers or " - "can be converted to integers.".format(x, type(self), self.name)) - if int_x != x: - if hasattr(self, 'prefs') and hasattr(self.prefs, VERBOSE_PREF) and self.prefs.verbosePref: - warnings.warn("When an element ({}) in the size argument was cast to " - "integer, its value changed to {}.".format(x, int_x)) - return int_x + # implementation note: for good coding practices, perhaps add setting to enable easy change of the default + # value of variable (though it's an unlikely use case), which is an array of zeros at the moment - if variable is not None: - variable = np.array(variable) - if variable.dtype == object: - # CAVEAT: assuming here that object dtype implies there are list objects (i.e. array with - # different sized arrays/lists inside like [[0, 1], [2, 3, 4]]), even though putting a None - # value in the array will give object dtype. 
This case doesn't really make sense in our - # context though, so ignoring this case in the interest of quickly fixing 3d variable behavior - variable = np.atleast_1d(variable) + def conflict_error(reason=None): + if reason is not None: + reason_str = f': {reason}' else: - variable = np.atleast_2d(variable) + reason_str = '' - variable = convert_all_elements_to_np_array(variable) + return ComponentError( + f'size and default_variable arguments of {self} conflict{reason_str}' + ) - try: - if size is not None: - size = np.atleast_1d(size) - if len(np.shape(size)) > 1: # number of dimensions of size > 1 - if hasattr(self, 'prefs') and hasattr(self.prefs, VERBOSE_PREF) and self.prefs.verbosePref: - warnings.warn( - "size had more than one dimension (size had {} dimensions), so only the first " - "element of its highest-numbered axis will be used".format(len(np.shape(size)))) - while len(np.shape(size)) > 1: # reduce the dimensions of size - size = size[0] - except: - raise ComponentError("Failed to convert size (of type {}) to a 1D array.".format(type(size))) + variable_from_size = self._parse_size(size) - if size is not None: - size = np.array(list(map(checkAndCastInt, size))) # convert all elements of size to int + if variable is None: + return variable_from_size - # implementation note: for good coding practices, perhaps add setting to enable easy change of the default - # value of variable (though it's an unlikely use case), which is an array of zeros at the moment - if variable is None and size is not None: - try: - variable = [] - for s in size: - variable.append(np.zeros(s)) - variable = convert_to_np_array(variable) - # TODO: fix bare except - except: - raise ComponentError("variable (possibly default_variable) was not specified, but PsyNeuLink " - "was unable to infer variable from the size argument, {}. size should be" - " an integer or an array or list of integers. Either size or " - "variable must be specified.".format(size)) - - # the two regions below (creating size if it's None and/or expanding it) are probably obsolete (7/7/17 CW) - - if size is None and variable is not None: - size = [] - try: - for input_vector in variable: - size.append(len(input_vector)) - size = np.array(size) - except: - raise ComponentError( - "{}: size was not specified, and unable to infer it from the variable argument ({}) " - "-- it can be an array, list, a 2D array, a list of arrays, array of lists, etc. ". - format(self.name, variable)) - # endregion - - if size is not None and variable is not None: - if len(size) == 1 and len(variable) > 1: - new_size = np.empty(len(variable)) - new_size.fill(size[0]) - size = new_size - - # the two lines below were used when size was a param and are likely obsolete (7/7/17 CW) - # param_defaults['size'] = size # 7/5/17 potentially buggy? Not sure (CW) - # self.user_params_for_instantiation['size'] = None # 7/5/17 VERY HACKY: See Changyan's Notes on this. - - # Both variable and size are specified - if variable is not None and size is not None: - # If they conflict, give warning - if len(size) != len(variable): - if hasattr(self, 'prefs') and hasattr(self.prefs, VERBOSE_PREF) and self.prefs.verbosePref: - warnings.warn("The size arg of {} conflicts with the length " - "of its variable arg ({}) at element {}: variable takes precedence". 
- format(self.name, size, variable)) + if is_iterable(size, exclude_str=True): + assert len(size) == len(variable_from_size) + + if variable.ndim == 0: + raise conflict_error( + 'size gives a list of items but default_variable is 0d' + ) + elif len(size) != len(variable): + raise conflict_error( + f'len(size) is {len(size)};' + f' len(default_variable) is {len(variable)}' + ) else: for i in range(len(size)): - if size[i] != len(variable[i]): - if hasattr(self, 'prefs') and hasattr(self.prefs, VERBOSE_PREF) and self.prefs.verbosePref: - warnings.warn("The size arg of {} ({}) conflicts with the length " - "of its variable arg ({}) at element {}: variable takes precedence". - format(self.name, size[i], variable[i], i)) + if variable_from_size[i].shape != variable[i].shape: + raise conflict_error( + f'size[{i}].shape: {variable_from_size[i].shape};' + f' default_variable[{i}].shape: {variable[i].shape}' + ) + else: + if variable_from_size.shape != variable.shape: + raise conflict_error( + f'size.shape: {variable_from_size.shape};' + f' default_variable.shape: {variable.shape}' + ) + # if variable_from_size is created an error has not been thrown + # so far, variable is equal return variable def _get_allowed_arguments(self) -> set: diff --git a/psyneulink/core/components/mechanisms/mechanism.py b/psyneulink/core/components/mechanisms/mechanism.py index fed6c4a6327..79caf6c97d4 100644 --- a/psyneulink/core/components/mechanisms/mechanism.py +++ b/psyneulink/core/components/mechanisms/mechanism.py @@ -1222,12 +1222,15 @@ class Mechanism_Base(Mechanism): of its `function ` if those are not specified. If it is not specified, then a subclass-specific default is assigned (usually [[0]]). - size : int, list or np.ndarray of ints : default None + size : int, or Iterable of tuples or ints : default None specifies default_variable as array(s) of zeros if **default_variable** is not passed as an argument; - if **default_variable** is specified, it takes precedence over the specification of **size**. + if **default_variable** is specified, it must be equivalent to + **size**. For example, the following Mechanisms are equivalent:: my_mech = ProcessingMechanism(size = [3, 2]) my_mech = ProcessingMechanism(default_variable = [[0, 0, 0], [0, 0]]) + When specified as an iterable, each element of **size** is used + as the size of the corresponding InputPort. input_ports : str, list, dict, or np.ndarray : default None specifies the InputPorts for the Mechanism; if it is not specified, a single InputPort is created diff --git a/psyneulink/core/components/ports/port.py b/psyneulink/core/components/ports/port.py index 1bfeb0a36a4..a3d23e6da86 100644 --- a/psyneulink/core/components/ports/port.py +++ b/psyneulink/core/components/ports/port.py @@ -1124,58 +1124,6 @@ def __init__(self, if context.source == ContextFlags.COMMAND_LINE: owner.add_ports([self]) - def _handle_size(self, size, variable): - """Overwrites the parent method in Component.py, because the variable of a Port - is generally 1D, rather than 2D as in the case of Mechanisms - """ - if size is not NotImplemented: - - def checkAndCastInt(x): - if not isinstance(x, numbers.Number): - raise PortError("Size ({}) is not a number.".format(x)) - if x < 1: - raise PortError("Size ({}) is not a positive number.".format(x)) - try: - int_x = int(x) - except: - raise PortError( - "Failed to convert size argument ({}) for {} {} to an integer. For Ports, size " - "should be a number, which is an integer or can be converted to integer.". 
- format(x, type(self), self.name)) - if int_x != x: - if hasattr(self, 'prefs') and hasattr(self.prefs, VERBOSE_PREF) and self.prefs.verbosePref: - warnings.warn("When size ({}) was cast to integer, its value changed to {}.".format(x, int_x)) - return int_x - - # region Convert variable to a 1D array, cast size to an integer - if size is not None: - size = checkAndCastInt(size) - try: - if variable is not None: - variable = np.atleast_1d(variable) - except: - raise PortError("Failed to convert variable (of type {}) to a 1D array.".format(type(variable))) - # endregion - - # region if variable is None and size is not None, make variable a 1D array of zeros of length = size - if variable is None and size is not None: - try: - variable = np.zeros(size) - except: - raise ComponentError("variable (perhaps default_variable) was not specified, but PsyNeuLink " - "was unable to infer variable from the size argument, {}. size should be" - " an integer or able to be converted to an integer. Either size or " - "variable must be specified.".format(size)) - #endregion - - if variable is not None and size is not None: # try tossing this "if" check - # If they conflict, raise exception - if size != len(variable): - raise PortError("The size arg of {} ({}) conflicts with the length of its variable arg ({})". - format(self.name, size, variable)) - - return variable - def _validate_variable(self, variable, context=None): """Validate variable and return validated variable diff --git a/psyneulink/library/components/mechanisms/processing/transfer/contrastivehebbianmechanism.py b/psyneulink/library/components/mechanisms/processing/transfer/contrastivehebbianmechanism.py index a08a6185f18..460063a0d15 100644 --- a/psyneulink/library/components/mechanisms/processing/transfer/contrastivehebbianmechanism.py +++ b/psyneulink/library/components/mechanisms/processing/transfer/contrastivehebbianmechanism.py @@ -1032,7 +1032,6 @@ def __init__(self, self.target_start = 0 self._target_included = False self.target_end = self.target_start + target_size - size = self.recurrent_size default_variable = [np.zeros(input_size), np.zeros(self.recurrent_size)] # Set InputPort sizes in _instantiate_input_ports, @@ -1059,7 +1058,6 @@ def __init__(self, super().__init__( default_variable=default_variable, - size=size, input_ports=input_ports, combination_function=combination_function, function=function, diff --git a/tests/composition/test_interfaces.py b/tests/composition/test_interfaces.py index 3d2815ecd35..b1cf2975b64 100644 --- a/tests/composition/test_interfaces.py +++ b/tests/composition/test_interfaces.py @@ -500,8 +500,8 @@ def test_user_added_ports(self): mech = ProcessingMechanism() comp.add_node(mech) # instantiate custom input and output ports - inp = InputPort(size=2) - out = OutputPort(size=2) + inp = InputPort() + out = OutputPort() # NOTE: Adding ports to CIM from command line is currenlty disallowed # # add custom input and output ports to CIM diff --git a/tests/mechanisms/test_ddm_mechanism.py b/tests/mechanisms/test_ddm_mechanism.py index c8af9e9bf06..b9b8bad54a4 100644 --- a/tests/mechanisms/test_ddm_mechanism.py +++ b/tests/mechanisms/test_ddm_mechanism.py @@ -528,25 +528,6 @@ def test_DDM_size_int_inputs(): # INVALID INPUTS -# ------------------------------------------------------------------------------------------------ -# TEST 1 -# size = 0, check less-than-one error - - -def test_DDM_mech_size_zero(): - with pytest.raises(ComponentError) as error_text: - T = DDM( - name='DDM', - size=0, - 
function=DriftDiffusionIntegrator( - noise=0.0, - rate=-5.0, - time_step_size=1.0 - ), - execute_until_finished=False, - ) - assert "is not a positive number" in str(error_text.value) - # ------------------------------------------------------------------------------------------------ # TEST 2 # size = -1.0, check less-than-one error @@ -556,7 +537,7 @@ def test_DDM_mech_size_negative_one(): with pytest.raises(ComponentError) as error_text: T = DDM( name='DDM', - size=-1.0, + size=-1, function=DriftDiffusionIntegrator( noise=0.0, rate=-5.0, @@ -564,7 +545,7 @@ def test_DDM_mech_size_negative_one(): ), execute_until_finished=False, ) - assert "is not a positive number" in str(error_text.value) + assert "negative dimensions" in str(error_text.value) # ------------------------------------------------------------------------------------------------ # TEST 3 @@ -575,7 +556,7 @@ def test_DDM_size_too_large(): with pytest.raises(DDMError) as error_text: T = DDM( name='DDM', - size=3.0, + size=3, function=DriftDiffusionIntegrator( noise=0.0, rate=-5.0, diff --git a/tests/mechanisms/test_transfer_mechanism.py b/tests/mechanisms/test_transfer_mechanism.py index 05477e0e06a..7f20beed9ee 100644 --- a/tests/mechanisms/test_transfer_mechanism.py +++ b/tests/mechanisms/test_transfer_mechanism.py @@ -998,48 +998,6 @@ def test_transfer_mech_size_int_inputs_floats(self): # val = T.execute([Linear().execute(), NormalDist().execute(), Exponential().execute(), ExponentialDist().execute()]) # np.testing.assert_allclose(val, [[np.array([0.]), 0.4001572083672233, np.array([1.]), 0.7872011523172707]] - # ------------------------------------------------------------------------------------------------ - # TEST 5 - # size = float, check if variable is an array of zeros - - @pytest.mark.mechanism - @pytest.mark.transfer_mechanism - def test_transfer_mech_size_float_inputs_check_var(self): - T = TransferMechanism( - name='T', - size=4.0, - ) - np.testing.assert_array_equal(T.defaults.variable, [[0, 0, 0, 0]]) - assert len(T.size == 1) and T.size[0] == 4.0 and isinstance(T.size[0], np.integer) - - # ------------------------------------------------------------------------------------------------ - # TEST 6 - # size = float, variable = list of ints - - @pytest.mark.mechanism - @pytest.mark.transfer_mechanism - def test_transfer_mech_size_float_inputs_ints(self): - T = TransferMechanism( - name='T', - size=4.0 - ) - val = T.execute([10, 10, 10, 10]) - np.testing.assert_array_equal(val, [[10.0, 10.0, 10.0, 10.0]]) - - # ------------------------------------------------------------------------------------------------ - # TEST 7 - # size = float, variable = list of floats - - @pytest.mark.mechanism - @pytest.mark.transfer_mechanism - def test_transfer_mech_size_float_inputs_floats(self): - T = TransferMechanism( - name='T', - size=4.0 - ) - val = T.execute([10.0, 10.0, 10.0, 10.0]) - np.testing.assert_array_equal(val, [[10.0, 10.0, 10.0, 10.0]]) - # ------------------------------------------------------------------------------------------------ # TEST 8 # size = float, variable = list of functions @@ -1069,18 +1027,6 @@ def test_transfer_mech_size_list_of_ints(self): assert len(T.defaults.variable) == 3 and len(T.defaults.variable[0]) == 2 and len(T.defaults.variable[1]) == 3 and len(T.defaults.variable[2]) == 4 # ------------------------------------------------------------------------------------------------ - # TEST 10 - # size = list of floats, check that variable is correct - - @pytest.mark.mechanism - 
@pytest.mark.transfer_mechanism - def test_transfer_mech_size_list_of_floats(self): - T = TransferMechanism( - name='T', - size=[2., 3., 4.] - ) - assert len(T.defaults.variable) == 3 and len(T.defaults.variable[0]) == 2 and len(T.defaults.variable[1]) == 3 and len(T.defaults.variable[2]) == 4 - # note that this output under the Linear function is useless/odd, but the purpose of allowing this configuration # is for possible user-defined functions that do use unusual shapes. @@ -1089,7 +1035,7 @@ def test_transfer_mech_size_list_of_floats(self): def test_transfer_mech_size_var_both_lists(self): T = TransferMechanism( name='T', - size=[2., 3.], + size=[2, 3], default_variable=[[1, 2], [3, 4, 5]] ) assert len(T.defaults.variable) == 2 @@ -1103,13 +1049,14 @@ def test_transfer_mech_size_var_both_lists(self): @pytest.mark.mechanism @pytest.mark.transfer_mechanism def test_transfer_mech_size_scalar_var_2d(self): - T = TransferMechanism( - name='T', - size=2, - default_variable=[[1, 2], [3, 4]] - ) - np.testing.assert_array_equal(T.defaults.variable, [[1, 2], [3, 4]]) - assert len(T.size) == 2 and T.size[0] == 2 and T.size[1] == 2 + with pytest.raises( + ComponentError, match=r'size and default_variable arguments.*conflict.*' + ): + TransferMechanism( + name='T', + size=2, + default_variable=[[1, 2], [3, 4]] + ) # ------------------------------------------------------------------------------------------------ # TEST 13 @@ -1131,14 +1078,14 @@ def test_transfer_mech_var_2d_array(self): @pytest.mark.mechanism @pytest.mark.transfer_mechanism def test_transfer_mech_var_1D_size_wrong(self): - T = TransferMechanism( - name='T', - default_variable=[1, 2, 3, 4], - size=2 - ) - np.testing.assert_array_equal(T.defaults.variable, [[1, 2, 3, 4]]) - val = T.execute([10.0, 10.0, 10.0, 10.0]) - np.testing.assert_array_equal(val, [[10.0, 10.0, 10.0, 10.0]]) + with pytest.raises( + ComponentError, match=r'size and default_variable arguments.*conflict.*' + ): + TransferMechanism( + name='T', + default_variable=[1, 2, 3, 4], + size=2 + ) # ------------------------------------------------------------------------------------------------ # TEST 15 @@ -1147,14 +1094,14 @@ def test_transfer_mech_var_1D_size_wrong(self): @pytest.mark.mechanism @pytest.mark.transfer_mechanism def test_transfer_mech_var_1D_size_wrong_2(self): - T = TransferMechanism( - name='T', - default_variable=[1, 2, 3, 4], - size=[2, 3, 4] - ) - np.testing.assert_array_equal(T.defaults.variable, [[1, 2, 3, 4]]) - val = T.execute([10.0, 10.0, 10.0, 10.0]) - np.testing.assert_array_equal(val, [[10.0, 10.0, 10.0, 10.0]]) + with pytest.raises( + ComponentError, match=r'size and default_variable arguments.*conflict.*' + ): + TransferMechanism( + name='T', + default_variable=[1, 2, 3, 4], + size=[2, 3, 4] + ) # ------------------------------------------------------------------------------------------------ # TEST 16 @@ -1163,14 +1110,14 @@ def test_transfer_mech_var_1D_size_wrong_2(self): @pytest.mark.mechanism @pytest.mark.transfer_mechanism def test_transfer_mech_size_var_incompatible1(self): - T = TransferMechanism( - name='T', - size=2, - default_variable=[[1, 2], [3, 4, 5]] - ) - assert len(T.defaults.variable) == 2 - np.testing.assert_array_equal(T.defaults.variable[0], [1, 2]) - np.testing.assert_array_equal(T.defaults.variable[1], [3, 4, 5]) + with pytest.raises( + ComponentError, match=r'size and default_variable arguments.*conflict.*' + ): + TransferMechanism( + name='T', + size=2, + default_variable=[[1, 2], [3, 4, 5]] + ) # 
------------------------------------------------------------------------------------------------ # TEST 17 @@ -1179,33 +1126,19 @@ def test_transfer_mech_size_var_incompatible1(self): @pytest.mark.mechanism @pytest.mark.transfer_mechanism def test_transfer_mech_size_var_incompatible2(self): - T = TransferMechanism( - name='T', - size=[2, 2], - default_variable=[[1, 2], [3, 4, 5]] - ) - assert len(T.defaults.variable) == 2 - np.testing.assert_array_equal(T.defaults.variable[0], [1, 2]) - np.testing.assert_array_equal(T.defaults.variable[1], [3, 4, 5]) + with pytest.raises( + ComponentError, match=r'size and default_variable arguments.*conflict.*' + ): + TransferMechanism( + name='T', + size=[2, 2], + default_variable=[[1, 2], [3, 4, 5]] + ) # ------------------------------------------------------------------------------------------------ # INVALID INPUTS - # ------------------------------------------------------------------------------------------------ - # TEST 1 - # size = 0, check less-than-one error - - @pytest.mark.mechanism - @pytest.mark.transfer_mechanism - def test_transfer_mech_size_zero(self): - with pytest.raises(ComponentError) as error_text: - T = TransferMechanism( - name='T', - size=0, - ) - assert "is not a positive number" in str(error_text.value) - # ------------------------------------------------------------------------------------------------ # TEST 2 # size = -1.0, check less-than-one error @@ -1216,9 +1149,9 @@ def test_transfer_mech_size_negative_one(self): with pytest.raises(ComponentError) as error_text: T = TransferMechanism( name='T', - size=-1.0, + size=-1, ) - assert "is not a positive number" in str(error_text.value) + assert "negative dimensions" in str(error_text.value) # this test below and the (currently commented) test immediately after it _may_ be deprecated if we ever fix # warnings to be no longer fatal. At the time of writing (6/30/17, CW), warnings are always fatal. From 61a98f85b0c02efbe83e49e9280aa415b3b6f217 Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Thu, 26 Sep 2024 04:54:11 +0000 Subject: [PATCH 394/410] treewide: rename Component arg size -> input_shapes --- ..._Shallice_debugging_Interactive_activation | 10 +- Scripts/Debug/Hebbian_Simon.py | 2 +- ...on_Reward_rate_with_penalty_with_inputs.py | 12 +- Scripts/Debug/Markus Stroop.py | 18 ++- .../Debug/Predator-Prey Sebastian REDUCED.py | 6 +- Scripts/Debug/Predator-Prey Sebastian.py | 6 +- Scripts/Debug/StabilityFlexibility.py | 9 +- Scripts/Debug/Yotam LCA Model LLVM.py | 29 ++-- Scripts/Debug/Yotam LCA Model.py | 26 ++-- Scripts/Debug/bryant_lca_with_termination.py | 6 +- .../laura_test_no_noise_stroop_09_11_2018.py | 20 +-- Scripts/Debug/lca/pytorch_lca.py | 2 +- Scripts/Debug/markus_test_umemoto.py | 3 +- .../predator_prey_opt/predator_prey_dmt.py | 8 +- .../stability_flexibility.py | 24 +-- .../stability_flexibility_nn.py | 26 ++-- Scripts/Debug/stability_flexibility_simple.py | 9 +- .../Basics And Primer/Stroop Model - Basic.py | 12 +- .../Stroop Model - Conflict Monitoring.py | 17 ++- Scripts/Examples/Basics And Primer/XOR Model | 6 +- .../Examples/Botvinick Model Composition.py | 21 ++- .../Examples/Gating-Mechanism. 
with UDF.py | 2 +- .../Gilbert_Shallice_Composition_Model.py | 21 ++- Scripts/Examples/Lena Rumelhart script.py | 12 +- Scripts/Examples/RL-DDM.py | 2 +- .../Examples/Rumelhart Semantic Network.py | 16 +- Scripts/Examples/StabilityFlexibility.py | 9 +- Scripts/Examples/Stroop Model.py | 14 +- .../Rumelhart Semantic Network (autodiff).py | 12 +- .../Examples/Tutorial/Stroop Model - EVC.py | 14 +- Scripts/Examples/_Gating-Mechanism.py | 2 +- Scripts/Examples/_Leabra-Demo.py | 4 +- Scripts/Examples/_Leabra-Learning-Demo.py | 4 +- .../Examples/_Reinforcement-Learning REV.py | 4 +- .../Adaptive Replay Model.py | 22 +-- .../Bustamante_Stroop_XOR_LVOC_Model.py | 6 +- .../Bustamante_Stroop_XOR_LVOC_Model_VZ.py | 6 +- .../EGO Model - CSW with RNN.py | 10 +- .../EGO Model - CSW with Simple Integrator.py | 10 +- .../EGO Model - Revaluation.py | 16 +- .../EGO Model - MDP.py | 24 +-- .../GreedyAgentInteractiveInputs.py | 10 +- .../GreedyAgentModel.py | 10 +- .../GreedyAgentModel_LLVM_TEST.py | 6 +- .../PanickyAgentModel.py | 8 +- .../Predator-Prey Model DEMO.py | 8 +- .../Predator-Prey Model DQN LVOC.py | 6 +- .../Predator-Prey Model DQN [ORIG].py | 8 +- .../Predator-Prey Model DQN.py | 8 +- .../Predator-Prey Model INPUT LAYER.py | 20 +-- .../Predator-Prey Model I_0 Nested Comp.py | 20 +-- .../Predator-Prey Model.py | 8 +- .../Models (Under Development)/nback/nback.py | 28 ++-- .../nback/nback_og_pnl.py | 26 ++-- docs/source/BasicsAndPrimer.rst | 38 ++--- .../BotvinickConflictMonitoringModel.rst | 14 +- docs/source/Cohen_HustonModel.rst | 12 +- docs/source/NieuwenhuisModel.rst | 6 +- docs/source/PCTC_model.rst | 16 +- docs/source/RefactoredLearningGuide.rst | 8 +- docs/source/UserGuide_TBD.rst | 2 +- psyneulink/core/components/component.py | 142 +++++++++--------- .../nonstateful/learningfunctions.py | 6 +- .../nonstateful/objectivefunctions.py | 30 ++-- .../nonstateful/transferfunctions.py | 18 +-- .../functions/userdefinedfunction.py | 8 +- .../core/components/mechanisms/mechanism.py | 44 +++--- .../modulatory/control/controlmechanism.py | 12 +- .../control/gating/gatingmechanism.py | 12 +- .../modulatory/learning/learningmechanism.py | 4 +- .../modulatory/modulatorymechanism.py | 4 +- .../compositioninterfacemechanism.py | 4 +- .../processing/defaultprocessingmechanism.py | 6 +- .../processing/integratormechanism.py | 14 +- .../processing/objectivemechanism.py | 8 +- .../processing/processingmechanism.py | 12 +- .../processing/transfermechanism.py | 34 ++--- psyneulink/core/components/ports/inputport.py | 34 ++--- .../ports/modulatorysignals/controlsignal.py | 4 +- .../ports/modulatorysignals/gatingsignal.py | 10 +- .../ports/modulatorysignals/learningsignal.py | 4 +- .../modulatorysignals/modulatorysignal.py | 4 +- .../core/components/ports/outputport.py | 4 +- .../core/components/ports/parameterport.py | 8 +- psyneulink/core/components/ports/port.py | 8 +- psyneulink/core/components/shellclasses.py | 4 +- psyneulink/core/compositions/composition.py | 6 +- psyneulink/core/compositions/showgraph.py | 6 +- psyneulink/core/globals/keywords.py | 4 +- psyneulink/core/globals/log.py | 4 +- psyneulink/core/globals/parameters.py | 2 +- .../autoassociativelearningmechanism.py | 4 +- .../learning/kohonenlearningmechanism.py | 4 +- .../mechanisms/processing/integrator/ddm.py | 12 +- .../integrator/episodicmemorymechanism.py | 44 +++--- .../mechanisms/processing/leabramechanism.py | 6 +- .../objective/comparatormechanism.py | 2 +- .../objective/predictionerrormechanism.py | 4 +- 
.../transfer/contrastivehebbianmechanism.py | 6 +- .../processing/transfer/kohonenmechanism.py | 4 +- .../processing/transfer/kwtamechanism.py | 8 +- .../processing/transfer/lcamechanism.py | 4 +- .../transfer/recurrenttransfermechanism.py | 14 +- .../compositions/autodiffcomposition.py | 4 +- .../library/compositions/emcomposition.py | 24 +-- psyneulink/library/models/Cohen_Huston1994.py | 14 +- .../models/Cohen_Huston1994_horse_race.py | 21 ++- psyneulink/library/models/GilzenratModel.py | 6 +- .../library/models/Kalanthroff_PCTC_2018.py | 16 +- .../library/models/Nieuwenhuis2005Model.py | 6 +- tests/components/test_component.py | 6 +- .../test_parameterestimationcomposition.py | 8 +- .../composition/pec/test_stab_flex_pec_fit.py | 24 +-- tests/composition/test_autodiffcomposition.py | 82 +++++----- tests/composition/test_composition.py | 67 +++++---- tests/composition/test_control.py | 33 ++-- tests/composition/test_gating.py | 4 +- tests/composition/test_interfaces.py | 2 +- tests/composition/test_learning.py | 125 ++++++++------- tests/composition/test_models.py | 32 ++-- tests/composition/test_show_graph.py | 10 +- tests/control/test_gilzenrat.py | 4 +- .../functions/test_accumulator_integrator.py | 4 +- tests/functions/test_combination.py | 2 +- tests/functions/test_memory.py | 12 +- tests/functions/test_user_defined_func.py | 12 +- tests/log/test_log.py | 32 ++-- tests/log/test_rpc.py | 24 +-- tests/mdf/model_varied_matrix_sizes.py | 8 +- tests/mdf/stroop_conflict_monitoring.py | 16 +- tests/mechanisms/test_control_mechanism.py | 17 ++- tests/mechanisms/test_ddm_mechanism.py | 24 +-- tests/mechanisms/test_episodic_memory.py | 10 +- tests/mechanisms/test_gating_mechanism.py | 12 +- tests/mechanisms/test_input_output_labels.py | 12 +- tests/mechanisms/test_input_port_spec.py | 64 ++++---- tests/mechanisms/test_kwta.py | 78 +++++----- tests/mechanisms/test_lca.py | 53 ++++--- tests/mechanisms/test_leabra_mechanism.py | 30 ++-- tests/mechanisms/test_mechanisms.py | 10 +- tests/mechanisms/test_processing_mechanism.py | 4 +- .../test_recurrent_transfer_mechanism.py | 122 +++++++-------- tests/mechanisms/test_transfer_mechanism.py | 86 +++++------ tests/models/test_botvinick.py | 21 ++- tests/models/test_greedy_agent.py | 24 +-- tests/ports/test_input_ports.py | 18 +-- .../test_projection_specifications.py | 24 +-- tests/scheduling/test_scheduler.py | 8 +- 148 files changed, 1312 insertions(+), 1219 deletions(-) diff --git a/Scripts/Debug/Gilbert_Shallice_debugging_Interactive_activation b/Scripts/Debug/Gilbert_Shallice_debugging_Interactive_activation index e01144bdcf3..c40a12ff480 100644 --- a/Scripts/Debug/Gilbert_Shallice_debugging_Interactive_activation +++ b/Scripts/Debug/Gilbert_Shallice_debugging_Interactive_activation @@ -5,16 +5,16 @@ import psyneulink as pnl ### LAYERS -WORD_INPUT_LAYER = pnl.TransferMechanism(size = 3, +WORD_INPUT_LAYER = pnl.TransferMechanism(input_shapes = 3, function=pnl.Linear, name='WORD INPUT LAYER') -COLOR_INPUT_LAYER = pnl.TransferMechanism(size = 3, +COLOR_INPUT_LAYER = pnl.TransferMechanism(input_shapes = 3, function=pnl.Linear, name='COLOR INPUT LAYER') -WORD_OUTPUT_LAYER = pnl.RecurrentTransferMechanism(size = 3, +WORD_OUTPUT_LAYER = pnl.RecurrentTransferMechanism(input_shapes = 3, auto=0.0, hetero=0.0,#-2.0, function=pnl.Linear(), @@ -25,7 +25,7 @@ WORD_OUTPUT_LAYER.set_log_conditions('InputPort-0') -COLOR_OUTPUT_LAYER = pnl.RecurrentTransferMechanism(size = 3, +COLOR_OUTPUT_LAYER = pnl.RecurrentTransferMechanism(input_shapes = 3, auto=0.0, 
hetero=0.0,#-2.0, function=pnl.Linear(), @@ -35,7 +35,7 @@ COLOR_OUTPUT_LAYER = pnl.RecurrentTransferMechanism(size = 3, COLOR_OUTPUT_LAYER.set_log_conditions('value') -TASK_DEMAND_LAYER = pnl.RecurrentTransferMechanism(size = 2, +TASK_DEMAND_LAYER = pnl.RecurrentTransferMechanism(input_shapes = 2, auto=0.0, hetero=0.0,#-2.0, function=pnl.Linear(), diff --git a/Scripts/Debug/Hebbian_Simon.py b/Scripts/Debug/Hebbian_Simon.py index f2722741205..6037fa14155 100644 --- a/Scripts/Debug/Hebbian_Simon.py +++ b/Scripts/Debug/Hebbian_Simon.py @@ -14,7 +14,7 @@ Hebb_comp = pnl.Composition() Hebb_mech=pnl.RecurrentTransferMechanism( - size=sizeF, + input_shapes=sizeF, function=pnl.Linear, #integrator_mode = True, #integration_rate = 0.5, diff --git a/Scripts/Debug/Jason_Reward_rate_with_penalty_with_inputs.py b/Scripts/Debug/Jason_Reward_rate_with_penalty_with_inputs.py index 759f1a1c708..3fba66424af 100644 --- a/Scripts/Debug/Jason_Reward_rate_with_penalty_with_inputs.py +++ b/Scripts/Debug/Jason_Reward_rate_with_penalty_with_inputs.py @@ -72,18 +72,18 @@ def get_stroop_model(unit_noise_std=.01, dec_noise_std=.1): punish = pnl.TransferMechanism(name='punish') inp_clr = pnl.TransferMechanism( - size=N_UNITS, function=pnl.Linear, name='COLOR INPUT' + input_shapes=N_UNITS, function=pnl.Linear, name='COLOR INPUT' ) inp_wrd = pnl.TransferMechanism( - size=N_UNITS, function=pnl.Linear, name='WORD INPUT' + input_shapes=N_UNITS, function=pnl.Linear, name='WORD INPUT' ) # task layer, represent the task instruction; color naming / word reading inp_task = pnl.TransferMechanism( - size=N_UNITS, function=pnl.Linear, name='TASK' + input_shapes=N_UNITS, function=pnl.Linear, name='TASK' ) # hidden layer for color and word hid_clr = pnl.TransferMechanism( - size=N_UNITS, + input_shapes=N_UNITS, function=hidden_func, integrator_mode=True, integration_rate=integration_rate, @@ -92,7 +92,7 @@ def get_stroop_model(unit_noise_std=.01, dec_noise_std=.1): name='COLORS HIDDEN' ) hid_wrd = pnl.TransferMechanism( - size=N_UNITS, + input_shapes=N_UNITS, function=hidden_func, integrator_mode=True, integration_rate=integration_rate, @@ -102,7 +102,7 @@ def get_stroop_model(unit_noise_std=.01, dec_noise_std=.1): ) # output layer output = pnl.TransferMechanism( - size=N_UNITS, + input_shapes=N_UNITS, function=pnl.Logistic, integrator_mode=True, integration_rate=integration_rate, diff --git a/Scripts/Debug/Markus Stroop.py b/Scripts/Debug/Markus Stroop.py index afc339a101f..1eda026211b 100644 --- a/Scripts/Debug/Markus Stroop.py +++ b/Scripts/Debug/Markus Stroop.py @@ -7,16 +7,19 @@ import psyneulink.core.components.functions.stateful.integratorfunctions import psyneulink.core.components.functions.nonstateful.transferfunctions -colors_input_layer = pnl.TransferMechanism(size=2, +colors_input_layer = pnl.TransferMechanism( + input_shapes=2, function=psyneulink.core.components.functions.nonstateful.transferfunctions.Linear, name='COLORS_INPUT') -words_input_layer = pnl.TransferMechanism(size=2, +words_input_layer = pnl.TransferMechanism( + input_shapes=2, function=psyneulink.core.components.functions.nonstateful.transferfunctions.Linear, name='WORDS_INPUT') # Task layer, tasks: ('name the color', 'read the word') -task_layer = pnl.TransferMechanism(size=2, +task_layer = pnl.TransferMechanism( + input_shapes=2, function=psyneulink.core.components.functions.nonstateful.transferfunctions.Linear, name='TASK') @@ -26,14 +29,16 @@ # randomly distributed noise to the net input # time averaging = integration_rate = 0.1 unit_noise = 0.001 
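The hunks in this patch are mechanical keyword substitutions: every constructor call that previously passed size now passes the same value as input_shapes. A minimal before/after sketch, assuming a PsyNeuLink build that already includes this rename (the pre-rename spelling is kept only as a comment)::

    import psyneulink as pnl

    # before this patch:
    #   colors_input_layer = pnl.TransferMechanism(size=2, function=pnl.Linear,
    #                                              name='COLORS_INPUT')
    # after this patch:
    colors_input_layer = pnl.TransferMechanism(input_shapes=2, function=pnl.Linear,
                                               name='COLORS_INPUT')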
-colors_hidden_layer = pnl.TransferMechanism(size=2, +colors_hidden_layer = pnl.TransferMechanism( + input_shapes=2, function=psyneulink.core.components.functions.nonstateful.transferfunctions.Logistic(gain=1.0, x_0=4.0), #should be able to get same result with offset = -4.0 integrator_mode=True, noise=psyneulink.core.components.functions.nonstateful.distributionfunctions.NormalDist(mean=0, standard_deviation=unit_noise).function, integration_rate=0.1, name='COLORS HIDDEN') # words_hidden: ('RED','GREEN') -words_hidden_layer = pnl.TransferMechanism(size=2, +words_hidden_layer = pnl.TransferMechanism( + input_shapes=2, function=psyneulink.core.components.functions.nonstateful.transferfunctions.Logistic(gain=1.0, x_0=4.0), integrator_mode=True, noise=psyneulink.core.components.functions.nonstateful.distributionfunctions.NormalDist(mean=0, standard_deviation=unit_noise).function, @@ -43,7 +48,8 @@ # OUTPUT UNITS # Response layer, provide input to accumulator, responses: ('red', 'green') -response_layer = pnl.TransferMechanism(size=2, +response_layer = pnl.TransferMechanism( + input_shapes=2, function=psyneulink.core.components.functions.nonstateful.transferfunctions.Logistic, integrator_mode=True, noise=psyneulink.core.components.functions.nonstateful.distributionfunctions.NormalDist(mean=0, standard_deviation=unit_noise).function, diff --git a/Scripts/Debug/Predator-Prey Sebastian REDUCED.py b/Scripts/Debug/Predator-Prey Sebastian REDUCED.py index 15ec7e4928b..54348a7050f 100644 --- a/Scripts/Debug/Predator-Prey Sebastian REDUCED.py +++ b/Scripts/Debug/Predator-Prey Sebastian REDUCED.py @@ -36,9 +36,9 @@ def get_new_episode_flag(): # ************************************** PROCESSING MECHANISMS ******************************************************** # Perceptual Mechanisms -player_percept = ProcessingMechanism(size=2, function=GaussianDistort(), name="PLAYER PERCEPT") -predator_percept = ProcessingMechanism(size=2, function=GaussianDistort(), name="PREDATOR PERCEPT") -prey_percept = ProcessingMechanism(size=2, function=GaussianDistort(), name="PREY PERCEPT") +player_percept = ProcessingMechanism(input_shapes=2, function=GaussianDistort(), name="PLAYER PERCEPT") +predator_percept = ProcessingMechanism(input_shapes=2, function=GaussianDistort(), name="PREDATOR PERCEPT") +prey_percept = ProcessingMechanism(input_shapes=2, function=GaussianDistort(), name="PREY PERCEPT") # Mechanism used to encode trialtype from environment trial_type_input_mech = ProcessingMechanism(name="TRIAL TYPE INPUT") diff --git a/Scripts/Debug/Predator-Prey Sebastian.py b/Scripts/Debug/Predator-Prey Sebastian.py index 491cb9f5a63..711631b4c77 100644 --- a/Scripts/Debug/Predator-Prey Sebastian.py +++ b/Scripts/Debug/Predator-Prey Sebastian.py @@ -105,9 +105,9 @@ def get_optimal_action(observation): # ************************************** PROCESSING MECHANISMS ******************************************************** # Perceptual Mechanisms -player_percept = ProcessingMechanism(size=prey_len, function=GaussianDistort(), name="PLAYER PERCEPT") -predator_percept = ProcessingMechanism(size=predator_len, function=GaussianDistort(), name="PREDATOR PERCEPT") -prey_percept = ProcessingMechanism(size=prey_len, function=GaussianDistort(), name="PREY PERCEPT") +player_percept = ProcessingMechanism(input_shapes=prey_len, function=GaussianDistort(), name="PLAYER PERCEPT") +predator_percept = ProcessingMechanism(input_shapes=predator_len, function=GaussianDistort(), name="PREDATOR PERCEPT") +prey_percept = 
ProcessingMechanism(input_shapes=prey_len, function=GaussianDistort(), name="PREY PERCEPT") # Mechanism used to encode trialtype from environment trial_type_input_mech = ProcessingMechanism(name="TRIAL TYPE INPUT") diff --git a/Scripts/Debug/StabilityFlexibility.py b/Scripts/Debug/StabilityFlexibility.py index 3bc8d4bdf45..3035eeb921d 100644 --- a/Scripts/Debug/StabilityFlexibility.py +++ b/Scripts/Debug/StabilityFlexibility.py @@ -77,7 +77,7 @@ def computeAccuracy(variable): # first element is color task attendance, second element is motion task attendance inputLayer = pnl.TransferMechanism(#default_variable=[[0.0, 0.0]], - size=2, + input_shapes=2, function=pnl.Linear(slope=1, intercept=0), output_ports = [pnl.RESULT], name='Input') @@ -100,7 +100,7 @@ def computeAccuracy(variable): stimulusInfo = pnl.TransferMechanism(default_variable=[[0.0, 0.0]], - size = 2, + input_shapes= 2, function = pnl.Linear(slope=1, intercept=0), output_ports = [pnl.RESULT], name = "Stimulus Info") @@ -108,7 +108,7 @@ def computeAccuracy(variable): stimulusInfo.set_log_conditions([pnl.RESULT]) controlledElement = pnl.TransferMechanism(default_variable=[[0.0, 0.0]], - size = 2, + input_shapes= 2, function=pnl.Linear(slope=1, intercept= 0), input_ports=pnl.InputPort(combine=pnl.PRODUCT), output_ports = [pnl.RESULT], @@ -116,7 +116,8 @@ def computeAccuracy(variable): controlledElement.set_log_conditions([pnl.RESULT]) -ddmCombination = pnl.TransferMechanism(size = 1, +ddmCombination = pnl.TransferMechanism( + input_shapes= 1, function = pnl.Linear(slope=1, intercept=0), output_ports = [pnl.RESULT], name = "DDM Integrator") diff --git a/Scripts/Debug/Yotam LCA Model LLVM.py b/Scripts/Debug/Yotam LCA Model LLVM.py index a947ad0509d..bea30c52943 100644 --- a/Scripts/Debug/Yotam LCA Model LLVM.py +++ b/Scripts/Debug/Yotam LCA Model LLVM.py @@ -132,13 +132,15 @@ def get_trained_network(bipartite_graph, num_features=3, num_hidden=200, epochs= lr = learning_rate # Instantiate layers and projections - il = pnl.TransferMechanism(size=D_i, name='input') - cl = pnl.TransferMechanism(size=D_c, name='control') + il = pnl.TransferMechanism(input_shapes=D_i, name='input') + cl = pnl.TransferMechanism(input_shapes=D_c, name='control') - hl = pnl.TransferMechanism(size=D_h, name='hidden', + hl = pnl.TransferMechanism( + input_shapes=D_h, name='hidden', function=pnl.Logistic(bias=-2)) - ol = pnl.TransferMechanism(size=D_o, name='output', + ol = pnl.TransferMechanism( + input_shapes=D_o, name='output', function=pnl.Logistic(bias=-2)) pih = pnl.MappingProjection(matrix=wih) @@ -190,7 +192,8 @@ def get_trained_network(bipartite_graph, num_features=3, num_hidden=200, epochs= # Apply LCA transform (values from Sebastian's code -- supposedly taken from the original LCA paper from Marius & Jay) if attach_LCA: - lca = pnl.LCAMechanism(size=D_o, + lca = pnl.LCAMechanism( + input_shapes=D_o, leak=leak, competition=competition, self_excitation=self_excitation, @@ -251,14 +254,16 @@ def get_trained_network_multLCA(bipartite_graph, num_features=3, num_hidden=200, lr = learning_rate # Instantiate layers and projections - il = pnl.TransferMechanism(size=D_i, name='input') - cl = pnl.TransferMechanism(size=D_c, name='control') + il = pnl.TransferMechanism(input_shapes=D_i, name='input') + cl = pnl.TransferMechanism(input_shapes=D_c, name='control') - hl = pnl.TransferMechanism(size=D_h, + hl = pnl.TransferMechanism( + input_shapes=D_h, name='hidden', function=pnl.Logistic(bias=-2)) - ol = pnl.TransferMechanism(size=D_o, + ol = 
pnl.TransferMechanism( + input_shapes=D_o, name='output', function=pnl.Logistic(bias=-2)) @@ -323,7 +328,8 @@ def get_trained_network_multLCA(bipartite_graph, num_features=3, num_hidden=200, lca_matrix = get_LCA_matrix(output_dims, num_features, self_excitation, competition) - lca = pnl.RecurrentTransferMechanism(size=D_o, + lca = pnl.RecurrentTransferMechanism( + input_shapes=D_o, matrix=lca_matrix, integrator_mode=True, integrator_function=lci, @@ -339,7 +345,8 @@ def get_trained_network_multLCA(bipartite_graph, num_features=3, num_hidden=200, # Dummy to save mnet results if str(LCA_BIN_EXECUTE).startswith("LLVM"): - dummy = pnl.TransferMechanism(size=D_o, + dummy = pnl.TransferMechanism( + input_shapes=D_o, name="MNET_OUT") wrapper_composition.add_linear_processing_pathway([mnet, dummy]) diff --git a/Scripts/Debug/Yotam LCA Model.py b/Scripts/Debug/Yotam LCA Model.py index 812a49a88d5..d1fec4ab013 100644 --- a/Scripts/Debug/Yotam LCA Model.py +++ b/Scripts/Debug/Yotam LCA Model.py @@ -117,13 +117,15 @@ def get_trained_network(bipartite_graph, num_features=3, num_hidden=200, epochs= lr = learning_rate # Instantiate layers and projections - il = pnl.TransferMechanism(size=D_i, name='input') - cl = pnl.TransferMechanism(size=D_c, name='control') + il = pnl.TransferMechanism(input_shapes=D_i, name='input') + cl = pnl.TransferMechanism(input_shapes=D_c, name='control') - hl = pnl.TransferMechanism(size=D_h, name='hidden', + hl = pnl.TransferMechanism( + input_shapes=D_h, name='hidden', function=pnl.Logistic(bias=-2)) - ol = pnl.TransferMechanism(size=D_o, name='output', + ol = pnl.TransferMechanism( + input_shapes=D_o, name='output', function=pnl.Logistic(bias=-2)) pih = pnl.MappingProjection(matrix=wih) @@ -174,7 +176,8 @@ def get_trained_network(bipartite_graph, num_features=3, num_hidden=200, epochs= # Apply LCA transform (values from Sebastian's code -- supposedly taken from the original LCA paper from Marius & Jay) if attach_LCA: - lca = pnl.LCAMechanism(size=D_o, + lca = pnl.LCAMechanism( + input_shapes=D_o, leak=leak, competition=competition, self_excitation=self_excitation, @@ -237,14 +240,16 @@ def get_trained_network_multLCA(bipartite_graph, num_features=3, num_hidden=200, lr = learning_rate # Instantiate layers and projections - il = pnl.TransferMechanism(size=D_i, name='input') - cl = pnl.TransferMechanism(size=D_c, name='control') + il = pnl.TransferMechanism(input_shapes=D_i, name='input') + cl = pnl.TransferMechanism(input_shapes=D_c, name='control') - hl = pnl.TransferMechanism(size=D_h, + hl = pnl.TransferMechanism( + input_shapes=D_h, name='hidden', function=pnl.Logistic(bias=-2)) - ol = pnl.TransferMechanism(size=D_o, + ol = pnl.TransferMechanism( + input_shapes=D_o, name='output', function=pnl.Logistic(bias=-2)) @@ -304,7 +309,8 @@ def get_trained_network_multLCA(bipartite_graph, num_features=3, num_hidden=200, lca_matrix = get_LCA_matrix(output_dims, num_features, self_excitation, competition) - lca = pnl.RecurrentTransferMechanism(size=D_o, + lca = pnl.RecurrentTransferMechanism( + input_shapes=D_o, matrix=lca_matrix, integrator_mode=True, integrator_function=lci, diff --git a/Scripts/Debug/bryant_lca_with_termination.py b/Scripts/Debug/bryant_lca_with_termination.py index 610e2a76769..0399473ac29 100644 --- a/Scripts/Debug/bryant_lca_with_termination.py +++ b/Scripts/Debug/bryant_lca_with_termination.py @@ -1,19 +1,19 @@ import psyneulink as pnl cueInterval = pnl.TransferMechanism(default_variable=[[0.0]], - size=1, + input_shapes=1, function=pnl.Linear(slope=1, 
intercept=0), output_ports=[pnl.RESULT], name='Cue-Stimulus Interval') taskLayer = pnl.TransferMechanism(default_variable=[[0.0, 0.0]], - size=2, + input_shapes=2, function=pnl.Linear(slope=1, intercept=0), output_ports=[pnl.RESULT], name='Task Input [I1, I2]') activation = pnl.LCAMechanism(default_variable=[[0.0, 0.0]], - size=2, + input_shapes=2, function=pnl.Logistic(gain=1), leak=.5, competition=2, diff --git a/Scripts/Debug/laura_test_no_noise_stroop_09_11_2018.py b/Scripts/Debug/laura_test_no_noise_stroop_09_11_2018.py index c8c23a59b93..d234874b81e 100644 --- a/Scripts/Debug/laura_test_no_noise_stroop_09_11_2018.py +++ b/Scripts/Debug/laura_test_no_noise_stroop_09_11_2018.py @@ -8,16 +8,16 @@ # # INPUT UNITS # # # colors: ('red', 'green'), words: ('RED','GREEN') -# colors_input_layer = pnl.TransferMechanism(size=2, +# colors_input_layer = pnl.TransferMechanism(input_shapes=2, # function=pnl.Linear, # name='COLORS_INPUT') # -# words_input_layer = pnl.TransferMechanism(size=2, +# words_input_layer = pnl.TransferMechanism(input_shapes=2, # function=pnl.Linear, # name='WORDS_INPUT') # # # Task layer, tasks: ('name the color', 'read the word') -# task_layer = pnl.TransferMechanism(size=2, +# task_layer = pnl.TransferMechanism(input_shapes=2, # function=pnl.Linear, # name='TASK') # @@ -28,7 +28,7 @@ # # randomly distributed noise to the net input # # time averaging = integration_rate = 0.1 # unit_noise = 0.005 -# # colors_hidden_layer = pnl.TransferMechanism(size=2, +# # colors_hidden_layer = pnl.TransferMechanism(input_shapes=2, # # function=pnl.Logistic(gain=1.0, bias=4.0), # # # should be able to get same result with offset = -4.0 # # integrator_mode=True, @@ -36,7 +36,7 @@ # # integration_rate=0.1, # # name='COLORS HIDDEN') # -# colors_hidden_layer = pnl.TransferMechanism(size=2, +# colors_hidden_layer = pnl.TransferMechanism(input_shapes=2, # function=pnl.Logistic(gain=1.0, x_0=4.0), # # should be able to get same result with offset = -4.0 # integrator_mode=True, @@ -44,13 +44,13 @@ # integration_rate=0.1, # name='COLORS HIDDEN') # # words_hidden: ('RED','GREEN') -# # words_hidden_layer = pnl.TransferMechanism(size=2, +# # words_hidden_layer = pnl.TransferMechanism(input_shapes=2, # # function=pnl.Logistic(gain=1.0, bias=4.0), # # integrator_mode=True, # # noise=pnl.NormalDist(mean=0, standard_deviation=unit_noise).function, # # integration_rate=0.1, # # name='WORDS HIDDEN') -# words_hidden_layer = pnl.TransferMechanism(size=2, +# words_hidden_layer = pnl.TransferMechanism(input_shapes=2, # function=pnl.Logistic(gain=1.0, x_0=4.0), # integrator_mode=True, # noise=0.0, @@ -62,13 +62,13 @@ # # Response layer, provide input to accumulator, responses: ('red', 'green') # # time averaging = tau = 0.1 # # randomly distributed noise to the net input -# # response_layer = pnl.TransferMechanism(size=2, +# # response_layer = pnl.TransferMechanism(input_shapes=2, # # function=pnl.Logistic, # # name='RESPONSE', # # integrator_mode=True, # # noise=pnl.NormalDist(mean=0, standard_deviation=unit_noise).function, # # integration_rate=0.1) -# response_layer = pnl.TransferMechanism(size=2, +# response_layer = pnl.TransferMechanism(input_shapes=2, # function=pnl.Logistic, # name='RESPONSE', # integrator_mode=True, @@ -295,7 +295,7 @@ # dataframes = [] # first = True # for log_layer in mechanism_list: -# layer_size = log_layer.size[0] +# layer_size = log_layer.input_shapes[0] # log_dict = log_layer.log.nparray_dictionary() # # # Extract out all keys, treating value specially since it's already an np 
array diff --git a/Scripts/Debug/lca/pytorch_lca.py b/Scripts/Debug/lca/pytorch_lca.py index cb70c6639ad..56e4ebbdbd4 100644 --- a/Scripts/Debug/lca/pytorch_lca.py +++ b/Scripts/Debug/lca/pytorch_lca.py @@ -275,7 +275,7 @@ def make_pnl_lca( lca = pnl.LCAMechanism( default_variable=[[0.0 for _ in range(num_lca_dim)]], - size=num_lca_dim, + input_shapes=num_lca_dim, threshold=threshold, function=activation_function, leak=leak, diff --git a/Scripts/Debug/markus_test_umemoto.py b/Scripts/Debug/markus_test_umemoto.py index 973c7d1b3c6..03fd510c0cf 100644 --- a/Scripts/Debug/markus_test_umemoto.py +++ b/Scripts/Debug/markus_test_umemoto.py @@ -99,7 +99,8 @@ # Decision.loggable_items # Outcome Mechanisms: -Reward = pnl.TransferMechanism(size = 1, +Reward = pnl.TransferMechanism( + input_shapes= 1, name='Reward') # Processes: diff --git a/Scripts/Debug/predator_prey_opt/predator_prey_dmt.py b/Scripts/Debug/predator_prey_opt/predator_prey_dmt.py index 9d862001c2e..9977211a108 100644 --- a/Scripts/Debug/predator_prey_opt/predator_prey_dmt.py +++ b/Scripts/Debug/predator_prey_opt/predator_prey_dmt.py @@ -134,9 +134,11 @@ def get_new_episode_flag(): # ************************************** PROCESSING MECHANISMS ******************************************************** # Perceptual Mechanisms - self.player_percept = ProcessingMechanism(size=prey_len, function=GaussianDistort(), name="PLAYER PERCEPT") - self.predator_percept = ProcessingMechanism(size=predator_len, function=GaussianDistort(), name="PREDATOR PERCEPT") - self.prey_percept = ProcessingMechanism(size=prey_len, function=GaussianDistort(), name="PREY PERCEPT") + self.player_percept = ProcessingMechanism( + input_shapes=prey_len, function=GaussianDistort(), name="PLAYER PERCEPT") + self.predator_percept = ProcessingMechanism( + input_shapes=predator_len, function=GaussianDistort(), name="PREDATOR PERCEPT") + self.prey_percept = ProcessingMechanism(input_shapes=prey_len, function=GaussianDistort(), name="PREY PERCEPT") # Mechanism used to encode trialtype from environment self.prey_pred_trial_input_mech = ProcessingMechanism(name="PREY PREDATOR TRIAL") diff --git a/Scripts/Debug/stability_flexibility/stability_flexibility.py b/Scripts/Debug/stability_flexibility/stability_flexibility.py index 38420d88380..4d70590d29d 100644 --- a/Scripts/Debug/stability_flexibility/stability_flexibility.py +++ b/Scripts/Debug/stability_flexibility/stability_flexibility.py @@ -118,7 +118,7 @@ def make_stab_flex( # Task Layer: [Color, Motion] {0, 1} Mutually Exclusive # Origin Node taskLayer = pnl.TransferMechanism( - size=2, + input_shapes=2, function=pnl.Linear(slope=1, intercept=0), output_ports=[pnl.RESULT], name="Task Input [I1, I2]", @@ -127,7 +127,7 @@ def make_stab_flex( # Stimulus Layer: [Color Stimulus, Motion Stimulus] # Origin Node stimulusInfo = pnl.TransferMechanism( - size=2, + input_shapes=2, function=pnl.Linear(slope=1, intercept=0), output_ports=[pnl.RESULT], name="Stimulus Input [S1, S2]", @@ -136,7 +136,7 @@ def make_stab_flex( # Cue-To-Stimulus Interval Layer # Origin Node cueInterval = pnl.TransferMechanism( - size=1, + input_shapes=1, function=pnl.Linear(slope=1, intercept=0), output_ports=[pnl.RESULT], name="Cue-Stimulus Interval", @@ -145,7 +145,7 @@ def make_stab_flex( # Correct Response Info # Origin Node correctResponseInfo = pnl.TransferMechanism( - size=1, + input_shapes=1, function=pnl.Linear(slope=1, intercept=0), output_ports=[pnl.RESULT], name="Correct Response Info", @@ -153,7 +153,7 @@ def make_stab_flex( # Control Module Layer: 
[Color Activation, Motion Activation] controlModule = pnl.LCAMechanism( - size=2, + input_shapes=2, function=pnl.Logistic(gain=GAIN), leak=LEAK, competition=COMP, @@ -174,7 +174,7 @@ def make_stab_flex( # Hadamard product of controlModule and Stimulus Information nonAutomaticComponent = pnl.TransferMechanism( - size=2, + input_shapes=2, function=pnl.Linear(slope=1, intercept=0), input_ports=pnl.InputPort(combine=pnl.PRODUCT), output_ports=[pnl.RESULT], @@ -183,7 +183,7 @@ def make_stab_flex( # Multiply Stimulus Input by the automaticity weight congruenceWeighting = pnl.TransferMechanism( - size=2, + input_shapes=2, function=pnl.Linear(slope=AUTOMATICITY, intercept=0), output_ports=[pnl.RESULT], name="Automaticity-weighted Stimulus Input [w*S1, w*S2]", @@ -191,7 +191,7 @@ def make_stab_flex( # Summation of nonAutomatic and Automatic Components ddmCombination = pnl.TransferMechanism( - size=1, + input_shapes=1, function=pnl.Linear(slope=1, intercept=0), input_ports=pnl.InputPort(combine=pnl.SUM), output_ports=[pnl.RESULT], @@ -200,7 +200,7 @@ def make_stab_flex( # Ensure upper boundary of DDM is always correct response by multiplying DDM input by correctResponseInfo ddmRecodeDrift = pnl.TransferMechanism( - size=1, + input_shapes=1, function=pnl.Linear(slope=1, intercept=0), input_ports=pnl.InputPort(combine=pnl.PRODUCT), output_ports=[pnl.RESULT], @@ -209,7 +209,7 @@ def make_stab_flex( # Scale DDM inputs ddmInputScale = pnl.TransferMechanism( - size=1, + input_shapes=1, function=pnl.Linear(slope=SCALE, intercept=0), output_ports=[pnl.RESULT], name="Scaled DDM Input", @@ -284,10 +284,10 @@ def make_stab_flex( # Hot-fix currently necessary to allow control module and DDM to execute in parallel in compiled mode # We need two gates in order to output both values (decision and response) from the ddm - decisionGate = pnl.ProcessingMechanism(size=1, name="DECISION_GATE") + decisionGate = pnl.ProcessingMechanism(input_shapes=1, name="DECISION_GATE") stabilityFlexibility.add_node(decisionGate) - responseGate = pnl.ProcessingMechanism(size=1, name="RESPONSE_GATE") + responseGate = pnl.ProcessingMechanism(input_shapes=1, name="RESPONSE_GATE") stabilityFlexibility.add_node(responseGate) stabilityFlexibility.add_projection( diff --git a/Scripts/Debug/stability_flexibility/stability_flexibility_nn.py b/Scripts/Debug/stability_flexibility/stability_flexibility_nn.py index c0ae1c70f64..f564f6b2bfb 100644 --- a/Scripts/Debug/stability_flexibility/stability_flexibility_nn.py +++ b/Scripts/Debug/stability_flexibility/stability_flexibility_nn.py @@ -105,24 +105,24 @@ def make_stab_flex( # Task Input: [Parity, Magnitude] {0, 1} Mutually Exclusive # Origin Node - taskInput = pnl.TransferMechanism(name="Task Input", size=2) # Note default function is linear + taskInput = pnl.TransferMechanism(name="Task Input", input_shapes=2) # Note default function is linear # Stimulus Input: [Odd, Even, Small, Large] {0, 1} # Origin Node - stimulusInput = pnl.TransferMechanism(name="Stimulus Input", size=4) + stimulusInput = pnl.TransferMechanism(name="Stimulus Input", input_shapes=4) # Cue-To-Stimulus Interval Input # Origin Node - cueInterval = pnl.TransferMechanism(name="Cue-Stimulus Interval", size=1) + cueInterval = pnl.TransferMechanism(name="Cue-Stimulus Interval", input_shapes=1) # Correct Response Info {1, -1} # Origin Node - correctResponseInfo = pnl.TransferMechanism(name="Correct Response Info", size=1) + correctResponseInfo = pnl.TransferMechanism(name="Correct Response Info", input_shapes=1) # Control Units: 
[Parity Activation, Magnitude Activation] controlModule = pnl.LCAMechanism( name="Task Activations [C1, C2]", - size=2, + input_shapes=2, function=pnl.Logistic(gain=GAIN), leak=LEAK, competition=COMP, @@ -143,14 +143,14 @@ def make_stab_flex( # Stimulus Input to Hidden Weighting stimulusWeighting = pnl.TransferMechanism( name="Stimulus Input to Hidden Weighting", - size=4, + input_shapes=4, function=pnl.Linear(slope=STIM_HIDDEN_WT, intercept=0), ) # Hidden Units [Odd, Even, Small, Large] hiddenLayer = pnl.TransferMechanism( name="Hidden Units", - size=4, + input_shapes=4, function=pnl.Logistic(gain=1, bias=-4), input_ports=pnl.InputPort(combine=pnl.SUM) ) @@ -158,14 +158,14 @@ def make_stab_flex( # Hidden to Response Weighting hiddenWeighting = pnl.TransferMechanism( name="Hidden Unit to Response Weighting", - size=4, + input_shapes=4, function=pnl.Linear(slope=HIDDEN_RESP_WT, intercept=0) ) # Response Units [Left, Right] responseLayer = pnl.TransferMechanism( name="Response Units", - size=2, + input_shapes=2, function=pnl.Logistic(gain=1), input_ports=pnl.InputPort(combine=pnl.SUM) ) @@ -173,14 +173,14 @@ def make_stab_flex( # Difference in activation of response units ddmCombination = pnl.TransferMechanism( name="Drift", - size=1, + input_shapes=1, input_ports=pnl.InputPort(combine=pnl.SUM) ) # Ensure upper boundary of DDM is always correct response by multiplying DDM input by correctResponseInfo ddmRecodeDrift = pnl.TransferMechanism( name="Recoded Drift = Drift * correctResponseInfo", - size=1, + input_shapes=1, input_ports=pnl.InputPort(combine=pnl.PRODUCT) ) @@ -270,10 +270,10 @@ def make_stab_flex( # Hot-fix currently necessary to allow control module and DDM to execute in parallel in compiled mode # We need two gates in order to output both values (decision and response) from the ddm - decisionGate = pnl.ProcessingMechanism(size=1, name="DECISION_GATE") + decisionGate = pnl.ProcessingMechanism(input_shapes=1, name="DECISION_GATE") stabilityFlexibility.add_node(decisionGate) - responseGate = pnl.ProcessingMechanism(size=1, name="RESPONSE_GATE") + responseGate = pnl.ProcessingMechanism(input_shapes=1, name="RESPONSE_GATE") stabilityFlexibility.add_node(responseGate) stabilityFlexibility.add_projection( diff --git a/Scripts/Debug/stability_flexibility_simple.py b/Scripts/Debug/stability_flexibility_simple.py index d8a6ed9d30b..ade1f99d491 100644 --- a/Scripts/Debug/stability_flexibility_simple.py +++ b/Scripts/Debug/stability_flexibility_simple.py @@ -71,7 +71,7 @@ def computeAccuracy(variable): # first element is color task attendance, second element is motion task attendance inputLayer = pnl.TransferMechanism( # default_variable=[[0.0, 0.0]], - size=2, + input_shapes=2, function=pnl.Linear(slope=1, intercept=0), output_ports=[pnl.RESULT], name='Input') @@ -93,7 +93,7 @@ def computeAccuracy(variable): activation.set_log_conditions([pnl.RESULT, "mod_gain"]) stimulusInfo = pnl.TransferMechanism(default_variable=[[0.0, 0.0]], - size=2, + input_shapes=2, function=pnl.Linear(slope=1, intercept=0), output_ports=[pnl.RESULT], name="Stimulus Info") @@ -101,7 +101,7 @@ def computeAccuracy(variable): stimulusInfo.set_log_conditions([pnl.RESULT]) controlledElement = pnl.TransferMechanism(default_variable=[[0.0, 0.0]], - size=2, + input_shapes=2, function=pnl.Linear(slope=1, intercept=0), input_ports=pnl.InputPort(combine=pnl.PRODUCT), output_ports=[pnl.RESULT], @@ -109,7 +109,8 @@ def computeAccuracy(variable): controlledElement.set_log_conditions([pnl.RESULT]) -ddmCombination = 
pnl.TransferMechanism(size=1, +ddmCombination = pnl.TransferMechanism( + input_shapes=1, function=pnl.Linear(slope=1, intercept=0), output_ports=[pnl.RESULT], name="DDM Integrator") diff --git a/Scripts/Examples/Basics And Primer/Stroop Model - Basic.py b/Scripts/Examples/Basics And Primer/Stroop Model - Basic.py index 234bc6b36d9..948ce72b10e 100644 --- a/Scripts/Examples/Basics And Primer/Stroop Model - Basic.py +++ b/Scripts/Examples/Basics And Primer/Stroop Model - Basic.py @@ -2,22 +2,22 @@ import numpy as np # Construct the color naming pathway: -color_input = ProcessingMechanism(name='COLOR INPUT', size=2) # Note: default function is Linear +color_input = ProcessingMechanism(name='COLOR INPUT', input_shapes=2) # Note: default function is Linear color_input_to_hidden_wts = np.array([[1, -1], [-1, 1]]) -color_hidden = ProcessingMechanism(name='COLOR HIDDEN', size=2, function=Logistic(bias=-4)) +color_hidden = ProcessingMechanism(name='COLOR HIDDEN', input_shapes=2, function=Logistic(bias=-4)) color_hidden_to_output_wts = np.array([[1, -1], [-1, 1]]) -output = ProcessingMechanism(name='OUTPUT', size=2, function=Logistic) +output = ProcessingMechanism(name='OUTPUT', input_shapes=2, function=Logistic) color_pathway = [color_input, color_input_to_hidden_wts, color_hidden, color_hidden_to_output_wts, output] # Construct the word reading pathway (using the same output_layer) -word_input = ProcessingMechanism(name='WORD INPUT', size=2) +word_input = ProcessingMechanism(name='WORD INPUT', input_shapes=2) word_input_to_hidden_wts = np.array([[2, -2], [-2, 2]]) -word_hidden = ProcessingMechanism(name='WORD HIDDEN', size=2, function=Logistic(bias=-4)) +word_hidden = ProcessingMechanism(name='WORD HIDDEN', input_shapes=2, function=Logistic(bias=-4)) word_hidden_to_output_wts = np.array([[2, -2], [-2, 2]]) word_pathway = [word_input, word_input_to_hidden_wts, word_hidden, word_hidden_to_output_wts, output] # Construct the task specification pathways -task_input = ProcessingMechanism(name='TASK INPUT', size=2) +task_input = ProcessingMechanism(name='TASK INPUT', input_shapes=2) task_color_wts = np.array([[4,4],[0,0]]) task_word_wts = np.array([[0,0],[4,4]]) task_color_pathway = [task_input, task_color_wts, color_hidden] diff --git a/Scripts/Examples/Basics And Primer/Stroop Model - Conflict Monitoring.py b/Scripts/Examples/Basics And Primer/Stroop Model - Conflict Monitoring.py index 7b24cee5b19..f4e9656118b 100644 --- a/Scripts/Examples/Basics And Primer/Stroop Model - Conflict Monitoring.py +++ b/Scripts/Examples/Basics And Primer/Stroop Model - Conflict Monitoring.py @@ -4,23 +4,23 @@ # CONSTRUCT THE MODEL *********************************** # Construct the color naming pathway: -color_input = ProcessingMechanism(name='COLOR INPUT', size=2) # Note: default function is Linear +color_input = ProcessingMechanism(name='COLOR INPUT', input_shapes=2) # Note: default function is Linear color_input_to_hidden_wts = np.array([[2, -2], [-2, 2]]) -color_hidden = ProcessingMechanism(name='COLOR HIDDEN', size=2, function=Logistic(bias=-4)) +color_hidden = ProcessingMechanism(name='COLOR HIDDEN', input_shapes=2, function=Logistic(bias=-4)) color_hidden_to_output_wts = np.array([[2, -2], [-2, 2]]) -output = ProcessingMechanism(name='OUTPUT', size=2, function=Logistic) +output = ProcessingMechanism(name='OUTPUT', input_shapes=2, function=Logistic) color_pathway = [color_input, color_input_to_hidden_wts, color_hidden, color_hidden_to_output_wts, output] # Construct the word reading pathway (using the same 
output_layer) -word_input = ProcessingMechanism(name='WORD INPUT', size=2) +word_input = ProcessingMechanism(name='WORD INPUT', input_shapes=2) word_input_to_hidden_wts = np.array([[3, -3], [-3, 3]]) -word_hidden = ProcessingMechanism(name='WORD HIDDEN', size=2, function=Logistic(bias=-4)) +word_hidden = ProcessingMechanism(name='WORD HIDDEN', input_shapes=2, function=Logistic(bias=-4)) word_hidden_to_output_wts = np.array([[3, -3], [-3, 3]]) word_pathway = [word_input, word_input_to_hidden_wts, word_hidden, word_hidden_to_output_wts, output] # Construct the task specification pathways -task_input = ProcessingMechanism(name='TASK INPUT', size=2) -task = LCAMechanism(name='TASK', size=2, initial_value=[0.5,0.5]) +task_input = ProcessingMechanism(name='TASK INPUT', input_shapes=2) +task = LCAMechanism(name='TASK', input_shapes=2, initial_value=[0.5, 0.5]) task_color_wts = np.array([[4,4],[0,0]]) task_word_wts = np.array([[0,0],[4,4]]) task_color_pathway = [task_input, task, task_color_wts, color_hidden] @@ -33,7 +33,8 @@ # Construct control mechanism control = ControlMechanism(name='CONTROL', objective_mechanism=ObjectiveMechanism(name='Conflict Monitor', - function=Energy(size=2, + function=Energy( + input_shapes=2, matrix=[[0,-2.5],[-2.5,0]]), monitor=output), default_allocation=[0.5], diff --git a/Scripts/Examples/Basics And Primer/XOR Model b/Scripts/Examples/Basics And Primer/XOR Model index f67a654a839..b85e9863bf9 100644 --- a/Scripts/Examples/Basics And Primer/XOR Model +++ b/Scripts/Examples/Basics And Primer/XOR Model @@ -1,9 +1,9 @@ from psyneulink import * import numpy as np -input_mech = TransferMechanism(name='INPUT', size=2) -hidden_mech = TransferMechanism(name='HIDDEN', size=10, function=Logistic) -output_mech = TransferMechanism(name='OUTPUT', size=1, function=Logistic) +input_mech = TransferMechanism(name='INPUT', input_shapes=2) +hidden_mech = TransferMechanism(name='HIDDEN', input_shapes=10, function=Logistic) +output_mech = TransferMechanism(name='OUTPUT', input_shapes=1, function=Logistic) input_to_hidden_projection = MappingProjection(name='INPUT_TO_HIDDEN', matrix=np.random.rand(2,10), sender=input_mech, diff --git a/Scripts/Examples/Botvinick Model Composition.py b/Scripts/Examples/Botvinick Model Composition.py index 0c0c4540f4b..b188e00cee0 100644 --- a/Scripts/Examples/Botvinick Model Composition.py +++ b/Scripts/Examples/Botvinick Model Composition.py @@ -2,20 +2,24 @@ import numpy as np -colors_input_layer = pnl.TransferMechanism(size=3, +colors_input_layer = pnl.TransferMechanism( + input_shapes=3, function=pnl.Linear, name='COLORS_INPUT') -words_input_layer = pnl.TransferMechanism(size=3, +words_input_layer = pnl.TransferMechanism( + input_shapes=3, function=pnl.Linear, name='WORDS_INPUT') -task_input_layer = pnl.TransferMechanism(size=2, +task_input_layer = pnl.TransferMechanism( + input_shapes=2, function=pnl.Linear, name='TASK_INPUT') # Task layer, tasks: ('name the color', 'read the word') -task_layer = pnl.RecurrentTransferMechanism(size=2, +task_layer = pnl.RecurrentTransferMechanism( + input_shapes=2, function=pnl.Logistic(), hetero=-2, integrator_mode=True, @@ -24,14 +28,16 @@ # Hidden layer # colors: ('red','green', 'neutral') words: ('RED','GREEN', 'NEUTRAL') -colors_hidden_layer = pnl.RecurrentTransferMechanism(size=3, +colors_hidden_layer = pnl.RecurrentTransferMechanism( + input_shapes=3, function=pnl.Logistic(x_0=4.0), # bias 4.0 is -4.0 in the paper see Docs for description integrator_mode=True, hetero=-2, integration_rate=0.01, # cohen-huston 
text says 0.01 name='COLORS_HIDDEN') -words_hidden_layer = pnl.RecurrentTransferMechanism(size=3, +words_hidden_layer = pnl.RecurrentTransferMechanism( + input_shapes=3, function=pnl.Logistic(x_0=4.0), integrator_mode=True, hetero=-2, @@ -39,7 +45,8 @@ name='WORDS_HIDDEN') # Response layer, responses: ('red', 'green') -response_layer = pnl.RecurrentTransferMechanism(size=2, +response_layer = pnl.RecurrentTransferMechanism( + input_shapes=2, function=pnl.Logistic(), hetero=-2.0, integrator_mode=True, diff --git a/Scripts/Examples/Gating-Mechanism. with UDF.py b/Scripts/Examples/Gating-Mechanism. with UDF.py index 91d51d3fc83..a7c22e3a94f 100644 --- a/Scripts/Examples/Gating-Mechanism. with UDF.py +++ b/Scripts/Examples/Gating-Mechanism. with UDF.py @@ -69,7 +69,7 @@ def my_sinusoidal_fct(input, Gating_Mechanism = pnl.GatingMechanism( # default_gating_allocation=0.0, - size=[1], + input_shapes=[1], gating_signals=[ # Output_Layer Output_Layer.output_port, diff --git a/Scripts/Examples/Gilbert_Shallice_Composition_Model.py b/Scripts/Examples/Gilbert_Shallice_Composition_Model.py index 285e758b7f4..babe1d1db90 100644 --- a/Scripts/Examples/Gilbert_Shallice_Composition_Model.py +++ b/Scripts/Examples/Gilbert_Shallice_Composition_Model.py @@ -5,15 +5,18 @@ ### LAYERS -WORD_INPUT_LAYER = pnl.TransferMechanism(size = 3, +WORD_INPUT_LAYER = pnl.TransferMechanism( + input_shapes= 3, function=pnl.Linear, name='WORD INPUT LAYER') -COLOR_INPUT_LAYER = pnl.TransferMechanism(size = 3, +COLOR_INPUT_LAYER = pnl.TransferMechanism( + input_shapes= 3, function=pnl.Linear, name='COLOR INPUT LAYER') -WORD_OUTPUT_LAYER = pnl.IntegratorMechanism(size = 3, +WORD_OUTPUT_LAYER = pnl.IntegratorMechanism( + input_shapes= 3, # auto= 0.0, # hetero= -2.0, function= pnl.InteractiveActivationIntegrator(decay= 0.0015, rest=-6), @@ -21,7 +24,8 @@ WORD_OUTPUT_LAYER.set_log_conditions('value') -COLOR_OUTPUT_LAYER = pnl.IntegratorMechanism(size = 3, +COLOR_OUTPUT_LAYER = pnl.IntegratorMechanism( + input_shapes= 3, # auto= 0.0, # hetero= -2.0, function= pnl.InteractiveActivationIntegrator(decay= 0.0015, rest=-6, ), @@ -31,18 +35,21 @@ COLOR_OUTPUT_LAYER.set_log_conditions('value') -TASK_DEMAND_LAYER = pnl.IntegratorMechanism(size = 2, +TASK_DEMAND_LAYER = pnl.IntegratorMechanism( + input_shapes= 2, # auto= 0.0, # hetero= -2.0, function= pnl.InteractiveActivationIntegrator(decay= 0.0015, max_val=1, min_val= 1, rest= -4), name='TASK DEMAND LAYER') -WORD_RECURRENT_LAYER = pnl.TransferMechanism(size = 3, +WORD_RECURRENT_LAYER = pnl.TransferMechanism( + input_shapes= 3, function=pnl.Linear, name = 'WORD RECURRENT LAYER') -COLOR_RECURRENT_LAYER = pnl.TransferMechanism(size = 3, +COLOR_RECURRENT_LAYER = pnl.TransferMechanism( + input_shapes= 3, function=pnl.Linear, name = 'COLOR RECURRENT LAYER') diff --git a/Scripts/Examples/Lena Rumelhart script.py b/Scripts/Examples/Lena Rumelhart script.py index e7b78640f86..6d837743148 100644 --- a/Scripts/Examples/Lena Rumelhart script.py +++ b/Scripts/Examples/Lena Rumelhart script.py @@ -79,32 +79,32 @@ def gen_input_vals(nouns, relations): ) h1 = pnl.TransferMechanism(name="hidden_nouns", - size=9, + input_shapes=9, function=psyneulink.core.components.functions.nonstateful.transferfunctions.Logistic() ) h2 = pnl.TransferMechanism(name="hidden_mixed", - size=16, + input_shapes=16, function=psyneulink.core.components.functions.nonstateful.transferfunctions.Logistic() ) out_sig_I = pnl.TransferMechanism(name="sig_outs_I", - size=len(nouns), + input_shapes=len(nouns), 
function=psyneulink.core.components.functions.nonstateful.transferfunctions.Logistic() ) out_sig_is = pnl.TransferMechanism(name="sig_outs_is", - size=len(is_list), + input_shapes=len(is_list), function=psyneulink.core.components.functions.nonstateful.transferfunctions.Logistic() ) out_sig_has = pnl.TransferMechanism(name="sig_outs_has", - size=len(has_list), + input_shapes=len(has_list), function=psyneulink.core.components.functions.nonstateful.transferfunctions.Logistic() ) out_sig_can = pnl.TransferMechanism(name="sig_outs_can", - size=len(can_list), + input_shapes=len(can_list), function=psyneulink.core.components.functions.nonstateful.transferfunctions.Logistic() ) diff --git a/Scripts/Examples/RL-DDM.py b/Scripts/Examples/RL-DDM.py index 254dfecbe29..2e11215c490 100644 --- a/Scripts/Examples/RL-DDM.py +++ b/Scripts/Examples/RL-DDM.py @@ -11,7 +11,7 @@ import psyneulink.core.components.functions.nonstateful.learningfunctions input_layer = pnl.TransferMechanism( - size=2, + input_shapes=2, name='Input Layer' ) diff --git a/Scripts/Examples/Rumelhart Semantic Network.py b/Scripts/Examples/Rumelhart Semantic Network.py index 2fcf81b1c8e..7eab228f8bc 100644 --- a/Scripts/Examples/Rumelhart Semantic Network.py +++ b/Scripts/Examples/Rumelhart Semantic Network.py @@ -19,14 +19,14 @@ # Representation_Input (REP_IN) # Construct Mechanisms -rep_in = TransferMechanism(size=10, name='REP_IN') -rel_in = TransferMechanism(size=11, name='REL_IN') -rep_hidden = TransferMechanism(size=4, function=Logistic, name='REP_HIDDEN') -rel_hidden = TransferMechanism(size=5, function=Logistic, name='REL_HIDDEN') -rep_out = TransferMechanism(size=10, function=Logistic, name='REP_OUT') -prop_out = TransferMechanism(size=12, function=Logistic, name='PROP_OUT') -qual_out = TransferMechanism(size=13, function=Logistic, name='QUAL_OUT') -act_out = TransferMechanism(size=14, function=Logistic, name='ACT_OUT') +rep_in = TransferMechanism(input_shapes=10, name='REP_IN') +rel_in = TransferMechanism(input_shapes=11, name='REL_IN') +rep_hidden = TransferMechanism(input_shapes=4, function=Logistic, name='REP_HIDDEN') +rel_hidden = TransferMechanism(input_shapes=5, function=Logistic, name='REL_HIDDEN') +rep_out = TransferMechanism(input_shapes=10, function=Logistic, name='REP_OUT') +prop_out = TransferMechanism(input_shapes=12, function=Logistic, name='PROP_OUT') +qual_out = TransferMechanism(input_shapes=13, function=Logistic, name='QUAL_OUT') +act_out = TransferMechanism(input_shapes=14, function=Logistic, name='ACT_OUT') # Construct Composition comp = Composition(name='Rumelhart Semantic Network') diff --git a/Scripts/Examples/StabilityFlexibility.py b/Scripts/Examples/StabilityFlexibility.py index 8de929a983f..49a9ed63d50 100644 --- a/Scripts/Examples/StabilityFlexibility.py +++ b/Scripts/Examples/StabilityFlexibility.py @@ -78,7 +78,7 @@ def computeAccuracy(variable): # first element is color task attendance, second element is motion task attendance inputLayer = pnl.TransferMechanism(#default_variable=[[0.0, 0.0]], - size=2, + input_shapes=2, function=pnl.Linear(slope=1, intercept=0), output_ports = [pnl.RESULT], name='Input') @@ -101,7 +101,7 @@ def computeAccuracy(variable): stimulusInfo = pnl.TransferMechanism(default_variable=[[0.0, 0.0]], - size = 2, + input_shapes= 2, function = pnl.Linear(slope=1, intercept=0), output_ports = [pnl.RESULT], name = "Stimulus Info") @@ -109,7 +109,7 @@ def computeAccuracy(variable): stimulusInfo.set_log_conditions([pnl.RESULT]) controlledElement = 
pnl.TransferMechanism(default_variable=[[0.0, 0.0]], - size = 2, + input_shapes= 2, function=pnl.Linear(slope=1, intercept= 0), input_ports=pnl.InputPort(combine=pnl.PRODUCT), output_ports = [pnl.RESULT], @@ -117,7 +117,8 @@ def computeAccuracy(variable): controlledElement.set_log_conditions([pnl.RESULT]) -ddmCombination = pnl.TransferMechanism(size = 1, +ddmCombination = pnl.TransferMechanism( + input_shapes= 1, function = pnl.Linear(slope=1, intercept=0), output_ports = [pnl.RESULT], name = "DDM Integrator") diff --git a/Scripts/Examples/Stroop Model.py b/Scripts/Examples/Stroop Model.py index 982fb67cec1..720b38efc0c 100644 --- a/Scripts/Examples/Stroop Model.py +++ b/Scripts/Examples/Stroop Model.py @@ -4,23 +4,23 @@ # CONSTRUCT THE MODEL *********************************** # Construct the color naming pathway: -color_input = ProcessingMechanism(name='COLOR INPUT', size=2) # Note: default function is Linear +color_input = ProcessingMechanism(name='COLOR INPUT', input_shapes=2) # Note: default function is Linear color_input_to_hidden_wts = np.array([[2, -2], [-2, 2]]) -color_hidden = ProcessingMechanism(name='COLOR HIDDEN', size=2, function=Logistic(bias=-4)) +color_hidden = ProcessingMechanism(name='COLOR HIDDEN', input_shapes=2, function=Logistic(bias=-4)) color_hidden_to_output_wts = np.array([[2, -2], [-2, 2]]) -output = ProcessingMechanism(name='OUTPUT', size=2, function=Logistic) +output = ProcessingMechanism(name='OUTPUT', input_shapes=2, function=Logistic) color_pathway = [color_input, color_input_to_hidden_wts, color_hidden, color_hidden_to_output_wts, output] # Construct the word reading pathway (using the same output_layer) -word_input = ProcessingMechanism(name='WORD INPUT', size=2) +word_input = ProcessingMechanism(name='WORD INPUT', input_shapes=2) word_input_to_hidden_wts = np.array([[3, -3], [-3, 3]]) -word_hidden = ProcessingMechanism(name='WORD HIDDEN', size=2, function=Logistic(bias=-4)) +word_hidden = ProcessingMechanism(name='WORD HIDDEN', input_shapes=2, function=Logistic(bias=-4)) word_hidden_to_output_wts = np.array([[3, -3], [-3, 3]]) word_pathway = [word_input, word_input_to_hidden_wts, word_hidden, word_hidden_to_output_wts, output] # Construct the task specification pathways -task_input = ProcessingMechanism(name='TASK INPUT', size=2) -task = LCAMechanism(name='TASK', size=2, initial_value=[0.5,0.5]) +task_input = ProcessingMechanism(name='TASK INPUT', input_shapes=2) +task = LCAMechanism(name='TASK', input_shapes=2, initial_value=[0.5, 0.5]) task_color_wts = np.array([[4,4],[0,0]]) task_word_wts = np.array([[0,0],[4,4]]) task_color_pathway = [task_input, task, task_color_wts, color_hidden] diff --git a/Scripts/Examples/Tutorial/Rumelhart Semantic Network (autodiff).py b/Scripts/Examples/Tutorial/Rumelhart Semantic Network (autodiff).py index 653601b6918..291b97f2ad1 100644 --- a/Scripts/Examples/Tutorial/Rumelhart Semantic Network (autodiff).py +++ b/Scripts/Examples/Tutorial/Rumelhart Semantic Network (autodiff).py @@ -96,32 +96,32 @@ def gen_input_vals(nouns, relations): #For the hidden layers, we will be using logistic functions hn = pnl.TransferMechanism(name="hidden_nouns", - size=9, + input_shapes=9, function=pnl.Logistic() ) hm = pnl.TransferMechanism(name="hidden_mixed", - size=n_units, + input_shapes=n_units, function=pnl.Logistic() ) out_sig_I = pnl.TransferMechanism(name="sig_outs_I", - size=len(nouns), + input_shapes=len(nouns), function=pnl.Logistic() ) out_sig_is = pnl.TransferMechanism(name="sig_outs_is", - size=len(is_list), + 
input_shapes=len(is_list), function=pnl.Logistic() ) out_sig_has = pnl.TransferMechanism(name="sig_outs_has", - size=len(has_list), + input_shapes=len(has_list), function=pnl.Logistic() ) out_sig_can = pnl.TransferMechanism(name="sig_outs_can", - size=len(can_list), + input_shapes=len(can_list), function=pnl.Logistic() ) diff --git a/Scripts/Examples/Tutorial/Stroop Model - EVC.py b/Scripts/Examples/Tutorial/Stroop Model - EVC.py index e4ab6f08344..de6b28b659b 100644 --- a/Scripts/Examples/Tutorial/Stroop Model - EVC.py +++ b/Scripts/Examples/Tutorial/Stroop Model - EVC.py @@ -4,23 +4,23 @@ # CONSTRUCT THE MODEL *********************************** # Construct the color naming pathway: -color_input = ProcessingMechanism(name='COLOR INPUT', size=2) # Note: default function is Linear +color_input = ProcessingMechanism(name='COLOR INPUT', input_shapes=2) # Note: default function is Linear color_input_to_hidden_wts = np.array([[2, -2], [-2, 2]]) -color_hidden = ProcessingMechanism(name='COLOR HIDDEN', size=2, function=Logistic(bias=-4)) +color_hidden = ProcessingMechanism(name='COLOR HIDDEN', input_shapes=2, function=Logistic(bias=-4)) color_hidden_to_output_wts = np.array([[2, -2], [-2, 2]]) -output = ProcessingMechanism(name='OUTPUT', size=2, function=Logistic) +output = ProcessingMechanism(name='OUTPUT', input_shapes=2, function=Logistic) color_pathway = [color_input, color_input_to_hidden_wts, color_hidden, color_hidden_to_output_wts, output] # Construct the word reading pathway (using the same output_layer) -word_input = ProcessingMechanism(name='WORD INPUT', size=2) +word_input = ProcessingMechanism(name='WORD INPUT', input_shapes=2) word_input_to_hidden_wts = np.array([[3, -3], [-3, 3]]) -word_hidden = ProcessingMechanism(name='WORD HIDDEN', size=2, function=Logistic(bias=-4)) +word_hidden = ProcessingMechanism(name='WORD HIDDEN', input_shapes=2, function=Logistic(bias=-4)) word_hidden_to_output_wts = np.array([[3, -3], [-3, 3]]) word_pathway = [word_input, word_input_to_hidden_wts, word_hidden, word_hidden_to_output_wts, output] # Construct the task specification pathways -task_input = ProcessingMechanism(name='TASK INPUT', size=2) -task = LCAMechanism(name='TASK', size=2, initial_value=[0.5,0.5]) +task_input = ProcessingMechanism(name='TASK INPUT', input_shapes=2) +task = LCAMechanism(name='TASK', input_shapes=2, initial_value=[0.5, 0.5]) task_color_wts = np.array([[4,4],[0,0]]) task_word_wts = np.array([[0,0],[4,4]]) task_color_pathway = [task_input, task, task_color_wts, color_hidden] diff --git a/Scripts/Examples/_Gating-Mechanism.py b/Scripts/Examples/_Gating-Mechanism.py index 3391fb3a5c7..ff6ca5e6c72 100644 --- a/Scripts/Examples/_Gating-Mechanism.py +++ b/Scripts/Examples/_Gating-Mechanism.py @@ -29,7 +29,7 @@ Gating_Mechanism = pnl.GatingMechanism( # default_gating_allocation=0.0, - size=[1], + input_shapes=[1], gating_signals=[ Hidden_Layer_1, Hidden_Layer_2, diff --git a/Scripts/Examples/_Leabra-Demo.py b/Scripts/Examples/_Leabra-Demo.py index 1db82c5bd4f..0bef2cef590 100644 --- a/Scripts/Examples/_Leabra-Demo.py +++ b/Scripts/Examples/_Leabra-Demo.py @@ -62,8 +62,8 @@ ) -T1 = pnl.TransferMechanism(name='T1', size=input_size, function=psyneulink.core.components.functions.nonstateful.transferfunctions.Linear) -T2 = pnl.TransferMechanism(name='T2', size=output_size, function=psyneulink.core.components.functions.nonstateful.transferfunctions.Linear) +T1 = pnl.TransferMechanism(name='T1', input_shapes=input_size, 
function=psyneulink.core.components.functions.nonstateful.transferfunctions.Linear) +T2 = pnl.TransferMechanism(name='T2', input_shapes=output_size, function=psyneulink.core.components.functions.nonstateful.transferfunctions.Linear) proj = pnl.MappingProjection(sender=T2, receiver=L.input_ports[1]) comp = pnl.Composition(pathways=[[T1, L], [T2, proj, L]]) diff --git a/Scripts/Examples/_Leabra-Learning-Demo.py b/Scripts/Examples/_Leabra-Learning-Demo.py index 7a8fce2b26c..17ad59d0d2a 100644 --- a/Scripts/Examples/_Leabra-Learning-Demo.py +++ b/Scripts/Examples/_Leabra-Learning-Demo.py @@ -26,8 +26,8 @@ hidden_sizes=None, training_flag=True, quarter_size=20) ### building the PsyNeuLink network -T_input = pnl.TransferMechanism(size=n_input) -T_target = pnl.TransferMechanism(size=n_output) +T_input = pnl.TransferMechanism(input_shapes=n_input) +T_target = pnl.TransferMechanism(input_shapes=n_output) # target_projection connects T_target to the TARGET InputPort of Leab target_projection = pnl.MappingProjection(sender=T_target, receiver = Leab.input_ports[1]) comp = pnl.Composition(pathways=[[T_input, Leab], [T_target, target_projection, Leab]]) diff --git a/Scripts/Examples/_Reinforcement-Learning REV.py b/Scripts/Examples/_Reinforcement-Learning REV.py index f351c63e716..a00f6652145 100644 --- a/Scripts/Examples/_Reinforcement-Learning REV.py +++ b/Scripts/Examples/_Reinforcement-Learning REV.py @@ -4,12 +4,12 @@ import psyneulink.core.components.functions.nonstateful.transferfunctions input_layer = pnl.TransferMechanism( - size=3, + input_shapes=3, name='Input Layer' ) action_selection = pnl.TransferMechanism( - size=3, + input_shapes=3, function=psyneulink.core.components.functions.nonstateful.transferfunctions.SoftMax( output=pnl.ALL, gain=1.0), diff --git a/Scripts/Models (Under Development)/Adaptive Replay Model.py b/Scripts/Models (Under Development)/Adaptive Replay Model.py index 069dbb00d75..e1d6d1a9a27 100644 --- a/Scripts/Models (Under Development)/Adaptive Replay Model.py +++ b/Scripts/Models (Under Development)/Adaptive Replay Model.py @@ -14,35 +14,35 @@ # PERCEPTUAL AND ACTION MECHANISMS # ********************************************************************************************* stim_in = ProcessingMechanism(name='Stimulus', - size=stim_size) + input_shapes=stim_size) context_in = ProcessingMechanism(name='Context', - size=context_size) + input_shapes=context_size) reward_in = ProcessingMechanism(name='Reward', - size=1) + input_shapes=1) perceptual_state = ProcessingMechanism(name='Current Port', function=Concatenate, input_ports=[{NAME:'STIM', - SIZE:stim_size, - PROJECTIONS:stim_in}, + INPUT_SHAPES:stim_size, + PROJECTIONS:stim_in}, {NAME:'CONTEXT', - SIZE:context_size, + INPUT_SHAPES:context_size, PROJECTIONS:context_in}]) # action = ProcessingMechanism(name='Action', -# size=num_actions, +# input_shapes=num_actions, # input_ports={NAME: 'Q values', # PROJECTIONS:perceptual_state}) action = ProcessingMechanism(name='Action', - size=num_actions) + input_shapes=num_actions) # ********************************************************************************************* # RL AGENT NESTED COMPOSITION # ********************************************************************************************* -rl_agent_state = ProcessingMechanism(name='RL Agent Port', size=5) -rl_agent_action = ProcessingMechanism(name='RL Agent Action', size=5) +rl_agent_state = ProcessingMechanism(name='RL Agent Port', input_shapes=5) +rl_agent_action = ProcessingMechanism(name='RL Agent Action', 
input_shapes=5) rl_agent = Composition(name='RL Agent') rl_learning_components = rl_agent.add_reinforcement_learning_pathway([rl_agent_state, rl_agent_action]) # rl_agent.add_required_node_role(rl_agent_action, NodeRole.OUTPUT) @@ -52,7 +52,7 @@ # MEMORY AND CONTROL MECHANISMS # ********************************************************************************************* # q_rep = ProcessingMechanism(name='Q rep', -# size=num_actions*stim_size, +# input_shapes=num_actions*stim_size, # function=SoftMax(output=PROB, gain=1.0)) # # em = EpisodicMemoryMechanism(name='Episodic Memory', diff --git a/Scripts/Models (Under Development)/Bustamante_Stroop_XOR_LVOC_Model.py b/Scripts/Models (Under Development)/Bustamante_Stroop_XOR_LVOC_Model.py index db888ec5ebf..32e325600c8 100644 --- a/Scripts/Models (Under Development)/Bustamante_Stroop_XOR_LVOC_Model.py +++ b/Scripts/Models (Under Development)/Bustamante_Stroop_XOR_LVOC_Model.py @@ -49,13 +49,13 @@ def objective_function(v): # return np.sum(v[0] * v[1]) -color_stim = pnl.TransferMechanism(name='Color Stimulus', size=8) -word_stim = pnl.TransferMechanism(name='Word Stimulus', size=8) +color_stim = pnl.TransferMechanism(name='Color Stimulus', input_shapes=8) +word_stim = pnl.TransferMechanism(name='Word Stimulus', input_shapes=8) color_task = pnl.TransferMechanism(name='Color Task') word_task = pnl.ProcessingMechanism(name='Word Task', function=w_fct_UDF) -reward = pnl.TransferMechanism(name='Reward', size=2) +reward = pnl.TransferMechanism(name='Reward', input_shapes=2) task_decision = pnl.DDM( name='Task Decision', diff --git a/Scripts/Models (Under Development)/Bustamante_Stroop_XOR_LVOC_Model_VZ.py b/Scripts/Models (Under Development)/Bustamante_Stroop_XOR_LVOC_Model_VZ.py index 4bc078ea5f5..f247f16a261 100644 --- a/Scripts/Models (Under Development)/Bustamante_Stroop_XOR_LVOC_Model_VZ.py +++ b/Scripts/Models (Under Development)/Bustamante_Stroop_XOR_LVOC_Model_VZ.py @@ -71,13 +71,13 @@ def adj_cost_fct(v): from math import e return e**(.25 * np.abs(v) - 1) -color_stim = pnl.TransferMechanism(name='Color Stimulus', size=8) -word_stim = pnl.TransferMechanism(name='Word Stimulus', size=8) +color_stim = pnl.TransferMechanism(name='Color Stimulus', input_shapes=8) +word_stim = pnl.TransferMechanism(name='Word Stimulus', input_shapes=8) color_task = pnl.TransferMechanism(name='Color Task') word_task = pnl.ProcessingMechanism(name='Word Task', function=w_fct_UDF) -reward = pnl.TransferMechanism(name='Reward', size=2) +reward = pnl.TransferMechanism(name='Reward', input_shapes=2) task_decision = pnl.DDM( name='Task Decision', diff --git a/Scripts/Models (Under Development)/EGO/Using EMComposition/EGO Model - CSW with RNN.py b/Scripts/Models (Under Development)/EGO/Using EMComposition/EGO Model - CSW with RNN.py index 8a45bb6ab14..c0ed1f5e408 100644 --- a/Scripts/Models (Under Development)/EGO/Using EMComposition/EGO Model - CSW with RNN.py +++ b/Scripts/Models (Under Development)/EGO/Using EMComposition/EGO Model - CSW with RNN.py @@ -276,14 +276,14 @@ def construct_model(model_name:str=MODEL_NAME, # ------------------------------------------------- Nodes ------------------------------------------------------ # ---------------------------------------------------------------------------------------------------------------- - state_input_layer = ProcessingMechanism(name=state_input_name, size=state_size) - previous_state_layer = ProcessingMechanism(name=previous_state_input_name, size=state_size) + state_input_layer = 
ProcessingMechanism(name=state_input_name, input_shapes=state_size) + previous_state_layer = ProcessingMechanism(name=previous_state_input_name, input_shapes=state_size) integrator_layer = RecurrentTransferMechanism(name=integrator_name, function=Tanh, - size=integrator_size, + input_shapes=integrator_size, auto=1-integration_rate, hetero=0.0) - context_layer = ProcessingMechanism(name=context_name, size=context_size) + context_layer = ProcessingMechanism(name=context_name, input_shapes=context_size) em = EMComposition(name=em_name, memory_template=[[0] * state_size, # state @@ -305,7 +305,7 @@ def construct_model(model_name:str=MODEL_NAME, ) prediction_layer = ProcessingMechanism(name=prediction_layer_name, - size=state_size) + input_shapes=state_size) # ---------------------------------------------------------------------------------------------------------------- diff --git a/Scripts/Models (Under Development)/EGO/Using EMComposition/EGO Model - CSW with Simple Integrator.py b/Scripts/Models (Under Development)/EGO/Using EMComposition/EGO Model - CSW with Simple Integrator.py index 432bacf4c3e..c6408d3cc6a 100644 --- a/Scripts/Models (Under Development)/EGO/Using EMComposition/EGO Model - CSW with Simple Integrator.py +++ b/Scripts/Models (Under Development)/EGO/Using EMComposition/EGO Model - CSW with Simple Integrator.py @@ -232,11 +232,11 @@ def construct_model(model_name:str=model_params['name'], # ------------------------------------------------- Nodes ------------------------------------------------------ # ---------------------------------------------------------------------------------------------------------------- - state_input_layer = ProcessingMechanism(name=state_input_name, size=state_size) - previous_state_layer = ProcessingMechanism(name=previous_state_input_name, size=state_size) - # context_layer = ProcessingMechanism(name=context_name, size=context_size) + state_input_layer = ProcessingMechanism(name=state_input_name, input_shapes=state_size) + previous_state_layer = ProcessingMechanism(name=previous_state_input_name, input_shapes=state_size) + # context_layer = ProcessingMechanism(name=context_name, input_shapes=context_size) context_layer = TransferMechanism(name=context_name, - size=context_size, + input_shapes=context_size, function=Tanh, integrator_mode=True, integration_rate=integration_rate) @@ -267,7 +267,7 @@ def construct_model(model_name:str=model_params['name'], device=device ) - prediction_layer = ProcessingMechanism(name=prediction_layer_name, size=state_size) + prediction_layer = ProcessingMechanism(name=prediction_layer_name, input_shapes=state_size) # ---------------------------------------------------------------------------------------------------------------- diff --git a/Scripts/Models (Under Development)/EGO/Using EMComposition/EGO Model - Revaluation.py b/Scripts/Models (Under Development)/EGO/Using EMComposition/EGO Model - Revaluation.py index c9e827cf197..deced4903f2 100644 --- a/Scripts/Models (Under Development)/EGO/Using EMComposition/EGO Model - Revaluation.py +++ b/Scripts/Models (Under Development)/EGO/Using EMComposition/EGO Model - Revaluation.py @@ -468,15 +468,15 @@ def construct_model(model_name:str=MODEL_NAME, # ------------------------------------------------- Nodes ------------------------------------------------------ # ---------------------------------------------------------------------------------------------------------------- - task_input_layer = ProcessingMechanism(name=task_input_name, size=task_size) - state_input_layer 
= ProcessingMechanism(name=state_input_name, size=state_size) - time_input_layer = ProcessingMechanism(name=time_input_name, size=time_size) - reward_input_layer = ProcessingMechanism(name=reward_input_name, size=reward_size) - attend_external_layer = ProcessingMechanism(name=attend_external_layer_name, size=state_size) - attend_memory_layer = ProcessingMechanism(name=attend_memory_layer_name, size=state_size) - retrieved_reward_layer = TransferMechanism(name=retrieved_reward_name, size=reward_size) + task_input_layer = ProcessingMechanism(name=task_input_name, input_shapes=task_size) + state_input_layer = ProcessingMechanism(name=state_input_name, input_shapes=state_size) + time_input_layer = ProcessingMechanism(name=time_input_name, input_shapes=time_size) + reward_input_layer = ProcessingMechanism(name=reward_input_name, input_shapes=reward_size) + attend_external_layer = ProcessingMechanism(name=attend_external_layer_name, input_shapes=state_size) + attend_memory_layer = ProcessingMechanism(name=attend_memory_layer_name, input_shapes=state_size) + retrieved_reward_layer = TransferMechanism(name=retrieved_reward_name, input_shapes=reward_size) context_layer = RecurrentTransferMechanism(name=context_name, - size=state_size, + input_shapes=state_size, auto=1-context_integration_rate, hetero=0.0) em = EMComposition(name=em_name, diff --git a/Scripts/Models (Under Development)/EGO/Using EpisodicMemoryMechanism/EGO Model - MDP.py b/Scripts/Models (Under Development)/EGO/Using EpisodicMemoryMechanism/EGO Model - MDP.py index e6c7b4bd368..cd1507b3a26 100644 --- a/Scripts/Models (Under Development)/EGO/Using EpisodicMemoryMechanism/EGO Model - MDP.py +++ b/Scripts/Models (Under Development)/EGO/Using EpisodicMemoryMechanism/EGO Model - MDP.py @@ -485,15 +485,15 @@ def construct_model(model_name:str=MODEL_NAME, # ------------------------------------------------- Mechanisms ------------------------------------------------- # ---------------------------------------------------------------------------------------------------------------- - task_input_layer = ProcessingMechanism(name=task_input_name, size=task_size) - state_input_layer = ProcessingMechanism(name=state_input_name, size=state_size) - time_input_layer = ProcessingMechanism(name=time_input_name, size=time_size) - reward_input_layer = ProcessingMechanism(name=reward_input_name, size=reward_size) - attend_external_layer = ProcessingMechanism(name=attend_external_layer_name, size=state_size) - attend_memory_layer = ProcessingMechanism(name=attend_memory_layer_name, size=state_size) - retrieved_reward_layer = TransferMechanism(name=retrieved_reward_name, size=reward_size) + task_input_layer = ProcessingMechanism(name=task_input_name, input_shapes=task_size) + state_input_layer = ProcessingMechanism(name=state_input_name, input_shapes=state_size) + time_input_layer = ProcessingMechanism(name=time_input_name, input_shapes=time_size) + reward_input_layer = ProcessingMechanism(name=reward_input_name, input_shapes=reward_size) + attend_external_layer = ProcessingMechanism(name=attend_external_layer_name, input_shapes=state_size) + attend_memory_layer = ProcessingMechanism(name=attend_memory_layer_name, input_shapes=state_size) + retrieved_reward_layer = TransferMechanism(name=retrieved_reward_name, input_shapes=reward_size) context_layer = RecurrentTransferMechanism(name=context_name, - size=state_size, + input_shapes=state_size, auto=1-context_integration_rate, hetero=0.0) em = EpisodicMemoryMechanism(name=em_name, @@ -501,10 +501,10 @@ def 
construct_model(model_name:str=MODEL_NAME, [0] * time_size, # time [0] * state_size, # context [0] * reward_size], # reward - input_ports=[{NAME:state_input_name, SIZE:state_size}, - {NAME:time_input_name, SIZE:time_size}, - {NAME:context_name, SIZE:state_size}, - {NAME:reward_input_name, SIZE:reward_size}], + input_ports=[{NAME:state_input_name, INPUT_SHAPES:state_size}, + {NAME:time_input_name, INPUT_SHAPES:time_size}, + {NAME:context_name, INPUT_SHAPES:state_size}, + {NAME:reward_input_name, INPUT_SHAPES:reward_size}], function=ContentAddressableMemory( # selection_function=SoftMax(gain=retrieval_softmax_gain), distance_field_weights=[state_retrieval_weight, diff --git a/Scripts/Models (Under Development)/GreedyAgentInteractiveInputs.py b/Scripts/Models (Under Development)/GreedyAgentInteractiveInputs.py index 6fa96600729..4100ec0a3c1 100644 --- a/Scripts/Models (Under Development)/GreedyAgentInteractiveInputs.py +++ b/Scripts/Models (Under Development)/GreedyAgentInteractiveInputs.py @@ -28,14 +28,14 @@ # ********************************************************************************************************************* if PERCEPT_DISTORT: - player = ProcessingMechanism(size=prey_len, function=GaussianDistort(variance=0), name="PLAYER OBS") - prey = ProcessingMechanism(size=prey_len, function=GaussianDistort(variance=0), name="PREY OBS") + player = ProcessingMechanism(input_shapes=prey_len, function=GaussianDistort(variance=0), name="PLAYER OBS") + prey = ProcessingMechanism(input_shapes=prey_len, function=GaussianDistort(variance=0), name="PREY OBS") else: - player = TransferMechanism(size=prey_len, name="PLAYER OBS") - prey = TransferMechanism(size=prey_len, name="PREY OBS") + player = TransferMechanism(input_shapes=prey_len, name="PLAYER OBS") + prey = TransferMechanism(input_shapes=prey_len, name="PREY OBS") # For future use: -values = TransferMechanism(size=3, name="AGENT VALUES") +values = TransferMechanism(input_shapes=3, name="AGENT VALUES") reward = TransferMechanism(name="REWARD") # Use ComparatorMechanism to compute direction of action as difference of coordinates between player and prey: diff --git a/Scripts/Models (Under Development)/GreedyAgentModel.py b/Scripts/Models (Under Development)/GreedyAgentModel.py index 2855548e350..adab17a1fd3 100644 --- a/Scripts/Models (Under Development)/GreedyAgentModel.py +++ b/Scripts/Models (Under Development)/GreedyAgentModel.py @@ -32,14 +32,14 @@ # ********************************************************************************************************************* if PERCEPT_DISTORT: - player = ProcessingMechanism(size=prey_len, function=GaussianDistort(variance=0), name="PLAYER OBS") - prey = ProcessingMechanism(size=prey_len, function=GaussianDistort(variance=0), name="PREY OBS") + player = ProcessingMechanism(input_shapes=prey_len, function=GaussianDistort(variance=0), name="PLAYER OBS") + prey = ProcessingMechanism(input_shapes=prey_len, function=GaussianDistort(variance=0), name="PREY OBS") else: - player = TransferMechanism(size=prey_len, name="PLAYER OBS") - prey = TransferMechanism(size=prey_len, name="PREY OBS") + player = TransferMechanism(input_shapes=prey_len, name="PLAYER OBS") + prey = TransferMechanism(input_shapes=prey_len, name="PREY OBS") # For future use: -values = TransferMechanism(size=3, name="AGENT VALUES") +values = TransferMechanism(input_shapes=3, name="AGENT VALUES") reward = TransferMechanism(name="REWARD") # Use ComparatorMechanism to compute direction of action as difference of coordinates between 
player and prey: diff --git a/Scripts/Models (Under Development)/GreedyAgentModel_LLVM_TEST.py b/Scripts/Models (Under Development)/GreedyAgentModel_LLVM_TEST.py index b20c5718370..9f28b4ef672 100644 --- a/Scripts/Models (Under Development)/GreedyAgentModel_LLVM_TEST.py +++ b/Scripts/Models (Under Development)/GreedyAgentModel_LLVM_TEST.py @@ -18,11 +18,11 @@ player_len = prey_len = predator_len = obs_len -player = ProcessingMechanism(size=prey_len, function=GaussianDistort, name="PLAYER OBS") -prey = ProcessingMechanism(size=prey_len, function=GaussianDistort, name="PREY OBS") +player = ProcessingMechanism(input_shapes=prey_len, function=GaussianDistort, name="PLAYER OBS") +prey = ProcessingMechanism(input_shapes=prey_len, function=GaussianDistort, name="PREY OBS") # For future use: -values = TransferMechanism(size=3, name="AGENT VALUES") +values = TransferMechanism(input_shapes=3, name="AGENT VALUES") reward = TransferMechanism(name="REWARD") # Use ComparatorMechanism to compute direction of action as difference of coordinates between player and prey: diff --git a/Scripts/Models (Under Development)/PanickyAgentModel.py b/Scripts/Models (Under Development)/PanickyAgentModel.py index df3d0942a00..f1c692f774b 100644 --- a/Scripts/Models (Under Development)/PanickyAgentModel.py +++ b/Scripts/Models (Under Development)/PanickyAgentModel.py @@ -78,12 +78,12 @@ def control_allocation_function(variable): # ********************************************************************************************************************* # Perceptual Mechanisms -player_obs = ProcessingMechanism(size=prey_len, function=GaussianDistort, name="PLAYER OBS") -prey_obs = ProcessingMechanism(size=prey_len, function=GaussianDistort, name="PREY OBS") -predator_obs = TransferMechanism(size=predator_len, function=GaussianDistort, name="PREDATOR OBS") +player_obs = ProcessingMechanism(input_shapes=prey_len, function=GaussianDistort, name="PLAYER OBS") +prey_obs = ProcessingMechanism(input_shapes=prey_len, function=GaussianDistort, name="PREY OBS") +predator_obs = TransferMechanism(input_shapes=predator_len, function=GaussianDistort, name="PREDATOR OBS") # Value and Reward Mechanisms (not yet used; for future use) -values = TransferMechanism(size=3, name="AGENT VALUES") +values = TransferMechanism(input_shapes=3, name="AGENT VALUES") reward = TransferMechanism(name="REWARD") # Action Mechanism diff --git a/Scripts/Models (Under Development)/Predator-Prey Model DEMO.py b/Scripts/Models (Under Development)/Predator-Prey Model DEMO.py index 7a30b14d0bd..c565dd4c8a9 100644 --- a/Scripts/Models (Under Development)/Predator-Prey Model DEMO.py +++ b/Scripts/Models (Under Development)/Predator-Prey Model DEMO.py @@ -96,12 +96,12 @@ def get_optimal_action(observation): # ************************************** PROCESSING MECHANISMS ******************************************************** # Perceptual Mechanisms -player_percept = ProcessingMechanism(size=prey_len, function=GaussianDistort(), name="PLAYER PERCEPT") -predator_percept = ProcessingMechanism(size=predator_len, function=GaussianDistort(), name="PREDATOR PERCEPT") -prey_percept = ProcessingMechanism(size=prey_len, function=GaussianDistort(), name="PREY PERCEPT") +player_percept = ProcessingMechanism(input_shapes=prey_len, function=GaussianDistort(), name="PLAYER PERCEPT") +predator_percept = ProcessingMechanism(input_shapes=predator_len, function=GaussianDistort(), name="PREDATOR PERCEPT") +prey_percept = ProcessingMechanism(input_shapes=prey_len, function=GaussianDistort(), 
name="PREY PERCEPT") # Mechanism used to encode optimal action from call to Run -optimal_action_mech = ProcessingMechanism(size=action_len, name="OPTIMAL ACTION") +optimal_action_mech = ProcessingMechanism(input_shapes=action_len, name="OPTIMAL ACTION") actual_agent_frame_buffer = None diff --git a/Scripts/Models (Under Development)/Predator-Prey Model DQN LVOC.py b/Scripts/Models (Under Development)/Predator-Prey Model DQN LVOC.py index 52db2a7a4fa..6a6926f10fc 100644 --- a/Scripts/Models (Under Development)/Predator-Prey Model DQN LVOC.py +++ b/Scripts/Models (Under Development)/Predator-Prey Model DQN LVOC.py @@ -104,9 +104,9 @@ def get_optimal_action(observation): # ************************************** PROCESSING MECHANISMS ******************************************************** # Perceptual Mechanisms -player_percept = ProcessingMechanism(size=prey_len, function=GaussianDistort(), name="PLAYER PERCEPT") -predator_percept = ProcessingMechanism(size=predator_len, function=GaussianDistort(), name="PREDATOR PERCEPT") -prey_percept = ProcessingMechanism(size=prey_len, function=GaussianDistort(), name="PREY PERCEPT") +player_percept = ProcessingMechanism(input_shapes=prey_len, function=GaussianDistort(), name="PLAYER PERCEPT") +predator_percept = ProcessingMechanism(input_shapes=predator_len, function=GaussianDistort(), name="PREDATOR PERCEPT") +prey_percept = ProcessingMechanism(input_shapes=prey_len, function=GaussianDistort(), name="PREY PERCEPT") # Mechanism used to encode trialtype from environment trial_type_input_mech = ProcessingMechanism(name="TRIAL TYPE INPUT") diff --git a/Scripts/Models (Under Development)/Predator-Prey Model DQN [ORIG].py b/Scripts/Models (Under Development)/Predator-Prey Model DQN [ORIG].py index 9e9630238d8..2e1b5ea13d7 100644 --- a/Scripts/Models (Under Development)/Predator-Prey Model DQN [ORIG].py +++ b/Scripts/Models (Under Development)/Predator-Prey Model DQN [ORIG].py @@ -54,12 +54,12 @@ # ************************************** PROCESSING MECHANISMS ******************************************************** # Perceptual Mechanisms -player_percept = ProcessingMechanism(size=prey_len, function=GaussianDistort, name="PLAYER PERCEPT") -predator_percept = ProcessingMechanism(size=predator_len, function=GaussianDistort, name="PREDATOR PERCEPT") -prey_percept = ProcessingMechanism(size=prey_len, function=GaussianDistort, name="PREY PERCEPT") +player_percept = ProcessingMechanism(input_shapes=prey_len, function=GaussianDistort, name="PLAYER PERCEPT") +predator_percept = ProcessingMechanism(input_shapes=predator_len, function=GaussianDistort, name="PREDATOR PERCEPT") +prey_percept = ProcessingMechanism(input_shapes=prey_len, function=GaussianDistort, name="PREY PERCEPT") # Value and Reward Mechanisms (not yet used; for future use) -values = TransferMechanism(size=3, name="AGENT VALUES") +values = TransferMechanism(input_shapes=3, name="AGENT VALUES") reward = TransferMechanism(name="REWARD") # env = ForagerEnv() diff --git a/Scripts/Models (Under Development)/Predator-Prey Model DQN.py b/Scripts/Models (Under Development)/Predator-Prey Model DQN.py index 3354bd89e72..2c95015f2e0 100644 --- a/Scripts/Models (Under Development)/Predator-Prey Model DQN.py +++ b/Scripts/Models (Under Development)/Predator-Prey Model DQN.py @@ -89,12 +89,12 @@ def get_optimal_action(observation): # ************************************** PROCESSING MECHANISMS ******************************************************** # Perceptual Mechanisms -player_percept = 
ProcessingMechanism(size=prey_len, function=GaussianDistort(), name="PLAYER PERCEPT") -predator_percept = ProcessingMechanism(size=predator_len, function=GaussianDistort(), name="PREDATOR PERCEPT") -prey_percept = ProcessingMechanism(size=prey_len, function=GaussianDistort(), name="PREY PERCEPT") +player_percept = ProcessingMechanism(input_shapes=prey_len, function=GaussianDistort(), name="PLAYER PERCEPT") +predator_percept = ProcessingMechanism(input_shapes=predator_len, function=GaussianDistort(), name="PREDATOR PERCEPT") +prey_percept = ProcessingMechanism(input_shapes=prey_len, function=GaussianDistort(), name="PREY PERCEPT") # Mechanism used to encode optimal action from call to Run -optimal_action_mech = ProcessingMechanism(size=action_len, name="OPTIMAL ACTION") +optimal_action_mech = ProcessingMechanism(input_shapes=action_len, name="OPTIMAL ACTION") actual_agent_frame_buffer = None diff --git a/Scripts/Models (Under Development)/Predator-Prey Model INPUT LAYER.py b/Scripts/Models (Under Development)/Predator-Prey Model INPUT LAYER.py index 4649a0c0bd4..095f78ef527 100644 --- a/Scripts/Models (Under Development)/Predator-Prey Model INPUT LAYER.py +++ b/Scripts/Models (Under Development)/Predator-Prey Model INPUT LAYER.py @@ -40,23 +40,23 @@ # ********************************************************************************************************************* # Input Mechanisms -player_input = ProcessingMechanism(size=prey_len, name="PLAYER INPUT") -prey_input = ProcessingMechanism(size=prey_len, name="PREY INPUT") -predator_input = TransferMechanism(size=predator_len, name="PREDATOR INPUT") +player_input = ProcessingMechanism(input_shapes=prey_len, name="PLAYER INPUT") +prey_input = ProcessingMechanism(input_shapes=prey_len, name="PREY INPUT") +predator_input = TransferMechanism(input_shapes=predator_len, name="PREDATOR INPUT") # Perceptual Mechanisms if PERCEPTUAL_DISTORT: - player_obs = ProcessingMechanism(size=prey_len, function=GaussianDistort, name="PLAYER OBS") - prey_obs = ProcessingMechanism(size=prey_len, function=GaussianDistort, name="PREY OBS") - predator_obs = TransferMechanism(size=predator_len, function=GaussianDistort, name="PREDATOR OBS") + player_obs = ProcessingMechanism(input_shapes=prey_len, function=GaussianDistort, name="PLAYER OBS") + prey_obs = ProcessingMechanism(input_shapes=prey_len, function=GaussianDistort, name="PREY OBS") + predator_obs = TransferMechanism(input_shapes=predator_len, function=GaussianDistort, name="PREDATOR OBS") else: - player_obs = ProcessingMechanism(size=prey_len, name="PLAYER OBS") - prey_obs = ProcessingMechanism(size=prey_len, name="PREY OBS") - predator_obs = TransferMechanism(size=predator_len, name="PREDATOR OBS") + player_obs = ProcessingMechanism(input_shapes=prey_len, name="PLAYER OBS") + prey_obs = ProcessingMechanism(input_shapes=prey_len, name="PREY OBS") + predator_obs = TransferMechanism(input_shapes=predator_len, name="PREDATOR OBS") # Value and Reward Mechanisms (not yet used; for future use) -values = TransferMechanism(size=3, name="AGENT VALUES") +values = TransferMechanism(input_shapes=3, name="AGENT VALUES") reward = TransferMechanism(name="REWARD") # Action Mechanism diff --git a/Scripts/Models (Under Development)/Predator-Prey Model I_0 Nested Comp.py b/Scripts/Models (Under Development)/Predator-Prey Model I_0 Nested Comp.py index cce5386ad72..82d1596dfb8 100644 --- a/Scripts/Models (Under Development)/Predator-Prey Model I_0 Nested Comp.py +++ b/Scripts/Models (Under Development)/Predator-Prey Model I_0 
Nested Comp.py @@ -41,23 +41,23 @@ # ********************************************************************************************************************* # Input Mechanisms -player_input = ProcessingMechanism(size=prey_len, name="PLAYER INPUT") -prey_input = ProcessingMechanism(size=prey_len, name="PREY INPUT") -predator_input = TransferMechanism(size=predator_len, name="PREDATOR INPUT") +player_input = ProcessingMechanism(input_shapes=prey_len, name="PLAYER INPUT") +prey_input = ProcessingMechanism(input_shapes=prey_len, name="PREY INPUT") +predator_input = TransferMechanism(input_shapes=predator_len, name="PREDATOR INPUT") # Perceptual Mechanisms if PERCEPTUAL_DISTORT: - player_obs = ProcessingMechanism(size=prey_len, function=GaussianDistort, name="PLAYER OBS") - prey_obs = ProcessingMechanism(size=prey_len, function=GaussianDistort, name="PREY OBS") - predator_obs = TransferMechanism(size=predator_len, function=GaussianDistort, name="PREDATOR OBS") + player_obs = ProcessingMechanism(input_shapes=prey_len, function=GaussianDistort, name="PLAYER OBS") + prey_obs = ProcessingMechanism(input_shapes=prey_len, function=GaussianDistort, name="PREY OBS") + predator_obs = TransferMechanism(input_shapes=predator_len, function=GaussianDistort, name="PREDATOR OBS") else: - player_obs = ProcessingMechanism(size=prey_len, name="PLAYER OBS") - prey_obs = ProcessingMechanism(size=prey_len, name="PREY OBS") - predator_obs = TransferMechanism(size=predator_len, name="PREDATOR OBS") + player_obs = ProcessingMechanism(input_shapes=prey_len, name="PLAYER OBS") + prey_obs = ProcessingMechanism(input_shapes=prey_len, name="PREY OBS") + predator_obs = TransferMechanism(input_shapes=predator_len, name="PREDATOR OBS") # Value and Reward Mechanisms (not yet used; for future use) -values = TransferMechanism(size=3, name="AGENT VALUES") +values = TransferMechanism(input_shapes=3, name="AGENT VALUES") reward = TransferMechanism(name="REWARD") # Action Mechanism diff --git a/Scripts/Models (Under Development)/Predator-Prey Model.py b/Scripts/Models (Under Development)/Predator-Prey Model.py index dd2c13682af..2eee31e1742 100644 --- a/Scripts/Models (Under Development)/Predator-Prey Model.py +++ b/Scripts/Models (Under Development)/Predator-Prey Model.py @@ -40,11 +40,11 @@ # ********************************************************************************************************************* # Perceptual Mechanisms -player_obs = ProcessingMechanism(size=prey_len, function=GaussianDistort, name="PLAYER OBS") -prey_obs = ProcessingMechanism(size=prey_len, function=GaussianDistort, name="PREY OBS") -predator_obs = TransferMechanism(size=predator_len, function=GaussianDistort, name="PREDATOR OBS") +player_obs = ProcessingMechanism(input_shapes=prey_len, function=GaussianDistort, name="PLAYER OBS") +prey_obs = ProcessingMechanism(input_shapes=prey_len, function=GaussianDistort, name="PREY OBS") +predator_obs = TransferMechanism(input_shapes=predator_len, function=GaussianDistort, name="PREDATOR OBS") # Value and Reward Mechanisms (not yet used; for future use) -values = TransferMechanism(size=3, name="AGENT VALUES") +values = TransferMechanism(input_shapes=3, name="AGENT VALUES") reward = TransferMechanism(name="REWARD") # Action Mechanism diff --git a/Scripts/Models (Under Development)/nback/nback.py b/Scripts/Models (Under Development)/nback/nback.py index c1513137d36..1453e5576c5 100644 --- a/Scripts/Models (Under Development)/nback/nback.py +++ b/Scripts/Models (Under Development)/nback/nback.py @@ -305,28 +305,28 @@ 
def construct_model(stim_size:int = STIM_SIZE, # output: match [1,0] or non-match [0,1] # Must be trained to detect match for specified task (1-back, 2-back, etc.) input_current_stim = TransferMechanism(name=FFN_STIMULUS_INPUT, - size=stim_size, + input_shapes=stim_size, function=FFN_TRANSFER_FUNCTION) input_current_context = TransferMechanism(name=FFN_CONTEXT_INPUT, - size=context_size, + input_shapes=context_size, function=FFN_TRANSFER_FUNCTION) input_retrieved_stim = TransferMechanism(name=FFN_STIMULUS_RETRIEVED, - size=stim_size, + input_shapes=stim_size, function=FFN_TRANSFER_FUNCTION) input_retrieved_context = TransferMechanism(name=FFN_CONTEXT_RETRIEVED, - size=context_size, + input_shapes=context_size, function=FFN_TRANSFER_FUNCTION) input_task = TransferMechanism(name=FFN_TASK, - size=num_nback_levels, + input_shapes=num_nback_levels, function=FFN_TRANSFER_FUNCTION) hidden = TransferMechanism(name=FFN_HIDDEN, - size=hidden_size, + input_shapes=hidden_size, function=FFN_TRANSFER_FUNCTION) dropout = TransferMechanism(name=FFN_DROPOUT, - size=hidden_size, + input_shapes=hidden_size, function=Dropout(p=DROPOUT_PROB)) output = ProcessingMechanism(name=FFN_OUTPUT, - size=2, + input_shapes=2, # function=ReLU ) @@ -354,7 +354,7 @@ def construct_model(stim_size:int = STIM_SIZE, print(f"constructing '{NBACK_MODEL}'...") # Stimulus Encoding: takes STIM_SIZE vector as input - stim = TransferMechanism(name=MODEL_STIMULUS_INPUT, size=stim_size) + stim = TransferMechanism(name=MODEL_STIMULUS_INPUT, input_shapes=stim_size) # Context Encoding: takes scalar as drift step for current trial context = ProcessingMechanism(name=MODEL_CONTEXT_INPUT, @@ -365,16 +365,16 @@ def construct_model(stim_size:int = STIM_SIZE, # Task: task one-hot indicating n-back (1, 2, 3 etc.) - must correspond to what ffn has been trained to do task = ProcessingMechanism(name=MODEL_TASK_INPUT, - size=num_nback_levels) + input_shapes=num_nback_levels) # Episodic Memory: # - entries: stimulus (field[0]) and context (field[1]); randomly initialized # - uses Softmax to retrieve best matching input, subject to weighting of stimulus and context by STIM_WEIGHT em = EpisodicMemoryMechanism(name=EM, input_ports=[{NAME:"STIMULUS_FIELD", - SIZE:stim_size}, + INPUT_SHAPES:stim_size}, {NAME:"CONTEXT_FIELD", - SIZE:context_size}], + INPUT_SHAPES:context_size}], function=ContentAddressableMemory( initializer=[[[0] * stim_size, [0] * context_size]], distance_field_weights=[retrieval_stimulus_weight, @@ -385,13 +385,13 @@ def construct_model(stim_size:int = STIM_SIZE, ) logit = TransferMechanism(name='LOGIT', - size=2, + input_shapes=2, # output_ports=[{VARIABLE: (OWNER_VALUE,0), # FUNCTION: lambda x : np.log(x)}], function=Logistic) decision = TransferMechanism(name=DECISION, - size=2, + input_shapes=2, function=SoftMax(output=MAX_INDICATOR)) # Control Mechanism diff --git a/Scripts/Models (Under Development)/nback/nback_og_pnl.py b/Scripts/Models (Under Development)/nback/nback_og_pnl.py index fcbab06dc66..18b8e77413f 100644 --- a/Scripts/Models (Under Development)/nback/nback_og_pnl.py +++ b/Scripts/Models (Under Development)/nback/nback_og_pnl.py @@ -317,24 +317,24 @@ def construct_model(stim_size:int = STIM_SIZE, # output: match [1,0] or non-match [0,1] # Must be trained to detect match for specified task (1-back, 2-back, etc.) 
stim_context_input = TransferMechanism(name=FFN_INPUT, - size=ffn_input_size) + input_shapes=ffn_input_size) task_input = ProcessingMechanism(name=FFN_TASK, - size=task_size) + input_shapes=task_size) task_embedding = ProcessingMechanism(name=FFN_TASK, - size=h1_size) + input_shapes=h1_size) h1 = ProcessingMechanism(name=FFN_H1, - size=h1_size, + input_shapes=h1_size, function=FFN_TRANSFER_FUNCTION) add_layer = ProcessingMechanism(name=FFN_ADD_LAYER, - size=h1_size) + input_shapes=h1_size) dropout = ProcessingMechanism(name=FFN_DROPOUT, - size=h1_size, + input_shapes=h1_size, function=Dropout(p=DROPOUT_PROB)) h2 = ProcessingMechanism(name=FFN_H2, - size=h2_size, + input_shapes=h2_size, function=FFN_TRANSFER_FUNCTION) output = ProcessingMechanism(name=FFN_OUTPUT, - size=2, + input_shapes=2, function = Linear # function=ReLU ) @@ -358,7 +358,7 @@ def construct_model(stim_size:int = STIM_SIZE, print(f"constructing '{NBACK_MODEL}'...") # Stimulus Encoding: takes stim_size vector as input - stim = TransferMechanism(name=MODEL_STIMULUS_INPUT, size=stim_size) + stim = TransferMechanism(name=MODEL_STIMULUS_INPUT, input_shapes=stim_size) # Context Encoding: takes scalar as drift step for current trial context = ProcessingMechanism(name=MODEL_CONTEXT_INPUT, @@ -369,16 +369,16 @@ def construct_model(stim_size:int = STIM_SIZE, # Task: task one-hot indicating n-back (1, 2, 3 etc.) - must correspond to what ffn has been trained to do task = ProcessingMechanism(name=MODEL_TASK_INPUT, - size=task_size) + input_shapes=task_size) # Episodic Memory: # - entries: stimulus (field[0]) and context (field[1]); randomly initialized # - uses Softmax to retrieve best matching input, subject to weighting of stimulus and context by STIM_WEIGHT em = EpisodicMemoryMechanism(name=EM, input_ports=[{NAME:"STIMULUS_FIELD", - SIZE:stim_size}, + INPUT_SHAPES:stim_size}, {NAME:"CONTEXT_FIELD", - SIZE:context_size}], + INPUT_SHAPES:context_size}], function=ContentAddressableMemory( initializer=[[[0] * stim_size, [0] * context_size]], distance_field_weights=[retrieval_stimulus_weight, @@ -395,7 +395,7 @@ def construct_model(stim_size:int = STIM_SIZE, function=Concatenate) decision = TransferMechanism(name=DECISION, - size=2, + input_shapes=2, function=SoftMax(output=MAX_INDICATOR)) # Control Mechanism diff --git a/docs/source/BasicsAndPrimer.rst b/docs/source/BasicsAndPrimer.rst index 6faba06536e..83b300fd8df 100644 --- a/docs/source/BasicsAndPrimer.rst +++ b/docs/source/BasicsAndPrimer.rst @@ -95,9 +95,9 @@ encoder network, the first layer of which takes an an array of length 5 as its i `Logistic` function:: # Construct the Mechanisms: - input_layer = ProcessingMechanism(size=5, name='Input') - hidden_layer = ProcessingMechanism(size=2, function=Logistic, name='hidden') - output_layer = ProcessingMechanism(size=5, function=Logistic, name='output') + input_layer = ProcessingMechanism(input_shapes=5, name='Input') + hidden_layer = ProcessingMechanism(input_shapes=2, function=Logistic, name='hidden') + output_layer = ProcessingMechanism(input_shapes=5, function=Logistic, name='output') # Construct the Composition: my_encoder = Composition(pathways=[[input_layer, hidden_layer, output_layer]]) @@ -189,22 +189,22 @@ of those to perform based on a task instruction. 
These all converge on a common drift diffusion (DDM) decision mechanism responsible for determining the response:: # Construct the color naming pathway: - color_input = ProcessingMechanism(name='COLOR INPUT', size=2) # note: default function is Linear + color_input = ProcessingMechanism(name='COLOR INPUT', input_shapes=2) # note: default function is Linear color_input_to_hidden_wts = np.array([[2, -2], [-2, 2]]) - color_hidden = ProcessingMechanism(name='COLOR HIDDEN', size=2, function=Logistic(bias=-4)) + color_hidden = ProcessingMechanism(name='COLOR HIDDEN', input_shapes=2, function=Logistic(bias=-4)) color_hidden_to_output_wts = np.array([[2, -2], [-2, 2]]) - output = ProcessingMechanism(name='OUTPUT', size=2 , function=Logistic) + output = ProcessingMechanism(name='OUTPUT', input_shapes=2 , function=Logistic) color_pathway = [color_input, color_input_to_hidden_wts, color_hidden, color_hidden_to_output_wts, output] # Construct the word reading pathway (using the same output_layer) - word_input = ProcessingMechanism(name='WORD INPUT', size=2) + word_input = ProcessingMechanism(name='WORD INPUT', input_shapes=2) word_input_to_hidden_wts = np.array([[3, -3], [-3, 3]]) - word_hidden = ProcessingMechanism(name='WORD HIDDEN', size=2, function=Logistic(bias=-4)) + word_hidden = ProcessingMechanism(name='WORD HIDDEN', input_shapes=2, function=Logistic(bias=-4)) word_hidden_to_output_wts = np.array([[3, -3], [-3, 3]]) word_pathway = [word_input, word_input_to_hidden_wts, word_hidden, word_hidden_to_output_wts, output] # Construct the task specification pathways - task_input = ProcessingMechanism(name='TASK INPUT', size=2) + task_input = ProcessingMechanism(name='TASK INPUT', input_shapes=2) task_color_wts = np.array([[4,4],[0,0]]) task_word_wts = np.array([[0,0],[4,4]]) task_color_pathway = [task_input, task_color_wts, color_hidden] @@ -325,7 +325,7 @@ that uses a `leaky competing accumulator `, and use control = ControlMechanism(name='CONTROL', objective_mechanism=ObjectiveMechanism(name='Conflict Monitor', monitor=output, - function=Energy(size=2, + function=Energy(input_shapes=2, matrix=[[0,-2.5],[-2.5,0]])), default_allocation=[0.5], control_signals=[(GAIN, task)]) @@ -937,14 +937,14 @@ For example, the following implements a network for learning semantic representa # Representation_Input # Construct Mechanisms - rep_in = pnl.ProcessingMechanism(size=10, name='REP_IN') - rel_in = pnl.ProcessingMechanism(size=11, name='REL_IN') - rep_hidden = pnl.ProcessingMechanism(size=4, function=Logistic, name='REP_HIDDEN') - rel_hidden = pnl.ProcessingMechanism(size=5, function=Logistic, name='REL_HIDDEN') - rep_out = pnl.ProcessingMechanism(size=10, function=Logistic, name='REP_OUT') - prop_out = pnl.ProcessingMechanism(size=12, function=Logistic, name='PROP_OUT') - qual_out = pnl.ProcessingMechanism(size=13, function=Logistic, name='QUAL_OUT') - act_out = pnl.ProcessingMechanism(size=14, function=Logistic, name='ACT_OUT') + rep_in = pnl.ProcessingMechanism(input_shapes=10, name='REP_IN') + rel_in = pnl.ProcessingMechanism(input_shapes=11, name='REL_IN') + rep_hidden = pnl.ProcessingMechanism(input_shapes=4, function=Logistic, name='REP_HIDDEN') + rel_hidden = pnl.ProcessingMechanism(input_shapes=5, function=Logistic, name='REL_HIDDEN') + rep_out = pnl.ProcessingMechanism(input_shapes=10, function=Logistic, name='REP_OUT') + prop_out = pnl.ProcessingMechanism(input_shapes=12, function=Logistic, name='PROP_OUT') + qual_out = pnl.ProcessingMechanism(input_shapes=13, function=Logistic, name='QUAL_OUT') + 
act_out = pnl.ProcessingMechanism(input_shapes=14, function=Logistic, name='ACT_OUT') # Construct Composition comp = Composition(name='Rumelhart Semantic Network') diff --git a/docs/source/BotvinickConflictMonitoringModel.rst b/docs/source/BotvinickConflictMonitoringModel.rst index 5473b967595..61a3e5db4f8 100644 --- a/docs/source/BotvinickConflictMonitoringModel.rst +++ b/docs/source/BotvinickConflictMonitoringModel.rst @@ -43,30 +43,30 @@ bidirectional way. The response layer receives inputs from both hidden layers. A Network System ~~~~~~~~~~~~~~ -**COLOR INPUT LAYER**: a `TransferMechanism` with **size** = 3 (one unit for the input of one color, respectively +**COLOR INPUT LAYER**: a `TransferMechanism` with **input_shapes** = 3 (one unit for the input of one color, respectively here blue & green), and assigned a `Linear` function with **slope** = 1.0 and **intercept** = 0.0. -**WORD INPUT LAYER**: a `TransferMechanism` with **size** = 3 (one unit for the input of one word, respectively, +**WORD INPUT LAYER**: a `TransferMechanism` with **input_shapes** = 3 (one unit for the input of one word, respectively, here blue & green), and assigned a `Linear` function with **slope** = 1.0 and **intercept** = 0.0. -**TASK INPUT LAYER**: a `TransferMechanism` with **size** = 2 (one unit specified with a task +**TASK INPUT LAYER**: a `TransferMechanism` with **input_shapes** = 2 (one unit specified with a task value of one, the other element set to zero), and assigned a `Linear` function with **slope** = 1.0 and **intercept** = 0.0. -**COLOR HIDDEN LAYER**: a `RecurrentTransferMechanism` with **size** = 3 (one element for each of the two colors, one +**COLOR HIDDEN LAYER**: a `RecurrentTransferMechanism` with **input_shapes** = 3 (one element for each of the two colors, one element for the neutral color and assigned a `Logistic` function with **gain** = 4.0 and **bias** = 1.0. The **integrator_mode** = `True` and **smoothing_factor** = 0.01. Both units receive mutually inhibitory weights (**hetero** = -2). -**WORD HIDDEN LAYER**: a `RecurrentTransferMechanism` with **size** = 3 (one element for each of the two colors, one +**WORD HIDDEN LAYER**: a `RecurrentTransferMechanism` with **input_shapes** = 3 (one element for each of the two colors, one element for the neutral color and assigned a `Logistic` function with **gain** = 4.0 and **bias** = 1.0. The **integrator_mode** = `True` and **smoothing_factor** = 0.01. Both units receive mutually inhibitory weights (**hetero** = -2). -**TASK DEMAND LAYER**: a `RecurrentTransferMechanism` with **size** = 2 (one element for each of the two tasks, and +**TASK DEMAND LAYER**: a `RecurrentTransferMechanism` with **input_shapes** = 2 (one element for each of the two tasks, and assigned a `Logistic` function with **gain** = 1.0 and **bias** = 0.0. The **integrator_mode** = `True` and **smoothing_factor** = 0.01. Both units receive mutually inhibitory weights (**hetero** = -2). -**RESPONSE LAYER**: a `RecurrentTransferMechanism` with **size** = 2 (one element for each of the two responses, and +**RESPONSE LAYER**: a `RecurrentTransferMechanism` with **input_shapes** = 2 (one element for each of the two responses, and assigned a `Logistic` function with **gain** = 1.0 and **bias** = 0.0. The **integrator_mode** = `True` and **smoothing_factor** = 0.01. Both units receive mutually inhibitory weights (**hetero** = -2). 
diff --git a/docs/source/Cohen_HustonModel.rst b/docs/source/Cohen_HustonModel.rst index f949fd55dd7..909b58df9e5 100644 --- a/docs/source/Cohen_HustonModel.rst +++ b/docs/source/Cohen_HustonModel.rst @@ -58,26 +58,26 @@ Below the Graph of the model is shown. Composition ~~~~~~~~~~~ -**COLOR INPUT LAYER**: a `TransferMechanism` with **size** = 3 (one element for the input to each color in the +**COLOR INPUT LAYER**: a `TransferMechanism` with **input_shapes** = 3 (one element for the input to each color in the *HIDDEN COLOR LAYER*, respectively), and assigned a `Linear` function with **slope** = 1.0 and **intercept** = 0.0. -**WORD INPUT LAYER**: a `TransferMechanism` with **size** = 3 (one element for the input to each word in the +**WORD INPUT LAYER**: a `TransferMechanism` with **input_shapes** = 3 (one element for the input to each word in the *HIDDEN WORD LAYER*, respectively), and assigned a `Linear` function with **slope** = 1.0 and **intercept** = 0.0. -**TASK INPUT LAYER**: a `TransferMechanism` with **size** = 2 (one element for the input to each task in the +**TASK INPUT LAYER**: a `TransferMechanism` with **input_shapes** = 2 (one element for the input to each task in the *TASK LAYER*, respectively), and assigned a `Linear` function with **slope** = 1.0 and **intercept** = 0.0. -**HIDDEN COLOR LAYER**: a `RecurrentTransferMechanism` Mechanism of **size** = 3 (one element each for the color units), +**HIDDEN COLOR LAYER**: a `RecurrentTransferMechanism` Mechanism of **input_shapes** = 3 (one element each for the color units), and assigned a `Logistic` Function with a bias = 4.0 and intercept = 0.0. Each element is connected to every other element by mutually inhibitory connections with a weight specified by **hetero** = -2.0. An integrator mechanism is specified by setting the **integrator_mode** = `True` and **smoothing_factor** = 0.1. -**HIDDEN WORD LAYER**: a `RecurrentTransferMechanism` specified as the *HIDDEN COLOR LAYER* with **size** = 3, +**HIDDEN WORD LAYER**: a `RecurrentTransferMechanism` specified as the *HIDDEN COLOR LAYER* with **input_shapes** = 3, a `Logistic` Function with a **bias** = 4.0 and **intercept** = 0.0, mutually inhibitory connections with a weight specified by **hetero** = -2.0, **integrator_mode** = `True` and **smoothing_factor** = 0.1.. **RESPONSE LAYER**: a `RecurrentTransferMechanism` specified as the *HIDDEN COLOR LAYER* with the only difference of -changing the bias to 0 in the `Logistic` Function, and the size of 2. +changing the bias to 0 in the `Logistic` Function, and the input_shapes of 2. **TASK LAYER**: a `RecurrentTransferMechanism` specified as the *RESPONSE LAYER*. diff --git a/docs/source/NieuwenhuisModel.rst b/docs/source/NieuwenhuisModel.rst index 7c4c07d33f4..e4faf3a8510 100644 --- a/docs/source/NieuwenhuisModel.rst +++ b/docs/source/NieuwenhuisModel.rst @@ -65,17 +65,17 @@ associated `ObjectiveMechanism`, as shown in the figure below: Behavioral Network Subsystem ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -**INPUT LAYER**: a `TransferMechanism` with **size**\ =3 (one element for the input to the T1, T2 and distractor units +**INPUT LAYER**: a `TransferMechanism` with **input_shapes**\ =3 (one element for the input to the T1, T2 and distractor units of the *DECISION LAYER*, respectively), and assigned a `Linear` function with **slope**\ =1.0 and **intercept**\ =0.0. 
-**DECISION LAYER**: an `LCAMechanism` Mechanism of **size**\ =3 (one element each for the T1, T2 and distractor units), +**DECISION LAYER**: an `LCAMechanism` Mechanism of **input_shapes**\ =3 (one element each for the T1, T2 and distractor units), and assigned a `Logistic` Function with a slope=1.0 and intercept=0.0. Each element has a self-excitatory connection with a weight specified by **self_excitation**\ =2.5, a **leak**\ =-1.0, and every element is connected to every other element by mutually inhibitory connections with a weight specified by **competition** =1.0. An ordinary differential equation describes the change in state over time, implemented in the LCAMechanism mechanism by setting **integrator_mode** = `True` and **time_step_size**\ =0.02. -**RESPONSE LAYER**: an `LCAMechanism` Mechanism of **size**\ =2, with one element each for the response to T1 and T2, +**RESPONSE LAYER**: an `LCAMechanism` Mechanism of **input_shapes**\ =2, with one element each for the response to T1 and T2, respectively, **self_excitation**\ =2.0, **leak**\ =-1.0, and no mutually inhibitory weights (**competition**\ =0). **PROJECTIONS**: The weights of the behavioral network are implemented as `MappingProjections `. diff --git a/docs/source/PCTC_model.rst b/docs/source/PCTC_model.rst index 058bc81331f..c07996d78de 100644 --- a/docs/source/PCTC_model.rst +++ b/docs/source/PCTC_model.rst @@ -56,33 +56,33 @@ model. A graph of the model is shown below. Network System ~~~~~~~~~~~~~~ -**COLOR INPUT LAYER**: a `TransferMechanism` with **size**\ =2 (one unit for the input of one color, respectively +**COLOR INPUT LAYER**: a `TransferMechanism` with **input_shapes**\ =2 (one unit for the input of one color, respectively here blue & green), and assigned a `Linear` function with **slope**\ =1.0 and **intercept**\ =0.0. -**WORD INPUT LAYER**: a `TransferMechanism` with **size**\ =2 (one unit for the input of one word, respectively, +**WORD INPUT LAYER**: a `TransferMechanism` with **input_shapes**\ =2 (one unit for the input of one word, respectively, here blue & green), and assigned a `Linear` function with **slope**\ =1.0 and **intercept**\ =0.0. -**BIAS INPUT LAYER**: a `TransferMechanism` with **size**\ =2 (one unit for the bias of one of the hidden layers, +**BIAS INPUT LAYER**: a `TransferMechanism` with **input_shapes**\ =2 (one unit for the bias of one of the hidden layers, which is the same in this model), and assigned a `Linear` function with **slope**\ =1.0 and **intercept**\ =0.0. -**PROACTIVE CONTROL INPUT LAYER**: a `TransferMechanism` with **size**\ =2 (one unit specified with a proactive control +**PROACTIVE CONTROL INPUT LAYER**: a `TransferMechanism` with **input_shapes**\ =2 (one unit specified with a proactive control value, the other one set to zero), and assigned a `Linear` function with **slope**\ =1.0 and **intercept**\ =0.0. -**COLOR HIDDEN LAYER**: a `RecurrentTransferMechanism` with **size**\ =2 (one element for each of the two colors, and +**COLOR HIDDEN LAYER**: a `RecurrentTransferMechanism` with **input_shapes**\ =2 (one element for each of the two colors, and assigned a `Logistic` function with **gain**\ =4.0 and **bias**\ =1.0. The **integrator_mode**\ =\ `True` and **smoothing_factor**\ =0.03. Both units receive mutually inhibitory weights (**hetero**\ =-2). A python function that sets the output of the `Logistic` function to 0 when it receives 0 as an input is specified on the `output_ports`. 
It simply subtracts 0.018 from the output of a logistic function and if this leads to a value below 0, outputs a 0 as a minimum value. -**WORD HIDDEN LAYER**: a `RecurrentTransferMechanism` with **size**\ =2 (one element for each of the two words, and +**WORD HIDDEN LAYER**: a `RecurrentTransferMechanism` with **input_shapes**\ =2 (one element for each of the two words, and assigned a `Logistic` function with **gain**\ =4.0 and **bias**\ =1.0. The **integrator_mode**\ =\ `True` and **smoothing_factor**\ =0.03. Both units receive mutually inhibitory weights (**hetero**\ =-2). A python function that sets the output of the `Logistic` function to 0 when it receives 0 as an input is specified on the `output_ports`. It simply subtracts 0.018 from the output of a logistic function and if this leads to a value below 0, outputs a 0 as a minimum value. -**TASK DEMAND LAYER**: a `RecurrentTransferMechanism` with **size**\ =2 (one element for each of the two tasks, and +**TASK DEMAND LAYER**: a `RecurrentTransferMechanism` with **input_shapes**\ =2 (one element for each of the two tasks, and assigned a `Logistic` function with **gain**\ =4.0 and **bias**\ =1.0. The **integrator_mode**\ =\ `True` and **smoothing_factor**\ =0.03. Both units receive mutually inhibitory weights (**hetero**\ =-2). A python function that sets the output of the `Logistic` function to 0 when it receives 0 as an input is specified on the `output_ports`. It @@ -90,7 +90,7 @@ simply subtracts 0.018 from the output of a logistic function and if this leads outputs a 0 as a minimum value. A second OutputPort is specified that computes the conflict between the two task units. -**WORD HIDDEN LAYER**: a `RecurrentTransferMechanism` with **size**\ =2 (one element for each of the two responses, and +**WORD HIDDEN LAYER**: a `RecurrentTransferMechanism` with **input_shapes**\ =2 (one element for each of the two responses, and assigned a `Logistic` function with **gain**\ =4.0 and **bias**\ =1.0. The **integrator_mode**\ =\ `True` and **smoothing_factor**\ =0.03. Both units receive mutually inhibitory weights (**hetero**\ =-2). A python function that sets the output of the `Logistic` function to 0 when it receives 0 as an input is specified on the `output_ports`. It diff --git a/docs/source/RefactoredLearningGuide.rst b/docs/source/RefactoredLearningGuide.rst index 5b0cfa840c4..40d47c2a805 100644 --- a/docs/source/RefactoredLearningGuide.rst +++ b/docs/source/RefactoredLearningGuide.rst @@ -100,8 +100,8 @@ This is demonstrated in the following codeblocks: This is the OLD code: ->>> my_mech_1 = pnl.TransferMechanism(function=pnl.Linear, size = 3) -... my_mech_2 = pnl.TransferMechanism(function=pnl.Linear, size = 2) +>>> my_mech_1 = pnl.TransferMechanism(function=pnl.Linear, input_shapes = 3) +... my_mech_2 = pnl.TransferMechanism(function=pnl.Linear, input_shapes = 2) ... my_projection = pnl.MappingProjection(matrix=np.random.randn(3,2), ... sender=my_mech_1, ... receiver=my_mech_2) @@ -124,8 +124,8 @@ This is the OLD code: And this is equivalent code AFTER the changes: ->>> my_mech_1 = pnl.TransferMechanism(function=pnl.Linear, size = 3) -... my_mech_2 = pnl.TransferMechanism(function=pnl.Linear, size = 2) +>>> my_mech_1 = pnl.TransferMechanism(function=pnl.Linear, input_shapes = 3) +... my_mech_2 = pnl.TransferMechanism(function=pnl.Linear, input_shapes = 2) ... my_projection = pnl.MappingProjection(matrix=np.random.randn(3,2), ... sender=my_mech_1, ... 
receiver=my_mech_2) diff --git a/docs/source/UserGuide_TBD.rst b/docs/source/UserGuide_TBD.rst index 3f0b20d6fa7..5441679d8f6 100644 --- a/docs/source/UserGuide_TBD.rst +++ b/docs/source/UserGuide_TBD.rst @@ -45,7 +45,7 @@ Components , as well as two other fundamental types of Components (`Ports ` and `Functions `), that are described in the section below on `Components `. The other primary type of object, `Composition`, has two primary types: -`Processes ` and `Systems ` that allow Compositions of different degrees of size and complexity to +`Processes ` and `Systems ` that allow Compositions of different degrees of input_shapes and complexity to be created. These are described in the section below on `Compositions `. In each case, examples are provided that illustrate how these objects are implemented, and that parallel those used in the interactive `Tutorial `. diff --git a/psyneulink/core/components/component.py b/psyneulink/core/components/component.py index 92ac1bf5a70..c3520735c12 100644 --- a/psyneulink/core/components/component.py +++ b/psyneulink/core/components/component.py @@ -95,7 +95,7 @@ argument in the constructor for a Component determines both its format (e.g., whether its value is numeric, its dimensionality and shape if it is an array, etc.) as well as its `default_value ` (the value used when the Component is executed and no input is provided). - It may alternatively be specified by `size `. + It may alternatively be specified by `input_shapes `. .. technical_note:: Internally, the attribute **variable** is not directly used as input to functions, to allow for parallelization. @@ -103,25 +103,25 @@ During parallelization however, the attribute may not accurately represent the most current value of variable being used, due to asynchrony inherent to parallelization. -.. _Component_Size: +.. _Component_Input_Shapes: -* **size** - the numpy shape or iterable of shapes matching the - `variable ` attribute. The **size** argument of +* **input_shapes** - the numpy shape or iterable of shapes matching the + `variable ` attribute. The **input_shapes** argument of the constructor for a Component can be used as a convenient method for specifying the `variable `, attribute in which case it will be assigned as an array of zeros of - the specified shape. When **size** is an iterable, each item in the + the specified shape. When **input_shapes** is an iterable, each item in the iterable is treated as a single shape, and the entire iterable is then - assigned as an array. When **size** is an integer, it is treated the + assigned as an array. When **input_shapes** is an integer, it is treated the same as a one-item iterable containing that integer. For example, - setting **size** = 3 is equivalent to setting - **variable** = [[0, 0, 0]] and setting **size** = [4, 3] is equivalent + setting **input_shapes** = 3 is equivalent to setting + **variable** = [[0, 0, 0]] and setting **input_shapes** = [4, 3] is equivalent to setting **variable** = [[0, 0, 0, 0], [0, 0, 0]]. .. note:: - The size attribute serves a role similar to + The input_shapes attribute serves a role similar to `shape in Numpy `_, with the difference that - size permits the specification of `ragged arrays `_ -- that is, ones + input_shapes permits the specification of `ragged arrays `_ -- that is, ones that have elements of varying lengths, such as [[1,2],[3,4,5]]. .. 
_Component_Function: @@ -331,10 +331,10 @@ _instantiate_function method checks that the input of the Component's `function ` is compatible with its `variable `). - * `_handle_size ` attempts to infer - `variable ` from the **size** argument if + * `_handle_input_shapes ` attempts to infer + `variable ` from the **input_shapes** argument if **variable** is not passed as an argument. - The _handle_size method then checks that the **size** and **variable** arguments are compatible. + The _handle_input_shapes method then checks that the **input_shapes** and **variable** arguments are compatible. * `_instantiate_defaults ` first calls the validation methods, and then assigns the default values for all of the attributes of the instance of the Component being created. @@ -535,7 +535,7 @@ MODEL_SPEC_ID_INPUT_PORTS, MODEL_SPEC_ID_OUTPUT_PORTS, \ MODEL_SPEC_ID_MDF_VARIABLE, \ MODULATORY_SPEC_KEYWORDS, NAME, OUTPUT_PORTS, OWNER, PARAMS, PREFS_ARG, \ - RESET_STATEFUL_FUNCTION_WHEN, SIZE, VALUE, VARIABLE, SHARED_COMPONENT_TYPES + RESET_STATEFUL_FUNCTION_WHEN, INPUT_SHAPES, VALUE, VARIABLE, SHARED_COMPONENT_TYPES from psyneulink.core.globals.log import LogCondition from psyneulink.core.globals.parameters import \ Defaults, SharedParameter, Parameter, ParameterAlias, ParameterError, ParametersBase, check_user_specified, copy_parameter_value, is_array_like @@ -750,7 +750,7 @@ class Component(MDFSerializable, metaclass=ComponentsMeta): """ Component( \ default_variable=None, \ - size=None, \ + input_shapes=None, \ params=None, \ name=None, \ prefs=None, \ @@ -772,7 +772,7 @@ class Component(MDFSerializable, metaclass=ComponentsMeta): The variable(s) can be a function reference, in which case the function is called to resolve the value; however: it must be "wrapped" as an item in a list, so that it is not called before being passed it must of course return a variable of the type expected for the variable - The size argument is an int or array of ints, which specify the size of variable and set variable to be array(s) + The input_shapes argument is an int or array of ints, which specify the input_shapes of variable and set variable to be array(s) of zeros. The default variableList is a list of default values, one for each of the variables defined in the child class The params argument is a dictionary; the key for each entry is the parameter name, associated with its value. @@ -815,11 +815,11 @@ class Component(MDFSerializable, metaclass=ComponentsMeta): specifies template for the input to the Component's `function `, and the value used as the input to the Component if none is provided on execution (see `Component_Variable` for additional information). - size : int, or Iterable of tuple or int : default None + input_shapes : int, or Iterable of tuple or int : default None specifies default_variable as array(s) of zeros if **default_variable** is not passed as an argument; if **default_variable** is specified, it is checked for - compatibility against **size** (see - `size ` for additonal details). + compatibility against **input_shapes** (see + `input_shapes ` for additonal details). 
COMMENT: param_defaults : : default None, @@ -847,8 +847,8 @@ class Component(MDFSerializable, metaclass=ComponentsMeta): variable : 2d np.array see `variable ` - size : Union[int, Iterable[Union[int, tuple]]] - see `size ` + input_shapes : Union[int, Iterable[Union[int, tuple]]] + see `input_shapes ` function : Function, function or method see `function ` @@ -930,7 +930,7 @@ class Component(MDFSerializable, metaclass=ComponentsMeta): componentCategory = None componentType = None - standard_constructor_args = {EXECUTE_UNTIL_FINISHED, FUNCTION_PARAMS, MAX_EXECUTIONS_BEFORE_FINISHED, RESET_STATEFUL_FUNCTION_WHEN, SIZE} + standard_constructor_args = {EXECUTE_UNTIL_FINISHED, FUNCTION_PARAMS, MAX_EXECUTIONS_BEFORE_FINISHED, RESET_STATEFUL_FUNCTION_WHEN, INPUT_SHAPES} # helper attributes for MDF model spec _model_spec_id_parameters = 'parameters' @@ -1113,7 +1113,7 @@ def _parse_modulable(self, param_name, param_value): def __init__(self, default_variable, param_defaults, - size=None, + input_shapes=None, function=None, name=None, reset_stateful_function_when=None, @@ -1124,7 +1124,7 @@ def __init__(self, Initialization arguments: - default_variable (anything): establishes type for the variable, used for validation - - size (int or list/array of ints): if specified, establishes variable if variable was not already specified + - input_shapes (int or list/array of ints): if specified, establishes variable if variable was not already specified - params_default (dict): assigned as default Note: if parameter_validation is off, validation is suppressed (for efficiency) (Component class default = on) @@ -1141,7 +1141,7 @@ def __init__(self, self.reset_stateful_function_when = Never() parameter_values, function_params = self._parse_arguments( - default_variable, param_defaults, size, function, function_params, kwargs + default_variable, param_defaults, input_shapes, function, function_params, kwargs ) self._initialize_parameters( @@ -1646,9 +1646,9 @@ def _gen_llvm_function(self, *, ctx:pnlvm.LLVMBuilderContext, # Handlers # ------------------------------------------------------------------------------------------------------------------ - def _handle_default_variable(self, default_variable=None, size=None): + def _handle_default_variable(self, default_variable=None, input_shapes=None): """ - Finds whether default_variable can be determined using **default_variable** and **size** + Finds whether default_variable can be determined using **default_variable** and **input_shapes** arguments. 
Returns @@ -1657,7 +1657,7 @@ def _handle_default_variable(self, default_variable=None, size=None): None otherwise """ default_variable = self._parse_arg_variable(default_variable) - default_variable = self._handle_size(size, default_variable) + default_variable = self._handle_input_shapes(input_shapes, default_variable) if default_variable is None or default_variable is NotImplemented: return None @@ -1666,19 +1666,19 @@ def _handle_default_variable(self, default_variable=None, size=None): return convert_to_np_array(default_variable, dimension=1) - def _parse_size( - self, size: Union[int, Iterable[Union[int, tuple]]] + def _parse_input_shapes( + self, input_shapes: Union[int, Iterable[Union[int, tuple]]] ) -> np.ndarray: """ - Returns the equivalent 'variable' array specified by **size** + Returns the equivalent 'variable' array specified by **input_shapes** Args: - size (Union[int, Iterable[Union[int, tuple]]]) + input_shapes (Union[int, Iterable[Union[int, tuple]]]) Returns: np.ndarray """ - def get_size_elem(s, idx=None): + def get_input_shapes_elem(s, idx=None): try: return np.zeros(s) except (TypeError, ValueError) as e: @@ -1688,43 +1688,43 @@ def get_size_elem(s, idx=None): idx_str = '' raise ComponentError( - f'Invalid size argument of {self}{idx_str}. size must be a' + f'Invalid input_shapes argument of {self}{idx_str}. input_shapes must be a' ' valid numpy shape or a list of shapes for use with' f' numpy.zeros: {e}' ) from e - if not is_iterable(size, exclude_str=True): - variable_from_size = np.asarray([get_size_elem(size)]) + if not is_iterable(input_shapes, exclude_str=True): + variable_from_input_shapes = np.asarray([get_input_shapes_elem(input_shapes)]) else: - if len(size) == 0: + if len(input_shapes) == 0: raise ComponentError( - f'Invalid size argument of {self}. size must not be an empty list' + f'Invalid input_shapes argument of {self}. input_shapes must not be an empty list' ) - variable_from_size = [] - for i, s in enumerate(size): - variable_from_size.append(get_size_elem(s, i)) - variable_from_size = convert_all_elements_to_np_array(variable_from_size) + variable_from_input_shapes = [] + for i, s in enumerate(input_shapes): + variable_from_input_shapes.append(get_input_shapes_elem(s, i)) + variable_from_input_shapes = convert_all_elements_to_np_array(variable_from_input_shapes) - return variable_from_size + return variable_from_input_shapes # ELIMINATE SYSTEM - # IMPLEMENTATION NOTE: (7/7/17 CW) Due to System and Process being initialized with size at the moment (which will - # be removed later), I’m keeping _handle_size in Component.py. I’ll move the bulk of the function to Mechanism - # through an override, when Composition is done. For now, only Port.py overwrites _handle_size(). - def _handle_size(self, size, variable): - """If variable is None, _handle_size tries to infer variable based on the **size** argument to the - __init__() function. If size is None (usually in the case of + # IMPLEMENTATION NOTE: (7/7/17 CW) Due to System and Process being initialized with input_shapes at the moment (which will + # be removed later), I’m keeping _handle_input_shapes in Component.py. I’ll move the bulk of the function to Mechanism + # through an override, when Composition is done. For now, only Port.py overwrites _handle_input_shapes(). + def _handle_input_shapes(self, input_shapes, variable): + """If variable is None, _handle_input_shapes tries to infer variable based on the **input_shapes** argument to the + __init__() function. 
If input_shapes is None (usually in the case of Projections/Functions), then this function passes without - doing anything. If both size and variable are not None, a + doing anything. If both input_shapes and variable are not None, a ComponentError is thrown if they are not compatible. """ - if size is not None: + if input_shapes is not None: self._variable_shape_flexibility = self._specified_variable_shape_flexibility - # region Fill in and infer variable and size if they aren't specified in args - # if variable is None and size is None: + # region Fill in and infer variable and input_shapes if they aren't specified in args + # if variable is None and input_shapes is None: # variable = self.class_defaults.variable # 6/30/17 now handled in the individual subclasses' __init__() methods because each subclass has different - # expected behavior when variable is None and size is None. + # expected behavior when variable is None and input_shapes is None. # implementation note: for good coding practices, perhaps add setting to enable easy change of the default # value of variable (though it's an unlikely use case), which is an array of zeros at the moment @@ -1736,41 +1736,41 @@ def conflict_error(reason=None): reason_str = '' return ComponentError( - f'size and default_variable arguments of {self} conflict{reason_str}' + f'input_shapes and default_variable arguments of {self} conflict{reason_str}' ) - variable_from_size = self._parse_size(size) + variable_from_input_shapes = self._parse_input_shapes(input_shapes) if variable is None: - return variable_from_size + return variable_from_input_shapes - if is_iterable(size, exclude_str=True): - assert len(size) == len(variable_from_size) + if is_iterable(input_shapes, exclude_str=True): + assert len(input_shapes) == len(variable_from_input_shapes) if variable.ndim == 0: raise conflict_error( - 'size gives a list of items but default_variable is 0d' + 'input_shapes gives a list of items but default_variable is 0d' ) - elif len(size) != len(variable): + elif len(input_shapes) != len(variable): raise conflict_error( - f'len(size) is {len(size)};' + f'len(input_shapes) is {len(input_shapes)};' f' len(default_variable) is {len(variable)}' ) else: - for i in range(len(size)): - if variable_from_size[i].shape != variable[i].shape: + for i in range(len(input_shapes)): + if variable_from_input_shapes[i].shape != variable[i].shape: raise conflict_error( - f'size[{i}].shape: {variable_from_size[i].shape};' + f'input_shapes[{i}].shape: {variable_from_input_shapes[i].shape};' f' default_variable[{i}].shape: {variable[i].shape}' ) else: - if variable_from_size.shape != variable.shape: + if variable_from_input_shapes.shape != variable.shape: raise conflict_error( - f'size.shape: {variable_from_size.shape};' + f'input_shapes.shape: {variable_from_input_shapes.shape};' f' default_variable.shape: {variable.shape}' ) - # if variable_from_size is created an error has not been thrown + # if variable_from_input_shapes is created an error has not been thrown # so far, variable is equal return variable @@ -2188,7 +2188,7 @@ def alias_conflicts(alias, passed_name): ) def _parse_arguments( - self, default_variable, param_defaults, size, function, function_params, kwargs + self, default_variable, param_defaults, input_shapes, function, function_params, kwargs ): if function_params is None: function_params = {} @@ -2201,7 +2201,7 @@ def _parse_arguments( parameter_values = { **{ 'function': function, - 'size': size, + 'input_shapes': input_shapes, 'default_variable': 
default_variable, 'function_params': function_params }, @@ -3725,7 +3725,7 @@ def name(self, value): self._name = value @property - def size(self): + def input_shapes(self): s = [] try: diff --git a/psyneulink/core/components/functions/nonstateful/learningfunctions.py b/psyneulink/core/components/functions/nonstateful/learningfunctions.py index c8f4d6ea349..142f976ba24 100644 --- a/psyneulink/core/components/functions/nonstateful/learningfunctions.py +++ b/psyneulink/core/components/functions/nonstateful/learningfunctions.py @@ -822,10 +822,10 @@ def __init__(self, prefs=prefs, ) - def _handle_default_variable(self, default_variable=None, size=None): + def _handle_default_variable(self, default_variable=None, input_shapes=None): # If default_variable was not specified by user... - if default_variable is None and size in {None, NotImplemented}: + if default_variable is None and input_shapes in {None, NotImplemented}: # but mu_0 and/or sigma_0 was specified as an array... if isinstance(self.mu_0, (list, np.ndarray)) or isinstance(self.sigma_0, (list, np.ndarray)): # if both are specified, make sure they are the same size @@ -842,7 +842,7 @@ def _handle_default_variable(self, default_variable=None, size=None): else: default_variable = [np.zeros_like(self.sigma_0), np.zeros((1,1))] - return super()._handle_default_variable(default_variable=default_variable, size=size) + return super()._handle_default_variable(default_variable=default_variable, input_shapes=input_shapes) def initialize_priors(self): """Set the prior parameters (`mu_prior `, `Lamba_prior `, diff --git a/psyneulink/core/components/functions/nonstateful/objectivefunctions.py b/psyneulink/core/components/functions/nonstateful/objectivefunctions.py index 66d45844e32..4a7d890028c 100644 --- a/psyneulink/core/components/functions/nonstateful/objectivefunctions.py +++ b/psyneulink/core/components/functions/nonstateful/objectivefunctions.py @@ -33,7 +33,7 @@ CORRELATION, COSINE, COSINE_SIMILARITY, CROSS_ENTROPY, \ DEFAULT_VARIABLE, DIFFERENCE, DISTANCE_FUNCTION, DISTANCE_METRICS, DOT_PRODUCT, \ ENERGY, ENTROPY, EUCLIDEAN, HOLLOW_MATRIX, MATRIX, MAX_ABS_DIFF, NORMALIZE, \ - NORMED_L0_SIMILARITY, OBJECTIVE_FUNCTION_TYPE, SIZE, STABILITY_FUNCTION + NORMED_L0_SIMILARITY, OBJECTIVE_FUNCTION_TYPE, INPUT_SHAPES, STABILITY_FUNCTION from psyneulink.core.globals.parameters import FunctionParameter, Parameter, check_user_specified, copy_parameter_value from psyneulink.core.globals.preferences.basepreferenceset import ValidPrefSet from psyneulink.core.globals.utilities import DistanceMetricLiteral, safe_len, convert_to_np_array, convert_all_elements_to_np_array @@ -100,7 +100,7 @@ class Stability(ObjectiveFunction): variable : list or 1d array of numbers: Default class_defaults.variable specifies shape and default value of the array for which stability is calculated. - size : int : None + input_shapes : int : None specifies length of the array over which stability is calculated; can be used in place of default_value, in which case zeros are assigned as the value(s). An error is generated if both are specified but size != len(default_value). 
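As a quick illustration of the renamed argument on this Function (a sketch only; ``my_stability`` is a hypothetical name, and the equivalence of the two calls follows from the constructor logic shown in the hunk below)::

    >>> import psyneulink as pnl
    >>> my_stability = pnl.Stability(input_shapes=3)                  # default_variable becomes np.zeros(3)
    >>> my_stability = pnl.Stability(default_variable=[0., 0., 0.])   # equivalent explicit specification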
@@ -211,7 +211,7 @@ class Parameters(ObjectiveFunction.Parameters): @beartype def __init__(self, default_variable=None, - size=None, + input_shapes=None, matrix=None, # metric:is_distance_metric=None, metric: Optional[DistanceMetricLiteral] = None, @@ -221,12 +221,12 @@ def __init__(self, owner=None, prefs: Optional[ValidPrefSet] = None): - if size: + if input_shapes: if default_variable is None: - default_variable = np.zeros(size) - elif size != len(default_variable): - raise FunctionError(f"Both {repr(DEFAULT_VARIABLE)} ({default_variable}) and {repr(SIZE)} ({size}) " - f"are specified for {self.name} but are {SIZE}!=len({DEFAULT_VARIABLE}).") + default_variable = np.zeros(input_shapes) + elif input_shapes != len(default_variable): + raise FunctionError(f"Both {repr(DEFAULT_VARIABLE)} ({default_variable}) and {repr(INPUT_SHAPES)} ({input_shapes}) " + f"are specified for {self.name} but are {INPUT_SHAPES}!=len({DEFAULT_VARIABLE}).") super().__init__( default_variable=default_variable, @@ -497,7 +497,7 @@ class Energy(Stability): variable : list or 1d array of numbers: Default class_defaults.variable specifies shape and default value of the array for which energy is calculated. - size : int : None + input_shapes : int : None specifies length of the array over which energy is calculated; can be used in place of default_value, in which case zeros are assigned as the value(s). An error is generated if both are specified but size != len(default_value). @@ -564,7 +564,7 @@ class Energy(Stability): @check_user_specified def __init__(self, default_variable=None, - size=None, + input_shapes=None, normalize:bool=None, # transfer_fct=None, matrix=None, @@ -574,7 +574,7 @@ def __init__(self, super().__init__( default_variable=default_variable, - size=size, + input_shapes=input_shapes, metric=ENERGY, matrix=matrix, # transfer_fct=transfer_fct, @@ -588,7 +588,7 @@ class Entropy(Stability): """ Entropy( \ default_variable=None, \ - size=None, \ + input_shapes=None, \ matrix=INVERSE_HOLLOW_MATRIX, \ transfer_fct=None \ normalize=False, \ @@ -607,10 +607,10 @@ class Entropy(Stability): variable : list or 1d array of numbers: Default class_defaults.variable specifies shape and default value of the array for which entropy is calculated. - size : int : None + input_shapes : int : None specifies length of the array over which entropy is calculated; can be used in place of default_value, in which case zeros are assigned as the value(s). An error is generated if both are specified but - size != len(default_value). + input_shapes != len(default_value). matrix : list, np.ndarray, or matrix keyword : default INVERSE_HOLLOW_MATRIX specifies the matrix of recurrent weights; must be a square matrix with the same width as the @@ -644,7 +644,7 @@ class Entropy(Stability): variable : 1d array array for which entropy is calculated. - size : int + input_shapes : int length of array for which energy is calculated. 
matrix : list, np.ndarray, or matrix keyword diff --git a/psyneulink/core/components/functions/nonstateful/transferfunctions.py b/psyneulink/core/components/functions/nonstateful/transferfunctions.py index c6b60e192ce..14028424f78 100644 --- a/psyneulink/core/components/functions/nonstateful/transferfunctions.py +++ b/psyneulink/core/components/functions/nonstateful/transferfunctions.py @@ -4338,7 +4338,7 @@ class TransferWithCosts(TransferFunction): """ TransferWithCosts( \ default_variable=None, \ - size=None, \ + input_shapes=None, \ transfer_fct=Line \ enabled_cost_functions=None, \ intensity_fct=Exponential \ @@ -4412,11 +4412,11 @@ class TransferWithCosts(TransferFunction): ` on which costs are calculated. - size : int : None + input_shapes : int : None specifies length of the array for `variable ` used by `function ` and on which costs are calculated; can be used in place of default_value, in which case zeros are assigned as the value(s). An error is generated if both are - specified but size != len(default_value). + specified but input_shapes != len(default_value). transfer_fct : TransferFunction : Linear specifies the primary function, used to generate the value it returns. @@ -4459,7 +4459,7 @@ class TransferWithCosts(TransferFunction): value used by `function `, and on which `intensity ` and associated costs are calculated. - size : int + input_shapes : int length of array for `variable `. intensity : 1 array @@ -4814,7 +4814,7 @@ class Parameters(TransferFunction.Parameters): @beartype def __init__(self, default_variable=None, - size=None, + input_shapes=None, transfer_fct: Optional[Callable] = None, enabled_cost_functions: Optional[Union[CostFunctions, list]] = None, intensity_cost_fct: Optional[Callable] = None, @@ -4825,11 +4825,11 @@ def __init__(self, owner=None, prefs: Optional[ValidPrefSet] = None): - # if size: + # if input_shapes: # if default_variable is None: - # default_variable = np.zeros(size) - # elif size != len(default_variable): - # raise FunctionError(f"Both {repr(DEFAULT_VARIABLE)} ({default_variable}) and {repr(SIZE)} ({size}) " + # default_variable = np.zeros(input_shapes) + # elif input_shapes != len(default_variable): + # raise FunctionError(f"Both {repr(DEFAULT_VARIABLE)} ({default_variable}) and {repr(SIZE)} ({input_shapes}) " # f"are specified for {self.name} but are {SIZE}!=len({DEFAULT_VARIABLE}).") super().__init__( diff --git a/psyneulink/core/components/functions/userdefinedfunction.py b/psyneulink/core/components/functions/userdefinedfunction.py index 383a0380988..39f10221485 100644 --- a/psyneulink/core/components/functions/userdefinedfunction.py +++ b/psyneulink/core/components/functions/userdefinedfunction.py @@ -144,8 +144,8 @@ class UserDefinedFunction(Function_Base): array([[6]]) Note that the function treats its argument, x, as a 2d array, and accesses its first item for the calculation. - This is because the `variable ` of ``my_mech`` is defined in the **size** argument of - its constructor as having a single item (a 1d array of length 3; (see `size `). In the + This is because the `variable ` of ``my_mech`` is defined in the **input_shapes** argument of + its constructor as having a single item (a 1d array of length 3; (see `input_shapes `). In the following example, a function is defined for a Mechanism in which the variable has two items, that are summed by the function:: @@ -267,7 +267,7 @@ class UserDefinedFunction(Function_Base): >>> L = pnl.Logistic(gain = 2) >>> def my_fct(variable): ... 
return L(variable) + 2 - >>> my_mech = pnl.ProcessingMechanism(size = 3, function = my_fct) + >>> my_mech = pnl.ProcessingMechanism(input_shapes = 3, function = my_fct) >>> my_mech.execute(input = [1, 2, 3]) #doctest: +SKIP array([[2.88079708, 2.98201379, 2.99752738]]) @@ -280,7 +280,7 @@ class UserDefinedFunction(Function_Base): For example, the following assigns ``my_sinusoidal_fct`` to the `function ` of an OutputPort of ``my_mech``, rather the Mechanism's `function `:: - >>> my_wave_mech = pnl.ProcessingMechanism(size=1, + >>> my_wave_mech = pnl.ProcessingMechanism(input_shapes=1, ... function=pnl.Linear, ... output_ports=[{pnl.NAME: 'SINUSOIDAL OUTPUT', ... pnl.VARIABLE: [(pnl.OWNER_VALUE, 0),pnl.EXECUTION_COUNT], diff --git a/psyneulink/core/components/mechanisms/mechanism.py b/psyneulink/core/components/mechanisms/mechanism.py index 79caf6c97d4..e261ce5eb32 100644 --- a/psyneulink/core/components/mechanisms/mechanism.py +++ b/psyneulink/core/components/mechanisms/mechanism.py @@ -419,7 +419,7 @@ a Mechanism's InputPorts and the items of its `variable `, their size along their outermost dimension (axis 0) must be equal; that is, the number of items in the Mechanism's `variable ` attribute must equal the number of InputPorts in its `input_ports ` attribute. A -Mechanism's constructor does its best to insure this: if its **default_variable** and/or its **size** argument is +Mechanism's constructor does its best to insure this: if its **default_variable** and/or its **input_shapes** argument is specified, it constructs a number of InputPorts (and each with a `value `) corresponding to the items specified for the Mechanism's `variable `, as in the examples below:: @@ -444,7 +444,7 @@ print(my_mech_C.variable) > [array([0, 0]) array([0])] -If both the **default_variable** (or **size**) and **input_ports** arguments are specified, then the number and format +If both the **default_variable** (or **input_shapes**) and **input_ports** arguments are specified, then the number and format of their respective items must be the same (see `Port ` for additional examples of specifying Ports). If InputPorts are added using the Mechanism's `add_ports ` method, then its @@ -478,9 +478,9 @@ ` of the corresponding InputPorts for any that are not explicitly specified in the **input_ports** argument or *INPUT_PORTS* entry (see below). .. -* **size** (int, list or ndarray) -- specifies the number and length of items in the Mechanism's variable, +* **input_shapes** (int, list or ndarray) -- specifies the number and length of items in the Mechanism's variable, if **default_variable** is not specified. For example, the following mechanisms are equivalent:: - T1 = TransferMechanism(size = [3, 2]) + T1 = TransferMechanism(input_shapes = [3, 2]) T2 = TransferMechanism(default_variable = [[0, 0, 0], [0, 0]]) The relationship to any specifications in the **input_ports** argument or *INPUT_PORTS* entry of a **params** dictionary is the same as for the **default_variable** argument, @@ -488,7 +488,7 @@ .. * **input_ports** (list) -- this can be used to explicitly `specify the InputPorts ` created for the Mechanism. Each item must be an `InputPort specification `, and the number - of items must match the number of items in the **default_variable** argument or **size** argument + of items must match the number of items in the **default_variable** argument or **input_shapes** argument if either of those is specified. 
If the `variable ` and/or `value ` is `explicitly specified for an InputPort ` in the **input_ports** argument or *INPUT_PORTS* entry of a **params** dictionary, it must be compatible with the value of the corresponding @@ -1148,7 +1148,7 @@ class Mechanism_Base(Mechanism): """ Mechanism_Base( \ default_variable=None, \ - size=None, \ + input_shapes=None, \ input_ports, \ function, \ output_ports, \ @@ -1222,21 +1222,21 @@ class Mechanism_Base(Mechanism): of its `function ` if those are not specified. If it is not specified, then a subclass-specific default is assigned (usually [[0]]). - size : int, or Iterable of tuples or ints : default None + input_shapes : int, or Iterable of tuples or ints : default None specifies default_variable as array(s) of zeros if **default_variable** is not passed as an argument; if **default_variable** is specified, it must be equivalent to - **size**. + **input_shapes**. For example, the following Mechanisms are equivalent:: - my_mech = ProcessingMechanism(size = [3, 2]) + my_mech = ProcessingMechanism(input_shapes = [3, 2]) my_mech = ProcessingMechanism(default_variable = [[0, 0, 0], [0, 0]]) - When specified as an iterable, each element of **size** is used + When specified as an iterable, each element of **input_shapes** is used as the size of the corresponding InputPort. input_ports : str, list, dict, or np.ndarray : default None specifies the InputPorts for the Mechanism; if it is not specified, a single InputPort is created using the value of default_variable as its `variable `; if more than one is specified, the number and, if specified, their values must be compatible with any specifications made for - **default_variable** or **size** (see `Mechanism_InputPorts` for additional details). + **default_variable** or **input_shapes** (see `Mechanism_InputPorts` for additional details). input_labels : dict specifies labels (strings) that can be used to specify numeric values as input to the Mechanism; @@ -1698,7 +1698,7 @@ def _parse_output_ports(self, output_ports): @abc.abstractmethod def __init__(self, default_variable=None, - size=None, + input_shapes=None, input_ports=None, input_labels=None, function=None, @@ -1717,7 +1717,7 @@ def __init__(self, NOTES: * Since Mechanism is a subclass of Component, it calls super.__init__ - to validate size and default_variable and param_defaults; + to validate input_shapes and default_variable and param_defaults; it uses INPUT_PORT as the default_variable * registers Mechanism with MechanismRegistry @@ -1761,7 +1761,7 @@ def __init__(self, super(Mechanism_Base, self).__init__( default_variable=default_variable, - size=size, + input_shapes=input_shapes, function=function, param_defaults=params, prefs=prefs, @@ -1794,9 +1794,9 @@ def _parse_arg_variable(self, variable): # Handlers # ------------------------------------------------------------------------------------------------------------------ - def _handle_default_variable(self, default_variable=None, size=None, input_ports=None, function=None, params=None): + def _handle_default_variable(self, default_variable=None, input_shapes=None, input_ports=None, function=None, params=None): """ - Finds whether default_variable can be determined using **default_variable** and **size** + Finds whether default_variable can be determined using **default_variable** and **input_shapes** arguments. 
Returns @@ -1827,20 +1827,20 @@ def _handle_default_variable(self, default_variable=None, size=None, input_ports if default_variable_from_input_ports is not None: if default_variable is None: - if size is None: + if input_shapes is None: default_variable = default_variable_from_input_ports else: if input_ports_variable_was_specified: - size_variable = self._handle_size(size, None) - if iscompatible(size_variable, default_variable_from_input_ports): + input_shapes_variable = self._handle_input_shapes(input_shapes, None) + if iscompatible(input_shapes_variable, default_variable_from_input_ports): default_variable = default_variable_from_input_ports else: raise MechanismError( f'Default variable for {self.name} determined from the specified input_ports spec ' f'({default_variable_from_input_ports}) is not compatible with the default variable ' - f'determined from size parameter ({size_variable}).') + f'determined from input_shapes parameter ({input_shapes_variable}).') else: - # do not pass input_ports variable as default_variable, fall back to size specification + # do not pass input_ports variable as default_variable, fall back to input_shapes specification pass else: if input_ports_variable_was_specified: @@ -1853,7 +1853,7 @@ def _handle_default_variable(self, default_variable=None, size=None, input_ports # do not pass input_ports variable as default_variable, fall back to default_variable specification pass - return super()._handle_default_variable(default_variable=default_variable, size=size) + return super()._handle_default_variable(default_variable=default_variable, input_shapes=input_shapes) def _handle_arg_input_ports(self, input_ports): """ diff --git a/psyneulink/core/components/mechanisms/modulatory/control/controlmechanism.py b/psyneulink/core/components/mechanisms/modulatory/control/controlmechanism.py index 4f77c962ef0..549489c5ce6 100644 --- a/psyneulink/core/components/mechanisms/modulatory/control/controlmechanism.py +++ b/psyneulink/core/components/mechanisms/modulatory/control/controlmechanism.py @@ -630,7 +630,7 @@ EID_SIMULATION, FEEDBACK, FUNCTION, GATING_SIGNAL, INIT_EXECUTE_METHOD_ONLY, INTERNAL_ONLY, NAME, \ MECHANISM, MULTIPLICATIVE, MODULATORY_SIGNALS, MONITOR_FOR_CONTROL, MONITOR_FOR_MODULATION, \ OBJECTIVE_MECHANISM, OUTCOME, OWNER_VALUE, PARAMS, PORT_TYPE, PRODUCT, PROJECTION_TYPE, PROJECTIONS, \ - REFERENCE_VALUE, SEPARATE, SIZE, VALUE + REFERENCE_VALUE, SEPARATE, INPUT_SHAPES, VALUE from psyneulink.core.globals.parameters import Parameter, check_user_specified from psyneulink.core.globals.context import Context from psyneulink.core.globals.preferences.basepreferenceset import ValidPrefSet @@ -1276,7 +1276,7 @@ def _validate_output_ports(self, control): @beartype def __init__(self, default_variable=None, - size=None, + input_shapes=None, monitor_for_control: Optional[Union[Iterable, Mechanism, OutputPort]] = None, objective_mechanism=None, allow_probes: bool = False, @@ -1341,7 +1341,7 @@ def __init__(self, super(ControlMechanism, self).__init__( default_variable=default_variable, - size=size, + input_shapes=input_shapes, modulation=modulation, params=params, name=name, @@ -1498,9 +1498,9 @@ def _instantiate_objective_mechanism(self, input_ports=None, context=None): # Get size of ObjectiveMechanism's OUTCOME OutputPort, and then append sizes of other any InputPorts passed in outcome_input_port_size = self.objective_mechanism.output_ports[OUTCOME].value.size - outcome_input_port = {SIZE:outcome_input_port_size, - NAME:OUTCOME, - PARAMS:{INTERNAL_ONLY:True}} + 
outcome_input_port = {INPUT_SHAPES:outcome_input_port_size, + NAME:OUTCOME, + PARAMS:{INTERNAL_ONLY:True}} other_input_port_value_sizes, _ = self._handle_arg_input_ports(other_input_ports) input_port_value_sizes = [outcome_input_port_size] + other_input_port_value_sizes input_ports = [outcome_input_port] + other_input_ports diff --git a/psyneulink/core/components/mechanisms/modulatory/control/gating/gatingmechanism.py b/psyneulink/core/components/mechanisms/modulatory/control/gating/gatingmechanism.py index 6df5e05f0be..de5626b8780 100644 --- a/psyneulink/core/components/mechanisms/modulatory/control/gating/gatingmechanism.py +++ b/psyneulink/core/components/mechanisms/modulatory/control/gating/gatingmechanism.py @@ -257,11 +257,11 @@ class GatingMechanism(ControlMechanism): the default value for each of the GatingMechanism's GatingSignals; its length must equal the number of items specified in the **gate** argument. - size : int, list or 1d np.array of ints + input_shapes : int, list or 1d np.array of ints specifies default_gating_allocation as an array of zeros if **default_gating_allocation** is not passed as an - argument; if **default_gating_allocation** is specified, it takes precedence over the specification of **size**. + argument; if **default_gating_allocation** is specified, it takes precedence over the specification of **input_shapes**. As an example, the following mechanisms are equivalent:: - T1 = TransferMechanism(size = [3, 2]) + T1 = TransferMechanism(input_shapes = [3, 2]) T2 = TransferMechanism(default_variable = [[0, 0, 0], [0, 0]]) monitor_for_gating : List[OutputPort or Mechanism] : default None @@ -308,7 +308,7 @@ class GatingMechanism(ControlMechanism): variable : value, list or ndarray used as the input to the GatingMechanism's `function `. Its format is determined - by the **default_gating_allocation** or **size** argument of the GatingMechanism's constructor (see above), + by the **default_gating_allocation** or **input_shapes** argument of the GatingMechanism's constructor (see above), and is the same format as its `gating_allocation ` (unless a custom `function ` has been assigned). 
@@ -442,7 +442,7 @@ class Parameters(ControlMechanism.Parameters): @beartype def __init__(self, default_gating_allocation=None, - size=None, + input_shapes=None, monitor_for_gating=None, function=None, default_allocation: Optional[Union[int, float, list, np.ndarray]] = None, @@ -470,7 +470,7 @@ def __init__(self, f"'default_gating_allocation'.") super().__init__(default_variable=default_gating_allocation, - size=size, + input_shapes=input_shapes, monitor_for_control=monitor_for_gating, function=function, default_allocation=default_allocation, diff --git a/psyneulink/core/components/mechanisms/modulatory/learning/learningmechanism.py b/psyneulink/core/components/mechanisms/modulatory/learning/learningmechanism.py index 7512d990c32..5ad1a3cac55 100644 --- a/psyneulink/core/components/mechanisms/modulatory/learning/learningmechanism.py +++ b/psyneulink/core/components/mechanisms/modulatory/learning/learningmechanism.py @@ -1091,7 +1091,7 @@ def _parse_error_sources(self, error_sources): def __init__(self, # default_variable:Union[list, np.ndarray], default_variable=None, - size=None, + input_shapes=None, covariates_sources: Optional[Union[InputPort, list]] = None, error_sources: Optional[Union[Mechanism, list]] = None, function=None, @@ -1121,7 +1121,7 @@ def __init__(self, super().__init__( default_variable=default_variable, - size=size, + input_shapes=input_shapes, covariates_sources=covariates_sources, error_sources=error_sources, function=function, diff --git a/psyneulink/core/components/mechanisms/modulatory/modulatorymechanism.py b/psyneulink/core/components/mechanisms/modulatory/modulatorymechanism.py index 2c3a634c416..093c4143d4a 100644 --- a/psyneulink/core/components/mechanisms/modulatory/modulatorymechanism.py +++ b/psyneulink/core/components/mechanisms/modulatory/modulatorymechanism.py @@ -194,7 +194,7 @@ class Parameters(Mechanism_Base.Parameters): @check_user_specified def __init__(self, default_variable, - size, + input_shapes, modulation, params, name, @@ -208,7 +208,7 @@ def __init__(self, super().__init__( default_variable=default_variable, - size=size, + input_shapes=input_shapes, modulation=modulation, params=params, name=name, diff --git a/psyneulink/core/components/mechanisms/processing/compositioninterfacemechanism.py b/psyneulink/core/components/mechanisms/processing/compositioninterfacemechanism.py index 8176a6913f3..d6cbd10861f 100644 --- a/psyneulink/core/components/mechanisms/processing/compositioninterfacemechanism.py +++ b/psyneulink/core/components/mechanisms/processing/compositioninterfacemechanism.py @@ -180,7 +180,7 @@ class Parameters(ProcessingMechanism_Base.Parameters): @beartype def __init__(self, default_variable=None, - size=None, + input_shapes=None, input_ports: Optional[Union[Iterable, Mechanism, OutputPort, InputPort]] = None, function=None, composition=None, @@ -197,7 +197,7 @@ def __init__(self, OUTPUT_PORTS: set() } super(CompositionInterfaceMechanism, self).__init__(default_variable=default_variable, - size=size, + input_shapes=input_shapes, input_ports=input_ports, function=function, params=params, diff --git a/psyneulink/core/components/mechanisms/processing/defaultprocessingmechanism.py b/psyneulink/core/components/mechanisms/processing/defaultprocessingmechanism.py index 3b4255bdfe8..88e3c29769e 100644 --- a/psyneulink/core/components/mechanisms/processing/defaultprocessingmechanism.py +++ b/psyneulink/core/components/mechanisms/processing/defaultprocessingmechanism.py @@ -59,7 +59,7 @@ class Parameters(Mechanism_Base.Parameters): @beartype 
def __init__(self, default_variable=None, - size=None, + input_shapes=None, params=None, name=None, prefs: Optional[ValidPrefSet] = None, @@ -69,14 +69,14 @@ def __init__(self, """Add Linear as default function, assign default name, and call super.__init__ :param default_variable: (value) - :param size: (int or list/array of ints) + :param input_shapes: (int or list/array of ints) :param params: (dict) :param name: (str) :param prefs: (PreferenceSet) """ super(DefaultProcessingMechanism_Base, self).__init__(default_variable=default_variable, - size=size, + input_shapes=input_shapes, function=function, params=params, name=name, diff --git a/psyneulink/core/components/mechanisms/processing/integratormechanism.py b/psyneulink/core/components/mechanisms/processing/integratormechanism.py index e6cf9a5e20c..e87174cd023 100644 --- a/psyneulink/core/components/mechanisms/processing/integratormechanism.py +++ b/psyneulink/core/components/mechanisms/processing/integratormechanism.py @@ -44,7 +44,7 @@ >>> my_time_averaging_mechanism = pnl.IntegratorMechanism(function=pnl.AdaptiveIntegrator(rate=0.5)) The **default_variable** argument specifies the format of its input (i.e., whether it is a single scalar or an -array), as well as the value to use if none is provided when Mechanism is executed. Alternatively, the **size** +array), as well as the value to use if none is provided when Mechanism is executed. Alternatively, the **input_shapes** argument can be used to specify the length of the array, in which case it will be initialized with all zeros. .. _IntegratorMechanism_Structure: @@ -67,11 +67,11 @@ When an IntegratorMechanism is executed, it carries out the specified integration, and assigns the result to the `value ` of its `primary OutputPort `. For the default function -(`IntegratorFunction`), if the value specified for **default_variable** is a list or array, or **size** is greater +(`IntegratorFunction`), if the value specified for **default_variable** is a list or array, or **input_shapes** is greater than 1, each element of the array is independently integrated. If its `rate ` parameter is a single value, that rate is used for integrating each element. If the `rate ` parameter is a list or array, then each element is used as the rate for the corresponding element of the input (in this case, `rate -` must be the same length as the value specified for **default_variable** or **size**). +` must be the same length as the value specified for **default_variable** or **input_shapes**). Integration can be reset to the value of its `function `\\s `initializer by setting its `reset ` parameter to a non-zero value, as described below. 
@@ -204,7 +204,7 @@ class Parameters(ProcessingMechanism_Base.Parameters): @beartype def __init__(self, default_variable=None, - size=None, + input_shapes=None, input_ports:Optional[Union[list, dict]]=None, function=None, reset_default=0, @@ -217,7 +217,7 @@ def __init__(self, """ super(IntegratorMechanism, self).__init__(default_variable=default_variable, - size=size, + input_shapes=input_shapes, function=function, reset_default=reset_default, params=params, @@ -232,7 +232,7 @@ def __init__(self, # def _parse_function_variable(self, variable, context=None, context=None): # super()._parse_function_variable(variable, context, context) - def _handle_default_variable(self, default_variable=None, size=None, input_ports=None, function=None, params=None): + def _handle_default_variable(self, default_variable=None, input_shapes=None, input_ports=None, function=None, params=None): """If any parameters with len>1 have been specified for the Mechanism's function, and Mechanism's default_variable has not been specified, reshape Mechanism's variable to match function's, but make sure function's has the same outer dimensionality as the Mechanism's @@ -281,7 +281,7 @@ def _handle_default_variable(self, default_variable=None, size=None, input_ports # as the reshaping of the function's variable will be taken care of in _instantiate_function return super()._handle_default_variable(default_variable=variable, - size=size, + input_shapes=input_shapes, input_ports=input_ports, function=function, params=params) diff --git a/psyneulink/core/components/mechanisms/processing/objectivemechanism.py b/psyneulink/core/components/mechanisms/processing/objectivemechanism.py index 1599b540756..44abba283cf 100644 --- a/psyneulink/core/components/mechanisms/processing/objectivemechanism.py +++ b/psyneulink/core/components/mechanisms/processing/objectivemechanism.py @@ -73,7 +73,7 @@ monitor the specified OutputPort. In general, the `value ` of each specified OutputPort determines the format of the `variable ` of the InputPort that is created for it by the ObjectiveMechanism. However, this can be overridden using the ObjectiveMechanism's `default_variable ` -or `size ` attributes (see `Mechanism InputPort specification +or `input_shapes ` attributes (see `Mechanism InputPort specification `), or by specifying a Projection from the OutputPort to the InputPort (see `Input Source Specification `). If an item in the **monitor** argument specifies an InputPort for the ObjectiveMechanism, but not the OutputPort to @@ -154,7 +154,7 @@ By default, the format of the `variable ` for each InputPort is determined by the `value ` of the monitored OutputPort(s) to which it corresponds. However, if either the -**default_variable** or **size** argument is specified in an Objective Mechanism's constructor, or a `variable +**default_variable** or **input_shapes** argument is specified in an Objective Mechanism's constructor, or a `variable ` is `specified for an InputPort ` for one or more of the items in its **monitor** argument, then that is used as the format for the corresponding InputPort(s). 
This can be used to transform the `value ` of a monitored OutputPort into different form for the `variable @@ -567,7 +567,7 @@ class Parameters(ProcessingMechanism_Base.Parameters): def __init__(self, monitor=None, default_variable=None, - size=None, + input_shapes=None, function=None, output_ports: Optional[Union[str, Iterable]] = None, params=None, @@ -590,7 +590,7 @@ def __init__(self, super().__init__( default_variable=default_variable, - size=size, + input_shapes=input_shapes, monitor=monitor, output_ports=output_ports, function=function, diff --git a/psyneulink/core/components/mechanisms/processing/processingmechanism.py b/psyneulink/core/components/mechanisms/processing/processingmechanism.py index bceadd645b3..e22ae1f5784 100644 --- a/psyneulink/core/components/mechanisms/processing/processingmechanism.py +++ b/psyneulink/core/components/mechanisms/processing/processingmechanism.py @@ -64,7 +64,7 @@ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ As with any `Mechanism`, the number of InputPorts can be specified using the **input_ports**, **default_variable** or -**size** arguments of the constructor (see `Mechanism_InputPorts`), and OutputPorts can be specified using the +**input_shapes** arguments of the constructor (see `Mechanism_InputPorts`), and OutputPorts can be specified using the **output_ports** argument (see `Mechanism_OutputPorts`). These can be used to configure processing in a variety of ways. Some common ones are described below (also see `ProcessingMechanism_Examples`). @@ -270,7 +270,7 @@ class ProcessingMechanism_Base(Mechanism_Base): @check_user_specified def __init__(self, default_variable=None, - size=None, + input_shapes=None, input_ports=None, function=None, output_ports=None, @@ -283,7 +283,7 @@ def __init__(self, """Abstract class for processing mechanisms :param variable: (value) - :param size: (int or list/array of ints) + :param input_shapes: (int or list/array of ints) :param params: (dict) :param name: (str) :param prefs: (PreferenceSet) @@ -291,7 +291,7 @@ def __init__(self, """ super().__init__(default_variable=default_variable, - size=size, + input_shapes=input_shapes, input_ports=input_ports, function=function, output_ports=output_ports, @@ -379,7 +379,7 @@ class ProcessingMechanism(ProcessingMechanism_Base): @beartype def __init__(self, default_variable=None, - size=None, + input_shapes=None, input_ports:Optional[Union[Iterable, Mechanism, OutputPort, InputPort]]=None, output_ports:Optional[Union[str, Iterable]]=None, function=None, @@ -388,7 +388,7 @@ def __init__(self, prefs: Optional[ValidPrefSet] = None, **kwargs): super(ProcessingMechanism, self).__init__(default_variable=default_variable, - size=size, + input_shapes=input_shapes, input_ports=input_ports, function=function, output_ports=output_ports, diff --git a/psyneulink/core/components/mechanisms/processing/transfermechanism.py b/psyneulink/core/components/mechanisms/processing/transfermechanism.py index 61268e3d066..842e72e40c3 100644 --- a/psyneulink/core/components/mechanisms/processing/transfermechanism.py +++ b/psyneulink/core/components/mechanisms/processing/transfermechanism.py @@ -110,7 +110,7 @@ ~~~~~~~~~~~~~ By default, a TransferMechanism has a single `InputPort`; however, more than one can be specified -using the **default_variable** or **size** arguments of its constructor (see `Mechanism`). The `value +using the **default_variable** or **input_shapes** arguments of its constructor (see `Mechanism`). 
The `value ` of each InputPort is used as a separate item of the Mechanism's `variable `, and transformed independently by its `function `. @@ -417,12 +417,12 @@ `value ` and the `value ` of its `output_ports ` without using its `integrator_function `, as in the following example:: - # >>> my_mech = pnl.TransferMechanism(size=2) + # >>> my_mech = pnl.TransferMechanism(input_shapes=2) # >>> my_mech.execute([0.5, 1]) # array([[0.5, 1. ]]) >>> my_logistic_tm = pnl.TransferMechanism(function=pnl.Logistic, - ... size=3) + ... input_shapes=3) >>> my_logistic_tm.execute([-2.0, 0, 2.0]) array([[0.11920292, 0.5 , 0.88079708]]) @@ -431,7 +431,7 @@ the value is simply added to the result, as shown in the example below, that uses the TransferMechanism's default `function `, `Linear`:: - >>> my_linear_tm = pnl.TransferMechanism(size=3, + >>> my_linear_tm = pnl.TransferMechanism(input_shapes=3, ... noise=2.0) >>> my_linear_tm.execute([1.0, 1.0, 1.0]) array([[3., 3., 3.]]) @@ -452,7 +452,7 @@ is specified, it is applied to all elements; however, on each execution, the function is executed indpendently for each element. This is shown below using the `NormalDist` function:: - >>> my_linear_tm = pnl.TransferMechanism(size=3, + >>> my_linear_tm = pnl.TransferMechanism(input_shapes=3, ... noise=pnl.NormalDist) >>> my_linear_tm.execute([1.0, 1.0, 1.0]) array([[2.1576537 , 1.60782117, 0.75840058]]) @@ -466,7 +466,7 @@ can also be used in a list to specify **noise**, together with other functions or with numeric values; however, when used in a list, functions must be instances, as shown below:: - >>> my_linear_tm = pnl.TransferMechanism(size=3, + >>> my_linear_tm = pnl.TransferMechanism(input_shapes=3, ... noise=[pnl.NormalDist(), pnl.UniformDist(), 3.0]) >>> my_linear_tm.execute([1.0, 1.0, 1.0]) array([[-0.22503678, 1.36995517, 4. ]]) @@ -509,7 +509,7 @@ results that begin close to its `initializer ` and asymptotically approach the value of the current input, which in this example is [1.0, 1.0, 1,0] for each execution:: - >>> my_linear_tm = pnl.TransferMechanism(size=3, + >>> my_linear_tm = pnl.TransferMechanism(input_shapes=3, ... function=pnl.Linear, ... integrator_mode=True, ... initial_value=np.array([[0.1, 0.5, 0.9]]), @@ -662,7 +662,7 @@ and the scalar returned is compared to **termination_threshold** using the comparison operator specified by **termination_comparison_op**. Execution continues until this returns True, as in the following example:: - >>> my_mech = pnl.TransferMechanism(size=2, + >>> my_mech = pnl.TransferMechanism(input_shapes=2, ... integrator_mode=True, ... termination_measure=max, ... termination_threshold=0.9, @@ -687,7 +687,7 @@ ` is automatically set to *GREATER_THAN_OR_EQUAL*). For example, ``my_mech`` is configured below to execute at least twice per trial:: - >>> my_mech = pnl.TransferMechanism(size=2, + >>> my_mech = pnl.TransferMechanism(input_shapes=2, ... integrator_mode=True, ... termination_measure=TimeScale.TRIAL, ... 
termination_threshold=2) @@ -713,20 +713,20 @@ which feature of the stimulus should be attended) before a stimulus is presented, and then allowing that Mechanism to continue to integrate the instruction and impact stimulus processing once the stimulus is presented:: - >>> stim_input = pnl.ProcessingMechanism(size=2) - >>> stim_percept = pnl.TransferMechanism(size=2, function=pnl.Logistic) - >>> decision = pnl.TransferMechanism(name='Decision', size=2, + >>> stim_input = pnl.ProcessingMechanism(input_shapes=2) + >>> stim_percept = pnl.TransferMechanism(input_shapes=2, function=pnl.Logistic) + >>> decision = pnl.TransferMechanism(name='Decision', input_shapes=2, ... integrator_mode=True, ... execute_until_finished=False, ... termination_threshold=0.65, ... termination_measure=max, ... termination_comparison_op=pnl.GREATER_THAN) - >>> instruction_input = pnl.ProcessingMechanism(size=2, function=pnl.Linear(slope=10)) - >>> attention = pnl.LCAMechanism(name='Attention', size=2, function=pnl.Logistic, + >>> instruction_input = pnl.ProcessingMechanism(input_shapes=2, function=pnl.Linear(slope=10)) + >>> attention = pnl.LCAMechanism(name='Attention', input_shapes=2, function=pnl.Logistic, ... leak=8, competition=8, self_excitation=0, time_step_size=.1, ... termination_threshold=3, ... termination_measure = pnl.TimeScale.TRIAL) - >>> response = pnl.ProcessingMechanism(name='Response', size=2) + >>> response = pnl.ProcessingMechanism(name='Response', input_shapes=2) ... >>> comp = pnl.Composition() >>> comp.add_linear_processing_pathway([stim_input, [[1,-1],[-1,1]], stim_percept, decision, response]) #doctest: +SKIP @@ -1287,7 +1287,7 @@ def _validate_termination_comparison_op(self, termination_comparison_op): @beartype def __init__(self, default_variable=None, - size=None, + input_shapes=None, input_ports: Optional[Union[Iterable, Mechanism, OutputPort, InputPort]] = None, function=None, noise=None, @@ -1320,7 +1320,7 @@ def __init__(self, super(TransferMechanism, self).__init__( default_variable=default_variable, - size=size, + input_shapes=input_shapes, input_ports=input_ports, output_ports=output_ports, initial_value=initial_value, diff --git a/psyneulink/core/components/ports/inputport.py b/psyneulink/core/components/ports/inputport.py index 07b90a33684..f226a1bdfdb 100644 --- a/psyneulink/core/components/ports/inputport.py +++ b/psyneulink/core/components/ports/inputport.py @@ -59,10 +59,10 @@ `. InputPorts can also be specified in the **input_ports** argument of a Mechanism's constructor (see `below `). -The `variable ` of an InputPort can be specified using the **variable** or **size** arguments of -its constructor. It can also be specified using the **projections** argument, if neither **variable** nor **size** is +The `variable ` of an InputPort can be specified using the **variable** or **input_shapes** arguments of +its constructor. It can also be specified using the **projections** argument, if neither **variable** nor **input_shapes** is specified. The **projections** argument is used to `specify Projections ` to the InputPort. If -neither the **variable** nor **size** arguments is specified, then the value of the `Projections(s) ` or +neither the **variable** nor **input_shapes** arguments is specified, then the value of the `Projections(s) ` or their `sender `\\s (all of which must be the same length) is used to determine the `variable ` of the InputPort. 
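For reviewers, a minimal sketch of the renamed argument in use (values are illustrative; this assumes **input_shapes** is a drop-in replacement for the old **size**, as the hunks above indicate)::

    import psyneulink as pnl

    # Two equivalent ways of fixing the shape of a Mechanism's variable after the rename:
    tm_a = pnl.TransferMechanism(input_shapes=3)                    # variable defaults to [[0., 0., 0.]]
    tm_b = pnl.TransferMechanism(default_variable=[[0., 0., 0.]])   # same shape, given explicitly

    # If neither is specified, an InputPort's variable can still be inferred from its
    # projections, as described in the inputport.py docstring above.
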
@@ -590,7 +590,7 @@ INPUT_PORT, INPUT_PORTS, INPUT_PORT_PARAMS, \ LEARNING_SIGNAL, MAPPING_PROJECTION, MATRIX, NAME, OPERATION, OUTPUT_PORT, OUTPUT_PORTS, OWNER, \ PARAMS, PROJECTIONS, REFERENCE_VALUE, \ - SENDER, SHADOW_INPUTS, SHADOW_INPUT_NAME, SIZE, PORT_TYPE, SUM, VALUE, VARIABLE, WEIGHT + SENDER, SHADOW_INPUTS, SHADOW_INPUT_NAME, INPUT_SHAPES, PORT_TYPE, SUM, VALUE, VARIABLE, WEIGHT from psyneulink.core.globals.parameters import Parameter, check_user_specified from psyneulink.core.globals.preferences.basepreferenceset import ValidPrefSet from psyneulink.core.globals.preferences.preferenceset import PreferenceLevel @@ -681,7 +681,7 @@ class InputPort(Port_Base): `GatingProjection(s) ` to be received by the InputPort, and that are listed in its `path_afferents ` and `mod_afferents ` attributes, respectively (see `InputPort_Compatability_and_Constraints` for additional details). If **projections** but - neither **variable** nor **size** are specified, then the `value ` of the Projection(s) + neither **variable** nor **input_shapes** are specified, then the `value ` of the Projection(s) or their `senders ` specified in **projections** argument are used to determine the InputPort's `variable `. @@ -701,7 +701,7 @@ class InputPort(Port_Base): variable : value, list or np.ndarray the template for the `value ` of each Projection that the InputPort receives, each of which must match the format (number and types of elements) of the InputPort's - `variable `. If neither the **variable** or **size** argument is specified, and + `variable `. If neither the **variable** or **input_shapes** argument is specified, and **projections** is specified, then `variable ` is assigned the `value ` of the Projection(s) or its `sender `. @@ -878,7 +878,7 @@ def __init__(self, owner=None, reference_value=None, variable=None, - size=None, + input_shapes=None, default_input=None, function=None, projections=None, @@ -892,8 +892,8 @@ def __init__(self, context=None, **kwargs): - if variable is None and size is None and projections is not None: - variable = self._assign_variable_from_projection(variable, size, projections) + if variable is None and input_shapes is None and projections is not None: + variable = self._assign_variable_from_projection(variable, input_shapes, projections) # If combine argument is specified, save it along with any user-specified function for _validate_params() if combine: @@ -922,7 +922,7 @@ def __init__(self, super(InputPort, self).__init__( owner, variable=variable, - size=size, + input_shapes=input_shapes, projections=projections, function=function, weight=weight, @@ -938,7 +938,7 @@ def __init__(self, if self.name is self.componentName or self.componentName + '-' in self.name: self._assign_default_port_Name() - def _assign_variable_from_projection(self, variable, size, projections): + def _assign_variable_from_projection(self, variable, input_shapes, projections): """Assign variable to value of Projection in projections """ from psyneulink.core.components.projections.projection import \ @@ -1153,16 +1153,16 @@ def _parse_port_specific_specs(self, owner, port_dict, port_specific_spec, conte # if MECHANISM in port_specific_spec: # if OUTPUT_PORTS in port_specific_spec - if any(spec in port_specific_spec for spec in {SIZE, COMBINE}): + if any(spec in port_specific_spec for spec in {INPUT_SHAPES, COMBINE}): - if SIZE in port_specific_spec: + if INPUT_SHAPES in port_specific_spec: if (VARIABLE in port_specific_spec or - any(key in port_dict and port_dict[key] is not None for key in 
{VARIABLE, SIZE})): + any(key in port_dict and port_dict[key] is not None for key in {VARIABLE, INPUT_SHAPES})): raise InputPortError(f"PROGRAM ERROR: SIZE specification found in port_specific_spec dict " f"for {self.__name__} specification of {owner.name} when SIZE or VARIABLE " f"is already present in its port_specific_spec dict or port_dict.") - port_dict.update({VARIABLE:np.zeros(port_specific_spec[SIZE])}) - del port_specific_spec[SIZE] + port_dict.update({VARIABLE:np.zeros(port_specific_spec[INPUT_SHAPES])}) + del port_specific_spec[INPUT_SHAPES] if COMBINE in port_specific_spec: fct_err = None @@ -1395,7 +1395,7 @@ def _port_spec_allows_override_variable(spec): Returns ------- True - if **spec** outlines a spec for creating an InputPort whose variable can be - overridden by a default_variable or size argument + overridden by a default_variable or input_shapes argument False - otherwise ex: specifying an InputPort with a Mechanism allows overriding diff --git a/psyneulink/core/components/ports/modulatorysignals/controlsignal.py b/psyneulink/core/components/ports/modulatorysignals/controlsignal.py index 423f1e5662a..4fab5f23733 100644 --- a/psyneulink/core/components/ports/modulatorysignals/controlsignal.py +++ b/psyneulink/core/components/ports/modulatorysignals/controlsignal.py @@ -795,7 +795,7 @@ def __init__(self, owner=None, reference_value=None, default_allocation=None, - size=None, + input_shapes=None, transfer_function=None, cost_options: Optional[Union[CostFunctions, list]] = None, intensity_cost_function:Optional[Callable] = None, @@ -857,7 +857,7 @@ def __init__(self, owner=owner, reference_value=reference_value, default_allocation=default_allocation, - size=size, + input_shapes=input_shapes, transfer_function=transfer_function, modulation=modulation, modulates=control, diff --git a/psyneulink/core/components/ports/modulatorysignals/gatingsignal.py b/psyneulink/core/components/ports/modulatorysignals/gatingsignal.py index 74494a578ad..2e76d8381e8 100644 --- a/psyneulink/core/components/ports/modulatorysignals/gatingsignal.py +++ b/psyneulink/core/components/ports/modulatorysignals/gatingsignal.py @@ -182,9 +182,9 @@ *MULTIPLICATIVE_PARAM* of an InputPort's `function `. In the example, this is changed so that it *adds* the `value ` of the `GatingSignal` to the `value ` of each InputPort:: - >>> my_input_layer = pnl.TransferMechanism(size=3) - >>> my_hidden_layer = pnl.TransferMechanism(size=5) - >>> my_output_layer = pnl.TransferMechanism(size=2) + >>> my_input_layer = pnl.TransferMechanism(input_shapes=3) + >>> my_hidden_layer = pnl.TransferMechanism(input_shapes=5) + >>> my_output_layer = pnl.TransferMechanism(input_shapes=2) >>> my_gating_mechanism = pnl.GatingMechanism(gating_signals=[{pnl.NAME: 'GATE_ALL', ... pnl.PROJECTIONS: [my_input_layer, ... 
my_hidden_layer, @@ -422,7 +422,7 @@ def __init__(self, owner=None, reference_value=None, default_allocation=defaultGatingAllocation, - size=None, + input_shapes=None, transfer_function=None, modulation:Optional[str]=None, gate=None, @@ -466,7 +466,7 @@ def __init__(self, super().__init__(owner=owner, reference_value=reference_value, default_allocation=default_allocation, - size=size, + input_shapes=input_shapes, modulation=modulation, control=gate, params=params, diff --git a/psyneulink/core/components/ports/modulatorysignals/learningsignal.py b/psyneulink/core/components/ports/modulatorysignals/learningsignal.py index b4d74b151eb..6429c510f8b 100644 --- a/psyneulink/core/components/ports/modulatorysignals/learningsignal.py +++ b/psyneulink/core/components/ports/modulatorysignals/learningsignal.py @@ -357,7 +357,7 @@ def __init__(self, owner=None, reference_value=None, variable=None, - size=None, + input_shapes=None, index=PRIMARY, assign=None, function=None, @@ -378,7 +378,7 @@ def __init__(self, super().__init__(owner=owner, reference_value=reference_value, variable=variable, - size=size, + input_shapes=input_shapes, modulation=modulation, index=index, assign=None, diff --git a/psyneulink/core/components/ports/modulatorysignals/modulatorysignal.py b/psyneulink/core/components/ports/modulatorysignals/modulatorysignal.py index bdfafa0b7bc..de87ee2595e 100644 --- a/psyneulink/core/components/ports/modulatorysignals/modulatorysignal.py +++ b/psyneulink/core/components/ports/modulatorysignals/modulatorysignal.py @@ -563,7 +563,7 @@ class Parameters(OutputPort.Parameters): @check_user_specified def __init__(self, owner=None, - size=None, + input_shapes=None, reference_value=None, default_allocation=defaultModulatoryAllocation, function=None, @@ -601,7 +601,7 @@ def __init__(self, super().__init__(owner=owner, reference_value=reference_value, variable=default_allocation, - size=size, + input_shapes=input_shapes, projections=modulates, index=index, assign=assign, diff --git a/psyneulink/core/components/ports/outputport.py b/psyneulink/core/components/ports/outputport.py index fa46d5a54e6..968d3273c33 100644 --- a/psyneulink/core/components/ports/outputport.py +++ b/psyneulink/core/components/ports/outputport.py @@ -916,7 +916,7 @@ def __init__(self, owner=None, reference_value=None, variable=None, - size=None, + input_shapes=None, function=None, projections=None, params=None, @@ -971,7 +971,7 @@ def __init__(self, super().__init__( owner, variable=variable, - size=size, + input_shapes=input_shapes, projections=projections, params=params, name=name, diff --git a/psyneulink/core/components/ports/parameterport.py b/psyneulink/core/components/ports/parameterport.py index b46b11191b9..10994e8acc6 100644 --- a/psyneulink/core/components/ports/parameterport.py +++ b/psyneulink/core/components/ports/parameterport.py @@ -187,7 +187,7 @@ >>> import psyneulink as pnl >>> my_mechanism = pnl.RecurrentTransferMechanism( - ... size=5, + ... input_shapes=5, ... noise=pnl.ControlSignal(), ... function=pnl.Logistic( ... gain=(0.5, pnl.ControlSignal), @@ -198,7 +198,7 @@ default noise value, why are we using a ControlSignal here?? COMMENT -The first argument of the constructor for the Mechanism specifies its `size ` parameter by +The first argument of the constructor for the Mechanism specifies its `input_shapes ` parameter by directly assigning a value to it. The second specifies the `noise ` parameter by assigning a default `ControlSignal`; this will use the default value of the `noise ` attribute. 
The **function** argument is specified using the constructor for @@ -700,7 +700,7 @@ def __init__(self, owner, reference_value=None, variable=None, - size=None, + input_shapes=None, function=None, projections=None, params=None, @@ -726,7 +726,7 @@ def __init__(self, # Note: pass name of Mechanism (to override assignment of componentName in super.__init__) super(ParameterPort, self).__init__(owner, variable=variable, - size=size, + input_shapes=input_shapes, projections=projections, function=function, params=params, diff --git a/psyneulink/core/components/ports/port.py b/psyneulink/core/components/ports/port.py index a3d23e6da86..8e92734b6ba 100644 --- a/psyneulink/core/components/ports/port.py +++ b/psyneulink/core/components/ports/port.py @@ -1019,7 +1019,7 @@ class Parameters(Port.Parameters): def __init__(self, owner: Union[Mechanism, Projection], variable=None, - size=None, + input_shapes=None, projections=None, function=None, params=None, @@ -1039,9 +1039,9 @@ def __init__(self, - variable (value): value of the Port: must be list or tuple of numbers, or a number (in which case it will be converted to a single-item list) must match input and output of Port's _update method, and any sending or receiving projections - - size (int or array/list of ints): + - input_shapes (int or array/list of ints): Sets variable to be array(s) of zeros, if **variable** is not specified as an argument; - if **variable** is specified, it takes precedence over the specification of **size**. + if **variable** is specified, it takes precedence over the specification of **input_shapes**. - params (dict): + if absent, implements default Port determined by PROJECTION_TYPE param + if dict, can have the following entries: @@ -1100,7 +1100,7 @@ def __init__(self, # VALIDATE VARIABLE, PARAM_SPECS, AND INSTANTIATE self.function super(Port_Base, self).__init__( default_variable=variable, - size=size, + input_shapes=input_shapes, function=function, projections=projections, param_defaults=params, diff --git a/psyneulink/core/components/shellclasses.py b/psyneulink/core/components/shellclasses.py index f3865cc1dbf..b39e2de826b 100644 --- a/psyneulink/core/components/shellclasses.py +++ b/psyneulink/core/components/shellclasses.py @@ -73,14 +73,14 @@ class Mechanism(ShellClass): @check_user_specified def __init__(self, default_variable=None, - size=None, + input_shapes=None, function=None, param_defaults=None, name=None, prefs=None, **kwargs): super().__init__(default_variable=default_variable, - size=size, + input_shapes=input_shapes, function=function, param_defaults=param_defaults, name=name, diff --git a/psyneulink/core/compositions/composition.py b/psyneulink/core/compositions/composition.py index 8ceb616d9cd..3ea74f1719a 100644 --- a/psyneulink/core/compositions/composition.py +++ b/psyneulink/core/compositions/composition.py @@ -1383,11 +1383,11 @@ >>> B = ProcessingMechanism(name='B', default_variable=[0,0,0]) >>> inner_nested_comp = Composition(nodes=[A, B]) - >>> C = ComparatorMechanism(name='C', size=3) + >>> C = ComparatorMechanism(name='C', input_shapes=3) >>> nested_comp_1 = Composition(nodes=[C, inner_nested_comp]) - >>> D = ComparatorMechanism(name='D', size=3) - >>> E = ComparatorMechanism(name='E', size=3) + >>> D = ComparatorMechanism(name='D', input_shapes=3) + >>> E = ComparatorMechanism(name='E', input_shapes=3) >>> nested_comp_2 = Composition([D, E]) >>> F = ComparatorMechanism(name='F') diff --git a/psyneulink/core/compositions/showgraph.py b/psyneulink/core/compositions/showgraph.py index 
29177bd5398..2bb18641d9e 100644 --- a/psyneulink/core/compositions/showgraph.py +++ b/psyneulink/core/compositions/showgraph.py @@ -134,16 +134,16 @@ | >>> from psyneulink import * | .. figure:: _static/Composition_show_graph_basic_fig.svg | | >>> a = ProcessingMechanism( | | | name='A', | | -| ... size=3, | | +| ... input_shapes=3, | | | ... output_ports=[RESULT, MEAN] | | | ... ) | | | >>> b = ProcessingMechanism( | | | ... name='B', | | -| ... size=5 | | +| ... input_shapes=5 | | | ... ) | | | >>> c = ProcessingMechanism( | | | ... name='C', | | -| ... size=2, | | +| ... input_shapes=2, | | | ... function=Logistic(gain=pnl.CONTROL) | | | ... ) | | | >>> comp = Composition( | | diff --git a/psyneulink/core/globals/keywords.py b/psyneulink/core/globals/keywords.py index 56616e4c1a4..6d45edb8487 100644 --- a/psyneulink/core/globals/keywords.py +++ b/psyneulink/core/globals/keywords.py @@ -118,7 +118,7 @@ 'RESET_STATEFUL_FUNCTION_WHEN', 'RELU_FUNCTION', 'REST', 'RESULT', 'RESULT', 'ROLES', 'RL_FUNCTION', 'RUN', 'SAMPLE', 'SAVE_ALL_VALUES_AND_POLICIES', 'SCALAR', 'SCALE', 'SCHEDULER', 'SELF', 'SENDER', 'SEPARATE', 'SEPARATOR_BAR', 'SHADOW_INPUT_NAME', 'SHADOW_INPUTS', 'SIMPLE', 'SIMPLE_INTEGRATOR_FUNCTION', 'SIMULATIONS', - 'SINGLE', 'SINGLETON', 'SIZE', 'SLOPE', 'SOFT_CLAMP', 'SOFTMAX_FUNCTION', 'SOURCE', 'STABILITY_FUNCTION', + 'SINGLE', 'SINGLETON', 'INPUT_SHAPES', 'SLOPE', 'SOFT_CLAMP', 'SOFTMAX_FUNCTION', 'SOURCE', 'STABILITY_FUNCTION', 'STANDARD_ARGS', 'STANDARD_DEVIATION', 'STANDARD_OUTPUT_PORTS', 'STORE', 'SUBTRACTION', 'SUM', 'TARGET', 'TARGET_MECHANISM', 'TARGET_LABELS_DICT', 'TERMINAL', 'TARGETS', 'TERMINATION_MEASURE', 'TERMINATION_THRESHOLD', 'TERMINATION_COMPARISION_OP', 'TERSE', 'TEXT', 'THRESHOLD', @@ -918,7 +918,7 @@ class Loss(Enum): MEAN = 'MEAN' MEDIAN = 'MEDIAN' MECHANISM_VALUE = 'MECHANISM_VALUE' -SIZE = 'size' +INPUT_SHAPES = 'input_shapes' K_VALUE = 'k_value' RATIO = 'ratio' diff --git a/psyneulink/core/globals/log.py b/psyneulink/core/globals/log.py index 6d2cce9dde6..5585c4aff39 100644 --- a/psyneulink/core/globals/log.py +++ b/psyneulink/core/globals/log.py @@ -167,8 +167,8 @@ `MappingProjection` from the first to the second:: # Create a Process with two TransferMechanisms, and get a reference for the Projection created between them: - >>> my_mech_A = pnl.TransferMechanism(name='mech_A', size=2) - >>> my_mech_B = pnl.TransferMechanism(name='mech_B', size=3) + >>> my_mech_A = pnl.TransferMechanism(name='mech_A', input_shapes=2) + >>> my_mech_B = pnl.TransferMechanism(name='mech_B', input_shapes=3) >>> my_composition = pnl.Composition(pathways=[my_mech_A, my_mech_B]) >>> proj_A_to_B = my_mech_B.path_afferents[0] diff --git a/psyneulink/core/globals/parameters.py b/psyneulink/core/globals/parameters.py index e2204c6ecdd..1d2bfeae32d 100644 --- a/psyneulink/core/globals/parameters.py +++ b/psyneulink/core/globals/parameters.py @@ -148,7 +148,7 @@ def _modulatory_mechanism_costs_getter(owning_component=None, context=None): def _recurrent_transfer_mechanism_matrix_setter(value, owning_component=None, context=None): try: - value = get_matrix(value, owning_component.size[0], owning_component.size[0]) + value = get_matrix(value, owning_component.input_shapes[0], owning_component.input_shapes[0]) except AttributeError: pass diff --git a/psyneulink/library/components/mechanisms/modulatory/learning/autoassociativelearningmechanism.py b/psyneulink/library/components/mechanisms/modulatory/learning/autoassociativelearningmechanism.py index c7b079750f1..45f60f1c837 100644 --- 
a/psyneulink/library/components/mechanisms/modulatory/learning/autoassociativelearningmechanism.py +++ b/psyneulink/library/components/mechanisms/modulatory/learning/autoassociativelearningmechanism.py @@ -320,7 +320,7 @@ class Parameters(LearningMechanism.Parameters): @beartype def __init__(self, default_variable: Union[list, np.ndarray], - size=None, + input_shapes=None, function: Optional[Callable] = None, learning_signals: Optional[list] = None, modulation: Optional[str] = None, @@ -344,7 +344,7 @@ def __init__(self, # self._learning_rate = learning_rate super().__init__(default_variable=default_variable, - size=size, + input_shapes=input_shapes, function=function, modulation=modulation, learning_rate=learning_rate, diff --git a/psyneulink/library/components/mechanisms/modulatory/learning/kohonenlearningmechanism.py b/psyneulink/library/components/mechanisms/modulatory/learning/kohonenlearningmechanism.py index 8100535b78b..924ad0c5736 100644 --- a/psyneulink/library/components/mechanisms/modulatory/learning/kohonenlearningmechanism.py +++ b/psyneulink/library/components/mechanisms/modulatory/learning/kohonenlearningmechanism.py @@ -321,7 +321,7 @@ class Parameters(LearningMechanism.Parameters): @beartype def __init__(self, default_variable: Union[list, np.ndarray], - size=None, + input_shapes=None, matrix: Optional[ParameterPort] = None, function: Optional[Callable] = None, learning_signals: Optional[list] = None, @@ -345,7 +345,7 @@ def __init__(self, super().__init__( default_variable=default_variable, - size=size, + input_shapes=input_shapes, function=function, modulation=modulation, learning_rate=learning_rate, diff --git a/psyneulink/library/components/mechanisms/processing/integrator/ddm.py b/psyneulink/library/components/mechanisms/processing/integrator/ddm.py index b210657a8d8..86358052b65 100644 --- a/psyneulink/library/components/mechanisms/processing/integrator/ddm.py +++ b/psyneulink/library/components/mechanisms/processing/integrator/ddm.py @@ -767,7 +767,7 @@ class Parameters(ProcessingMechanism.Parameters): @beartype def __init__(self, default_variable=None, - size=None, + input_shapes=None, input_format: Optional[Literal['SCALAR', 'ARRAY', 'VECTOR']] = None, function=None, input_ports=None, @@ -799,7 +799,7 @@ def __init__(self, # These are created here rather than as StandardOutputPorts # since they require input_format==ARRAY to be meaningful if input_format in {ARRAY, VECTOR}: - size=1 # size of variable for DDM Mechanism + input_shapes=1 # size of variable for DDM Mechanism input_ports = [ {NAME:'ARRAY', VARIABLE: np.array([[0.0, 0.0]]), @@ -848,7 +848,7 @@ def __init__(self, # IMPLEMENTATION NOTE: this manner of setting default_variable works but is idiosyncratic # compared to other mechanisms: see TransferMechanism.py __init__ function for a more normal example. - if default_variable is None and size is None: + if default_variable is None and input_shapes is None: try: default_variable = params[FUNCTION_PARAMS][STARTING_VALUE] if not is_numeric(default_variable): @@ -859,7 +859,7 @@ def __init__(self, pass # # Conflict with above - # self.size = size + # self.input_shapes = input_shapes # New (1/19/2021) default behaviour of DDM mechanism is to execute until finished. That # is, it should execute until it reaches its threshold. 
@@ -882,7 +882,7 @@ def __init__(self, params=params, name=name, prefs=prefs, - size=size, + input_shapes=input_shapes, **kwargs), self._instantiate_plotting_functions() @@ -964,7 +964,7 @@ def _validate_variable(self, variable, context=None): raise DDMError("Length of input to DDM ({}) is greater than 1, implying there are multiple " "input ports, which is currently not supported in DDM, but may be supported" " in the future under a multi-process DDM. Please use a single numeric " - "item as the default_variable, or use size = 1.".format(variable)) + "item as the default_variable, or use input_shapes = 1.".format(variable)) # # MODIFIED 6/28/17 (CW): changed len(variable) > 1 to len(variable[0]) > 1 # # if not isinstance(variable, numbers.Number) and len(variable[0]) > 1: # if not is_numeric(variable) and len(variable[0]) > 1: diff --git a/psyneulink/library/components/mechanisms/processing/integrator/episodicmemorymechanism.py b/psyneulink/library/components/mechanisms/processing/integrator/episodicmemorymechanism.py index ab53a4aff54..83673854297 100644 --- a/psyneulink/library/components/mechanisms/processing/integrator/episodicmemorymechanism.py +++ b/psyneulink/library/components/mechanisms/processing/integrator/episodicmemorymechanism.py @@ -77,9 +77,9 @@ .. _EpisodicMemoryMechanism_Creation_Default_Variable_and_Size: - * **default_variable** or **size** -- these are specified in the standard way that the `variable + * **default_variable** or **input_shapes** -- these are specified in the standard way that the `variable ` is specified for any `Component` (see `default_variable `, - `size `, respectively); the specified value is passed to the constructor for the + `input_shapes `, respectively); the specified value is passed to the constructor for the EpisodicMemoryMechanism's `function `), which determines the shape of an entry in `memory `; the `memory ` itself remains empty until the Mechanism is executed and an item is stored. @@ -108,7 +108,7 @@ of the entry stored in `memory `, and used to retrieve one similar to it. By default, `input_port ` are named *FIELD_n_INPUT*, where "n" is replaced by the index of each field; however, they can be named explicitly by specifying a list of strings in the **input_ports** argument of -the constructor; the number of these must equal the number of fields specified in **default_variable** or **size**. +the constructor; the number of these must equal the number of fields specified in **default_variable** or **input_shapes**. .. _EpisodicMemoryMechanism_Creation_Function_Parameters: @@ -155,7 +155,7 @@ .. technical_note:: The shape of an entry in `memory ` is determined by the shape of the Mechanism's - `variable `. specified in the **default_variable** or **size** arguments of its constructor + `variable `. specified in the **default_variable** or **input_shapes** arguments of its constructor (see `EpisodicMemoryMechanism_Creation`). Each item of `variable ` corresponds to a field. Both `memory ` and all entries are stored in the EpisodicMemoryMechanism's `function ` as np.ndarrays, the dimensionality of which is determined by the shape of an @@ -176,7 +176,7 @@ ` of that function. By default InputPorts are named *FIELD_n_INPUT* (see `EpisodicMemoryMechanism_Creation`). 
If the Mechanism is assigned `DictionaryMemory` as its `function `, then it is assigned at least one InputPort (named *KEY_INPUT* by default), -and optionally a second (named *VALUE_INPUT*) if **default_variable** or **size** specifies two items; any additional +and optionally a second (named *VALUE_INPUT*) if **default_variable** or **input_shapes** specifies two items; any additional fields are ignored. .. _EpisodicMemoryMechanism_Function: @@ -290,16 +290,16 @@ .. _EpisodicMemoryMechanism_Examples_Size: -*Format entries using* **size** +*Format entries using* **input_shapes** ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -The **size** argument can also be used to format entries:: +The **input_shapes** argument can also be used to format entries:: - >>> my_em = EpisodicMemoryMechanism(size=[2,3]) + >>> my_em = EpisodicMemoryMechanism(input_shapes=[2,3]) >>> my_em.execute([[1,2],[3,4,5]]) array([array([0, 0]), array([0, 0, 0])], dtype=object) -Note that each element of **size** specifies the length of a field +Note that each element of **input_shapes** specifies the length of a field (see `EpisodicMemoryMechanism_Creation_Default_Variable_and_Size` for additional details). .. _EpisodicMemoryMechanism_Examples_Memory_Init: @@ -317,8 +317,8 @@ >>> my_em.execute([[1,2],[3,4,6]]) array([array([1., 2.]), array([3., 4., 6.])], dtype=object) -Note that there was no need to use **default_variable** or **size** to format entries here, since that is determined -by the entries in the **memory** argument. If **default_variable** or **size** is specified, its shape must be the +Note that there was no need to use **default_variable** or **input_shapes** to format entries here, since that is determined +by the entries in the **memory** argument. If **default_variable** or **input_shapes** is specified, its shape must be the same as the entries specified in **memory**. In this example, since `memory ` was initialized, the first execution returns the closest value to the input, which is used as the retrieval cue. In the second execution, the input from the first execution is returned, since it was stored after the first retrieval. The @@ -377,7 +377,7 @@ The names of `input_ports ` can be customized by specifying a list of names in the **input_ports** argument of the Mechanism's constructor:: - >>> my_em = EpisodicMemoryMechanism(size=[2,2,2], + >>> my_em = EpisodicMemoryMechanism(input_shapes=[2,2,2], ... 
input_ports=['KEY', 'VALUE', 'LABEL']) >>> my_em.input_ports.names ['KEY', 'VALUE', 'LABEL'] @@ -525,7 +525,7 @@ def _parse_memory(self, memory): @check_user_specified def __init__(self, default_variable:Union[int, list, np.ndarray]=None, - size:Optional[Union[int, list, np.ndarray]]=None, + input_shapes:Optional[Union[int, list, np.ndarray]]=None, memory:Optional[Union[list, np.ndarray]]=None, function:Optional[Function]=None, params=None, @@ -539,21 +539,21 @@ def __init__(self, and function.__name__ is DictionaryMemory.__name__)) if self._dictionary_memory: # Identify and warn about any deprecated args, and return their values for reassignment - deprecated_arg_values = deprecation_warning(self, kwargs, {'content_size':'size'}) + deprecated_arg_values = deprecation_warning(self, kwargs, {'content_size':'input_shapes'}) # Assign value of deprecated args to current ones - if 'size' in deprecated_arg_values: - size = deprecated_arg_values['size'] + if 'input_shapes' in deprecated_arg_values: + input_shapes = deprecated_arg_values['input_shapes'] # Need to handle assoc_size specially, since it needs to be added to what was content_size if 'assoc_size' in kwargs: - if isinstance(size, int): - size = [size,kwargs['assoc_size']] + if isinstance(input_shapes, int): + input_shapes = [input_shapes, kwargs['assoc_size']] else: - size += kwargs['assoc_size'] + input_shapes += kwargs['assoc_size'] kwargs.pop('assoc_size') super().__init__( default_variable=default_variable, - size=size, + input_shapes=input_shapes, function=function, params=params, name=name, @@ -562,7 +562,7 @@ def __init__(self, **kwargs ) - def _handle_default_variable(self, default_variable=None, size=None, input_ports=None, function=None, params=None): + def _handle_default_variable(self, default_variable=None, input_shapes=None, input_ports=None, function=None, params=None): """Override to initialize or validate default_variable based on _memory_init or function.memory - if memory argument for Mechanism is specified and default_variable is not, use former to specify latter; - if both are specified, validate that they are the same shape; @@ -601,7 +601,7 @@ def _handle_default_variable(self, default_variable=None, size=None, input_ports f"does not match the shape of entries ({entry_shape}) in " f"the memory of its function ({self.function.name}).") - return super()._handle_default_variable(default_variable, size, input_ports, function, params) + return super()._handle_default_variable(default_variable, input_shapes, input_ports, function, params) def _instantiate_input_ports(self, context=None): """Override to assign default names to input_ports""" diff --git a/psyneulink/library/components/mechanisms/processing/leabramechanism.py b/psyneulink/library/components/mechanisms/processing/leabramechanism.py index 9d988c5c6e2..7a2efd12c18 100644 --- a/psyneulink/library/components/mechanisms/processing/leabramechanism.py +++ b/psyneulink/library/components/mechanisms/processing/leabramechanism.py @@ -73,8 +73,8 @@ LeabraMechanism. Here is an example of how to do this. 
In the example, T2 passes the training_data to the *LEARNING_TARGET* InputPort of L (L.input_ports[1]):: L = LeabraMechanism(input_size=input_size, output_size=output_size) - T1 = TransferMechanism(name='T1', size=input_size, function=Linear) - T2 = TransferMechanism(name='T2', size=output_size, function=Linear) + T1 = TransferMechanism(name='T1', input_shapes=input_size, function=Linear) + T2 = TransferMechanism(name='T2', input_shapes=output_size, function=Linear) p1 = Process(pathway=[T1, L]) proj = MappingProjection(sender=T2, receiver=L.input_ports[1]) p2 = Process(pathway=[T2, proj, L]) @@ -512,7 +512,7 @@ def __init__(self, ] super().__init__( - size=size, + input_shapes=size, network=network, input_size=input_size, output_size=output_size, diff --git a/psyneulink/library/components/mechanisms/processing/objective/comparatormechanism.py b/psyneulink/library/components/mechanisms/processing/objective/comparatormechanism.py index aa2a8516e42..bc212dfa277 100644 --- a/psyneulink/library/components/mechanisms/processing/objective/comparatormechanism.py +++ b/psyneulink/library/components/mechanisms/processing/objective/comparatormechanism.py @@ -113,7 +113,7 @@ TARGET InputPorts in the **default_variable** argument of the ComparatorMechanism's constructor, as follows:: >>> import psyneulink as pnl - >>> my_action_selection_mech = pnl.TransferMechanism(size=5, + >>> my_action_selection_mech = pnl.TransferMechanism(input_shapes=5, ... function=pnl.SoftMax(output=pnl.PROB)) >>> my_reward_mech = pnl.TransferMechanism() diff --git a/psyneulink/library/components/mechanisms/processing/objective/predictionerrormechanism.py b/psyneulink/library/components/mechanisms/processing/objective/predictionerrormechanism.py index 3f67c9777c7..c8b85868739 100644 --- a/psyneulink/library/components/mechanisms/processing/objective/predictionerrormechanism.py +++ b/psyneulink/library/components/mechanisms/processing/objective/predictionerrormechanism.py @@ -140,9 +140,9 @@ value of which is a vector of the same length as the output of sample. >>> import psyneulink as pnl - >>> sample_mech = pnl.TransferMechanism(size=5, + >>> sample_mech = pnl.TransferMechanism(input_shapes=5, ... function=pnl.Linear()) - >>> reward_mech = pnl.TransferMechanism(size=5) + >>> reward_mech = pnl.TransferMechanism(input_shapes=5) >>> prediction_error_mech = pnl.PredictionErrorMechanism(sample=sample_mech, ... target=reward_mech) diff --git a/psyneulink/library/components/mechanisms/processing/transfer/contrastivehebbianmechanism.py b/psyneulink/library/components/mechanisms/processing/transfer/contrastivehebbianmechanism.py index 460063a0d15..0da5204996d 100644 --- a/psyneulink/library/components/mechanisms/processing/transfer/contrastivehebbianmechanism.py +++ b/psyneulink/library/components/mechanisms/processing/transfer/contrastivehebbianmechanism.py @@ -64,7 +64,7 @@ ~~~~~~ The **input_size** argument of the constructor must always be specified (this is comparable to specifying the -**size** or *default_variable** arguments of other types of `Mechanism`). If it is specified on its own, +**input_shapes** or *default_variable** arguments of other types of `Mechanism`). If it is specified on its own, it determines the total number of processing units. If either the **hidden_size** and/or **target_size** arguments are specified, then those units are treated as distinct from the input units (see `ContrastiveHebbian_Execution` for details). 
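To make the comparison with **input_shapes** concrete, here is a short sketch of the argument on an ordinary Mechanism (hypothetical values, assuming the list form creates one InputPort per entry, as in the EpisodicMemoryMechanism example earlier in this patch)::

    import psyneulink as pnl

    # A single int gives one InputPort of that length ...
    pm_1 = pnl.ProcessingMechanism(input_shapes=2)        # variable -> [[0., 0.]]

    # ... while a list of ints gives one InputPort per entry (one per "field").
    pm_2 = pnl.ProcessingMechanism(input_shapes=[2, 3])   # variable -> [[0., 0.], [0., 0., 0.]]
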
@@ -345,7 +345,7 @@ from psyneulink.core.globals.context import ContextFlags, handle_external_context from psyneulink.core.globals.keywords import \ CONTRASTIVE_HEBBIAN_MECHANISM, COUNT, FUNCTION, HARD_CLAMP, HOLLOW_MATRIX, MAX_ABS_DIFF, NAME, \ - SIZE, SOFT_CLAMP, TARGET, VARIABLE + INPUT_SHAPES, SOFT_CLAMP, TARGET, VARIABLE from psyneulink.core.globals.parameters import Parameter, SharedParameter, check_user_specified from psyneulink.core.globals.preferences.basepreferenceset import ValidPrefSet from psyneulink.core.globals.utilities import ValidParamSpecType, NumericCollections @@ -1112,7 +1112,7 @@ def _instantiate_input_ports(self, input_ports=None, reference_value=None, conte # Assign InputPort specification dictionaries for required InputPorts sizes = dict(INPUT=self.input_size, RECURRENT=self.recurrent_size, TARGET=self.target_size) for i, input_port in enumerate((s for s in self.input_ports if s in {INPUT, TARGET, RECURRENT})): - self.input_ports[i] = {NAME:input_port, SIZE: sizes[input_port]} + self.input_ports[i] = {NAME:input_port, INPUT_SHAPES: sizes[input_port]} super()._instantiate_input_ports(input_ports, reference_value, context) diff --git a/psyneulink/library/components/mechanisms/processing/transfer/kohonenmechanism.py b/psyneulink/library/components/mechanisms/processing/transfer/kohonenmechanism.py index b8e5d80dad6..270b00a5561 100644 --- a/psyneulink/library/components/mechanisms/processing/transfer/kohonenmechanism.py +++ b/psyneulink/library/components/mechanisms/processing/transfer/kohonenmechanism.py @@ -275,7 +275,7 @@ class Parameters(TransferMechanism.Parameters): @beartype def __init__(self, default_variable=None, - size=None, + input_shapes=None, function=None, # selection_function=OneHot(mode=MAX_INDICATOR), # RE-INSTATE WHEN IMPLEMENT NHot function integrator_function=None, @@ -311,7 +311,7 @@ def __init__(self, super().__init__( default_variable=default_variable, - size=size, + input_shapes=input_shapes, function=function, integrator_function=integrator_function, integrator_mode=integrator_mode, diff --git a/psyneulink/library/components/mechanisms/processing/transfer/kwtamechanism.py b/psyneulink/library/components/mechanisms/processing/transfer/kwtamechanism.py index 11a7e7a383c..e26961ffa3f 100644 --- a/psyneulink/library/components/mechanisms/processing/transfer/kwtamechanism.py +++ b/psyneulink/library/components/mechanisms/processing/transfer/kwtamechanism.py @@ -346,7 +346,7 @@ class Parameters(RecurrentTransferMechanism.Parameters): @beartype def __init__(self, default_variable=None, - size=None, + input_shapes=None, function=None, matrix=None, auto: Optional[NumericCollections] = None, @@ -378,7 +378,7 @@ def __init__(self, super().__init__( default_variable=default_variable, - size=size, + input_shapes=input_shapes, input_ports=input_ports, function=function, matrix=matrix, @@ -439,7 +439,7 @@ def _kwta_scale(self, current_input, context=None): int_k_value = int(k_value) # ^ this is hacky but necessary for now, since something is # incorrectly turning k_value into an array of floats - n = self.size[0] + n = self.input_shapes[0] if (k_value[0] > 0) and (k_value[0] < 1): k = int(round(k_value[0] * n)) elif (int_k_value < 0): @@ -513,7 +513,7 @@ def _validate_params(self, request_set, target_set=None, context=None): format(k_param, self)) except AttributeError: raise KWTAError("k-value parameter ({}) for {} was an unexpected type.".format(k_param, self)) - if abs(k_num) > self.size[0]: + if abs(k_num) > self.input_shapes[0]: raise 
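As a quick illustration of the validation in the surrounding hunks (a sketch with made-up numbers, not taken from the test suite): the recurrent matrix, and any auto/hetero values, are checked against the first entry of **input_shapes**::

    import numpy as np
    import psyneulink as pnl

    # With input_shapes=3 the recurrent matrix must be 3x3 ...
    rtm = pnl.RecurrentTransferMechanism(input_shapes=3,
                                         matrix=np.full((3, 3), 0.1))

    # ... and auto/hetero may each be a scalar or match input_shapes[0] (length 3 here).
    rtm2 = pnl.RecurrentTransferMechanism(input_shapes=3,
                                          auto=[0.5, 0.5, 0.5],
                                          hetero=-0.2)
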
KWTAError("k-value parameter ({}) for {} was larger than the total number of elements.". format(k_param, self)) diff --git a/psyneulink/library/components/mechanisms/processing/transfer/lcamechanism.py b/psyneulink/library/components/mechanisms/processing/transfer/lcamechanism.py index c3c04072f1b..022b7a3dd86 100644 --- a/psyneulink/library/components/mechanisms/processing/transfer/lcamechanism.py +++ b/psyneulink/library/components/mechanisms/processing/transfer/lcamechanism.py @@ -443,7 +443,7 @@ def _validate_integration_rate(self, integration_rate): @beartype def __init__(self, default_variable=None, - size: Optional[Union[int, list, np.ndarray]] = None, + input_shapes: Optional[Union[int, list, np.ndarray]] = None, input_ports: Optional[Union[list, dict]] = None, function=None, initial_value=None, @@ -515,7 +515,7 @@ def __init__(self, super().__init__( default_variable=default_variable, - size=size, + input_shapes=input_shapes, input_ports=input_ports, # matrix=matrix, auto=self_excitation, diff --git a/psyneulink/library/components/mechanisms/processing/transfer/recurrenttransfermechanism.py b/psyneulink/library/components/mechanisms/processing/transfer/recurrenttransfermechanism.py index e65e5896b9b..6e173e83aec 100644 --- a/psyneulink/library/components/mechanisms/processing/transfer/recurrenttransfermechanism.py +++ b/psyneulink/library/components/mechanisms/processing/transfer/recurrenttransfermechanism.py @@ -646,7 +646,7 @@ class Parameters(TransferMechanism.Parameters): @beartype def __init__(self, default_variable=None, - size=None, + input_shapes=None, input_ports: Optional[Union[list, dict]] = None, has_recurrent_input_port=None, combination_function: Optional[Callable] = None, @@ -688,7 +688,7 @@ def __init__(self, super().__init__( default_variable=default_variable, - size=size, + input_shapes=input_shapes, input_ports=input_ports, function=function, integrator_function=integrator_function, @@ -766,7 +766,7 @@ def _validate_params(self, request_set, target_set=None, context=None): if isinstance(matrix_param, AutoAssociativeProjection): err_msg = ("Number of rows in {} param for {} ({}) must be same as the size of variable for " "{} {} (whose size is {} and whose variable is {})". - format(MATRIX, self.name, rows, self.__class__.__name__, self.name, self.size, + format(MATRIX, self.name, rows, self.__class__.__name__, self.name, self.input_shapes, self.defaults.variable)) else: err_msg = ("Size of {} param for {} ({}) must be the same as its variable ({})". 
@@ -779,9 +779,9 @@ def _validate_params(self, request_set, target_set=None, context=None): if (auto_param is not None) and not isinstance(auto_param, (np.ndarray, list, numbers.Number)): raise RecurrentTransferError("auto parameter ({}) of {} is of incompatible type: it should be a " "number, None, or a 1D numeric array".format(auto_param, self)) - if isinstance(auto_param, (np.ndarray, list)) and safe_len(auto_param) != 1 and safe_len(auto_param) != self.size[0]: + if isinstance(auto_param, (np.ndarray, list)) and safe_len(auto_param) != 1 and safe_len(auto_param) != self.input_shapes[0]: raise RecurrentTransferError("auto parameter ({0}) for {1} is of incompatible length with the size " - "({2}) of its owner, {1}.".format(auto_param, self, self.size[0])) + "({2}) of its owner, {1}.".format(auto_param, self, self.input_shapes[0])) if HETERO in target_set: hetero_param = target_set[HETERO] @@ -790,9 +790,9 @@ def _validate_params(self, request_set, target_set=None, context=None): "number, None, or a 2D numeric array".format(hetero_param, self)) hetero_shape = np.array(hetero_param).shape if hetero_shape != (1,) and hetero_shape != (1, 1): - if isinstance(hetero_param, (np.ndarray, list, np.matrix)) and (hetero_param.ndim > 0 and hetero_shape[0] != self.size[0]): + if isinstance(hetero_param, (np.ndarray, list, np.matrix)) and (hetero_param.ndim > 0 and hetero_shape[0] != self.input_shapes[0]): raise RecurrentTransferError("hetero parameter ({0}) for {1} is of incompatible size with the size " - "({2}) of its owner, {1}.".format(hetero_param, self, self.size[0])) + "({2}) of its owner, {1}.".format(hetero_param, self, self.input_shapes[0])) if isinstance(hetero_param, (np.ndarray, list, np.matrix)) and (hetero_param.ndim > 0 and hetero_shape[0] != hetero_shape[1]): raise RecurrentTransferError("hetero parameter ({}) for {} must be square.".format(hetero_param, self)) diff --git a/psyneulink/library/compositions/autodiffcomposition.py b/psyneulink/library/compositions/autodiffcomposition.py index 27f2e1ac1bc..5ce5f1eb188 100644 --- a/psyneulink/library/compositions/autodiffcomposition.py +++ b/psyneulink/library/compositions/autodiffcomposition.py @@ -279,8 +279,8 @@ >>> import psyneulink as pnl >>> # Set up PsyNeuLink Components - >>> my_mech_1 = pnl.TransferMechanism(function=pnl.Linear, size = 3) - >>> my_mech_2 = pnl.TransferMechanism(function=pnl.Linear, size = 2) + >>> my_mech_1 = pnl.TransferMechanism(function=pnl.Linear, input_shapes = 3) + >>> my_mech_2 = pnl.TransferMechanism(function=pnl.Linear, input_shapes = 2) >>> my_projection = pnl.MappingProjection(matrix=np.random.randn(3,2), ... sender=my_mech_1, ... 
receiver=my_mech_2) diff --git a/psyneulink/library/compositions/emcomposition.py b/psyneulink/library/compositions/emcomposition.py index 11eac6e1d20..a8bbe69a235 100644 --- a/psyneulink/library/compositions/emcomposition.py +++ b/psyneulink/library/compositions/emcomposition.py @@ -1042,7 +1042,7 @@ from psyneulink.core.globals.keywords import \ (ADAPTIVE, ALL, ARG_MAX, ARG_MAX_INDICATOR, AUTO, CONTEXT, CONTROL, DEFAULT_INPUT, DEFAULT_VARIABLE, EM_COMPOSITION, FULL_CONNECTIVITY_MATRIX, GAIN, IDENTITY_MATRIX, MULTIPLICATIVE_PARAM, NAME, - PARAMS, PROB_INDICATOR, PRODUCT, PROJECTIONS, RANDOM, SIZE, VARIABLE, Loss) + PARAMS, PROB_INDICATOR, PRODUCT, PROJECTIONS, RANDOM, INPUT_SHAPES, VARIABLE, Loss) from psyneulink.core.globals.utilities import convert_all_elements_to_np_array, is_numeric_scalar from psyneulink.core.globals.registry import name_without_suffix from psyneulink.core.llvm import ExecutionMode @@ -2145,10 +2145,11 @@ def _construct_query_input_nodes(self, field_weights)->list: f"PROGRAM ERROR: number of keys ({self.num_keys}) does not match number of " \ f"non-zero values in field_weights ({len(self.key_indices)})." - # query_input_nodes = [ProcessingMechanism(size=len(self.entry_template[self.key_indices[i]]), + # query_input_nodes = [ProcessingMechanism(input_shapes=len(self.entry_template[self.key_indices[i]]), # name=f'{self.key_names[self.key_indices[i]]} [QUERY]') # for i in range(self.num_keys)] - query_input_nodes = [ProcessingMechanism(size=len(self.entry_template[self.key_indices[i]]), + query_input_nodes = [ProcessingMechanism( + input_shapes=len(self.entry_template[self.key_indices[i]]), name=f'{self.key_names[i]} [QUERY]') for i in range(self.num_keys)] @@ -2167,7 +2168,8 @@ def _construct_value_input_nodes(self, field_weights)->list: f"PROGRAM ERROR: number of values ({self.num_values}) does not match number of " \ f"non-zero values in field_weights ({len(value_indices)})." 
- value_input_nodes = [ProcessingMechanism(size=len(self.entry_template[value_indices[i]]), + value_input_nodes = [ProcessingMechanism( + input_shapes=len(self.entry_template[value_indices[i]]), name= f'{self.value_names[i]} [VALUE]') for i in range(self.num_values)] @@ -2183,7 +2185,7 @@ def _construct_concatenate_queries_node(self, concatenate_queries)->ProcessingMe else: return ProcessingMechanism(function=Concatenate, input_ports=[{NAME: 'CONCATENATE', - SIZE: len(self.query_input_nodes[i].output_port.value), + INPUT_SHAPES: len(self.query_input_nodes[i].output_port.value), PROJECTIONS: MappingProjection( name=f'{self.key_names[i]} to CONCATENATE', sender=self.query_input_nodes[i].output_port, @@ -2211,7 +2213,7 @@ def _construct_match_nodes(self, memory_template, memory_capacity, concatenate_q match_nodes = [ ProcessingMechanism( input_ports={NAME: 'CONCATENATED_INPUTS', - SIZE: memory_capacity, + INPUT_SHAPES: memory_capacity, PROJECTIONS: MappingProjection(sender=self.concatenate_queries_node, matrix=matrix, function=LinearMatrix( @@ -2224,7 +2226,7 @@ def _construct_match_nodes(self, memory_template, memory_capacity, concatenate_q match_nodes = [ ProcessingMechanism( input_ports= { - SIZE:memory_capacity, + INPUT_SHAPES:memory_capacity, PROJECTIONS: MappingProjection(sender=self.query_input_nodes[i].output_port, matrix = np.array( memory_template[:,i].tolist()).transpose().astype(float), @@ -2313,7 +2315,7 @@ def _construct_combined_matches_node(self, input_source = self.weighted_match_nodes combined_matches_node = ( - ProcessingMechanism(input_ports=[{SIZE:memory_capacity, + ProcessingMechanism(input_ports=[{INPUT_SHAPES:memory_capacity, PROJECTIONS:[MappingProjection(sender=s, matrix=IDENTITY_MATRIX, name=f'{WEIGHTED_MATCH_NODE_NAME} ' @@ -2346,7 +2348,7 @@ def _construct_softmax_node(self, memory_capacity, softmax_gain, softmax_thresho # ARG_MAX_INDICATOR returns the entry unmodified softmax_choice = ARG_MAX_INDICATOR - softmax_node = ProcessingMechanism(input_ports={SIZE:memory_capacity, + softmax_node = ProcessingMechanism(input_ports={INPUT_SHAPES: memory_capacity, PROJECTIONS: MappingProjection( sender=input_source, matrix=IDENTITY_MATRIX, @@ -2379,7 +2381,7 @@ def _construct_retrieved_nodes(self, memory_template)->list: """Create nodes that report the value field(s) for the item(s) matched in memory. 
""" self.retrieved_key_nodes = \ - [ProcessingMechanism(input_ports={SIZE: len(self.query_input_nodes[i].variable[0]), + [ProcessingMechanism(input_ports={INPUT_SHAPES: len(self.query_input_nodes[i].variable[0]), PROJECTIONS: MappingProjection( sender=self.softmax_node, @@ -2390,7 +2392,7 @@ def _construct_retrieved_nodes(self, memory_template)->list: for i in range(self.num_keys)] self.retrieved_value_nodes = \ - [ProcessingMechanism(input_ports={SIZE: len(self.value_input_nodes[i].variable[0]), + [ProcessingMechanism(input_ports={INPUT_SHAPES: len(self.value_input_nodes[i].variable[0]), PROJECTIONS: MappingProjection( sender=self.softmax_node, diff --git a/psyneulink/library/models/Cohen_Huston1994.py b/psyneulink/library/models/Cohen_Huston1994.py index ff455921d40..f12e08fe9e9 100644 --- a/psyneulink/library/models/Cohen_Huston1994.py +++ b/psyneulink/library/models/Cohen_Huston1994.py @@ -24,26 +24,26 @@ # Create mechanisms --------------------------------------------------------------------------------------------------- # Linear input units, colors: ('red', 'green'), words: ('RED','GREEN') colors_input_layer = pnl.TransferMechanism( - size=3, + input_shapes=3, function=pnl.Linear, name='COLORS_INPUT' ) words_input_layer = pnl.TransferMechanism( - size=3, + input_shapes=3, function=pnl.Linear, name='WORDS_INPUT' ) task_input_layer = pnl.TransferMechanism( - size=2, + input_shapes=2, function=pnl.Linear, name='TASK_INPUT' ) # Task layer, tasks: ('name the color', 'read the word') task_layer = pnl.RecurrentTransferMechanism( - size=2, + input_shapes=2, function=pnl.Logistic(), hetero=inhibition, integrator_mode=True, @@ -53,7 +53,7 @@ # Hidden layer units, colors: ('red','green') words: ('RED','GREEN') colors_hidden_layer = pnl.RecurrentTransferMechanism( - size=3, + input_shapes=3, function=pnl.Logistic(x_0=bias), integrator_mode=True, hetero=inhibition, @@ -63,7 +63,7 @@ ) words_hidden_layer = pnl.RecurrentTransferMechanism( - size=3, + input_shapes=3, function=pnl.Logistic(x_0=bias), hetero=inhibition, integrator_mode=True, @@ -73,7 +73,7 @@ ) # Response layer, responses: ('red', 'green'): RecurrentTransferMechanism for self inhibition matrix response_layer = pnl.RecurrentTransferMechanism( - size=2, + input_shapes=2, function=pnl.Logistic(), hetero=inhibition, integrator_mode=True, diff --git a/psyneulink/library/models/Cohen_Huston1994_horse_race.py b/psyneulink/library/models/Cohen_Huston1994_horse_race.py index 39365f0c08d..88d5bf528e6 100644 --- a/psyneulink/library/models/Cohen_Huston1994_horse_race.py +++ b/psyneulink/library/models/Cohen_Huston1994_horse_race.py @@ -35,20 +35,24 @@ # Create mechanisms --------------------------------------------------------------------------------------------------- # Linear input units, colors: ('red', 'green'), words: ('RED','GREEN') -colors_input_layer = pnl.TransferMechanism(size=3, +colors_input_layer = pnl.TransferMechanism( + input_shapes=3, function=pnl.Linear, name='COLORS_INPUT') -words_input_layer = pnl.TransferMechanism(size=3, +words_input_layer = pnl.TransferMechanism( + input_shapes=3, function=pnl.Linear, name='WORDS_INPUT') -task_input_layer = pnl.TransferMechanism(size=2, +task_input_layer = pnl.TransferMechanism( + input_shapes=2, function=pnl.Linear, name='TASK_INPUT') # Task layer, tasks: ('name the color', 'read the word') -task_layer = pnl.RecurrentTransferMechanism(size=2, +task_layer = pnl.RecurrentTransferMechanism( + input_shapes=2, function=pnl.Logistic(), hetero=-2, integrator_mode=True, @@ -56,7 +60,8 @@ 
name='TASK') # Hidden layer units, colors: ('red','green') words: ('RED','GREEN') -colors_hidden_layer = pnl.RecurrentTransferMechanism(size=3, +colors_hidden_layer = pnl.RecurrentTransferMechanism( + input_shapes=3, function=pnl .Logistic(x_0=4.0), integrator_mode=True, @@ -65,7 +70,8 @@ integration_rate=0.1, # cohen-huston text says 0.01 name='COLORS HIDDEN') -words_hidden_layer = pnl.RecurrentTransferMechanism(size=3, +words_hidden_layer = pnl.RecurrentTransferMechanism( + input_shapes=3, function=pnl.Logistic(x_0=4.0), hetero=-2, integrator_mode=True, @@ -73,7 +79,8 @@ integration_rate=0.1, name='WORDS HIDDEN') # Response layer, responses: ('red', 'green'): RecurrentTransferMechanism for self inhibition matrix -response_layer = pnl.RecurrentTransferMechanism(size=2, +response_layer = pnl.RecurrentTransferMechanism( + input_shapes=2, function=pnl.Logistic(), hetero=-2.0, integrator_mode=True, diff --git a/psyneulink/library/models/GilzenratModel.py b/psyneulink/library/models/GilzenratModel.py index cb8c1eda1a3..79a31a7e1b9 100644 --- a/psyneulink/library/models/GilzenratModel.py +++ b/psyneulink/library/models/GilzenratModel.py @@ -60,7 +60,7 @@ # Input Layer --- [ Target, Distractor ] input_layer = pnl.TransferMechanism( - size=2, + input_shapes=2, initial_value=np.array([[0.0, 0.0]]), name='INPUT LAYER' ) @@ -68,7 +68,7 @@ # Create Decision Layer --- [ Target, Distractor ] decision_layer = pnl.LCAMechanism( - size=2, + input_shapes=2, time_step_size=dt, leak=1.0, self_excitation=w_XiXi, @@ -84,7 +84,7 @@ # Create Response Layer --- [ Target ] response_layer = pnl.LCAMechanism( - size=1, + input_shapes=1, time_step_size=dt, leak=1.0, self_excitation=w_X3X3, diff --git a/psyneulink/library/models/Kalanthroff_PCTC_2018.py b/psyneulink/library/models/Kalanthroff_PCTC_2018.py index 3dce04f48eb..939a4f65c7e 100644 --- a/psyneulink/library/models/Kalanthroff_PCTC_2018.py +++ b/psyneulink/library/models/Kalanthroff_PCTC_2018.py @@ -28,25 +28,25 @@ # Create mechanisms --------------------------------------------------------------------------------------------------- # 4 Input layers for color, word, task & bias colors_input_layer = pnl.TransferMechanism( - size=2, + input_shapes=2, function=pnl.Linear, name='COLORS_INPUT' ) words_input_layer = pnl.TransferMechanism( - size=2, + input_shapes=2, function=pnl.Linear, name='WORDS_INPUT' ) task_input_layer = pnl.TransferMechanism( - size=2, + input_shapes=2, function=pnl.Linear, name='PROACTIVE_CONTROL' ) bias_input = pnl.TransferMechanism( - size=2, + input_shapes=2, function=pnl.Linear, name='BIAS' ) @@ -72,7 +72,7 @@ def my_conflict_function(variable): # Create color feature layer, word feature layer, task demand layer and response layer color_feature_layer = pnl.RecurrentTransferMechanism( - size=2, # Define unit size + input_shapes=2, # Define unit size function=pnl.Logistic(gain=4, x_0=1), # to 4 & bias to 1 integrator_mode=True, # Set IntegratorFunction mode to True integration_rate=Lambda, # smoothing factor == integration rate @@ -86,7 +86,7 @@ def my_conflict_function(variable): # The word_feature_layer is set up as the color_feature_layer word_feature_layer = pnl.RecurrentTransferMechanism( - size=2, # Define unit size + input_shapes=2, # Define unit size function=pnl.Logistic(gain=4, x_0=1), # to 4 & bias to 1 integrator_mode=True, # Set IntegratorFunction mode to True integration_rate=Lambda, # smoothing factor == integration rate @@ -101,7 +101,7 @@ def my_conflict_function(variable): # The response_layer is set up as the 
color_feature_layer & the word_feature_layer response_layer = pnl.RecurrentTransferMechanism( - size=2, # Define unit size + input_shapes=2, # Define unit size function=pnl.Logistic(gain=4, x_0=1), # to 4 & bias to 1 integrator_mode=True, # Set IntegratorFunction mode to True integration_rate=Lambda, # smoothing factor == integration rate @@ -117,7 +117,7 @@ def my_conflict_function(variable): # The task_demand_layer is set up as the color_feature_layer but with a different python function on it's OutputPort # and a differnet inhibition weight on the hetero task_demand_layer = pnl.RecurrentTransferMechanism( - size=2, # Define unit size + input_shapes=2, # Define unit size function=pnl.Logistic(gain=4, x_0=1), # to 4 & bias to 1 integrator_mode=True, # Set IntegratorFunction mode to True integration_rate=Lambda, # smoothing factor == integration rate diff --git a/psyneulink/library/models/Nieuwenhuis2005Model.py b/psyneulink/library/models/Nieuwenhuis2005Model.py index 446a02d5579..66856886a84 100644 --- a/psyneulink/library/models/Nieuwenhuis2005Model.py +++ b/psyneulink/library/models/Nieuwenhuis2005Model.py @@ -52,14 +52,14 @@ # First, we create the 3 layers of the behavioral network, i.e. INPUT LAYER, DECISION LAYER, and RESPONSE LAYER. input_layer = pnl.TransferMechanism( - size=3, # Number of units in input layer + input_shapes=3, # Number of units in input layer initial_value=[[0.0, 0.0, 0.0]], # Initial input values name='INPUT LAYER' # Define the name of the layer; this is optional, ) # but will help you to overview your model later on # Create Decision Layer --- [ Target 1, Target 2, Distractor ] decision_layer = pnl.LCAMechanism( - size=3, # Number of units in input layer + input_shapes=3, # Number of units in input layer initial_value=[[0.0, 0.0, 0.0]], # Initial input values time_step_size=dt, # Integration step size leak=1.0, # Sets off diagonals to negative values @@ -79,7 +79,7 @@ # Create Response Layer --- [ Target1, Target2 ] response_layer = pnl.LCAMechanism( - size=2, # Number of units in input layer + input_shapes=2, # Number of units in input layer initial_value=[[0.0, 0.0]], # Initial input values time_step_size=dt, # Integration step size leak=1.0, # Sets off diagonals to negative values diff --git a/tests/components/test_component.py b/tests/components/test_component.py index ffb328c6705..08237bec3e8 100644 --- a/tests/components/test_component.py +++ b/tests/components/test_component.py @@ -142,7 +142,7 @@ def __init__(self, default_variable=None, **kwargs): 'cls_', [pnl.ProcessingMechanism, pnl.TransferMechanism, pnl.IntegratorMechanism] ) @pytest.mark.parametrize( - 'size, expected_variable', + 'input_shapes, expected_variable', [ (1, [[0]]), (2, [[0, 0]]), @@ -153,8 +153,8 @@ def __init__(self, default_variable=None, **kwargs): ] ) @pytest.mark.parametrize('params_dict_entry', [NotImplemented, 'params']) - def test_size(self, cls_, params_dict_entry, size, expected_variable): - c = cls_(**nest_dictionary({'size': size}, params_dict_entry)) + def test_input_shapes(self, cls_, params_dict_entry, input_shapes, expected_variable): + c = cls_(**nest_dictionary({'input_shapes': input_shapes}, params_dict_entry)) np.testing.assert_array_equal(c.defaults.variable, expected_variable) @pytest.mark.parametrize( diff --git a/tests/composition/pec/test_parameterestimationcomposition.py b/tests/composition/pec/test_parameterestimationcomposition.py index a4e52eea79b..68690d25d75 100644 --- a/tests/composition/pec/test_parameterestimationcomposition.py +++ 
b/tests/composition/pec/test_parameterestimationcomposition.py @@ -52,10 +52,10 @@ def _run_ddm_with_params( return comp, data_to_fit -input_node_1 = pnl.ProcessingMechanism(size=1) -input_node_2 = pnl.ProcessingMechanism(size=3) -input_node_3 = pnl.ProcessingMechanism(size=2) -output_node = pnl.ProcessingMechanism(size=2) +input_node_1 = pnl.ProcessingMechanism(input_shapes=1) +input_node_2 = pnl.ProcessingMechanism(input_shapes=3) +input_node_3 = pnl.ProcessingMechanism(input_shapes=2) +output_node = pnl.ProcessingMechanism(input_shapes=2) model = pnl.Composition( [{input_node_1, input_node_2, input_node_3}, output_node], name="model" ) diff --git a/tests/composition/pec/test_stab_flex_pec_fit.py b/tests/composition/pec/test_stab_flex_pec_fit.py index 67c7e29cde5..358ece1fdb6 100644 --- a/tests/composition/pec/test_stab_flex_pec_fit.py +++ b/tests/composition/pec/test_stab_flex_pec_fit.py @@ -111,7 +111,7 @@ def make_stab_flex( # Task Layer: [Color, Motion] {0, 1} Mutually Exclusive # Origin Node taskLayer = pnl.TransferMechanism( - size=2, + input_shapes=2, function=pnl.Linear(slope=1, intercept=0), output_ports=[pnl.RESULT], name="Task Input [I1, I2]", @@ -120,7 +120,7 @@ def make_stab_flex( # Stimulus Layer: [Color Stimulus, Motion Stimulus] # Origin Node stimulusInfo = pnl.TransferMechanism( - size=2, + input_shapes=2, function=pnl.Linear(slope=1, intercept=0), output_ports=[pnl.RESULT], name="Stimulus Input [S1, S2]", @@ -129,7 +129,7 @@ def make_stab_flex( # Cue-To-Stimulus Interval Layer # Origin Node cueInterval = pnl.TransferMechanism( - size=1, + input_shapes=1, function=pnl.Linear(slope=1, intercept=0), output_ports=[pnl.RESULT], name="Cue-Stimulus Interval", @@ -138,7 +138,7 @@ def make_stab_flex( # Correct Response Info # Origin Node correctResponseInfo = pnl.TransferMechanism( - size=1, + input_shapes=1, function=pnl.Linear(slope=1, intercept=0), output_ports=[pnl.RESULT], name="Correct Response Info", @@ -146,7 +146,7 @@ def make_stab_flex( # Control Module Layer: [Color Activation, Motion Activation] controlModule = pnl.LCAMechanism( - size=2, + input_shapes=2, function=pnl.Logistic(gain=GAIN), leak=LEAK, competition=COMP, @@ -167,7 +167,7 @@ def make_stab_flex( # Hadamard product of controlModule and Stimulus Information nonAutomaticComponent = pnl.TransferMechanism( - size=2, + input_shapes=2, function=pnl.Linear(slope=1, intercept=0), input_ports=pnl.InputPort(combine=pnl.PRODUCT), output_ports=[pnl.RESULT], @@ -176,7 +176,7 @@ def make_stab_flex( # Multiply Stimulus Input by the automaticity weight congruenceWeighting = pnl.TransferMechanism( - size=2, + input_shapes=2, function=pnl.Linear(slope=AUTOMATICITY, intercept=0), output_ports=[pnl.RESULT], name="Automaticity-weighted Stimulus Input [w*S1, w*S2]", @@ -184,7 +184,7 @@ def make_stab_flex( # Summation of nonAutomatic and Automatic Components ddmCombination = pnl.TransferMechanism( - size=1, + input_shapes=1, function=pnl.Linear(slope=1, intercept=0), input_ports=pnl.InputPort(combine=pnl.SUM), output_ports=[pnl.RESULT], @@ -193,7 +193,7 @@ def make_stab_flex( # Ensure upper boundary of DDM is always correct response by multiplying DDM input by correctResponseInfo ddmRecodeDrift = pnl.TransferMechanism( - size=1, + input_shapes=1, function=pnl.Linear(slope=1, intercept=0), input_ports=pnl.InputPort(combine=pnl.PRODUCT), output_ports=[pnl.RESULT], @@ -202,7 +202,7 @@ def make_stab_flex( # Scale DDM inputs ddmInputScale = pnl.TransferMechanism( - size=1, + input_shapes=1, function=pnl.Linear(slope=SCALE, 
intercept=0), output_ports=[pnl.RESULT], name="Scaled DDM Input", @@ -277,10 +277,10 @@ def make_stab_flex( # Hot-fix currently necessary to allow control module and DDM to execute in parallel in compiled mode # We need two gates in order to output both values (decision and response) from the ddm - decisionGate = pnl.ProcessingMechanism(size=1, name="DECISION_GATE") + decisionGate = pnl.ProcessingMechanism(input_shapes=1, name="DECISION_GATE") stabilityFlexibility.add_node(decisionGate) - responseGate = pnl.ProcessingMechanism(size=1, name="RESPONSE_GATE") + responseGate = pnl.ProcessingMechanism(input_shapes=1, name="RESPONSE_GATE") stabilityFlexibility.add_node(responseGate) stabilityFlexibility.add_projection( diff --git a/tests/composition/test_autodiffcomposition.py b/tests/composition/test_autodiffcomposition.py index 988f5f46a1f..4b8c5255d3a 100644 --- a/tests/composition/test_autodiffcomposition.py +++ b/tests/composition/test_autodiffcomposition.py @@ -618,11 +618,13 @@ def test_pytorch_equivalence_with_autodiff_composition(self, autodiff_mode): min_delt = 0.00001 learning_rate = 100 - il = TransferMechanism(size=D_i, name='input') - cl = TransferMechanism(size=D_c, name='task') - hl = TransferMechanism(size=D_h, name='hidden', + il = TransferMechanism(input_shapes=D_i, name='input') + cl = TransferMechanism(input_shapes=D_c, name='task') + hl = TransferMechanism( + input_shapes=D_h, name='hidden', function=Logistic(bias=-2)) - ol = TransferMechanism(size=D_o, name='output', + ol = TransferMechanism( + input_shapes=D_o, name='output', function=Logistic(bias=-2)) input_set = { @@ -832,11 +834,13 @@ def test_pytorch_equivalence_with_autodiff_forward_disabled_on_proj(self): min_delt = 0.00001 learning_rate = 100 - il = TransferMechanism(size=D_i, name='input') - cl = TransferMechanism(size=D_c, name='task') - hl = TransferMechanism(size=D_h, name='hidden', + il = TransferMechanism(input_shapes=D_i, name='input') + cl = TransferMechanism(input_shapes=D_c, name='task') + hl = TransferMechanism( + input_shapes=D_h, name='hidden', function=Logistic(bias=-2)) - ol = TransferMechanism(size=D_o, name='output', + ol = TransferMechanism( + input_shapes=D_o, name='output', function=Logistic(bias=-2)) input_set = { @@ -1444,7 +1448,7 @@ def test_xor_nested_no_train_then_train(self, num_epochs, learning_rate, patienc # # input_dict = {'inputs': {xor_in: xor_inputs}, 'targets': {xor_out: xor_targets}, 'epochs': num_epochs} # xor_autodiff.run(inputs = input_dict) - # myTransfer = pnl.TransferMechanism(size = 2) + # myTransfer = pnl.TransferMechanism(input_shapes = 2) # myMappingProj = pnl.MappingProjection(sender = myTransfer, receiver = xor_autodiff) # # no_training_input_dict = {xor_in: xor_inputs} @@ -1722,15 +1726,15 @@ class TestNestedLearning: @pytest.fixture def nodes_for_testing_nested_comps(self): - input_nodes = [pnl.ProcessingMechanism(name='input_1', size=2), - pnl.ProcessingMechanism(name='input_2', size=3), - pnl.ProcessingMechanism(name='input_3', size=3)] - hidden_nodes = [pnl.ProcessingMechanism(name='hidden_1', size=3), - pnl.ProcessingMechanism(name='hidden_2', size=4), - pnl.ProcessingMechanism(name='hidden_3', size=5), - pnl.ProcessingMechanism(name='hidden_4', size=6)] - output_nodes = [pnl.ProcessingMechanism(name='output_1', size=3), - pnl.ProcessingMechanism(name='output_2', size=5)] + input_nodes = [pnl.ProcessingMechanism(name='input_1', input_shapes=2), + pnl.ProcessingMechanism(name='input_2', input_shapes=3), + pnl.ProcessingMechanism(name='input_3', input_shapes=3)] 
+ hidden_nodes = [pnl.ProcessingMechanism(name='hidden_1', input_shapes=3), + pnl.ProcessingMechanism(name='hidden_2', input_shapes=4), + pnl.ProcessingMechanism(name='hidden_3', input_shapes=5), + pnl.ProcessingMechanism(name='hidden_4', input_shapes=6)] + output_nodes = [pnl.ProcessingMechanism(name='output_1', input_shapes=3), + pnl.ProcessingMechanism(name='output_2', input_shapes=5)] def _get_nodes(num_input_nodes, num_hidden_nodes, num_output_nodes): return (input_nodes[0:num_input_nodes], hidden_nodes[0:num_hidden_nodes], @@ -1833,7 +1837,7 @@ def test_1_input_to_1_nested_hidden_with_2_output_ports(self, nodes_for_testing_ nodes = nodes_for_testing_nested_comps(1, 1, 2) input_nodes, hidden_nodes, output_nodes = nodes inputs = {input_nodes[0]:np.array([[0, 0], [0, 1], [1, 0], [1, 1]])} - hidden_with_two_output_ports = pnl.ProcessingMechanism(size=3, output_ports=['FIRST','SECOND']) + hidden_with_two_output_ports = pnl.ProcessingMechanism(input_shapes=3, output_ports=['FIRST', 'SECOND']) nested = AutodiffComposition([hidden_nodes[0], hidden_with_two_output_ports], name='nested') pathway_a = [input_nodes[0], @@ -1937,7 +1941,7 @@ def test_2_inputs_to_2_input_ports_of_single_nested_hidden(self, nodes_for_testi nodes = nodes_for_testing_nested_comps(2, 0, 1) input_nodes, hidden_nodes, output_nodes = nodes - hidden_with_2_inputs = pnl.ProcessingMechanism(name='hidden_x', size=(3,3), function=pnl.LinearCombination) + hidden_with_2_inputs = pnl.ProcessingMechanism(name='hidden_x', input_shapes=(3, 3), function=pnl.LinearCombination) inputs = {input_nodes[0]:np.array([[0, 0], [0, 1], [1, 0], [1, 1]])} @@ -2174,7 +2178,7 @@ def test_inputs_to_multiple_input_ports_and_INPUT_nodes(self, nodes_for_testing_ # input_nodes, hidden_nodes, output_nodes = nodes # inputs = {input_nodes[0]:np.array([[0, 0], [0, 1], [1, 0], [1, 1]])} # - # hidden_2d = pnl.ProcessingMechanism(name='hidden 2d', size=(2,2)) + # hidden_2d = pnl.ProcessingMechanism(name='hidden 2d', input_shapes=(2,2)) # nested = AutodiffComposition(nodes = [hidden_nodes[0], hidden_2d], name='nested') # pathway_a = [input_nodes[0], # MappingProjection(input_nodes[0], hidden_2d), @@ -2313,14 +2317,14 @@ def get_targets_comp(idx): # input_nodes, hidden_nodes, output_nodes = nodes # inputs = {input_nodes[0]:np.array([[0, 0], [0, 1], [1, 0], [1, 1]])} # - # hidden_1 = pnl.ProcessingMechanism(name='hidden_1', size=3) + # hidden_1 = pnl.ProcessingMechanism(name='hidden_1', input_shapes=3) # nested_01 = AutodiffComposition(name='nested_01', nodes=[hidden_1], learning_rate=.01) # autodiff_01_results = execute_learning(comp_type='autodiff', # execution_mode=pnl.ExecutionMode.PyTorch, # pathways=[input_nodes[0], nested_01, output_nodes[0]], # inputs=inputs) # - # hidden_2 = pnl.ProcessingMechanism(name='hidden_2', size=3) + # hidden_2 = pnl.ProcessingMechanism(name='hidden_2', input_shapes=3) # nested_1 = AutodiffComposition(name='nested_2', nodes=[hidden_2], learning_rate=.1) # autodiff_1_results = execute_learning(comp_type='autodiff', # execution_mode=pnl.ExecutionMode.PyTorch, @@ -2331,9 +2335,9 @@ def get_targets_comp(idx): # np.testing.assert_allclose(autodiff_01_results, autodiff_1_results) def test_error_for_running_nested_learning_in_Python_mode(self): - input_mech = pnl.ProcessingMechanism(name='input_mech', size=2) - hidden_mech = pnl.ProcessingMechanism(name='hidden_mech', size=2) - output_mech = pnl.ProcessingMechanism(name='output_mech', size=2) + input_mech = pnl.ProcessingMechanism(name='input_mech', input_shapes=2) + hidden_mech = 
pnl.ProcessingMechanism(name='hidden_mech', input_shapes=2) + output_mech = pnl.ProcessingMechanism(name='output_mech', input_shapes=2) # Test for error on learning if nested is Composition nested = pnl.Composition(name='nested', nodes=[hidden_mech]) @@ -2370,14 +2374,14 @@ def test_error_for_running_nested_learning_in_Python_mode(self): OUTPUT_A = 'output_A' OUTPUT_B = 'output_B' def nodes_for_testing_nested_comps(sizes): - return {INPUT_A: pnl.ProcessingMechanism(name=INPUT_A, size=sizes.pop(INPUT_A, 2)), - INPUT_B: pnl.ProcessingMechanism(name=INPUT_B, size=sizes.pop(INPUT_B, 2)), - INPUT_C: pnl.ProcessingMechanism(name=INPUT_C, size=sizes.pop(INPUT_C, 2)), - HIDDEN_A: pnl.ProcessingMechanism(name=HIDDEN_A, size=sizes.pop(HIDDEN_A, 2)), - HIDDEN_B: pnl.ProcessingMechanism(name=HIDDEN_B, size=sizes.pop(HIDDEN_B, 2)), - HIDDEN_C: pnl.ProcessingMechanism(name=HIDDEN_C, size=sizes.pop(HIDDEN_C, 2)), - OUTPUT_A: pnl.ProcessingMechanism(name=OUTPUT_A, size=sizes.pop(OUTPUT_A, 2)), - OUTPUT_B: pnl.ProcessingMechanism(name=OUTPUT_B, size=sizes.pop(OUTPUT_B, 2))} + return {INPUT_A: pnl.ProcessingMechanism(name=INPUT_A, input_shapes=sizes.pop(INPUT_A, 2)), + INPUT_B: pnl.ProcessingMechanism(name=INPUT_B, input_shapes=sizes.pop(INPUT_B, 2)), + INPUT_C: pnl.ProcessingMechanism(name=INPUT_C, input_shapes=sizes.pop(INPUT_C, 2)), + HIDDEN_A: pnl.ProcessingMechanism(name=HIDDEN_A, input_shapes=sizes.pop(HIDDEN_A, 2)), + HIDDEN_B: pnl.ProcessingMechanism(name=HIDDEN_B, input_shapes=sizes.pop(HIDDEN_B, 2)), + HIDDEN_C: pnl.ProcessingMechanism(name=HIDDEN_C, input_shapes=sizes.pop(HIDDEN_C, 2)), + OUTPUT_A: pnl.ProcessingMechanism(name=OUTPUT_A, input_shapes=sizes.pop(OUTPUT_A, 2)), + OUTPUT_B: pnl.ProcessingMechanism(name=OUTPUT_B, input_shapes=sizes.pop(OUTPUT_B, 2))} @pytest.mark.pytorch @@ -3565,17 +3569,17 @@ def test_autodiff_logging(self): np.testing.assert_equal(in_np_dict_vals[0:4], xor_inputs) np.testing.assert_equal(in_np_vals, in_np_dict_vals) - assert in_np_dict_vals.shape == (expected_length, 1, xor_in.size) + assert in_np_dict_vals.shape == (expected_length, 1, xor_in.input_shapes) - assert hid_map_np_dict_mats.shape == (expected_length, xor_in.size, xor_hid.size) + assert hid_map_np_dict_mats.shape == (expected_length, xor_in.input_shapes, xor_hid.input_shapes) np.testing.assert_equal(hid_map_np_mats, hid_map_np_dict_mats) - assert hid_np_dict_vals.shape == (expected_length, 1, xor_hid.size) + assert hid_np_dict_vals.shape == (expected_length, 1, xor_hid.input_shapes) - assert out_map_np_dict_mats.shape == (expected_length, xor_hid.size, xor_out.size) + assert out_map_np_dict_mats.shape == (expected_length, xor_hid.input_shapes, xor_out.input_shapes) np.testing.assert_equal(out_map_np_mats, out_map_np_dict_mats) - assert out_np_dict_vals.shape == (expected_length, 1, xor_out.size) + assert out_np_dict_vals.shape == (expected_length, 1, xor_out.input_shapes) xor_out.log.print_entries() diff --git a/tests/composition/test_composition.py b/tests/composition/test_composition.py index 1b9b188133c..04a6dfadee2 100644 --- a/tests/composition/test_composition.py +++ b/tests/composition/test_composition.py @@ -469,10 +469,10 @@ def test_add_linear_processing_pathway_with_noderole_specified_in_tuple(self): def test_add_linear_processing_pathway_containing_nodes_with_existing_projections(self): """ Test that add_linear_processing_pathway uses MappingProjections already specified for Hidden_layer_2 and Output_Layer in the pathway it creates within the Composition""" - Input_Layer = 
TransferMechanism(name='Input Layer', size=2) - Hidden_Layer_1 = TransferMechanism(name='Hidden Layer_1', size=5) - Hidden_Layer_2 = TransferMechanism(name='Hidden Layer_2', size=4) - Output_Layer = TransferMechanism(name='Output Layer', size=3) + Input_Layer = TransferMechanism(name='Input Layer', input_shapes=2) + Hidden_Layer_1 = TransferMechanism(name='Hidden Layer_1', input_shapes=5) + Hidden_Layer_2 = TransferMechanism(name='Hidden Layer_2', input_shapes=4) + Output_Layer = TransferMechanism(name='Output Layer', input_shapes=3) Input_Weights_matrix = (np.arange(2 * 5).reshape((2, 5)) + 1) / (2 * 5) Middle_Weights_matrix = (np.arange(5 * 4).reshape((5, 4)) + 1) / (5 * 4) Output_Weights_matrix = (np.arange(4 * 3).reshape((4, 3)) + 1) / (4 * 3) @@ -490,10 +490,10 @@ def test_add_linear_processing_pathway_containing_nodes_with_existing_projection def test_add_backpropagation_learning_pathway_containing_nodes_with_existing_projections(self): """ Test that add_backpropagation_learning_pathway uses MappingProjections already specified for Hidden_layer_2 and Output_Layer in the pathway it creates within the Composition""" - Input_Layer = TransferMechanism(name='Input Layer', size=2) - Hidden_Layer_1 = TransferMechanism(name='Hidden Layer_1', size=5) - Hidden_Layer_2 = TransferMechanism(name='Hidden Layer_2', size=4) - Output_Layer = TransferMechanism(name='Output Layer', size=3) + Input_Layer = TransferMechanism(name='Input Layer', input_shapes=2) + Hidden_Layer_1 = TransferMechanism(name='Hidden Layer_1', input_shapes=5) + Hidden_Layer_2 = TransferMechanism(name='Hidden Layer_2', input_shapes=4) + Output_Layer = TransferMechanism(name='Output Layer', input_shapes=3) Input_Weights_matrix = (np.arange(2 * 5).reshape((2, 5)) + 1) / (2 * 5) Middle_Weights_matrix = (np.arange(5 * 4).reshape((5, 4)) + 1) / (5 * 4) Output_Weights_matrix = (np.arange(4 * 3).reshape((4, 3)) + 1) / (4 * 3) @@ -3208,7 +3208,7 @@ def test_inputs_key_errors(self, input_args): def test_input_shape_errors(self): # Mechanism with single InputPort - mech = pnl.TransferMechanism(name='input', size=2) + mech = pnl.TransferMechanism(name='input', input_shapes=2) comp = pnl.Composition(mech, name='comp') with pytest.raises(CompositionError) as error_text: @@ -3228,7 +3228,7 @@ def test_input_shape_errors(self): assert "is incorrect for Mechanism with a single InputPort" in str(error_text.value) # Mechanism with two InputPorts - mech2 = pnl.TransferMechanism(name='input', size=(2,2)) + mech2 = pnl.TransferMechanism(name='input', input_shapes=(2, 2)) comp = pnl.Composition(mech2, name='comp') with pytest.raises(CompositionError) as error_text: @@ -3844,9 +3844,9 @@ def test_LPP_wrong_component(self): pytest.param(pnl.ExecutionMode.PTXExec, marks=[pytest.mark.llvm, pytest.mark.cuda]), ]) def test_execute_no_inputs(self, mode): - m_inner = ProcessingMechanism(size=2) + m_inner = ProcessingMechanism(input_shapes=2) inner_comp = Composition(pathways=[m_inner]) - m_outer = ProcessingMechanism(size=2) + m_outer = ProcessingMechanism(input_shapes=2) outer_comp = Composition(pathways=[m_outer, inner_comp]) with pytest.warns(UserWarning, match="No inputs provided in call"): @@ -3856,9 +3856,9 @@ def test_execute_no_inputs(self, mode): @pytest.mark.composition def test_run_no_inputs(self, comp_mode): - m_inner = ProcessingMechanism(size=2) + m_inner = ProcessingMechanism(input_shapes=2) inner_comp = Composition(pathways=[m_inner]) - m_outer = ProcessingMechanism(size=2) + m_outer = ProcessingMechanism(input_shapes=2) outer_comp = 
Composition(pathways=[m_outer, inner_comp]) with pytest.warns(UserWarning, match="No inputs provided in call"): @@ -4080,7 +4080,7 @@ def test_3_mechanisms_2_origins_1_terminal_mimo_all_sum(self, benchmark, comp_mo @pytest.mark.benchmark(group="Recurrent") def test_run_recurrent_transfer_mechanism(self, benchmark, comp_mode): comp = Composition() - A = RecurrentTransferMechanism(size=3, function=Linear(slope=5.0), name="A") + A = RecurrentTransferMechanism(input_shapes=3, function=Linear(slope=5.0), name="A") comp.add_node(A) sched = Scheduler(composition=comp) output1 = comp.run(inputs={A: [[1.0, 2.0, 3.0]]}, scheduler=sched, execution_mode=comp_mode) @@ -4095,7 +4095,8 @@ def test_run_recurrent_transfer_mechanism(self, benchmark, comp_mode): @pytest.mark.benchmark(group="Recurrent") def test_run_recurrent_transfer_mechanism_hetero(self, benchmark, comp_mode): comp = Composition() - R = RecurrentTransferMechanism(size=1, + R = RecurrentTransferMechanism( + input_shapes=1, function=Logistic(), hetero=-2.0, output_ports = [RESULT]) @@ -4114,7 +4115,8 @@ def test_run_recurrent_transfer_mechanism_hetero(self, benchmark, comp_mode): @pytest.mark.benchmark(group="Recurrent") def test_run_recurrent_transfer_mechanism_integrator(self, benchmark, comp_mode): comp = Composition() - R = RecurrentTransferMechanism(size=1, + R = RecurrentTransferMechanism( + input_shapes=1, function=Logistic(), hetero=-2.0, integrator_mode=True, @@ -4135,7 +4137,7 @@ def test_run_recurrent_transfer_mechanism_integrator(self, benchmark, comp_mode) @pytest.mark.benchmark(group="Recurrent") def test_run_recurrent_transfer_mechanism_vector_2(self, benchmark, comp_mode): comp = Composition() - R = RecurrentTransferMechanism(size=2, function=Logistic()) + R = RecurrentTransferMechanism(input_shapes=2, function=Logistic()) comp.add_node(R) comp._analyze_graph() val = comp.run(inputs={R: [[1.0, 2.0]]}, num_trials=1, execution_mode=comp_mode) @@ -4152,7 +4154,8 @@ def test_run_recurrent_transfer_mechanism_vector_2(self, benchmark, comp_mode): @pytest.mark.benchmark(group="Recurrent") def test_run_recurrent_transfer_mechanism_hetero_2(self, benchmark, comp_mode): comp = Composition() - R = RecurrentTransferMechanism(size=2, + R = RecurrentTransferMechanism( + input_shapes=2, function=Logistic(), hetero=-2.0, output_ports = [RESULT]) @@ -4171,7 +4174,8 @@ def test_run_recurrent_transfer_mechanism_hetero_2(self, benchmark, comp_mode): @pytest.mark.benchmark(group="Recurrent") def test_run_recurrent_transfer_mechanism_integrator_2(self, benchmark, comp_mode): comp = Composition() - R = RecurrentTransferMechanism(size=2, + R = RecurrentTransferMechanism( + input_shapes=2, function=Logistic(), hetero=-2.0, integrator_mode=True, @@ -4284,7 +4288,7 @@ def _check_comp_ex(self, comp, comparison, comp_mode, struct_name, context=None, def test_multiple_runs_with_parameter_change(self, comp_mode): struct_name = '_param' - A = TransferMechanism(size=2) + A = TransferMechanism(input_shapes=2) comp = Composition([A]) inputs_dict = {A: [1, 1]} @@ -4329,7 +4333,7 @@ def test_multiple_runs_with_parameter_change(self, comp_mode): def test_multiple_runs_with_parameter_change_arr(self, comp_mode): struct_name = '_state' - A = TransferMechanism(size=2, integrator_mode=True) + A = TransferMechanism(input_shapes=2, integrator_mode=True) comp = Composition([A]) inputs_dict = {A: [1, 1]} @@ -4375,7 +4379,7 @@ def test_multiple_runs_with_parameter_change_from_data_struct(self, comp_mode): # non-existence of compiled structures after set struct_name = 
'_data' - A = TransferMechanism(size=2, integrator_mode=True) + A = TransferMechanism(input_shapes=2, integrator_mode=True) comp = Composition([A]) inputs_dict = {A: [1, 1]} @@ -6548,25 +6552,28 @@ def inputs_generator_function(): def test_get_input_format(self, form, use_labels, show_nested, num_trials, expected_format_string): """Also tests input_labels_dict""" - A = pnl.ProcessingMechanism(size=1, name='A', + A = pnl.ProcessingMechanism( + input_shapes=1, name='A', input_labels={0:{'red':0, 'green':1}, 1:{'blue':2, 'yellow':3}}) - B = pnl.ProcessingMechanism(size=2, name='B') - C = pnl.ProcessingMechanism(size=[3,3], + B = pnl.ProcessingMechanism(input_shapes=2, name='B') + C = pnl.ProcessingMechanism( + input_shapes=[3, 3], input_ports=['C INPUT 1', 'C INPUT 2'], input_labels={'C INPUT 1':{'red':[0,0,0], 'green':[1,1,1], 'orange':[2,2,2]}, 'C INPUT 2':{'blue':[3,3,3], 'yellow':[4,4,4], 'purple':[5,5,5]}}, name='C') assert C.variable.shape == (2,3) - X = ProcessingMechanism(size=[3,3], + X = ProcessingMechanism( + input_shapes=[3, 3], input_ports=['X INPUT 1', 'X INPUT 2'], name='X', # input_labels={0:{'red':[0,0,0], 'green':[1,1,1]}} # Specify dict for only one port ) # Use TransferMechanism so that 2nd OutputPort uses 2nd item of Mechanism's value # (i.e. ,without having to specify that explicitly, as would be the case for ProcessingMechanism) - Y = pnl.TransferMechanism(input_ports=[{NAME:'Y INPUT 1', pnl.SIZE: 3, pnl.FUNCTION: pnl.Reduce}, - {NAME:'Y INPUT 2', pnl.SIZE: 3}], + Y = pnl.TransferMechanism(input_ports=[{NAME:'Y INPUT 1', pnl.INPUT_SHAPES: 3, pnl.FUNCTION: pnl.Reduce}, + {NAME:'Y INPUT 2', pnl.INPUT_SHAPES: 3}], # Test specification of labels for all InputPorts of Mechanism: input_labels={'red':[0,0,0], 'green':[1,1,1]}, name='Y') @@ -7787,7 +7794,7 @@ def test_force_two_control_mechanisms_as_OUTPUT(self): assert {ctl_mech_B} == set(comp.get_nodes_by_role(NodeRole.TERMINAL)) def test_LEARNING_hebbian(self): - A = RecurrentTransferMechanism(name='A', size=2, enable_learning=True) + A = RecurrentTransferMechanism(name='A', input_shapes=2, enable_learning=True) comp = Composition(pathways=A) pathway = comp.pathways[0] assert pathway.target is None diff --git a/tests/composition/test_control.py b/tests/composition/test_control.py index 44898e5db5d..0e4d2fdd8e6 100644 --- a/tests/composition/test_control.py +++ b/tests/composition/test_control.py @@ -1204,7 +1204,7 @@ def test_ocm_state_feature_specs_and_warnings_and_errors(self, state_feature_arg ib = pnl.ProcessingMechanism(name='IB') ic = pnl.ProcessingMechanism(name='IC') oa = pnl.ProcessingMechanism(name='OA') - ob = pnl.ProcessingMechanism(name='OB', size=3) + ob = pnl.ProcessingMechanism(name='OB', input_shapes=3) oc = pnl.ProcessingMechanism(name='OC') ext = pnl.ProcessingMechanism(name='EXT') icomp = pnl.Composition(pathways=[ia,ib,ic], name='INNER COMP') @@ -1447,7 +1447,7 @@ def test_state_features_in_nested_composition_as_agent_rep(self, nested_agent_re icomp = pnl.Composition(nodes=[I1,I2], name='INNER COMP') A = pnl.ComparatorMechanism(name='A') B = pnl.ProcessingMechanism(name='B') - C = pnl.ProcessingMechanism(name='C', size=3) + C = pnl.ProcessingMechanism(name='C', input_shapes=3) D = pnl.ProcessingMechanism(name='D') mcomp = pnl.Composition(pathways=[[A,B,C], icomp], name='MIDDLE COMP') ocomp = pnl.Composition(nodes=[mcomp], name='OUTER COMP') @@ -1700,7 +1700,7 @@ def test_ocm_state_and_state_dict(self): ib = pnl.ProcessingMechanism(name='IB') ic = pnl.ProcessingMechanism(name='IC') oa = 
pnl.ProcessingMechanism(name='OA') - ob = pnl.ProcessingMechanism(name='OB', size=3) + ob = pnl.ProcessingMechanism(name='OB', input_shapes=3) oc = pnl.ProcessingMechanism(name='OC') icomp = pnl.Composition(pathways=[ia,ib,ic], name='INNER COMP') ocomp = pnl.Composition(pathways=[icomp], name='OUTER COMP') @@ -2092,20 +2092,20 @@ def test_two_tier_ocm(self): # Task Layer: [Color, Motion] {0, 1} Mutually Exclusive taskLayer = pnl.TransferMechanism(default_variable=[[0.0, 0.0]], - # size=2, + # input_shapes=2, function=pnl.Linear(slope=1, intercept=0), output_ports=[pnl.RESULT], name='Task Input [I1, I2]') # Stimulus Layer: [Color Stimulus, Motion Stimulus] stimulusInfo = pnl.TransferMechanism(default_variable=[[0.0, 0.0]], - # size=2, + # input_shapes=2, function=pnl.Linear(slope=1, intercept=0), output_ports=[pnl.RESULT], name="Stimulus Input [S1, S2]") congruenceWeighting = pnl.TransferMechanism(default_variable=[[0.0, 0.0]], - size=2, + input_shapes=2, function=pnl.Linear(slope=congruentWeight, intercept=0), name='Congruence * Automatic Component') @@ -2125,14 +2125,15 @@ def test_two_tier_ocm(self): # Hadamard product of Activation and Stimulus Information nonAutomaticComponent = pnl.TransferMechanism(default_variable=[[0.0, 0.0]], - size=2, + input_shapes=2, function=pnl.Linear(slope=1, intercept=0), input_ports=pnl.InputPort(combine=pnl.PRODUCT), output_ports=[pnl.RESULT], name='Non-Automatic Component') # Summation of nonAutomatic and Automatic Components - ddmCombination = pnl.TransferMechanism(size=1, + ddmCombination = pnl.TransferMechanism( + input_shapes=1, function=pnl.Linear(slope=1, intercept=0), input_ports=pnl.InputPort(combine=pnl.SUM), output_ports=[pnl.RESULT], @@ -2149,13 +2150,14 @@ def test_two_tier_ocm(self): name='DDM') weightingFunction = pnl.TransferMechanism(default_variable=[[0.0, 0.0]], - size=2, + input_shapes=2, function=pnl.Linear(slope=1, intercept=0), input_ports=pnl.InputPort(combine=pnl.PRODUCT), output_ports=[pnl.RESULT], name='Bias') - topCorrect = pnl.TransferMechanism(size=1, + topCorrect = pnl.TransferMechanism( + input_shapes=1, function=pnl.Linear(slope=1, intercept=0), input_ports=pnl.InputPort(combine=pnl.PRODUCT), output_ports=[pnl.RESULT], @@ -2309,7 +2311,7 @@ def test_multilevel_control(self, comp_mode, benchmark): @pytest.mark.composition def test_recurrent_control(self, comp_mode): monitor = pnl.TransferMechanism(default_variable=[[0.0]], - size=1, + input_shapes=1, function=pnl.Linear(slope=1, intercept=0), output_ports=[pnl.RESULT], name='monitor') @@ -3478,7 +3480,7 @@ def computeAccuracy(trialInformation): # Task Layer: [Color, Motion] {0, 1} Mutually Exclusive # Origin Node taskLayer = pnl.TransferMechanism(default_variable=[[0.0, 0.0]], - size=2, + input_shapes=2, function=pnl.Linear(slope=1, intercept=0), output_ports=[pnl.RESULT], name='Task Input [I1, I2]') @@ -3486,7 +3488,7 @@ def computeAccuracy(trialInformation): # Stimulus Layer: [Color Stimulus, Motion Stimulus] # Origin Node stimulusInfo = pnl.TransferMechanism(default_variable=[[0.0, 0.0]], - size=2, + input_shapes=2, function=pnl.Linear(slope=1, intercept=0), output_ports=[pnl.RESULT], name="Stimulus Input [S1, S2]") @@ -3506,14 +3508,15 @@ def computeAccuracy(trialInformation): # Hadamard product of Activation and Stimulus Information nonAutomaticComponent = pnl.TransferMechanism(default_variable=[[0.0, 0.0]], - size=2, + input_shapes=2, function=pnl.Linear(slope=1, intercept=0), input_ports=pnl.InputPort(combine=pnl.PRODUCT), output_ports=[pnl.RESULT], 
name='Non-Automatic Component [S1*Activity1, S2*Activity2]') # Summation of nonAutomatic and Automatic Components - ddmCombination = pnl.TransferMechanism(size=1, + ddmCombination = pnl.TransferMechanism( + input_shapes=1, function=pnl.Linear(slope=1, intercept=0), input_ports=pnl.InputPort(combine=pnl.SUM), output_ports=[pnl.RESULT], diff --git a/tests/composition/test_gating.py b/tests/composition/test_gating.py index e27cda9f4b7..04750f7414e 100644 --- a/tests/composition/test_gating.py +++ b/tests/composition/test_gating.py @@ -25,7 +25,7 @@ def test_gating(benchmark, comp_mode): ) Gating_Mechanism = pnl.GatingMechanism( - size=[1], + input_shapes=[1], gating_signals=[Output_Layer.output_port] ) @@ -73,7 +73,7 @@ def test_gating(benchmark, comp_mode): # ) # # Gating_Mechanism = pnl.ControlMechanism( -# size=[1], +# input_shapes=[1], # control_signals=[Output_Layer.output_port] # ) # diff --git a/tests/composition/test_interfaces.py b/tests/composition/test_interfaces.py index b1cf2975b64..928428282f1 100644 --- a/tests/composition/test_interfaces.py +++ b/tests/composition/test_interfaces.py @@ -446,7 +446,7 @@ def test_input_specification_multiple_nested_compositions(self): level_2 = Composition(name="level_2") A2 = TransferMechanism(name="A2", - size=2, + input_shapes=2, function=Linear(slope=1.)) B2 = TransferMechanism(name="B2", function=Linear(slope=2.)) diff --git a/tests/composition/test_learning.py b/tests/composition/test_learning.py index 02b912193dc..9c22e58f55d 100644 --- a/tests/composition/test_learning.py +++ b/tests/composition/test_learning.py @@ -244,7 +244,7 @@ def test_target_dict_spec_multi_trial_lists_bp(self): def test_dict_target_spec_converging_pathways(self): A = TransferMechanism(name="converging-learning-pathways-mech-A") B = TransferMechanism(name="converging-learning-pathways-mech-B") - C = TransferMechanism(name="converging-learning-pathways-mech-C", size=2) + C = TransferMechanism(name="converging-learning-pathways-mech-C", input_shapes=2) D = TransferMechanism(name="converging-learning-pathways-mech-D") E = TransferMechanism(name="converging-learning-pathways-mech-E") comp = Composition() @@ -264,7 +264,7 @@ def test_dict_target_spec_converging_pathways(self): def test_function_target_spec_converging_pathways(self): A = TransferMechanism(name="converging-learning-pathways-mech-A") B = TransferMechanism(name="converging-learning-pathways-mech-B") - C = TransferMechanism(name="converging-learning-pathways-mech-C", size=2) + C = TransferMechanism(name="converging-learning-pathways-mech-C", input_shapes=2) D = TransferMechanism(name="converging-learning-pathways-mech-D") E = TransferMechanism(name="converging-learning-pathways-mech-E") comp = Composition() @@ -590,7 +590,7 @@ def test_simple_hebbian(self): size = 9 Hebb2 = pnl.RecurrentTransferMechanism( - size=size, + input_shapes=size, function=pnl.Linear, enable_learning=True, hetero=0., @@ -614,7 +614,8 @@ def test_simple_hebbian(self): class TestReinforcement: def test_rl(self): - input_layer = pnl.TransferMechanism(size=2, + input_layer = pnl.TransferMechanism( + input_shapes=2, name='Input Layer') input_layer.log.set_log_conditions(items=pnl.VALUE) action_selection = pnl.DDM(input_format=pnl.ARRAY, @@ -654,7 +655,8 @@ def test_rl(self): ) def test_reinforcement_fixed_targets(self): - input_layer = pnl.TransferMechanism(size=2, + input_layer = pnl.TransferMechanism( + input_shapes=2, name='Input Layer', ) @@ -1454,7 +1456,8 @@ def test_prediction_error_delta_first_run(self): err_msg="mismatch on 
timestep {}".format(i)) def test_rl_enable_learning_false(self): - input_layer = pnl.TransferMechanism(size=2, + input_layer = pnl.TransferMechanism( + input_shapes=2, name='Input Layer') input_layer.log.set_log_conditions(items=pnl.VALUE) action_selection = pnl.DDM(input_format=pnl.ARRAY, @@ -1627,29 +1630,29 @@ def Concatenate(variable): return np.append(variable[0], variable[1]) stim_in = pnl.ProcessingMechanism(name='Stimulus', - size=stim_size) + input_shapes=stim_size) context_in = pnl.ProcessingMechanism(name='Context', - size=context_size) + input_shapes=context_size) reward_in = pnl.ProcessingMechanism(name='Reward', - size=1) + input_shapes=1) perceptual_state = pnl.ProcessingMechanism(name='Current Port', function=Concatenate, input_ports=[{pnl.NAME: 'STIM', - pnl.SIZE: stim_size, + pnl.INPUT_SHAPES: stim_size, pnl.PROJECTIONS: stim_in}, {pnl.NAME: 'CONTEXT', - pnl.SIZE: context_size, + pnl.INPUT_SHAPES: context_size, pnl.PROJECTIONS: context_in}]) action = pnl.ProcessingMechanism(name='Action', - size=num_actions) + input_shapes=num_actions) # Nested Composition rl_agent_state = pnl.ProcessingMechanism(name='RL Agent Port', - size=5) + input_shapes=5) rl_agent_action = pnl.ProcessingMechanism(name='RL Agent Action', - size=5) + input_shapes=5) rl_agent = pnl.Composition(name='RL Agent') rl_learning_components = rl_agent.add_reinforcement_learning_pathway([rl_agent_state, rl_agent_action]) @@ -1784,11 +1787,13 @@ def test_nested_learn_then_run(self): wco = np.random.rand(D_c, D_o) * 0.02 - 0.01 who = np.random.rand(D_h, D_o) * 0.02 - 0.01 - il = pnl.TransferMechanism(size=D_i, name='input') - cl = pnl.TransferMechanism(size=D_c, name='control') - hl = pnl.TransferMechanism(size=D_h, name='hidden', + il = pnl.TransferMechanism(input_shapes=D_i, name='input') + cl = pnl.TransferMechanism(input_shapes=D_c, name='control') + hl = pnl.TransferMechanism( + input_shapes=D_h, name='hidden', function=pnl.Logistic(bias=-2)) - ol = pnl.TransferMechanism(size=D_o, name='output', + ol = pnl.TransferMechanism( + input_shapes=D_o, name='output', function=pnl.Logistic(bias=-2)) pih = pnl.MappingProjection(matrix=wih) pch = pnl.MappingProjection(matrix=wch) @@ -1852,9 +1857,9 @@ def test_stranded_nested_target_mech_error(self): ) def test_no_learning_of_spanning_nested_compositions(self): - input_mech = pnl.ProcessingMechanism(name='input_mech', size=2) - hidden_mech = pnl.ProcessingMechanism(name='hidden_mech', size=2) - output_mech = pnl.ProcessingMechanism(name='output_mech', size=2) + input_mech = pnl.ProcessingMechanism(name='input_mech', input_shapes=2) + hidden_mech = pnl.ProcessingMechanism(name='hidden_mech', input_shapes=2) + output_mech = pnl.ProcessingMechanism(name='output_mech', input_shapes=2) nested = pnl.Composition(name='nested', nodes=[hidden_mech]) error_msg = ('Learning in Python mode does not currently support nested Compositions; ' 'try using an AutodiffComposition with ExecutionMode.PyTorch.') @@ -1866,10 +1871,12 @@ def test_no_learning_of_spanning_nested_compositions(self): class TestBackPropLearning: def test_matrix_spec_and_learning_rate(self): - T1 = pnl.TransferMechanism(size = 2, + T1 = pnl.TransferMechanism( + input_shapes= 2, initial_value= [[0.0,0.0]], name = 'INPUT LAYER') - T2 = pnl.TransferMechanism(size= 1, + T2 = pnl.TransferMechanism( + input_shapes= 1, function =pnl.Logistic, name = 'OUTPUT LAYER') W = np.array([[0.1],[0.2]]) @@ -1918,15 +1925,15 @@ def test_different_learning_rate_specs_for_comp(self, spec_types): def test_basic_python_back_prop(self): 
input_layer = pnl.TransferMechanism(name="input", - size=2, + input_shapes=2, function=pnl.Logistic()) hidden_layer = pnl.TransferMechanism(name="hidden", - size=2, + input_shapes=2, function=pnl.Logistic()) output_layer = pnl.TransferMechanism(name="output", - size=2, + input_shapes=2, function=pnl.Logistic()) comp = pnl.Composition(name="backprop-composition") @@ -1974,17 +1981,17 @@ def test_backprop_fct_with_2_inputs_to_linear_combination_product(self, test_var comp_type = test_vars[1] exec_mode = test_vars[2] input_layer1 = pnl.TransferMechanism(name="input1", - size=2, + input_shapes=2, function=pnl.Linear()) input_layer2 = pnl.TransferMechanism(name="input2", - size=2, + input_shapes=2, function=pnl.Linear()) hidden_layer = pnl.ProcessingMechanism(name="hidden", input_ports=['input1','input2'], - size=(4,4), + input_shapes=(4, 4), function=pnl.LinearCombination(operation=pnl.PRODUCT)) output_layer = pnl.TransferMechanism(name="output", - size=2, + input_shapes=2, function=pnl.Linear()) i1_h_wts = pnl.MappingProjection(name='input_to_hidden1', sender=input_layer1, @@ -2036,20 +2043,20 @@ def test_backprop_fct_with_3_inputs_to_linear_combination_product(self, test_var comp_type = test_vars[1] exec_mode = test_vars[2] input_layer1 = pnl.TransferMechanism(name="input1", - size=2, + input_shapes=2, function=pnl.Linear()) input_layer2 = pnl.TransferMechanism(name="input2", - size=2, + input_shapes=2, function=pnl.Linear()) input_layer3 = pnl.TransferMechanism(name="input3", - size=2, + input_shapes=2, function=pnl.Linear()) hidden_layer = pnl.ProcessingMechanism(name="hidden", input_ports=['input1','input2','input3'], - size=(5,5,5), + input_shapes=(5, 5, 5), function=pnl.LinearCombination(operation=pnl.PRODUCT)) output_layer = pnl.TransferMechanism(name="output", - size=2, + input_shapes=2, function=pnl.Linear()) i1_h_wts = pnl.MappingProjection(name='input_to_hidden1', sender=input_layer1, @@ -2100,9 +2107,9 @@ def test_backprop_fct_with_3_inputs_to_linear_combination_product(self, test_var def test_two_output_ports_on_OUTPUT_Node(self): - input_A = pnl.ProcessingMechanism(name='INPUT_A', size=2) - input_B = pnl.ProcessingMechanism(name='INPUT_B', size=2) - output = pnl.ProcessingMechanism(name='OUTPUT', size=(2,3)) + input_A = pnl.ProcessingMechanism(name='INPUT_A', input_shapes=2) + input_B = pnl.ProcessingMechanism(name='INPUT_B', input_shapes=2) + output = pnl.ProcessingMechanism(name='OUTPUT', input_shapes=(2, 3)) comp = Composition(name='comp') with pytest.raises(CompositionError) as error_text: @@ -2242,19 +2249,19 @@ def test_multilayer_truth(self, expected_quantities): input_layer = pnl.TransferMechanism(name='input_layer', function=pnl.Logistic, - size=2) + input_shapes=2) hidden_layer_1 = pnl.TransferMechanism(name='hidden_layer_1', function=pnl.Logistic, - size=5) + input_shapes=5) hidden_layer_2 = pnl.TransferMechanism(name='hidden_layer_2', function=pnl.Logistic, - size=4) + input_shapes=4) output_layer = pnl.TransferMechanism(name='output_layer', function=pnl.Logistic, - size=3) + input_shapes=3) input_weights_matrix = (np.arange(2 * 5).reshape((2, 5)) + 1) / (2 * 5) middle_weights_matrix = (np.arange(5 * 4).reshape((5, 4)) + 1) / (5 * 4) @@ -2734,10 +2741,10 @@ def test_stroop_model_learning(self, order): word_to_hidden_wts = np.arange(4).reshape((2, 2)) hidden_to_response_wts = np.arange(4).reshape((2, 2)) - color_comp = pnl.TransferMechanism(size=2, name='Color') - word_comp = pnl.TransferMechanism(size=2, name='Word') - hidden_comp = pnl.TransferMechanism(size=2, 
function=pnl.Logistic(), name='Hidden') - response_comp = pnl.TransferMechanism(size=2, function=pnl.Logistic(), name='Response') + color_comp = pnl.TransferMechanism(input_shapes=2, name='Color') + word_comp = pnl.TransferMechanism(input_shapes=2, name='Word') + hidden_comp = pnl.TransferMechanism(input_shapes=2, function=pnl.Logistic(), name='Hidden') + response_comp = pnl.TransferMechanism(input_shapes=2, function=pnl.Logistic(), name='Response') if order == 'color_full': color_pathway = [color_comp, @@ -2955,11 +2962,13 @@ def test_pytorch_equivalence_with_learning_enabled_composition(self): wco = np.random.rand(D_c, D_o) * 0.02 - 0.01 who = np.random.rand(D_h, D_o) * 0.02 - 0.01 - il = pnl.TransferMechanism(size=D_i, name='input') - cl = pnl.TransferMechanism(size=D_c, name='control') - hl = pnl.TransferMechanism(size=D_h, name='hidden', + il = pnl.TransferMechanism(input_shapes=D_i, name='input') + cl = pnl.TransferMechanism(input_shapes=D_c, name='control') + hl = pnl.TransferMechanism( + input_shapes=D_h, name='hidden', function=pnl.Logistic(bias=-2)) - ol = pnl.TransferMechanism(size=D_o, name='output', + ol = pnl.TransferMechanism( + input_shapes=D_o, name='output', function=pnl.Logistic(bias=-2)) pih = pnl.MappingProjection(matrix=wih) pch = pnl.MappingProjection(matrix=wch) @@ -3092,14 +3101,14 @@ class TestRumelhartSemanticNetwork: def test_rumelhart_semantic_network_sequential(self): - rep_in = pnl.TransferMechanism(size=10, name='REP_IN') - rel_in = pnl.TransferMechanism(size=11, name='REL_IN') - rep_hidden = pnl.TransferMechanism(size=4, function=pnl.Logistic, name='REP_HIDDEN') - rel_hidden = pnl.TransferMechanism(size=5, function=pnl.Logistic, name='REL_HIDDEN') - rep_out = pnl.TransferMechanism(size=10, function=pnl.Logistic, name='REP_OUT') - prop_out = pnl.TransferMechanism(size=12, function=pnl.Logistic, name='PROP_OUT') - qual_out = pnl.TransferMechanism(size=13, function=pnl.Logistic, name='QUAL_OUT') - act_out = pnl.TransferMechanism(size=14, function=pnl.Logistic, name='ACT_OUT') + rep_in = pnl.TransferMechanism(input_shapes=10, name='REP_IN') + rel_in = pnl.TransferMechanism(input_shapes=11, name='REL_IN') + rep_hidden = pnl.TransferMechanism(input_shapes=4, function=pnl.Logistic, name='REP_HIDDEN') + rel_hidden = pnl.TransferMechanism(input_shapes=5, function=pnl.Logistic, name='REL_HIDDEN') + rep_out = pnl.TransferMechanism(input_shapes=10, function=pnl.Logistic, name='REP_OUT') + prop_out = pnl.TransferMechanism(input_shapes=12, function=pnl.Logistic, name='PROP_OUT') + qual_out = pnl.TransferMechanism(input_shapes=13, function=pnl.Logistic, name='QUAL_OUT') + act_out = pnl.TransferMechanism(input_shapes=14, function=pnl.Logistic, name='ACT_OUT') comp = pnl.Composition() diff --git a/tests/composition/test_models.py b/tests/composition/test_models.py index 10169b1f2c7..5bbff13c2a1 100644 --- a/tests/composition/test_models.py +++ b/tests/composition/test_models.py @@ -64,16 +64,19 @@ def test_bustamante_Stroop_model(self): # INPUT UNITS # colors: ('red', 'green'), words: ('RED','GREEN') - colors_input_layer = pnl.TransferMechanism(size=2, + colors_input_layer = pnl.TransferMechanism( + input_shapes=2, function=psyneulink.core.components.functions.nonstateful.transferfunctions.Linear, name='COLORS_INPUT') - words_input_layer = pnl.TransferMechanism(size=2, + words_input_layer = pnl.TransferMechanism( + input_shapes=2, function=psyneulink.core.components.functions.nonstateful.transferfunctions.Linear, name='WORDS_INPUT') # Task layer, tasks: ('name the color', 
'read the word') - task_layer = pnl.TransferMechanism(size=2, + task_layer = pnl.TransferMechanism( + input_shapes=2, function=psyneulink.core.components.functions.nonstateful.transferfunctions.Linear, name='TASK') @@ -84,7 +87,8 @@ def test_bustamante_Stroop_model(self): # randomly distributed noise to the net input # time averaging = integration_rate = 0.1 unit_noise = 0.005 - colors_hidden_layer = pnl.TransferMechanism(size=2, + colors_hidden_layer = pnl.TransferMechanism( + input_shapes=2, function=psyneulink.core.components.functions.nonstateful.transferfunctions .Logistic(gain=1.0, x_0=4.0), # should be able to get same result with offset = -4.0 @@ -94,7 +98,8 @@ def test_bustamante_Stroop_model(self): integration_rate=0.1, name='COLORS HIDDEN') # words_hidden: ('RED','GREEN') - words_hidden_layer = pnl.TransferMechanism(size=2, + words_hidden_layer = pnl.TransferMechanism( + input_shapes=2, function=pnl.Logistic(gain=1.0, x_0=4.0), integrator_mode=True, noise=pnl.NormalDist(mean=0, @@ -107,7 +112,8 @@ def test_bustamante_Stroop_model(self): # Response layer, provide input to accumulator, responses: ('red', 'green') # time averaging = tau = 0.1 # randomly distributed noise to the net input - response_layer = pnl.TransferMechanism(size=2, + response_layer = pnl.TransferMechanism( + input_shapes=2, function=psyneulink.core.components.functions.nonstateful.transferfunctions.Logistic, name='RESPONSE', integrator_mode=True, @@ -305,26 +311,26 @@ def switch_trial_type(): # def test_botvinick_model(self): # - # colors_input_layer = pnl.TransferMechanism(size=3, + # colors_input_layer = pnl.TransferMechanism(input_shapes=3, # function=pnl.Linear, # name='COLORS_INPUT') # - # words_input_layer = pnl.TransferMechanism(size=3, + # words_input_layer = pnl.TransferMechanism(input_shapes=3, # function=pnl.Linear, # name='WORDS_INPUT') # - # task_input_layer = pnl.TransferMechanism(size=2, + # task_input_layer = pnl.TransferMechanism(input_shapes=2, # function=pnl.Linear, # name='TASK_INPUT') # - # task_layer = pnl.RecurrentTransferMechanism(size=2, + # task_layer = pnl.RecurrentTransferMechanism(input_shapes=2, # function=pnl.Logistic(), # hetero=-2, # integrator_mode=True, # integration_rate=0.01, # name='TASK_LAYER') # - # colors_hidden_layer = pnl.RecurrentTransferMechanism(size=3, + # colors_hidden_layer = pnl.RecurrentTransferMechanism(input_shapes=3, # function=pnl.Logistic(bias=4.0), # # bias 4.0 is -4.0 in the paper see Docs for description # integrator_mode=True, @@ -332,7 +338,7 @@ def switch_trial_type(): # integration_rate=0.01, # cohen-huston text says 0.01 # name='COLORS_HIDDEN') # - # words_hidden_layer = pnl.RecurrentTransferMechanism(size=3, + # words_hidden_layer = pnl.RecurrentTransferMechanism(input_shapes=3, # function=pnl.Logistic(bias=4.0), # integrator_mode=True, # hetero=-2, @@ -340,7 +346,7 @@ def switch_trial_type(): # name='WORDS_HIDDEN') # # # Response layer, responses: ('red', 'green') - # response_layer = pnl.RecurrentTransferMechanism(size=2, + # response_layer = pnl.RecurrentTransferMechanism(input_shapes=2, # function=pnl.Logistic(), # hetero=-2.0, # integrator_mode=True, diff --git a/tests/composition/test_show_graph.py b/tests/composition/test_show_graph.py index 296f5964eed..7f9ecf2b208 100644 --- a/tests/composition/test_show_graph.py +++ b/tests/composition/test_show_graph.py @@ -74,13 +74,13 @@ class TestNested: def test_multiple_projections_to_node_of_nested_composition(self): '''This is based on the nback script''' - stim = TransferMechanism(name='STIM', 
size=5) - context = TransferMechanism(name='CONTEXT', size=5) + stim = TransferMechanism(name='STIM', input_shapes=5) + context = TransferMechanism(name='CONTEXT', input_shapes=5) # Nested comp (ffn) - stim_input_layer = TransferMechanism(name='STIM INPUT LAYER', size=5) - context_input_layer = TransferMechanism(name='CONTEXT INPUT LAYER', size=5) - match_output_layer = TransferMechanism(name='MATCH LAYER', size=1) + stim_input_layer = TransferMechanism(name='STIM INPUT LAYER', input_shapes=5) + context_input_layer = TransferMechanism(name='CONTEXT INPUT LAYER', input_shapes=5) + match_output_layer = TransferMechanism(name='MATCH LAYER', input_shapes=1) ffn = Composition(name='FFN', pathways=[[stim_input_layer, match_output_layer], [context_input_layer, match_output_layer]]) diff --git a/tests/control/test_gilzenrat.py b/tests/control/test_gilzenrat.py index 1f2c546a21e..d8d79f0af7a 100644 --- a/tests/control/test_gilzenrat.py +++ b/tests/control/test_gilzenrat.py @@ -185,7 +185,7 @@ def test_fitzHughNagumo_gilzenrat_figure_2(self): # input_weights = np.array([[1, .33], [.33, 1]]) # # # Implement self-excitatory (auto) and mutually inhibitory (hetero) connections within the decision layer -# decision_layer = GilzenratTransferMechanism(size=2, +# decision_layer = GilzenratTransferMechanism(input_shapes=2, # initial_value=np.array([[1, 0]]), # matrix=np.array([[1, 0], [0, -1]]), # # auto=1.0, @@ -201,7 +201,7 @@ def test_fitzHughNagumo_gilzenrat_figure_2(self): # # # Implement response layer with a single, self-excitatory connection # # To do Markus: specify recurrent self-connrection weight for response unit to 2.00 -# response = GilzenratTransferMechanism(size=1, +# response = GilzenratTransferMechanism(input_shapes=1, # initial_value=np.array([[2.0]]), # matrix=np.array([[0.5]]), # function=Logistic(bias=2), diff --git a/tests/functions/test_accumulator_integrator.py b/tests/functions/test_accumulator_integrator.py index f4fca045d25..372c5a4be81 100644 --- a/tests/functions/test_accumulator_integrator.py +++ b/tests/functions/test_accumulator_integrator.py @@ -179,8 +179,8 @@ def test_accumulator_as_function_of_matrix_param_of_mapping_projection(self): # Test that accumulator is function of parameter_port of mapping project, # and that its increment param works properly (used as modulatory param by LearningProjetion) - T1 = TransferMechanism(size=3) - T2 = TransferMechanism(size=3) + T1 = TransferMechanism(input_shapes=3) + T2 = TransferMechanism(input_shapes=3) M = MappingProjection(sender=T1, receiver=T2) C = Composition() C.add_linear_processing_pathway([T1, M, T2]) diff --git a/tests/functions/test_combination.py b/tests/functions/test_combination.py index 3cccac7336b..c3e72a648a2 100644 --- a/tests/functions/test_combination.py +++ b/tests/functions/test_combination.py @@ -279,7 +279,7 @@ def test_linear_combination_function(variable, operation, exponents, weights, sc @pytest.mark.parametrize("offset", [None, 1.5, [1,2.5,0,0]], ids=["O_NONE", "O_SCALAR", "O_VECTOR"]) def test_linear_combination_function_in_mechanism(operation, input, input_ports, scale, offset, benchmark, mech_mode): f = pnl.LinearCombination(default_variable=input, operation=operation, scale=scale, offset=offset) - p = pnl.ProcessingMechanism(size=[len(input[0])] * len(input), function=f, input_ports=input_ports) + p = pnl.ProcessingMechanism(input_shapes=[len(input[0])] * len(input), function=f, input_ports=input_ports) EX = pytest.helpers.get_mech_execution(p, mech_mode) diff --git a/tests/functions/test_memory.py 
b/tests/functions/test_memory.py index 4b2ae3fabcc..2f00734b940 100644 --- a/tests/functions/test_memory.py +++ b/tests/functions/test_memory.py @@ -305,10 +305,10 @@ def test_DictionaryMemory_with_initializer_and_key_size_diff_from_val_size(self) # def test_DictionaryMemory_without_initializer_in_composition(): # - # content = TransferMechanism(size=5) - # assoc = TransferMechanism(size=3) - # content_out = TransferMechanism(size=5) - # assoc_out = TransferMechanism(size=3) + # content = TransferMechanism(input_shapes=5) + # assoc = TransferMechanism(input_shapes=3) + # content_out = TransferMechanism(input_shapes=5) + # assoc_out = TransferMechanism(input_shapes=3) # # # Episodic Memory, Decision and Control # em = EpisodicMemoryMechanism(name='EM', @@ -1495,7 +1495,7 @@ def test_ContentAddressableMemory_unique_functions(self, param_name): # with pytest.raises(FunctionError) as error_text: # f = ContentAddressableMemory(initializer=[[[[1,0],[1,0],[1,0]], [[1,0],[1,0],[1,0]], [[1,0],[1,0],[1,0]]], # [[[0,1],[0,1],[0,1]], [[0,1],[0,0],[1,0]], [[0,1],[0,1],[0,1]]]]) - # em = EpisodicMemoryMechanism(size = [1,1,1], function=f) + # em = EpisodicMemoryMechanism(input_shapes = [1,1,1], function=f) # em.execute([[[0,1],[0,1],[0,1]], [[0,1],[0,0],[1,0]], [[0,1],[0,1],[0,1]]]) # assert 'Attempt to store and/or retrieve an entry in ContentAddressableMemory that has more than 2 dimensions (' \ # '3); try flattening innermost ones.' in str(error_text.value) @@ -1503,7 +1503,7 @@ def test_ContentAddressableMemory_unique_functions(self, param_name): # # Initializer with >2d ragged array # with pytest.raises(FunctionError) as error_text: # f = ContentAddressableMemory(initializer=[ [[1,2,3], [4]], [[1,2,3], [[1],[4]]] ]) - # em = EpisodicMemoryMechanism(size = [1,1,1], function=f) + # em = EpisodicMemoryMechanism(input_shapes = [1,1,1], function=f) # em.execute([[[0,1],[0,1],[0,1]], [[0,1],[0,0],[1,0]], [[0,1],[0,1],[0,1]]]) # assert 'Attempt to store and/or retrieve an entry in ContentAddressableMemory that has more than 2 dimensions (' \ # '3); try flattening innermost ones.' 
in str(error_text.value) diff --git a/tests/functions/test_user_defined_func.py b/tests/functions/test_user_defined_func.py index 8b5e10dd88a..64334e07fc6 100644 --- a/tests/functions/test_user_defined_func.py +++ b/tests/functions/test_user_defined_func.py @@ -539,7 +539,7 @@ def test_udf_in_mechanism(mech_mode, benchmark): def myFunction(variable, param1, param2): return sum(variable[0]) + 2 - myMech = ProcessingMechanism(function=myFunction, size=4, name='myMech') + myMech = ProcessingMechanism(function=myFunction, input_shapes=4, name='myMech') # assert 'param1' in myMech.parameter_ports.names # <- FIX reinstate when problem with function params is fixed # assert 'param2' in myMech.parameter_ports.names # <- FIX reinstate when problem with function params is fixed e = pytest.helpers.get_mech_execution(myMech, mech_mode) @@ -608,8 +608,8 @@ def test_udf_composition_origin(comp_mode, benchmark): def myFunction(variable, context): return [variable[0][1], variable[0][0]] - myMech = ProcessingMechanism(function=myFunction, size=3, name='myMech') - T = TransferMechanism(size=2, function=Linear) + myMech = ProcessingMechanism(function=myFunction, input_shapes=3, name='myMech') + T = TransferMechanism(input_shapes=2, function=Linear) c = Composition(pathways=[myMech, T]) benchmark(c.run, inputs={myMech: [[1, 3, 5]]}, execution_mode=comp_mode) np.testing.assert_allclose(c.results[0][0], [3, 1]) @@ -621,8 +621,8 @@ def test_udf_composition_terminal(comp_mode, benchmark): def myFunction(variable, context): return [variable[0][2], variable[0][0]] - myMech = ProcessingMechanism(function=myFunction, size=3, name='myMech') - T2 = TransferMechanism(size=3, function=Linear) + myMech = ProcessingMechanism(function=myFunction, input_shapes=3, name='myMech') + T2 = TransferMechanism(input_shapes=3, function=Linear) c2 = Composition(pathways=[[T2, myMech]]) benchmark(c2.run, inputs={T2: [[1, 2, 3]]}, execution_mode=comp_mode) np.testing.assert_allclose(c2.results[0][0], [3, 1]) @@ -635,7 +635,7 @@ def myFunction(variable, context): return L(variable) + 2 U = UserDefinedFunction(custom_function=myFunction, default_variable=[[0, 0, 0]]) - myMech = ProcessingMechanism(function=myFunction, size=3, name='myMech') + myMech = ProcessingMechanism(function=myFunction, input_shapes=3, name='myMech') val1 = myMech.execute(input=[1, 2, 3]) val2 = U.execute(variable=[[1, 2, 3]]) np.testing.assert_allclose(val1, val2) diff --git a/tests/log/test_log.py b/tests/log/test_log.py index f691a60ccbc..30375fe9dfe 100644 --- a/tests/log/test_log.py +++ b/tests/log/test_log.py @@ -11,8 +11,8 @@ class TestLog: def test_log(self): - T_1 = pnl.TransferMechanism(name='log_test_T_1', size=2) - T_2 = pnl.TransferMechanism(name='log_test_T_2', size=2) + T_1 = pnl.TransferMechanism(name='log_test_T_1', input_shapes=2) + T_2 = pnl.TransferMechanism(name='log_test_T_2', input_shapes=2) PS = pnl.Composition(name='log_test_PS', pathways=[T_1, T_2]) PJ = T_2.path_afferents[0] @@ -262,9 +262,9 @@ def test_log_initialization(self): def test_log_dictionary_without_time(self): T1 = pnl.TransferMechanism(name='log_test_T1', - size=2) + input_shapes=2) T2 = pnl.TransferMechanism(name='log_test_T2', - size=2) + input_shapes=2) PS = pnl.Composition(name='log_test_PS', pathways=[T1, T2]) PJ = T2.path_afferents[0] @@ -495,9 +495,9 @@ def test_log_dictionary_without_time(self): def test_run_resets(self): import psyneulink as pnl T1 = pnl.TransferMechanism(name='log_test_T1', - size=2) + input_shapes=2) T2 = pnl.TransferMechanism(name='log_test_T2', - 
size=2) + input_shapes=2) COMP = pnl.Composition(name='COMP', pathways=[T1, T2]) T1.set_log_conditions('mod_slope') T2.set_log_conditions('value') @@ -519,10 +519,10 @@ def test_run_resets(self): def test_log_dictionary_with_time(self): T1 = pnl.TransferMechanism(name='log_test_T1', - size=2) + input_shapes=2) T2 = pnl.TransferMechanism(name='log_test_T2', function=psyneulink.core.components.functions.nonstateful.transferfunctions.Linear(slope=2.0), - size=2) + input_shapes=2) COMP = pnl.Composition(name='log_test_COMP', pathways=[T1, T2]) assert T1.loggable_items == { @@ -958,14 +958,14 @@ def test_log_csv_multiple_contexts(self): ) def test_log_multi_calls_single_timestep(self, scheduler_conditions, multi_run): lca = pnl.LCAMechanism( - size=2, + input_shapes=2, leak=0.5, threshold=0.515, reset_stateful_function_when=pnl.AtTrialStart() ) lca.set_log_conditions(pnl.VALUE) m0 = pnl.ProcessingMechanism( - size=2 + input_shapes=2 ) comp = pnl.Composition() comp.add_linear_processing_pathway([m0, lca]) @@ -1000,8 +1000,8 @@ class TestClearLog: def test_clear_log(self): # Create Composition - T_1 = pnl.TransferMechanism(name='log_test_T_1', size=2) - T_2 = pnl.TransferMechanism(name='log_test_T_2', size=2) + T_1 = pnl.TransferMechanism(name='log_test_T_1', input_shapes=2) + T_2 = pnl.TransferMechanism(name='log_test_T_2', input_shapes=2) COMP = pnl.Composition(name="log_test_COMP", pathways=[T_1, T_2]) PJ = T_2.path_afferents[0] @@ -1210,19 +1210,19 @@ def test_multilayer(self): input_layer = pnl.TransferMechanism(name='input_layer', function=pnl.Logistic, - size=2) + input_shapes=2) hidden_layer_1 = pnl.TransferMechanism(name='hidden_layer_1', function=pnl.Logistic, - size=5) + input_shapes=5) hidden_layer_2 = pnl.TransferMechanism(name='hidden_layer_2', function=pnl.Logistic, - size=4) + input_shapes=4) output_layer = pnl.TransferMechanism(name='output_layer', function=pnl.Logistic, - size=3) + input_shapes=3) input_weights_matrix = (np.arange(2 * 5).reshape((2, 5)) + 1) / (2 * 5) middle_weights_matrix = (np.arange(5 * 4).reshape((5, 4)) + 1) / (5 * 4) diff --git a/tests/log/test_rpc.py b/tests/log/test_rpc.py index b90b27f8d2c..51c7b03c5ea 100644 --- a/tests/log/test_rpc.py +++ b/tests/log/test_rpc.py @@ -9,8 +9,8 @@ class TestRPC: def test_transfer_mech(self): - T_1 = pnl.TransferMechanism(name='log_test_T_1', size=2) - T_2 = pnl.TransferMechanism(name='log_test_T_2', size=2) + T_1 = pnl.TransferMechanism(name='log_test_T_1', input_shapes=2) + T_2 = pnl.TransferMechanism(name='log_test_T_2', input_shapes=2) PS = pnl.Composition(name='log_test_PS', pathways=[T_1, T_2]) con_with_rpc_pipeline = pnl.Context(rpc_pipeline=Queue(), execution_id=PS) @@ -77,9 +77,9 @@ def test_delivery_initialization(self): def test_run_resets(self): T1 = pnl.TransferMechanism(name='log_test_T1', - size=2) + input_shapes=2) T2 = pnl.TransferMechanism(name='log_test_T2', - size=2) + input_shapes=2) COMP = pnl.Composition(name='COMP', pathways=[T1, T2]) con_with_rpc_pipeline = pnl.Context(rpc_pipeline=Queue(), execution_id=COMP) pipeline = con_with_rpc_pipeline.rpc_pipeline @@ -111,10 +111,10 @@ def test_run_resets(self): def test_log_dictionary_with_time(self): T1 = pnl.TransferMechanism(name='log_test_T1', - size=2) + input_shapes=2) T2 = pnl.TransferMechanism(name='log_test_T2', function=pnl.Linear(slope=2.0), - size=2) + input_shapes=2) COMP = pnl.Composition(name='log_test_COMP', pathways=[T1, T2]) con_with_rpc_pipeline = pnl.Context(rpc_pipeline=Queue(), execution_id=COMP) pipeline = 
con_with_rpc_pipeline.rpc_pipeline @@ -390,14 +390,14 @@ def test_log_csv_multiple_contexts(self): ) def test_log_multi_calls_single_timestep(self, scheduler_conditions, multi_run): lca = pnl.LCAMechanism( - size=2, + input_shapes=2, leak=0.5, threshold=0.515, reset_stateful_function_when=pnl.AtTrialStart() ) lca.set_delivery_conditions(pnl.VALUE) m0 = pnl.ProcessingMechanism( - size=2 + input_shapes=2 ) comp = pnl.Composition() comp.add_linear_processing_pathway([m0, lca]) @@ -438,19 +438,19 @@ class TestFullModels: def test_multilayer(self): input_layer = pnl.TransferMechanism(name='input_layer', function=pnl.Logistic, - size=2) + input_shapes=2) hidden_layer_1 = pnl.TransferMechanism(name='hidden_layer_1', function=pnl.Logistic, - size=5) + input_shapes=5) hidden_layer_2 = pnl.TransferMechanism(name='hidden_layer_2', function=pnl.Logistic, - size=4) + input_shapes=4) output_layer = pnl.TransferMechanism(name='output_layer', function=pnl.Logistic, - size=3) + input_shapes=3) input_weights_matrix = (np.arange(2 * 5).reshape((2, 5)) + 1) / (2 * 5) middle_weights_matrix = (np.arange(5 * 4).reshape((5, 4)) + 1) / (5 * 4) diff --git a/tests/mdf/model_varied_matrix_sizes.py b/tests/mdf/model_varied_matrix_sizes.py index 900f0b570f1..cea792773b9 100644 --- a/tests/mdf/model_varied_matrix_sizes.py +++ b/tests/mdf/model_varied_matrix_sizes.py @@ -1,10 +1,10 @@ import psyneulink as pnl comp = pnl.Composition(name='comp') -A = pnl.TransferMechanism(name='A', size=2) -B = pnl.TransferMechanism(name='B', size=3) -C = pnl.TransferMechanism(name='C', size=4) -D = pnl.TransferMechanism(name='D', size=5) +A = pnl.TransferMechanism(name='A', input_shapes=2) +B = pnl.TransferMechanism(name='B', input_shapes=3) +C = pnl.TransferMechanism(name='C', input_shapes=4) +D = pnl.TransferMechanism(name='D', input_shapes=5) for n in [A, B, C, D]: comp.add_node(n) diff --git a/tests/mdf/stroop_conflict_monitoring.py b/tests/mdf/stroop_conflict_monitoring.py index d46b18a70b8..6bc9b19bfe2 100644 --- a/tests/mdf/stroop_conflict_monitoring.py +++ b/tests/mdf/stroop_conflict_monitoring.py @@ -5,14 +5,14 @@ # Construct the color naming pathway: color_input = pnl.ProcessingMechanism( - name="color_input", size=2 + name="color_input", input_shapes=2 ) # Note: default function is Linear color_input_to_hidden_wts = np.array([[2, -2], [-2, 2]]) color_hidden = pnl.ProcessingMechanism( - name="color_hidden", size=2, function=pnl.Logistic(bias=-4) + name="color_hidden", input_shapes=2, function=pnl.Logistic(bias=-4) ) color_hidden_to_output_wts = np.array([[2, -2], [-2, 2]]) -output = pnl.ProcessingMechanism(name="OUTPUT", size=2, function=pnl.Logistic) +output = pnl.ProcessingMechanism(name="OUTPUT", input_shapes=2, function=pnl.Logistic) color_pathway = [ color_input, color_input_to_hidden_wts, @@ -22,10 +22,10 @@ ] # Construct the word reading pathway (using the same output_layer) -word_input = pnl.ProcessingMechanism(name="word_input", size=2) +word_input = pnl.ProcessingMechanism(name="word_input", input_shapes=2) word_input_to_hidden_wts = np.array([[3, -3], [-3, 3]]) word_hidden = pnl.ProcessingMechanism( - name="word_hidden", size=2, function=pnl.Logistic(bias=-4) + name="word_hidden", input_shapes=2, function=pnl.Logistic(bias=-4) ) word_hidden_to_output_wts = np.array([[3, -3], [-3, 3]]) word_pathway = [ @@ -37,8 +37,8 @@ ] # Construct the task specification pathways -task_input = pnl.ProcessingMechanism(name="task_input", size=2) -task = pnl.LCAMechanism(name="TASK", size=2, initial_value=[0.5, 0.5]) +task_input = 
pnl.ProcessingMechanism(name="task_input", input_shapes=2) +task = pnl.LCAMechanism(name="TASK", input_shapes=2, initial_value=[0.5, 0.5]) task_color_wts = np.array([[4, 4], [0, 0]]) task_word_wts = np.array([[0, 0], [4, 4]]) task_color_pathway = [task_input, task, task_color_wts, color_hidden] @@ -53,7 +53,7 @@ name="CONTROL", objective_mechanism=pnl.ObjectiveMechanism( name="Conflict Monitor", - function=pnl.Energy(size=2, matrix=[[0, -2.5], [-2.5, 0]]), + function=pnl.Energy(input_shapes=2, matrix=[[0, -2.5], [-2.5, 0]]), monitor=output, ), default_allocation=[0.5], diff --git a/tests/mechanisms/test_control_mechanism.py b/tests/mechanisms/test_control_mechanism.py index 340dae76db1..85051f9d928 100644 --- a/tests/mechanisms/test_control_mechanism.py +++ b/tests/mechanisms/test_control_mechanism.py @@ -135,14 +135,15 @@ def test_control_modulation(self): def test_identicalness_of_control_and_gating(self): """Tests same configuration as gating in tests/mechansims/test_gating_mechanism""" - Input_Layer = pnl.TransferMechanism(name='Input Layer', function=pnl.Logistic, size=2) - Hidden_Layer_1 = pnl.TransferMechanism(name='Hidden Layer_1', function=pnl.Logistic, size=5) - Hidden_Layer_2 = pnl.TransferMechanism(name='Hidden Layer_2', function=pnl.Logistic, size=4) - Output_Layer = pnl.TransferMechanism(name='Output Layer', function=pnl.Logistic, size=3) - - Control_Mechanism = pnl.ControlMechanism(size=[1], control=[Hidden_Layer_1.input_port, - Hidden_Layer_2.input_port, - Output_Layer.input_port]) + Input_Layer = pnl.TransferMechanism(name='Input Layer', function=pnl.Logistic, input_shapes=2) + Hidden_Layer_1 = pnl.TransferMechanism(name='Hidden Layer_1', function=pnl.Logistic, input_shapes=5) + Hidden_Layer_2 = pnl.TransferMechanism(name='Hidden Layer_2', function=pnl.Logistic, input_shapes=4) + Output_Layer = pnl.TransferMechanism(name='Output Layer', function=pnl.Logistic, input_shapes=3) + + Control_Mechanism = pnl.ControlMechanism( + input_shapes=[1], control=[Hidden_Layer_1.input_port, + Hidden_Layer_2.input_port, + Output_Layer.input_port]) Input_Weights_matrix = (np.arange(2 * 5).reshape((2, 5)) + 1) / (2 * 5) Middle_Weights_matrix = (np.arange(5 * 4).reshape((5, 4)) + 1) / (5 * 4) diff --git a/tests/mechanisms/test_ddm_mechanism.py b/tests/mechanisms/test_ddm_mechanism.py index b9b8bad54a4..e4d4949d791 100644 --- a/tests/mechanisms/test_ddm_mechanism.py +++ b/tests/mechanisms/test_ddm_mechanism.py @@ -173,7 +173,7 @@ def test_is_finished_stops_composition(self): class TestInputPorts: def test_regular_input_mode(self): - input_mech = ProcessingMechanism(size=2) + input_mech = ProcessingMechanism(input_shapes=2) ddm = DDM( function=DriftDiffusionAnalytical(), output_ports=[SELECTED_INPUT_ARRAY, DECISION_VARIABLE_ARRAY], @@ -191,7 +191,7 @@ def test_regular_input_mode(self): np.testing.assert_allclose(result, [[1.0], [1.0]]) def test_array_mode(self): - input_mech = ProcessingMechanism(size=2) + input_mech = ProcessingMechanism(input_shapes=2) ddm = DDM( input_format=ARRAY, function=DriftDiffusionAnalytical(), @@ -486,13 +486,13 @@ def test_DDM_rate_fn(): # ------------------------------------------------------------------------------------------------ # TEST 1 -# size = int, check if variable is an array of zeros +# input_shapes = int, check if variable is an array of zeros def test_DDM_size_int_check_var(): T = DDM( name='DDM', - size=1, + input_shapes=1, function=DriftDiffusionIntegrator( noise=0.0, rate=-5.0, @@ -504,13 +504,13 @@ def test_DDM_size_int_check_var(): # 
------------------------------------------------------------------------------------------------ # TEST 2 -# size = float, variable = [.4], check output after execution +# input_shapes = float, variable = [.4], check output after execution def test_DDM_size_int_inputs(): T = DDM( name='DDM', - size=1, + input_shapes=1, function=DriftDiffusionIntegrator( noise=0.0, rate=-5.0, @@ -530,14 +530,14 @@ def test_DDM_size_int_inputs(): # ------------------------------------------------------------------------------------------------ # TEST 2 -# size = -1.0, check less-than-one error +# input_shapes = -1.0, check less-than-one error def test_DDM_mech_size_negative_one(): with pytest.raises(ComponentError) as error_text: T = DDM( name='DDM', - size=-1, + input_shapes=-1, function=DriftDiffusionIntegrator( noise=0.0, rate=-5.0, @@ -549,14 +549,14 @@ def test_DDM_mech_size_negative_one(): # ------------------------------------------------------------------------------------------------ # TEST 3 -# size = 3.0, check size-too-large error +# input_shapes = 3.0, check input_shapes-too-large error def test_DDM_size_too_large(): with pytest.raises(DDMError) as error_text: T = DDM( name='DDM', - size=3, + input_shapes=3, function=DriftDiffusionIntegrator( noise=0.0, rate=-5.0, @@ -568,14 +568,14 @@ def test_DDM_size_too_large(): # ------------------------------------------------------------------------------------------------ # TEST 4 -# size = [1,1], check too-many-input-ports error +# input_shapes = [1,1], check too-many-input-ports error def test_DDM_size_too_long(): with pytest.raises(DDMError) as error_text: T = DDM( name='DDM', - size=[1, 1], + input_shapes=[1, 1], function=DriftDiffusionIntegrator( noise=0.0, rate=-5.0, diff --git a/tests/mechanisms/test_episodic_memory.py b/tests/mechanisms/test_episodic_memory.py index 63ecb5f409b..16753a657e6 100644 --- a/tests/mechanisms/test_episodic_memory.py +++ b/tests/mechanisms/test_episodic_memory.py @@ -47,7 +47,7 @@ @pytest.mark.parametrize('variable, func, params, expected', test_data, ids=names) def test_with_dictionary_memory(variable, func, params, expected, benchmark, mech_mode): f = func(seed=0, **params) - m = EpisodicMemoryMechanism(size=len(variable[0]), assoc_size=len(variable[1]), function=f) + m = EpisodicMemoryMechanism(input_shapes=len(variable[0]), assoc_size=len(variable[1]), function=f) EX = pytest.helpers.get_mech_execution(m, mech_mode) EX(variable) @@ -85,7 +85,7 @@ def test_with_dictionary_memory(variable, func, params, expected, benchmark, mec # func_params {'default_variable': [[0,0],[0,0],[0,0,0]]}, # mech_params - {'size':[2,2,3]}, + {'input_shapes':[2,2,3]}, # test_var [[10.,10.],[20., 30.],[40., 50., 60.]], # expected input_port names @@ -111,7 +111,7 @@ def test_with_dictionary_memory(variable, func, params, expected, benchmark, mec {'initializer':np.array([[np.array([1]), np.array([2, 3]), np.array([4, 5, 6])], [list([10]), list([20, 30]), list([40, 50, 60])], [np.array([11]), np.array([22, 33]), np.array([44, 55, 66])]], dtype=object)}, - {'size':[1,2,3]}, + {'input_shapes':[1,2,3]}, [[10.],[20., 30.],[40., 50., 60.]], ['FIELD_0_INPUT', 'FIELD_1_INPUT', 'FIELD_2_INPUT'], ['RETRIEVED_FIELD_0', 'RETRIEVED_FIELD_1', 'RETRIEVED_FIELD_2'], @@ -136,7 +136,7 @@ def test_with_dictionary_memory(variable, func, params, expected, benchmark, mec {'initializer':np.array([[np.array([1,2]), np.array([3,4]), np.array([5, 6])], [[10,20], [30,40], [50,60]], [np.array([11,12]), np.array([22, 23]), np.array([34, 35])]])}, - {'size':[2,2,2]}, + 
{'input_shapes':[2,2,2]}, [[11,13], [22,23], [34, 35]], ['FIELD_0_INPUT', 'FIELD_1_INPUT', 'FIELD_2_INPUT'], ['RETRIEVED_FIELD_0', 'RETRIEVED_FIELD_1', 'RETRIEVED_FIELD_2'], @@ -249,7 +249,7 @@ def test_contentaddressable_memory_warnings_and_errors(): with pytest.raises(FunctionError) as error_text: f = ContentAddressableMemory(initializer=[[[[1],[0],[1]], [[1],[0],[0]], [[0],[1],[1]]], [[[0],[1],[0]], [[0],[1],[1]], [[1],[1],[0]]]]) - em = EpisodicMemoryMechanism(size = [1,1,1], function=f) + em = EpisodicMemoryMechanism(input_shapes= [1, 1, 1], function=f) em.execute([[[0],[1],[0]], [[0],[1],[1]], [[1],[1],[0]]]) assert 'Attempt to store and/or retrieve an entry in ContentAddressableMemory ' \ '([[[1]\n [0]\n [1]]\n\n [[1]\n [0]\n [0]]\n\n [[0]\n [1]\n [1]]]) ' \ diff --git a/tests/mechanisms/test_gating_mechanism.py b/tests/mechanisms/test_gating_mechanism.py index 05f1a325009..717d5ec24ae 100644 --- a/tests/mechanisms/test_gating_mechanism.py +++ b/tests/mechanisms/test_gating_mechanism.py @@ -15,12 +15,12 @@ def test_gating_with_composition(): """Tests same configuration as control of InputPort in tests/mechansims/test_identicalness_of_control_and_gating """ - Input_Layer = TransferMechanism(name='Input Layer', function=Logistic, size=2) - Hidden_Layer_1 = TransferMechanism(name='Hidden Layer_1', function=Logistic, size=5) - Hidden_Layer_2 = TransferMechanism(name='Hidden Layer_2', function=Logistic, size=4) - Output_Layer = TransferMechanism(name='Output Layer', function=Logistic, size=3) + Input_Layer = TransferMechanism(name='Input Layer', function=Logistic, input_shapes=2) + Hidden_Layer_1 = TransferMechanism(name='Hidden Layer_1', function=Logistic, input_shapes=5) + Hidden_Layer_2 = TransferMechanism(name='Hidden Layer_2', function=Logistic, input_shapes=4) + Output_Layer = TransferMechanism(name='Output Layer', function=Logistic, input_shapes=3) - Gating_Mechanism = GatingMechanism(size=[1], gate=[Hidden_Layer_1, Hidden_Layer_2, Output_Layer]) + Gating_Mechanism = GatingMechanism(input_shapes=[1], gate=[Hidden_Layer_1, Hidden_Layer_2, Output_Layer]) Input_Weights_matrix = (np.arange(2 * 5).reshape((2, 5)) + 1) / (2 * 5) Middle_Weights_matrix = (np.arange(5 * 4).reshape((5, 4)) + 1) / (5 * 4) @@ -137,7 +137,7 @@ def my_sinusoidal_fct( ) Gating_Mechanism = pnl.GatingMechanism( - size=[1], + input_shapes=[1], gating_signals=[ # Output_Layer Output_Layer.output_port, diff --git a/tests/mechanisms/test_input_output_labels.py b/tests/mechanisms/test_input_output_labels.py index 79cbb9f9584..ae1fb6e35e1 100644 --- a/tests/mechanisms/test_input_output_labels.py +++ b/tests/mechanisms/test_input_output_labels.py @@ -187,9 +187,9 @@ # "green": [0, 0]} # output_labels_dict_M2 = {"red": [0, 0], # "green": [1, 1]} -# M1 = ProcessingMechanism(size=2, +# M1 = ProcessingMechanism(input_shapes=2, # params={INPUT_LABELS_DICT: input_labels_dict_M1}) -# M2 = ProcessingMechanism(size=2, +# M2 = ProcessingMechanism(input_shapes=2, # params={OUTPUT_LABELS_DICT: output_labels_dict_M2}) # C = Composition() # learning_pathway = C.add_backpropagation_learning_pathway(pathway=[M1, M2], learning_rate=0.25) @@ -216,9 +216,9 @@ # output_labels_dict_M2 = {0: {"red": [0, 0], # "green": [1, 1]} # } -# M1 = ProcessingMechanism(size=2, +# M1 = ProcessingMechanism(input_shapes=2, # params={INPUT_LABELS_DICT: input_labels_dict_M1}) -# M2 = ProcessingMechanism(size=2, +# M2 = ProcessingMechanism(input_shapes=2, # params={OUTPUT_LABELS_DICT: output_labels_dict_M2}) # C = Composition() # @@ -273,7 +273,7 @@ # "green": 
[0.0, 1.0]} # output_labels_dict = {"red": [1.0, 0.0], # "green": [0.0, 1.0]} -# M = ProcessingMechanism(size=2, +# M = ProcessingMechanism(input_shapes=2, # params={INPUT_LABELS_DICT: input_labels_dict, # OUTPUT_LABELS_DICT: output_labels_dict}) # C = Composition(pathways=[M]) @@ -301,7 +301,7 @@ # "blue": [2.0, 2.0]} # output_labels_dict = {"red": [1.0, 0.0], # "green": [0.0, 1.0]} -# M = ProcessingMechanism(size=2, +# M = ProcessingMechanism(input_shapes=2, # params={INPUT_LABELS_DICT: input_labels_dict, # OUTPUT_LABELS_DICT: output_labels_dict}) # C = Composition(pathways=[M]) diff --git a/tests/mechanisms/test_input_port_spec.py b/tests/mechanisms/test_input_port_spec.py index 8c36b6f7f3a..482deefa83e 100644 --- a/tests/mechanisms/test_input_port_spec.py +++ b/tests/mechanisms/test_input_port_spec.py @@ -16,7 +16,7 @@ mismatches_specified_default_variable_error_text = 'not compatible with its specified default variable' mismatches_default_variable_format_error_text = 'is not compatible with its expected format' -mismatches_size_error_text = 'not compatible with the default variable determined from size parameter' +mismatches_input_shapes_error_text = 'not compatible with the default variable determined from input_shapes parameter' mismatches_more_input_ports_than_default_variable_error_text = 'There are more InputPorts specified' mismatches_fewer_input_ports_than_default_variable_error_text = 'There are fewer InputPorts specified' mismatches_specified_matrix_pattern = r'The number of rows \(\d\) of the matrix provided for .+ does not equal the length \(\d\) of the sender vector' @@ -283,7 +283,7 @@ def test_specification_dict(self): def test_default_variable_override_mech_list(self): - R2 = TransferMechanism(size=3) + R2 = TransferMechanism(input_shapes=3) # default_variable override of OutputPort.value T = TransferMechanism( @@ -301,8 +301,8 @@ def test_default_variable_override_mech_list(self): # 2-item tuple specification with default_variable override of OutputPort.value def test_2_item_tuple_spec(self): - R2 = TransferMechanism(size=3) - T = TransferMechanism(size=2, input_ports=[(R2, np.zeros((3, 2)))]) + R2 = TransferMechanism(input_shapes=3) + T = TransferMechanism(input_shapes=2, input_ports=[(R2, np.zeros((3, 2)))]) np.testing.assert_array_equal(T.defaults.variable, np.array([[0, 0]])) assert len(T.input_ports) == 1 assert len(T.input_port.path_afferents[0].sender.defaults.variable) == 3 @@ -311,10 +311,10 @@ def test_2_item_tuple_spec(self): # ------------------------------------------------------------------------------------------------ # TEST 12.1 - # 2-item tuple specification with value as first item (and no size specification for T) + # 2-item tuple specification with value as first item (and no input_shapes specification for T) def test_2_item_tuple_value_for_first_item(self): - R2 = TransferMechanism(size=3) + R2 = TransferMechanism(input_shapes=3) T = TransferMechanism(input_ports=[([0,0], R2)]) np.testing.assert_array_equal(T.defaults.variable, np.array([[0, 0]])) assert len(T.input_ports) == 1 @@ -327,8 +327,8 @@ def test_2_item_tuple_value_for_first_item(self): # 4-item tuple Specification def test_projection_tuple_with_matrix_spec(self): - R2 = TransferMechanism(size=3) - T = TransferMechanism(size=2, input_ports=[(R2, None, None, np.zeros((3, 2)))]) + R2 = TransferMechanism(input_shapes=3) + T = TransferMechanism(input_shapes=2, input_ports=[(R2, None, None, np.zeros((3, 2)))]) np.testing.assert_array_equal(T.defaults.variable, np.array([[0, 0]])) assert 
len(T.input_ports) == 1 assert T.input_port.path_afferents[0].sender.defaults.variable.shape[-1] == 3 @@ -340,10 +340,10 @@ def test_projection_tuple_with_matrix_spec(self): # Standalone Projection specification with Mechanism as sender def test_projection_list_mech_as_send(self): - R2 = TransferMechanism(size=3) + R2 = TransferMechanism(input_shapes=3) P = MappingProjection(sender=R2) T = TransferMechanism( - size=2, + input_shapes=2, input_ports=[P] ) np.testing.assert_array_equal(T.defaults.variable, np.array([[0, 0]])) @@ -357,10 +357,10 @@ def test_projection_list_mech_as_send(self): # Standalone Projection specification with Port as sender def test_projection_list_port_as_sender(self): - R2 = TransferMechanism(size=3) + R2 = TransferMechanism(input_shapes=3) P = MappingProjection(sender=R2.output_port) T = TransferMechanism( - size=2, + input_shapes=2, input_ports=[P] ) np.testing.assert_array_equal(T.defaults.variable, np.array([[0, 0]])) @@ -374,10 +374,10 @@ def test_projection_list_port_as_sender(self): # Projection specification in Tuple def test_projection_in_tuple(self): - R2 = TransferMechanism(size=3) + R2 = TransferMechanism(input_shapes=3) P = MappingProjection(sender=R2) T = TransferMechanism( - size=2, + input_shapes=2, input_ports=[(R2, None, None, P)] ) np.testing.assert_array_equal(T.defaults.variable, np.array([[0, 0]])) @@ -501,9 +501,9 @@ def test_dict_with_variable_mismatches_default_multiple_input_ports(self): # ------------------------------------------------------------------------------------------------ # TEST 24 - def test_dict_with_variable_matches_size(self): + def test_dict_with_variable_matches_input_shapes(self): T = TransferMechanism( - size=2, + input_shapes=2, input_ports=[{NAME: 'FIRST', VARIABLE: [0, 0]}] ) np.testing.assert_array_equal(T.defaults.variable, np.array([[0, 0]])) @@ -512,13 +512,13 @@ def test_dict_with_variable_matches_size(self): # ------------------------------------------------------------------------------------------------ # TEST 25 - def test_dict_with_variable_mismatches_size(self): + def test_dict_with_variable_mismatches_input_shapes(self): with pytest.raises(MechanismError) as error_text: TransferMechanism( - size=1, + input_shapes=1, input_ports=[{NAME: 'FIRST', VARIABLE: [0, 0]}] ) - assert mismatches_size_error_text in str(error_text.value) + assert mismatches_input_shapes_error_text in str(error_text.value) # ------------------------------------------------------------------------------------------------ # TEST 26 @@ -562,7 +562,7 @@ def test_InputPort_mismatches_default(self): # TEST 31 def test_projection_with_matrix_and_sender(self): - m = TransferMechanism(size=2) + m = TransferMechanism(input_shapes=2) p = MappingProjection(sender=m, matrix=[[0, 0, 0], [0, 0, 0]]) T = TransferMechanism(input_ports=[p]) @@ -574,13 +574,13 @@ def test_projection_with_matrix_and_sender(self): def tests_for_projection_with_matrix_and_sender_mismatches_default(self): with pytest.raises(MechanismError) as error_text: - m = TransferMechanism(size=2) + m = TransferMechanism(input_shapes=2) p = MappingProjection(sender=m, matrix=[[0, 0, 0], [0, 0, 0]]) TransferMechanism(default_variable=[0, 0], input_ports=[p]) assert mismatches_specified_default_variable_error_text in str(error_text.value) with pytest.raises(FunctionError) as error_text: - m = TransferMechanism(size=3, output_ports=[pnl.MEAN]) + m = TransferMechanism(input_shapes=3, output_ports=[pnl.MEAN]) p = MappingProjection(sender=m, matrix=[[0,0,0], [0,0,0]]) T = 
TransferMechanism(input_ports=[p]) assert re.match( @@ -589,7 +589,7 @@ def tests_for_projection_with_matrix_and_sender_mismatches_default(self): ) with pytest.raises(FunctionError) as error_text: - m2 = TransferMechanism(size=2, output_ports=[pnl.MEAN]) + m2 = TransferMechanism(input_shapes=2, output_ports=[pnl.MEAN]) p2 = MappingProjection(sender=m2, matrix=[[1,1,1],[1,1,1]]) T2 = TransferMechanism(input_ports=[p2]) assert re.match( @@ -601,7 +601,7 @@ def tests_for_projection_with_matrix_and_sender_mismatches_default(self): # TEST 33 def test_projection_with_sender_and_default(self): - t = TransferMechanism(size=3) + t = TransferMechanism(input_shapes=3) p = MappingProjection(sender=t) T = TransferMechanism(default_variable=[[0, 0]], input_ports=[p]) @@ -801,25 +801,25 @@ def test_list_of_mechanisms_with_gating_mechanism(self): assert T2.output_ports[0].mod_afferents[0].sender.name=='b' # ------------------------------------------------------------------------------------------------ - # THOROUGH TESTING OF mech, 2-item, 3-item and 4-item tuple specifications with and without default_variable/size + # THOROUGH TESTING OF mech, 2-item, 3-item and 4-item tuple specifications with and without default_variable/input_shapes # (some of these may be duplicative of tests above) # pytest does not support fixtures in parametrize, but a class member is enough for this test - transfer_mech = TransferMechanism(size=3) + transfer_mech = TransferMechanism(input_shapes=3) - @pytest.mark.parametrize('default_variable, size, input_ports, variable_len_state, variable_len_mech', [ + @pytest.mark.parametrize('default_variable, input_shapes, input_ports, variable_len_state, variable_len_mech', [ # default_variable tests ([0, 0], None, [transfer_mech], 2, 2), ([0, 0], None, [(transfer_mech, None)], 2, 2), ([0, 0], None, [(transfer_mech, 1, 1)], 2, 2), ([0, 0], None, [((RESULT, transfer_mech), 1, 1)], 2, 2), ([0, 0], None, [(transfer_mech, 1, 1, None)], 2, 2), - # size tests + # input_shapes tests (None, 2, [transfer_mech], 2, 2), (None, 2, [(transfer_mech, None)], 2, 2), (None, 2, [(transfer_mech, 1, 1)], 2, 2), (None, 2, [(transfer_mech, 1, 1, None)], 2, 2), - # no default_variable or size tests + # no default_variable or input_shapes tests (None, None, [transfer_mech], 3, 3), (None, None, [(transfer_mech, None)], 3, 3), (None, None, [(transfer_mech, 1, 1)], 3, 3), @@ -836,10 +836,10 @@ def test_list_of_mechanisms_with_gating_mechanism(self): # ([[0]], None, [{VARIABLE: [[0], [0]], FUNCTION: LinearCombination}], 2, 1), # (None, 1, [{VARIABLE: [0, 0], FUNCTION: Reduce(weights=[1, -1])}], 2, 1), ]) - def test_mech_and_tuple_specifications_with_and_without_default_variable_or_size( + def test_mech_and_tuple_specifications_with_and_without_default_variable_or_input_shapes( self, default_variable, - size, + input_shapes, input_ports, variable_len_state, variable_len_mech, @@ -849,7 +849,7 @@ def test_mech_and_tuple_specifications_with_and_without_default_variable_or_size T = TransferMechanism( default_variable=default_variable, - size=size, + input_shapes=input_shapes, input_ports=input_ports ) assert T.input_ports[0].socket_width == variable_len_state diff --git a/tests/mechanisms/test_kwta.py b/tests/mechanisms/test_kwta.py index b9449ea4cbf..a481a27ae90 100644 --- a/tests/mechanisms/test_kwta.py +++ b/tests/mechanisms/test_kwta.py @@ -17,17 +17,17 @@ def test_kwta_empty_spec(self): K = KWTAMechanism() np.testing.assert_allclose(K.value, K.defaults.value) assert K.defaults.variable == [[0]] - assert K.size == 
[1] + assert K.input_shapes == [1] assert K.matrix.base == [[5]] def test_kwta_check_attrs(self): K = KWTAMechanism( name='K', - size=3 + input_shapes=3 ) np.testing.assert_allclose(K.value, K.defaults.value) np.testing.assert_allclose(K.defaults.variable, [[0., 0., 0.]]) - assert K.size == [3] + assert K.input_shapes == [3] np.testing.assert_allclose(K.matrix.base, [[5, 0, 0], [0, 5, 0], [0, 0, 5]]) assert K.recurrent_projection.sender is K.output_port assert K.recurrent_projection.receiver is K.input_port @@ -54,7 +54,7 @@ def test_kwta_inputs_list_of_strings(self): with pytest.raises(MechanismError) as error_text: K = KWTAMechanism( name='K', - size = 4, + input_shapes= 4, ) K.execute(["one", "two", "three", "four"]) assert ('Input to \'K\' ([\'one\' \'two\' \'three\' \'four\']) is incompatible with its corresponding ' @@ -73,7 +73,7 @@ def test_recurrent_mech_inputs_mismatched_with_default_longer(self): with pytest.raises(MechanismError) as error_text: K = KWTAMechanism( name='K', - size=4 + input_shapes=4 ) K.execute([1, 2, 3, 4, 5]) assert ("Shape ((5,)) of input ([1 2 3 4 5]) does not match required shape ((4,)) " @@ -83,7 +83,7 @@ def test_recurrent_mech_inputs_mismatched_with_default_shorter(self): with pytest.raises(MechanismError) as error_text: K = KWTAMechanism( name='K', - size=6 + input_shapes=6 ) K.execute([1, 2, 3, 4, 5]) assert ("Shape ((5,)) of input ([1 2 3 4 5]) does not match required shape ((6,)) " @@ -97,7 +97,7 @@ def test_kwta_function_various_spec(self): for s in specs: K = KWTAMechanism( name='K', - size=5, + input_shapes=5, function=s, k_value=4 ) @@ -106,7 +106,7 @@ def test_kwta_function_various_spec(self): def test_kwta_log_gain(self): K = KWTAMechanism( name='K', - size=3, + input_shapes=3, function=Logistic(gain=2), k_value=2 ) @@ -116,7 +116,7 @@ def test_kwta_log_gain(self): def test_kwta_log_offset(self): K = KWTAMechanism( name='K', - size=3, + input_shapes=3, function=Logistic(offset=-.2), k_value=2 ) @@ -127,7 +127,7 @@ def test_kwta_log_offset(self): def test_kwta_log_gain_offset(self): K = KWTAMechanism( name='K', - size=2, + input_shapes=2, function=Logistic(gain=-.2, offset=4), k_value=1 ) @@ -138,7 +138,7 @@ def test_kwta_linear(self): # inhibition would be positive: so instead it is set K = KWTAMechanism( name='K', threshold=3, - size=3, + input_shapes=3, k_value=2, function=Linear ) @@ -149,7 +149,7 @@ def test_kwta_linear_slope(self): K = KWTAMechanism( name='K', threshold=.5, - size=5, + input_shapes=5, k_value=2, function=Linear(slope=2) ) @@ -159,7 +159,7 @@ def test_kwta_linear_slope(self): def test_kwta_linear_system(self): K=KWTAMechanism( name='K', - size=4, + input_shapes=4, k_value=3, function=Linear ) @@ -172,7 +172,7 @@ def test_kwta_matrix_keyword_spec(self): if m != RANDOM_CONNECTIVITY_MATRIX: K = KWTAMechanism( name='K', - size=4, + input_shapes=4, matrix=m ) val = K.execute([10, 10, 10, 10]) @@ -181,7 +181,7 @@ def test_kwta_matrix_keyword_spec(self): def test_kwta_matrix_auto_hetero_spec(self): K = KWTAMechanism( name='K', - size=4, + input_shapes=4, auto=3, hetero=2 ) @@ -190,7 +190,7 @@ def test_kwta_matrix_auto_hetero_spec(self): def test_kwta_matrix_hetero_spec(self): K = KWTAMechanism( name='K', - size=3, + input_shapes=3, hetero=-.5, ) np.testing.assert_allclose(K.recurrent_projection.matrix.base, [[5, -.5, -.5], [-.5, 5, -.5], [-.5, -.5, 5]]) @@ -198,7 +198,7 @@ def test_kwta_matrix_hetero_spec(self): def test_kwta_matrix_auto_spec(self): K = KWTAMechanism( name='K', - size=3, + input_shapes=3, auto=-.5, ) 
np.testing.assert_allclose(K.recurrent_projection.matrix.base, [[-.5, 0, 0], [0, -.5, 0], [0, 0, -.5]]) @@ -210,7 +210,7 @@ class TestKWTARatio: def test_kwta_ratio_empty(self): K = KWTAMechanism( name='K', - size=4 + input_shapes=4 ) c = Composition(pathways=[K], prefs=TestKWTARatio.simple_prefs) @@ -226,7 +226,7 @@ def test_kwta_ratio_empty(self): def test_kwta_ratio_1(self): K = KWTAMechanism( name='K', - size=4, + input_shapes=4, ratio=1 ) c = Composition(pathways=[K], @@ -243,7 +243,7 @@ def test_kwta_ratio_1(self): def test_kwta_ratio_0(self): K = KWTAMechanism( name='K', - size=4, + input_shapes=4, ratio=0 ) c = Composition(pathways=[K], @@ -261,7 +261,7 @@ def test_kwta_ratio_0(self): def test_kwta_ratio_0_3(self): K = KWTAMechanism( name='K', - size=4, + input_shapes=4, ratio=0.3 ) c = Composition(pathways=[K], @@ -279,7 +279,7 @@ def test_kwta_ratio_2(self): with pytest.raises(KWTAError) as error_text: K = KWTAMechanism( name='K', - size=4, + input_shapes=4, ratio=2 ) assert "must be between 0 and 1" in str(error_text.value) @@ -288,7 +288,7 @@ def test_kwta_ratio_neg_1(self): with pytest.raises(KWTAError) as error_text: K = KWTAMechanism( name='K', - size=4, + input_shapes=4, ratio=-1 ) assert "must be between 0 and 1" in str(error_text.value) @@ -298,7 +298,7 @@ class TestKWTAKValue: def test_kwta_k_value_empty_size_4(self): K = KWTAMechanism( name='K', - size=4 + input_shapes=4 ) assert K.k_value.base == 0.5 c = Composition(pathways=[K], @@ -312,7 +312,7 @@ def test_kwta_k_value_empty_size_4(self): def test_kwta_k_value_empty_size_6(self): K = KWTAMechanism( name='K', - size=6 + input_shapes=6 ) assert K.k_value.base == 0.5 c = Composition(pathways=[K], @@ -327,7 +327,7 @@ def test_kwta_k_value_empty_size_6(self): def test_kwta_k_value_int_size_5(self): K = KWTAMechanism( name='K', - size=5, + input_shapes=5, k_value=3 ) assert K.k_value.base == 3 @@ -339,7 +339,7 @@ def test_kwta_k_value_int_size_5(self): # for size_val, expected_int_k in size_and_int_k_pairs: # K = KWTA( # name='K', - # size=size_val, + # input_shapes=size_val, # k_value=0.4 # ) # assert K.k_value.base == 0.4 @@ -353,7 +353,7 @@ def test_kwta_k_value_bad_float(self): with pytest.raises(KWTAError) as error_text: K = KWTAMechanism( name='K', - size=4, + input_shapes=4, k_value=2.5 ) assert "must be an integer, or between 0 and 1." 
in str(error_text.value) @@ -362,7 +362,7 @@ def test_kwta_k_value_list(self): with pytest.raises(KWTAError) as error_text: K = KWTAMechanism( name='K', - size=4, + input_shapes=4, k_value=[1, 2] ) assert "must be a single number" in str(error_text.value) @@ -371,7 +371,7 @@ def test_kwta_k_value_too_large(self): with pytest.raises(KWTAError) as error_text: K = KWTAMechanism( name='K', - size=4, + input_shapes=4, k_value=5 ) assert "was larger than the total number of elements" in str(error_text.value) @@ -380,7 +380,7 @@ def test_kwta_k_value_too_low(self): with pytest.raises(KWTAError) as error_text: K = KWTAMechanism( name='K', - size=4, + input_shapes=4, k_value=-5 ) assert "was larger than the total number of elements" in str(error_text.value) @@ -392,14 +392,14 @@ class TestKWTAThreshold: def test_kwta_threshold_empty(self): K = KWTAMechanism( name='K', - size=4 + input_shapes=4 ) assert K.threshold.base == 0 def test_kwta_threshold_int(self): K = KWTAMechanism( name='K', - size=4, + input_shapes=4, threshold=-1 ) c = Composition(pathways=[K], @@ -412,7 +412,7 @@ def test_kwta_threshold_int(self): def test_kwta_threshold_float(self): K = KWTAMechanism( name='K', - size=4, + input_shapes=4, threshold=0.5 ) c = Composition(pathways=[K], @@ -434,7 +434,7 @@ class TestKWTALongTerm: def test_kwta_size_10_k_3_threshold_1(self): K = KWTAMechanism( name='K', - size=10, + input_shapes=10, k_value=3, threshold=1, ) @@ -471,7 +471,7 @@ class TestKWTAAverageBased: def test_kwta_average_k_2(self): K = KWTAMechanism( name='K', - size=4, + input_shapes=4, k_value=2, threshold=0, function=Linear, @@ -486,7 +486,7 @@ def test_kwta_average_k_2(self): def test_kwta_average_k_1(self): K = KWTAMechanism( name='K', - size=4, + input_shapes=4, k_value=1, threshold=0, function=Linear, @@ -501,7 +501,7 @@ def test_kwta_average_k_1(self): def test_kwta_average_k_1_ratio_0_2(self): K = KWTAMechanism( name='K', - size=4, + input_shapes=4, k_value=1, threshold=0, ratio=0.2, @@ -517,7 +517,7 @@ def test_kwta_average_k_1_ratio_0_2(self): def test_kwta_average_k_1_ratio_0_8(self): K = KWTAMechanism( name='K', - size=4, + input_shapes=4, k_value=1, threshold=0, ratio=0.8, diff --git a/tests/mechanisms/test_lca.py b/tests/mechanisms/test_lca.py index 9996dca42d6..6482dd68b0e 100644 --- a/tests/mechanisms/test_lca.py +++ b/tests/mechanisms/test_lca.py @@ -65,9 +65,9 @@ def test_LCAMechanism_length_2(self, benchmark, comp_mode): # Note: since the LCAMechanism's threshold is not specified in this test, each execution only updates # the Mechanism once. - T = TransferMechanism(function=Linear(slope=1.0), size=2) + T = TransferMechanism(function=Linear(slope=1.0), input_shapes=2) L = LCAMechanism(function=Linear(slope=2.0), - size=2, + input_shapes=2, self_excitation=3.0, leak=0.5, competition=1.0, @@ -126,16 +126,16 @@ def test_equivalance_of_threshold_and_when_finished_condition(self): # that causes the LCAMechanism it to execute until it reaches threshold (2nd test). 
# loop Mechanism's call to execute - lca_until_thresh = LCAMechanism(size=2, leak=0.5, threshold=0.7) # Note: , execute_to_threshold=True by default - response = ProcessingMechanism(size=2) + lca_until_thresh = LCAMechanism(input_shapes=2, leak=0.5, threshold=0.7) # Note: , execute_to_threshold=True by default + response = ProcessingMechanism(input_shapes=2) comp = Composition() comp.add_linear_processing_pathway([lca_until_thresh, response]) result1 = comp.run(inputs={lca_until_thresh:[1,0]}) # loop Composition's call to Mechanism - lca_single_step = LCAMechanism(size=2, leak=0.5, threshold=0.7, execute_until_finished=False) + lca_single_step = LCAMechanism(input_shapes=2, leak=0.5, threshold=0.7, execute_until_finished=False) comp2 = Composition() - response2 = ProcessingMechanism(size=2) + response2 = ProcessingMechanism(input_shapes=2) comp2.add_linear_processing_pathway([lca_single_step,response2]) comp2.scheduler.add_condition(response2, WhenFinished(lca_single_step)) result2 = comp2.run(inputs={lca_single_step:[1,0]}) @@ -143,9 +143,9 @@ def test_equivalance_of_threshold_and_when_finished_condition(self): def test_LCAMechanism_matrix(self): matrix = [[0,-2],[-2,0]] - lca1 = LCAMechanism(size=2, leak=0.5, competition=2) + lca1 = LCAMechanism(input_shapes=2, leak=0.5, competition=2) np.testing.assert_allclose(lca1.matrix.base, matrix) - lca2 = LCAMechanism(size=2, leak=0.5, matrix=matrix) + lca2 = LCAMechanism(input_shapes=2, leak=0.5, matrix=matrix) np.testing.assert_allclose(lca1.matrix.base, lca2.matrix.base) # Note: In the following tests, since the LCAMechanism's threshold is specified @@ -154,7 +154,7 @@ def test_LCAMechanism_matrix(self): @pytest.mark.lca_mechanism @pytest.mark.benchmark(group="LCAMechanism") def test_LCAMechanism_threshold(self, benchmark, comp_mode): - lca = LCAMechanism(size=2, leak=0.5, threshold=0.7) + lca = LCAMechanism(input_shapes=2, leak=0.5, threshold=0.7) comp = Composition() comp.add_node(lca) @@ -163,7 +163,7 @@ def test_LCAMechanism_threshold(self, benchmark, comp_mode): @pytest.mark.composition def test_LCAMechanism_threshold_with_max_vs_next(self): - lca = LCAMechanism(size=3, leak=0.5, threshold=0.1, threshold_criterion=MAX_VS_NEXT) + lca = LCAMechanism(input_shapes=3, leak=0.5, threshold=0.1, threshold_criterion=MAX_VS_NEXT) comp = Composition() comp.add_node(lca) result = comp.run(inputs={lca:[1,0.5,0]}) @@ -171,7 +171,7 @@ def test_LCAMechanism_threshold_with_max_vs_next(self): @pytest.mark.composition def test_LCAMechanism_threshold_with_max_vs_avg(self): - lca = LCAMechanism(size=3, leak=0.5, threshold=0.1, threshold_criterion=MAX_VS_AVG) + lca = LCAMechanism(input_shapes=3, leak=0.5, threshold=0.1, threshold_criterion=MAX_VS_AVG) comp = Composition() comp.add_node(lca) result = comp.run(inputs={lca:[1,0.5,0]}) @@ -181,7 +181,7 @@ def test_LCAMechanism_threshold_with_max_vs_avg(self): @pytest.mark.lca_mechanism @pytest.mark.benchmark(group="LCAMechanism") def test_LCAMechanism_threshold_with_convergence(self, benchmark, comp_mode): - lca = LCAMechanism(size=3, leak=0.5, threshold=0.01, threshold_criterion=CONVERGENCE) + lca = LCAMechanism(input_shapes=3, leak=0.5, threshold=0.01, threshold_criterion=CONVERGENCE) comp = Composition() comp.add_node(lca) @@ -200,19 +200,20 @@ def test_equivalance_of_threshold_and_termination_specifications_just_threshold( # Note: This tests the equivalence of using LCAMechanism-specific threshold arguments and # generic TransferMechanism termination_<*> arguments - lca_thresh = LCAMechanism(size=2, leak=0.5, 
threshold=0.7) # Note: , execute_to_threshold=True by default - response = ProcessingMechanism(size=2) + lca_thresh = LCAMechanism(input_shapes=2, leak=0.5, threshold=0.7) # Note: , execute_to_threshold=True by default + response = ProcessingMechanism(input_shapes=2) comp = Composition() comp.add_linear_processing_pathway([lca_thresh, response]) result1 = comp.run(inputs={lca_thresh:[1,0]}, execution_mode=comp_mode) - lca_termination = LCAMechanism(size=2, + lca_termination = LCAMechanism( + input_shapes=2, leak=0.5, termination_threshold=0.7, termination_measure=max, termination_comparison_op='>=') comp2 = Composition() - response2 = ProcessingMechanism(size=2) + response2 = ProcessingMechanism(input_shapes=2) comp2.add_linear_processing_pathway([lca_termination,response2]) result2 = comp2.run(inputs={lca_termination:[1,0]}, execution_mode=comp_mode) np.testing.assert_allclose(result1, result2) @@ -222,27 +223,28 @@ def test_equivalance_of_threshold_and_termination_specifications_max_vs_next(sel # Note: This tests the equivalence of using LCAMechanism-specific threshold arguments and # generic TransferMechanism termination_<*> arguments - lca_thresh = LCAMechanism(size=3, leak=0.5, threshold=0.1, threshold_criterion=MAX_VS_NEXT) - response = ProcessingMechanism(size=3) + lca_thresh = LCAMechanism(input_shapes=3, leak=0.5, threshold=0.1, threshold_criterion=MAX_VS_NEXT) + response = ProcessingMechanism(input_shapes=3) comp = Composition() comp.add_linear_processing_pathway([lca_thresh, response]) result1 = comp.run(inputs={lca_thresh:[1,0.5,0]}) - lca_termination = LCAMechanism(size=3, + lca_termination = LCAMechanism( + input_shapes=3, leak=0.5, termination_threshold=0.1, termination_measure=max_vs_next, termination_comparison_op='>=') comp2 = Composition() - response2 = ProcessingMechanism(size=3) + response2 = ProcessingMechanism(input_shapes=3) comp2.add_linear_processing_pathway([lca_termination,response2]) result2 = comp2.run(inputs={lca_termination:[1,0.5,0]}) np.testing.assert_allclose(result1, result2) # def test_LCAMechanism_threshold_with_str(self): - # lca = LCAMechanism(size=2, threshold=0.7, threshold_criterion='MY_OUTPUT_PORT', + # lca = LCAMechanism(input_shapes=2, threshold=0.7, threshold_criterion='MY_OUTPUT_PORT', # output_ports=[RESULT, 'MY_OUTPUT_PORT']) - # response = ProcessingMechanism(size=2) + # response = ProcessingMechanism(input_shapes=2) # comp = Composition() # comp.add_linear_processing_pathway([lca,response]) # comp.scheduler.add_condition(response, WhenFinished(lca)) @@ -250,8 +252,8 @@ def test_equivalance_of_threshold_and_termination_specifications_max_vs_next(sel # np.testing.assert_allclose(result, [[0.71463572, 0.28536428]]) # # def test_LCAMechanism_threshold_with_int(self): - # lca = LCAMechanism(size=2, threshold=0.7, threshold_criterion=1, output_ports=[RESULT, 'MY_OUTPUT_PORT']) - # response = ProcessingMechanism(size=2) + # lca = LCAMechanism(input_shapes=2, threshold=0.7, threshold_criterion=1, output_ports=[RESULT, 'MY_OUTPUT_PORT']) + # response = ProcessingMechanism(input_shapes=2) # comp = Composition() # comp.add_linear_processing_pathway([lca,response]) # comp.scheduler.add_condition(response, WhenFinished(lca)) @@ -261,7 +263,8 @@ def test_equivalance_of_threshold_and_termination_specifications_max_vs_next(sel @pytest.mark.composition @pytest.mark.lca_mechanism def test_LCAMechanism_DDM_equivalent(self, comp_mode): - lca = LCAMechanism(size=2, leak=0., threshold=1, auto=0, hetero=0, + lca = LCAMechanism( + input_shapes=2, leak=0., 
threshold=1, auto=0, hetero=0, initial_value=[0, 0], execute_until_finished=False) comp1 = Composition() comp1.add_node(lca) diff --git a/tests/mechanisms/test_leabra_mechanism.py b/tests/mechanisms/test_leabra_mechanism.py index fe11e19ad99..90f61dc271d 100644 --- a/tests/mechanisms/test_leabra_mechanism.py +++ b/tests/mechanisms/test_leabra_mechanism.py @@ -105,10 +105,10 @@ def test_leabra_prec_no_train(self): L_net = LeabraMechanism(leabra_net2) # leabra_net should be identical to the network inside L_net - T1_spec = TransferMechanism(name='T1_spec', size=in_size, function=Linear) - T2_spec = TransferMechanism(name='T2_spec', size=out_size, function=Linear) - T1_net = TransferMechanism(name='T1_net', size=in_size, function=Linear) - T2_net = TransferMechanism(name='T2_net', size=out_size, function=Linear) + T1_spec = TransferMechanism(name='T1_spec', input_shapes=in_size, function=Linear) + T2_spec = TransferMechanism(name='T2_spec', input_shapes=out_size, function=Linear) + T1_net = TransferMechanism(name='T1_net', input_shapes=in_size, function=Linear) + T2_net = TransferMechanism(name='T2_net', input_shapes=out_size, function=Linear) proj_spec = MappingProjection(sender=T2_spec, receiver=L_spec.input_ports[1]) c_spec = Composition(pathways=[[T1_spec, L_spec],[T2_spec, proj_spec, L_spec]]) @@ -154,10 +154,10 @@ def test_leabra_prec_with_train(self): L_net = LeabraMechanism(leabra_net2) # leabra_net should be identical to the network inside L_net - T1_spec = TransferMechanism(name='T1_spec', size=in_size, function=Linear) - T2_spec = TransferMechanism(name='T2_spec', size=out_size, function=Linear) - T1_net = TransferMechanism(name='T1_net', size=in_size, function=Linear) - T2_net = TransferMechanism(name='T2_net', size=out_size, function=Linear) + T1_spec = TransferMechanism(name='T1_spec', input_shapes=in_size, function=Linear) + T2_spec = TransferMechanism(name='T2_spec', input_shapes=out_size, function=Linear) + T1_net = TransferMechanism(name='T1_net', input_shapes=in_size, function=Linear) + T2_net = TransferMechanism(name='T2_net', input_shapes=out_size, function=Linear) proj_spec = MappingProjection(sender=T2_spec, receiver=L_spec.input_ports[1]) c_spec = Composition(pathways=[[T1_spec, L_spec],[T2_spec, proj_spec, L_spec]]) @@ -205,10 +205,10 @@ def test_leabra_prec_half_train(self): L_net = LeabraMechanism(leabra_net2) # leabra_net should be identical to the network inside L_net - T1_spec = TransferMechanism(name='T1', size=in_size, function=Linear) - T2_spec = TransferMechanism(name='T2', size=out_size, function=Linear) - T1_net = TransferMechanism(name='T1', size=in_size, function=Linear) - T2_net = TransferMechanism(name='T2', size=out_size, function=Linear) + T1_spec = TransferMechanism(name='T1', input_shapes=in_size, function=Linear) + T2_spec = TransferMechanism(name='T2', input_shapes=out_size, function=Linear) + T1_net = TransferMechanism(name='T1', input_shapes=in_size, function=Linear) + T2_net = TransferMechanism(name='T2', input_shapes=out_size, function=Linear) proj_spec = MappingProjection(sender=T2_spec, receiver=L_spec.input_ports[1]) c_spec = Composition(pathways=[[T1_spec, L_spec], [T2_spec, proj_spec, L_spec]]) @@ -249,11 +249,11 @@ def test_leabra_prec_half_train(self): # class TestLeabraMechInSystem: # # def test_leabra_mech_learning(self): -# T1 = TransferMechanism(size=5, function=Linear) -# T2 = TransferMechanism(size=3, function=Linear) +# T1 = TransferMechanism(input_shapes=5, function=Linear) +# T2 = TransferMechanism(input_shapes=3, 
function=Linear) # L = LeabraMechanism(input_size=5, output_size=3, hidden_layers=2, hidden_sizes=[4, 4]) # train_data_proj = MappingProjection(sender=T2, receiver=L.input_ports[1]) -# out = TransferMechanism(size=3, function=Logistic(bias=2)) +# out = TransferMechanism(input_shapes=3, function=Logistic(bias=2)) # p1 = Process(pathway=[T1, L, out], learning=LEARNING, learning_rate=1.0, target=[0, .1, .8]) # p2 = Process(pathway=[T2, train_data_proj, L, out]) # s = System(processes=[p1, p2]) diff --git a/tests/mechanisms/test_mechanisms.py b/tests/mechanisms/test_mechanisms.py index 2835140936f..f07b2810aa2 100644 --- a/tests/mechanisms/test_mechanisms.py +++ b/tests/mechanisms/test_mechanisms.py @@ -46,8 +46,8 @@ def test_value_shapes(self, mechanism_type, default_variable, mechanism_value, f [pnl.GaussianDistort, pnl.NormalDist] ) def test_noise_assignment_equivalence(self, noise): - t1 = pnl.TransferMechanism(name='t1', size=2, noise=noise()) - t2 = pnl.TransferMechanism(name='t2', size=2) + t1 = pnl.TransferMechanism(name='t1', input_shapes=2, noise=noise()) + t2 = pnl.TransferMechanism(name='t2', input_shapes=2) t2.integrator_function.parameters.noise.set(noise()) t1.integrator_function.noise.seed.base = 0 @@ -79,7 +79,7 @@ def test_numeric_noise_specifications( except TypeError: size = 1 - t = pnl.TransferMechanism(size=size, noise=noise) + t = pnl.TransferMechanism(input_shapes=size, noise=noise) assert all(p in t.parameter_ports for p in included_parameter_ports) assert all(p not in t.parameter_ports for p in excluded_parameter_ports) @@ -100,7 +100,7 @@ def test_noise_change_warning_to_numeric(self, noise): except TypeError: size = 1 - t = pnl.TransferMechanism(size=size, noise=noise) + t = pnl.TransferMechanism(input_shapes=size, noise=noise) with pytest.warns( UserWarning, @@ -122,7 +122,7 @@ def test_noise_change_warning_to_function(self, noise): except TypeError: size = 1 - t = pnl.TransferMechanism(size=size, noise=noise) + t = pnl.TransferMechanism(input_shapes=size, noise=noise) with pytest.warns( UserWarning, diff --git a/tests/mechanisms/test_processing_mechanism.py b/tests/mechanisms/test_processing_mechanism.py index 5c2c206eb1f..192c2e18eb5 100644 --- a/tests/mechanisms/test_processing_mechanism.py +++ b/tests/mechanisms/test_processing_mechanism.py @@ -133,8 +133,8 @@ def test_processing_mechanism_TDLearning_function(self): def test_processing_mechanism_multiple_input_ports(self): - PM1 = ProcessingMechanism(size=[4, 4], function=LinearCombination, input_ports=['input_1', 'input_2']) - PM2 = ProcessingMechanism(size=[2, 2, 2], function=LinearCombination, input_ports=['1', '2', '3']) + PM1 = ProcessingMechanism(input_shapes=[4, 4], function=LinearCombination, input_ports=['input_1', 'input_2']) + PM2 = ProcessingMechanism(input_shapes=[2, 2, 2], function=LinearCombination, input_ports=['1', '2', '3']) PM1.execute([[1, 2, 3, 4], [5, 4, 2, 2]]) PM2.execute([[2, 0], [1, 3], [1, 0]]) np.testing.assert_allclose(PM1.value, [[6, 6, 5, 6]]) diff --git a/tests/mechanisms/test_recurrent_transfer_mechanism.py b/tests/mechanisms/test_recurrent_transfer_mechanism.py index 5dc162a19cb..7b7ad71d6a3 100644 --- a/tests/mechanisms/test_recurrent_transfer_mechanism.py +++ b/tests/mechanisms/test_recurrent_transfer_mechanism.py @@ -77,7 +77,7 @@ def test_recurrent_mech_empty_spec(self): def test_recurrent_mech_check_attrs(self): R = RecurrentTransferMechanism( name='R', - size=3, + input_shapes=3, auto=1.0 ) print("matrix = ", R.matrix.base) @@ -90,7 +90,7 @@ def 
test_recurrent_mech_check_attrs(self): def test_recurrent_mech_check_proj_attrs(self): R = RecurrentTransferMechanism( name='R', - size=3 + input_shapes=3 ) np.testing.assert_allclose(R.recurrent_projection.matrix.base, R.matrix.base) assert R.recurrent_projection.sender is R.output_port @@ -101,8 +101,8 @@ def test_recurrent_mech_check_proj_attrs(self): @pytest.mark.benchmark(group="RecurrentTransferMechanism") @pytest.mark.parametrize("variable, params", [ - pytest.param(([10, 12, 0, -1], [1, 2, 3, 0]), {'size': 4}, id="list_of_ints"), - pytest.param(([1.0, 1.2, 0., -1.3], [1., 5., 3., 0.]), {'size': 4}, id="list_of_floats"), + pytest.param(([10, 12, 0, -1], [1, 2, 3, 0]), {'input_shapes': 4}, id="list_of_ints"), + pytest.param(([1.0, 1.2, 0., -1.3], [1., 5., 3., 0.]), {'input_shapes': 4}, id="list_of_floats"), pytest.param(([10], [10]), {}, id="no_init_params"), ]) def test_recurrent_mech_inputs(self, benchmark, params, variable, mech_mode): @@ -122,7 +122,8 @@ def test_recurrent_mech_inputs(self, benchmark, params, variable, mech_mode): @pytest.mark.recurrent_transfer_mechanism @pytest.mark.benchmark(group="RecurrentTransferMechanism") def test_recurrent_mech_integrator(self, benchmark, mech_mode): - R = RecurrentTransferMechanism(size=2, + R = RecurrentTransferMechanism( + input_shapes=2, function=Logistic(), hetero=-2.0, integrator_mode=True, @@ -148,7 +149,8 @@ def test_recurrent_mech_integrator(self, benchmark, mech_mode): @pytest.mark.benchmark(group="RecurrentTransferMechanism") def test_recurrent_mech_lci(self, benchmark, mech_mode): LCI = pnl.LeakyCompetingIntegrator(rate=0.4) - R = RecurrentTransferMechanism(size=2, + R = RecurrentTransferMechanism( + input_shapes=2, hetero=-2.0, integrator_mode=True, integrator_function=LCI, @@ -170,7 +172,7 @@ def test_recurrent_mech_lci(self, benchmark, mech_mode): # def test_recurrent_mech_inputs_list_of_fns(self): # R = RecurrentTransferMechanism( # name='R', - # size=4, + # input_shapes=4, # integrator_mode=True # ) # val = R.execute([Linear().execute(), NormalDist().execute(), Exponential().execute(), ExponentialDist().execute()]) @@ -204,7 +206,7 @@ def test_recurrent_mech_inputs_mismatched_with_default_longer(self): with pytest.raises(MechanismError) as error_text: R = RecurrentTransferMechanism( name='R', - size=4 + input_shapes=4 ) R.execute([1, 2, 3, 4, 5]) assert ("Shape ((5,)) of input ([1 2 3 4 5]) does not match required shape ((4,)) " @@ -214,7 +216,7 @@ def test_recurrent_mech_inputs_mismatched_with_default_shorter(self): with pytest.raises(MechanismError) as error_text: R = RecurrentTransferMechanism( name='R', - size=6 + input_shapes=6 ) R.execute([1, 2, 3, 4, 5]) assert ("Shape ((5,)) of input ([1 2 3 4 5]) does not match required shape ((6,)) " @@ -230,19 +232,19 @@ def test_recurrent_mech_matrix_keyword_spec(self, matrix): pytest.skip("Random test") R = RecurrentTransferMechanism( name='R', - size=4, + input_shapes=4, matrix=matrix ) val = R.execute([10, 10, 10, 10]) np.testing.assert_allclose(val, [[10., 10., 10., 10.]]) - np.testing.assert_allclose(R.recurrent_projection.matrix.base, get_matrix(matrix, R.size[0], R.size[0])) + np.testing.assert_allclose(R.recurrent_projection.matrix.base, get_matrix(matrix, R.input_shapes[0], R.input_shapes[0])) @pytest.mark.parametrize("matrix", [pnl.array_from_matrix_string('1 2; 3 4'), np.array([[1, 2], [3, 4]]), [[1, 2], [3, 4]], '1 2; 3 4']) def test_recurrent_mech_matrix_other_spec(self, matrix): R = RecurrentTransferMechanism( name='R', - size=2, + input_shapes=2, matrix=matrix 
) val = R.execute([10, 10]) @@ -256,7 +258,7 @@ def test_recurrent_mech_matrix_other_spec(self, matrix): def test_recurrent_mech_matrix_auto_spec(self): R = RecurrentTransferMechanism( name='R', - size=3, + input_shapes=3, auto=2 ) assert isinstance(R.matrix.base, np.ndarray) @@ -266,7 +268,7 @@ def test_recurrent_mech_matrix_auto_spec(self): def test_recurrent_mech_matrix_hetero_spec(self): R = RecurrentTransferMechanism( name='R', - size=3, + input_shapes=3, hetero=-1 ) # (7/28/17 CW) these numbers assume that execute() leaves its value in the outputPort of the mechanism: if @@ -286,7 +288,7 @@ def test_recurrent_mech_matrix_hetero_spec(self): def test_recurrent_mech_matrix_auto_hetero_spec_size_1(self): R = RecurrentTransferMechanism( name='R', - size=1, + input_shapes=1, auto=-2, hetero=4.4 ) @@ -298,7 +300,7 @@ def test_recurrent_mech_matrix_auto_hetero_spec_size_1(self): def test_recurrent_mech_matrix_auto_hetero_spec_size_4(self): R = RecurrentTransferMechanism( name='R', - size=4, + input_shapes=4, auto=2.2, hetero=-3 ) @@ -311,7 +313,7 @@ def test_recurrent_mech_matrix_auto_hetero_matrix_spec(self): # when auto, hetero, and matrix are all specified, auto and hetero should take precedence R = RecurrentTransferMechanism( name='R', - size=4, + input_shapes=4, auto=2.2, hetero=-3, matrix=[[1, 2, 3, 4]] * 4 @@ -325,7 +327,7 @@ def test_recurrent_mech_auto_matrix_spec(self): # auto should override the diagonal only R = RecurrentTransferMechanism( name='R', - size=4, + input_shapes=4, auto=2.2, matrix=[[1, 2, 3, 4]] * 4 ) @@ -336,7 +338,7 @@ def test_recurrent_mech_auto_matrix_spec(self): def test_recurrent_mech_auto_array_matrix_spec(self): R = RecurrentTransferMechanism( name='R', - size=4, + input_shapes=4, auto=[1.1, 2.2, 3.3, 4.4], matrix=[[1, 2, 3, 4]] * 4 ) @@ -348,7 +350,7 @@ def test_recurrent_mech_hetero_float_matrix_spec(self): # hetero should override off-diagonal only R = RecurrentTransferMechanism( name='R', - size=4, + input_shapes=4, hetero=-2.2, matrix=[[1, 2, 3, 4]] * 4 ) @@ -362,7 +364,7 @@ def test_recurrent_mech_hetero_float_matrix_spec(self): def test_recurrent_mech_hetero_matrix_matrix_spec(self): R = RecurrentTransferMechanism( name='R', - size=4, + input_shapes=4, hetero=np.array([[-4, -3, -2, -1]] * 4), matrix=[[1, 2, 3, 4]] * 4 ) @@ -377,7 +379,7 @@ def test_recurrent_mech_auto_hetero_matrix_spec_v1(self): # auto and hetero should override matrix R = RecurrentTransferMechanism( name='R', - size=4, + input_shapes=4, auto=[1, 3, 5, 7], hetero=np.array([[-4, -3, -2, -1]] * 4), matrix=[[1, 2, 3, 4]] * 4 @@ -392,7 +394,7 @@ def test_recurrent_mech_auto_hetero_matrix_spec_v1(self): def test_recurrent_mech_auto_hetero_matrix_spec_v2(self): R = RecurrentTransferMechanism( name='R', - size=4, + input_shapes=4, auto=[3], hetero=np.array([[-4, -3, -2, -1]] * 4), matrix=[[1, 2, 3, 4]] * 4 @@ -407,7 +409,7 @@ def test_recurrent_mech_auto_hetero_matrix_spec_v2(self): def test_recurrent_mech_auto_hetero_matrix_spec_v3(self): R = RecurrentTransferMechanism( name='R', - size=4, + input_shapes=4, auto=[3], hetero=2, matrix=[[1, 2, 3, 4]] * 4 @@ -423,7 +425,7 @@ def test_recurrent_mech_matrix_too_large(self): with pytest.raises(RecurrentTransferError) as error_text: R = RecurrentTransferMechanism( name='R', - size=3, + input_shapes=3, matrix=[[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]] ) @@ -433,7 +435,7 @@ def test_recurrent_mech_matrix_too_small(self): with pytest.raises(RecurrentTransferError) as error_text: R = RecurrentTransferMechanism( name='R', - size=5, + 
input_shapes=5, matrix=[[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]] ) assert "must be the same as its variable" in str(error_text.value) @@ -442,7 +444,7 @@ def test_recurrent_mech_matrix_strings(self): with pytest.raises(RecurrentTransferError) as error_text: R = RecurrentTransferMechanism( name='R', - size=4, + input_shapes=4, matrix=[['a', 'b', 'c', 'd'], ['a', 'b', 'c', 'd'], ['a', 'b', 'c', 'd'], ['a', 'b', 'c', 'd']] ) assert "has non-numeric entries" in str(error_text.value) @@ -451,7 +453,7 @@ def test_recurrent_mech_matrix_nonsquare(self): with pytest.raises(RecurrentTransferError) as error_text: R = RecurrentTransferMechanism( name='R', - size=4, + input_shapes=4, matrix=[[1, 3]] ) assert "must be square" in str(error_text.value) @@ -460,7 +462,7 @@ def test_recurrent_mech_matrix_3d(self): with pytest.raises(FunctionError) as error_text: R = RecurrentTransferMechanism( name='R', - size=2, + input_shapes=2, matrix=[[[1, 3], [2, 4]], [[5, 7], [6, 8]]] ) assert "more than 2d" in str(error_text.value) @@ -472,7 +474,7 @@ def test_recurrent_mech_function_logistic(self): R = RecurrentTransferMechanism( name='R', - size=10, + input_shapes=10, function=Logistic(gain=2, offset=1) ) val = R.execute(np.ones(10)) @@ -484,7 +486,7 @@ def test_recurrent_mech_function_psyneulink(self): R = RecurrentTransferMechanism( name='R', - size=7, + input_shapes=7, function=a ) val = R.execute(np.zeros(7)) @@ -622,12 +624,12 @@ def test_recurrent_mech_transfer_mech_process_three_runs(self): # this test ASSUMES that the ParameterPort for auto and hetero is updated one run-cycle AFTER they are set by # lines by `R.auto = 0`. If this (potentially buggy) behavior is changed, then change these values R = RecurrentTransferMechanism( - size=4, + input_shapes=4, auto=0, hetero=-1 ) T = TransferMechanism( - size=3, + input_shapes=3, function=Linear ) c = Composition(pathways=[R, T], prefs=TestRecurrentTransferMechanismInComposition.simple_prefs) @@ -644,11 +646,11 @@ def test_recurrent_mech_transfer_mech_process_three_runs(self): def test_transfer_mech_process_matrix_change(self): from psyneulink.core.components.projections.pathway.mappingprojection import MappingProjection T1 = TransferMechanism( - size=4, + input_shapes=4, function=Linear) proj = MappingProjection(matrix=[[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]) T2 = TransferMechanism( - size=4, + input_shapes=4, function=Linear) c = Composition(pathways=[[T1, proj, T2]]) c.run(inputs={T1: [[1, 2, 3, 4]]}) @@ -663,11 +665,11 @@ def test_transfer_mech_process_matrix_change(self): def test_recurrent_mech_process_matrix_change(self): R = RecurrentTransferMechanism( - size=4, + input_shapes=4, auto=1, hetero=-1) T = TransferMechanism( - size=4, + input_shapes=4, function=Linear) c = Composition(pathways=[T, R], prefs=TestRecurrentTransferMechanismInComposition.simple_prefs) R.matrix = [[2, 0, 1, 3]] * 4 @@ -683,11 +685,11 @@ def test_recurrent_mech_process_matrix_change(self): # this test must wait until we create a property such that R.recurrent_projection.matrix sets R.auto and R.hetero def test_recurrent_mech_process_proj_matrix_change(self): R = RecurrentTransferMechanism( - size=4, + input_shapes=4, auto=1, hetero=-1) T = TransferMechanism( - size=4, + input_shapes=4, function=Linear) c = Composition(pathways=[T, R], prefs=TestRecurrentTransferMechanismInComposition.simple_prefs) R.recurrent_projection.matrix = [[2, 0, 1, 3]] * 4 @@ -708,11 +710,11 @@ def test_recurrent_mech_transfer_mech_composition_three_runs(self): # this test 
ASSUMES that the ParameterPort for auto and hetero is updated one run-cycle AFTER they are set by # lines by `R.auto = 0`. If this (potentially buggy) behavior is changed, then change these values R = RecurrentTransferMechanism( - size=4, + input_shapes=4, auto=0, hetero=-1) T = TransferMechanism( - size=3, + input_shapes=3, function=Linear) c = Composition(pathways=[R,T]) @@ -729,11 +731,11 @@ def test_recurrent_mech_transfer_mech_composition_three_runs(self): @pytest.mark.xfail(reason='Unsure if this is correct behavior - see note for _recurrent_transfer_mechanism_matrix_setter') def test_recurrent_mech_composition_auto_change(self): R = RecurrentTransferMechanism( - size=4, + input_shapes=4, auto=[1, 2, 3, 4], hetero=-1) T = TransferMechanism( - size=3, + input_shapes=3, function=Linear) c = Composition(pathways=[R, T], prefs=TestRecurrentTransferMechanismInComposition.simple_prefs) c.run(inputs={R: [[1, 2, 3, 4]]}) @@ -751,11 +753,11 @@ def test_recurrent_mech_composition_auto_change(self): @pytest.mark.xfail(reason='Unsure if this is correct behavior - see note for _recurrent_transfer_mechanism_matrix_setter') def test_recurrent_mech_composition_hetero_change(self): R = RecurrentTransferMechanism( - size=4, + input_shapes=4, auto=[1, 2, 3, 4], hetero=[[-1, -2, -3, -4]] * 4) T = TransferMechanism( - size=5, + input_shapes=5, function=Linear) c = Composition(pathways=[R, T], prefs=TestRecurrentTransferMechanismInComposition.simple_prefs) c.run(inputs={R: [[1, 2, 3, -0.5]]}) @@ -773,11 +775,11 @@ def test_recurrent_mech_composition_hetero_change(self): @pytest.mark.xfail(reason='Unsure if this is correct behavior - see note for _recurrent_transfer_mechanism_matrix_setter') def test_recurrent_mech_composition_auto_and_hetero_change(self): R = RecurrentTransferMechanism( - size=4, + input_shapes=4, auto=[1, 2, 3, 4], hetero=[[-1, -2, -3, -4]] * 4) T = TransferMechanism( - size=5, + input_shapes=5, function=Linear) c = Composition(pathways=[R,T], prefs=TestRecurrentTransferMechanismInComposition.simple_prefs) c.run(inputs={R: [[1, 2, 3, -0.5]]}) @@ -795,11 +797,11 @@ def test_recurrent_mech_composition_auto_and_hetero_change(self): @pytest.mark.xfail(reason='Unsure if this is correct behavior - see note for _recurrent_transfer_mechanism_matrix_setter') def test_recurrent_mech_composition_matrix_change(self): R = RecurrentTransferMechanism( - size=4, + input_shapes=4, auto=1, hetero=-1) T = TransferMechanism( - size=4, + input_shapes=4, function=Linear) c = Composition(pathways=[T, R], prefs=TestRecurrentTransferMechanismInComposition.simple_prefs) R.parameters.matrix.set([[2, 0, 1, 3]] * 4, c) @@ -812,7 +814,8 @@ def test_recurrent_mech_composition_matrix_change(self): np.testing.assert_allclose(R.parameters.value.get(c), [[21, 3, 12, 35]]) def test_recurrent_mech_with_learning(self): - R = RecurrentTransferMechanism(size=4, + R = RecurrentTransferMechanism( + input_shapes=4, function=Linear, matrix=np.full((4, 4), 0.1), enable_learning=True @@ -864,7 +867,8 @@ def test_recurrent_mech_with_learning(self): ) def test_recurrent_mech_change_learning_rate(self): - R = RecurrentTransferMechanism(size=4, + R = RecurrentTransferMechanism( + input_shapes=4, function=Linear, enable_learning=True, learning_rate=0.1 @@ -897,7 +901,7 @@ def test_recurrent_mech_change_learning_rate(self): def test_learning_of_orthognal_inputs(self): size=4 R = RecurrentTransferMechanism( - size=size, + input_shapes=size, function=Linear, enable_learning=True, auto=0, @@ -1026,7 +1030,7 @@ class 
TestCustomCombinationFunction: def test_rt_without_custom_comb_fct(self): R1 = RecurrentTransferMechanism( has_recurrent_input_port=True, - size=2, + input_shapes=2, ) result = R1.execute([1,2]) np.testing.assert_allclose(result, [[1,2]]) @@ -1036,7 +1040,7 @@ def my_fct(x): return x[0] * x[1] if len(x) == 2 else x[0] R2 = RecurrentTransferMechanism( has_recurrent_input_port=True, - size=2, + input_shapes=2, combination_function=my_fct ) result = R2.execute([1,2]) @@ -1170,7 +1174,7 @@ class TestDebugProperties: def test_defaults(self): R = RecurrentTransferMechanism(name='R', - size=3) + input_shapes=3) print("\n\nTEST DEFAULTS") print("\n\nAuto Values -----------------------------------") print("R.auto = ", R.auto) @@ -1207,7 +1211,7 @@ def test_defaults(self): def test_auto(self): auto_val = 10.0 R = RecurrentTransferMechanism(name='R', - size=3, + input_shapes=3, auto=auto_val) print("\n\nTEST AUTO [auto = ", auto_val, "]") @@ -1246,7 +1250,7 @@ def test_auto(self): def test_hetero(self): hetero_val = 10.0 R = RecurrentTransferMechanism(name='R', - size=3, + input_shapes=3, hetero=hetero_val) print("\n\nTEST HETERO [hetero = ", hetero_val, "]") print("\n\nAuto Values -----------------------------------") @@ -1289,7 +1293,7 @@ def test_auto_and_hetero(self): hetero_val = 5.0 R = RecurrentTransferMechanism(name='R', - size=3, + input_shapes=3, auto=auto_val, hetero=hetero_val) print("\n\nTEST AUTO AND HETERO\n [auto = ", auto_val, " | hetero = ", hetero_val, "] ") @@ -1331,7 +1335,7 @@ def test_matrix(self): [10.0, 10.0, 5.0]] R = RecurrentTransferMechanism(name='R', - size=3, + input_shapes=3, matrix=matrix_val) print("\n\nTEST MATRIX\n", matrix_val) print("\n\nAuto Values -----------------------------------") diff --git a/tests/mechanisms/test_transfer_mechanism.py b/tests/mechanisms/test_transfer_mechanism.py index 7f20beed9ee..49482129b28 100644 --- a/tests/mechanisms/test_transfer_mechanism.py +++ b/tests/mechanisms/test_transfer_mechanism.py @@ -43,7 +43,7 @@ def test_transfer_mech_inputs_list_of_ints(self, benchmark): T.reset_stateful_function_when = Never() val = benchmark(T.execute, [10 for i in range(VECTOR_SIZE)]) np.testing.assert_allclose(val, [[10.0 for i in range(VECTOR_SIZE)]]) - assert len(T.size) == 1 and T.size[0] == VECTOR_SIZE and isinstance(T.size[0], np.integer) + assert len(T.input_shapes) == 1 and T.input_shapes[0] == VECTOR_SIZE and isinstance(T.input_shapes[0], np.integer) # this test assumes size is returned as a 1D array: if it's not, then several tests in this file must be changed @pytest.mark.mechanism @@ -953,10 +953,10 @@ class TestTransferMechanismSize: def test_transfer_mech_size_int_check_var(self): T = TransferMechanism( name='T', - size=4 + input_shapes=4 ) np.testing.assert_array_equal(T.defaults.variable, [[0, 0, 0, 0]]) - assert len(T.size) == 1 and T.size[0] == 4 and isinstance(T.size[0], np.integer) + assert len(T.input_shapes) == 1 and T.input_shapes[0] == 4 and isinstance(T.input_shapes[0], np.integer) @pytest.mark.mechanism @@ -964,35 +964,35 @@ def test_transfer_mech_size_int_check_var(self): def test_transfer_mech_size_int_inputs_ints(self): T = TransferMechanism( name='T', - size=4 + input_shapes=4 ) val = T.execute([10, 10, 10, 10]) np.testing.assert_array_equal(val, [[10.0, 10.0, 10.0, 10.0]]) # ------------------------------------------------------------------------------------------------ # TEST 3 - # size = int, variable = list of floats + # input_shapes = int, variable = list of floats @pytest.mark.mechanism 
@pytest.mark.transfer_mechanism def test_transfer_mech_size_int_inputs_floats(self): T = TransferMechanism( name='T', - size=VECTOR_SIZE + input_shapes=VECTOR_SIZE ) val = T.execute([10.0 for i in range(VECTOR_SIZE)]) np.testing.assert_array_equal(val, [[10.0 for i in range(VECTOR_SIZE)]]) # ------------------------------------------------------------------------------------------------ # TEST 4 - # size = int, variable = list of functions + # input_shapes = int, variable = list of functions #@pytest.mark.mechanism #@pytest.mark.transfer_mechanism # def test_transfer_mech_size_int_inputs_fns(self): # T = TransferMechanism( # name='T', - # size=4, + # input_shapes=4, # integrator_mode=True # ) # val = T.execute([Linear().execute(), NormalDist().execute(), Exponential().execute(), ExponentialDist().execute()]) @@ -1000,14 +1000,14 @@ def test_transfer_mech_size_int_inputs_floats(self): # ------------------------------------------------------------------------------------------------ # TEST 8 - # size = float, variable = list of functions + # input_shapes = float, variable = list of functions #@pytest.mark.mechanism #@pytest.mark.transfer_mechanism # def test_transfer_mech_size_float_inputs_fns(self): # T = TransferMechanism( # name='T', - # size=4.0, + # input_shapes=4.0, # integrator_mode=True # ) # val = T.execute([Linear().execute(), NormalDist().execute(), Exponential().execute(), ExponentialDist().execute()]) @@ -1015,14 +1015,14 @@ def test_transfer_mech_size_int_inputs_floats(self): # ------------------------------------------------------------------------------------------------ # TEST 9 - # size = list of ints, check that variable is correct + # input_shapes = list of ints, check that variable is correct @pytest.mark.mechanism @pytest.mark.transfer_mechanism def test_transfer_mech_size_list_of_ints(self): T = TransferMechanism( name='T', - size=[2, 3, 4] + input_shapes=[2, 3, 4] ) assert len(T.defaults.variable) == 3 and len(T.defaults.variable[0]) == 2 and len(T.defaults.variable[1]) == 3 and len(T.defaults.variable[2]) == 4 @@ -1035,7 +1035,7 @@ def test_transfer_mech_size_list_of_ints(self): def test_transfer_mech_size_var_both_lists(self): T = TransferMechanism( name='T', - size=[2, 3], + input_shapes=[2, 3], default_variable=[[1, 2], [3, 4, 5]] ) assert len(T.defaults.variable) == 2 @@ -1044,17 +1044,17 @@ def test_transfer_mech_size_var_both_lists(self): # ------------------------------------------------------------------------------------------------ # TEST 12 - # size = int, variable = a compatible 2D array: check that variable is correct + # input_shapes = int, variable = a compatible 2D array: check that variable is correct @pytest.mark.mechanism @pytest.mark.transfer_mechanism def test_transfer_mech_size_scalar_var_2d(self): with pytest.raises( - ComponentError, match=r'size and default_variable arguments.*conflict.*' + ComponentError, match=r'input_shapes and default_variable arguments.*conflict.*' ): TransferMechanism( name='T', - size=2, + input_shapes=2, default_variable=[[1, 2], [3, 4]] ) @@ -1073,65 +1073,65 @@ def test_transfer_mech_var_2d_array(self): # ------------------------------------------------------------------------------------------------ # TEST 14 - # variable = a 1D array, size does not match: check that variable and output are correct + # variable = a 1D array, input_shapes does not match: check that variable and output are correct @pytest.mark.mechanism @pytest.mark.transfer_mechanism def test_transfer_mech_var_1D_size_wrong(self): with 
pytest.raises( - ComponentError, match=r'size and default_variable arguments.*conflict.*' + ComponentError, match=r'input_shapes and default_variable arguments.*conflict.*' ): TransferMechanism( name='T', default_variable=[1, 2, 3, 4], - size=2 + input_shapes=2 ) # ------------------------------------------------------------------------------------------------ # TEST 15 - # variable = a 1D array, size does not match again: check that variable and output are correct + # variable = a 1D array, input_shapes does not match again: check that variable and output are correct @pytest.mark.mechanism @pytest.mark.transfer_mechanism def test_transfer_mech_var_1D_size_wrong_2(self): with pytest.raises( - ComponentError, match=r'size and default_variable arguments.*conflict.*' + ComponentError, match=r'input_shapes and default_variable arguments.*conflict.*' ): TransferMechanism( name='T', default_variable=[1, 2, 3, 4], - size=[2, 3, 4] + input_shapes=[2, 3, 4] ) # ------------------------------------------------------------------------------------------------ # TEST 16 - # size = int, variable = incompatible array, check variable + # input_shapes = int, variable = incompatible array, check variable @pytest.mark.mechanism @pytest.mark.transfer_mechanism def test_transfer_mech_size_var_incompatible1(self): with pytest.raises( - ComponentError, match=r'size and default_variable arguments.*conflict.*' + ComponentError, match=r'input_shapes and default_variable arguments.*conflict.*' ): TransferMechanism( name='T', - size=2, + input_shapes=2, default_variable=[[1, 2], [3, 4, 5]] ) # ------------------------------------------------------------------------------------------------ # TEST 17 - # size = array, variable = incompatible array, check variable + # input_shapes = array, variable = incompatible array, check variable @pytest.mark.mechanism @pytest.mark.transfer_mechanism def test_transfer_mech_size_var_incompatible2(self): with pytest.raises( - ComponentError, match=r'size and default_variable arguments.*conflict.*' + ComponentError, match=r'input_shapes and default_variable arguments.*conflict.*' ): TransferMechanism( name='T', - size=[2, 2], + input_shapes=[2, 2], default_variable=[[1, 2], [3, 4, 5]] ) @@ -1141,7 +1141,7 @@ def test_transfer_mech_size_var_incompatible2(self): # ------------------------------------------------------------------------------------------------ # TEST 2 - # size = -1.0, check less-than-one error + # input_shapes = -1.0, check less-than-one error @pytest.mark.mechanism @pytest.mark.transfer_mechanism @@ -1149,7 +1149,7 @@ def test_transfer_mech_size_negative_one(self): with pytest.raises(ComponentError) as error_text: T = TransferMechanism( name='T', - size=-1, + input_shapes=-1, ) assert "negative dimensions" in str(error_text.value) @@ -1163,25 +1163,25 @@ def test_transfer_mech_size_negative_one(self): # with pytest.raises(UserWarning) as error_text: # T = TransferMechanism( # name='T', - # size=3.5, + # input_shapes=3.5, # ) # assert "cast to integer, its value changed" in str(error_text.value) # ------------------------------------------------------------------------------------------------ # TEST 4 - # size = 2D array, check too-many-dimensions warning + # input_shapes = 2D array, check too-many-dimensions warning # def test_transfer_mech_size_2d(self): # with pytest.raises(UserWarning) as error_text: # T = TransferMechanism( # name='T', - # size=[[2]], + # input_shapes=[[2]], # ) # assert "had more than one dimension" in str(error_text.value) # 
------------------------------------------------------------------------------------------------ # TEST 5 - # size = 2D array, check variable is correctly instantiated + # input_shapes = 2D array, check variable is correctly instantiated # for now, since the test above doesn't work, we use this tesT.6/30/17 (CW) @pytest.mark.mechanism @@ -1189,10 +1189,10 @@ def test_transfer_mech_size_negative_one(self): def test_transfer_mech_size_2d(self): T = TransferMechanism( name='T', - size=[[2]], + input_shapes=[[2]], ) assert len(T.defaults.variable) == 1 and len(T.defaults.variable[0]) == 2 - assert len(T.size) == 1 and T.size[0] == 2 + assert len(T.input_shapes) == 1 and T.input_shapes[0] == 2 class TestTransferMechanismMultipleInputPorts: @@ -1267,7 +1267,7 @@ def test_multiple_output_ports_for_multiple_input_ports(self, benchmark, mech_mo class TestIntegratorMode: def test_integrator_mode_simple_on_and_off(self): - T = TransferMechanism(size=2) + T = TransferMechanism(input_shapes=2) np.testing.assert_allclose(T.execute([0.5, 1]), [[0.5, 1]]) T.integrator_mode=True np.testing.assert_allclose(T.execute([0.5, 1]), [[0.25, 0.5 ]]) @@ -1654,21 +1654,21 @@ def test_reset_spec(self): # python values during execution is not implemented. @pytest.mark.usefixtures("comp_mode_no_llvm") def test_termination_measures(self, comp_mode): - stim_input = ProcessingMechanism(size=2, name='Stim Input') - stim_percept = TransferMechanism(name='Stimulus', size=2, function=Logistic) - instruction_input = ProcessingMechanism(size=2, function=Linear(slope=10)) - attention = LCAMechanism(name='Attention', size=2, function=Logistic, + stim_input = ProcessingMechanism(input_shapes=2, name='Stim Input') + stim_percept = TransferMechanism(name='Stimulus', input_shapes=2, function=Logistic) + instruction_input = ProcessingMechanism(input_shapes=2, function=Linear(slope=10)) + attention = LCAMechanism(name='Attention', input_shapes=2, function=Logistic, leak=8, competition=8, self_excitation=0, noise=0, time_step_size=.1, termination_threshold=3, termination_measure=TimeScale.TRIAL) - decision = TransferMechanism(name='Decision', size=2, + decision = TransferMechanism(name='Decision', input_shapes=2, integrator_mode=True, execute_until_finished=False, termination_threshold=0.65, termination_measure=max, termination_comparison_op=GREATER_THAN) - response = ProcessingMechanism(size=2, name='Response') + response = ProcessingMechanism(input_shapes=2, name='Response') comp = Composition() comp.add_linear_processing_pathway([stim_input, [[1,-1],[-1,1]], stim_percept, decision, response]) diff --git a/tests/models/test_botvinick.py b/tests/models/test_botvinick.py index e75704529bf..daa0fb4eca6 100644 --- a/tests/models/test_botvinick.py +++ b/tests/models/test_botvinick.py @@ -27,20 +27,24 @@ def test_botvinick_model(benchmark, comp_mode, reps): # SET UP MECHANISMS ---------------------------------------------------------------------------------------------------- # Linear input layer # colors: ('red', 'green'), words: ('RED','GREEN') - colors_input_layer = pnl.TransferMechanism(size=3, + colors_input_layer = pnl.TransferMechanism( + input_shapes=3, function=pnl.Linear, name='COLORS_INPUT') - words_input_layer = pnl.TransferMechanism(size=3, + words_input_layer = pnl.TransferMechanism( + input_shapes=3, function=pnl.Linear, name='WORDS_INPUT') - task_input_layer = pnl.TransferMechanism(size=2, + task_input_layer = pnl.TransferMechanism( + input_shapes=2, function=pnl.Linear, name='TASK_INPUT') # Task layer, tasks: ('name the 
color', 'read the word') - task_layer = pnl.RecurrentTransferMechanism(size=2, + task_layer = pnl.RecurrentTransferMechanism( + input_shapes=2, function=pnl.Logistic, hetero=-2, integrator_mode=True, @@ -49,14 +53,16 @@ def test_botvinick_model(benchmark, comp_mode, reps): # Hidden layer # colors: ('red','green', 'neutral') words: ('RED','GREEN', 'NEUTRAL') - colors_hidden_layer = pnl.RecurrentTransferMechanism(size=3, + colors_hidden_layer = pnl.RecurrentTransferMechanism( + input_shapes=3, function=pnl.Logistic(x_0=4.0), # bias 4.0 is -4.0 in the paper see Docs for description integrator_mode=True, hetero=-2, integration_rate=0.01, # cohen-huston text says 0.01 name='COLORS_HIDDEN') - words_hidden_layer = pnl.RecurrentTransferMechanism(size=3, + words_hidden_layer = pnl.RecurrentTransferMechanism( + input_shapes=3, function=pnl.Logistic(x_0=4.0), integrator_mode=True, hetero=-2, @@ -64,7 +70,8 @@ def test_botvinick_model(benchmark, comp_mode, reps): name='WORDS_HIDDEN') # Response layer, responses: ('red', 'green') - response_layer = pnl.RecurrentTransferMechanism(size=2, + response_layer = pnl.RecurrentTransferMechanism( + input_shapes=2, function=pnl.Logistic, hetero=-2.0, integrator_mode=True, diff --git a/tests/models/test_greedy_agent.py b/tests/models/test_greedy_agent.py index 45049892aa3..b7536456ad6 100644 --- a/tests/models/test_greedy_agent.py +++ b/tests/models/test_greedy_agent.py @@ -33,10 +33,10 @@ def test_simplified_greedy_agent(benchmark, comp_mode): player_len = prey_len = predator_len = obs_len # The original needs GaussianDistort -# player = ProcessingMechanism(size=prey_len, function=GaussianDistort, name="PLAYER OBS") -# prey = ProcessingMechanism(size=prey_len, function=GaussianDistort, name="PREY OBS") - player = TransferMechanism(size=prey_len, name="PLAYER OBS") - prey = TransferMechanism(size=prey_len, name="PREY OBS") +# player = ProcessingMechanism(input_shapes=prey_len, function=GaussianDistort, name="PLAYER OBS") +# prey = ProcessingMechanism(input_shapes=prey_len, function=GaussianDistort, name="PREY OBS") + player = TransferMechanism(input_shapes=prey_len, name="PLAYER OBS") + prey = TransferMechanism(input_shapes=prey_len, name="PREY OBS") # Use ComparatorMechanism to compute direction of action as difference of coordinates between player and prey: # note: unitization is done in main loop, to allow compilation of LinearCombination function) (TBI) @@ -70,8 +70,8 @@ def test_simplified_greedy_agent_random(benchmark, comp_mode): player_len = prey_len = predator_len = obs_len - player = ProcessingMechanism(size=prey_len, function=GaussianDistort, name="PLAYER OBS") - prey = ProcessingMechanism(size=prey_len, function=GaussianDistort, name="PREY OBS") + player = ProcessingMechanism(input_shapes=prey_len, function=GaussianDistort, name="PLAYER OBS") + prey = ProcessingMechanism(input_shapes=prey_len, function=GaussianDistort, name="PREY OBS") # Use ComparatorMechanism to compute direction of action as difference of coordinates between player and prey: # note: unitization is done in main loop, to allow compilation of LinearCombination function) (TBI) @@ -117,14 +117,14 @@ def test_predator_prey(benchmark, mode, ocm_mode, prng, samples, fp_type): player_len = prey_len = predator_len = obs_coords # Input Mechanisms - player_pos = ProcessingMechanism(size=player_len, name="PLAYER POS") - prey_pos = ProcessingMechanism(size=prey_len, name="PREY POS") - predator_pos = ProcessingMechanism(size=predator_len, name="PREDATOR POS") + player_pos = 
ProcessingMechanism(input_shapes=player_len, name="PLAYER POS") + prey_pos = ProcessingMechanism(input_shapes=prey_len, name="PREY POS") + predator_pos = ProcessingMechanism(input_shapes=predator_len, name="PREDATOR POS") # Perceptual Mechanisms - player_obs = ProcessingMechanism(size=prey_len, function=GaussianDistort, name="PLAYER OBS") - prey_obs = ProcessingMechanism(size=prey_len, function=GaussianDistort, name="PREY OBS") - predator_obs = TransferMechanism(size=predator_len, function=GaussianDistort, name="PREDATOR OBS") + player_obs = ProcessingMechanism(input_shapes=prey_len, function=GaussianDistort, name="PLAYER OBS") + prey_obs = ProcessingMechanism(input_shapes=prey_len, function=GaussianDistort, name="PREY OBS") + predator_obs = TransferMechanism(input_shapes=predator_len, function=GaussianDistort, name="PREDATOR OBS") def action_fn(variable): diff --git a/tests/ports/test_input_ports.py b/tests/ports/test_input_ports.py index 897053c2f60..fdf5773b40e 100644 --- a/tests/ports/test_input_ports.py +++ b/tests/ports/test_input_ports.py @@ -9,10 +9,10 @@ class TestInputPorts: def test_combine_param_alone(self): - t1 = pnl.TransferMechanism(size=2) - t2 = pnl.TransferMechanism(size=2) + t1 = pnl.TransferMechanism(input_shapes=2) + t2 = pnl.TransferMechanism(input_shapes=2) t3 = pnl.TransferMechanism( - size=2, + input_shapes=2, input_ports=pnl.InputPort( combine=pnl.PRODUCT)) c = pnl.Composition(pathways=[[t1, t3], [t2, t3]]) @@ -21,10 +21,10 @@ def test_combine_param_alone(self): np.testing.assert_allclose(val, [[3, 8]]) def test_combine_param_redundant_fct_class_spec(self): - t1 = pnl.TransferMechanism(size=2) - t2 = pnl.TransferMechanism(size=2) + t1 = pnl.TransferMechanism(input_shapes=2) + t2 = pnl.TransferMechanism(input_shapes=2) t3 = pnl.TransferMechanism( - size=2, + input_shapes=2, input_ports=pnl.InputPort(function=psyneulink.core.components.functions.nonstateful.combinationfunctions .LinearCombination, combine=pnl.PRODUCT)) @@ -34,10 +34,10 @@ def test_combine_param_redundant_fct_class_spec(self): np.testing.assert_allclose(val, [[3, 8]]) def test_combine_param_redundant_fct_constructor_spec(self): - t1 = pnl.TransferMechanism(size=2) - t2 = pnl.TransferMechanism(size=2) + t1 = pnl.TransferMechanism(input_shapes=2) + t2 = pnl.TransferMechanism(input_shapes=2) t3 = pnl.TransferMechanism( - size=2, + input_shapes=2, input_ports=pnl.InputPort(function=psyneulink.core.components.functions.nonstateful.combinationfunctions.LinearCombination(operation=pnl.PRODUCT), combine=pnl.PRODUCT)) c = pnl.Composition(pathways=[[t1, t3],[t2, t3]]) diff --git a/tests/projections/test_projection_specifications.py b/tests/projections/test_projection_specifications.py index adee0838155..d850aff939a 100644 --- a/tests/projections/test_projection_specifications.py +++ b/tests/projections/test_projection_specifications.py @@ -18,10 +18,10 @@ def test_projection_specification_formats(self): (currently it should be ignored; in the future, if/when Projections between the same sender and receiver in different Compositions are allowed, then it should be used) """ - M1 = pnl.ProcessingMechanism(size=2) - M2 = pnl.ProcessingMechanism(size=5) - M3 = pnl.ProcessingMechanism(size=4) - M4 = pnl.ProcessingMechanism(size=3) + M1 = pnl.ProcessingMechanism(input_shapes=2) + M2 = pnl.ProcessingMechanism(input_shapes=5) + M3 = pnl.ProcessingMechanism(input_shapes=4) + M4 = pnl.ProcessingMechanism(input_shapes=3) M1_M2_matrix = (np.arange(2 * 5).reshape((2, 5)) + 1) / (2 * 5) M2_M3_matrix = (np.arange(5 * 
4).reshape((5, 4)) + 1) / (5 * 4) @@ -483,8 +483,8 @@ def test_no_warning_when_matrix_specified(self): # KDM: this is a good candidate for pytest.parametrize def test_masked_mapping_projection(self): - t1 = pnl.TransferMechanism(size=2) - t2 = pnl.TransferMechanism(size=2) + t1 = pnl.TransferMechanism(input_shapes=2) + t2 = pnl.TransferMechanism(input_shapes=2) proj = pnl.MaskedMappingProjection(sender=t1, receiver=t2, matrix=[[1,2],[3,4]], @@ -495,8 +495,8 @@ def test_masked_mapping_projection(self): val = c.execute(inputs={t1:[1,2]}) np.testing.assert_allclose(val, [[8, 12]]) - t1 = pnl.TransferMechanism(size=2) - t2 = pnl.TransferMechanism(size=2) + t1 = pnl.TransferMechanism(input_shapes=2) + t2 = pnl.TransferMechanism(input_shapes=2) proj = pnl.MaskedMappingProjection(sender=t1, receiver=t2, matrix=[[1,2],[3,4]], @@ -507,8 +507,8 @@ def test_masked_mapping_projection(self): val = c.execute(inputs={t1:[1,2]}) np.testing.assert_allclose(val, [[1, 8]]) - t1 = pnl.TransferMechanism(size=2) - t2 = pnl.TransferMechanism(size=2) + t1 = pnl.TransferMechanism(input_shapes=2) + t2 = pnl.TransferMechanism(input_shapes=2) proj = pnl.MaskedMappingProjection(sender=t1, receiver=t2, mask=[[1,2],[3,4]], @@ -522,8 +522,8 @@ def test_masked_mapping_projection_mask_conficts_with_matrix(self): with pytest.raises(pnl.MaskedMappingProjectionError) as error_text: - t1 = pnl.TransferMechanism(size=2) - t2 = pnl.TransferMechanism(size=2) + t1 = pnl.TransferMechanism(input_shapes=2) + t2 = pnl.TransferMechanism(input_shapes=2) pnl.MaskedMappingProjection(sender=t1, receiver=t2, mask=[[1,2,3],[4,5,6]], diff --git a/tests/scheduling/test_scheduler.py b/tests/scheduling/test_scheduler.py index 7b768c7849d..3a108a729f5 100644 --- a/tests/scheduling/test_scheduler.py +++ b/tests/scheduling/test_scheduler.py @@ -1493,21 +1493,21 @@ def test_objective_and_control(self): def test_inline_control_mechanism_example(self): cueInterval = pnl.TransferMechanism( default_variable=[[0.0]], - size=1, + input_shapes=1, function=pnl.Linear(slope=1, intercept=0), output_ports=[pnl.RESULT], name='Cue-Stimulus Interval' ) taskLayer = pnl.TransferMechanism( default_variable=[[0.0, 0.0]], - size=2, + input_shapes=2, function=pnl.Linear(slope=1, intercept=0), output_ports=[pnl.RESULT], name='Task Input [I1, I2]' ) activation = pnl.LCAMechanism( default_variable=[[0.0, 0.0]], - size=2, + input_shapes=2, function=pnl.Logistic(gain=1), leak=.5, competition=2, @@ -1606,7 +1606,7 @@ def test_scheduler_conditions(self, comp_mode, condition, condition_params, expe output_ports=[pnl.DECISION_VARIABLE], name='DDM') - response = pnl.ProcessingMechanism(size=2, name="GATE") + response = pnl.ProcessingMechanism(input_shapes=2, name="GATE") comp = pnl.Composition() comp.add_linear_processing_pathway([decisionMaker, response]) From 314f4fa05c6b3869be5fec1d0068db07ceb41250 Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Sat, 2 Nov 2024 01:39:05 +0000 Subject: [PATCH 395/410] showgraph: make default subdirectory name consistent both "show_graph OUTPUT" and "show_graph output" were used. use lowercase version. 
--- psyneulink/core/compositions/showgraph.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/psyneulink/core/compositions/showgraph.py b/psyneulink/core/compositions/showgraph.py index 2bb18641d9e..e42e84ca601 100644 --- a/psyneulink/core/compositions/showgraph.py +++ b/psyneulink/core/compositions/showgraph.py @@ -288,6 +288,10 @@ NUM_NESTING_LEVELS = 'num_nesting_levels' COMP_HIERARCHY = 'comp_hierarchy' # dict specifying the enclosing composition at each level of nesting + +default_showgraph_subdir = 'show_graph output' + + class ShowGraphError(Exception): def __init__(self, error_value): @@ -2655,7 +2659,7 @@ def get_index_of_node_in_G_body(node, node_type: Literal['MECHANISM', 'Projectio try: if output_fmt == 'pdf': # G.format = 'svg' - G.view(composition.name.replace(" ", "-"), cleanup=True, directory='show_graph OUTPUT/PDFS') + G.view(composition.name.replace(" ", "-"), cleanup=True, directory=f'{default_showgraph_subdir}/PDFS') # Generate images for animation elif output_fmt == 'gif': @@ -2814,7 +2818,7 @@ def _set_up_animation(self, context): if isinstance(composition._animate, dict): # Assign directory for animation files from psyneulink._version import root_dir - default_dir = root_dir + '/../show_graph output/GIFs/' + composition.name # + " gifs" + default_dir = root_dir + f'/../{default_showgraph_subdir}/GIFs/' + composition.name # + " gifs" # try: # rmtree(composition._animate_directory) # except: From 6ea23610353e33feb59cb2993538727cf796bac7 Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Sat, 2 Nov 2024 03:42:31 +0000 Subject: [PATCH 396/410] showgraph: rename default subdirectory from "show_graph output" to "pnl-show_graph-output" (no space, with pnl label) --- psyneulink/core/compositions/showgraph.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/psyneulink/core/compositions/showgraph.py b/psyneulink/core/compositions/showgraph.py index e42e84ca601..b2c7759d7e8 100644 --- a/psyneulink/core/compositions/showgraph.py +++ b/psyneulink/core/compositions/showgraph.py @@ -289,7 +289,7 @@ COMP_HIERARCHY = 'comp_hierarchy' # dict specifying the enclosing composition at each level of nesting -default_showgraph_subdir = 'show_graph output' +default_showgraph_subdir = 'pnl-show_graph-output' class ShowGraphError(Exception): From a36900bba65b00ad98913d32257f2ef1a6651e14 Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Sat, 2 Nov 2024 01:42:41 +0000 Subject: [PATCH 397/410] showgraph: use pathlib to create output dirs instead of str append creating paths split by '/' may break on windows. pathlib handles OS path separators --- psyneulink/core/compositions/composition.py | 5 +++-- psyneulink/core/compositions/showgraph.py | 18 ++++++++++++------ 2 files changed, 15 insertions(+), 8 deletions(-) diff --git a/psyneulink/core/compositions/composition.py b/psyneulink/core/compositions/composition.py index 3ea74f1719a..7643eff21f9 100644 --- a/psyneulink/core/compositions/composition.py +++ b/psyneulink/core/compositions/composition.py @@ -2882,6 +2882,7 @@ def input_function(env, result): import numbers import itertools import logging +import pathlib import sys import typing import warnings @@ -11056,7 +11057,7 @@ def run( are animated; if it is greater than the number of trials being run, only the number being run are animated. 
- * *MOVIE_DIR*: str (default=project root dir) -- specifies the directdory to be used for the movie file; + * *MOVIE_DIR*: str or os.PathLike (default=project root dir) -- specifies the directdory to be used for the movie file; by default a subdirectory of /show_graph_OUTPUT/GIFS is created using the `name ` of the `Composition`, and the gif files are stored there. @@ -11490,7 +11491,7 @@ def run( if self._animate is not False: # Save list of gifs in self._animation as movie file - movie_path = self._animation_directory + '/' + self._movie_filename + movie_path = pathlib.Path(self._animation_directory, self._movie_filename) self._animation[0].save(fp=movie_path, format='GIF', save_all=True, diff --git a/psyneulink/core/compositions/showgraph.py b/psyneulink/core/compositions/showgraph.py index b2c7759d7e8..c30f7b36c2d 100644 --- a/psyneulink/core/compositions/showgraph.py +++ b/psyneulink/core/compositions/showgraph.py @@ -216,6 +216,7 @@ """ import inspect +import pathlib import warnings from psyneulink._typing import Union @@ -2659,7 +2660,7 @@ def get_index_of_node_in_G_body(node, node_type: Literal['MECHANISM', 'Projectio try: if output_fmt == 'pdf': # G.format = 'svg' - G.view(composition.name.replace(" ", "-"), cleanup=True, directory=f'{default_showgraph_subdir}/PDFS') + G.view(composition.name.replace(" ", "-"), cleanup=True, directory=pathlib.Path(default_showgraph_subdir, 'PDFS')) # Generate images for animation elif output_fmt == 'gif': @@ -2818,7 +2819,7 @@ def _set_up_animation(self, context): if isinstance(composition._animate, dict): # Assign directory for animation files from psyneulink._version import root_dir - default_dir = root_dir + f'/../{default_showgraph_subdir}/GIFs/' + composition.name # + " gifs" + default_dir = pathlib.Path(root_dir, '..', default_showgraph_subdir, 'GIFs', composition.name) # try: # rmtree(composition._animate_directory) # except: @@ -2850,9 +2851,14 @@ def _set_up_animation(self, context): raise ShowGraphError(f"{repr(SIMULATIONS)} entry of {repr('animate')} argument for " f"{repr('show_graph')} method of {composition.name} ({composition._animate_num_trials}) " f"must a boolean.") - if not isinstance(composition._animation_directory, str): - raise ShowGraphError(f"{repr(MOVIE_DIR)} entry of {repr('animate')} argument for {repr('run')} " - f"method of {composition.name} ({composition._animation_directory}) must be a string.") + try: + composition._animation_directory = pathlib.Path(composition._animation_directory) + except TypeError: + raise ShowGraphError( + f"{repr(MOVIE_DIR)} entry of 'animate' argument for 'run'" + f" method of {composition.name} ({composition._animation_directory})" + " must be a string or os.PathLike." 
+ ) if not isinstance(composition._movie_filename, str): raise ShowGraphError(f"{repr(MOVIE_NAME)} entry of {repr('animate')} argument for {repr('run')} " f"method of {composition.name} ({composition._movie_filename}) must be a string.") @@ -2934,7 +2940,7 @@ def create_time_string(time, spec): G.attr(fontsize='14') index = repr(composition._component_animation_execution_count) image_filename = '-'.join([repr(run_num), repr(trial_num), index]) - image_file = composition._animation_directory + '/' + image_filename + '.gif' + image_file = pathlib.Path(composition._animation_directory, image_filename + '.gif') G.render(filename=image_filename, directory=composition._animation_directory, cleanup=True, From d5a658fa22f83f953acf9f8d4a2dd52235da6e12 Mon Sep 17 00:00:00 2001 From: Katherine Mantel Date: Sat, 2 Nov 2024 02:13:41 +0000 Subject: [PATCH 398/410] showgraph: do not store output files in site-packages showgraph used the psyneulink module directory to determine where to store output by default. If psyneulink isn't installed as editable, this would be in python's site-packages directory. This commit instead uses the user's current directory in this case. --- psyneulink/_version.py | 1 - psyneulink/core/compositions/composition.py | 9 ++++--- psyneulink/core/compositions/showgraph.py | 29 ++++++++++++++++++--- 3 files changed, 32 insertions(+), 7 deletions(-) diff --git a/psyneulink/_version.py b/psyneulink/_version.py index f79bbcc8130..054815f08bb 100644 --- a/psyneulink/_version.py +++ b/psyneulink/_version.py @@ -19,7 +19,6 @@ from typing import Any, Callable, Dict, List, Optional, Tuple import functools -root_dir = os.path.abspath(os.path.dirname(__file__)) def get_keywords() -> Dict[str, str]: """Get the keywords needed to look up the version information.""" diff --git a/psyneulink/core/compositions/composition.py b/psyneulink/core/compositions/composition.py index 7643eff21f9..2b41a117863 100644 --- a/psyneulink/core/compositions/composition.py +++ b/psyneulink/core/compositions/composition.py @@ -11057,9 +11057,12 @@ def run( are animated; if it is greater than the number of trials being run, only the number being run are animated. - * *MOVIE_DIR*: str or os.PathLike (default=project root dir) -- specifies the directdory to be used for the movie file; - by default a subdirectory of /show_graph_OUTPUT/GIFS is created using the `name - ` of the `Composition`, and the gif files are stored there. + * *MOVIE_DIR*: str or os.PathLike (default=PsyNeuLink root + dir or current dir) -- specifies the directory to be used + for the movie file; by default a subdirectory of + /pnl-show_graph-output/GIFs is created using + the `name ` of the `Composition`, and + the gif files are stored there. * *MOVIE_NAME*: str (default=\\ `name ` + 'movie') -- specifies the name to be used for the movie file; it is automatically appended with '.gif'. 
diff --git a/psyneulink/core/compositions/showgraph.py b/psyneulink/core/compositions/showgraph.py index c30f7b36c2d..1c766592f2d 100644 --- a/psyneulink/core/compositions/showgraph.py +++ b/psyneulink/core/compositions/showgraph.py @@ -217,12 +217,14 @@ import inspect import pathlib +import site import warnings from psyneulink._typing import Union import numpy as np from beartype import beartype +import psyneulink from psyneulink._typing import Optional, Union, Literal from PIL import Image @@ -293,6 +295,28 @@ default_showgraph_subdir = 'pnl-show_graph-output' +def get_default_showgraph_dir(): + pnl_module_dir = pathlib.Path(psyneulink.__file__).parent.absolute() + try: + site_packages_dirs = site.getsitepackages() + except AttributeError: + # virtualenv <20 overrides site and has no getsitepackages + site_packages_dirs = [] + + # if psyneulink is installed in site-packages (not local/editable), + # don't put show_graph files there + for d in site_packages_dirs: + if pathlib.Path(d) in pnl_module_dir.parents: + default_dir = pathlib.Path('.') + break + else: + default_dir = pnl_module_dir.parent + + default_dir = default_dir.joinpath(default_showgraph_subdir) + + return default_dir + + class ShowGraphError(Exception): def __init__(self, error_value): @@ -2660,7 +2684,7 @@ def get_index_of_node_in_G_body(node, node_type: Literal['MECHANISM', 'Projectio try: if output_fmt == 'pdf': # G.format = 'svg' - G.view(composition.name.replace(" ", "-"), cleanup=True, directory=pathlib.Path(default_showgraph_subdir, 'PDFS')) + G.view(composition.name.replace(" ", "-"), cleanup=True, directory=get_default_showgraph_dir().joinpath('PDFS')) # Generate images for animation elif output_fmt == 'gif': @@ -2818,8 +2842,7 @@ def _set_up_animation(self, context): if isinstance(composition._animate, dict): # Assign directory for animation files - from psyneulink._version import root_dir - default_dir = pathlib.Path(root_dir, '..', default_showgraph_subdir, 'GIFs', composition.name) + default_dir = get_default_showgraph_dir().joinpath('GIFs', composition.name) # try: # rmtree(composition._animate_directory) # except: From 23e0ee14296e5be8bcfb57eabef845d30d7f24c0 Mon Sep 17 00:00:00 2001 From: jdcpni Date: Wed, 6 Nov 2024 08:21:21 -0500 Subject: [PATCH 399/410] refactor/linearmatrix_matrixtransform (#3101) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit • combinationfunctions(.py) -> transformfunctions(.py) • transferfunctions.py: - rename LinearCombination as MatrixTransform and move to transformfunctions.py • transformfunctions: - MatrixTransform: add operation parameter • recurrenttransfermechanism.py - correct names for standard_outputports ENERGY and ENTROPY • lccontrolmechanism.py: - add keywords for modulable fitzhugh-nagumo params • emcomposition.py: - use MatrixTransform(operation=L0) w/o normalization for retrieval if len(key)==1 • 'EGO Model - CSW with Simple Integrator.py': - Younes' script corrections, now functions propery! 
--- .../EGO/Using EMComposition/DeclanParams.py | 6 +- .../EGO Model - CSW with Simple Integrator.py | 20 +- .../EGO/Using EMComposition/ScriptControl.py | 2 +- .../NeuroML Example.py | 4 +- .../nback/nback_og_pytorch.py | 6 +- psyneulink/core/components/component.py | 6 +- .../core/components/functions/__init__.py | 6 +- .../nonstateful/transferfunctions.py | 636 +---------------- ...tionfunctions.py => transformfunctions.py} | 674 +++++++++++++++++- .../modulatory/control/controlmechanism.py | 4 +- .../processing/objectivemechanism.py | 6 +- .../processing/transfermechanism.py | 2 +- psyneulink/core/components/ports/inputport.py | 4 +- .../ports/modulatorysignals/controlsignal.py | 2 +- psyneulink/core/components/ports/port.py | 4 +- .../modulatory/learningprojection.py | 2 +- .../projections/pathway/mappingprojection.py | 12 +- .../core/components/projections/projection.py | 14 +- psyneulink/core/compositions/composition.py | 2 +- psyneulink/core/globals/keywords.py | 8 +- .../control/agt/lccontrolmechanism.py | 23 +- .../mechanisms/processing/integrator/ddm.py | 2 +- .../objective/comparatormechanism.py | 4 +- .../objective/predictionerrormechanism.py | 2 +- .../processing/transfer/lcamechanism.py | 2 +- .../transfer/recurrenttransfermechanism.py | 6 +- .../pathway/autoassociativeprojection.py | 12 +- .../pathway/maskedmappingprojection.py | 2 +- .../library/compositions/emcomposition.py | 35 +- .../library/compositions/pytorchwrappers.py | 4 +- tests/composition/test_composition.py | 2 +- tests/functions/test_combination.py | 8 +- tests/functions/test_transfer.py | 6 +- tests/log/test_log.py | 4 + tests/mechanisms/test_input_port_spec.py | 2 +- tests/mechanisms/test_processing_mechanism.py | 29 +- .../test_recurrent_transfer_mechanism.py | 2 +- tests/mechanisms/test_transfer_mechanism.py | 2 +- tests/ports/test_input_ports.py | 8 +- 39 files changed, 819 insertions(+), 756 deletions(-) rename psyneulink/core/components/functions/nonstateful/{combinationfunctions.py => transformfunctions.py} (75%) diff --git a/Scripts/Models (Under Development)/EGO/Using EMComposition/DeclanParams.py b/Scripts/Models (Under Development)/EGO/Using EMComposition/DeclanParams.py index ddc95037997..7209121c186 100644 --- a/Scripts/Models (Under Development)/EGO/Using EMComposition/DeclanParams.py +++ b/Scripts/Models (Under Development)/EGO/Using EMComposition/DeclanParams.py @@ -56,8 +56,8 @@ def calc_prob(em_preds, test_ys): # concatenate_queries = True, # environment - curriculum_type = 'Interleaved', - # curriculum_type = 'Blocked', + # curriculum_type = 'Interleaved', + curriculum_type = 'Blocked', # num_stims = 100, # Integer or ALL num_stims = ALL, # Integer or ALL @@ -75,7 +75,7 @@ def calc_prob(em_preds, test_ys): # softmax_temperature = CONTROL, # temperature of the softmax used during memory retrieval (smaller means more argmax-like # softmax_threshold = None, # threshold used to mask out small values in softmax softmax_threshold = .001, # threshold used to mask out small values in softmax - enable_learning=[True, False, False], # Enable learning for PREDICTION (STATE) but not CONTEXT or PREVIOUS STATE + enable_learning=[False, False, True], # Enable learning for PREDICTION (STATE) but not CONTEXT or PREVIOUS STATE learn_field_weights = False, loss_spec = Loss.BINARY_CROSS_ENTROPY, # loss_spec = Loss.MSE, diff --git a/Scripts/Models (Under Development)/EGO/Using EMComposition/EGO Model - CSW with Simple Integrator.py b/Scripts/Models (Under Development)/EGO/Using EMComposition/EGO Model - CSW 
with Simple Integrator.py index c6408d3cc6a..561f8a881a2 100644 --- a/Scripts/Models (Under Development)/EGO/Using EMComposition/EGO Model - CSW with Simple Integrator.py +++ b/Scripts/Models (Under Development)/EGO/Using EMComposition/EGO Model - CSW with Simple Integrator.py @@ -251,13 +251,21 @@ def construct_model(model_name:str=model_params['name'], softmax_gain=retrieval_softmax_gain, softmax_threshold=retrieval_softmax_threshold, # Input Nodes: - field_names=[state_input_name, - previous_state_input_name, + # field_names=[state_input_name, + # previous_state_input_name, + # context_name, + # ], + # field_weights=(state_retrieval_weight, + # previous_state_retrieval_weight, + # context_retrieval_weight + # ), + field_names=[previous_state_input_name, context_name, + state_input_name, ], - field_weights=(state_retrieval_weight, - previous_state_retrieval_weight, - context_retrieval_weight + field_weights=(previous_state_retrieval_weight, + context_retrieval_weight, + state_retrieval_weight, ), normalize_field_weights=normalize_field_weights, concatenate_queries=concatenate_queries, @@ -450,7 +458,7 @@ def eval_weights(weight_mat): axes[1].set_xlabel('Stimuli') axes[1].set_ylabel(model_params['loss_spec']) # Logit of loss - axes[2].plot( (model.results[2:TOTAL_NUM_STIMS,2]*TARGETS[:TOTAL_NUM_STIMS-2]).sum(-1) ) + axes[2].plot( (model.results[1:TOTAL_NUM_STIMS,2]*TARGETS[:TOTAL_NUM_STIMS-1]).sum(-1) ) axes[2].set_xlabel('Stimuli') axes[2].set_ylabel('Correct Logit') plt.suptitle(f"{model_params['curriculum_type']} Training") diff --git a/Scripts/Models (Under Development)/EGO/Using EMComposition/ScriptControl.py b/Scripts/Models (Under Development)/EGO/Using EMComposition/ScriptControl.py index a06c4a95058..78a9bf96f1b 100644 --- a/Scripts/Models (Under Development)/EGO/Using EMComposition/ScriptControl.py +++ b/Scripts/Models (Under Development)/EGO/Using EMComposition/ScriptControl.py @@ -9,7 +9,7 @@ DISPLAY_MODEL = ( # Only one of the following can be uncommented: None # suppress display of model # { # show simple visual display of model - # # 'show_pytorch': True, # show pytorch graph of model + # 'show_pytorch': True, # show pytorch graph of model # 'show_learning': True # # 'show_projections_not_in_composition': True, # # 'exclude_from_gradient_calc_style': 'dashed'# show target mechanisms for learning diff --git a/Scripts/Models (Under Development)/NeuroML Example.py b/Scripts/Models (Under Development)/NeuroML Example.py index 0d04d910a02..de8edd29220 100644 --- a/Scripts/Models (Under Development)/NeuroML Example.py +++ b/Scripts/Models (Under Development)/NeuroML Example.py @@ -35,7 +35,7 @@ composition.add_projection( projection=pnl.MappingProjection( name="MappingProjection from syn1[OutputPort-0] to fnPop1[InputPort-0]", - function=pnl.LinearMatrix(matrix=[[1.0]]), + function=pnl.MatrixTransform(matrix=[[1.0]]), matrix=[[1.0]], ), sender=syn1, @@ -44,7 +44,7 @@ composition.add_projection( projection=pnl.MappingProjection( name="MappingProjection from fnPop1[OutputPort-0] to fnPop2[InputPort-0]", - function=pnl.LinearMatrix(matrix=[[1.0]]), + function=pnl.MatrixTransform(matrix=[[1.0]]), matrix=[[1.0]], ), sender=fnPop1, diff --git a/Scripts/Models (Under Development)/nback/nback_og_pytorch.py b/Scripts/Models (Under Development)/nback/nback_og_pytorch.py index 632f6cf5a31..a69a7b0de2c 100644 --- a/Scripts/Models (Under Development)/nback/nback_og_pytorch.py +++ b/Scripts/Models (Under Development)/nback/nback_og_pytorch.py @@ -74,7 +74,7 @@ FFWMGraph.add_projection( 
projection=pnl.MappingProjection( name="MatMul_6_as_edge", - function=pnl.LinearMatrix( + function=pnl.MatrixTransform( default_variable=np.zeros((90,), dtype="float32"), matrix=hid1_layer_weight ), ), @@ -84,7 +84,7 @@ FFWMGraph.add_projection( projection=pnl.MappingProjection( name="MatMul_16_as_edge", - function=pnl.LinearMatrix( + function=pnl.MatrixTransform( default_variable=np.zeros((90,), dtype="float32"), matrix=hid2_layer_weight ), ), @@ -94,7 +94,7 @@ FFWMGraph.add_projection( projection=pnl.MappingProjection( name="MatMul_19_as_edge", - function=pnl.LinearMatrix( + function=pnl.MatrixTransform( default_variable=np.zeros((80,), dtype="float32"), matrix=out_layer_weight ), ), diff --git a/psyneulink/core/components/component.py b/psyneulink/core/components/component.py index c3520735c12..1ca300b305e 100644 --- a/psyneulink/core/components/component.py +++ b/psyneulink/core/components/component.py @@ -4013,7 +4013,7 @@ def _get_mdf_parameters(self): from psyneulink.core.compositions.composition import Composition from psyneulink.core.components.ports.port import Port from psyneulink.core.components.ports.outputport import OutputPort - from psyneulink.core.components.functions.nonstateful.transferfunctions import LinearMatrix + from psyneulink.core.components.functions.nonstateful.transformfunctions import MatrixTransform def parse_parameter_value(value, no_expand_components=False, functions_as_dill=False): if isinstance(value, (list, tuple)): @@ -4154,11 +4154,11 @@ def parse_parameter_value(value, no_expand_components=False, functions_as_dill=F # class default val = p.default_value else: - # special handling because LinearMatrix default values + # special handling because MatrixTransform default values # can be PNL-specific keywords. In future, generalize # this workaround if ( - isinstance(self, LinearMatrix) + isinstance(self, MatrixTransform) and p.name == 'matrix' ): val = self.parameters.matrix.values[None] diff --git a/psyneulink/core/components/functions/__init__.py b/psyneulink/core/components/functions/__init__.py index 07f954dd7fa..3bc91c54816 100644 --- a/psyneulink/core/components/functions/__init__.py +++ b/psyneulink/core/components/functions/__init__.py @@ -1,12 +1,12 @@ from . import function -from .nonstateful import selectionfunctions, objectivefunctions, optimizationfunctions, combinationfunctions, \ +from .nonstateful import selectionfunctions, objectivefunctions, optimizationfunctions, transformfunctions, \ learningfunctions, transferfunctions, distributionfunctions, fitfunctions from . import stateful from .stateful import integratorfunctions, memoryfunctions from . 
import userdefinedfunction from .function import * -from psyneulink.core.components.functions.nonstateful.combinationfunctions import * +from psyneulink.core.components.functions.nonstateful.transformfunctions import * from psyneulink.core.components.functions.nonstateful.transferfunctions import * from psyneulink.core.components.functions.nonstateful.selectionfunctions import * from psyneulink.core.components.functions.nonstateful.distributionfunctions import * @@ -21,7 +21,7 @@ __all__ = list(function.__all__) __all__.extend(userdefinedfunction.__all__) -__all__.extend(combinationfunctions.__all__) +__all__.extend(transformfunctions.__all__) __all__.extend(transferfunctions.__all__) __all__.extend(selectionfunctions.__all__) __all__.extend(stateful.__all__) diff --git a/psyneulink/core/components/functions/nonstateful/transferfunctions.py b/psyneulink/core/components/functions/nonstateful/transferfunctions.py index 14028424f78..b49f27c1306 100644 --- a/psyneulink/core/components/functions/nonstateful/transferfunctions.py +++ b/psyneulink/core/components/functions/nonstateful/transferfunctions.py @@ -22,7 +22,6 @@ * `BinomialDistort` * `Dropout` * `SoftMax` -* `LinearMatrix` * `TransferWithCosts` Overview @@ -84,7 +83,7 @@ DEFAULT_SEED, Function, Function_Base, FunctionError, _random_state_getter, _seed_setter, function_keywords, get_matrix, is_function_type, ) -from psyneulink.core.components.functions.nonstateful.combinationfunctions import LinearCombination +from psyneulink.core.components.functions.nonstateful.transformfunctions import LinearCombination from psyneulink.core.components.functions.nonstateful.selectionfunctions import OneHot, ARG_MAX, ARG_MAX_INDICATOR from psyneulink.core.components.functions.stateful.integratorfunctions import SimpleIntegrator from psyneulink.core.components.shellclasses import Projection @@ -92,11 +91,11 @@ from psyneulink.core.globals.utilities import is_numeric_scalar from psyneulink.core.globals.keywords import \ (ADAPTIVE, ADDITIVE_PARAM, ALL, ANGLE_FUNCTION, BIAS, BINOMIAL_DISTORT_FUNCTION, DROPOUT_FUNCTION, - EXPONENTIAL_FUNCTION, GAIN, GAUSSIAN_DISTORT_FUNCTION, GAUSSIAN_FUNCTION, HAS_INITIALIZERS, HOLLOW_MATRIX, - IDENTITY_FUNCTION, IDENTITY_MATRIX, INTERCEPT, LEAK, LINEAR_FUNCTION, LINEAR_MATRIX_FUNCTION, LOGISTIC_FUNCTION, - TANH_FUNCTION, MATRIX_KEYWORD_NAMES, MATRIX, MAX_INDICATOR, MAX_VAL, MULTIPLICATIVE_PARAM, NORMALIZE, + EXPONENTIAL_FUNCTION, GAIN, GAUSSIAN_DISTORT_FUNCTION, GAUSSIAN_FUNCTION, + IDENTITY_FUNCTION, INTERCEPT, LEAK, LINEAR_FUNCTION, LOGISTIC_FUNCTION, + TANH_FUNCTION, MAX_INDICATOR, MAX_VAL, MULTIPLICATIVE_PARAM, OFF, OFFSET, ON, OUTPUT_TYPE, PER_ITEM, PROB, PRODUCT, PROB_INDICATOR, - RATE, RECEIVER, RELU_FUNCTION, SCALE, SLOPE, SOFTMAX_FUNCTION, STANDARD_DEVIATION, SUM, + RATE, RELU_FUNCTION, SCALE, SLOPE, SOFTMAX_FUNCTION, STANDARD_DEVIATION, SUM, TRANSFER_FUNCTION_TYPE, TRANSFER_WITH_COSTS_FUNCTION, VARIANCE, VARIABLE, X_0, PREFERENCE_SET_NAME) from psyneulink.core.globals.parameters import \ FunctionParameter, Parameter, get_validator_by_function, check_user_specified, copy_parameter_value @@ -105,7 +104,7 @@ from psyneulink.core.globals.utilities import ValidParamSpecType, convert_all_elements_to_np_array, safe_len, is_matrix_keyword __all__ = ['Angle', 'BinomialDistort', 'Dropout', 'Exponential', 'Gaussian', 'GaussianDistort', 'Identity', - 'Linear', 'LinearMatrix', 'Logistic', 'ReLU', 'SoftMax', 'Tanh', 'TransferFunction', 'TransferWithCosts' + 'Linear', 'Logistic', 'ReLU', 'SoftMax', 'Tanh', 'TransferFunction', 
'TransferWithCosts' ] class TransferFunction(Function_Base): @@ -3608,629 +3607,6 @@ def _gen_pytorch_adapt_gain_fct(self, device, context=None): * torch.log(1 / (1 + torch.exp(-1 * x))))))) -# ********************************************************************************************************************** -# LinearMatrix -# ********************************************************************************************************************** - -class LinearMatrix(TransferFunction): # ------------------------------------------------------------------------------- - """ - LinearMatrix( \ - default_variable, \ - matrix=None, \ - normalize=False, \ - params=None, \ - owner=None, \ - name=None, \ - prefs=None \ - ) - - .. _LinearMatrix: - - Matrix transform of `variable `. - - `function ` returns dot product of variable with matrix: - - .. math:: - variable \\bullet matrix - - If **normalize** is True, the result is normalized by the product of the norms of the variable and matrix: - - .. math:: - \\frac{variable \\bullet matrix}{\\|variable\\| \\cdot \\|matrix\\|} - - COMMENT: [CONVERT TO FIGURE] - ---------------------------------------------------------------------------------------------------------- - MATRIX FORMAT - INDICES: - Output elements: - 0 1 2 3 4 - 0 [0,0] [0,1] [0,2] [0,3] [0,4] - Input elements: 1 [1,0] [1,1] [1,2] [1,3] [1,4] - 2 [2,0] [2,1] [2,2] [2,3] [2,4] - - matrix.shape => (input/rows, output/cols) - - ---------------------------------------------------------------------------------------------------------- - ARRAY FORMAT - INDICES - [ [ Input 0 (row0) ], [ Input 1 (row1) ]... ] - [ [ out0, out1, out2, out3 ], [ out0, out1, out2, out3 ]... ] - matrix[input/rows, output/cols]: [ [ row0, row0, row0, row0 ], [ row1, row1, row1, row1 ]... ] - [ [ col0, col1, col2, col3 ], [ col0, col1, col2, col3 ]... ] - [ [[0,0], [0,1], [0,2], [0,3] ], [[1,0], [1,1], [1,2], [1,3] ]... ] - - ---------------------------------------------------------------------------------------------------------- - COMMENT - - - Arguments - --------- - - variable : list or 1d array : default class_defaults.variable - specifies a template for the value to be transformed; length must equal the number of rows of `matrix - `. - - matrix : number, list, 1d or 2d np.ndarray, function, or matrix keyword : default IDENTITY_MATRIX - specifies matrix used to transform `variable ` - (see `matrix ` for specification details). - - When LinearMatrix is the `function ` of a projection: - - - the matrix specification must be compatible with the variables of the `sender ` - and `receiver ` - - - a matrix keyword specification generates a matrix based on the sender and receiver shapes - - When LinearMatrix is instantiated on its own, or as the function of a `Mechanism ` or `Port`: - - - the matrix specification must be compatible with the function's own `variable ` - - - if matrix is not specified, a square identity matrix is generated based on the number of columns in - `variable ` - - - matrix keywords are not valid matrix specifications - - normalize : bool : default False - specifies whether to normalize the result of `function ` by dividing it by the - norm of `variable ` x the norm of `matrix `. - - bounds : None - - params : Dict[param keyword: param value] : default None - a `parameter dictionary ` that specifies the parameters for the - function. Values specified for parameters in the dictionary override any assigned to those parameters in - arguments of the constructor. 
- - owner : Component - `component ` to which to assign the Function. - - name : str : default see `name ` - specifies the name of the Function. - - prefs : PreferenceSet or specification dict : default Function.classPreferences - specifies the `PreferenceSet` for the Function (see `prefs ` for details). - - Attributes - ---------- - - variable : 1d array - contains value to be transformed. - - matrix : 2d array - matrix used to transform `variable `. - Can be specified as any of the following: - * number - used as the filler value for all elements of the :keyword:`matrix` (call to np.fill); - * list of arrays, 2d array - assigned as the value of :keyword:`matrix`; - * matrix keyword - see `MatrixKeywords` for list of options. - Rows correspond to elements of the input array (outer index), and - columns correspond to elements of the output array (inner index). - - normalize : bool - determines whether the result of `function ` is normalized, by dividing it by the - norm of `variable ` x the norm of `matrix `. - - - owner : Component - `component ` to which the Function has been assigned. - - name : str - the name of the Function; if it is not specified in the **name** argument of the constructor, a default is - assigned by FunctionRegistry (see `Registry_Naming` for conventions used for default and duplicate names). - - prefs : PreferenceSet or specification dict : Function.classPreferences - the `PreferenceSet` for function; if it is not specified in the **prefs** argument of the Function's - constructor, a default is assigned using `classPreferences` defined in __init__.py (see `PreferenceSet` - for details). - """ - - componentName = LINEAR_MATRIX_FUNCTION - - DEFAULT_FILLER_VALUE = 0 - - _model_spec_generic_type_name = 'onnx::MatMul' - - class Parameters(TransferFunction.Parameters): - """ - Attributes - ---------- - - matrix - see `matrix ` - - :default value: None - :type: - - normalize - see `normalize ` - - :default value: False - :type: bool - """ - variable = Parameter(np.array([0]), read_only=True, pnl_internal=True, constructor_argument='default_variable', mdf_name='A') - matrix = Parameter(None, modulable=True, mdf_name='B') - normalize = Parameter(False) - bounds = None - - # def is_matrix_spec(m): - # if m is None: - # return True - # if m in MATRIX_KEYWORD_VALUES: - # return True - # if isinstance(m, (list, np.ndarray, types.FunctionType)): - # return True - # return False - - @check_user_specified - @beartype - def __init__(self, - default_variable=None, - matrix=None, - normalize=None, - params=None, - owner=None, - prefs: Optional[ValidPrefSet] = None): - - # Note: this calls _validate_variable and _validate_params which are overridden below; - # the latter implements the matrix if required - # super(LinearMatrix, self).__init__(default_variable=default_variable, - super().__init__( - default_variable=default_variable, - matrix=matrix, - normalize=normalize, - params=params, - owner=owner, - prefs=prefs, - ) - - self.parameters.matrix.set( - self.instantiate_matrix(self.parameters.matrix.get()), - skip_log=True, - ) - - # def _validate_variable(self, variable, context=None): - # """Insure that variable passed to LinearMatrix is a max 2D array - # - # :param variable: (max 2D array) - # :param context: - # :return: - # """ - # variable = super()._validate_variable(variable, context) - # - # # Check that variable <= 2D - # try: - # if not variable.ndim <= 2: - # raise FunctionError("variable ({0}) for {1} must be a numpy.ndarray of dimension at most 2".format(variable, 
self.__class__.__name__)) - # except AttributeError: - # raise FunctionError("PROGRAM ERROR: variable ({0}) for {1} should be a numpy.ndarray". - # format(variable, self.__class__.__name__)) - # - # return variable - - - def _validate_params(self, request_set, target_set=None, context=None): - """Validate params and assign to targets - - This overrides the class method, to perform more detailed type checking (see explanation in class method). - Note: this method (or the class version) is called only if the parameter_validation attribute is `True` - - :param request_set: (dict) - params to be validated - :param target_set: (dict) - destination of validated params - :param context: (str) - :return none: - """ - - super()._validate_params(request_set, target_set, context) - - param_set = target_set - # proxy for checking whether the owner is a projection - if hasattr(self.owner, "receiver"): - sender = self.defaults.variable - sender_len = np.size(np.atleast_2d(self.defaults.variable), 1) - - # FIX: RELABEL sender -> input AND receiver -> output - # FIX: THIS NEEDS TO BE CLEANED UP: - # - AT LEAST CHANGE THE NAME FROM kwReceiver TO output_template OR SOMETHING LIKE THAT - # - MAKE ARG? OR ADD OTHER PARAMS: E.G., FILLER? - # - OR REFACTOR TO INCLUDE AS MATRIX SPEC: - # IF MATRIX IS 1D, USE AS OUTPUT TEMPLATE - # IF ALL ITS VALUES ARE 1'S => FULL CONNECTIVITY MATRIX - # IF ALL ITS VALUES ARE 0'S => RANDOM CONNECTIVITY MATRIX - # NOTE: NO NEED FOR IDENTITY MATRIX, AS THAT WOULD BE SQUARE SO NO NEED FOR OUTPUT TEMPLATE - # - DOCUMENT WHEN DONE - # MODIFIED 3/26/17 OLD: - # Check for and validate kwReceiver first, since it may be needed to validate and/or construct the matrix - # First try to get receiver from specification in params - if RECEIVER in param_set: - self.receiver = param_set[RECEIVER] - # Check that specification is a list of numbers or an array - if ((isinstance(self.receiver, list) and all( - isinstance(elem, numbers.Number) for elem in self.receiver)) or - isinstance(self.receiver, np.ndarray)): - self.receiver = np.atleast_1d(self.receiver) - else: - raise FunctionError("receiver param ({0}) for {1} must be a list of numbers or an np.array". - format(self.receiver, self.name)) - # No receiver, so use sender as template (assuming square -- e.g., identity -- matrix) - else: - if (self.owner and self.owner.prefs.verbosePref) or self.prefs.verbosePref: - print("Identity matrix requested but kwReceiver not specified; sender length ({0}) will be used". 
- format(sender_len)) - self.receiver = param_set[RECEIVER] = sender - - receiver_len = len(self.receiver) - - # Check rest of params - message = "" - for param_name, param_value in param_set.items(): - - # Receiver param already checked above - if param_name == RECEIVER: - continue - - # Not currently used here - if param_name in function_keywords: - continue - - if param_name == HAS_INITIALIZERS: - continue - - # Matrix specification param - elif param_name == MATRIX: - - # A number (to be used as a filler), so OK - if isinstance(param_value, numbers.Number): - continue - - # np.matrix or np.ndarray provided, so validate that it is numeric and check dimensions - elif isinstance(param_value, (list, np.ndarray, np.matrix)): - # get dimensions specified by: - # variable (sender): width/cols/outer index - # kwReceiver param: height/rows/inner index - - weight_matrix = np.atleast_2d(param_value) - if 'U' in repr(weight_matrix.dtype): - raise FunctionError("Non-numeric entry in MATRIX " - "specification ({}) for the {} " - "function of {}".format(param_value, - self.name, - self.owner_name)) - - if weight_matrix.ndim != 2: - raise FunctionError("The matrix provided for the {} function of {} must be 2d (it is {}d". - format(weight_matrix.ndim, self.name, self.owner_name)) - - matrix_rows = weight_matrix.shape[0] - matrix_cols = weight_matrix.shape[1] - - # Check that number of rows equals length of sender vector (variable) - if matrix_rows != sender_len: - raise FunctionError("The number of rows ({}) of the " - "matrix provided for {} function " - "of {} does not equal the length " - "({}) of the sender vector " - "(variable)".format(matrix_rows, - self.name, - self.owner_name, - sender_len)) - - # Auto, full or random connectivity matrix requested (using keyword): - # Note: assume that these will be properly processed by caller - # (e.g., MappingProjection._instantiate_receiver) - elif is_matrix_keyword(param_value): - continue - - # Identity matrix requested (using keyword), so check send_len == receiver_len - elif param_value in {IDENTITY_MATRIX, HOLLOW_MATRIX}: - # Receiver length doesn't equal sender length - if not (self.receiver.shape == sender.shape and self.receiver.size == sender.size): - # if self.owner.prefs.verbosePref: - # print ("Identity matrix requested, but length of receiver ({0})" - # " does not match length of sender ({1}); sender length will be used". - # format(receiver_len, sender_len)) - # # Set receiver to sender - # param_set[kwReceiver] = sender - raise FunctionError("{} requested for the {} function of {}, " - "but length of receiver ({}) does not match length of sender ({})". - format(param_value, self.name, self.owner_name, receiver_len, - sender_len)) - continue - - # list used to describe matrix, so convert to 2D array and pass to validation of matrix below - elif isinstance(param_value, list): - try: - param_value = np.atleast_2d(param_value) - except (ValueError, TypeError) as error_msg: - raise FunctionError( - "Error in list specification ({}) of matrix for the {} function of {}: {})". - # format(param_value, self.__class__.__name__, error_msg)) - format(param_value, self.name, self.owner_name, error_msg)) - - # string used to describe matrix, so convert to np.array and pass to validation of matrix below - elif isinstance(param_value, str): - try: - param_value = np.atleast_2d(param_value) - except (ValueError, TypeError) as error_msg: - raise FunctionError("Error in string specification ({}) of the matrix " - "for the {} function of {}: {})". 
- # format(param_value, self.__class__.__name__, error_msg)) - format(param_value, self.name, self.owner_name, error_msg)) - - # function so: - # - assume it uses random.rand() - # - call with two args as place markers for cols and rows - # - validate that it returns an array - elif isinstance(param_value, types.FunctionType): - test = param_value(1, 1) - if not isinstance(test, np.ndarray): - raise FunctionError("A function is specified for the matrix of the {} function of {}: {}) " - "that returns a value ({}) that is not an array". - # format(param_value, self.__class__.__name__, test)) - format(self.name, self.owner_name, param_value, test)) - - elif param_value is None: - raise FunctionError("TEMP ERROR: param value is None.") - - else: - raise FunctionError("Value of {} param ({}) for the {} function of {} " - "must be a matrix, a number (for filler), or a matrix keyword ({})". - format(param_name, - param_value, - self.name, - self.owner_name, - MATRIX_KEYWORD_NAMES)) - else: - continue - if message: - raise FunctionError(message) - - # owner is a mechanism, state - # OR function was defined on its own (no owner) - else: - if MATRIX in param_set: - param_value = param_set[MATRIX] - - # numeric value specified; verify that it is compatible with variable - if isinstance(param_value, (float, list, np.ndarray, np.matrix)): - param_size = np.size(np.atleast_2d(param_value), 0) - param_shape = np.shape(np.atleast_2d(param_value)) - variable_size = np.size(np.atleast_2d(self.defaults.variable),1) - variable_shape = np.shape(np.atleast_2d(self.defaults.variable)) - if param_size != variable_size: - raise FunctionError("Specification of matrix and/or default_variable for {} is not valid. The " - "shapes of variable {} and matrix {} are not compatible for multiplication". - format(self.name, variable_shape, param_shape)) - - # keyword matrix specified - not valid outside of a projection - elif is_matrix_keyword(param_value): - raise FunctionError("{} is not a valid specification for the matrix parameter of {}. Keywords " - "may only be used to specify the matrix parameter of a Projection's " - "LinearMatrix function. When the LinearMatrix function is implemented in a " - "mechanism, such as {}, the correct matrix cannot be determined from a " - "keyword. Instead, the matrix must be fully specified as a float, list, " - "np.ndarray". - format(param_value, self.name, self.owner.name)) - - # The only remaining valid option is matrix = None (sorted out in instantiate_attribs_before_fn) - elif param_value is not None: - raise FunctionError("Value of the matrix param ({}) for the {} function of {} " - "must be a matrix, a number (for filler), or a matrix keyword ({})". 
- format(param_value, - self.name, - self.owner_name, - MATRIX_KEYWORD_NAMES)) - - def _instantiate_attributes_before_function(self, function=None, context=None): - # replicates setting of receiver in _validate_params - if isinstance(self.owner, Projection): - self.receiver = copy_parameter_value(self.defaults.variable) - - matrix = self.parameters.matrix._get(context) - - if matrix is None and not hasattr(self.owner, "receiver"): - variable_length = np.size(np.atleast_2d(self.defaults.variable), 1) - matrix = np.identity(variable_length) - self.parameters.matrix._set(self.instantiate_matrix(matrix), context) - - def instantiate_matrix(self, specification, context=None): - """Implements matrix indicated by specification - - Specification is derived from MATRIX param (passed to self.__init__ or self._function) - - Specification (validated in _validate_params): - + single number (used to fill self.matrix) - + matrix keyword (see get_matrix) - + 2D list or np.ndarray of numbers - - :return matrix: (2D list) - """ - from psyneulink.core.components.projections.projection import Projection - if isinstance(self.owner, Projection): - # Matrix provided (and validated in _validate_params); convert to array - if isinstance(specification, np.matrix): - return np.array(specification) - - sender = copy_parameter_value(self.defaults.variable) - sender_len = sender.shape[0] - try: - receiver = self.receiver - except: - raise FunctionError("Can't instantiate matrix specification ({}) for the {} function of {} " - "since its receiver has not been specified". - format(specification, self.name, self.owner_name)) - # receiver = sender - receiver_len = receiver.shape[0] - - matrix = get_matrix(specification, rows=sender_len, cols=receiver_len, context=context) - - # This should never happen (should have been picked up in validate_param or above) - if matrix is None: - raise FunctionError("MATRIX param ({}) for the {} function of {} must be a matrix, a function " - "that returns one, a matrix specification keyword ({}), or a number (filler)". 
- format(specification, self.name, self.owner_name, MATRIX_KEYWORD_NAMES)) - else: - return matrix - else: - return np.array(specification) - - - def _gen_llvm_function_body(self, ctx, builder, params, state, arg_in, arg_out, *, tags:frozenset): - # Restrict to 1d arrays - if self.defaults.variable.ndim != 1: - warnings.warn("Shape mismatch: {} (in {}) got 2D input: {}".format( - self, self.owner, self.defaults.variable), - pnlvm.PNLCompilerWarning) - arg_in = builder.gep(arg_in, [ctx.int32_ty(0), ctx.int32_ty(0)]) - if self.defaults.value.ndim != 1: - warnings.warn("Shape mismatch: {} (in {}) has 2D output: {}".format( - self, self.owner, self.defaults.value), - pnlvm.PNLCompilerWarning) - arg_out = builder.gep(arg_out, [ctx.int32_ty(0), ctx.int32_ty(0)]) - - matrix = ctx.get_param_or_state_ptr(builder, self, MATRIX, param_struct_ptr=params, state_struct_ptr=state) - normalize = ctx.get_param_or_state_ptr(builder, self, NORMALIZE, param_struct_ptr=params) - - # Convert array pointer to pointer to the fist element - matrix = builder.gep(matrix, [ctx.int32_ty(0), ctx.int32_ty(0)]) - vec_in = builder.gep(arg_in, [ctx.int32_ty(0), ctx.int32_ty(0)]) - vec_out = builder.gep(arg_out, [ctx.int32_ty(0), ctx.int32_ty(0)]) - - input_length = ctx.int32_ty(arg_in.type.pointee.count) - output_length = ctx.int32_ty(arg_out.type.pointee.count) - - # if normalize: - # if vec_in is not zeros: - # # FIX: NORMALIZE vec_in and matrix here - # vec_in_sum = fsum(builder, vec_in) - # vec_in = fdiv(builder, vec_in, vec_in_sum) - # if matrix is not zeros: - # # FIX: NORMALIZE matrix here - - builtin = ctx.import_llvm_function("__pnl_builtin_vxm") - builder.call(builtin, [vec_in, matrix, input_length, output_length, vec_out]) - return builder - - def _function(self, - variable=None, - context=None, - params=None, - ): - """ - - Arguments - --------- - variable : list or 1d array - array to be transformed; length must equal the number of rows of `matrix `. - - params : Dict[param keyword: param value] : default None - a `parameter dictionary ` that specifies the parameters for the - function. Values specified for parameters in the dictionary override any assigned to those parameters in - arguments of the constructor. - - Returns - --------- - - dot product of variable and matrix : 1d array - length of the array returned equals the number of columns of `matrix `. 
- - """ - vector = np.array(variable) - matrix = self._get_current_parameter_value(MATRIX, context) - normalize = self._get_current_parameter_value(NORMALIZE, context) - if normalize: - if np.any(vector): - vector = vector / np.linalg.norm(vector) - if np.any(matrix): - # FIX: the axis along which norming is carried out should probably be a parameter - # Also need to deal with column- (or row-) wise zeros which cause div by zero - # Replace columns (if norming axis 0) or rows (if norming axis 1) of zeros with 1's - # matrix = matrix / np.linalg.norm(matrix,axis=-1,keepdims=True) - matrix = matrix / np.linalg.norm(matrix,axis=0,keepdims=True) - - result = np.dot(vector, matrix) - return self.convert_output_type(result) - - @staticmethod - def keyword(obj, keyword): - - from psyneulink.core.components.projections.pathway.mappingprojection import MappingProjection - rows = None - cols = None - # use of variable attribute here should be ok because it's using it as a format/type - if isinstance(obj, MappingProjection): - if isinstance(obj.sender.defaults.value, numbers.Number): - rows = 1 - else: - rows = len(obj.sender.defaults.value) - if isinstance(obj.receiver.defaults.variable, numbers.Number): - cols = 1 - else: - cols = obj.receiver.socket_width - matrix = get_matrix(keyword, rows, cols) - - if matrix is None: - raise FunctionError("Unrecognized keyword ({}) specified for the {} function of {}". - format(keyword, obj.name, obj.owner_name)) - else: - return matrix - - def param_function(owner, function): - sender_len = len(owner.sender.defaults.value) - receiver_len = len(owner.receiver.defaults.variable) - return function(sender_len, receiver_len) - - def _is_identity(self, context=None, defaults=False): - if defaults: - matrix = self.defaults.matrix - else: - matrix = self.parameters.matrix._get(context) - - # if matrix is not an np array with at least one dimension, - # this isn't an identity matrix - try: - size = matrix.shape[0] - except (AttributeError, IndexError): - return False - - # check if the matrix is the same as the identity matrix - # note that we can use the first dimension size to create the identity matrix - # because if the matrix is not square, this comparison will fail anyway - identity_matrix = np.identity(size) - # numpy has deprecated == comparisons of arrays - try: - return np.array_equal(matrix, identity_matrix) - except TypeError: - return matrix == identity_matrix - -# def is_matrix_spec(m): -# if m is None: -# return True -# if isinstance(m, (list, np.ndarray, types.FunctionType)): -# return True -# if m in MATRIX_KEYWORD_VALUES: -# return True -# return False - - # ********************************************************************************************************************** # TransferWithCosts # ********************************************************************************************************************** diff --git a/psyneulink/core/components/functions/nonstateful/combinationfunctions.py b/psyneulink/core/components/functions/nonstateful/transformfunctions.py similarity index 75% rename from psyneulink/core/components/functions/nonstateful/combinationfunctions.py rename to psyneulink/core/components/functions/nonstateful/transformfunctions.py index 1831048c769..935fee17f07 100644 --- a/psyneulink/core/components/functions/nonstateful/combinationfunctions.py +++ b/psyneulink/core/components/functions/nonstateful/transformfunctions.py @@ -15,6 +15,7 @@ * `Reduce` * `LinearCombination` * `CombineMeans` +* `MatrixTransform` * 
`PredictionErrorDeltaFunction` Overview @@ -23,7 +24,7 @@ Functions that combine multiple items with the same shape, yielding a result with a single item that has the same shape as the individual items. -All CombinationFunctions must have two attributes - **multiplicative_param** and **additive_param** - +All Transformfunctions must have two attributes - **multiplicative_param** and **additive_param** - each of which is assigned the name of one of the function's parameters; this is for use by ModulatoryProjections (and, in particular, GatingProjections, when the CombinationFunction is used as the function of an InputPort or OutputPort). @@ -32,8 +33,11 @@ """ import numbers +import types +import warnings import numpy as np + try: import torch except ImportError: @@ -43,25 +47,33 @@ from psyneulink._typing import Optional, Union, Literal from psyneulink.core import llvm as pnlvm -from psyneulink.core.components.functions.function import Function_Base, FunctionError, FunctionOutputType -from psyneulink.core.globals.keywords import \ - ADDITIVE_PARAM, ARRANGEMENT, COMBINATION_FUNCTION_TYPE, COMBINE_MEANS_FUNCTION, CONCATENATE_FUNCTION, \ - CROSS_ENTROPY, DEFAULT_VARIABLE, EXPONENTS, LINEAR_COMBINATION_FUNCTION, MULTIPLICATIVE_PARAM, OFFSET, OPERATION, \ - PREDICTION_ERROR_DELTA_FUNCTION, PRODUCT, REARRANGE_FUNCTION, REDUCE_FUNCTION, SCALE, SUM, WEIGHTS, \ - PREFERENCE_SET_NAME -from psyneulink.core.globals.utilities import convert_all_elements_to_np_array, convert_to_np_array, is_numeric, is_numeric_scalar, np_array_less_than_2d, ValidParamSpecType +from psyneulink.core.components.functions import function +from psyneulink.core.components.functions.function import ( + Function_Base, FunctionError, FunctionOutputType, function_keywords, get_matrix) +from psyneulink.core.components.shellclasses import Projection +from psyneulink.core.globals.keywords import ( + ADDITIVE_PARAM, ARRANGEMENT, COMBINATION_FUNCTION_TYPE, COMBINE_MEANS_FUNCTION, CONCATENATE_FUNCTION, + CROSS_ENTROPY, DEFAULT_VARIABLE, DOT_PRODUCT, EXPONENTS, + HAS_INITIALIZERS, HOLLOW_MATRIX, IDENTITY_MATRIX, + LINEAR_COMBINATION_FUNCTION, LINEAR_TRANSFORM_FUNCTION, L0, + MATRIX_KEYWORD_NAMES, MATRIX, MULTIPLICATIVE_PARAM, NORMALIZE, + OFFSET, OPERATION, PREDICTION_ERROR_DELTA_FUNCTION, PRODUCT, + REARRANGE_FUNCTION, RECEIVER, REDUCE_FUNCTION, SCALE, SUM, WEIGHTS, PREFERENCE_SET_NAME) +from psyneulink.core.globals.utilities import ( + convert_all_elements_to_np_array, convert_to_np_array, is_numeric, is_matrix_keyword, is_numeric_scalar, + np_array_less_than_2d, ValidParamSpecType) from psyneulink.core.globals.context import ContextFlags, handle_external_context -from psyneulink.core.globals.parameters import Parameter, check_user_specified +from psyneulink.core.globals.parameters import Parameter, check_user_specified, copy_parameter_value from psyneulink.core.globals.preferences.basepreferenceset import \ REPORT_OUTPUT_PREF, ValidPrefSet, PreferenceEntry, PreferenceLevel -__all__ = ['CombinationFunction', 'Concatenate', 'CombineMeans', 'Rearrange', 'Reduce', 'LinearCombination', - 'PredictionErrorDeltaFunction'] +__all__ = ['CombinationFunction', 'Concatenate', 'CombineMeans', 'Rearrange', 'Reduce', + 'LinearCombination', 'MatrixTransform', 'PredictionErrorDeltaFunction'] class CombinationFunction(Function_Base): """Function that combines multiple items, yielding a result with the same shape as its operands - All CombinationFunctions must have two attributes - multiplicative_param and additive_param - + All Transformfunctions must 
have two attributes - multiplicative_param and additive_param - each of which is assigned the name of one of the function's parameters; this is for use by ModulatoryProjections (and, in particular, GatingProjections, when the CombinationFunction is used as the function of an InputPort or OutputPort). @@ -1596,6 +1608,644 @@ def _gen_pytorch_fct(self, device, context=None): f"by AutodiffComposition; use 'SUM' or 'PRODUCT' if possible.") +# ********************************************************************************************************************** +# MatrixTransform +# ********************************************************************************************************************** + +class MatrixTransform(CombinationFunction): # ------------------------------------------------------------------------------- + """ + MatrixTransform( \ + default_variable, \ + matrix=None, \ + normalize=False, \ + params=None, \ + owner=None, \ + name=None, \ + prefs=None \ + ) + + .. _MatrixTransform: + + Matrix transform of `variable `. + + `function ` returns dot product of variable with matrix: + + .. math:: + variable \\bullet matrix + + If **normalize** is True, the result is normalized by the product of the norms of the variable and matrix: + + .. math:: + \\frac{variable \\bullet matrix}{\\|variable\\| \\cdot \\|matrix\\|} + + COMMENT: [CONVERT TO FIGURE] + ---------------------------------------------------------------------------------------------------------- + MATRIX FORMAT + INDICES: + Output elements: + 0 1 2 3 4 + 0 [0,0] [0,1] [0,2] [0,3] [0,4] + Input elements: 1 [1,0] [1,1] [1,2] [1,3] [1,4] + 2 [2,0] [2,1] [2,2] [2,3] [2,4] + + matrix.shape => (input/rows, output/cols) + + ---------------------------------------------------------------------------------------------------------- + ARRAY FORMAT + INDICES + [ [ Input 0 (row0) ], [ Input 1 (row1) ]... ] + [ [ out0, out1, out2, out3 ], [ out0, out1, out2, out3 ]... ] + matrix[input/rows, output/cols]: [ [ row0, row0, row0, row0 ], [ row1, row1, row1, row1 ]... ] + [ [ col0, col1, col2, col3 ], [ col0, col1, col2, col3 ]... ] + [ [[0,0], [0,1], [0,2], [0,3] ], [[1,0], [1,1], [1,2], [1,3] ]... ] + + ---------------------------------------------------------------------------------------------------------- + COMMENT + + + Arguments + --------- + + variable : list or 1d array : default class_defaults.variable + specifies a template for the value to be transformed; length must equal the number of rows of `matrix + `. + + matrix : number, list, 1d or 2d np.ndarray, function, or matrix keyword : default IDENTITY_MATRIX + specifies matrix used to transform `variable ` + (see `matrix ` for specification details). + + When MatrixTransform is the `function ` of a projection: + + - the matrix specification must be compatible with the variables of the `sender ` + and `receiver ` + + - a matrix keyword specification generates a matrix based on the sender and receiver shapes + + When MatrixTransform is instantiated on its own, or as the function of a `Mechanism ` or `Port`: + + - the matrix specification must be compatible with the function's own `variable ` + + - if matrix is not specified, a square identity matrix is generated based on the number of columns in + `variable ` + + - matrix keywords are not valid matrix specifications + + normalize : bool : default False + specifies whether to normalize the result of `function ` by dividing it by the + norm of `variable ` x the norm of `matrix `. 
+ + bounds : None + + params : Dict[param keyword: param value] : default None + a `parameter dictionary ` that specifies the parameters for the + function. Values specified for parameters in the dictionary override any assigned to those parameters in + arguments of the constructor. + + owner : Component + `component ` to which to assign the Function. + + name : str : default see `name ` + specifies the name of the Function. + + prefs : PreferenceSet or specification dict : default Function.classPreferences + specifies the `PreferenceSet` for the Function (see `prefs ` for details). + + Attributes + ---------- + + variable : 1d array + contains value to be transformed. + + matrix : 2d array + matrix used to transform `variable `. + Can be specified as any of the following: + * number - used as the filler value for all elements of the :keyword:`matrix` (call to np.fill); + * list of arrays, 2d array - assigned as the value of :keyword:`matrix`; + * matrix keyword - see `MatrixKeywords` for list of options. + Rows correspond to elements of the input array (outer index), and + columns correspond to elements of the output array (inner index). + + normalize : bool + determines whether the result of `function ` is normalized, by dividing it by the + norm of `variable ` x the norm of `matrix `. + + + owner : Component + `component ` to which the Function has been assigned. + + name : str + the name of the Function; if it is not specified in the **name** argument of the constructor, a default is + assigned by FunctionRegistry (see `Registry_Naming` for conventions used for default and duplicate names). + + prefs : PreferenceSet or specification dict : Function.classPreferences + the `PreferenceSet` for function; if it is not specified in the **prefs** argument of the Function's + constructor, a default is assigned using `classPreferences` defined in __init__.py (see `PreferenceSet` + for details). 
+ """ + + componentName = LINEAR_TRANSFORM_FUNCTION + + DEFAULT_FILLER_VALUE = 0 + + _model_spec_generic_type_name = 'onnx::MatMul' + + class Parameters(CombinationFunction.Parameters): + """ + Attributes + ---------- + + matrix + see `matrix ` + + :default value: None + :type: + + operation + see `operation ` + + :default value: DOT_PRODUCT + :type: bool + + normalize + see `normalize ` + + :default value: False + :type: bool + """ + variable = Parameter(np.array([0]), read_only=True, pnl_internal=True, constructor_argument='default_variable', mdf_name='A') + matrix = Parameter(None, modulable=True, mdf_name='B') + operation = Parameter(DOT_PRODUCT, stateful=False) + normalize = Parameter(False) + bounds = None + + # def is_matrix_spec(m): + # if m is None: + # return True + # if m in MATRIX_KEYWORD_VALUES: + # return True + # if isinstance(m, (list, np.ndarray, types.FunctionType)): + # return True + # return False + + @check_user_specified + @beartype + def __init__(self, + default_variable=None, + matrix=None, + operation=None, + normalize=None, + params=None, + owner=None, + prefs: Optional[ValidPrefSet] = None): + + # Note: this calls _validate_variable and _validate_params which are overridden below; + # the latter implements the matrix if required + # super(MatrixTransform, self).__init__(default_variable=default_variable, + super().__init__( + default_variable=default_variable, + matrix=matrix, + operation=operation, + normalize=normalize, + params=params, + owner=owner, + prefs=prefs, + ) + + self.parameters.matrix.set( + self.instantiate_matrix(self.parameters.matrix.get()), + skip_log=True, + ) + + # def _validate_variable(self, variable, context=None): + # """Insure that variable passed to MatrixTransform is a max 2D array + # + # :param variable: (max 2D array) + # :param context: + # :return: + # """ + # variable = super()._validate_variable(variable, context) + # + # # Check that variable <= 2D + # try: + # if not variable.ndim <= 2: + # raise FunctionError("variable ({0}) for {1} must be a numpy.ndarray of dimension at most 2".format(variable, self.__class__.__name__)) + # except AttributeError: + # raise FunctionError("PROGRAM ERROR: variable ({0}) for {1} should be a numpy.ndarray". + # format(variable, self.__class__.__name__)) + # + # return variable + + + def _validate_params(self, request_set, target_set=None, context=None): + """Validate params and assign to targets + + This overrides the class method, to perform more detailed type checking (see explanation in class method). 
+ Note: this method (or the class version) is called only if the parameter_validation attribute is `True` + + :param request_set: (dict) - params to be validated + :param target_set: (dict) - destination of validated params + :param context: (str) + :return none: + """ + + super()._validate_params(request_set, target_set, context) + + param_set = target_set + # proxy for checking whether the owner is a projection + if hasattr(self.owner, 'receiver'): + sender = self.defaults.variable + sender_len = np.size(np.atleast_2d(self.defaults.variable), 1) + + # Check for and validate receiver first, since it may be needed to validate and/or construct the matrix + # First try to get receiver from specification in params + if RECEIVER in param_set: + self.receiver = param_set[RECEIVER] + # Check that specification is a list of numbers or an array + if ((isinstance(self.receiver, list) and all( + isinstance(elem, numbers.Number) for elem in self.receiver)) or + isinstance(self.receiver, np.ndarray)): + self.receiver = np.atleast_1d(self.receiver) + else: + raise FunctionError("receiver param ({0}) for {1} must be a list of numbers or an np.array". + format(self.receiver, self.name)) + # No receiver, so use sender as template (assuming square -- e.g., IDENTITY -- matrix) + else: + if (self.owner and self.owner.prefs.verbosePref) or self.prefs.verbosePref: + print("Identity matrix requested but 'receiver' not specified; sender length ({0}) will be used". + format(sender_len)) + self.receiver = param_set[RECEIVER] = sender + + receiver_len = len(self.receiver) + + # Check rest of params + message = "" + for param_name, param_value in param_set.items(): + + # receiver param already checked above + if param_name == RECEIVER: + continue + + # Not currently used here + if param_name in function_keywords: + continue + + if param_name == HAS_INITIALIZERS: + continue + + # matrix specification param + elif param_name == MATRIX: + + # A number (to be used as a filler), so OK + if isinstance(param_value, numbers.Number): + continue + + # np.matrix or np.ndarray provided, so validate that it is numeric and check dimensions + elif isinstance(param_value, (list, np.ndarray, np.matrix)): + # get dimensions specified by: + # variable (sender): width/cols/outer index + # kwReceiver param: height/rows/inner index + + weight_matrix = np.atleast_2d(param_value) + if 'U' in repr(weight_matrix.dtype): + raise FunctionError("Non-numeric entry in MATRIX " + "specification ({}) for the {} " + "function of {}".format(param_value, + self.name, + self.owner_name)) + + if weight_matrix.ndim != 2: + raise FunctionError("The matrix provided for the {} function of {} must be 2d (it is {}d". 
+ format(weight_matrix.ndim, self.name, self.owner_name)) + + matrix_rows = weight_matrix.shape[0] + matrix_cols = weight_matrix.shape[1] + + # Check that number of rows equals length of sender vector (variable) + if matrix_rows != sender_len: + raise FunctionError("The number of rows ({}) of the " + "matrix provided for {} function " + "of {} does not equal the length " + "({}) of the sender vector " + "(variable)".format(matrix_rows, + self.name, + self.owner_name, + sender_len)) + + # Auto, full or random connectivity matrix requested (using keyword): + # Note: assume that these will be properly processed by caller + # (e.g., MappingProjection._instantiate_receiver) + elif is_matrix_keyword(param_value): + continue + + # Identity matrix requested (using keyword), so check send_len == receiver_len + elif param_value in {IDENTITY_MATRIX, HOLLOW_MATRIX}: + # Receiver length doesn't equal sender length + if not (self.receiver.shape == sender.shape and self.receiver.size == sender.size): + # if self.owner.prefs.verbosePref: + # print ("Identity matrix requested, but length of receiver ({0})" + # " does not match length of sender ({1}); sender length will be used". + # format(receiver_len, sender_len)) + # # Set receiver to sender + # param_set[kwReceiver] = sender + raise FunctionError("{} requested for the {} function of {}, " + "but length of receiver ({}) does not match length of sender ({})". + format(param_value, self.name, self.owner_name, receiver_len, + sender_len)) + continue + + # list used to describe matrix, so convert to 2D array and pass to validation of matrix below + elif isinstance(param_value, list): + try: + param_value = np.atleast_2d(param_value) + except (ValueError, TypeError) as error_msg: + raise FunctionError( + "Error in list specification ({}) of matrix for the {} function of {}: {})". + # format(param_value, self.__class__.__name__, error_msg)) + format(param_value, self.name, self.owner_name, error_msg)) + + # string used to describe matrix, so convert to np.array and pass to validation of matrix below + elif isinstance(param_value, str): + try: + param_value = np.atleast_2d(param_value) + except (ValueError, TypeError) as error_msg: + raise FunctionError("Error in string specification ({}) of the matrix " + "for the {} function of {}: {})". + # format(param_value, self.__class__.__name__, error_msg)) + format(param_value, self.name, self.owner_name, error_msg)) + + # function so: + # - assume it uses random.rand() + # - call with two args as place markers for cols and rows + # - validate that it returns an array + elif isinstance(param_value, types.FunctionType): + test = param_value(1, 1) + if not isinstance(test, np.ndarray): + raise FunctionError("A function is specified for the matrix of the {} function of {}: {}) " + "that returns a value ({}) that is not an array". + # format(param_value, self.__class__.__name__, test)) + format(self.name, self.owner_name, param_value, test)) + + elif param_value is None: + raise FunctionError("TEMP ERROR: param value is None.") + + else: + raise FunctionError("Value of {} param ({}) for the {} function of {} " + "must be a matrix, a number (for filler), or a matrix keyword ({})". 
+ format(param_name, + param_value, + self.name, + self.owner_name, + MATRIX_KEYWORD_NAMES)) + + # operation param + elif param_name == OPERATION: + if param_value == L0 and NORMALIZE in param_set and param_set[NORMALIZE]: + raise FunctionError(f"The 'operation' parameter for the {self.name} function of " + f"{self.owner_name} is set to 'L0', so the 'normalize' parameter " + f"should not be set to True " + f"(normalization is not needed, and can cause a divide by zero error). " + f"Set 'normalize' to False or change 'operation' to 'DOT_PRODUCT'.") + else: + continue + + # owner is a mechanism, state + # OR function was defined on its own (no owner) + else: + if MATRIX in param_set: + param_value = param_set[MATRIX] + + # numeric value specified; verify that it is compatible with variable + if isinstance(param_value, (float, list, np.ndarray, np.matrix)): + param_size = np.size(np.atleast_2d(param_value), 0) + param_shape = np.shape(np.atleast_2d(param_value)) + variable_size = np.size(np.atleast_2d(self.defaults.variable),1) + variable_shape = np.shape(np.atleast_2d(self.defaults.variable)) + if param_size != variable_size: + raise FunctionError("Specification of matrix and/or default_variable for {} is not valid. The " + "shapes of variable {} and matrix {} are not compatible for multiplication". + format(self.name, variable_shape, param_shape)) + + # keyword matrix specified - not valid outside of a projection + elif is_matrix_keyword(param_value): + raise FunctionError("{} is not a valid specification for the matrix parameter of {}. Keywords " + "may only be used to specify the matrix parameter of a Projection's " + "MatrixTransform function. When the MatrixTransform function is implemented in a " + "mechanism, such as {}, the correct matrix cannot be determined from a " + "keyword. Instead, the matrix must be fully specified as a float, list, " + "np.ndarray". + format(param_value, self.name, self.owner.name)) + + # The only remaining valid option is matrix = None (sorted out in instantiate_attribs_before_fn) + elif param_value is not None: + raise FunctionError("Value of the matrix param ({}) for the {} function of {} " + "must be a matrix, a number (for filler), or a matrix keyword ({})". 
+ format(param_value, + self.name, + self.owner_name, + MATRIX_KEYWORD_NAMES)) + + def _instantiate_attributes_before_function(self, function=None, context=None): + # replicates setting of receiver in _validate_params + if isinstance(self.owner, Projection): + self.receiver = copy_parameter_value(self.defaults.variable) + + matrix = self.parameters.matrix._get(context) + + if matrix is None and not hasattr(self.owner, "receiver"): + variable_length = np.size(np.atleast_2d(self.defaults.variable), 1) + matrix = np.identity(variable_length) + self.parameters.matrix._set(self.instantiate_matrix(matrix), context) + + def instantiate_matrix(self, specification, context=None): + """Implements matrix indicated by specification + + Specification is derived from MATRIX param (passed to self.__init__ or self._function) + + Specification (validated in _validate_params): + + single number (used to fill self.matrix) + + matrix keyword (see get_matrix) + + 2D list or np.ndarray of numbers + + :return matrix: (2D list) + """ + from psyneulink.core.components.projections.projection import Projection + if isinstance(self.owner, Projection): + # Matrix provided (and validated in _validate_params); convert to array + if isinstance(specification, np.matrix): + return np.array(specification) + + sender = copy_parameter_value(self.defaults.variable) + sender_len = sender.shape[0] + try: + receiver = self.receiver + except: + raise FunctionError("Can't instantiate matrix specification ({}) for the {} function of {} " + "since its receiver has not been specified". + format(specification, self.name, self.owner_name)) + # receiver = sender + receiver_len = receiver.shape[0] + + matrix = get_matrix(specification, rows=sender_len, cols=receiver_len, context=context) + + # This should never happen (should have been picked up in validate_param or above) + if matrix is None: + raise FunctionError("MATRIX param ({}) for the {} function of {} must be a matrix, a function " + "that returns one, a matrix specification keyword ({}), or a number (filler)". 
+ format(specification, self.name, self.owner_name, MATRIX_KEYWORD_NAMES)) + else: + return matrix + else: + return np.array(specification) + + + def _gen_llvm_function_body(self, ctx, builder, params, state, arg_in, arg_out, *, tags:frozenset): + # Restrict to 1d arrays + if self.defaults.variable.ndim != 1: + warnings.warn("Shape mismatch: {} (in {}) got 2D input: {}".format( + self, self.owner, self.defaults.variable), + pnlvm.PNLCompilerWarning) + arg_in = builder.gep(arg_in, [ctx.int32_ty(0), ctx.int32_ty(0)]) + if self.defaults.value.ndim != 1: + warnings.warn("Shape mismatch: {} (in {}) has 2D output: {}".format( + self, self.owner, self.defaults.value), + pnlvm.PNLCompilerWarning) + arg_out = builder.gep(arg_out, [ctx.int32_ty(0), ctx.int32_ty(0)]) + + matrix = ctx.get_param_or_state_ptr(builder, self, MATRIX, param_struct_ptr=params, state_struct_ptr=state) + normalize = ctx.get_param_or_state_ptr(builder, self, NORMALIZE, param_struct_ptr=params) + + # Convert array pointer to pointer to the fist element + matrix = builder.gep(matrix, [ctx.int32_ty(0), ctx.int32_ty(0)]) + vec_in = builder.gep(arg_in, [ctx.int32_ty(0), ctx.int32_ty(0)]) + vec_out = builder.gep(arg_out, [ctx.int32_ty(0), ctx.int32_ty(0)]) + + input_length = ctx.int32_ty(arg_in.type.pointee.count) + output_length = ctx.int32_ty(arg_out.type.pointee.count) + + # if normalize: + # if vec_in is not zeros: + # # FIX: NORMALIZE vec_in and matrix here + # vec_in_sum = fsum(builder, vec_in) + # vec_in = fdiv(builder, vec_in, vec_in_sum) + # if matrix is not zeros: + # # FIX: NORMALIZE matrix here + + builtin = ctx.import_llvm_function("__pnl_builtin_vxm") + builder.call(builtin, [vec_in, matrix, input_length, output_length, vec_out]) + return builder + + def _function(self, + variable=None, + context=None, + params=None, + ): + """ + + Arguments + --------- + variable : list or 1d array + array to be transformed; length must equal the number of rows of `matrix `. + + params : Dict[param keyword: param value] : default None + a `parameter dictionary ` that specifies the parameters for the + function. Values specified for parameters in the dictionary override any assigned to those parameters in + arguments of the constructor. + + Returns + --------- + + dot product of variable and matrix : 1d array + length of the array returned equals the number of columns of `matrix `. 
+ + """ + vector = np.array(variable) + matrix = self._get_current_parameter_value(MATRIX, context) + operation = self._get_current_parameter_value(OPERATION, context) + normalize = self._get_current_parameter_value(NORMALIZE, context) + + if operation == DOT_PRODUCT: + if normalize: + if np.any(vector): + vector = vector / np.linalg.norm(vector) + if np.any(matrix): + # FIX: the axis along which norming is carried out should probably be a parameter + # Also need to deal with column- (or row-) wise zeros which cause div by zero + # Replace columns (if norming axis 0) or rows (if norming axis 1) of zeros with 1's + # matrix = matrix / np.linalg.norm(matrix,axis=-1,keepdims=True) + matrix = matrix / np.linalg.norm(matrix,axis=0,keepdims=True) + result = np.dot(vector, matrix) + + elif operation == L0: + normalization = 1 + if normalize: + normalization = np.sum(np.abs(vector - matrix)) + result = np.sum(((1 - np.abs(vector - matrix)) / normalization),axis=0) + + return self.convert_output_type(result) + + @staticmethod + def keyword(obj, keyword): + + from psyneulink.core.components.projections.pathway.mappingprojection import MappingProjection + rows = None + cols = None + # use of variable attribute here should be ok because it's using it as a format/type + if isinstance(obj, MappingProjection): + if isinstance(obj.sender.defaults.value, numbers.Number): + rows = 1 + else: + rows = len(obj.sender.defaults.value) + if isinstance(obj.receiver.defaults.variable, numbers.Number): + cols = 1 + else: + cols = obj.receiver.socket_width + matrix = get_matrix(keyword, rows, cols) + + if matrix is None: + raise FunctionError("Unrecognized keyword ({}) specified for the {} function of {}". + format(keyword, obj.name, obj.owner_name)) + else: + return matrix + + def param_function(owner, function): + sender_len = len(owner.sender.defaults.value) + receiver_len = len(owner.receiver.defaults.variable) + return function(sender_len, receiver_len) + + def _is_identity(self, context=None, defaults=False): + if defaults: + matrix = self.defaults.matrix + else: + matrix = self.parameters.matrix._get(context) + + # if matrix is not an np array with at least one dimension, + # this isn't an identity matrix + try: + size = matrix.shape[0] + except (AttributeError, IndexError): + return False + + # check if the matrix is the same as the identity matrix + # note that we can use the first dimension size to create the identity matrix + # because if the matrix is not square, this comparison will fail anyway + identity_matrix = np.identity(size) + # numpy has deprecated == comparisons of arrays + try: + return np.array_equal(matrix, identity_matrix) + except TypeError: + return matrix == identity_matrix + +# def is_matrix_spec(m): +# if m is None: +# return True +# if isinstance(m, (list, np.ndarray, types.FunctionType)): +# return True +# if m in MATRIX_KEYWORD_VALUES: +# return True +# return False + + + class CombineMeans(CombinationFunction): # ------------------------------------------------------------------------ # FIX: CONFIRM THAT 1D KWEIGHTS USES EACH ELEMENT TO SCALE CORRESPONDING VECTOR IN VARIABLE # FIX CONFIRM THAT LINEAR TRANSFORMATION (OFFSET, SCALE) APPLY TO THE RESULTING ARRAY diff --git a/psyneulink/core/components/mechanisms/modulatory/control/controlmechanism.py b/psyneulink/core/components/mechanisms/modulatory/control/controlmechanism.py index 549489c5ce6..fb3003db099 100644 --- a/psyneulink/core/components/mechanisms/modulatory/control/controlmechanism.py +++ 
b/psyneulink/core/components/mechanisms/modulatory/control/controlmechanism.py @@ -615,8 +615,8 @@ from psyneulink._typing import Optional, Union, Callable, Literal, Iterable from psyneulink.core.components.functions.nonstateful.transferfunctions import Identity -from psyneulink.core.components.functions.nonstateful.combinationfunctions import Concatenate -from psyneulink.core.components.functions.nonstateful.combinationfunctions import LinearCombination +from psyneulink.core.components.functions.nonstateful.transformfunctions import Concatenate +from psyneulink.core.components.functions.nonstateful.transformfunctions import LinearCombination from psyneulink.core.components.mechanisms.mechanism import Mechanism, Mechanism_Base, MechanismError from psyneulink.core.components.mechanisms.modulatory.modulatorymechanism import ModulatoryMechanism_Base from psyneulink.core.components.ports.inputport import InputPort diff --git a/psyneulink/core/components/mechanisms/processing/objectivemechanism.py b/psyneulink/core/components/mechanisms/processing/objectivemechanism.py index 44abba283cf..a253c1df2c0 100644 --- a/psyneulink/core/components/mechanisms/processing/objectivemechanism.py +++ b/psyneulink/core/components/mechanisms/processing/objectivemechanism.py @@ -180,7 +180,7 @@ ` and/or 'exponent ` attributes of the corresponding InputPorts, it can be configured to calculate differences, ratios, etc. (see `example ` below). The `function ` can also -be replaced with any `CombinationFunction `, or any python function that takes an 2d array as +be replaced with any `CombinationFunction `, or any python function that takes an 2d array as its input (with a number of items in axis 0 equal to the number of the ObjectiveMechanism's InputPorts), and generates a 1d array as its result. If it implements :keyword:`weight` and/or :keyword:`exponent` attributes, those are assigned from `weight ` and `exponent ` attributes of its `input_ports @@ -372,7 +372,7 @@ from psyneulink._typing import Optional, Union -from psyneulink.core.components.functions.nonstateful.combinationfunctions import LinearCombination +from psyneulink.core.components.functions.nonstateful.transformfunctions import LinearCombination from psyneulink.core.components.mechanisms.mechanism import MechanismError from psyneulink.core.components.mechanisms.processing.processingmechanism import ProcessingMechanism_Base from psyneulink.core.components.ports.inputport import InputPort, INPUT_PORT @@ -465,7 +465,7 @@ class ObjectiveMechanism(ProcessingMechanism_Base): function : CombinationFunction, ObjectiveFunction, function, or method the function used to evaluate the values monitored by the ObjectiveMechanism. The function can be any - `CombinationFunction ` or a Python function that takes a 2d array with an arbitrary + `CombinationFunction ` or a Python function that takes a 2d array with an arbitrary number of items or a number equal to the number of items in the ObjectiveMechanism's variable (i.e., its number of input_ports) and returns a 1d array. 
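For downstream code, the hunks above for controlmechanism.py and objectivemechanism.py, and the ones below for transfermechanism.py, inputport.py, controlsignal.py, port.py, learningprojection.py, mappingprojection.py, projection.py, and composition.py, all reduce to the same mechanical update: imports move from the old combinationfunctions module to transformfunctions, and LinearMatrix becomes MatrixTransform. A minimal sketch of the corresponding change in user code, assuming a PsyNeuLink build that includes this rename:

    # Old import path, removed by this patch series:
    # from psyneulink.core.components.functions.nonstateful.combinationfunctions import LinearCombination

    # New import path after the module rename:
    from psyneulink.core.components.functions.nonstateful.transformfunctions import (
        LinearCombination,   # same class, new module
        Concatenate,         # same class, new module
        MatrixTransform,     # replaces LinearMatrix, formerly in transferfunctions.py
    )

Code that imported LinearMatrix needs both the new module path and the new class name, since LinearMatrix is removed from transferfunctions.py in the hunks above.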
diff --git a/psyneulink/core/components/mechanisms/processing/transfermechanism.py b/psyneulink/core/components/mechanisms/processing/transfermechanism.py index 842e72e40c3..32e72ee825c 100644 --- a/psyneulink/core/components/mechanisms/processing/transfermechanism.py +++ b/psyneulink/core/components/mechanisms/processing/transfermechanism.py @@ -828,7 +828,7 @@ from psyneulink._typing import Optional, Union, Literal from psyneulink.core import llvm as pnlvm -from psyneulink.core.components.functions.nonstateful.combinationfunctions import LinearCombination, SUM +from psyneulink.core.components.functions.nonstateful.transformfunctions import LinearCombination, SUM from psyneulink.core.components.functions.nonstateful.distributionfunctions import DistributionFunction from psyneulink.core.components.functions.function import Function, is_function_type from psyneulink.core.components.functions.nonstateful.objectivefunctions import Distance diff --git a/psyneulink/core/components/ports/inputport.py b/psyneulink/core/components/ports/inputport.py index f226a1bdfdb..251995cfcbc 100644 --- a/psyneulink/core/components/ports/inputport.py +++ b/psyneulink/core/components/ports/inputport.py @@ -581,7 +581,7 @@ from psyneulink.core.components.component import DefaultsFlexibility from psyneulink.core.components.functions.function import Function -from psyneulink.core.components.functions.nonstateful.combinationfunctions import CombinationFunction, LinearCombination +from psyneulink.core.components.functions.nonstateful.transformfunctions import CombinationFunction, LinearCombination from psyneulink.core.components.ports.outputport import OutputPort from psyneulink.core.components.ports.port import PortError, Port_Base, _instantiate_port_list, port_type_keywords from psyneulink.core.globals.context import ContextFlags, handle_external_context @@ -727,7 +727,7 @@ class InputPort(Port_Base): expected for any `path_afferent Projections `. function : Function - if it is a `CombinationFunction `, it combines the `values ` of + if it is a `CombinationFunction `, it combines the `values ` of the `PathwayProjections ` (e.g., `MappingProjections `) received by the InputPort (listed in its `path_afferents ` attribute), under the possible influence of `GatingProjections ` received by the InputPort (listed in its `mod_afferents diff --git a/psyneulink/core/components/ports/modulatorysignals/controlsignal.py b/psyneulink/core/components/ports/modulatorysignals/controlsignal.py index 4fab5f23733..7f60c82a093 100644 --- a/psyneulink/core/components/ports/modulatorysignals/controlsignal.py +++ b/psyneulink/core/components/ports/modulatorysignals/controlsignal.py @@ -408,7 +408,7 @@ # SHOULD THEY BE LIMITED TO EVC?? 
from psyneulink.core import llvm as pnlvm from psyneulink.core.components.functions.function import is_function_type -from psyneulink.core.components.functions.nonstateful.combinationfunctions import Reduce +from psyneulink.core.components.functions.nonstateful.transformfunctions import Reduce from psyneulink.core.components.functions.nonstateful.transferfunctions import Exponential, Linear, CostFunctions, \ TransferWithCosts from psyneulink.core.components.functions.stateful.integratorfunctions import SimpleIntegrator diff --git a/psyneulink/core/components/ports/port.py b/psyneulink/core/components/ports/port.py index 8e92734b6ba..c9040259df7 100644 --- a/psyneulink/core/components/ports/port.py +++ b/psyneulink/core/components/ports/port.py @@ -783,7 +783,7 @@ def test_multiple_modulatory_projections_with_mech_and_port_Name_specs(self): from psyneulink.core.components.component import ComponentError, DefaultsFlexibility, component_keywords from psyneulink.core.components.functions.function import \ Function, get_param_value_for_keyword, is_function_type, RandomMatrix -from psyneulink.core.components.functions.nonstateful.combinationfunctions import CombinationFunction, LinearCombination +from psyneulink.core.components.functions.nonstateful.transformfunctions import CombinationFunction, LinearCombination from psyneulink.core.components.functions.nonstateful.transferfunctions import Linear from psyneulink.core.components.shellclasses import Mechanism, Projection, Port from psyneulink.core.globals.context import ContextFlags, handle_external_context @@ -2011,7 +2011,7 @@ def set_projection_value(projection, value, context): if projection_variable is None: projection_variable = projection.sender.parameters.value._get(context) # KDM 8/14/19: this fallback seems to always happen on the first execution - # of the Projection's function (LinearMatrix). Unsure if this is intended or not + # of the Projection's function (MatrixTransform). 
Unsure if this is intended or not if projection_variable is None: projection_variable = projection.function.defaults.value projection.parameters.variable._set(projection_variable, context) diff --git a/psyneulink/core/components/projections/modulatory/learningprojection.py b/psyneulink/core/components/projections/modulatory/learningprojection.py index 8381376f12b..f1ed398bcc0 100644 --- a/psyneulink/core/components/projections/modulatory/learningprojection.py +++ b/psyneulink/core/components/projections/modulatory/learningprojection.py @@ -207,7 +207,7 @@ from psyneulink._typing import Callable, Literal, Mapping, Optional, Union from psyneulink.core.components.component import parameter_keywords -from psyneulink.core.components.functions.nonstateful.combinationfunctions import LinearCombination +from psyneulink.core.components.functions.nonstateful.transformfunctions import LinearCombination from psyneulink.core.components.functions.nonstateful.learningfunctions import BackPropagation, Reinforcement from psyneulink.core.components.functions.nonstateful.transferfunctions import Linear from psyneulink.core.components.mechanisms.modulatory.learning.learningmechanism import LearningMechanism diff --git a/psyneulink/core/components/projections/pathway/mappingprojection.py b/psyneulink/core/components/projections/pathway/mappingprojection.py index f60514dda42..712a26c1523 100644 --- a/psyneulink/core/components/projections/pathway/mappingprojection.py +++ b/psyneulink/core/components/projections/pathway/mappingprojection.py @@ -33,7 +33,7 @@ A MappingProjection transmits the `value ` of an `OutputPort` of one `ProcessingMechanism ` (its `sender `) to the `InputPort` of another (its `receiver `). The default `function ` for a MappingProjection is -`LinearMatrix`, which uses the MappingProjection's `matrix ` attribute to transform the +`MatrixTransform`, which uses the MappingProjection's `matrix ` attribute to transform the value received from its `sender ` and provide the result to its `receiver `. @@ -256,7 +256,7 @@ This conforms to the general procedures for modulation used by `ModulatoryProjections ` A LearningProjection `modulates ` the `function ` of the *MATRIX* ParameterPort, which is responsible for keeping a record of the value of the MappingProjection's matrix, -and providing it to the MappingProjection's `function ` (usually `LinearMatrix`). By +and providing it to the MappingProjection's `function ` (usually `MatrixTransform`). By default, the function for the *MATRIX* ParameterPort is an `AccumulatorIntegrator`. A LearningProjection modulates it by assigning the value of its `additive_param ` (`increment `), which is added to its `previous_value ` @@ -264,7 +264,7 @@ executes its *MATRIX* ParameterPort, the `weight changes ` conveyed to the MappingProjection from any LearningProjection(s) are added to the record of the matrix kept by the *MATRIX* ParameterPort's `AccumulatorIntegrator` function in its `previous_value ` -attribute. This is then the value of the matrix used by the MappingProjection's `LinearMatrix` function when it is +attribute. This is then the value of the matrix used by the MappingProjection's `MatrixTransform` function when it is executed. 
It is important to note that the accumulated weight changes received by a MappingProjection from its LearningProjection(s) are stored by the *MATRIX* ParameterPort's function, and not the MappingProjection's `matrix ` parameter itself; the latter stores the original value of the matrix before learning (that @@ -291,7 +291,7 @@ from psyneulink.core.components.component import parameter_keywords from psyneulink.core.components.functions.stateful.integratorfunctions import AccumulatorIntegrator -from psyneulink.core.components.functions.nonstateful.transferfunctions import LinearMatrix +from psyneulink.core.components.functions.nonstateful.transformfunctions import MatrixTransform from psyneulink.core.components.functions.function import get_matrix from psyneulink.core.components.projections.pathway.pathwayprojection import PathwayProjection_Base from psyneulink.core.components.projections.projection import ProjectionError, projection_keywords @@ -416,7 +416,7 @@ class Parameters(PathwayProjection_Base.Parameters): function see `function ` - :default value: `LinearMatrix` + :default value: `MatrixTransform` :type: `Function` matrix @@ -425,7 +425,7 @@ class Parameters(PathwayProjection_Base.Parameters): :default value: `AUTO_ASSIGN_MATRIX` :type: ``str`` """ - function = Parameter(LinearMatrix, stateful=False, loggable=False) + function = Parameter(MatrixTransform, stateful=False, loggable=False) matrix = FunctionParameter( DEFAULT_MATRIX, setter=_mapping_projection_matrix_setter diff --git a/psyneulink/core/components/projections/projection.py b/psyneulink/core/components/projections/projection.py index 6418c6de96a..025eb4ab27b 100644 --- a/psyneulink/core/components/projections/projection.py +++ b/psyneulink/core/components/projections/projection.py @@ -409,7 +409,7 @@ from psyneulink.core.components.component import ComponentError from psyneulink.core.components.functions.function import get_matrix, ValidMatrixSpecType from psyneulink.core.components.mechanisms.processing.processingmechanism import ProcessingMechanism -from psyneulink.core.components.functions.nonstateful.transferfunctions import LinearMatrix +from psyneulink.core.components.functions.nonstateful.transformfunctions import MatrixTransform from psyneulink.core.components.ports.modulatorysignals.modulatorysignal import _is_modulatory_spec from psyneulink.core.components.ports.port import PortError from psyneulink.core.components.shellclasses import Mechanism, Process_Base, Projection, Port @@ -511,7 +511,7 @@ class Projection_Base(Projection): """ Projection_Base( \ sender=None, \ - function=LinearMatrix, \ + function=MatrixTransform, \ receiver=None, \ feedback=None \ ) @@ -546,7 +546,7 @@ class Projection_Base(Projection): the context in which the Projection is used, or its initialization will be `deferred `. - function : TransferFunction : default LinearMatrix + function : TransferFunction : default MatrixTransform specifies function used to convey (and potentially convert) `value ` of `sender ` `Port` to `variable ` of `receiver ` Port. 
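As a rough illustration of what this conveyance amounts to under MatrixTransform (the renamed LinearMatrix), the sketch below applies a projection-style weight matrix to a sender's value with plain numpy; the array values are made up and this is not the library's own code path:

    import numpy as np

    # Hypothetical sender value (1d) and a 2x3 weight matrix (illustrative values only).
    sender_value = np.array([1.0, 0.5])
    matrix = np.array([[1.0, 0.0, 2.0],
                       [0.0, 1.0, 1.0]])

    # A MatrixTransform-style projection conveys the sender's value to the receiver's
    # variable as a dot product with the projection's matrix (the same np.dot()
    # relationship the updated tests below assert for pnl.MatrixTransform).
    receiver_variable = np.dot(sender_value, matrix)   # -> array([1. , 0.5, 2.5])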
@@ -634,7 +634,7 @@ class Parameters(Projection.Parameters): function see `function ` - :default value: `LinearMatrix` + :default value: `MatrixTransform` :type: `Function` weight @@ -645,7 +645,7 @@ class Parameters(Projection.Parameters): """ weight = Parameter(None, modulable=True) exponent = Parameter(None, modulable=True) - function = Parameter(LinearMatrix, stateful=False, loggable=False) + function = Parameter(MatrixTransform, stateful=False, loggable=False) registry = ProjectionRegistry @@ -845,7 +845,7 @@ def _validate_params(self, request_set, target_set=None, context=None): # if MATRIX in target_set and target_set[MATRIX] is not None: # matrix = target_set[MATRIX] # # If matrix_spec is keyword and sender and receiver have been instantiated, implement matrix - # # so that it can be passed to function (e.g., LinearMatrix) if needed. + # # so that it can be passed to function (e.g., MatrixTransform) if needed. # if not is_matrix(matrix): # raise ProjectionError(f"Matrix ('{matrix}') specified for '{self.name}' is not a legal matrix spec.") # if self.sender_instantiated and self.receiver_instantiated: @@ -1209,7 +1209,7 @@ def as_mdf_model(self, simple_edge_format=True): func_model = [f for f in edge_node.functions if f.id == parse_valid_identifier(f'{edge_node.id}_{edge_function.name}')][0] var_name = _get_variable_parameter_name(edge_function) - # 2d variable on LinearMatrix will be incorrect on import back to psyneulink + # 2d variable on MatrixTransform will be incorrect on import back to psyneulink func_model.metadata[var_name] = func_model.metadata[var_name][-1] pre_edge = mdf.Edge( diff --git a/psyneulink/core/compositions/composition.py b/psyneulink/core/compositions/composition.py index 2b41a117863..cecbe74c70e 100644 --- a/psyneulink/core/compositions/composition.py +++ b/psyneulink/core/compositions/composition.py @@ -2903,7 +2903,7 @@ def input_function(env, result): from psyneulink.core import llvm as pnlvm from psyneulink.core.components.component import Component, ComponentError, ComponentsMeta from psyneulink.core.components.functions.function import is_function_type, Function, RandomMatrix -from psyneulink.core.components.functions.nonstateful.combinationfunctions import \ +from psyneulink.core.components.functions.nonstateful.transformfunctions import \ LinearCombination, PredictionErrorDeltaFunction from psyneulink.core.components.functions.nonstateful.learningfunctions import \ LearningFunction, Reinforcement, BackPropagation, TDLearning diff --git a/psyneulink/core/globals/keywords.py b/psyneulink/core/globals/keywords.py index 6d45edb8487..fd7751974a8 100644 --- a/psyneulink/core/globals/keywords.py +++ b/psyneulink/core/globals/keywords.py @@ -70,13 +70,13 @@ 'INPUT_PORT_VARIABLES', 'INPUTS_DIM', 'INSET', 'CURRENT_VALUE', 'INTEGRATION_TYPE', 'INTEGRATOR_FUNCTION','INTEGRATOR_FUNCTION', 'INTEGRATOR_FUNCTION_TYPE', 'INTEGRATOR_MECHANISM', 'LAST_INTEGRATED_VALUE', 'INTERCEPT', 'INTERNAL', 'INTERNAL_ONLY', - 'K_VALUE', 'KOHONEN_FUNCTION', 'KOHONEN_MECHANISM', 'KOHONEN_LEARNING_MECHANISM', 'KWTA_MECHANISM', + 'K_VALUE', 'KOHONEN_FUNCTION', 'KOHONEN_MECHANISM', 'KOHONEN_LEARNING_MECHANISM', 'KWTA_MECHANISM', 'L0', 'LABELS', 'LCA_MECHANISM', 'LEAKY_COMPETING_INTEGRATOR_FUNCTION', 'LEAK', 'LEARNABLE', 'LEARNED_PROJECTIONS', 'LEARNING', 'LEARNING_FUNCTION', 'LEARNING_FUNCTION_TYPE', 'LEARNING_OBJECTIVE', 'LEARNING_MECHANISM', 'LEARNING_MECHANISMS', 'LEARNING_PATHWAY', 'LEARNING_PROJECTION', 'LEARNING_PROJECTION_PARAMS', 'LEARNING_RATE', 'LEARNING_SCALE', 
'LEARNING_SCALE_LITERALS', 'LEARNING_SCALE_NAMES', 'LEARNING_SIGNAL', 'LEARNING_SIGNAL_SPECS', 'LEARNING_SIGNALS', 'LESS_THAN', 'LESS_THAN_OR_EQUAL', 'LINEAR', 'LINEAR_COMBINATION_FUNCTION', 'LINEAR_FUNCTION', - 'LINEAR_MATRIX_FUNCTION', 'LOG_ENTRIES', 'LOGISTIC_FUNCTION', 'Loss', 'LOSSES', 'LOW', 'LVOC_CONTROL_MECHANISM', + 'LINEAR_TRANSFORM_FUNCTION', 'LOG_ENTRIES', 'LOGISTIC_FUNCTION', 'Loss', 'LOSSES', 'LOW', 'LVOC_CONTROL_MECHANISM', 'MAPPING_PROJECTION', 'MAPPING_PROJECTION_PARAMS', 'MASKED_MAPPING_PROJECTION', 'MATRIX', 'MATRIX_KEYWORD_NAMES', 'MATRIX_KEYWORD_SET', 'MATRIX_KEYWORD_VALUES', 'MATRIX_KEYWORDS','MatrixKeywords', 'MATRIX_WEIGHTS', 'MAX_ABS_DIFF', 'MAX_ABS_INDICATOR', 'MAX_ONE_HOT', 'MAX_ABS_ONE_HOT', 'MAX_ABS_VAL', @@ -740,11 +740,12 @@ class Loss(Enum): ARGUMENT_THERAPY_FUNCTION = "Contradiction Function" USER_DEFINED_FUNCTION = "USER DEFINED FUNCTION" -# CombinationFunctions: +# Transformfunctions: REDUCE_FUNCTION = "Reduce Function" CONCATENATE_FUNCTION = "Concatenate Function" REARRANGE_FUNCTION = 'Rearrange Function' LINEAR_COMBINATION_FUNCTION = "LinearCombination Function" +LINEAR_TRANSFORM_FUNCTION = "MatrixTransform Function" COMBINE_MEANS_FUNCTION = "CombineMeans Function" # TransferFunctions: @@ -761,7 +762,6 @@ class Loss(Enum): BINOMIAL_DISTORT_FUNCTION = 'BinomialDistort Function' DROPOUT_FUNCTION = 'Dropout Function' SOFTMAX_FUNCTION = 'SoftMax Function' -LINEAR_MATRIX_FUNCTION = "LinearMatrix Function" TRANSFER_WITH_COSTS_FUNCTION = "TransferWithCosts Function" # SelectionFunctions: diff --git a/psyneulink/library/components/mechanisms/modulatory/control/agt/lccontrolmechanism.py b/psyneulink/library/components/mechanisms/modulatory/control/agt/lccontrolmechanism.py index 2ce1e6afeab..8d696aec8ef 100644 --- a/psyneulink/library/components/mechanisms/modulatory/control/agt/lccontrolmechanism.py +++ b/psyneulink/library/components/mechanisms/modulatory/control/agt/lccontrolmechanism.py @@ -327,11 +327,32 @@ __all__ = [ 'CONTROL_SIGNAL_NAME', 'LCControlMechanism', 'LCControlMechanismError', - 'MODULATED_MECHANISMS', + 'MODULATED_MECHANISMS', 'MODE_FITZHUGHNAGUMO', 'TIME_STEP_SIZE_FITZHUGHNAGUMO', 'T_0_FITZHUGHNAGUMO', + 'TIME_CONSTANT_V_FITZHUGHNAGUMO', 'TIME_CONSTANT_W_FITZHUGHNAGUMO', + 'A_V_FITZHUGHNAGUMO', 'B_V_FITZHUGHNAGUMO', 'C_V_FITZHUGHNAGUMO', 'D_V_FITZHUGHNAGUMO', 'E_V_FITZHUGHNAGUMO', + 'F_V_FITZHUGHNAGUMO', 'A_W_FITZHUGHNAGUMO', 'B_W_FITZHUGHNAGUMO', 'C_W_FITZHUGHNAGUMO', + 'THRESHOLD_FITZHUGHNAGUMO', 'MODE_FITZHUGHNAGUMO', 'UNCORRELATED_ACTIVITY_FITZHUGHNAGUMO' ] MODULATED_MECHANISMS = 'modulated_mechanisms' CONTROL_SIGNAL_NAME = 'LCControlMechanism_ControlSignal' +MODE_FITZHUGHNAGUMO = 'mode_FitzHughNagumo' +TIME_STEP_SIZE_FITZHUGHNAGUMO = 'time_step_size_FitzHughNagumo' +T_0_FITZHUGHNAGUMO = 't_0_FitzHughNagumo' +A_V_FITZHUGHNAGUMO = 'a_v_FitzHughNagumo' +B_V_FITZHUGHNAGUMO = 'b_v_FitzHughNagumo' +C_V_FITZHUGHNAGUMO = 'c_v_FitzHughNagumo' +D_V_FITZHUGHNAGUMO = 'd_v_FitzHughNagumo' +E_V_FITZHUGHNAGUMO = 'e_v_FitzHughNagumo' +F_V_FITZHUGHNAGUMO = 'f_v_FitzHughNagumo' +TIME_CONSTANT_V_FITZHUGHNAGUMO = 'time_constant_v_FitzHughNagumo' +A_W_FITZHUGHNAGUMO = 'a_w_FitzHughNagumo' +B_W_FITZHUGHNAGUMO = 'b_w_FitzHughNagumo' +C_W_FITZHUGHNAGUMO = 'c_w_FitzHughNagumo' +THRESHOLD_FITZHUGHNAGUMO = 'threshold_FitzHughNagumo' +UNCORRELATED_ACTIVITY_FITZHUGHNAGUMO = 'uncorrelated_activity_FitzHughNagumo' +TIME_CONSTANT_W_FITZHUGHNAGUMO = 'time_constant_w_FitzHughNagumo' + class LCControlMechanismError(ControlMechanismError): pass diff --git 
a/psyneulink/library/components/mechanisms/processing/integrator/ddm.py b/psyneulink/library/components/mechanisms/processing/integrator/ddm.py index 86358052b65..f61442192de 100644 --- a/psyneulink/library/components/mechanisms/processing/integrator/ddm.py +++ b/psyneulink/library/components/mechanisms/processing/integrator/ddm.py @@ -373,7 +373,7 @@ DriftDiffusionIntegrator, IntegratorFunction from psyneulink.core.components.functions.nonstateful.distributionfunctions import STARTING_VALUE, \ DriftDiffusionAnalytical -from psyneulink.core.components.functions.nonstateful.combinationfunctions import Reduce +from psyneulink.core.components.functions.nonstateful.transformfunctions import Reduce from psyneulink.core.components.mechanisms.modulatory.control.controlmechanism import _is_control_spec from psyneulink.core.components.mechanisms.mechanism import MechanismError from psyneulink.core.components.mechanisms.processing.processingmechanism import ProcessingMechanism diff --git a/psyneulink/library/components/mechanisms/processing/objective/comparatormechanism.py b/psyneulink/library/components/mechanisms/processing/objective/comparatormechanism.py index bc212dfa277..8583222f154 100644 --- a/psyneulink/library/components/mechanisms/processing/objective/comparatormechanism.py +++ b/psyneulink/library/components/mechanisms/processing/objective/comparatormechanism.py @@ -146,7 +146,7 @@ from psyneulink._typing import Optional, Union -from psyneulink.core.components.functions.nonstateful.combinationfunctions import LinearCombination +from psyneulink.core.components.functions.nonstateful.transformfunctions import LinearCombination from psyneulink.core.components.mechanisms.mechanism import Mechanism_Base, MechanismError from psyneulink.core.components.mechanisms.processing.objectivemechanism import ObjectiveMechanism from psyneulink.core.components.shellclasses import Mechanism @@ -220,7 +220,7 @@ class ComparatorMechanism(ObjectiveMechanism): (see `ComparatorMechanism_Structure` for additional details). function : CombinationFunction, function or method - used to compare the `sample` with the `target`. It can be any `CombinationFunction `, + used to compare the `sample` with the `target`. It can be any `CombinationFunction `, or a python function that takes a 2d array with two items and returns a 1d array of the same length as the two input items. 
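For instance, a user-supplied comparison function consistent with that contract might look like the following sketch (the function name is hypothetical and subtraction is just one possible comparison):

    import numpy as np

    def sample_target_difference(variable):
        # variable is a 2d array whose two items are the SAMPLE and TARGET values;
        # return a 1d array with the same length as each of those items.
        sample, target = np.asarray(variable[0]), np.asarray(variable[1])
        return target - sample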
diff --git a/psyneulink/library/components/mechanisms/processing/objective/predictionerrormechanism.py b/psyneulink/library/components/mechanisms/processing/objective/predictionerrormechanism.py index c8b85868739..88b82ca8e03 100644 --- a/psyneulink/library/components/mechanisms/processing/objective/predictionerrormechanism.py +++ b/psyneulink/library/components/mechanisms/processing/objective/predictionerrormechanism.py @@ -170,7 +170,7 @@ from psyneulink._typing import Optional, Union -from psyneulink.core.components.functions.nonstateful.combinationfunctions import PredictionErrorDeltaFunction +from psyneulink.core.components.functions.nonstateful.transformfunctions import PredictionErrorDeltaFunction from psyneulink.core.components.mechanisms.mechanism import Mechanism_Base from psyneulink.core.components.ports.outputport import OutputPort from psyneulink.core.globals.keywords import PREDICTION_ERROR_MECHANISM, SAMPLE, TARGET diff --git a/psyneulink/library/components/mechanisms/processing/transfer/lcamechanism.py b/psyneulink/library/components/mechanisms/processing/transfer/lcamechanism.py index 022b7a3dd86..afef287e087 100644 --- a/psyneulink/library/components/mechanisms/processing/transfer/lcamechanism.py +++ b/psyneulink/library/components/mechanisms/processing/transfer/lcamechanism.py @@ -6,7 +6,7 @@ # See the License for the specific language governing permissions and limitations under the License. # NOTES: -# * NOW THAT NOISE AND BETA ARE PROPRETIES THAT DIRECTLY REFERERNCE integrator_function, +# * NOW THAT NOISE AND BETA ARE PROPERTIES THAT DIRECTLY REFERERNCE integrator_function, # SHOULD THEY NOW BE VALIDATED ONLY THERE (AND NOT IN TransferMechanism)?? # * ARE THOSE THE ONLY TWO integrator PARAMS THAT SHOULD BE PROPERTIES?? diff --git a/psyneulink/library/components/mechanisms/processing/transfer/recurrenttransfermechanism.py b/psyneulink/library/components/mechanisms/processing/transfer/recurrenttransfermechanism.py index 6e173e83aec..ff58048f456 100644 --- a/psyneulink/library/components/mechanisms/processing/transfer/recurrenttransfermechanism.py +++ b/psyneulink/library/components/mechanisms/processing/transfer/recurrenttransfermechanism.py @@ -193,7 +193,7 @@ from psyneulink.core import llvm as pnlvm from psyneulink.core.components.component import _get_parametervalue_attr -from psyneulink.core.components.functions.nonstateful.combinationfunctions import LinearCombination +from psyneulink.core.components.functions.nonstateful.transformfunctions import LinearCombination from psyneulink.core.components.functions.function import Function, get_matrix from psyneulink.core.components.functions.nonstateful.learningfunctions import Hebbian from psyneulink.core.components.functions.nonstateful.objectivefunctions import Stability @@ -239,8 +239,8 @@ # Used to specify learning_condition UPDATE = 'UPDATE' CONVERGENCE = 'CONVERGENCE' -ENERGY_OUTPUT_PORT_NAME='ENERGY' -ENTROPY_OUTPUT_PORT_NAME='ENTROPY' +ENERGY_OUTPUT_PORT_NAME=ENERGY +ENTROPY_OUTPUT_PORT_NAME=ENTROPY diff --git a/psyneulink/library/components/projections/pathway/autoassociativeprojection.py b/psyneulink/library/components/projections/pathway/autoassociativeprojection.py index dbf0b5ef076..0b94fe3a1f3 100644 --- a/psyneulink/library/components/projections/pathway/autoassociativeprojection.py +++ b/psyneulink/library/components/projections/pathway/autoassociativeprojection.py @@ -64,7 +64,7 @@ ~~~~~~~~~~~~~~~~~~~~~~~~~ Due to its specialized nature, most parameters of the AutoAssociativeProjection are not 
configurable: the `variable` is -determined by the format of the output of the RecurrentTransferMechanism, the `function` is always LinearMatrix, and so +determined by the format of the output of the RecurrentTransferMechanism, the `function` is always MatrixTransform, and so on. The only configurable parameter is the matrix, configured through the **matrix**, **auto**, and/or **hetero** arguments for a RecurrentTransferMechanism: @@ -105,7 +105,7 @@ from psyneulink._typing import Optional from psyneulink.core.components.component import parameter_keywords -from psyneulink.core.components.functions.nonstateful.transferfunctions import LinearMatrix +from psyneulink.core.components.functions.nonstateful.transformfunctions import MatrixTransform from psyneulink.core.components.functions.function import get_matrix from psyneulink.core.components.projections.pathway.mappingprojection import MappingError, MappingProjection from psyneulink.library.components.projections.pathway.maskedmappingprojection import MaskedMappingProjection @@ -212,7 +212,7 @@ class Parameters(MappingProjection.Parameters): function see `function ` - :default value: `LinearMatrix` + :default value: `MatrixTransform` :type: `Function` hetero @@ -228,8 +228,8 @@ class Parameters(MappingProjection.Parameters): :type: ``str`` """ variable = Parameter(np.array([[0]]), read_only=True, pnl_internal=True, constructor_argument='default_variable') - # function is always LinearMatrix that requires 1D input - function = Parameter(LinearMatrix, stateful=False, loggable=False) + # function is always MatrixTransform that requires 1D input + function = Parameter(MatrixTransform, stateful=False, loggable=False) auto = SharedParameter(1, attribute_name=OWNER_MECH) hetero = SharedParameter(0, attribute_name=OWNER_MECH) @@ -271,7 +271,7 @@ def __init__(self, # temporary override to make sure matrix/auto/hetero parameters # get passed properly. 
should be replaced with a better organization # of auto/hetero, in which the base parameters are stored either on - # AutoAssociativeProjection or on LinearMatrix itself + # AutoAssociativeProjection or on MatrixTransform itself def _instantiate_parameter_classes(self, context): if FUNCTION not in self.initial_shared_parameters: try: diff --git a/psyneulink/library/components/projections/pathway/maskedmappingprojection.py b/psyneulink/library/components/projections/pathway/maskedmappingprojection.py index 922df7338c8..11991b8190a 100644 --- a/psyneulink/library/components/projections/pathway/maskedmappingprojection.py +++ b/psyneulink/library/components/projections/pathway/maskedmappingprojection.py @@ -158,7 +158,7 @@ class Parameters(MappingProjection.Parameters): :default value: `MULTIPLY` :type: ``str`` """ - variable = np.array([[0]]) # function is always LinearMatrix that requires 1D input + variable = np.array([[0]]) # function is always MatrixTransform that requires 1D input mask = None mask_operation = MULTIPLY diff --git a/psyneulink/library/compositions/emcomposition.py b/psyneulink/library/compositions/emcomposition.py index a8bbe69a235..46acce0308d 100644 --- a/psyneulink/library/compositions/emcomposition.py +++ b/psyneulink/library/compositions/emcomposition.py @@ -88,14 +88,14 @@ # - COMPILATION: # - Remove CIM projections on import to another composition # - Autodiff support for IdentityFunction -# - LinearMatrix to add normalization +# - MatrixTransform to add normalization # - _store() method to assign weights to memory # - LLVM problem with ComparatorMechanism # # - pytorchcreator_function: # SoftMax implementation: torch.nn.Softmax(dim=0) is not getting passed correctly # Implement LinearCombination -# - LinearMatrix Function: +# - MatrixTransform Function: # # - LEARNING - Backpropagation LearningFunction / LearningMechanism # - DOCUMENTATION: @@ -1027,8 +1027,9 @@ import psyneulink.core.scheduling.condition as conditions from psyneulink._typing import Optional, Union -from psyneulink.core.components.functions.nonstateful.transferfunctions import SoftMax, LinearMatrix -from psyneulink.core.components.functions.nonstateful.combinationfunctions import Concatenate, LinearCombination +from psyneulink.core.components.functions.nonstateful.transferfunctions import SoftMax +from psyneulink.core.components.functions.nonstateful.transformfunctions import ( + Concatenate, LinearCombination, MatrixTransform) from psyneulink.core.components.functions.function import DEFAULT_SEED, _random_state_getter, _seed_setter from psyneulink.core.compositions.composition import CompositionError, NodeRole from psyneulink.library.compositions.autodiffcomposition import AutodiffComposition, torch_available @@ -1040,9 +1041,9 @@ from psyneulink.core.globals.parameters import Parameter, check_user_specified from psyneulink.core.globals.context import handle_external_context from psyneulink.core.globals.keywords import \ - (ADAPTIVE, ALL, ARG_MAX, ARG_MAX_INDICATOR, AUTO, CONTEXT, CONTROL, DEFAULT_INPUT, DEFAULT_VARIABLE, - EM_COMPOSITION, FULL_CONNECTIVITY_MATRIX, GAIN, IDENTITY_MATRIX, MULTIPLICATIVE_PARAM, NAME, - PARAMS, PROB_INDICATOR, PRODUCT, PROJECTIONS, RANDOM, INPUT_SHAPES, VARIABLE, Loss) + (ADAPTIVE, ALL, ARG_MAX, ARG_MAX_INDICATOR, AUTO, CONTEXT, CONTROL, DEFAULT_INPUT, DEFAULT_VARIABLE, DOT_PRODUCT, + EM_COMPOSITION, FULL_CONNECTIVITY_MATRIX, GAIN, IDENTITY_MATRIX, INPUT_SHAPES, L0, + MULTIPLICATIVE_PARAM, NAME, PARAMS, PROB_INDICATOR, PRODUCT, PROJECTIONS, RANDOM, VARIABLE) from 
psyneulink.core.globals.utilities import convert_all_elements_to_np_array, is_numeric_scalar from psyneulink.core.globals.registry import name_without_suffix from psyneulink.core.llvm import ExecutionMode @@ -1621,8 +1622,7 @@ def __init__(self, self._validate_memory_specs(memory_template, memory_capacity, memory_fill, field_weights, field_names, name) memory_template, memory_capacity = self._parse_memory_template(memory_template, memory_capacity, - memory_fill, - field_weights) + memory_fill) field_weights, field_names, concatenate_queries = self._parse_fields(field_weights, normalize_field_weights, field_names, @@ -1810,7 +1810,7 @@ def _validate_memory_specs(self, memory_template, memory_capacity, memory_fill, f"in the 'field_names' arg for {name} must match " f"the number of fields ({field_weights_len}).") - def _parse_memory_template(self, memory_template, memory_capacity, memory_fill, field_weights)->(np.ndarray,int): + def _parse_memory_template(self, memory_template, memory_capacity, memory_fill)->(np.ndarray,int): """Construct memory from memory_template and memory_fill Assign self.memory_template and self.entry_template attributes """ @@ -2145,9 +2145,6 @@ def _construct_query_input_nodes(self, field_weights)->list: f"PROGRAM ERROR: number of keys ({self.num_keys}) does not match number of " \ f"non-zero values in field_weights ({len(self.key_indices)})." - # query_input_nodes = [ProcessingMechanism(input_shapes=len(self.entry_template[self.key_indices[i]]), - # name=f'{self.key_names[self.key_indices[i]]} [QUERY]') - # for i in range(self.num_keys)] query_input_nodes = [ProcessingMechanism( input_shapes=len(self.entry_template[self.key_indices[i]]), name=f'{self.key_names[i]} [QUERY]') @@ -2202,6 +2199,9 @@ def _construct_match_nodes(self, memory_template, memory_capacity, concatenate_q from each query_input_node[i] to each match_node[i]. - Each element of the output represents the similarity between the query_input and one key in memory. 
""" + OPERATION = 0 + NORMALIZE = 1 + args = [(L0,False) if len(key) == 1 else (DOT_PRODUCT,normalize_memories) for key in memory_template[0]] if concatenate_queries: # Get fields of memory structure corresponding to the keys @@ -2216,8 +2216,9 @@ def _construct_match_nodes(self, memory_template, memory_capacity, concatenate_q INPUT_SHAPES: memory_capacity, PROJECTIONS: MappingProjection(sender=self.concatenate_queries_node, matrix=matrix, - function=LinearMatrix( - normalize=normalize_memories), + function=MatrixTransform( + operation=args[0][OPERATION], + normalize=args[0][NORMALIZE]), name=f'MEMORY')}, name='MATCH')] @@ -2230,12 +2231,14 @@ def _construct_match_nodes(self, memory_template, memory_capacity, concatenate_q PROJECTIONS: MappingProjection(sender=self.query_input_nodes[i].output_port, matrix = np.array( memory_template[:,i].tolist()).transpose().astype(float), - function=LinearMatrix(normalize=normalize_memories), + function=MatrixTransform(operation=args[i][OPERATION], + normalize=args[i][NORMALIZE]), name=f'MEMORY for {self.key_names[i]} [KEY]')}, name=self.key_names[i] + MATCH_TO_KEYS_AFFIX) for i in range(self.num_keys) ] + return match_nodes # FIX: CONVERT TO _construct_weight_control_nodes diff --git a/psyneulink/library/compositions/pytorchwrappers.py b/psyneulink/library/compositions/pytorchwrappers.py index cdf13733d80..b9ecbeed8b4 100644 --- a/psyneulink/library/compositions/pytorchwrappers.py +++ b/psyneulink/library/compositions/pytorchwrappers.py @@ -17,7 +17,7 @@ from enum import Enum, auto -from psyneulink.core.components.functions.nonstateful.combinationfunctions import LinearCombination, PRODUCT, SUM +from psyneulink.core.components.functions.nonstateful.transformfunctions import LinearCombination, PRODUCT, SUM from psyneulink.core.components.functions.stateful.integratorfunctions import IntegratorFunction from psyneulink.core.components.functions.stateful import StatefulFunction from psyneulink.core.components.mechanisms.processing.transfermechanism import TransferMechanism @@ -1015,7 +1015,7 @@ def execute_function(function, variable, fct_has_mult_args=False, is_combination self.input = variable # Compute main function of mechanism and return result - from psyneulink.core.components.functions.nonstateful.combinationfunctions import CombinationFunction + from psyneulink.core.components.functions.nonstateful.transformfunctions import CombinationFunction self.output = execute_function(self.function, variable, is_combination_fct=isinstance(self._mechanism.function, CombinationFunction)) return self.output diff --git a/tests/composition/test_composition.py b/tests/composition/test_composition.py index 04a6dfadee2..737496110fb 100644 --- a/tests/composition/test_composition.py +++ b/tests/composition/test_composition.py @@ -8,7 +8,7 @@ import pytest import psyneulink as pnl -from psyneulink.core.components.functions.nonstateful.combinationfunctions import LinearCombination +from psyneulink.core.components.functions.nonstateful.transformfunctions import LinearCombination from psyneulink.core.components.functions.nonstateful.learningfunctions import Reinforcement, BackPropagation, TDLearning from psyneulink.core.components.functions.nonstateful.optimizationfunctions import GridSearch from psyneulink.core.components.functions.nonstateful.transferfunctions import \ diff --git a/tests/functions/test_combination.py b/tests/functions/test_combination.py index c3e72a648a2..ebfbb98f7c0 100644 --- a/tests/functions/test_combination.py +++ 
b/tests/functions/test_combination.py @@ -63,8 +63,8 @@ def test_arrangement_has_non_numeric_index(self): # @pytest.mark.function # @pytest.mark.combination_function # def test_column_vector(self): - # R_function = pnl.core.components.functions.combinationfunctions.Reduce(operation=pnl.SUM) - # R_mechanism = pnl.ProcessingMechanism(function=pnl.core.components.functions.combinationfunctions.Reduce(operation=pnl.SUM), + # R_function = pnl.core.components.functions.transformfunctions.Reduce(operation=pnl.SUM) + # R_mechanism = pnl.ProcessingMechanism(function=pnl.core.components.functions.transformfunctions.Reduce(operation=pnl.SUM), # default_variable=[[1], [2], [3], [4], [5]], # name="R_mechanism") # @@ -78,8 +78,8 @@ def test_arrangement_has_non_numeric_index(self): # @pytest.mark.function # @pytest.mark.combination_function # def test_matrix(self): - # R_function = pnl.core.components.functions.combinationfunctions.Reduce(operation=pnl.SUM) - # R_mechanism = pnl.ProcessingMechanism(function=pnl.core.components.functions.combinationfunctions.Reduce(operation=pnl.SUM), + # R_function = pnl.core.components.functions.transformfunctions.Reduce(operation=pnl.SUM) + # R_mechanism = pnl.ProcessingMechanism(function=pnl.core.components.functions.transformfunctions.Reduce(operation=pnl.SUM), # default_variable=[[1, 2, 3], [4, 5, 6], [7, 8, 9]], # name="R_mechanism") # diff --git a/tests/functions/test_transfer.py b/tests/functions/test_transfer.py index fd7b8bce527..a9667ef0836 100644 --- a/tests/functions/test_transfer.py +++ b/tests/functions/test_transfer.py @@ -106,9 +106,9 @@ def binomial_distort_helper(seed): np.where(softmax_helper2 == np.max(softmax_helper2), 1, 0), id="SOFT_MAX MAX_INDICATOR PER_ITEM"), # Linear Matrix - pytest.param(pnl.LinearMatrix, test_var, {kw.MATRIX:test_matrix}, np.dot(test_var, test_matrix), id="LINEAR_MATRIX SQUARE"), - pytest.param(pnl.LinearMatrix, test_var, {kw.MATRIX:test_matrix_l}, np.dot(test_var, test_matrix_l), id="LINEAR_MATRIX WIDE"), - pytest.param(pnl.LinearMatrix, test_var, {kw.MATRIX:test_matrix_s}, np.dot(test_var, test_matrix_s), id="LINEAR_MATRIX TALL"), + pytest.param(pnl.MatrixTransform, test_var, {kw.MATRIX:test_matrix}, np.dot(test_var, test_matrix), id="LINEAR_MATRIX SQUARE"), + pytest.param(pnl.MatrixTransform, test_var, {kw.MATRIX:test_matrix_l}, np.dot(test_var, test_matrix_l), id="LINEAR_MATRIX WIDE"), + pytest.param(pnl.MatrixTransform, test_var, {kw.MATRIX:test_matrix_s}, np.dot(test_var, test_matrix_s), id="LINEAR_MATRIX TALL"), # Dropout is just identity in non-learning mode pytest.param(pnl.Dropout, test_var, {}, test_var, id="DROPOUT"), diff --git a/tests/log/test_log.py b/tests/log/test_log.py index 30375fe9dfe..686c73e6d47 100644 --- a/tests/log/test_log.py +++ b/tests/log/test_log.py @@ -96,6 +96,7 @@ def test_log(self): 'func_max_executions_before_finished': 'OFF', 'func_normalize': 'OFF', 'func_num_executions_before_finished': 'OFF', + 'func_operation': 'OFF', 'func_value': 'OFF', 'has_initializers': 'OFF', 'func_variable': 'OFF', @@ -192,6 +193,7 @@ def test_log(self): 'func_max_executions_before_finished': 'OFF', 'func_normalize': 'OFF', 'func_num_executions_before_finished': 'OFF', + 'func_operation': 'OFF', 'func_value': 'OFF', 'func_variable': 'OFF', 'has_initializers': 'OFF', @@ -348,6 +350,7 @@ def test_log_dictionary_without_time(self): 'func_max_executions_before_finished': 'OFF', 'func_normalize': 'OFF', 'func_num_executions_before_finished': 'OFF', + 'func_operation': 'OFF', 'func_value': 'OFF', 'func_variable': 
'OFF', 'has_initializers': 'OFF', @@ -448,6 +451,7 @@ def test_log_dictionary_without_time(self): 'func_max_executions_before_finished': 'OFF', 'func_normalize': 'OFF', 'func_num_executions_before_finished': 'OFF', + 'func_operation': 'OFF', 'func_value': 'OFF', 'func_variable': 'OFF', 'has_initializers': 'OFF', diff --git a/tests/mechanisms/test_input_port_spec.py b/tests/mechanisms/test_input_port_spec.py index 482deefa83e..c4bf7604d65 100644 --- a/tests/mechanisms/test_input_port_spec.py +++ b/tests/mechanisms/test_input_port_spec.py @@ -3,7 +3,7 @@ import pytest import re -from psyneulink.core.components.functions.nonstateful.combinationfunctions import Reduce +from psyneulink.core.components.functions.nonstateful.transformfunctions import Reduce from psyneulink.core.components.mechanisms.modulatory.control.gating.gatingmechanism import GatingMechanism from psyneulink.core.components.mechanisms.mechanism import MechanismError from psyneulink.core.components.mechanisms.processing.transfermechanism import TransferMechanism diff --git a/tests/mechanisms/test_processing_mechanism.py b/tests/mechanisms/test_processing_mechanism.py index 192c2e18eb5..c15489dbe2c 100644 --- a/tests/mechanisms/test_processing_mechanism.py +++ b/tests/mechanisms/test_processing_mechanism.py @@ -9,8 +9,9 @@ from psyneulink.core.components.functions.stateful.integratorfunctions import SimpleIntegrator, \ AdaptiveIntegrator, DriftDiffusionIntegrator, OrnsteinUhlenbeckIntegrator, FitzHughNagumoIntegrator, \ AccumulatorIntegrator, DualAdaptiveIntegrator -from psyneulink.core.components.functions.nonstateful.transferfunctions import Linear, Exponential, Logistic, SoftMax, LinearMatrix -from psyneulink.core.components.functions.nonstateful.combinationfunctions import Reduce, LinearCombination, CombineMeans +from psyneulink.core.components.functions.nonstateful.transferfunctions import Linear, Exponential, Logistic, SoftMax +from psyneulink.core.components.functions.nonstateful.transformfunctions import \ + CombineMeans, LinearCombination, MatrixTransform, Reduce from psyneulink.core.components.mechanisms.processing.processingmechanism import ProcessingMechanism from psyneulink.core.components.ports.outputport import OutputPort from psyneulink.core.globals.keywords import \ @@ -206,23 +207,23 @@ def test_processing_mechanism_multiple_input_ports(self): assert mech.output_values == [[1],[2]] # Note: this is list of values of its OutputPorts -class TestLinearMatrixFunction: +class TestMatrixTransformFunction: def test_valid_matrix_specs(self): # Note: default matrix specification is None - PM_default = ProcessingMechanism(function=LinearMatrix()) + PM_default = ProcessingMechanism(function=MatrixTransform()) PM_default.execute(1.0) np.testing.assert_allclose(PM_default.value, 1.0) - PM_default_len_2_var = ProcessingMechanism(function=LinearMatrix(default_variable=[[0.0, 0.0]]), + PM_default_len_2_var = ProcessingMechanism(function=MatrixTransform(default_variable=[[0.0, 0.0]]), default_variable=[[0.0, 0.0]]) PM_default_len_2_var.execute([[1.0, 2.0]]) np.testing.assert_allclose(PM_default_len_2_var.value, [[1.0, 2.0]]) - PM_default_2d_var = ProcessingMechanism(function=LinearMatrix(default_variable=[[0.0, 0.0], + PM_default_2d_var = ProcessingMechanism(function=MatrixTransform(default_variable=[[0.0, 0.0], [0.0, 0.0], [0.0, 0.0]]), default_variable=[[0.0, 0.0], @@ -240,17 +241,17 @@ def test_valid_matrix_specs(self): [0.0, 2.0], [3.0, 0.0]]) - # PM_float = ProcessingMechanism(function=LinearMatrix(matrix=4.0)) + # 
PM_float = ProcessingMechanism(function=MatrixTransform(matrix=4.0)) # PM_float.execute(1.0) # # np.testing.assert_allclose(PM_float.value, 4.0) - PM_1d_list = ProcessingMechanism(function=LinearMatrix(matrix=[4.0])) + PM_1d_list = ProcessingMechanism(function=MatrixTransform(matrix=[4.0])) PM_1d_list.execute(1.0) np.testing.assert_allclose(PM_1d_list.value, 4.0) - PM_2d_list = ProcessingMechanism(function=LinearMatrix(matrix=[[4.0, 5.0], + PM_2d_list = ProcessingMechanism(function=MatrixTransform(matrix=[[4.0, 5.0], [6.0, 7.0], [8.0, 9.0], [10.0, 11.0]], @@ -268,17 +269,17 @@ def test_valid_matrix_specs(self): [8.0, 9.0], [10.0, 11.0]]) - PM_1d_array = ProcessingMechanism(function=LinearMatrix(matrix=np.array([4.0]))) + PM_1d_array = ProcessingMechanism(function=MatrixTransform(matrix=np.array([4.0]))) PM_1d_array.execute(1.0) np.testing.assert_allclose(PM_1d_array.value, 4.0) - PM_2d_array = ProcessingMechanism(function=LinearMatrix(matrix=np.array([[4.0]]))) + PM_2d_array = ProcessingMechanism(function=MatrixTransform(matrix=np.array([[4.0]]))) PM_2d_array.execute(1.0) np.testing.assert_allclose(PM_2d_array.value, 4.0) - PM_matrix = ProcessingMechanism(function=LinearMatrix(matrix=np.array([[4.0]]))) + PM_matrix = ProcessingMechanism(function=MatrixTransform(matrix=np.array([[4.0]]))) PM_matrix.execute(1.0) np.testing.assert_allclose(PM_matrix.value, 4.0) @@ -286,7 +287,7 @@ def test_valid_matrix_specs(self): def test_invalid_matrix_specs(self): with pytest.raises(FunctionError) as error_text: - PM_mismatched_float = ProcessingMechanism(function=LinearMatrix(default_variable=0.0, + PM_mismatched_float = ProcessingMechanism(function=MatrixTransform(default_variable=0.0, matrix=[[1.0, 0.0, 0.0, 0.0], [0.0, 2.0, 0.0, 0.0], [0.0, 0.0, 3.0, 0.0], @@ -296,7 +297,7 @@ def test_invalid_matrix_specs(self): "not compatible for multiplication" in str(error_text.value) with pytest.raises(FunctionError) as error_text: - PM_mismatched_matrix = ProcessingMechanism(function=LinearMatrix(default_variable=[[0.0, 0.0], + PM_mismatched_matrix = ProcessingMechanism(function=MatrixTransform(default_variable=[[0.0, 0.0], [0.0, 0.0], [0.0, 0.0]], matrix=[[1.0, 0.0, 0.0, 0.0], diff --git a/tests/mechanisms/test_recurrent_transfer_mechanism.py b/tests/mechanisms/test_recurrent_transfer_mechanism.py index 7b7ad71d6a3..3ec955f88ad 100644 --- a/tests/mechanisms/test_recurrent_transfer_mechanism.py +++ b/tests/mechanisms/test_recurrent_transfer_mechanism.py @@ -4,7 +4,7 @@ import psyneulink as pnl from psyneulink.core.compositions.composition import Composition -from psyneulink.core.components.functions.nonstateful.combinationfunctions import Reduce +from psyneulink.core.components.functions.nonstateful.transformfunctions import Reduce from psyneulink.core.components.functions.nonstateful.distributionfunctions import NormalDist from psyneulink.core.components.functions.function import FunctionError, get_matrix from psyneulink.core.components.functions.nonstateful.learningfunctions import Reinforcement diff --git a/tests/mechanisms/test_transfer_mechanism.py b/tests/mechanisms/test_transfer_mechanism.py index 49482129b28..84f73a09d4a 100644 --- a/tests/mechanisms/test_transfer_mechanism.py +++ b/tests/mechanisms/test_transfer_mechanism.py @@ -6,7 +6,7 @@ from psyneulink.core.components.functions.nonstateful.learningfunctions import Reinforcement from psyneulink.core.components.functions.stateful.integratorfunctions import AccumulatorIntegrator, AdaptiveIntegrator from 
psyneulink.core.components.functions.nonstateful.transferfunctions import Linear, Exponential, Logistic, ReLU, SoftMax -from psyneulink.core.components.functions.nonstateful.combinationfunctions import Reduce +from psyneulink.core.components.functions.nonstateful.transformfunctions import Reduce from psyneulink.core.components.functions.userdefinedfunction import UserDefinedFunction from psyneulink.core.components.functions.nonstateful.distributionfunctions import NormalDist, UniformToNormalDist, \ ExponentialDist, \ diff --git a/tests/ports/test_input_ports.py b/tests/ports/test_input_ports.py index fdf5773b40e..73a1df9f67d 100644 --- a/tests/ports/test_input_ports.py +++ b/tests/ports/test_input_ports.py @@ -2,7 +2,7 @@ import pytest import psyneulink as pnl -import psyneulink.core.components.functions.nonstateful.combinationfunctions +import psyneulink.core.components.functions.nonstateful.transformfunctions import psyneulink.core.components.functions.nonstateful.transferfunctions @@ -25,7 +25,7 @@ def test_combine_param_redundant_fct_class_spec(self): t2 = pnl.TransferMechanism(input_shapes=2) t3 = pnl.TransferMechanism( input_shapes=2, - input_ports=pnl.InputPort(function=psyneulink.core.components.functions.nonstateful.combinationfunctions + input_ports=pnl.InputPort(function=psyneulink.core.components.functions.nonstateful.transformfunctions .LinearCombination, combine=pnl.PRODUCT)) c = pnl.Composition(pathways=[[t1, t3],[t2, t3]]) @@ -38,7 +38,7 @@ def test_combine_param_redundant_fct_constructor_spec(self): t2 = pnl.TransferMechanism(input_shapes=2) t3 = pnl.TransferMechanism( input_shapes=2, - input_ports=pnl.InputPort(function=psyneulink.core.components.functions.nonstateful.combinationfunctions.LinearCombination(operation=pnl.PRODUCT), + input_ports=pnl.InputPort(function=psyneulink.core.components.functions.nonstateful.transformfunctions.LinearCombination(operation=pnl.PRODUCT), combine=pnl.PRODUCT)) c = pnl.Composition(pathways=[[t1, t3],[t2, t3]]) input_dict = {t1:[1,2],t2:[3,4]} @@ -47,7 +47,7 @@ def test_combine_param_redundant_fct_constructor_spec(self): def test_combine_param_conflicting_fct_operation_spec(self): with pytest.raises(pnl.InputPortError) as error_text: - t = pnl.TransferMechanism(input_ports=pnl.InputPort(function=psyneulink.core.components.functions.nonstateful.combinationfunctions.LinearCombination(operation=pnl.SUM), + t = pnl.TransferMechanism(input_ports=pnl.InputPort(function=psyneulink.core.components.functions.nonstateful.transformfunctions.LinearCombination(operation=pnl.SUM), combine=pnl.PRODUCT)) assert "Specification of 'combine' argument (PRODUCT) conflicts with specification of 'operation' (SUM) " \ "for LinearCombination in 'function' argument for InputPort" in str(error_text.value) From 8059f06a71b5d1cc73dd347db1e04290fd79ae05 Mon Sep 17 00:00:00 2001 From: jdcpni Date: Wed, 6 Nov 2024 10:51:24 -0500 Subject: [PATCH 400/410] Refactor/onehot options (#3102) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit • selectionfunctions.py OneHot: refactor to use options for direction, abs_val, indicator and tie --- .../EGO/Using EMComposition/ScriptControl.py | 4 +- psyneulink/core/components/component.py | 2 + .../nonstateful/selectionfunctions.py | 510 +++++++++++++----- psyneulink/core/globals/keywords.py | 21 +- .../library/compositions/compiledoptimizer.py | 2 +- 5 files changed, 382 insertions(+), 157 deletions(-) diff --git a/Scripts/Models (Under Development)/EGO/Using EMComposition/ScriptControl.py 
b/Scripts/Models (Under Development)/EGO/Using EMComposition/ScriptControl.py index 78a9bf96f1b..462b438ae02 100644 --- a/Scripts/Models (Under Development)/EGO/Using EMComposition/ScriptControl.py +++ b/Scripts/Models (Under Development)/EGO/Using EMComposition/ScriptControl.py @@ -2,8 +2,8 @@ # Settings for running script: -# MODEL_PARAMS = 'TestParams' -MODEL_PARAMS = 'DeclanParams' +MODEL_PARAMS = 'TestParams' +# MODEL_PARAMS = 'DeclanParams' CONSTRUCT_MODEL = True # THIS MUST BE SET TO True to run the script DISPLAY_MODEL = ( # Only one of the following can be uncommented: diff --git a/psyneulink/core/components/component.py b/psyneulink/core/components/component.py index 1ca300b305e..fa00fbf1be4 100644 --- a/psyneulink/core/components/component.py +++ b/psyneulink/core/components/component.py @@ -1505,6 +1505,8 @@ def _get_compilation_params(self): "retain_torch_trained_outputs", "retain_torch_targets", "retain_torch_losses" "torch_trained_outputs", "torch_targets", "torch_losses", # should be added to relevant _gen_llvm_function... when aug: + # OneHot: + 'abs_val', 'indicator', # SoftMax: 'mask_threshold', 'adapt_scale', 'adapt_base', 'adapt_entropy_weighting', # LCAMechanism diff --git a/psyneulink/core/components/functions/nonstateful/selectionfunctions.py b/psyneulink/core/components/functions/nonstateful/selectionfunctions.py index 50d10e80ab7..fb73085de62 100644 --- a/psyneulink/core/components/functions/nonstateful/selectionfunctions.py +++ b/psyneulink/core/components/functions/nonstateful/selectionfunctions.py @@ -23,6 +23,8 @@ __all__ = ['SelectionFunction', 'OneHot', 'max_vs_avg', 'max_vs_next'] +import warnings + import numpy as np from beartype import beartype @@ -35,22 +37,24 @@ _random_state_getter, _seed_setter, ) from psyneulink.core.globals.keywords import \ - (ARG_MAX, ARG_MAX_ABS, ARG_MAX_ABS_INDICATOR, ARG_MAX_INDICATOR, + (ALL, ARG_MAX, ARG_MAX_ABS, ARG_MAX_ABS_INDICATOR, ARG_MAX_INDICATOR, ARG_MIN, ARG_MIN_ABS, ARG_MIN_ABS_INDICATOR, ARG_MIN_INDICATOR, - MAX_ABS_INDICATOR, MAX_ABS_VAL, MAX_INDICATOR, MAX_VAL, - MIN_ABS_INDICATOR, MIN_ABS_VAL, MIN_INDICATOR, MIN_VAL, + DETERMINISTIC, FIRST, LAST, + MAX, MAX_ABS_INDICATOR, MAX_ABS_VAL, MAX_INDICATOR, MAX_VAL, + MIN, MIN_ABS_INDICATOR, MIN_ABS_VAL, MIN_INDICATOR, MIN_VAL, MODE, ONE_HOT_FUNCTION, PREFERENCE_SET_NAME, PROB, PROB_INDICATOR, - SELECTION_FUNCTION_TYPE) + RANDOM, SELECTION_FUNCTION_TYPE) from psyneulink.core.globals.parameters import Parameter, check_user_specified from psyneulink.core.globals.preferences.basepreferenceset import \ REPORT_OUTPUT_PREF, PreferenceEntry, PreferenceLevel, ValidPrefSet -options = [ ARG_MAX, ARG_MAX_ABS, ARG_MAX_INDICATOR, ARG_MAX_ABS_INDICATOR, - MAX_VAL, MAX_ABS_VAL, MAX_INDICATOR, MAX_ABS_INDICATOR, - ARG_MIN, ARG_MIN_ABS, ARG_MIN_INDICATOR, ARG_MIN_ABS_INDICATOR, - MIN_VAL, MIN_ABS_VAL, MIN_INDICATOR, MIN_ABS_INDICATOR, - PROB, PROB_INDICATOR] +mode_options = [DETERMINISTIC, PROB, PROB_INDICATOR, + ARG_MAX, ARG_MAX_ABS, ARG_MAX_INDICATOR, ARG_MAX_ABS_INDICATOR, + ARG_MIN, ARG_MIN_ABS, ARG_MIN_INDICATOR, ARG_MIN_ABS_INDICATOR, + MAX_VAL, MAX_ABS_VAL, MAX_INDICATOR, MAX_ABS_INDICATOR, + MIN_VAL, MIN_ABS_VAL, MIN_INDICATOR, MIN_ABS_INDICATOR] +tie_options = [ALL, FIRST, LAST, RANDOM] # FIX: IMPLEMENT AS Functions def max_vs_next(x): @@ -78,6 +82,10 @@ class OneHot(SelectionFunction): OneHot( \ default_variable, \ mode=MAX_VAL, \ + direction=MAX, \ + abs_val=FALSE \ + indicator=FALSE, \ + tie=ALL, \ params=None, \ owner=None, \ name=None, \ @@ -85,72 +93,92 @@ class 
OneHot(SelectionFunction): ) Return an array with one non-zero value. - COMMENT: - TBI: - refactor to have four parameters: (can continue to use KEYWORDS INTERNALLY and for LLVM) - extremum: max/min - value: scalar/indicator - ties: lowest/highest/all (re: indices) - prob: True/False (if True, ties are resolved probabilistically) - COMMENT .. _OneHot: `function ` returns an array the same length as the first item in `variable `, - with all of its values zeroed except one, unless there are ties, which are handled according to the choice of - `mode `, as follows: + with all of its values zeroed except one (unless there is a tie, which is handled as specified by **tie**); the + following options can be used in any combination: + + * **mode**: determines how the non-zero value(s) in the array is (are) selected + + * *DETERMINISTIC*: value (or 1) for the element(s) with the maximum or minimum value(s) in the array, + as specified by the options below; all other elements are zeroed; this is the default. + + * *PROB*: value of probabilistically chosen element based on probabilities passed in second item + of variable; if there is a tie, a single element is chosen probabilistically. + + * *PROB_INDICATOR*: same as *PROB* but chosen item is assigned a value of 1; + if there is a tie, a single element is chosen probabilistically. + + * **direction**: *MAX* (default) or *MIN* + determines whether the maximum or minimum value(s) in the array are selected. + + * **abs_val**: *False* (default) or *True* + determines whether the absolute values of the elements in the array are used to + select the maximum or minimum value(s). - * *ARG_MAX*: signed value of a single element with the maximum signed value, - or the one with lowest index if there are ties. + * **indicator**: *False* (default) or *True* + determines whether the selected value(s) is (are) replaced with a value of 1. - * *ARG_MAX_ABS*: absolute value of a single element with the maximum absolute value, - or the one with lowest index if there are ties. + * **tie**: *ALL* (default), *FIRST*, *LAST* or *RANDOM* + determines how a tie is handled when there is more than one element with the maximum or minimum value; - * *ARG_MAX_INDICATOR*: 1 in place of single element with maximum signed value, - or the one with lowest index if there are ties. + *ALL*: selects all elements in the tie; - * *ARG_MAX_ABS_INDICATOR*: 1 in place of single element with maximum absolute value, - or the one with lowest index if there are ties. + *FIRST*: selects the value of the element with the lowest index; - * *MAX_VAL*: signed value of the element with the maximum signed value, - or all elements with the maximum value if there are ties. + *LAST*: selects the value of the element with the highest index; - * *MAX_ABS_VAL*: absolute value of the element with the maximum absolute value, - or all elements with the maximum value if there are ties. + *RANDOM*: randomly selects one of the tied elements; - * *MAX_INDICATOR*: 1 in place of the element with the maximum signed value, - or all elements with the maximum value if there are ties. + The following convenience keywords can be used to specify particular combinations of options for the **mode** + argument together with the **tie** argument (these are included mainly for backward compatibility): - * *MAX_ABS_INDICATOR*: 1 in place of the element(s) with the maximum absolute value, - or all elements with the maximum value if there are ties.
+ * *ARG_MAX*: signed value of a single element with the maximum signed value, + or the one with lowest index if there is a tie. - * *ARG_MIN*: signed value of a single element with the minium signed value, - or the one with lowest index if there are ties. + * *ARG_MAX_ABS*: absolute value of a single element with the maximum absolute value, + or the one with lowest index if there is a tie. - * *ARG_MIN_ABS*: absolute value of a single element with the minium absolute value, - or the one with lowest index if there are ties. + * *ARG_MAX_INDICATOR*: 1 in place of single element with maximum signed value, + or the one with lowest index if there is a tie. - * *ARG_MIN_INDICATOR*: 1 in place of single element with minimum signed value, - or the one with lowest index if there are ties. + * *ARG_MAX_ABS_INDICATOR*: 1 in place of single element with maximum absolute value, + or the one with lowest index if there is a tie. - * *MIN_VAL*: signed value of the element with the minimum signed value, - or all elements with the minimum value if there are ties. + * *MAX_VAL*: signed value of the element with the maximum signed value; + if there is a tie, which elements are returned is determined by `tie_index `. - * *MIN_ABS_VAL*: absolute value of element with the minimum absolute value, - or all elements with the minimum value if there are ties. + * *MAX_ABS_VAL*: absolute value of the element with the maximum absolute value; + if there is a tie, which elements are returned is determined by `tie_index `. - * *MIN_INDICATOR*: 1 in place of the element with the minimum signed value, - or all elements with the minimum value if there are ties. + * *MAX_INDICATOR*: 1 in place of the element with the maximum signed value; + if there is a tie, which elements are returned is determined by `tie_index `. - * *MIN_ABS_INDICATOR*: 1 in place of the element with the minimum absolute value, - or all elements with the minimum value if there are ties. + * *MAX_ABS_INDICATOR*: 1 in place of the element(s) with the maximum absolute value; + if there is a tie, which elements are returned is determined by `tie_index `. - * *PROB*: value of probabilistically chosen element based on probabilities passed in second item of variable; - if there are ties, a single element is chosen probabilistically. + * *ARG_MIN*: signed value of a single element with the minium signed value, + or the one with lowest index if there is a tie. - * *PROB_INDICATOR*: same as *PROB* but chosen item is assigned a value of 1; - if there are ties, a single element is chosen probabilistically. + * *ARG_MIN_ABS*: absolute value of a single element with the minium absolute value, + or the one with lowest index if there is a tie. + * *ARG_MIN_INDICATOR*: 1 in place of single element with minimum signed value, + or the one with lowest index if there is a tie. + + * *MIN_VAL*: signed value of the element with the minimum signed value, + or all elements with the minimum value if there is a tie. + + * *MIN_ABS_VAL*: absolute value of element with the minimum absolute value; + if there is a tie, which elements are returned is determined by `tie_index `. + + * *MIN_INDICATOR*: 1 in place of the element with the minimum signed value; + if there is a tie, which elements are returned is determined by `tie_index `. + + * *MIN_ABS_INDICATOR*: 1 in place of the element with the minimum absolute value; + if there is a tie, which elements are returned is determined by `tie_index `. 
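As a concrete illustration of how the convenience keywords differ only in their implied tie handling (a plain-numpy paraphrase of the semantics above and of the _parse_mode mapping added later in this file, not output captured from the library):

    import numpy as np

    x = np.array([1., 3., 3., 2.])

    # MAX_INDICATOR (direction=MAX, indicator=True, tie=ALL): 1 at every tied maximum.
    max_indicator = np.where(x == x.max(), 1., 0.)      # [0., 1., 1., 0.]

    # ARG_MAX_INDICATOR (direction=MAX, indicator=True, tie=FIRST): 1 only at the
    # first (lowest-index) maximum.
    arg_max_indicator = np.zeros_like(x)
    arg_max_indicator[np.argmax(x)] = 1.                # [0., 1., 0., 0.]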
Arguments --------- @@ -159,13 +187,31 @@ class OneHot(SelectionFunction): First (possibly only) item specifies a template for the array to be transformed; if `mode ` is *PROB* then a 2nd item must be included that is a probability distribution with same length as 1st item. - mode : ARG_MAX, ARG_MAX_ABS, ARG_MAX_INDICATOR, ARG_MAX_ABS_INDICATOR, - MAX_VAL, MAX_ABS_VAL, MAX_INDICATOR, MAX_ABS_INDICATOR, + mode : DETERMINISITC, PROB, PROB_INDICATOR, + ARG_MAX, ARG_MAX_ABS, ARG_MAX_INDICATOR, ARG_MAX_ABS_INDICATOR, ARG_MIN, ARG_MIN_ABS, ARG_MIN_INDICATOR, ARG_MIN_ABS_INDICATOR, + MAX_VAL, MAX_ABS_VAL, MAX_INDICATOR, MAX_ABS_INDICATOR, MIN_VAL, MIN_ABS_VAL, MIN_INDICATOR, MIN_ABS_INDICATOR, - PROB or PROB_INDICATOR : default ARG_MAX - specifies how the single non-zero value in the array returned by `function ` is determined - (see `mode ` for details). + : default ARG_MAX + specifies how non-zero value(s) in the array returned by `function ` are determined + (see `above ` for details). + + direction : MAX or MIN : default MAX + specifies whether the maximum or minimum value(s) in the array are selected. + (see `above ` for details). + + abs_val : bool : default False + specifies whether the absolute values of the elements in the array are used to + select the maximum or minimum value(s). + (see `above ` for details). + + indicator : bool : default False + specifies whether the selected values(s) is (are) replace with a value of 1. + (see `above ` for details). + + tie : ALL, FIRST, LAST, RANDOM : default ALL + specifies how a tie is handled when there is more than one element with the maximum or minimum value; + (see `above ` for details). params : Dict[param keyword: param value] : default None a `parameter dictionary ` that specifies the parameters for the @@ -191,13 +237,31 @@ class OneHot(SelectionFunction): distribution, each element of which specifies the probability for selecting the corresponding element of the 1st item. - mode : ARG_MAX, ARG_MAX_ABS, ARG_MAX_INDICATOR, ARG_MAX_ABS_INDICATOR, - MAX_VAL, MAX_ABS_VAL, MAX_INDICATOR, MAX_ABS_INDICATOR, + mode : DETERMINISITC, PROB, PROB_INDICATOR, + ARG_MAX, ARG_MAX_ABS, ARG_MAX_INDICATOR, ARG_MAX_ABS_INDICATOR, ARG_MIN, ARG_MIN_ABS, ARG_MIN_INDICATOR, ARG_MIN_ABS_INDICATOR, + MAX_VAL, MAX_ABS_VAL, MAX_INDICATOR, MAX_ABS_INDICATOR, MIN_VAL, MIN_ABS_VAL, MIN_INDICATOR, MIN_ABS_INDICATOR, - PROB or PROB_INDICATOR - determines how the single non-zero value in the array returned by `function ` is determined - (see `above ` for options). + : default ARG_MAX + specifies how non-zero value(s) in the array returned by `function ` are determined + (see `above ` for details). + + direction : MAX or MIN + determines whether the maximum or minimum value(s) in the array are selected. + (see `above ` for details). + + abs_val : bool + determines whether the absolute values of the elements in the array are used to + select the maximum or minimum value(s). + (see `above ` for details). + + indicator : bool + determines whether the selected values(s) is (are) replace with a value of 1. + (see `above ` for details). + + tie : ALL, FIRST, LAST, RANDOM + determines how a tie is handled when there is more than one element with the maximum or minimum value; + (see `above ` for details). 
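For example (a sketch based only on the constructor signature and the _parse_mode mapping shown in this diff; equivalence of the two forms in every edge case is not verified here):

    from psyneulink.core.components.functions.nonstateful.selectionfunctions import OneHot
    from psyneulink.core.globals.keywords import (
        ARG_MIN_ABS_INDICATOR, DETERMINISTIC, FIRST, MIN)

    # Explicit options: 1 in place of the first element with the minimum absolute value...
    one_hot_explicit = OneHot(mode=DETERMINISTIC, direction=MIN, abs_val=True,
                              indicator=True, tie=FIRST)

    # ...which, per _parse_mode, is what the backward-compatible convenience keyword
    # ARG_MIN_ABS_INDICATOR maps to (direction=MIN, abs_val=True, indicator=True, tie=FIRST).
    one_hot_keyword = OneHot(mode=ARG_MIN_ABS_INDICATOR)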
random_state : numpy.RandomState private pseudorandom number generator @@ -230,7 +294,31 @@ class Parameters(SelectionFunction.Parameters): mode see `mode ` - :default value: `MAX_VAL` + :default value: `DETERMINISTIC` + :type: ``str`` + + direction + see `direction ` + + :default value: `MAX` + :type: ``str`` + + abs_val + see `abs_val ` + + :default value: `False` + :type: ``bool`` + + indicator + see `indicator ` + + :default value: `False` + :type: ``bool`` + + tie + see `tie ` + + :default value: `ALL` :type: ``str`` random_state @@ -239,25 +327,38 @@ class Parameters(SelectionFunction.Parameters): :default value: None :type: ``numpy.random.RandomState`` """ - mode = Parameter(MAX_VAL, stateful=False) + mode = Parameter(DETERMINISTIC, stateful=False) + direction = Parameter(MAX, stateful=False) + abs_val = Parameter(False, stateful=False) + indicator = Parameter(False, stateful=False) + tie = Parameter(ALL, stateful=False) random_state = Parameter(None, loggable=False, getter=_random_state_getter, dependencies='seed') seed = Parameter(DEFAULT_SEED(), modulable=True, fallback_default=True, setter=_seed_setter) def _validate_mode(self, mode): - if mode not in options: + if mode not in mode_options: + # returns error message + return 'not one of {0}'.format(mode_options) + + def _validate_ties(self, tie_index): + if tie_index not in tie_options: # returns error message - return 'not one of {0}'.format(options) + return 'not one of {0}'.format(tie_options) @check_user_specified @beartype def __init__(self, default_variable=None, mode: Optional[Literal[ + DETERMINISTIC, PROB, PROB_INDICATOR, ARG_MAX, ARG_MAX_ABS, ARG_MAX_INDICATOR, ARG_MAX_ABS_INDICATOR, - MAX_VAL, MAX_ABS_VAL, MAX_INDICATOR, MAX_ABS_INDICATOR, ARG_MIN, ARG_MIN_ABS, ARG_MIN_INDICATOR, ARG_MIN_ABS_INDICATOR, - MIN_VAL, MIN_ABS_VAL, MIN_INDICATOR, MIN_ABS_INDICATOR, - PROB, PROB_INDICATOR]] = None, + MAX_VAL, MAX_ABS_VAL, MAX_INDICATOR, MAX_ABS_INDICATOR, + MIN_VAL, MIN_ABS_VAL, MIN_INDICATOR, MIN_ABS_INDICATOR]] = None, + direction: Optional[Literal[MAX, MIN]] = None, + abs_val: Optional[bool] = None, + indicator: Optional[bool] = None, + tie: Optional[Literal[ALL, FIRST, LAST, RANDOM]]= None, seed=None, params=None, owner=None, @@ -271,6 +372,10 @@ def __init__(self, super().__init__( default_variable=default_variable, mode=mode, + direction=direction, + abs_val=abs_val, + indicator=indicator, + tie=tie, seed=seed, params=params, owner=owner, @@ -306,6 +411,15 @@ def _validate_params(self, request_set, target_set=None, context=None): "array of probabilities that sum to 1". format(MODE, self.__class__.__name__, Function.__name__, PROB, prob_dist)) + elif request_set[MODE] != DETERMINISTIC: + # Ensure that mode is not specified counter to other options (except tie) + if any([self.parameters.direction._user_specified, + self.parameters.abs_val._user_specified, + self.parameters.indicator._user_specified]): + raise FunctionError(f"If {MODE} for {self.__class__.__name__} {Function.__name__} is not " + f"set to 'DETERMINIST', then the 'direction', 'abs_val', and 'indicator' args " + f"cannot be specified.") + def _gen_llvm_function_body(self, ctx, builder, params, state, arg_in, arg_out, *, tags:frozenset): best_idx_ptr = builder.alloca(ctx.int32_ty) builder.store(best_idx_ptr.type.pointee(0), best_idx_ptr) @@ -421,6 +535,114 @@ def _gen_llvm_function_body(self, ctx, builder, params, state, arg_in, arg_out, return builder + def _parse_mode(self, mode): + """Convert mode spec to corresponding options. 
+ Here for convenience, but mostly for backward compatibility with old mode spec. + """ + + direction = None + abs_val = None + indicator = None + tie = None + + if mode == ARG_MAX: + direction = MAX + abs_val = False + indicator = False + tie = FIRST + + elif mode == ARG_MAX_ABS: + direction = MAX + abs_val = True + indicator = False + tie = FIRST + + elif mode == ARG_MAX_INDICATOR: + direction = MAX + abs_val = False + indicator = True + tie = FIRST + + elif mode == ARG_MAX_ABS_INDICATOR: + direction = MAX + abs_val = True + indicator = True + tie = FIRST + + elif mode == MAX_VAL: + direction = MAX + abs_val = False + indicator = False + tie = ALL + + elif mode == MAX_ABS_VAL: + direction = MAX + abs_val = True + indicator = False + tie = ALL + + elif mode == MAX_INDICATOR: + direction = MAX + abs_val = False + indicator = True + tie = ALL + + elif mode == MAX_ABS_INDICATOR: + direction = MAX + abs_val = True + indicator = True + tie = ALL + + elif mode == ARG_MIN: + direction = MIN + abs_val = False + indicator = False + tie = FIRST + + elif mode == ARG_MIN_ABS: + direction = MIN + abs_val = True + indicator = False + tie = FIRST + + elif mode == ARG_MIN_INDICATOR: + direction = MIN + abs_val = False + indicator = True + tie = FIRST + + elif mode == ARG_MIN_ABS_INDICATOR: + direction = MIN + abs_val = True + indicator = True + tie = FIRST + + elif mode == MIN_VAL: + direction = MIN + abs_val = False + indicator = False + tie = ALL + + elif mode == MIN_ABS_VAL: + direction = MIN + abs_val = True + indicator = False + tie = ALL + + elif mode == MIN_INDICATOR: + direction = MIN + abs_val = False + indicator = True + tie = ALL + + elif mode == MIN_ABS_INDICATOR: + direction = MIN + abs_val = True + indicator = True + tie = ALL + + return direction, abs_val, indicator, tie + def _function(self, variable=None, context=None, @@ -433,7 +655,7 @@ def _function(self, variable : 2d np.array : default class_defaults.variable 1st item is an array to be transformed; if `mode ` is *PROB*, 2nd item must be an array of - probabilities (i.e., elements between 0 and 1) of equal length to the 1st item. + probabilities (i.e., elements between 0 and 1) of equal length as the 1st item. params : Dict[param keyword: param value] : default None a `parameter dictionary ` that specifies the parameters for the @@ -443,85 +665,24 @@ def _function(self, Returns ------- - array with single non-zero value : np.array - specified by `mode `. - + array with selected elements having non-zero values and all others having zeroes : np.array + specified by `mode `, `direction `, `abs_val `, + `indicator `, and `tie `. 
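For orientation, the selection semantics documented above can be reproduced in a few lines of plain numpy. The following is an illustrative sketch only (not the implementation that follows), with lowercase strings standing in for the keywords::

    import numpy as np

    def one_hot_sketch(array, direction='max', abs_val=False, indicator=False, tie='all', seed=None):
        a = np.asarray(array, dtype=float)
        if abs_val:
            a = np.abs(a)
        extreme = a.max() if direction == 'max' else a.min()
        hits = np.flatnonzero(a == extreme)          # indices of all tied extreme values
        if tie == 'first':
            hits = hits[:1]
        elif tie == 'last':
            hits = hits[-1:]
        elif tie == 'random':
            hits = np.random.default_rng(seed).choice(hits, size=1)
        result = np.zeros_like(a)
        result[hits] = 1 if indicator else extreme
        return result

    one_hot_sketch([3., -3., 2.], abs_val=True)                     # -> [3., 3., 0.]  (both extremes tie)
    one_hot_sketch([3., -3., 2.], abs_val=True, tie='first')        # -> [3., 0., 0.]
    one_hot_sketch([3., -3., 2.], direction='min', indicator=True)  # -> [0., 1., 0.]

    # PROB / PROB_INDICATOR instead draw an element using a second (probability) item:
    rng = np.random.RandomState(0)
    v, p = np.array([5., 2., 9.]), np.array([.2, .5, .3])
    idx = np.searchsorted(np.cumsum(p), rng.uniform(), side='right')
    prob = np.where(np.arange(len(v)) == idx, v, 0.)   # PROB_INDICATOR would use 1 in place of v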
""" - if self.mode == ARG_MAX: - max_idx = np.argmax(variable) - result = np.zeros_like(variable) - result[max_idx] = variable[max_idx] - - elif self.mode == ARG_MAX_ABS: - max_idx = np.argmax(np.absolute(variable)) - result = np.zeros_like(variable) - result[max_idx] = np.absolute(variable[max_idx]) - - elif self.mode == ARG_MAX_INDICATOR: - max_idx = np.argmax(variable) - result = np.zeros_like(variable) - result[max_idx] = 1 - - elif self.mode == ARG_MAX_ABS_INDICATOR: - max_idx = np.argmax(np.absolute(variable)) - result = np.zeros_like(variable) - result[max_idx] = 1 - - elif self.mode == MAX_VAL: - max_value = np.max(variable) - result = np.where(variable == max_value, variable, 0) - - elif self.mode == MAX_ABS_VAL: - max_value = np.max(np.absolute(variable)) - result = np.where(np.absolute(variable)==max_value, np.absolute(variable), 0) - - elif self.mode == MAX_INDICATOR: - max_value = np.max(variable) - result = np.where(variable == max_value, 1, 0) - - elif self.mode == MAX_ABS_INDICATOR: - max_value = np.max(np.absolute(variable)) - result = np.where(np.absolute(variable) == max_value, 1, 0) - - elif self.mode == ARG_MIN: - max_idx = np.argmin(variable) - result = np.zeros_like(variable) - result[max_idx] = variable[max_idx] - - elif self.mode == ARG_MIN_ABS: - max_idx = np.argmin(np.absolute(variable)) - result = np.zeros_like(variable) - result[max_idx] = np.absolute(variable[max_idx]) - - elif self.mode == ARG_MIN_INDICATOR: - max_idx = np.argmin(variable) - result = np.zeros_like(variable) - result[max_idx] = 1 - - elif self.mode == ARG_MIN_ABS_INDICATOR: - max_idx = np.argmin(np.absolute(variable)) - result = np.zeros_like(variable) - result[max_idx] = 1 - - elif self.mode == MIN_VAL: - min_value = np.min(variable) - result = np.where(variable == min_value, min_value, 0) - - elif self.mode == MIN_ABS_VAL: - min_value = np.min(np.absolute(variable)) - result = np.where(np.absolute(variable) == min_value, np.absolute(variable), 0) - - elif self.mode == MIN_INDICATOR: - min_value = np.min(variable) - result = np.where(variable == min_value, 1, 0) - - elif self.mode == MIN_ABS_INDICATOR: - min_value = np.min(np.absolute(variable)) - result = np.where(np.absolute(variable) == min_value, 1, 0) - - elif self.mode in {PROB, PROB_INDICATOR}: + + mode = self.parameters.mode.get(context) + direction = self.parameters.direction.get(context) + abs_val = self.parameters.abs_val.get(context) + indicator = self.parameters.indicator.get(context) + tie = self.parameters.tie.get(context) + + if mode in {PROB, PROB_INDICATOR}: # 1st item of variable should be data, and 2nd a probability distribution for choosing + if np.array(variable).ndim != 2: + raise FunctionError(f"If {MODE} for {self.__class__.__name__} {Function.__name__} is set to " + f"'PROB' or 'PROB_INDICATOR', variable must be a 2d array with the first item " + f"being the data and the second being a probability distribution.") v = variable[0] prob_dist = variable[1] # if not prob_dist.any() and INITIALIZING in context: @@ -532,12 +693,65 @@ def _function(self, random_value = random_state.uniform() chosen_item = next(element for element in cum_sum if element > random_value) chosen_in_cum_sum = np.where(cum_sum == chosen_item, 1, 0) - if self.mode is PROB: + if mode is PROB: result = v * chosen_in_cum_sum else: result = np.ones_like(v) * chosen_in_cum_sum # chosen_item = np.random.choice(v, 1, p=prob_dist) # one_hot_indicator = np.where(v == chosen_item, 1, 0) # return v * one_hot_indicator + return result + + elif mode is not 
DETERMINISTIC: + direction, abs_val, indicator, tie = self._parse_mode(mode) + + # if np.array(variable).ndim != 1: + # raise FunctionError(f"If {MODE} for {self.__class__.__name__} {Function.__name__} is not set to " + # f"'PROB' or 'PROB_INDICATOR', variable must be a 1d array: {variable}.") + + array = variable + + max = None + min = None + + if abs_val is True: + array = np.absolute(array) + + if direction == MAX: + max = np.max(array) + if max == -np.inf: + warnings.warn(f"Array passed to {self.name} of {self.owner.name} " + f"is all -inf.") + else: + min = np.min(array) + if min == np.inf: + warnings.warn(f"Array passed to {self.name} of {self.owner.name} " + f"is all inf.") + + extreme_val = max if direction == MAX else min + + if tie == ALL: + if direction == MAX: + result = np.where(array == max, max, -np.inf) + else: + result = np.where(array == min, min, np.inf) + else: + if tie == FIRST: + index = np.min(np.where(array == extreme_val)) + elif tie == LAST: + index = np.max(np.where(array == extreme_val)) + elif tie == RANDOM: + index = np.random.choice(np.where(array == extreme_val)) + else: + assert False, f"PROGRAM ERROR: Unrecognized value for 'tie' in OneHot function: '{tie}'." + result = np.zeros_like(array) + result[index] = extreme_val + + if indicator is True: + result = np.where(result == extreme_val, 1, result) + if max is not None: + result = np.where(result == -np.inf, 0, result) + if min is not None: + result = np.where(result == np.inf, 0, result) return self.convert_output_type(result) diff --git a/psyneulink/core/globals/keywords.py b/psyneulink/core/globals/keywords.py index fd7751974a8..4b3a664c974 100644 --- a/psyneulink/core/globals/keywords.py +++ b/psyneulink/core/globals/keywords.py @@ -44,9 +44,11 @@ 'COST_FUNCTION', 'COUNT', 'CROSS_ENTROPY', 'CURRENT_EXECUTION_TIME', 'CUSTOM_FUNCTION', 'CUDA', 'CYCLE', 'DDM_MECHANISM', 'DECAY', 'DEFAULT', 'DEFAULT_CONTROL_MECHANISM', 'DEFAULT_INPUT', 'DEFAULT_MATRIX', 'DEFAULT_PREFERENCE_SET_OWNER', 'DEFAULT_PROCESSING_MECHANISM', 'DEFAULT_VARIABLE', - 'DEFERRED_ASSIGNMENT', 'DEFERRED_DEFAULT_NAME', 'DEFERRED_INITIALIZATION', 'DICT', 'DictionaryMemory_FUNCTION', - 'DIFFERENCE', 'DIFFERENCE', 'DIFFUSION', 'DIRECT', 'DISABLE', 'DISABLE_PARAM', 'DIST_FUNCTION_TYPE', 'DIST_MEAN', - 'DIST_SHAPE', 'DISTANCE_FUNCTION', 'DISTANCE_METRICS', 'DISTRIBUTION_FUNCTION_TYPE', 'DIVISION', 'DOT_PRODUCT', + 'DEFERRED_ASSIGNMENT', 'DEFERRED_DEFAULT_NAME', 'DEFERRED_INITIALIZATION', 'DETERMINISTIC', + 'DICT', 'DictionaryMemory_FUNCTION', 'DIFFERENCE', 'DIFFERENCE', + 'DIFFUSION', 'DIRECT', 'DISABLE', 'DISABLE_PARAM', + 'DIST_FUNCTION_TYPE', 'DIST_MEAN', 'DIST_SHAPE', 'DISTANCE_FUNCTION', 'DISTANCE_METRICS', + 'DISTRIBUTION_FUNCTION_TYPE', 'DIVISION', 'DOT_PRODUCT', 'DRIFT_DIFFUSION_INTEGRATOR_FUNCTION', 'DRIFT_ON_A_SPHERE_INTEGRATOR_FUNCTION', 'DROPOUT_FUNCTION', 'DUAL_ADAPTIVE_INTEGRATOR_FUNCTION', 'EFFERENTS', 'EID_SIMULATION', 'EID_FROZEN', 'EITHER', 'ENABLE_CONTROLLER', 'ENABLED', 'ENERGY', 'ENTROPY', @@ -108,7 +110,8 @@ 'PORT_PREFS', 'PORT_TYPE', 'port_value', 'PORTS', 'PREDICTION_MECHANISM', 'PREDICTION_MECHANISMS', 'PREDICTION_MECHANISM_OUTPUT', 'PREDICTION_MECHANISM_PARAMS', 'PREDICTION_MECHANISM_TYPE', 'PREFS_ARG', 'PREF_BASE_VALUE', 'PREF_CURRENT_VALUE', 'PREFERENCE_SET', - 'PREFERENCE_SET_NAME', 'PREF_LEVEL', 'PREFS', 'PREFS_OWNER', 'PREVIOUS_VALUE', 'PRIMARY', 'PROB', 'PROB_INDICATOR', + 'PREFERENCE_SET_NAME', 'PREF_LEVEL', 'PREFS', 'PREFS_OWNER', 'PREVIOUS_VALUE', 'PRIMARY', + 'PROB', 'PROB_INDICATOR', 'PROBABILISTIC', 
'PROCESS', 'PROCESS_COMPONENT_CATEGORY', 'PROCESS_DEFAULT_MECHANISM', 'PROCESS_DEFAULT_PROJECTION_FUNCTION', 'PROCESS_EXECUTE', 'PROCESS_INIT', 'PROCESSES', 'PROCESSES_DIM', 'PROCESSING', 'PROCESSING_MECHANISM', 'PROCESSING_PATHWAY', 'PRODUCT', 'PROGRESS_BAR_CHAR', 'PROJECTION', 'PROJECTION_DIRECTION', 'PROJECTION_PARAMS', @@ -507,8 +510,14 @@ class Loss(Enum): OUTPUTS = 'outputs' PARAMETER = 'parameter' RANDOM = 'random' +FIRST= 'first' +LAST = 'last' BEFORE = 'before' AFTER = 'after' +LOW = 'low' +HIGH = 'high' +MAX = 'max' +MIN = 'min' OLDEST = 'oldest' NEWEST = 'newest' FULL = 'full' @@ -1078,8 +1087,6 @@ class Loss(Enum): RESET = "reset" RESET_STATEFUL_FUNCTION_WHEN = "reset_stateful_function_when" -LOW = 'low' -HIGH = 'high' BOUNDS = 'bounds' MODE = 'mode' REST = "rest" @@ -1137,6 +1144,8 @@ class Loss(Enum): STANDARD_DEVIATION = 'standard_deviation' VARIANCE = 'variance' +DETERMINISTIC = 'deterministic' +PROBABILISTIC = 'probabilistic' ARG_MAX = 'arg_max' ARG_MAX_ABS = 'arg_max_abs' diff --git a/psyneulink/library/compositions/compiledoptimizer.py b/psyneulink/library/compositions/compiledoptimizer.py index 9f65a875085..e65a8271609 100644 --- a/psyneulink/library/compositions/compiledoptimizer.py +++ b/psyneulink/library/compositions/compiledoptimizer.py @@ -231,7 +231,7 @@ def step(self, ctx): class SGDOptimizer(Optimizer): - """Implements compiled Stocastic Gradient Descent optimizer (without momentum)""" + """Implements compiled Stochastic Gradient Descent optimizer (without momentum)""" # sets up parameters of model & the information required for forward computation def __init__(self, pytorch_model, lr=1e-3): super().__init__(pytorch_model) From ea99c439f1d92875263649f5cddfa8adb969544e Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Wed, 6 Nov 2024 20:14:46 -0500 Subject: [PATCH 401/410] fitfunctions: Try using underscore name to construct fastKDE object keyword argument name changed in fastkde 2. Signed-off-by: Jan Vesely --- .../core/components/functions/nonstateful/fitfunctions.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/psyneulink/core/components/functions/nonstateful/fitfunctions.py b/psyneulink/core/components/functions/nonstateful/fitfunctions.py index 5a962a51af4..db7a6753bb7 100644 --- a/psyneulink/core/components/functions/nonstateful/fitfunctions.py +++ b/psyneulink/core/components/functions/nonstateful/fitfunctions.py @@ -189,7 +189,11 @@ def simulation_likelihood( continue # Do KDE - fKDE = fastKDE.fastKDE(dsub, doSaveMarginals=False) + try: + fKDE = fastKDE.fastKDE(dsub, do_save_marginals=False) + except TypeError: + fKDE = fastKDE.fastKDE(dsub, doSaveMarginals=False) + pdf = fKDE.pdf axes = fKDE.axes From 314f2860c83bdb3f45962a4e93f5f3a2bdd694bb Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 24 Oct 2024 22:18:59 +0000 Subject: [PATCH 402/410] requirements: update fastkde requirement to <2.0.2 Updates the requirements on [fastkde](https://github.com/LBL-EESA/fastkde) to permit the latest version. - [Release notes](https://github.com/LBL-EESA/fastkde/releases) - [Commits](https://github.com/LBL-EESA/fastkde/compare/v1.0.24...v2.0.1) --- updated-dependencies: - dependency-name: fastkde dependency-type: direct:production ... 
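An explicit version check is an alternative way to express the fastkde 1.x/2.x keyword rename handled by the try/except above; a sketch only, assuming fastkde and packaging are installed:

    import numpy as np
    from importlib.metadata import version
    from packaging.version import Version
    from fastkde import fastKDE

    data = np.random.randn(1000)
    kwargs = ({'do_save_marginals': False} if Version(version('fastkde')) >= Version('2.0.0')
              else {'doSaveMarginals': False})
    fKDE = fastKDE.fastKDE(data, **kwargs)
    pdf, axes = fKDE.pdf, fKDE.axes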
Signed-off-by: dependabot[bot] --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index d77c65770b0..0f5ff84f82a 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,6 +1,6 @@ beartype<0.20.0 dill<0.3.10 -fastkde>=1.0.24, <1.0.31 +fastkde>=1.0.24, <2.0.2 graph-scheduler>=1.2.1, <1.3.0 graphviz<0.21.0 grpcio<1.68.0 From 9a4e5fe1642d6c9ce3f85fc64f2108d53dfbd8f2 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Thu, 7 Nov 2024 12:06:08 -0500 Subject: [PATCH 403/410] deps/fastkde: Bump minimum version to >=2.0.0 Drop fallback to 1.x interface using camelCase keyword arguments Signed-off-by: Jan Vesely --- .../core/components/functions/nonstateful/fitfunctions.py | 5 +---- requirements.txt | 2 +- 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/psyneulink/core/components/functions/nonstateful/fitfunctions.py b/psyneulink/core/components/functions/nonstateful/fitfunctions.py index db7a6753bb7..75e94a3e7af 100644 --- a/psyneulink/core/components/functions/nonstateful/fitfunctions.py +++ b/psyneulink/core/components/functions/nonstateful/fitfunctions.py @@ -189,10 +189,7 @@ def simulation_likelihood( continue # Do KDE - try: - fKDE = fastKDE.fastKDE(dsub, do_save_marginals=False) - except TypeError: - fKDE = fastKDE.fastKDE(dsub, doSaveMarginals=False) + fKDE = fastKDE.fastKDE(dsub, do_save_marginals=False) pdf = fKDE.pdf axes = fKDE.axes diff --git a/requirements.txt b/requirements.txt index 0f5ff84f82a..818c32c38d5 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,6 +1,6 @@ beartype<0.20.0 dill<0.3.10 -fastkde>=1.0.24, <2.0.2 +fastkde>=2.0.0, <2.0.2 graph-scheduler>=1.2.1, <1.3.0 graphviz<0.21.0 grpcio<1.68.0 From 41b254d284afb1daad6a890f9c11c57799c1a1f1 Mon Sep 17 00:00:00 2001 From: Jan Vesely Date: Thu, 7 Nov 2024 15:55:02 -0500 Subject: [PATCH 404/410] ga: Move wheel/sdist creation to install-pnl action (#3085) Use wheel package to install psyneulink instead of using editable install. Provide paths in install-pnl action outputs. Use install-pnl outputs to upload the wheel/sdist.
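For reference, the dist/ discovery that the action performs with ls can be mirrored in Python; a hypothetical helper (not part of the repository), noting that setuptools writes sdists as *.tar.gz:

    from pathlib import Path

    def dist_artifacts(dist_dir='dist'):
        wheels = sorted(Path(dist_dir).glob('*.whl'))
        sdists = sorted(Path(dist_dir).glob('*.tar.gz'))   # sdists are tarballs, not '*.sdist'
        return wheels, sdists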
Signed-off-by: Jan Vesely --- .github/actions/install-pnl/action.yml | 20 ++++++++++++++++++-- .github/workflows/pnl-ci-docs.yml | 2 +- .github/workflows/pnl-ci.yml | 10 +++------- 3 files changed, 22 insertions(+), 10 deletions(-) diff --git a/.github/actions/install-pnl/action.yml b/.github/actions/install-pnl/action.yml index e6fa6949dd7..77be434ce55 100644 --- a/.github/actions/install-pnl/action.yml +++ b/.github/actions/install-pnl/action.yml @@ -5,6 +5,13 @@ inputs: description: 'PsyNeuLink features to install' required: true default: '' +outputs: + wheel: + description: 'Returns path to the built .whl package' + value: ${{ steps.dist.outputs.wheel }} + sdist: + description: 'Returns path to the built .sdist package' + value: ${{ steps.dist.outputs.sdist }} runs: using: "composite" @@ -83,12 +90,21 @@ runs: echo "new_package=''" >> $GITHUB_OUTPUT fi + - name: Build dist + id: dist + shell: bash + run: | + pip install setuptools wheel + python setup.py sdist bdist_wheel + echo "wheel=$(ls dist/*.whl)" | tee -a "$GITHUB_OUTPUT" + echo "sdist=$(ls dist/*.sdist)" | tee -a "$GITHUB_OUTPUT" + - name: Python dependencies shell: bash run: | - pip install -e .[${{ inputs.features }}] -c env_constraints.txt -c broken_trans_deps.txt + pip install ${{ steps.dist.outputs.wheel }}[${{ inputs.features }}] -c env_constraints.txt -c broken_trans_deps.txt - - name: "Cleanup old wheels" + - name: Cleanup old wheels shell: bash run: | pip cache info diff --git a/.github/workflows/pnl-ci-docs.yml b/.github/workflows/pnl-ci-docs.yml index fd2527fa8f5..5fadde30454 100644 --- a/.github/workflows/pnl-ci-docs.yml +++ b/.github/workflows/pnl-ci-docs.yml @@ -100,7 +100,7 @@ jobs: restore-keys: ${{ runner.os }}-python-${{ matrix.python-version }}-pip-wheels-${{ hashFiles('requirements.txt', 'doc_requirements.txt') }} # We need to install all PNL deps since docs config imports psyneulink module - - name: Install local, editable PNL package + - name: Install PNL package uses: ./.github/actions/install-pnl with: features: 'doc' diff --git a/.github/workflows/pnl-ci.yml b/.github/workflows/pnl-ci.yml index 710f2ffef43..cdb91a970bb 100644 --- a/.github/workflows/pnl-ci.yml +++ b/.github/workflows/pnl-ci.yml @@ -132,8 +132,9 @@ jobs: key: ${{ runner.os }}-python-${{ matrix.python-version }}-${{ matrix.python-architecture }}-pip-wheels-${{ hashFiles('requirements.txt', 'dev_requirements.txt') }}-${{ github.sha }} restore-keys: ${{ runner.os }}-python-${{ matrix.python-version }}-${{ matrix.python-architecture }}-pip-wheels-${{ hashFiles('requirements.txt', 'dev_requirements.txt') }} - - name: Install local, editable PNL package + - name: Install PNL package uses: ./.github/actions/install-pnl + id: install with: features: 'dev' @@ -185,15 +186,10 @@ jobs: echo "::warning::Not uploading to coveralls.io, token not available!" 
fi - - name: Build dist - run: | - pip install setuptools wheel - python setup.py sdist bdist_wheel - - name: Upload dist packages uses: actions/upload-artifact@v4 if: matrix.version-restrict == '' with: name: dist-${{ matrix.os }}-${{ matrix.python-version }}-${{ matrix.python-architecture }} - path: dist/ + path: ${{ steps.install.outputs.wheel }} ${{ steps.install.outputs.sdist }} retention-days: 2 From 9f59a874c8a4d5aa5dcf28575a13ed6e7db3445f Mon Sep 17 00:00:00 2001 From: jdcpni Date: Fri, 8 Nov 2024 05:37:47 -0500 Subject: [PATCH 405/410] Refactor/matrixtransform pytorch (#3105) * - transformfunctions.py - CombinationFunction -> TransformFunction - MatrixTransform(): add _gen_pytorch_fct() --- .../nonstateful/transformfunctions.py | 120 ++++--- .../processing/objectivemechanism.py | 8 +- psyneulink/core/components/ports/inputport.py | 10 +- .../ports/modulatorysignals/controlsignal.py | 6 +- psyneulink/core/components/ports/port.py | 6 +- psyneulink/core/globals/keywords.py | 18 +- .../integrator/collapsingboundmechanism.py | 300 ++++++++++++++++++ .../objective/comparatormechanism.py | 4 +- .../objective/predictionerrormechanism.py | 4 +- .../library/compositions/pytorchwrappers.py | 21 +- 10 files changed, 427 insertions(+), 70 deletions(-) create mode 100644 psyneulink/library/components/mechanisms/processing/integrator/collapsingboundmechanism.py diff --git a/psyneulink/core/components/functions/nonstateful/transformfunctions.py b/psyneulink/core/components/functions/nonstateful/transformfunctions.py index 935fee17f07..bd0403bfcf5 100644 --- a/psyneulink/core/components/functions/nonstateful/transformfunctions.py +++ b/psyneulink/core/components/functions/nonstateful/transformfunctions.py @@ -27,7 +27,7 @@ All Transformfunctions must have two attributes - **multiplicative_param** and **additive_param** - each of which is assigned the name of one of the function's parameters; this is for use by ModulatoryProjections (and, in particular, GatingProjections, -when the CombinationFunction is used as the function of an InputPort or OutputPort). +when the TransformFunction is used as the function of an InputPort or OutputPort). 
""" @@ -54,9 +54,8 @@ from psyneulink.core.globals.keywords import ( ADDITIVE_PARAM, ARRANGEMENT, COMBINATION_FUNCTION_TYPE, COMBINE_MEANS_FUNCTION, CONCATENATE_FUNCTION, CROSS_ENTROPY, DEFAULT_VARIABLE, DOT_PRODUCT, EXPONENTS, - HAS_INITIALIZERS, HOLLOW_MATRIX, IDENTITY_MATRIX, - LINEAR_COMBINATION_FUNCTION, LINEAR_TRANSFORM_FUNCTION, L0, - MATRIX_KEYWORD_NAMES, MATRIX, MULTIPLICATIVE_PARAM, NORMALIZE, + HAS_INITIALIZERS, HOLLOW_MATRIX, IDENTITY_MATRIX, LINEAR_COMBINATION_FUNCTION, L0, + MATRIX, MATRIX_KEYWORD_NAMES, MATRIX_TRANSFORM_FUNCTION, MULTIPLICATIVE_PARAM, NORMALIZE, OFFSET, OPERATION, PREDICTION_ERROR_DELTA_FUNCTION, PRODUCT, REARRANGE_FUNCTION, RECEIVER, REDUCE_FUNCTION, SCALE, SUM, WEIGHTS, PREFERENCE_SET_NAME) from psyneulink.core.globals.utilities import ( @@ -67,16 +66,16 @@ from psyneulink.core.globals.preferences.basepreferenceset import \ REPORT_OUTPUT_PREF, ValidPrefSet, PreferenceEntry, PreferenceLevel -__all__ = ['CombinationFunction', 'Concatenate', 'CombineMeans', 'Rearrange', 'Reduce', +__all__ = ['TransformFunction', 'Concatenate', 'CombineMeans', 'Rearrange', 'Reduce', 'LinearCombination', 'MatrixTransform', 'PredictionErrorDeltaFunction'] -class CombinationFunction(Function_Base): +class TransformFunction(Function_Base): """Function that combines multiple items, yielding a result with the same shape as its operands All Transformfunctions must have two attributes - multiplicative_param and additive_param - each of which is assigned the name of one of the function's parameters; this is for use by ModulatoryProjections (and, in particular, GatingProjections, - when the CombinationFunction is used as the function of an InputPort or OutputPort). + when the TransformFunction is used as the function of an InputPort or OutputPort). 
""" componentType = COMBINATION_FUNCTION_TYPE @@ -87,7 +86,7 @@ class Parameters(Function_Base.Parameters): ---------- variable - see `variable ` + see `variable ` :default value: numpy.array([0]) :type: ``numpy.ndarray`` @@ -116,7 +115,7 @@ def _gen_llvm_function_body(self, ctx, builder, params, _, arg_in, arg_out, *, t return builder -class Concatenate(CombinationFunction): # ------------------------------------------------------------------------ +class Concatenate(TransformFunction): # ------------------------------------------------------------------------ """ Concatenate( \ default_variable=class_defaults.variable, \ @@ -195,7 +194,7 @@ class Concatenate(CombinationFunction): # ------------------------------------- componentName = CONCATENATE_FUNCTION - class Parameters(CombinationFunction.Parameters): + class Parameters(TransformFunction.Parameters): """ Attributes ---------- @@ -340,7 +339,7 @@ def _gen_pytorch_fct(self, device, context=None): return lambda x: torch.hstack(tuple(x)) * scale + offset -class Rearrange(CombinationFunction): # ------------------------------------------------------------------------ +class Rearrange(TransformFunction): # ------------------------------------------------------------------------ """ Rearrange( \ default_variable=class_defaults.variable, \ @@ -447,7 +446,7 @@ class Rearrange(CombinationFunction): # --------------------------------------- """ componentName = REARRANGE_FUNCTION - class Parameters(CombinationFunction.Parameters): + class Parameters(TransformFunction.Parameters): """ Attributes ---------- @@ -622,7 +621,7 @@ def _function(self, return self.convert_output_type(result, FunctionOutputType.NP_2D_ARRAY) -class Reduce(CombinationFunction): # ------------------------------------------------------------------------ +class Reduce(TransformFunction): # ------------------------------------------------------------------------ # FIX: CONFIRM THAT 1D KWEIGHTS USES EACH ELEMENT TO SCALE CORRESPONDING VECTOR IN VARIABLE # FIX CONFIRM THAT LINEAR TRANSFORMATION (OFFSET, SCALE) APPLY TO THE RESULTING ARRAY # FIX: CONFIRM RETURNS LIST IF GIVEN LIST, AND SIMLARLY FOR NP.ARRAY @@ -730,7 +729,7 @@ class Reduce(CombinationFunction): # ------------------------------------------ componentName = REDUCE_FUNCTION - class Parameters(CombinationFunction.Parameters): + class Parameters(TransformFunction.Parameters): """ Attributes ---------- @@ -1006,7 +1005,7 @@ def _gen_llvm_combine(self, builder, index, ctx, vi, vo, params): class LinearCombination( - CombinationFunction): # ------------------------------------------------------------------------ + TransformFunction): # ------------------------------------------------------------------------ """ LinearCombination( \ default_variable, \ @@ -1188,7 +1187,7 @@ class LinearCombination( REPORT_OUTPUT_PREF: PreferenceEntry(False, PreferenceLevel.INSTANCE), } - class Parameters(CombinationFunction.Parameters): + class Parameters(TransformFunction.Parameters): """ Attributes ---------- @@ -1612,16 +1611,17 @@ def _gen_pytorch_fct(self, device, context=None): # MatrixTransform # ********************************************************************************************************************** -class MatrixTransform(CombinationFunction): # ------------------------------------------------------------------------------- +class MatrixTransform(TransformFunction): # ------------------------------------------------------------------------------- """ - MatrixTransform( \ - default_variable, \ - matrix=None, \ - 
normalize=False, \ - params=None, \ - owner=None, \ - name=None, \ - prefs=None \ + MatrixTransform( \ + default_variable, \ + matrix=None, \ + operation=DOT_PRODUCT, \ + normalize=False, \ + params=None, \ + owner=None, \ + name=None, \ + prefs=None \ ) .. _MatrixTransform: @@ -1633,6 +1633,11 @@ class MatrixTransform(CombinationFunction): # --------------------------------- .. math:: variable \\bullet matrix + If *DOT_PRODUCT* is specified as the **operation*, the result is the dot product of `variable + ` and `matrix `; if *L0* is specified, the result is the + difference between `variable ` and `matrix ` (see + `operation ` for additional details). + If **normalize** is True, the result is normalized by the product of the norms of the variable and matrix: .. math:: @@ -1690,9 +1695,15 @@ class MatrixTransform(CombinationFunction): # --------------------------------- - matrix keywords are not valid matrix specifications + operation : DOT_PRODUCT or L0 : default DOT_PRODUCT + specifies whether to take the dot product or difference of `variable ` + and `matrix `. + normalize : bool : default False specifies whether to normalize the result of `function ` by dividing it by the - norm of `variable ` x the norm of `matrix `. + norm of `variable ` x the norm of `matrix `; this cannot + be used if `variable ` is a scalar (i.e., has only one element), and **operation** + is set to *L0* (since it is not needed, and can produce a divide by zero error). bounds : None @@ -1725,11 +1736,17 @@ class MatrixTransform(CombinationFunction): # --------------------------------- Rows correspond to elements of the input array (outer index), and columns correspond to elements of the output array (inner index). + operation : DOT_PRODUCT or L0 : default DOT_PRODUCT + determines whether dot product or difference of `variable ` and `matrix + ` is taken. If the length of `variable ` is greater + than 1 and L0 is specified, the `variable ` array is subtracted from each + array of `matrix ` and the resulting array is summed, to produce the corresponding + element of the array returned by the function. + normalize : bool determines whether the result of `function ` is normalized, by dividing it by the norm of `variable ` x the norm of `matrix `. - owner : Component `component ` to which the Function has been assigned. @@ -1743,13 +1760,13 @@ class MatrixTransform(CombinationFunction): # --------------------------------- for details). 
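As a quick numerical illustration of the two operations described above (a plain-numpy sketch, not the class's own code)::

    import numpy as np

    v = np.array([1., 2.])
    M = np.array([[0.5, 1.0, 0.0],
                  [1.0, 0.0, 2.0]])

    dot = v @ M                                                   # operation=DOT_PRODUCT
    dot_norm = (v @ M) / (np.linalg.norm(v) * np.linalg.norm(M))  # DOT_PRODUCT with normalize=True
    # operation=L0 instead subtracts the variable from the matrix and sums the differences
    # (see the description of operation above).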
""" - componentName = LINEAR_TRANSFORM_FUNCTION + componentName = MATRIX_TRANSFORM_FUNCTION DEFAULT_FILLER_VALUE = 0 _model_spec_generic_type_name = 'onnx::MatMul' - class Parameters(CombinationFunction.Parameters): + class Parameters(TransformFunction.Parameters): """ Attributes ---------- @@ -2134,11 +2151,42 @@ def _gen_llvm_function_body(self, ctx, builder, params, state, arg_in, arg_out, builder.call(builtin, [vec_in, matrix, input_length, output_length, vec_out]) return builder + def _gen_pytorch_fct(self, device, context=None): + operation = self._get_pytorch_fct_param_value('operation', device, context) + normalize = self._get_pytorch_fct_param_value('normalize', device, context) + + def dot_product_with_normalization(vector, matrix): + if torch.any(vector): + vector = vector / torch.norm(vector) + if torch.any(matrix): + matrix = matrix / torch.norm(matrix) + return torch.matmul(vector, matrix) + + def diff_with_normalization(vector, matrix): + normalize = torch.sum(torch.abs(vector - matrix)) + return torch.sum((1 - torch.abs(vector - matrix) / normalize), axis=0) + + if operation is DOT_PRODUCT: + if normalize: + return dot_product_with_normalization + else: + return lambda x, y : torch.matmul(x, y) + + elif operation is L0: + if normalize: + return diff_with_normalization + else: + return lambda x, y: torch.sum((1 - torch.abs(x - y)),axis=0) + + else: + from psyneulink.library.compositions.autodiffcomposition import AutodiffCompositionError + raise AutodiffCompositionError(f"The 'operation' parameter of {function.componentName} is not supported " + f"by AutodiffComposition; use 'DOT_PRODUCT' or 'L0'.") + def _function(self, variable=None, context=None, - params=None, - ): + params=None): """ Arguments @@ -2154,7 +2202,7 @@ def _function(self, Returns --------- - dot product of variable and matrix : 1d array + dot product of or difference between variable and matrix : 1d array length of the array returned equals the number of columns of `matrix `. """ @@ -2246,7 +2294,7 @@ def _is_identity(self, context=None, defaults=False): -class CombineMeans(CombinationFunction): # ------------------------------------------------------------------------ +class CombineMeans(TransformFunction): # ------------------------------------------------------------------------ # FIX: CONFIRM THAT 1D KWEIGHTS USES EACH ELEMENT TO SCALE CORRESPONDING VECTOR IN VARIABLE # FIX CONFIRM THAT LINEAR TRANSFORMATION (OFFSET, SCALE) APPLY TO THE RESULTING ARRAY # FIX: CONFIRM RETURNS LIST IF GIVEN LIST, AND SIMLARLY FOR NP.ARRAY @@ -2425,7 +2473,7 @@ class CombineMeans(CombinationFunction): # ------------------------------------ REPORT_OUTPUT_PREF: PreferenceEntry(False, PreferenceLevel.INSTANCE), } - class Parameters(CombinationFunction.Parameters): + class Parameters(TransformFunction.Parameters): """ Attributes ---------- @@ -2686,7 +2734,7 @@ def scale(self, val): GAMMA = 'gamma' -class PredictionErrorDeltaFunction(CombinationFunction): +class PredictionErrorDeltaFunction(TransformFunction): """ Calculate temporal difference prediction error. 
@@ -2704,7 +2752,7 @@ class PredictionErrorDeltaFunction(CombinationFunction): REPORT_OUTPUT_PREF: PreferenceEntry(False, PreferenceLevel.INSTANCE), } - class Parameters(CombinationFunction.Parameters): + class Parameters(TransformFunction.Parameters): """ Attributes ---------- diff --git a/psyneulink/core/components/mechanisms/processing/objectivemechanism.py b/psyneulink/core/components/mechanisms/processing/objectivemechanism.py index a253c1df2c0..7fdff58ac58 100644 --- a/psyneulink/core/components/mechanisms/processing/objectivemechanism.py +++ b/psyneulink/core/components/mechanisms/processing/objectivemechanism.py @@ -180,7 +180,7 @@ ` and/or 'exponent ` attributes of the corresponding InputPorts, it can be configured to calculate differences, ratios, etc. (see `example ` below). The `function ` can also -be replaced with any `CombinationFunction `, or any python function that takes an 2d array as +be replaced with any `TransformFunction `, or any python function that takes an 2d array as its input (with a number of items in axis 0 equal to the number of the ObjectiveMechanism's InputPorts), and generates a 1d array as its result. If it implements :keyword:`weight` and/or :keyword:`exponent` attributes, those are assigned from `weight ` and `exponent ` attributes of its `input_ports @@ -426,7 +426,7 @@ class ObjectiveMechanism(ProcessingMechanism_Base): specifies the OutputPorts, the `values ` of which will be monitored, and evaluated by `function ` (see `ObjectiveMechanism_Monitor` for details of specification). - function : CombinationFunction, ObjectiveFunction, function or method : default LinearCombination + function : TransformFunction, ObjectiveFunction, function or method : default LinearCombination specifies the function used to evaluate the values listed in `monitor` ` (see `function ` for details). @@ -463,9 +463,9 @@ class ObjectiveMechanism(ProcessingMechanism_Base): contains the InputPorts of the ObjectiveMechanism, each of which receives a `MappingProjection` from the OutputPorts specified in its `monitor ` attribute. - function : CombinationFunction, ObjectiveFunction, function, or method + function : TransformFunction, ObjectiveFunction, function, or method the function used to evaluate the values monitored by the ObjectiveMechanism. The function can be any - `CombinationFunction ` or a Python function that takes a 2d array with an arbitrary + `TransformFunction ` or a Python function that takes a 2d array with an arbitrary number of items or a number equal to the number of items in the ObjectiveMechanism's variable (i.e., its number of input_ports) and returns a 1d array. 
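For example, a custom evaluation function of the kind described above might be written as follows (a hypothetical sketch; difference_objective is not part of the codebase)::

    import numpy as np

    def difference_objective(variable):
        # variable is a 2d array with one item per monitored OutputPort (here, two)
        sample, target = variable
        return np.atleast_1d(np.sum(target - sample))

    # e.g., ObjectiveMechanism(monitor=[sample_mech, target_mech], function=difference_objective)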
diff --git a/psyneulink/core/components/ports/inputport.py b/psyneulink/core/components/ports/inputport.py index 251995cfcbc..ce123b7118c 100644 --- a/psyneulink/core/components/ports/inputport.py +++ b/psyneulink/core/components/ports/inputport.py @@ -581,7 +581,7 @@ from psyneulink.core.components.component import DefaultsFlexibility from psyneulink.core.components.functions.function import Function -from psyneulink.core.components.functions.nonstateful.transformfunctions import CombinationFunction, LinearCombination +from psyneulink.core.components.functions.nonstateful.transformfunctions import TransformFunction, LinearCombination from psyneulink.core.components.ports.outputport import OutputPort from psyneulink.core.components.ports.port import PortError, Port_Base, _instantiate_port_list, port_type_keywords from psyneulink.core.globals.context import ContextFlags, handle_external_context @@ -666,7 +666,7 @@ class InputPort(Port_Base): ` of the `Projections ` received by the InputPort. Any function can be assigned, however: a) it must produce a result that has the same format (number and type of elements) as the item of its owner Mechanism's `variable ` to which the InputPort has been - assigned; b) if it is not a CombinationFunction, it may produce unpredictable results if the InputPort + assigned; b) if it is not a TransformFunction, it may produce unpredictable results if the InputPort receives more than one Projection (see `function `. combine : SUM or PRODUCT : default None @@ -727,7 +727,7 @@ class InputPort(Port_Base): expected for any `path_afferent Projections `. function : Function - if it is a `CombinationFunction `, it combines the `values ` of + if it is a `TransformFunction `, it combines the `values ` of the `PathwayProjections ` (e.g., `MappingProjections `) received by the InputPort (listed in its `path_afferents ` attribute), under the possible influence of `GatingProjections ` received by the InputPort (listed in its `mod_afferents @@ -738,7 +738,7 @@ class InputPort(Port_Base): `. If the InputPort receives only one Projection, then any other function can be applied and it will generate a value that is the same length as the Projection's `value `. However, if the InputPort receives more than one Projection and - uses a function other than a CombinationFunction, a warning is generated and only the `value + uses a function other than a TransformFunction, a warning is generated and only the `value ` of the first Projection listed in `path_afferents ` is used by the function, which may generate unexpected results when executing the Mechanism or Composition to which it belongs. 
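A sketch of the kind of configuration this refers to: an InputPort that multiplies, rather than sums, the values of its two afferent Projections (illustrative only)::

    import psyneulink as pnl

    A = pnl.ProcessingMechanism(name='A')
    B = pnl.ProcessingMechanism(name='B')
    C = pnl.ProcessingMechanism(name='C', input_ports=[pnl.InputPort(combine=pnl.PRODUCT)])
    comp = pnl.Composition(pathways=[[A, C], [B, C]])   # both A and B project to C's InputPort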
@@ -1032,7 +1032,7 @@ def _instantiate_function(self, function, function_params=None, context=None): del self.combine_function_args super()._instantiate_function(function=function, context=context) self._use_1d_variable = False - if not isinstance(self.function, CombinationFunction): + if not isinstance(self.function, TransformFunction): self._use_1d_variable = True self.function._variable_shape_flexibility = DefaultsFlexibility.RIGID else: diff --git a/psyneulink/core/components/ports/modulatorysignals/controlsignal.py b/psyneulink/core/components/ports/modulatorysignals/controlsignal.py index 7f60c82a093..167fe9bf16c 100644 --- a/psyneulink/core/components/ports/modulatorysignals/controlsignal.py +++ b/psyneulink/core/components/ports/modulatorysignals/controlsignal.py @@ -900,13 +900,13 @@ def _validate_params(self, request_set, target_set=None, context=None): # cost_function = cost_function() # # # cost_function is Function object: - # # COMBINE_COSTS_FUNCTION must be CombinationFunction + # # COMBINE_COSTS_FUNCTION must be TransformFunction # # DURATION_COST_FUNCTION must be an IntegratorFunction # # others must be TransferFunction # if isinstance(cost_function, Function): # if cost_function_name == COMBINE_COSTS_FUNCTION: - # if not isinstance(cost_function, CombinationFunction): - # raise ControlSignalError("Assignment of Function to {} ({}) must be a CombinationFunction". + # if not isinstance(cost_function, TransformFunction): + # raise ControlSignalError("Assignment of Function to {} ({}) must be a TransformFunction". # format(COMBINE_COSTS_FUNCTION, cost_function)) # elif cost_function_name == DURATION_COST_FUNCTION: # if not isinstance(cost_function, IntegratorFunction): diff --git a/psyneulink/core/components/ports/port.py b/psyneulink/core/components/ports/port.py index c9040259df7..3388fb3417b 100644 --- a/psyneulink/core/components/ports/port.py +++ b/psyneulink/core/components/ports/port.py @@ -783,7 +783,7 @@ def test_multiple_modulatory_projections_with_mech_and_port_Name_specs(self): from psyneulink.core.components.component import ComponentError, DefaultsFlexibility, component_keywords from psyneulink.core.components.functions.function import \ Function, get_param_value_for_keyword, is_function_type, RandomMatrix -from psyneulink.core.components.functions.nonstateful.transformfunctions import CombinationFunction, LinearCombination +from psyneulink.core.components.functions.nonstateful.transformfunctions import TransformFunction, LinearCombination from psyneulink.core.components.functions.nonstateful.transferfunctions import Linear from psyneulink.core.components.shellclasses import Mechanism, Projection, Port from psyneulink.core.globals.context import ContextFlags, handle_external_context @@ -1466,12 +1466,12 @@ def _instantiate_projections_to_port(self, projections, context=None): 'Projections, but does not use a {}; unexpected results may occur when the {} ' 'or {} to which it belongs is executed.'. 
format(Projection.__name__, projection.sender.owner.name, self.__class__.__name__, - self.owner.name, self.name, CombinationFunction.__name__, Mechanism.__name__, + self.owner.name, self.name, TransformFunction.__name__, Mechanism.__name__, Composition.__name__)) # f'A {Projection.__name__} from {projection.sender.owner.name} is being added ' \ # f'to an {self.__class__.__name__} of {self.owner.name} ({self.name}) ' \ # f'that already receives other Projections, ' \ - # f'but does not use a {CombinationFunction.__name__}; ' \ + # f'but does not use a {TransformFunction.__name__}; ' \ # f'unexpected results may occur when the {Mechanism.__name__} ' \ # f'or {Composition.__name__} to which it belongs is executed.') diff --git a/psyneulink/core/globals/keywords.py b/psyneulink/core/globals/keywords.py index 4b3a664c974..415ed2d7c2e 100644 --- a/psyneulink/core/globals/keywords.py +++ b/psyneulink/core/globals/keywords.py @@ -78,10 +78,11 @@ 'LEARNING_MECHANISMS', 'LEARNING_PATHWAY', 'LEARNING_PROJECTION', 'LEARNING_PROJECTION_PARAMS', 'LEARNING_RATE', 'LEARNING_SCALE', 'LEARNING_SCALE_LITERALS', 'LEARNING_SCALE_NAMES', 'LEARNING_SIGNAL', 'LEARNING_SIGNAL_SPECS', 'LEARNING_SIGNALS', 'LESS_THAN', 'LESS_THAN_OR_EQUAL', 'LINEAR', 'LINEAR_COMBINATION_FUNCTION', 'LINEAR_FUNCTION', - 'LINEAR_TRANSFORM_FUNCTION', 'LOG_ENTRIES', 'LOGISTIC_FUNCTION', 'Loss', 'LOSSES', 'LOW', 'LVOC_CONTROL_MECHANISM', + 'LOG_ENTRIES', 'LOGISTIC_FUNCTION', 'Loss', 'LOSSES', 'LOW', 'LVOC_CONTROL_MECHANISM', 'MAPPING_PROJECTION', 'MAPPING_PROJECTION_PARAMS', 'MASKED_MAPPING_PROJECTION', - 'MATRIX', 'MATRIX_KEYWORD_NAMES', 'MATRIX_KEYWORD_SET', 'MATRIX_KEYWORD_VALUES', 'MATRIX_KEYWORDS','MatrixKeywords', - 'MATRIX_WEIGHTS', 'MAX_ABS_DIFF', 'MAX_ABS_INDICATOR', 'MAX_ONE_HOT', 'MAX_ABS_ONE_HOT', 'MAX_ABS_VAL', + 'MATRIX', 'MATRIX_KEYWORD_NAMES', 'MATRIX_KEYWORD_SET', 'MATRIX_KEYWORD_VALUES', 'MATRIX_KEYWORDS', + 'MatrixKeywords', 'MATRIX_TRANSFORM_FUNCTION', 'MATRIX_WEIGHTS', + 'MAX_ABS_DIFF', 'MAX_ABS_INDICATOR', 'MAX_ONE_HOT', 'MAX_ABS_ONE_HOT', 'MAX_ABS_VAL', 'MAX_VS_NEXT', 'MAX_VS_AVG', 'MAX_EXECUTIONS_BEFORE_FINISHED', 'MAX_INDICATOR', 'MAX_VAL', 'MAYBE', 'MEAN', 'MECHANISM', 'MECHANISM_COMPONENT_CATEGORY', 'MECHANISM_DEFAULT', 'MECHANISM_DEFAULT_INPUT_VALUE', 'MECHANISM_DEFAULTParams', 'MECHANISM_EXECUTED_LOG_ENTRY', 'MECHANISM_NAME', 'MECHANISM_PARAM_VALUE', @@ -108,9 +109,10 @@ 'PARAMETERS', 'PARAMS', 'PARAMS_DICT', 'PATH_AFFERENTS', 'PATHWAY', 'PATHWAY_PROJECTION', 'PEARSON', 'PORT', 'PORT_COMPONENT_CATEGORY', 'PORT_CONTEXT', 'Port_Name', 'port_params', 'PORT_PREFS', 'PORT_TYPE', 'port_value', 'PORTS', - 'PREDICTION_MECHANISM', 'PREDICTION_MECHANISMS', 'PREDICTION_MECHANISM_OUTPUT', 'PREDICTION_MECHANISM_PARAMS', - 'PREDICTION_MECHANISM_TYPE', 'PREFS_ARG', 'PREF_BASE_VALUE', 'PREF_CURRENT_VALUE', 'PREFERENCE_SET', - 'PREFERENCE_SET_NAME', 'PREF_LEVEL', 'PREFS', 'PREFS_OWNER', 'PREVIOUS_VALUE', 'PRIMARY', + 'PREDICTION_ERROR_DELTA_FUNCTION', 'PREDICTION_MECHANISM', 'PREDICTION_MECHANISMS', + 'PREDICTION_MECHANISM_OUTPUT', 'PREDICTION_MECHANISM_PARAMS', 'PREDICTION_MECHANISM_TYPE', + 'PREFS_ARG', 'PREF_BASE_VALUE', 'PREF_CURRENT_VALUE', + 'PREFERENCE_SET', 'PREFERENCE_SET_NAME', 'PREF_LEVEL', 'PREFS', 'PREFS_OWNER', 'PREVIOUS_VALUE', 'PRIMARY', 'PROB', 'PROB_INDICATOR', 'PROBABILISTIC', 'PROCESS', 'PROCESS_COMPONENT_CATEGORY', 'PROCESS_DEFAULT_MECHANISM', 'PROCESS_DEFAULT_PROJECTION_FUNCTION', 'PROCESS_EXECUTE', 'PROCESS_INIT', 'PROCESSES', 'PROCESSES_DIM', 'PROCESSING', 'PROCESSING_MECHANISM', @@ -750,12 +752,12 @@ 
class Loss(Enum): USER_DEFINED_FUNCTION = "USER DEFINED FUNCTION" # Transformfunctions: -REDUCE_FUNCTION = "Reduce Function" CONCATENATE_FUNCTION = "Concatenate Function" REARRANGE_FUNCTION = 'Rearrange Function' +REDUCE_FUNCTION = "Reduce Function" LINEAR_COMBINATION_FUNCTION = "LinearCombination Function" -LINEAR_TRANSFORM_FUNCTION = "MatrixTransform Function" COMBINE_MEANS_FUNCTION = "CombineMeans Function" +MATRIX_TRANSFORM_FUNCTION = "MatrixTransform Function" # TransferFunctions: IDENTITY_FUNCTION = 'Identity Function' diff --git a/psyneulink/library/components/mechanisms/processing/integrator/collapsingboundmechanism.py b/psyneulink/library/components/mechanisms/processing/integrator/collapsingboundmechanism.py new file mode 100644 index 00000000000..e87174cd023 --- /dev/null +++ b/psyneulink/library/components/mechanisms/processing/integrator/collapsingboundmechanism.py @@ -0,0 +1,300 @@ +# Princeton University licenses this file to You under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. You may obtain a copy of the License at: +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed +# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and limitations under the License. + + +# ************************************** IntegratorMechanism ************************************************* + +""" + +Contents +-------- + + * `IntegratorMechanism_Overview` + * `IntegratorMechanism_Creation` + * `IntegratorMechanism_Structure` + * `IntegratorMechanism_Execution` + * `IntegratorMechanism_Class_Reference` + + +.. _IntegratorMechanism_Overview: + +Overview +-------- + +An IntegratorMechanism integrates its input, possibly based on its prior values. The input can be a single +scalar value or an array of scalars (list or 1d np.array). If it is a list or array, then each value is +independently integrated. The default function (`IntegratorFunction`) can be parametrized to implement either a simple +increment rate, additive accumulator, or an (exponentially weighted) time-averaging of its input. It can also be +assigned a custom function. + +.. _IntegratorMechanism_Creation: + +Creating an IntegratorMechanism +------------------------------- + +An IntegratorMechanism can be created directly by calling its constructor, or using the `mechanism` command and +specifying *INTEGRATOR_MECHANISM* as its **mech_spec** argument. Its function is specified in the **function** +argument, which can be parametrized by calling its constructor with parameter values:: + + >>> import psyneulink as pnl + >>> my_time_averaging_mechanism = pnl.IntegratorMechanism(function=pnl.AdaptiveIntegrator(rate=0.5)) + +The **default_variable** argument specifies the format of its input (i.e., whether it is a single scalar or an +array), as well as the value to use if none is provided when Mechanism is executed. Alternatively, the **input_shapes** +argument can be used to specify the length of the array, in which case it will be initialized with all zeros. + +.. _IntegratorMechanism_Structure: + +Structure +--------- + +An IntegratorMechanism has a single `InputPort`, the `value ` of which is +used as the `variable ` for its `function `. +The default for `function ` is `AdaptiveIntegrator(rate=0.5)`. 
However, +a custom function can also be specified, so long as it takes a numeric value, or a list or np.ndarray of numeric +values as its input, and returns a value of the same type and format. The Mechanism has a single `OutputPort`, +the `value ` of which is assigned the result of the call to the Mechanism's +`function `. + +.. _IntegratorMechanism_Execution: + +Execution +--------- + +When an IntegratorMechanism is executed, it carries out the specified integration, and assigns the result to the +`value ` of its `primary OutputPort `. For the default function +(`IntegratorFunction`), if the value specified for **default_variable** is a list or array, or **input_shapes** is greater +than 1, each element of the array is independently integrated. If its `rate ` parameter is a +single value, that rate is used for integrating each element. If the `rate ` parameter is a +list or array, then each element is used as the rate for the corresponding element of the input (in this case, `rate +` must be the same length as the value specified for **default_variable** or **input_shapes**). +Integration can be reset to the value of its `function `\\s `initializer by setting +its `reset ` parameter to a non-zero value, as described below. + +.. _IntegratorMechanism_Reset: + +*Resetting the IntegratorMechanism* +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +An IntegratorMechanism has a `modulable ` `reset ` parameter +that can be used to reset its value to the value of its `function `\\s `initializer +`. This also clears the `value ` `history `, +thus effectively setting the `previous_value ` of its `function +` to None. + +The `reset ` parameter can be used to reset the IntegratorMechanism under the control of a +`ControlMechanism`. The simplest way to do this is to specify the `reset ` parameter of +the IntegratorMechanism in the **control** argument of the ControlMechanism's constructor, and to specify *OVERRIDE* +in its **modulation** argument, as in the following example:: + + >>> my_integrator = pnl.IntegratorMechanism() + >>> ctl_mech = pnl.ControlMechanism(modulation=pnl.OVERRIDE, control=(pnl.RESET, my_integrator)) + +In this case, any non-zero value of the ControlMechanism's `ControlSignal` will reset the IntegratorMechanism. +*OVERRIDE* must be used as its `modulation ` parameter (instead of its default value +of *MULTIPLICATIVE*), so that the value of the ControlMechanism's `ControlSignal` is assigned directly to the +IntegratorMechanism's `reset ` parameter (otherwise, since the default of the `reset +` parameter is 0, the ControlSignal's value has no effect). An alternative is to specify +the **reset_default** argument in the IntegratorMechanism constructor with a non-zero value, while allowing the +ControlMechanism to use its default value for `modulation ` (i.e., *MULTIPLICATIVE*):: + + >>> my_integrator = pnl.IntegratorMechanism(reset_default=1) + >>> ctl_mech = pnl.ControlMechanism(control=(pnl.RESET, my_integrator)) + +In this case, a ControlSignal with a zero value suppresses a reset by multiplying the `reset +` parameter by 0, whereas a ControlSignal with a non-zero value multiplies the `reset +` parameter's non-zero default value, resulting in a non-zero value that elicits a reset. + +..
_IntegratorMechanism_Class_Reference: + +Class Reference +--------------- + +""" +from collections.abc import Iterable + +from beartype import beartype + +from psyneulink._typing import Optional, Union +import numpy as np + +from psyneulink.core.components.functions.function import Function +from psyneulink.core.components.functions.stateful.integratorfunctions import AdaptiveIntegrator +from psyneulink.core.components.mechanisms.processing.processingmechanism import ProcessingMechanism_Base +from psyneulink.core.components.mechanisms.mechanism import Mechanism, MechanismError +from psyneulink.core.globals.keywords import \ + DEFAULT_VARIABLE, INTEGRATOR_MECHANISM, VARIABLE, PREFERENCE_SET_NAME, RESET +from psyneulink.core.globals.parameters import Parameter, check_user_specified +from psyneulink.core.globals.preferences.basepreferenceset import ValidPrefSet, REPORT_OUTPUT_PREF +from psyneulink.core.globals.preferences.preferenceset import PreferenceEntry, PreferenceLevel + +__all__ = [ + 'DEFAULT_RATE', 'IntegratorMechanism', 'IntegratorMechanismError' +] + +# IntegratorMechanism parameter keywords: +DEFAULT_RATE = 0.5 + +class IntegratorMechanismError(MechanismError): + pass + + +class IntegratorMechanism(ProcessingMechanism_Base): + """ + IntegratorMechanism( \ + function=AdaptiveIntegrator(rate=0.5)) + + Subclass of `ProcessingMechanism ` that integrates its input. + See `Mechanism ` for additional arguments and attributes. + + Arguments + --------- + + function : IntegratorFunction : default IntegratorFunction + specifies the function used to integrate the input. Must take a single numeric value, or a list or np.array + of values, and return one of the same form. + + reset_default : number, list or np.ndarray : default 0 + specifies the default value used for the `reset ` parameter. + + Attributes + ---------- + + reset : int, float or 1d array of length 1 : default 0 + if non-zero, the IntegratorMechanism's `reset ` method is called, which resets the + `value ` of the IntegratorMechanism to its initial value (see + `IntegratorMechanism_Reset` for additional details). 
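For example, the default AdaptiveIntegrator(rate=0.5) moves half-way from its previous value toward each new input; a minimal sketch (printed values are approximate)::

    import psyneulink as pnl

    f = pnl.AdaptiveIntegrator(rate=0.5, initializer=[0.0])
    print(f([10.0]))   # ~[5.0]:  0.5 * 0.0 + 0.5 * 10.0
    print(f([10.0]))   # ~[7.5]:  0.5 * 5.0 + 0.5 * 10.0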
+ + """ + + componentType = INTEGRATOR_MECHANISM + + classPreferenceLevel = PreferenceLevel.TYPE + # These will override those specified in TYPE_DEFAULT_PREFERENCES + classPreferences = { + PREFERENCE_SET_NAME: 'IntegratorMechanismCustomClassPreferences', + REPORT_OUTPUT_PREF: PreferenceEntry(False, PreferenceLevel.INSTANCE)} + + class Parameters(ProcessingMechanism_Base.Parameters): + """ + Attributes + ---------- + + function + see `function ` + + :default value: `AdaptiveIntegrator`(initializer=numpy.array([0]), rate=0.5) + :type: `Function` + + reset + see `reset ` + + :default value: None + :type: 'list or np.ndarray' + """ + function = Parameter(AdaptiveIntegrator(rate=0.5), stateful=False, loggable=False) + reset = Parameter([0], modulable=True, constructor_argument='reset_default') + + # + @check_user_specified + @beartype + def __init__(self, + default_variable=None, + input_shapes=None, + input_ports:Optional[Union[list, dict]]=None, + function=None, + reset_default=0, + output_ports:Optional[Union[str, Iterable]]=None, + params=None, + name=None, + prefs: Optional[ValidPrefSet] = None, + **kwargs): + """Assign type-level preferences, default input value (SigmoidLayer_DEFAULT_BIAS) and call super.__init__ + """ + + super(IntegratorMechanism, self).__init__(default_variable=default_variable, + input_shapes=input_shapes, + function=function, + reset_default=reset_default, + params=params, + name=name, + prefs=prefs, + input_ports=input_ports, + output_ports=output_ports, + **kwargs) + + # IMPLEMENT: INITIALIZE LOG ENTRIES, NOW THAT ALL PARTS OF THE MECHANISM HAVE BEEN INSTANTIATED + + # def _parse_function_variable(self, variable, context=None, context=None): + # super()._parse_function_variable(variable, context, context) + + def _handle_default_variable(self, default_variable=None, input_shapes=None, input_ports=None, function=None, params=None): + """If any parameters with len>1 have been specified for the Mechanism's function, and Mechanism's + default_variable has not been specified, reshape Mechanism's variable to match function's, + but make sure function's has the same outer dimensionality as the Mechanism's + """ + + # Get variable for Mechanism + user_specified = False + if default_variable is not None: + variable = np.atleast_1d(default_variable) + user_specified = True + else: + variable = self.parameters.variable.default_value + user_specified = self.parameters.variable._user_specified + + # Only bother if an instantiated function was specified for the Mechanism + if isinstance(function, Function): + function_variable = function.parameters.variable.default_value + function_variable_len = function_variable.shape[-1] + variable_len = variable.shape[-1] + + # Raise error if: + # - the length of both Mechanism and function variable are greater than 1 and they don't match, or + # - the Mechanism's variable length is 1 and the function's is > 1 (in which case would like to assign + # shape of function's variable to that of Mechanism) but Mechanism's variable is user-specified. 
+ if ((variable_len>1 and function_variable_len>1 and variable_len!=function_variable_len) or + (function_variable_len>1 and variable_len==1 and user_specified)): + raise IntegratorMechanismError(f"Shape of {repr(VARIABLE)} for function specified for {self.name} " + f"({function.name}: {function.variable.shape}) does not match " + f"the shape of the {repr(DEFAULT_VARIABLE)} specified for the " + f"{repr(Mechanism.__name__)}.") + + # If length of Mechanism's variable is 1 but the function's is longer, + # reshape Mechanism's inner dimension to match function + elif variable_len==1 and function_variable_len>1: + variable_shape = list(variable.shape) + variable_shape[-1] = function_variable.shape[-1] + # self.parameters.variable.default_value = np.zeros(tuple(variable_shape)) + variable = np.zeros(tuple(variable_shape)) + else: + variable = default_variable + else: + variable = default_variable + + # IMPLEMENTATON NOTE: + # Don't worry about case in which length of function's variable is 1 and Mechanism's is > 1 + # as the reshaping of the function's variable will be taken care of in _instantiate_function + + return super()._handle_default_variable(default_variable=variable, + input_shapes=input_shapes, + input_ports=input_ports, + function=function, + params=params) + + def _execute(self, variable=None, context=None, runtime_params=None, **kwargs): + """Override to check for call to reset by ControlSignal""" + # IMPLEMENTATION NOTE: + # This could be augmented to use reset parameter value as argument to reset() + # if it is the same shape an an initializer for the Mechanism + value = super()._execute(variable=variable, context=context, runtime_params=runtime_params, **kwargs) + # No need to reset during initialization (which will occur if **reset_default** != 0) + if not self.is_initializing: + if np.array(self._get_current_parameter_value(RESET,context)).squeeze(): + self.reset(context=context) + value = self.parameters.value._get(context).reshape(value.shape) + return value diff --git a/psyneulink/library/components/mechanisms/processing/objective/comparatormechanism.py b/psyneulink/library/components/mechanisms/processing/objective/comparatormechanism.py index 8583222f154..b385a5488bc 100644 --- a/psyneulink/library/components/mechanisms/processing/objective/comparatormechanism.py +++ b/psyneulink/library/components/mechanisms/processing/objective/comparatormechanism.py @@ -219,8 +219,8 @@ class ComparatorMechanism(ObjectiveMechanism): `MappingProjection` from the OutputPorts referenced by the `sample` and `target` attributes (see `ComparatorMechanism_Structure` for additional details). - function : CombinationFunction, function or method - used to compare the `sample` with the `target`. It can be any `CombinationFunction `, + function : TransformFunction, function or method + used to compare the `sample` with the `target`. It can be any `TransformFunction `, or a python function that takes a 2d array with two items and returns a 1d array of the same length as the two input items. 
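For instance, a custom comparison function of this kind could be as simple as the following (hypothetical sketch, not part of the codebase)::

    import numpy as np

    def squared_error(variable):
        sample, target = variable        # 2d array: [sample, target]
        return (target - sample) ** 2    # 1d array of the same length as the inputs

    # e.g., ComparatorMechanism(sample=sample_mech, target=target_mech, function=squared_error)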
diff --git a/psyneulink/library/components/mechanisms/processing/objective/predictionerrormechanism.py b/psyneulink/library/components/mechanisms/processing/objective/predictionerrormechanism.py index 88b82ca8e03..43ec7bc5edf 100644 --- a/psyneulink/library/components/mechanisms/processing/objective/predictionerrormechanism.py +++ b/psyneulink/library/components/mechanisms/processing/objective/predictionerrormechanism.py @@ -217,7 +217,7 @@ class PredictionErrorMechanism(ComparatorMechanism): target : OutputPort, Mechanism_Base, dict, number, or str specifies the *TARGET* InputPort used by the function to evaluate `sample`. - function : CombinationFunction, ObjectiveFunction, function, or method : default PredictionErrorDeltaFunction + function : TransformFunction, ObjectiveFunction, function, or method : default PredictionErrorDeltaFunction the function used to evaluate the SAMPLE and TARGET inputs. learning_rate : Number : default 0.3 @@ -234,7 +234,7 @@ class PredictionErrorMechanism(ComparatorMechanism): the *TARGET* `InputPort`, the `value ` of which will be used to evaluate `sample `. - function : CombinationFunction, ObjectiveFunction, Function, or method : default PredictionErrorDeltaFunction + function : TransformFunction, ObjectiveFunction, Function, or method : default PredictionErrorDeltaFunction the function used to evaluate the sample and target inputs. output_ports : str, Iterable : default OUTCOME diff --git a/psyneulink/library/compositions/pytorchwrappers.py b/psyneulink/library/compositions/pytorchwrappers.py index b9ecbeed8b4..5fc9f41dbb2 100644 --- a/psyneulink/library/compositions/pytorchwrappers.py +++ b/psyneulink/library/compositions/pytorchwrappers.py @@ -860,11 +860,11 @@ class PytorchMechanismWrapper(): most recent input to the PytorchMechanismWrapper. function : _gen_pytorch_fct - Pytorch version of the Mechanism's function assigned in __init__. + Pytorch version of the Mechanism's function assigned in its __init__. integrator_function : _gen_pytorch_fct - Pytorch version of the Mechanism's integrator_function assigned in __init__ if mechanism - has an integrator_function; this assumes the mechanism also has an integrator_mode attribute + Pytorch version of the Mechanism's integrator_function assigned in its __init__ if Mechanism + has an integrator_function; this assumes the Mechanism also has an integrator_mode attribute that is used to determine whether to execute the integrator_function first, and use its result as the input to its function. 
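To make the ordering described in the integrator_function / integrator_mode note above concrete, here is an illustrative sketch of how a wrapper would apply the two functions; the attribute names (function, integrator_function, integrator_mode, _mechanism) come from the docstrings and code in this diff, but the control flow shown is an assumption based on that description, not the wrapper's actual implementation.

    def forward(wrapper, variable):
        # If the wrapped Mechanism integrates, run the Pytorch integrator_function first
        # and feed its result to the Pytorch version of the Mechanism's main function.
        if getattr(wrapper._mechanism, "integrator_mode", False) and wrapper.integrator_function is not None:
            variable = wrapper.integrator_function(variable)
        return wrapper.function(variable)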
@@ -987,7 +987,7 @@ def execute_function(function, variable, fct_has_mult_args=False, is_combination or (isinstance(variable, torch.Tensor) and len(variable.squeeze(0).shape) == 1) or isinstance(self._mechanism.function, LinearCombination)): # Enforce 2d on value of MechanismWrapper (using unsqueeze) for single InputPort - # or if CombinationFunction (which reduces output to single item from multi-item input) + # or if TransformFunction (which reduces output to single item from multi-item input) if isinstance(variable, torch.Tensor): variable = variable.squeeze(0) return function(variable).unsqueeze(0) @@ -1015,9 +1015,9 @@ def execute_function(function, variable, fct_has_mult_args=False, is_combination self.input = variable # Compute main function of mechanism and return result - from psyneulink.core.components.functions.nonstateful.transformfunctions import CombinationFunction + from psyneulink.core.components.functions.nonstateful.transformfunctions import TransformFunction self.output = execute_function(self.function, variable, - is_combination_fct=isinstance(self._mechanism.function, CombinationFunction)) + is_combination_fct=isinstance(self._mechanism.function, TransformFunction)) return self.output def _gen_llvm_execute(self, ctx, builder, state, params, mech_input, data): @@ -1121,6 +1121,9 @@ class PytorchProjectionWrapper(): receiver : PytorchMechanismWrapper the PytorchMechanismWrapper node from which the PytorchProjectionWrapper sends it value. + function : _gen_pytorch_fct + Pytorch version of the Projection's function assigned in its __init__. + """ def __init__(self, @@ -1169,8 +1172,12 @@ def __init__(self, if projection.learnable is False: self.matrix.requires_grad = False + self.function = projection.function._gen_pytorch_fct(device, context) + + def execute(self, variable): - return torch.matmul(variable, self.matrix) + # return torch.matmul(variable, self.matrix) + return self.function(variable, self.matrix) def log_matrix(self): if self._projection.parameters.matrix.log_condition != LogCondition.OFF: From f20015d5cee3f0461828f843b23a73cd948a35f8 Mon Sep 17 00:00:00 2001 From: jdcpni Date: Fri, 8 Nov 2024 17:15:18 -0500 Subject: [PATCH 406/410] Refactor/ego move figs (#3106) * EGO model: reorganize directories for figs, rename models --- ...pdf => EMComposition (generic example).pdf} | Bin .../{ => CSW}/DeclanParams.py | 0 .../EGO CSW Model (using RNN).py} | 4 +--- .../EGO CSW Model.py} | 0 .../{ => CSW}/Environment.py | 0 .../Figures/EGO CSW Model - PNL (basic).pdf | Bin 0 -> 31880 bytes .../EGO CSW Model - PNL (learning BIG).pdf} | Bin .../Figures/EGO CSW Model - PNL (learning).pdf | Bin 0 -> 33803 bytes .../CSW/Figures/EGO CSW Model - PyTorch.pdf | Bin 0 -> 33427 bytes .../CSW/Figures/EGO Paper Figure.jpg | Bin 0 -> 490620 bytes .../CSW/Figures/EMComposition only.pdf | Bin 0 -> 25456 bytes .../{ => CSW}/ScriptControl.py | 0 .../{ => CSW}/TestParams.py | 0 .../EGO/Using EMComposition/CSW/__init__.py | 0 .../EGO Model - Revaluation.py | 0 .../Figures}/EGO Model - MDP (simple).pdf | Bin .../EGO Revaluation Model - PNL version.pdf | Bin 0 -> 32813 bytes .../Figures}/EGO-Model using EMComposition.pdf | Bin ...O-Model with EMComposition and Learning.pdf | Bin .../Revaluation/Figures}/FIGURES.pdf | Bin .../Revaluation/__init__.py | 0 .../PDFS/EGO Model - MDP (detailed).pdf | Bin 39994 -> 0 bytes .../EM Composition (feedforward EM) figure.pdf | Bin 27934 -> 0 bytes .../EGO/show_graph OUTPUT/PDFS/FIGURES.pdf | Bin 417564 -> 0 bytes 24 files changed, 1 insertion(+), 3 
deletions(-) rename Scripts/Models (Under Development)/EGO/{show_graph OUTPUT/PDFS/EM_composition BIG.pdf => EMComposition (generic example).pdf} (100%) rename Scripts/Models (Under Development)/EGO/Using EMComposition/{ => CSW}/DeclanParams.py (100%) rename Scripts/Models (Under Development)/EGO/Using EMComposition/{EGO Model - CSW with RNN.py => CSW/EGO CSW Model (using RNN).py} (99%) rename Scripts/Models (Under Development)/EGO/Using EMComposition/{EGO Model - CSW with Simple Integrator.py => CSW/EGO CSW Model.py} (100%) rename Scripts/Models (Under Development)/EGO/Using EMComposition/{ => CSW}/Environment.py (100%) create mode 100644 Scripts/Models (Under Development)/EGO/Using EMComposition/CSW/Figures/EGO CSW Model - PNL (basic).pdf rename Scripts/Models (Under Development)/EGO/{show_graph OUTPUT/PDFS/EM_composition BIG - LEARNING.pdf => Using EMComposition/CSW/Figures/EGO CSW Model - PNL (learning BIG).pdf} (100%) create mode 100644 Scripts/Models (Under Development)/EGO/Using EMComposition/CSW/Figures/EGO CSW Model - PNL (learning).pdf create mode 100644 Scripts/Models (Under Development)/EGO/Using EMComposition/CSW/Figures/EGO CSW Model - PyTorch.pdf create mode 100644 Scripts/Models (Under Development)/EGO/Using EMComposition/CSW/Figures/EGO Paper Figure.jpg create mode 100644 Scripts/Models (Under Development)/EGO/Using EMComposition/CSW/Figures/EMComposition only.pdf rename Scripts/Models (Under Development)/EGO/Using EMComposition/{ => CSW}/ScriptControl.py (100%) rename Scripts/Models (Under Development)/EGO/Using EMComposition/{ => CSW}/TestParams.py (100%) create mode 100644 Scripts/Models (Under Development)/EGO/Using EMComposition/CSW/__init__.py rename Scripts/Models (Under Development)/EGO/Using EMComposition/{ => Revaluation}/EGO Model - Revaluation.py (100%) rename Scripts/Models (Under Development)/EGO/{show_graph OUTPUT/PDFS => Using EMComposition/Revaluation/Figures}/EGO Model - MDP (simple).pdf (100%) create mode 100644 Scripts/Models (Under Development)/EGO/Using EMComposition/Revaluation/Figures/EGO Revaluation Model - PNL version.pdf rename Scripts/Models (Under Development)/EGO/{show_graph OUTPUT/PDFS => Using EMComposition/Revaluation/Figures}/EGO-Model using EMComposition.pdf (100%) rename Scripts/Models (Under Development)/EGO/{show_graph OUTPUT/PDFS => Using EMComposition/Revaluation/Figures}/EGO-Model with EMComposition and Learning.pdf (100%) rename Scripts/Models (Under Development)/EGO/{ => Using EMComposition/Revaluation/Figures}/FIGURES.pdf (100%) create mode 100644 Scripts/Models (Under Development)/EGO/Using EMComposition/Revaluation/__init__.py delete mode 100644 Scripts/Models (Under Development)/EGO/show_graph OUTPUT/PDFS/EGO Model - MDP (detailed).pdf delete mode 100644 Scripts/Models (Under Development)/EGO/show_graph OUTPUT/PDFS/EM Composition (feedforward EM) figure.pdf delete mode 100644 Scripts/Models (Under Development)/EGO/show_graph OUTPUT/PDFS/FIGURES.pdf diff --git a/Scripts/Models (Under Development)/EGO/show_graph OUTPUT/PDFS/EM_composition BIG.pdf b/Scripts/Models (Under Development)/EGO/EMComposition (generic example).pdf similarity index 100% rename from Scripts/Models (Under Development)/EGO/show_graph OUTPUT/PDFS/EM_composition BIG.pdf rename to Scripts/Models (Under Development)/EGO/EMComposition (generic example).pdf diff --git a/Scripts/Models (Under Development)/EGO/Using EMComposition/DeclanParams.py b/Scripts/Models (Under Development)/EGO/Using EMComposition/CSW/DeclanParams.py similarity index 100% rename from 
Scripts/Models (Under Development)/EGO/Using EMComposition/DeclanParams.py rename to Scripts/Models (Under Development)/EGO/Using EMComposition/CSW/DeclanParams.py diff --git a/Scripts/Models (Under Development)/EGO/Using EMComposition/EGO Model - CSW with RNN.py b/Scripts/Models (Under Development)/EGO/Using EMComposition/CSW/EGO CSW Model (using RNN).py similarity index 99% rename from Scripts/Models (Under Development)/EGO/Using EMComposition/EGO Model - CSW with RNN.py rename to Scripts/Models (Under Development)/EGO/Using EMComposition/CSW/EGO CSW Model (using RNN).py index c0ed1f5e408..bb3b97cac4d 100644 --- a/Scripts/Models (Under Development)/EGO/Using EMComposition/EGO Model - CSW with RNN.py +++ b/Scripts/Models (Under Development)/EGO/Using EMComposition/CSW/EGO CSW Model (using RNN).py @@ -135,12 +135,10 @@ """ import numpy as np -import graph_scheduler as gs from enum import IntEnum from psyneulink import * -from psyneulink._typing import Union, Literal -from psyneulink.core.scheduling.condition import Any, And, AllHaveRun, AtRunStart +from psyneulink._typing import Union # Settings for running script: diff --git a/Scripts/Models (Under Development)/EGO/Using EMComposition/EGO Model - CSW with Simple Integrator.py b/Scripts/Models (Under Development)/EGO/Using EMComposition/CSW/EGO CSW Model.py similarity index 100% rename from Scripts/Models (Under Development)/EGO/Using EMComposition/EGO Model - CSW with Simple Integrator.py rename to Scripts/Models (Under Development)/EGO/Using EMComposition/CSW/EGO CSW Model.py diff --git a/Scripts/Models (Under Development)/EGO/Using EMComposition/Environment.py b/Scripts/Models (Under Development)/EGO/Using EMComposition/CSW/Environment.py similarity index 100% rename from Scripts/Models (Under Development)/EGO/Using EMComposition/Environment.py rename to Scripts/Models (Under Development)/EGO/Using EMComposition/CSW/Environment.py diff --git a/Scripts/Models (Under Development)/EGO/Using EMComposition/CSW/Figures/EGO CSW Model - PNL (basic).pdf b/Scripts/Models (Under Development)/EGO/Using EMComposition/CSW/Figures/EGO CSW Model - PNL (basic).pdf new file mode 100644 index 0000000000000000000000000000000000000000..8749d22ff57c602b7e3349261caf8fee63103afa GIT binary patch literal 31880 zcmbrlb95z7*TANSN*-KW02 zySr*vcdcD#eKx6rh!`y+9V--R!+GT^6f*$>fvuqh6b}ypy$ry{)X9v1<+r2+MLMnvz$s%fKFebF~EzufPO zx*Sav=l!ndED&!BRy!y?tp0_dFOQy+Fp?u}Nf*uu7=ATax^S#}sWHqdAFL^HpYJ!j zPXqi1*F(PWbkU5fzs3n8V{&75>G>lsgWdOhQ=Bhv@QWwasD-V^2(V}vb)Fc zR|9S0x5P3S@|VHf-2iMO>w-`}FndzAL?&!@$t0CunIhNdDwk8PmaOf_6{WqS1BfrX}nID(iv?E2PJN^^HOmI zSB@8Bj^&-|rtrI?rMLSJ6&Kh*2j5}IyVYx5TWppdU^g@IvZB4G$`a%k?e3Q$)*NPe zXMZV;Qdvgv!J@_0lpOmeWbhHNi&F&RX`hlXbaT0V3wg_!?c3akaz;haY~TlpNc@RY zIS&`$YDeVu!*IC(Dw5rXNrOd9&37uVkJ2bb)cZ$iR2Bg=p_{iiSok3W!KJm0 zcI5Z{m~p0v1&{lgue}#->@lAYFHa9f=2FP}ELt9HpaGAV{ee7vlSol~=&+qJ0hlJt z$LZxolL0B;S_6Kl`I3ev-_jF|GxL-zK^Y`d;ZZX1<$gzvs6r5X3LT*eip*y{1nuQg zkZ+epjj4n>G8Xi!?g~b5!7d$L>9vN)gM3B!&>OkcZSL~%_PRe8-;Ta2!60dn7!+0# zVL*?u8?8$hlySKkCITzy>lHT|MY2H)U9L{FRE*LVff=u)O^4q|n?Ed$ zHjXD-rS5YIPQj;>gmcEaO%8aQfK{aFJN1uq~2k<&jS* zm>R?)nJJ*eS2vz`j>E_?QA%Hl86(e2b8E|8bpvw^H1BZ+o3VVS#-u4KVU#i&{8mFv z>`b+G4q?v@W8~#Ky@tm>-`-+1e0hC5J@>^&5=4z0x2QBPks-bZcxo9&M@=OdX-4X& z>(gR(Q6l`*fS4WP4XrEhJ`Ox1EF-Q#c=}0k!SKB|{JU_mESgsUoKuAhhPq}%$%k^# z0kj|D!JKD4SlDB1e_HWIpTmSxA(U_e>KWaS6@_BF^}w={$9mUcv$}%0Ov4)Ms-i2; zN=HwR=aX;}@o6@7a20`-Uu}ea{?@3`K#BfRD7Awb19&i8ncwLInqnYO@W3Fk{8T+g zo*haNJHQ|*X6*)_(6ZokvDqScGSidgCL#Gv9fX(lb3&#>4vhmf#HHR7@gP_+d>~5< 
zm-4}Kvl>kE?&E_1=q%CiUr3>$_&uBUzH=$S`^2FP6-xCtXYVZO?xx6~MNr*QL%Ys+ z7>k_gKU12&JA$~u@v!ngf0grp{T`GM*xD18Gh@&xSEw!nY znO>5NNdpsCRE^-cwq)*$##MW~MZ9?_<*9$z>t&Us1XG0QpBdn=IR9+0dWbc1gNJjy z-RqiBPTim^nkHURJjwL;e#&MlF0uC{eM$9Z!rmIPtZOEY{t!QS{1qK}SW@FN4P>KO z2Ui_^2Y);soTO`|6tgPt3o1PTXh!AlOidz^?6fS~Fy5+<=)a_^d|*(MQ(8{D)`CP_ z7KHeG8{cIc%|6x1gPX-?Tss$Ec9wbcf#XI-4`Y7xcRbejbR(~dI zR|q_II&{&r`mN{fgMk2jbds1RHLqAt!CV*2_E=Qu3nu9IRtV7D0?L|$0;~Y8>|b?= zrh(sGZ_$1b=2MErXcSM1R-UH5G2O@BMywrN!S$eJKA}v5O3t^=aKk{-7FE*`y33yE zq|LXH&Txtq#q;Q%Ps$a1-++CyB6N4ocBmDldIw7{C2>W%5V~2h4BVqdEBQTTX4p-c zXYIXbZu_A2Ia{)3L>LF;JGzT08geC@#$KO0O?qZ29QHRV*|B=xqM1FHY0qnqi`NrD zw_!38-c?9buzF05%EO0#eToE0W?@Q z6#k~6)w~*QU}_fIcx8>Ud(szl5GjTzN+pH(oF1+u!Z#b#NSRBxKg(HDp`w%*N>=qj z)#Jj5|BiA*F)UJ1u2Tw_nO*UCf~vbXqWdYOx(zxEV^60bZiV^BwHlWzpTq8ys7MQ^KtyW@?mp5sCW*N%(&&A) zj-JtvbDdZX82EN$N#Gq21=|Iv;orQeqOif)>ZMt@Vz}a4Sc|Q{Zbn{piyJ`nnNZ_z zn`=7?$eUH4#AEkT_D_e_xiFWTFZ>Fh6nuLN$eK3B4~NTYhyqbv5d z^NhOdSr4A!qoPH-K*67`sa}Nq(^Dney@ z8EdU>=%NdKffJf!UwmjnR~1^1ls#zWd9O5zVMKU@o?+ zM+af*b}85@4r%X|!2u_QVeI)VjI{%;3|H>SeM}*fVS-ibUIqy19^(dAg?xTNZjNe% z0@xV;I~M=_>rWK_Cv^X_{~f$Dak8-fG5Awu`cIJmH>?+Ra}rl}`W?~}@bLT*et+l< z7=Bj>=!FFd7zyZ&41RBa2l~Gc{A)`uW^3d0->gk&8E6>@SpQil{JDS$E#q&A?SD#4 zza{qnDKY<+IR24-U;X#@->u~Qt^TtVQE|5e5YQ_am;(N`Si!&n@Vfzw|C~oJ3ote} z5VCb6(E2Sg5HK<_u@kT}auDeJH5dNf+TZq$1bqg{kIjr<-e_WtU zK(FFpVB=`_=VnIk|HzUAP7cn0oAIZc!oRIV050Z603~suKPLZnkP^Vr*4e=b;7IVt z*-HPdTa14i_)ow7y~JYtub%v0Ypg#v`j;gm0V695Gu!`Ib*+1Pda2AeT&=vOa~{eZ zIGZq~rW(hmGpCLl$4fFLkf$=lLrS`Vk_vneM3RJ%ABYbW@A2gItI?TRMhZG|^kwpZ82>+8j(>*velq^9L%!-7wG$D*#~ zWgid-kWw}}hXQxi>5<1NJ^TksH!q05p=+g9UUcv#FLbOmIKrmpO2b*}CQp12tJg5H zR8qxS^&NlsVy8bXPCwV+zBCt_FTz9zayA3Hd*WD}!K;v-|30Ytq>j(}&--7GE%eoE z5&Ku8-!gE4TwM(~qxWx4xfWa@;i2U9pJ9^bD>F%T;^2Kvzf$acp@9{UB>PUM49=m6 zSb0{oNBKgHAdo)=^F10rw~K3BFnpLv93o%Z{{r)e@|FbO(MZFdg&pXARUUe-+Q?z9 znr^5N?7{wRs8lar+0cow2$MTUl|$d55!Vx%iVudkP&Uwa{_-?9Kt7Wya+!M4bKZDX zFuo%$3_mKL4&j9CB~oku#Egk|)im|R50{hsmT>N|gk9w|!5Fph5vB}c_i+=^%cTCC ze!KBLj@%F>QM-uL+BBr9Wy2Aw0Ph|{iuee#UkLBsmtmqCx zF886`i6F|ff;JFIi`1(@qN*i=_l&bwAb7C>rXMr_@GLW!%!Svh`kfB5{a8#8}l z_aiY;l?m^#ghJQ=5Y)gkb$PGqrqtmxcM~6wuP>|HvUQs}(Wv1lrk4!-4Zl(sFH-?q z{q@{*+fKpRWRET6nMGPxQ^Q<8EL!(zNvqw2Wjgg3HgaKd%CR&DGEEHrCEcMs;k1*H zdkg!0wn5e$ZF<2;YKM1yx79Kyy?)@Q@{{BM3okvf03bPCZhll9;JNQnGB%&s*{Ppw zspheUD$kA*w1C-&!J!7Q7gGkP?ZbUQemIfZ?%A-ys5vO-TMeAQSVqg*bMtOEu)`p( z6F4I~q{HM6unnF_;ZZoXYI6!@$1;OE+>sO;V=tE)TSCXuR)Y>t)@Y_#wOP#>rWxev zCs?OhLt@+RE=w(235PLo6J@uje5BdarcV@h}}Ql+_Y1pPG~ z(9@VnZND$n(|CcWP2HGnnr)JA5=i^=?YRX}yUdZ;D(8f)e{?>ZLb_8x*GE?nj~wH@ zg)d@=E3-J<3N?UIRR4x3{@bnF^dY2Z{}wa)7dApwdHf$b1F~r(+EG=-z@HWlP)|z zR#uzyR7)~MWslVW%W7F0Y%h6g7g=piTNlYi*6QC9f1VsTEk?6lX0$xalXtqgI6c{P z#aQWR?q0u+c`rt}$Q1K^j&;F)?66ZGwA{?L1DYRHv|jI;~>+G}}%`3CdP+p=j!bjxas_R|?o=Jchf8&VM+ zqr}NdQGX@RDwL&9+uU;`Hn$|EQU_cy@Knx(C72~HmoJw)WM)Mpgsri7ydRn7;m2&; zJuv%nzw}u*>9IeJv#k_WX&sR7(-BOC*7wLciho1F}b5}<|nk_Lqvn0HDgsvs7BOj78;E%tFO z2H&x6^DP~;l6vv4Vf(<~L0-~EC3o(4{bJRAX}pzP9GO8pEUJ{Bl~4A8{j~d_JxdcR z8WHcTk(%~orjxVXwLDqYEL6)FLqg3;f|#q&qg7x-jaA$W`lqtQFipXEPT%hqGp2IadyeJcXI-v0y@z3UJ}X*J zTtszYd%IrRPM({YHmYWE^qo321$-5)w;x}4BRr=`qHj(~ghXN{732ni%r@av!dNs= zNbOL!X5$i{->EA#Cw{zSoOn_}FUv8(JN$sr2;0u{paD=e5?)L(a?4x8tKc)ZvdnJc}j!5@O8f1zTCn@4J{1NVC1FHeP>}_M>>Nzg3!Qt^e=x_|MgC+IhyB;#zr;97j9zK z%j$Ip7@@H3o{@wx;c+^0u!yl$VeIFqR=!SlCkhrEB%_=SgrL|w0$L7nc5w^14dQTR zk=)?H0vWl~Gza`ov!0Du4jeA*{8FgWL$@6k45T6>SViYxkA_wjK!Ha`EBHfkOvF$> zIuwi#CF_*B<(OQB{Me}DoQa9)Jhl2nH+rFYT_H+*uTT&(h5i-BJYg-3iL|Bb9TyxY z$3V@PAa2$^J0!M z>ijc|9r}6nQeCoXaS!L*smljcZcAK@r#Bbw<=PJKHht?1f<>JHAhP|M#@8HcqQSrF 
z288OF=Vq<~o<{L$(wqjGrugN_^q!Xjl$nfe7n?45SU6`-&VAzA6*o~+Yu;Sx-zB-5 zWW+x?(;@?V$Ti#7UyxA|u>#EYUp&I*8~udOp+fw>TFP zL#qZKa`kSPT}ZJv69CuNzpj>7i5R&tpy}0?CAodUX?e|`$n^$l8hIBN&}ild!-)Z4 z_|i?ZP7MW3!fo%)a>Lwxvc~T{5s&wWTTE8M`6e?>>(NU7yf}FgiPv)b8 zCZ8+ujY9)xp>uVaPG$fuQYYmJuR-Z?u5Iw;*tWNZJrk zf*&qTwPty|(*Xwi7@JuBK;%=x`y2uG%X3WHA+&oKjLYQ8%=S9QjgLwnn?Z-B^XsOB@Zc?teL@)2!oU^Jk1zI47&Q+9&$e0o{CiT^MfuGI?w_o z>aYSND}^LKYC_Qxma-@DOaR2(J?5sgdpsq$O_6d#>LP}?`c;}ZLcXUy4#|#7?h7NF z%Y$X9DwAgATuhzm9N;n$)M@D?2UZS^+->P?>7I&;EC;3$vknqIjj@Lfhp86Ib?5gS zB#IJW;w_n?S&il@Em3#JTrnjR;_9*6BHJQ8qfDc?cE1~K89Y=3WR$0j7<3nhSr8Jr z-;u2cihi955J#C9*hOqnc~xONcp8bB9V3rfm_sp5K~)p$mMU*u?W)MoN=!^kL(J$K z@Zm~*c66!w8SE+ty}{`S86`Vsx{DhS+#Kr=v;T1xLJPkdt(jSmnQygY7caXOGrPcT zv-^Sr@9=1hvzNJZCpI}gTTZD>T?8#z%c}O;YA5pA`u(-)y7=WgUF=94p~p!mD@Jk{ zPyamLviXWpZXqEZ3iR^*9RY%n)P6unxk$}|dI>A>0lAO2iZfz_w7p_78mqRlw-LJf z!@B++ReQ^wlOCr-CHCWS(1oM*AzwLW%hQB_k77ig#ZN7U@J^Vnp35sp8*fC6Sp>=B zR-$&epW$?3%Cywosn-Oo6oM2EU<6Q4d{LQH}<} zuD{KI0gjKBi^Sk@Pr0QmeB2KwzoxBb8gy$}FNfu=ihbHynB9%(tiPU%KYZ>!Jts@v zDl6lZT-e=c$=mMm@<8iCe{;g~_-um?3sNuk1BS-`?xkY~mMVC^a6-sL=~7!;$`s&u z3#|h^i*7>W1kIH&2?N?U|Ay6vHH&r2C9UR?JcD`yWt(8bYQ<>HxM{VFv^I4~pI}|c zq})0db=;0`4dF)>cx(`i-rZS0ddH@srrV_xtX{k^)bQn_RX~kQz>TrpC z3B8WLfsY0E*18q6Wp}@9@%l|E5nEO9kc(j>ONs<7cQ=YAlC8MSafc7pJNY5Coc#Nq|v z1!dJqE|Dgl5}a|kQIA|xsL@Qr8<}hLfiS~L4z?B|Rj;qmI4ETU+dJEaoloXWkVM8v z_JTLelGtKF)Z!kKQThr;Ezm4l&;DG>oCF66<|n`QOd*l}%)S?$An z!2@&OHx-KM1H2--BKHc3koweRD4CM{dekw-6QD;!2h#Pn>fFZ!pdbMtgM8ZNq1r4; z^@@=qjNU~CCFZGXn0wbBy>uMY8SMiYuyoRa@LEIwh!xae^^UWpkRraKzSJm_3LQX7 zScI;n2HU(169%rs`6HMj2i*?$gTUHBS?Q`a%LCw|yH+eGva0p#gom`l zWIEdMkBOhS6RvNaL^_b9)|(k_@@x5NaENr=n99*7lHWQvZ2*s`5wt?LW`%rb2e51w zu>>fk2J8V?&%Kgf!IBc(49B1eWs96m~?3m%HkH_L3n0|(_JTRJ2 z-J|B1zo>SKeirLqK?{~lBQSs|0b@R2_zUPt79fmJz{{hZ42boixYkV*HA+-a(hep? 
zCfPZTKpE6D&s6gmwY;v>I=^ulWHi$OI<|)f@lDHJZAZ1MDq+`qFJoVrnt~gHn-bRu zH*A}k@8uTXF&|qlq1JF)soV8Dv_G4-uOr_thM4fDJXlyL;H2aWVE{OApKNbYcTwA3 z3+yNL`#GfB`=qOPUWuN72de!L(KjUfuzKw&){`P?<*!kMo%6i(EX?DqVX`w`z(WzS z{@9GOh*ZPlYEu27g1*<;(2dvg7u9?T?CJ^3r;U=iei-=@nNhV>ysT%$&$nlo%*PV_ zxYTo|CQ=^WQe+q7XI79PJHc}>ZQQH~qpsjqVpgQ|tCy1zKU7+Xg~KWZn+QQKv&#%0 zs!2-40tknqRxTpP$F01jpk7~o988LcnNUc_J1jMo+ZBr-h?0%KBHFkG130Sml8pXRJx|cM@nfTX z*y=QCTfl?hkYQbhDKQWUA2TVI1S?YIafoMGEBBXZ2l;$+NAFUsqm@;Eq-f)dfB#RKCI=cW#GJ=VIZ%r ztV_}+aV|e>u@Z`Mc^o7^^!Pzu-O_jje8gOgR=AUNyYIMf+CCc(KKFiE^46?gxWdDq zZNA0Jk7BUCCDTH8k zo$#t=W`vQ0Akmd=_bUM3298fV3ZYDSMJSIA@!TQT53t-S=y%64r=#%~9}&BObj!wV zyxsTG+tI{;7vGA>M|4!{1zX2{A+AQUgfZywv(9mWw&AQKd1hs_gMcg>aVR~71~ z5(_@^MW!n5bFRbB$TG0Oj`wb77{8|#d>ez3kaSKmgF{PhIE0g874o(EQ;s}Vn4F+u87@G*N(o9^XK;7wP49(ekAmK}1@C$TD{_o`zz&6r~Y z5C6`w_1^QnsZ$$VUf!4Gqoy#vJD__rS6Ys=m|Yq#K^%@?F9%o+*^QyN%iIu7W^zZ} zO8>I`pVyiViER|-YGt_Z7HdX)*>b5*=GLdj>3;M9(+%YB<-X#q1VVt1b_ad%!PQ)+-E)z7ObFOp z+;hJna*y$vK$_@5$xf^fUic;HgxLW)t8+)s*fkS&u(GFDAomF4s6SfG3tJw}stpA0w|63Vh!ib8DT6}8Cf&NeN1EUjyh+zkr zG)ekrbO+>?)HSh-g&FTBo7Z8ZJ&Ho|5qlY5lA_^|J>?rxQQ|@z%{qAa3_`3P_KLiO zV;C;zS*BJfW%To$Q@bF%f`B^=?+odEFKxud9%si#$InsTv3I}M;36+V{GGfvZ68cu zWM6;ZDBplj)uJOTr!nU?=P4+g9PJchZQ?a@93(!pNh29sB=j)s0sc_Pk&um(rl$>Y zI!AZHZ4ix$y4%xZ$~APlO6jJc@g7WMGv=oFwv1QTbtQ)+~RJ!Jg&&D$~(yl6|1sx$1byr0{il_DjY#=9?%`=)bH3jf=$BEdX=wes$1>+g<3JU zynZ&MplkR~>sMb63w%jUM|h7|bxCKRv^`j(Vu6qB>0_-h2-l}YFM=`L!`p>}zsJ6> z;`G%ZAiuamvqh~2T z5ng<6GsAnS2fHKdA}`qT_x!b)h3);*-D(}X_4of!@b*Cr(7n@QdchWJ*!cPuep)#w z(Rr{oK@1)6Y-AoIhIg-Oxu5wAB9(p!jM+_2J4G^6u)^O><8i%!Jqlg!x>gx!2aPuCnxkDI#GSfAF#E&6A)KAqHJ?}QSO?P|Yd&E@ zcb*RVguMu(HASb^qF8;{xIhh4Y=V*@gJd}lAe*Pso+yvpFD11DY@5Ft%{fJe(f5Y; zcPNBdvQyhD8*g z?S&X$*N=bTu8QLWvb88X<`em4@@Ic^KRzMsAu+hA8KT%PW;@(B^2t>c8;nu0s@(T5 zNo{noVr2&AYcnB2%4(h(B;L!}YWcaG%i|!cb-w!b^EV}|!Rv#7fWX)`fhKSQHhVe8 za+6-vU)S?-rS@(k2F0ie%*JYDpQX}gMvXXP@kVy$Dx)kQ1iwF6d6SW0*C z6i05+NEB9Aau#a49}gUtk8-I>{i7Rj#!g-_zwp%JSdSetnpY}IN|G%H6&#O03oxZE z2IW=AS~7x|GH)XKu*GJ$cjxt?g*5)OPcA ztV88Bo7$^#I`Gm?+4JYzPm}UdP=$QMztn+^4`ggq`s}0&)WbNlsXQvj%5W6B1vhmP zW?od75{Ei=21gHE7pBs}-rK|!H54<*op5zT>}+LVG=Z?97SFbZK|9+ko{H)8Y=AYE zy$AJqa|au%&*8DqJmv5Ikt%yXyK5-s>P@H_bO#J$KGc+_N^T7MCwA zHi<@EW-Q-$mxOFDs9F)rc_*yAk{Jwf+d*NQmMx)o6f}^^+4R7Dd%!!(h_`L(U+v~N zUo5uoaLsjf-()>+X2A5ePig33j*Ua)PU3*YgtgFfj95gjviH>DK4E@6pJuo0)Ax{( z?b23KS)$iUm|d}Lj08j$UYaIp$D4D?X6$2`>ikse72C@QKR{69{)QtNfxcN1o?NWsauZvAfGN(BWi-822WhOxLs-^~e! z{TurZsiga^mF@2MGp&WN-s{pD8=Ic0)My#q$e;rWWjAEZEnj*`i7rcr#;q0x@TXI! 
z?_^NZ{w zWpXE$m!}v~cXT(UwM~~8i@gnZ&aCx%+8;Un*XI{c`M4ENNMn+$KiPu9-gRr8O?)G* zGI+c8Qz93yDkf-we@wSNgjKw^P*`#n=K3x)|FFkQKvhxgP6=r<{4Vvg^Hm)tTP@)R zOPJaf4sqITMikF)lJMu=2)a9pyH`B=tlODEW^QH6{wrIyb@>9(xk5J_z0+Y;=qBueJmHB4dnQ-$#rf zh>^X`kgw~u22j9^&<HB1wC9!dBM__Z43D|Rlycu zdpKopID1(Ws-etY6%vM+FU$^Wo8~KgS+lqSbFW;WFa(b}0yP9-AZBDA?iSn@$D@Ha z^bSHVRbU1LkKFd|a95In4ZkaFrY?#NxGQ?5F^UbhYmu0*9}DpDR9uWJ&jle3bE|3j zn{vL)M>oc&4-I$h2D{j$XeahD?}{2^P@89Yd$d;BycVE8?UPU$h0b>x6)SAh8x3Mp)tkPf#+f{{$Vb%oNr7vyNm|FAQRdd!%N{1(Iy5 zv4XAf8fgHghGe+r;=<1sxhKZ2pxis!fewKlAepB)!?2V?EoEC=kcK7z=Jj#w1NPG= zJKh@JzC3qyCZ9_v)?O&5C{57;YMHegM$ zvo)9FhR&F;lRO&5bgO_R%TVlKSaR6bW(Jyc6gg;9&LXXk zHjx#ZBc?knj^%8u-r$s+k!)p=+NK9I8^qQ9P(UDEpbiAj^e7PLyQoz=%3nwMd=Bq~XR}g$47s}OUP_PCiX3;B_+MZ^`UvIH)WhrPd=5YUKB8$^+( zcnX$Bj$ru4kQs536(x^wdR+$q&jXk(igLzuLaV|3`WoSL-?9rc#&Kt!?E8yYKO!05 zJFxTwy=ve>;A}b%4i0u7(qblk`DjFrgr<8SZScTC+{*#|P(tXvgdmw|G3I#eF?@c< z1iZtjgEFM%P#icI{#L4zm5C*Y*4-ixmQQ@JwzGCo!Hz};=7m*7sXnX|PNyu&cqV2* z^u=Tj$-%sg!WGAM73N|UlwAvf6`Vc5WIqcH{0weVgR5rj48|`Qy>}Vk=Z#(>hVHV5 z=FS;)whiu?>@R;7>-2#IkMLq;78>}DXQ${Y$lRjLatx?g6171-z#+4yR~WGO?Tz&% zc|ad!5$Or>Hj04dK11PgvIrpo|I#GM1O7E4K?ft^O8h(|a3{BKegpeU28o*gopamIr1Y&W}vNHXp%@L_zp1?34cZpM#R?m1>Wc#y!y>?I4$c9Ex4P3SJ;*;j{xd$TZgTP+*&l#1#gmQl!?gdkD;JLZDutq#+&< zE1haX{zQR2a#w>_q(4R-Cnu+BgF#{3aH0V&oNv>ujq0sZH~dDuc_B z-R=@8yZGH9*Dmxan(FhwTgsi!HFO+TAs9M@1cgVLLu3T80H2&Qy7dRnH#k%}<;*LK z8E@SmJ4%;fXq5c-lWG@_2JQkaHCN_rFwJnAZgk;V(Bu@edEh@JI|==F)KR!23Ml3` z@JRj7N(q^quf8kAbN2@$4YI-Ee1obM?^44aO}8I12Msm0E?3Ydrb9XmXCAmVOjRc- zHB3sOTp>*(kv18jD3FLJk!TkojXe_%o2?<>W?BI~-=nyKxz*GygqhvLOh5|T*$69g zh^n`L5J75U6*9W*e`?m>jZH~Gt%713EGWdUze9HLq)ys#qR#x0rZp{qd5Xr~riTwK zB-FHaVB)k(+tkn)Klo%v&O)cOt2Z_HDAD8TKgszO9?iC=*%0N0lO!gy1Br2|$PVP$u9 zk=Av<6TQUZbEj#DFa2p1-I>m`-bnJz`ZE6I$^` z>D`JR+0)c@jI92P(_e1&gm{)!w(~yObuMQmmSH1PBJv`~rAG%xlSi3HImI^TE2_q; znIoKW!srL(qs!@a%%<{_IXHfhX!STzqK|uCaeWn{mPD3U=EaLI+7T$9>h=>W^Jibq z^jU>!+qPEpb)>D~oOaBUw?^6_ln~n?01$PEPFL9dUE*%|+f@)eYk^O4fgy-sm?6|V z;aJ+#CmJyof(K68+Q<6Mx%Czh=0h_i8NB?G*p*Twn89k0=Fi}*IA z%{)`mFpZO)HTPn}+*| z9xV)8>?>n9L+3rQ1hIThcwH)B`96j;(7&0#e?=+?8TdaO|4-WJZ^tvSGyli!|M2I3 zQ1X8f)_=fp5rCtSgSnlPt;3&z`pc5b8Cd`3yM-m?RAeQoRm`mcj&cB3C0lC)8wF)q zmH(KD8CaWJxf4+QwIKLyLh#py@?Qp42ByCeZ>IlP3;hcSr)6d1AfRPtU?X5+VEE03 zGyMMbFD_it$-v6oNYKXA3P8Z{_cTFAqd#Cd2Pfm->YtYWsnaqsvi%i=4eTTU=B8%9 z+4J9*`70?q0jyOCIR171pNhmkl|Oy`{b&K0ejEQU2>cH!{|^KHUyk|L^_&fz{?hI$ zzcKSa$Ns8929AKg(Eb01lgu3)orKK{9RBfxjKRO_jEw*4(*GO)0>=N!?iIE9ozQD; zV@g1;W^N;B<7obm@V5bK=EhEDj{oNC|8(d7UtweWlN!MG*D>r&e_qdj3J1p@@c+Lw zR@Oh3e+nndpU3R~F08*tmgRpPXJY@8Yw$ZS;rFq>1OEFVwEbpd{wHsl;O~_1Ke+t= zJ|_Qg`TxmZ_&;->{|A@P&d$N|zqovNFXe9!PdXb(?*Q|7GV_}m7E0^lpvGx1DsKU; zeoacx2$4~xFQ6Jw7t*+*)!Et6kz77LHV7psv%-_T?(epP~CCA=G z!S(oFaD-5!6xcl=RhFa*=h%zPp=(#1ctAyJG3~G>@^Op$}tn88=0^Gqx3(LnROY?a2wAPofmGNNi8w%16uOXgHJhavVUt~b0TyQRO( zP)u$x3oe~b%XMj zbh)HW7@K@a`x(Zowngd3x*b|Ue(zc_QW!UtPsm@4wlKsd>_(yge$04$0 zS)JCWR$HIv;TZPYjwD=p61?zlTf$M+YU^*a@ARZy9pLys=sTPr0vAqx4iGNBnLQKJ zQh!vN9iOtNuy45ZfhtQ$(_^x^90DvKn<}4~Cd!Brc08VSw@$QFkmcV596){?a_YdF zpDS^U-|N4XXX`7;FoJrK4@MI3me8vJ8;Ox<(8Mxr81H@ELyq$Qo_=QX3NL|Vf$`P$ zhs*kIG}BBCwJsz`#Sif4LH-2leJPc1-#8UG9Y7=&iuF~erOrlhDtV9hWw~9_pOPk$ z^T7n*((0doUhn8ES@}^Su(^Rf3u1>?)7lv3ozCzS7Yy7$xYyK+EYjF!Ywf8$p$G9j zr&z^4k?};Z3`@g6K5OkZ71~a4O%A7WFtZ*x^9*6inn)su5^u%pqX?qHFoG+In(GDPwY0<4l}tk?}`T>Q)@Q4~He=hbNl+_vwUa@r~NzNh@$&SAp5 zG2BM$>x~wBq5G-9Rt3F&70eMGD>R6Dya=M=B)swT=^NM{Y}`L`7l8_ZpLB#A(f6n1N+^j`V_buwRZth2#fZQr(YC?;{)Aj z)L@yweAEq0bkco&+8ZO_W_Np8N3vd*(Z=)2lCyAI>~}!r20#n?LN;?HaJDtNI|R-S z8M|Mfzk-`GywI93JG3mv#~MdSc(92|vS9?KBJB#&r)BRc@?d=M*vx*qn3oD_rNwn@ 
z^H(x>o~gXgu6z8o~07N z7J#`b_Idz97oP;qkuiQiW?oWUV8?WC-e&xj$}H3#ff0r%+lc?T)_s(Ke<3kV&jhfa z{>H?apvCn6ly}x)QEqLcA6i4u@Zq(izJ1u5z720^+(8l*u& zK|l}ac5nA~pYMIYKh9rgE@m#~e%5+c+_4rH&u`t}>b98#9N9VvA8=fivlAz!!D&OQ zL&8o_L+fViR3r0{n&$Klue!Ifd*`^}W#@H+Bo9es*wk9NCMzWTiRo!jmqIWICU2&n&P$ z9i5+)AC6q)IBAW1o1z)O@;r)9Dr-IxRp2|it{G5u-FTeFgtpeMsj2C``I3$o)RF_I zTa2JPv2IWH5F}(JC>2X@=-$0YOODfek;l97VyQea5q3h&&z}U`S-gdJT7Mrs&%mp~ z{&~fmYq(>$O?btK>yVjJ1iDlwR=~sY>7goqD|mGHvXY|lGaA9tnH}6v7a43!Voj>% zfcv^g5?+PdF)uA+>`-F}lL&))h-nw;tz)v$_~#!^8dSK!+tM%GqblhwIz|^H>fk-q zx)A{8I9skJ30*5;3!71@L?fmxntknmqEA5LB9GX4^Mq>*|^60D?yv|K&)V5)N0#(_=y~0h} zHQyj{s@hDMcKZZ=<94W#Ikyz~#v>&CpkQ)~ce|20EJQM&dKSC8BLxmBKjBDyJC}=+ z`Bz-51M#BKvMt-@gB{?+dlQ7_UX*5UDHrPOY@`r;6WqL|E^7Qu>@3nE*6e`{#<(o2 zR@v*|a(vl_h(~hKH4(lKLh;woUC%7*KMfpiyb-#JYgy3i1tW>HFEO_@-@MxpJ7Lm| zI$BPD9n_*TYiD}$G7Pm}Ic-W*1~Zs-Oxz;Aby5>!ReXy6(1?&hBaY}2n_{}dN@ zxxv=k`RT0Nx?g3#kWARjTe}aO;q#}ST1QP;4OVPzjEWm1*Nhgx)2En#O-$dt)$P=a zb*Qn^Fc`M{Oj0<5iR91? zWeZ~@G*oQIdqT@%o){Q^K(WCV+0ZJm+24)~YXk%3ID~aE=5|IlpO2qAc_ddK@dB1U zFmfz(Z^^^?BvA1x4$0>REwlc!grMhs=6H*X@W-TvL6N4*7{z-;m*ITM zH)?W`j$HXJ&ksiE+N^wpt!b8!8ifJZuexTOXVC&k*c&1h%3l(l`=iCRw?iHx%Ug$r z#6RMXx}U*luYv{d9X)bECA`jEcK_{iF?p@F@EgMNXJ$Yy5(x1;TC*4|2~I_aJaBeI zpt#}Nr;_Ox*8K0O!y%PWf_lDX(eHDAfVhQeu;<18Wc+G;fHwU;uiZ@6>&3I)jn4=td?I3Y|;cup}3j>u_3uJbKRb`a}PdCRYf=2_!2_HgSb7;hVJTi zPEo$;@X8ISXM^9vdRY79oKez>$gN%(L$v+K4>F+odafB@pI}!lVVxa;ToPeos+N^y zLoqFWOZ}pA!;Q%s9jlq;o^zRzdEE`&7!2rCl0fgoRyZFZErN9)-L& z;v|kyNe`^jW_$5WB@$k{-ED;F3+oqQp{dU+nHi^?^inU12pUxz#Fp5f67d(hBNz)* zenLzM#S;2f&(P#edH9v9No0uvY2!LIad%`*0zxAgv;h<+v-|f!c~#X;DfPVz1{M$Iy^e9j}QY@c>Cx0SKeNevl*%+QKn=)vBO3 z)J@C25^plTyxX5femj2w8?UD`usF~umIm)zfcUosb+M;SMB z>jG$k1P*_(?Px?l!J#PoncW}^wwmd6$a;ciH_dun6sZ9c1D6r&yZI1XAxFjrm2~%+ z7r}>*2H!)D#ZV1c$XV4QlkFx)Xk<~}Mfa(O%WRK0C%0YkZqlV6Dsb8=PhY^BB=BVV zYiNCrt?Tgp;vCGAbiJXxSX$cj$jRPQ1=r|N?!5(g@{*GRFfo1#a=~s5R@V59J4U$r zbpgWq3gakmo`1c-ocs3uR_=x~HmYxITb4ZeHtTJ$0<5U$U2>C6TavYsGPO;D;e?*r zzNl_qQNA>^xF?ct%#2v{c8GDnFGNPT!Pp;6gNSLEFN$|mujv~@+fd)jQAjF|i{yxa zMI^Htg`GS+J#q0)4BgmptMW#C2jMazxGpYL&*Jc?2I&Y)ip7g|cSt{+R=&i$OmN+bmo8T8 z5#xabYn*ZD@g2N0#iyH$8cvRdn5ZR6pEUTq(IfL%`6~VOLiyy<6uW*4&qP<3|9I}x zrlP;98{;MNC+~WrLj=~(eR+F^YI>u=A6k9xvG3kUobVnU$&_{i&q)wN z(W5X5mGrE(5?AOtvAgqcQGKDCliSt+zyA>355rPqo zmAK8s&4y%F7>JN*4|V7rq~v^(YVSw&U3EMftsGGYAa!rJ(xFltYFXWAjJ<|$r7R0P zA*{_rI5v{m#b&d+@?>#jAaas=!|O!4AG)sYfTA%(WZBxPfb>WM7QlUNFU=!xl@>Jl zY}EY`mFB>#-^|yj0Ca8~w(eF%#7w$DbFa!zsg{~`E2aw0=!+@Sk}fo~>iStmP_Bct zBPnbxR;^4U9u4kO^4a;KIX%6?Et=$y z)gcM@%Ld9+A$e{v_104?!^DrPDYKkCHoRGLz1~7#4u$dRzmjq0d&80o`pWEz(o;Tr zUTdH!+6|9brZHl{EzuM2?xF@XAtyKD>2GR!Yq%8)MSrgWUn5C0SdXki{Yd$As)x9T^L(91>20TVmDgZQm*1?vE6cEj3^PtoF984aA?eiEAdSx9b0^f_U?V; z!QsIQh`mIK`;_Dz^o{za>D$^+_YfWqL>@igWa1G{UoB&+MSq%X8hpxZS2Qn9xGD9} zlXnmGz2RQIIHY6vbVzppV#%+sKOLv$rDe8mo`mi$iE{sPfOZ6iJ3dbs5O1j<3h5;) z35^xk4u_Lct4S7AV`>9(t|Wkf*sSdBx`VYGKxE_%RZ>)WVGP-NGMPpNxvL zrX&yATklX`;dcvf8b9O>FvVoRJy6(8#rzcPwg>WtX*z(@tD`? zVA1>q#;Hj4WAd33phi7%_+V_E4RL*ycYW}j;8Y=dy{8Q+XO5NUz#A5qtz*TJ!A`*# zNRY85I?t0V>vY<5ns?Z#FMB$bWtrmoW_$Uh8!R{YwKA%Dg_5$0)9CxG?LTmRhI%*H z#56C~E;jUgfrrPDn|EOO1F~}3+Up}~k6FJMOSKI4g+j7AR{BqiBFuEB^kw==Rf?q` zbyAPOPd%)FsZMwA60JN_{=hXOT20os^h;Rsb<_DNjZWfA#@o>c!GYA7=UKZkx4W`u_6! 
zhOb-0Z=Ci7LdiZXJ8PVbz}a3O5X`=KT-NsKi_gu+8a1Q?M{nXdmvogIt~x=)Ao*7B zc%Y_;tVr6DJf`wT&KoLs8ASw>X>(e#UfZ?#%Vnrff~!!lv^^4Er=XE4?>25gC0y#c%1>lu8 zLz8Kj9D;~Us`aQ{6~X0MgauK@I?^Tlav#Mi_^Wp46p=m-UUVzG)icZn4Q}H2Zxfh~ z%jsW4>vSCMaZ)w-T`=*%K+ZvudIRxtbOH!-(&+go?}k=Z#daKgLt?cbsQNp*1UP@T zn9q=FQ5)kTlwVcuYmYHKQ}_$j82erbHRI&(S93MEHw1wQ{~Gx8NDKD~}=c z&3aqa9)_c2DnAO#%md6J0H?mYJiP{&#&RaMRPGPUQ+)YTdL6~X1N;jxAtg-9Fu>$(2dDJtsA+5}?w(Uf6_-k1{-b z*g+yNPC>ZNq708X>&xIlNk-34^Qil9Y@Q|-TwH;PC=>bW%f}*HD zD`f}dlg?ni3!H6!WGzqyKl4ji0>{T8rHR}BFV;}s7+5+9hR7Y5XZ6?w4C(R{zU{!I zE%V;7%PbIoq2uG_@u*+zYz}omE=#U8k-d?0K<#2FPPMIZ&(`g19Y*owXxU~zUooHs zfB9v#{Ffc9$3ESH4J|bh)7*h$s)((&MIQ=SfhAaLvJO+#C*lcrkPY+rL}unVyyBbrQ|(LnRP< znSyn?z6WwvYpx_mW2f}0a89qlY)#OdOZ~WGn5qt`>JRBlI)F2!rw*>7_6xz?qRuH_ zh{t)T5H55tgUng-xiT9C#p2&8M z-BtHU6;LDa*bB-<#fOiU7eH5Y7l0vfv5CF%8I`Z^YlTBpn3&)+(MZE zPn3#KAW~F`&HG4p&m(}%f$rkzAQtn&Zov|laaXO^0LA=Pk|A@h=Hi}Qv?=p;kGPd7 z)7N@_+0)JX`O%V5$sEZ0$NUC`rmStsGZytRZDW)h0#1_|dfP5r+m6m@ZRO?JfLBiD z?MFyscGB=PPouJDM4rlVhig>{!?UV0m_aJDFTMRGu1Z8`NIAaD;z%9Tkx4y%U#TlK za!mT|TaTh@?zbYF8ohYQy(^?P_499g?2ut$d}s6+2iz2#FV((*!e z(`IMiqth%~t;I3HScNKn_M)uj+?4_(Ny8qxnu#~0DN(S#)Xx_~_@KfY-n0^tL^>~& zUHLO#l(o;S3iO9>NYLd*DD^TbA~-yK@Tuw18D(ZrTF%Y9d=uHcp~S(rn?QpI%#SS> z!q%tT*VrI4P}hIfX0C5&|8zd7*g!6&UstHiiA@ei43 zl+|Ph?Q&xn)kp_DeJh2gqc)R~x0-E)ODDiP%V z89w(~V7yH8_LSJ6&s=upRyLf@$XqSR_8r-XUl5O?I5G>f*WkaC3~nHK0_Mlm34^TF zsu=FpPLdT$kJ*mee1Td3T>As?J8z_RnZLG72EV?SS&#ZY0ApRiQDF6cX5@VKhZ{5Q z#-v_wx@I~*4Cr3&paBMHk+kiKFm_;FUG_)?iJk;Qrxv*^o$^(75@ydBwj=v)Pu?*t z_Qr?&-KVwy7rY$imD11I3e~8G1-kf0_${sRM zF}Tqh2a$zl>*ZaIQIsP?idG=ksGfjtf%3Gc{bd6TXGx1ViwkR>>gR!G;6^;5(AGX4 zZNGvw)MIQK%TaaV?|6l_a&ps41Jajgma& z-iUCRaW;}3yfE_0cbM)P;!V<);S;+&NP!cv#@=4(d9y(S7YIke@B}IWwX6iqWqbi` zM_8>qpg_rflzih^&mMHMghKim%^iuv&zW31q;K`qm-FI3e>p+t+asJR=b5tFPvX@%s z{;dikxmlw}+p_gf(=$=+8nmB>L*we*7BvrH4jm31yxG1He=E!;NJQmD73vh}9$Vw9 z5KCw3?RmGx_ED#hsQwh88b`>ZacG_93{g|UC3ivQLDJ(TT3c9=5s7d!As7X^g^!3( z)qmr)U~&g(H0r3vCW%X~$d4 z@HDMao~(pV7+xO}MqEns`-hLuj9#4q@f(6ikw|AF>azqDNPJPf`y$s#o;85zh2P}l3%Psi?!x1NH!XvP3fT|k*;KSLr;TPN? 
zZxOfDX@|ZGb5~@U_L^;zY9=SR##q~prQUF?o+_ne0oeHiV$Dtjsy6cKx7wrTO`?q+ zVwe#H=Su5{!(hydXY^(v9;>Y+XYfd*)wo8UJZ2TVPwIdHXW2c?L;=$aCm;g>RFA5OZ8-4VLW*Ut~w_c)!4%u5|j%cKGqnC=%q1HA8g!DQK-#q zXcIjEI_I@@-f-deejLehKBP>IDpF2U_Ib8^A=I!_C$SC4cG~;#!R3eVIV8i zY{Z`SAbk8-C=*3rNHbP5C_nqdnC(OIcLaImeaP25C7&%O#q(cHk0x#?XLp%o`=d;1 zqJH^+@3b~Vv_4g2Z>knWhEvQ)p$gIDgg*SJA=#A|h|qOHv)Zg=qlA3n;Hj%vyMO?R ze{Yh&Y7#e=J!3BNyZ}CLl{;tNgbvGG!B@;{7%W*rnu>{cs#t8Mz^J6^VUqQHyu|70 z@P48ANch)R1*wOH80X!U%Ari*LCoA#bRVUhWOh0n@_AsrEuCJ=%AQ%ewglOZ+2O2o zPzq*G)+ebjBQKUr(ps=HQAeLVGEp9;S%&?U_)b-$sT?FoUX`845!8b zyR-ZGYxF!8<6<$Ags4@ogEV)e>TIxuHm)X@kBy=KuW9^lh1|J&5Jx0 z_BT7Fy7Rj0x<%#BJg(g7KCVpmPG|+NSX=nJv{%t;FD&Y!O4!0uPd`@E=c;^D$l*8B z5sjjStflJi#hOD5neeQL9Sx*^o>;B7h^Dn}u9$7EhA1FJ?&Pw2?_`qk07NX&@i2jR zlB;cgW>xfH;?4eBNWwlyD=f&Rf11lcU?>so8COPBHCl~%@^prQ(?k$s_*{f|oF5hT zTX6)FaYBD?)$*6GU~AY*d~NKw4P2Qj?l~Koi}O}A=#Y2q&YdC)vjoR~@}XYwRrkf!8kgrkA9(&ng|?XWK2syFNJj*yUKWU5V1fPD#JjcPmoOh8@#tQORZx?~ zRs9FeNNl@^>gq)8m?1Yf)Z?sh#T)m;gS=M4Br@iLqhDcBF<+EpB)6^;-$z&X#6NT3 zrin+d$IKilgij)S`-oh;!2t_JC`{>sQT)G^BP9;xge{kG~gcVcElFE^Mq&W1ehoL)2LDsYjHPcYe?maOhqD7^c#RH zqCO@dcGew zGO!g<21W_jx0+BNrCreX9bf?fUrJG|0b?iBPVQ@{8U9Eo!zI3RQ740>retr;m`>B) zyOEFg4PKK8@2k@m#5dOJB#>i=7tSRD&xFMvKi$v4Dz0I zFO&Br@FP0pJ)%u=-~l<}`BJZH-!#h%)C$E*Bx8qll%l-+!g$D~A=kSn8|E2l#VFcJ zeY1l&&b@yjLj)fnEvaANmzSB>U`ERHsv%=F#*cjWutb}9o62WL zIoA)V?*-*iEkTh?A&zM4N zeI0YzYpyxY%AZe$!&w+n1G770`;)6+0*ulsS;Z&&r#-U!WBaXL0KKvOkJA8#vKvJt zx1UCH#=aG_mD|tS)v;4Rn83t``9lD-Sn`whDz{6&lLtxq#?67+7%GjVuN}ckJJ4>w zZ>YRqVhkOwoJ_Ij*G)@#k{4-6Kk{TJSJS@W*cK3a69vI$?~p|Ck! z=fSHJ3TF1jwN^h6&Le#KzWzXUu@WK6txWl8W}mG`_TxOnQV9F@*eZC3SYn%rvs zyk#f&=<)5rXwo(`X-_b$x)bGF8&3PuMKN2OKquPzp7>w!P8`~u_=2eU(A*F#rrzd7 zD?W20I*DPRV&6G?=ikSY5wq&2Yh|K%fO5SpKzuPiBLTSS{H(o_J>IoC+@6)aHK60? z;#CK)``|Dpd+SmLGpIyutnu)ZD!g-Zky8r{@!qB#SDbazyCNd5I6^$#befA)?#PkhN+v2 zK~NVKd3$I>wK_EhLBKQ7A*_~MiAvc@qr`|<-B?<&HCk$_fezso>A8WU`&@9ME9!5} z>*b^IVW-jH{OMrXEISqUWZs`4*Rze)-JfDr!d`xid9E*{Zs=(St1!E(^gaSPG|Wur zMRIB)vIUNj1SaCsBBTQKDSq}-4`-T)(mk8=O3&Fwt+dqW1via^ zrnfx9#KomwF=ZX-g$@F$uh0Y;Dr#)p4cF?xk3+G8UP4oMv0XU(%y_>dQWO_1MSkn3 z%BGrj3i7lbiU~v|){R<9f3-j$||&2p$&7ao!pN0lPKE3?^6om7H&_o~;SMXF98Q|L~&>q2^a zQ&qlFqz-DF((*vSB*p2G+_D)jBLlg}ZO!(hN1xLl8&o%k6P~q~y<5_sfG=o*p-oMX!sF^x z)jF|4XIM%VzSVRhs9H{Q`WH-??fJNfX+0Lt(0h;KY^{@P=dP2B(j18KkVurUp^8~&OgNY+DHVi z(h(66K8;$7D?qN9rteqE7p8@@9Fy{CrA#<4q}1*Hbjdq?BF&P4jU9&7m}#+CG@S_b zF{-dqKZ4wE2o!}YAO3~mxO>+9Pk06l`ZHSq$K6x(KVgNR561s_WdFZD82>kTCib08 z0Pq%`aYLbKl0YyIUwj$WjFv__FP`kR8%=!EjHQ* z?h6eycH!-B%eNc1B1QsNZuc`vzC`El3$NNT-PtR*7p(`wp6>0l>=IYLaIG&f2zVct zg5-H05Z!_hVBknF>y;tET}u6^JuycUY2W@^)^@Ap9(G( z1;t}NA+gDhAQCKFqO~+-zdmjDo1d4;27Mb23)4X?Zo8tiI46ANMxN9kt09#b+-)CP zl8k2#o!;n1a1hhxOnNC&@%5oq3adF^R4!la>7%#IF+{QsM(<)=^~q)OH1Y;)W^Faj>2WF9j-s1^C|k!m48(_X$8r^|CF9Md7(Z+E(B6)*Et4I2 z>|lSC*BW$RD>;baJgSk0zj-7nOUUij5doy)jp)B<@J=azXb{B3^CyM#dm@FuLm=Pz zjyus03P=8-Dnw3`Sqy4!VD&HT&3E~KAZ~bgxNeCXPOd+R8}9!^+<<}H-@EY(ar1*$ z`5STbqs)Ir-26yJ@mDs9Uyb;GN!0qq`I;+1PDHySL~a@GAs+%CGa+=RiSZ!`6Ja;ht&tH3v}=Mwt}#)fE~BzkB};MrH*Hqpb*865=UzjeYO}+OF=dYEEB?i z|KUKhT%>O%@!tGAuQV-5OrjA>1u{;k!UOaS2Ti4RA5D+@c~v4oiUkrnyZMukYKowk zeZmIcc8T7-8KP5tFCAHh^wDoP;Z?G8QKnuCz<&2%RQ5CW+|iMof5zosr}6(9k#6}v z(4X=72hjM3L;XM)@50Pu14pR2>n)mh=Nq?xlc$=E$ zN09suVg4D=et?XBaf9Dm|CRn@2>>1%*gpTFqo0f9?HG7@f4UXuUm_6be-VLhy_vTT$fNv+N3%J> z?fu^L_Z$8ZNI3sQmO;M;k{{;rQ;NTfoo<0@j$hUu(650|;x?7=Ef`M8 zXlP&#wWZ`>5wE-AYZN26krVh7N4DP}2uLcA~fO%*e^`HaGGA zqTXM7L(tCv{THV$VQ|Za+nRnK#rN@i539e9+ZAeTf&!!j@uA#O<$t^=dAPZ6ZNr4} zhYSSd<-2vGyBDR+A2Q%Ah<+PY{w@OoIsT>GUAO*Lj)R-?ZZ-a!j1$bw^P7wh1pcQC 
z1mx!WtzFRVa`cb;0YO|`w<`Tx8ysA}*S(eT{Iky-T;RJu7XMKf{9C&mw_W~cT~1ye z?tj+3mGS*^9GrX{z~9R8aookUzqQNB$9K0W`G*Yrn?5+fz~Ao&<^bM?#J|@CbNn_} zoL~;{Z|#CP?_A^`b-6*eKJX72AMiFr|3e1m{=FQS@1Obr0fE2G3kdjopKoQiM<4vX z4G@s?4t@VyySKLSo8CY`KJI^x4Fm-9{UURO8r+_40R5gT{HeLS@trT+Z2^>RZ5=7U n`_zv}Bx_@0OZnZ)zx$7aqXE?MyZ3-NfFKYG9i6126w3br?!Z^x literal 0 HcmV?d00001 diff --git a/Scripts/Models (Under Development)/EGO/show_graph OUTPUT/PDFS/EM_composition BIG - LEARNING.pdf b/Scripts/Models (Under Development)/EGO/Using EMComposition/CSW/Figures/EGO CSW Model - PNL (learning BIG).pdf similarity index 100% rename from Scripts/Models (Under Development)/EGO/show_graph OUTPUT/PDFS/EM_composition BIG - LEARNING.pdf rename to Scripts/Models (Under Development)/EGO/Using EMComposition/CSW/Figures/EGO CSW Model - PNL (learning BIG).pdf diff --git a/Scripts/Models (Under Development)/EGO/Using EMComposition/CSW/Figures/EGO CSW Model - PNL (learning).pdf b/Scripts/Models (Under Development)/EGO/Using EMComposition/CSW/Figures/EGO CSW Model - PNL (learning).pdf new file mode 100644 index 0000000000000000000000000000000000000000..75f8931d6ed5f4b8ffa85c147a751a65a8824b36 GIT binary patch literal 33803 zcmagG1CT9E*CpJ#ZQI6e+qT`eZQHhO+qP}nwr#uTJ})M|`Tv-hii*mUtM#)J{`V|zBwcp7e1|&k+q4VDL&I*NC6TbAD>pl%+k@w z{%>ii=V&BkWME@x1j)k#>ELK@q-O=`nmMGQQG-2V=bfWNA2Sv(?9~In4}kl&s-W+W zD!w5PO$07OsKcjTXW8q+yQA`Fi?PGQxd{XU$-lQMyX-o>`s3)+=*{?R0&zm!T*u4O z>4CaRClPrC@7v{T>iuM>=JO_Gr-t{8N4*( zqVAY^sfr!%{QddsKH;X@R%f^A>B?n-m%ID3hPKD&$7ko~DaPmH;Kk?rq{T-acZbIF zba{nW=iuE1;3xR!r}6XIrD9|Aoj1beVxlRf-RN@V^W7V0$%f{osid~(a0PMY`YYLe zcc!F?xy5J}F88Rj2zzBn4m9#An{;+-xo(Q@!pk}&NA>}|ni6n=k6rGiv75cCA~ywj zS@~euchd^2lr(~ zb1NRIVg#*Ahb%GQ)Ah69XPUnWdt-=D+oq(CoQx>2jV z05)F&YXY)aMV6IAmge0V^fsIGc~s3I4LzvbLvEgW%dH2Ik7Yr%owF1-in(No;eNbi z$;J@HGMhSUz0ntd;9C8jb;={xO&bi)-67kpqpE$PU{5$z5PJa9N?;g!u4be++z|je zpkhzF(vH6J*gxGDFmK=IL$p3`SE3THGEc`5wYp+Bs;d%PdT`0Xz}WHN*AJCda{D|x z6eC^*)tv(p9Bv_1ee!V+F- zrp>urX|eQ{`On=p6q>jr)c2;d470F_+9pgaITCII@m^m!F*euxkE}iJo?llDnZx|r zkH3OJt^-@ZL>rpKv`?b4i=#_vcM;6(?(gdvYibMTp5Msgv;gR%)jyr#P1n_EyUF3+ zj|<+eTz?)NOsLe{DAe3^|LkS2kRr*l9qf$_`ZIY(j5kJT^pU;!UP#{C1i}RVvDACW zieC!oM956nGMeOEXW)GmbpkrRDM-rig%bfWf8<2&Q;>Gl+Au%B!MDB_zLa-3 z@8qNs1F?8#?orm;N6ez?R22jPKS-&yD*WW6FQ@a4!_6QN5Na%Fz5%+@!$>JdjMCAF zBE5NtTlm^dbNRzI)ZsqE?uja#n#oyjkb?JcZs6xWTgRX$G>lL;>J12uNt1-nUnC$^ z{tK9nnUBh^SLjm+5{x;N+8PBA#y8Zo>{?wnceLt@1qYoi_NgxAw3jX5!ueg!AJ}ZA z34i(aK>hZpLZ{RHt4k?&k0dny^s^wmf)leLXDT0@3;>r{YK64YZpXy z=y9nZQkwr>ZoFC*HBpS1hZIr7WKmzUh*0}2Q>$iz5Mmw_V_XOw3gq?h(v>CB=tY#Uv|7a*VQ6tbZp02d zzn9uV_+8r%iH%4bYNcR*-LB!nVxC`uz1rs*_)yi2$I9rXY!Q+hbkf(q)`Enlsh7B2lMRs+n`2l|%bNyA#=XPCVGg(+}`0#+;$SC?BzI2jx^F0JqVs&c3fEy8tJ2s-Fh zYuiZPO$0W9lB{qgaVrnWK>TK?BIoTb;h*SKlE2@cy3(E$YLUAJvoENNg|7dr1~diF z_HS%Tq&SH#clnKrD0@5x3_5!`T%Wv^wqOrjhJGSw43mg}Tqr_BJHpn0J8l?#dJ%kh z%7_tAg)5TQcQOKM?Sio)OL3>?e`mekSb&<&Ff(fzYM+^6iKic(AP?8k2o4ST)k z)Dj!7`Bpr;K|(z*<4r-mn8psh04@gbp|<=`@y=fEoZn&yuu*5Gh?0RDcN7`=NTa$H0TtRXx#Z95z3v_00%eU}w3MP%k_ z=jRd&q-^1$&0B(N>ayshiO6@(F`=jHXF^!&k5p*iN?Uq}-Dl!wBsT2|&I^H& zCsi*29gvM6GGEOm3l~3%_7>SO85)3~k4BV2JW7QpkEILBs3|`q?e&@PVHW;L}uByhLo3{7HNl35DQ865=WzN=zbp6e4p1 zkQeCKAMVJR*TlEsGIgvLqRp_Z8aKt|cAMhOk8Z1zuh;dsZ*v{*lib~iZ6Bu9uW$l-g3Va{mUF7&YJ>OMNe@?vvXy(&# zizjaqQ-wmOBUw=WahDup9?Sh5HoE_3Xrl>8v#_f$G6=s9`|`BqGbmdhn64!^j>`lfu5bJh@(4~8GHe{K8FyWtv9vRz*DG(3ne&^ z9KdvP4v!B1`H7cNhrSX!bE4Np;*;?vWQ-2HLhV%A_he739noK{}61=6bAs_PhQyY7ifo zV}gO}K7_JU`Vw1_zf%NeHD!UZC^Wu|K!*JkGC$)qsU^h~4$8?7hGaYw6{&5%cNw`* zjvs5fQ{6)1R(x~?UDx*8_i)K~6b-a@4OcP? 
z1YYb3>N7adlyFEMd|$A?V_P`Bp8eq?WNH#dWnHbT`{`P3*o{F7Av4*w@3OCbjsAx# z@Zz~t4@|imuvX%_Br*Sb_!j@e{jXzGGQ;mG26Mv549Ws%LhHIf^JeC76;PB8jmBRO zx=0K^Eb?aTBgOluS8PrZ38;dc3ho2A{jRD0TecB+v-#i1)^(5gZVKZDsbFV*p)M4d z4=FNny;y%j+$0KE_T4&|Ck4F_o_DEH3j6Ijt+eq9IJr8UXp6J^bDg>!IzxsyL1LFu zhcWvB4$z1EOrGc=M>?KMq){oM(GEU z>}?{6lI=wk9e1ZuF8PUJd(S?hyJ$R+u=#YaGOSyYT!k?y zA^^0!C}0Sr^u;s`=V6Oti6gO`c8G2Ds!JDOPE14Nbtl!BmT8UxE4NflgBnrPsNUvD zNdoLx^udj6Yns|w$*ub8R{*1|5BQ9n8Ch`BqBEP}F%zNxCLgTD$_kwYqHlmKdw6pN zhlY?{1tL61_3*{@B#BJ-+KoF@`vCplV0DI0cFrk_RSq+QWfz00U6)CFmzxS_Wi&xanAEU- zdd$=awo``J6ntN=%o9O>FwQBwbS6}^7=IjG3K|hyCL^lV;*1fTOMEpn~JLOcY1h2y)i13XAEGYKeNV=jb$b&MC%K z%B%T){fuq&spL$K97d+-bbnZPb|)TAQ}4kEslWvSj2I$V*wF^1aS)W<{$VQ=`K(fh zJPiYtzC9UI4zHnWYesB_VDFFN5+_twK)Exh=pk-hLFfWEl3%yzu4#gZm%@ zSSevcn&IGasWsaob3Oo?bxW}htc=>}N$Al>;}NT;TH1i{l7T!=HgU{S(RiDBqMd`J zYvU2-l0YGO6ISP%K$41B0X)Oa0H;6RR!R*g#=*@9M zh39&eGo-X(LC-ccbVl=7EYlYt(#$B19Jf+8gH*UsY9l>qYV}1ZQxLO3wvcaTZrRG` z$dY3Nabw3U>`ECZ@ScrKnT+i8GZ^;{iy^{`VFNx?Pt0a*_yW5eT!AN+e-)|Kx7+QMIORv4F!Dn|);&X} zssFuHjuy*uDlGy_FQ&gpSA5Q8;I@FX{p<8%&3BO=L(jC$yg0Z>E+G6`I4Svg;V)Ff zY(%jEy!Q?GHHy@YLBMK;xuJ2?YLkgzUqN;gdHR5S6S=c}MEHmk^-_303UrTOC< z`^d5a=O8-#)TXbQQ)}lHVq@23ar4sKl?5Cy(uj7V*)zy_!eQca6l0TgfrPCI7mKB* zdKu!DUh0!C(j$Z_6kMLkvUeo2*cRI~&(+)47bf!^U4?e^?M;IxQx19_cdp?Bpz^`J z*|X8OWBhHWYbqjUFt*x5i~a)#MGBy@4-3@+SIJddFojS43`O7y`XmJ@V7{-f1WuYb zY}qXWP6$MjCB~QVPD=32%+mF-=;0T#jzy)3 ze;B=g%D7@#$SjIpPq@C9iNq+ z8DHyP=7N7i`)luj{}03YZ#q-d-p0xHe}wyQ2meC<+h4`MZteA~9c=$`X5jW;SRCKc z-sxXq{L?4Fzg9v<&SnNi3Zer4nEYon6pS2foa_yZ9Ps}cS%v@6E%v|le@6U&u&ng| z#QcA!S^x3#Z$6eDpPq$|o#p=^UXMIHJT;c$ZZjQfaGQxQB$}TZt0=X`nyJ>}kJzFS znY*L$+*S#KJ1dcqIN|OM54gll1*9|}+4ws7{P@HL)&QvU;<7|haEFW(n~~=WHPTU0 z#nwSFf76h|SZe5BWH=a>7`Ev6bnNu(bfu*^7$0-pWU@O%qu>L?LUrN$c0LwgXSXoK zYyr#7gYuzKZ`F(AMs<52@OfkMy(~v@Q(C{TkVVu9y{!N?E>SL9cicT!38vHq2LKFx zsLCed5AVy-Q~{>u-9Af%Fj)t89aD;Zr}lSoTq5?7p$ySQi~w7ft&Vg->; zC|szO3Q92L4~!>lJzF3*fgB({?pz~3Oj_x3MH6MXHH|ydJI+Yo>BF2u)BCS!T&o!C z5^ocknr4+2jUG<2ksCWS4==zN&ci>C<-~Ju?l#)lWFu88MT~;wDRz$965Q%7)nfLF z`G;v!L(I1LO23j6NUEe3aw@iM*@U2=V39j$kx@`C9AM9%)&ri}vp66*#92kjbhU0O zt~#r!|9}k;$Qpq}R6F4$Zk2yjYS_+L)m)Wr=}<2E7f#KTJdhNuP1cSDuA<#ysj2ar zylX|C?QrgG+S!BUl*;{1MAs8L|EzR)RXBNzn-R#$8h|iECzE*>!xYEIxRTQC$Bfyf zLX2Rhf$;&EFCkzM5zIUP?HDhke>Z$iy1b;CIA1@nFl0eqyJl{#kAxw6yS$uht6ya# zpjMW@=>CwTu&6M~uTWQJ>CXZ`I?hl<>#B=r)MOmPTHO}hu{b9;PyrK2&CSNzJ zsjP}bsZ*UtgqBz?U2r5Bk$68BENtY=&fqk1+UKjKkOFyOLrD%7`E4)lk%#fh`jiLb zf>Vr|m7L zkJY>MupZ&uXFq2@n0I+Bp@$)im(z(GuUY0jOnFpv>^KM7atE#Yr%zD@ksv0!8X{K_ z6Wq0A8+c@@?z@~;2&Ci8LlnYh+XlWW;*exzUEKLSWks#HlwOA_=~j}d5f9FN4 zrGfJN#v`23zV0AA{qRIvS3W+NL!IMz2Y)CZ@q;ufhnNJCT9m67$m9V=TPcBXT2^}|`;4)5WVCY#+Wwz**E#;st0qH@A0!Nq$IOZmiP0gvrW%SuGpG|?! 
z$hw5@R%l&vj+r&_kaC@Ml5^D_rH6DQq|PQF{v|C$tM+LvhBPIAPI|~L230k3D9Uss z3?#nLAWEH=C{)-uYwOrzmJ^mfFCY|!d^j7x2OcI42B#J}qF|GUnxZZt;ec2lkcxu~ z=45-bmx`g<%=B@@$vTPK4chY={L~OaTP1ozSO2YWQuJ~W!rRUi>AgE&`)KaeXqQ2O z*4`wd)%(Z%chj%N>r4qcRKj^q=A_n4sU%;WIK!&F=8e&Jnbq3v1UjAE*Tz%ODwbo~ zOLh6U2?+}_)h}lH&*aI9P~HdE;_jbU9Gk0EhR@p-pZY0>gVUiCQ8=6%PLXq+P9K+` zC9Pcs)8)wJOb+LjQ!JB(f}F=1pZ49;1s|{Ldke$u2S&@c>)#I$$mxtHK?nT=bb|EE zuyX5cHGbmnOpO!lm1hXt$>puqYmsnePq{ee_@$Bf0JOF|xeHnBjh(o1kFa0%ei zXT*Ye2}|-XzR4*(aT>ltJJTKVU#GCEo?O}tMPwSR^!Ba}uQNL)ZQ7l1 znUyKsF{QJRUff10gKFAK&j}cx4!Y{XEkhvHV4fU=^1~0A2Cno|R*9Y?$mJ=$4VC0e zF<&ss>go#Xk~I2)XD{_Md$9zn%I@2$%APIIV9+IvVO{i>td#m1tATY;YTTSBDyT`Rq_x6O zy~I#G#8#eS*2E-bv+E8+xL`Vq85Oz#scK<$a71^vhETl7q76v;UCxnatwB>!Og$J8 z;(8~C(;_iknxINQ*^`H<+SX2}On=O)#4XhnZO?Axw5aBagQ^`OEMr1rcoAD51@=H| zauG|Zij(Ri$xQbrEqaW$0#rH1(m_OjigCW*M#=5H^NnI5Y?933xF(~=p_|Uf&)L0+ z2%hB>QBT?#Muaf#-GzfwkgO<<;Q5w$OvUi`<4!^#ny#F zsj3GKZU5xSUyhDw`2O%ynr!Fxy1KimiC*#6O{V_Wj`s!`vl+z6hG^|!v0;y7VU;NX zBZ6?bf&L_Ojk+B9Odyi#ba^s=j#H+rQ&kvx z#PKRKs)hrO+-H9k)2fd0Na0&fGM_f)i?DVTVG_k}7H(blOHW`WiN|Edmt~SRmABET zqTI`dFUMuKE8$cbaBW-$!F)Hf%v?UNHM%(eSVA@wtvRxAR2%$ihG^*Eh&O9Lt$i8G zie^%C_W11xbFFZ}s_W^0_e~-dnK=_b$UHTXv2Ef9$@BENW0_g;9ym0fj}pnsgVQa= zg99f&i-+<`r^)gwQK4r}L+F08yOR&0&z3cAtBHlr1iXdD5gmP9rZQ~pQqA%pF2suK z9vw>B)?nf6k_%OY|I)@XOZkQM+7*V~i_ftQ(H5#$E8LzP8uDc4`4z>Y4Qw%!w*pVwDMHw`!sth^l0&cVEF%h$GFgiRmLkz#hI~~R)a>LXGkNj0hq=_rZlt$hU9S?`fx!b?U*xJ|KRnWoOU59Vs3)$racQrf5N{<60K11bnBR_+oi7cbf~peYreQ8~Lx{@p zwsq@9it38+55+B;3$tBWOaMU>^xoET)tgbh8@v+bUG*v=;TtXL?8`QwN(ad*hh`;b4p-O(uD=p7g0&y*UuY+PRxl6jEnQ91 z!o|A!XF+j6qkws(FsyRhkMt+~tf>>}8_7?H!sOE?cn57qW&6bko<;RyAZyR3d=p_? z+IeA1z(%j8Vr5IiQRV2GA|Wl+j0V@dQMhdn??d~~qp*Kfueu?nGw9YHH@k;DUeb-q z^$S)dfuftMva8J0etK13)t=2EcNKPd8-AybA@q5L*)^N=m@B;!a!VjvVs>5$d`T zwP8gy8W45a_t{V0mnfZPd3?-|sp2VZBbzp;lV%L^PRVVVe{i|yLSB;Ms_V1sw;A|p zB9iCoPeP&T3E>P2Of@|kLRdBRBW%9JObkYJhb1`@?!ZNg12Fu>HRA?O9 z2MjoqxTEc?s&(W)Nd^7J3|on6C=x?PE((E(xk8+*cggB3)5#h{91A6;>7{+;;%QIv z?{h_ysnoMZ{4+9GN@;=AD9C}d+KNLBQ|_%^;V;wDAKI=N&lo38j`U3sTe_ZlQ>VQQ z@wUh=8M*Y&X)j%#IzA$vUvU+l|6tPh4o4x-fr>H<3WdG81mQjLVlF$9s;yd)r)1q_ z{-u&i+w8)3hr2xKSg-X>5KOE)A=UI?+3gCH^s8nPFa$=pPoo|7BR}XL4gt^rAwHC@CG}nKR_4u+fN-nho?(G?l7IEk^=5z+s`lz`qK> zPDmjv4_XXmD}zqkFF$8{Ke66vh7<-oG1}`DczJ~kXKIixOnX`$k1sS(N3(}sx}Nus z-7LC-oD)_w8ePO6`mg6)C@Jy2#s}A!QOC@YVWYlZzn2;)$dbS+ifO-|qq#}iXc)e( zJ!+y@cV>CnyKTP)oE@@(XX+p}6P?mM`T%%17%zsR1Mot0CZE+R!fbj-X%q>}!F67y1WnT9X6H7^$T)*~c5P88$Mm96B#L zIqk$>m-BS4<`JvL8yjD7x@Rb6-y7fH%XpC>ow6?zx6dZC6Gq_1=fhM~(IeXWQ`wWHOzMb$M%5Mf}}0?#We7N%92KpH=q5}6_KJ-pgISvqGH36&Yhz*3lZwxtt? 
zmPxz!8sdxuEx|S((tPb)E3a%{lj>A6aA4|T5nUa<;X?^4R^!W7r z0Jqm6eJ~wJoV_(XFql9v#!0xZ#q|Zcc!Ng@=}WhgKJlKM>AQ1|w> zn@g?Idqn5>dmMB`Wn7$L#an-KjPyXZ6TR5c>49NJ-VGVj7d;JmP5C=BE7MyOi~JL}uS8D`Dj+0(Kx;Zf*7XK*U-YT;E%$x)>F~DxS)Ffk zjR{9ATF=^>mBMkS8uXz7bS^r*LbmFMEPMhnoaC!JlnNv$h)ZA%PEV$;SehF||Drlz zttBC8K`n75^!x(Ea50&dZhxaBnKr+nv?PBwp3=`J|uZ2 zQDe0fIe)a);Z5Xwn}fcvG)w71-Sk;aGHz9ve!$hWR$MC?L6NK&{>9JzlHl%bkaxn0 z>YkB<(s0L{yc#+AYI%V6JJmI1#D~RM;kVH_%kSf$2sh!O&hUNwmd(Gb?lQFNK4A?B zn>P%SVGC_rr4W30Det%}=hA1RA`ashz>FZGKO8|Xv3CP+u|p7f&(*GIZVXTStYoT& zbgH|V`?CNNw}!-h-LQDn`$dYP_R4$$b%iY@5Q)m*81w>)26oyN>xYZ?C*mS9W3L$R zobN>K#gem#a!ysn%bHt|jR38_?A%qXOSocxAdn(_7zWl%McoYe%kt3vpT)YbN6Wv$t_n6# z-7`OjX}dp0 zwpDJw=-=*msor|rgtq6G;J)w@k-lMRO(MK+qFVO)&cKKJqyj#V}_l(f})ozKI_&B)$7WR;_ z1tvB{vj0@0QdW9)`6V!{kU&QHQ}&!x!J&uW+877DVG65yLVIT=Bmf6ru2enL9L^mz>CVEAIBy5mvF1zYUr^g&ka zkzqVTLGn`hm5e&y1G?rxIfo$)c28oa0*UUS6{09M=w%$Mu#4(ZC%gNvj>#Qc9=qHE z$MxD*o>kbAJ-fmOnK=aef|`u@_>&{FjtViT8wTm z3c;TgU9)EMy}}D#C`=+AgMZ7jK zYxP-9I{@)Y^p<&?drW>Ony$fw#`v{>+9?EB#lS&LfX^?hb?1gNbNOs zwHA)QKHZYIXDIt+fk+@5A0k|ik_r4Fu+X;T*7w$6uVf$e4&#n`N=mxr7(?sjlsD>| zY9alNgD$3U$UBK2osz~ES4FR7QfBN3!r;?b{<+F zp)^_BU1;HteMV2(uMJ9@~KV_7ncHkJhqz)vWQlJ4z8E& zV@N^I1l6eJz&BVfVtH+ei!SX;E(~t(ONVLL$S#U2j`!HdJ4BvP0O}1sq`Dw_k2-gn zjPX8ahGxW#rzcvx@T+qIi6Kchr#S5!#T(`uM_Y(zLJ!=*Ds1dctih{y4p+zx?Wo#4 z&s(ggz+7mV?=p6-5tG0H+q?)z)^y&=UWb46Kud^quEs4Cw=mqo!DD1BEl8k5o`x#z zB|}okNu9LZw>ST7{e1#j;HA!a?}FiinmZp%9>_RCy_rnZnrxaA^psK%L^CK_y>r~_D93}bQ8Lts;X zSAUO%H8I++v`*poz(4)zlSl$kUk$a=kR0KA`ok(Z>U7 znoBOTA+@uzXB6#F9D@+)GkPe~A1+K9cHJfOoZ>;Z!L4Z9NXYMG_HkUH+6TmGB=2W1 zOfuR0F`NPtx~Y{RxcAa>V-JTo@xuv&3;l|PQJ8lYMWtbjdmu16oIZK$gU4uOX)I#|3jahdl$lnkuy}*soUh0hQ2{{SqY`X-HFDW8Ay0{t- zlv6Z~4I@Exnqcy$g*APKnpih`8-P2$=v*Nzu4F=I1GU@>lF4~HIZ@vU0_l~(;PkmI zjRrf;``&s5JdZ&UbxXVjKR*!eU&{Ajt!bmJ^})HChVdOk#*b23r#>!w9nc)~FVwBF z9!d`mse+g2N}~lHbK^U~Shh27)Xg{kAkQ`kTr|OTD-7<(PpbPG1&n+{FIcyjd9}bF zTqrAU9c;kdHvwr&Z2f;)MVt1rUnwfbxj>>`uc{|JsT-hBO>k#4;V5;jkIf!?G#CQt z5Uyv5&w!-GGJScT|JVb)6#U?K%-YQOw#?%F3I*mFeDPTQDN8@ell>%lpMIa*O4H#Z zm=1w6F@?C!KiMOF1)ko3)&n;esmTLQL(KzoDQ&Q|IxdS0IqV;wRDbNG?FGyN^^&Dc zG=r)g zBAYc`(Suhv!Lj-4~q}H-8Z=@tireHKys_&eSioB+! zYgb^XSFc_cAJfv5?p9Y6p2GTxC{ABesxd_rm*LB2pkk%8KQ~MuEE&tU;H2}CO^8oa z9N^;4DEV+(4>YSmgBqQRPLj3{cO084OQxp$Nbg9mZ3h&zE|_oSYGHr|ZUdFVq)V!& zhG}$sYSJBWIY4fnPO77J z_oyclwjKdBVu58TzC2B)oQhm%QkOaUy^xsXRnc)f(E6@myTx*oJ5hu?lL@TSu*q5@ zcvpPBB~jl|W||A|UcKSGS~fmiL6WowI2NC^?8o(>+Hd-M{P17kbBn7l|dK5qAs zpL5#E#oiu-Z0Eu#$3;awh$L0~NqZx1YNGaUreaQ?A$2BM^T{aL{CS*|pg8ypH#pI7zuYEq(GWI}D+&X=-xKv~JPu`cdPmyu*8C`w* zpl|ZVPkp0I4Co}gkhQqD7GphI+Qrj198?h_ni{#xx3Z#6_$0sJ2D4%b~<2J2prHL(Z z>_G_z4C##iM!=v=w<}+XzmIoH4_X69ZaGh!pcMqLI*LiaK7U`Z(G>U`YmT=@jxjJy z>}H2l=*OP7b$_%%&Q25&n%sWQGXYL7#5{mgNiR^zVxGt!hN)$byItyK=i6zIpjr9S zRvMDPL8{&gxY*#lj&Vjzg~>k2aKiz0e{JliU~oGfzPj-2Fxj_&ymRTASTi_#i&j2F zvkVA8@a#OUG|c<(u3$CUDG-07QaBFnA`v{)83X?P4?%j9U9IjntpGh$ft?sWa_D6> z(TC*_dO3{CQwhSYX8BeB!1x6~aR^|cQ7hy);N=N`X2Pn?(TKG;Pan?CoVM zK*%H3-AgW@JN7XI9u6u6*Aq-ozuPGykFQt1hb<~U2n0J~@i~G!T^9eWVQ`=hp0#y! znOR{Tg$&q4K;3~9-6=(Q*YD4C+`$EZFboqgD{C4z7u)s5y1_y5wZXmc@zu3p%{{GG zbU(-&NE5r0lOos@l_46I)L8yNjlM{~RIq2L8$L)se~M10RUOQYm-t(baUa6YaDF(- z=kwe#!~w`3=~o`yA}&`|HV#sDceWYCfZ(WpZZU&R-I4oU$jx_9jf4NN{vL3hREea9>GM~Y9ZD>h4) z;pc)x^*KgJed6Kk*!zoJ?%O%o605{1=hjtzMe&U4EVwJ=Dmx~{0pZ^*hpd?IkR z8!WqR4BjAD@I9sz9~?X0e(z)?>tQ!&{hDh;Ww>OI*->Te=!&}cle=2}+I1{A&Ip5! 
zpX7-#qu@{`o5=LEYkL|kZ?=)q)(NV-PY$FkIv{0=0RKqoO=zs5Wod-hE>BtB&hacGsF(=dHXCmE=57Xl;K%en+CpG7z zXM<)LNtd+aB2)C<(b7Qjpo`o_rv}=%LtY6b2`2k9Ask32)8sCgx+1v~;U~baz=2M( z1o4I?k_ozfV;r6Kf0dW7$UH8AUmykPf zRWL{Rx?$7{^%$Q@WL7uKB+Y~W5+i~Bn}^1LoU}w`>=*WHU*8zB1Ez7_gjVF=JfXK8 zF8>pMWx5#F&KvTMeL9D+xF_d8hQO^&hRPTm=xE9`>1dsWh@T_1R~!%Z9v#Y>Nk?= zH^QV3mZy914_o#;#+InD{kMU^Q8zRCU*|j==-a_nJV1Ke=Yj!y52IZ^bbr&356}UN zxJz8$lGSDAZ$}a#@lX4p!|{NU`ZRbs8?~lZZkfl*(SgtvSDtv zH76~!yRWFa&#Z>WyNua6I3^3sr|c*)3BB|)vPmc0y!Rzz{`@m?Y*nItl6y=fqTIl2 zW8{lPhp8uWu0bbiL6UI4e-Fua(aN8>DXATdJ9$`YK0+6lG%w#!o<-P{X!RBBc%p7OKL`E_EaGZh6tLm<_0+0BK6 zg|m}?4=`aFDCC#4IxD;hP+DbiMio|8&2reNm(wh!FB?@$N<%gkKVIovqvIcL6CW!) z%0wofIOX~=EMHkWck{po3R(AzOr1V&+=!EkTbP@YH`2r@=1dLQ9qzO97f3nyT1j>l zY;!Cj+L*m_mRdmj?2Sl7S9rG{{+8ih&#qk8yQOEojT_LcimJ#d z#3+Tz?dQvlj>vh-P=zq;NC*V5A>)hh#1Z%|R?J@Fr$k@Kb6h-P;h8A;qD>sz zyKYILHhTH1Bz&+BtX{09ro<`RCaLGDy}YW6NSLoCkyK$=@8WuDuJ0sok}``2Yft&c zxlQFngmcaG4H!-;QW3}Z}R5hICQ&OK*Sue7@oKWQ> zx;Z{5Ti>j%rcqT{MYcrdrct#bO;mM-1MHqU?2R#cre{7n^fY_)Oj@#5m|f;+Cw0eR z?Zit4WUUb_$6%yXTqFf9rL-ou)+^M?k@h=g%Qq?vyG7=%J%>m(CXConBhd^VNc5q+ znDU3kdb2gG?|6z}FoT7Y%?61^{bqAn35v6ZOd;$zaeXQAuxEIU>zT>9bY3z{5u%-@ zjyr?|sz^}xx+OzFO zdSw5owV!$MTc}#RKk*e;xTC{UzKVW$AP^LG*CNW6j0Fd9NV$~Xyj-SUUjKik^}kfT z|B+hL)Blwu|5w;d|Br6@|0@Chw@mmSsqg>mO$%Ca($)cV@In8px3_?5YwOmAp}1Rd z3tHTQySqbiDDDo$-6;;mtrUtDhvM#Dpjfd2#oda-7wGAEd*qJy-tmv|Cm|zy?zPrl zYg$4wpY>eu_s~s>8B4+{kq4=Mz{k2%vOp2sW3E)S9!i0#JDe3S`T{k4%5j);++MyY zW9AHYzf!uz>^C9qk+HuD)rllwqu~x#BsMF*O`qSvEWhHD2_c`Ul*EQ~q+mJPQC{Jh z^dm$85$v)ld8-!J;KF+!xP!gvV=s*HPTvLr9n<^ZwY^4=KZ*$j*9>dL^B3h6p$eG# zkn5W1N$8pR65ef+TF~TGKV;k{?=^_(ndpf{ryAiorc*}aUtG@yYBU#M=B*br^=VrL z#yqTjj3_$0eJ0!xM*lCm|5eWpQYZ_@pH=S6zi)Bzv*i1+Y+cmE$;i>d-r3Iasa*U~ zByV5?DmWKcd#NaVOBB%OiGjRhO3u|`()l&-qs0rYy4fW3k z)&^#vnsb&vTMPfJP^V{O=LFCLnb-j=OiZ9+btcfOpM~mD&IZ;NMnbk`)+PX^r*1+{ zMvs;5oLtOLhmSLTJf>#>mF_<}i->`}q=|)@Ij9&OG|ZDo#o5G04Z!)c|Kow=uY<>h zfxOFv1@Kse{(DjSW1aplkNl5beh&BA(D|wQUKLdK{&?<5Ds13n@>KEvUvy;Q=;SP7 zZs7P!4YCG*95es1fWPV=@MNK%rSM|5piM?BY|Q|S>K3*_woVqm1WyxCw=i}#clx8= z|FM4l|J$9N{SlYI{uHynkHpFOsH*>VX9Jn>pLZ6HM_|L_odfjhvCXf)M;Q<_@?(o9 z%Y&c>b}wx$9(Pp%Jf(-h)t>x6%?CWsN*Di4-WO(c)=kZ4ZB7PEPcz1 zslT4qXQ!~fgR_1F$KQM=|WSjDa!val6W@X~VJJvr-c zebA}IKB(_F>)YxNcZ6m4mCuu413elX*34?0#p`1gUZ7n_uo-n<50sXt5Na$%tjmlx z>38tHTZ;jUEbKbLl-bm+lEtARXm^Aj+Nr|aA2PI4SO{Ks1($t*dMl^aj$lR$F1m;2 ziHPxxmq4Xqv5ytp2QqOvs>QE8W5MipIhezMiGE@b%O2q}ae*c)CJ|{pzN2yhayxRh z*Is11zRE(UgtfqJ+7CZt+MM!Ygk3BLD18&BA%K1;F?fx(ce}_g)9OSNeol7m1IgoC z5M?wACe!>j$)l!Uq^M6(VmYOxggh-`C1Kr!{`JeK#FDFE5O>I}mXZWt{R2L%PP8L| z`6R%+bHytiarMS|EwjFuyvwHYi1Zv>Bixsk3E@~oc0((`YHGp&= zwr1m_evENUFK)JNSeGCRQ^L(Wdr9IbPJh0s!3ENp`bwJQLfn9N5eJv^iKg>+&yn71 z&is=453M8!qi2+q)Dp@VC=}@7dLLZlPjpG+(q=vL)L*4cBuf(*4xoJ^Z3-=Al0vm1 zWWiA$!szoQuU*i@e?RrID+H-DJh?|^X!SUUtt;fEdcz_SHBn4bm>5G$`bGVs7=uz| z(%armUX#?D&O~fp?o#*B{yle_es%1WjDuJo7Kxp zLQ6`cZ4T2ZiAYfq@3Y*)zrxHjUaoagl_ z{hpw2=Zywo!HFsMk_jwa00BA|vq-W9bnM70^S6p$MPvoT;BbY4UBc794j-#GdLtdF3au?J z?r7cwn|IX%y_wj{^=iws_1G7d><5WRZ&ol$m5~xES)}2WJIaEr0iLVH{y}WLZP1C- z@PXr|U0+IOD(LAZKwsH2sHt|&Y8^sFZfi^vrh6OfO(hwK;I&=e6+FR3md_MXVZjgAfWQ=}855TuR8jV{v!OIvo8 ztTf}anPCOousKK;WX)IQr<6-JtT?Kg_5)RZ6FJHqVnzDx65cs@4lnmvEDBgAv-80R zOjDiv@r~(ny)+8h0bI%P=gt@(-cHTF%+Y)o7|d=ESfR~H2P+(+w`^lMzbAK@vPU4i z-`igD?hK%G+p71Lh!O}DXMmo7jff2|jy)Thx**Vqh}_9C7u>jBveI=*I`NjsZ!yH6 zE|ZF*DYWkW57FS%097l<;NE>)wFq3o#fwh-?}I9x6qv?~PTrnDS;WiR8ZHAAx9z!W zI3pa-j@Biez>_RZh20s*{XVh;3ldLTqp$lgpZZ<^H?VzsZ?p4HGMJ3*YYDGv&DV&o zp$8A^w9oF^1TIn)1DqT>d`58UYLDnoB3KSh^IUl%EG;kNPB`zKuSn+)gU4**aSQoG 
zvX?0FbQ}gF(bI8N`qIMXqG+u3H24dlb<3V<8_iCnwoN>RGhen-OzZ5;GZVp`JIn;%TFqWq2@xt>QxuJTe(@ciozw3DWZpJ$uude@dm48`q_00zE~0INlL!&9EU^=rFs`6(BZ@km;La;^h?26{Yu%~ z@X7596Jb)3lKt@VM8xn-m6e0+~@$eRpSxJuk*86h%Y90Gz7w{8arUv-#4#CW4IFr6K*?D zofo}V;1tl#8&DL--5JI@&7AYopQ~Ems&n_O1#^E8S)s2G2Gg5JyJKHf)+zIQf%?ro z*fo_mAM7E_EcBd~X^ZGTBLDE21d!E|Ngob=9ND!8dG9gWC((grCD}JN4&pd*=LW?@ zDzEH2GM>r2Q|FM9IxpuL+ihti4R{VJ#`BlN9FvV7J*|aTRrhZRdA|#d(0^_`J7m0I zG+)KzvOo00+Qa>jS=@g1J+u&u%_^9}<FU*n$;9GgRo6TkZQH@>lzIh9~)v=+ZZhvk0}UY)#y`HrFCEpg5Db& z#HwvvZ;|)~-N*FduFL0aor&&ilzIw|_uthx9nvPZoNaW214sJ_=YVUsnwW9(+5+b+5D6^MlOXvfbFns_<_ z=2s=s{ii4No5wC2qjXc{wX1RV7gr+Y0S|cnt?MuMM z0RH<|6%r?77yY`Qr;39MlT2mOO;Ur+#HnkWUr}Sq+6$?re(EJGDVNEoIW1<*NjDCn z%17DHk+|5~#t6=8U7nni=o>$14ZrhYTMl)uc=zSCMc~){%|`QtiSOJhT$4TN!zAfb z@I>XKMO9=B_;bT1!6WLX@~{2mG=7x$1o--tuO~ZxXsB4t)_$=ZE+5j`Ej8%q9Vd%> zK2PZ#yoI+d?-=%;|Ba~gkFATC?RIU-ELWLb^*QY7nS`P229DRgu~yX_uzYF~H_;yy z%x1WiCnqj4^5=CR zpV2>hk(+_waoJbFMz^KHWFhVOSr@6wUV$Bd*h}F6*1myE)YVjTLBW|%pXpu35UdyF z#kd+V#_{ypS5m`=fy&y+IB!N@DbkTBqfye1n47Hkpl_z;9Mp9x9UKlu@hDFmtEL zaYCdIClBX{7p1;2Z(@{^aV`@oVS0;LVnT!+M=A!fJu^~3=oxiE>{*#O-yG#>y|S(S zi68;XMksSSzDep5-n*O>M-a=ZZY0X?jY@Xir?9jDuap?*fU*i&nj{3wEK#pK#Z6o^ z0TIqFAW0Uwsi^iUHf*v}GTd(c>rNZ{7Z<3!y?40xd-st1$6l-FFu{;-5y+tUDHc{2 zaUPs9Vj70l*ba=n0$hbFm=|zt-i*D~-UGfdg)ze(LmX2!OQb^^W#H!Ualg&~CU<<% zLfq|5$w)o;eQ5`tJkD%sA_OMBd1jp+TO5{meF1{gLU3UhOEA)2zD-aQJZM6(zgMY& zl^plhp&;2It2+BMW7t7Ia?@BIu*PTBcG!nX-|PJG)OkmJh-c8koxE&yblYVp@5YyM zx!w2TffB3v+2TXVp;*G9BulGsE!Ypsb+PrH=RoFgRd_Wz3D9R((q8$aK~X;RVu!bC zn=PUr3DxXT_J`F!MkOCV=v0%sAM&BxZ(r>J74oIlB8E+P+YTp3WQau+9DIKmpHw4NE~aCvBkS9$H`hh*>vjL$^3D&U(jrxK!GFe{GvDi~*M^4F~(2oSkmF zQRs_`yn;I)=9d7uJMoB}l~P+0E^pt$^MXl<+$`z4&;*uK%ud7zgzk@cglz15Xu|}k zv>`Dpy>Zqn!HO3pUWGTTnQ+K?Q?etP#Q1c8+NhF=-Cj@~|tpWyA_?dV1NZj>Z6jddV~&4x863dLoCYnmyWUJosm z@ItFsBdTtar#@;z+#PsPLC#I(fgcF9gAPfPyN*ZKA2zsA^r=Xc?CoGxhq*AJbuEo` z_E!Qz^ z1^l=!$WFe^3p&|hBfAaHvI`1)tNh-&4Eo_&YmxEO_oc6BQd%%#@i;@xwYlsTrNvX@ z`v}sRcPeXtK2ZZ71H~V>SGq`5=@ufG=SGMz~OT_*by3=-ceP1zJ;5Vj$&$Q_|ZU^(K3ji_(q?@f9QdAX(@e&RKZyGsI3sl$jBAMJjlwqK^}Cr1#BG0A zFNw&{hh1kkZyGrQc_t%x5(b-C%-uZ_e&qR3?kexS7eY6Qdjk(`u8(a49uEjVu__~h z%iDu@IaB1T$xmvD6*TEx5`X>@TWnu9ot^8ht#vDs%YWsi3jUA%wF8~H+l4fTYJv-# zzS@yju6x!sTu2RLS1dlyugCcv528u!gsyzbA`+r9g+CgnDA?Icif6kwyQT4}SSMnK z%NRF^;h#wLdL>m;xJLWB@w8#X12nilk?o+-HKJ3Zs2f!x*8@4;0 z*HSl5>s@jd-LmGk;7;QC%vlY7=S-pSk~?|6N#+b{Jlkn-Us5l@4!poxM7 zX9VRzTTUXJlt;908*bXs3DcqB6E&_V9AaFo`@Zr7%r{kAjl9-fESYgudO_W^{eJJ& zwcg!Lxn!7zp8Hke!~NtCc{Up81Rh) zdn`CK{1U>csiYyHI+*kWWxu0ONoCY@6AelDNS{}vb#YP_9&r;u>ZzCB^@>#QE{t8k zY(dE=f4d?}(>A+qmQH$&C+`@!>?y3#eOu7jDk8np|3+kQp?RjP?d>?9fTEs`(Nv<6 zC6s4`qV5@nfQ(_B_c=sv(wA)a=|W~B@vL?DehdcT zJ4Hl0+gE~P)cHZojzX*gGaD?j!8Ak9G@wh~@O=BaiUcZ4#<}1blDAD?cOTldFT9*L zY?@NCW@OkH>;eCA@(z#4E_FNYCF^9J24~%+W1m)E<^tg~Phk3hh;#MbnA(f__VW7Y z0x>Svxnx1d1ONFFEa! 
z`DRw1P3{uirc98$dbpHuyvNUu#nnb>)p1y+M=@?|z1rTx&K&!0p`LaVl7!&u7n(_n zqq&7lryikd=Em@Yac^*edP_~%(l`ULJgBy98v`p?d=5{*P^N`r+51` z4Rfjzm=f4!^yv1rbV*_3UO3%EW%TC>sp~=nz4j_NqI%%aWI8dXdSF*T+Pm!e7WT!S zMkdy#aYPEHWZz}0vLWk~j&b^QaS@rL?_Y(Ss2U)+Dq{Uy`; zA%6dj>U38fPvEE)HQugshUJ{po5oa z5~5d-D!22nPjbg0KsaGHjWTeWJ~5xH6-_j7qPOApr7|#!dn2(j7j+{*LsOsO9Y!Y^ z>40>mPUunyc1x7sfz45aYiQjDpQ0M)g3TwO2jMhtSlF8>Ju+Ig554V7xdF_~&k3Tl zZ~5}Q_dVzYV2#|NDg4PjCW!a#$#&%5M1G`%3ArvKFK>vC9G*7{BNCXJgfWIlSf^En z(`O9Q{31gN`9OZt%Xg+rdcG*+n;6)7h>XDJ4YAGRtw7L}>2qC2S3Wdt8X2KZwv6*l zoIa^8FXPYp6qs`+We-O?4pET0fTn5789cP|jMPVv(<}L{;j| z(d-M>#hKxyn8?`)*B_v=Jq-IL`9@uQ99L9ghx9GMKEX|UWag9)J%S`dm zgvzQwis@$$TJTrlUZt6RINJMWcM$_Ulz5vk(h#!i)AdF;0>VTFhGbck3Y#h?1e4ag z&V8g4quFHHR$7KOt~&j%D61J?F}UdLHAf%5<&LU5#=WBMbGnT<;kj)(@!6e^9=jY{ zA1lq~h7NDUFzB_Ha;W}7Uj2OP^kNZ_8j_t7_*K8hswQGjEQm8UVHHI|h zb?5#Vu|l4x6gy5>e{{d>o7I|}YQg6{hOV5MRL0Pj%nqgnXAYIV3mplQ`L@58k&qSIqua9xjh+of#dUh50x-kOKZE&%MKd~otll}DQ@q_P^{So z@EzJ7cp<$5D0Yw9J{8Qt09pBJdvDmalkrfA^ZO6K=YG^j@~COdG6x(6PE})X~Nz&RTICUNO-Rp^Km#FqA`DUQ*b<%Y=`ZLkjaA zxoOT+1(TbrGT$O5cVApvi^%ol^j zgEbyRPHf=de--j^G0j&f!B8i>!#sO=CfRHtfiqtBYEA2|tK=TVS>A9m0q6qTr;*^W zz2vjk4OtqBR2rJ;UHy&8L&ESF`m}H^WP_9lBqh{qQ21_lEH1BJw8BPHN z(CoFqe5_}6vViZqF9HjayuO}Bj;tET+tqWY*`uvGl41F?fT_|NxdrK$=nOp%+&jYJvwqiVh+LMeFoz#B+uHHsG-D5 zkZk0|i+8H^hV0uXF0$FVbyW(wMA%ALxHMk0)hZ)&7Ew~gL4#*d)FwARI?8j{B87S( zy?Za*>uh73;l(bJ-0vA+Vvuc1GkF3TbhEwhS|7h>ex0y!!GY!|b34#~ONjk($;rX^ z5>ME!b?1mm>w$Bdr`~W~-f_CgE+KIq%ftuhrrFm`JtN=hGr6>PL3nBzuDgsDEykq} zoh9GN3kg51v0~XDXJJg#5Tqk!n=>|!@}-_zgD{7mh=QqdC8fpb^r_-)6CM1 zH(DOP)R2D&$pk`youSpmQdB^>bPze>9D1O;bh5jFUsQu1gwTHDsDO5LWS?FDQ#nSj zxaa6jo`+7f7dW`OJEHlK38gNCQXkH70p)mNc_7zp#pWms7a3w&4`xel_MIV!%iiyS z?Mo-FJwF{8VMEe_T!XeFO2?u8iwog06!#t!gd-2Oi^L<_1Bk+Q9@{{kZ(t2Q6@-a* zbX)CN7wz!%2{OmzbyTj;%~1U#9FTcsgVYCX)7r=9SAeS&>h?wsZ(*Fa7xeZpgPzlc z@Z^LdQmk2srX-}Ncw$}^KV#@nI`m_h^;d_!2RlTow&%Q1JYYIG0;Kru&Ow*QE$Vza zOta=AV^y{QkXNohyJU1D(v#4=lko7@$0~-DtEaobay&*&@vrvAIlO&)8=)6GGqvD! zTfczwHe|{kn9F_(cQ1UK7cWZ*+!pizZ%NknM`wjSBz%To05@^tLU2bG58x?GJy;|9 zPQ2#(Jfa%LNo^sHa{=n2oj4pyy*NJcyZU#V6Ls#S(GRZUF%8Q{F-LZir}k539m@4+ zM1j6&Fu3yhvA_XCM<`7ZTT0hh^?}{*PTXno1)CG)4Lu8v2Pn5V&H2Z)SFaFe;H-Ma zs_Exi<{n1xY1I>(kn;foI)T+~NC!QM`$66n#N`l2%xGcZ8pq1atf@i_eY-S{WbS^} zlrgb3mv3hV`o1HaAYHwNC{L&d7kXwUTZxPIura$lCv?AOa) zOxYi_9lC?64MlOMfi)=Xnj7odC0IC&>UU7+Oz)1YIZbe6d6VJ)HdSU+=x`rB?|W?_ zy>G$T9oz|`@;2cK+vuEd;B|&Vj>t9+H*R^gIghS9$7k&=_G@;&L8*-JY6meLE{-f2 zhy4Yb17%yX#2t#< zGE>kVNRSf5rA8*6Q&W^CyGYs%*z{+Pb-K9ih^D@PX|Spnx8+$7EpaFIj%p3L<;BKw z#GrPhS(Gg+P>;8%QLPsxkdla(vQLR2E6ov@pY}LbhBZqjA7y@#`UJjC+%ZKVMZE%vts|qnkhwbJET98> z<3{Gy%@LTk$3AhI+1A zY9O}_jI=Atj{o?A=*MHy%?y7pW(c_-d!NP#(vKZ?_BdivHL7atH7P4nVJGy9Pkb%oc*A9J!^KD(?`c=mU)2bsF;* ziwXtfQ7;Y|m3hRQk2spM+B>OMEVjNc`&%exYbW0TJzquO{XkAZDd-~6Sy#Udnyg<` z%*+(6+vs%|vTuRWmp)k0Nx?C2IN(h|9BV%$2j7r_TUcXthnVr?s+lZmc&X#*0ykm6 zrz|95sT+2^32n0%xt&{ktyC}_MG!}z$5WHvXxaa5P=yAXVh{$IDxyz1mjaEAfCd@* zmLIg|V332qZb$nUYZkn-7$h!feMKW@o%FQmx)0+HJo`29YSE)zZ`@y~)_i#R5zo~z zyXI@YtBS%wk-=E?Luo)bm>89UADOC?&Q#OA*zP*f|v~)V4~Juf_SEuf5(! 
z+kJLGSdD{SddKd8%~5DEklzHu^wGr^;S;gvk*VEj;{ZagtU&?^to00Nesdu`v z?p3=x2#vI-GiLF=apSkhsH*Ml>Ez}grcXE;S_OS5E#-t>k}+EQ%H6k;J41q&o?mpy z+Spi2A*Z%R_VN?@r{de|uenhl^Y;nZY{H_$84h?0km`DuQ$vc9#T##ZY|IV-m-tho z+w|j-jr^!KnKP&hI{~9b$u2jnQ?8COJ?~22@F~oes%e8WYs-s%jj7vCS;{n!Q;AVY zs=yU{0T9WXf}$DCqj+P5HyZzA<#U5Rr%Tr>+K#3P1P6(vJS3xe?nJbYS&P|NhSIA0 zw=V!NqMmKM4W@cI;*r!3kRdGmT%(q=OECo);m+<{Mramv2h^huZ}o_@n77^|Mu{g= z)0j%~n~Q%PwIWms{~jYc3tc{9&P0r`9ZgMtRRwh?5FbP{i?B_}aT}D&Np|XMCS0`mZP>&~+m7;O z515){#(Ww}nMTgSx(C+dUL-1{Wi=#p!Q~cNr|~ycPr51NS83h*W5DA&^2*wU7aC16 zK~Mlgd~sw^%s%5uT1i$_euOxA)mp5`TV(XkuWmKQ!K~+nG!iW!Bu;<(*{5!<=&juel( zvm^%Tt=Uh^rIE3FZd=`plVa~{15)ufmM6|b?^Um68a_4p4uWge38UQBe#}@OI~igp z@UF6pdsc<3aVvhtb9R%bG$+NR=AWxT)FSIIe5cSB8}_nOHic|>_#AGmt7S{(V^5X; zD>(d0FM(AQeO=rKbsRcM1An~>Uf!Vd3(~OJPYCnShT0LAR1482FP7#7iG!zR53T8v z4cnq-&4Bpojc8u8t`;j-fuD`su=h`E>)X{o&)`M_eFy}57nor9o@0W~%d+95F+x|g zLh0Xt335Sx(}4M=6VhRU2QePXRO3wuP=rVAo-Mv)62$3KO9z)Og9)<)I|hP5e>8fq zDpnzZTz+@qjw0UnoNP@xCA?Z=7~6~}9A(pdjrWL6INFHW8)s5FXaLp;ZJNwl81>xe^lwQ6IHV_fCZjH5%?n|Wu37aym+_#LGA?NVY7*-4jeB;{JK z-!fk{TPyOrb{BJN*w(1-6YXy-)Jt;6d*S8>9YEoeY<*hn!xX~vcQniqUn5xja9k|c z#7P7z!4+Vj##xt6ri!a%*zC?`eV2K9Mu8wqAX+ z^oSiNnDhvegKa|nLE;74?jq2Quzi>88f|tfR54sJh6x+AV1}S_H1cbbeE2&*hlnhe zW+MLy6sDJ3H}xMcm}|!`P(5Q>LsdMhOV~AQRV2_8p~wkn0a=3J4)rB0XHjLu(4-PQ z4h3OU0cpqpN-bn1*_UeYksh4`u&zn?a5b01{&tjv^B>Pk+j={Fg zUl6LtMO#Tq5N>luIFn0C#5xB_hKh4%QFg^WClsA`QD!c3m+F(rGeX$bkm6p=BNOMA zmI{@&tVN7+mMfgD7U$a~8VN!^RguW~Q8^B)%o@N;ER(l``d#gC=`L=Rb zLDvqGMP3)K!CKgh9XCkc$2FWgW*oYBsPZ$Y@m~_15n;CN4iRo`-3?u}4_R2k^yTJl z47v|EFriFj|C*(z>;W=yf`A}#^Z(n9ye+iZHDv&#n=LW@tnlKU%)_>k;Dd zN4?p>hoB>_xbB@+J8EPszL@3_pG#Q7yBC>IlT>gk+T?>{l((}^n~wLXsb|yeiPdqi zQn^;cqS_~4j_ry<;u#RonEqOofG^f+kukW_poCOfDu>IWatK3n1@l8B`YLouLA-1D z+Mg!$|hLER1)kR8gYerqqm*S@KeSC7Rj`+hYf^GwQDK~bLd-syzg0Z?(=%N*!Zp%)w_3o zgHdDn#C?Bz+T2sbL#Re*k=t?9*F38$O+p<1Q)fo+Hof+y`96{^S#4a@xI0qm;##_u zrGm#aQeXDV$y;J*E7QQK3bUy4JCu>ykM6vlYV?nfX*ZkZd%Dl=%?&0ch(=n>`QxXy zmMo-gpP)!fjTQ#0l(9RZD^Gl{4YZB4(q}3hF$Gl` zy|h#siE=|rkoB@$4XRW)LY925UnA7iB`g8rz!UnmK452EPPYZ} zFUr-RYC_R!H#iqsaErZUYC)|!f5+G}+%>4C^G!M)Kzq|iCEN5cvp6NgXGl(feX-& zPUeiWon8H1vENGQ6G`!#{Fp-$3u}Wf^Qlhewv_g&+2G;+A zPue9{(PX9$`t35TeK9pP-^gDH|I*fD`zL zXnQ>ODEZaq&pvTl2 z!tyxCPhI{uh$B#u$*=3VNSQDc1fK|Kg`r_2PVr6+8N3Z=eBPMSZ=_Wk&blxo>(BTE zgH-&g14xovBZO;m0)*(23nHFDKqQyEPzPXxd1|8j{xja5=J7|oar_x>EdK(^1gY$d=OaBf_Wcd~8EdN9${srnLYGG<>^2k90Vw2Hk0x*Ie z%(}lZ%76m)SJH=nJo}`4kfIrx0Z*&?-7qY_-Z_55lCk`>-eVm9H^b=0%Gq`U5Q8r= zI)}CmAqTjHi}Tazu5sqC2XfPu<|Jr)ot0ZibdTjIWc$9DPO(CfEu@zhUZF zzhNg(=t`T&G=;}~`x!4*2lv8qB0Uw(IJle-YjJa5t3se}CJFZIR}Kks{J11zx=J{V zaCvH^OeZbHc3&+|)ZA);5QPE}-JSd|(zV5oD1H0}4?DOcWy2IN7bT*qVUGPr6F;Q5 z6lc9^0dx45?EIvhKfHy7;n7ri-oEBd^SQtN|F`x3~&ji6JV)y!y zr0I80{atM$Am(@wWMJWF2ViDkW@TVz0Z^GcJKJ+Jf*7ViQZq*bdvgmTCk8u5 zGwPr3UZB+*zcvC<(f=R%f!6j1sT9jU-&I8nK&V?gv!_)&t>+J3ssD6xb2Kr9X9BQr z!GrL+f7}4B=9s`L1pjEMAlX00W99&% z*Z+16kPHYi=3nN*4DzhMuNTPl&vqagJ17zUr7sY)jK9wrgphvBUw>`K41CPWf0MEP zW4%D;Crs|&+OaeJb3FFP==ys*&cCk>$jtH2wXrY(L8G~?fB(*TT8@Q;LPAY!yfZnR%v_`gU5I{#xP(4nRV&HB$gU}IpUpaM`cadfh< zvjuU#F)#yksDxizSQ~>fr!a^sp50W38ldzVq(2XUqNw=Oy4;-U07CZmp!vu>fvy>t zso@zxFGU|o@2Et%S(sRW%*;&e%v?;IY+PDQOfNtW>fe^|M^a$^*POr!N_$U^0R*x< zz8(Ma5s*3mJi*HJn x8<3rYAE5gervWhj;Z2XaF>wJQ?m=z>QvZM4=+OqCfA;?LBOn_W#~*(5{{Vg?5_JFo literal 0 HcmV?d00001 diff --git a/Scripts/Models (Under Development)/EGO/Using EMComposition/CSW/Figures/EGO CSW Model - PyTorch.pdf b/Scripts/Models (Under Development)/EGO/Using EMComposition/CSW/Figures/EGO CSW Model - PyTorch.pdf new file mode 100644 index 
0000000000000000000000000000000000000000..6d531c0d8fbfa359667abf9045eeca03b156a65f GIT binary patch literal 33427 zcmbrmWmFtZxUQW932wpNCAbXk8eD@r4DRj(cXxMpcXxMpcPF@fB=3H=oNupnejJ)L zGj-R~-BnLjH?wZ8Y7!YiVQP9B02E2xS@|;*BR(CzrLGASCnr9w1kl3J&Iq6BLz08S z$H%7?Ha4>Z+I+m5>DU1UfqItuKqzi*C|f%lppH3|Q)-{uiUsxnqI*U~;=nFnOuN=E z{9oi#)zO2YMZaPF@x1GRJ@|TykdYe`Y(Lky1ZW(aY{;>^Gt1J8(vO>!?2itPW*!fQ z3YunFSJ2PoZLRX1l)z!vn> zndI1nMwACUXJnf_fz7L$MMD z3GOR5(=5${Lutu{Ak>KVyujA0v8U1XpWo6_Q!u=Vz64a_gofY1CUi$bX?BZrR;D}# z9DN=QDcaVXYa%`S3<&y(WGa|$``NjqP+TI!z8DNFV~kW9q>P$L2c~eyB-6XRs%Ws$ z-2%yKa(c2p7B|5vC>)EMa1%`_jj-w%rWUjM(jrK+z7C%u#`))^#U&j45K1t4a4Xyu4So+O)bVL2kL-6p zGc)jgpU%u8{@;TtZ{UVqz38pnSLq!EUh}B>Gp|rOv|FwseH&HAM!1m~DWKT3OtsW` z1ARjN%&vCY`+DQY^m;E(HwT{Yr`IV0O)hoxkr%oM<6R)nRNRVy)a(SX7gPPu4o&vv z`7~3_LDmTDylKDWas@4a_V!n5J0~tF8hu|~M6}0;7l>-8%t8=QN?^zZ5ya~Cko5KU zDgy|Bw0-aWOp5#^i^@-eKAiO{NpWcHnjd`Svkso_f?8^W#o03Z#;AmPqaO*f67zZo zZ6P$C_I(dX8qATJBN})6NPy4ZQTBy)tWVe3YB^Y_pm97n9Bs+)U^@Wx`X*FpooZ^i zUu(}IZ$RnoVWhGGO=q^l3c(23wXN+rf5{H9_ZpLeX^%v^aqnN?wA#Z-sry{%-obhF za5V2xrEmF*8w{nVs5leDy(6GN?R=N8uqOaHI;%M`TA%TIfK}+n?4D(e6x_CZXa7aM>>{=pCYsh zrg*s3$>#)XrG~uy2_Ev(Zi0=;FX&TnqJ{5vFMMN*$IK=Hp2!o|oo>fuZKsl7i8Ch%o zLc()_){viEAa?XUv`0R0d&R#)M7HG-qHL~eB9a4d?mg}TUD(*Xux*yNXP73Jz0I1n z2ugm7i%@5_S`KeKzcq<=@~H@{$hoLrQ_ugHlgFcV#f^LZy?6ffd+#8DmZe%tfQlRB zYKuI{pDQz0c3J=$2OjEkgimlKmk-*5*kBi&`hFIcs#@fqTMrXAoC5mgVr?|ZnQhE3 zpLqZJyj#|Lgv92xSi52xc0>>sLX@8!8bgA|Qs$U+(V+PPrK;@D3&Jc?E%hZAsZM_}8&)2DDKqW)#A~%YY_w%@+sW~FC5NU=3*f?Aw zt_H5;f3s0hy zVfa>ixYO~xxZhVowtI-x-y0?gUlGk=RjHO-V1;dvgQMzjo=Hd7s*U@i1b&8&5)l(RoU&OOQS;s;bWbX9tB~4_|v(pSwSkR{Zazg_Xn6dG*C~LqB*zhN{sS zB>g@Ng`8)$8<`rVVC6Rwq)=%Tx3Mm^;_hx7q{Wxj=q@rH2g2#lUblc%=FpC%uL_Oz zed3g37GSz#8DFD_WWv9l;G{M)_0NS;(Bvc%W)m=jJjLi|`S*t1xc;a~!4(DN!f4me zc{bOd#BQqqYgOoKfY$TgP+s~U?xcJqImtkEHu}k#@VP}KiGKlD3wz0B;UhR&gnOZ+D ztQV5)N}9a!%eIB-$-f!wlzK*9;ujFD6Wj}ea3A1W<<2;inzc9M&h<3GtVB{axHO!6 z{t~^9!he;aLJVs?PV@fHW^P zhkLt+<0u20s0sgA+;~LXHockyn;=+?snU?Ctjn)@<00jQ+Tv>=0M(TXShchqWN3lx zcdt%Y3{XAyVT%i#)15e8i8bkmTTtV@$JZP9O{e;$*GK=+T@vpK(%~^w6_Z!aL6Ns&$kTPtD!6ddPX%4|~m! 
z;+YZ&9!?Oyb(?J!oIutITuK&p8qPGK(FY4}ag&3J5(#X~lh}=*O#*RJLECSvKAD7o zNKC339L2LNh1*BayHrdt?rxG6WG{;4G!1956ufc4oZIJ=SZpGa`J+*MX;23^LBrz&2^_99Q`DHCgu;0F8lLd_CFw z4wRE$D_Y%q$ljej{<2WQYILq2oDf{aC->>70IXNm-2?01*=5oIr79jq2geo&WQW;dv;(gxGnBw_<>!karmXW))aGOXx}fE4snq(~iWTV!yFm6QEi0~1D=nk@_N zN8?#aPTx=~Y=tGD)tS3luZFv#(0v>A7{5nTS4WBjf5+_R?JDPr9jNlc(X|9&q$22x z=GPbqhLfS?HIziMQlIQXwH{E6jgB2!M60MedTh-B0mlY>;p4~?CM`+{ic#bbjVp}X z{S~7aP3cw~T^i^BY>lgb-q&Oa_X>ZDhSOOUX-(Aq{5_UmeCTssG97>T3(F7ZrZCv( zZsV|_aa6)lRndAs#ar0IIA7S9x$Q#}QGz8N=Aj%Z(VpdL=j^X2{lEHx_^)4GWdh9e z;;uM0#fhND&aRm#XgDRbD>h&V3=bo9EjHRhn#g)Dqr|HtsLQaWlnd!$}U7z=sS{ z6l9z3#D=DRJ=Zo9=)_Trt+2#CAqG)=E#c=cDAKbq=FUU5SoFLUv7F`!m1}iwcfGn| zvwS-!V5Ie2$NipYD~t4i@GJy~Uu^JK8)HZ&6fiiw+N&K@Kr(E~RL&BlfR6|>xpIMj zq1OD;*vmObE)1U(situfr(cy|_7*V^4O zuUN|qM0Tpm@CPRQ^?a7}tg+BV7mNCH9!BmIm(SH!jX3i;E{t}rxYzr3+3wKHps7F@ z+))Kxzl9Rhn8koA3fQS4S!H(B#R5!?pt*AG3IqK7hZCn*=B%c<3tkWY`D@f|iNEB& zT)U<0tKHP2C}F39xO!V~(AwLuW-}1_ELirSn4xWH_5$x=&1E`p=cojdxm9hizq{64gjKWSSbBN(}mi>*`#c1h^vBQ5{zG za|yGgf+{!8G+5ca|tL&UZgmShn*yC3$<6?^&?SYoc-W(=KTpdKMgas z_Pn`;2;5)slFSMm6cQuARt6x8AFF6HZkFzlBZXMuzGa|(1}1Y3orBcT8tp1~p(=y3 z=Zr79Oh+>PneS15=ew~@!bG4LoP@e|QZapZ>TMf2@f=?;qv_6FnQ_pMXC_hW|7%{@NBoPIe;lb|01nJ}2iN z;p2x^hwh_*Pb8R=O0sk5ye-26sar@YxvY@iqR23w*TpBiax4dhpK){;L0` zKpvk~!A8fz*6Pp6^ql{dMe*%y?Eg07Pd5cVq6C2s#(F?G5&l0R|B)a$psl67jULbz z|4*{z{#&=`|1|L5e*K4qME_qs`QJRGKPUPpk{+KPz{JS>KU}3XS64TMxw^~c=Tx=> ziG6zmhNL9@=v2m}QT=F9h8VIWx@bsIComG;U%p795Ym0oz9L=Ty(b_L(82 z@S9kC*=pr2Pw+y!H#K%Id;gv|JE|wbSlhQuIyC3Fktm&Kel71kFy(O#kG16AQ}<1@ zm8&6pm%~5Pa6UOY>ad0HU7xVeJ3_)kNozmC#LtzdlW0W2dm4Tr-~LPm%PUIyi$?w{ zt14poX~8bV6D0y~?gY%QaJ=je_EEmzK?V_sTygJnj9B^2AKZsSb-N~3V0#rgXxU0b z2iZ!Rf&8%hdpCh%-MA$K#{z;3&a4$SJqLR152y+r=py`?pMLR_roaJn85F;jD91nM zjAr=b+2X+Ppm1vtj7eS~HTRB98@N|YQl7^;9RF?#<`{|DkzeJDPzoMmNF#C{H4wUp z@6GD98tq}t4v-VF3Q4L?L8@5N9s2I$-la<(9bzK_1L;T7o ziUcd;DGX=eX*W^_hm52Dt{l)X6W8O^E6xwnS!G$IQLMbD8Ag?Yx>EntZrq z=&w!OAp`8`g~4VhJ`_UQ*M!kOZ=5C%AccB28PPs7i+c01eHWouLTJ4bo?b(pRc!m} z(k%L2&9>q@l(9pKPgzqPwq#~^%>Ec(`cH&blk|>kY{{(Z(%G$9g$LzP(y>aq)cA)QvVPFkhl2W2d)?}xGuHAoi@4*3MKU$IDoi7oalebx zRd0jSj|&WF&S&>=4BdCdnm8T+BStr6l2#PfYHr}3{ycM*%&H-slIlXeG`iziJ*la> z-wBV9qa?*BzmR3*OVXxn?Aj6;n-YCj242!}mQM%87{x7>E|uD(XM`gJtund19vbH0 zMXcZ6F?w=5^_Vwku{?~jtat>*)SBGF=g>ybkTt}R7rEwn9>>4tNJrq}u8KkZpjNTc zrd&Z|J6!1y)IuYr?dPs<48$NVU|U13+*N~ms5ENvR!20kFC!e$Sny+RJSTzo)pTgv zdrEZMXGl^f?xXM}^J@}Q0ym z=wV;*zXfb@FYY&!xbduFdBEU8UQmZ6v~Rmj0o0%BZzLCnrV$Sc%B5$d6Fgwwt=_0l zQ~2}Q$^70pq`qXdBbvJI-CWUc63xDwnbwf2U*rv^I3I2}W`3j1fR-*u5GK zUfH^GL3T_Z7-1$v;Aouo%56~>xb0i?wHm~AVCx}J3iP!)9S^;=BbyUP>Myp^Cf(=2&y`$I#uKTiE3nvTYag?n;Mu_2RWA8yD&4_1_rZQDkL zh-L}(^AL(P9ElXJwQ#DAt8WrCaaLR{UrK=TnGqq-SX8ORS+yPvvZ)c87VVqGw4beo zmb?=aLVC(cWeS#IjOH~Pn}CbJjSWsxd;X-~)+J}fPPfv_^34e|1sJzh({fK)3f|!G zjr%iRt0L!yqv+)QnM3^rCX!*J-YP*QFDl?pUUgv=X`~hlY=1R08I8%LYKR;QCXn$V ztGB5~VvwOqp??JY!BDWZU{4H#_vvETt&XajO%UU)PW!YvNyS&;Su3u~9oi2D7g?_P}H4}5Xb42Et z45JSjeR`^XW02)R%Y%knD%!(D7(H@-sB7q~a)QP};gYQM6p%Z}ISJ@RcC-DB01QhyZj<%BO3~(R*U3Co8uOoHqG{3Tyt$T&H!ZiGdPo5U@_qNfu@? 
zv@2_=3?$<77p$x_**HVtQaMuDIT`Er{G7H_IuQi^zR~b0Z-Dn&JLW9aV_JQ^68#eg z5#XY7%?5@)Xsc@|W<+3=h73GpWJLh$F|3)poyCrv2^&c-YaPKaG6$cURfI*v1a6%u zSY9yOzduhxDk;SV@7<_tJ(3lh9V@pOs`$WZn+YAMKo3^d-ruFJnF*NZ($@U-t}r5G zpcf4a#)ASdp=>%LRVF<$Y&&aUU^qvqJl2VpZ(Ng)9No?D$4IVyi9SbAO=Tc%>Uhfz z$Hv-MHNuCJvBv_*I^V@>&6XoARgQ2Re8L=o<5-T}G&$p6AbXctf^*cRRL@RytweM% zq!dYU|gsKbA$ocg_i3mZL6t*yZ8~6$(rW z%z_&4G<}<1POT(|Olq8iS!c@9K82ei2mQ&7`QK742e%uZH9G!6_CQd{URC`o)>R?@ zshU3iTE^MwOP_~fJgOACzJ>`NX;Q7nMIU(veanT03r;4snd38$s8-nxl%%Q`N7`3W zjs^*lcea!eADrq&Nj+wqNbEZ%I@L*tu2RIBeStCKE%Z%Ug3#Z*!oKqMtaIV2W@%-L zF!GS@SEDaMH?$s#W`;Sl_b1E`wkfs|I7k;OD)@JhHW(2!>#-of)5?MW*aMpWeX zDt2Q!;?Q`f3Ax2iju<~3*_b8A?A&21w@-*~wReT!^oO2+WSx*1^bB*snJ7nGe?eZ8 z=fpSlt{9Lzi0Fu~av07;%zdajwym9#i@|{vKf-@;=vqhq3?BDZ30>hE__rTk=coR=6iBX&s@zsW;K zJw5L#`N5*ezudeM8Cq-qbqN@4F1u>Zg>JKQtO~A}7gaxx-;$i6he+EU&MWiN?<;;N zbpRNCtV2V!Q4ZInkIo)?;9^P_7JYv(At@-ND;|VkFR_lMZf6=*Q%rT0oLF^iG}Td^ z+-PY;?$A+`-5e%8fzkLU?+sTv?L4TVkl+|+kYZ;Lk>rV-H49tchQaMVwfzmb+}t+1 zjrNH*n_Dk58Z&}tdm9(Sz0;J#2c`MaBX!G-J6r-w58>Vfyb6nLf$9sA!()Ms*)obVgF(FbA5OSg$0Wnm#w?C^lAwVUud$b?`1Deru-Xci2-m9)= zPG)6^2*&m;yO&P)gdb8!GC_5vL7@{#zwOB2oEupSqhGFbGzw|bB?#g`9N)v~4ItznL z2nd~TN!NUZzD)awAdh|gx35XzS%H54VJKo|ge+oy7TGWnMM=0*th9NhqbyA=E-obn zF|DW1gFWfd)}bQV-%$#Boy`_9Omfz62gk?1G14Yt@9i{z8h#~QHN6%i*KFG=T5>gF zW}d@h=Ls9$=3XCrH+}n7czkrGltP`d09v#fp!D2qCHUO@>$&2p@aY##UwdvMQi_NAS>;A~^+}8YnyA-48VT{*9HYCR+S&c5Z9j2q};u6xr9T9y7 zLG-AZuoW&jm_}HhI z_}Jb+Ye3JS8Bp0lv&W3Xfc4D1VD@0nVBWBcD>)=gqZ~t7##jK%=*{Uj%$AT=CoX7X z%<~!Kn~Dfas7g3Xys0e_D-24+JUnEVKpDK|FSl;39F!ql%gH;kb%Tw?_-b^@jng?< zJT2(N383r~C{WIa4U}wMY=upaysN?XpueLkjN=*Z!}z}__;75P%9IQo1eCa%aTTrD zTztELUc+0*!-RWj-t^nF`n_cG{8KItOHuZoz{XpqIw@2|WK`jbS2Ul)G;Vu*oIAab zb?*p%5qfev%%Ek;2o4i^v{kDDu}hrh6NDGiIF`1efbJ68o-er(r9dxMjKI8M>1}LW z;k>}Syy7^!U;}3n_9)!2OSU1@aJufb#1&d!kZw6Ea}%MW+ZSkT{=oXg5FY~vGJo-nl3I1Fs22K+ z*1=u>zS&dkZ`S4@WpY0nKzVMtI7jk*GU{Dm6x)T~3pFpH`HChH=)mPbFdomnc{N4z5QfO%rBRRjgnN)3YbFWnf0R*B z_s56ETiFgl=~OjNS90n#Jug?=zp(41HPQgvwg&p~3`-p?ht(^}Vb{7ZBA*x<{OkQ2 z;#LXPEgKnsOD()&+&5i7t>QFOwraVkzc+4Og}$B-FyKwNFfozCiAm?f0I}iTnP0+g z!?xV!S&nP>vPib}NLFs$;#`4u-}eH9UXZMVYSkwI#|4z~U&095=eTH@7)Jp?lGAP= z10j*#SoAZ9-v>vP#CikyJg+jL>#yd{E4gD>lw%lA>P54?&~tyJhgDZ_0Zxe?Z%#27 zkAC#xP|g|}h`G3nk)Ds9nn8kY`_IC(Z~zd79lx3hn~~72T#SdrDl`!Z1eNnO5P)4| zmgwG95*G{m5DbJZpNEc)nz@TXJwL_nj|&PLkc&s#EH;!{6$&B#iKz8 z5;AA{$0Ca&{oZkeeipMD1J^|%IsK$i7M0jRu8vD{M7{L0dyHH~d0KNi4@D{=l{!j{ zJDz)bf}t@&A3aNpb+Ev6ma)y8dA>M*w=c*mRo)S%&mf1x$eTZa9O?XdFMg}+A4Gt4^Jtc#i(3+ zF;m}dwi~p}<3g}X0G42ibOeJ(3<^cT3luqRqM24py??ZVzP~u5b;#CGOUm6`zYN`S zk&0RvTlzH?>FU;VvAURQ-|kH**@{$9GAv{D$5d#b{PgR+)Z~>Mq07rJDA<>0Ybb6F zy!1siV0%$I%zu8_TTkh54?f|1$M3iWFW5M^@w07ZQAaq6PsfXWl9yx_-8iJ(a8m6+N@P;>k*4tzK%W>WEUcdjoJ7vmMwQ}wV z4}ZGx5-mN94tPnRhH#31eIqhfzVBdaf4{(M_59Qi4Mq^t0=@bv=pJDl-;I-<+8TF4 zc74hesh~+g!9y43lbaR%sh2vdxY(u&y1(%y#4P`}Rj`;XP;CB@RbI{hxLba%)3124 zR(@n&F^8KTPTx>@;oUwJ5PS-|q^Z*3fSm+`3nb>|TZK^jHXuU@cD$*z#i}A|AOLYA zZ$zLu=2po_47v$8COH@R?&$fB#nflgQaouhY;&nBD-9o1zAej|SV^xzH3}7Q)fqbrf=VKMc z6s5!dpZ`x)M@fA)ar2G3r7*_tMhCJNSWaU-^|LK&{8{#?&qk&7zk@9frpz z<`Oe}MbabK z$T#P;-_12a999;ubch4oAa#a%D=XG<;Mx!~ClB#X%DoMRgi2W(D#8xYu7e29F|DFt z$60H)@-*XZqithK$96#`W( zwKgNqNOEEOp_%S?r*I?U)22gh0*5v&cIUkZxTY^pH^(Jg`6mSUaA&YP4;Rktk z`xu|ygUN+v7dml$Wl)vQYTs^!$#sp9oLhvL_!O~>f;H7912)J zZg^N1p|N!)*aB54tGPKjqF68|}jQX2jSK-IDg~xF%;4FF&M~8==;vk>Zq? zBKSUDMreD)NE{$QaB83k6q78mF?G|E>j4Za2eXsz;RRK?9! 
zo_w{48!j&k60lXghqcSk2YH?(hC^J308Nsa2Xz-fSR}}yHEpCBI>Fkc(78Xlb8xGG z|F6i`)f|0xz_v+WQBY_P@ALNq0?$pk%($?q2V+d*Y{Mh$bK@;yB|Zq3AkTmkNRe(M z4zvu}2ZHloEk?Lcwcxj;9b|c%-mX&{8CdSg&Sq;^%^yA#ytN-4boZpNmcPj=7M`|= zms%Ebcn++EA6*+fGpWmv?(MTu_Ioa!V7V6peP)B*_9C-htJNg-tsivNLPyTKCuExk z(WCSOqO7$ctiUhBXdE{XmwboYjum?9e!Y#F#&Aan5xb56jNYKH5cYN~s*fvHfd16` zs&|;c?T7sy0XKqhRiTO1Fn|X$JD6^wglB)Sw$>>S`{k*xJd1nvxnaAUFid2kg;{vx9aaWk)vdv{b@TxOwELvS7Kiy3dRO zQ|>mJ{Lm>JiQMc`%0y}B?T+>0UMfkhcX%C6-_9*!3Rfu#aAcF#xLjUTlwjH~V|(_V$GgfQ0@eH2J3Jf^Kxhg!t|t7|UY`qq^I z-AV}PtMw%6p=zj*RS*PdFtwf9G*N$AvFUv}(F>uWyyG*?+MS-|GIDB~N7`2veG!wM z>tj~y7nW@jQRf{q%bn3jY7+si=fzcKW-Udr;S!pmej8$nPRNKG?$n|pO{O-L8#Q#0 zce{4cb$^sId8?8w^qnGm>eQ5aQrp=vW67*nn#NfM%35?4wN~lLQ_rhN@~qFM*x6FS zBWLo_jex$LN6%8GSk@5}wzuGct0^6n6tGqST=G}?^QsZM%!dDGrAbEbLeJmHy4CmgNaK}Gp7 zR>vgLtC*OyW})Ys$9zp_I;-1hmF0r^B7u)SxPvE|Dr{N$A%^)cX9l;HeY^sF^c~N8 zbU%op-Hm`RYnD1tAoS2Sm(?4;WG2?YXN*D1C&&DZUrY{BEcF~^&r~7|3y;v}3N|}Y z^=NxOQTTl#MII{nY$gq|Sn?@#@6>TfPfN7DH}D;Q9h!X5$uTtq@ILAZ&Yn4tne^-y zNhxDuGSkR7Iqv?&wGn<3`3pRQjsEByzMT%YSEe|O?FB3|Q*6I~BzOpb9bu4`s~;2u zBPp+AcRlCpRedJ@&g`$AGSPLML{@7md6l?P7*yoA^kT`abw64J@_QObD54HUQ#u)S zm-=~MnC`VtsGNZml?kq2w;_;qSJix)-ln-GtST~JNX~^gZ!1k`;Sx*p7B?-M$gaNU zZ34Fj6L$wQmsFwZN~{$jVTibctT4BzzQC6>is&$QOZf^wa4I8ELJ;_3g!bTU!fmqN z>$pR2BXocFO@rW++S(cHh}W^;afD6RM7H?qh?cI8Y{B7JAnfVI1adSH72(KvPC&)j zY*_jtpDXd!iT>_E#SyvAB77m#j&;PftOV)T;#%4ou2wRq#wdJmQ}VSMrHL79?>j%b zSI??OVheP^In5n4Bz@P+y;xa+M{eGa3URsAa!C-}ihAJ`@U){mS8Fkrty#as1z0K7*O5BMQnAXsMu+>@5G*c+%t=@PNmfAGnotS^M7FaK6iWhqa7p z0p1WfQ*|+_Ymf0f&Z$yJvjSW+4aD+?C4+5lq@zkjmV!0}OZ)EYmpDmW3$4LlQ7OJi zdFTk)BDidE$Z(6vx|E68?Vp%6lqoM*-EfC$fw;042=t}#)qvm}zvAC5S)GG`wg#YX zE2xjPlM;ec4MC96dtw&BiKEWeWTt~Pq3)_KDhc#TO#4oTPXb6$Xhdn|b)F29KndXU z40e;PEu@LKM{FQwD)qagA;n2&*+nOIAb`E!`M-TBxRN5uoWpM?hmIIpK?IfbP@HKZ z52Ek3pGt62 zCyXr7m9I2(2;DP+RF93cAYq8j?aBva4#;RykTs$aSozhfryf50B{M&56leOuy0-xE z7E1rxhN;EpRs|OTXVJdDzrS;r5;5+{O(l59KiLIofeRksTng-k;z#Qy08LMcFvewx z;PyJg=Nd%mmmo2QV#P-HHd7QWk1Ikn?-aZ&aWs)@&Jt4oiNGc z8W??|EhM!`@aLizC_Az$GZrqR=$Q8{W9tGTP0rWx(z#CZub8&d89k?W-=Tk<(|ZaT zxXm1xJ)_sy()rC`eUV(K(F5*3#D$rjuj4tInW!lvaf3X=+NWSj*aCS6_YII*ro+;+ zJJJ*H0)3c4sKw7!F9@Ff2!+eW#E%3rrAnLwGBxyr21d}4=y8DeR%*}q8g_~w^U($W zK2$D6cs#0zlk(j@^vzGAucefKoKVC67BLVBZ(0jR&{F>iUhnm5<%{iLRN|X}lenQB zd{Xfh9(KunjaRqO;)(QEC>HGsc;2k}lQs}P!${Wy-YsTgM;PRC!D`#i0dS`=-df(G zx@cgec#;L#13A{vZ53{T_6TK^l$4?cI=Oz`vC3D0T#HUMZ{mosYvXM{(lQQUXGxnr zs#6^)LstpxGAwska*z;KD#TXFf+RtF-Sq% z>p=xJVYSwGf=CSje!ZLCheqw4$izgH3Ml6OynMXc+iy0ml=0hklEKe}AK$2q3J!tDX&m@A;B`f{&4d}n$O zt~{u=?(YeDd`gNh>HSO8R|7SwJ9$(wgC^#o(grLydbKI#CWSo0E$GJPY{KOQYp^xw z8R}XaE~2nDMrg%+v55K15Nr~#HiMb@%o_3njAO=y7ZXFG-?_x|8AzBi3;}t-p?aGt zXtRO5tcar9h?^|_4$D+h z?#r~r6LGi#6gO2K?&;#+0)vk1Ub%3pU5RjUGGq@Ok5owyJTjy{IF_{M`Wqt4ZLs19 z%R4Iz)Q)|wXhkOP+YJNUsShh?_B4jIdZI7p7tv1-x|VIr8&i+cy=fAwTV)pk>ZWA*QltzuwAy-v3$Qy}=R5lEi9dwv=Qt3(H%*jfJ&!DZAJ3Vhmug@M zwi=X)@04{(o+PcIXY`(*OgYsO;F?xg&Us|kIGh%l1`SOJN(&wpAMPKHAEqB>6@Dy? 
zRMcNdA7YCVK-(`JUP`TDG?X6C!uEnht;G%#y5Dt+>M0X4B{aP>E?jt04?%ucwjNua zJNAB zix-MiK=l)%&EU!e@2n`tFVDr7F!P5p&+{<2&W$z{5OA609}yuMZM=;;xv-Sd_;nD^ z6lc1)I6K6+^(33CSU{DPFYlR~NH5?4CAE%jx}Q2XS_Lb{Km#gPA~kxBSK-Hvz95&L z@MdS*(dM|~7=otv4rt1$2b}$=?!d}O5n#=;O%g{^+};-U@?iQ_T~E$#6nBh{`ADmH z9^Hbpkz;87b7n3ZU%+k1EvS^CWRRhT*rK{4=q-O(U1;1nIk5mSNoY2`=1xN0u>9LW z!(cDry@_s6m};-2$Rg3X-Cf3dZI@TPdKUoTa&r zg^awU!hePd>zEsxIpdT6jllm1!T%dW@lSx6j^PL5&G4Vm{Qm&q)Bt8yd}>BIW_$)Z zx(_y-?&H%xT)3#6j+wC@pM{|r5TEYvZhW?Sf5389Hu}HCKP~-Jre>gL{woORSp5JR z8ybDE=O2gpE6Ljd%@y%k|Jnaf;m5y)KYjg}T0n-6;Qs>x|AWf^%Ygq^V*WXvy{_F~ z+FjuTGyhZfSLN5S1^$KZ|1U{0wz0JnFw(L4HxCjz|52u=|EEj;a{%!F4w2m7QwUjn zJg~sn!VsTU$=HI=!q)g-;cu;yvA&&=?SJs~f4cMkwlOpRSsK9nH!&;>e=g5|H&)g^ z;QxPV0KlKfKN}m0YT(@TL>$gOQl+`-S;fL%7c^T z#Ky0`o;|Wa4iCQm$id&IhJa+CQMug>q)m4=gyaVXAZ~XwUq+YE9LBe0bYZ3Q78>@;rX6^mmPf88b7WoH%+km zOQHPr;s?O}JAr)`bZEF7&ViGO;CG`@g11>JKr40TLsD!A)iX5uM);KS9&^aaOfN9G8yIu>?|s_ z;MN_qCm3|A1gVNgJ%{m!lT7Q?PRwk0NXPx2^Hv=3^Ih{}GKURot%}L0QS|oZRlq5sC9Ij&?&BQ?Bf@DukrkUNUpepg~Zjf9G3E$J=CPqdbNrjfftSTmQ*5 zXFKbp!9Cgp89&Eby_-_PiJsPKB}lVpH>W{CF#`q zT&6HqRly$k9c+0B71^lB^1^`izf?%yapW^}O6t@rgWf2Bgp5ZCqh94i_bkIn zW+Lg=_j}(@L5fL4Xn_W?DFWCKATle|d(r%Q!6}JyM53?E5FPom6ai=I!<(|WVg)Cs zX~F5j*orE#kTgBo%7XLytIrvLqdB_=4G!%%Y+t9j+{oDo#-*TP3@k7`!cpQCiwu&8 z9@)c##u9jQvv<6TtqSVd7O{Xa`ZMW=^H&+3A{-K)sOiEuo8-aoA4*R(ChLsxO*Umx zKj2(&r*A}1RFj2fzqFNP3d@8I4g{yp9tV{TR8-k9bb7NEf#@oQx#>ntrH~a;qTnRK z;+=&eBf-c)6oEig4{MIT7Xbyle-o3VQT1L00il=wG&9zkS)Y`45W}ubq6~9Ugyd=9 z?21RQIj28C8|3EP7w@jw-bdi~L#_k(3)@KBeAi;mr3Z;2%zMAaqN$$;-Sg|rG^ za!It8TmFJjEexh0Tn4n9^0f)!)w}ay+RQIoM!WaWWINGh8|VCoVCE`?8o2qX%=(}q z4?~M)$>_{Vh8QM=Zd14wAmP}#E_c^B^*ejo6RAKyfOFASvrrFq3Tt+}yEl92*4d5p zJdSd_L^Yd4i~%?b4C#Sv>%JnG`V?haqkTwpY2UKJd&>)L!URQc%xdp3_ddojT+*FG zM3Mt5a2_RINB}o;Q^^zS$&+C6yPbKF(8rIlEo%ZJV;b|G_Mjc`^AM^OYHsE}&Y+dI zm3wPsQk`hh%R=jpF}8ECt|~hso!9Yeb@DBnSNY`=jCp`ni}V%&w~<2JNx*rFvBp7} z(P%C-RC=cCj$@KLmnhlz4aOr63$@%phr}UaZgFmgt=DfKd> z8Rtr>`oDNMawjeBAT)lUbH|=vwniJ7<=j7gOJ_`t#kxZ4+d7#{4$lG?BW%w9F@&fB z8%{D$S5!y?l>W>0gC+wU7lRMr%hoY;1Ky`4?=UUg#B4M%9K%Xo}@JkKI3~D7pO_ zh|pdFg<{_dP6V+kv#CANO(wONB9gSYRV%)W4FN%u4TBk9CUGM)(Rw^xtn+j}rB^-L zkGIToKhe;$cGtEJJ!{2x4L~XoKWmxPZr=Y|cb*Noqn)VzPJFrEtV9T}J|ao#maAsd{=EBtzdRztxH##=Al$mec2_+gv=WNa5ETmj*a6oXkSg0csXHOMQ%2hKk zc)YN+n{#4LzgewOt*&Rzgjb)9suzlMEmVj^&0^aOpTi%@UsXlb;_hYo%zliya+{{^%kSPU_xK>VmO4PjM=KW-3pt z=t}&pNx?|48U>fG&%OEKTuDlqJVJoALFS_M*s_I6jERJQLd6$(3NM=SFb-mp|}VtSR`EyRi-CN z?fTGY5p}mh^uy>4(rsM~_U-tfdBCVYepT0+%l#uBr2}HX;_428!!e71O}=$qpZt5? zY3^bE;nF*tTd`$Lw_2xGuXK-AlPH@qpPYLJ5|4oQ3;s}PtofmD4Qc#kh4ApWh{V}s zDOo1tBee`0B?{KBT)K)b^!+NHqdXg=fH@)OC3$Bx%w+kW_> zwtSngcJJVcm&at=nPgO&DM2m`jQgj~*}Cs$IOC#@&p@&RLO%{J#H#v1ADjL{e{T_vKRj94mhUjD@j2Jy@vEZ2?Y*bC=(5~|d%94nAoWIUkZ?W; zyufuBvgZh+#xl_Pg0BR;oFoQqA|Og7QE2wbakEqefT;Ou$~3lXsRb>+O61t8^{-N7 z*vavg)H85gZ@f4N8)H;D4+5hS{BNvZZ)HTtpCZN31&_rOe?)B1E8eKP?N z=&0sFY*Bn@KSIiDYd!jKfq67)Y}M@{G#et=>gZ|}GR*YFwco(ek9@cg(>NIds+L4b z*IZg8_sxV|;I7EKoG_x30Q+8~e(Z9$AcZHn%gVho$sS*H;ef23iq0|PRRol|*SzI)e4MbZ-HW*GFcp%sj*&c|n9>M5dXRwBa6!eqoL_#nW6Zf6(*bb2awzGug8w=i+gJ)q! 
zgG#=t&Q(hIh~CwhP4I^GR`p(VWtj$fm5r78cH85%uuWf$RV~--aixbU7Mh3vTlQKE zNy!-X5Xe!N*>nwHIzZUmx}I}L!7ysR*LEY<4(h76I#*3o(VSz5b+k$=RHGq0HylBF zxU_b#zL*@NrM}ZS=#ipK%W`(5<5EI?F?~ln?(5pu1jOW(TCZL&1~2QUDGo6BT1MsK zaVg_LhD02)#LlYQ)^OqmqL7ZzRK)q5I@qDkdrfv$84P4*zayd0UKwKsklQL1VMS!} zG{k^D8EVdM=kR&M&MM2Qg+piXv4~{&Dbrcp3L?}dIh#6K321d;@pt{XSf9E~b9G%5 zq(YYsS#V^!8t8_(GfvR1Z^xgLBIELWs0 zq#fd5ll%ee)FJw=SqVBWUB3CT|A~xfXQp<>GX<{-N3AhEqo|*xnuAE#lH*-qPt{jC zRb|P0x#72sHY=?ZvMN>HkOWgjDSM{6ojbC+#vJYsFu0QvPl0?N?F4(Zo&w(=%Yz)> zbA!X6YPe+yb|O+xKr(&M`bN_lyn>_ z0xgJ=i$82$Y-F938~?+w;}GhEmvnMANGUAMx@iK&>6hEw9I=HCn8>$-sko=~(1^K7 zVUx!VObiZfjy_az?iH13bCq6Y52~ffr|Wu2phf~_`Qg(~&rhC*n%O53_hWfc<+U#f=p00gdpmeM9TKyw?y#-f3I z5au`aR_kN*waNqLhNDK(4arMX*VVD2>88`$u95zo-aN!o~?gsF|FT7VDlG=t_+v09H9~REv{u5+fPP zbvrbe!ryMrs6mC-J1lZE9VaDaOdb+yaOW6*_jbR|Pryu?9j;j}D7R3n-|L244WBaP z-+^s7{Sw%Gv#uqFq!H6|_de{PPz@%oru4r3&4`-SX8P^Y3KO;X={Z-bbJ z`IfszOYzL)5a7W0i=@T`dslUYPQC^mV>P`4{gtL)8n+)HP2A+P`eFy0F{{+G_z5NF zEB>ko?c8bimk` z67NMtbJq(4ybnz-mHcDRVtqft#f%Rnn@!G{PQAs_%@8z>j5U^}#AerSWPAE_GJsqJ zC*H^lK1wFDjhAWNQE)abypOYfvrkWOc-{G`I&PP3 z-!BFRQy4U%57ymQv(Q^|RTAeaht^)Gj>xVX+hf{Va0;0uIL3__*6quLO3fkrK0;ym zThC!sbe*H~)W&;Suvvr-7#R#ixAVmMe-#Wt?sIf>k<0QaS7+bbfeH|7*malNjxJ)5 z-L3Y{$sIq@T-@|3z{)$_lp0eG+1eae4pD2)T`7VLWJiVSXDkJjFZV@JtE`djdhSTN zK}G6oU60-5P4A=$c&~R4oZ|~IT&)gN_wuQ8HZ&Sdw`2vjs);^fDw#Qn!iKo zjL#J$2+vg8%7m0$b=|d2*wr*qeh$RWJOCIS}u2rE;aj!+R57diZwa zl*ee+&xtSiPE<0+^<6w?$HA#vsO}ilfdI^zCTPem1^rP+hrNXmo^@Te@0&U8CgV(BHr_ zQGV7k((Hhde|_x1mga3Kv=_ykW8QYNT(E);=@%9GLF2akrY;5*p-Xy-XS=VqCv;Y6 zjVu@hQ@<&UbTjmYsAnY)K{4?0gbn?jlb!m4?zjgao@dGBgV2()A_6#d$!yvq-L<>J zyuh5L+(o^nB+&7V&LL@y+lhNPN-wQ!>#i>>rc(JP)t&E}4`D7`6MHv@_i3Dak&BUu z(MHa+j0NCB?c;a8KF40E0bbYp_1Y8p9EW#qKHd$^lAGvWWB+Z%5ig?rOM$4J7fuK{ z>lMj;J*31kNBrp(VC!)HLU81YLJCv2`p!rO1zt@2*?T;_a`&0Rl}NqfSlMFOAx9>t z8H!UXB4M4(C+qM8GozXPjU7A>D+Xjr(dpSRY$hCafF{&0>J)8cZSRNN>P)8x$mRlz zvk#|A6*!FqjAViqm>ZD85_*(q^n;e#X3C|Od{(-A+=(9#m8doEQQ(R-xli`4jG}ce zd#s=*2o1hNOQ(!lv@l}pWYljr^DjMo1#Pasj^0~otz-2%NsQZU;T(T=*(TK%A3U-* z3rE{Rw;3PqgmWs|M7GR1pxo(yQ!==1j5uLZgpE7`2V?XuO0<(N<@lmZ9p(i99tF2v zMlrUvdx*Y6I*>d#8X}5|)Sql0LWoi!?dzAB)NBCfg?oS?B)$m$%767VhNUO_tO(d_*8JGv6v`?k$Q;@_y07ft(}41C2$%%DWg{Q2zc+TMmjA zTuIAXOZT{W!|6jwWQ$@9mm4YkUM2$$L_KCj{7#Vx?4eZztIF@-%6ko0-#17_6an zW7K6k8pHJk>T^m?ioLj6XJ+jP0i*#(QVw~AI(u{@4bO+e1zvKxm)FI!r24UV-Hdig zxHc@UFLK%BnB^+R)-zXqeM)nM3LB)&<1~wI^zEQNRaI@9{jbJ(Ybuoz~|8I zwXBV?&9;nd>jJ{`o4{NsoH@1QdVFw9_`CTBcxUa%N%NR(RWdZXC0YN0+toB;UVNdK{oArg;6$c%u=Jc+dhB~fJ63rrOrb5WZs~iS{PG@uAnnrko z=bxL)2em`HJ#U(49;I=ZLj?ojtpHxr1UOU04EBECkuaPLM%L>(t0 zmXv=7N?H~Yp+ryW_5hNxet#p(+m}IK$qkOR6*fna-%fQ*4EQ7#aOUID{xsYVesz>0 zoKal@6-O%+r+UTeSE~Qj3teM4Vq3;>SjO(0s2Ptan@m+wnJAASMzM_|Pjh>@QZ}GN zfx#hn(A4OHJG&9|rD1iAX56rSLzJHTJXumsYG(Qrs?4kgyOsc$BVT~0kH3lEYs}2D zz)VOns|s{Zl+=9*CYIv}Fm5!cY3(oC5vpZUy)W;_!QnhtnSuqz8-ep~%y4j*SRKQ$ z_DWLE<96G#k@k4XZ)6$XAGX;VPD#kLM@D*2@69ba!_-0+_0u763}okeUGp>hXQ*rY z)5X|k7fe4M#2&x@_IVSLc$x+KzS+)%OUJro%_8Db&UP|lhYRyOJQ9;<_QWD$mXp3Y zeoq~@VjhV5kL{Zubx)Q)x`zc31-u83qzFa%LR5m0)^Ayfh41`vc+cL0SGP*p>GVrZ z#gb`tm1f$`dq-Fa5V$< zaE=qD5&}iMX$GG^*)$uMbT*L%JqmdfAfU@dZ|LR@1wYmKo=(5P2APK)w7=SSQ*GX@ zNcI@iZj@%2>J3I3FW>N|x2#i+*vi`YANdrt65uyjJZ6k57Ky5p5Q33UANMS@Ip410 z7WaK)TuH@%LG!uOyqhqYC`Svk7QZ70-mtl}`F7(0`e@po^wd{}+q4&-$r%6f{Y<|A ze@Xu+X{O6r%}Rgs;2Wom{c4^;E}W@7!m&0`W6X+vkmjH~Mp(=6;YUPbmpNk75c7r6 zmu!n~+bxG-`#DfKUIxV7*Qi`+ZkbT2?-lIel`T!E%DiP}Pga+qmBCw_h@+LaV=4~z z&a^$7B8?q-L=n|^t>@edvPbICf7;>!4e?g0jr2TfHnz;~6AnHs%SsH%6T_yl<5=~^ z78a}9{S=JvXE)|fv3LTd%{V8!aBW-2!VZ$198b$C(MV2GshY+G3_D2EMtYJE5m8T& 
zH`*CD!BnQ=RSnhiISW&w7)XZNP9szO#0`@R8!0x!*|;*`>*K(8>KsN-@cYOnE;CZ7DN13y&PuTP;$LZ`1ODn2OW1YWB7_M+B z!!cspZA5DExGZdR`6F4!_to)>U|4molkaJ8AUv4wztEXM$)l6KOkY0dM3gt5UrwEVH^Dc|)9~+$ls?}?LE%>~?K34yseH+(?qeX`bQ8I|Nikg|u^tTgP+|!` z2=XHjVRa0Bf2?}U?Jdhg^lko5Y4G^{3hZ$M^5is{gb@>bA!o=0B%_+Qg{{lGZ^wx1 z7KQtI6U48y4xCYxXN=%8CZS(J`m3hb0QZ-!ZvwuF&SvZ6T(-BkDLAmEfC7*%+j_ss z0BD4ez;*~l%7s=McDbj*HRs}Q!N}b3A6a>85INJLDKeox<1GW%LeUIP85jDNR0x(9 zz#vL%tC7#_V6lBQM7mpN3h-iP1!36~4qx@AKS%#=y$${bUVh2%+bihPqsFv~m&2k~ zW9R&zVV7m&@kTV0xe!TT0C_8*Q;c7K5`T>YMc6J>{_o>Cj@L!*V6k-x4rvUx^2+- z+wer~3ZN)Jh!AQv?2h2ny7Gw)#=ZQbY3jSJsrvn^yLzVN-(iQnCoW^SeQE$4(APn_4f^<^`ke&Pv}pmLX32%)+p|Q zK4Avkr}_b$S~6iDd$iUy6yYRx984+Q@21$Jk;ZXN>6hPGlN+TbHOpy3yQN2COn!nO zOR((2{i41k3wuKwB&GOnR8+hkYwXUgDiV)0!I7q{k9Azs@XeK#f51JTq4JVLVTK3B z_7!*YtyD*7IjIhu*c1#6FX+kxDt>>@9_=olJ979^z^)hKA;pqBK^_7Tq|a*ac_fw0 zv8en+f-C)kr1{HO~d2`)P6G{f2outoH~ABdq}*z6|l>Yp*}hd4EZK0+~W zGlvv|_q!9OCZ^w>TrW=YttCw);YOPjL;kSrN6H9@$3e(#crYZv#G-cuGab8yUC zE}B1EzJ2#Po^tQaw-*B7jDF}97|Vew#@2$Wr;$q^Z(fO<;-PIJ`Z+fxCfe(LDTgkd zfakWtEcvEA29CP>rts8vGZoyP8JDel*>(lS-5(rA9oE8=!L)MrbsbaqMJAjZD^w}C z_c`qzwyE~E%Wm0F(C)<0Q6bhv$K6oPN$=4v>St`LzJrO`O!Nv$Lf;xML>=*~bEgmb z6LN?#9}Exn9w5!dE1|t$uHzN3Mg;;E8eGO?-lWaHzl95D4G3<5qZcvB>0%lzUxhOn z=YNyPpX6w`zYCjuEX2llZb}h&X)3vm!iK;X?oE4U>W2kQP;l~*3Aow=?T1)3VU0;? zKg5rIurvw>EMo$I;&|iHluVUqf$AcAO?}Ir-LtBR*QC-TR0J;>arjG^Fz7=!LlP92 z7+0^|_^gvqJ%O_W=8q9jCyw(1#0vi7p$igP;c;O#x%F>1ORB@vxT-*CFj5uA;}kQC zzFqKB=ZPNVr+!rmO2>TAkY`_1!ry;qln1Mf9<5+bDz#se5;_sEBWnZkkeDJ7AGbey z@(HI^v`f}Eqzm|X@`0LSPOfdZ;>V&UPe~6eX@usouFMRy4^u^q1b*BXP=W})c}ubA z_;Y*&7BB2(qYj89SUNHlTP0+z25>*=kxGo49{Klt3f_yXVSlo%|KutHE3b~Gmi#LA z9ksOPfj@;=h9pe9V)hlb5%oueg+j-OU5k||(wrcAyN&fP>yX2d)Qy?%yt;5gj%RY0 zR0*$A={g6k;&Em(7WNB{YSQyyX>04sRTYtBY@U+cH&=;FENJ#FHppr^2Fq(tT+$<=W_A0N*Wymk5aRqwT zNZpt8N^mM4MYGWt!Z>m&+(#5QHW#h)wdA>qgjf!v)OK0+6N^oLk~mQK7;LtTts-C$ zT9q70#R3UIl1HbgOBVSudGc-I=P1#m@ZMK|`V=PK1a){kwUh~2bF-wrO7|i7qL-uw!8i^={iQj zFsMWjYEu|Na1l~=L(2DIjmhAomjInU@KuqOyFqP9X3&#dZesUG#oVBp3xrxulj_9U zj>N1!4HBaM<&>N?8Oh2lf$+nsSqAK+&a&^a4i>d7-p08Esit|gE#%^hcWMO*RYwLP z3-)gZ_&>$oNg|SZ7uEPHa3DDu2ZvEbNt4r1ZUx7qBd3>nWi5GLLh|`m zdLpr;M@{|ORdrsXR)d+V7l!VDJ0+)&DZVPwC!BB5AY>0iR}m(8-ZblNH}@{D8dBpL zvpb{6P}Im+>IWKT6)QCq(7(&quU%%kPqa*LFgdzBr&sT*6ZN?*aKXuP50!>0|^K`K?BGgubPn2dmwhPJHOhtwgERV2`* z(KcU#Kuze{9s*)N46`lFF-VZ>?R|iK@wmztxGXR1O=qgvn3Wu-u^sP1i$HJx6Awqq zW)GfKjUJ75Om|Q|FfTa!6+%#quBO!@YhRc|aL?#D0e_NZa@{mMuxls6`b`BBL?S~o zkuL*&lQ5IEnsCw(p{s%3WvDOQaN27iZKjYO!NT>_BZt_J*Cw5#2$RjNY$0K=6J=VS_FSB-bRkd1(kp5e;$b;mrc1=XH$}gS-lH={1mu?zG_1mxVTtPRh5M^P)~=!RrPG=svM>)z8@}-8{KACs#@+fYKo*8Rmosx z{)id%WxLkY40rG{f1(wJVc%b}QFoP=sK=17N*CUE5q#>MB1j-?15Uz1BdmVj;*|oT zo)i@nfcE`fPW_}IeOe|Z_8EIs7V>S_g^7lQPtv@4qgChprojbTI{}CkfAD?zX8EqW zedsveggLNF9w1lXo(M!S!nTBg_oQbpt74E|!w9msd;Zwh3WeMs5;L#~PC>6eaAFSf zir@DQ#c|i?uOU;fzJed}oe{tvji#Z$n^?v{VeBW)GDWnzcL*I;UJ(9z_@Kur@dlm~ z0p${YSbXC(_V95;6zrvfyvg*00@AHU5(*L@lOCBHTpq}ah)w#NGYtt)QxH?ox1dBI zR~))FIUK(9AV?s&Ac6x#37}|aEbYZ0HaW2%G=kv1B42j{M=xR?(<R>9am;Sh8o zh=?Av5GompiBOl-SMVz4VWfu`w|Nu$f`x7mc~3;g^!9j67)6m0_EV)KkuR*Y{>BId ze4N!lE!JxTjbncjY5lsdh*;A40Bf)a^2%Zx5a3pt0UEgN!36D%n`^%Htt8{H!qAPm zh`G;Tpn_MHX~LpKrQ3wfoIn@MqxF?iB|?1As~<loA08dgq%W)y^ZPzh=3>2uaV4>}5OWbJ72w5*3By$^w#KQ5Dx2{jzih-LIYVu?8zhd*g(@&|R2Mi0 zdB4Cz^m@5V?CVY7N^uc2(X}=*rjxmSs+Cm-1_;rn*^ABGo_~uJT%tPO_zf`v;mB3( zz#W`zX&JjQ%Bbm;1$ewLCNyy23d^qB;;dZtSFHNyuS`1b&B5S=_sv~*$`qVd9~6{p zXHBn$%ElzHUh#%wbm)dae~uROUw|`Fy;vpRe_>ZVM;JPy?5IWzegPso<4cfJT-IoA@bA8 z4tt&yc3i(jk^EyAmsHa!;6}n~*!n>}k)8cp{>CYwhV?BUzRu*-o0Wha)5jS9?=X*o 
z`cI!O<8=*;A7e-$s~^9OT!nX@kCpoxjmZ}Xzb@5zQ!sgFmwb8fR!c!+^0h+dr5&wl z&5TKn23O7K;Okw~NOLbut6ihg(ZMo~8A^8T1C$qnNxFgu{$5m>PB6L|Ge#r*4CDyZg^D+`{UieZ+N635q}!oiwGlY z36>!?*c2*+bhfaL1dw&?r|q@q)WE*d+&PA{FydkH4L6C9k@*1fR_Qcw9i{|k2F-${ zTH;k%zk`<1+)8-ePKse+tr$i2P0FB(PB3qcv_+Lq@tS`TdlIw1$>h!c_dWmGh4+n_ z#+NvO`%0p$2TIece2$?cQ?(V;dA$fT*zh&0gNC%Dz5{k8aT8flC1qjLdyWlJ0$bmW zD1GoNaN#3O8Y^(REyngKQebCBNit!Lw=`?DEAr;9DjH!Z>T&u`_d8rRgZXZxb2;@L z2m;xYwkq)GXEx6c<4?Zq3&p`}MZ7&F4lDJ;%>E9^2fugJi!fPj{wtW5wDe9MmN%^W@rm2_qT^2C%D=b^B{J!lLk{=|#d znb1#<>-GD&_(Y5&=jRTohfDl*)|$i~NY&qLA$hn_Z9kL?k;1!Tr*Wd`inV-o-YYNo z8cs*yvRJh~#my7m7<@=dz+f?_du_N4N(cdBXw#vKwG8R3vlAt}Jbh1?I#wDJo(~$p zG0<}HRFi>R^}g9gmX~nWig5A?E90X$)x=YK;)SK{X-n@|6I>YgC~je?6$JPBaS5m0 zfbOLWp-3qKDf{RKN~IwkX<|qq@E`q>(0Eo#N=+t)7fBJ)>lw*DB$n73>lINX`29}g z(+OX&-Z>bK#GbUf5=9wD<=3y(XSw#A(yYhvz2D5*Y$Ts9oih>TC%>lpB;LbEv;Vr% zB!RcORpzdQT1n?nJdUu(34J=@P%UmJ>~iB(l24Nn4tPt_z2_x+Am_qG;e?D?N>0~( z(|L74$;BgBt8CTDzo_$BLw~4~m7VJ^W&ShY#P6)k4-Dp6^b=N+Rg_S9qe?4eZ>DGY zFNo3)`F{bEI5^mVU=lD@#h+jj`+ozIxR}^~G~*XA=_l6n8<_O7%>N2Z`kAu>XqA5? z-T2j_{|~{We;bhh0Y~~tPyIf`M9j}h`2QG3`iUYv14qw)&!3*LBmfW|dj9+qB6^lS zgGijLtUsjBd!J>0mip_cKhE%sD*cfCz?^=bdAr~O+bV)>twiu@XhSQVIjyU>KoUO|HM38Rw2DJg|wu~2d)4fe-NuS}|DIed^2nPlD~!JKb#os^WGmr4>VauW}+WG`b{l=UO9eA7tHcgcR$F-|B^3Q zD@NA3n+QJWI<<3fPamS6`&Ci?N4mS5d_Is43egl0`!6JQjKn!__Q4F#znbvT**r=U zdP3cHj3d79xb>Bh)|^L)V7nzs!5CY7*xk1>i4UYZsi6O`&0kN|h`4F;GHD1`Go3l= zf(%5kl@;ZlsROuF3!Z#e%0!wmEcW{`oES~)Yx9YZA7BiF%K0#scaGF61o~zZUo0#D z#K~}D6AkGqVNl=6Q6r=|s4KMlsCyviR0{+H=SN!m`BPFgMfOO2{CeN_v4_is$d#AH zBdeiL{YDbAs9z1|Ke6$@0n5MnGtdh{LWv#X z92!!28!q^~(51qplo~F&&?D+E_yhx$e5?J5lUl=Hea`Y1qDv|W2LlIBDyCE=!UXY9 zL-F|+9sPNs1oi>&*IbD8Um_4N0n?w+{J*vs=*@sSCJx0vJet7?{NSHe|2W|vfrRBR zx|{XaK=S{V)?cH?A5%WoUv1-m&H(=ZKac(~IDe_^ zFOUA!zW-nM=pP31caQ#|^`Anqzosew3y%f>I9XZ#S6=^}XLtB%XMf`G&l$}e zeuNEaGs9=P`iGDIF$)y7ae7X2_RF=0_18cs49qGIWax>g4D`(GZHSl|nAsSBR5O*S zqoXZ1BQR+jP-vhhe?OLmi}@ez zW8q?XCfa|igN2KO{qJJ{l>Os;tW2!`=o>2&`#A@>VDj?nyD9ySPK6Z?Dq6n}{0 zZ}~x;Kd)1>`->{PgoTE_O|e^JbGKV79##$dm-Cj zwevI;vi}8h%9n8cEe|NXepw#0vafK_F6*- zfSsVU=jqcyY#SgbI3yy>*>;!GQCBx5?k@mOZb%f`2&sE|g`c)`a@zZ|*1x^i;lE$h z!F4)8P`~ziU4KvihaeGepKvcgWhapPq<6SiFo3JTz2tQ|4D^}345X_>N1R@V-vU@X z40I5{#C6#BPk3n^KKT>o;Q5*7kh2vausj${0Z-2`UkDP=1nEjqUcR6nfjt1;8shCA z0^k?`TLcG(dIR`5fHgs$(*Py{c*n_qQIGGxfIU4={;Jc{)AtwrCoZ5QXn41O_~{_e zvp@g%?|cLYMFM*Lywt%{z%Oi%Gk61B5yOF@4!_`#llvY2O7{oY{RR6)?m7Gy>=kYW zX!&PZZ_nKhf59O^``5?&XPu|~ZPpp-C+r_#cjzzJC*1nrUvOBc^RKo}g;`ntmF^h^ zpud%k3_ScR-OtDFSN*Ymhg|;36Xkzo|6g!;;K5)0iS@Jkl^z!9{Hy;yA-22z%6}@% z<`?e4;UM*I?fUuK{VE&bcW8a=f8yfh`SUv`q0>+h_IG))f(+Asq_U5#FmVNpM;`>AAr zKH!6dA#q3sQiN0>O-LKs1{p%8&`!u2vWFa@Ly#+U9P)zvpkOEriiYB$^H4IB4qbz8 zKzR@rDu&9S$IvsV7HWi=p$_N`)B_DbV-OLVhQ2{$hzijW2n07m5Fv(;K_C%o2ra}m z#14c7!Wyv`;f!!aoIv;@LJ*ONc*F%nIwBj9hbTfkKvW>=5Y32Jh+f19ViGZn_>Nd( zV`md!6K7LkQ)km<+rehZ=D>D{&4bOC?F?Hi+eNmkY&Y5NvOQv}VQXf4!#2Q1Wc$Xp z%nq^hvrDpXWY=cj!EVLw$nM5|iam@yfjx~qhy5=56ZU%cPWFCwBKsWsDhC&b7>5#v zHis$49*)BtUL0pQ5;!tA@;J&kYB<_B`Z*>!$Q&@IAg4U17N-fP9p_O_U(RUGWX>GU z`<&IB?VJOg)0|W;E-pzf4K5?DJzTC_0bFrh8C-X`a9l6Bdby^!mbtmPWw^Dt&AA=9 zJ-H*eleuqkKjOx7_i;~i(|GuKkUZOXY^9Lo#v(UiSTLgne!du3*bA?ca!f4UpwDNz90Pj{L1{s{0I2``4jnX z@;~M8;-BPS6%ZBJEMO&YOdwL=sz9m0OMwxAAA*8{>Vi83-2@{9uL?d8Y!#dkqzQ=$ z=?K{gc?)5L@`Y-I`h*sQd4<)4ErpK@#|q~NR| z8sc{10pjW6kHz1MlO;qY3?z<7#7N{zypSMDvP-H<+DQgUW=cMj9F$y>QjoHg@{vlF zdLq>)wIVGiy;IspI!ziUJs`a%qaxo4=X1rh->F+q}(dTmH5W`hxm4 z`WXG^`V<2#10RC|gW>IB+xKlx+1|39X=r2^X86Q#)=1UpgwbuIp&jBooOWdF=rZOs z-ert2ZZuvsF*J!Vd1kU`s%;u*`q1=?nTDB~ISPTT$7TGaZmb)NNv 
zjk1leO}P!(cDrqy?aMu!dmQ#;?HRIDu=BEeU`Mt$vOj0v<{;p3&>_!(w0HB~u)X#B z*!J1)yT0$^ezpA}`)eH`M?1&sj^j=mPG_7N4sacCI*@=;l#_e{*?Y`TxyS4ib z_vvHXk0l=)^w98#@pyGy_IS|o7ieL$C%WW9X;=QE}yhJdHW>U%gQU) z>znsZ??1d}eawBbeP&LXpUOV<#n;^Ty6>zX$}h)n&fm&E&z};oCjc9;66hFM8psNA z3Bm>Q1fK}53lR+o3~38h42=oxJ*{;*`84s2$(b8x$YBm)_rnq49^ua;#DRhIHc~V4 zQsiWmMO1ziE&51wWsFEnNX*-_o6n}6or$%Hy%)z3=M~oyzcK!N{Ny>yb43Y=gp&y^ z7!}M#%ygn{V%d4V^MU8zC2dX0PFlL)a-sgB!o|dkQtvOuAaL3KGQI>;2PJpkZXfksI0PVk?h#) zsq6c$*ZiUMN7^4tH_$g;a4ZII8$_$&r%IdpqtuxG#G@{XVlaq?B0Z zT-Nr$=)r@B@((l1*~%l!XCEDV)bn`vZ}`5Z+o=0_k8w|ZuUT(XAF8jpe|LY!2fGh%2KEp14jvvH9y&He9QGdm zG7>aG8I2mHk0pNO{FpYrVLWF-e&Q}+69GpwB)*v3J^7Y&kTf>sHTCUN_$S8n#m|DD zb7qh;558>s(l~1~+w;}^>-4wN-{^D6^CI)N7c>{D7I!YbB_AbEQ^F{$?-@U2ev~d5 zEVV2$GQ>=TBQ;-78`{ z1^k-trNDP^g&;9tL-4u(X;=Tu@zZDe8w5>K z!L6PMLBcyBXx%hAv=d^}*Z*~cl=bURdwm_^+5pO%d^2h-nb-nMoPTxwY0HDezxw^J z-vAp>`}+0&d$Y!%i@-fvzu3Tkb~ZLPb`ExMfvJD};^GFgH@Lto{qy4adGW4aU{d|t zg;+1e&dv$`^Yd`={FVHVzggqJ^vYnpghY5a<$0Re5y}vo2!dS%!D>U?1vKUSvxh&s z4<4YO9GqO-JiL7T8z43WJ3AW(J0~Xxa7Gd5*4;}E5l+#KdX`*b2Rylz&xmh3pH;-8 zva6z5!nv2Ms(&&piI-1ON?Jx%O$scG5Q|G1Hpd-GP_-Qtpa_e;wjJbYGJRb5m2yso~b zwXMCQv+LFCzWxsbgG0k3qm!hmPt%`gzRZ55eE+euOkJU^t^ptSPjtZZPmKNrJ|X}g zHVzJU4(@e)5Ny%w$ck`qZq(xvwLHM>c}7fm+j$=GU0FpH&Acl5&SZ&`VZD5kss@v4 zly#JTV)Tz8l=Q!a(JzF4;lmn*1lSQ^z}Q6~6a>SXchjKX_iq~fqddUs`OP!`D6M|y z`%|u1z5fSz2G@ok#lb?7EGWq>p&c*3j2n4FO`=r8H%Q&W&(!80nP@ZnHfCArs3vn+ zH7f|l&$A#AG^SaU1$`+Az|i)8#W1eyME1^0tX`liv!LcvEQojuU2B_mf-E)HP4TrK zWLSnJ@2SeaIny6n!aw9Km9=Z}>LjAG5ARq8-A&W{U*C8&ObD8$O<;&s!o2S&&O^k` zc@NUqJN`DQ96OA-?$;-(gxzjb~v(<9|u z#+4qkpj;<|k!Nif(m%YO1N8Xl|3naAjs;h(t6^ZE?Px((S;!jrVnwLkCQ38FY53 zs>a$hRoQOPp)x=9C<|yVSSR#5A*{v!gF8XingwyoV45}WF^jn8o);M7S-#=d*S&-r)%{T=f9IYw;i{sW3jd;O_$(mY@Hp&aSL%Ipc55HF#_mKl0Zj!476 z)qWOq;U;EUA1#IX_5urEzn6f6-y_K}i|xWdy2HqrmkLOtP3{9Ej~}&uugj0XzQX*u zt_{Yi!8_jTSD_TE5Hw#F#2EmTf^YnO@Z1IuQ_;NI@y8;o0{tQh!hwTX)mL6!Hs)(* z$KTizwh5_~m31wUiNu^F(6psqBKbgvH(H1%jH7Gu=?7b#zHM#4iw@8m9-m09-eF`z zsIN>u%eA;AJ8E9B8V8F>glJ71s^Ko8$cPquX{H79W2njx+52 zBA>Mr*?;5~KxP4;u`du^8TEjEbQU9}E!S+azcAvK&Z(zO)pa>E8`bYhRnKeFNT-Hh z!V88@!m2LSm}WB+@d4&pC_|hDy@XY1DRixguSO>5OQ9Wi;?zq|4eqx05@jpu)~dQt zpf`)2ZeT$iZlE0#hBtV lpDEb#{vJfUqT^P_=4&J|~V9r*%^L53T`{ET@U;ZDW z->OgVkCC8Hf)1W!=5sUT9)K!deL#6Tj%0R@7U09_Rx#(_KPa29R(L#6ed9`1;Fbet zH7?~R?hk{sx(pCZ-GmQ#a@Y_ zvG*1Kr)<~T%|GQd+*lKTbIrzvTVf4C!j7iHe2yUZO(5N2kqRg(zOS^$Lg(mlEU%xD zRP2=}#U@85vY)xC_c}* z&PiES=N_!DrQU40uj$TARXsv|gc>e}h4?`q-4iA-8y+`pV8~Fu&B9xnc2xF7elw03 zd9nwVK5qTpgDK*z69)v{2uD+4L32P^akMkJJ3{*FRPdihy}Qa_igib{Eh@Yr=a1c`s(9frP!pz<>(!XfNZviU>(7N3_CKYwN8) zacnaSzgZNjUH`z?$8#_dbnfs##5?}I&j+i^FVAn8SMe_`IK?Y}rtpov-b+5{l}4i0 z>H-aj6So7VSyt;dx`tk%LK+QAa1QD^@fJ$1)t4h4@hX zD3Uq>$m$f5u=z`^(K8y(wyBCtNL!eS(m!@|^X7Q|D1KjpJ|mcBM>k;@et>lu zVHCm4OO1NPwx1^gOfKG)Rz`=#UhmQTw8Vyg_Zmst=s1^4_evO`FsuMs)e`!fpz5BY zxL{j5GPUmE@zTpd3!bJ}Z{ZM~4Evp3HHiil&0R#Mr$ige5Vf-P4Q(PJM6PRZmAL zbl$EoMxVyoGY>M8f&KRo^)#9x9=N{_OSPjgv6mMgsfbSahuNwX6o*`#3LiVB8Wz+o z_d$FG_rnkL7Zr#khoCOQ9B76%`165Z-b5Bf_A3>w(gr7#QaNgZ&N}eTKG_{|dqIEf zS%F2+8FV>NNp^VJ#^}^W1$y)sWDSNP)=AByotetyh&kv^KX`Pu{!V*GIWtp5gc{g& zGBGGBW8UXnA(##dLDAEpFLh~3EA7IgEXZaEMZ8ClkM|&tb&7m(>I^OQbBk;rZ_Kg0 z`ikHB-tn@NVS1m1?yiX-YFeYR{j=wa2--lqG<6kFbrqU(`NrRgfq#ugW zZ+_H7>Ph^-H)qmJQed8G3YMgXZH+NI^B^YR+%BKmn#Q4s2lHF*3{OVyWq-7j1;u)# znP#C|78!EQK-z+7K;}4uXj7yssHyw45BwE5$%k)8yPWYC%JT5|_;ySs_4w{AFQ;v9 z$9P}9>~zbAg>=FAm@-ts(;+^N9CC|}`dVL5Ok_a{{zFnuR^@xHe;jr6`~jcD!(;uk`BY!Oz=sWbU4zypqc(*An8(>Qc#YA!W{&0*(~I zG;hIs>Zax*xh!;Jz6LwCM{#IV^9x-a%3j<;wHjRJ(1a^i^Iv`~PGg*A-oX=#u@V-X 
zz}hHtpj(dwsG}wPOoCvE{!m|Izv&!|G8YT->v#q=y%ZW?RDo=GG84EQ%lEFGULO^f#%wQ4Ur_0=J~?J!rl4P;iD#hmAm(C5MpK@(L^fX4&h$G@yh% znOZDpx8qLa7*qVfPR{=ccfx5fU}(&=G*SUI=lB7$ZOj{OY z-NU_l2h;on`TbURGIn_pi0Z7q%Nk{974X!*N$o+Pa|yMX3`r}P7t9tM<1a3>5~?*a z3;P=_Bc*c`gdLMStepM2ZTx}<8t!@-C}JpwH%JoOE?~0$G#LKoNua>L@oF1t-fhjgEX+ z{CuN{P=0(r!&3adagxxz;RhQ}Lo-V3%#A1?7DO|?4)aRT569a)r8!Q9B9GBRj}4r1 zkCSe+EqzzodPeK^X6i9KU)wOwNveS_WJ%ui-8l5!jckEXjD))O*fMv z`i3e-xzpXMdxaoM#I_sic-8kVIrvvQlt&+bU^LQOd?{r`H8M0{)p62@p#YHN@S)u! z9W~R7n|k5cl6~yyo|&PD^qbnXid?fNU7hvd{u>8Arore0PE2!60Xq}SLjjzWQikM@ zsoGWr-DXFDmE0&vcx#-3i_?p1N$25_P(Rq+C;cYj?3BbRa6CD`)6Rlc>_HeHlY16V z-gD=$u8!oU>uuNkhjQz$=H#FG)EF)Gc%#4jot8~9P$vXLy)mYrdXc7225LTXWnP1{ zFM$AW2sYgh59Yg63r2GfCyswEJn!uQKwSrFUT&KRV@>Haj5?>$-(lL>q5xf zWUbTlMWyZl##fN}92gRe0EYO78!ip+zfX#kP&%c3mLc=V-^H-spW-@-ta=&NC26M+ zHBXc@rs96wX69ggUieanm`#nw^vz$4iuYkVCe*Jx8o5WG)ys{}N?h2r>E-NgC+;;L zS~)D#1{%?eL3d`yY%&Xe6c9yzee&Fq&fWFdZC**1n$PO#C3}T~h8A+?#^{a=1QXXJ z?$3hcSkS3%@sXI?R@s}(ck|c1xfGmtpTLQyGxmIa^r9WZK_I)MXoi4H%nM+Eu`4X7 zT6nILq$~O{a6&kHjP7h6LgG)48#}wdGxBKR<+iWGN^CCjaYJ^%$Fzw*NpWOsL0&;4 zLUcZvDD}^6e;%BvS!-j(zjbL95=T_})3*ZBSEd8b(|44%qTABu7)L<-V{D#%7yP=O zE=9$U>QgcLbg|If&+&CXDl$on=po;E;52f^0eoXXX=5|b;uvmf7cDVR%?)YJvcQ=N!Aii=U_f=Q zNZXjnkW-;=O_;z}sFlv6>Bng4ZT@FTsT%1v6+H`{)oFu+V%0uT*)kbdJFnq4mq|C{ zK9^6Snz0g?W<3FdH*>on<;89ukhpGxdp2weR{iDoGH0jC1BOUiq^TIS3i7v3s8L)x8uYzL=|>v+?^X0w zJCJ)jqMlxJ8+q-SwI7k@bQRTrF2}Mr88WspODsgcpyXkJujnyFNy_$z6aKYLdu!kb zfjxrL@3Xq@m~7ib#Zd@gu*Lx$gYm>0sG4>VsX+1v~ zhs?|Pr?oAi3baKcGAT0H_XxS^PGsEK$P!D{{&%XIUB}ZtRhaLKKL}(<4e*w8{JJ!W zFe}Vl>FxwZa3lG4)20}Ys^?1_jYs3l1})T#Dnj0WWHzV4xACVOlTIhRBUBDuut1Cj zXqR_R3MvoTyAP(n(ogs1ZS0V#nNcv3kL}^sCi7D_GeLiR{57!D0Gh`n8u2coK_h=x zU+^RK8|M`Fc)6UJYa9A;1{l;iEa>&tMidM3x{(Q|<=~%~FbV*}v;bz-j=<5h&~x3M z^c`PegWQ)DN55?dXo2MrWs+Nqb#te06|$SwFH9+Dvab#SWK@E+Q|5Xp1vI4}nW>Mx z2n4_CjPpxRa$#Xd=52<@6n5moWad2oWE^kEqeg{t54V749Y$L=Ox#`F@-jkhAUQSy zH-@Dt4>3gW>rSc(Lzj}$8Z6b>Pou@u9?EnMOsyK|kI&SRNIzj+aE=gd+_lu6@HGA; z&79m1tdUj)Um~GHTTw8s)k3Kz(1FgMb@Ov-gZzn%)0Wao8EFu#g(3C}(>ABfl@f)A zN9j~;+9Zjo+dQF6o_HKx=Kt;CN3}=MHMQ?PS6m?kj&r4SbB0YnteNnk zX?q>5Y%N^1=S#US^G@m6t}uZT2i5DEo#V{meo8?P z-I>A=XNLuTFmK`g@To>v2^Qo{kg+4zoZD}cw`i01f#BcaVtndSV|%s;-1**o>yAR< zqhLr0nC3SY9Lx)zn6D1s}C$FuscIhyyEF#1WL^I;jpV;uYKaIq<5#m+%x^0_m&)I(w2osLScb2 znlTZlFcO2KKBPSkC(Wz(wl<}g9A&2J7%mBieK+M?=9gBDP{vo`1>)stXyW2UDxTfU z=&qX^&A!`F_K=&K4d?U9k?B=;lVGjpys~M0*H_A!H(9pH*Kb^(^GA5!ORF( zmy8kesJWjPTtb-=-Rau<$1ChjHXB=e<}Oes9e#%=2c|H@ejMy;)aFRoLMwU_z?nf+ z6digTlJ{C^hyVD1ipMzH!uWg|(~$Wb*c|U1M;510ZE*xJbwA_IRHcQ6mKo>l6%C!+x4C=HX@6IE zjqCXMTi>F?)~eCsh;%-YaF?jr2dSuZ*o!WnA^wq zfH;!gB1oPvANey_e808u2SxU`~ty=f|W2>Cb-8Gi7 zh>z;yRsQ~g?5va+=dvP0airQY@4(v<-n)SC_*I6s&l#fJV>AB?c6(Z^1$Y_`zV7_! 
z{q>PC7d``h6-)Lmy%4D@{V@<(nbL7<>73#bZNcQ~(zD-Rq8esel@~ji;cgIY1L#wR ztpFSkfScG_?U`o$2!RxlV34^}a)p}r{&tI8zSe~wJh$8CB>gTUBk`NzVSEslrj2P0 z9hri8hv*hGB+REpw#Q^#W4MtcVsq{>?<&QGp9yN&iheKiBaO6kO2FiTE2bivA#;n& z4Z;>}kBvyS^X?Q`%w@y1W0a3NDL&p?9?kf@-_@;R+R$0>P$JB9Xepl#;=j$hcy9tt z6F2uwc#c5nw5A*0y7v&{b=<#5slG2yZ8&+P=GsGX&P`YD#k4~hH4Flvk&Y$r(xqu( z=c=XV&FT8zq5)@Zzg7IlSJ`a=700bg_FrUFHlKEw?~oSRDm~kW&%y%_9!?iD}w)l@JVuL}Gz#FFEZ*^_2WN&HY3ECIT?EPFu!E*x5oCU$Qs6@i@bU9d)KI5{K0@(i$s95m} zl5ES5mUsVx7kOfq5brnms3TZAf>6o4DUv8jIycfy-|+~nE&DPyb0GId)vGCNfbimmMn1-FK)gUa@i8#UiC`M8>P&rv^0#?jNjAC~SI`zB)4a-c+l6 zc$h1uPcm;eQFwK>`yNJY0yzLUZ9J2vH8X!c8kt&!6;F7(R@0R2v3)LgP;yjaU?R-r zjm61U6>95du`#gfdmR=E0{DJUc!R7hZN>;Eyh2vg6rnCJl~qeG2L~RF^Vcp^-w z?0Q({DoG|kWMZp1-WEmdBCHx>u3)LiKK#<|W!gKy%+<_4P{b=p^8W5&x+9H~F8=K) zV<)YgG`@c{Zv12*EmEsN+hlAdg^TlOp{w-fM5#C-dN{fXPxc|Spu}TLsUo!V6kE5d zxHw%&T8_c9Xu0@-rEqj#p*^@m%ijjkCs#YDEPUl zxCb@qQdbPp@2KIjH=4SqUm9z)ab*vN7pA+*z)S_!PLf0t1~F_b=%g-flW!8r4>xLh z__{SY=ZeHvkMZ_k?Xit{lDnB7coxF;$m>eLsc4EsG($uTPP?%Ji`C;;kn#|Tz}QJ3 zqSlOol3}P7v!LlKND4b95lGJ`U0~(r0MjzZ0I?QI4}m986}b#4eXv?cw}iV{&{Ndq zCk!7_rr;OJwT4jz!-e+F>sxO`rQf{!gI{$URDS(-oC_IvjU3K2Yw#SUy`di=#LIww z-}x$aT)t=7RA2IXg072WlAw)I^VPF5(mIru8A#+R3E#^N^8oG7!9`wVh;(B$lGaWUCQMw#!yJlNHx|wA@m#D)=E$U0Z?9iHWq-{bmWUc9&;UR*kZh`^)7FNSt zTw8dc(hcmrKqBlN^dJI3WLa_3)l4g>7&3$9CX~fTfK}CsaDlwSw-cGw-sX5Mi2HXp&hl zc@k%TbnG}S=PUoMJntA{V7V7!;}Pw-{Q4C62#G8@u)CYH3xxa3yaFF3^bury2CDqz z4Mw9bd494p$(*F`ejgP_Qpe4m9HYuHZyK_YC* z2i4~+eq3)nm56#h1oL>%dE$YNN_q;9jw{Ni_oo{b~bMzBH?LC0D*BZeDBvRRm~! zJGfhyT!0j+?Raq#C4H{U!LWjPU2Jq!ngt!o!TLE1VWx|2vY>iR_AU%D<0=SNbG?X< zJ*=|1{cCrH% zH$-#E>Tg>itZ@v$FOD0M`)J+GdH4$xp_Na{eavtDp);Qp^E#wcrXxHf4R*qNB5B*T znpseV@OUw8wuZ(X>O>>0Pw$tZF-)`|NImLY$g1!)G* zI$6;14VW*VaKPw?1wh*M=j&-FUu436RAZtrdQ|&**?}8;$+OMe! zKIjb685PWsQ&v&6toK?>V*n{n(gB>;)C6`%B$>q@rC{2%rARkAGxOW?yiEe#_m$|} zR@%_z4YrMxj^Cb=2Jw}%u^2|_NfdoEmL7+Gjwi>V7zIWxNV@>7i2vSUeE9-$;4~;V9=xVchi~^067Q_3)0b%a^HrOrh7EWi` zB4-!3l0PJO2MDR zjW|*@E>P{6Roh=NG%%A|sh|3A*0+a_tVD$?w4gq!+=wuqOdK!JT9XGu@d5sbs>CwH za;b*op$n)0e6kd9^5XePm(W5Ut-SBBZ>t2cJRY=P*=wpxGwk7>&ZAx z3AhYPwkKQ~U1<)bX`q_%$qAZ?V*#?h$y+W`wzjI+H-+?>ir}+G6D{EXbm!lHwlna* z_jh*Ihkt&!>A$-3wdkRLY)IYx?&3g0%D9_{P47N|hcb#r(wkq$C|%V&0TOF)0$GsCG;c;u4?6eYQ z0fzWGbHwW9ah+<4@H{sK?M@lL)bQxa{Skl4tjL>M+Hpsd=`_U@KP#}rL{%eSAcB|- zDiJMc5PAX6Z4ekLO!IA}z6td7^vhRHR+}@o`)c>8z;XMm!GEw@@SnGp@1MQRko!Hh z+HtO!jHLF_w94ZlX0d^EyyfI$Ps29?x6Srj%w?gDe%k-A^&SR^l)w|a@QJc3Y4<-g z2{BF&#v7NsQuliH!QgVWjI!0(dx!KB!}IhC^v4_G2u2O28F)Ly54yuBsut}^X}lLL zzx*pwGTyGZt8(~?Gta!i@F}Ys*XJ(xh5DEqb-6I};F24=5+82mhWOn6lbyEzz4G+G z-qrhOTkNcXe_!TUkbdZB@qVCjiW0{0Aa)KuS3d`>ZZ>{&88 zHsWrrVv{v;%g`6K`H_40EZPS+^(Gi{m`ECk%N;JSXRoUO%@>__0>z3at_wVz~Z>{)SEB-bu{vWrv-qT4lj18f%&=#5z zxwwr5Ni|y2y&TSz(hl0a7}PmgL#a*8=oZY-)_>&rcpzG0mz1i&io_ZS5pl&N^dJ>c zevZS7q}s~~7BORJN;@+pqqw%FZfIHSioKtw;nv!m%+m|6^HWVN*|#&*;c@(TZ=?)r z&W0ikU)zU%UCWSBVL|G)lnZd@W&BbS9|RBlOUIOg9(8CpSvimKMI&RzT5^%z0|pqO zfZDdat1wZ1u^Qe%RWL|-uLC|^Ok9cyF5knl?BD;7ML~C%D!)V10>v83_-t~YJES$!I3JLID8Zh)zfprhI13;Kb za88keUILErD=Zg=Vt{FB0=1IxUyML-%|d_$kplm+)m;eWco!zvE&ScL8Ozv*B;)Wz z8g5OH1%2wifTHEWEC>YPiQKTn3IhrHW{GFqBKxu+F?1UkHcU;|_oDWvq*};CW zzQ9q4*#*MDZj2MOO!`i+C}o_$%Lt+29~f4Wuo5w*_8*qrKVRLkuYI76ug`eT4Hb{J z8J>s5PbF5W6M#|KERLBL0Y`UpnU`ge@O8C>!vE4@1UGB&-w#j&EyZc@876yK!PgQ z+eKC5=6NR_M;a!<1l6?Bf3!P&F{h<(=3qs?a^+PUj?YKZowHgdF_+|KcYbv1>s*$w zj!<B#feYQJ$eA7uvDa(e0YJ?ZNELgOp?Aqkb(ixDwY>S2rlz`6O?%+QU1pOcw6L zBT*+ZX~r{DAb&|Ia9_RcTpjsB%W>o0$akkU-An%FQQ2Sk>;T)s#tFe)(J~5pGKI#V zv86ccgg8v|`kv~0NN{TD7OL_(+>en9s`iN9z0ZEk!LUiuAuhe!@Qd-05i{!S(;(y3 
zAN_1i>pS&8SfjZOTM^2TFq#S}BC8Nxk{dlInJ)YK(Fe0JBBR4&Za0fh+&lbu@rhit znP6gEkNgvA1)X4>a0t`vgeT$%m9`AgxXGDhSd|gq2ZA;mMq&!6?)-j@`hcRnulTgC zD?P6VBvp~tm_5F}v8xI+JCpDVLq77@y*WZtO;% zYOH%DclM)X;&gO%<|v;QB@pJz0wdVZ%*GJ2*Jds)31^N9r0WfBv4nmC=ay;@eyjSX`ls&b4Qel5 z5VL!0^fqy<@K#|Pek{#t3b|Bg_SkE!HZdE)*v%{s8^OG@bo+0pXN2}e9@;n8A%h*0 z*@2IRy7w2ny7;Xw%53za&}oy;dguyF1$I84oJ%Dz^UbvBPK>Q-KkR!Lu7T8Y2agk0 zf^+`2Pwq~3y{3q|G@5uIPW4={HAV+n&CO5%8@f0a!A4#`T}k-8q6OKp&D_eB995*A zA02-G=xnx|o$Aq7sU;tZ!0W)>vG@F!QkxPOb}VWD6@%HJw` z%vStO-4ScsNC9pq>C9M!^^2_eLoVMVNy0QU6wF&aiBDT0a4*L5Qq-3D1>N>3e9m<2 zdsmmYDTDKmJ5Gl0T9xLx6&hTY#vjI?A|JU()Pl8}f=u5n)b~SRt*A25ZKRk~5`-fz| zZ5FTFci^U``RS6WYK#rKs&;-mnLu@;ag(}7RXUGH!kI*-iHH3HVgL7%hHTB)M-Hl4 zMWH+27f&P}2=zWEY>nB5tROHHKsb_v&a7rZ{(ynmmDLsmullvRBE2r zi#qh&c>cV0U$H!6m0D=*feRMyGtnZ0Z>(Ve9oZ8)k>tZHD6L6}W=$4u1)r*+7EQj} z8c-`1lQY^eIzG8=i@TTMQ0*4_^A!RP0-PLPeTmaiFRD&?jRg<_$n%dcwPUYG^lg4JyEspX`TpMKZA9~*wEbd|MsuekKx0Xa z^-bL1=uj+r-kKaWRQ*No{1xS0SH zWE$Q_IIT&DvVg z1I%(*I$;PnKsd0A&mWVL9)ABz!PaL|gE{r3HzYnC$nkD_Af89){ZQyQWISJQ+ z;si(ReQ}I~6aj+YQ}P1#OxZ{4fUbVCBV8UjTi-rBdOndKQ9#1y>yGrpLSI1r5efZB zq?ruel&0|#e7G=Zd#2nsLpOyR9Yvn^ZT-bp`W|Wa8^qqeT2a1r%VFvBJ&J$)Z13*f zCTHeTe30WMRE5jjI0-+BAsvd2y`M74Z}_62x{qVqgqVNV$=u6rzNPNcKryO9oXa9grs#+79ED-tx97uU1Ya6TF9ggex6j{`pRO) zJw4xtw?=uJ^zEsP2nslsQp=DErvlR=sVWBBZXr%X6w*ptD`_@^at-$>+h5$M3R9i0 z*ljcV{Zls2%*GfX0`p$fnluJ5ERxDF&1W#)i!{|pLee9=pSEz@gi5G_+K*wZfTeGk zvh3N5b~EShFARmmPmk0_TirfWFaz_3gYKpftw&-LY@=X#t}hpGQt)yDIa&J993m`G?339vi)GPiiScEE?wXA@jq0=jbN2^SWe< z&Lce$y9YudGM~{>wdWVjz8$(UQse&dn0kw<+3U~A{eqib zp*{tMX$78GfUQ`WR{(LNkxY^XuuB8n2uaJ_{h>M$4GMX&Dmp9@KhSjvkU zgBhwp$Q&Fk9Cc_0=QP1S*Gx`^W&ieWS@aXEZ{)+_*$P){l}#sn z{Hiy3xbQRdDyX* zb(ZY9+0DlLL^oHFY0}OXl79ES1mLhV1L-B+XF(8(n2W7!L{*h`Y6+r`_nsP4C?%kfTlB+?+dKno)7<#zhIH*$Z<@m!^tOYSfy- zX0F*@HnR>SeQQY9+D(3In7hq}+I6+#KzL*j+qJ2XK7!@Wu!iHK$VXK!Q=tq|Jj{QN zt_d*s|JZx;c&PujZ+ME*Cd!^=D*RHClr7s-ei4!oBVw{;3#l+=%!I62LMWBVPL{Dt zma$8ct;kqr5!r?r%NSeQwAv7 z484NHw@&T_-5_s~`Ry8dCo5AK-`KzHzLZ}p_r}3jPkWpsb+<=||IGxColsom;qDFr z@9DyM4M-=zdRG(U5S84QrS4j8k?Royr;gC&)Q3m-@0MNCGxuKd5zZUE?oxJ+FJ`_8 zl-;)6fPu?ZL8R`@=l0;M4BIEpwoEF2U3B`Yl=Y3b%vSu5+kYw{Ef>p^uCptQ^Tnrx zcf2V@#|+p^Lg`|WY`ZL5W^!xo@K)-ZphLaB{$59W_m!7@s?a)Eob%u?rN&(lz7hsx zO2T}SQzfz#4ry?c9Y)rJ5uXIuGGLPxo(*9Z-?|gGOoPrtaYWiB=;oi8#c5EnzcUhK z$GZ^)BAWoa@tXgFJU8TiK`;)xW45lOY=!(UaX>-;Gvez16C$1kr%riO4|~1Jx5>Ux z_cF=~@+hMf4`^trp|qR$5WuUm94R<=dlDwfwj3e34d|#R-MM}>bcUYxg2eY0s`|vy zZzGbfLh%K*v&9`xs;NcLDq+ABMx{CIVPXnu-?(MtUwXS7(J8DQ%yfZ69wd>&%6{I5 z%V|-+bok6fER{5k(WZ%+`0j zGjHf5_wq7MHI2Im54NSYRujueWZuLsBk{&i&;!|=5D6oVLq}VOzX^3T>Cxrano+&C zQ-|N7W9WSepGUl*Y{^WPa0rVi%>0V=>N)TWQnRz^-c7$0r5w?7^-tC@w3rc@Ya&dFojSOZ8ltKJW%OP$!+jya7ru zB9cKxg({|P6ljkW5H7rj-wUIa4PrL`z=wmHYD0z`pcKJ&g3BuD2gKnn15mvp&b8IS}K=8Tn_AOJIUFB%mnb;bb?i=pnOT5V;W=a5DsF zx$W4svrOVI2+tkZ=5|n5tN%Z~R2qEgM_~-^2I3)xbc9<)(#F5%F8nX7(zXAvO1RvA z`=enf8<)*_^oi92cpL@R^S4cKx;E^8K?K}@UV?+P%qR9qP<-A7?(L6s*gx4HL~A5l zKl|_N&JWgoI#~H1e)6}SfQ=O68t^p1Ul7~7(!_C$Kas}3Fy%2EDO_tG`=lzCYn1&@ zH%E;P6l=Q{to#^f9~V@+G%pBn;RFA6;fi6K2*a)Ql#Mj*cIf}ctrqwxtUNvqb6N0Riq^{t<=J#>)+-gc8b58uUAMtUjtf+|2%Xs+0 zP|CdX%;(Q{MXv8lLT-Po>XDm*rfuZAYC$r7X_xutPM|I!d-;{UsV#Jwa)G%vESN>^; z<=Yrp#jSwYCVx8}U5uK*OoeqHnZFvdiY`H&5MV|`$(VKG*tIa$l!MvU6wq5hld)3~ z?C77rAjJ;@*&Mb!*Kn(FlZFY#Xz4IvP53vM6gRg1pV&29%*yyL$TH}S=HCu(w8t+< z&;4JJCRI%74H)|uup!+5GkuKQ>Q2M-5y3NmVBTT>k|(<_jJ2>ivT2QJR|SXsQvNT9 zQXiN3`}+0cIHK6U#4+5O{vXmrVTxY@S$;qA|Cfd}_5WmT7|bEiy$*I4<^BtDvjVgE zd#93snobm0Pd@bvg2iv1#DDw+v4ZIdECb;EFDIxN=y!KZ?o%_a9hO7Sh0?rL%TfI( zufw4Fus(tF(VSc7@(Z&1`(g5Fpot#3@CQ)$FwDvgOd|K4lTCsF^bHI!fP4FbqRdT{ 
zFD-|FAdp1!17^+IaS9tr6kJ9tB9EDIriZbC z{u)ttQh9$AjoDM?cXUm9Szl`;d?*_?O@$|gpP1(cBGL`k&NlGhP`LOGKe3sEDNqw( zZ-DzG4kMAUqeSkW+B|H8-r*|5Hn#oH+f^U7u1-C<;ZJg2*)!6|FlP2Gxr8PI@n4XO zB1;*+)xoxv{QpKq7B`>Ipn`(Q!^~~~HZ`I!YoVT8D?F(ix_Jf`3uWkB0`ODt8Xu$Z z9|i;PeZg0Qo0g8~fU<=B@f_Jyd?JE*MwV-;G3CpNtATA9C~nPj0H*jc$OX3Qdv*y( z2uoG7W5D|)LRUQSl9)yBa*#?^0ZgX3|1WVsh~Q39*H}irrQhXF9|?5sKM|YE+{2Yt z0_5@-8bAR|1uR>n_Tuj@Sl;e_3+NM=K=nnPi-gyD#-7#}V$a@m_&nR^(`Z_(z>@|k zyCZ^F#7qfLm02+MPW)g4j2_uZ5D51-6O1-U^eCDaykUZ(QTLaf{1RbNKXT-Zfma** z7_F8Z7|J#(WllA7p+gT!EIR^^ZonHWYT_kuKlcb5oy`KoTM*aig6GA7x5_fUt?DL* zv-DAWm}V?nl==szA?vbx0QUrqy4s=CiG9%bqq4OnZAn;`H=r|!-hl+BU-|o9#xYJR z>Q9t2OBs?ySuPF`yb zpBN9fvXYR?c)ShK-o+Lk;qE`pq|yVY)J7Ifz>=Vc#;m^Sj8xXs?+BK(SA0O>7`)HK zv!h`#GdJTGsvuo_a>>u%B(V|@2dv|Et$yj2-6C;v*a9qB=0dAA17FQeAoIIjtbE?P z9oahhg)(^DFYcqVRid4g%l#N8&w0=QP9pX_kx{IqM@F22xh?NqNWU3sXSZNU9#gD; z$S0YdqW-BS(oR3Qt;F{dISRiAUIKHps2gZK5m@qRrGRup|pC| znf+ZN%c+2G+H5gg>Xs+^G#ynk-l6+ke?>h3p<(xQ<(*kFfjFRzNnBB55|v|%t2Z&t zFhF|&qg_KVu?N}!f z!WMehdmz+gg6A5I@#-#J##lmWLL3JT)Wl79{l;NY0I&Tc5c`jayL1%33eE)p#H%}k zR=9lY59yJN4gh%mK=*&JFO89hOSFoDxSpp} zp!cJ{0$)a7V01WW3_H3(fi~l}cNdgX-{~d&$4*u8G(&wOB9N72}jwQ;8Q-~pj6RR$DydhGz(f5)N@vcKDNdxQ)?`99Z zI$zR#;MGgH?*ez)AFHdOF)A}Xp7kn7>VL2^ zIaIkryj!A6gq?|L*FyZYU!ZQic}#$NKKt)$cl$383!v_f|0t6o%8rwXA%WloRObTp zX_Gc`ganQAx5*NeUurBs23fqfiTv}%De3PDi`zOL!CLWS+1y=YH09Q45-b!U#^q;f z>&F|WJfI3V45+d$C0)Jo2Y_=N8aRa-lm=!K4QTkn~k1!=Dthd1!4@Zmlr>S=)OL1JVQ1C`vM426?%QciYZn z3Ak>@C&HMKCJfoXoJ8VZJ|b|}etRo2upj?ql`i( z3tfO!X4DnWTS(xP*LSnk?%(%2KEGme#4SLt*a%mMf&sVkea5E?5_8B_=SdKV2pj#^)Q{P)aQc)SP18j31lUgzo~IJhc3UA$=vAo;%1>_NKYOM3{l}Z^j-LDV z=X!$!mD$2=+}+N8*%}c`dV1`W!RN^8y`GZ_gIE4ISdXf}x{p=*_SA}ZiQ}e-+#}$7 z!F9V$uLXY3?<4bf->~<$B)}~G*aI86^KT_NKC5>c#7pn8)k?>ZX>ErR7Kx69H!fF> zP1tsmU*yMFor4=*Ia9WHffxRLtB(qHp7$fOn!JGD4#otDwJ8gE*yb2aE||R^Su5%Z ziW+-owD-!#dCZ6FG2C+kY)Rm(GU%|lNKVuD-vT*<&$rZJDx?xq2HB1jrd@fe*Ezl) zN(a-Y{$+)pAXm57AVN5AQ3p7QK$EA}JnBg3NEq!RN48z|=hwO*pHWF}6f(K-Ho4N! 
z&s+4zoj_}mbws;w_%8^*o(j`=su?DR(#}H`U2N#W=6f;{5w<3^iA@qI1Lb3i9hldh zc<>6koB~ku!G9BF%X!>%8gYXe+VwL=NOn%_j@X-``@G>$UGvOeBJlO?HkRYiMu$nnt6TJUOxqai zxk^?-kRnA&^z1~=NIsh%_K{nvC(3Qdm!~x^iy2NWhhlfJMM#_g@kxbsX`%DwLrcTw zc1r&_Ov?^;Y>{XYnZt_e@v%ji8{~FcV3!%+Y=-KfY~pP@=|HnE^O_rC*}F;)^q*ez zOJDnVEOef?uG&B8xbT@Xg4YXYFGx8{pRirlHoxnAYp~ZVkS%1OG8hV$wTXE zb>RD|?8wsX7>Qi7(>>$i_|ZF_F<|hy4njR9Q9_J`RR%6Dv;G_%odYCfe2*K>O%(;7 zi%l(Oh8fp<9prNibrw_kD45uy7_Ue;4yqmA4cA0Qv86aK!NLLyGOFyz!J?r^ujZmc z5e+Xg^^cJ&Vmo2=Gke{qh_%NgD`R?@ZLDK}Osq;T*R2?ZVS13wq$WM+Ro0cvFoVjk zKSDJQr^J;yJlrXm3z{Oj8n3VgYSz_uGyBz#pIO|D!9Ze6v=t ze(~n(Q+LmH1S^>k9#30WS7{1FV0LpwQ6jY0dfS*9 z?TXsHbaB-IO6Oznb0WA(%+^(kSch+z%nrI9z97Y`(Zm|$8iO95v)Ywslo#^mqt%`> zNXY(_^6qre$Pc9@1)n!G0!K2$hurEi+|GH(B~p7Tle?YnnFdFj zfclQ(vjZu)v$itPS$0gfOO>HRrj;Q|x2hu4E*}k5+m)E(x&7UV$C>Awc_21rU1Pd$*FuV>rmSqtR$k1h{fa>{{&Yh!Un?SBPbj$=cBnx)2ibP<<)D!&)#sD+ z#evDQb>eMWS_gBs@1GPfUR8dAFUKv0(FEG#rj$bvdm5mmMu*F`JDd6+C@QJ{X#Xz1 zuZEUQ)s(n76|-fK26E}O-6;%_K(xx;PJ6LDX0~Q`D=zhEaN_i@8@Xye8=2N4puf1ij zd|w=8UAgx#h*3&?|79Q}v`TM3x({2~w@A;MbO#A1odaYdkFDqaiirzk>Y1@C@ec(Z zZ78?q;byjO9qcyC?~&1iaJQaN&n!UXl|8=H4xM6`^*d_v zTdYd9JP4a%CmF!^=lucB?}LS{#xHmrijV0Rgb6x3e)Hxy5^*KzM&9$Kz(B8iH(n$e z9g`dRj&&J)WybAxC#&bd6uSvch_tx+fVHuBl~^ZyZy@(bmf4mPY|{s`(V4)E2*J#P zika!UUl8IU*zh8j8wy*w0btAF_e3Q-SSyJCah7NQn&WfKEK=>2nIJQWWZ3qU|6;4l z`tD>~di^b=1l)j3#J@_oUVjXvEfd(O7e zO3lX?$G=WJfgjt6%F zEfi;AQP}-l3HB|T>v9|R=_ApX6s&;fg&mV=MTCwZ*SoU*iM-FQ;3TR3);9~=6l9+% zvdN(5^?!1T^0~DxAfNO5>h%G6c64q!S)0**?;Pf>Z=1mPSByWHvJ@r?LvtoWL)fx7 zyX-E}+^ktqQ8B-qFs~E6yb7e#q{k0M<^kH_M+pP8<4V)$GTY;xp*nETrS1^A-r2W+ z5S_il;(|!4mhZLCJl-F9r%EudH~!PLO52)91{0GExh62~pJI~J(VzhL_rd1u))Pd3 zBvvRdZs5!)*vFWCC@3lT>byXu=|eI{wC763<=y<#RjG7?jx*Z`SI_WC!A-S5;2A$OAmUh_El|a zH{dlE2fFaLXU6I^ySr?QfK=9;RE&H5g02bUF)$Vg>G8RN7bWuaoe3U=-Mn}1S*zE@ zTzs67c~_CWn#ScX|8|EPtUcvuEv@oamWcT`b{ zzQLXN$GQ!b&SSAw`Y|if+9$y}%5RD7SL;F42kT+j(cCD^P7s#>$v|jjO~`#v&kStK zBe9UR`mwM;Kex9YG>@8pRP4mRq>C}c{o+)yHedV1Wx zLqBXcBGsOJzL(!4cV@rL@obpty-adq*#%rcip}-VLSv(j#o?|&$8wuH_OOZ3?B9&{ z-ssOTJ`5=hR^IuPr8>9O4_H&%&rW(CVq~?Bx~5-r&3G87EvNERj(^Pjyu_5+#z>2A z+%E`F_U`z|Lzs@`kc=bcbh8NmYn?;pL%Bj1lv6vzcS?T?vCs-tK^)``V=LVr0|st^ z(XI|qaEEAre(W;c=Z*a9_}8ju?hY8g?^3z^j`TpYg@OkIA%^=#?}L&r)`2;U2;R+% z@U&&a77pAZeSdPv#hIDy{FL%|(j|cxZi1hlO2tgtW@<-us}1bFxk_icnigJg$JnQ^ z--31gfnAB_*WiBI2}I2qspcU5NfE(tqkR|7SG617Ruo)OYlQe_Y_mnIWI)Wp zr;p?Ut5SgL+BYV^i2RBnmBXThUU{H0u9hS=*32R4JMi!!_n=VOl_r$WwW=kGn^o0^7z+LvRO} zzL^Vnm$b|30q?9ozx#QoXVi2ygTtZU1;9ce$6wf;8TPFf&6T$KU5nIiaIYt z35rut>IK$OL%POPMmeXhR2}!w)8~rd7*CU}YRzjV< zU3Q;wu>FBq>V(OXf=&@zMx|R$5NsS0B3mievwJ#LrOs7o=X|Ru@fVa6WxF zE|U;kYgZrfWnajQ+0o9Ix;wA#@VjSXy59$$7m>lIZ$u~<^GtVL%J@+7rrm;2u_PoL zCSg8)dP-z99~A0cLI#qd&AuWiYg%b;E^%_!7B!+y>$zJZoNQ2cU0X^fc-zj>6p61% zPegGS3kRS7$P_8<8qyg+yaA3bY|O--1FnPH$EIT_{inu*>zitOix6cVpFimoZ1=vE zWF)(B?p&A1)+rTuOzYP+tCT(O2IIrwnObVn$4jgufy+x zobI(%6T>a&h1T!gmK`Txw-M=M?1XjctcGNgz|*Cs!|Yu&c!KY%&#&zIAB=5^Z+N{z z{y4Vkx&DFknxpWKEG_@+&x%ZtO$!5gI8rv|K-Fvm2%hr3=A>Zl%Jc7SUpz$}$JaZ*l89mmAG6yu-|_(8#^HqNNut}Jd?hI5 zOuM!~%A1SA!GZ8G*4RsW@*x!oK?7u*=G^qY%cZ7VZ&v-2 z^=5xqnW);}`c6Y&ppxmBue#%N1A!KA`#<)=v1o=g5Ya z58)yoywi2ud;sdPv|M-FP8-5z8BOmCx|qc1olfP zXjPUu3R0}Q6nI$I|DjEh=O=;yIH*mPDG8hyEh0Cu2k{ZdmH=+SAFn_}x^oQ5krmZy@VlsOxRQ<#^D+2AWA$Z4!U^<|Dl`r?_ItBJAN0S0Kz19 zPCTgRA#^b~pox!sovk~9f|E@50StS`Q!Dt@_>Ix(&fMsRnQIrFc&+cOIXr@|JLVFe z@}HUPHsDn`x^1p%8%OaIm^}kyDj*i!Sn4P%8gB6$y{#)1Eko~%og7%R{{xx-&}Vq* zc6Fqep^$;b=iHq$Qt_>g@#TeeOVfVFuiRF~z%2o~q2&U~gms4d=@5Lenijuef)Qog zJyi0M8Bf%){nO@#Rlo{=TcTi>)&BQ0O*%l8*l}AXg*mb4?Kb1{>WSS`m;)E2hpU4g 
z+u!J%t~H=a-RKhU?(R}s0V8s@s9X(9OB-=t)4>I|z6#oOUYwMl%=HsRp1jWex+fDh&KepwCv-SS1hwRqW_mM{#nZaa z6eRb^o*R7~5`RA?n~Z}Z#X8IE?UCBHW#632pLN_DhO2xj`?%A%F7{0hNCdCyBvmgW zU*pP$os!-+@=(m$a3b@`vl*fzYgFTvk?0cYsjT$NeXPr|#@wSHHh+c=i@yKhAFnxb z|E$!Bf_2@;nH^mQPbkB22Oqo7A&zkS@l_fTRm-ERUFhRXB}(s^WkkPf(bK5(PRb;e zIvso0R_FcHK$^%ZmVHo+5yR|YnV~<@P$tZ&VvP5KccJ~*E9_B8RF{yW!AIFRP3Yo? zXU{v1wM~AG(^u~itGV)T>nAc=tr$kRJ|D1!nmM@)Tr8fHi4Xy^c=Z{o^yLn--;Cm; z>5RDi=;Ea(e!bJM7f;_L6=jmvPF%Y0IIH!|(LlR9r|FTndCvS*)v5Xlz0Z(=LHrwK zrfdRR#E^1~Do^P(BWX}|OAHHj}f~I)Yn^2y5$UV)8dDHXwQrnQ8*3`cHAYs7RSaHD8XmDS&`P=sDfMjyJ~)bJ zDnWxQSaRQNSno?f4@2_BlKO$sh8j)W-V_3(f2GbI=UkRkd>my?exYX7fBUPq#b%l4 z_s^l0885EFV_J-L z>{#+_8|FjWF9LDpwzvNbUbvn6i?49v~a3LiIaauzTi_5;Lk(X}D8$Tdhn;3bos6NDf!ZJ?~7bVEPK(~_A zK)q3wmnMbY6;A!}VGt+00yM>1cKjjPxDBRDeapJV6D`)&__)mp|HYIL$Pd3)r@_`)qHXhG1%+ zT&rQK5t!f=fSao)-~s9JVcl`;4j@AVR1Ce(y}ix3#MU#0zF#=%h5dfrXbQd0)VcyYQk=($9A4v3B^QoPe^4m#RHp4{xo^d>MFo%IOZffnCj7qknZYP@6YNRZ{MN z4vuR_w9h=qlv2_-Zp*L-!0a(P<3Sc)a1F~;?pib~B+BPbrcvs7eJXF-jJ+6{Kl&kn z{d{Br(b5+O+ouOZ?ir%MTa_H7-3;o|D;N@$4B-xuHb?eWok{uho#_2vby>=T@! ziMPeY3n{b42bPAhKOQB{8CFA$70Xth%v}-=c)LSa0k?;fgK>xPa(l45)L?N4!M6={ zgAcGLO{JfB$o>3$E6lULv+?qLTg>Lw9`2rKmL3A#9f+^FuI13AqvTw8DowtwwpOgu zX`tYU-oP~Va~SrtOaol)rt6-2KMLj2;hTZF`}8cI;!Lfwjo#NO{DFVDm@v5iIlK>1 z$wyTpDHDdsO1Mr$1Y=}4-F|$qHE~|0OPHLcdCulXqTk#qyuY!?(_dDzuw_Cw-gxbV zdEp5)XAeVzdK-tY*Dd=*iUsbU^^R9N+10L{ygZ2uzK5Yc^cwnTpHN}A=JURQQ=7tBZmT+FL)zM zlzn$W6)z8BvRkj77nj{ez+=#YC@9m91|=`O0cBEG)AlE1dH6+WCTkA#Z^@Ktjxf}3%3NAZWJZhpzVwH&Yrkn9+Q=hd%}eU!A^!`XA^cSu>VECSyC{Q(dKN*W=#8uAd^u^dL+ zkJe%9d_{}0;e+I1Rnx0yrTVYbwJgp)%EnnahUVv@!F88us8sXwb*nmMo-0A zc;TMlMNYk~iO$!IRU24Y^o7h;$YEz!4?H$Jc0?4PlZfcWv850s#Rp6`+VYh33%$O= z*{z)6qZC*Q<5QN+rc_*+0^$^Vd3y712M|YTdk{v%zM&XRJ;$KMt8qq?y4M z^f}@pPom`XpFI@krqGw4fX*0Ld1^Sz-w@z;0)7O?*WrCWKRKUY4@38&CCzDkk&3EJ%k%ixIr?&b?|^shjNM=ECDtWcgNGNn;x{^ zjh660+}7B&5&t1^ceYye7+=ygE{mHkkcRc-#7JT*XZTrrak5)P+%z9oKn^srT=u{- zuBrNR{%FL_Z{eRqnL5c_^CNk{#GN0&ucb3;^iUi$ST~>;KY`qbn=(sK6~_6)m~$T2 z1IqV~Ca|+$jC%n2@_^MHbisYy1Nmo;(tq}|UxWW=(wE?-eYpbr>%1^bIjs})86?rb zj2->}lZ&~Y!p;XrXbRA_JoRPCtRXl;f#;#%ZnmhB1<}BZ*&Ry9@{elFgf>yO;O%&j zD&9PeeS&42w86}lBfOpPug^j^J`gEsGiVhMQbZQc|AMIKrLcp*BlMe&A-g~iyJ*)H zCYHu(3zShFeh}oX?qz@ItF~!C=jYOn133xLcEe7{q;Q_VSH0=ABy6X*C3kNZS>UOb z+Y|qyggs-N$lhV6vNvl?hRp}*k!?De5t~=-!a9Obiq>zm)~79o8J0s{)cntDD-x|D zo^{l2C2eZv45sQBbs6|^r$DY$bRM$}CBV}3niSWQOIQpPs}+SO)EtvN;^2NbMrUpO zCj|b63p|nvjTnrx8By$CKR7}Z zlsYpS%r!g?(hJVyh>!wif(3V?JZ&-EzBujvraM$=y1jVJP(ri0^fONT30)JVbjUo{ z?2X3lbu=z>DCG0hiLdUKd71lNCh)!3bRrMTN%t5@Y%$pvh;7KMq?jUXm|OPvP!HtrK)KK&%ISvOJrZ}`$+H(Ohq77lJ)y*lpD0)p?jbQ zn|6COsh3qZrkftxFphT?s>;{AAsgzFC|LlBaCnh#+mOFrFg+OSH7Qn=TBzi$c*&P; zEKbwhBdc3d9n2s(RSwptoGE2LD*j#^TQ-A+Q z&D7jT^+PGch{Bi>iNcQe7Ee=)a+kf(hS=BrZyv?m?0!FFI*;dJ2#BzQzN335`8h@4 zktG=ErajCrYAfpsYCPU5Qe)#e?oNrp-|)uVmW9nhP6~P-$`Qa{wrD;ju$>y%`lur< z0@pOg@U{)won8LwfxDQ?VyOBT%{I8eekbW&PCq4VjWaT8?WA6+`MOH|nYFm7${L_! 
z7wIe`pe=~NnFeerl`Q?s!-SEDs`}+cjTa;xkDk^XhV0MSM^by1U@lmKemk*Zge}Nw zhaN;7IF?vqikp}lwiIaGS9>K&4&g7YI4{6F#m5#Rw&NwL?4SfUl0m}#LL|;)uSekx zkN&C=jez_o4=%UdcHg=sP)~S|8#Ymz>h zX^9#f*7>Q8xebO(50(kxfS z$nbEmfJu|5n7h^MS(n)+Bl%2R$t9(4z4I-P7Q6yRPc)qGR@NN zL2NN>3ZV0mDe7Xzks-Fuf@oslXQK>FJ3W(A%P$^Y*>+`xPh;M;unlDS0 zZNN#Z=Erm%lJj-U_e}Iy?7zA*K>JR0itAWL0n>2~;fXrV3}-#!_5yl>D9}XYOlus8 zMjvISJIaKS!MypWy=4ZL)unK=THYOBmM0GyKo&Y2JgkcgKkcR5PpI_Cv9F%oqcW&I zNtKJ~`jY)5tM=Z_xG%~-g;QfD7SYlRFbTkYFj{$;uFO8@^o7gE9UM+-on(Iz$*O%X ztCF4kB>vikTUX)F6dBklz^RkWKrw-f!8_hWtHJY*JlY6bW5wMwValsW#+iL;QBG() z#QAF3Eb|f<(VztW-yLQPJ&a*M=c7u}PuO1+h9>hoa`= zkNI-Zsp!vWT}~dx6U#hS8;j;adD8-ZGcL)@5mX5`&w3p~F^me2n3ikle2nSq9ENN` zZC?*bN?I0%zDtuEsh%EO8Myzj>%moR;l^%qUfRht-KzQl#8BocjxDIqFk-@#TNRZF z>tqnsf6^&QHM{~SW*C^t=a;%tHtt90jI6~WzW9PH8AX}8n-*kd_PrLQ6JUbw*7OPD zyeUO->>;T-_PNE;BX7T(2)CA{@#*a0#@_Y%!OZ>kFwW2dj&QrqnM|$yfs|Ny)oI#0 z@nCrC0T1F!8e7U=+-!7#yiOwFqQx;ksv@`GYj%h)wQQ;2zQrMuGE3qWU|e&}4#)iP zid^T2S3ozm^zfP7C}>$#=TC+l^W5DD7aA<`)oNL2QvnWSm~P=WRj^@D#!=*b5J92A zhecSUTOa?cwlDZ*5W208QwS8=7NmGJV4LyRWpma_Fa@8 z^HQw$y5?jmO#(g`jVmfQeXpOa_@~Z)cw}rML4id5@#|GpumsH{8 zjm3xH9cq{e)&&&2mbsUn-CB#-uVWPq8&%rzdD6eQRyy+@y_NQ{Yx+-@!>{?9p9v7L z)kNw5O^hKow5Dq|^?IYMgCcP(rf(61Ox@*6LdC|niIYb}{8#RHAELKiP5v1qU&2w! z-V}i=wGX{3Fvd18>^Ucc<9d&I7CHv`7JP3juw9j!tvH3;v~lsXraQRC3uYyFGVu%t zPUG()G6JhZPmUJYQn#GfNXt)E?wG(o%l_pwy(fwqv+loVb)@5+s_DHwe`;({c$! zq{+q_j1NEexLW*8%agj8_+akSy=$TM!2&s(In)>uT7+#x>*`dFAb=Xj9aRZA$e2!k zgU_Eb>|UG_)berY;`JlcFJAw&U#Zi$_aKF`*czi-P$+0_C|%G2v2Fj@Y`e1A_5<{tpRk)B+0u;@|%zxjeoU_g}`mg|~4&{8!iM|K`7MV}n+m zmIr{ufmnm5f>`Mef6i-+7c4|ZD8YsXeQen_u<$4hL9mm&T|S)K@M72P6#3w?)4qxO zAMHot=5pYj#9BADND<>A2f%iBB81yXJanqs2IWB)DV7_Us;pS0zVuzn=u|#q+UB@$BcZ#lH6$6ySH%>>G*qepZIBV#q{z>v+giy z4VSBY92*iF3ttwi4sN=i zG1zunWI7@f>QSpZHt_&nzP6d_wMYC@X`I3;wKrfZ;5*3vg5?aIJo{JC=8N}CK9cn9 zU|wXH*-#a$=%dlTR7LkVg(-H;zl^K}R5P>+rzr2p?q5uj69bqfxg6D6XRy>! zAjB8yfI7|e%0sdkg3LsmE$RwwvA>tMq63Nbl8w;L5Wf7vU(&er%r3R{d}! z{v@2_xY0oe&#j2(Np`kV%j{ZpuY<=IK3u%32}x)4fo8o$3D#94j+EBg7mF3Jg&ky@ zFkPiAJXLPkb?^~h+8EpJI&yhv`WZCw{J7Sh*{u#u>$a1w&%fq=`l;EA3p82jke4Kk zU}Sy0cYVz&hlxzQ&dC{0zmrr=bq@b+AW6|g9`r7B_lJ>ovrs})rSp^-zekCm+QJ0( z)cSSwmc}K>(TbmvDzo+FQ_(c^A=yMtA#7p9R@nd*K9S7@=7#(SbK~R@6 zk&)NRPkTK|X&2irJ{5cW!a-0|`=e9DMCc1dZ2?h+Wvdx3=7NiuKD>_E$&|G;gL-Vm zeI2$vm6<#K>LqVd2)5#JQrXzYW}JrPKJP2#WO}tw!Mwrb%A*^M(SSuiC+FzY9RuyX z=jw_)Yc?OivaTwwf(jO(D-~9LkGs2-2{;{`OYz*@tDp!ihWV4FZHHWCWYQLxh>kN3 zH(MEJY_JIw5#Wd^CJI8r~mSvi?Xt@rTznh#3QCeJd40;-E z*Q;MG8!PG@(!Xa@M_MfMvxns?heb-&m;s*WCvA-re?~B+Drs$;YhUF%3h%I0W(#eQ ziELpZ(m^%cQyqa|?fKi=R!zL6@N)UmDEtTFq7*zIO@vj$!!dVEgBW+1ZRDBjAJS)b z`Agn1SP1&Klzs5Hbi_l4?{kO`fXfOKE^`D7kQQ1t00F_swv9(OSF6`F9{CjAVGM3s zs)!?Vt!_yK7j5?Z?zei0h2q`tV^2*7^u&<*_H?mOgnh-=JIYlHyO&BxwH^a8#&p>e z;*orsq(|^sO{vxhS!rvLAJ6B`t?1DMZ{n2()|of$PkeuuSfu>uRhr_mSg@YGKpl)! 
z4y%e!n}QL>TJaSE9RecuNJDbU6VZ{T!x`N!DKo2`%*t)Sn4x;zIE5W|F_7 zGnJ{;WZyVE?Fts^bsC8?_;9JA;=M_=NtE`R)%LKNPU*8%@iB@F0q&mrtmEv5w0x>Q z89KJWK+|g3$CR`6cQln}7Q@_={(S6x^UrN=7WsVcw=KV@IA~K41T15agVQn1WGi!$ zxI8_y8bp;cqo%fa?zEY(`I>9*M0$J3qqS2L5Rudy4(YAr2?25eYav4=ZfmtGr52sK zQZ9a|%;u3^AIW5LBP$!-@TnIsI{+i)Vk<%TqzgZ~+=`)DU_w8CfX3_S<`Fz=ON62t zn8J!K8c-e6jbQjR?itMWa?^24iwDMYLW9Cb-K#1JI@j)d_zZajtT?(ocaD46w|vtl zu?zxLSi_Zr3zs8`uO6W)PYLWoHzNQcYw0C&CgVE&SPZOEzIy1C+%*mF6sOC)Mm>&q zbIbTd$S@(i6FweKGZ7#Jf>SPocbed{qhrIBGvoVghfXTZ;9pQ7rj1A8w~~U_@rqLga1*=wP^eC0Npc9SOm* zKhC z+lJ^2jQ?95#O>E7Qwbwb1}UNmK!DI5!+awEMvf0<5A)t0D1P8~I!y+VHlt1*o$}H8 z84GWN(M%>`q&Wc*%t%x9FG$`GTrG@Lm9C=)LfN75wn+4JtRK;XaH;-`zg5bF5%jzl zLu!iqC)3v$vt`6dASKYVF`H{ZAz|OaYBfS&q@)^XrD1ewl_qMpYpJ2@ks*P|i~;4i zVOYQc@2fLE3uJvu&;aPSC^l)ctvRm&?CXoei=kwF`KOy6*VKw8B8n3{eaf2S3O@SP zmJUl7t??OxR|5e|*Ri1QOKMm#WX8(2?r4|l1iew#In6DnrYFCtJOMfnhgF)fg(SOR zd$|H09=UVY!~3Q$OvqKJNaiH=o$QiIUK1CwXmM&R>b!a&zgx`5;RDp(P@_k$WM5vK z#|WeP2-@Ez?aaU_}Ta1VCm03^L)RCtJlE0;vOqr=0ox&OqEiE67|LT2UGhpE) zuKwKk;+P>sDcFm30ld=9rqfKr8?@TR4HgWk1rtJcVZJ} zj=}GARi= z`smxu!U^91aTfz4sDf-L*GfLP1dj?i`t0TSj(a*+)iXrn+^lLXdh(w3xzp<%#^(luqRJg7z*)eyu*5IqASKBGue>Nw@rAOdFRV$a)wQ+9_AHfG z)W6g^Y<=zSRb&g>g@ePmOh_8gts`^LUxEK%!Hl$NQ+i_Y-QIKcVkunqz_`hDgl4m5 zpHB+#0(NX4NNDjLt0W_$(DGI3@nO!HNTlRTN0pQPqo@j0!l%TImu8CfJ72F#c^?wF zH9dS>oic8@?Ax4v6ZYJRj`Zx$tBxIub=GY4QLj}#gHVlMU^H_8t|7h6IL=A&hw*Sn zVEnR!s!*}0{cnmc_%QO0iws-c5bi$QV*a@JKx3crn(t^fh4p`t_ufHGMeV*Yii!e) z4M7m1(g{UGKxt7C5D*ZgC@mr&O+=I`B?<~im98K}q)Q1cbcpmOAiWccNDU;E5JlGi+;8U0e1AANA?(S{UTZz&R~}!`=nD{EMV`3d^a>WH(mU5HMOADz zMmrawWC@89Cu};*7rbZQAitnNHWr0Dh{cV<^#{tjeRPKkm9zyv^*=68x^8**4(7Lr z!JVV&0;I?kFH8r94b^$LRt+rkaMZrsaQ3N84UYS}wFCo?SN0Bxoe5I#G~X+k=N>=$ zY}D@~S>rKKOg%>x^PjXlLNuYB2DOo+ONb?JpD*0jO1V=qb~aDn+DW`2pf&pT!=g=T zhR^dvOs}>SK&EMDw&u?UfAh7a@DQ8yTn;U5YW}`c{?fwgbPS*Bm2>Azc*5^gZgXr! zg_p6P0;m8{6~pGpeW}LOVOlK4X(kLk8C$H`c7rzoX9KDjpfuE2+nwE@2&=!B%*|gO zhR-^gl?@mCPPiHk`{CKee?oWLcHFi1W`DK)9Q?7c9@`4_+BZiN;~}6uP@-8fGQCL; zY13fN(;0DFTOWK3$_9Klp9T@YwNa#1auGdYXUdxVk|_Ibqq&>e-fFNcceE~fO!uf$ z$jhs&(gAXbieA*Qc@h#jW=rBH*8+B5wq*@HUg@IMT-~kBzQ;pf>zJJDvQyusNV_s! 
zEK&d_y=ll%I60r50Gv`CXpZPo&&F+{9@+S_4q@BFIN__6kcYYsPT=#|hWQ0ddoSg} zZd5J%Ow_n`o7}X_(AsuFeLuG%s{HPUV;0Y>74KCslwTje{+*l;7d65J#^p`ZI#<=Q zomK9xHB@^&J?o^EdQ3GjdQ;m3qhVJvEjPZ8cCZJFy{{6iqUvrc0%-Qii3(yXcyKVi zediV*(>m)zrl7^R?B*G3PIGdf@Xlb=x~Z8%I;nGpzLlY zWO-*F4dQ?a8Zc+wL2W=`B-1E9&Y2F?Sx2j|?+OY}$zAXc^&Ptye;;wBuwOA6X@F|b z#m!`)&mgRw^=4_n6x&Vtr54FAzGbU7NA2oZs0@38)#cu!P2!(^@ePsCwDX5a+OwWv zZny(9B_fudl|9GflUtbL49Ho|Q*!Sfe`+OP$k~W;+yiw~%`Dxg33m+j1j+7I>N)X+ zSd-ClVZ0;S>FtGWu}TBw+_~b>q!Z13?@T0h&9t@iuwP1kBucdobrri`?&KR87rlv$ z-)tGn9I#$?2?LJgIEpCSyV*QM)GOg8ID(y~Q0>){*-ek@e?LUIFOd2op3Q&ONWYz- z;y&4qaFY7)!C&l&sJao3=Y5(V^vP@b2@iD812gu z%<<;wB1kiPT2yk6{Ug`whwnPGUUkq-(N*j6vwl`Fg-${eT?DJM*HrV(-*i#F?VvW2 zeOySGY39G9Vgg@XqrcseqVl-2(}l5^DRU1@msc#L{xOOKYxN$X?zreb_c8F%SBEz# z6=h@sVw3+Ex*XGh8}*reO^F~q)_fAPqF1`S@BHPyyQiGYTh<%Q>4`6%rw@K$D-$s+ zFu0nlzUd?2pcS{Gb3dzax_zI@gcZi3uAD+O_(L*n*?*nUK(svoDx;&G`o7N=PsaDi zkkKXHh)j^@w17OI0X306J2@>+G2AkpTJ9+V6zAW4FCF+<3vb16%QcM72L^t>@Md}b z1*^v?G`2vuZ+to1#Y~xP()fO8Q`5LnaLt09OO5r~H~{l!5rkDL!uJV1T}cB_+=YBb zNXU0;ef63JwUtbx3lD52guM4YT1Uo{gR$P&q8$+|cX25evS1S{v(wq^k^94N@gH9Y zQXU!vb8Q-3Mt5gFqt2?xXuGpT3$7V64}aL(^d5j;p?!(G9GsM7XlLx0BI~Uj!=dov zbKHf$_kSywaRm%O5O$D zENt25<=QlnR~oP_b|FZWWBN;gFX7eQf1!W*zcM#y;J+(z00FTBQcT=!Gt}rV=+-SI zS3~*mJw0$Lw1QDdXYpj^CEUKy`fC+V@Kl@wf%M=cF&1hGW2c{m6TVSmp<@+Mlt=%8 zGO&$VWNo9bS5Pwd(BamIX|Y$h9|{H@nHl6l{*UI|TB7y`t}W|a)YDIL_XhmL&{PJB z=!~+)oLw_POF~$!KmE$2x%N-C?*}@pe;(-g_V#>IxN&;8nWMBu+T)}f%4#UYKAv`7 znJW3vDW)NfqS{;?8OyTfv=hm-wRG#8BAK<*FK-7;en`it9Dzpb49)Bw-LLH#zeLNuU}aNhsnvCr1WmLieM=vAcp@4eBOVA3GL#N~GxhGwvaaX7 zh&9aDCESg7*R}7!_dQeszh`7-$)=r;r9;CbI_JS zxNpO6I;$K6e3nnR?F3UOlxngCKwTyn=x}4bSfJJ?faUVgY3Z5?q+W?a_NCUVf@Y}3 z$Ts5VJedXT=yMcl;Nc}4jM&B>_On6?Lk*}+Ys3-6+HK+@lstpUhwnD+eE1Jk@P9d- z3bRT42P6dhpODZgll|b4Jum3HDkR@WHARiuwW9Z<1-+E?fp#=sgMw<`e2L#>t`FYM z{7d8i2T1f!CchUuz%q7QP-jt3Twt`zM5TJ&;yHHq6$QUf!jELkZ!CrCJpu&8B3ar8 zaQ|0vGhg2=o%-N*J6=F>)GC-i6+WXk8bt^F zwl$W892c&^l)Pwlvv;ah69d9SKqJ(PE@@jsld>B;!2)U7U-rLHDKojE@Fc}!1okM2 zDP+78Ept`!SN&pF2v@*~`pwou%I{9rMi@oYj<^BIhzh!e^rxeZf&2K%se2Y=sH@ zGj(PfKd*{jOxWzzOzQ6xeG;VfOXK|TrV86YMNhou`9S1XB+;OE!ny@k0wyqg_0^(W zC0CgChDeHEt>LLKstlUcaS>>Jlr7Ts^udiWrt1xi97Lf&st9m4IFGWi$2kD-4MR>D zmY~VzE!mk=4qUBjsf$q+H*02Vym=ZyJaYn&9xfcIeach{7!#)PB3nJ3q!(kp(gDVn z6-twrf=k?2?g#vikDr^KI%6=f0YoL_dw*xNJVi}30qQl!;&-Ya>@KBZ*3V$P=V8{p za+&dVdG8utbN+bNOz(5%;dYFq5$Kqx6V*+Rw9%SJy*%Y`cC%nAiY91x@2Kj{#V)k) z3wgw<%DSS-k_nB+9yDa$O3jtIex3tp?*4pNGDkDODzM6OZR8IWeD-wjd6{mqaisjC z{6@vF*P~3-nnaaP9LvLGuWP^KL=2+Dy}5OeJ`PLOlEKcpcdxV=Wa^kI)hSL8Efiso zp>aG>A3d(JOHM7Pzr#JU5F61aWoWVGelr*A;wW94T>AEhq1x#Eohyp#d-XSJf)NT9 z{?jm`0pR;2w)CEZ6_M-$YVSh#2FsSd=#|bCZt*m*UbhcCV*e}ZH?kUdWl#*K zuMQ(-N?@l|&P~(w2J25c?n9a1w6j;`!hd!>uXrige0Cz?fxqVC3z+TzAiKR(hbQmS zBWO~@SdBgW5V^9WL~aN<#uGpp<88P5jo1Bv}^mt(_C zKcDvh^?7QT6k}>u!t8ZdKskX=Kg=K$#ripAcsVmOYE@oqaGUOOw_?~W*VxA?XV^aD z7IVq9&JP+~mvEohDc9J5j08IID-|fKp)=s&CZtMyy@z`S4QvYiqScgJ|A55Z=5Sy2 zUZ-+~H6RZu@y}3w-WLvN_pKEeJIrkgeo#A=V;3cCef~w&wPj>9lKA|CF!|H~sYl7g z3TSh8jW$2a%4*AHa%GIjVv(V1y^Mk}*&pv~3Spda{1~tmZg)|wm+%a3DNf|5!K+>n z&&}BgIV3CBUDxj8Ut2y;%C)qP2H2FIe>5`Y4?+N#A<~RKU4JRN5p|HJ*1v&Ww%L4K z!Dr(nj-HA%Q43XFI@#l6(t}`9IiW`dgPHhg97K2{p9qrO+-aJvUp#ZrDvBU+usQnN z{l^`!>7&+GmDa+`$YeAY_Zm9v)rfotBkH+~;MwXH8Wp1rfH9Q2TK`Nb&lB&( zyR%cp{1J<+U(Qz@tWn7gYJ0rugY+=(w_GW+<{)QgVhQehyav{=<-(RGQjuX^dE+2V1_~4#Per6I0AkU4Cgi#`HH)z zvh@FVh13EPVvzwU_*39_=ZvR_ZhS{h1kMU!W|O6k(xffeB#b8vtE++_)ojOpe8(>h zj{M>x>B}tDhF!$x*D*_MxT~lcz+*C%3Pcw562gczlSD$=6Ou$h z(I6&cVBiOvDsN$OWZ&*9lVx}7`!A`KUcEUz{=H)kjf|x^Y-yaBI-D3__CS zb{qq1pJspJ_LoCP(?2NmAqD(LVCDGlh|#=;pOn&h-E@H$mS@c*RQe88c#79~2pJyc 
z9SZ@&3~NO}p2Pxm?xRB<4b)oAB2U_zx_2Fp+`lvT3G62CXmhzfsCAxTnZ+sC_Sk`H zdd4hjrveo{r7GTH99)*E1Nt(^;oyzxobzwA`)i|B6nzCyAU!7HE{#b1A_kAPfHNb` z+pE3@6~@fs&((|7B$~}fHDbnL-#^fFC^GpPh4v<=I#@mRGotg9gqc4dL(4DYHyCC`F`;&Ba6(r?S~kZ}a3#{1zVI7~^@s8+&ig?_~q(sIMj)HkaaD z?s!k*Oz}q^*Sn`LiKVKm7&tBIn}Vbi2o!i!x*t>x#KFXeK6+%mGoq%SS%F_U7!#_0 zEqMxU5KSJwSzbOU|6Y_7v6^}GUjb_i*^)^Wb z$?qqmTf2LVLYDrP+sPp zX!EO1ediSv6o7Cive{$_(I49QK#VP>sZvty=X#J!M3CsbmC-j0L87-!YHR{JngWRK z$4Cp=&CbuZ?vwn}vfBl*VTSfl7Y}C2`n-Kj2%$2bj44AoMUBSJAPJeXJiwC#Sr%_s z^my1bB39hQW1_f{|4IqrDQlkS{pIhN!X<*fo@C=d#!_q2f={~R9%OBHlGQ?MlN>!p z-YyRTVGu_<0VCh-ck8~@+ibcUf_+#xFMU%+=g^bE)4S>VZ2gpZ$#Z^ZT#*J3z($j( zXB_o!_rw*yH65 z$1^e)K3p1>7ZO`)V5#$TByAJ6PGLPP;F6>*Go0l%3XCpG+eLMj5y+qc;d3?)D zuyVY0Iaf!Pmow%{BZ<#8DA%BXnQ(noN`86*b{*=*m0%j8#)fDS{%DxHd(#$2ZKmgj}{*R=HRn zemki=VU?rlcGY+clZnPXy>tIqO3J)BP;b)l1GPp2RHy)TarWn0Q7edZKL+83q< z&!gA&z&&Wu<5ZX^{R4AsH7~z;wL%Te2_#1hY)pS}!K^d%>ZD2ESmJnn6ulJ8!$cgx zJv;8x1nQEO<7N*Bo)A0rcuSTI8n`P{q9aM-rxy7hv`(7f@mgZm30BsceTF0OC5QVV zTcUH;VJV&j{;z(q18ODIsL%dap&zMClm)6%FIAS3wYfiWyRxjJME-0P_tc^CiL!vc z61#d%dI^{f2*XCQ&yeay5$}FV2xi|;}qeb zbW|b%2BslFFRJl|IE!De)v?O}G|RQU94?qmLs_fyY`8JP4fy8|zbC^G@~^su)%YS-b*6?&UW ziezv+2CPAC{t^`DEZP+c@Oqk}Xgyf7E~?rj81HJ?4;`{Q2wEi{5JP(cBJ)CZaI!W% z63~z?Qk>`iFhs$K+PxumEead&%gCk00g#jTRjOH(tZlf`5ih=#gcF`=XzdTo zxvW)zFT0(d=gR;rT4~xD6yvSCwQnbRUo}3+&oJUwOr1L$@y2pxG2pU>KJp!!c>AU+ z*w_>ie6v(A^6-v>JCdQn?we>z)cU$O;hB;6B0mGCV&b6#O>&p7Vf;@v88yy1Ju=L# z9P2qX%+~vCv4E>Dm9OruSlsoe3!M2D)i4>TgN!5Ib3n|bBTha7LbG%Yl7)IMy-x6F z)Y?;(0ob^2ku@Bucs40AhjT`_;3vAsZ4_^kOCzj!V=d)Ni}!rWlfutiY3f;=>T|JKPwy9gBI@(h#yhtqXTV@Yt-|bC;%)E5R;lq_hy^5? zhX}Pqe6!0q&fsUQ?X2IkHM6=5=?Nhi&$!f;qZeYX%fX$keybn-#TSI6NM?tiK-I}Z zgZ0rotyR!}7?`z9ocT68CB8|4uqSyTK}}+FeXJ-|-g>-V5iFQ`|ITV)+B)lUY|>Yk z(p%`OOG?Q07Lt&4*pnZF z8d&=*f#ixox19P@OMGmlb;PAoL+ay++naKe9SiRJZ?N1 zf+V4K;xgZ1qFTy8lNIlz$EI>e$V;x6xpQ~JuT3^sM|m=)G2$W7<)o-)DT@61fClkA z-myBcfm?i2u$XPy7%fqoD0b@)gZriD(;sDtU{va9vH_)(ssVM8U-dS+j5{@3GFkBX z6PC9Lzu zakX`pwB8Z68eQ=eVK72zUWxjn$&u(v4w2sQRnb$^5my8=TF>Wa1BHb!uso7 zLULx8<_4P7J6aSUW}ss{Jl4FbTEwoBj`bPq2s>TH{6ecz?1l?~P#&b0yKpn7t~wrw zGrMcy?jh9JDAakaeUpr8L&IKfnCyJRms!52UWR&8xkyOD&|Crh7KwE@x+O;|Si1Ms ziF3D4-cFumC!dZ~&Lal4R*+3iXy+5NEFE8D)*>o(Hc#YL(Ny%+_3}=n<3s7r^jyGP zdHX7xAP8)5lF4BtzjwF*f0%v0nMS&!@$4Ejz<#7e7bH z9Bz9P)ztXT#g46OA**NVpW;rFBv*zWI5GQwhdS1A?)CA12PHAF%tKuF?}A=h1!#}= z`&921e?UZtd}Bi zk>a>}Wfg6w4{qam80V)fIZy=gA#D_kqog1erd`|^#ur=$?1R7f$N##{uU9DAN$mzp zdGO1oWjxX3Sgoti5lkA8<_0GW%RGfFlg2$|o=vLU$7Nf>cXMbO8xMM1^5EXEpDq6a z>_C|e{VTt);u-Y+hoi&>|GR=9dJsFuEj;l~$WaK!YY*_q3sWOI50O8FX}kkp^a{b5 z>VAF3qDhxz9PKu`_N_DvMVh?<19}XiPQr2OAzwlGM3WrjX1K`&c8?BLY01#+FyayH z-R2EXZ*K9ki@Zq}%cA;mH>8n0*OSVxlt9}!8Pew$<(Iy0p+4$IX^9k`juV06^luzP zegGFWg9V~FAQ}G_&sTblKAKq3?e1IC(0b>Xr?y1x!w#eKjEw!MUS-F?8pJp3~00N`s_Np77Ug#Z8>U z&Nu(g$&r34ED>E4{Ff>&N%5m9F9Ft}Fo9+EVXchhC&#B!nP!4j#Z_w?Vj^2c$&|z{ zRDquk^&->-U>l8D_7Hp-C+)O+TY2i$Azy8mh0$ftU@xXtO*0NP_q^s5+w@isUI!ocJ@}}Uy(hJ8}E{U_yoFYm=-wl7s1zuIfrQdDWKvku=(koEUV7$KP zNXXXr^(!m`cvw%_ z&r*;LNTLNSm`{@Ak>--+pg;aVX4<^H*z8-}^J5+tZ|n+l6o_3dq9pfa!u$gh8;A4C zUI`Cz-_O>KaO%&hThSpno7nc#qbhNKZFhp(U}G<5+g{h75)RJyRV9tIe)z_#Fqu#i zU@+n7Y`}8xIftiIp({u~O39$G2uSm9epMelkD5R=R!ip1I6E#_RVHgO3zbL#kNRt@ zI08JVmC@t}AZ`#~y4Bzh0%+2dhFq_b)tf|<5DAIda*|5KWtWn@+j$*_a~9CtA+l)>Bu>4d?6CH}vM#E#zZ}sFjI}h;_je^*}4=yd*PlG`Y`^h(`+%_+)J?~2^L8c&7zDn$*@Ey8kI+` zOyBHFk5`pi9_g>vTSh8FoqQ+zeUDM)y`&mnFP$pEPca$hEc8+k`;S{V63%b!7}EngbPS37GbCxsoEI2hh+yKVe1Xt#4^f)eOr$n z7?-^!PqbxsIY@CsEvPW>SJ%I!N4b4+gN^aeJ_}YL_(&bK|8RL*dhB*39lq2_U!yG#w0Kkat6CXC#jeVGYI<{EAU?pLm_U#(P^77DzDlKlbcP~$%-^k| 
zTEwrTFSWdMxoA;PR$a|uI~yMtd0^$R-+O|Zg*pD5L9MIIx4rpN!t!YKrywDBBL|%= ziP}2$4oonNsDT`d`VLO%#P_%-ejd~ZB)+gYGm5)h>i&aw-6`GG-mV?b`fk5rF4VaA z=1}0~XOnISWFLVbt9atVwi>N7599PoZ044zZ8Yk)NxgRA1N-cU7Vaw8`Wad3Do~YK zoS0*6Rhq;m{l;(Z`9@eL&+~UXVvwr~n+PG4O%z4IvjIPfC%>zwEEBt$Up0T|Jwzrt z$V>2;HOGE<{y@ZT^vG%93+_Ud?0_=Ra2#QaI!W`4oi7Y5@Cv+HnS9^EXqzhfg)016 z;M9k-Svx}snqmc_TZ?Uvp{LoHj?seMy&DVw4=#9pe{8~^=|Ac^9&TvRLmFK2vrKuCHqli=*yK>^ zUS`{*Xx`)QKu>qjBhwHHfwqul+eG{IlUF zO7k4K-0cOJkv}(pUAi6pQFG_1cedxY6zgrr9y&W$^Cgm7 z?9QowHK^aUh))yzI%ENU}Y?B5Unslw&av^hJj<!)aeV zDnRHPZmO{QPn+sB^SB%z_l0vS87yrscfS-|BtOX%`l9tiI9#JvK0!sjRi;;qj^?0@ zfdmPh>H(vxh@vP09hgkB6%$!iE(Ok^$x4{Ky5$Nd1VKVeN9oOph`_aDxDD2A%$P!J z+WLZTefPvm+Ag3@q<4XXAwPer`l=}s`Y4V3spHRpYB0R0!VX?fRg#)B>y436KYk%i zi7`cH?GFQxc%oJ}ktaB*&cgJTz0ys~>&9&uy`iAvLS8LrKpGXOa#>f@e(_A|Aj21K zz%Tp4zK{*EPynaONG*}f8zOwQ81i6&##91bn^V9~(la0_)8Vybaroti=4h+?y1{mMRukhO9}E$Ks8g zEZkM9Bg=|@Vsw#{amugq9{j@D<6fh=fSRC}B4p+(P5)4wKZ8 zQf!rcpR?Wn=|0IE6mcxAD6Cx|5Eu*%?4nji6dTfKhq!3mAd3B5<~ z+1@b&ms9?e$YbuvNF+9xppipS`C;Bu!N=FX!{yd+NP5@){X{VAxwr=Z;7}Chf>7R! zG7tGPr6$duJ143C_w<O*&t;4uHu=lo%W zPo7t zI!sBJB2PuxxV%r-EtJe(x#{yk$9`wRIDw3?MA@-ZL@Edf(4PNV0!%Px()eHl+ue4i zMo{~(+eXZ>QTo)VB6Im==C|WsaAb`vEfG&UMURUjbj`qmIq}>|FG<#|2^G&ZZC#&| zt25)vhw%#?&-()c!;9|DuL7hK>l=L1inzU?|FO^q)4D>_ynp&Af(#KzO9*J_)GQY<(BTPFr-=*}VdAZnrv=!E9}27!;(x{DKd<1@Hg}#19;UZLcc6E(3r`OV{=x~I zVb-!78uqc8u7dHcRE)#k>_phzMM?hd@AUrq^vL+q+uVlv0A+Y#A=6woEmrCT@Y;}D z1XEX&KGDScLv=ImuSfl22d+hDVj<=o>Mf^4OCE|;enp5>{uk<9;)H|3vtMBsJ$ZkM zwBF&^9{vO@L)Z@ZsZU9{)oci~2k24a9aIf~EE)T{kE*tbrragpZn803dt)=#1rQtnfqqZ*LDH%ahYKWVPS%5nQ$&!QF|>Av53 zl1Cn$t8kfT#5vNoqzYFC!tyLJ^6_i_Td6bFK?T=lpL`m@3qTlST%(5yp&XSBsj*=zj z#lJjjoOJlQ>Qkatj%-3rI#{PEb$h~WxNSyszy-cvMA6~hghb;oKW8yb#=@QGJ7Ok- zCtKkZ6#Wf2<&fjTWC}4gcjh>})n&Gqk>bDn%CC_7%C~xKzZQt~4(|s0l z!jQ^!Ul)3o@89NN1yMd!pqvP(;G+>0A7WhIhRbw<@_Mx@Rr^+;HpR{ke11 zVx?aQaFG?PU!79Vx5;DbT)9^K5xVzu@9!e34prbyDyl!xUpXXAhZVr-whIW7EyLACS#9f;Q=~KTiHJ-Dc zXP)$Ji*ee2Pq0iyhUJE26NL5R-A3H^ValntAAJwo-io|v6w`04BZAGWg>FuViWF`& zGtTGw&b$|&voTV|Bh-?x+xeYhD#KZ5fu z9>*P>GkQ9365`*(D;l>M?{NMV4hTy8h4;2R3CdJq+DSU59w0)E6_nYG2=yNMt%9tG z@cR>Yk1)1en#pZg{s|^lh3;!)@(pEj4}gR`wk)BWQ#T{GLzk`F1CeqJd|#8eH;jgV z7;aF4=dP9E_huR=JEC;vF5@T*{x$yX=wjG+*eJSTgzpqZIIGqGSyD$%bAy~01}-#i z_%bl8g>W3`VG^pZO6OR%migf?hAzPvRgyv4tSEgwORV;~*r2lq;ye%$p(vL;*J`Mm zzBYsP*DP1|cY}8Po~R}5D~s71AG2jR;#bmi(fBDV;lk;jD^AG2dcJ^b{yj*nZdDd# zZr)&!CO@a6(mLL-!*V1%xst$tvUwo=;^CniJc$n98NV?2S8u0}huBq8d{j((Cygz& zb(F8FMmmK>Nqj?isx6PwU?__GDp|LNcqHwMlL9at#j>B6R&qI8<@zE*7MDmncH-tq z^`(iSEbyFlkfY} z#Td+c@&3j+yotTQ8!L5**TqdR^O;3pR^N%>01xFgx}+jP3F$e9305H2%v8Gk!b_s9 z#9f|zdzG&(VEgm*XqEm?hHIFq={O&KRw|6SAH&_7WQ z^SJhh4$4`@-v}o7^_r6rIp+6*_P`^c=+VGFvH^k+nh;3)gSb#lEDlbqa}z)fRE?Q> zMLP~&4Z(8F&?A%(nXy}Xb3S&AW$6?}#*})cE3dr*Hf}G_&!fM0=c8(K zpq;cI-rs2(Xpo&Vx&VgKAP*g}FO9%B&G?b#u*3kLT;=E-Dv+r{d%Nci2Pdt6b=z_}ZumE$ALatPb`WUa z+cJXmwclwR!sPAU1b5kiSUAP)LXSJn1CA$*lXU|EiJJ)p=82&4?}D0-gR)3726qp> z3Ea?+g7Q)%ivAh5cH<|I#C(G99DL0z0vzf8i~kURhW?wvA>BY=v$2NuraFOd|60A6 ze{Yh2|C54T`;C`b3|Cq=+uzR{@nK54U>hdDVD|v@1Z37X04YuAxb@61mMToCg1YAr zjMG2*%Re;bl$iXICjzv&}fWe+nUd9k#7`+A+Qb7ag` zATZ$ZXWBRAjETt4Bfoqtnq+4~5@e3^*=rjoYK-W%E!L{W7)3Zdwr?f*On`^RPqxnF z&5Qb3f6Vl;fRFF>?_HxcGtGK#{&@> zUrR=8M`@KKsF?^<(UD@OvIhM|u6u35J+$iw8E zexu%^i5j?3`I*It8Oobk{4r$${d0h}e$JGR=NVQ7j!1Pe*U=&YljR*j9i?8*qn#%T z1X5Qv46GuX)g@2n^!quz{0Fd5 z!p|f&`5co)z`SZS`es5-wXTyzD*0X)2~ZE1Fu#Z`GomwxpDznlMrL!+TKxyBEwe$h z1{^g3ExvGiNc|ba#8v!p$CGL~lTBL!58RUoJ=Z2!?GGUI$7|3NzyM7gf6|3%ts8`` zbx_e;%x#=NkbO;43Tn!^^%9*k9!zg6wcNs3I_-nLQQ%7uEklyBX{Xa?cL47hPQClp 
z()S7J2V$AI`*(^^ziMq0uC|}=i50Xof1Qp9y!H&sjJ9miNYqwy-`O0v?bmzU1}mz! zYBW?MH3)q!E4b38HXu>vd`0>=Hbay3j)Iu_!5r8PX_KX|I4zaI|JZ8#SO168dJ!`@ z(>#EO^bcRpju*amBrU(#1{6nagr+1 z9cYLJn4-k}OmkcVE!*L~Z%pKSxCJe-@ho?38cvH*PMrSfz)gE1VwJ z#MHLdaSdgP8I0w@&cK=OExkC}&flMaOTvu8Am;P}sMjKR zn|`()$hML}aU-EC`~pA?tsGk<<{_05>xtqhYbBr$mw zNdQ}zi60spw|k>>GTYM2ZpJgPFn)(;mn-tW>fKn8#tZG?FT6mGn~iHgzqMqf zFcGssR&W^NdXSY25+v5qo7oS{)Grq|9ek(1$4swzTcWu@(IDX7D&@hHBqee-b(1)AIAw9wb zm_l-s6bm)GfKq??&=4&T)ewW&v_Wwp*1W<%qxS+BI%4o2hK{hOpg>^TyNU}%kVRHe z`x+9cOE{v91}({@6;I`PirNl0*%|t;xx~FjRA~JdURh0~l zUa?Fghj8sseye>02dVcvZeo|LN>AH<;EoJdxjvw@%rIr^b7kBj4bk{GF8A7wIVKI0 zzqV7ysjnxaR}tf_%QM)udud<@%?_&=m zsv;=Dh3_yz6~e(%E-}Jv2^9)&H<)-sHx36E3z)32o9f2ngS(}@ynGZmY`Mrf8SZ^; zk3}_N;?)yvdYGGLQB6})vl}$A2pRRV3gReMj$b!YP}}ucS6S9~>2Q-a^kKxzM+BIY zy*E$AzlX8Z3O&WLP_@I38Ee%i>T2ND#*7kK@+pldhp; zSP*clLXA>qHrl*ENOlQQIq%GA{#HJoBY#sc=`4@tg9D@2){ZFWZ66F>PIZm^>}_kQ zV>(9E6!4os^jeG>T=rrBJ25< z^I?21uJ3toSl|fY1AxI&abRjsO{Hv9gg=*dx51d4T8^0(x#Fe|qrlzF*K0f%UYU$- ze=U8OAfa{I$kr=Q!ZNI6^P;sYXLwzDt()muZ4`i0*(t*4wRGx5d^g6Ii`YNmIC5;E zKFu9IWxl3-tM*+vmks3XY09j)Xx!@!Yj=suG}rFtxIuGZ(B&4Ra7^@=)<+ z*gL85qWd)ihLf@Zj!q`}2ZWu)S?Bw6?2%VTQZIXqE^kRJOcX&U&#Q$l4w0i?(Jq!D zCpaNCZu(bo6;@w%Ftl5O7;0t;#F?Mq&|1jKJ#n8_VP18l0b9wYs zTSYZ39(;ST5R+(4VWz)EJX6*npQ3~n_}&CiVvQZR`ZP*?OG(K@o&>2)C0#uZ}@i;maNyL0= z{8jlwSynevtk2>)=N|g&6D6y-`Bf!#xcKkneyFdYd|VXP4#wcfAXQ|*@(-Y0CYd$; z0(8zL)5G}q-GIH#8bYtH9S8j^$Z|FI=En}o?M;X8v|p;@zK$&Q3RsDN>!UI=A#c!W?A55C?DO=nDSi$*vSWMT&I_E5J^u+L@bp$L{*RU+Qlf;_&S^3ob$aHao)y!Vc4s_WK8QACs$>79s(AVrie zAQ6255fSMELO?{Mi&P;bib{tl3MeR15K$?SuC&k*k={XC04Y)uN(dxbp6UDb?|XMS z_uPGd``rCU9)av=HjFo+8juZS?EI~P*3fvv}4!+5alrmN6} zdPg_TSy-qbnik!i9dC?PUV6Lx!c*yo99;W!PtaH~bxsfy)aNKxM8Y?TpBb&I(Mk}| zjx!!SF!JrQrlPOBGuq`RlfqD>_+HQTV=ABJwN~L#2}I~{g<3X40j<(B%;|ACDmq8B zY1M^t@OolKznFQGs`$dim^HH(&dKgv z+GWUB)A!;nlW>#Lc|A}D_Ahl$%z8(#*?b_rF1vam>D**Y_OpmXyDvY!!}O9ho=8Xq zxHLsVtQvA8ijqMEXxTjw)ikd9)(WIll84bq^?z4nc;KeiZNGc+p;9&z@Nz6&05%C* zDxYebU`HwTDWY7b-yLZ2b-zaRw6}%rDv-W_|ALuKOBxo^_8dV>j^U_aLYvtMv4RAW z6`%|ma3+q0wWl{&3oeuqPXxf8g~>^nJ>&Xpk?{7S;Xzoprjle{ZeDoES~G%kryB3m zimBC|yRR(L@5jzNZ53;r}2SwZ_nja`C`|1MevV2`z{{bK52?B-HF#gT>g595&1#^{bq3O z9#FZHFkcRoaXd%{b6F<)u z%LH-#(lPcL1MYkb)do|do8|0{d!!Wb=w@x$(*jBOOFZH+#F0wP$8BIn&}tHp*wdd( zE5F(7Fn1d#2k}3G&`9#0@}{3mJ6b{*(sSTUOtHi?@=OQ<4wZRl5Rka#3)$N2?Cmy` zTM{tb-u{>0z|#*HqmY1b^;Hz8!}5u+?TVZi)jwu6q<~!q4#sHd8l|61-Ce+rx<4~P zFUo=rrceQ}#qcZ);(d%+`$Fhak45Evu!Yaf{$o?EpeaMTo+23oB}T&ON4|r~_sN)- z-h7{IoMs*V3mY#&bhiC{Fa1F;vD65AB5=Nob0DY}O@1=9$$?1Ai0rIG#OI$(2&I4d zmH3M&vhEppVKt&u-Sy@)+_Lg9MK3-h<%X$GanZDgTyL~OZFUaXAc)m_N1G&s38ZCW zWFm$2{=MGsLV3A$w3A_o*pbi;m3M-lw>8S5wnyl|;J;^Bf{&HldzWTE+NQEuhJm$# zh`?$5TX4+oY7Z|2Y$qJgsVaN3`3R!AC#m=BOU&7u-q@bK_LbGUS2P}NwKW(W5VyLI znznW3Ns0f^MN*RH$@pnRcGy+}a%5{ds=@TwNT-$enJ+r9Vn=BExb|+eEBuW7`d3!C zKEf?A>v~BQrVZLpre0KtMLm5V6h62O_W^7xBtzT~z!x-KErQxJ_Lh|miXe`K2mNHy zNPC_|SePWR;I86lV>TYqCx-Jwb zl+!zxV0e0MR67^vCj#^Mm4Pqky06vFSnvGNT(wtFfHJ1897*y!@)VU1vMQ2X#8rq1 z=1N_5rFuY=7l8n@HryqzpKD1w`^v)rQ%HJ=a0gG8F?Jw8=TAx2KbpQu9?^+x)OZ{3+*O+{_oWZwLh4NEJ$(? 
z2>%nre9Z!!YO^>&e}J5C(}xdJk=vVaIt%7uxOo(vhP;L7@ch%IkHP*`uUhk<`kb)mNv<3Ic2k>Oek?{nYlnnGe5ofmAn zZ<>fV9@UY#rOEL+ROEZMvC=#9fOqv=W@Cp-=p611tS7%bmlBw+C<37qV7^hKkPz3U zpG=wCuQ5}|T+A9joXkZ4hb_Qute*p$DFL{CX28y2U?-nLL$HGvmj$Bk_%-#NY}huX zW(M+geg-3G~NQcWCCFC7^xMk}W zoZ@Bya^b%k5I%~?djuS{fW*un&N4Ic$a+VV$yj{WWKozIqD{;^b3Xde)F78$S?nhxb&B#_&` zrNR%O_(AQPqff+pzuwylKG_U={`7i_!RKbCy!TQ=ZF0>WIE$^_Rgb5UwU~Rj65l%o zkPynBG>#*_*t;3j=Jxh zay&>4Sm)7?H<#W_a-(HB&%tuiv3f{;4>-wC_{Z^U_!W;7 z+=(^?&i%f;)bj{%RZ~2jP?85kw-D_MUGBj&iXj9D^RrjM68iPU;Fjmb%o&Uu&`+K~ zJP0BeJPx^Ws}M7V0Nbbna7cj`@H;0jgMc3Hmqw$;7-1Z}@$oM=V2E6>zrVr#uPuP` zsJ}PiNh{64oIeI19>UBb?qjL#54feE~bUUCVC1Zh-v`yG{L5}5X?_9z?{7a|)+J}E{6g}AU!LX$z#2T*N zl&)7t_Jk6x5r6KK3|+6`AHKi@^;Q3E(0^_JFJp8!qznIJ3z(oMQ=NPTe;RdXLkofA zp%hyq{KGobCLZH{c8|t;+he>9#i~wtAuo@U@H=XY8604zIs2OI#U9SOtgL2?A<~W{ z*53Wew7c31eTeV@JIY&+8J*xTDBu0v;h3`${F{&H8otdz>dHU>dnuv&Sa!*?lQ3RB zsWp65`^Psm_K5)=7UpkyJhRGYP!j$xFs{>MY$cZOO47!((e~T$gHQ*7bq|F0LLvsOAb3>Xgm8WwGS7*vK6yl=y!QsQO zdzS*YQVkym&SC%*3`5eaQMM-MM;ivq+K9~Zdw!GhYMF0i=;3?Y_80hgUCmfiI%IRn zQD+m?&o4J39a-L9!2{w{Ge}RxzTg;p255@BAb~> zw)lzD>;P=^a`#;$aD8^4OT0L)(>Q!v;KnO>TQ9}T;->u(nbANKnadDK$*!|%2L zel7}P1E8M0!{~ZkSx0p^S)YqbgprrNq}R7WVw2OUHutN7u{uTe%SyM;P`#ZSaW&Jz>Rr5?+q~JSwQja zvK${AdG(^47N*JKz0ABKCkEYDox9(8zRlJmZd$k1X6xKW}AyZNOSCiaw2~+;eZBHYqQNt5n%tNDq$YccmHH+h{Q}z zAH$JqN+#&?fk^0V)}MQa=??A1{H2NavO5`~e^r!*E-QgEb`~-ufrIx?5B%8Dk`J~F zwlSNZOb_P)su0L?U2}NNDfbDg^&){Dg`8+t^3eVNv>OYUsRp1^;(W1oK7gn6Pm>Yr z*!Q>j#IM1Y6a;^%I{*bh-3z)6q8iQNh^cUQ!k@Zr1O^CVCimiZ0Pgws`9S=d(!ZMz zJ;uEVFt?s7$QcaN1PQ>TG~3GmX)=Cq70kuQU%A4-%Q>S3K{u8S`$!Y(iwG(kNH2xz z>oA?ATS{w)JNJKy6PUe*@@q$4kylOFLIL92;ynTyVH+p}T>%J`cQ^rQ6BWum#>>2< zy3gxDR~#3k;49~xSl9U(q;4yYQ$m%^&uudG8>H)+#w-lie}V9Jz^z^wXTZPbSV&Eu zJ~s!71lS+3b?#GZLKcmFty+!fgDD)iX@v{JU&qgf%1+0s#Y(fiKh7k?#8r9fW(y>9eW&gBf!(g-BevbpEWwnufN^9WBUe6s5|DM_f0J zA_yHrxZ~NWq6WQa$;GmwkqOU%vUmK;jT#&UKMud&g(^Ba*HiGU?pfQGhlcKl|9m-E z9jQ&)A0c%JN?<7e3ha^nLwd_!+s+?+n12rbxAl#j~X zF<+)>T&_egSnJqoaW3O)nN}jTg+G}LO2bB>)u=p}{hs@HtVEaHn7>C}(v%u4lt|vL z(T?!ASdFqe`g%1)scB*^Q&)w*K{?s3bfL|DT%O+1Fg2J^r9y_)4d^P6;$tZqClc=Z zt#SA76KC6K3hD>74mIhQK^j#h+2h9)veBX2J(zb}B{zHsm-&UTlmYeA{5iU>oGGrY z>#lWmojDMCwWe6ILqPYzOuo6+`BS)UOoIeu^)$~)zSOQVs0e7@)l6_cZz^9nKuVf!)6-=He z7jCJ(Nz_cc)MQm})^OfOfS_0@R$5#xvfj{}IWs-Pc#%msjBo+$@Wh19CQ~Y~5qMUm z`^+)w(w>iNE+4rsp%ExG+pKM~2#=))Pj>De$)Af?)Am-^puM@h+oxsA(1Oagu~zjx zQiF5zDGgw;vh#wS{N*mE>5gbgXVs)=eUXnO0k)zjNY@-CnrF8tZE*hV!DFoNkX5Wx zNKmtYP#PQuiBcPkNkAF&zCP$P%(C|0@T3v{6Bh5Yuav1!IA;$xnp z%UPJ=XzRGHH67L~BHw~iA?Hl>uSR&$jo2)XVX!+iUm_O0D>@$qiifeN4iclwCDeTBy5G@Yt%U&h~V%E_#6{G-_b7iJ-lUx9d*iH|7dEX-$fh=TSTBL0JWh#?X~) zZ#QST+{0_1-^L4F^Oh+vykHTykXzvT213ptJXyl6ad&&o90RA|aX5~uU_T0CpJz(& zdOWmvlJ!|j=5w)dmQY14D`#_^zL~VOeU|b4lPgsbv9kH>e?bQPTYnK+fFfjqwG<{C zVsA`djm$q>LFjd*DHPT}7@JC6A?-!mY(SggDl}!&=_smV<}m$iQA&$>N!-%x_2%!1 zy4;Ccu71zg)3)k&2_1TwvC`A`vLab(UFeX{K_6ee)n!M0g*Pu0!Y`m)eXc!rP+h#e zu|~CMGUgZb0l9tx01LF)dHSCbLI1-k7umL~%LWIEE{``CH7N51_$9kKN93g zk_Z$d#tY1KrKn&DZtAY7Fc_=*iMCVp5AJ1C0!D|WXRcd{K8xAOsIPgSnH-m6ekoPe zl-LQ_(8OJ7vSak10f?1;izE}Hb5!w7>9N!|V+{vBs^p~4HoD!VKRbB!M6&2MeqpsS zL!C5nuZlICtkk5!h%)F|lI~ks5jR@PcaHzEVx{Msyi1cQLzCZS8u0B5QluDJbTgM} zTmp`ru1ys8zmO&$=U}i{FBQ!9@c1?PV`B|_xHdPiiI5oeEYY-~+HJ}v&Kt$xMfMh` zrhn#+cU|_$JpTD^D#s^-gZu{*`?=CivE6>&vy}|^djK-P0ZEl+ZCZsdz}a=h=uU-w zrHr5zZBcf$-pxJk1&6h*m`}Zb*}8eqW`3&~8IR*cxDg)gbT(t+?J{p<2$Se=_dcc9 zhL4=$ZeB@_vF$tzOEy9Ak}wb_@c2dOq4rS=U!uII?j#D-)~G976KB>t7anZ(OzPwP z6@wF1vqH>65%QZ`lxRi7v|wN6kp(vEC2fWO?)9z1DscM(#crTd|gv zdgTw?Q|<#Z?g_2vEICk@A>0!Bv8F2~;n|1U!0XmgUu?x-5#yGn7ZZ!juUvxd78- 
z3}|_Hgd1+e-te(4DSw0Xt=*gDYnjird)lyP%u?}gFEz7<58CGC=LI6P5gyb?gtefmg;Dr%yJtM zzHCbTjFQUo5JX%n|CF|W&L&-Wv~2vOkl!LxfguH$?GZwlI(48@Wgvu+({oGtlcVjh z#?_qmH=^kYc9c!s_M2ueP?a$+PRdqIONrU^fP`Q}I&Ut;6x^CfKiONu2MvVo8KJ~v zJBC+`h=vU>3A>G!YcG$hONqxcNwn{7cWomrqOxd*F?Gfm=ZVo`$9}pBO(p~yZPpms z!+izW#FF3tDe`EH{~|Ne2O64xfs{9)NKOUwlFFKC{@+wq8g;Tu(YRTuU1U z{b!}*eL{ZfB|sE*2$p-~$M@R|CV=~j z;4+8}vFJ1IA)E<9KtNyscYu!$syQ&Arj0Bc88vkeKAFTc8qDmw=lx!TPnq-I%1~wd$G<$5dhX{SAw<>sWpVLr2XEQ4{&`e0f94e8V7?qbopXmEq#%*Z{4jIs!(Twwsw zQyMj%8b#SMo}x@)v2>kO*p0!54XSKMu&c+0rxSqn6U@JkT$TsJ2nP@HL$~~7;y;Go z2lXI$^XN*%bWO>RY4x7+!_TgMbm%UdSDd+M)1AA)OOa+oAg+Vty3~3@Dx4;NR zSkj8|K>5eYjLoVAGwbZcBgz0+#yJ7P725x zPvUzhSy%t%(Z{+?Gb9_z9dvyZ^LsvnZ>&~l9Ws7GAg=_$7ZN%-vM9iF8Tq;E!b_g) z%(4I-OkgyAfG})9F{O&r1W*Th5N^K1KhB5C)Tasgxp}HJrd~|%xOnZ^i%#7)gcFDx z6I2ZqXuk%{2IWl_T;?QK)@tme##UJc_y&qTEv-H?eE8X&J<*vj%tFNZPrtG_y>4rN z4(CrlPJK4%8$SUHz{CDb9do<4Pgof)hC^as?%kp&K!y{z&?1pTM+xUD?s^tj!uO zZko;ExohblEE7QiZA+D|3fYIS8d2Mw)$zB3QVxuABFtC{#Ob9M(k~IhKw9=V4H=ce z3+Oc9^|Uar8{O^=#+QS|;;gW(?3JtG9V*WUr?62#DuxgsGdEKApe^0B8xmVQ@k2n6 zym<3VAj2rx$K~Pq{14x}4<7G0k=Q49LprT(sgc1(ohHIqLM*0OqvvmeTXVfT z-p9cNQT6Z9+B@7J}c(eQ)3?;;zm24FYIq?u&{F*uGDxt zv|Y|DwLU9r)mMs00IyJxzMpo|)2<_*91HtkZ+m9h@AgcqYFVaa3gg@P6$8Y_*MslS z9bj9$jG-PGTy`egbxsB^ZIjRRHlpPSMiLXIvCTWIf%jb@_xx$SlF$$MJcJK&ICi30 zOM$?zBksZV${oJrwU6*)3f_cWm8CbKoqk^&lj%s(MR*jhq-3Hb>Ry%5j#5*)^`{Kc zp-pyt@%hxVJj*4@+f36ROhczW&nXo1ti@mKw7WF|ITA=?@ya@1-q_mEl;2;etlM&^ z#uT(HlrL-3E~7hfg*go})K;1?HA#t$>T!}->bdYL&6Vft&A76CGiXzqCVm$qnK1bt z`2n_EK3$26#1u_5dLjjWqy#mud0tjjQXFS+JgS7e0!T^_EZT>Ig6$_ln_`MJ#(Kms~P=avTvTwr6Jr@pvNj-r^7 z!pHC7k;A3;d%t;pNhW>Tv z;z%wR29Y0<6_zTqx@1D$3wwYmg*S#F_j_)s$As~}9!PzhQhv({8Cb=0WWV`| z=QNuxfZtuVA~6GIQYV}+;X0nXiHPGc@{`V-ILpzc^kSdrpclEB?gMX#UH{268U^ip z0D1-9OGGT)rj$BJvkB2!bs%%Th!6F$7+owclT#!re5rA$>ds-oYPN&=jzSFydB|+OzxOUmF_^fRnJ}&?P0=zdcR0S-Z z*A&30-+_cm%3W%X+Z2Is!Bf=Xvaw6Jih;C4+xl7WL$^+e)J#xC$!$|M%STCB2Gl8% zp$!&b9-|Mrm(ai&C!L`h6#p>ky=H?Hmt_Dx1>XRH>zD#KgyANDq0|2Xd?dpKKuWJi&IvwOn< z8%)ImMFD_9A}r}#RBRqfikeFfZ2=@0cW(})J$O9i!PWKkg=I-lUKD&ONq(cDPC^$r zJTSZ5k-Sy>q2)VVpIo@IU4q_!DH{1^a5(XF9y8Nty*efu8{L^|3nBuWK>AM*ygQ>r zWS0GCmLZ2|SEvujTpfzO4umLMLUCePZZ?OlQ^d8{6XkF@S&4pzAYF}WF$r>t*aMXA zIkJ?f*T6~R&p`go>5d-Npp-`nC*B1~KfQlRgU2X;AzRbH^S#avb+{{EOYIO&-qTbO zk^Yz7&bM2 zPA#X{fVRbKPtDzhlrJNHXpQw*t6R6_EXuiN1nrAvw|gI- z#Cn*Gp-Igro7RFtm?EA8Aze9-qpw4J8?044w32^-gsqX$celmzJUY(2?cg`s;NR{h zwLqL;2aGt0f>YyO(XB}EhWs;wcBI)2k%ar5_(k))V1W(^b=fVJ%X^t4j(bw#rgLgR z{`4;{__|JB6brTZsxMuI$WWg)>Q~HqH0kc`^RVSmjFjB``J`Ru`@jm(6C1GJ%0z3R zz!n$L3tsf`U-{Otb~bg$yAuKmKvMZUZ{4<5=N_79?D;5M>+b!CmAR6QWlDLFI#(lJEvTg>W4~)c|xWCNwzR zw~KHCduM!4O%L5>>Qg*LQ2B?=mXYYA@*_vxuKB>uluk^_nSDg$Rb7au0(8HKw#FGA`C=%?Br5drXNQw8t zg31m!bnaQty`oht=Utv?Kzo1DFH!z30|drFnw$y9Jy>lHsX>55TK1}L9W&0n0e630 zd{}6N=!22zwhXt6hA6+0n)i%Pn8}QZA_ASybIKK4hZ||pOLrhaFWw%>*(0#^c&O-G zbAm`2EjS^1WX9>a*;1Fnz?F&?iy8Q^3dH71V~56cW&7xYG+ET?QgYFJ+;-is8GWSQAOlD=1=w9HmiCGpbY>{+5g+y1VbRJchH?SKu{;vZYRB{ls;3&lP=v;C-Vjb3!{d zk~9!Lue%6x%@IR@vfDQT<1rDfUIXh_i@RP~T%Iwo;;$eVl(Khxzoip7LJyyXZ~|VD zoF#j({Mve|OfsRCagmQC1|crHG~L4f#Tl@jwuea`YPtuGXZ0B5%Jj*(mj%p}RUqjq zm^uV7-NrS$YYst!x;zzwo8G(I^Axhn`j0K~9}F7Tvv+r~p8F)6HT)9lPgQDo7e=vS z6cVlzMnMhY&L-TjThnoBWy3&lkNaasy*(s3hu>nP49h}@UiTkyote%kUGH25I}VFK zcvt|`0_QsnsO*eUCMBfun9Q=+<-F^~)z9wpy1Dp7jQ>_Qf{9bxN?lR`BcU@=2$Roo z`&M;X(XAtVBe2NT&69yK`725?H7>!be!ndp{uq3d?aaL!nnDXmX=oUn&w8uWBbeo% zOymOO2#oTQ68zkG?QpL#x zVpGapA|Cen;i3pCMPsdmi<}qBuJ1K}D?Vq|Wq@FfxP}>qO(K@GNpQnomB8G@kC?#g zwRo?bdXYECGgcw`%+6g!;p>`M<>S`rmqY@XFn1hP1C*LIq&d<9$*^TOd9*fo1Q&<0 
zPz@GI=+hd1<&k70X2kZ0Pvy1eeSIM-aaqeQodN3}Eu0rhhKdI+`e|IlDc)&V{R<;C zEGIRwM|)YFq8c&~cH6+J{`31u?%^kJ4|+R!l`a?u?N=lw=N4^6{KHrQhg_7>a}^Dr?C4k>%VbN z={dJvV%^30_Kgv{wo~v`6SOkQ9E1@8A}SFu5ujzMano=tB;lc92O#pL#?@}-FVvRL zODC=ESZ7?{m%58jSnT2Z-HCUuVy{Qo=H-35ihdD-X%s=kqE5LG^LIP@xt+8pWP zx_oy5*@b+dBRK`cF~rn_WE$nP7O;GzM^yAitAaIq0Qbvpw>7PpWzy&zq%FEgKM+Dk zAf-zv=S;e0UlyUaAhYUjXSA9+yQ+ft#a_RA5v+TzJ}}u+Z0p{}yD8t|2vR>AOqqYc zx#@mE73jDrnrMRYrBANcFDrUWDSx3|5|HFJs*ebnwE3=4kEb3((s^*xn8tkA6l?uN z97T%|O*abTdhur1(D|*wprFc!bJLvC?_YkqsQi@$Jxjkxb)Q^~hjW6wg4C82f=3tK z?2W^MNW5(Gk)$5Vkw{mrr>@dMcfRO}=aPLV-RPpg`?Dv8@x0~))opsP9uu|mAS$Lx z^6ltig+pW7N|u&>pjX|ri`e5LO%B#CwfDf;A@=)FR(ulx$$Sk+mbq9-Xgw8jnT2q zk4Z>*!JoXLC|efu{V}Inm0v<(mWG0sUpu-O-9spLtA~$FOu9uA+~A3!{vK#vvQ@>% zqIIE(ba5+>)@P=aLj?q7r7@{D^6Lo7c_2vPgvpeNLe`dc809JBIh`~7bx1=)=FH_W zle~$sO!kiw;}@D#J4LUb6ym^(8y>v$F6MhtMM;3}UB(-PhwmLs;b47hjT*WKeJHMe zT&VnGZ2#&T;7+}clN9vYFSU+cA=U&ik};FH7?LSr)Ssx|Qm-`ZOVrPat1@(VK9ZyO zL?HE{^2ix-^O{e_zQ?;}_3jLBSJ=$6E1-_FnP4{_1H^SN1z7wGY~^YE_&fS=S8J{krpR7RqtazfTAdE@{ISWjSB zmaDs;4sSSKz+}p82+fHjxWa_=rp2{?`y>!ttmo_JbLiPaP4i=i zqH#~|)gG!uj!r}SnYQ>9TO4ihfkzS*>WyMK>U&U6ril7`2oZT89z@3m7G zQh@PqD1jl{p#&KOG^`06JJ+kyFOy~l{f8CS>Ybbwi}{?aDnhYdmhdvbaRy!pIIUS& zskv`ZHyF89JmZJSd$DicJoY?Z^Qla~G|@=f@|O)9s)wOKILdw9%b<)0>mP4A9|b8p4WC<`Uytg$`xG<{8L~aN^0;*}P_3ui?(ps6^vBwH(;;oj9be&)0J<#sx6x8X zMU@-v1NMG&ge8PL&)^9#I?!^E<+9VgUVbWuW*EY5&q#%IhUw=?=Z-IIJ!ZV0@MXi} z#(xXPIxlSCqdOLi6`Id#ysU0n=`$|J{P?^b-vMKZPINX3!-IOEbH2Y6V^Gm~umQQb zfPn6o8XGcRBD}&`r50M$Rg-Ga5l}44r-KL-Da*($g7|N^W zr4=8Z5Qd#^-#7hqE8&CKfpE`Fmnr@GlnFKz;?3d-)mul>eZ_!J-D38R9&1id)sp(& z0u}Y9jtA}*ac1rytu351Ub%EY8Fk69#}z*n@V7X zc*I54++Fq8F0>YTr5IWXn}3>Q$k**uW0n>JzIh8-p>{MzdOY8cNthPqZffeajKxo= zCs2w#-|Ko;kO%MS!ch(%$Wco_nbbB*<2gld2R~O|qJO@5aYkyO8bcYP`cHynl3sl> z>;W+Z8$(Qj=KNFJzI^%S_Hw(@CYS2j1TQgdnTSrYhJ;BQXu9yuO;JywYvrj+=W{_l`+7v(%iBcie^kBOL%#bQ=nWB2OPWFMD|9Ddr&P;z_gZnWE!S5T z8uOQ*mOCvKyGarbf{CW#$I!FpCi1q9l(qF!97kZVm=^pPw5(@q_nAIJl~Mx6f^WhZ z%nEXGZq3NEuGBPooST|Weh*Rt;bJRF**%U%95;aw9C~gj)GBJp_3?YnYpTj5LU;uI zt-NA9D{H(Vr#1XNyrjA^Px_is(2Loi$r+cl8M+~FU_El3WgWYoyTO8! z-O0Cds}+4ei8iok+|RX6W(*%S9GkWCbq&wohRdOXL93AojKYA`-3!XMN5YFohxeXs zc|xdu*3!lMlL}7stMMg^Z=?4B4 zViJegTV<3JGHOkYnm(oey=m#C0HuEANX)bEw6B$SbmXQX7F%3Nc^x7K&H-s-6UUi) z5=))^v>bHg;V+7=ICpp5vbxh@i1#u>?vKq+ZG+1a#;v|5l`gXcl=LjRpH7``3P{n!H7k$3(En62e_YpsTcB(w~ zFd5Ns-DD)Laj)G-yx8pv{o?z!fJjDeRJ0#o<$JQ7wV4UdxKjJYO|Zt?tD<>!y} zl-&d;)T!*6n3u!uJgsx5N{)&e-KiWty=U^K^6jQ@7;E=ze)KyNXnAz$mcdV^F&hTV zEy~?4+qRCDIN_FzY`rp^87sdEd;f2Nc>el5{U)RG5ZSkZkeH^?Xw3dDZNxS9`>=3l zs@TBmy_d9{ri^w5m%?vHHLl(p-$ku+cXgf~MU?3~YrNoDIoc|8a9T`4`BZKrk)tSN zNi@r7#QjYTk_1yxuQTS43RtK#HmET)oy7AFg$&kf`NpX>kL`WsE4Hu^xVIv`G|bbbJ4!?D`!@jh>3aEh*vS7m$tv>G zoqW&)u81`Pt<^$Pp=LEx%FriMD4ixI(yyL>_Q5w^A3xQ`jaA-n0axD(L?giTxnI2|1@U=sIa%yf8Y7G? 
z`;?sFSkgN3yRZS`hozFDjJU|Uo<5&VMd)39zDv7)eUL3kx>z9|Wv zWh(!JzpOvI|9_!XGvS#TuWPtR=+~$=n5q1%t$krEmj*Alf3+EH` zS6VNBDHZC^k+ZM^j6T}X{&lkB4?VKPE0S?zyvzEt*;2#lYh>~v^g;Ah!WB=lD5Zpy z3=%4QM~<`*eIL|CvTs}O9I9eTQ`TvsheB#J=}8cWgVaeGFNk9b_x8Tpec|Ks$6E;I zmMNrSee#@<)t4WL(70auDMZ15jzVPK%lA$mbjxwv)O^6k{k@{+K4)jv)b2>eh#06h zzcPF&l+&%xba0me-_;ZVKYuNkr~cvdMt0^Mk?2PSL$zk}nmNDril@uQ`b;MdB}zv3 z$x?p5D% zodBn{P-4rdIg|UB^YYid0~L}AzkEA$dg}C?q@GS<=?=hxiNu_A+KVooymqVk-PNPh z$Ci}>x3HZ92*7NdJA$9J zhd3UA0uEBtI=}_ph}_X*!B`>6ed*6wn@wrlRcAp(2VR@Cd|=iBW4HAY-3g>w5S17Q zBkmwd&;K18mu?^f50Wo>AX&fxo&~O75G$p}f|c?JzUEF4#VmOj0$#Bb$U&QB;C?)k zN;ZU=#DO7nZWccSvOJ?fA55sDpby?y=n-HH82e{%>;LZmK_;RXeEafa+4TWl%+-d)9C&H=H`y zc9V9#O!+6%IvMg}m^R89O&0lB{bV{$qGtkpoF6ofWI;F)BD<(Rh$Tr^#yu3s?x79^ zy4|-AI|($0SYAo!2)%YQ)MlCxiI^lZBnBG=1d)O@+@2ERq_X@&aSrFsBe}&~YeODg zubrE5ZbGqu+$9c=jKhN%U)Zi;y;D_b&ZPc(?g=V;ixs7pYlG155^8}5$%$jI2t71W zjG^;aPP^4wlA@MRgabde2`Mn4srsvp0%c(To>(@+BIn)9pRayoedvi5O?PpP&gVGQ zljAAP*Si@m=qWK^SbtJlr;^3ygxRe|hjO_@F8OP(`0|fbOmso$Z2{SqCeclyi=-ZwN(ZKR%el$~Z3lGc+6Qh#uAA(nmt2XP5fG;vBz>gLNt zNT#}YMB3^+_Zfw%&dxI9Qing{`3GyHLRcv>zilv3oxo(`<|&O*?+hwX~- z1xddhawsr-`t>zx^7;CLReZajImGwLF5+>#OPF?`?{CqqAC2W-;|6ZLYqol!*%5e> zH#TTXO8iQ-75){ia%z<-ms5`vh3X_EK}oXxSq@0MkSlqDJE}=}DlLKE&x-1Q_jax2 zm5&_#LJ=pZSLuUyBu|%#nySwDG=+=WN(xC@ylrlxZr^mmzWyoOvTQ=I;M_hwr{HB| z2BFXvK_ae>=!bFM8pO7k;t7b!Z@$B$QwDeva}!34Zj@!`p?eVL`1nFB@w@bs2G2bQ zK3aa`($I;w7Z2HZm;KCzY;}C&orz(FVCb=gTzq9$FK@%iy*808L~_V&OH0o+eSfup zX@N|7@>c1F?h7gz_!hdTVKgoJ1rnxVRh%lZXz$CtAaW5O__i+OmDg0?BL_mjRcU(m zDGkn{7XI~P#b7*=^A)g!WeaPn$-9vdyJdoZ@TrzmKLz2Ibdjg^$6K`ZC}YS2fFY+! zDKi8Mv9nM5vMgH9j|9>)go{!*m?DMUesgWP;H}w&P5?<>8LXs~bed3zwry_-Ei5b@ z#$!KHOKyH^F6{kAA&coIs&snfO_}!bGP#@YlU{===pK$R$*Ea|_X$@-pA`D_ueaXX z&ZY*OABA^m)C0Z%h>u!FTGgj!O}az9SwAQ@C+v)R11w}+KQG<9EuLG6ZwG~vq#Fkj zq>H`}I<{`enT zCx(ZHf9o~X$>Y(S)=RlAa|bA)qL?~U#N-?jJ0SuoApvlKWXL$Vl5td`XRaycK#J|N ziv#MfYR+ZC*ZV(hG#%*yMfAjgzNLnR#Q%~&?kIK6tPM2^_Y*Jj zTa`3(lmcARP;;99(@j_$n*DB+yV?(a$lqo=RS$$xau_89ca!s`;JhtEG@BSqU+#lF z(>X)5Z@m@nFeEc6P%(s=yn|tNyRrzf!PU0yYx?i)yec?(oloF-%b0VO3Q?rX;xZa( ztB)4!0tDS~sxNHl=1sIp2!KPgDI!4q>~7#d2hMa%_jQsI#AgRidpLeQ7%XEn!mS3< zBnvXuy@B)S?VrBE??~u?9Thc-A-~5h%K^MAa}FeFbKuYHBYub;DM+-10_aXN+b5B9 z*%AQBK^W3CuuCTaUqyN?fokRsh?dcW_6b1x&d4Clhx|KFK$N@R6e{2y5f1p#6yZ6%AN=pgw%WQXATh z<|niLWJ)~&?4eA|JI4n^B^Wj9q6xA=V_+vCi!q$Ahx*yTAbbB4V(wI9_+-Wc6IK1GNUGMr@MQt3Zmtrhppt@Sz`@zmA-Qj6KmnlW6K z9ehZU&&)1E)67vm3*Cb$&WWa@(R?9cE#qPY^7(eP!tVS{dt2Yefx7rY#*1sptMO94 z4xcXNXf}me{RjS+bLW_4?#S*=%z5vgcI1U_Vcs`=cXv-wW7S^GM9uEK#wubgOo1|Y zkzY-h0Z@U}0aK?;At|LIwivO ziQ!bk(wd5%S7e!(_(cV5EqQAp7(>N_@Xh?8bqFUS~Z~ySmx?yg2@>Des z*hZOq194OxB&Cnc*X~bBr}&npbrbs2AJlx(T>h%Pi_UTj=nkx)V$tPmh|aAIEJ-PoP)nZ!*@R2o057Wrsfa$tMAn}8T5B}l95ky6h1m(*R zJ%$CsJqF^Ui8k|!d>GHRZD#0T0Wh(oe~}8UBXkM>dL;;_HrnGz))PDG4sbBAiq4-* zV)(A}ve01a8Y^AziUwmZu18VhaAS3L z;(wp0;eVLrCx7bHKXu>k-|qfDT$v3g1YTB zsFQk0zw=GtcKxzO{6QJP$&-DcLg`_p@G=n)^gk)zO_v0}JV*JG66mHheW;_kr{yM( zTKtxP4{P~|8E}-?miT>WkVQQ3AL+bTB}0@qQczn~9#|8$%st$d8v*OHu&w;TtX0Ei zKFb(CG10+J?;Py}7)iPy0ouO+Udq9bSGcoITa8*Zhn^09n!JEQj9^4q0nfYd1|8yy_ zB0J9U2lZMRW_BKLBC&3Ird2 zET&;@nsfXV<$2Tgq)qGh+{N1iqCzzy88Zaa-W7ye3-AYCw>-h_PQ!0Np!i7|P>j2F zE@l2Pp}`*!1pLu=|BJo%4r^*%zeQ0|6p$=bdWnhxQWXWHNbHD+2uLqc0qG#pBE&>c ziWCtL5K*H70#YJfT0%!aM0yhvf=Z1f2w_5!>HS>${Lb0?oc-MMyZ78b?!C`>p0$=s zn3lj7$9zL@6;!stXzGQONDIY9%yUvwvB??y|vE|G-=HurdwswLKAgbA~#l-0w;B$ zKN2|-!a!)sWLCj#<4CUDo=x1Ef6ycBCNO$|&=&C+2aI|yk?(2aWeia0X`Q%BeN;Jm zXyj)Dn>T3gjvCNWlf2NbZ^=H3>zU-O@j^;t7MjP7^)h-Xp20(lA69o=$hKZD;&_2` 
zrGuI3hNo6I?IT4`S!HQA^+alX>}~hEE@yZ0%Z8B#%|5CfJq6s_p3ET`r;Y1T2DL1n-3n!!%x-WulH=IOl}26197Hcg#cLMPM`$f5ab?J-*%C{iy%pd@J~74BSjuv8@vTo;soguox|H0OFw` z6Gs=|XwW&9qDA3&CW4qFEA%(d>**HsT6Y!juFm@V{HXtZ_W%84{@a)#{O88Z9_Iha zh+I`cS6MF_>Wm}WP+I`*acj}y18IKN>(%%c6tZlXi;xA8D$*ZheS}PWc!dt0mB zvJ`1my7grzY*=4rb60~>UvUHV@bd9*tRfF*#qKc;eedD$vbvg-qKD0-bAxt9q2qoZ zL1T7FZdI=3Z%Hc8}W{I_@ZVb)vYxZeZEb*ZA9~OOpZD({xw0h>bY+J9P`+qrALuIS1u6(yQRT(=nfrU1nNXCm&i=9# z(`4JdT9xksGAy)xEd8`fa8o?n`V)YRJA2wqH`D-kqHYDd*@5BJs&g1iOu#1d+xd!~ z6KnV%cZbAU;z8}|>6enJK^@!kayoYIvO>F+qV~Y5_(myxK4uq79b^K|Fn6~I!fMc6 z*MpS=nmy*dO=oL{la`8|yx#5F(j$!`40X+b=x6?;dgfS?o~dnTWjpC%Ok~t&P3bblzj{ytsZ{(Y&y5Jh|@1xB{I}i_ceF94^W`M8F=+M?UIBc~Mo7@b+-_ zHK=FG2IQodP1C=^Lj9_CGU|qvkH9}R>0Ig3x~I#JrxkgQQ-#s}^&*y~mt<^zb;_t_ zdDwav#xorJo?mxo^9NZtd^Ub|B*i?mGV}5lk7#8Kg13e_6)Hb9nm=cDg(5k8ZZ`*l5wssCoJIYk`WHv26FdjtFc+z{|mn6X zr>Z^M(=Yz ztKvSdC7r!)beT4~-B@^di1hkREIsG)u_?)0qsr<@(>F>qkBHGrz0^z)~`N}VXiEuwQj>BGIJP84wZ^p8*z$5LQ{9OiExGj1u$$Hez1 zrSZo1>HcFQHrY-WwMAJ4ea9c)`n>Ij<_o)$Tv-qA&^rdEv7eYnnyyXVAy=y{l^U){ zu}|Y0XHnZ#^vzI_C>P`dJ7RpY)_!H~WxukAC#U1wQ@OJqS@HLM^l2W~edddX!8bHa>%*=FSAI!& z-M?&f z3}wZ;MjI9gBEUN6SM|DP7F?qGH(juO|1%3(g7<^PAfgK2TL$Xn|Ij76!arD@FQAk+ zd&&QWQF6DvNd)9^@4w@J|9Yh9->eASH{hQ5|Dc{bfuiwm*dqW7TYUke5bXYsL(2hw z0wr0%BKTkU-#-Wx;(wrzWJnqR&qUJ<&{;4N8ts9-?tdk;@{NRX*s9 zg4n`n%++0XEPV+1)?|{xqrjp6L{KSn4HEAnphV zuN{n^n)bW7WsICa0D!bitikWm81GAIh%gnm^P(6ppSz{&>9B%4Ug42gjt03G?@5IA zw$p{*!bA8{p^6YZRVwaDoJGI=q!tGnuvEiEjT{cjy*7r?>cm*_?FN-n*N&kzk^+V%uH6t;WI=>SQP_9lw^Gh&XS8mj|1NkKcVY z{Zs6-*i%q`9^%-j`+{E`>)E`kH}_(e^%9CBYDiOrz;NcBhvIMtmMIoxCWr2tj=#9x zz>y@M0Q>zIF&aNYF0)P`9n{C@{;ZODQ>hy#Ue?ZqiBH>cYsZ@{;H%%cciZPB#MtWF ze%QRMaO3y-?zXOlHv&lzcXie={}FonU-|RjB;ow~W(+5hL<-{^a{VlR@%wL{8{48m znnDLUPh=fL8+gI=e_{xKMzi+1<2Np)ZiN3&-Pr$y%>M5GUk?9lf%?JB1QR>U;3#zN zk4nNYC5DF(`5uxKUyPO6YaKK>cK8&c>*KHI_wHVmL6gpRBh53L5(|bG z9Wm8KZb2sAxK-*8!}C`cZl=oJQRqOb?han9aRPZ>*0H$7zjIcelwUcbn3Mglaj{2= zd#)V*BUfa_Zr!t8nTdw^=hz4jw1|P&1g;+`Kqoa%CvIzJ&AC>tAE+(A{{x%a_-p@` zP;IB}OF-?hV|^aKQkrf`N^p$|bpLif> zzlP!(meI}mjt(C4$Czq_-4{Eg-QI38~_V1 z64KL#cLXpg(~k&@>jX5TCA53Y*2Z(J;*H_WzK_AV&n!op#Hm|8c1Sc3+li2#GtA#O zWkxfRV##s|`sZjeTWE;p30HOODb+l!v}AjVP5ZIUI?0zhB2#bk-`j~z0*iVA+<`A! 
zTby8)QL7S2ypU6KQwo%RVSv6g-M`Ns^IbRa{T(&S!`hpUIIK3vrmhk=GU-!({1bmf z^_xp^gwfz6D5XXFFN3VS?2A>}D5gr>W+NU@PVB4@e}(U-7)NRqjL-mXCfSQJGII(- zI=`E5Z>4O*?#?1ec`aTSN>tla(a#_g`Bu9gfkt=Zk3~R_y;p?01&^&AhI^0<54UED1a?=S&x)gSJ8m~4 zIHh>r4r{!%r8519O7eQCqF~c$lD*5a^rO{3^N=@VnC#WF9c~8Q+ad_>e@22-WRiON zl}Br>>GLihNa!O|?%}`EyZ)jt4=93eT3`4!ba5@@s>DTrDYx!`^bl=JqaiJ33(wA= zQv3xFYCk`N(Wo*LeV}dfMa--T?i4&sVFVI5yAiDOP|sb47R6gLp_|`lfPImhB*OLRdl0))BtW5CexyFmyz(J z69E_FK9!7@p1(FrJj^Hia&Q>m_ySc5cHKtyIqVr|kf}V~_AnPJXdT<{(Q7k4pHv8+ ztDkz_t&}x$a1>ZJim$yw%-x@w;YfomowpJqGOHR0Jh03>Ztq}FPs~vd)>zn?w~1xV z{&|Pb#!h{D)i&F{i>Hv!I~KghF(fRs6}*=aTqmUx5Yk0WX0!RwF|^3sY56)*atFTj zwUN&`>PnimLuZ zZrPpd5ii^Bp(+zcYGI)r+z7JoPCU&8FI2p=f<|Cf`co_igStI6E6TmBZkjCoIx_!U zD0lCsm&(*c5Ibm`0umpRAF7;bhDG~0mNY44L&c5Aie$qbTM|A>EL-;r5n&>X~TaAe(@ z0ah8_U~r}1AY+yh_%jBgu)?`SjyY4kVYkj$k-vO8rH=VX7;s-wIKs=3ejdWdAVuZu15&PYS?`Qy_SSSC;oKXQ~H zHzpq}V9T^_da3)xTl22Q@R?ydDf>Dce5n7TQ-trdZS;wwu}~uGPk7}8nVNyF%x$$t zM1&gTzQgFX=$4aABD`1&TPs?kADnxtbA`@PY6x{ZPNV^|*quzov~}webbO)h$Qi~Y zDqfcmsc#yPGm8@(5a&Mo*>f==jhA1-;N00=blxqoKLMi20hm*PBO=9`VMafI56luu zp5ys~5_j&4qdQ5D`mtVRAG5#Tlh7-?{C;Nn6=LhI`gPjmuo#E0|ky?xz3GidE}H<98f$0EJ!DqsEABB+NeoKZ*Q5NgM=r~kG>T|#V@m}_(k{;7;W3oeBn_JrXWB3(e zbPm3Rlq+As-p5fIlGl5=)VLx2>8|nUX)jt2bjUZte4$zPP>l0tuV4d?25h4lEonxw zn{UTl)Ig`t0ZGASZA4zv0tAg(&{7{9WX;I}X99 zii*<;&mA94KTr~TID6(R|LnrAEY<+`HBJ>P%2Z3qf%e;6D-I;j=FR4;Rjas?4fS)^F!px$TX2%^hv|=ImsF=SOv~~# z$xaVkvY({2JJ@*~16O&a>Hn))!1;K*TaJxM{=9~Y7OS_swDwkBjD``5$9mlZI4z5D z0IA;!##h9Lg*D0VU)!hDo9}!pE2*?HxKdtu_qkB!PidDV&T-<(S#Zo7?8N~C1en-* z@DYw-U-okGu^s_*>EY>Z7dT|@Gjfj?yj_)AYof;rV2UMXXge<0)yjRuF>S=edj^K( zwF+|2mETR@6f*@luIG3tAz<20EK>T)VzvaE%j*>*5>+ zk24;g3`PNW!f-28SylL|v!C|;qVff@_!+UW7)Sm3mm|X38l{DMkHy!?zBuZ%7$Dx) z6c(Ca>+z@T1U_-chpd0!Z5T^^uIv^|erNTtF z$MnqiQM&`4Ro9&_-``AGk~Iy5-StfipcE(O*2xkY`{BFZacF~8Q!DZtqx z;jOJ~;k)EXZK|>QGBICY25i}%u!rtDW7cQ}`L_%$OR8cuQEMa4XnXGK%pM41iL?eQ zE6xxm-ohcd)5o5G(T{+3B5y+lu)qX(cyvbFQrW_@mjzY1Bi_a{Fx!Rryv`Up}j3u2XP#Akhj zc=y5r;zWrdJoN`5PSBJiN7-5aK!()oe-uBfQo)r3J##K2MvR*`;Y$oeFu|M2Eh=9K zL-mAn0ED>_gzgz{{WtazFC@o_O*s< z9ozOo7FFx@7i?@-g0W~~_zPuv5`H$_->`t&KgmMF%V@?K?(L~r5F z$<%T(!>XXHrl;vs7Wc&_ptX7opc(53NJR)7(sWdd!WIKqHNl$op+@V`{T&|LH_nr% zKo-o+xsmqK>2~&gjw0OLhX3MCCO*Xzbu`8gOaUkD_c3_2#iAJXHV|xri3P+`RskZfPqjDP@zX(Hl!iZAnT~7saa8C!TN` z;~qwVxXmgttL`9pd(ybpXJty5g7eI6DYfj!2OE%}2rCJb4)1uu=7lQk*}yD2dUPD> z>osft5^w!kRu!xee*m41zKxW@&h*R9Uq&X4d5{s$%~r8LkvrXu2knnpYr?c6|u{zvLJe8&{0Cy7v)0|&H zw@(-t``?DWGd}cpPrD{M=rQT&`czEE(6?Mk){n}{5w29zCm}Sln6|va&3qQ`KvJ^= z@`yIH7ZkVlpd9$^{#5uJQv9p@Z&+e25&Gqmep2Z_>ez56nwQg3;I%N1Q({NghvwO*& z&_tH3NZjP4wR2#XVHNz$UK{mFldXr2F_7S!BGYHR#l9J7-i%>M`JOk0BLO~MelDgU zeelvzkSiJR#58H@?T~CBdL#t|N3BPYXTgTbgVzQt&TYSz##gvcPP-siANkQBuD6L& zXrq^u_AFa@3BYq?vngbSXI!RivNu7d`WG4+In*ER6V=`yI6m^ zhPskQqf0I%N{@0~cPBZBQ3apsaIn%f} z1%q?nsYr&5fgHM06G~#H!&d1~7ub1+L9EC(itMv66M&vpKeG|gxC)f^+rN2;$sk5~ z15`)I;KnGGruTqa3}S?a?UPC|6tp9GbY)@t$DR5kJ=CmfqQPIcJ&zd9*?jepKDdK+ zWY_lxmg9?wCGNx@_Z!JFcq&*6%1JMN^9VRk1rvO-+;{)5S9B7m*~-amUfTLR?D@_5 zHP)YX<~7!K5Z28FZQF0U&t+rYc#yh$f|Kq@6(c4dokc3{^5 zJIt+K*e@6Obry4vp^Wntv$xu~hK600q4k22k@8fg0)2^XKZ$EYZM_baxMs_jE_?np zdPnHTk%wobzlHh`@em(V>yN+g*1981CoeM>cwhf3(&0m2{LSUW5C_%de#Kn*8X^gev|1O#w_H6O2B#$_f-vCN(Ur= z_|<@O1-JbPkN1ROeF(O0wKWuYWGF> zS#Kxon&HInn`%F0+}3935Ep}E%O1ccWa8f-0A&PLO;DGaGtFn5_01-A z?Deu_2&1`ipSFBok0`C|JN%(tlPlkQ64{;(?3&>1Q{XhxKy`NRiA6C?)gh&}TDLsT zp+W2k*KCZzSQF$#O7-`W`&iogwBJ4Q!KHiOWNQ6mIqQ%!fQlfz0{WX$GdI*~**d_* zN;oi+5*=ExSZen*JnMzbJ6L!=P!x!wSJBEg7-hH`&Kh>V@C+iEZi?yT6g)f*0R?|EH?*0B~;~FD;hd(eGyz> 
z8e|!WfUMF&haTehrE!1&m3rNE&n_B5F@EVxx!*VIg%Hke3&1bgg80TG91DNunJLEv zN({&Ec7hn^faLhY;x`G9YYZao1f}Jw%P{>e56%$Z=tHJHx9j1EAX!3#01*y7!>H08&aN3)=mvC`t^O6?s2$iN0$Ddu zb`2qGT;W@m0?%y}j4W}yuPJm;p$!$Q zyGGX(y)Pd(eL}Zsvd>5PinjTBrN*b`+u|vk($G#v&}k$PJ-HtUrcXJtl#kv|>lrhI zchpbINPZr-tEp#;z6sjm7N|mW`6amfyz{M-P2XmCiQsXa=~UbTErG{Yfo~+?%V*kv z>g?Q1VVDsih1@t*ePw2BpR!Jn*PS0)y*+vMEyBgGUt9%!t-SIb48mN7+bYS7^Xia# z6=zS#v|E%KFiA%5asGl8n5u3}JE6pRm>nng$>vSC^Oe4>TRu6r=iq&jk{b?*w&bLWFE<5vz_`Qm%-GqoEFl-VN zLP6!Bdi)r7@&(mrSHK?8FSo=(4hdewnMF3c5~40Jm=0x#^_MxgqM%-Q8MQqO=!3>W-oGrrx)dj?%YCY{N*FWfyeoz7 zy_7bbOvz39aSkz&TIKwXJKguf?yoB|xynWH8u4ryedn$+eeE)Wy2caqG0D}^GRMSv zcVd@1Ak&S`co5#%1idsBFe!5K^b?Fyc-b`EeNTkS8Bdp+Ri-b6gB~-DlriHb=QcE7 z#nT}BlffSgmb-R`>IR{|+g^;kP|&GmSn{B!bkWi}y7|VwL&dR1W#hBN&+J4<3BBlb zV{(6@8FTlz(NwgsW4p>h+w89v-e*ucAL)JB+?QY?yd|O#7$d7!!Ph#!(?Mc;!=_61)>3O=QvRBZOM-jQX%pmU#`H&_co+?X=ny)aq zu<-pt^h9KUl;ilASQW3dTQJohvL3gi&~+I23(x@ooK#=F3PKAQ?)o&W8+zyal{F`NUp(mOH1@!y}Uh|hX8loKH(g~in3aR?0XvjarmMUJYJah^k~=cOZWD!)d37BZb! zeQox#ApOS7i|@=x-DKiciQLYgg?*EgFT#+oSFD%A|5o~e%0Z=!%e9d-pbA>TWza}Z zRmtOs!JOr#WX{t>lsYmMZ>4+lwNJrZB`NKlRN)Ov+TrEwJ~zIdOf4y$ zETt#M{CzY7C(<6y^^Tuj75NkFyN37OsfxYxnxhJ#XeZ8G9@1CNEA=w+s}X6m-0OnO zypOH`MXXD769$p>2J)@RZI&7(UVbQ2k@L$0nRfQak(c$MvK~DGiv5j@meDILl5a130$jc(oZm@=Jj>q-9+~z;tVgm9a`mC2Ka9hE^ZlEmt z$!-VV6a(BDj#UhY!}^FvAkbs9=muH;NL-2H-nXe*)MmIQ;x|Jff7}GDALAPh%X=y~ z`}ci7v_;=X%8OEEKUZZQc}2H^Yc(9Ytz#%_KAjENWFLu ze!7Cy?UQ7jsn$&XX}GM}nS+%P;|{KaEDfiU$v;Hy|L(sc%V@eK`?wSQr6xGRD`&D5 zG)?N-=W)QCav=Qn;UfT`+F3bO6nQvF_vH?2R;P{i4Qh8O-yJlYtUfYw@%Xb{Susx2 zl(%DWw1G0>a>XI@BF6LFrXD^g>Jxks<8v#g$48;_p5;?bzA?DghLjS!(M5%i+y`UdXK*YiJa z5u48jGEB!~tP6N?QZ1;BQ<``s3d1FllYwkQu>WK__ib0+$miDLzkM{&z>g%*klek` z$fsB)PZl2*6a5222(s&B`C z13S&aSMqTJvE1w&c8aUN$NHN#XOA|_@2I}mMYP8vAoaHiVeMnd@-5>(41Ipw$~e`f zvq*k{H40E7ityCUn5OYUUx7kq56hDxxzZ`!0 z4)g4<7PF8C$zHvSy0MD+y1Z{^Db<_4Oa+8r&OIMn<*?2*(H}1Kak<$*gDfVV2Fr3;+L&v8L8Rr>< z-yBxjEH8CAS!4Vce@EOZXjY>64|+ zhxwIdEp6#&*D@m{|jOPJ!s02I;S}B2Ze7Y^=-*78_L?dyxrvHy8k*qY)z(?P?i&spm2QH zQQ>?x;Yh~TK-sV(&Ya~PQH;?iVDxgNP zT1h|m{R;#z)8_js^5xZ~Tjal2A z96ROOUGbNz+%Iq&ieZi#X=ZU5@lHzIr}_cci>pued$hJ_vt~XdPrRR&*}+4s=fQ77 zGOXmtuuzSQZ``)TM2^A`+3`fdvNFtH5AMf_%)Qn7n+J?uZV-{sO^!pJ*)VM+tnUHy zo=5~{KljSb(7a$PGQ@i>B#uh4e8^1F{X{z5s5L4*>2b;hoyUo1&DLpk^S1($<4p$w z1cRhSCEu#mmhUZw(L>P#I9-Us2%Cy;8m5UoET53Q*(mJjkbT>k+Z2%4R8=R%L{6=> z7@x0a$W}QQwmkjnJ!4ck^EP;QVjKCn=?Deh;i^r)Lw!p2AdaoI=%&&s4?AiqDvh*i zPfy5vUDgh|@`1-%3Y2>RcX!Z-0WDsq#|&X9XIc-f6g)Ia>pbl0{s}W7{Tkywae0%R z=J=|eHOmvX_BOidH&0P@i=v@MK{~J-s`XJGFbgwIql9zK%$#HjICmK8_i%!v9A~H& z^HRDb>_YByM^1dQZiKVIKqZEMD^>=ORS!@KVBF%|JYzMWI`G~sNHxPAhxtrxnPwDU zxRS3Pr7%qU#i`mpC4G7S;oEldeH_G)l5#u8lsP^X9Z5nsVlmYNeG5&jv?h8n9UuC3 z|H;m2qFF1}lx7MklNXJr$h035dzOu96{j-cUlnFH)jw>LI3X zNxf-&L1aosr@|ww*6|K9caL=&avMnVAdeEJtD1}aXy|*I>B;@gk2ODM`5kj|{3NXS zc^a83eQlVk{Q$H_a6-HXBk!>%UR)Eu$J2)~bX_}XoA|4*uhf4Agm?QG?=W}h94eW< zS?891Eh~yp9-n1pB%fWFZX2qa!F4f1>tmW(d5{bPjB|Q=J6h7qY>p&5u-Ay5{q%;> znfSc3^Y${)7uN-*dN`s2EO{tp+8rx!y>QZ_|Le#7Q8akGqw=Ocg5ckFw?X8I`&G|* zUAf^Fw^3i&>_b*9r!`UwpC{L|Kt37njTu@A?j=;3HX=uFv>OkSRKI?0bS;=!92Wf| zLA#;nYZ2ZqH&%le#SIX6^~D$ls4)fmp*Lv$ZhjMyK5W}5g$YY1I^AM1$Aw;9+kM#o z#YNx3tFMR$gMJn-l^WcCLzY2#szZmO7uf}H$5?wz-R}H(s{qWSNm~2)YOQYJWZGek zt26g>Z`KLG+nH>CjG+U=v8uuE;j>j%&a{u|m6yi;w%*F3i@pt3yz z>QRIHveHjyqxF5Wvx{&^t9KEz`(xXfLbbUV^r?ELd~vJlG3daDjC&ozEwzcrQ|Qh| zVD4?*&%gqdX0C^h7sVYKXm7r{gCuzP@@~F0op!rt;vSY{^)tCK7=pl)u&ps(YsqF4 z%4(%6j;0H;u~P;T*z*i>D}`qcy{k?{&Lqd{FM&9>ppjOD^rH#o+Cs_7=r8^G60Mho z=89BDK|-@PYZesgXD)tgKOSlbH0~;;C!acp{p!`b8v9WUhdmt 
zwXX+@0Nh{>YqE`;_dV;BFlFv1e6sFf4A&-pI6pY6@7|0em9+fW@&$`TSEsp1VW{{E4GmZY5fR_Lf(cI>QGc;?u<8kM zq4lT#ehc`AtTmQX#h8$K9DGYU)cD~14zl2VR{Abz#llAngfGh z4zGgB-^|y<@zC|J{SZYCNdEw_3U|RIW02k(W4*ip#=d_V`XQQe1r0bNU(k?ipShHQ zIPEAapM>xAM-N!yQ51$9ew5HY)-ox|^*%6t%kx9x25qGc zMoRBys%UDGMGk4WPHnR%2M^S=p1}8f!>!G3oX>pap*1F($Ui=2+w!=1m!Q&qLB%a2 z{OfTv>qRqQ$YldnI5XSZ>=ZI-K6)L?esYi7nm_8CKE2wCHDgG@0_yBTP}kIpm$^)P zD?cG9Vsfkfxb2Ob2qnSY5@r$N%Y2}D%Zo9-R>DJ2N(3yaSxGcxBqHAf-o*I&SYqS? z^Q@0gX28)%y^v0f*|_K^x}RX zyCcVttY_TvBJwr-D8(kFdB`fq+M?#c;?27ewh|e4Mx`<9h89l7Y~w!-MJqsVWNu?6 zVDp4V;=`Jk)!Wkb(PP1Dms)U)$j^NjOLqe)EgKrADHA{wJbo%T5YeK1h&(0CyzJn( zKTSZUWxvE>e7%HJ?1pvS-wsGw$g=hp%4qQtmDo^p6SZf-Ny+`%xO=>n_}bm^+C*uYu7m|8>dA)v;aT^6jg`dU3Jn=5Gmu{VO`)k+{*_XyiEH z1BQqO{aFxq{a;fSLbE+Est1lFsrYx6s#!zEYA07(Z$HB?BH0$WK4jTW0$qYBKzo$q zN5nhl{7BDfSWKJa72fpM?QEIP1s{^W4kzPA$$Z#z4B%TS$0%#WsXiE=zwLFJMH5|+ za%fjpBAnjsYv_#rf^49yAE5{U|3iWg;N1~r&$+3Zi@$j?xb3gQyI%Ta3`Kc#2SsOp z^`%um4=Jt8;{}!ai5ONfYUxZw)pzyGGlM`I@nMbNkw( z!?U7VRH8+m8q+N04(VtzC`nH96LqW;XIRf5F4LOT&#kCHj-nc6kH<}|Md+(IsxLWB z`Na1K@#gxwehv96CSzKHC<=wY<6Ht@z?Ngq7=8rr!CEBJkPm_~;GR624otdgyNZm+ zxP$}!dc3fcGZO(uOGDy)B*UB_h6!K|_ZN#bPAXJalS)Ym6T&;E*YoASe|uMHc8^U; z{mKa}0VlS@gtX}*ntMj8n#9n%DtvBS-z}gwmv+4CnxX_xne@iBO_t)Vw_DYNCsIkj z(s!eU&1n8X@0!H(20gURJ^jlr9!WSTh8Z_KN+&{x)*=j!d2Aq~$$oyL(ddD!k|+T^ z`subq_MOYyZTEc;o)PB>-`jpU%o=n>;eu~>HwD5|D{2#G8^k+|6Dg0{{xfJiE<|zH zts}=G(_!25s6pcGkrg616ckrh+`#gKQa~^^;;40->a1xyHHI%H5AB|*NKH?BAJaJR zbjjP{S3Kb&XPiNd!0CZkCT*aPIZ8{9>!-s=re*xC`L6BjJlJDHEx;P<0?Eqa%k-D4 z*QY~0%Bw5BpHP!)(Y=#%G4P$qb#&Rru-cc@jT3_~$UH%U?3>2xco(mwNHkv{{-Iq+ zT!r$4e9MUYLXJaqvR0DS>0v=rTk&ttxPKxp=D1I!eULFD?jlt74kEGbE16Ew?hKDA zzBSR4Q%qz(=q8X33ml~-AZVaB;+hfkkZLr1rFSXFSm0yv{_offk%*MJ4xUHtu^TDy zjtusBjx1Ef*2Wl4RYhpVENM~%XM=k^gRco6tWI3ResktsX`FNT^?Xt!lwg#8O=I2x zZIRZwlBF2dWA(Fq*iqNJMs6N4cAq*~!u&-gEFN+0aX8Fy z%pDnC&Ic56iJM$!%B*EQVb0tSOq80Jl}TwTt2GUB_dhrryU?}oH_v377hB=EXym;N|)H)~nT|!$y#8s*|UlpCi)??1V{MuB)1C)p#EQC4QR6~9gRJ3bCts=1Rib%=B zDBSH+U0e6kW#5qAa|yT;oDIya*f2=1#YT4VIqcQMOrIhJ0c2N9?Pj@;kk!wWlBP zg>nH~j_$-a_Sz9P2to^8grAV98{AKPOQsI{{T+1$l2t$(Y9raB0*+5*9fP;d;)QOK zmw?*Ys@_fb0YpiJIS3x*1))}Tkq^^R6AUVvo16$?@EjRG@ILeVuntGL6C!{J>Y{8G6VP_ zQ>Mw)jd{^|)Cw?$eTAnoRE=Q+Kiq-%-#qxM_wa|nC-T5206~g63BKb)g*vo8P#1cX z%e}FW^D-Tl;R3*M-4oCJRRFcPu{UUm-v(gD1`XmQS0}X&ri6=tcW-&fm5k)NdxH zumRSl8AV}UZC>XG<=&o(TR7EN5Xjgz3+8|0GrtExXkiSob*MXc#J0yju!z<%ez+y_ zxzBh{CqianU2VWeO+|CZqMuHhgrxS;uS1Q*wNmKC)WVV|N0W(&Jl61b#-d-1zF+WG z?T?2^x5EUV!Mb?Rae(2DXh@7U05W7`75lE2T)S?0IwAlWy=WA7DK5ZQK>6-m$(Kia zHm>k7a@k;SY}EeEu0WACh zK@hhCGUx{@ZIj8i_U0PZ*zMG^_mS5gTAt_Q=lkKco&w%e8*uLyrLa!Sk~idk-a{$< z>AAS)%!*J^nR18P6p!V3r}3QKwboEL8`X^$iOMf%%rez%LAg0gbR-!n+`p&%0~tF^W>}&i#i$Gq1|H3D zXZS_kOf^~jM7pG6Q5X4WneWQ08)VB0Jfy%Ep2NM5SPaN^@J>74+KE2AkdMghmitm7 zCll##$l?v{$13@9%=gwGcV6Bx6*;4pJr&I|H~Orm@no`7yE1M0I2t4lfL_h|bL5hx zsEWG*&6Sefpe0_R=ra7{$HCO+-JHEIW3$3zIjh{n@~hl5u-_JwEIf0nRJ z^84X7&``&980C!~Bd!^!4)eYd&}gW0y0bpYY&g!N^eDBke$d<^*@M0zD=+%oKcMsd zh_rORchsG^2dzLO;s38~munG@<AI|#* z?;YoLxfWYX0|GCg4%VDKD3)?P!>xTu(2(jUKD(y4p`mL?ebYyutI5V$6m75ZF`V!)oV;jN>gN??unPkD)zsl=vYP<6Kr!H=e%L zi$^?)JW=b9?gKT2U977>K^tZn_XWP#p~5jj#QIFv4-}8`J`9kHr#Hgl@T~0MI-9*8 z->6vO7ef)$_nIb;@BeJW@t9WER2`y5J?nV*F*D*kZSsX!>?#U6I1Jv0H+eLq?OFTc zcV=``RX-&6Idx^iVUPR1pc5pYCLKQSTkb6b7P%b~UNW5KNR3WNx=*~$N%)=SK&s|V zZoho`_Kb)}*sGu&d6u2yh|S%+@$s%*<^Cd>TRfA(9}RNP zwF+K2`~LK6VIE7f2A;spJcm~ zB&BlI-P6U^LN#-S3dE9v4jd`({mpaxbk=Q{FZO%8-iMA8Dv3pmjm+0k#0U9CP1&9o z*W!$F6}sHKG=0aBpHUn!=LT4)mxx1R?i=;1(w8bsOyFUC|Nn!!FOP@vZTnU#A|(4d z6-5+nBHOefi3uUZB$b$$WXU>L5h6?~6fxOLF)fy|P1dngwh&@0*|!7; 
z`+1-H_j{k`x!?DG|9Ia&q|eN0=De=!IM3sIevjk$9%24(d1wlG1Tq{2;LBPX$uuK< zlJbG4`hJ-~?xc8UHcm$`p?bG4t6P8eCM3OL4^{+Ag?)vSj>yCI_VAt@IcIbF$b#vv zq^*kAyZuz3bBD(2sZK&a)Q6O2G=?c*#Xw_$8X9p=-P;L&^-9X@f?tJBvL)8+MmQ*A8?nmU*-p$Xo0t_pDZnKgw;hwj&s|(68FX z-wF`P)7%r-Q{I=QC)1(tZX#_ny*LQIvN}^47=?A5J}DFk)UL7==u-LGcaw!s7aW ztA+YM`d!8vs*BYjhK>%!0)}RkuHJ6nrY+mlvz@Q;tVD5q-^6GhYgzR0PS)D@et*!*4lVXZ zTvu}3$otQihQpqG$|EH7{TyoAkJ%g%!=u`}799?tz5xIJ5e5JMCp@gqmY^c%23aQn zoXSBX5ps|OL_ayVazIC&CE$i$-PXkf5Psu45O2^#S}4ptz&z8a*xE7nRu*#BtO=>M zBZ5|WqobtC71Are(=51%^ zzqP726b)RyZagmOb8(6MT}TwFyo$cur#e`J>~Hb;gfCQAEYQ9Xmf)qPWsPh-^nJ8^ zuhpLKH{&&Yt`KZ0SBm&ma-u7*S&3=4zB;P!e)MsVNks}Vr;LZS1E{8afTDe9M8|cl zX34qD8^0_+Qp_uC{h$z2&ZDQN3JM6kQ3XH)ls1fx(r5^B@gG#@y1wnHQ&dBg(?!xg zu8ZqO3`ddE6`i+7gYP%jcy5=vC_8HgJf1jxr z8%5Ji=6dWav2XD;>T<|eY{9RbYIlvv2CNv+8oNV;KV{`#ETvuZVCGG%M4=zXkI={!e zHZpp@q2!I3xP!b3A*wip?f9|e)lyDDLH!~YR?Vp_ zmsWUE5MdC8DY$izIAT|clQIqbnGft{c-YM@Ty^u^Jf*Oh02Kk_QtYsK;uT~#g!2*Y zi~*FV2JCf}W#NhzTZ(b9D=xdMv3~x&O2dKA*FS6ZmPQM=m0L20=;RoRYcs6W9FD%B z=Ugk{Yl=&&DK0sG(Z2u8wfUxxOO2xylVHA}_OKxw2Qh`)p~Zl%FRA<9msWt6w@$S7 z7+yIr-lVWmPFU^vzBOFriaVOJiGcGKTXJ+IqILuS?o(iu!ne% z&Gz(w<$>nuSyIvM`$1U9Grecx_H2(Zdc5`ihlO=hwL$Ilgz6!PEjo;42-P;n&?JWG zLJ@hhF%`+#27z*=caCx?XtQ&9H@;htUVV*h<}g$Tu~ZDK24erwHY~AgHPhz!x zNq0HAJqjeNrAEXnnM`(DZ+nulXW8 ziw@&ZEyHiPDrs=*gk7}PgP2EW(LROMM>K`sCF;uKQsCtw3K3W*rB}asD7Y@?m0!eQ zM5N+l@rA~=T%#Rd9t!;AAV+0%=_*UF>c)%$ZR%u3HzO;u`HHW*yJywm2-=2gqTjQ#=`S2%ja( z)(yZ@RHTN3(McHh;+sOz`w;7>qh*w(bC)h{BP{077s(GO9uxrkXhM(&iI=VOUDpN5 zz35jmxo4o_DZ;FAxcE>UePsq~3sq_pbl?xnEu2gZ;tw=4%oCS|?SGuzM`@D2Xk6nV z#9l}_CjUh*21U1#3#c9hTKA$NgX%SC5sy&gcTt`G*Xj(L0#lla4lkl!o-rpkp~8D) zfAmORG!hJU+1guU_?}tzHAm{G?duyE_=sW?R5|wja-{$kRzmT7@EK7n8BPuElu$0LH(n!wmOQf(y_Diuz%+DN2A+MBoOBWkLlWc#nLTIRP#0uD&#xItAD#=7 z(m8hpIcAe+c+2W>;7<--yVA++m)J{-&PUlRH$Y6sQJdzF=zwVixrG29PLI64;Ky^8 z^NL({HBdaxWp@Vm_?4mNdp%dsXJ^@}(?`HMD`TUOPIm|0J^ARnkA8EpwBVPg^s7f_ zyPrGj=m|nq6g1?7=`k71gy?5!GHk9^jd&vURhD1KXqVZ#vQBfiD}tF@E^X9f*;Kf8 z>H~@*V(V6M^$&nRKznW1g{VJDf^#@?`?=73GkF_TV!k`vD7hXaZtxT%6n`!w-YVMv z`k4ZxV@%$v;q`LR49*pYDqHnUyZYLd?pAW;wO-*D++GVMmGNN}qjz#DBS0EY#3Z*F z6tf>mwCiJr4)&pSN@ljc#ISFREeRF+-y*7B=LGU#dz~ga3qPf&lscfELi7vMP<2@l z;C~#vNgOSEn^Da49Mge`0n9)^H{+lbO{6snXHAqulG+Kb7 zN%J~@Wpc~_q0h63Hf3;*LibJuyjeA}V~tCz>P0hCQ1k^dtes=|o69oYFUyNV69S*r zt;#_wlh3lBf__P87*B+?fd&8=k>ywG#p#oiShXPbbRmKrYfRn+zY#A^FK;KMeKraN zE%swzM=bsJwHze7cZhF{^|pr6Nrca$0sn^KoCz`pU+nV&3z`}H+i>i3Gpy@uIV29J ze?n&ZN^`xN9{C)J}{Ff$~qlq=n{d0EVwK$c{2Mm5Y9C4qnXx zuV$~k+EsSq5TN_EI=vHzfkC2b>RM?^c6e8A;sxc9f1;i=O-bpMV z@{+Rv$!ESDIYDXf`D1EiIN|{!2*kXHhie-@0nRsS6xDzO%N|z7vV|=Xer}d5@@17s zo$KYU@m(c`EV5o-SKE%+`t>C`S;swkMJ>%th|qzQ6s{Zs7V?G@roS5`aYC0kSa2%D z1iEBTmUO<#F=dtxo!VcGSDQfbH*GB!bw0cf+4p(dP1RLCU?YGwsN6;w$b|}T#}<$u zkhq4w6GX4yJo8BX0Y244R$1>-wELqQKHTdtdC)Bq-4qWW;bHP|!Z$_`2}<2UQI5Jr zy-K}Hew81JpFqw0_>%nrhpr4!GN<_Y<WWqZ)V_JTAkwg6rEf0!bnIyq)gG z;?_xdRc1UV+UhE>NJs|@)R3W(cG`!odypF{WR+S;lVm@f6b{`ma5Znt%)($*UjLa` zZTQPq9nt;utl2${IZsElqQ%ss%-cI`v)~I2oLJqH;4z6JO$6@FviO!g_x+k*B%EbsN{ ztQ*gHmrp)_Iv6d|wCLjD>43G0(0QKuNr*_4a8Bh*<>3iUs>_n>ITsXH_}Ih#R<9E@ z@P@LL#0-O3bjv}ptlHX~FkMrDtO;YIC&uNZrk^$+ct3NubWJ7J;eVi*SRgSUV?UUZ z6ZU3rdV9eqyUt0ikguX|>&`S#@^2kwlEFzaB#H1@UWt>&RvIM9v3af(Ff5dH+Ix(tmqB*oWov+D#f5{=urI~oTqB~d*IR&1szKc*H%Znf0f=hc`s@9 z`9bLmJwNhEx+Ys>st)=v#}>g-p-lzX(Z#x+L#6nFi?z6KD^0t9JbW$`Z^Iy`1MCa& zRO}8C;Kbbs;$gTLkxq6De^=jyvyUmQGTxy=urS&gwUj>59LFKIG({YgHQn2!7Oo$& zvVXqM!C;}(kmX>|Lmk0_5LzW@28UsbxeP^A&PaDlR5uCc+_+Ky#Z=Uz^gQ|OmQMWsiE~E;JxxrA?!*X7eW6+R@;Ijd zg^iKSlYHKRsIDHRBTtHnL;kpZuTQF{S@^l$YOif5c+q=)mKj5Cnn)W))2-&Ds1Trw 
z4-M4ir90ppMd%%QnQ`OVem^{dTFW2pUnez;$CTE(AYG`fY&q6(wiMv!xj#c6u2BYq zuTh=aeR$nJ&-?layx(qndIo!w@Du%Rc?QMEgoT(*F#1x-?V57F&dnMEin5wdpWl`D z+eMhzFt1v#zaz*47}`g|cWpUJr73D~3Kq9jjKLTBU8uTQZhWgoC3VoX?YX~G6aMzM z&O=}KTMb5iQE=4Q=a92eS|z9z*BABJaX7ZKt(Rnre1&-NXg3JneU)5A>q%$~J~G>= zntVPLIV8%gYC$HUEJ4?Vv@1Ai3AAysfp zabk(P+F`0c>e3=tm8I`}NbBkH(&zsy#6nk&8>on-@qa=`PK=EG!fw6d_tQDUL?GgE zg8Y-a3oc#<%;Ry0=GK37Xpo* zm3qcxB6=X)aUI=sY`PKADs9WV(z(4O-#M0%VR?`&yJ>P`Aby&YK>+Sp26BS71#RG@ z2Wk3)EluoXc0+bgpW{rgUozy|ZH<3=N!6x-MmGuOW=?UQu}y`h$A;%3OmTz!$=x1m zKK`DpM@IDc8)BEWhCa0s>bzG9DP9vU+IXF&54?k!ug*Ls(rKltulJ}17WJ^~>ydkE zGyCu}SKgHa<-n`TUX?t!_pUuCf8MLrKqKYu<1Oi&upsP)d3^>o(JR{@MzDbI~gd+x_7{_~x zwzA}QF(zsnGfWZ;$T#p}IGb~?CBE%FKLg8YNp&Fo zxSodg_>H?H(uowD;-jLJrSaelP`|!|>GF%l7KT8zGjL{8e@~~g0L=%$l9%{T)9DQU zLpoipdT)6Gw#UZgZomT^rsFH2Wn$d>VQ$Hdq7M*vCnJppu4AE1LRLgTdSxi8H&MxXxYewJN3nZ=5WM4_ucd4AeZ1x ziIO~z#e*6Q^|rVf5Po(sUIDNp5@i1gPGMpEWSFk>>2Od4Wq!Cd=jf;JvOBi~(I3Jqh+T#%IQgpjKY!$v-rx8BJ(Tds+*4Vx_C`U_np zHogwws}wHVfayZh|-vS}nzhg7ihMfA#18l}>h@|2I z5y1qQS_+fN46yT#D zJ}2y)v{_J`KOQ4TbZFEfztWdr%fSwGo9d=PY?z%`YS^W+73;Wr8(1J!j(jFP-)eV9 za?3#V$ORQKIXh7WDgE3%AUDaKfgV@}96^k{yuKLxI?WDO;(v7BV_0ww%8?b5V5?xk^UI=^j#K#c``W=I4+_WJkb?G;;K zkEZH}ZhMKh5?tS&X$=u2y-*Sx)1I3?$x?z7sWf_l26_F27p~v$lP1Ah_Djx=Z~G4= zt!77&T~+%>w>9m*l_PyHL;m#iC#%Ht!o+$8?E&q>awUfGmXt1_LkJcwC;t0 z>^OQMXf=dLzb97R+GLrv5|~FH^IXB0`45+0VoP%p&@Qy@S!DSfZO z4&OwBqkO@4$uE?1npN$FYzbm!BIh%{ZoFP$HJjgLD)GXmNnVR{A7qpWMamz<-*?oY ztHnck#@iE2iXUv#A)okBXBvz#P;Q_O`W~(s7{9zT?S6JHNIZV%1Eif&S-& z9#%eqUNMS5*2|r!amWsNiCY2LdZC$!_#`9(4aDlBNhbHc^h=kugSfENjvbF1mE`-~ zxdV3EyA!>@&82~~)oy)n0OCUtj;O6<$d>GLj= zN98+K&9Mp&cA1IKIOOU=XG@hTGac=lYn96&i92#SOw|0p)N_`lM(9cmqIe41{p?MP z+rKWR`Tz+f;gv_Ip&LHBT^IvV5#E>i7hZ2Cf5yXF8>`uTbh(F<{o60|a7yb{mq#fn zBv|c8L8=;~xC(o>$7XJBNx)=A{>-iFz`d%UmuS?s(*1l(LqRt8JDf2Qo;H(@0R0<> z0D}8%jR1}%5+qmT8Hx(g)y<*5y<^izqg!y?Z7_6d#JZ7RNn@y4E(%K{m_yg8Y-el3 zs^B(8be+6WA|9DDF{>xtxdGhcag7xW%h!!K1S;Yc&d_}l=kyykY@I>To_#_+6q9wa zd93+SkoN;SANik z$~0JJK~@wFS!Mtsdvdipf6t#;zgOjr#jaZ@^4Z{ z=Xe@P|4h$vN-n9l7&#j{ti-wBzXO=m=GQCMz>pO|?If}v`oWMu9s(+&C+k1h7)l!f zm}}k@5{0JC!OB{UwL=V`fqLS~VOk7@c?7Wk6O!bq*Gvkii8X=2N%V4TF<=~Sbi6vR2lfN0U*U&oV2Y_%ijWG;vjH!vXh4?_5o}9 zw<<_VF&^HR8itO91|C!7y-lw6FTfU4T+sXTpim6~Jkvl}$#Wa*yXYi0Qc{;~Y5gKC zMD^QObm#)woE~NDQUut%TWcy_-94Zi*-eCW6fNT?Qc2mStAoB*=AUgp+2h_elh8lH zogtJ)eo#PxcP&pJ$eeVfIkhY3S6Gl z=t=M*Y`}2x{TP4r zWqre8VWP(8CFif#;nz%0SYnuXe-)~olkjt-o?{$|AK^SUk3XcVZ{FyxSCkq*^Tcv2 ze(sh+;-Is!`T30cLX&vg0M!c%bCDFj3R(6w`g5Ruos-c!m83iS$k^4pG~-(ItYxgW za4q^2Y)hZ2CJCj(=IPfvix8RcO+}_hEjh9$x_2E(rS2}BRPj%F<3|7fywA+wO9%OC zpzq?mz8h@Al1ILPhZ$tA6k8z*>tI@zY=tv4#TyEX(C3a0NH@WitZhgWR&L2_NIen} zXkU^uxU*@hsrvx;yQ_7$$t0EX%(<_@W-jrUQc$l;wlV|f=U)eRz8j+OgZ4()5bP%1 z1D*qO;qo%?TMJl)dvtvnGY3QV-`2ET=}-rPB@)xXa+EAeI|*C;Fc>QI)|t=^iEsH1NX)ac0mz+{au)ZF0;mo@#$;hJ%-7H zoSp5*TGQ+UbtIjOLlh;?NA2Rv)w{#R*Or<7POs*X@jl`g6uj4;8`(TOe0NIiG2q6T zOB_?+?cVb`gGtB)ZMFp6rB)}{OQoyG8lhKE^28ebRw?qvFD=c)k}GHQ&h!}qzkypF z0o#vf!5+2_Fdqw8Ap56UClFcrNG+_(x~$+Q z7{7m_U#Y#o-`cCn>GlOpJ$;8Vmd)fg?b9KrzgdMJmyaJ9ZCZ{a7%koo6}vqD#Xv1L z3xR71p>Tok^ocLQjR;-$~Mlzq)jnf`)z;bH#j{zsX$KS3EATvEMrV(CJIKT z*&DgK+$h}{zKidE#T`MeJZS~U0w_T(qH4;)bI;Znww@kzCm7ZW{kht|y_oZ8$0 z*6^CjsiE9u&f&b>V-^nw-G}^z*c+}@RFFzdvFYghUM25dJ-zHkbva5&JcJgzN+m-0 zx2-|+?EGh_>0=q_MvBK2!-T2u4lhe+w@Zw9?qT9nQaVst* zG-eYY3>JPh%)*a8cKYMse9H1WKChr$KiG^>bP_LfdaM;h#6l<$=x*z8fX5I9bAI&+ zenz0+THh?aTJlwTicqtI-f|;N)89}99@RFijK>51AeJnn;ws#r9swYDQqmnnI zy94J&-mQ{0Lsc$9*N!%|SvuCC^k(2->Jm4Y@pn`LP+s^iG1ou+KBSDM`zx{2PlC1b z{%!^|p)mv5bg*B5l><_0BLy^(#!WH6d3F`7TLrO{4Pfc~V=E6Vz*+x`#vlJ%<#FqN 
zcl}>D7LS5H9CP;wiX1TCfj8(alz|sLrXNs3eyK9oJ^GC5)!uBj6(hc`>E>?5Es9hT zlv~*N!DJx`*v+S;eMx!SQ2+U(0r|W;vC6Dd^vGG4-DA$1(Hi?=88A@Xl1Fl`N=T1A8S;@p;ObZ;AZwsHwDAhWL z*5(P6cs)kx$r)ZNPPzu&At=IBa(yUErS~(PUO`^!X=H)aE&PSVQ z!Z$#yS4aXU&&^h6>9VEUyG-cK_q<{6DSn?B*^>#*eSPm|X*D%T%z#y|l|66M|C_$B zyN7pE!FE*N&mCupKHo+bx7)k|(aDi@bd_Z)mTn03 zHrG+!-k#AVIt_k3d9E)9ThDtZTq|U7^8i)FeFJkRCBt5|NzbrNm9<&?`tEa#lC6!6 z0b7kE$v=s(Y9d>>nCS%^#`~b%7$NpC^E9~KD2DP**TCaWvj-Kitw+ah3m|)Ru=fU? z<~#vyy}(*DEH}go;2gz1!in_B*80M-_c3!b(I>Op?lf(tT29tIKb$^$ptpl?abz`RQM_UTnpav>Fo5)EjIFmT zX{$;}&B(^%h$vKZoW=ug_p2xd{3AXRJeD zl9H5xbTr*YDE5FU%LVSFRyp#vWyac0=ebxvi+fwUZzhr3S4*v%t7{cSsHeTx{7m@~ zI&h81(nFf<4%=x)Ec#W2nX?*3kUdvD@}~nHI~H+omd2$ny9_%jc*X}c=6*JO^2#E# zJTq_i>X8z)`cFUs8Dn(} zKra~N!+FWLO1CMAve+Xp*;5Pc$mZgKS85s#(9pi2u_l9pNTRRTr~B+pXz7(%VB4-= zZeG|}xI#t_9B_1J&6YyA`NbAMUFMnn9IXi;*AhPzvj zfNj54X9sbP-y|kfaxN}vv5|cSp7_nBG=uxi<)jA|`|&7>JFco>!FFhJ$o1PE6|WKN zX`;FswMp2bK=IM9_=kWPnoWK2Z`JYZ=5uCxLFeb=jK@gL8Z9=lp@aPzs(gf=zXD03 zr~Mv)gZOsxWDau%nC*5LoWd__-iH=L+z>0uKm%&!AUYbw)CIAV%&kD&=+(bb4C}ye zo^KRTyT(q!)}mOVJ`|2Z0woR$A5=h0HHYz>m?HG5q3Y@+hYSp_4FO2~M@Am_df7sD z6fl)I)VwQ22AXz%2Z)CQ$56d%EQ9<>21xtA8bKAh+ph-ey}IMXP>Z{rBiFYs^ZB7*PiE99ZD-EU$37zcn2zb>N$m-Y{%AO*|KrK zl+EBAhDcotRA~lXVi}_c^;)cUcP%#Uh@5Ss&Hk*)m^=N#sS6HszMr1kep7E&>f6YZ zYv(;%ZOr>_KWHVFI$K1sv7A`S02$&V?EX)4$!1*hx)y|vvvHl9EB7KGaouG zcw))gKp<6ah~KBA#m8Rf$Itb78^#BS@MdztYeR3^-Z;+fCoUgl8>8|dARfwkQ3BhenE7TbssY``Ng= zD{az(|bVwMlYE!OBe9L;99j?SWywa-5yFi{^EO#;=FU z1RV@I>#WS9ShPnD!NLnlY;nqvM;61R1`ASroG4+fML9$0t!_ojmUg}DHmO~^`7gZH zssCn%?iNE5Dev&pEF5bOq5e0Q0OeZmHCv&OprbjiBBx2d#xrSF�_<4}2Ykh=Mgm z5Z+G=QVmTSX#3{?Sby~(Zuv%6sz{r9`;Df(8S9xBnpfXRd*bD_221&nN!;ZA?_w%$ zJoD;eE^XfqX@4BelgPBWo+q)g8YXIPPtyK-#`pj7_jyeTappt4ukCNl4fzkfPb!SI-)9NBp?{jW^R-f z{|o+E_dYEob4-0#@#}7N%Yn7DF0g03A#{`8?JAyGM=$C6#>lCXNHn)HQ~cQWi+hGQ zr_|8uiut&VY}bG~TSSIA-hy@|gGS*(?5nqmDsfv%%AH0o zES!r*(a(cKLv$3WI#r;!w2=O~LZCc#N>62jE`jN&`$bS^tgXz13088bqrOs*GTjUm z6B7W-BI+F@GgS0E&okF`p)WfHe`{b?0KYf-NQKrVnrd9?(oic%XLHq*_U;Mk!BB9h zoB?Oc5GB4P&#fuO-RCppz6aDlpF zkn34m<@&$6enKa{+bn>I3vXg?m({HP3Mhd;5CrhI;A&{V;S$#zt_oodQ7Dj+ok{=( zwAisgo>)DpC0>lp{LNK!9E43N07jtxfDz!sc-FcI0aye5foj%3kkT)8`2KSMT8`sp z{~ZCXeeXXGWuHRP0|5X6`_8`upogFX?9Q8WoQ>oXEIX33*&N%9|MNJ;R;Y3g;2P>5 zxF+}nWu9>2AK}j~0Dma|I1Ip_f0+2+@Q={{_t%K8;;v%rcB0pi+Dj6`KQpMq_yFP> zh5ng(>z>JgXCS(EhmaG5o%G4yTu=Xw^a9x6$%JGgeRe|AGxYqqLCoF!gej`yuKPXq z_Eop_3U0Bp4Y|`Qt71nhjM^#gBv=KSDbH1}M*nCU7|n6kTdr5%)@CbZ^|3iw<&81% zoF92S5aDG3YPot6%~!G87U@RQEgjCFCE?kl*7Hp#)R`dQy&9<|%TOR+MJeb1RI}f; zOQqCTU*tiMb+w6rt{Ud{9#yZN2K8O#$Q0@(R0;eQ-BciuDa?uJ9m3Z>5OE!*L|GW+ z_8X`S;c(k2 ziU4s?aw)&{NQGoqko&;}pM!QawCTEow0zaY(TV_0g03(q#uNHn&SMmR#Pl%QEs-EW(R=YyB(<>^vJVktj-Nd)ZsT08sxV<{K`oXM`jhrG;g~!h27N2 zogMyZvq)hH2W=?eJfmF2Zw!(O-yyqdoh)4X`B&dr-3G5kUxo3F&`yYsq47_l=@vq; zs#zLisy0rej+pJX4Gy_Etn$|WVM3g6`}3V~OXOL*3g;cz3uNqm$aS9D3Z3Q{5tEwU zN(#z{FAg`O8BOfMm9?%-!N*sw|Ffz;|J}I%H&lUI{$=h3X!=uddDhfg6{C!d^iGFpi(u5}b)9(S(8ZLeV>+gB` zbef#OI&n&8or>5)L}R{ij4jo`!rNE$aWfM>oTeV%i6wG2PH}j~v#z-EwTH(&c8pjV zgKFea^mCQVkZZXn%G3m$Ei%E5sEZy*qbzzQ;YVIU3?C%?ym@1(#8NKp8?wM&fQE#G zU}ZcR zbTk{W1AB=CcKoV=?CZC|Zp90fvfo^~w7-lnVjmv_Cm8`y-}k&7+P5}M^5VhtSgRlU zh4w47ekc|H*AW@Wb~@OpKweVoH&;pm@I|#@tDBU(YevmaD> z5*i;nOlz_LPGyR-9Cp?z^UUOZ>aa*ZaQuC z=vU48?-aNMXossPk2r)*Sy56rV=H5zPYICI-sbn1ncVKVxHjTvJ2dc>*1K({%I^d7 zN%13v<$DME-%;5D4D7h-A2;vX+RZ~WK^6c7K7?m#fj)K@bg#0^;a)rVvF=&9H=Fj( zNF`>EO2u_`m}!xxWCln;oQCuvqPhtchFV{y=`@HJoM_y}FQ+fO1$Q_4`7P?tUtM|T zXFn_dm+35y{YJzNjCp6oZl@>lCb){@7~^wnqM7rkYW6 ztTOi^i@C!2s1NV~5VOA~v=#Y|k{-oRe-&GQ+*MdUc;@?agAK=ibBUlDKwo>fgh6J@ 
z0S0H|VY&dQemM=3nZAW>$X48Kd9xJH+hcWV=jXFr6l5Bs@5ba5i%mgvPtwc|kY%i* z$rkSVOer#&eyCoE-ryHlOXd{fHA<<9Moy6qU6S*yPhzTx&uswieNU~U@IsXkP{8Tq zx8%GjJDJYj%K^{Xg>$6wbdbb8wlA$mVyof0YZm7Z%8?FmhgAtKuH%4HRwWupKahC| zH1r3}a8Mw$Wqi3`T7O4@1h(5Q>HchnL#<}mWb2_l^S|mO1yyHq5k+Vq94m+HK)N9M zMSPFQFtN(v%q(jlHZrT|Zl>Ci3ny_KD!pM){A+c(qn^t};BhCVXp(ClibVm{OXM4x zUxOHflF~Q~B9t$x$NKxVKfEztY~bBIHG^$Zn$ZD3B^opd(3jlxjYwBG)}_C*Us+l1 zvc1gN-(2PDdnw0(n56usi&{5-y7zWzfF(?~ zRli4jN3Dc<=j!@-IpOT%obrLE9wq{z`+F`HulD+7PAGa7^Hmpe5i$^fA-v~ibQnGM zuhI7?{OrvG4~*wQ+s?Dv@!eSL!7D5F%&Mak)Zd1$9qv%@CnStWP;ddgQ8Xg;h2T5z5m&be2??I*rF*@|)Pm3y{* zm{gC;&8&Ch4-)jNC_gbwD|p=?y4#s~i|#VLG;m*>W=cUn(J*2k#7_eRd8)mN1n&l1+OXB)M$vT(;5hu}H_rd%kFm$hkDgYQ+KAwK*pV7@9lJAT4b6Fp3Ml)sTGd$${dwJZDA!V8I`?gz>Hsa2|{~L@%x0 z$LTY~a3n5%qz#;Ap@fD=Tb)D`5}{QSJsIXvP9o)+sU`C&J(!t6pQ>6GDG!-@WV*BU zS?uulrysCq|8fZzbEXY@DOV{xEZbp_Uaqm^fIA6$E*d?k5jOvNz!+)<20n3i1!&Pp zX)-SWpzb<|VT%ARk&%lYd<>MkR(Ge0f;FH)`pqQ`l9Tc<+yD8&FNDBf{pC|K{`yrQ zorsHm0U(s_5bmc0SwCA|WR`dx&5XzOBh9ppv$}fzgDL20ZI5U8VE&Uk-WEo6FyB%I zq+f|mdt2br&W|Y@n+bk(|3#2u0}Bkf$;QB5Ll=T`VwctvUabVuYztRx>N zgKqZt&2{~)?cVsU{YTP*-SN^FFH&q3xku3{J{qFa+jxR2_h-)PZFDjb;xqk-GDu-q z>m122jMd1Vp!L7EAdu_XAUiMQ0o(jNMreY<@0}d>C@`{&RbaRwml~7%*}GvoF)P@f zr5L0S#ASTw>PGE3riq%bnBA2AtCb?>}xR0ge#~PBTp@j6fJRm5zGF+D}uR2}0dybydn17!0b2z6k=$oaPWc0V9 zOWB!8BQYiR(A1X!D&2jrt~C@|*G@j+di>V-uqsX_sEp)I%mmTS)#1i}B{k;ne|r?y zyulBBt|PoWFx%cF;LRJQ>)-47ypPuTQN_M@jqODjy408udtO!W+t=Rcx$yl;LEnev zy9yQ+9fcoxakSJHzi{dDVaeN3} z|84rr3!#~|weZoyJBDpqSpc&E(tIEdMaN69MM!`r$G-DL zH=AMMLu4k)ur_h#P)^v)Cm&&U>I(okfGZeCBQ1Ib{2ju1TZ}lw5dJ4xqJhn43&2y! z|Awd5VBcT=2;jcI!x%tR{AmEdF(3p$Ki9l^wEW+JjP`#3GNa(s|ECccI21q|DS-Yo z{~i6g`~&)H{>un}{{B2$^5UUCp}#+m_<#6%ENj78*Ox&1cC3t(T`*6X&d=aDQd-{s zPt?rj^}``ss$fdq&rGx=049-Y(+%>MyL-PX2u~~Bu$cp8HVcRVa9SaFq*A+(6g#tc zR2LfN-zmKhTeIVuu-161{co<^lPg_|n+*;}XB{=MJ&>ClCA@Z;)m~?o9<}-Ji}tU- z$?G_2kSm~8DDRs?vH@fQe_NxTCz)q$}^aN777h!V}|+1_4fkO;Z5bN zVTsI-LQ%I z0UR>XFqY}xqKnCgA2>2%GsN!k`vmJ%k&_7il82rv;u7%&Vr@tlnr;eimnu_6|7{cOhHmz@<2X=z||*4gQzFwrzjs z^(yi(kyo*y(f(EK*S0a%v~D4~&oWjZn9o>MVd0r^!-DZ7bP4q?Vj4}yF?E65p&VVjH~I?5lWWcEVbij6wNT-br1yh^dmqZKD4)Vk$-tBnEi#2N@RmeB&rxNbq@Sm4 ztk~LTy?;*RemNQyC!nS6n%C8Ai zAlj@ev-Y6>iN{yk{FTV{*DEE$;3jmtAdxv?5Z&~T8PI@C;y*uf9qN6=bb)q|2L0u4 zu8)71RUSX4o5Z~N_7W2<#Mam>wg6cqb(^`a&^Hof*+!p|nX#Pr3N1H`AAOT4D6r7xb^mq|@h5jQ_ec!sqWJZ< zxgjdIHZ*OHS~s-rm}4J7rt`}^S=nP1Pw-zAyOP&HR9P_ z;R=O87`i_GC~zV*EuZtqUpT_6R6JfFtHiGN<%QvMQ5i3|F7YKrsm=&x@~si_Asj-7 z!a(A5Ja|S>^|HQoA!9xEos*f=4a;ZEt>GdJa$k2=RKIwtFYkT+1G-!rA%1KjF-ijz zbgLAz#X_0}(V@Fs*?Kv&{1~^UMk5!u?oY|>8^Xsvt}heZS=*Q(@uMA{417Hb;?faE z&mK5R>SUlkSCd_p+WWeLS63C5B@kY2mMSTP^!_4AGUzyd1P9XbXH-8rCDd(sbF$a- zC+)1w+%7MIAU!)`0JC`_m5=^qC(W$Q{ql#R$bh})UK*etk{c+nT5%-XWKalJinw1$ z**qzs?eHrgHsQ5iX5N-N8jb6@cWhX%mH+X?QNpGMhZE>v;?6?5G6RyM#fZ+x#hf^x@vB)kSzI+pZA@{%3nUHnv@JW`$)=)X~n5m842CGXSjEM33*@W~-6(YsI6`Yis# zf^Ne(dHt&whZ$raScgFySf1e?1mXY{{BTrLO~7^c7>(z5tBSORzY}&A#O{2OGd-;F zrNn_?D0c9tgSM)$MRdGo!E-xo@SA`Rv3WF=(p$_lJp;5P4%=0VG)5?Q1v#ml#Z{Q#+_ z1`54lZ4N|_js#JH-vmPjLDfU3-jhsgI;pk@78=6JxmxS1$Hdv2Bso0Z<1P59zUSaQ z`Eu3!^c=RR3G)!+yr=<<6VlH9eZ89ERE68J=NR51BV*Wr3FNqJ zhV1aSmu8)qLEjIp-rb9HEiCIO*a$e2kR+r^UyiPb+NOr=8lRugv@~|#@UZ@3)7~8> zx%r3Y!ySK&mN$co(xb~TQ|Z$c%CN+cJ$h?T?_1*v09tj=25LVSUb%e#^2;+9A12}7 zUst`V)pIz-tzehrKDD7XiaRk7$Su8CsQM${>%Y!FUH|7XlqV=y_c3C=BO7cf$6A4O z7?bjP5;+GA%wtwgg(m~|!)*w{;KZ-a+Ikqx*?op{jNAlP&Q2mb z9nHK2vg)}{g3(vV-}(NxO7?!Y{_7<4e>ezn{fqx!U*6G|7xpHxxJ_|?O;*a!Fe~%# zkNmi7RuJHZJ<$UZkSs_u!4}89V&ItT>BMl6>OF(){?HrO+l+&50R>9xTV6@2$~C!w zni2lT4P>AzO9mYkXLbSDb_m?X=dM6$3*9YK>ptMVzN&Yo;z-x_TaOyWHR6UWG7p^Y 
z=r|%&Hb#14GrvHeVuo?@z+or^UEEc}i@r)0{Rx!MdG7iYw6YD9ZQOUVD=>*i4#qWC za}C20p2tvcQFH_FMD|hdQLdr+E<%;^AV0N2qPQh!Uk+6*Y-4}g^_@}uM^@~55Zx`W z5UG+?rp6>`9VkUc(@%iR$J^9q*`I_OAYpkt+s@DET=%Q-qSNo~$}B{ROkA&2vUDE$ z3Qxp#tZp14cY`Xy1AwzLW6QQz9Dzlw@Z4;xwt-XM`x~5*j>3r&)t7=B7@}K0lY}0Kr)}sRRQjyrQc_0o)X&Rv2xDDK+b_AXo|RiJ#>1z~{S$qe{KJds z32qj|7I_B?u?}B9Fy5m_Zaz#37@ECtsYy4*;hQ#MwUqMpYk5jp;%$$22HO&E-x9Mr zf8|jC*TyqnUS>TV5Ck$qcfj4yVj=uM5v8d{lSVg1!Adji;^Nznp=6xj!l*9ov;S|+SJe(TlCPW&>G|@BOyf*^`o6lpAhdd2|3h1#*=_> zx0S0R0^Q&%g1k{blB-ssnY%1|*H$;QxW@ero#KaHdbjR);*tg$XNRNwDG@KoQ~7t$ z(Ga|g#Fp`V19}Y8ZAilGz^*6v^_8ELMU31w7#d-9}lk_=`DZ4Ib>i`HhtR9-c z{S}BwJq4T^3IT*emeW@+}}Kj2aXCKd5`ps3yC1T@*z{ibMrOK!{53 zDj-$zsxM7cRGQR?fOHV)5E4NFrHTp&2%$(XQL40rj(|w-0to>DX$d6+Qrsw{%pfFoq0Ggb9&?<8c1e&0G zf75jS@dv+*90S^QbAowG(+$cf1^%0LEjbsva*Q3i5%0D#MZe7`L%0(b!nq(6ok@i#crM@QO&!_hBI)bpWqV7fOBAb zvwx%bR06}?h!jJcHl_te0mTyrFL>H=qRQ}Sy`1w4j^ax!l*3;)Sp$uAsc!gC%~mt< zi#5Yp`nBhz#Qb2de>J$Sbw67SYZ-1U8yX)k8cu%1EQyE^nLa=#!fFD#a8n6*T&JcN z5B2U3Llnnk>z8clUsl%ddQ&g&=`UZCU4>36QEG=^E=>x*d50y2tL0Vr%5_!$2zYuG zx1%*aR-c)a@8*BSCAgz8z-;&ghPPPYZ1Bmzav=h)6&8hKmsfO0LS4SBe4hK^De>@t zUljUgP1D@k#U$>oqhdhqJ~Z_8kF7@C`{(3Hu4*5|a@Cg8R=?Nda0|hA_{gg35i~34 zWb&L&!rft9Re80*>qH@$n8Tr)AH2VkSjwltGUG#vqCBrL^g1yc`p#@RWq9LKMZ#Xl zr316fSv0H?TkxLhbNS)%U#>B}!n-!|`6W5q69@6pQe_Dz{|R9I*KbQ^W~?4sc83Pe ztJ&SySbAQZE5=~SeFW1+T%bu%S8p;3(L~##01uUyDdXlLJepmZrM!<0JTU{m7qk?K zVAKXqsFRWlyb6OJo!PR!I=EAgZTwlu8rgOOZc26dS&JoOpZ zQ!>W9d{HN6v;m?1_aFNef1)CXe&S^)Y%JdJ3N*%SvMR-|k8v3uNdkrxaakXzowDr3)!K17+;Gc={>8#S8z^zjia*)zbXAg3}g$aBU)YM(1N_?t@gM zX)L!tPZh+H`2~iL0hp*K{~EwTj?4|YYhR+fMvr%)&djR#W795Cg6&W3$)Cc#LiOKJ zT8U;DfOq!-UrZ6@h@nbIwltt3aFvw!aHffI@hZ?N{Kazb1Gl9fkPsGXoAMnfS)S`^ z1(DKxMQ;JQ__VB6ME(T+)?XE94Nx%G1(EqB*`@z zDWMH_gehR5SKzFD_VbIo6}7Q;52Sdd=z+hQZbkpA*px19c#pXq>wOns)~Oh(R22Ob z4?e$$&@aRssSk+Tjgh}KG__<_f9HC{)a~WGnoTvw?$$!q!?+L7$u5dCBNyg&P040d z`>^wga!iz&lHWPcGe)yZ&N{F_@__jsJNQ9AQ={iV`xl>qyn5MS(XcEu-SCE*dTQGv z9#*xlM+)OWQM+Uioz$r~bu~?Eeke1V2?F1x4P8EK9q2;`mmib@=g}0E4iPL*o;URF#NqLH3zH=^?L_qSEYt+b`a^{$t$GZBlW7 zeu#nXN})M9Z-uzva^;cx6XzycQ7dVGvB=ON;2ge8P^91n!B~0Rw&M+NO0)Qp>W*4@ zi@I?CB~u=5y!LgR`DdLh5U+xh?V&iR!#9g`{^un7AGg+C?NC7%6hup)FtF?4qt6d- z44vCM{}je^|AF^Q>xH$%D{o`1^D1ESK$r6u%jtZwB2c%!2<8kzJ31!&*=`%OD&l2# z-0+HRueA9PGDBJ-(Px~qPg_GR(8+XQdaw@%LJ*iSb(eO17MMuh5RfmGP<`vUc)N^Q z27?$~hu5f(=}PR6SI7H`72~$rdSnmikc{q&WIoqlJn%E1KgGczRro7eHqbI?;^KL^ zb`lF8#c2qd1S384nmz`dRHXbd`fJehPg`;FgPM1FQZS9G+|&B7)ij6kUl_VjF71K= zD0GgkqHEY<*Au2yDyID7haeyRC{Vhk#2`kn4Xf;T(SqprfWIZ5F_XhI55Y>c5Va0O z={GVvKTChNyN??xzpME)XvdNXbfI(wQZ4W+7vyhEIC|s!&<>|sDgNH&#Ir5-8(ngt zeTaM>2JT9-B-3VS&wdruJGOeO0`&l{q`D@XRkc5EI>;E&&^us~mv^bvU{vk{VU}m$ zJ7#OJIWtKQ7=G*O4hmgocC?MB|;$~nb1)|cPbYkPn?)dvu$n-yVeZX>9jJJrZ6o%~9k#K+(>F@*4D!3+d+burOHdT;%2sCWnH z(|kesH=+@r>-6W(#eRGjO|DoI)-ikOA+FXG+G{mtbD#22?;|0TU&!<>2<48*rLE50 z;dK$)w?v{B_g|0${yq*@`V~f<4dTo z6yAf641bRBCV)=hQAd(woc1xQ_BNqBE@<9Ebglkcv%_1pPl>0t3UYe;dxwu<>!?B` z9ts7#zYBB1R+1baMe<;ZE96nq4K?#)ySZM=<@}kZ&LVCdA%OS0SOzrUaPZs@h`&43 zQGuG28fq`8o~eR19r9Dp9lmFHG%l|(>-fW^3yH>Z5G~pmPr(E0vImQAGp6nZ2h}0d zg});13Z`f|zK&gKgvmL|3|9J6?F}qs16&yp(0>d6K$jmw0%}l;i=WYpw4=ihX99Cg z27RkC?rPzxty%;;HCcivm`HsCS6=?t8Jq zNfVYM3;viB)JfVo8;6mP^zZf7XZEi9#J8dd$DnpROFd^(Jr~ zUIP|HV|bLU8nu$fMZZZ6B2C{$Ta4{q)9k6Ptg#vDp}i;Q4VYI)^yZ9Rty-b&p%pUZ zDB7~h^Xe@N4`t52EZn-kWKd$GwxTdr({H~&1U^+Bd>eoSIKii;DIo8?10GND`CX}y zt&m#KE^)cbqe8{WkCpn`acQb9fqKCtII0y+l}06Kb1v^axSGl(So86>(P>W3SlxEt z{%&|BoOBcN93=pALQl}lNP3-f+_QmhW@vbQUHsVYRsF|-Ii~Kxr#~V2{`S+8q-Zb- zn5V%JJfQoLQS(OOe%FNB1S5Vlj!ngV4|$a2>Es{d(oYUQPAQdobnR`kNrdJtCIy{3 
[GIT binary patch: base85-encoded literal data omitted]
zU~8mdwn1kt^|kvFVJ0T$$#e6do!O-ezOwUq(|FL}9bQ7AKcbZ2=0$>-hf!@thepru^tJ#Re+kKP1((K!)Y#1Xfqzq`yRG;NpltT_aL+)*;>_ zdT;s9bHYQmNd<-*Tw<)M$HxwSd$7^+JpXxSG*?!h6U;U2O{}4-j@;;TEU_0hp0doa zs*X7pa(XdwfKR&~mXe5tcDgIh_ zj!(9NvzG(L>)YQFOaP6fBe1+)GjDD?g&qPi9nm3wO?Gi4ZE6Q{go(VE!bt`8IrOxK zPB8Ra-lhAvyc2EL?@)?ywU*mIx-0SX&4Q4Y4X(N945ld&R|8Q!nG4Ah-0*LO+BxTWz~I+_I8?QA^wroP%GlnTg*|5ho#leR5r znFB@}V%J|0tc85$ud~nEcx7+q)SH)Dx2#Kr6W^?Jy1&aQt^PsX7~X2Xwc%9WwgWdWoVWwRE){B+fuxDM z;HI+>700%#WiCcl*oy3UIOp3C#3t(K$<2K_vamTZMDz6B8-Ts>XAcfb{6s1L4w3~r z>_8@oVaSx?&N**qCB%*ofBIQ!)U@+ktWEofNL}%a~7%Xm_^g@T0k(SaoLuc zz^p;gM_Aa1I(g6WoxDn{JK&@c@OTlgid0vQxp{ew_{C#qe+w7-`%hbNCIt;n-74W* zEQbz32Z^HKtd+l#QGLS(M&^+p z==C932Z){ywwse>46O>{idj!#x!bE+FzDs4e84<0F)u3yeeiozgUR#y4kB8o9v0EsKs4K(>U3gC(53HqKnToZe)BTK@rO2WZ5!moWpfT&~Y z{zHeZU3IIh8hi7)#r|vPCTadF&oLE`Y?&(g5U@^}T9txfIDblYzDzrV{`IucHm8$? zmy8Z1RDV9-edX=v`rA9M3-tSAMzHg096vx##k5i#nKI?&Vz1r1>~(kFOTC&n&%`IbrB;RBv=fu`DQ!qvZQJZ4v?OC|w_Oj_y3;fb{Gk-(b+ zbQJdK531=!tv5@xbsP7U6AawRp5?Q2(*()C@1ZC*5Ksd+1+wTl;mEhWSI zO}M+vI7UdrQ-S$bJu@b;;rC*~`1ZF${yd%Y@2cODj6b|PtUOpKZG34b_2SM)8_&O! zqZDmME@A0!XiP`d5K2jY0uhCdg~|pqS?WJ0Tchzs_D){57g?6sMNLhd++Wvkn2V|N zJ@#02&LS0%n6}{XWs}AgO=26Sv5V`NzVNi`pt4_RZW}>!(qiThiqv)Z(&a;4SvZ62 z5bU`hC~2eKxcDuQsYIr4rL4HA0gc`n3%4qYzfp1uZO*j|1*O|3dG*RNAS=(cI8T5NwzF`gu z1*aF)t}f)^#zs@4FGDI{NqgikCtl!Ndjh&!+5u!+q?R?^^`SI_6B6=qMLcU`jR!Yl zWfnv#NR3Sb6~@0Mef? z+$Ws6BVNL;m}o14j&?q~Knp#ufAQWapLZMu16=0J>d6&!)s4lBg`{PG6v9-X1{VN- z!s-I~+6<18CK9@vRN)i9h0NFCeG4Fu4046>5?)}wvPPTHdU&MU3&kUYLP;=|UV&vg zf8CAsH zlHkgiaWoLxACx#KHC>}n`5GC_;I^l=?Vj297wf*}&IQ~Uy*T22xcDm71KXREQBBB|{9Nen)>A+H zjhwz)HZKHQ>!7q60TvS+sv9~XJ}f=|4@zp1_qa|jaNqZd=;n-J#WeE@9f+ze^|0I| zHE=4eJ#;c%&N!tbSwGk2?J5e>PlgWyqCp({D)<@;?&eCkopVBefHza7YyZPx%ad0_ zYnhFxOU5#fMfX37O1-$}+}4fX-HcH@wg1a#t$W>J@&tZFb`rPFG4<;!?SdeM)35JV zOnNr*w?S#KNIZp}1TimBMu3*fx{0D|O%vmp0d$&d;FbLgT6kfv^DldaJO!JNp!CL`Ff*7|DYOn^_h#Z1>3$=mc>WC`)9&_(vt7?pc=Fh9Ib3Y81 z^j#;TH};#BTr$^hc3*Eg6>~79>ht)U!quq>PzoSTLXC^NX4eE5RUB2Xadhuu>CEIO zwjKVWtU+LrYSCr;5QC=586DU=d?B?pSmn5bNYdx93=xRA~-^0f$n-UZf7y&8fGM=QG@FZN9JAn5LGCX z*;Cf%$N4zw>KN1kpuo+Dt*qvZbKE?OiO0vWuU{*{cX*&cLSJ&+X`nVs68eL}Q$ho1 z!hW5`ro9%|y5>Bo?R$N)?ccap#$JrPTVp&==d0A#vt{$KEZJmU7Ey%Qc`S($K1CL@ zYhu#Q^rku7yS<6OrM&Vyznb1Ym2rG#wfNqoea2U9+T}tFVzMcbXh2}u@C~!WKrwLC zs+V#FGvaHlzRopqZ9{UdgR^Tal^qD-jFrMr>AA{&KBu$>xzLcqr*@Wzp!xC-U*jTsZODU%yB zUjnB*=FI!hHav}~{Y8koAq`oo#`|loBsGCuBIe>Fsa{m{rnxx}%#ud6}+xP5ve zim!TkJq3`MMrPWps!HSDb!KB21!82bv-?aJfEhZ`Se7`-?CRW?$B)9y_ zo$CVTUbKInb#{WRMm(d^Lac$r`{J=y6^rjo^H157XSG>s~H1-_t{e=*pykt zm9=DHCjt`vczo5Wt~DaM(<+U;9KS4Kd53>5^;dD!hyg(@ga&4 z+cC_uiyOfIm>8^sUFy^WE!&<}BxdFty59AhnszqyJ@D|!=q2@X)$3Q1_rma^cM2Cmz_?85lC}rNcqBW_NvJz2n6W+c$MdwVoV~&)bJ&~v z8@LY=iE00nG)zG@x3eJ7EFsZYLlV$d+NXpz>-Yi>dj-KrH0rHDmxu$aO4ctlk z5idr!`b-LTVaPjoW4=TKZ;-vne#lF$lR|nV*qd@Kr)ZmhjLRo~(oxSXn@fFo>B-}d z=Np7GPF{A~67~V1r~!Spd$2Fa@QR-81CU|@z$R6;1*wUl*ut*cQT5)@Eoyvea8pj1 z+2HzEIr|eE?bISCP;^L7YKSZA3(Auq0;~W6cEYay-;`KHuKImM;IUEbUq16Lk)Lmw zX)RGV2JLJ87N&G%)a3K0&8aA45VSX~ut8QJysf;`ogLPT2mvB2{#@88&#UV2s9BLC z%}kN)P8HnN?(gY~7h2`pU0b-%M!i8g7f%;1oO!_^Gu;ukIjx!H^9don(SoywFK*>gWzzQcG=mzr}sFa%(JVN6dN(~Oox z5w7DOC#a^h`LJZa<~#NeZhWfD9#g5)EAzZHbB{p_I(8)l`~2yC^7o%pXDr27s5AE@7{A za|A%)Zq3RS*GJ=uw;@6pP`ul46Skp~?hnz>xXG=o#6p8qm7IZ%X>3RiqeGnX@ulc% zGcMiz@;A_b0}b)S{rPR^&@3T%KqifS1*V+nt0=kV_KVo1CFDoypjGJ1g9e&`zYR2$ z9e;~fOedZ^vdN!g$5rN~{l-SA!(RyIQR5KfNQM7fj%a5P`%KWjLA(wg;5fVV*)|6h z66!$)<)9n1(Hu4btgauiliJNV$vN$%Zto#wWxA)T!cb+J_wD(rc4bNOi(-p9Y*xYAeTv@>Q0IwFD4( zJ@`1C*_3A#SU!!rE12`QZfmD^OQ+t#yeSPQVxVZ`v68x`|Fzed>Sc9eQ`&sMo;~i# 
zd!@$}BX{nz4~V=?>B&W@n~CnE+8?oz#2Vrr0}4tKtQ;^8@IJp?Klf@b(!TW58XJ1P z<@dgUH^k;#f4;*JZNEzJ@CxCZ@GTq`+|h?ZFDnE8-Cd4}u6j#Qeh-7gxzMKH9-BwK zpb^>?q|?zg1-b7}`OGoF_)6=3Px_m03(OXQDgm z*6Bw=J4npH{ANCuJF;n(qrttPZDIkUbjF2aRP*7VsIx1A7yFWW!g;B)#{?x9gpZv6S z_vy^n9Z|b6Zht(38$F9 zQ_0z|K&_Jh$`=U?Z@Tzb6~Jpuyb`gdT(zCa#DhKit=0gd1~ znHIJ2QnSA^($-y=nt5-I-is!Gyy?C6H-lsf%;`484F@YFMJ<$fM{UG!CYKrVb z5V06o$gUDDV8;brICyDv;p+K0u?G)iw%@*i;^kyhNO-C@BGiyi?7}i_NkFP<|1^x- zN7hMD?71wvPpUrDhf=u-#Y>t`qI_zz)DH-@UEY2i^rG>IU9`Vjed;?`T&NXdnvvj( z>a#ksq8YX!fh3NTyLou-epe)#eL?O1MrgXmr;Yempt<1!L?>fQ^SPp~6T-3v@S^{| zf=LzE+a3lIed*Y8f5kH!FH6TfPm=$fj@WQsOMZHR;+@N;gfMgv^htQ3izKRr8*yoC zS}YfIX3JcoKi)94{>QM;Z924ZpnXb?VhILq;bS>;RYn|#0E9#m4EP9Il$su7&r6WX z+a!X#RB`WHaG;vaggLIeKJio{zODLD<&mZXZGW)=)6o(L!ZmlpdvzEXCW#&p%H_)o zd_Ov=Qf8??@O!Ls@%qJC6=s~&gE8%(7U(qSy`V9Jv~yi@hyX>Eoh^BN`mF=i)7L^X zVdeah--2=5=TYP$gQHNxIO%7`@RThP3hmU6bjV6$kN<)A9uDaYxW#-XJcf@ z+O8`U5)Iys;fm8H!K9KUkV5li@zviA2y=r24#mC&S(mTvepO1Z<7yW~!>jnlPNuEmo>=c*Is6~x8I?Qx~*2Rx?RG9Db+9YV|^4uVsI z>Tow(jaRD2$JxZangPrvVrBB!tk2863DG2vjU~Z*Bdaph{72HWeTD_R;yJc+ozMeY z?hfeh;em4-YB-Oe!GfLW25@VW8M76;B^$osW63!0g&Ds4W-Vs)7Tc6S(d}G{UclV= zO4Ip@`C$o`Itpy0VGkVh0w#>M-QY;z_X!Rc(`z7UE-~uXUtw9(eE zWtNqYzj5*@!nXICdbY2SW{4R;YgCQHV*LKHjk3SNw!uuCRsW)VU!Sc`>)fZo8RxAy zBR%mxhZomX(>Y&iLhr>0q0sZM|379MNB{XZl5I+VHB{-(NlQeEG-aO1QbnG*lUMY} z%qFl>VNm-T_Xa5JaGEJ=x3WU8PQX;d_JMW(SS0nNR%q(pOJUa*@ef$03$7=LD}4a0 zFu=q>rRpQ_;I?sGh!X?{G(Xh|SLF%>=TprJf1aBYXB1Sv$Tt&>$$9@QddZc9ev8r( zj;8}1>QpRi8qJC0z8*k9$ylyT_#f1PGB^&X?2)#BX_U%w%1sKL0?F3eFc%S#S{`Wo zhOaF-9x>SCAuHl!V%aqwyULb*ur>7JtGuEwC=J^B^RcUb(fiqzycH9G`W zH%W)8tbPE8Um)aomkaR4hKPPWcV@>FDe5vrMu z3OzpdVlPkVl^?tt-VHSXeGMl^EUYXLueWys9X>AIucyN7u20Z!d3m=aiE{!$7&Z<; zz2eFHBRn$X65NPFCHN-?QACgzXs9JE6VKH z|IGmybYlxae-Hsp&lF09dt;Kw@s(pq_kUx>XSLrp>*%F8!?(M34w&4w4_Qx1MuRRD zMg<+5YH^@lXM(~oVtFY=F)QQr8HuGTIqDW-dAXiiIj7}!RXyl^lPmDKL6GkV7&rs3 zHarhI%?}c8(9irR$}%T^XzB00jX@Fe8{rzTRdw*h<~igX3jG@yK7wJ3fc|8cO<32Y z;hg3^_@@;sF+YFor$k5o*I@^{Vb3L%FDLMFq2r$WOmDu5e;vA_{`MU47PyT(^liyN z0~$^TxG%Toi@+~$059&unt|<7NGp%nTHp@dM-JGWJE|~eP2UVuAfe%4M8puhUl;Bk zURJpXhW(a#fh1}m=VW(kjhz$p+Ih4=Z0xo0TWX8D-nKVZ+E%-w9)`5vxQXo^LN=dz z4URhww1ihpySc7Ip_rAOsT68#2(}r`PT{@LB#nacI;A3{A9Mm$bQKl^x;G=r<{z9@ zR9$H%&rQ^`sU!ELjx-rvD-_mEzu>bWtAeM)^#l*|5o#!HSk*3 z)S2WCivd5{mLE6gDkbFf-+tZuqu-$SmpuP9PX*UQYWtX`*ltBe5Jz(k!#qua4^{w| z_Xm|UE*jlDj$!J=F)AW@@jIj~f}1En3C>o*NA0Tv!hrW+Zp&VXMZRbyjrtWHD?JZjiIQhNi!0LO__FaTFSaUupB}%qq`UfLCItH!B$2cs_7fKQU-~Z* zu;bN969_+mHlJpKtuX5;{&wC8aHj9+pU2mILLSrvOI; zBb)aW5Su}wL7EbUMK2_f;}K1Bohaq236{CSTZq0h#|s95Kwm z?+l4IgmYkhU(8b08@2k8qGTmXD$Q>OntyfVX$R@2Pk3KlGv3d)yd4oP=Y?v12*9!a zptw7tTL*tCc_d@QYY)pn17jV#e7z+2^mm?AIGlr?NVXv_%afZIVI3S@`ymGlHAvS9 zdN>e*Z|Iq1lXf_LG?udWZ$m|(p-NW+S3c{q~eJn}#H5d_*mgSLR?X4WJ?6gge!(-j{&Q2Q?aIZlEe25 z%xg*ay*4WT@XLg~Q|fY^Sy_b4=&ej39}##?U>Z>S{tx4U@GR0!D_?1YhWpqd%s(iG zB1glEG+d{{?5RHH^L@nS!xA!tS9wFJ&N?c0y-A7uIa=~#11gEF3|4L!`Xo>B=;mzI zqVN>Kd0m4BYpo#GIZRa^7)}HZKicX!*jGEihMVYy4@I|u_&d&39zvA83S0^s2m4yG zJyNB7`eUZ}FHUxK=|+4!!W)aj;ei7JvP1a{OO83xH!jR8pn!cKh=WxpFya13w8D86 z+lR-!1H|3T@%$CFv3n0oo{-EtSn{Olpc+(zo~7vwu)7O7Bq! 
zNJi0T`{;`!H1^ZKG>9AedC!(xnfVO>B^iKhPcH$x>zCzvFQQz{yAQSRXu9q43*)gb zG9v;nXBaBs23EMzz}Pl8@*=nZByIQt2eIO&Oqx_h2*+7fEizK7Tj%a3#VmK+v+k(u zk52dTxsj&!@oj5r=gylUK}qjQxk_HcIa`8-u?;jAzUfLHVg3iCFoJO7$k;clljkjmT{pr{MmB5JT}zKEFY zTwsE%?0zK*NRmf%;PZ!RS+4dKK zI+I%>CGM+ezS^{PcIaBr^LJ4w;=FFQz16zLpo|^-Hex(O0!XWbWpX_SA$rd}C3fMv zp}eF?iMVA`y{&5n_~rCG3#;Xf>bRSy`s_!$IITN%Z|sUi&S2?u(1vRg37)$c%RJYC zmn3#o>QrTIW3n@RCa}VVqoqz>Zk7rOH{ZH`w!Wn~dL=#tambst-D|>j;+%pSWDGR( zxl)WAum!Px(yEn&Gfd+o{N%&D##2!iHh8xI)$3CS4T7}CE}aw~mp%S`7ii(T0@3?% z@IHJ!X>^fY$4YC)Q*lO>EXxR@nCHbb^K56ubkCrIx!cDa@8zj)?hwA!pu00;+I{5N ztAkLnC9G`8mA2<-fIAu*U;=OBM@&qZMwn!01w+>Q}*UyatUp!1!FKe|=C zOWyxDH!vh9$3H=`;i2979kEC>mR=1p{b)^CVXh+B=uH?{!Lm68q|+HEbUPiJQ{P`{ z_jJuam4w6>6gcik_RM^cT=GzAkEC!t3jGXpB8_0#$J800>>cwY*NnS^_$ z?&+BHE%*gqt4wZHG466L$X_&@oS`=CS-q=-B}gH@Sb99JLWe6l#5RD-CNXg!N}k6? zQrlYMVu~352Wwv*4rTcMt0b}{WXU##QrRk7*+$8hM2jTLlqH0;SZmBHl-)!j#I#tG z#;%ZQtRZ{$m>ElwZALX-X6EfYzJHx_e!p{F=eo}ML%F8jdERHcm(P9QpPNc=P4~I( zQ8=Hr3NBb>PoV#Ud)w|>@TQe+w-mp-6i(*jtk-~QYH?3Q0G!Y5t5`dj!@3B^{ch7E zMb|rePGPo=Smlk*uR6^m@->-iXB_W%`tcHe3-URKF{b}QiWSZ}^rzk@Nb zmm?-OA$Yn)&v^GhV~!!`p^>l;Y6tsHzEk`5iaM)-fD+9M0~0$ghrCnI+y3K__^Fy_ zb#H=pw)tMe0h5m#d!Dhz1XfODDDbu@BQoTg11uMMOHNUK$YnX2^Q)mHms!=|6a2+$ z9p~PDHO3sDn-vGL0%W{W^GI~Rp2Z0oN5z~IMe@W%W2D&^Ly!CP2iz#_f9JRL#d3~? zcy30P*QHp8Q_HFyr;2ePrb0$?Zm^*fiI3QBZOt2PQSu8m z-wZUT;)80^B$I=T29$hOF2XAh zKsR6Dr$*C)B6D55>jE^y+Vjs`RcXF$5W7;4;_dgwtNoBdy;t7tug6pv)5uOCPBy5P zBy7b#ptA6=(m$?5wZC-}j!QOEawxMw8K=H?uFj7qX!SwQV&&o=$M>Z3zlnh-w{Qkw zHvmvmg#mm=LJT-@5H-8Ohy-WRR>VWhF;B_0*|QTRuhqx)d_R9 zsMH8i(Hj84;4Y9!;MfZ<-D3d&i(EsIL4(zP)F2z*A_;``xx9BFj%Noi+^EhvR}*g| z9bXvR_w}gjFXLVK$yVeq4PDm5mYilvfTb>C2Y-UCz^c{3CiJ?K{Tf;mwUV(j1N+s) zr;f{?UK?oFs_v_Ff0pc`b>YK}J-fI3j$YCQ{~h>ck)Z(Vl-H%FePUgM?OOpx;vZL; zmm~AZr?L8eU{~rdXQ?le6)k>h&66gFj@FEjZC9Rt;a+>g9OOI)1xllmh>$CsO^;`u zW&B-hCOmGlioe*aowbnV^J!?~ahacf?uDx@f;Wz8THL9*TXDqj2s#-Ui^p|0Yfd86 z0gAa2xs0YhMYL*$Rs#L2&@lmFGMD`BnU%!`$pk-Uj9pL)QkT{+ayp!(I1SBX7G-=G zdcv^g0y8_k)#upHiy_(LO~~!zINUCvXP z6^`C~Agm1L?A}k*?txXHC9&7uowZZHrwFms*f1CC3xVg?GdUn_Arj==%^tvrkQSd) zp!s1dVzbR=SwcQ(+LM!J82pdRMzoiA!`%gJSv`qvm94^*{ls||uoBfEn{`1RmC}xN z*dJxyEL0eCPNQM+hVJeK&@fo}3Gqo8^{<%^m1ZN>;v68>dw^9)qvo17q(24mq?P z{Bi^*!#0O2=%hvj&G2uL+fHe|-wL2U_3=wj+TI?_l<{ilPvLWo+n&g8Ua46WG{b0<~JmYXf0LQw|sY})jXb6zoo{`4@d34iasJW zh_gNUg_j2MptM-+6vJ#N{r6g zxzvQWzY_?5<-mLmp^k&38x+=o9AJ|TV8szrfE}U7*V+j5i#FpXEtsm|q=3QR0jE{J z%1?(-U5;`(UE6F;e)wUImgeP#z0^u*=8-y&)RO4t#C222Mc|vd4**YU!#^S%Z4M%c zBx(tm-x*M~5lu#o1C$kv3J{ku($xV2%yc$xrFj@9T3TvtV{5VULs#IITa~3pgoaww z?fS+ewn7DQ|F}+Nf(|ce893`S3@l_G{ci^5tk7SK;F4hQHG9 zWxx8UCmmf{F)hyycJ5MHp9FX5obqE7tOhl~GuH%b(J|+Fi=+eEP*1eBv_fQ%n7#Uf zlmPv0YQk)|l@gb2kR94!u%lq-e13v;J!|tgh#+Mo412cbD8UbDYWj|h?PVqNjEP?d z<{YZ6qLTzNwxkVS6Xko&(lr7T_aZp< zwYWa{!-T15vj3=F$NF@iT0n7+go^`lhxTA|iKn09{pTrr1`Q2b{}O+=F~JdeMrDJ| zDW3)BV;;GRfWN3r!l-_$ULL+tc-*atN%HEe%&ny-RU8(qs_UlQELX!*@2X2GZ*lbc zH5G0Ilwl-D3dIx3@`+$g=zSz#aIOSCOH1O}7{=k1^C^RD)9X9)B(e&wICZ+o+M3+P zOB*B#rqsvPjXK*Oce1B~l>mT~N;c1eW*bnVjsQ zbzSwbT=spxsJ$a+i5gU$bUDeZAjMTMuJB98+h9TaDNr5M0*H(zC9%yjqOwnPJ>Vs}46;x)Ws9Qn#8o*&9&7oOA7jSRO zo?<{=dM1)tybh-GUq3G7W&@Q%T|}*06PhusZ7>kg*42Mm;89*~++dC&3~a}3LO`#3 ztnj+DySXv!$$C896|-{rKRS*3wT2~@03p)YgXivwL;IJKF-Zh^kT5&HXG((;fz=1+ zXZ$_~oTZ3mTuTm`^Ab&+B#w$PU%(H%#v-wM=o-txjQ z8XXHqq}0`}m4C?5@;udfWUKbNNVsaZ&mYju=ohCDEKLlK4-T6mhLh-aAY2@B)B0AJ zBg_6I<3`<^>O7foYr*q|3HaT24HT5)yAsOR{eFDdZ?UDJ4$L_TB40*RDWC&i!}0!l zAr=>|gR&EM)T~HxLAqr*W$Xq)4Ey?ENzJM9uauB4EQupJQ5sJxxtR};9d>Uv4H6); zBR!uoZ*g831m2;tPKumy1q<{7YJdDsv-z8O7gL(6V^xKyuO@O_l>t2RMms5Yte 
zM#$Jct;@v2+p77ej(G$bU1z=Af*F9=yBdE?L9n(BGd|8!wR9dMDthGpVPFhz|IBPx zW)R83@CiIK4Se|xtSwwdA7|pg?`~f5yUXZvO~uX^oZal`LD_yD`4r5}{BNq>`|G`G zj7|r=%rKk%n!8;|2OUD9Mh!ISvZbq7XUxU6x(+lbz7Yn@2+^qI-CNjJo4Ho5egL67 z#0p}c;3QSDnElroxR(oI*l^8pZ>DW~aInJm^|KL^lB;BDLm%cBizN$@w}GRTMkq~h zwh_d7adHsZPlP1wfeKE1vuqW3=JF}h5Wo?e0dmlu=i&oAT_Eg~PYDl#mJ%?^_jW9-u%`^ z2)ZP?zXl-KI>U+9fIQ^%X&TW7zC}mRlw%|;uZu9aZz_nzuhcxbb=gLLEo)njEvjyO zmYs^+0Q%&D@VYhB+QU-4zk=lKCo-RXBhAR6Irsc$jZxDkz`zOQnWG8Uoo{h3iK~kMfH1ucO-xxKl%ic{w?6&rULatQoG0*~aOdC?9(0%ZW+@M?b z2H4ZYNiR}mPP}9cdp{K|I1!TkC+o@8Atpmzt)yf+`=rbi#xU>T-S=+%pI<4E-UIjp zV!!3d197Rtum@=RR(X>x1r~SzKdu=Lq6&?KLw}~djZv2&G6h3F3z#M5nEsqB+#cLF zsKUN!LV&jX7C8VwDa`rkrnDpTY3Wa#uR1w3_dAZiHT`kg@5+v?+xLBOHFgT|XUiO#T}A(rYG3xre1_z|R2}E^-K( zm4bxPi-q2l4evpp-@J&BKj+2%|4^NzqN_a@N#1wlvWJsiN1IoEcz)yft@i<}M-gV+ z@CE0DJ0o|DQPE)6{Dv20=+Ov2FkE>I`Vbjh?`5BNA?A%6hHG(EYU&qG-^Y(kN?ngQ z#dko{u-RV$1Q+(1G)W_aXex;JC*S&0^<{KSb2yARw7bT*n?_0iEu zIeNsPuuJ57+})_VZ+h+5FG1uFxUFRAH)Z*qM?o7?#-I9*#m9sRL{(uw$S7!0rGUfZ!A(&;Rms zaaO_0b~YSrp`*#0Oj6fY9@aV&BI|hzA#b?&LVu-?*P`$8E!ULeeige9$Fj-?K1wxd z7K@}_#MnUOA{5;M8u{HyD9zlAU{%^Y)Ut_@Kg4J)hPJ13NFEYz{0h8lee;KXkDMb) zcPTMCdekSzbq;h|n`|t~t0?Qn~$NEPC-I%MVU~0p-kFXG+LD`?UTC@iJM4C+p|c zzK@%$n5y2(aMFC+`i08<@wTziX)qv21fixw714_q8TPcAB^j0)e5;dOi*9!RrmF2H zW)=i7EW_RX_si^hD{?~GPb zOOIlQQWqS4`1Q*OI14XbF0@&@9{1BYCssOXki8D1P%ohL1Bw~kkc*@QBX0OQ>!5Q_ zG0aPQr&|sxq`Un0gSv*KyqV~urpIy{r9{O|Jx{AH{^L^beFx?F!v~ZX?HRmGaL6K^ z5Y2gvxbL_%gSXj+8=h{hkvNuMAw{l{u z0EtGy(l~vNsW-4PZW-OO5n1wgm8-|EVAN#)#P=AP*<%7Bm`>gZmMK)cvUmqP1>DDo ztGGsth`~|D8&KPNmk9((W)D+i*W}HbHTy@)CApOu6Y@~SQ>A0Isk@?N%HQU0Mfac) zA+#MDWPU^|C1u*jlmT;1v=baT$4o928tAMZ%@*rq0&x?OiFDQC7YA78qh~h4nqz|t ze(89Kf^cF(%&v5u_rJ(aTkRfRc%RVGw7uac@%I-n^BxH3g)zk@v|NK65Nmu{7cpBV zLvR}WH#HqP707NySHFLEI_<&#S$WL$!&tP`0a48q9xgx?>kKyF1@Mt5k(t@cMJ)B3 zW5O+6JDd|M&wp1>EM9jj$lw`QtG>hO%x_;jP9zgWLO_}lkkQ++;DTG)oscYBh@pi4 z;nLyh`-_{6F$PhS}3Z`Vq43+XpKJZ@o~;SW?=5$35OKty>iv5VvV5@G~01njlMUcz} zTv;mcK7;|+sUVJ$sgpiMk%j%Ot(_(&uZ*{Q5a(^3PUpQ>6T}&RsLH0H4{y6|7g4Mz z#s-EEne4(x$238(Dy9v!jcw4cS}7;pbgA6RQAlr6-|>ogCF`=n@z-{yJh8`ZPiSv0 z=ynHO7bIeEp5-3`*puEmTDsElQEU~RsI?Fk*|D4=!}{dU1>xiSBFt}VRvev^Nh9q8 z?>94X>>n2xFycElH1hnTSE1T!$2&+us_ z@z?XvD1r-m(Vx45REjyuR@LPo#-(K`mu7iAQ7$#xE=HLr+|U#v46~)f{*%W5&et`9 z2s97Kr-ng-0w`VxUIcy#57^TPQ2fh@hdk84a$SPcZ~`}J-qLs~owz^eYAKe^YZ;jBWq2jMROGk#QC&d?z$Vz4*hjGcJ#--#5Da#Dkp zzTX>G+R(VTWVsm+^j2t&y$e*9>xQzl38TK)UG(Ow8Y4!HwJ+{txt;AtOPz&mJjWUa zC|$=yZMONX`>;Pkw`?9m5)kz%fqn(#kbTNLS5n2-;-FNiA8wG;Q)iF{{jI#gOjBH3f440I%FH`bKVw3(cZ zPBc0)$|;!wHB&m$6Dw&)2wvZ`Pv=;_8Myc2f&B)cVs3)UblJKfG(wm{Kdf}Lse-e} z%;{EXF1znQdiM?HrfA7cKDcPWkN&jxrEnu=1T6n=5D86ZOGi#oVuq#jDYEP%1MeFo z9kSu)&d!T9{WA&RFql`=#zwm_J^y@W1N9JjlH>}(UPSni-0+g(l%oy0iNX_5{Vg@) z-kzbrz04sCbDK1D<+*~2#wzxgdD5N+rFqo$D*PybIS2RS8?Q2n1q(H`;6mE(M>2%I zN~}KcOIo%yrZqTm2HQ60@eW;vW*{6#o2FLIQxc->Sz>UxWL*E)jXuLkqGQtfxUVG8z6$`)DJVr(1$;BqC`sGqwyN7Y?Q)#nfQ;Ls83)e_W{aio{Vac~kHZz!xIg zkn&uDv8DdVKoa4z0!Soi7l_6J2{>~@2aiSI8257Ufow{SEd zvI<1aL3qL3Tyf3FZq8FSDv(nUoL@Y-a6gAXo^y3Y=-#m+R@n^SluNPb-V?r0H`45?(SPN1eL89N(Z6+k)WEynz+8(}U zVD*H)aAo<7r~l(Jy}i@CJ@76{ss*HJaYCYz zHf|Uv#juIR9fhOM9~rs15tQvysFsvGd&vK=Py7dW%jE;1*j33NrxR|m8cE=?P%AKW z3=vk8WQ(gXRT(I9C{}#>`t*WKP(A%t)q^)zTE}n&@_R0I*Yqej_SAG_+g;gxV$8u7 z4;-vWH$)4}L7lzNbGm0l7vcA#3UBcSKz0e%+;BOuIAqPNG?^l@|5Nw#HT%+J?r|&s zhU2+`ll`@Y-#?&x0pe-}BqMv*8)QctOLYMw0iq|EZP@eE8B5h6n5H9MdRo7K>E6Aj zsCC=e-O-CoQ6(K-k}>gj-^SfdqIOfpOj-zg(A0Eke51dlH4_ipO&+fA`!l4hsQkZ&dLlD)@WD3zHxDsG;y5|*t_^@cK^^U_F(N8Fb`BFUDFpBHn^wEY^g~2QoyT^H8-rkT2skNNV@jDV zVn4+!n9Ab2MKm~8d$}a9`=?-9`uSfved!4x(b_9FMG+v*D(n5=k<-wuGNDS66FCRs 
zBxS_;3S=knYn~}>vNn}o&i=<$ps&Zpb^pIxFk_K%~hJn^TJyOCO#$!T$ z=u)}H($?7$-5Ppfwp;t+31UwVto0k=$J5=IYFSe#wwOJ$WAGXr8WuKX8R#dy;s})+ z*A~X<>yHb>Dm6NOA$i2K{1Cq!Hvfbr9*(Z`hYt}JdFl5MenXZ*NCh(pF3ufyadE!l zWcl)@hNxk8>f3joi)%W|X13A%vWMi|A0#(i@PUDS9ci$M5Qt%q!`!U1Y*|L)@19p! z^uRvGP`Zc4M2*MeS#7~!e#E+_=nQ5}m|#qGbPZ zdG*Amp*-m;Err+VNpZ_Yp-ZZ1vl_FuAxE_;a?hM((ZA^1@_s?lz19|+XvFbo<`mos z)FdCLP!ZpPDAS#Wf`@DhUp|wAnl-=o9C%g9ZU|5qjS7{zoOPF$42=*CHf8ZULf_q&dowY6)VHFL{Ud>m)mq{w9oWnW_|%1cp)S$)$X z^hAb8+>5HPcHpqYvTYc=trQU8YSlqf_uq-#ca~g& z>20St0KuiE05a$377bw8&@T#a^^&GE5@aTBQbucB_V<e5b{dyF2gyr|YCb<{qLL+VP+k(JS2)1Rj5^v%65EP~+2yF6$IK3UHNR@2+b$O8F+(eOtR$G?Dq9mq2Qz-ROag*n;V4zAPh^ov?oVBjGjBDk zQHKv`T72J@bfkMG{ACIc&H*CVfz)yHrWN=gYRQ>#v1Jqc@mK)>)eZw8s*iF+TQefl zw{PHYMkX>7mNMkvnrJ^)9cQV0>G;l~R=$_QUBXf5)%xuvAeBzY@gLXuJt`c5{}Y`@ z&qYKK`p1=0%(4L3Cb|jQ&_jUmB>i`vhXY0fJYqv&x&eU})`q;;OB~gJ4N!>fI+~Mo zAGMl8l7XnJ=yf9q6x@^7P#d=+HnABxv~Ds3ywMXsp^p1p>lKI)7%RdG`Pv}Oc~q$} zK0Y<~NULWz+Zc|$2dQTTzI80=?XRq}impsLuJ}yLTZzcsF>rY0kV3xkHdOa!9Gq#! zDMX}jK;@&U`9=ctP)2l2e+Pk_EWVm2{_^RQ|K)mC~ZrpNr*uw84B4xp6Bg3J$ zAg%PHI7p{LSI~`tAry5x$8h-uu^X&(4_j7angkyhW{U?mk;>BBh}*m+fbsVi*2ls!+~%bp5vn6!R=Vmr#2JVE?~ralCx*9L+Q*e`y%K5a9mjO>VK#M{Fe zG?ILik7tpiGiA()?7ja@yAnUW#oO=RwlC!$cHH^x>tEOHGXUKb9`Q$n{&FgDyD97| za9KWE9r^NYudc?loUm(G2$?xrbJ)deNV2fjW3jLB((fzJ8_VntAGp1B8IjN&&@P< z`+TKW!fnKUjY3bK20Y;S%)st4x=L%UaH5)OhUion@A;Z zBUzDCqSUR~zAkxFUBu3ukqWjxb+Z^=2oZDIj59>wv2x}3CKuJgzXLCQhOge|WUr_X zNz|6r-LU@fA;z)eu}ytv1>uSPpTA&8;88{NA*~SIR$~<3L=b~$2EAIs(rpFJ4a)T8 zS7Ab_6OUi*_F<-!o&DA0SEshSgMT;m2{{!?LBV=J30u;$n5%F+orvt{VTu2$riiHj zNxZjIo7x?#FfndZ;_`Ovio?~eJx99~V)K3nN!a!)c;2lI7oN}<_hDXv^}!y4+aF%mk?F2yp6a3Zk34ShLlYU#nnfU2Ypzst_;S*QANk%nD%a-4@yQ%{XOp-#n0~S4ZnKK%=m+^ z+a2P971NxPNuo|tMpc*|y++E|gLvB6N$JRQaWf%pPU+|B8*2HoG97L$cb+L1OX-cs zam2oolriRID6{l%<0xU$EefosOB-H#KsRaZ4-r1ifD*M<4wK{5njTe1RCFDWUUPLU zAu7E(c5F^{PMz|JL=QzTDznZby*b-qN7gy4STC{Oj)src8EdJDQB&R3qmz?f;-@ef zDv<6K6ZNk9WlZBfUv&%iC5GhE0!tqm#?S+Wn+Ri$G_~1=Ec#GWOXkklykEM9RbQr! 
z+dTBc^;I!rUZ**??j_r#+21X87ydPX2mp=6^Bo4UyKQ)wMkk!3Q^G4DDbR@^`r7cl zoBd8_YxtQ@3qM-j+q0S_vD(O(JE1mjjLJrr+Oy?REYWc`iat#48Jnhc6R9#wsB<;_ zlhaMry}zVIk|wq}T;Vn~;JYMvD*5bgY+cOO2B734051rdZuaRS^9ut4?A^$Ydkn~S zxJoEc``}+2zHw_mpMv?J+UCxq(X6A3B`YN*{g+H`$wtX+E!uYna~o^C8I|1z%<707 ztcrhPj@E#YRLJ<5p%Pr5Zj5VMf78WSB`1R3szk)<mKCSbqp0;L250k$$H zgG8Md-iZaWC&Dcxf2D;aI6XYqKUE($w{+Bb{@8rJ^O*x_dqsat4xQIX-N$oOSwKv)Jfv;W}Bl>K$jEuLT4p{z8_0D^t{A8-Zj zNSn=U&Z?lLu{SyI*+-`cqHH(D&ezvV2mdVT4cj`(DRx|#yfiF%`^(esXIIx}G`_}4 zO{_h_0f|BWK-%7*jf_Kd)xp;@z?(Y;aEgdKNT=R#OSV2ul8o9N z^5cPDjwJo7^v*sgY_S1w#@HN}y>cKY@ zU?n-3h&;@76L-WE5)QfF@~6aT`qT|ao^S4sfo7UAfdJg0^R z^h1vp8tiTRNdQ@H0BI+yp{cc)a$MxJ{}>)r4jxvBPY!V5S_W7>AkwAE>g=q`B;`WnF$n^xD||c9{MX z#QZ+{y4Pg8r#ObrxT%C1c^_T(>L}!&;pSW`Bp!!P6v6}cuqn;iaq2Y6gCRbIcT+Fg zsk0tTt?IrZ7nJk93UoT_DBwWjfP{J%@Qc8*0n4C6x@v_bt%c&q4gx#A za|)Pk4^i~{GXyc5mQRq8IDIz%RYB}u!wUhG6;aM1dMY9Do%=4CdOKgdw`+?w3mC?{ z7qIrQBO?ppi+v9dPyx&wKotMEG&LbA686T$B+S>{GHK@bHBal#be5e}=@X7kwt6C%BEmz-XuDJ8c>DjN>IFzGhL!>t-SXTwH zeMJ{QJ`WQ%Zr2jY{;&}*Vj=ligC(U+P(aI`xralf+k$XrsA!~Q&*SQyJ&^Zd-4nK-&B~*iChW7-) zhCKis_YNVL(`_CbSms|X?EJcMT4$h3_L^XZ{NN3XZ((zXKP>=G7@-$1unx9ehbb(1 zZ$dd~DUfROXow(LFB5n_;&;o2P6jjke&a9N`+XVe%QViPve4G=P4!6*^b}~8%adyGqYp3VOI+hmCE8{or zC*#tCWKP;)aZGR(4*V^-*op$pKpsOPe<_c1flGAznxa5KGHn;Xfs= zQ(wKbJ7vc6tD7%J8Y0}YKR^`%A3#X*Tc#z=h485Wp>hrxBK@UIp^@C`!8KrG_5I^IAVw!3Z2*NzlW z`3@SMqcdv%nL@uIy!e3bW6F6_f1Ty)GJkok!Dtw36--+QGrKZ4Th_mLt;YYrcEzLX z{T)kyRN+wq>LUh=3}RM`zN48Z4UTT6ULO9f0c$=*&Wi;C-2VrVrmt0ha{qQjgdf6g z)Jl5#XLQ1oD<-HIm)}?We$=1d>hP{P=(o8{DXUXv<)HAy#U^ij>}C)vf@Z)HD0Yn8b1S^O{qrB*{>}b>q#he4Fg;Q` zjX>t(h)E-1!xS@S9|ckKpi*=K!ZnH;81roD;U`mz$M<11maD8n401<5zDbp9%5Cqc zR|<{${Qf%wGv`l(#vw9>ZrcOvu|7kUZH?24A!pXwq%UW@YCrtU`&~etN0lYB^zzBu zeU-lwUOll8e|++#Fo{G3GGkpGRpH5r1n+qpiDAI>;VlnAIS-PqbUqJ^q3qr(@R{mB^N8@=_q-fx=-6~81+Y;`3y$xm{drR^R$NzL@QCaH=^y7&^^??eYafH8(1x#MDf{`FBJRJ-)0Jpc%v3>*7ci+8gi*Kg% z?5cX+G6BLKq>}uT2a5gPmJpaj z_y9Ngz{+xTdgzwe)mIOVe{sJ$c&V3nE_l(L1?EZ?1l<@Q`XxRRwFq6B-fDPeU@ALt zWavFx#?|jn%+-rG{cg?p>HTU^&sH{b3se%_?X7fUQo#OiJ7#8yb&ZYUq(ZJ5um-Ua zvjZZ_8rZ0R^zkQ#+`8_c|4Xgtq(_M}=6U|W^*XZ~ncE{f9qe*UVqKKoBHnOZy|(7O z`p2~w!o-||346#4`Cf_xvS4D=Vs_A&hsx7@wby!P~@!A_;PE4k(0-zR(&e8;ypiEkb= zgkKZ@68#X}9%)AJgytLgxe%~hKcTI#6|mhUSj^k-1bwwpP7^ci+8ZczDrA@0Eq|>E zr(DhU8kt;?sDv8lvcg4`MHJ{Kidg@qDsvJ5!TeJbj5e6S7-cmLHR^+qndprW?LP&e zrxqA&oi2v7%6b0dvV8SYc*q~tZ9;aSSQ@x5P^oScKH(}UD&UjnRB*G}_O|BC)=y!+ zHInCr&KwQodAHqm@|0tXf?1>5tC{zVtZrEZh=iNC~e$T72Mf z_1-0mKg6BhONB0u>PId)JGJRm;Gc)qhkM zHNKjIwmOH1E4*Y1{X!3{yn;94H)oxOeWsP8P-LR7`uO>*%QZDuF5X(IFjvV^ zqM#a{AkG_y@ct#>#|qezR>;nH*qLF)yi)z3hnuZFq$N{qZRRzp?Jo?Q{kbVAc~^z2 zPpze?*7)7qd#WeHU5&XmE+YJ)z!ZpJ1IY;FDi8G25A)L=GY2#a&n#myk+m00q8rEq z=%#=CDMt*SqbW`3<`25r*CG$kP_<8;?|HkgU+GMBxi5HEg$2PtqVlMM>Q(`)*i&XS zW37e4-$dJA!Sa<%+jt>qwydz_yw7gcRVRIS9*flHIlzvO$H2<|?Dr=ac%)Gt;4OU$ z^;XO*x*q@@nvk8}Bbh$%v<5Rhyk71teJ_iZIh%0!RR_23){=DM7q0iML4ztb5>K;aO8tCrEE@zJ2zN_$hN<@kuZ(>%f)W z5LuE$otuzkZZL4^st7kDjEz%Fvbr=mFkhRRF`yk_98~n-(WJM8VbiO-z(V2Me8C)47TzJ?$K?@t10WGDu`-RPr;aw1+LX z1ZZ>7rGw(wWM{09segRGHfP(j@CDIK^ zUOsqm7@js+dEH8HA6%Mg2rK##ddQu~d~+wssAo3xC8JUrGN~dEv{i=ILTI>XOaAD5`Iv+1Lrct&t4+I}dMo1Yv~b}bf^}GsmRpGh+|=Oh zSWQ=N8TA#0;Fx1---T8uU#o|e`>@(LPe|^R2vV8w;%^#YHl<1=gXTy89Ug7DWl)%Q zLVfO%UCJ+Np}hO=Qi^JHn%jIF0_sc{8tWFqi@>kG2E3nvXLiV=fGtP>rPA{ucK;@| z#Sa{he-v}q&t5pvsgJn)vihW~*|hzDp#3fJ+gq~k)os+3tzdpuVCD)~_kqJZOW=nV z_t9)P3Y%2y!Uk~NKQ1K#NZ(@a_ymrP8m6&_Ed^}Yz5ZkVNL`5jKdzI5#jO1QoBvkG z-2-wRiR%snOyGdL2axLDgP({?jv$&#M@{iAon&!iuXD1I-QU$}VhucLa`u)3R$mm% 
zKYpxnx5)0{Pa#Zv{3)IYiD2k=@o`u~I*1Jz0A9}hA&tI#4X==IB$f4y0l=G=M&U-tpfLd22Q?CZnO@WK$&uEO5_qFx;P3I3B|%&9 zv^y%J7PEZS4(=zDQ{Gf@K?Dk0L(o>D ztL2UQ{eL{h@WL^U6DEdPT&}eh&Zaic`8vKRe(x)vt$qx|p*DpcfLT-oQW(d1RZ24$G4iEWynq^?H)_MQIDU(x{?qTaYw)kzQ0| zj#xiiWB*cX?wDV$ME>$(v6GXOt#ZabJ3Dpw`-tta5bnGHyY(V4h(E)#1(E~FPskRy zX4*Gh3{GQ_?>m0)3HrJw)~c{|(%S*K5qeGJ%h&H^zQgm*T$^pw|9|~J|Ar9c;OjW9 z|M^E{-2!d_E-n@#YvFX*P7Iq?NIYbE%CDhlYRQFlwLWg(Vt>knGAUy&+10+^Ib!?W z-I_WCZr%`1YT$z9 zq1am8#wDaEqyita=q>+RHN&oO9 zXzKf@p(yWW(CSqehl%D2$L}YPJqwvhB-@)`uzQvu(!vF06f73AE7?o_yEXw39Ei)1 z_ZcYD9Y3Q=J=vlvR(*MWm5Htx(GY@TAA3Az?qkyS>dHr;(NkB*uavLN>xr_);RUK> zsL*B%jR?*Hg?ycToe?;kV6*kPmnaFOA&c-5;=jQaI>E7fe>@i7wt3(Y~L)Hm0g4(-{- zZI1}?atLf6I;cWDXy6E#rd4g4A<3IA`v->XMpg=*tNFgh?%Fx*V;+ERPLR57dXe)1 zqV@v?7`+t8czOw*rKCwJMe(dVUu==x8@o zB&TfIUnz8W{8Vqr>+-Vl*u6D154YVh*?sM>fUA zzi|NJQ)2;SLlLJGLYTrYQl>PdeD34p{*3%7u4JgqrhYw2mVIsODp9Qyb@bY?E2VA5 zD$Fr(G_M=!LgWGr2!!AAYt66cn(!WGxQ0mC>_339dg+f#WHBW2*Po>5PzXc3RPw>Zt+b>!I0 z{dG(@ap`g#7if?Slz4oeTWP%+{D%*PB`3C=H@nYuoT$sGAi1wZV`_94$sjf~sy{Wj{H7q}UTbx%XV{%w z%iX$Vttq)Ln}3^XU0Y5N&eV}w!q}WQM)fFzP6&ul9tVuJ*&L>FIg51#%g?Y+`U{E7 zUk*;Q9bXvUz%m^*+>V|&Mc~^qq*3fSB9B( z;AC@wldVIdj^klvbGF#M)-;e-XrNm_VV`2mW~ruKrNm{*;1}m5;l+}vxZ~Gx&r?%p zov`m;CSyq7z|wTBf&Pq$5U_G1{Be_%UGEz-y?cLQ_}Q33O)Jn|9HXG3GXG*W??XkJ zGQP{ufp#Qa`QEa@a0j|#n7tC?iA(lRZnuE+p9*F=Irw$xLdT%<_E>-{1W__kBIr zbziUNdhYvm|1n;+ne#l4<8yr0_vigdMk}52)p@3$+e;exz1+G~oqRcKF6y_{g;F-$zy?yGq&{nHFHF45-uTQ56| z-4h4iUwRC{pqdM(+F>5%8P(CUyWyhHJF<@<@}qw4*4+D%*KK`U{Q)xPN!|rtlER~p z;Z_wp*pk%2hfFCbn#Mu>6Qz;$j9vU1FEPtJvl*a_u0+8+M)IjjC?@ApT)Q>7IXtpuXH z6-21EY$0I1qEP^wHBR`OqZ=WQ>gl;#_v21Li*HMIAe^DQUr~?J!`#TxTSrX37Uyh@ z(1%>8^CV(Nq#_AiV_@G1C{aA{Q^1xiX9e>C{^J6VcQX0&`&YmHF`d}3V1P5xRtPGW zNP=I_1W^nva|Jl${I&Au18!;GWS-F14Y1QJxufg$tw%LOZ}^x`sKV~4WYkOQJ-sH^NJ=sFB_m-*-Z{n86frSDFo&;K^ zH9djF*gcIDVhTbJsDido6O~9VPPv<%8EDikc-*i4vF3P?$dsSuw5)}i@`J4w2g5}? 
zFPxM|uwu$wW4J|_C6hNP8gnKh(X&Q14Id(H+X%QMgP3CORCBKk!d%fKJGpcU>#MJC zP;a&WMdB=x;>|dK{sOCqwT16QT=dt}T3oWJ)*IckDuPb4CoK*J?oe#tGQN-tMhsj~Uk=x^)qhM(H3PT}wMT=eIH zTEWH)vcD3mF_diJ8gj}Ym}09PM=vuCsI$4nD7`kiv2$Xr`)IwNGO5Uky&H(xxGs*P5ARiDVv%yT?j7#p04 zoxK6HMH#rwR3P>hpmxC&hs|rhWmueg6NnLx2*6q2G6+oJeibgu$l{P*sLe4H)bQ5+ z+I($VbM8eJZuvae!E<-ptDcdI$^pnIg{il7PK%;=k^*Xm+@ zv|l>d==D4{Qrx4l1fRUa7QVxZfs?TKTAf&UAnrlSaVRC2jO4QoRxXptD=hB!)tzzg z4br@$WbC7>tAG1_`=^f$xMf{%#oJ(#0}(P*rSITAQ_y2*B^Hit2vzqG>=TPbSx;K;5auGKwd)8l_fw1NrDPBf^~2>Q zzqr?3dP(^WUF0K}Cna4e-SywBzEwdK{23yF2nG=_#Y)H+ic1B1V)h`QjPqyOkYN%t zsP@HiVXJfQ+$~MbffFfvmxI(ET6a00d)*$4H=X$HOADLh1!>we!1s_AgO$;j1^x-5 zQ*t2SO+d~=^T2!LTV3|%eB76>J&p1`5ETdz6(aLGWDHZv zXYr}mA8PS?K~$9$e2!K~k(c_sAuE6R53bM@M9e%ulH1fGPEoVLW>Ap_>V9B< zZh4sfsAgQrI~&x&D;~{sF{_2vgRIWKtOSZTmGq7pu$+K85d!Zu0sBGZ0R!wX)e2aK z8k2i8^rcG&A&4^bJIwg^SNr?Jp1q65uGdG;p2(->TOKi9Y+3wYjDYGfd}vN495P`T zLkPO($e~E@_7+e!&yIbSc1iTxiE*s=&8(#Q1BWaQW+?4CspxbQ`(%#>;Z45C_aF54 zK>!rI*mPWD(Lexee;KCsPw~8RGd3z&Xn9?sB(Vubn zx9VzKpU@^u9>{`W7adWBA8SRtZ0#AKHuF4ph<7D6X z9t6Iy5)I3gKF#1+H8ad~r6Rjvvi>=8Rn=gs*88eP)KH_+of>s>+({4l%F)aLjGsO* z!f)SV+VWsj8;{d-lDX9G+4UYtx#AM5kKga?;`Vr*v|Cl}p?4C{06JU2sDV z{w&-Z4jsa-bumQHT`)2HRc!36Dh*vu!X&+YTU@+7u<&A{4o{$Klzfx5itgk4`s{)= zQ9SAx@9tp4`w$1-jar~ZK!zyhI{q5yMCqYe6|l#NT&Aw`GcKL?B|a|HeynT4l-PUq zMsiKJ{H`A<|E{E%@!|2+`QvgfzilmmU2l)L1>S!iPkISgz~6w2qsLpc!7&L$h(h(9 z*`9}+>#9aWIxB7a`(r|uL2|eEKyvtA@lT(nQf5}S@8&7zNj-q}ygq+I*w%sra}-g9 zV9GoIH?_)20R?^?9zb>=#Pp>}IZQBx-&&MTy07GU>Op zx03M%ZfaZ`gY)rBF?$lcXB|?N1AYKZ`bG|=K1z!Te+300%YHmCz*H>>-y9!Y{MgjC zc(gX{Gu)4Gd(wFxb_ht6Hk<_J^-VDM*^+-Gp!L{a%#>+awWX+RD|S~ya%Xmyk>N7( zhn3mV_;~kN%+EwiThT|fZ|He&{&In@4RVcxts;;HyfsCZ9?mK>!0h#J)Q-oUCabAX zXS!-@M?6j;gKw5n?gWLzoygX<=9B#k@r>gXO=pr9aQuOQm#>y##H4^m2RVYP_3 zeDHHAx{na{uxv4#q329%Nv}Xu;7ZOU^o!(SU6{$Xm*;?qd~R1XE;##k3&#>!vI@phr+>l7w9QXu*XcTCbnKyBIir{VJqMC!n%N?sZH`C zuf#dGfXl^W#~x%GjDhA%ktBSr;N?%I2KlHnkdH?d*>Kdrq$x06M(*14gEd|x*c0O) z>spwirzj&ZZtw_m7b^|DHvr3txt=EIiaOnX|0LY+eU)SM`esr3WKBlj-yA(kkG{$3SY@d zC`vG2poc)jbiJM_mP!86DTtQ?Jhk>oXc>1zpKqpRRfZtTSbpZxw<}gZx<8^X<(xED z#zwz@7mNc~0GJ74j$8OHPt zg0q~2L#rrDkQoY{n8OgnNSH?;!8Pul8!6QzBl&8Mv8DOk`$Ou)#rp67HKvuGTi|yRct}pxD?I5l7E;yeTo<2(3Q6(F&8}2}N%yy$E4m$}0pmZ058lQG zOs{R~teva~CzUXzfeXAFqI%Z)mPZ6YG4pWQ*Ja5{Sw}urJoUTH>-5-4JeyMlqaA{@ zlwhTS+=DXX1bX0aj$Ig^^;oyCHj$~0_C`!h^WoP#46~Od77N|d?2XEWqVz!>#jLxg z8G*k_*a9F9;n@Z<0-$=Nx3gyTVD481(Um%w@~STNi$_?8)KRx@9&I9`T7CB1ow{o< zb$pf*f9y=HlgKw?H7*dE@B`0~XA|9rrP_$3Y;X1qK42fAArk(8EJJ3;uZ{}a>*KwE z9a4(>X%OWaMlZCH!CDGr2^piOUT3CJRfSUG`KNax)2X8`@)&{2BG#=Bm>7E_L0QK8kk^Qx&oS3w4Rz z%HB&`s82LgkFcc0Khh6$y@XhMKo2^u>a4hV$4)%x@aduELb3TmEsQU)pTCyypgW5x zwx@@lo<@I4ZTUF=anH}`&H%ElotS;gh}RHBDfRo6o**HsbGqfFdqo|;Xe;g~<^?W; zrZq4j$({ictZh1IgIUn&D;ENjSoPGj+ar9PuO`_fK(pAk`cRi>j{CDdLvNG8C(cff zZo#$C+Y@ternozRD6^u0RFq&~BY>p70NL!~N7f$+swre8zBv_iYn=YN&sVuN7yIIS zG$6~X2Scl;5SPKqq+!}lN60gbQM_l_pQ~s;62oRr`Sk_lz@Paw`M_uDYF#v5`1#9Q zv-Qz1tMwjZX9hBCiM9+uMC$Y4E zd;VAliY{UE`~J;wdb1074a+cQNg?|HaFe55F<)6U7qFj=z(GE|y3GRr&VvT`*9-^K z?td_ZnBb4m7?ffs+pv*}?3N~d7)R>hZo(WNlrujTj9afM#`6BmHuKR9Tb>4arf5)j zslmU;GR5HZmm`=!kq4gYv{@Yag!-3XeX^1FLAQNgzwg?0Rb{Q3)c_-IxXwBUZzXQ& z*1swcTSoNntW-eUL<9h<`eclc&Z96w-t#$}v_+r<2QNlZ_5jZykY{ zMwPAGJbU$#f*)Ik zyDYlwOu6J+W$0qs4n`cEhY6qu?h_MhKbRsz1HvNM%8j)RsM%g6>rRAkO^v{Z^y&{s zm!mcQfHS_8N$+Dl0mLic5Y?rPRy zujj4v1DwjHJa+~1BVdf3>O~HbY6p)s)32rh9;kh&dh$8Li%1!}Kb?{?A(Klg?w2z_ z>dpNj?+3*scEeJ>5L^i5-H|6ioyC&Yc_-bM@1M^Ml6d*_X;Gk%B%P{eO$Pa`3!?wZ zZ(Ujtci^4J_8_4{ur(~>6u6vqvog>_r)kAa{%T99=&e z^;4?vUP0cGurv8T6I-tQUNL)95M>6Ow&+lBt8nyA5U2|xlM`_IMM&_?sxdCUW_r45 
z>1XJ2e%_y=kC>w!2F;Q~6yDffM;}ZWY=L-zTNAi#ckm<-uQxL?s&wGAvZe3=Xc5g4L zqyr~WQ9ymOb?w8(p)8GrZ0JmaE04MEt~*;Apq)g_cOIU^)}@7|t!b*d zW?MF75W2sR4(?WNi`F^*_4lFUhg2s(mKN{=aG(#f+pyGA*h$bb)ywb#JtIS`7f7ZR zPwl)rXcfsm)0orG*wa4Z*c|t#psK#UD%v_js@|G(>3O@wugB;4wq@A@M&O2Duz;o^ zz4hd%qarDGbK}_;MnLi?A*9~j!_R*ALY8cFs!$N~!r6>7_5;;_?M7#U)$E4H;Yq#3 zNy&~*5dC+-#F_WrEL*q5zOZbcHRoiM8%-f%Miwm0p5a!I^wT>b6(CHSv?9HXU;D$* zM0pY0XQf7F7JnV`%X0mx+`aW9$9&jMgS6{sCxz2e!bzmWKVPJKeGjH>FWeUY15EHzCPm9RBgOJ@a3`Plmv^%A6jGUMSf%dykHTt@IL+pQ3A|6U_F7+ zX#-{sChumGdq0}?zo@WdSHN|%|wqGe1vtZza^{`&v$IJA@BWgUFU16(E8~;8TKMg z=fn|<$Uhbw5n~Jm(4Q>=_p+bLZX;G<+mk<h2cLVY%{{8S z%$ghxcNqkJ^#uf^9Jb~ME^7^idXq#59Ua4-0|hm>qClCD?h(fk&#PR|R5;#VYMl{( zt8~uU#@yMOB>_%;GB|`W%9v|#%6g4a7hVAa4BFra85EZNH%DYPl5LJ#S_eKDScH03 z3Z9gW-J^q+XZOy^MV|PRxX;7o5AfX4N+T>#gR`66#((@~0@EaA@++(0Vfr zDTfHMKY{Cs2>#kMQKovP@<^EOs@vW}bz7PH|~g&!JNP-{Ko%;5sc2GnzS z$jF4vzX4XGhxP&LA%%O`_MlHB-X#RKsQUXw!cC7>#R0&8@GXBSp&Er4(L{6M6#n34bdecztU)WOLmQK+;G@9ZSG!d^i(^YxZtTyX1h(+* zpD7L2coG=Y=e!=X2)Pk@+TD?Y0dI>2g678KoJOyvT0OS?Ya&21E$sPz*|@u!O-|9G z!3Vr=k|@3f62U6H@{I#{O7O71s39>)mN)J_95HX zXKI7D=Yf_K!L*$P`fX;X;?5MG?-_}>0cHD^LMA;EHsWExg^IhSSfBOhnnyaTl)o^C zU*L`4BH#GMS56Ji4se|o8BE(#EY%(%1m(&5k?lJXa;SoKexNt~in8vC_7xUb)23^l z{rN@ETe86TI63e>LJxvz^Z)^7Px={8<)w#H|KMVoigW$JLDxzMT$NJq^z`=qvR<>b zQOpYSa!ph9lf0iD6%hs$8LW{Qn$f)I6 zL?#`1Ksfp!?9*N$Zjwt8ki}1`J#a`*md#&F6X~k=RDn*B*Y`8cY8xUu&PJ1kE&DiG zlPRNi!D-vertw=Y6L64OkeLS(KhAn+c6b>he>V`bFT&&1`Fc0sp2QzW(3tf4N^Di`h-3a5sVN;A-fL^%gUBJ zwr{>C*n|szIHY4{k{kHud7)#?F^k9tP8p^77wW)T-h+F+1HCLSdzkyW;rmb{YzbCR zf>omX1*1Cn^r1E5mLWhaHn_md4`LVw6HO=9g~d64ZLYa9lp~B-v2dUa`Ov*sKBR#x z=n+GykK5YHrg&N}eoR+-x~Sq3r{=91692Qia6h@l0w{Wr2ZX_|r$HJ$4^0mO5Wejd znqgufsi{AAS`tJP6cjQ7 z%1nf7V2;`h{^kfuOPs*VL>Pvq z#hM)&lj}#fNE(9f(sNTYdQ<&^rPkkW9jQ6nquF;ER8BHLWbB6bMnd`+HvdzG7SnR1 zMI+*L1C6&U($Oa4IOQhOLX*F!h<3y`y=%ejnTCGovC4I6&Y$j{{XV&Z&paHmG&k@hLf0;{yqr@sn4Qi zug%}?xM5TvNZsol_KmW0khZbb_VR%E}QSfSFe0c;I>9of^IZ)W_t9-&o*XPf&GpxMaY~-q&_I zO`q5}Rqq5++G7(T*tcypF2H2KZiVr*>||$8OtASYD^`p@qqz}$mjR#QX?=}%kumns z8mR9Z=XgX+%g(wsr^Hr2NlZI%bRblR$;`VIcK*rYM6BT!82%$f(_nk5%*sw{Iz{Md)Kg zOJ!zR>WtE(cj{8By*+I%} zpnIb_(aU_n*{RZ5_I;_|*F8_C!Pce1_rggDh^qL`H0UsU$0+$s)c(yOX+@K=%KG9T zGO4{baYoBKfPQS#{N_NnX=P?|`o?{ctd^#K00LNIwFpx@lVmWsMq@bnr<5+0A8D@s zP?x#3*%-Q_c<2hDk8|qKyZ2j9(;uj>^jYI-fN7f70Z~niDu9#9ipXfctB7!*ExvlC z^q?Et*e?B?mwKX|K#KYLG;xoy7v6!nWA;d(%4{afo`4DA1J@rOChPF7Aoxj@F&1BI znwqW^_R=NOA7*P96xCns*El<%j|~u_3`#I|3$w>(dJ@0eV0~(kp7Qk| zP3NJhjX+np_fen0thV`(5OrFwY-ywM-o92*&kay;*uP!Rls?YjoI$m&xLI|<6$t(} z87VP_SglrR#=Bs;Gl%zsXEyR12owaOPYEr+SMnc6ScwQ?<6YBxdy z8%J&u_Sdd(BRoMcE8Zk)4MfY7B+TYcPM%j2|S%O0Kb%A*6o}+e*p2zod404f+P== zZ>BIsRX&Dk{oeMAH_$lt8(MssjL(#xi5=;Fop?~9{lJ{F69@b)NH)>{)fFc=LG~Q+#*pz4JuX&uTBDM)CrYq-s6Ln>O{>as@x7J8CPp>sFE^e&``~I~SM!jns(Hy$i z1Ps|fM>XJ|qiXWcQLP;m^Uul?Y;I_&LJD6YOk4l*IMm(l7#a{F5EbL^P;z1bT{d)5 z&ZPsvxkMB+llO-SMI8IpTLRs&s~x7e-Ilo&_eD8T@Tq`c(1i?fq2Ie&?y_n@QEkeP zEg(j{@q^??ai4QX?(_e3-PSej=XwYu`iN@H2SlDUZ|RF8TUT`Vm8`C)aX}wP!Q%C% zO`6SjQo>(HT&B&`rCv$*2WW*~=cV8w_HA>-l(bpM^Q8|Uz06e$xIb$pxnjdUGlhkZ7zJwPez;z`8_sttT{oh|T@I&Vi+ zrJO4Ew7}%+rxmOcO#6dM1G*zyZgl?4upH{GJl&&tKAt0LKMyR1$d4!WBZ2!CV;Y7e zBNJOr(6Em}l3gFhSl`pS>Gkpy`S?OKO#iJM>~I0nT!SGAE+&q@z78G0t-%>8Y@!Po z^oDydp>SY`d0Oj9PAER!J8vMu#P24(lE6o*w>5Za-XlX9=!ls5VkJRKV@fKr1-OAk z#nEsS#Uk*qhcKk#*-y#8zE zc2TD6!Rty~w92zt~t222uBxbIbaEED9d;B2m0ypL? 
z1{5bj6ukxtGQ0DjfN&CSs_A?~!+7mjJR_{rttdLk>Y~5VO(7@E;4`t)J{JzX`Eq`- zf3Jukc#;ed#{Vk}SBH0ta7Nr@<%bd_mW^>8A08Qpc*2@$>g%7M>xZw8lfNvaeg3c| zeT>P+dIW0y%v(?cxZp|F6SN+t?VKs7MsPdPtP)IJc{_E42TBHlN(eJk6^{mryA~{3 zq^iQB1z2lWwC&G@;HG5M@`aG1OLy~BslYFZ!%Ynfo6ouj$e8l*t3NTh&G$t_&YOT& zgOi>RsTPA%fOw1*H2)4URpLoI{{g?(-7d(Ay)W>cZKmjOS8H-FL+fSnH`eRTpTrm1 zM`N%PN&gEm?p=_Cgn{u!Q}JwmRg%vB2xHbu0M1;cD;PCJZ#bB#eW-2xYn)bdHl39- zJZ3K8{Xw@bGPny}ap5Ga4?)>M6hr~xI*Rv)DG-?!kn0;%=A3hPq2hpH?kjttZR@Pl zA8PJy9<`kIEtASqDB6|2vw;3Y)H3!DyKZgDzdNE9SK_G3RtQr87uVRYUvL65OFanX}!|$k+ z+DrHma*Gb@IVfCs)Or2B4OrwgRf z@9R($@DdTuWvFKX8z|c~JYizQb4%gIN-h(BeEK>ix#^Z(jbA?QXY~Jk%Qcuk@nG0x zDp8iL3VMUh3{gn3{iE8!z02dXA`>cc6BkESo1%|Pxs8nK7FG0>NhC${oKyy^gE5_WZuWp~p2uBs&6cVb64)lr=(D((UA(w#wLq-5RIU4~IayKwjD~scUU2GX+ z-OG0OZq+-KOxN1l+VCf~Tw;w5KBDdsxzP_amMD4>1L8g;uy7(BfEFg1p4?oAr-dbG z8$4TAt*N)Qetxr7HrKxrfbYr2p2&roe&t>LBf%75f5OO^9le32qO3YaVjHuQ4RX_j zPDPTH)2y@DVed~VJaGk3gkN#v->;!gXfR0myZ%3qd;kCWI2^1S1^5FP(nCZYtov_{ z=|MJs3>EZG6|f4{wQx)ivS7+}lU|C}@UV)zmgOQr(_+8Chg4-=d3yuqm;ch^fcZ5Gj0cqfNMyBqr%_gPb*5XT-8(o;Hik(5FwQQ^e4YLM<0uFnlgB*-= z&s%?_o#kNHsFh(vJni2sNgBuY^4L(w>FnJh$A$mrg$~5D zc{44@B0H%ib&Nm2KV00NKQ~&|%wN_IN>Li8O||PNaaQKi)@!H4Ikz1xFo&^JKV0V^ zF!6v_FPp6irXK+0#0^g+d<@z0ZIJwzx1n0!dj*yW-h5*V*RMW!p5X8ozgaEI^(!af zJN}=)FuT!m&+%ghSdrJs{I=JHO{$@gQ||?swi1JDD#N_DTHnGTPPLDRo;D zKO)T6WK|b|$o?6!z!+kvp*?9}ROd8cNU1`8RCHL3<3EMsE4kzEJFIdt!R7 zJm#C9#Gf9f>;f4ljv})8h*X^}PyNw~+Q^F=)=dHHI@iM!7Y?x(PTiBsy2-(-nx8K+ zb@o5A4H}Rl*uvR}ThI^*3mu=PyHG`9Og+j2WF!lNsef+$p4Ql+P*mO1_I|Ta%McGu z=}c#)N}pdK(Yi37k!FT5EZJ=^9yC0{Ax8rX(nM|dRccFZg#v|; zVc`LCRZmmUP8UD_Rrp{y%B=st3pN`_3n>aA+oF#eWbZk_dS@mNb#xKN%jXz^7E_6-1GmOSZg_^B*b+RHoLH61^(YJ3D1>F(c?Cmr3V$M63k4% z=8T}a3{Hb?#B4G=ygUFQRrBAXR!%216>K|(c)YHx*H=8+WyWl_|xx29vU7t%XPiWuNHvGKh z!^8JP`&-a)16Ig?BBcM30Aox+3nZQ??nejN%V=M8M~fI>y-_1hF1H*UZw~g&|7I6? zJ^L;Fc$v=6{pvQq;&hO(fm9x=`>c!L|3jI6G$NRNvO7W=V$sa~D?YWn7#@}il zEj#*ghEjC9?~=jR1>2+Tsk~=77~216OpPt6F%0QFybh4Pg@^-i`eg$T1GBx!&+-jV z+b2_Qmd~Y^%6-Gm`cYI(y%)CbJM0L1L?93q5y+W_na^H3O7kYIxAGGvx@VQC-GwFo zw_guVCL!!aFDJYZUAR`c;p2rj!;^@JDK7dPVDtC#K+cI!HsAJ_qPl-{OA%ha$h>Bx;~n{JK%RytTeSGxFhY!a)(Q9v~5T zL9UWw!LXQhM?g)TQZk>zG70Xli_X=#r3}{? 
zbl`tmbLRi;>0$o<`tQ9=4Q8Pk7kJQMIH_!wAHst{GXan+87CEC2c^+;JlL(&hxcPg zw}*x<%H|fDOuBs@K6Ku(|BQ+0hxto)8Oey1ABaT?aLfxL4p4hi01pUEu0=hNp6+o4 z(yfK=P ztD^>f3SkiUU0H2>XcSF8?7Yit)j8v$+2GzJ@8|LtEJt#f@1auwz8elFEh1!~w0T5_ zeFBcEN9qRErO1~b;|lMU1hm}XAo^KUcW!R-rIrJr&LgphQ=Q)1;vnv;4~0u!2JP@n z*MPas8NYC{)-HL6-_!ouSr%|;?_&YBFP`)ScnfW#jecvgq-((L5~@ewb9VNuLha~v zkRxx|RM{qZNHOjRmLCAgLg%<^R0H~ktbinDIBoEo=qNyS&PkiJ zYbe`tOs@CVyI+_?$1;gI#Vf^s#-sWPp<>KY><09fhd5(j6JlA(4)5qw&G$VRC zctR3b-fDcURLo7_pTZJ0@v>&3s0#B{Vfw4!w^J*U<6W1?e9A4t!6h^86&3HKeF;rXp)Vw8CF1Yyl-y8y~9gf{z4h1&aHxK5x^1-^4QUR3%K=YL8rMEo*GJ|bkI8H zT-IEuV)Mu~Kbm5vf?zGJrED_*GdFNH($IEVFqu0~m~v~$^+;Qk(gVb3HkcQq6%Hba z%93__NmMC4$ku7hR4RXF*5g}4R=|&(ZmA3#`6^dHp4yXPw=?vXdM^P&;oqmNQDE)x zwYfQ`i4r@x00kF1s?wd~faI+G&xePO6dZ+pl9EN?R*7`(zd16+A)v>+VDa$ZL~f3q zNqBdVlw8IYu-#Ng#8gu&hQmw+mD`ph+2H4^^18&I@Ve{i>v_X>3z?=zjeE2mznhFM zpq4!uV9TP}V*t&+zE%#7Ox-(%UW7SKs)JGP?J?%n0&v7nwr^&Ob|FPA(#vNA)ibPl zqXIWD>^&p+D!{og$Iw|0IFy?Z^3l*Gt+6PlKNfC&A!^%Y6h@8UW#;~9wP)-P#j;cY z*^_Ay(;Hk3;21iMy9PU9Q;u$9XqgD9ugs4cc3E~_L0tAtfuX-MwZG+Kg4tU&pg(+HQ*ZtQ)4<{oL~YdoC__1Iy@(zS&Cx{*Jhnw3qM)%FKgUe_9jZckur@Kqm-R;}(P-%N`N8joTJoM9CAj}>O|tC0eRwcN(l_qY*=a3u zt%46J&SZ0U8UqR8^5JdZgm$(_LSiLdCH1k*2k!;Wt;z;h9TQH;R4%gSwXjQgx5SNk z8qeH&eHu{EN-?#B9oc+?f!=y)YyQobH{p}vqp;fC19-ysBaX?BwoK)OXC$NGuJ`~P zqzq%q?Vw_^p};UkAXA1_7>a;Rp=|cN#s8=0|tzFiUv!L@Sw?d8pov+*_PkPn6(1FtKhpt)vOW9O)9}lg0qJomK!w2;yu&z;%3C?Gc_00h%y-qBDHvQaz25VRH7Wi3 z(>h|th|*FDR0p!_`(J!!{1a`w#b#{=92gMtQ0;KjCiDd$DvOvlVJ3zI=YSSWMHuEw zGB5G*ypIm%c__itPrP37$bNrXNyh;99I5(rzYyt#dL1wA2ZjL-p6yPsv7xhOcZc>i zm!8TvV{LXY#qZ+avyg-LODJr_^oPo>`fWTNKDL1vv1dx}wrA@3_4uxt+P)43faEHd z&&Cr>oH`by5|Mv%yfFvC(+?mePgu_Yu%9@kO1Dm%gHedI1As4?yed_nQXy}?;W+SO z+Vok(>i1M+W?)3q-A6P;@o$jBYX^jsKiGn{jL?XCtY#~wXb+oT%AgCI;Qi4~)n!88 zCgS78RYxb?Z*J4y7A#&!o)saKa9`0!ZWJgYX(FNkP72l}n70iP4e|19sl2uyT++E!75C9EPWD944l^bJ%v`lmwWYrC?)}cf! 
zhtaI&I*)p4mmRWo4Yf105z_XnPr0x!-1V-~qoh}ziBE<0*SCV3yaIR*MJTnjKsxX- z|@oPki9e!66QR1EA*yRUf=j#^{jL^wXV-wE-qHG^VXC2KW$WdTX0F# zGD6m%gK^wF!nh7f?G06^1WpGbAhG<3*ue&OY}JF{G*i_arTNc-O^aO^esIZ~PoH8R zVMucb0&f>q;?-&LI}g2XqpxLsf);P=$_YKUKiOOHYrgF4OMJou2}8>!=zrBmgx(pyrQ`it;WPCa@0CXcd2#HWs|lS9>TIyd46 z*Be(i6qqLFV~dGSSD8+e=usX%^GXU`IM=#e81eDE?}xj`7OJss9KVRufUF6mz`0!@ zQSfyf8lsBPZE4tqX8SjRIis_p)c9DO&SLX$8O}ht-rBeR{quo08He2aW()MM&u*RU zL)SxzVB@#I%6DcH_pD?RW+|8ynC22sTf*-4*()sN?6MLuDpW zob4M|($;e422?%4(u|k}+$lYyY<#_%Z2N(8Nm)iSJtpny4~2N%T1)|8t5rf!Z(x}c z)$}-4ON#_*lYX039(ug_^t?`NcX!~4Tg&D%vjN#?A5V{={knEYrDMA9zpWlx??3F# zj0Lv^Fq1%!97e8r<01^O!1B=Ew&-+`OD3=#XG2r0F0ntSj+L>8Ka3tq>)SZf66BJdZjLdEf)e%XEQ)4^JL`-r$=q?TR&|$*#0AvP?^AKU(p=ECjEpgG`vH8~j z0leD*HI~;{l8QD47e0^iuYuBlcgq&NU{L1b#(w*@qbT{JwsJ%0=%LhOPql_(jQ0go z5D$z?DBuimgI5elN8Z3lFpmPs42MeRNrSC}+~+@Sr+lPzXOstN9IMZC$eS?Io4Hcv zn>DWUt&>OgRXV;5O!~M~LLN|L^Sz`EueJrW(3-h|g`$GliTVNZLrg8gX+&E#bMH2h zy+^N#_0)i|4k;-YzW~6KTc#Io8yp7Uh=*YV#(i#ttt3qC{wr5cUTuXn{aE;3%MlsQ zILGeCQ2(-G%FYMAMo;A_K-2eVj&tMB?;#cHZ-r`5PTM69q&s_$>N7@ub(p{o-#TD2 zlq);$^_*SAbOhZ?K#)C%kYeg{+6G;eMoci<6LDtjK&Hd+wB@+U`|u#7o$e{K=iyq_ zXMF0O#;8|wMh)3lr<821LNsb1Tf~g@nk|gVCh>Mh3Z5Nf9wt-Fg7loKHzMSva)XR1 zR~3#+eBHccDO9wJ?+_jdCrye>!sx*5iVj8huuULEh94>@Lg(Ha=;NQ4>SLj@b|>}4 zT&GVfas}vr!s;t7C@DZ)9B5wvgvU_b>{F!xa{|*5sNOp85znA_CoFjnHZoi_{trUp zN^@fxpvBoMl`%iSB1^oT9+eU_P3#02Pzm5w2=PY%!1PtkDE zEwb){_9>UPO9-Z36X%M_8}^zV=QF0GEQ-#ayZPej2kn6eN?uVWgU~S@Ha~ErxewE< zTKkFM2v>{rQ0y%r6KegO#7uT_jTWNQxMZ4X-RR_U$J6%q<8G?QKv-biafL9%(JEQ~ zBz7pAMAU&)0hj?pe+${)qvg^MKoC*L4Cb$3m^yLgD^FADashjBR@P=QbB%t%RAkY; z(W2p#>aCGq4-iWr2Ic+>%yM#O4`b=J&A@sTHzlzFT#nQyvSqp1iDw)(!fXeQyy0TN zaQ}{%%QN2d9xaS*0|$4$CKe9(SKXrEu~_J6A^Rk;hRC{04P(c=b9n^#+8aRh1w;@5 z2Ra}X?==Xi!aJqW;!JI-d>2`E0Pc`&n^LB~npQ2#XzD-1xH1@aP{FRuuV27PV#_D% zBZe(>r>XWm^#=tW8GfikNMw#T{o9`{e7tURzL@Tof; z1j=VmpF2=ir2d(S@gbWU4^sz&M|bNM@M8uGZC~aZL{s}XpnCcS5HUcq4srsIiJa^B zTNn=Xm@zRq1S>FrRH@67`BK6?(CBgYv1ak?qRqj^hi8|nc)stFz2?w`xuMJ|!{5X~ z>VV}})*=!xu)t_&90{AMAe6M>@UfYiGP->(dqP3>dDmym(w9dTvseaE8Es!9A&{^B zU`qn5z9$5<81IRQK-NQMIL!!@^T~K2l;fOzT=~^81B}3Owq&}^-9sOe`_tIU)Gybk z4qNdX5VHh$7_BZpaZDMJ-BNI>m7s0l6IaE|F2YLHk-n#9)(29bb#Z{vxEA2qQX)=+ z<-cZ!nH%iEi4jpaGH-`)Fs0&;{H29;ou4%Wj-g#Is_hQz9^WUJAQgPf+9_NBY}H-R zxbIGegW9?vgq#~})e%QPwmn3#(C^U`7lv=;ec7J29ghCy(JM40kdfLye-1=hr>jR>GMA|GNzYI75<;(srN3Q|n$bB?{}Mt7A=8ErqC5<*9I zY*mRqMr0yhVW{D$CFD;SZ)A+IRtBas2_yY&;Om_dSg^@CC#k_9rmNi%hUz zdkbc0&^;=L9p_3>2x~KmHfp@Fo?G|L-i_P=<$N`p%*}_`+{FQo+X{PFwcy<0h}0kq z<@p@`Ap#5#7QX?LG807gA2Mn9BmL*H=a{sl`%6ReQe=T_vR$8V&0lX;CVK|uI{s*| z_pGsi*W{lzMWly7QuBx@Rq~~eq#`hl^-ayh?EW#;_l%(TwXt(E%GD*$VFL~K6kkky zx1vmdWn2dvRSj}dvz$XM((Fl3p^*r?A{54fJ@Vcf*L?Vzk_(SvLb`Gp?fAtHdcRS+ny}~L@y0sZsOSuQ2*A_Z%)QbunRk96^L+~3m~(`B%qaOAlc44KOf;v z{nC&>O;~CS3~a0@r)#7ivRS+rMzc(0ysEyvFFXXdw9-K2!+a`v{Wr%%5p0{IA}Ih&@#Pa?( zt}gDsxw@gi)gAA-V8=7kNoc$xy=&c&>Jk0;e{^-T{|~sjU>kXNN{m6^;c!#)^I#q% zDH~6<^OtgfM&=*Z$2;KbE%MfFjdRsq2?Rwn<<<|!MQP>zaVnNv+6wT`B9OlRPSL5D zSSxgaS*pMFo|h}diHrx)ao{re{VOyGaAS&Elswq&^zmQJ;5wQFl{y-By|ahz?jd^> z2|f3kP_(<)k56a9j(|euwNh|5O+;$&IW#XOm5LrSsz>ZI8>G({swNveY|!(@OpQb7 zBDvTnem70<3&n#u_#N;0n+~MqLg8XKLF-+B{Ur@jlM_?zwvYd7rkCd}>K*qC(@;!P z%CJ72qmYtS(Xn`eG1;bRWC>D`60B;rNXp+FSUZgkDgylM2~xYe=YdSwVcNpzU6Z zT*k_ysVrp9p>~E5?8o$q8Wm^7ggwxG;rT8e{x(66dNEgyD$+|eE%sA z!`cOltbj>-sMRbt0}o0UGA_LJr|t@5e14NHLU@-3fD0EXZn;^bWf3>qp%P>`Ar zLxdBFJ;YjC_{MSlbvrDs6D^#aGFs7k#NR#`qvElXMYuPhaH@vRAA3a6QIN!BC zO^qP>d3HI78%IaZ0%1GfEq^ys)>q9&AU1doD)fe9?fv)UHAc9* z=m)WuuO|x%U(NJhxl>`IDjMbNBC964^|MIiMKIfvP##Nd4zyk#p%FTeNz(8udaAhp 
zy6tc#rJ&^2T4<&wY3v~2%1~cCQT$fv58@87e+e=o!S*9W{P@7je~2IR#0orZium_L z9FXPM;(`C!`s*G_8Bb$kz>a;vOK;xB-3@lQHzwXg0m6rr{JvXlFccPgzHZ24{^-jG zuRjJ(3m1b*>iz8XLReT1xKD3r0E`Tj2citQhoN3(@w^c+0V%$qYpc+8foygoQ2G6k zZQsaWOMc~aeMAC{n59sqb|@W{g=bG3^|yE_1!e5GyOP&q zlaF?MhYx|a{1{$3F!w|hD0Bko%>rk3X)>-0RWqOeayo3jTdzsHC-U~ix)9QPnb%ru zAFDtEAQCMEyFS|L$S+t8ww5e7H9^@81GtBTrpQbS0;Eb#2r>=~DE_qW^#U#mqa?AN zVn>MTg*DA-SWaTV@WkE}&!?KtcIsHgwKQ%6BVGlIT!x~hP|N|Po)K#^1o*%8MVAX+ zyq>1~))z_&-6{W~_)Vy`q;Vz1!t^?~>li8s-*N;9P~j7Cdc3kf$Z^o)L|~Q=wU}y- zt{O!OQ7=_g)eU)94VUk=#O!%hIQ{pr<^0E<0*7GdZobZ4>MxPtRJw@*)v5N4LSw9l zETGEK2SNe_X;TIDR0YvJGZ255R=o1HX@B zDAIgUzQiPI4LBTQupz~5>>t4z=IwXi)m8gQIp;?hxA>2P1nGvL)%p#yEnoYgV>)Px z54g&`iEo>hp?NjxK=YKLmUm%7Mue)r=czKJ$6pRNbkvG)>gnid0)0+z-GJQ>nHs>6 zxRw=GbdO}AdkLsy&emqz66beny1aLUS651j!r^{BJ>mTqb_=rzimwPZ|iu7rv9 zta&i+kY^{#YXaPz+D6^fbrVMj8WPDpn1lI$w|?%8`HpZ}(Bkgtf<<0KIUG;i&=45< zaq?v`IddBI;o6|qZU)a@hTQ@k;W!4EYBw_7pVu!|6^#GBTh`QFTWzkiB!0@Q?vOUuCh7uuvLDcnVPG_PX`5#6Eyt3AiRCC~?Q|uI`k8h|!0e-`F0iQY=Q~ z(7o-8TO|J$vn&}PN)+_Qx2SLp*T)zaX)7$r59>$U*JCRRCj)Bzf(#qS)=xgXDV@d* zy>(4?%TM&MC3nY0{gasp3;kv0v>Q;gVo|dKtnX2|BgSU0)pH}?r!1h-ZU^drTl8M#sgp>H+i*T?rA4t@ed(}s@2;e{5NBd z3r8@Wjf7x@?>rxH=He6JKfnKOY$y;4edDJ}lYbY+;{Wgp0qsW~sE+{|3xHfNfwh-z`vWPRxgn*gJU+4|H4)t!~5!5^0cH6=)1@2rWv#7PwU z9_?Bio^QQ7zTM3Z0cHk9<$hpx2Ybu5> zm{qg%%QFRzH}o}1UVfu`m=hmgB1%KtXdL^w#FZ`t9``z69KjG{JWRmh5#W9nvjgg# zy;+gn?^bV53eEPSW=UoSTWo*g)-Afd)WGm#!*Y)^kddf??*fRv?@nNuwq*akd4C=J zRxXPQN*8q?B7MM9Gl1!L!qX@K#hM5H&JI$n?hByD~+vdfds%dY<%i^yXfv&4XG8jNGv_emX4(R2-AxKe{+E<{MpwCaS zB~mr<@Te2#QhXIV50G{@wscKvpueCeQ8bA9_s<|11$-3CVbeM=4uP-W<9r)WfrmLj z!sS>Lw1N>S>>$S4_=#2-@4uSrR8u!o;ZeFzOw)Ei=aNEl0>|Y}X_PLUrUtnJ+uEp( zJV7xX$>Wo-P$Hd5>17Wx^~iC$D48G9!-h5AYD@C_WJ&8W(@o>T0atDw_;M{NU0Av^ z`hE&OKDc@l1iZc=XgECA4E#uo2KkB_UXIJZ=O+k~WCK)Zr{x#twRQY@zS!uFMX3lJ zlUGhs_oWs5mB?~&D1!Y^UU*0WD0>(I26K)n6v3Wl^)teYd<{H~RoY@sRuV3a1jz+1 zTKq`lWev*Mw((hRCTG0aEOFnA7lUu{K+!T#pxvpATA73UOx>Xukt*Wv>b@ReC|VV` z_t}L1PD`cZ-86^CjZQsx!DXSi8)^%0(ZSK2azr=_5o=RpS>xCxXvf`6?J$tn zuNm=<)B)k5Lt~&tgj$>bSjGhk)v7tNqvqH3Bunic$unECl*}Om@U#DyiPs>CyUPd` z%>+IUO`)3c68_WSEyOV1MQ30xkiL|y_R1I~>j#*eo_%nKXbid_Gv8g3UL5sT!G>wxvGF)?(3W#%QFHuMtIyE+=Phe*3 z)R3F|NC>9l(nE2(BfaDVUAt+KoJUV4^KpP8n@q07FmAH?Vc{muS*|41&pOgRbBGyx z--q6<+qCQAh^FQr&Vd@1$eSI{_N7sdHrBc4*(T_}q8z1@NvQumzay?}xHy0Ox2r_T z5ZnTTW{u(Oo_nU~jiicJL}fanqtQ(F{v>kM*h1)>-OjL_^J9ANxhk_-AX>Hw&v%~s zwQX~kUQ^N%`&1atQ^xA`!ESJj?z>YwdyqXZPv?Vn{cb0r;B!e7m^|I9tDG99vI#rHr!-EO>N>= zu5z|`n~@US6+C6au0ST^ZzF5Dep(A|th=fRFAU#r(At&yuJ1VVdmBd*rlOf>GNk!E zDp+gWiG7zvhjb=CWQS)09{j7>wuZ~gQg6I!IPN2r;jhj2YQJ%8@u1~(tL@lJ6yxA= zeFWwu(1u;V(n75feZjHxhjo~p6~!XPQEfWU5yOLelDopZ%YrcmL*)|s8{pws%rN9;`}L>A#CM)bwrnA;ykXm++H`q8;q|p_aKpd2}dz8b2X3N@mmVX@mBj)W4@Jlkf|L{?zsB zgC*a@ApI)#M_w7;7YXF=7|P7vIRLrc4anznk(DFpGlQ#Tj`QoE`(BSKKHPq|Pf;nt z(@5aU;l^MhKqUNMpSH|_ML_QQ^b7xqT{zu_Z9v! 
z#5@RTD6uIC)$ZeCPV@|P%ajXM9_F`d0i=b!;2PVE;{yX&5EAdzpMo>xl{G6s(Os&u zw%(tegSh5z?TUK+{56ywpOZQBxrcCok(ZQ`&NIPHli03y3ub`oefXMlAfr))F6DJ+ z_MCX@RckiFX;~4Nn6)3nirf>57H@5^*iXL_yy>st)OT=p#NH( z%9WkqI6*6CAT3rlIvOQrtW+^0ccAP1`=PSy*Mp|>YU6q;CiS=rir$Z{Codbl^AtGx zi^_Nf)PRIRL@DrN8hkI{5%QG*05hZ*rp79`dr#zvWk4guu^7z?BnD5iLr&%%{pye{ zFjc3BJ$nJ=zy;OR=*uWj-+C9y*!dP7L^Dr`J?u1+GRU#jbhqQExkagp+U7JSs1pM_ z1-38A-!QN^y<{3v0_vw|<_$6KF0uA{G%!qBF-m}ez3|fRQ0JIy?X&D_nQn$<<%I={ zL#}7GKicGbXL7S%&$#CM?7R0h5*o=htvlPu#vKeZSiexuPVTXN7BP}yE{L{hSw5<9aW zWV$v~x;eM0NAF2Ui^+A1IRu5YS-hQufe<)X#vPnP--ocYo+k0q$)0xir7b(fDBAKp zs;f3Ro0Kw~Nas{*YZ5;uGBQKu%@3Ts)mc$;Xi-Ur81t(Z%?$TpWWH+7x@C-HN?MQR z9c1ujI97FRe}3Z!CI1hfXw2!;&7Jp5JpZf0{|+7|*LEhdwAc>3ijeNcb97Xk_5}K; zYt#Do^BIG5>nn9pi)W47_!1>E7K~e!F1N>A!tIACfWoXFIz;}E3Tcu$Q|}Qf(MbR# zA8CGRw1AoVz*r+3uQ#9W6<(xKZ?4hw;B(-~H1Qe7)t!0zYdr!N7-*p37h40EuV#?d zg3+r*;sJdsZD|wzN+&CIANg){$sy1;f4pQcvx1l3t z{p@olf^AUZ{-4%Tk$qn+#wrp69*-lGd%9$T_2vl7>w5@+bOLl#jQxOR&|&3is#ZOS zbq|;B?L8H#|K1NkHx!p*9-mwtXHdukpeoZthPQTbPIGVYh$z~3a_olZ@^f=uXpAVa zEp-j3I29DA9i9()EVuL?$B$T1eWn!<#pPBPFSe$x+7nlDA*0zPe#jFR3vC!O=1c4H zREZDiE#f;Zftv`OwaHriEGT85rHXOv!{>=0a1QvED=072WFotbmr+fP0h}5C5D9{_ zTFGUNr~KabmzL*8x>OyZ800wRf`Pp zFt{+otDj?>fA^&Sy+am!q9oHktm`!QICz>Xb~E_<4c~GJ_!beYI4=i2NN&O0!wc_Y4bC{Ar&bsTUn4ft;&u4}wxX)!p(e;r^= zN|^IcW(kmj<%b-1h5gb~FdTLAb{7KHnaAz;w2z!R>40)s8 zj>`*jOaWg8^9CRn6rSafC!IP8G6g_0ljBnNGz@<>&c1o5`fuLDPd*E-kx%eN$&b3e zPoZ-5OY#5J6N@GX;vuy)t_+&v9fp~en6emY5$PydV{t0uyKP-P z(l?eqbSo=R_K;z%_Da{`-=MM78OTlrs<0iPfGZr$@t>;95o4`4NFf?z{z9tEQ~%oE zzWK*rb}MyrTJ8E)yc<*5;79)UMzP(h~4%J}_vEdW-iPS8|%!{qC_^ zw9HAX$D?7-i!AoNGCI4d^Y{nBb;}==&UCt*!WjBXxne^_bBw0O_n2r)_45a>g(yaq zzS&{vKS+#2(Xzc6`J^Vl(19q2g8*f|}3Vf@28#>!QnWae9eb6Jrh zkzL1;Z3bK!7tS@@D7jjUD-p+j40*A}=spR?p0oX86aK^kzq_L)w_ncI$)?rQN)!mutoOqNMdIQXHiKL_h`%b9lwFXg(1r=5VRrPYk%Zzic8S}) z+k6j|wFDhmV-rEG$)7N>GN)O>IfFgU5^EQ3Lbe+QJ05}J&JoUM%p41RUT?9|P+4?H z^vliM5pEZRfrXNRMZaT#yJIwcY2hJ90WfSjB{o{JwzRvhJ@}(x_z!U|m+NQW^|m@T zfbad&Ns6&PQ%5H1S=7|~^Ut=EK`hR^CdSPP?ZR-Rf(bn3$IWVBfB065@&BSSZ>%wt zi^HOhej5pHKuX!=hbM?-D*hfFuu<}r*vYCqlw=08)InIgZ!Aml9-e*ujE|gN*_&3mNhK*GaH_*5*qlwRPG!93m;-P>-ungU zlb5+32p_`gv#3#xQpLLOl$$=awt2Wbwr)qg_uE?c;qgB4lR|UH)TtAKTv@R1_)~eW z@HB$yUZ&&%i6eEo9^Xz#*c=fzGJ14TGx+z`H&&iDzSk_x^`=TcRvtEW?VPk=JKA+D zK^k3gjgqhNEn@%*6Pq`CEm``91?>zL2>YcFZ33s39J*Oo>t|SQ- zHsWZ2*)V9{2f58YMQ{C+nuLMjAy$~C0;I(AtdC*aNP#_0;*yWf;csBfMQ%a;j+Z>WBZ_e@Zz4;p_#PoT9fN%bP2-P0C0H|#OX6ui#Qsx5Wm+4XaqKoInx%6((fok&Tz?j;LYph~H}JFy z7+)#}LKvC~ZqC?5=dW>Vns*i{WK})R5nySc3j3d;DPaFsflPoZhA#xoC__6-!xYrY z)INN79|<~@Uz>L=wi8uhyQ9jkO5j`#(%*C22Z?cf%Wu?j5;Bg!b_ibL>HZqw@N+i# zK#k}PlM>krB*XVlKkpWW(7zG20cWM>1Pd=3n~ zZKHo8mV~b*w-6}?7g@Z_2akTauI9cd^_LVAsWuO;>h;9e&n(XzK5}q>mec%1jg8jV z{{JaRIr(Msoi-3ZvGn7@XPY8y2Q^DWwkp-ufoy12SWW4Ck<)pDKZmSdiv3i9w}KRe z-*EsG;7}D0kg`jEe&{0iLU&iI?RNXUGyUP&oS^&24uN^@Hl-MMeBz+F<@?$oR5yVE zTn0ZVBS+=pTY`X_d36`GXaRa9IpS0p>VvDq^puM00MTnGdG$q3yw8Z%>Q>7a5-D^( z?Grk`eh76d9!dr94k{J&IdgXd!lDRpcT(8Qd(*hLSrWY)(m&Q{TJdYt0LB#+v|#N6 zD{~{Ct;Bt5iP)4&H3f5+ye<|!KTP%$+tn*;N1W`K4RC&OZ^g6f*)NTK=De}6p)k3D z$hTXvp=fZK5(T_9=tO+xA=tnxNBLkVI^XFUU9k%Az(}KsPl>+)@#c2(;C5&KgQKM4 z#JcqBr=57N86SuQGLTD4ybDV5zzYj@5>jT=qJ-TqbT)5e={JRoyjA17{mXSe>!w2I z-YP!FTf4>2YIhU=5clnj2L3l1;Fp=;#m34YwAqatl9zyPq;<_Qj+|tb6=Xbm>pa?n z6!-gyuskv3gW#{_F#c+)1a4GbyEgSgD_>3iD0`KBN)37f-Zib4DsINvkCkBZO0WHtG(Fr<_a4_eFMPmR zz+}*+;?GO5Pj(a0Y+%jNVC&Lo99uhf#~7^d&eeuO@e#VNXYdrKb&o8AryX*-(}rF9 z>Gz-XgwJ38^dWiKuzAAt06Yk;B>}r@DEl=tkial!$*;}%v}#FwbidL(tf^;Ya)L5m zJY~j5YAM&{)Lk4I@iaZBY`x-iD|HbAZ_}D33PN`9wos0-@647IUC__v9_AnR>GW=E z<-P|;Z1!(~-lR=Xc?Eh5a?PGhjHzwwH 
zi>%=SU$S3{feQfID>Tyq4Ho)cA$M2(8k?JzH>R#9r{9ASa{im^#O_!u+55Oh;r(8{bRVWb-8FeJ^NqEVZN==+ zACCB$hnr;N!#~?>Htv=^iUa{NGu%7>l^HNEE{~v#idF;rp|$Zz`*k*I9Cl*g>cAB0 zIG2&0a9)W$T8@8!-*;}Ag&vmViVL^FBFuh3Ld*^sstQ`~nBc z$%DbaJM6NAl9R`F9jZ_Lfro{`dBu+yYrF_Xh#VvbX^r3RG9cQ))WA5O=qdtv(8&!bYIxDjomf#+TMGY z@}iNi;4ZSt#_%!fLGF~(@(MlDu0s;chZi+2u<>|K;PvKxjfQ^kYSBo!EP(?rgOa2H z`3gf376rEqQ4n_LKrmr57Dy@iFmq<7OAYzE;)C?uK5DKq&Pq7F_?|Jz(_Fu9RXP)p z-xyG1rX!%z2OkUj4GWtC*WV3~<|0N+HMwrXI92Wu)|u2p6uY<6{?d*`egR78pjwW} zPfwq)M1|cMT1%>;y-k7}qHY2s&~0ll=Q|Z?w&iI%l7p`68NH8mbxS_GXsN7XG--Zy%eKRA@$IOa4?lpVC?s~#*hmg10BG0jzZ>`Li4xi z-{Q%h=LS*LjGC=wu3zx&^~a3+4=O#kzSjVI6p-&Bp?zZuAUfhd%&XNSJRAknbk;68 znHhu^X?dgH_`4KvaIvG@YrfPXS(*uUN4m9b;3}C=Zj&v(xNzaA4X%^CJ?p(n%Uvz_ zQ3ty1J9jTAuqyg6z#~0!cl|?rSv5rPfRBr!<6FF%H;Ux@@LuRxbX908C&x|#{sL$-wx9=z)+qj^9rzFVFV?)CIig^ zZf#mAfn~-rg7@;?b5Q9TtcGxnEy9g%m#Gcq%Df3!^9VW*4^GnPIsEsb(_Ahvn+|(H zJTOsp@C_dTv9=)Sd2ypc3QJsxS42HA>8_i4Is!GS0(`|y3J)`uwv`~>xYV)pB zdwkSeZv9pW$87=gO9i2%gd4ndl5;oAll>Fg&+_SPR)yr~$v4oJv(sqTrpAF%;@^lD z@^rzpygfr*O=Y5A&KVrK^3oA?qz?=LfZn5@fCB|m)oM+htYWt^<+C04V(O|U^K|B%&GOC{nZlnDM88$hQ4m@p~$GZ;1?0Wy{4RpY&rPm9=e5+tu*t6~PF z9dpX=vQN6+_;}g$o?psM1>S`6@-Q~F4od<9-NyrTI>mr_>GD!MwZ@5I%j)Xd1+~%F z9_X29&35Jxe~&E%yixum!VbK8QgdF_1Eayn)}7wHsP56GmV!;dN=!coJKSLMh z#3oa%$YkiS@$ux6eHXsj@cBq#o<7{4aAB8w7xEJ`_EekB7$*Y4oMWafRBm3J*R79RShHHe{XE+@`D9+nX&u~Y>>*zTc*gGw zE;KP&2oDRXvC^8g{p;sn`Q6NO2k23M>Qz##fs5z#TT2np-GKMP8o<<@Wuo_*jk*J;@V6$ydt!@#N%_=NWOcE1J=7xz3nstBD1DF6j9YtEJ36tsq5_MF z$C0JfYM}N55?D~b2O@lW0G$kr;0Ef6sSAxtap%WM|1do5>%szG%jD;}$?!(kGFbPs z%(nNC_4|rl@Zc4YGKMRW4KAlQUf0LcVu> zTgNW@UGC1OA{xF{)#Z4L<4PrsH|2gneij7sFl-L^E)H(mLjf$@Wvz(|^!|sd)3>{B zQK;0gIOkE#`Gzllrj72}D4=_mo03a(Sy>dmm9Jh2x=Jrn*E2aM!2<38=C?F%Gn>A| z@M3zhpN?=NO2Yi7ez}%sy$*|Zp0X#q%d<=#RPA?8(1~v)|;LcJqTs zhd10!-fFDPiU9q+tT#B#>V)`HL2X)WN@|3()`ss3unFzkj|~j4@+&o-DgsI!R5Uu1$%n*PB(%#*Hw>F zwVd9rKi0bbzt}QW4$MdM^MCbBt#MAehiMqh7({E$J`zO@9N27>2f02k!M=fZ-29+y8q7@{IvA;3i7=~oTMDEWvLhZ1$aHZ5uO3iX2K^ z^>)69n`fndpr#@P$h4Iu3Ys?d5Qo6}e5o!&x8`aN%@66>pbT0D!fS`hM>ox-xr>Ed zyWDepUzFMs3Q}C-?kJf9(7z}PtS|T*0Bqb=cN00l0gcX4W4Cd{hq4G5P{5~5Swq) z8c$4bvzfhvZC<%f-D?MBk&`rG?sVWWm2 z+L2$bNt7}Y`%c+yw!`rC^72fEy1-9 zKU0~mV$nB#e5^W!2`_0Wxa4QpOxT^ATGQ=gkR>;)NL)V}cStAaqtJ0w0q8gdPL6bZ zW5(c*(e#v5Kff~2*ne?gkgM{>Ir;6+~7xq16?1m-6Ft}X@apF$G2Eir$Wj^uEcMrMpkAl z)-%-0EARJu`ij4p7yIv-J(;6pL%8LLZP$2H6`#$`Nc${2!>FTV+Je^PZ+0c1uDNb^ z&oy;{GxrZ69_DEY_Yy7)|A|C8)(tWAjs!$VP270&&T{*`&QLXM;odHZzv-Mi_!dBn z_>&B&8pLCd(EBNz4;k_b!;SlQGyOvMPzbTQ6Z=!XrA_Vfxh?t+k$u?d&YcyKl-?qN z8Co7w3`Mg`c$yyNweNtJF=vB(y-bhlJEoQDx-7 zjd^?UWL7h4Zm&h*?2>RB9`CnZ|H#|N6B(WDrTVOT3Cgu_&C~b1WQsA60x&~>RYY@y z!BafmfGIKn|6|eU_BM>(>zEslU?j4u;=$QM3OiHkXsas-Y^ z{nmlN)_{LLj%x$RC2Ev4SnCLOD!8H2fwbnd9+HG-QG#-w=NhEVJ-Q-q`{KH9ic{LY zx^^j{-rpwO8To5XoFG63AngXLzz_7^Q7uc9p*hR0PL|e(WY3nJ?tUJEhKnV)G&E{t zl=QWE3BEDekq)6nx#DOsw;ba&0qq4zG}8jm8jv2oVagT69r3HrC0+?-3nwQQ5-Qb^5?;4z}EUc
[GIT binary patch payload: base85-encoded literal data omitted; not reproducible as readable text.]
zYNyR!=Fb@|ai}bJJ;?lgH$ezllNis{!;DA$n2{;2Pf=>krBFUaKe+TxJ<`)^ZzO&S zOCJ9{a6liE4PwWPam4KE2BE5mE`Apz!nIjMmGSKBhm^e72alsCcA_S2ZAcd$X};C` zH&AviG54^YfgJ6kz^6_8bHvsdD)9e-{kYVho-f0{O}&g3r$I?G^Y?ViwWE7~It)c5 zoqs$r?(Q~$BIiXOEI~K%X?qfBeZ`C^f{uxA5ybjUNTwE1m~j8f-5pWQ@8(EV#9yCq+rSR#2toQf_1AUPaaxGZdKN6_=gXWwi(miT^2Ic{}rW?c< zD8TRl$gdWPdEV^$)=AvVeA*uczuc7=D;oDg&G1CW0o=Pxy&y)YCaYINEFH0WwxL9V zSkw<$P+_rlt}|hRC}xG=IrAU!G-75`Nl7lZCYwu(0`_uJfz4f9xO&dNe02WN*)mRb zmtxl+i<949i~QNajy}!4S_Nvjg?22Nf47*UcgU9qA4@PBqm3_~7Aa(se*r}kCjo<^ zFzfv4gb6H{K;_B!%)Ug5__07^qeWEQ7SL6q?r?m+)rjszNh`6Y;X5ze_0sY$bx)C3 z0O7J9FrgCQcaY&xf&p8kZ57%>Y_aH1Fn3JAqp`b5Ws^}vr?2Q*vfT0H2PVtjJ~jKD z_vg2#@RayfM8;z(>jH9v|4CK}qyyrTiLL5*GwplL%297-rdOKGid;{W7(FW*Ebmwk z|3MO!G0Oe$u0H?UO7aeC!z-H^s-4lmWe1^Dd_l0zT>TI_$UU+?%s2B2JLhhStcK9G zKmZivY#{byk*pJkkQ?2d(?r_VQO+BP!YZB7YbV{kC6)JLDFRj=-T7-o!Lz~}h5vm# z@czpMi*96Jwwa&;`A##SZVcPdr(j$?;*ML!ase=YJ?r)HlzzFx>sBS@V*zdtvs!ob zJbZXu>r;sP*vk3ZK8yP7mydjd%BW-v%K-tIT<2{D8$85w;@?C2>mD7Mm@(<@mDx>= z3``A|$}rRiJy~_a`&v9=5L!4m5g#-2qG%nJ1mfNsqUs zS2IK+)F}AkS&$ai<0+HD@u;&uCm@)#Zm?VK zQf3g&xZ(1!j?reoLTpZXD<9$!=)5OKlJu}Xt4H!YKdl)c&sqUL^Q0DAWVvRBJih*S z#-%+ud@n`GufDqReU)pT=-~LZ2yt~=<;A;9;SS$Xf#u}X#7JfdI|CNa0dHlO{WM}c z;~4*0Fsz8T9H2Z7_iJdbX{@cbC+jjteypB-Z}(_GwCG`(G5cmx$iFK=v4E)61e~gE zlm@4npNp5|p23@*xcn6#`GSTu{^D5wIo6{~vEV@f>c_G1G)|Vu-tHZ1JMaa7d9K&o zWcdVFm1o9(u!fp_)6Q^^8nus}$#c^5)l;i zUF+7Jr=J7{tzpfK%$P=p#Ltt?)CZI?_}q5A{4G901ZKQ!&pZbWi}iD_f}^w}GM;0q zlDbb2gUPo?Nn&{0;(#3U;P_${`SSxs!@|S&ChlEd)OvY8+4%V{=wr8=0M8AqQN?g+ zPqRGXt6Ocz?NH;A>9<~|=hPFS*Xx^^8Rf)d$PIvTK`18{Y+Y*~uIxK3zDMhbpgJ`a2zDb{ z65hr01e>(OUx$KNq$5Lk#~EFB>vSa*lpQ6D!})O}Trem_)ahkttxUrCmgk6%8IVyV zeCALYam=q1y}lRq`#dZWKU5ne-%6zzSIt=54KiGEgpR#x$e+tL^-eS#?ybEy=5+jw zy-bVhFc(<+En}cPnaM)IqLep09cYqK9^E>n{lJ;6nOwM(8`SlT@X^v={W8unU^i01 zx?Yk-HKH;G&|~L8KPZ-!$9V<0aDDIx*p9JlWaKzRJY|P>DD+%F2iHY~Uktv3#-^YT zdVeb5%m@GJKYLjA|7p*g7|(Yj-z0Jx+@)9`(@LF(XCI?(Cn%18vlgNymKa18+Gakh z(+r(FsWGe@|8!rC<)-AVVwTQ&E#yQJ%LC*UT{sEs`b1a?zqbMEW#x66>~UM{c6d8v z$Ulnmr292UdV2+#|Fr2;*)5FF2)ylcKVoBws4RF*z^fGg0HI8lEm)Bpr{N}*X`-y7 z|9WRjb(4CCueqb_jcf8%G@AR--RUNQ)9EcHLEl70I3PDARK=$@p}?n&`FHV1t|x>$ z6|^-bM4K|SR(Ic?qifS+lNdGJJ$0{o_3nMKdIiaP?2}FSE?Xp z|HGb<$U5Jo9NkZQgmB`seJQ5Hf(OO`j57)cOI-jm&GJ{3MvGcHan+_4>C%k&<&Sv$ zq4L9C{$1@l4Pha3kLz`+K5q9**m^)dbgM9 z7m8vWsl`3?rA3JHp00Ik#d1HBDLd7O3m zc~nwo!_&R$i5N2-tpq8AqV@i{Ln(1)o2kI9OIYrw1IGX0<%(R>9SFF z#k5aw>9U8&$t!E_Zw&)RX0}x+ta|MVp0REV3T>na5W^n9p>$RN=5|<+4=lckdprtW z*UVb71}G@3_oCRnsYQ0MFT5}z^({T3N=1ykc9}elgC#@%&uDcVfYbH4w$SQq|32tW#Y7hS*&)4!+_@9+tzMxYSuYY;cYjpQiOrcPP;){n;rwmLL zG+kIr+C}O-DX)c0!}8`6hRoBSjm4C||H}~i z>8dc_o7TG`0Ot`IOKCo>xqdup?HSLU6j$LN1tn1t^b-=Tu4J3ypuJ-GFWtNK)rg|# zq?zL7g3B{`Xvly%BLVLUq~m(ufI&dWszb%CbV$tw zJdVo%KH5f0ugevQXNCpI*bS6lc|WA*g}Uqd&3f~}Ua+Bnhzi$K<2{1YROp}|sC*_; z84qaWl;xQtH)qf)i=*<}6E=OQudl5?raScJ?#HuV)qseaai&aB+stlM1f;I zKaRJA_zXU_WgTKW*W3u}BecaBv#fWe`aIsh-E4E6)L^{`n7b8ySeTek{vS3TWljJI z)IaUhf8io|3Y_4?1A^W91ygCuD2v1MqjykPcJOC ztK=K4OM2Bx4Quepmi@M_>K@3%ZaR)U;$iro00qI@p$Bi>DC2qX-|)=1ZgBst`gXdK z`%m`5l+u^7rbDH!Uf9jPh+T0be?R4W7aiWL7Gpj-2zV_WTQKRE8WL~&6ftR|78xJj zzj>*D*1?MzM&B0jdm)G9mxXdVVVQR)-Qk{7T-`V2ki)7VGxh%qp`Ev&|2r>9Tz=XO z+9LHaIB*`r=*_S(46wDVlM7a=c+i9RZuS`-YP5X^e`3KTtKysAJ8D)Re5^Zx^#1N! 
z$GGAWHwk|x)`GaQ$eM%0Y>v9avj+5w!oc~RmfPvs52Dd6&yhV`L=Gq8pkH52@8_wI z6ahb;VhzJn39v1mV7hchgs3&Kk?DW1smVuUzq?P+++WBoIvu<6`lq;ZxDhl8R$f28 zbcQg97BKP*JP6%#L@ZWyq;_3RS(FLQ2vxFPcUc1f)Gd{GacG=tNo?J_+^mWxum}`J z3_hyV^_}@u>Se3Es9pCvt3Jt#Dc^P%37CjaNBu-VdI;WjZ3iGx@8FNy0J~H9WOf=G z**Y3j0=Zmhmi#V~37T+DeNfC6f9UkB=Ve)D*VwL;_Yvb%(QD00qj>G2rF_Zky=&3E zcG^{0n3+#JcanJCYu~y4C{rke#y(lcu2RX)SZ0Q#Vwj=En_0SEKG%Qu@4nBue&649?mwoJ zbIQEmul2b+mg4vP?xua=cciWY%8M-qQnn;9$Y3egvoaU-n0Hj1wk{f~cZApq#QW@j z*C&6(n^C`O-^BxxYu)dYbBDNBaV-Sq6*Qy;S&QKs70>hHO)+}R;3tH4+q+2Hf2_|gl+3j`C3iq|eO+KuaVmPos#;ah_jb23Bt zOEl`n&oQ-L5H|{2CEiF&W$tbbHfoSz#WjB^xyuQ@9-3~N)cO~eqByh$QkyNNyvIP} zmCCe<&^r%CKZZX@-qvWQxc+n+!nfnLG5Il}exa{C&!E|PyF7n+8$9DI9 zUy;CH#siegq0G1qNmk}LD^Y$ITD(2PA8jS_Lr>-^W3oZII>O(oI!U3?%xed3SvWm* zd2%HBtKd%zq}UzLl>^?#N=pkXCD8VBR12VQ7f3G&>2N=&S6dz{?W&01b5B75Z130P zTu^un?vfWN2agzpJ&dHc_uDcu(Ra8hyr<1b zu#n!PnWvpJta-`a{LN^tEmJOLVcBzMwo(4upXJj3RQR0aTvVx)o>+T5O!jM@TNw?qg^$ARV#x%OhbHHHYRF2xU z@7dqhDs?+$ADd+e@ccsx`B#1?^50Jq_yc9|{{w8l7XYVZPNP%Fv;jE;-M#&@5Kz5) zs6&iC={~AmraQJs^v#~$``R>iTQoB1{pqWh%;ieJNJQJauS4hjq|8~;^rn?~>;bBx|*xE)HE#D!j!>Ge{*@eFgh`tA8h z{3WWiD%Pr)|3Uuf0CM;Fc|Cct=7g4%yxE0krm2v;3Iy)aXxTrhLY;4cJm=7MjvU-IP-b$tdr~xR5+eCfrk#To4{$751A3U zR=k%oT}#uesr}oCwjUaQR7Ys3o*XTmls}s}_wE*N%q{s!d6EGPZ!xs?6Pze_0Vv=q zzn{RiXmt7ud)<)06CwUX_DPM(9$_eRWVqLO&l`0V@7{4nGJPB9^X7+qDqeNHa#L&c z?huUGLCb~96$x&r2S*=9&B{`D1M*wJBtXd$FA~3Cprg*u-2mB*O5UUvxUHk&Z87cI zncN(DP8b3bIjry#4RnYPHiMfNs1egrr2y_Gj!qaea~=UUi>Z>1Q@?^q&C){{*QyL_ z(Sqgn2p6iGM`O{W#hw@A&qs-U1n6+#LNv!dgv#Gp0BLZHLp12A34icF?>Ki?+DgZ; zgU-adVI`$1w9Bz0z7>(l4S(h{V^}1}$s?`|Y@AJiRW!3fidr}@4d#6}(7sTC?0M1u zo4*T=U1nf!Cx)WZNe03Suee5om_{3Ru5v>kW`AFyV|-+McNj|~c(=T`*Yn9DLDpq< zGb8|V>0mG#1>?F3WKL*CJm2`Mf;+&d!2+pJFWpzP%k< zTlJyI@SglvVgT_CiZ-nQ9mE5$EqP`HM4oRz4NPGc>&UznRQapkCr9;U%%iScB>!IolE~)jVY2>qfHza@5D3ht)Qb4jrEug z^zO15k9x!9E{)-cD_h<*=jwk^(`|8}h*m9`ziXOfUOWZ2j0$%aoHNm=8tOw3j7yP8n1xi9!w*Lwp2$5>0>xRT zSUpBuW48CW^oSwF16Ia;=AWE0li)wq;okugNMxW+!tQ#n=8!k-g6(@Ng@&8$qOAb(Wm6`9A$i~-fJiwgm1680la(- z)#UDnfp|%cgiwZd9yeD`!;?9YtxkRjW z=?8k#`F`>5=h?a2QMXACioRtbc_&T(C~P7(;(-L5B6F?{4k?*M;%{ikUkeTDZqfW< zw7+&dY2sH#A?KXS?@8@}yAfVo2yn0oOw+%x@nSF}rBqSwE{-+#7%L@)IZk(fsL^O@ zR9NaUQk$(vlDQ}&j?reRH4?n)NHqurgw_E%AC<_ zG0;rrZaF?)!v#{Jv9*+I0Z(@rdDK-bS1Jitp*oZN6L?-sG%zzqjE3_?Z$NdNlTdFb ze;?~pyPC(Kx;IoB% z;nO#!eE`gBAFqtTJ>(bAyq~q1_!s8f=SeY`z<=8k#_!&4g3;|=6udP84($E#z0})bEzp^B1kf#?eMqI4+(G}NQB;nawm=}(7)8sZ%h+p z=lz^LjE?p%YJVVVH!w6VlJ2?Gotu35MD_8apy$z+ebK3H=46*%0I}i9;yKqt(x-tl zF1c-$ERB;4DGj}?qNY1%PpYp@9&ME`ym7n7Uv#L*wQ^cZELJ~)sCY_{10tKK_aW`A zxUJwX*0{c~xzvIL!;@BVx%(^RJt;*02zRBLQWv(Tx>B+@FHaNw1(;cY=<|>nE1KUO zPyQXXo%@>i4yOeMbEA#wK)laC(9bvGs%6s5T8MG$;Id1^8Y_Lt3eOvNYRw<78Ckmc zf*-L$%)rLGT#Y|>wV-$P z^cC+B-U}W{2I&aJaT?XF5o@Hv#KhG!7}^@@T^Z5#YBKuBDj42mQ;x?gMI1qaMBCM2)D~C1wiA$IHt{)%bEAz!MlAEO3%g z8)3`>h|&#B18J_>r(IbX8bT`1a}`g?EG~Tc58|Zwo-ZG2J*Ez*x?MbdVFotBIx&rh zbi~<{Kt|(lOA?N&otL0v}de`I)n--*i~!waSN_JghR_@ zWP#jV0N*OcwV;CoXM zMxbxldWTG#)1bJwXky(UV?X9);K1teeOsWYnN?k7)c|8D$uihzo~rJ}c__K}@$SvS zPcNCcG>ohphX}ShD7Rxac@TCsYL#aD-oN4VnxgWoJZe}c;n+FB=WhT?#t}VN2sXkX z2$wq1HHExruIFD^vCdQcOE$zP|*G1M^>`!ufCbtiq;2TGw)@9i_!%|_`iR7{Tu0l6V8#mA=R>xOhrA! 
z^-bwQe>MnF+xh}~q|+@o%CVn81taD4txdu}X+_b%$ol|F!%&4&2-@0KkV+gkkH`j* zTcy+V5>IEsZUvNnwqC-2)N57(2sv|*4h=XzM-NIdDwvlYcm-hlp%YP6q}YNKQH$Y?RSHvuSm)NMBaV*OuTib22TJ*9+hcO;q2ybV+9xC-s7ZsPXZai?ed3N zZcU4Y;owrqnFZekQGRlQ+Rwr%3ahdUSQMe8OLVC!%`TydbZ2B@2|J^BL=cMOj> zT>si58*nS)jM9G2u1@^RPP_=285q0~JLCHBB>2MYbnn@9yvK|e7;N46{|+(4G4eQew;_;#gv{i*rwoA$gvx7pu0j+xHiDF+tPI_|Ul?pjEY za~^cJ$tg(JSElU4I$oc}uT(TO1aK-ZW-uG|gbUII-v}ERGHbAdkNsFiGP&hZS41+Jp zBpiOR=|B9y|9<_iBAEZDqM9m9*^S&Y$_n=tPa<9*dS(?VbSJ#WmjlLq0;OjCCxY8O{08n->EHotpD$P@SCWx=3QLd0X%I5BaSs0 zL~53trmtxPW?EjfIFj)>!)mA2OYuF+%x;+f2b$gLfGKQ`_5b`Mv0}zZrY}*vI>}Y= zBoJZB%%2R7!zrVGeuu;Fgq2Uadj>4ze|{8tnKw9Lw|Y^}b@pI$W?m#p8PrY5%nPE* z|5~_BaFqrd&+xTa#Dp48uIY5I*tu7@@#2X$#k-c2xJ5%Sn{QQ`p*#CIX7dtzE(w_a zH$LbVQj5j|yhJq>LXLM(cj>@)-1!=UvzT-nxSaOUt;#LEJ*mi~6WOTQvTr#=g7^FX zF)9CpftBFi#<#XO3xE~!1=j<(D5=qazfERZea3e=t3#OA#xCpBsDLu#OAP}qO#@kt zy)_=}<;`hbzhHTG$@kvwt2<+z%62z};e|&C%rNr?z6e#*iUG9K=eez_s zPNNiyOxgC~N}V~*qm5g7I%usM=mmgIIfuWqg!dXN%A9E-Y}bLuBDNqE0;&urssFHp z%!91eHQr*BQl<<_Qq|6pNU??=wP5=jpYk&o-n8$~LbW|VB_GaAr>8#P3uBKkyITnY zCSN_AclCQ$JDt??`IM0%Rv=c4y!F6z^T^BpaNFI(88RE2^_c24w0AlpWe5I*izF&F zS3GJDyyL5&`S8Z$bGOovi)U@Nmv6RPqj7hFRrdtn>W-pO5vlT8&^0j5N0^yOK$=g; zt}e;JYyEWzK4Mc&#JI0^)5TfegPJ|Ef@-Dr_lY$$zTP3rlPLm7mo#28p4PY#rg9}Y z^gZNqJD^iU6N=aVtlVw$DwkL+_~;&s^E>i5H2S@%ym3N4mLU zfR6eB%!S zH*XG`>=lgMax!xJ;k^G}KB&Ka|8Lg-ISmzqUP+#PKx3>I(sy(MuTSRp;|w9o zK<1YbIzO7OCdj!n_3c+O-futW!rhoBYhq{fy%zQN?MivNjAyii1s#r+g3zq&85)W! zzDjTE?KYS{ISc(jQnTl8PI#8l^i(HO%)(1{pMd%;)At$`0)D(u>=`JF zxk^X1e|DT6=_tEBV8Z+o<8~+?Up)Cyp_Wt=6Wo|I7y0rm{wXY2+m1QLF$Q1sJq!sn zxR-F!xK@N9rdiRmHtnP%Fk+Y(?x(6^r4N)xQLfo43|&Z5VRZfY%B}fLS`Y>Ab`QIW z_X2lvl>ZrDo7x_}bf3R}aB*g!EKqpK^!q=VHg}6d++v5l3_qPI+4Feo=^)c@Fg&{2 zZW?Gq0G2Ys6)dT{&{Z1Y;RqFuo)%-Jlo(~G)xWUeLt%cyO+Z%_B<@?D z50HWSCLAj~10vwKdsr|?52)4+<@O32Plu^ANt06bc^kYqwN>}6=5Qnm^Mg18;vGQiuv*enh%fVAEF={DB zle}^=jox&etqmn|w7J?WbQ?$qKj1w?1I*q8(i2XxI%!YSSV^7A#VT|Oa%wPY=$T<1tj-Ju} zMvI3ohl_lChKIH78$T!p-%SXL{9^4m4Jt&)5q2CinL$Xw9pHZy|Jl5Cuwa?tt2nIg z5-RX5G+4`XT6pi_FGHuF1#SWcItkp@8|T{xTvrg9X#+ByDCALoma<^JFybv34F{CF27D@nYWlT*b56Ve1#J#QlxK^a zD06p9`DUhQ6p}t`XJ`59;;<-s&JPq(w&d1rlm%BbxRTO1iHV0>IxgeeBm+@YZnjt- zi``op21oG?>#+pJC8728K6Dnqtpd;l5PG|kI(866$-oF&GJk(4b^eJO(~}}&{Md$s z#uaR$uPok5BPmd49diotnGlVl)suHMUIyRC+z4_FEI@C}GOnFnHQCa9P%15_o$Vx1 zbK#$$Q2cRXW)icJP5h}v?zN)BxkP~y{HQUgxDB2?&u;}u3W0eQF&54jHfHL=lgQO> zou7xklo?$65w`Q6U4PVU=rHwz=5i7@<0U*_-4@w<^hA(}3%ox;1v&sK)zO_~FOyL)d%!l_{@Yj1gx zc-piaicJP@oDKmXTP7SvTr>)zNzAO z>t?+9!pDSbDXJ79|BREI#t!j<7xXn~Hilagx^EwrH9kq{$lUvTcZVUzj;)i&A0=-!QowbS$hG8_=NGA6x$x9>QtpAu z>@QA&+r&ju>^M?j1O*=p*7X5lD61)MyuIgaNEhRLQ72`VM2V&wGlUSFE&GA;1rEZ3cU8a!biSHn_RUf zw$`vJ>-Bu?C+W`2FnKS^Qv*VZv|oZ2H?+dbg<1`*rWapgy!K>+A1O)}_KVU2X(}FN zWv?@)_5XQtoSHkG&6HGEqd0$!Q|TJ^bgexr;l&me3`Yr7AZ6W)^A`Dm=e&FtTyWuwX+^1 zyCKF5Y*>KNK9*3kFtVz5Z|(f?EZZ+1^6yuCKx>^cw8XsvLPcQLe<$B3Fu{41?1%eE z-GvnQXSPpUE{ONMqU4YnvZCiG3Oz`_o^LIz3y&4dXFtgIzZu0wZWBU-^*0M9`WN>s z_|pjD(9zM2!W)Mjs{k*yf6%=yjlb-8*;loIWV%h^?3X3|Z-mpGxVQ}j4~8m^Z`H!P zP{sKpQ*b7Om=Gwsb?HJ);>E_SGCH2*WaeugV2gQr{Sj;){XUf|PyGv1YG-?c>A-;_ z=9Q!lX9V2*H}D_DjSk{R9oxz4_o5MCrsb9x(*J9_2z4Ntq2X;*B2QS^H|AS-PRTSKJD-!olANn8AWdO%LbY#Om zj+RcX_%p|_o)Hypym~LtLJnckw|l|9=FjB#?kg8>=D25W_P6AhRn0PHPRKt5tL!tc zifaxgVH+=zdw_!uNo?uIBGJ-npV(o@k@ib|o;#LyT$VP`#gRSKXsnw-WX_@DdE0ai8wb z)qeWpZO8Tp27Vu)m2TV)$cgqG4Q$DsE{>ai)fNE_ma}|Dhb_Cd_Pw+DE&sN>!-}~r zb}ToJ85qEsY$R3kk47vgX`t^@QahbAF5DKp-k2MXt~q-5R&1Au=WTl{X5-tWv&UtA zzkoBoC-5)hH;kwPRu_PsK?wJ5Es*^KhwfH#D?#qpy&NaS32I`KE`LF63-KT3cvDkP zu0o2*(QiKR#yq_r0Ph3{=l|!y|5qVKvIN(T-;ZX7NX5QnP2j-{y1=8t2%VE>riPE` 
zfFG;RXH-mR2`bw5IXYUZb@*u>b~id-VPk!5dj%*a3xjJy$Ofy-Ye0E04A@4z0-W4+ z>><`L=|p+|A;*F6!b$22Z^}o-SYe+-Hydu+?iRv7sjRrvf*AIZppgMP^aN9bZ2&2k zZNN0EgX4VwGywrOQEX^>_hB~vN{aR=Fse5t_4_Q2i@}J~-JlO0I+Dr_WXZ3#Q!7Rm z1|p`Lc78Oj3}>9NCFjCMcMcB+RhBi=Q;=h){s4!og9&`Vc>-qzGr(-26YQZ~&rALe&Zg73LlX zm)F;3#gN`^hvW^um9|qzTzkEjB*&EhN@c3)o&IhM(O0DpF}eCHN`jm&fCeD{+vAeJ zgHrxEtFgp30s^^(E&`VZ|FLsdNCZP;QL#2f|66t4yA{E+rRG~<@4Ak8{U(D6{u$1> z#Q%T*3(C{U;-cljxoP-=-{{>Iu6tsy)JE9!t7JU99-}Jup;}XLe}DuZ^c|VkF^~qK zMFUb<<%$!*T&WF767U(RNFj%QrCx@GLC#6Nh1 z9f27_WyrC%|Ak#*p%!L1hp6+pJNv)gP+Q238Tu5Z^G(`exbr2+``(r6$gtdM0p9mM z^cOUD7qIv>v{-6CuC&Q^QB+@XO6TY=cLSsy^&aUJ`Fh z5?5_#t3&_jkKE{xXQ617=n4YEhvN$^@r42L-scV)fjxP$^4DLOSNXE)=|127u5YHg z%V=?Zk!8{LY*2m>2R#xE?(S%|!vNmX9xI6Ia4iZ+7&yi6AvN=RZ*i_jiHACd7eT4rv_Eo0~$G)JAS|R zO+=)jkH23A9&b-3pHtO!fn)t8OKmUZ^h{R01SGM$myPyHCA<`{a=y;2+|dq4*S}-OO{F5wn%v1PJIig?{-vh6jLcR_9Qsjg}NN!q9lrP1#j#V(MjR_AS z1+5oYN8Qr3GrF$yElIRj(B!T7cc))(uL!Pd0q+(Vs3l<_CE!vJ4Mr>~2R8~BKLB1A zZWXf}qQ;jVpSnoNpBKC&MYz@MsJhLxK;D@#&gaY&HsL0@UQh}_$VfDVG(8XMA08Sp z0ggNuO5AcI%T@66>=I=>*n7Oev-{=GQ}WMEaf<)?^;WKO2_>bCltGIi2 zY)=}D{>s0U={)F-G0i)rz}a`i<1L6vZas}UC~-CDD4>T~nc`t21T##TAyi;U>_CX| zKYkx+8R=(`>LPB>?numi7(fZRKf1H^v1K8_#zC>VSKB?0s|{P zZDHOR%rdxq-?t2@TzIjTAyIWpbv08-(a~evbDJ(Qm3xGwiXvD6+Wil-RCR=f6$=BD zRAZ;RTk4-Ah+i70F@F%Hh5K#kcf0)huIL!57;0*d07nzlZCV436wm-J^$k3&Ztq40 zAde95*#tetC`m>4e7#tazO`4C+cWEWcP*5Wz)x4<_LFa>QZngqMg<5Iq9HV36v6`y zsWC`brOC;}i8hz#x*=NjuTswSPKLZPOD;dZc)2F)uGOk6xT-HhK-fsc0L7K)afZCyP;DD0HM5lJ94Y(UiettA39036E%J|yW^^Xo zioHUvJ;4V_87&^<%m!pg1<`@pjon)Z83Z$_e6gmjRBMj+I*FYydD|qq=L$#S?Nmcm zT@#SF>Kl^ag4Wl6QlWHYWrEx%l-z;Z5;AXD&MQaV4*v@if&4FH6MIC~Y?t-**Jel! zH4oJ_e<;O=vqAhNpTO$<@75>r#{gD86*4ej`|(nYw5EVQab;$7&I~G{Iw!VrNaJQR zXJ@)sNODEl^y3@P7iZ7tk~2#7B86^BpA!0$^Y)i$fk$R`^__i`R>hZAumu~xb-R^~ zLWlsErfIAR`12zofr&lElzdLhg#3B2E?WS>qFe?D2!J8K6t z1(3$4hOoDBV!)DmbD%~{5T!$7bJ)<6QACwuI2KZb|g7K>gjc=*lbtO;Nbav46B&)=TTjI#$&1$Esb%AuH2qd0$FnTP5u=gM8< z6O{W@mC9RlhW9i(aSr%aA5hH1155KK)M|Vi{{EB@b7$7{JSM*OGji2#bP*klRfIA%B_NrzSUb; z#(r8$d_LOJWkm(iyY8zS`M62I54c$I=6tyW#nazXJ9*Vc|IfBW@delm`0o>Wgl%QO7iW?BMYo){I&_~aVSf)fHWNDof&CMtqumU_&JLH# z28PcIM8A&&iwYc}LvWwK8GMltNg`L9p;EEQWYh8N#=h`Hcfl3nY6k` z8pU8oq$O}KfjLFtTknzGfN@&y107kkz_>Y$8t<@4I1nv-M^9L%ZTy*lzt_P&lJ@P&kg zH8`8WD=9iWy$4NKmbS9sO+Rj#}b zb1(l(vm6T($rrnGa-NDi>lG zUq~~F)y=plan|aq9g_@73vjg|tQsq-ojwQLhz#!_-~oxNXEUuU8nPA_%D&OvyhKXu z9XwW6vy?7Z%N5OuplcCK`(8+0JrOMUddQzioR*5h?K5YEn?qGwUm8CDLU_ZAI_y(+ zvnK6`v53XXqhB&l5hq%l*Jy%20d=bvu$|btU}=@!!D;n_IBG4hn9x#ub3OgeFzTjF z{r!K=xUP+oxZ%zBw(1jy9R9)%z@hykT*;I<{3){e2MGai}I-GbMa-hg}iKU za+S=FC_m3Y@+XcNM>Z(97x622I(1x+MruQk{_0_m^73@_k)m|L7o%RJ<;l7y4kFOZ ztw`@v%kP~{A4(`pDm;?72g1v;!$dk_8qJjlPM#9b+?5*Oy>`3Fth0GfzuW#_VaW?^ z-|bK92a~F6veYW`_iOjNl3(H4@vZwn8;v+d;BO~R7xRTe5{>aOAVj20)z{>B@w#G_@ixz@fP9cogH`tEeZlJo=>dVD@s9zlGSjN_PK0YM#X)jK$k8oE z`#4uW16rR(;G<%UOU&>Wvr3NGxX#R*2PDpEH`#H;vqM}Hd@CND^i`m&(0aXQDYo`9 zSG67jQSJ86e|`wmfx3J(j>;<-c2(g&^qO76-vZf_0mtF7nmq52CiW1n*HceCr&t#;U@BG2dX2Ga$i}15hKJ5WW>4 z)M$09jSgmvTDP2zMv|LBVY=TH)R@ODOvQ?F% z6}G`HXU?|T6IWsiK~;tz*Qi!J3Kh<1EL7>{&?e8D?s76Nxo&zSx$cuq@6Ga0E%)|# z3k?Rs5L8PE*3yC|Cjj>an1gAI&79-CLfwvVxf6D%0NIe}c`P$Ka2j^2x*=YC)>8f~ z(4T$u5)!i3_+$@{FlQX!p?7lr2)YeLuX&AHx-ReC7MK< zvGr~GobxqNXk^nJ!o zY<$#FRvPo1(z{T4&DFk^rLTJ-i?MjC9)HHqDftiQ8FV`;Qv>)Z3<`{tQ{8oDm>pEa0 z#i!yoEj91`nVs~wAp7(4!GW5z^W!E}_PsuP%C`RrN*-$w^7amS0be|nu=1{1jjKVR z+i^7vFk+s|kRC^M9zE{&CDkQhHt~zzK{taP2M>@#^v6Hrr4jqUc(NKWVpvBq zWk|4ud%31ER8q}mG4w^+&x1bA-nNpp10N6g7WJP?jnX?CmjL5W0lHHCJOiYe zc5Dks^DX}?p&Avh21=rE(H3R$p!&V5g2zm*UfD8b76eM8HeWi9MxEOuzyLj=RF(!G 
zuEFXCDBU*>{4PS3bN)1SyQkKpK*O`_8~&>MpBao(96n`?SwE8T)TH*40l6B)Sbif5 zyDC*E4z~O$U?R z$hZEg@8SCedhhUqfUqu=8K~TbAFpDk43J%yC}#Ab?lz2^mw%4vLANHo`iRN(t)ENt z&begt-&Uzry0Jfa%|b;&bZrG(8&+U}xx15?OI?6KFv2|wz#2d4#bmlRe}LE0ks;G; z)D5yQY$~)izk;tig&2b`7<1hJHHo{={~QKqT;rf*zcUNtbC(T{7qzmzl13-Yy$=0o z*F}YjyXKx%nLqj$hQvd<;~TvQW_}BGGa%BSn1#N7_Wn@|Te&EI%+j$ZW@|~f;qZzl z`_Nz5)$a@46eFLy_g{p5Yk)a>02Wv>%8dXi2_?=zvXU{uES2d8wZ*~eF@+P|o<4=0 zhqr2woFtyKEf-VwneRxsyrWg(QMu4>V^h4~I_R{41Q}}(j* zQtBN&gdrmwnt8o043jO=#L(Q4wUi$dR}V5jb*^j`>OpuFoz0* zK*7l|s*C=~`nusW(+=;C!k0$7PEC^mM_@as{sH9221BZs3|MfLo|A8QB0EZ$9yV2Gk!o2>v!#@XhB~c&eF6(nR~>YR8#%W#9|bs@80~$*T_cx!o9PwwL<>( zjg_jz2UZ#u7zqoe-l_gs=q1YrmK^%R{(7Q;kLImYEMDzG_u&*XTU&eA6f?@baRHka zL|=^rSJVT+3|;CXZkeaxUV(~#&%jT4sljKV!MgPYOsm;l&-)@?f4=$b{`*UP(Z5tU z%DlLZn0q4@JHV9bW+(76bpDKJi)4w`bol04PR2|htNZtddKBZSL?>1x0gSixm#CnA&8MBt@`a3 zinMZo@u4DO5AX(91^FKv3*rM3zn6rN=8HHwd1SwiyO$xq|EtEwnlI0Tem33xx!kik z+UP=yIj2duprmM36K)U&HcW` zQ8+ttom<9brN0dHM;eLpO7Z{J$roHXw?c&2;!uw+h&+XtIs*I0&soUAjlPQBWLt<} zbZ^dA9XA;--#H}BCibBdr*z%{ku3svvt6Y$$Kn9dgR3^@t~0L5^h-WDqVU4NuM?jZ z%-w_Mix~QI?g5|-9}c$RnO;yC7dB^hp?2m$zCKnAn>&cnu+sJTaX%n?rqnA&iwZ35y1Et<2`bSHTG7An7AIVD&GlL z(P!)zq8sWrT*>fh1XuESD<~@2u>@OXf};p6B#KXg=Fe4|>km7B`=3zzev~mj3)7rO ze|5t@b?1t~fhQIdYz7`ji+hvMEsq#Pdd*nLcsb|;N#nwIByqJ;y-nAY>)iZy+__~c z>-u;sMdH|A;=)c4(+g#BU7+xp2sQj2@>tMnJ5Fj?+TjSJMw;H_=$*XWE;sqiZ&E@i z2EGl6ZHq)T9aZT>xg#jzbSnm$snSNR4sPL@+?m?LiX}+q7hY$KcAd~a`Xkg_!NtY| znO}UlpZp9C9aZMa0UtbE2YT;Y@@>RedfRbOMvi%Xy(zXt?s;F;Kmh0cxT|Am&yFR( zyAQ-z9XXe{uPy0MPodttQGtSZ&%v3p>LTsfJ+!60K+9X|!EdS@1h=Uj*0#J!QRxwxKVs_@o)h{+ z`=05+-K9wd68mWhu%CzX$ZFtQiqT9v)EJPr(*w>42>w^8O6vUj$B*?wu>p*-qF-sI z$(^UyZpIq+e78I#;UBYO_EIDcB-SmK1g?BI=j@&0V3xQE?@9JjtFBuTNps##Rl!t$ zZE@t{vA6oYi;xZv)KprP0O*}unaY)*w#q!GwhlHhN0J)#A2-*~Bd+D^-8DM|%N0su zysM=yheoQXxSTxnZ9;$pH^m!Kfo5)_OmHU|QYFXZX^(y~n=OQ^h5sBz(T|*Ld91|$ zQVpauJK3fx69Rg1W zjP!dhpWbuV*rm2;%I8!1=7%PX6<6eL&}8t>Bd1}YL(1HJJU#B76(Fir!0(>g0-Xnb z+lICSm+Rwfn}>`BinK-@t3T9lqub|ekhYkl`7Cs0G4uI5CwQ5Ek`-p`;+s(+TbEjf z$=iJoln)%J9TLCN#FG2Ha@{0j!v<>f%qU6gxOP%})(4s|j88gDoK5Eog9}LL5Xi@v zmoZ7`J=LiXeKl%iu`{UGqk8{dTNjAH-`?6QnLsCwNl{~IC zT8^g@slbR1Iy?(dc@69n&Ac)qa{rXR1u&YmY@!=f@m?Wgcwa&ppeM&)mW<%QwY!^_u10AXJw2IW&^e56{%b4-7ttLzwbX6zhYVu~0l`7FD`E)^-6X{TDUdx~=15n%hw?oJ*H z9)EOxGpOdwkIZCtX>8e0ao>VP z4ipXOkE?<@Q-%A3MmrGn|4F60jU=Rlo4Q@i68`YuAiqa4i&sv{4*m=bUa(Z<|mT7y53!qxFQ)G zbo6mv)RzT`(~8?+?DHznC7}2%f0|`H9h_Vvr@}tK%hXBT`@_L&t6nh4-#i4MD#9Ec zMXvQz^=DR>;j?HwP=LeJegP4E_A0bii2LvtCeIZKWX5&ZPcYmHvWTYqoY1gBkvnc4 zWwTM!#!pl9;!BQ=HJ$*7(;q7AL0&mu6xTwnGJA;ff>SnGyl+&dV=5!6M^CeJlBOm) zJ;lx~*L|wmkBk&Dd{g+}42q#C5G#P>9@I6)q2L(=ch@{$!mTeQfH_EIX5&5iw3`;9 zXoM?AnPX(e#r|0%j}gHNkM<$c8iVoMk)ljvzVLAnDFiK9r`n7VwdpgPhZ^5qxTF)07)fgn z^x`#P+1@rvXU{H2#lapo6v}z6Um$)bu^C{;0kjh@n@j`Et!)4~gV~GS+|PBL#_#K; z|5Jv<`1xVvhBGLg97>yO^1`df&7XR{za5WX)0g#2{|Ua@4i9N)fHj3fSK31*H2F(A(-kXO*`M&+*Q?g`fitI5(NJv>rGE_(sEfQiX$rf4&VMg{{ zQ4}$D%9cIrWUmz2cQZyM4DOM}&CJ~WE}#FN@ALfeJD%@x{Qmm=qvQQP-0u6n?(;gY z^L(A>`FgbkpCs(G?x|05Gk+*{pyGofiZ^U0vFGgWw>QLhf)?)g5=yYdFqZPid|b43 zUUiW+}xBHu*qJ#jezkk8r`3Y7iEPxaIe1mk4 zAscm<>E6&zbBng9Lx1eTg>~xR)iK` z4ZyHlyOcmTXBbz(IE7&HOnTFgrFVMoHMjXOOn+8<47D-%xjaQ$Y1Z){St56mBIlt; zl#v+xktQM|7`8Gb4GRQ39UOKKl~Yp8kiMEb?H`5_i4OSPzwwtBdMuqqk4 zf{i7=cZxzV^Uad|h*(Rhx!b$(X;a;G*>giT2VR&+%Dh{#u1zcmKFXyNs07~b0(i#m zc>-jO3CACY!wOiZQf3x?m9Eqbq>h_ia1Ey1Xuq>-pRMh^&VH?>Q*|`#t1?&%2zMT4 zmtbj_S#!2YaUlnr51fk=j8yQqAQy5aIF08%f`aD>yy)kmZ?h(A*SVv@-|x5eTng65 zIxuBdR?7!^?WTv=JHBr-Bw%rswOfN2@CNX3!9W~E&TghW2*fjB2P_~!D#Ld9m`S8OPs#Aw}NlruB*@`+_CBS0- z@hH3EcW&Onmlt7bytr6C-#z}YAZ+%y5_}{EK})l9n#EmHA6GI>@t;S=xx+P0^uy=9 
zC5uj6IO}vYt!U3)8uxxK{Xk=$K?+S#tk7aktTdgSQ+lkntVW5v~1Wtn%l2dx-;AP2@Qe2i&=Z-0iLBBSz> z?4XxOh=0kr@gb(h*LxxcnpK(Mt3Ttf+lzj5Ex)fO8hy--^fSI(kViQ7|JPKzSpQ$vRPjSw zhYL1}SXsalsRWccf|VwqsQT79L;b_|n<@6m%UA1zTz`Zs36{3~oQ=(J+W;FiB;Y?M z85q+HT_;xDz;S!9o@AV$S3OB!v`f^EULII_KW@Eo7gK)2bL_{)>p!9=%b&&)OF-*8 zMxt2?>A^r=AW%_FAc~ajN}oyKHl8_!yYk_xi^NG=v(*zibGT)Wj@>GlFd=l8flvomE zXFdjIH_$Wbk1jy@5#AI}41D>}vbU$e^bM79cJkdrDI&FppZn3dT0g(aaUKn_;xGP) z`LJg7%agTt1>q%ab*dafcD2#9LGzm&@#^r7zTeVP1A``o3j-fTeWNal9M73ghG+U2FcY0qCwJw;O3#2+4vu@Qi~vVNC+CBt%L zmaqfkCh#?70eqeS7)Kq+tr%SEvJLcaOQD5a>MxZ%c{VyXB3gfE_??Y?+mCyD;RX() z{@+ftZ~flC{{KD-sl<9iphRFndwCcUx{QUD$#J?u^yROvcuq)Td?o6yyXNaYO%8#D z3EK&EBg^2|1=joSc*M(_Q_KFwm;C|dx)4K9nfy(v7NXp7S>KOwC?fnV)mx@jo8UI8ZPD9ClTw#@vzM)D4-{=pP5pi}M&l}T75Bxa z?e+UQ_eKS@>q8H?0mqR!5G&ZJKsTOAX;Z>1sS5pu#(k46R9{MYy%yE)mlL}!Np`&W zvqVDUPpc-v31IK{=DJ9*EV}a_YV^;At%8JG+4g`VRd5q!qG(VwL$$A}w`y@%)Jot1 z|7RZ`^h#b#kJc}#U;59LwpBsfARy_x1z043uV)N&9tlpi;(EJ$u^shJo#y=06U_;w zt~O>b-mv5Q;^X&1?>nRTIFO9Uq1J($&K;wWw?P@QBHA5H=U_xM##*?y` zzhjMZCH309ez~wg{&j~(Bk9Si2Nu4*f!eK>)>$G%Mjf~xfMZ*ZAp*99*4MB&gr#CS z+-+9HiW{{uF#S%p{`!wjeIt)$s_=43%J5%pct6ftG&|!7t3=SqK-3jh}oBq0l6##@O4<=Wg}U3Kuu}oBdKBz__ab1#B+q}jzXYq$P=FqUNaSI67+pw@g3hf&-+hH>RCtZb*La&J?`*V#Eq>KTx za~U%^lT7}vd0&>@>pIN{w@B|*S+5FOaLnB-U}5>N(gx_a2&vPPW_fLWw@dh*3=Meb z=um)V=Hj($8A^?{Pj81V)Ye~l{bT3uo%`H%kL*RiaMWMIeJ@}MrXjpryoI18JlHKj zf!>o&mu#zl2>Z9{=)EkpB`;iKRLrMr2IgAaICHh}j>C0y|cx7fCvUizGmHK%Aqig#;gpV2MP`)D$sw7EDWE zr5W0|X!w%m^+$}r)4yH0sK0#G_Pz=^|Gvlp{nPXEr57Vhb1V$#P?Fz(gpf`tr++Au ztW5MUm{w>5s0mXQ|B3KkyDcHsh7{`6MHAsE|A9hM=LfR8o}gc}BM!k{9TB&#^A8_nG)*wNKMm&!0AG+%`!31H!=IBVdzV zB!Osbtc)=ZM==TDzb)1h_5|p|QN8nyG4H8Iot>!LF2S99XiPn6%lB#6&BYtLbhf5O zt({U-eA-3M!dNg{dI;GX9K(e+Z`)G#+OY=rL37lLX-2Na6;WgYI>j^_Y^Ro(~{+_z*RaF^k=c}XS8`FK@ zs-cVBl}Ed`X>3DcUs!De1e~N+a_qE(v<8f&+9`EBIeG4T%#y^aiIuE zed;mvS=*vE;vxL>6|hBGC2EmuHLGRNx!i^9jADx7N9>g#%+e*Mf52m0m>v)8?J<1G zW!==HW~5h_>M|r@Y%O8kuaVFlB@Vqtjm#vpVm9nKK;is-cnM%wi+%a0i?wf&I)RQM zLiPz^#LD$0hteruf6DYr_n8>KUw^I}xaBUS_zkW+H#&VnGRJRwQVr+AUS1Ruj+X(8 zHwtJT&Bx~2j`=E3nuZ&wi&7#}_h`zxDw&$;xexRkWTRDUCHJ0tfy!~E9NI7tEBoby z2(-dunE{W0i-=_z(~D+$o??R6Fj8k`1ul{es%1Y{RyE2~PkKH(>>f*-v9u7DwA4ot zTrvQo7i+g9vU3(7hSbMT9ifC)X~}_m!&LJ1O9j(pyR(_@&db<9KR?+AwQlE@pWGRt zKL;jrg*}2@V2m8lK=^a7;0Xu;aVp3V5ZBv55;UrQQ?Irz_t+t`NG};VC;=R%Lkw z*(JjUWr!KD^^^6m(A}p_43aQ>_`ob0&1u&!`AtqLFN*u}z<3n|Y&meGpVVvL#%O^1 zz`b19Z?FzTSY|y!ufIMbjAi^NA7_7*ysu{UOS#LRS!TUQ#`@t&LJ=rWH4!MoxG};7 zb{K*3++GPV5bht_&>^+oX`)}~=p3=*J{@gYd4@LOp>SnN9(Nuw1AaJ$1YH4yFAO(! 
zO|uLhm_Ky&lx#b0x1Nlf5FZ(B=du}Cnwh*AN2@Rm^j&=L02S}uZgS5;DspvOEgx>7 zGBc(VRT8zh>}2x}){ybD!c*wR7!2 z;Tw=?Ec6J?+C75sZeyy#1q>&63?v5&S~fple0F%G6;)6*?v1qWZ>W7#D75c=-43e6Pxkm%3of z_u&LEH3)`ux%6cc#s+5Fg46v-)qr7KqxxbMi63gX9t{z?sd+dqYH%96gVgy zh;6E#bW^@y8RYoo=<)q~`s)|K_@xFw69-I5BU`m|o+>l>OP=0-&_C40U~!Yff+m`Q z29_0#v4T2QV;K6b2-ZHZv5Ygxu#GSAA1cd=u<~XpKI|wPERs>S$d(7h&wIe` zvj*Ntltih*R#+{4rjW-u7>(=#991+(WA9*i&dfg)Y~nq2&R%mUmI2-68%^ZeV|Li| z4<_aA)jPbb7T}z~dGq4~{u_~j9s%5Z*`1{C6@WtlL4zkqoMCDaMzJ`+G8ZF(+8ja9 zK7h&c(iY*PgCppse?GDTWL6s($6!OI7@;18!B`GtjC6Jb`e)g)MKl8-nMr$Lfc@RZL}1s* zoG1bx4)Vf24UmI;5WYa>@c<9l4~X-YEtWg$glFmU$ZBh+dX!!k|1lBQ&(msTTRJ-U zSeShptcf3%@&>bwM9Csnc16gt3-#0pt!a>I+tlg6yQ|2pdo_u@%BOi^D{g$2I1qOs zI%r>c5=S)w_&(@yEh`U<7J#!OFagOBFae&ij&%u6)sqY0fn_y^z7e115Bxplm=o9g z{83?)Lr`TW&o0JyAO*b~HOlXyv2Al?1_K}egYbZz0D79xfK@w%q8e3=6`7aj8c(3#eLU-Vfz zAx>lEa7(mGTUez?<+tMz>$h!7$_{3!sm&?R9U)IR(Gr+I;jM{OVMXiC47dni!nD3_ z)Z|JR{zQ+jJCg1f=(R^%qhe|*`Q;r2xhziInvK&(v=U~m+HtjIv=ycup=A)ei{RoJ zTARG-*=CCtan_MfDV!{<2)1?C*=FBdJWC{`@#Hh;&rEC=@MY899#v#!y5X<+p^rQk*I4dUTh)v=Y*{QBfCT2!rJ-4n8g*u;}DMMmmeza45X zA20hohX??~_Jw!=?>|(j4lr$^CNJUx^cqzoG7f-U<_JdZ5l%vjL$k2;hXL=2Mf0Vf zwPJrf*KR(WJlhpS^5`Eu%}qFbMjvd&dE!3bZpJ#eDi!G^Lg!RNeB=sDmX^J-s=v;8 z3*Zgoq#f5P%OfX+#!n>7u%}saFpkn23{;9WA*Y$4!gAmiLf0D1FlM!w)i3>>a^Z4I zeY<)8lQc~Ba)AUBx=eYQMamvDfY8tk^<`(W94Se$@k z^a&}(i-dBnu}T##ed~UtfU}e6w9jX!+`G?nic!zPzwmF%c*#7YkNITIlD1}=!nHFP zmQYSBVLMB*@N+?J`^w(Dn!@sZlIcnp!sZeMpM+_1jm7JPjr$(7`UN_J1Tn}i5pY8l z5CUOu6L?tLzq5p9BU$#0qjtV+mX&?Q;?*jyhEpfS<2H4lwbS}+moL=_<6i6=S;VlU z)|s;K03!lif9E>O9`<)&EcS-t_s7@`?%}#)C*gairrrzn=x0NV665;wQTF(%CwyQl zV>;lol*QcR%h!V4gY07+7|=W7d&ao(JIgSCaZ}{ENS6O2N3kHB(fUy(JHEVFGuf<% zi6^XDI`y%y(eUvz2rt-EJ3fKCCNEC&vNK5zKsAEcN)+*OtK_PD>nh=0`X)6zL&!_e zM6Pi?MfFH zMqJMOKR!1f@3R^OhGs*6fslhB;7D95^R=RoX^)sXK>e3Q7rj>Rx#X^=y33{Z+NOKW zRJqZT6E`c+Ojh98^5h;9N693;W599VceWvh6DY4S70nT)u)}%iT8?J`=kn96)|$<` zuf4q5)osQ*GdaV>`D9SsUx2?i!$GH4M!RN2X}7>@eud3v0Ed;$sp^b!YA^r1y)0d7 z(}?Io8vJ|GyY5-{p>4U>FXSIGU_L+m6!%$_C55Kob_Re$K9~$qshQ=_|@_1II_dB71&X3#(r26yulg}pHpNW3lJSp zDK4opJ!F*Qx8s)F;9pghYrhzehJL;t(IviooIN9am4E^3?axgk%}%h-d$mQSYh z^dX%99F*vbXKYHEep9!I*JPf5b=Uh5D&SQ@ZToZ zi8MAx8KD(Hgfp-J#NEco5QoYlN`|L0jz@MyV=C{7N+HJ}LOsL=*8LLS7@Z zwoDMqfVIgcbCW<6G`c#i%IP|-o;Yjm?DadD&$${*jGB9_JS@-T)$30==J&bQt)gMy ziYxL9>j+dg=4hNudlv6|mBU-o z$*=g;I%G?KjGp5j!9spcOV&Vnq#x^q4$|Xn@`hHTH9oW4H51>cwAbD~q%1{ROw{~n zPj1v3MaBp>Jx9|hE5Oe&u0cdv3*=x{Q&IK3ILq$z0#B>S+XZ`@n6$CpYso&6QCRQ$ zF`<^1SWgHozX;_%QB|Z6(8Wx01$=%Y#t4W$BcilJHML{dj=5Jh%nMF@q#o@v756~- z#1eLw&C4P?0q($=4MtHYnOKluM&J+CCTO71aF2?f9NjpgTJrb3(kk}pLnBLaw_gus zjMmsFrX0Ewe1^*sh1d}&YpSENGonw6^`y7bvRzea?vgmVe7S0Pz(qzmPiLOGZFNUp zd&7PHPsx9bdJLTGf{4$oev_8iTPFcy3r#e^OSoasf5kP@Jv+$!0UR@KZm%wmiKtxc z{BHLyql4D(^{K^fOl-f^Ea+St0uOr&OsKk<*?uIIo738;N~&Tm_??3aBLQcM<||4% zo(|e$WX7kbz2r|^xMXtd^|Oj_Z+#!7r_3g%4gM1XR22x6K}-aZzjqLEizI5$QobmL-qUj(kia>cvfIz4TAJk?E`ZFwbRNH&GeVf;3 z(NP|y`E>cZ{CO=aUibDqcNLYn#8pLn&$HZzYWFV9sb}1;?gsl1C}UY;;e$R~4Vm8M zNtC$|mTi8(9q8Lq!|%;sdAVaYxi$O~9_~k+qE}rcREiy=9&3$Vb#Qj-1*3xhH^l_{ zI%w(m-y-`Ij@3m*1Z>B=O=!76q70LRq>sU83hdyncJfZ|x0%CUG5flG{0p4j{PNp$ z9|UeJAE;e5G3(cSyDKrCelTyActUYlgDRg!pmp=P?R+LW`=c-@v9rE zwGxJ>#=bCC4Ue5rSJTKhQJXNh%{{NW)gjLr$j&2CQUE6%dTfGG4f`=q!I;)$qELRF zJFZQ_va#O&b*@%snO}j}?I8J)x|(Z=?z4JM!kg|8sfz#^mcc4$+Dazt5lqYu(58py z6uCSzc{kx&9w3Z_h7Vy_!rN8h z!{#(Odcx%@xWM7DW)X>NG%Zj@n~B9~$yc`X^)ap%)^1i-zZ9!RJ^EhfKh0bnwsd+% zwTc?06#he9Pem!Q{t_s`q}8gGCAWVlI|PJu3#RV4-s4^NG$8@cMAYbA5{gvQp|l^4>K#(=;lW*pa%jD>Q+VBaaBOUV;jVx<#? 
z@4-QLsgfA&3z4M$_f0!*J}K-%gV@H6@Z~ztlwop%Kxv*b@*NZhkbBK9)GsG3h>wLS`5}Ds-4KWJ?WXKi5x~P6CBFm6YB^Mj>(VoAQJ3U zJPH<~awTQLkNXet8D`CYR{7bQU0iF$Umy8L;dS`NzE$)0D1rvitsugR2{bG{1mTr~ zK+zW*SUVaZq?S@9;#_OtitLZL`o4CcWVza1XGXrZ{`yj>JpVxHkNTePGY0#U9}?=& zutHfopj-!F(oQD!H&#Sf80w;wKBw}<97?l=rWjANCvfvVMPlmheP-KFw#Pk7diUzb zrDso`ZUAfH1T~^9p>yqK5oAh_gAEP_N-C~`{INGq`T$*cs5#~BG;!B;b4Sm&=yS%uU z`DqTj(r*bGaMmk7=nxD&dsdA1-Eq{mAa0apJ-|~=nZuge$Q5;_B|sPNFc;`%_3G6}4`y;X`{(?TE!f_VBRyGw4&rjh$cK z6m6WmE_@I(N`%$85wnKbT6S!`Ig;-ZpK(!H@v!5ZlS`d)t~$+k%ajyl(kj zV)*oRDV-H_o#y1YL?07A9$CRaDxVj>A;AZ)2UhVVp#_VV2$1>!GpS{<&AV_rCni2W ztR3F$DEZP;UP?`#=a2vM#h7pJ`JOJ+k^4&UIdGBU4Y(WO?E+g($ro6uNzS~vCr~i3 zTWCJ;vQ$*sthyt&{$c{htn3oksfuU2un!LGLmlGS`e;{J2cZ;}i0qayLM0G^CC00Y z_!3&JY}TW*2Fkvhx;@&J?zbx~cf4L&Ldo}MSnD&(#-LK3+vv^nFaW7Xf|?J*qE}!C zx{^`lNaM*8o2HjYS|D$)+XK)2<9(kcw-@I=?jWQVfZFuCE<|7x_{=0dxp{I`yGmZb zNqR|7nZuE+eV&I6GU4E#ngTk1_i)+WPCS%NyeWrzbx8RGmfYK(0J!Q{LW?uTEky|l z$Wiz+ajCf%@gj#_rpKdz%+Q~8@NQnnXZw91$)K#9 zl4V#!)&!a=zZ)W_bNQT!?;3Mu7tY?M^Q(!nL$KHBt`+y*CrA#kvSuuF!F;rVQBTJO z{X@AXOiavHuoNNVwxY)sG3NEQ-ZjmBb3zvj`*l9`_dL(V-#FJB`iYB_M1tj1mmZR9 zBUn-)GoWroT0*GSrtaLk<=tG7$rX`fZ?R(8;=@*W1( z&G{P!V_NuOou!e8W`X%@(_*z5o|>T|^%vjzq+i_Fdq?(pz4kN3lQ-?Nz!39qC#ot- zDhfEVtL%&bbugRFH|5rRq`c34xTm$Pb)3U`+;}fv#qn(BgYvUc`n+1D+Y?^wiAL@M z5kA$pRPHV??c3W%Y|X~@{L!8sVasios;`0> z$=4vYF?lquHpFE^^ufBZOO+~@pJiR zU}_OE#7+p!crF1i$gKkxF^UqY7=GBNYeZV5zCYzk;>p|lKSX`2IB|DW&iRF-f$U@b zZ>Yd6EB*=y`2a~-+dtF|mLqHJ2{I4VE!gTa#vi4OsmT@UPBUfb?sq*p_=TR)FXZeI z2qbmfUv16jkgry^&@V(gO&a!r2aYcojN~WIz^3^){WBU@)JbSp%+yJ>|wUPcWOCEigrCIP`N5Rqw5Ga`#5ZOl) z?h@iMxadfZW2yCpWNmflRDZJq98HF=!AIxmv2MO+XH+k}-2e74K?X|+dy+x}DBTV^ zFy;`}E$|bhge|uM1Ss>R%PT5tO($>A!(X};TDy#Aq#S&==Wat-yU2K`dU4SSMe#jd zAA&w6&`cRE&0y07VjVokx~$pR0Y1xP}{`T_GKPnUfbmFN18 z^1bA=aC{YZS)Y3qoE2OwLd)4^5KI%Gb7d25EfOO6!HaeWvI&%1tTyjB4$B~Sl z*A&jwlh(Vv&>v=hZRs7VEi$BG%R<%x7#t~_y!8Au&v)=AI2y zw>K4*oBA5APnCZ-XluYrlbj$>UOh=J0GDKfceTO~KJj@ zs`^K+4IQXSr0^A7wfjEN!)UmEN73bE$rhNw^%|_tp zm{yaU{y^=Sbk#d=gS&piRPp-K4_rliANOG?R!0vz`Xgoxgq!9J9@r4ve=j?lH0KR9lDv=%=1M^-r^1ox?cB?dAK46zCjC1&s; zPdW(31jwovRt7d4!30l}adbq(T-y1`diyCSle%)KIV6vK#@|Qt@1*j3^Mm&rG#>W} z9!xYI&$Dt(r-64rPJ|p(S$O=wKh(0X>c-;UZAeh{20#=3M;XjsKva9GYMlU?s*ax@ zAL38Mf(8dsPD^nGM9%6IfQocUkRvxM$DfL0zeppoHRp#mLV#TsJNq;%umR%fv?Ad0 zfoYUA!<=xgy|3Nttbgbj+b=36zr0lT!^l)yCKZ+HRCam#9RFucnsGvw)@*_UA zojDb%qoKDSNRBZJRh}8|`7pHBCOi>#19f+D87Zatk8CQBY|JCmq}4j7Rw9$f_X-d( zE9974flr=cyn;sBF+d5a4Lw#g`#9$grQb)QB2C}MyImJc`>*J3t zP9~as*4)Sfm)Nj`2eBZ$fl?Yz zeYA!O=RS^*4+r1eQrZNwU7)lAYx^+X!X{4iA__Se;kI`k(Q5s5r|x^zNz!jqVotg zf7EL^6jR24rl;uVTQk=SVrSYV-n4l11?qMirs~E&t27_-YI zO=^h1^^16Tix&(XjFc>LKm28hMyZVJor+2wojiKO(}jyhGb4F{kMfCJQ5HvVBLGOC zy_YLMod%Hmn5T2ra-FUqY@F23u1- z5C8EWN`hsV2cPK#NR_H;ib;f3ovFr|x`1b&hNg!+(u@ZuHeJ#^LZmEk{tpQ9<--fp7ZzAD*O{{{l4wslQBAfHYjd+-{Gbs=u4y=?D(PaI5iq zJ`#D2hbzf0I$>WQ8%@sy@h|SJ>^Nvx`E#a~a?+}q&{7X38L)ufFo_+E#{kBHJ-QCL zuq|k+Q4twvvr088dYqhSZ{t#^vUH>*^>+4sNX9Q~r}K{;M|mx3=Wr~kB=Fu(*{{7< z#twrn0w>x!HC?+iLy?Zpad<@4?mr^3JaJ)QZT{NA@gJ!&x3vW)jYVYmt@m92a3fzI z)WU#otV#hTpk7!#r2|obpBC!9ErlPoY}ST*lscYxoM6(l>V8H#w}kjCFG1PJT)wUh zR$JMs?H1s>@mlP<>M7~*^)8*-s8Zb%`Q%;+6S?r$4(=QuYHw?w9q7JKbI ze=(gY<2%bx0Jl7&i-SC=FB2G^(WL=GZ{6yv--`Ll53L9cJ~+oC=@>mljW(qHHg*Mh z60DwtToUpImE~X6|4`Y=zqbPUkZwIem>bORLKjYPQ+rDCKO!v8s&y#Mk^E>dfFGD4K*!5?lx( z0NiU$Vn16mGqQCT**s<3q4VucfJ*!Gs>e0TQ))8*Q0H6M7t`Dmswf;M8mYBZT$ z3B!UY(-RjCohkD5Z$o=$yNTX+&(I9HdbGE|tkbe&tpAS6x{s+CqJvr^j4mR(0>SHKh6EECsh@_z0di{LsP%|g+(euZAY?oKmWSbY^<9^Lt(zDvV_5? 
zy8{!=x(Tk13IN6u3>LrT2n!4PDmzkD1|>+52ZoeY))xIsY%hK=DOpZF=@u2Du~>Jc zvg)|SIk`8)WiS`ENEbo`z^4oj#`kWbI}$hpj=@T~zQNQl^203cz)a_wkxjdWPJX(^ z75#X+?45%P`6u??k

    %DUcVXj3Pw^;_00`X5Z);HjVPwp_U77lYY^M1a;-{HG*JuAg9lqFg)O_dL!3f zIt`>;lHTInBRn8J@U=^#dOL}Ft#@FOmGon-C#-8%p_H@sZ>_t2%XMZJ=pCb;6H@6c zof@aJ-p}>in4jmNhjyc1sXO_4xqpjst9_rS``i--{_-s);Y(WlTgKp+IIN0LZ;MqvIQn`wRZmATgSKG$Ce-(j2 z8T4!wiHX7z%Q4tWf{rL{I4f;?XWafJVLt1adLzbyheeXAabN|+b;xXbDOH7(#W zt@4bkyZs41X!d$S*+)aXqobOBts#qfft7vSL5pjrGr_QZ`qkjM%NO%dt!U8OE!$#^ z2s(idG3tz`yrOvg5yQQ0&`peZ3Co8j2y#cB+=Z$4&+hQG1Qz)rGkfbYbympdd0L{1Xm{RKcCNwixW?KS5_o!rPJp{s6^SG0B*5u>ht4E_AoSo>?Ik2{h% z6YqFV$kIL$tQoHwW`a*=rHmF&gl|)~M!O<12tOYdCO%+Pt$S+<|HA7UrjmM4cih3- z;li6BQM7+BalHsB>NSZ|Y8~4yR0WVYO@a{cmOQF4uiCziu7d8xZOwP?Hd>6D0}P zrZ=>;@1r_owRN45F;g53XCfbfCHogE;3;_OEd_OSE{0=e*^Z*AZoD`Our=YOmP(i5 zP_VClHsuO8D9RmwqR*`!-}qeW^>FrU;g@vJNh^EdZ?Q@*a3{o#@~1|o)@_!%jFx>6 z=G76;x0LNqQfDzHzrh=;C`Cjn$3@c7(BS|ZFH5uKqC2BW5jwMOb+!49Z){p#Iz|la z^bJ*gEv@P?2jHG^>_=Ksb(knxRF(^i`Dvg>6{gurqrHWFA<)}7&Y_NuU^kg27AvdH z4A_tq$E0GoW))|VDneFxqgEuKB%BjBml@;KLQ#0SdIiJ6`X#$Z0^*ntCea5K5 zbEwCB1WueEWqZGrG}ZI{ILt;|PIu2#;B=)vTJX`?`LB#&>8pq1ICsI%)^Q{!#o?Fd}Y96M%!LPy2F zy#12ypY)QL>;aJ}uD`#su;UUnJxB=M-(OOwyT7u4u@_$5)qJ#Yz*UTKhJr>G{6-e? z$U$%X@{O)1Q9H;o&Ho`qYRmqwLW-y=u<>uAi1#oZoIedjo2dol{h}?nwO;$UaV}`LjsF%U4{M!8oW5o(} zhWSa@gzO)+oJDTJc5pbUB(XIO@Aj?;2(E#j_$I~}Qj?4LR!8?fc>&nHH@%DkmI>D@ zFp-kE;4xJ}NoZcTc};*pARISNGrWf+N2ti8ch>H9AtJRs0UZINtHQA`zc8}H9))ZF zVnpp+JaNa4)S1N***6dfydr+(IP*D_)HkG64I{ha%Xqc#c#Z>6oQr^_523n%219ua zeKO8nUiS;;_a@YD4gz}W48D6=-4eb~ExsprCL2WLKCSZ+Q0DEev0y)*SeC~TqwO6B z=az1I<<|}0|9#W|C(Y?w_axx+X~wlP)b^^55Xy<|sNgC^pVDSNOqM%u_j9~VnckY5 zGIRVn==r4)lfnzQHepA2xe2&62tHug(trs#_P#JWf+1YH!cKrGpF z$9+H?D_A#pI>Zd*5e3-#glv@@v@Ur&$g9iTZ&ThXVKspgYXrZZ6?W4kOZt?Y55bFs zH?9Y492^VG9A*ZF$i%MHn7sWNm@8O>DJiwR!-5=wt)qj*Nbx}^1Knsc;`rcZ;?!RH zbXbmN*u#WCJdeu7F-SjpyR;*#mFvBHSC zT)m8Ppun;O@?YoG9^6{@*K)@RCZdNBr=1jBzd8V@Nl<(f3vmlJ?~`dIg4{s^j9Aph zo2NL@U%_!hSHgP5L@r;`T+uXo;~Q}WR^jU6wZHUo2h)LoTW8b^T)vKS3!+_zR78?} zRtfgw8Uwh{33wd9IYTT|8PJUD6?Ksn*Y8?cdXZTjC&~Rq{OVDE#bCIBlwaqkuAA7 z932eG=mgY!WswWU3|0+R4G!nE4{OdF0%;EJ1V#i7CJ&Q`lo_Q0rVTlRa@|N+1ojLJ z-T5P;_PE5ky+7p@B?l3cGa9@1T19L^^)<_hk8+#%eeNWK;J)uQ=7~VUa3lq;E`A3$ zU;;4F!#EVn>#+6pwkTM+3f5^WunId9SC^w5!fOx=Vpi~^G5*rxD9U(*Mz`%Y7tuX` zvW*`_OtfXDB7o8tjJgyqk{6GWs`k~_PN8R3EqKYLa7zxbx{vSGXpG8!6|GP23@!MN z?+Kbn*fgErUx0lIIXa#7;vbbb@Z@M~*WtNCFO{s0qVbYY0ZS+cqu26g>vLTZSh6*@ zY&)Q)g^F0n5fQ<owcTXTw>7VEE=EwnQSs z9fIk;u&SA*Fsfi9C`XU>&Ik65EAi5(teQ{lhpjBbcrk!OS#an;pjHEbJq6Xln8p;` zX*G|x`rzvi24C1vr4rG%H8QAee!Q~N8W$Ny8VDIyGf1uG`oVO%F8{EVg?K@Tpds8` zq@fZ`18}xkerS8wa#>z}1iTNPy;SMz^`2OvLRdqcz%0~y8!4#Li}q=lM+hn9<=2cQ)L_-Ww<&I5Q}LYLNk8{aU-V;FF?I&0IMr{N#|xT1}u zl#v~MaISpm*qj}`2nTs#cU56LN&jHK`08Gwd5-U`%W|iKNfFk=mm{|a_M>LpLz1>R z$beTuV-XNd!7% zw$2w!?l?~`Wp@A~E+;#!A}_{vm^qMvjYH?R zbIC)uZsDJbG9AM67m8A)zl+0}2oQ&VLVI0d!d$_&8b-myPkQX)-Muv_T8Zb(g3jRZ zmdsWr`nj>FB>_c)k}F0}fl0F|?)VBZ?zmoN@a=@oZmuYlqY8-5(x~4+qOuks&1IYyISlHl&r%Ek!LN#j!-i|%Yt-xx%odN_D42a%% zUif}c7rlQwH%+9NL6xeMQ5Zm*rSLqf&Y$iTlSkpOZlQlF`GPZ+U4(I395O~#EA>>t zxFMWp^TS(U&H1$DyAw%PWp}4%W>(qoI(Oy}`MM4iEW5s{Aq|1&6so$M5zFUU#s>M^ zr)}CKL1oTxQ>BBtINGKC6FZzM@#nt^o1z#TCIPjH9ugOYfbw@tgF|>H@Q~J?DA8KGs_u7B$t>j&`{SIfC{6aG({TY4hG`5Q)zucBHWKB&t zpYeU!d(1#2WzP%#iQ+Jm^j`i6d2)ilZNgdJ7@bO;sQ6NHCP*OUPKdp5n2|{AFuT%c zQ&qgN)Yoz^o!{IGCF3MvFRF~ThTsT?aa8i4Mt&i^irnLk8Lkm4mI5$fSgC1@X?~Ps z`R;7U!LSt_x7BC=vS=topQd%@>$5S;agF*5X&u2J3kej8vrrBej^?&kysjbQOAOb| zB@h3iy(fkIrad4t;CJ2qGYj%xafKfw!jtGHFY=mSK|)T2hTq0i&-`B~kstE^z=nW8 z%#UmcfcbYegylc6A?%DSKf3WJHsmK+@fSAaXPN&N8}c)g^dG69|7^hjLpJ2!2IRjW zLVf})e;r~XrY9x*%p3g+0Qp}hLO`G=cYdNio_?P`J+UFIj}*t#=btpklaz%8M8pOJ z{w(=}@%UBd_d37TXMZF@e#m~@`R5&eQ6YbDBLDW+QyahJPc{C@kNiyP{fEK;PsRTh 
zasdBzlIuT*9H1xf7aM3Ba4^S8^mqUCwI4nC&DU6eN3$M>>7RZ6 z?=x_~5gvY$?6YGh>il!5#)m$(Ka z5yRt)N$ZKN`IXH54^#)?>FST6e>7GGCZZot^k=LG@N0^AqR)Pt2>?HJ_k{2KXUtE^ zvYQCW|01<>VCM}~KZihZehTe%PChqS2ZeAlm`xu^9RqQW;x`C`?~BIVv{rW#gl;fb z9izx`9amnG5}LE}4>vc*`kD+c zI^KZx13X5s^s8rSw(7F&p6afsIUjigWC{eezU5Dd*A&~J^z!IEe8U+m|3a>`@H+e> z?6LRI`@BSl;&h!Buy^17MPZHz)du+C2ps zDLp$I)2FQUPrmU8;{AC4dHI(S5dAQapV8+DW0SS9(tpe<|1(7X)Y>n=?I&>e2jBLi z^*>W;^Z=HhH~&37{bP3hCtG^F&!2g)A9nH=!}&Ek{n>E-h>HKoaQ@=Yk6sWML~I*l z+mOoDaLVnDDIOxO&~Vy?8CHMF&F81!_0fkop*2*XI?IQjHlZLC0unNzgi?j*1(>TE zy63;>=+_ka_zbM9zf=VLmk0#>A4H%>Z)UAy1j+yA(e(C^yZ>zZ#|?iAB!J)NY~Y^* z$^U~#KSuLk5#gst|F%|p^ynW?^=HKZ{~SGj^Jw6oZR5W_n^`OB|2L2RF*v`g?01j; zvwi=sd-M+j`MXE|(E3lI$nOi3|At4iva$gI|1GcnVJm+&o+o$sHA(+`=%*7lY=49e z2~&e7wDpIN|F-@Uw6cFXbmLER5Ae@{Q1J1*??-@{h)Q42)W(X4iJpm>{t+ytGO@F> z=3sa{MC4IwY@=swVybUTZ)Icr>gPK2F`gON>pvbK{y(Vq$Lt6E6`=p%sRi{O8D=Zv zA5Zb)d47b|KR>sVjiC_&BN31d;gNp+?S}}&!t!VvMnpelKql75<3oO2BeMKW#t341 z!gHT~{*W;;GCjK6Uu7Uhw#RP$MaBeTW`7*ZzsP`$%g;9Q?n@nAsmy`WG1!%cFJtQ}(o=`)fH66Z>N*`I`*D^!L6nf!NspUKhZ|@-H&b z(<QtrI8iU d4=?}WKel#yHg-R}2gt+@0wIu-3(JTg{2#+Vc*g($ literal 0 HcmV?d00001 diff --git a/Scripts/Models (Under Development)/EGO/show_graph OUTPUT/PDFS/EGO-Model using EMComposition.pdf b/Scripts/Models (Under Development)/EGO/Using EMComposition/Revaluation/Figures/EGO-Model using EMComposition.pdf similarity index 100% rename from Scripts/Models (Under Development)/EGO/show_graph OUTPUT/PDFS/EGO-Model using EMComposition.pdf rename to Scripts/Models (Under Development)/EGO/Using EMComposition/Revaluation/Figures/EGO-Model using EMComposition.pdf diff --git a/Scripts/Models (Under Development)/EGO/show_graph OUTPUT/PDFS/EGO-Model with EMComposition and Learning.pdf b/Scripts/Models (Under Development)/EGO/Using EMComposition/Revaluation/Figures/EGO-Model with EMComposition and Learning.pdf similarity index 100% rename from Scripts/Models (Under Development)/EGO/show_graph OUTPUT/PDFS/EGO-Model with EMComposition and Learning.pdf rename to Scripts/Models (Under Development)/EGO/Using EMComposition/Revaluation/Figures/EGO-Model with EMComposition and Learning.pdf diff --git a/Scripts/Models (Under Development)/EGO/FIGURES.pdf b/Scripts/Models (Under Development)/EGO/Using EMComposition/Revaluation/Figures/FIGURES.pdf similarity index 100% rename from Scripts/Models (Under Development)/EGO/FIGURES.pdf rename to Scripts/Models (Under Development)/EGO/Using EMComposition/Revaluation/Figures/FIGURES.pdf diff --git a/Scripts/Models (Under Development)/EGO/Using EMComposition/Revaluation/__init__.py b/Scripts/Models (Under Development)/EGO/Using EMComposition/Revaluation/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/Scripts/Models (Under Development)/EGO/show_graph OUTPUT/PDFS/EGO Model - MDP (detailed).pdf b/Scripts/Models (Under Development)/EGO/show_graph OUTPUT/PDFS/EGO Model - MDP (detailed).pdf deleted file mode 100644 index 8c79dc7b758c517e61d2a8ba28fa9761435c93ef..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 39994 zcmb@tW0WAxvNqbbZQHhObK16T+uhT)ZF}0b-92sFnA@}8v%h`L`p&(-Zq-^7!LdO9c{@<3d37?rr>jJ-hFB zsfW*p@_ZiZsdn4R$)EREu=q<)Mmny^Uw@7^?)FO?uKL4tG*=cLR^^pBdE^ckS`X!O za?T3qa=)~{YP{Va1vPou4hLrVDEOQ3dnEXO<`(Dnyzl4!R^9Pd`0DX~^LRar$R7-_ zr_TAq;pV~%=QHMi0iWA-Wb{73qq4u4T+2`CcgGz1mFlKNYUa9!ggEG0tc0Np_9JXw7D;srk=oNiwg0F4+4{o8N*a zJMMcTGN)cwos)>5mUuIsKJ_!hZ;@c3VH9|WIL>3gC5sPMK+qWG&)nCk1H6Ne7t z!CV`tH}X9%;)j7Oyb}nMv6KvuJ>53KD^3v^sWwcT5Kp}s3W#Xt&BA>-FzmiEeBY=( zZ(1J)R!+m*c68laT%WC3!HmoK{F 
zS{JIxM6XEMP)ckWu{$7OKY1UiWXr!x@07Ia`6vYQek{8&N7VY=u#L4qDsf;^3rd~E zhP3NgG#QmL>nxePGZ-Msrach~tf4o*tz(O| zQL4@8s!L#)3ym(Ek(F)%!jk3AJ9}s8EiPLI)Z2pZGLAn2+zcnW>eI2&DUFF-x3x=h zbx?0B{y7(}bJAx`_g%J$I; zW`Va-m~N7*XkIp--Z>TLWSY78(c>8jzRds15}2mm>+!fy)bkbZ>ppd;kq8HE%MY-* z0IRD5i08sO0W!3EN*G9;)~`Ot>V!;|gb9lOwrm_oxRo`joMpjfn5&Z!_*4ov z${`0x-I3Oh+V)G`i)P4OpC2gnL9%$xMs6>ui4juTFNY6eLy4#BP2RV|{Nt=e&swHa z@g+H{zoruj}vNh@`p(xa}d@ z>r&Y@Tgp~gPPae4Ob55+@1+}|D1IfF!Q_cCgwWQiJ=2Y<2pjS&!v@a}mg_<@NxzRc zbIFYyTq{m)k~;m_JD~j_%zot`iTXUhQsBsW7H>28F!MC;YIDD-{X_aWhWo;jmF$;l z1&4{F#nNQ`qbYcRtwuNZvBeUPyR1B5A;YSNBj9%*YL*unpJeVYY`qp1dp*Qlzu$b` zv38#sZeMF|7Nj4T(MBsAl9{Eb{%jJ|@q?ybpcNw({V`si1L{G*ap|_5Y$uyF+^MNy zz21+k{dMv#F9>x6X2KLLVRfGNsCb)A*qOURFFHuPdUtMB+gJUA%$1}GP(4N}IQaU{ z{^17&fLe%;it7u7MvSorJ}on96zsFKNPJ@{GLOp|+Tn=@-7(TCJTJLlE8rJ~hlZz0 zpUWoKy>wLyn1W2dZ!n&OLge`8c%gm?S}IuV0^#)>_zVk=DBWoIe4J_b!`Q3$Vmoh$>TJ+FW`d{SFbHqw^Jo!}vissT zAR@F+Dmr|sJcp@y5)E3oBS7(#MGOgJ@7N6U!PF5hHZT{JH;3O7D>r&P zvewyPNbgk&2UK{x5l0?8m`E++!*D-2xSSr~<7%=6`Df8vyw*+%2H-a8F!wZ9DOP~Q z1gU;CND881SLJDkFmn)64#=EhG%YCHN%IChK$zcR^TWptaG>CZc@Tk2lKaDu)RBql zuqDUFt@ic!;gETd5C_U>G+VWgsPy&0R{OiHz-q&L2!CW?*p7%j8U>{;J~u?SkPz9Q zmT7R5z&AnX6y9pTZ6v=A{n^NxHkCZul#f$`^s@w#RIXM&Z( z*F|Q~wmrh7He%}zj3;?>mj$-~5~KS5m4KU+8D-0Y=K7a;mKSUY6S(XFg9uu`6UDTT zVMBpiynj6KaG`_k?(3yPFC2KTQ5Tex5nE!4yX|7_xeRnMgNMz&*9aXm z!7DbAc4n|8A7b#dSE?L$YVcfblGOaVgEH7|9C1I2(QbWt5QYH*)p%}U+m*o0oSJ6J z`^ZyI-6f*TGdS=(^U14VjMj_k#BScix}%$3TnVi+!9=wEY16FOrt8@2UoJ0tJXIY0 zsaYiFiEl;``Z6nJ_|OhJ*fA_gZG}RCk}!x@S~7+;LZM=SwkgbFZAPIBfOcmx5!UX? zt>SG{n?Z-cUeVFE4j_^^(bi2-rzXKaTt!mehJY|=i>a-}OKUdd`iTrwS+HRzIGPzK&U_CbI37i4b2quh<>8;+&@Koo zXsfF3SfB<5Jq9A!&<8106L*8onE2I2C_2?<)vHD<#AEzZZ|eBnF4v?#NsvUx_b5;i}4y+G$-xcSvP!UnWMQ1RojQ z7a}2c7TJTU38goNs_c*uy$PBS(Y#8@i%Bc(VF{Zx6L_d|G)Oz`?z+w{Wm-fpZWM0z zi^ViLdD9-&i6w$)pFxDEzA_VLXvHE^r ztX=C{TWn?gMrtsAXDTCEJk)F^pQt#O_;k^1qOCvP{S#S!I;pCVf7p@i(?feFY_p8C zgl2R_gX|_TN}NJuiSxL`?HOFe6mgVR6oi>F z;WghePdo(#3eZDF61$WpUc%ZoP|>1;VO261Cc4kCZUruTm~Dm@9B&{g?sALr)+RmN zQRA0`1RBl=?oZPuZDcV%&6=jU6&eAs95QFDNdBaLY7s+9jPIfrN!qY}n0A_FF#9B_*pB>y{RLD5O_|V3=xa2k22jN$8w)(7=cN{J zMtfc`pIj^7lM- z?l(>QZOF?jI^xB!uv|v-GXvVdMTH_-F+=Ry0kPQnM$O2uEv8X1sBTVvipg?cB91Jv zB1J~b*$%{^E!-_!iL9?m1@U@hW&E0+pqgk2@v{3edmRHP6S3TVC6<$sJUsMe@bdV9 zZ>;A0ZanmP#~AQiqBCyj4KoU%Xp6O|(vyp?%%RS9gzv$@7@lMR^+nZw(Lzs>7PCS# zW-~pkW&DW5`G_DjXsGcXq}*6^YQ5cG&&541xXs4;ttVxmvqN*7!@s2JCn?ZgUyMO@ zYJRNnzJ?)pSP$(bOtmvXp*_;`f{agPWj#j(WW(+#4Z2ro>Ve$DTZLu699g^9i!oPO z_TYBoxKtye8tE?`8&f{d#eH&N`oN?A;q_F`5%hrv#1?}WPXx28S_s|KyRSvfh!%!| ztz%q78K=j?N&XXcqJD~$us^qItZBjRl@zd@eg+hE5Ctqfee*ut1I8R4hSHzNAB~Cv z`l(HfaiY#K0*jrNS1#QyJzaBo-~Y$!e6e&mT%EH|7UH+nrP>8G6zAF6J76*jLqwV< z)Zy1--ncamI9{_9T`!_aNN2_bRG};<92B>_X8T-7Lnak}vH-1A4iLU8Z_I?T{)h=HX*kd{Ru-WynH;Jbjs+pX~c5U~3gDHT7()j`ELPsp1aqqonRQIx`T zcKy~~0@x*bu~CADb!q?Wj<{32l);wGqi(>Sn;t;U9o-5Mn z3d!7_9ul@ZcV=cXGga!S?NS#%_9@;;3@%`bmgjV=?rI4IX3zHVhA}igR4<&hvWG*a zbNJah)6f7U$=j3eGh4+->>%_n7k+HrmgLa;tMxBBsKt4=EAG(sor%lvywm8_ zLmW1cr`n5aB1X{bJf&GX@qFE#o!@=U@=D^m>3Vcj_X}Tdwp{u$&4xokwawS**YQR{ z3s6eQm~s+5@D9kIh;$5Ym%_KZiEgsE%Z0g(o~re}uG35d!{`nHuj4HS_pEZd9mqks zx)hmrWxUuKwuk&1fpjl@ri;RY) zCerI>a9N2OCP|N2Qy28DVR9=Fzr;vJ9EnL|>-LjpPunwk>Z>KW1Kf?!2bvYNMC9Cv z>Y{uB@r0|jfUcWdp^I_N+&&JXW^#QWhv?;KfstN~D38>${5zqv-s9T@5=Z8x2}-UATp@ji@f>I_@hcp)%S$p>oI**1MT~fd=&L|Q zm2%;JA4>|74h4I{_f0#8RC-L-d>H~}%hudZKAFdz+8F=}k+CGKbu2OfRXk?IqJT@F2v7TJSTG9%eVCJj8)!6!KqJbu`Qsw?|t@K9Z@bgSKqhS zLv|m^%LXy|qUcp5Jm25@{j{(35=E=4kSg@N29`nTI?s|{<_so4w&a`oKx=p?pdxfj z6%P)sMycdne4;@&M}pGOQhFZ+nSKn%M}GE9)Ox8BK(;bQW~kXBizf& 
zuQN;gv0Ul)s_l!Goj6tb(*DkVC?PS3A$QQ$VAhNCrIe_P;N`P zu|z|ek*)v$-Kvvt9z76fHCU}`W6?`q15Vd!8CPAy?Q zsn!conM(>0YFf@ds0Q*W#~dmVLsD4^(ayd?xP>AjS~+isdcrH9)CNcXaKu%Eha&!s z0QV3S`9I@v$(>z|Rrusqe)-8rEe~kf^~ja(ZLOTf@l}6-LE^kO@6aKhE^9tRQd=Ai zLl#CK$SqEpTGqke%lY+N`>w-LOlygoor!ru`{dY(LoKJkTTG=;H={D%CMzNRtX_1m zL)ezjA=nN{2HaI>PyB++c1{qBsX!%+H$O(gZvwH*qd)InNj^+6up7|HrrP zy6d5!V_};9iLZ3%dib>LHQXB|5%m>R~(501|7wOuLmBq*;e3YJ zW3q^I5Rk?*hP`IzZuPpsx+4kuSq4l`%wUmm^OTc1auULR!w%y`;B;4Q2m+dElp{GG zs$AUKvqtQ#kWX6llx<*pQ47fuek||XzO@?XkEFd@D z7#8M?_fRX^`kS>+`9j;E;s~s13+9mAbzW$=BQVd;He*y5KAZTe=YK4wd+_sWp?Zz1 zKQT&qfs*nXNGuSWu*K=}wMvu9*D-GeqKSkrKQW<4{P=OduL~yoQ}{y3jtj+GBQp&Y z`}W*8EN+-?#vQ&}i<$w>S#L669DOeBk`azyfE!|2Q9P>ATGuaB^vVne?ja}-oZu~V zz#-jX_0BZ7R4zIwxQ%hOdyIoaJjJt(ll1AQ62gidf>WhSOh6kp;he=r6{mO&p($aq zhi?YF(~JTFH5k#x;B)ylEd3N*&6}vfaaz06Fg;qVv8Zfm!oESh?#x722{w_+kAsQq zRYm}0T$;IGi_*EdTVb_dw{AG$+2|K($#O=C{Oxc`)v_HjYwMc3Aqm#>%4=kTn?Vlp z1Od!{1TaU*U;Io#Qwo_Soc2%3a9ZbVk_fK`I6@NQ=neBoJt~F}F*Yc^>bFp&Fw1=HVfVgi z&2T(RzI^aZaN0<886#FcS#4d)1Q@m|_>L`G>HhxJcC^}X>a6pl#hnNJItOT^(Yir5 z(>scOFu`OZm7+QtpG5wdpF9~Y>gj2EC=%~$Cw4q-_M>>{cbah`d_M8DY33|vu5x*+ z{7}pW+Yv-1<&Q$^nrKpGOA18*k34IpD3@mHsdOZ*Gmw2FYnCtz?iEO}QRC8U`-x%c z!bCrhjWY9nuyhEH+I(fl?3*hsd1-r2d8BhOLUha2J!Vdw#poB!r}rKgNsX4z_r6(0 zXQ>;a0%b>jj(KhiU_TUYwyM6*(rZXR6xLzwaMTA`^+%jRg&V^!`BuFHk{hATd`&n9YUg`&=s%tKJkAx&O;;x79lsF~yack}B zC$i8s3BE|r>#3LSK)28qF3IYi&M25vP7*9H#Zfthr+g$MBm!odSM3RSH_6sf4IKc# z0@s6i@zk!uP_SCt3jtvLjs2qE%!if9aaEEu%EsiaPM|-YWl!ln9Ph8_+_P%XH=8<_ zK~!e&&f`Pc*Kh}WA)1B0;u%53{RPfdpalz!;hZflnv2dv49~U|Z;jaER_!5oXU%n^ zfySe0b~t(Z1L*ux)gQJO$d2MaoPSW;U#Fw^_Gb6F{O*v`3NIO5IrEWz2FGqlFRIr%eDgprxwC zGH%`#>`U-tRCyis7!=+YG$Bd`#|aP&KCh4;Ors2a#eJ3RKUTo=rW}aV+c3g~-JEU$ zSOBt*1?>PTRglM(m+BC%rE-^8mqKqF0h=nXe-oBx{al~J?U7LJ@9%K6o!#CHRt`yAxq~afrmAF~??6a%iBhyT z_(aBEo#;;?y4~+yuIbX3ET?Y38)rH%F;|2A5Z+Es-C2TB`7@(GXU%BZebB~3TghzM z0kRw!k8-2Z+d@|o>aB8RQ5Y`dcvj~bu5X;=(I9OkOMzCha6r{$uhc4Ip$hv)A;8wa zI4-htLt+dPBJ&Vubv0p+&BrEAaUQ9N-e}j&y0?=BrFFVVqi_;t-_?gEyi2a<+57nR zJCWk2UZq`aWKIn`y*{df7V6}Jan?Uyg?%948vvB>_fq8?7?xnE?+Xfo>bhZKyGz~U z)=qqE3Z%(&IkXiybQ608p+KNnsjY$!q63-xv_d5}Gn+dtL#h+}U6Y#BFJ?aKN+^we5D$FLlK+W1v<7I!JGH&d)O&ySm7`c>2$fob|%35Y%gC4Pq^zV)Eyg~a;##W z@}j(p7H)BPZj&o;{H*{t;z^DqWJ6KpN8|l5*vMhyN@bui$9ILR3+-C~!ULlSZf@VK z8sZ_`r}ivG9NU5gVZW34N^`H7Pt#B-pCn2ia@08E&@~`U5mlNS#=iVQ*ipL7{VQBR z{7)W30eQhl0Ux&5?u?+>gCjSSf$wxdP9r9rwR(BD(_&g|(B%ZHtv=Lc)MRC0u-LMA z?P}#g$7u81L<>|lt0luO7#Y`zToFEXU4N$#$lc8TXlKor2u#xCQ^E02AxB%vL-oue zL}Y4&|GuPSikhMjH#bWJNk82l(5c~BZ^J;8fflkf+jB}je%3Q=wZ z;jxv<|aQh z7JCbWbhkkQyc2oy2exavA;~LoGLaB0Dr$&dzYY?dQqVmsP@RfOtg4ZWllkov1a`$d z4Q5apfT`HVo-nRtU?Hf?g8)}D2MU8hy2EE}%X5yn8ZmwYbYpuUEti0C4xMogW33{8@Bo5JgQxp#E8EPT~svcx(C!odp9f z9`k|81)eJz4dV3zuPSd`A#c>ZV;Wm|gP+%bk_8YK@utrh9Q4Zc7NY9^Bj{rfhy;wQ z{7K#W>+TO=4T(nzl!>kJe-%=FKm9GgV*GDqSSlX&-*s0C24?@dI-1xz6EJ#(fH~(GmrQm31q-^3$p!MxeM2vu5#l+p2K!<={*v{I{QQ6+W$b{f;U6`;FBLVBb z9PsfG(2Ke|iz_>SSBU*fC+@@u#qp1GaVI7M&i`X3_8r>4EL2n&3E2LzP*GtbVE?zp ze?t2E{=Y-|XYl{=K`$)$t^JSfUkCqRwv7MHmho>A{%tJxog;d|zXfi#e`BW?F>$pp zGEov2`adOZOl*uy|9cVKf3JJXPH|IFUPB9;>1OYqMIs^~eZcaBB$(%xKm}2Zkn~po z3JoT66#*4PL8MeOR`~@e9!6v&iUJ)JXt)A-hoCUrrzj$NxD{;$d0owVzn$?pqPeqn z+`6)6cI~s$0if^^2-5GY0qD7Mor&t8;UTQ8M znZNd~?cn(;fAWrczN_AnUCiVNM(1emSYK4pccTG3k^IQSZR|-_1!%GX6FA(flD{SZ zxB~@f+RnPi3$@lw9Q+MK4jpxIVeJH%Yeh$W!v3BcXgA&)%K4l=arLgoU4qUZKrD&M zY$AGCQd%M{GC2tYmpWto`Hb*q{}-pl)Yt$`R_-tK+mI7g#gm_q!4c-2Av^&Z@pQ&e z!G247ei&@z;F!MIRw(8!dH_BloCf<($kPcU)Dj-&-Nn1wsR)_p)o@!-yRP%Hs-3_vvcusHtA_Q2Z& 
z0Kxte2*4@8bP_-o0w4{7e5(LZf{d$xu0fXe0E+>hvmkPTZ1!NHg_aSpe?WK$3o3A+01pXA#3LjMyv~x9VNeBCL5sKF!0YoIUN+2o%Di_U&hbzKvkC8}lH~RUkZJ3IT~Vi3$Q1#4*HYFk(N8VnCU^Gf^ml$Dp<$ zWqs_bV3#BgnOqWzWGD%`5(#B`Q-F%3C6NvpFIhKX(ioSqxFd$9fUbNENiKmNDSRTo z!cPh^6nQBeQK2*tFq~Clp)h$(zE|*-7&`;{CAQQZ%C=*)ZHN!!W`y z=r`+-B57A*Kw>MBe7V4B-8^YAA(qfWvB!K{lWY^$y2&~Nt~h(q`$E#`&;$NS_XDH} zsfpGh#Uaii)?wqg5?DWj{tL4bW^r^K*@qExh$of<15;#J{(R@*Dp(OR%mVC79V)mLHklYmj45xMxIMkx)5DRrsTjdzizh#Ub*RZ(ob7E+Shp zQ8H#sR(W-CTSdQPrfCMo>d2gorINLCq@Zq7Q?p8JIgfLIXVkmUBlDFSnm0-#DjRJJ zO^@c1Mw#}U249m>6I8>d>8$Q;V0|#dO2^1&B-C=>sByYv>!`W9oOZ{x_tJ+uL#J*F zty8XZ<_YeJ=$-7H5u7*7J**q`KhB>!8ix)++DXXWKh9 zyfl1*c+|+x(xhXo8ED<`)LQif_1f0NXTxjD>ry^0z6ib;KI9(Po=PA37lTirPuI7t z`<0vHx0QE!5Dt(k$P=g{5HS!na6V`zNF-1fkbyqHzRo~=`guJP?L{;op$}o#5FSWY z7#-9%Ck#3omMXFfk2Y0zUw1bUT0>D0;lWOcxY7Ah@u(OyX*4&nG4UbMC2?6%FVR#{ z=~N9G&Bg<<=pWGpCA?_(*r;A4u8!r;^IOtk`xn2V_OIxewNn}|)*YJ<%O_@P;`RD8 z9jNfI@o{$${zM@6mmkX1Jm|H%9fGVRb%wns9uHF=$K2|?n0zFFqy{$x&xxcC>O>ex z)<~jC?gPmSHt)JL=-mYzl>dzXSxCuTK36_v5#03Bv>-%k5Lrj9!^}(GO+Ua=$ROM? zZn1Qwid-Ntmmovoq-olj7W*ahC=pGPNgAXH-DugcZ6gHnB!iT%*0wF6XAx*Rl85a~@|yU?Z`1Ga)t+V}iY78@aux zn5S6Rqi^1LeKai8E~L2EO*^dxXFX(%YJIt7+E{J5kxHaH_h>lrJMy1N{jj;4?L z>(=B`@MU-LQ}Mgot}BGw=GJM)y+*pBrtaEo&8|-M;pAPxU2CWMMUR?+mV-XLMVxV5 z6H{43w^XgA_)_Uv(OCdiT*guNnOEy&_WH=iO7D)1-vVF^@DMm39tuvE-{`Y~Rkfha zkiq6&)xgYo!C6PInV+7Y$fd+>!i;!`c=1SooXZmT;tG2i`%kw1n8e}weTNad&)r91 zFXYNo1?Ix`7Z46Y&fDK3FZkCvFg$A>kvP^o?UU`(eHr>0vy9i7lcIOwYjN6qZPR^f-}srbUme>6 z>T~?2^G?70)Zud&`zIS4I123Timu1&kM1V#sn7Q39C$grF8`z_$EW$H(ZzlNIg~tA zP6dDU=hDlHX?gd_kVbbhJrvL4R2X4H|~}c78Wva zGBGCj7g$vy(D^q`{|ov5ja~o42%P@kG5%lJ?(&Vf842j6EsUMMLH%D+@!jKpN5ua( z&k#0nHn6rc`-gBi{hMj1nK(LG*x3@$Gt&KQ+sA*Ahrc-fe+Kn0m`|_lV(9!Y)}rF* zV)9S4e@FF?Qt4StqlLY*og>sY@ctKn5w-o^bYfxq z9gVDk(Z5XpBN4N3baECpH*h3i{)Xu?2LIJyWc(hmx`nZ``QITku`)pYMg9MOoNSDY z-+i$&{f+J4CkMx00`fm)|JMEQcUA@l0u~mQ{}B}v`(Mh#K=4=hKc?sZp7j5jsDBbC zXJGS9VE)&%S{s;slfl0<<~zYc-%Joni?KSr%*>sk*x0`NRdzP9QTtx3-;Wyq z$O%|jS^kkr5HNFmcjapKujwFQ<@}x*Sp)ZfT8!*W-_5^aiCI{iFhMc?Ls|dB?EWSA z_u~3z*AXKDJ2S_3=l`__iItJ#@5ug_T}uC*pml3PI;*H=d|hi<%2tzLe2@XpvPi6W-Kh*ext+Y7oJ@C9e(wF_c>i2sSLi(^hMqxtN_V%=<~;8CUshFK(3E_8RX*c4I><;fP+R!SE2J z7t7*s{;*s5sR&o#$@GVts9arDASMUfZN5O91(^Rw6s4A8+)M%HbO`$?5qh{F;q8pt zOzKG(KDbzt4l=wIp8Bg|UNEIs!Z<+$(Nu!5dV}=+i%EE%y=t7cy>bZ z0h|MoS|<{^Xl+_Yg9zySRs=`Fh<%D72t^S+(%gi#BA2*W?k1UuOY$jWHX4vvl$cHS zFm8byaB%ZcVWl3v3Kw%eXLC7j7I;n;H*Oa2Q8IJEM;j%%f_1d1ciN0lG+CP{Q_rX) zRwiJcvXa7dys2|M+5SS0zCz8*IT%kCK=@_6P|Y%5!Erwvm&ti=PU-t)nyD7e4A@2% zx9Ry!9_jt8VomdfgAEq7Oftdct0kKyo4Ej_s6s6kt=7DY{;L86^;81C4dwpVk3;Q1 z9)|fNamQ5fHK1!qdqHWN72B1r(@!+XC@x$^?@}hhCdzFYO@#_#Z zKbvQY;N2G3(?0{SH4>!}ti;$)NQ=N*Ua46-)c2;J3*PQ4;-WvO+usY-IyebWv~ya> z%QBV^rRS>`8+_7Mls7uL5OS;QI3;Ex4kSd|s~SEc*05Z5aZte4mK|A6hYc7#IfmGX zYU6c~%mKJv(%xkrtc+71nB3KE(z=Z~9lCmy^q0+2H$}pPNaZVxqC^Ajt0kmM_~1MO z(iO|3>yj{uO=6Z;jVuY;!$xo_hzJ^(lS)_y39y;LFK1ZAu!dF)ueurQB-T+Mz1NJ; z@vQUI>bQ?>6QV?g{Om5&q{uvOtM|cw&GJTzyCgmqnGfhn^2W1EKA6@J4{O(K>nP?G zoE~~dE<3qKD=4_bfeJQf$6|qAk-YRr7p?c1B}1|fqXMs8g$uQAH+~XWrwOjk)0818 zSkr`%G$lN%GImvB!QMWJ842x9%ZJ1cj!7I7W249**gee&tGBe~2*&(T4uX{OhN}jt zBT+V$%wKIQTd76*_))vm*YtMU^k$*rhd%fRS<~+G zxw6^o3;EeP?)mPPj&D1vm(E~8KUY(ub#@P<=NM9P55e4Rr9X!+!SxwR{-&F850CE% zBZq$JJ;k%LEN5mU)WEB&+Q;3iiJgs!pNoHbKlrN0?!MuL-J92bZKYvqXV0o}b(Lvu z#Yd^z?lm&%>|I39VNPB9!A0+ZZ!DGL&HYc;8|uz&)khyZT9rmUz-qEU{gnPlZn`f_ z31AW5&<@7eS60;sHo#e8ZK<#L@$U>o`4dGdO>98+Ref0NfqP?EZwM3pexDCzrJxUr zE@=F|C3|3;;pRuCcBs%@tg=8rCD_*kb_F;GcPKdh7}Yuu8|*#&zKCXBSzIAa&Mp|p zw(#RR+=_f$Ee{0WL@pTl0d#x9X68pTiU3%12dYPMh(4}GC4E5jmUfJkM1W={7yiY( 
zUYi>#Z;1wF)_BljgaC|9ARh*R)`2*MXRfTk9c1iSG5~{z1&EaaGItC-V8B?U*iF$K zh`=5}IMQoMh2ao_;tHL-w#cPL+;-i5yzctP+C#3iz2)^%MN zeGd9W{1o>gYI5L$$QAS5$W`MB{dL$_-ciw!p`ELpe<{n8=J9?}@FE7;i+K7}#=L)S zI)p8&FLO^0@|=({rH;ST0KTMqwQ@zDKkHYI570h_6LNTvQh#5xM6e`kL_F4o8E`}b zS=ZAfp$!NBN|NimELsw)G3_hjTI{K01M&!Cqy@f53=a8q;*JrQ^t91iq9G-xNs0To ztZ2EZ%!<)jiDwyKMJ`qzn~`VEx@e3NB-RO5N5a)G$t?J(^27CM+{qtfnn|f6x}(%d z$Rie~lvlSSqDr%Yi{=X#rp78MHVQAf7A@b(57bu+48iC>kR#5A3&53S;^n;qKhCJM z&FuElpV?TE%?x4JWOIaze4#cRe187c`G~_vSET4K>Tkkhq!|R618SwtopIu7)9#$I z9AnIEXk;qIg!Rl;OUhTHM0LcRn01%E8+je33){Uy!eHljG8H&@wMB8(BRoOEp|JcY ziP=RscPTu>PEl;?deusqYchFgAf4kTLV$e1b=W4%ioT5nX*aOtPGKK;feT|GOE!}} zq*c-kGATu-=JrQP&9V=b%y6sM0C{n-g>NQLu}wRmk$|Q^g{9HuV+5u)Krw*Pi?#op8>kmN<<9 zv{MkoykZ{`@(f}|z(0KV>zHyNCVF{v3gq*ca`)>jpC8*zAMD_wmbzXqMnzNAsSbn% zc0G`tlpLNUMGkQ4z$5WmRgue2HyRJzIOn1UcVSE+>1%P-AIUYvp z*qC(rp`fxgs2;en>Q`cZcX5W|2+Kx=;>5)AW!VptDylrdxdhvD+^@eg+F7|l&B2`? z8rq4EI9rd9w*;xqdY1ufY49uoDM_Mah!FSoYVxdWtsl2QpBt(XVx0>}Fbu-AG(m$i zc5rdcLXB`NDHmD2O)2}p8Ri0ql))Ek^>LqYSr-_YOISPUa$lps!NF>V4YDR2A%pbL zhRZ=B5oCntClZ|hsG~#Lwm=2#N^&!7ir#x%dib4r;}>Utd+2L4g>+$4A8dd6 z2eIE|2Ym1DI>dhHFvk~oPwK)cB;0=K4}Skbtw0(tm>kqI*^x4XLA!))syS=V2FhG! zpRj5G^u%v=j9co!7vo*<=+tRlo~bkv>jS zr^-~N?%VoZvI!~GJYsgx1(Th3VxVVm61=MQIW&EHdmCO)1=8G1glJ;)R?iqzKdup? ziOQ%Fv8d0Y8t^4Vj>&y}Uwn$9g+i8%%33t3a7IVrB)1~=KP05kpjE)pqd5R0i% zS|rvLX`vzX?NCNV*YXrw_KP!K)O3@ZnHz^~N;{W_>3z3T{8+_B*E6JJE?KcT(bqgr za8E90d)4rngu3R{VE%JpO|pf{>#OGA9wI;3Oz}+Fm3$+}?UA7iu{ozOT)c0`V#KCuQy zW7^1wcTa)+Kt74(Ev3mfL?AaFw-*!*i_Oxx=oW``fr)}G| zZQIqhZQHi3o!`6fzIS*3v9TK!bt)?}>(t4}Q*|mn`OPe;FPc~NyI56|qx%t+fcZmf z_KeftiMS)(2Fvu4nFrY~V0C0KO<(|eL{kjnb)PB81F0en<8j3dOC6`Awpr(_OZ4@1 zsg8oO1@rD%^Tx77sTxN18_Apu3GrmIJN7)VNDucA0b3MfLj6{3_xc`_OjFNT+CM1u^So)u)({H2YA5izN+ZU8XIy zWbuhgMv(wkZeE{vqrw#x6vI{t38K+tl2b*euEEiCX0fgGUZX!Z!-M_8jHE48=u)V< zvbUVgIP3NHvM{7Gv07%#N{3oHA}&E$f8SOS=l1hw}G5kHu?W-{2hN@2Zmk_k}5Nyj1>+;%>0LZC^J#ASDXu} zP>vxPH9~Rtc=sG@D5XEqYG&L>j&Tdhj_mo8h?J8tUc&44d|8rw)cy0o12W!6+S_v{ zW#I>a!7UT4FeSC`;P$|q>HZ|UQ#?NG;EJR#V_1Uh@ks65DH7iR@^(^>D>Lke<^9Km z!^S=JcgdGsHynh2ML>qXJecm&C(ldyb>oiYcG%AB82{T8n7oR)+4iDNxLT<^Gc8qy zdj!$4zIolceKS`?L?6-6zyTU+2*cdU+-W4E9$^s~@asdmKS&8z3^W$(U%8M{sAX3| z@+gl*-D5A^9f1?6|D`p(KlfEd&_5W7lZ|O4w(&HFv$X=&>Wh;lL0UO!q=N60*4+oM zfGTKhOguRa@EgCg(*Ilf1 zTq=re$lmT*WS?f?SFd@@xJJDGfd(l1!KpS=xI!XH^_1#38!u(e1DjLl!}_Rh<;j|I z_fxO9w+sR|K&!BrOJa7a5>6VTqUfzcbu!hpq&iVSj*3UvO|6kk;GpKiPg$nJwIHPf z@U;|^y~*jvcqZxPXVXQ$%Z`QP4gV_KP_J9ZVGwrPRM~z|f$Cyh$+%&qmC#CVA>^T0 z(yD4#pFeJ!%4zHw!&&l?Ag5TRP%dMv)t0CvI&P~y7uXe^ylTn&WOi2tFfmN5lq+b; zNKo$7{{E3|1s%RNIcxGj{GfHUAZK2QO@KnuisGWkV+bLfbu{{rM}o)qrYo6>VP4f^ z+)8sLQ$UFU7%X5&IY?Efy$rStfFB2A7NbZo2Hw{oK>D6*r>g$GpfRCCdR9tEmLZB> z8PD3|(-frjw6PC3>v{X4UF>*HFnP}TNMZNJw=*5RH@BD#2l|}LXzzBte%gv`-==8t z>Lz}b?L4fC2w%&v^%)3~Gh1D3k)I0Z0{B`3WsJtk%B2RBQxSEv?{>-2iP5AN9W}<45 z#raGSJ0AxwnIWsr8uNFUI#VcZB8^~lq=V)84Vly1K0lA;5!wstSOwmA#IH*6Tk+6D z>P+!d9+0!to#`bwXe>-_z@%MaK3_kNb`*~*Wlf`4>Hzq|X^viER-=M3Ulyj3Nm*fJ z9*0>`7}n<&Up#<79z`8+vZ0q>zcG+IFvr4~+@R;2U_`kXGe+o1+^FP4)*O*!VLHqyb?cW0Z?|e2k z_P@T%{$H~Fk8b~y{#V(*YX0;5pFMH>-RnQ)?0*IOUv>Un`>&S&SIOU87)}!N|6U;yF`0xCG9vKM#Y1uz={y(qyoBp@R z{~x0MpW#mb+kgMt8UN~>{}A>6?6&_y(+M?c+1OYKX_=W=37HuFA$0}@Hf@;y7ft76 zVEs=`|0lZWe>DBSj`+Vd{jaji80r5r?g8_+Y5(KutW1CD`+qc@h2=jror&RZqyJaa z2^s$#pY`wIqyNhI_lf&&O=n_bVf}j$|IO))9IR}Az5f5p(~~-(^%4g^+4-2K?=SeK zv!}OA0PU+6ek8|L`A8`rj^QtoVTYZtP$70wx6GU*qHCVj?#|PU$^eFC@j=;JA79pxZ^?@ZC*E$ zL@e_-!nY?io0C&kESaGhAKPsY`woM=o{fbPx9PQ#iV7iM@^i(C)*Bg~jXmeO@;n2j3I;4fwrhHx6N8E-~)z_o>w? 
[... binary patch data omitted ...]

diff --git a/Scripts/Models (Under Development)/EGO/show_graph OUTPUT/PDFS/EM Composition (feedforward EM) figure.pdf b/Scripts/Models (Under Development)/EGO/show_graph OUTPUT/PDFS/EM Composition (feedforward EM) figure.pdf
deleted file mode 100644
index 7350e045ce7c98d105c647b19516a3947f0f845e..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 27934
[... binary patch data omitted ...]

diff --git a/Scripts/Models (Under Development)/EGO/show_graph OUTPUT/PDFS/FIGURES.pdf b/Scripts/Models (Under Development)/EGO/show_graph OUTPUT/PDFS/FIGURES.pdf
deleted file mode 100644
index 5ed880f46d2bab8fc77bba6dcbd3f485cc6bc7e4..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 417564
[... binary patch data omitted ...]
zOe(rq`u2Rh+?@24aKvz}mJ*NfgaZ#J9S0Ta)D#r%YpT8b)+zSP`|mF_Rp(bDo#tQ2 zDoSym&GeY}poU`*If7I|U>ZF4Bf5sMY?imvYWI+_(3^q$NT!rJpf zuJy!NT;IAoHvnZ(sqUuo&o73er5s?R9S)LHkPC)j_0YQW++M& z&BM4visClkS<@Q<4H@ed|(h+VBFlSqU%t)~qTB?)# zZS%OZX(ehasH7sadCP)<-`{O_!E#{iK<)4&nEFq+_o+G0!@8Lk)@Os@kfG zp8>X0Vj;?EOU+!%6TO9R6*IUVE6rUpTF~x^zj_)yV7Ul(69)@eS*IW`+{ht}L*@{4 zbwJys&@^vsVYj=Zsk@zo{jIFoD7jAoMU3yC4x8pjg}gLQ{KPEF;PkS8=>E)FHsO4A z319&>uu5~v<`>TZ;jskp#lOQ35iTrsVp|5{2~mJJp+&8*@%)jVH|$C?=(t!aW^Zxd z#F^T zDR}K32hGImXZCRK>GKp462HvO`)#K|Y6ula0!Y$5XfK{MP?DL8h%FG=@Vc;DQ%jvl z|CGRu`Lt&;E(dn$Dc)QyieZs^qL+?e&gp#a0mC>6`|3iT_LN&~OMq>XUosDg)S*ppnSDP}{J!m=beB@-yT2%6NL?}vaDR4Pm^ODM zZfa_vZA|ya_hUDBS@ct8t#z)M`g!kcTWnPT0}-r~M}_6lf^5$h}=Z-Ds@qFU}#I|bZ@1B!b;fsV+pqizp^Fbg-BW39;w@ky_!4rS@E<(O8 zEbafwU~i0`;BUl4g6FG~cOkMPyXUX`&QPF=v=7Z!2eOIUrbqnvlJxNKvWNW{S%pVm zMUda1@bUi=$-_=keNBcU ziDwi)b?v7YqY>CR(+x;UHQ!qwp1jxEnaFco>>k2Ec)@F1FcgE8SZVIi)>BX`Q>>|o z!u7U?arQv>%1zv4)YQk*MGFDCChd9e%lX`YK){BE;ziijn_|=NeG6y4*Vk;yj4I>L zY>^OF5qfZT3A?1FIU+UQ6=NiEh$0fW6nqf5%CH3!XN(^eeXugb;HEij+0#P9@3eF{~^rj;FB6Bb&cxfVfJ0LRTZPnWhnMC&nU`g-Lq@d9CL~rLgN%=k`kC7@INr+n0oAx*u>FviZacEQWz1+IFu7g z3&NPiJEVHxQY;<1P1T;W8|OMFbRKE`$?;KN_)cp{W0nBQ+1eFs8+xO5V zYAQ3?(-2^&iI((-1C*gD60DkzrRFUW3Xtvs>f|`7X_yc_N7b{E+A&N!F;}Nq=q5+M z6Ir&b5>*w9|BC_=h0L!%w+DsePbqjZ0Gc+)spH_o{EImDPZa#18PH97czq#5D%g|b zN{H{7ptoKpntqfTPz1f_&PARtDChqJu!|6|=C+%wKm)9-*pgv>Ku<%pN2;i$Ag^+( zE?-|@hkl)j_1^nemh}!4&XlLF<__A(ANddyE2Bv_w45L@ga;5_8hD#P@HS?Z+?kPB zfeQC{j=*!TuAxN(yc5S)A*HjgYafHIih6VNF**Dl^IeJWV=i$-;yW@az8iq&4lEPD z7tY1h_+gr){3b0vcGp~X*1lo8b|oB9`A-6lLRF|+{F)cXwxEL+Er}71;{}~2)Q^rg4GH#w)es%xt zH&uo*%`q@fN|VRAsH^)UKPUL!iI^(pba23*DUL@KeVy`+I_K9OEgq<TjO(My?CN zs*m6ex-xi&WT=$bGi8#)J@my7cLWu~Fjkd{UKThN#ZnQoW3u-0wt#&S5;7bPJd1{?SErV2$?lU-{t<5qE3o^Ga>nq zJ?$82Uio@_d>{)MdXmB2i|l2XbRSt~ve*zFnlQC4JwB@Bb@QqHRLUV1*=)Y7M}V>E zmPx^hG4{Tg$Q%@__r|wCgxz(YVJDn+QUKH}nov8|8txplI^5~?DbZH+?rK(;TB+7C zE}0Vokh?!b!9uXCXKEOjN7Zy%7$w_!X>6uVt;~)L7@uuv=LSyFR?9O~(im*aj~JkX z5DW=vn?IZ)4!Tb&bvBhgRJ{Bh&;be@FY8;U!2lph##W+ zb{4A>Bk@NA1P4r5JwFocF5dsL@?5%G7`*}itY>eR<; zI%njn`T{*4qKEvnp4%Son!gk7Tw!>9cG8rv=ft_k;eQyf@jpo=xTQn%ql}%fE!}`| zzbpN8kM42Et#e|;r1mSMl#y~I`i3btPaALB$fc2Gt!Z;tjmTjQXzewa7-y*AJ*Fge zQF$^*bxN%D^Cv_#;E$=IQcZPB#T}O_|6OOS3)kwMNHXZ^4b=-fn%>9;8v<~=;Q?sv z+iGKoI-3B(Q8->kkF2CO@p)mw;`c>&7$?)$lEQeFAsY2Y!-NpQvv+QfIEvZ{s~IAf z`oO0S_7p?E){EySiOQgDo=CZ98xJ3dUo_4}zFH!RSP8o!H_ff&);y`K(%783)NiZZ zLPwg9xb0j%YO?p`(L0z+fL;U;O)`w#R3epcfW9;m^H4?ca3-u;j$CFZ(X`#6t;5~5 z#RxapRIDO^06{L3-UoC@ly@=Cht~TetX0G^7q7KeV)%mMT=?mRsgIA}xO_Y3=pMn_ zcaK5xff^(sa@ujI;ULCQ5;tVA($dsA;A~af@8u^~PG`kCkz)h%CsZ$vTz0tpDf7tP zJJ~1{zQA(T8=R0fV0h$f?C8G8HwnWfQwq09x&<6XiPH+zNOwiQ@P6-N$#!G*V8R(! 
zg$h6VIir)eT>JElz9??By#cML`(Sh9pTY}!DZT+BbjJAc$7J#p72){;{+OqjMAOGa$F*J^ z-HHlS!zU)j$;hiowlKL^hm<%8wJW0$?9_vD4k93s-#I9739w}|P-Miw^ zIleksS#386Ott@_+<{8-*}tf&IffLtJXl+p>5M}@9H2?wj^RqZ*Ih+Bbxw6~{>$_& z$yWx;B9}7Pm!z7ULpbChX(B6;oI|8sLBmHt5|~8Rj1DvbmPD^)+a73Sy@he7k2mGI zRNED+E?PPMCi(0x9j;f>mJP_KJQCPNFq{U33wcRHDfR2E>7*YAWyNtGW)|cdqX8 z+TSM|a^S{=q;%{d)B;wD7heis^U2R(AZ~&e#h~E>#3lL!ZaBiath$$ReE+Ae8bin1 z?F6BD|1Y9?(yJmFT#1s;3=T_Waa#L(q2~dzC;%q8K@>A_gV3Bsoim^dzWw7#^VFm3 zo%qpZPs3JH4kjWZ3X$tW(>1*n_dOmjM*v;h z(bdxlHLBL)dsCn8Pdk#CcyP10RZ0=zNnjG>`hbWzQrEP60c?!;r zd}p|@&_NCsavN%}ft-Li1;=BC(-i;EJ(68WTZ6<)8>cDv6mJ`~CGFc*c?5w@U57g# zI9|+r;oQtdh2ZC4uyy4OTQM{iK%GlF&j4FA;%4Y_DtOC+RBzGTfr`aRrR%ZoJt|nTqjs}{5Z`)BmHkWg%}zPWAJ1rD_79S$OF=n zBDyX5A(r1Aul(_!C@+I{Vi}jVUU)9kOF;GL)Sh$~j+N~IEg19ZFyjySAs1@UL8QT4 z04UgjB#e9=*R45quKMW{x=~*;YiyodN<>iQk4A&(nKv)auw^CcTUhLm_~iu2U?~1( z3}IqhW|_ib>I7*>m0Xl9pE`++X{9M$q3yJA58CUcZ%ds~_IH=X^d_Y-BfvOZC6c55qIe;XR}ZfN zCjhrGc_p8Egtr#}Y}#5`c5i=snpi$^%&<+*!KK*R>L(s6-J8>+%CYHi3tIc6LD{4G z?Q&p=#$x@*qY)_;Alie@1|!o&qFnzj_ZS!Jup|b1X`h^!V+^{!2VmuTP#e(l;6Dn6 z@fAb2|AS#`JU=La^Cd_YkH|heRPlM?oHa?I9d^VoQ^?M#CF6*`wdjb}JY3LslJggccEl*nz@Y?ovwB*$QETbfe! z=mK82|AGT`UwgszVPi?*&sCN;V`Y|n4+IWQji(cx-5KYgo5dis6o!Cx~>` zgJ1^OV&OlNSXOw-s;5$fI$n98HR?kR;ekO`p2@=_RWc^xBOR;K-zPOX#Zu+&lx>KyS%RTW&=12Z?UI;p6NyaSkgRJ>1eE!e1AQRv( zJ>#Eeop^5^QS1?1)|Vpoe7G^X*SBp*=g%NRS%D$GossX5_!O@Egvg)PRMBdB&Gat{ zkFU4CZCm})*XGpa1cI557S;BG4^L(Wg^F$*;F zIXrha{Sl8_y;q!h`v7$w49GelOSpjsPQntnRXNcSw`+P=)N9XFW=aGUZTP;k_w+gW z@bm9?wkw^As+%fF)vgy$Tv{X!kQOVqGk?bMpMKGp-S|OpLgBRaJtaBP4Jef%^S|IZ zcQ-LJ?Ui!ND;#p7!(~1z(Dvurl12B-wMd&4)2eur z&%qkUoB%zdh~vyDp$^OC4TfY7h@2d(?F<%Rl73 zYGUc*Ts3)cOev=#!s&C)jPQHeY>hdqzo>i&kZl=4j8HZ+yoxm&rN9+!9SLove0=)U5S-!!LqS_kt<-9e<|#$DU%0aH!> zTXs3Cg0kVOR#wC3Vb;YsvcV#1dZ!{h{ZHfNH(uOCYHKlXpf$4?e{|5T+BqE5`Hd@- zd96*OXDgGPta9tBRrb8OeMyeX>qonhrCvCCybuM2r`7}Gy$TFNh?_?0!XKWme)tQ! z8xe9}CYE(AC(h+Hb*M%{4pR-l1(f_GxpAbW%&$P$D;=WLV?q9 zAtDMtxg@%AmU`3OlJIQDXD6~nMVgFHX>(TO%v1~`y9kVUG1O3-S&C+e*j|C6#*c{I zV1d(0J}6EW3dkz^mgXBJU8#I}-``y#SR_>c*rj}V&Yc6RQGlDq6VI%u7;~`F6#?ey z7t8a)nWMuM5^*n7l?(J(#TN1g&y#FWNFwA}44(otG<&*I!0~0XFkJ;)3|`44bJ4~u zJOOEULWODCJo{2bJ!iYJr)Kl?^tph}%}!(-s>)mfi;4{oznWYbo|{st{PFHhfMC10 zQ)Q1sf-jb5o{;KoW83=ZMHoKlTUVCv4%ZDCjY^*V3(70RV8VWiLxU!v$6yh*9QlG|;JR7&% zQn363t~^#rFtpt6nG^$1;R@8J#y>&i8m2l{q()q6uDoy4`UPxS9R7T_g?4<8Qeqx0&{CUvqmkS>U)Mo+UbGb-&V$JLsu3fnPN^8^D{!ae*g zKEe@{MaCh^eO`$t(X+4l-{VE0hA$-G>J&xdUP9!}sM)5J5%mu~W3H#ZiCGLMe~3xX zU?;3veV21SY+CwaPkh4j-DdevgBCCjBb9P6MEIx7V)0x^yP*33L7KBH`cnFpUSFz{ z<3rCQbrSe-}!daAfFQ;;D7zqqZ=&j%As06{_PHrd4WA@`Z%&ZP(EIP$gQ zk`XnK3;_lzaY+Sym-Rt4WrEX`bOqx&)re1!MYEnwZYR^7d?7}Md4$2&YvTzT`*Hu8l$;e~mm z;JIIe2Yf=#pb!&?`~pJ>h*+GN!l7I4(gE}!0dy^YD<+`wJ-DKuq$crRccBOvor~wR3202c~hu&T_4i~YB&XgP|yi%2Fvqy1Y`nu`dJcO?e61_d7}RHJMw{2rTNCl zd_Tdm?T1oU7{=fW3kqGR_MdWQ`Y8Mv5Cr-|4lA%12|fUL+@WdudBY6Dx2|o?c(l1) zH{|+jvpViX#Ky9twWnc4Ti#{lG@2Fc<7g1))#lAoNKD3 z=!TF%O|ba)>MRHT<)L23d+yBf->@q4Es@-wMO6Ti*Au)9g9{#Tnuy;O5PGe;*Hg{? 
zJJ)Q|g(tHX&uk2jSVoo`@-3B?8vUr>!5Y%c0%EA+dE0{RVT37MGjXJ&MKt{bWB;uT z;h#Bv?XUDMk9Y^kNOzxcOE7>2KdU1#>YIIF47aZWzW^45%bhL(B(%;`u}SgsD#HvR}34T zgqJK$oNBC7?A}l5FZJGYq^i0qW3~3vvnfu3MUkko1tIB#919Y^J_AYOIRoXGIe3xZ zSXNzNV%*sxEkqUhRMqR-vG!+wns&JOul8T{r%pZnwNMlzeg=h}Qvl)9=s%txT*RBW z@G^W8v1-b0IDmEG_>BOcW{Z^B8Q<`ldx^{Av_0jjHZ{%P63-$u_IWK>c4FYOOW_2S!2>oibu2$g4WOezbG z2V~RGFhMzd(Mvo-iKXRU?8ChW;_kJ*vps%Z=EjpU+w(BJTjgtU=yr92{X8;8uxSWw zMT;`n9P^gN|d~sxHKzAHBby^C?5RFFo+6N#PB8MBX9L*5AA!XQDC)Q{G z&=lLM*+B!Q@M7&Xr##Nje3kK)?DQjYqs&(Kl|3$RIs95pn2}uE5SAz3JW5)i4Yb9&dq;cXachZwzzy73%SYRzqoP{cXl$6|k_({dH}mkZXCiJYeY!!O!>#?Z6A$_VNG0-g|~M zxpnKpL69mUU1~&7R0I?$DoDgaS%`qrq!;i5y3_;Bh=HS;w&JsC$NACfatqjsB8i&~(++Kbosz?9*O4%-JRT($44y zqjV>0?6tCWm19*DL$O+1tznYg3yb;rnQdi-yNY?h+4?h~Z{VOg>K5?GdhHx4h<|bR z#O4)D&1gnpGg0vqYMFmSh$r(6qCW?0Y9c}A z&@5Mt|Sok(7p1htoGtz%_B!d9G2gj$h9XQJ~me1Ir_CzS`Eg$T>)-#4;sa%1!9>u zLZV@-MEVaHLKqw-nfQZEm0*T4+whrC2)LC2pqfStJ5Nn2{#lvTtm-zbnW;kC^Rjqm z>GzDmMbki;OPaxOQG+j>QMWWYH6DET&+~nZhU2T0myH<4;qTGB;nGZhXgyCm%9$28 z#jvEZs>j`$GTwDRUzPJ-lnr#>GwRx!Ep+p5eHQ?+|Svq=vN?IYOXv$ zz}07=p_xR)wHhK}GsSg=PDG)m%n&f?T~XZf?>RW8V-=q-NHf#WAdoP zws27h!*drK#<4x$+xc&$VMW#4xI4{xKauIgro+{(F`zc=5Kk6B_5Q7>-%mLbF7Ok1 z1J4v`rSBa8N0g;G5<=3BV~di6)Q*B0v~%KT0Hh!8kw_As{4z4r>-@M551HTc$z+`I~K2Q%CdH2OJ@5npY$j9Id0FtD*LMf#Qu=y2$fj>jO3}>dz;I*5G9Z-{; z5vAs~_q#NoJu!trg|X2dK!RS$!FiXU;-h+*IBW}kT!+1F5R67xnCwT@1|vY3f^CCK z`cdA!uJEmsPgRi3(GajrUS1u$9Nl5SjW5H*;05*NnKIPvDbz}`VS)#lC^)a(?NR7e z7pAhcx=Q%LLH~}s2O>6~rX-s@+dloT)oOpQTKhAvLO%xoT#N0W5kC0u^x1Ft|8Ght za zn_>qAP5WpK=()KD6jgVmgzTt_8Sm*;nw{RVC($ZR{qAAFdHCAt?>rd)m9OY59j5$u z>O?yoFCWewh6H=OllXqE6Y|G+4tIul-AotR&t5W-dncUJuwzE|JYvAT>F4GatR>C* zXb&Q^AG6$B!hR1rLtkJECcAKf($U4%jYC?z*7tPMUN+x-#92}C>Hw`$aZ4G11*5@y za+|DVT)}=rv2X~|FsJ}SE|*WsOo02?6hlfi>=8(-8E(+-N@rgVM{TB52foH_pDT?pC}^w#%cCsF)XscxV2r%Ugqq^VB>RJ^70$G1AaKM>UJS>f9E=8CY` zj`|n-64_p(MBE%x_C9T@lP%n+-cFQS*Q#rC*3^|dPq0 zoyQh9%(xK@LG{xc(O;Q7Ud8V#z4dp~SVQfBjJT>s`9TZxj?-nHsrs_*G4n;rHaNL3^6K zcGf1N-5qb+}o{_?GVw8yXaf1RKGSxv=Xw*TvF|9`tJ|19g=|Lykp z^?ZNb-1!B!{&lwe<(Gf7NyqrGL!x7(Y;?^laa`>7L?$Ggd|E;95%)p+%d zx+tL|6amSvu|1Ww*k>wln_xW*^BymtN#e~Lc4XVqwA0C8AGUCM{Y{i$WX^bR(Vem_ zS|WOVRlEVb-vi~VSE~M|B>Q*P(_gQD)ARhhO*sC$z-ZrB>6amA8iusdTrb4IybZ+$ z(9GLLh9-Obr&a0&oU|R{=4zAeT(w8MUp2o6{CV*RH#{A>8M$^r~ZI#Mv3S`{$K9T%S&U;OYN^q5f{L|H@a6 zRX`lPiSQQom?lErhsn7JrV;_eR76k{wI-pH1YEG&y^6yvJ&^Iwl*pB z{+<}^$k7ec&u4mes3bP)T=C$XkN=LRpNDj59wbDwDuRsdAjWO{fJx|`_3{tVWGf9* zc2&HoO`85o0`^+EzKkm`@J;!U~No7Dq?1cKy z?%AD7$m*$LvPqZ^D?QZJSt3PzdMsoRd2aWGQB}dtv=WWBxTjA!l5W5dCZ5>0Xh;EY zSDro(s`LBpnI?()Qr*<)RdV;>xuRSCrR{+kxCw1>^y~2|U#Uvpca{;`c>Q>>#DC2R zu&bbnJF4^t427DAB`Gt-59>>Z@j|UL=3K$I`&w?F9kxfsu1~PkhRu(BlG@{0zW2Vg z;oF{9s&~1s!>}toY(B<}xj5696{XJ?c7`^|B)oisg?L(TC*_4+pD1`!UpGi8$o7({ z?^Zl=_^Zh?p_fI^|M_&}f9F5{fBx@371qT*MR=&hR3?ZZQHK;o+ha5iv|@yv`Luou=^>r13VOfyA_=0+c$Cm=vewo^JUlyfU?H5@`Yc#@xuu=FI1L%i z3?TQY?+jMCQ2lW&>$g3%RgD!-lyTY0QkQn8X>=_b8D20b-~6X1+xJeu&U1`rN>Xn= zfgl%lAc0IkyKng5^fxc2NvVT@*RAX6k|JheDDj%zmRpYA4p!rz%WtJ}!wG=`<`P1L+kopv6nFm?H+7V$=?PVVzIWtCUgO*Z~{H~T}_z{a3KLoc)+wXDJr49@(G8booUDVtIa3(E=Bltj?T zpIH|y{N8u>cmLYA{z?A5VL%s5m?z0-c?{BU_th^=sGm0NnS&D)!9pSPcd-Om4MeDOKFSd(g) z8sH_&?7NSU(mPui?9jRy%sprM(a*K2CP5E2`0m{^z%|gV$IslP+A<~j*nH|Vq4xVE z41ajP{e$PPpj+)RP9~6 zl6Vz!i#>lTul$Lh@1gW@R{l-=*bMy)l)|{UI8us+-)+!Emvy@L%_bC6a@`|8HEr+H z_kHxWxnr5s&r=rm8`MihlR6&Pt#rV}=h>pK!zi^QsPg6AN0(x2Z(hWdqk@lghVwv} zT%&1BY({TVV}V&)(Cu4owiY@tW<|^U*vRA3KVaLCgdTbwD;MFrl30hQa57AxG!W|) zKhC;qI2w1z*qKV0IvC$q*;?fgm!#BEw62u04rhv#viT!ufYHpKVeXkD6)v%eLy@0g z70ktgZ=Rd(u^n2iu}VAmcy%W6N%0_AfDu|ps{^)l*8^F5ix;NqnEZ+rRkimAtlfJ5dKP?%LUcTCo 
zSSz#msbsmc_XkWtoe;!+6AWDE)W{NRODU5DjDq^k2h@kxdz$f^5;)}822-q*zC;yb ziiXnFX;_N?%zXH#CfRDff`#T9?cCx|uVUT}AL1(MKBE3RNmSN=$m6$z<`GraTvC{8Y1PJ|2~!n^`3&x- ziRt_9RoU2F|7zW}_4AEqPdpx?SmLZaG-*@_0#kNbkObkqR_P^XLy>@B?75Fh5}X##!0m zZ)F6V|A3vNSF?`*Qc@(Pl5r}03N5OMe*mxgk}qF9U9NtnvR`S~MtIxk^|2><#eS!< z68-OM*Oxr=eU_?rDUM!cxr$LQ!=W9NIgHvd5(p_U(Fn&=&L~W=Is9A zTZsuOp7vG`2F~=WY{~JhyPDFYCexj0O8G=P#J+&P0`ve=*!-@*nlaR6VvS@QM(i{h zF9an89Mo5&WwpD*90$Qw-1<0pn~{0BPZVy9InTE+))e0!wZ@ zu((HFje6_j$E+z#fKr0oo4QlGvp@YdYrW_GXG{q1hq5ji24HB2azm4QYV{D_b@lG5#{OLaBX|WgiG(58(wkYSKsG~P0ZL#9IZWY` zP`Aui3PiqY<8x_v=v>xO3y!t3lD#-fe2gEZBqo4RkF3DK4RC1U@8 zomhd6d$0vsX_>8XsdWUj7rPuVt&+2CVRM^sVd8qPhD{88@#~kv%5+E3TtV>$i#nOK z#Hen?ohDX^BPWHvVmi>(LCZMmOq)>)dG*s3ly2S*`U5z&$b90h2vAJ-JOGFTz!n3RlY*~kH{tb`9->pGb zeMq@Q?%OgSt`Ijlzck&GIfl~O=a`&sDp;O1WWC|WKiBElHiqmQR>)R-+Pn~LiUqE#{Jz2{3RMDB$zJ9yfrY8 zM&uBS)MeGx^|T!6Hj-bv%CBgG~B&_IqCy9%1kqe%IWnc)x*hl^zxqWq8}m9GxR z6|s2z$eK9o{W zYMzNx=rks-s!!0aUd$oO=Bv!n1de3R5A!c(p1S{(wj<-AgR9_g?nS##>X<;La@8B{ z_&E3JiPqPSg_(fz)=$jIJD_ubnX+3+7}ujzo-C#K$ebF=5C#|R!IMgIsMp~Ss{072 z_NM(IH*eK?sxiH0eCnU>vuHodhsl;kto=qH6hxG!W0LTZp z!gG+p%*yuQm!J0sNMK3`$fMSbPTz)?*LAZ$QmomQNU8-I(pq8jgKy*d1X%|$Y{K+u zYMC_7R<oz>XPHQcb(M^HF}e$66|-E4kyx+sY5yyT-%mdoako2R^)_90BI&J z4uv4)z-un!)Z>^=g^+GBZ~p%lCX6d=>T zs;z~DtN8?8ShP<|`zCijSTULslj-^TCBz>7rk+8=W$rF%J5)w~ix@x3{)n!i(#9eC zaHU`ewusx~w>L4A1a;hf1A?peR8S^f&v)V}?Q-?Y;QE$uNzc)?y8MEt?tN)$9!YZ0 z5-voFslv?gMU#fnkg7yJHMw&-KQ=d{2132I!$l=UJ^iiWVJrHXSQLrmhg*rhxUIpLui^PKwoFe8opstt`Q{iO3=Kn4-IXcpG#)c(bVdkLMkZEYddWOoyl@dpZ%rE&kua56y5#CG z=G!V_>F1jM!E@M`D2m^yL&j$Rn`@q z9{&S&$^M^4g{VT~7_v|s5Cr8Z)0YK{Ra9x1#fYzReHwDdlD~sN$aNe5g=GEG?#u2v zm9*QbMBiF=dlYeLjyR4c&0)f~FlApsUNrUT?O57r+p6rZm%>iJ>_o{?{sj{1swtK| zc`E6(+D9q=?K}o+;{QWwUF;ud9RKY5@B?7(xC6m1d5{D|b_Sn*8!kfYOsW>*Qjb8X zza%Gjd{Dtx-Kjg8=XNV~$d9PB!{AtV*8=h_iYZyo=GCGuv3X}n=CSgJsED=`Mjb0N znXFSE>{iO0t0bo1?YnAoQ4CXy%P`>C*^VxoMe`G>VLHr5i17_}YsLmU#omHv7Q+-y zU<;-}VB56y94oF2;RBasT7PU%3=Jt=ly)9kUK>+;Ze4NftiMuno?B(|;dbz2hKo4`H+`6}YW&EZ8{(H%k=E$ulI?hHDSD#0PwP9#TDfC>z~0H+ zG-pUxudrlOS3nMZ84W`>yXXaxurV}XhTU2|z3Nsqjl<^m&9Ww=b3+4@gev@WJ{{V= zr{jZR2<0RkKWzIQaI&?Y0H0pKuj%-pp`-nXjz)D74)BEOT^O2CzUSApFBp3%rS>$q3PbcYg^yD&)vxVWRzO35TY2-3kx3r*M}iUO;dnEE>bVJv^hsmAI{WzO)Ds! 
zN!+_?0qATjhF{d+Dnh+%#BHnNxkElC_#Lf{qhb8np73f93YInlQV52NWB71Rl^Zjt zNpTcaF4}`LKQJ;#;I@h!AsozlbS8>ZsZzvgJnIPt5R^}l&0ALPoBOJ_p#;4y@A5e` z{b|wWy3@{knL%;yyBI;fPGcR{!BP*l8QT$5Y&26Gj0XvNM8&$jktYp98ZH)_lB39DFL&F8X?7r%=TS{VJ;-OPdUVW&kqYjp z5=$6)oc$pKNiog81hu2ReQjF_Wvfu(h0Io-)_%uRuY$dp_~m)^rh$!>Jl0$`jlDb@ zILDRqyy#CrN&idOiSzq6FpgbdPyvmkAe!`wD8`hdnRXC`*AZNOWWfWz2aX=R=3H+w zcCi3@rFAS_9J?9}Lii;9Go^_SNZsUx0w{-aQUu)$XdH)ta5XMx`3QA| z{)PtcIN{TZYtk9|+?am8W{@%$|0%!i73Gq=n>&}`c4IjoMk^}_mnNXm z7fEA+eR<>}G&hZIUBW_2?v?2L`9oeo_ou}x8;5+YvD@aa9K;ciz|{l!uj{T3b8fmk z$A9Q!ORcblz}(EOcL0Xyh+u22_brQ3PqVkb9Y9?BsryR_nZ9E`M^0>dgHpcll4&*I zmbkXA^sek-_;*Edo_bfXcQKNUoW!b?E!94x3< zBw6`PAyS{9k3M`@fhJCTP;D%t8&@q5WQeQ1bj;(;sxa4n=_<_Lk7qr0!O1>sfuDE_ z11bl2`x`2~%gCSdZF0vo)`W!&vmo&a=7kcDPe~ zd$n%d{q7x)Cq$!l;Z#3$rZA%&1XMuCZQTgXD4EfWrq90D*_^GeFMH)xGxBc9<4Q7C z#5mF@IYexERcWj~?J!8t&aC~ZQU=?12pn>G08U9^NHfo~((J#U&_~J#qbvp}FJ~_d zOgK57?D5qtVZMK4_Ilh%X$z12X5CL+O&&BshmL~A8GFK9N7x+f{w6uVf*OC5k04O0 z+cnO|5qIgi3oMQA>*sLr;PN$T{-u*YbDCD-2#+ zSKn{aEu2~OVt*e(?bw&PzI%AiCVu7?>NpkXl<~}G3k>w|Xdjoz@hx~XG6b2Iy2T(TlivxT6G|+iG99+om zFayi*Z|!I^EoKk8`4jS6jJg<@;oZ;B3wFuK0w5@t-j6JOPHayx#ePrT{(A#o|` zzy#FJ2(k4q(_dyYck;}3efMGP$9^_vZ|i}2$t%)eQfug$QDgX4OCiOM^n&S6>kdxm zr=~(f(38n$Yb{q~Y}dzu;z=SHXPYp7$czeS^RG|)2;Dorrzyf`+E>zy#+{4utXD{} zY+P*`I^MZqS>V}PYr%QUqvMOx4_HAbQxwj&tYm-{d0dm4!+t{XVSWTEXGD2Or4vcD z%L)|YUQ8q8d0v(o)NDMxTygOE6kYg84@%*}$uzA;L0D^L9V`vH@gg6?F9(W?QpSsdu<$eNi5%)J zEK|gkUe3aVE3K9C5$o`;!YYRRqaEi9d=MS!r9Jg3LKJ=NN zjRw!ukA)M1b=U$5c~D^gVN83r=;+Pjnl(OI%SFzh8ip3rANi?}c^X6rE&$O!7xo3V zAnrhad)HI}&}dRp4%G}b2A@9B*}gJ*)? z=#wkI-8K;;F^)}FF2B)F8m~PO-b3woH>|P1$z6EK8`CFFFjKOfYhLgADVqN0X7?sBZYcFt& zvt>6Fgy4kF4fmZM&ptO%M5t7U1H`tu5lQvkXcdC?BbzgGCM4-N+V(5<=SH&q_5>_Lj~8*9K}?DC=Cr^YVo5(>Cuv-w z5lsICzP=7{b5Rr<{nKQwMe%a&_fgx?WfGA<(#uTaMj7i zPoGa7G^-w#6>FY1khP_O)Py?|{m!R>11#OB9Y0_T6}N$(+HW!!%^@WeQ&bI+M5GQ0 zP*1m5|-eYbApkf|=gTRPKm>Hv;)U9BY4zk^$aRnoOZaAku6Z&8H&*@oK@ud|Il98DBB67*J+gFR@ImR2M2+HS1vVKe_h z|4!-XGiP`H)KrBXf{}pS9vJF)_SUey&?s=aXs8oS(>;Ux__9IndGEBK$h39a$nnGi zy*H$X&W~y;74Kf3fB%3#S%pr=gKa;FgaV=BEu3nE!iYknRyBj8Hq=j>PU(cJcD?PC zajVFwUFR=fl6n5}JjV&Kha60hCp<=cOGe#C(#_NnYqWgZjniQ1E&&nTm_0zGn^zNA ziplus6-XnNeZh>T!tTzWV`poY0EJ&tWuT-qfC*gb7+kf?lW@77-+jP;JZVxHVCsUeavxnwJnt6o8%qB0+SRk{ z`qd{|f*Q*un~%gB5;13$tI@@0VQ2tvG2vh_QRbnFvfC*jOZbhlw0Rep$_+is1K)yJ z8g&E;v+Vqz^0M+1GdbK#097-#2k+@6=KUFw)L115=aqZ-sxwILSAzEaW|UyB%{3)T@NKtd{vr=CK^*&w##Ni{iJ8q7%dvaYEy-B{_~6|lV$ zbyWciGw=D|a|k01XB26Q^j+*tPMDF99?fV&VEZk_B?D}Bf zG8a^}@hL0N5nUjCp+lRXw?a`26Xv-J>1-@D)_ll~y58`v93}Ym<%VVT!i8fQQ@ZYU z0WDSeZ>QpVSXaP<59NG|Qhz{PpJ+vHKJuz&uA!R~iQn|X2NCNtWF$+bq@QO~bad8( zDS@N!gX2~}-ZhO`X5Dh!jS$&HRPPta1#(_1f=X1#uyFS%*+L_uycKap~A>NCq|lONpA4a7Ax zzqc%Z!r@0RLN~kt?AM_afKDnw|m{kU(s=f@hZET?S-H^nvZ9K7-QN8 zbgUB8o5Z;X%8?G$P%LR-#;J5$PztL?T`ADibpA`ku?1c%I1E0X$>uL5n@{MHB$s=s zrDQEaqDuQ^*{Es1N*|e(9gf7dkwBXY;Xwx*hqXuM$K3R`UFB@UlltIeIJV>{Q|zJ* ztEj=mYt9qRtr_zsV*EzzrH><~rdqqGfes083>II;1s_#p8J2!;2NUrU-h~KV&4EB3 z6CLJ^CEUqr%aK%5qNUQ@zPFbX%IsbI7A?kKcKtVS>s^egi||msJDe!+t~^~U@et~pu{cS zC%gksy)7{Wj!SWI7&)GVn;>>BoJHpe0$Jc-AYcRZta_$r9GC||=!^5JN%6jJAAEbL zMP4h|_B~_IY`xw{dPPOl<65sT5ucA*dy+3#@XxTN(an=JNFf9%)dpF4RbKETawaK} z96|O)I0*XP%s*^EH}y69HszJJ?^Kub;T@}L0@qx8SYi#PkD@PQ`7*Vc4+ zx2Ia!%5D=g7(Xg{GZAwHp>_C)8Bz^zS5)`tW2&3k_@``_hV7<~t2b%@7=}9_SJ4@1Z<77QD zU=R?79RpdbDifrMuK>)j=@ftjCt}UF**xgxfTn{?3n&!GnkiMYOpW@>HS4>WC)1>R zlYYkr4&~mhbLy6Q_&xMh&%1&iT86)U9@cT8*Mh&i- zs+tF{Z@fOe)9U13Qqs6?*7`ka<6TqRE7zMtgp4w=!OwUc#}JsvNIq% z)`SWfp6F(SFeiroxHnD$AhO}Yiz%Mceo4uPaOoFbyGL%aukIAmHUDx}W=G(eT-&Wr 
zaRY#Q9s#L=oLSVC&3A$t-)97cGIV|HSK1twtj%{o9dnjd4BzGQRTD9pJX8fi#o0Ad zgM+_ZAw*-2#ia*wO|BFWYOgR)()@~N`eegkREhF(pP&HZqU7jp!N)jB9a{~}O0$EB z!XC*cQcuQ`O-0PL0M_051WihTuld2Z2W^EZ47l^#!P$y{)QpfEWb~FO zRIR=h5mB~`+MCS&CX;(z{c47mweap#m01y!4*|#~t#8ETRmKi>C%(caw|J3Q$9P(LSBSkkS-l zsfe>QN1re+L+aEhdIVK`uB9)jNn$X}ja+^*_sw8+=}}bKtml~CHR}1K=tpm_UUReC z!n^w};vW3p0$S^Pzz~AuDDpZG!Lq=UvZwa3vY2Z9?K5)6J;Myhm;81rowj*x%O{p( zDC~D7&BtYqL$7}>x|vZ=4`n^XyP?L_&4+K|$D_!7F>JU&nWlM>dhA+{uij)8$34S9 zT%t7cpziY2`N>|KOj3CKGa*_e__o{V=2Jgl*H$0}4@zvgcSd-$zuO(Y<3?Vo1FJ)ApaQBx!n2;q$Rre8q~5i!#2EgGxnYqp0HtBI}m0>(=|zHKQ!)r zL3go~YxLsjFN^zk5yjj3WQ;Ax-f?p7vfX3E0C`XI;UBPB8AK(Km0}K_Yax52zK_5| zmuDs7y;%#uNT!< zW!lrG!Mk%n4_0ssTKL>e=o<6Z3on`!YRgz*x^L-O5l*GWS6d@zEhG|mCz<8zjhJ^M zAf-O0oEgIe-K-Zb;zg{~>BR6E(U5O5xb}PHepzMcSPz!xk$uz9dvCtMJ@6O1@w~xx zR^+?0F#01u{)GMsbn{+3DFXnkafP(>PDBKI$LOun8jsrHOtY9NdQd>}kmCezL2Bg( zr?Dg;_aXHYwdZNI%i8XP1h-zf9CZN{0U!U`uuZt;cy(j-vk>l3zM0L@d4SS60LS(O zt`?7OwhBK0DmY^)wmw(0xdtejtIxqhdLdngDC)CmNNs{Zn)<=dZJft@^vZs}`0eX= zysI7u2rwtX)n}PUJ}Qnhs511RJuh8+=SMfM-j1{Ra6@9F&%$8dztfv_l==e}8-^gO zN=&p~7JrQ!(p*v!b|4VWJZu)(o*?VFr6lPsY#--$b+7|zM3AzPAVT_J$2OXYRYuK% zyUjvvAe~UOf66a#O8jgK}E`=;C1x1 zfgC&7yQib8Y~$7S{Ap1n^Qd(vAIsFjhGEh3NrQ`DY#4*)CyL@t95*~%%z<<)AN=R( z&d~|_BKXek2KXx-rf4hqLm^pCy`!8i(BpJG0F7PR(S4U{qG=*#V{=B7p7kPXr~b7i zzDGg;3-)$TN9O{My3Q1T?tOpOaeN_MDe`KUZ-r68CzWl*yGW55eDbO69DJqivMED> z-6?I|fnlhIX;U{M6dPpxt`00K;Ic`a50z`JR|{Zkt8%Y9RMg4HWkH4za~qT&K*N(d zThw_V+YZTWbY{7iy;f>IjbZX0%HEec+N->gX*|ii^h~I{l<9So2ue$s;+KG!wWq$a zDJ`#qIAJ#3-)ZQ3`FmBr{RVd{0jUeb4W^Wn82qt5ZqYZjSr2emfGq_2Uaw1!g4+ z2SP9kI)GqGcFeRBt0eEQ*+!93&Al2+_BrN562^Kba+{@Z44Umncn$k;@Yg@dzv9qi zOtKMRClf}QJJ4)i8WxIfFh_&gal$YA^nQoXBPdjDGJ1v}X}o219CL4Op9^748wRR(KBj2cBTv#-i~O$J>o{kBWCJLM>L$c|kvVP4 zN-^zY>3LbRGpjh*2hiedS@t-bdI}BgH=IQD_3NRiLw0E^CqF7jY1h5X?l5+#OFVDm zW4h#i_0j38Ji))eh;K&cA>xtL&>)KPG%|7!q)~slP#hjF}+P2~n1&*d+V3LjS@^pT1omtA$P zJap`+X`_6)&!`gS9mfyrumu6~`B*j#oP=YYz>$XeW@>nOcz;fX`Eotu(!%lZXvTS_ z9(AQnz-+f|M#l;CdED^UvgErJn)0`gJ*c%7s+74a^ECP`7IMzv%g`j`n@wB5YQ%`8dGHJd*0Nt_d={v?S+qbrrZ(Yu33{Q%JQ=L%Iel%0u z2MFPn1e;H1r{n{~maj}t7kw+p^|{xznl#A+)CBExizfw7dle4WOJ7Oe^8U3TUVTl7 ztPUv%F@@)v?UTLhbe3tPg?z(pTi4SkX1W%Be@XD{THks-baVlKH(Zu^0;rBBpt(S{ zc5_8=z9wpiotMq)ON2I|4TXpcT1>FpA`3C)j)ASL2Gk2mMjEt!z=|a@k$pzYos&Oc zik%e2AF#{%00bYXD@TVB#n_^txoD0gXGFH`z-;%*R;7M;8J}Kf^V&z8lG@U-w43ir z)@7aX-6}7wD7S2O?h^h2uk2e6V(bg=wjpE{F5{`hR;197y=SHcXc|PHs~I9JH`l*g zCGZa93$NUX=oWZWGvnXIGx)P=3M~$&-j-Yj;I)7<{UTL|(nEGI+e~kDGM=v+j%&zL zv~$~GqKFO@m`&1@$DQ+m4zj>1=&mloo`ToJwh+hF%`&Eukf~OtlZMeYr*h5qM=#a$ zAIOrg)QI*~`9Czj;KNeKla}EVy2fA@l2YqNTU4}T%!Qe5jR~zb7F{K?2lG^J6@9wr z$7`HtcY6B;)3~u48i*V`q=BP@#O_|-%v>900uK~#m&3%bWbf?%3>0roFK+SaRy>hC zH6Wrr_jPh(nS&jlEkzxP0u=}gY$0lOS>SQ^RA2F|tg6$(tz5i{@tSg}!lxqURc`#L zKB_+yzi|F{Azb?2N52Tk&8UwLkzVFd3p?>UZV}_9vr^kXe+!OnOB7wy5x5ngv>+QJ zerAHdHvUiv>?{lh_PKth>Cm->Q^Kv8Xjr}ySxSMyWK zx?mG|H1BU07X9mm2ma~u!9R5S{e_LlRP(#HbtsB?_QB<7uM?=#BVXRo)N&EA=VQIJ zo=*gayt@-gpqeRV`+g2aAGabTrg8);F($)U41G_4a724e&$9B@S(3<%rMAt)zF3*F zRmlUsdM6ajU)ej|ztk{M`&v2k0NLTjT~LL^`QMxMAq>Q(t?I0&dO-|i_}d%_ygM>R z)ZPI~=xCB@7}A`>2gQ#${)XR`I-uls!BBwfn#WOW?ff&L83DFn6<8~U*q`uJQ#@g) zg9t}>2U6Fh&sUPA!Vn(Z=|v}ITR)khJ#IA=MGL!HRbMf=Gx6m5i>sJr`u}^*Z@o?%zA?QFM#fgNM3GcHFm&qY-M28(ec#xu{dX};mH>{?1HElEK!CTVzu9}`V zJ7--itc3ekf|$bTY`!TX^&BXb-MEKv%epZQFo!nki4UdIx=UNWOwCz0J#2{4exw*= z>>})tSD|qAzKO+omG$HQu}=8^3ev13cNKw(by&_I=+uICt1~4~Gdzi!KVXEJ8E`<2 zo>rL*79xFlU3k_jXwQQyUL!bL+(<2-D^I4{7h}=X8;7%xX|WPMp*tILzUIs#XVHw^ zpkVY6ND7MZbYbcS`-P+?2dlw5hkC=7DIEJ7%{_|D*Q7aSq8_J8dFlEl{=+)OH<=-3CSO(R2f*xXk`%$DeUeGm7ZOs>1(S8VXo=xgY41l^yCVM;4gG1DutVNx&bq3W+| 
zi8f|_y2iGaX>%4$Z<0tofs!8{*&pM-=5avWg5#c}7rObvElhOy7S?kFDLqGueYhaU z`MG1TY0K!8(nr*@2d$bGd(*%4+@3Yhc-@}ZzTs=`4Pyh3^ArL-2jD1=?gE`4VF4_+ zvq^c}i-2fzQj_y(y>7Ji{bn>f=X&PM8`GWBK5=B{x5-wCL0g`DI*dHUeg`^SkYf{c z0;F3M7Q)KVyHgOH(6V{>(Jj+*iZ>zPOpKA_Ysa`3HT-v*7ObtF9eh#Li{?b)vdhbY zW7*r~Gdr8MLz&bWGFb4!Oh}CyD`a9kGsM-a2s%+^GvsU+o$1)A`oQ~}YFEs)11f$N zr)&E-sXTOl8XHT+viUi0F%+4|e)hg4Y3yBNns|C^qsZ#H{>x`R`o1h=2q;W-kRleUVX3Mb^Y#h|9BgJclNMS<(aAu0;GAWj-i41fWof;Fpv~`2+N5P`~I-+u?ERN9u&>6OA`bh*U<+9A5_MFeYidDo^O`D|HIogSG1?f<|VGon;a@0EEOmf+)x1! zlxP!UdN)D5OPWc49z2|j8J(I(qJ2XGif3jEd^`$UBPAR98~Q(0ekc_B^!_y43QNwK zSEnLk5MID@RcS}YH8kBxq&4&ewH?Mo*Nflw>?u7}e0%-~zd*v>@{GP^gXqVloSsB0 z0fap0sLKPPdJ)vinqtAaeJH$22R6ImY7M{0&lbg{f3R$xPe&CYfA*Ycq#LMgKPgL3%lNWYA7$&yE9!p*TSONN7F#8aYaEV*jS(@YPW*?`0 zeF>e^P}``iKr6m$tBbQXeZq4^WTr#^%6+^A?=|rzcnJbz)-}-UmNZE=@2dsUOvBP< zx09w3uzM_qnNFwcni>)`B}|JQ=H^!?0*7SG@AluBeJyx0a97Zi6yq()P#O;0H=xy- z(q**V-g+!gm?o8RgPNLxuRu`brE7zIHkH!9Wb2_YDm9yJArI?IJtk9xWQE0 zH-;x>7G9l~#S~tnD?tWSLOV%-t?V_g7}<5PZCuY>X#~P{Nwc55@t_k6bGofXiK4-~ ze_h$U`>AYiOH$1dSOnIFRD)wm*0OnB81BJaOAF|BQhc=|lp?Np~=G~5vsGGW3 zTiw(I`MdkNzgm4)^TSO3S<@7Uzd|o#u5e)IzS1Si)dEmRrh-U8xZ@%Lh7w1~qcZwz za=2q!3>Nt>fPe{gciS6T3HF@^jlwdQJ$bEG*S^gyZCNW+PRnSWhmWpM1i@UHV5={) z2XHdrsJfF9|A9%C`!dbm%*vQwR+VuWydc9n8JDmedfF{sIk~3BNGe}vx0BT)&h3o$ zwzE`JH>e+2#R_9&y(V%t9#s%~LW_T#5>@W07{~X^pLqqiU%vk^u)by>&c1?rhmJWp z0LrXXma-n{YcL-`=cq*s0S*kT_;;*`wv6|t#9U*v;y}^BIebIShnjea)8$hG`^as= zdA9$+Lf}-NhKSpcKM=9cshN+8f_s?$3w`6;iOHPTPU$=h!`@8)o|$r3Q`}9sV(R_P zcHdjw2{3lVzqRsr0E2)FbZ`+qO#;kvodhxHXE$7=%BQ$FdmEHAne8LKSX^E-9aj!k zh&}PH?D44AK7~gqUH>9vvx7vdFoUE$YWgBjR$u->=vk1fe~^|p`$&#zvW0(uztWHN zkNf-&3V~rf{{k9)fB0YHnpEv(y~cVWr-$gKP`E$sI{i7VGL4+PYf5(R$D}DN?VU-A znCr5+DlD)^q$N4&Xa=gkm;$_N?p!w59l?`Q%#)yole-%(zL}PAG3Xxl-LD;!)3dR* zD=i07&U@cbd9^D7QDd*lWUd2cS%IYn3{kcQ0vK>6w>)e_BQD!593Iz$zt`FQ3_;XC z{CxU~e@Cz-_q&tB9SkuLtolZf=%Z&Pv}txN=wxK@dFO;z&eD6ce2?x6mA%USd(sfg z>B{E)8$SuP!@h9<#f4k%SI|sl9dlgA`YCpRJt`VOeMoAr`jyHT{904SSaO~(bRn10 zf0p)EN#h=(N#`MmtCD=h+1Ng|fG9A~M}q?P7kMt4O_z5cvDSQVPr2Qt?w?tYyYAfxQkcRS zz7$+{PJn9Z*T7v!!gRqK_B=U>p1!`A700|i!Pj3uSM?Hf#NsP&C+Tv0v{G8i zfo<LeYgnX!0r#stFHAU9UcTJ@`c>re{5P{1lN#9F%TJ83dvpHZMD|hM z4d05ZrU44Qg=C2X``9PX{Xej@tLo#m7QWVoCrUn^HzJ?wbN&U&CvRLw?yd>R5~!}a zQVql4In>3*-2w!ng+jObQWe1`->@O7SF-E@6QJpUu&Yyc+gWl z)R<=urZV(De?Oz|J9|1~@afG{-#CHeDUQ;`d~TCw73rOHU*cjFg_##ZIiV!6AHY@e zJv3O}lNIN7oE6#s^|*e(*y)T<&u{8~`(dE+J?FpXPW%_T>;G0j{Ad5Ix`dpWq|;b9 z0{I2JHsuZDD*H>8&E4Q2i}#U(2S>>a^Kawh1HK773Tg>X?*_9TTz%X9t|vSTl?f{6 zanPYy95_8FR$|rP^i+#Q7#!2|Ccm!OD|UvG))U~~dSQ?|TR8Y@f}PN_w9D!6<(cXT zJ-~$$3PugE;tAvm=YurONDw4@&on4pYZI?a`Lk(GlY4rqwzV)l#5qje_kMur>kn48 zQk;DKl217{sg zHolmvnklyU$Lw4^z(0WA!3Gk1P;$Y=3qkA2wCjuRk1E@`DxjcF>E+_$*>zLB+%hxX z9*7o?!|QvePrl+FJfy!Jh|!x#%7eMff}cOZ$^7u%|qH2J)o<;FLb zN4Lcrf?F83O%>8lRI3`@SlKwnCBmaX7>SKQs1wL|R6VH&4$!DfQSU_+N+{L7>&jS* z`2u#mWc}uZVy3qOF}Bq6CPnS88CP4dcXl>wMAB{Yh%eXY2~i$&_0r%Wa=u zN9eoLRMuS?hl0Dv>T&CJi&b&*a%1{3tG5oiFXSfc^J>4>i`A6O^7?SgDalMjGSaj> zH3|L>x9-L`AEry)>_h>mMvK-Wjz9PUSN-rWikW#sKL4rxi?-Kv4R7mTc$_9GOp&oN zKCilhgH%DGCpWMGB5$#~^c7fDzu^2A7iaSJ{x-);`}TC%7e=x-bnmzxEsPcuu;p(_ zOHutUIF(AzLjpReOAv}-=3Q_7XZP=Jq=4t2g;5c#_W9rM%8L%qMQT6dyLoCD`SRsH zR057_ftxa0itF4IV2T6s*{se#ph$+P=#_a^9v4)6fwo#d@-kVWRyU~q6%1xH&|X;2 zddwDM9%N;OSuswty92EeTuKw`ei_Iyrj{$0x@)PKuq~Q+;H9IUpYU^&dq4JON`3fK zYf8gKZPQzw_^fJM&wBeb3n683?l*a7G$wQfI2NkE5Y!1Spi*0PvLkgSi{1o{Fb+V9 zUBt}9GNvTKSD()Q<8Y^bOwO;kZq7p??M0~vUs!usVbDv#6=RTdy#$BUS9+@+HLa}M z(JrizecR#|=PpRn1iM)Xi+oZ~Q+^)*ACz(nkP-OEGp7b*EnK&jG!$=mPVMUZQv}%n z>uQRXrpG~DBdb~@?l2;aU%=Lfg^AMIVrZHR 
zf*sgBkaxGURPyUDb?0=9@eM6_W05_=NZDJ4v&v77&w-yAo1AW{uPXe0_5rcCIe`D! zo2HG-kU7_6$??3$SI>Pc9NEr;cc1mK|NJP3fBVJKgX=RNYI>JW_cw_Yx(^R?4@ltN zX|5xS*~0@2dr#6{=5b%zah3EHwur}rDaEm*pO=PBRUMDUdCDKU?XFaP%|LH5D|nO5 zs|Duz8OozsAd~YEkR-X)rsw*sOyf51uiX1ezqumsyzrgY&W3HK+11XseaA{d7MjAE z(aPiMlGYD?N_-gF|NiiqS%tZl)UaE1ekaw>ehIGdTS+9aZ zUJ=eyJSN(dOGu$l*1zP&CCeBq%Lo`JOaNB(nBXx49RlDI20){xhZ(Zo=nDm-di4*O zd{Hd@At8Knrf+|zQbviLl9lYy=3|@@u&Q@>+JDVT+Ea<);U#81zS~kRZB5mh z9*TkCZz{}^+vUnn55mGniZLItHS9F1JaYQJ85FwkU0>Fm&*9_3)6t6Fr!-x(o40xX zb+i?WreNs^}mM`AmjkkjJor#e^ z9OvZ>HrVr^5c(^s6Nu`>RT~;UXSqk`XoQBH8QOz_V z)vS(9=xF7$B^GiaDDEU;9~|aw_IlRv?SS9reMMD5F3%&!!sp0T&L@2xiAI)2z>DM6dVQuw@;6#)3`_8$4~k_zzC?0y8S4y2jM`8aA^H|87- z-rd|RllXB4MRV$O^2$go%T~FOdBbEpQ`}$L#Yo)PA__2X4gktd&I-&Y9M>;kh67oe z8fHq<_|dz#6*$Rs2-FO)>ly!)QGV~UIH%gdZp$QC$xDz43YYosUcUQ(nK#8ol%}$X zjL;W=xNV>}G^8zlSnteB`;Va|KXks7pQ2CD_=l8IW?|fm7O;<}UkVgo8^cp%EO0jC z8;W>*U2;)=$O~gx|CeeT8{J3WMwlu^a32Cxa?p_mrraf3nBjX+kws3?m%}CygsPU$ z`ZynIz-L^oad;hAvnpX(cB()@mXaMeoe1mKP+Z%YW}vVg=MiWubOuMyZe)S5PHtva zn+U{TG+AL%}?KpYc(*6?`gRbU$X9tX3N{fR{N&Z3kt9zvgI z=mlW#EYo4aqPPTpj(N^KIEz?g)OR=1p-*JmwwQGcpJHXzS(^MFxzIwvzmN-fIW!D00UQUc7LjTXdAxMRc=_z3NO&}%C zVZB3D6OlwX`F9M{SiQfdb!o~N9i|g-B-HntV{aehIA_R;I5Gvq9KdAa8tLZ2QZoiAW~F}u67No?Dnu>@zz!NKi2+{(SU zoTo|Qrlxl?OIZ*cFD@LTUKwH&oCzisO-?M;&xWQOezwR57zQ;Z|=W2nMNyLkPU>fpD;PEi9WLPkDaw z#p*t-m^|w<>g+LmQF;A{&{tNiL6D~lC?@|i>R2xpk}8c;i@?5i5@*(cp~tc0AE{Ht z^ueZj$eeNAh`@zW%(B(1u)g^~ce)l|(aMP4QkoTTX||<3yOC-A0X~*jFn7VZqnzQa=V9Cx!CMR?n)%%LFKP9{EHs*rxZ6%iIMT{&->%>xF^(zr7!XxX zh#3_!fho3(f|QHbOR2gR5N=U5Rbch^S>Es~t6Qc){>6q^wXA#Q_4B$8!IwVuJCX=J~5++>Z;^6UJq#%iX1idvR2RQp3=Iu?E5~Nw`#`qK7P%) zn(B!@{yx@ZC0#c)%bm;bJ5!vYfEfiYR)7pC;hHacdAXZ|V2&Kuwld|}Tc;Dp<_Daj?BzRi?2cmn|vBsb?>w zZI8wFs`?(9ADqpQU1+y6*(=wmPp}3zZxEz%s^SRy!p<-rgn6=Z@v6w_$sV1NnHgje zlrWDL?($Wuk_{5E$Ftoxcj;s;+Z)IngY~~QnQjB6eUbivP=P=$m_NXZ#CqT$h2q}L zM@^HFuCxSFaDfS}x5)Y2k-G3x-g|0CTe_3UX&Qy~yWLan?b@Tef0ywGEWo?w5F|1d z`am_08;C~o=7zrw^Qdby*R`e%J!YCM+UjZDjn`70@6VKuse8K%7ea16_RZ{4l9Ve9 zKGr}18MoalQ1xr}7=a(*0*I6lU1fqvBw>sq^l+fZ(&KtihM4m2^8%{%Q)Cc&ZpzV?Tx z=xxndz6wLPlAS?@%>piZ??Y+o95?0vc$PEyy7kCcEVvde6>nq@pW{t%d)< z+D0CH@Y%R*@+#@Vq7E!AsX(BYkTtaDuiOSly)}p)1~pQv=$Wh}j1tp^Rk)p7VF(oX z=EiYC!;904ihY z1XfBIKQ)o=OCxnu$lPF+>8lOg^=XlFt>}H=i!e8fjhvuwS_azE?h1d9_fiPCu3ckP zqQgFp8NpGn09uZ|MFLZL39P#(HLjBq!d48^qiy%rmIvSQwvM~uAerHiWG0T!y`7Z{ z$$0I#74v-Gkv=Nw`V8Y6K0bt^m$SaC#!GG2MTL zC=NjjlQbu$PJNDbb`kFtVe`<3pM-{v*&OeFoU(o)z30L{2`;QImU@XarOI$%?}d9a ztRNe@LL&V(75)gnR#5ZS*>)5pk6KAP!^ItqS(n>lTpOzxO zo3Z)S$(zLDKGZw+cszVsX5n{qo9f!KcZeqQ;F79VxZB&+$F@V_E&Aa#xgT)-`66FR zS))8Zj{`2%uc_;g!0MHNIll?J0##FcNsE*1XntsEVW`LCKt4i-|H?rJOXNZLTO*a)dV>^aXVjFc;arPq6?`y-q*055Lhj-7f zR>=zi)GbFZiR)Q2{&zDwc_oy0C-$$UKArpw-VCl<9s=*)T?HLRwi6x$U4ukVBNtWH zi}j<*?42PxfZUI5T13ZljhyS7nrCS3_)32+5>?%KZJ6wGwXk6goxm1j0|irE#gyY* z(ws5tfbXgsdJGt%j@y3Em+(bj5twN5P5x@HUNOhq>XPj|Rg-~Cn?G^^ow_{xG8QKP zjbVuG@J5g;B<7DogG2&;+>Y0{! 
z>5%u=_ummzILQ3Fbe}h$K_|h>UN^+Aa#Pi8fhWSzhK<#&&~eB^-X zvu!>fKC`DOAuj3DuN&^4O&%1j)f`wyP_KY^VntTZ5HRK*0sdxAItOZcpdBvgZtlhC zc0NfHOsJ~!FSW~1e(QO6sAR_B)AHSHQG2t!Mm2g#4g>M+h!%T*7SqSFa4lL4Q8kvuh_K`PFZM6DKecRl6bHg;b(U zCp}*^l^#d4&R#67YB4Ku4i3i;>a8wz8T=il!I6%{I~v6O)X+z1-IXz##XbDr#}oma z^8YHX`9JhLAqVhqxYAcyZ?QL#Q<9xRStYvQ9 z{IXEEMMV5M24zua+Hw2a(`6DphYQo1@_CM81qyOt z0Gmus!BPXqGi%9JVodOi3j!AmdB6@Ey3dyf64S%FP9bkUjVV9L(fXw#; znGfy^nS?#R@#KA=)5^|zD=V!Kpzzgo*5%ww>k@Uk1F3txMEjQUN&xpUeJe!qT5f+h z{i@CBb_K$;F`bw7h~N&Y+XUBXh3wCWTIZx3Oz!P6JdUApfMW`%rbpU5J_cbQda;;y?}miUy1} zJfEq<&s!sR*~mX<YgAXFpgzCLbE_6lvh=E2X-O)Jv-F zZRGSH5Wi}R%`K)#0uvFKXn<0^fFMg+fHOtuU6xemJgidGW$H@QgN3(i4MK)lKmGhZ ziCQ?>EI*l16F!>yT!;@o44}b%ppy(Ih`CR$TfK48ws*9)0TBU8Z{U!2&Bx-^4bpbc ztyWHU6Wjm=66GBgSy-yM;1u9mS}6{5hGJ+A3+#BB!=i7`+(t92tjhCL)3axJtz(%J z_91reb_I`(c^W;YcHMry@!>{0_s@75f?ftTuJo_pOw>lm>7Vf?iD~oikx~fJ+=fK0 zzbtvC2x}O22U(AxRXj{D$aTXwZxPC>Ae|MqfDX9CkvVG>pgMe9Iy^NFbIjiEV^Fcv z6sLF`sNj35&34rIp;00T&Z?floWg3^OelZ3pPd3vJd!JKqH@ILaYJ#L+{XcBx8Md zN~7aj4w4ZugJT;363^rc6!jjO@@-Ek!-y&01jTSimb+sx@*Y7? zJTeuu8ei#1+%@?U60$Scz4aa*wj{n?%oI^!3)Hcy*dk#;!x)fU@3^FA)TR%4R?Zu{ z22_wP?mJub*ge3oMXNoH^_r+)hLr8!@)r`HoJ&?f77Dn}yf zwS%#{OYzi&Ck_q{-LG5|4i|8MwnDHyfHN9|Dm16>x1(ygnc_1bS5B?KL-;(+#XZTz z(EI83MA_rB^HLR+#FJNHqK?FmTG&xjnR*zEl>jytoWYK90zR&HGiem_5l1zaF?+n8 zzl#6fV(D3&XKXjA!g%u_Q6&L&NJ6*%O#Ft@{s&wBvCK;}fWyGC1(aFQY{k_-pdej7 z%!Ha!96Wd*T@0-tv(_g)a>Lp)n<^TUmpCHOM~prtiTen%!0b9|L7^CSnnc%Q#kGhs z8~{kEl4%Y#zO-A*YNC9)I9OdhreOB?!OStU5KD!colnINwvs;us(Iin!Qft8tzZPf zGZj+O>G`=hsIQugY;V$QQy;19S2cGJC|q1NihrzpseV=MgloCq9QFJj98-wE=Hp%< zK&rpj3E?!(>tAkBNvTY!0DnkSET>4dn^qnoq9C1!tXU$9raCd>7?9gSke^_V)ly>s?FUX|TSk+s z=d6g-%`pk@#BcjSXu00HK^X3Sj&>eCgkXx7u?7A>twA(JYX}@|u6hZ3owx_3`O{2) zthXFxzCgjv-dum;oAbPk&&utL9<}tOW43Lp%w9lk0G4bTB^;&&?`;)9r|8(=J{A=jju zjBdJ$bpBa>xuvILF0(6@e%kk|n}C3F<{8Q5B%42hSGjirF&#K+U=UNxfo=~Kpvmx= zcAXwg9{V{yb!(ZK)7qesk7uXO^pRMP^f|X2#rJrd$Dh6hhE#_yu(RMCZD*JotRf75 zu=mpM4s&~TbZ49TyF|-T^sQATt$hnmx6%*xo5}!8P0g`=g@OykY{AoD-2<6g)Zfv1 z;EL^*f!qp+tM=d6)xbO*N-V55knuXSIJZ2y@y)6e|1QzaCFFSPgvqmb;mW@jY}65e z{zV9dU0@~aA7+1Qq72M-QD?fXb7J%1X}gObD`Y1f`Hu6fd7lNJf0p_X_5({^CqYM2 zDFmi#>mLEjM3^h!81;De9o%sbG*bk=0D^dYAgC5u8s>wqo2;8N+S=&r zkf6gJMK_wj_@)fbp$Z4F*eqjtg1nvf#cqAWI_Q!#Ll*!)ah)>}Q_ zrH?H-G)}9~P>M?Lsakwc;2ol;9a(aCCoP(&By#@4o-?{9_(4s93RB`Yo6m*;7>8{z z%FF{T(3|7b&6pPJ-%S>Ahq8q5|gu?3x2@$A7;rUsaQBxF7MtR^Z2&m(!-FfCdcHIH(3<_+J@Wl_lH5b z(|xMul4KOt104yX{6DE1XuOBM*2sEYtNxWp^VVOydZE=Lk?;5E3K^`L(q_P;pz016 ziP351Sl1?yHwiN`Y^k4Y-Yo9*-!Gj)q&Itg z@2DSsT|bSedGIBxVWYTmU!pf>%Bn8s_wxv#15-d>3ml{d((Rh;y)$IN7->THs-ND$ z3ScA`HNURREvkrVi}RMv(vkQv;H7LRToNgzH>CCHw(@`o@{cW87BJ1Lm>$A(n;2k) zzy`k_75Qw;d2R_6Sb|q>|dT8H3o1 z1xH(=Av(-u$$Rjk~Rzo zuoO^D(9zDCV;Y!|@?aYaec|yFL1%|v=}aHgc$k$r;=6n+MBiO+k_tk|+-iU~089A| zK4lJ3E7>{eu!a+?kD@MJy3l>;Pt0thT1S2Cfr@QL=epPnqtgcfK=CDlJhlnKRuphh zb_+si8gc}l<3RGtQ_Rd_V>z+w0V zqv=wtDUX$9Iugb)JNXx^-cf^)Z!!2+iJx~10stWdDC&A%ClPVPPa?7*HkC7!0@Yyx zJ-P5$)EL?)-~iwx0YGAII*4b){uOvuVII(s+kLvP6`uEjlk(W2;n>L{r?c}WEPILf zKU<@3=W_rKyMcDFGAK<5P+tNIYCp30s_{)KgeZ}M6)hr)iZ-Q^^9@SS}hsSm$|*n5Qr7^0phb593teUgEUks_8S1K^ zRC67@>3xXcotQgmBvNC%(pSHqjxS7GHsWw$K>%d`?+Ej(!MQCXQO)2H2&yFr;P;S( z37#IYbqlG{fqw%2K=*D8)|ZBse7su^FdAoeDqb?Qbi0zS^oV~-%#SuJG`7ocs+xY`EWg*F)tcgEYoE4j6`FxRv`QN&)4rx=%P49asxzM9d;7f`> z+3_INfhlc<99`GW<N_mS*hkjhp_M-yX+L$)jg(q9OorGs=flemjMu)-hT_8YwA9~ zqIS^{wFsFOf6;R_j<$nep@+P+c!{>Fl#secUazEd*!jE@(u70nZ6MDy4X4`4_*Vv3 zIlPCs$^5R%DcqT;y%)dgV=U2r$`fu$rR_gI+KHvu$_{h?R;EqTCs?VVdg=&&14WQ_ zv6Ic$$fG1d>@D;5y!XNAqLPS-vP%^189&B1cz`VcW)-OhDcp%crgnxkCjNKSU%H8-)j@lNg7^IEMc&1+e3Ui*X-N 
zi5W*$8}aJ5HJ5#8&TC{ebox4*XOy<NLE}Q=q zn6t_tH&B*)FOTT@IU#TG@31Oc6#KALPaIP?wfCw?lTB0rL7;zWmy?cTOFv6up%^)6 z4#TX6NrRx@9VGGSp=H zPvA9G-FceM{^_17LIX&V`_E zO~$@pP?Kwsxy`I&PXTU(O7=+|rbsTi~!KaW>Dec&fb5CR>eR2heZGkQ?9Hi=bw*B8HUAlwG^`^>4G#`kZE(AD*A zza`fl@G(t$-p%=ZAE15W=zzgL=>ZCS#3P`IktFE!W(W2ivR!x34<191W^$CQ>MMQO z2#~s+zT9UsS|{WEUfQMZxkQ6qfy$vXFt-CGeQ>ZXrmQ%^olUUt9kvATA*t_dcD!nFq2=n}X*@Hq6ojzdmg=c;>86-^^nk1h3^|WwE8fFcVDz z`3;UQ%y$Vb5S(Q{UNjL9zH8v>BW?2w-pPa7K|-wDp%Dh#kW-ax3%U9ZKL%q)2?}9yF1lEI>UHR0?{vT_gAEv`jZbuH^b^(q#e{e z_b2i9{Q*eTC~MHsH-%6)#1yFBqV$fG8~VN?RoZ1L zqLo`Bx+^!V&$N6!-{X42k%KY9P-X)cixY7R9-$9sWdr{~UFrdX7xFulLgeo?y`64z z!d!2wVNmVMhff=nu^0P*_F!KikW1Km8^P5ES(c7I0zZfrWx?@5j{}@k$rDiv1r?Qv zt`F#mZ&CdEKeoSJ5ve#rJos~OmdWdE*Q2&MGl^sbsFRDaCe)2$I^g&ZlN)%_uDKue zIBxU-hk<`!0w~|wt&>Gf-VOWcrFR|nd_Jyp{nDWm`wLWlXZ=e!b6K&nz&i^OLM`ev zQjat#9wQfqq}r=JG)~f!32VZXiuu=?iuqAwA4Ol&)$?gqzF=Y}CL(x_0Yn}z0r-wn z_Ct#TMO$kKv6kyJ`_6Ec9?U^nUB8zkF%}mvIG8J*a#Cpal(bK5y7dhWO7*D-tUN{t ze6R`oGfs#=CMBcUe1c1G`QY@GrRjziS}^SrvHP>lRs6U#SAF;=rXTKa9C!xM*zZomY<8it7 zq3m>)ZW8YJGNC|q2-04mYxw(9*G!-~nvDfy98#L>jE%eM{gD;@`|^+W=ft^qjvI&Q zbN)%frw>T8;1B>Hab06Q!JGv(o4GBaVtbFAJ6LuASfO>vR|Gru5Pz#P(H$YMS<{Fx(kVUwLX+wingS)nDS zT8=Y6teXb&y44Tw{ghk2(3l!%r{dIqelK?vz+e(wNYiEQ2b#+GM(^sno3;;(;F`g8wEx`jE`3nxP$KgDwBgLFlQF&ijTix!)z z;MmDNINz^Vg{u#J3nvl_2YI}gN%v(lQ}2GpkPawo2!)kK9Ghvg83o6IgqXhGi37dv zFM{F$YF{t=G`09~041Qspc3#07H~a0RO+uy@@S0R5cL6kSj&Kmi1W$^yOiMq7!1n= z-;Ln}gf-M_$mxPb1jTbH^$|(9O^azs6DSMUMv1An*m%1Yd1?{+WXez2A~{%t@6H5h zDJbcRz?H8`vtF^@#dKI)_BO@{hcOT&M<$ z>|Uzevu`yXL8BqP-nn_mPx$n$m0Vqd?D& zUU1f)`V2JQ zee-lDZXF4Z`92m16y1c+DB7{Aujpp%Scx*H^YC5qolw zkgJNn&06|!0N(WPJ8gy07-~$eF<^_OMoGYV0XRmi#Z(yxyMP)fQI;9L+oMh)G1PO~(v8ItX)$rqg{!&As`>TE1qh(?#oC zij9aPSLT93^ofhhA(pTxAFr~{s^9nir?5-0i;MH0=gpcMB@Vy7wy|;H?c1bu&~EgM z(&Wx8*hAoP`Cy60Y=JARmI^>002DG`Tmz)I;PCDJq}~O-@N}mA2Z?a!^-4~=^W7i} zW9DdDe?9hrE%p+Y{2d1!wMq5;3~V+oKUVzh#&P|_Vb?>w;Ub5go4!xzx~8D(dr-=x zo0v(xBelOy*WoKJbtV;3XkdzjE`S_BDBT2{yrM4t%~a-vCnTuP-cf6gL|L3yX;+vS zy>Gm0ALmaKqn-Uv1m9xGI1&}?;`At+x0Z3n6IC0uPI|+4?(C@O56P{F$)<{*hehV+ zFYj@-X@`YR4XgRb7+%#U{!ai~OyGn6ODnbfwLdnYBeo?Y$45fNuEHcwriws z-_H-+{lu7hE}hm#A-jg&aoIiPNd1=6wP?(g1Yn}j37amQ5Cz$bn{Jw*xYlQX&0o13 zJ8#TkJx`8n965B)|Lcbe?-dctJL=%Fk9@A#9u&Mp(@zXpAc#zNl&lh?PdGT)&-T`I z=$$AvRmcPoSJ1fDnoGz;QH*296Rhe8nBx!zuu0q)+S{iCLII1%T^1qrZenV5z`e{r zJw>qj0g1(iW2K0-is|89I_s`@Ow}YU2;7r1Kt9~~rMfAhLII9BSJ(t@!KOP|dWm7A zDQFm=oEP1|6I7n#M1*Tuyyrf6{m@(uYzwB`0ywS~fc1Ks=>aNnkC-y{VmktvCrg4a z_T*@n-f7cF@9sM4&LLxskH1Shy;IOuYn^iG=!}%<^9bzy2R2${f;6PFXdL}(Q=X|e zUcNoYXWL-b%x{d$*Ufb(QI<)!y%V2vM)Bso@^S+?{s&_K%ZQc5)>ctK2*nV**Az31 z+Qkf7+7v>CX;4$6t4`{o`AtK$n|uMoAwGVZj934r8oBkZom+!6e+I_xJ6!c`$N_w6 zGl87(Fev&p%{ocXt&BjFmMJrHk`%S;eItG+?@hLX?bkbrho&b6+c#3-E0Z*^PbEGTaZ{-Q&gp0h(L&!S~*N`rhjFI*4iu@xy4 z;b0Vl;f!$!f7;MKy{gYMeB)f<$UW{ygC@>89q1cb* zC&e1#pDCqDK1lnZ7-)63M&g>$4FM0%?d1fvV*nD;?PK#E_axiuc@|(hUEh9~M56tR zXFS>+iI*bf8no~kg{u3KH9pY$KYvO&m86030ZX`w?dTYyuF?4*OB%q8yjje!Bv(yw zpJ(AQ2(O1LalUB3zPN?zF?{LG@w#wp;>5gn(YqSikA8aO-mr4j7ZO}0=YFrtfbU&r zMI%5zPn`+2Aaj3)N02F73f8%Gylr=w0kl-U*7P}o#D7xPNyU~*uP8oQ)Z$n9)uX^fP42g8yy7!A zsXdCKJC86MHeWU#l;@;haPRH`)=nNRkvL-byi$) zXjGR{=7r_oh#H=3{#Q3a?g%D~ilgu^=prTh7#E=iB+Z8y7hlIm60FYySI<2eVeq_o z>cLK5whB=tO0aF|8|giNfgh4!16aO?q5$}Xz&#DMA&6SN67t7$EdkMY_`=G&(cXM* z$}U&<)PvQCJB&+oDEv3~?%y1#St{TE_nU$KXQ8|Qqw%T`6$mw8>U@&|9Y)rRr?9J8 zwpHu;gj7N39GY#J0Z60!!{gZ50SPc*fkmlczsaFMl5f3W7Q0$tUbX(3kPOHygV>hi zGz7cQ724E}+sDx2D#Qt#~ ze+!xnoC9@K%TOMuqf!9Z)LhyPhLHf$>&HW*H{ReFpQlRH>Mz$dLvF=NYR3K2>aI5P z5e&37V$;SQ98%z(N13;%f(Ln7kM-f~DX5Ismzut+7=n&NxAzB&JkN%YE% 
z#7{&jE*CV3tRf;IJxw<6;1ZUIq&t9I%qwc5Mb`CkwqV_?J*BQ`+-7l!_vNyIUguKxqe5?@7dBgh}IF>M^6 z7@8yt{a#{$Qf7QO+_IsyY4iZ~YoKP3tcY5y@Vx-l)WfC?o-U$7yuAWEtnmp1`70Ju za87)g3L3|Rd|8fp($t48_8PUgtCgytd+4d?_kPtSR0NJ1fLae=*s$mGIsVou+yTo5 zrP@~0>R%E)X{TPhj4s5Y4w)(j`~1`%HR{<_{bZ)*YhFc?kl^GHq)K4EE}&VIIx{2CF6u^hrcA6w8E{6`t2%l;S(e!4jP4q@!<|L@-;5N)Wx6h5!fbYPg& z_%95xbS~O}Zko8Q1WlqD`(tqe;@71A4FWkCw-ryACa^#D#UXyp@q_6^c4GSv6FfB; zhw=}D^XJq9!{YjwQosN`;!2ZbWrY7X=H5IW%J<(NA4?=;-(zfrY$;1+84;31$xc&| zH3^Br+#)*(MJQ92kY%zjVX}@Ak}V?)k`kG*jA533m-p{`me2W~&pGFF&iD8D{{HZI z)I8?C=en=^T3*-d^<0o3|8>$8G6EWp0~w`Lz$%j==`%K&Gd4VDoBr{IJBwP{>jxei zuu_+udfoALP=jG}hdwyaKi5aBrTeqb+GfF@eb#3O4wAMi_uY^xKo5Va4hmh!9K> z12c0T+^ruN8FK1UPlx)p&#;FhB3=R6*6{TSinGOa?_kx7YxMb)%HPuQ)!V~&18}Sy zEj>+4*@ZrTV5i^|om2)#!yKG)D2^tOYfK3yog8rr{R8U>2?TbWpB96hrd)9cZaE4w zHY;i`$Vc+$Lz<=1QHF?TJQM>rhB3TGfgV6gWyIv$ud36;!NCgH4B7sHA*bcQD_mGX z0d?Ya_?jp^8}TbO8QM_g$KD0T@qb|3pcgtYI}8$>fmR1$h!SQ1K~WA_zUoJXz~t9O zDWN{{8P@ph45S_$i*<&*s=8t_jgCd!*6%Wx0HaK03Mc&tG6PmSYha~{wuNpuqbC#? z`*8W;DkS%n>n~}}b-C?ZLza*EmQ*_Yw6gc}sn2b_olQR^uRqV=P)G-3nuXSK|pg>ijfMBB&^m_)$ zDhVaBI#eERQQ{nKLah5J=bxtfZAGum&cK#xP$amNXs~qn787#=__0W+W;_|dx{yTY z2%fp(?ghqJlIbX>O(FF49hbre;Vs$ks_(w>X&37^y32n*N;lMGVq}BUkOkt)S|6DM zG!hlALVq%!AUC3?m2VV$=N#Xmicrhx{Moo^n-GD-eInrpyZyiziQ>0Cr zl3fmg%DJ;aNx5U`8!^t(qkXUP#N|^jb=dZ~dQ%5Av z3=&7Z=b%(%gF+Yd08!IDH8i^E%ju^xyBbq)B`|YKJbBQYB`{DpY|+{ferPB;`4P{? zZme+za>r6UQz-`zp-H|@^=rYHBz*r&e4(f4BL_oNyPMdi(IfqiFc;4@c3@xlZ%U=q z5TXL;1@O{=(+`RlP}GjoDg@f^sXmOm(g>rYtr{#s*VqF~I5tmUx~%@U(k&?HrcyNC zGR#sz(xN3C1PKMfP7FL4>@TouR}mmcUN!_t?wsGgl*gc{64-B{^g`$tt^>QHV~bhk zB&SgB1^}nVd!)sVFq0YeL_OWt-il3}nXR>%u%oxwEH=R$a77TkLTIlnPJ@EB{bg`+ zS3)jrA?G&1xb~yhX#%WnDot>!11{Be+&invYZ~4-qKOc2GUL<9{4~|B`s}03wZiwl z8je!O*7+n9CE_s~RTOn_;FN^gm9XA$ym33cdVG0%Uf-&FTAsAfUR#qsX@ae*Yj|q< z`9V`UL)qhE`(8H9M=V?Mp!&xSuKX_ufFM6%|I4DwFzBT|l<7&60&Av{?`Onv-jA6b z=aTWA?D1_6sC@ysmf0%Dc>f!KEsL5{z_;Uomm%ZARjL*=p-c>6WUDG~?8hPyKrC)8 ziD#mW@g5nC-FWYfeDQC^i3I?@>m(+a)>JCw18d<(%FoZdNhv+ElBKuRM4R`Wa!W_SkiDT^@J z?jHlM!F8&_34`xI-gg3E!0~&UCmp<-!;KVWQX*gr*$*A35<;#yCR{Uz(ZC)VbscJF zMO;Bp4w)vr`h{X4E~7Lk2{#hQrH+p0KMY$(*!e2Y2KTDNLl?(xd8Lsw(Ha!gbO(t3V;XlR8Lp#~9} z7>Rn4vdeUf!Gh^CMXE0#26dAIlgj+Gm%LXsG5Y(z`T9QUV03q0Z%9xRymHtyI*0KQ zY_RLSpAdvnBoPU)8qy6(H(Hp$_IHfvI^M$*bK{r4f2E5BIm-SKcX7Oo8T~hte~phu zwxj(59}Dn)1|&GZ85UmMeU;!UgEy|~1ogz8?0LmG{;w1(SB~ptFIc{Nxv30I^qOXo!$mb+Mmg&23LgOs_WAw8khjvu9MZ_9wr zOo!Twur+Zw?mNhbV4zjF)A&g=9&+Qx4=3cZQl<5(xeTAj{P~O!l5=LlE$w&3@{tho zvHdm?h-DHlt5Xig(fEAlt~V?vffkk%rK;xPT%FQMRwSMGhf2w|^Zj)rES#>_cnxlt z%YH3ADF2vmp_sfym1E!`KxbGPC+3aMBbUqOC3r-~ zBF*_ggM$3OFFgD=US{BXEQ^>4e{uw(hP}OvC%U)?b%+ECU~1|L3_K}f$w$0) z#UavPm!u?X3FRQ_J_o9*$fz{)#VvIn~9noYy8zQe-XSzhem%-dcG ze}n9r!K=>!aZ9l6G$ZOEnq|2CysNRb6-V6%T8~AX*QVq}%@b?Gd&(;W;h7poU^2<* z-{)Hf3D|bv*)mhJ)nUa=+fTVr{OZXphQqraZQ?#e(M7P)%XDvsAD=jEy09|{G>^UC2q`fcNiMT>?sEasce?Yar(c|O!Q)`uS z>ReB+U|L;mRNb9*_+{OL}KE52U^}B9CY4yeTAa@iumvsa#|;v)gquggCLp zi(tmPMNs7LLtELFR90vXl#c^Aa=hnGDg-ZX~uMfm0!+ z6_1(MLbGk1PTXX^`!MFhr7{gxMM>BT9zp?Quk$Q?-}J87SN;B%Ay-+em$>JyH{CHg zbipsb<5Qe`pqz(F_78Kp3?v8b+VEzIGlOfD;ud^eFpJlFMyM!r_OYrw-`@rNPLhjs<7RXij4zH-Zh#iA8j1al|q*JyO_f0Gjm^MezMvkxo$@64~3hR>jnZllq&R)pFAbaeWgTMpi zfYrnrHHamGGa?tG3p&7%7G2I zk!Ek$FdNnwVFV~i5odVgx(f&h9=H%kIH?@+aEie853C+rF+k>G^?px1+XC2AGjbgX z0V0Ac{uEXAWr!Mv8V9GQ-v*-L!v1ki9%#?Pa{eW|X}*@&o+b6=?;1vuuZHEDVy5Jh zLd7)7{4f{j&kc_5YS>3T1>EJ&s4p{b-0e?hb^5b~9`*{im^)paH-IN2mw&DZJ##S= zykD5^bv5}+;_o-d_IxQSqdq0!z)6ycW=Y&=Q#jV!pFAh#nF%V6jRTIi&ik$p&P!!i z0w}E~)De~|dax<_ETuC@t5aI#jbVRzk7O^k#3NfoA!UHw2KlpQHWjQn9N_}97Bftg 
zV;y+Yw3kb!#v#Ry4(*+f6mnU;s@f={7aEklzEJ9gUHn3mrdYfR6$hiyKV~Bd z;!f8H+D|h1<79|tW}j%ndwi2hv98&Ro{%WW zGc^*)9e$!TRCjjf{suRv`It~Tqt9yC&#&V5=>3Z|-GV;pQKC@?_@c~=4dDx6)DQ~V zoL&q0qZ8poC!UFM!ZQOU`m;8%&~c-rjpmG~MLD>#?I)Iyn69Z!13yK#-o zC()<{Fq;Mtb!K1(!`qNe=qU)lnW&8S5OHn2n}5kD^wG_FPRW|q@{+>x5I)g^DFuT( z60kF3h_9I$#E@C3Xu?}eoH#t@c9{D9=~W$Z7h!GE#5s3xoIE;ZeR}eZ&UKLCbX*}y z5P|?iSty8d%*a4&{TwLxvY)S~vgbHdK{a<#c5_|nlR+6dux@%T!fO?bh&tSE@C!s3 zV4J36Ei!XYr4}t)y$;nMGfmf1Ij6&Mpft`@^fr6)1}2~q>S*I9V(y0{d}q~Jy)$zR zo?LsYT3&wDA}rKie&wO!=mk^rTM@78-j z(15vKeCV0xAW#1SJ^I{J8H88Pr0;6u4b9w0M2NzV*W2^tJAgDQM94*mnovzB)5AUM zJ}9M;IvyVVSGiI-n*Gc{pbXJ;+MWK&`G&!-%+B|XH{ohlpEmy9Y3cvk=l0(*G@)Za zN0+F~*f&fy@J82Qas6)2pMZLmOLUiT);+Fzl8fqB;Vt2p$dw~5rbC(`7q8!movMu$ zm1aD-USA;yiI%G;mA}m2vh{)l@QnU)l{H^r1Z(yF@@1KW*V}8de^_0}qTyv3o5E&w8 zk05G4-lejm$^Nwvd+zre+u@h_7Ce-L&^u7qM8ESx5=7qV z@3viQ%E$Ge+phZ0Z8y@r{qAsoW0lG&7%F?nkgreBrt8!BmqYD#c4Dkjd!;wou-QQA zfx!*-rYINGn3Q@OC6!>|>NYFYRB|HcHnIFuWwuJWx#!7 ziY%0bAuOR6nlR-)Z7~N=nO1489n_fo;?RER*zm`IBR_lY1TRX)tzc*Ziwy4X;0HXR z_TzdXSApf;R%{&)U+6E3{VFuGIp5)kyMEsbOjlSh9Bh4dzV~C>HRlAFJR2+x*{EsF zo{IW}WetZ@Eogg4nB|`&HjAgX;Zj~sadDesD$m`TEL_jxEgW+|QPs}GoWrR1g9K~Y zeq~Y;gTogjz<;570ez7N&whzsh}sw0&^;|TUMDF$Z^Y+Y^QE=iuHjg_UNZmKz|_aL z6`$DDYnA*Tr|+dRzyW{;dOxZJysH*?wK^bA&QGUMO=veso=M>+Du3)8kb?95u=!F{ zTc5clBBD`qaP)a6N66F}PIkP#*W@E}`euHlo;}$gB$R+902xqSm#2tJ&ZQf0X0ObX z`_ZcFiiHXK)W1A(fE;UGO+%2{uiH#XEBl-mFoSJw(2 z$ZC;xXS&rT;#REF+L@c13D+{$9Mvm}TCxkhWTRB>W*o7usuY9;2I7{N{<5HAq3u9~ ztr>VYe-nx49XjTdlgbR=^2(;`Mc=}R3jcf;P3vyKQrfk-?_%Pz(C9Wl6XVVJ6h!nu zE`wylzAUmLpfcso`C6&lQZQu^ap%&;(aU~(dAvWYkR}Z~>q&BQk7jx14D}=hsa(3~ph9=X7{k%L7_O#^p+w4husJCDRrPzRXZ}}UIsN;z?|}@{EbU`wQZ-!H zJWTiW9pQWizP`*!r^AO!M1{zff=s0Gl@;p#aFC&oN@n!6$$)DS^m_IEln0Z&<4*2roe{qJg+Bwh z$e%3^^j20zN)A;6)WlvlWFq1XjwW6}Wkz+()UgB9h0hEp1=Dj`d5k7;Yb(b!vo4@G zSEyN|HlY8o^OsJ!+Jh~sgcd;dCfZY5ot)nVF4Sf;l&`T{^YO>jh5_k|K|x@o|!`gO(du z%!dIn43r#6JwaXXUOnGT-yBDUyhFioN_)%|3uotuqLjWYHRN#@zxKi9QQ$|n?g`?1 zP=Sti0b@B2L9|v+YDTZH?-^?#4))}HOLy^UkU9_+biHKi!(`s?rGf8a?0Jh>q4IGb zuU$4Au&`$bCrHq!Xj`lslA^G>A_MRYuaOdzT-;?P;$uueVP~l7jSWh1Xr7GbBV!2* zxfg*p#~yG;efT2E$OpNshgg5imn?u{6tp3qCn1-^NcmKo;c(r#A&;ACwRdF`$qH31 zu{**E#fhK_Humm*wCTH7gd=C)YVDO+mwN@6rb;hi6tei&3A^;w{c| ztNVS&PU^SC4td!NEBmR|)ec)q2sYO=24@{>RDpXlp{!3S?680}HG=@8Z#hu?e_+hv zLR7s_W{d3G>P$aQhQ!@oYzaOeC2A_NWOmu`8rNf9?6?>sDm0w-na>*|f=tDBaA^DNHb+4%Ms?@Txx zAegKuhfnA9QGJNL-w(ewPs!|`SupAOcCxvetzQ;PkZu0K;AYGB;qDW zp5#&bD;hYEvr5ooe#33X!c9sYQ^h273|z9#ohIXN4>WZN2Vf>KR#TQ$zfUBZz{k{S z0wH7=1)Sknhv?}v9RMknNcxy`E_Fq`vT#W_!sb%8{fd2!TT3#`S#DDi`I>XJtv9^VP{_d+J)0>-Yrk?JxXR^LH}1ISpWLGHTE=`?OYwB+vjBO&&d$S9ZkdP7`6At~U*U$h zk@f5}i6QD;utgt20c7AEsGU39yWHo_JvT8-xS~7%($FN^^r!N3SkEz)cW|wi(#2#Y z+wAI}jL6&8>}$E;Srfoee9^|ln83HdYvNWKy#0vK{GJx{HQ*ZJ5p6oEcA=%=p_tl! 
zs`3j$Y+cjw0oK5`=}B})`}GwtkWt|3P{o4#EU(9eSTfuc+<>s-A4Z=S@6nKn8MnW$agyyg{Gb>_1+O2|ORYVJ4g^Opt=NM>ZYXCV~$3ysvb$ZpMATk@}FA328b$2(KPo-sD+sB3|nx zj7X`I)E{Pc%&pB+-MaD7OF88|zvnEXYFy8-oa}L*5S3dnVX2GY`I{8}za9qv*}vf; zptDXRKw!6@(T=RL??U1SyL<%;+ILPj2;W&haB$$I67QGE`hCwVgXFz#1w{q)Ln<>w zFSBLjoE2|q(40L@sDT`r?uzW05e(Ly(y?fJEB0iiABPY2ox3N}+S&R(O2$k+z8f z(Vnad3Ml;sEFk;ofnWx9GTdypkt|+g&FA>5AB4rmus;uyjWSH1ySQ%tQ0MKnNQ1bAH7ae{Eolq*-wG zDKXEfo|TjN@i|+tWeB*=*lvTG(jH@Q%~{k3IMH@6x^s30w{m>Kcjwxf+dKkaUZ+@` z%{evDb6mji8;|z%xgH9u->u3Q@S+^4rhE$8q=uvryDBrHxni{Ux8Kc*LdUClVwdy+ zakBiC4-1d(+C@btj$qc4=-H4TL{Z$;E~K9Rg6f`WXnP(s4^1pOwmhkgI6E9ZWPEcl zTWtdfD_u33v#8K^x%#x)?+-HHea{0kX4NKUyYrsS00H z9*Rkx7e8T5t2Ui=73F=$XD9_cs;&PcV}iese^xlqTlmm=bvz4Bc=~vj4N3cDiI=)x z5v~Z}$$r>8rV3|Cr_2EF<=HJ)?eVNzU;l#s?Scl?aTsPNaAhj$WHj#L5|BUBlH?$K zp8pI&^jh-|5vgYb4x~qvuH0$6nd@uOa-D5DV&@;LJp3b7ivP)XmB0JrImtuxG=%#` zBx4vU(4tNdQ=2>HY>e@#&ph_kwAkBACdVYxdavA;&)0DdD%eTd&9GO{`1vk&Fx+bF zyFm~fI_Sh#J2R1Uv)KROb_$zo_sgp%Ob*nrzWeRPvU2u+(!RgBwj9`+6?9(SOB{PS zj}Z6K#+#-VI$MNAM1&o6o^+Ztj5U9i=e@5mz%o{ysjKR!8q^++D4829?cv$;Eq~x5G$5T z;|k>Egz@~8gwQ%P;Rc+PCYavc6*Bu@#dTu2pii%O|KPi8sBI?qD<72uOs5?C~!=b+b=5!o5|J2cZM-e{gFG2RrlqBlOrK|w+vWckz z3$9z4oCi)W)H?9?#G`{O$CA|yMBU&IS%B#y+px>`hE-{W$_(~*oyLK*V z)VIz$Z(S()SxLJoeZ}f#f0|^Mo9`{K2eL1*VG6wsq(9%RmAp45@k`>z<>~MESz(7) ziMEQQ-sFAFC)3Nz_ewu9Ri<18ujDlZauAY`8!`!0*7gA4MwH0qB`LA-+Cv!^M#9C4 z&V*A8w~HerC1k_y3JR!Ys}Vbu_XaE7d;y<|5TXH1&D~S?95xyvUhGpw041krXC#ak zQXP*w^mO@Z^|&|_b>jB0(uaW~P8JGohhhCkX-p>+Hry=vmn+jvtv)^eZ{-~?1@;N@X|BIH zZMaL9XGu5Lhm|U?r7}39sWvz&80*TYqcqR@CSm-`PlZFZ`Fr>KZ)Un0e0|ZY;^s{o zV|f(cKPk>88fjVK9I%&|Zp3eso-u%^XeWr6Oy}2YPan1W{7-`7LfzAF zqKir6a^_J{W-$2gGgw-Di8#hSu&J{>VGP@ZTT{&R7rWsTzTM3^oNvRT>^NA6kEWOH zoVjh~BxGAsSaozo6b+Au%^FzT|C&d3P9bLz-L1laxh_7wk~av{pXMAZe#@Xr ze!PqR5r*>RJ*NzJ!YY4z;&0ffE0G zRbBddmEh3(+yPgc>L;gT1q?s3oa{Xf0_|%c*E>T|He2olZ_AA$3g}?GZ^c96YwI2& z_$ZfuoA?=pXWe)ve95g~5YTsz0hKM1bC8&M$Q*{xEC>^t)P=|8-p&*cew+Wc{O~Eo6cuY{ zXE`(R%+qX|V#wtnG7~tiS)r7Gq&k+6j{wt+QC zy|*;JTwdC<7)EuDOTC^f(7Yfs)$~}sFL{v}J5)yA&5pGIVS4cVg^WYvGzsc~&}B;T zB>=RwZ;q*LzFAPLcW3=XiafHx*-+kf-uH%T?USjLPHFh!ZY>zBHc^$}p>5m1joO_I z)I!-iAW=O}!gKWwlx&zj&*{%~61kj$ZxXC0nraf##kxpBk84X99<+4eIf}oPf8HjT#jkX*soc852=ZzV*&Kpotdy+58_a^(PAVUD>Lo1?JVF`^8(F`#m zLN0tu*+aDn&5^K*M+s?1tqjKbVML~fkpwoSerbW*Wch+A_KA4~; z(TU$v<|F>#NUx537VN08f6JBp{=>SZT(9lFzu@_DR(TD>;HU#Vm`RTVuCNtz+&X02 z(M^RhJ#{XflbKm%U(BxuafxufKk(xF8TpGJE^P-^*`&O?=>pcP29Wj#SFeNk|C?|< zvaYx*Op?VO>q^FJ)du;97sj8GHTLv8Fl-`lB_`AMOoAwPo+8V}okRwbrVB8V_n>x( zO{^ev#m5PkxRGQ@MbuVqC^?P8XD0pRw{)SE*i|3KeSkal$0w2R(JN2_cyO4{4M)DB z;5tIC4+Tq}yh18Wi*!%CemwNL;k%ZLuTMtlDE7YKQKi3y{QUtDfuox!vr8|+XV^3k z>uIYKhUI*9&HWWJe}B_8<2U0~c#`s^M#x$uBb|u=bZ-DTaCH|Q9!Fevp**1;T57<@ zq{gXdogcY2qEb|yBUYGv7OfKPt6WMbka}>yp0o%a1BkTxAqY9>hEBtl@Lr-UOB0_QDI>%a7w5&VKO+ScIheV$TxuzW}wt__y?vRK)tMh z{u&ib-1f7FsnVFFJ-eaxE?D!x+lj!szulTiHP`v#=GBG3~X!nN`+;|e=}kAml2Z* zeZf=k1GBKzg(Zf7+-|24iTOwKWz1Z^$Cq?&dR~m|WNKi;#Z!m{KFk-MFP@BjPpEqV z8mRVd-{1u29Cua%=63IcExb+PnA7v}>rr>pCtT%iww-&CTDJKrhWpLMU{p{iOVv{) zV%~`_0?^liuk@tT#A$TUYabB_DXYU{1BddwwN1q)Z+PK+qHvf{*uYpq4%;)99}Zl@lLk=o`~W&M$}Rq7d@f}fvv@jZGf2?ixu zE*~olc??pHJ2NLQa z?&EmDW0Q}~fszo!JAiqTJDXRfo$@lY;WFuE%S#hhWZwTVFcoseMoM6EGI^ zXMi3KloV4}B<7se*^EiT^X(@q3#1eGw%8XB78|(`S*Gu+K6c#9oMmqVL=0{W3%vyB zmOemM0a+FS)G%Di|J$(M!CB?&%_BEuqAaq7q)x{$oAB4X)k~L+WIGyo2A1}FY)g16 zlQe}tQ{8qYPx~3rH8WU72d1 zZalX(p}#ElsR#Q~+)9d%D$kQ|XE+n;HC&VrBuAnMQ2R-UP=kJhr3oo?T|P)mT}GGt zgxo8RQJrnU){kubEPv`eup)jYdD~QCob~3{qsMVtJ5|U94G`V|gUPc9Hz0;Rh#O42 ztWB??*($``9^|K>25{$d^bM%i>z=VcdVkpUa7-L|wh+nwDhT<5gSwk+y9E)lfgOug 
zvMP~>d=a>>$day9p)Wd%)jqeeQun)QefIsql|sH_YMZ(&t8((pLh&%VTYgl)drZe5 zeulB%5Nup#C>-j@TT0M862KfYC`ne2xt7x3ym|4hhkP zLgjD<`zYD7k_eEl*{>C*3K@;Qbk3DDJD;ap=KK`-`#smtG5Pqgqv?t)um36MhUvg2 zV2J^M`M)>|UkScT6$%~h+G)9bbhvJWI5mkYm@>bj$BPtD;dvY8ATmxh+he2@z=9FluIvcVD_)+O*)yuJ5}`v z;ny@5iNLpEb+H7n|F0#c;;k6(tpdK{$>!1ORUC{ogjGP(dl zQ7e^x+&h?2ILdHB5IyRqD#6)hj}u9izER4>8?HU(o^CFBSXS+2QKpC6J6NDOVIKo@ zf;Zqr*faKSQFTJ67iUQ%5TZTY712 zqcLKb@n)Ffi)Vqp#2Rt~i0B?62x&7_Gs$X<1C;SOE1En*cKDly>raMIZI<1Ulk{)t zORDu(M`#srJZG8v(PVKLeHbhq*l0ixvZwK3=CBE>G@WuvjVsZ@rM|0?v_DnzPJ6;T z9@Sje@+*H}D9aK=LJVYbc`s=&s_jsynH32DZc;AyAwSi5*?q@d7%qOeH1=JDT5YU( z?!Bzk$HGL|QVB+%(m=9#2Qf2*C|UE!Ze>yt#j``)b-JePvgo(0WwPCo*6yx0m$PdI zy+gS^%E#RHaY)-Q;+Ew=e`kZ?5eBuNX)&3z;$VEZd@R>~MDL(`b7qkc`FU9J-tq|= zR?j>{FrZrbinfIEqceb2x07K4=^|=VX_8>Vv)-dW3-=8oFwI98<3h+sMJtgVPGLyA z>C)sG=l4>az!R{U!yOq;LfSIgL1cy8<-G}dcKZ@1$#+RV;(GX%-;2c6_1(SQZ*=`s zQ%dtkz1(y`VfThiGuu7cMu1%SYwBNt@c+H6J^at7|MLkhN(f`01=W*+WN>U!d4WPu zMfka@L4_F2Qn+R9qetpm=DkrJU*^A0s(wr>j|se}KbWk9`L4_0qSKSZolxT8p!68^ ziWV>vHit>dc`|@5oOUT$e^nK=P&9(k;n}8+Fr~t{B(B}H6IuX4Krz@W{&KPwK(RsX zGALFL0CJet9<2zjF#W9EvyJgQc{Ax|7eCv!(|1RfD=!?-zw03PnQHX%<}7?dmwbb$ zvph-wMEe%L{E45ujbNLuPq^@1?wMw#xaneAc1Nh%~L$yvc zE^ze()Xs)tg4&&ciC3~FR1=Dr*G{_OHq>&yDSaW+`PFH)wCsJR`xdMf zKSp?@?nm{`?3y;-&l4hv#JAX9FLt@?a;Rvo!erC(yy^dYX|b*6CGUN& zOpRb13K<&_|8U}FH-RHW(}CI-vEnUea~LifEB$^8>-3$Xc}tY*9beny-UlCW>zp?- z)4lBcevL=siIsc+y?1<@iW(MxI&lzzh-M9!uj$^3MR95+hwRS|y%dTXu=H5`c)4rI zhEw}Ewf$}nHw+2{46k<>ZpKG=jeR7-6R}s(Rp;F_e7N@b{!c|?JrmML4NkVia5gP{ z>`FJ+TGjmfrP_aQw*S*-V$D6Ng`r7jn!)}UuX?rZO#<(fFQ%Kx9y$DD z>1_2I>8Mkp7=i#}9|o*dZuDY^kcyBx2nJdW7%*ZtS8oFdrS~!Y-`LqOLGOW#n=5Su z)y6^mh9_?I#TBOsQ~wTR}>E`-&b)M4InC_z`?@@oU2wT z%Ej%kMq2dbh=W~&JRQ+pc|w_npN-D5c(2G>U{19}c7Pl>1OW3S z3|XEsfY`0knyl7`hBy&7hkpThQ%tROh?A7l+`4503;)Ci9nOBeILqW|C($55s1XDL z#xoSP;`US^KHM=#F>Nta4Ob$ib`n`CLqk4ps})!?jc86S-p>4p+6g3T;J=lcA7I(~ z@OR8-c=8aQP#ji@A>~MEgQw;pkn#LW48@T;%Y7!pej78(&-uUM$g1*5C^gH<@jo#0 zjIDJ4&WItT@%SJpwrq8-K1B2Q={fV@knwQhn}ee?%aWxWdGtgk-ji&a6lk-2&s4Rs ziF#=F+N`bW)$O()m=}NHoc_Ja@c*^XFtb(+P7u(s5l}dYhp`v+36KI1oB+m`7TOk; zG$>+_CO@WL6!G?An^}`}{?JWUOZl~4Rs{`vdzBggOaq*8M?Ne@XY1{SJ}B!#Euw!D#XO z1KYUxiG3Rg(0$lpRLojY)ct!PK`;JoksZIS*vg6`d_}caIHIUodwkD%QI=R%DMBI@ zI2^Ko9$uq?++5UvNhM=yramjB4#`D9bnV29cAb2hqNauBn{KHfmzChdaYD_OP3JzQ zwsy)&?{f%O{rB7SmBvl9de9~jE<()eT!sDoEhH~fYV=3P?LGy z-Dgp&IKg$ieKtXqu@_{Oa)3H`0+#R=PP7vurmon`C%yH>O1v)(&Fac2H$^mjEviNN zPEM#K4#}m}AJ1|#V?BKKr9%gte29dI(}YARzShLl_^Tq~s4_p*=N~OUpYb$4kQkBe zkoX5CYXuC|X#5z)J~7Zbd3xFSd~t_dMfa>e`=zj%y)@&YCwa$bRqRCT*i+6ryF8uz zkjF7%a$JqqoKH3qQ9F}>CVHZ2{8rrzHfTSEH}q|XQ!_3;*w@ct>izfqDJmiX7Mzu+ zwfvqlKR(=r0}RW4EFmoqRF$csffe{^e4YrW3Hs0OEWIglC?eTEFzOEY`Q7Db(2<1q z)X!QiBpA~K6TyDVl6sUe0>PjgP;`zxZf2sOi;PkwzIs#K+gjtc);u46%yia0Hgt_O z?QEGWyam8F2WA+YKfz#GVDv&n3#%_c(bTd*I@-C+x(c& z?#){CspF+X6RW3mM%wxoO#)KUL}BcR?eJq4(u9FmS5M&I93S z0Yi8buw2AaWE7S`Xv7*#vQG~8BZ9NCJV~v1Uf04|B0Kz)RzbXUUC1L9PG4XDXt_o; zmxd`9w`UAIz-0?D%)lysYY`YpTakCi`sB#bV3^*f$OckpP{gphuI2_z;j|4rVML%pF$r;*uwS}ae zUDS0`i(A1u>R^=fSb@7`=%S1Rm@zaIVzTA~p|qz!(g^LDq61HpJb3s8TgVBwkzEouVX{@3!C?%3lWPoMA^l_i zzyuUXI`=U&jj(*jpC_N><6d{FPW6{#u}u#uT1fem8Le02GY{*%5eZ!~op~ab1HBVo4KIcx+y+Ro6_mVT21&qtG>(7$)Xy2f4K)(ZU zRcIICy5|B>Xb{Yo>vWXWN`$J=ra$*mNj<*j*@0On-&hh;)Z#x$Z+2<8# z!l?}IcKSomSV{kRdzv7+%Rq(XkvcCko75WUda}b8zWiQoLtm~h@^R+ScaFn&-1qz1 z=3g&WhBAZ@gg7M8iU(jfFY!zco~J)CIO=<{GCJS&2R^fK%SqWV>zak)Xxgd9L@#nW z>c-3CKTM#CRhpm(YXz=`+USSfL5K8Lf+_8(oFw9Rm6$(DQbHfPrLw_9zZj`z15-jcV5-;Y%G z7HUCG!9;(WASYv=4OJH0Ce&okoc?tF z&Br4{%BOo@wB%ix+^GUu_&jk$dVNFiRiXenys>roN$Lcqjvk@v8NBRXU02h1$)N8J zu$$Z!v55iQzqnM$y~RO^3E& 
z@se8S`+I$}xqKI|qpTex7sD7_p&kf;$n!d_aKD@qet)x}6*FT+ISxpL*4dYRR9XC{hg$S+vgQB$PO{CZ|ss(g9u()8?| z7%_=U2ewv_(*SA-XK?@JpPo5Wn@ula@P*cNw;d!gd)qr*;{BE3P*@-Pi#8 zQ9sK>ixsBa1qD{<7?$X+Lwk zL{2Mu4dSMsul*2BZccx77Q^$SGP5BolXTWiry)zgSm`0h*(0%vkMxof!bdN@{sYT_ z6N5!kHvK_x)$ZS>@-u6#Hj^P$MVGMh@8P4c z6Ls3r%rtgl#=Af-6er;Ba9V{_jd5yKKKGNXRp}1qEqpr9`Up077Do36FAyZ5+QDDV zN5~wqIaUy1*D*ymPpQB?!Fcw?#}TypDC?57N$v(Bo4=} zi>%u-xS7FSp3uyJHTacOih_MgoziLxC+W~MdC{acdm`i9*z%Qh5}xasndDT9jYWJ3lN(Y0 z{CWDqp31z1{`K_9dSl7Ay-auXt_GTaW1**c{V(ZH+D&j>9n{W`^{}F-$X{m5nE#HQ zgYUl_3R`0CnD1P}_k6~cMxX~C1_b-v5><9KwXXc!bYn3Dz5q@s3><@v0#>g*h9s+H zo!S2Lj9;D2@YKl;Yvk9bDZiXJH|zMDPCF#O-5)5&`IZvU4K-Yc9-7evSr~RA6tF8? zFT!pRA2ZS8yTKH*0!d{%TW{c!Y3?bi_f%htD{6{#~1m0=6`c>19ls)=$YYifV1O3DHB;Z0TzaGuycz3D!)D6e)%e9_2_^nUumcPJdkxj`}^zAgbG~J z-R-?llD<4*S2*l+cCIa0xz)w@E}vONE;$;)=caZa47`Bw@ZHy-!k9KE<_vcQeYJR% zgcJxh?joOBQOFtjIOEYMs_?!kD%4eeUl+UOhqKk=I{MMQ>sOOt?h0Szch7Y-_`m3wXJ?`=MS34AIbAbQxRjv!tcSZQO zUl)NzMH2yB>Liw?)xodMggXwaFLw!!9$>aPDG)^tD=Q|`t zHtpsk2cst;h9y4{Pu|^^3oeGoVV&$F6`@i60hs!n7AZkuF=CG&6M|;X=et! z3~<(o27ex{>f{g-PdA35;PDdUou^40eYTSr8g#LV@wskvt|7j6Ec1771Mk#vn{PGd zvbuBHw%p&<0T)nw54edkfavCu+X1x)BjIB+6Dl%i*}5{2PJ-j`v3RE2_ZCy z6f&};EU9cQGA(v#qL8gA`;t_$Wf_q@gb-q~WpC_co9rRkDPm?wBFl_ryqTr@^7)+a zIrrRi?)ja2&$++9)G+V$^?ELk=i~W!TK})gbX_78!)QtslsE$v#APTt0mi>gp^pk1 zB7JcEqwrhLq=mRx*ZdpY;uHJQJxVU~Hs;&#Bficv_N@Z#umXB6D4o6bsRUYuLMjXp zW;p~jHR4I8F^~1)t+OD{5_S6^>D%~)=%kCWW0I2Pl1)s3UjW;fWDj4Og|+zkqVHoP zo247b^re04GS71>tA{qW5AC^k)z{HOWxA8l;~zA8Hs-o8E)EaL0*5p^>rLZxq^u#I zk7Y_`&VY%UPV;7`S|3kVZ{LkQjH|mU&0!0dZwSE86BXCLiPzTvVt6uf#=vdUw!9>|| zbN(%JiOzG9V0O8LLlV*ktZP~^xM_(%a0hI*V0EVif-Tf?d4*%t{|M>h)OV+pvx>~t z`Pzf}Z)~@c3spYfjXkW!eP(=zYkchkFv{V9pC}+HBcyPVyKYtSx#AD06ld6&r#_OQ zxJ9a}@+M9($NCO4nng&Af?xlq$PCLG7*7wZJF6c({jVZ1OV=}vgCB3^r>5XKjIYvI_6Cibf;8+O~TC>90WIY0ztr1wj_yt z#lCk$$w&FE-le!nh2E>HepBGrB&^M9>z?lNIO~?=72L0zKyQF_vmcTkUm67u<`x~} z0LhRkpC=a&-nq|!li7+CCpY}MZm55~O@}Zpkp$Zc6DH9!IY7=*CtO{J_=o8b^t7v^ z`9O2X&7n;LpOvLm1v~S*b;0&9(Q@7AUna?{c)_p^kdObH5o9WyA9gbq1>kTyDBdBv$|ZwYp<^d#XZjK0142FVbRX-~Ts7 z_8-*-xdZLZ$4K~K+QopG)kBL1C$1;HyDU9AxBR1!t*MdkgtK(A`MzAIC{70(CKjm~ z++Kz}c-#GGk{14Xrg$A`w7&xl={i<>#p)>^QQf*x`#qoCzWw30v0_f=QhtHd4fa*K zAgvDrUGQdbH19_c<;i?NWr1xTGDf#s;z`=D9pS%{1FnA3Ti8N!6vF{Gqp%7|veS72 zck>_cmquXtq&vFh5aO3BS%|@*h-eK$AccN!6v|8btN2;|R=H z0+zueN20GT14N2#KOKapgzUg$H&uQXE04YG9~DOOEha2_-`)_nxEe>f9h(t2Pp<}p zSluCMT>C1(6=uTQP|q~v$HqG|B}f1&`xWDvJ$-o}YT81Nt)cr^=S0EEp*Lb|fxua{ z)Zj_LX+y`*H2y7$GFhh$J7IZe^nRo33|bIbcr97jum!=x)Lx19#C@md_=1$;vViUwOz2t(ANX@Qz4>s>G{65Eb~#2W!wo{5p_g#GEt zkN`+!@G11b_>3Zr_YOMri7-3u2#DlZZ;vY`^q|Ck3kR;Tr+U6!+!wu~uL7d_>}9j3=vjiEIurZNOZXnY}zFdQG65WG%1&az_<3dPGx zjQs$&3RZAYJK-i1Q{8LbCGvAXu{G|osVerXJ6Ji4eS%b>z-fl)2eR5yJo$hwIs0~~ zYp)`T67hV`r6_Dc1j9?Gz0|ldM3>7ZB_5Ml-;U z%4h6rAbmlE!}+BuK|3jZ58kUdJ8O+NLVUsG$)kKuItQ^fAzJxy%HyyZVvr4e070_G zL*o7IcvjkV1j!V%c)EOt9B16a-cMSOHzKlRQdGWor%H`%)a4vf)A>oYem7=W?Qr+q zl?B8Cp2lm*;OZlRfpO)eX2npvfOIu!t4nde#mZ<^9(_c(gzw(QD1|UaqZ`iB!*_?#e)%f6-3M)sh)Hg&|y-w_t z2CrN7=bsSstGv54>uVlXPBySfn>r1Y9cGGkT|Ccp9D9KN7EE>wLRaLsMPs7zRdLZ9 zSXRhoF!&+^>v5#YP;yVNfZr!)hllxgMq%-!D}^dx*ubt2;rp)x*HS1e`BX0r&IC~&{WQS`Uy zC2J&Iz83o}ik1ni=e)l%6iyCjaGoId6Kh*z_EXz4kB+J%DtfOCizh6u%*}oX)7HHk zp`4)6ZX?KS8_TTBOaoeL38*!H1i+V;wBvw~w$~^lNs!@(CpjXQqN%3-Z1ndU@;><4 z{5GTjVENaRo|~S!<2s@BGo5=}=0+Ium`r`tY02l>HpK}{-JRO#fv_M%S3hGv0PbG> zqS!!+W34ZBGMC6kC9HM$P~_I~AI*M`CKjltbTPo~Vw(+(!itj0DUpox%c zqm&W6-C3%s1q;G^vjUvGBnLLA-fIsO2tIhb!A7^RZFAkT61b7^7D~!_^z*6)uJ+S? 
zY`J{(nMJ+A!LR{Po!hq?N+H(X;A3%{lWl*Pc48Nj7>yb%f0*LofS-aDLDL1Q85U^L zquM1H_3>W??IC|_@^~vc1dQm>4xc%!d~8_dh~%Xa#nO}QPsK0~7=|5CdEW0V#Iy*l;*%Jb?0^ z@ns^DFHu8e=EG@+g~&Nwrg(Uua^RPrE57PVencvgFWSND)YSmjC&^)&$a<`@8MAd5 z7spG{=Z9#O^ta6jK$k(2EaeQ92x>8nlCz3@J}!Qrrj$z++e&ks-p!oxG6?Yg(YF6< zzwCn*^{TrZ6W_-vpn|)11>GW`h#({YJ+o?@0L`(k76*MGb9|imR+;#vYPe+CAl;SA z_mo=M8RZmy=IS4^(I0Hg9>M@T8Pc)wyLT0mQdUzfu$HHJX7`Z8A0QK zre(Vor5ue3!imv@M0?zqHtE6?-CCYjk#x(5S+}N#7VL)?;!$AT(gZ6heBq3JpaR7T zqSR38u}N#4dnK-3&Ys!U*E(#%P0ErUl+cE7H}vODX~xr*>x646G)!#SobldB8PJZ;v(e{75AsN`d;-+cw*w+4_)TnqZ52u zOg~Jz9*t6FGIdBP{WbB!!!X;r%_n(X&t2}a#3z2I*l&_Hxx@idk1u<$+;vmbGUq*gX?oxI!1H34SW}XDRab46Uqk(>B z2ZwMk-D50#%Wv%96N-@PEQ6;LTBe@!_ZmjK_fM|siiwQ+U*s|%!nhg-d%aT3POBRR z6qMKxNeW!|kKj7Y8q<7PM6-wh|B79n%fI6aFT-~RNM66H&@ukZUyED!d}VX$!y@*P zH)h)rKq^+__>lTV@GP=Off+^D%JoWq3Dc9x99aBYyWgw6*t~Gm<5|BTqg;3;G7n0> z+DQ8OfupWsL_A?)M)IZDiCZ~^o%{DCoW-t5((BNK7zF7i29nBO4nf3eF#A|`SR_9q z(k-lSwoI#$s!Kaav12Fns$C1+B+(Vt@;>I-875Y%lRq`Rd10Gm?Ak4`{L7kE{I5Xi z;EGbo_#k!jN2iB0;0yJ~;<7Dub#+TulzZK@9ZkM`@AdGOJIErR#Z1kCgOUgA|r{Q1j5D|ta` zuUjNf%az}|1g}T$a@nNZmkv{p1a^zr?5ak#pMXD0zd zE3NHRx`kM=iF0A)dfA_XNQaYr*rPSLfwCN@8LZ8+p;YM_Suui!%&UpdFNit$-loFA zFC|Px&rxz?V(RM@1cz)>0 z&&73Q1h>?|zKa$8b8ipDp5V%6t`<1nVFl%Yy9J&vcz$>4(gE64@XvV0L_Iq4!DiEe zYq0@xx?}QlDedQ5)@P6v&J$7`arSQ9kM&NAUAsD`aqFb{w@Zj8;JjVvZAs9&MGx}_ zsc`OAHGL1drDMnQCwLjGw;;NkO_+`~|CkN?*3pqtX%Aosfi42J6AdZZQe5cSXh&=a zLNphm5)ix)oa$;mcs;Y8k?#4fdZ4zp{(!Qar>L7mRy@m@VCJ)5Tz9&56M07r2V)Ea z91g&VQDS;h!pPW-3sd|vPPnlvwJ)n|oVu=%PxBvF=g(2U!M?o;c4vav@gaGf8~nwc z{dK9PR3nm=&_nIpdwrHwIE;;R3MU+|&AxM`%M{ZdHk$7?aX8`#(Wq)Y@>Y<#%nWWn z6yAoExb4%>rh35T>BYImkNLe1%M4xRjo))kTD_km81R=JV&vfn(~1)*46bsj9T<=K z6T(m*-+_kaiS{XB?z43PX1a#3tA3qMrlWVmOZoB!-cy8=h1|q9YRkl(RS<~4cib@mJE{x3 zYBdP(?Wj~Wb4X*F0o(7dLwb^U$H&Yn<1kSt;}+81>wqG~5_|IEQc&adNQvT5Cp>MH zCP@WMqZTxH>tU$F)d*gi6e&MO{fPPx%PIV?G~e;Yi-nrcs{);4MT$A=q`H0*Zf(Z~pG)V|G|h8i0%7*EbsQ3!`0*OrVd7^A8L&s zu`Lb!3{KSm>iT#{zAt=vSJ#D&8C-ab!Hgx3nEQUzLNSOD!V0B!N;k&iIAf*DeeIH` z4|xf-cke4S;mewLV&Bq29e_j_`wYQS+=kN0k2IKR7oc%MhDT>YzjUsJO6&*wi$7}l z96yI0PcXJi@~Zi9t*m7>1f}ioOwJEyh|rFZ9orEHUHBHx-wDmZy9lLJ)O=cgTT|<6 zmzoY;kxiDVk)z!Qk0TV%rS7~t6n>L>(Z~B^nflgw@0cfjk7w_GGEfk{S76;B_Du8O zYWK<|(fCX7<QF&I4Ljjyf)*unbUtTBcGZAtht-YG|VePi{Q}lYLd1l9%sNrdczjE1zYW zX<$ARb?#bxam0@^+ri3TZ?n*aZ4=Jfn%ms%`N(_ullX@C_@6^gG)?f9JwhS*=d3?5MPdnM=(+u3z9#%`uKegt6+JO@k48~WSEdn8yp zf;sRAl+sDDFieSXTYWd6T-;>nW}j)WH`0+RZ|tjmDfVWdn%jLx=dRU$nn(yKWhUh# z9+DJl!Girk0~$Wab}ue$w`ZOUrQ+M2kF^$z91rH?X?s1%^`l8rH+nct*|UJ)E@&YC z4GCg*$#cPNuJ{Akx1a|_pG1i+ndqzCx
[GIT binary patch payload: base85-encoded binary file contents, not human-readable]
zfBDFINk_1^qnt8#CEc1QDWd8u+H0)M@!gOX%)CqJPyRkbb%mudb40JH^E*7c)Gx;)SfUCTpp< z>dk6^`|>sd{h>>fo|<~3Gt?g`o`ZoN{-3M-V(ZCLM;0tEl3z_FFz5eHN*@z2-%jpU zTq?9UU%Z~_AfQ>)nz>jp^L?|~^vBFve~;H3GxfY9B{ItZFzS9Et|Ucx8W#7xrxy}6 z;0zu>*4z2teiZQjLI5bxkB?HBZ>=JuZIa2t`3?D9vPRuUV=9@~5{sMPsBUYI2C*ru zl0MxG-EmpDGCJssqzQ~Zw^aAqP44#~!fYMsKe{BPwR14@6$ne9XOsIvuFUv*=ykIz zpSiBVXssgDxu^X@{`b6ImGoXdJ#_+IU(%Bp@ikBVN2BGNQ^Mg?I*HMvk5@&^cZXQq zKIM&&R~513nbXw2Q8|_Y++L_@t^PMIWGRiH4{fg>*|ZJw{u&5i9eI#*RHp4wP`s(O zqt{Z@OGg)>#}P6%f92h4j0!i+(=f*US2^cS&gFy}U)3QLAAS09iex(JJ)Sh_t527- znk7;EsSjaBodikcz93=>!tIP_NI*%sDcdu&e^3uD&&Ffx z>hMxUjdC65^%|~)Wo6t#G2n7*8YYzDT`FatJbIII`@;q2QdNPpLCrk(;*B)Sr)vg3 zk9@Vo`c|*~sY%}qG#{4~N!+93Ol#RorX=v&}x2ZagPBwKb5!aL9DraM^WS?qf|zk z_FWH;F5hilf*#K^40El_$Q09TQ!Bg>cV2HR;b zb-qce744_#DtpqnID6NI%S`fmIz|THE^(pQiHf8P zv%vneT+^9kdTYh?I%CHZfw88nX$R5f!{Vezu|2GaWs$lJS4>A36tt718pKqrQhA%d#XXN#En@6?odY`94nP62r(QQ1!^lt?yIcv^f ztNx|c9nE;b<1uY|^0le!JI%+A-0V+cH6;%`3g0JdHJ>|~se@|~!3N84nYwUSm?(@3 zF~xBuDnlpal!y&HHe$kTveRF#3`@|gWj{)bh}^T*ylr}-ZHkq<=#WKtJ9T~04E7e( zgX#Vf2q*pbnnJ$aaoJl=;0h9X99Q6D@a!v1fR;3NKLLg__nIBXj-?LY$HVyuAB2!& zO7eu&l-X~P6ZIUNPUef>(sKxYWM={B#eyZz$OCAh*?EuFu|)SWt87)Cm;Y8KinhgF zm9w3#JxohMok~CebQ$O7MP}0@I9grz;tzDZPpq%H{g67dvQw?TU<Xf+$ zH_Sp@Z4Is)eNIUllkR1sWK3};f0OMVwZtk_y~Eu{LB3}51(k~6gG&&f;2gVZi2Tc}(SD=MzW|W9 zz3Z24Rk1gTJs0!g!J{{4?kR-KAgv55@^&f1QAAdWAxqNf)7<2Fkky|GKz_h;?}B11 z{RSlwjm^!YMMf{xS%%AWC~^)JpwKj<y2r~Yb z`5eFh_i$$SkfhZKSv|Q;F=hxlQte@^zJ% zHY6iBdfJLE)m3yNR~P??6cza0rxpK|48R{UM}D_L;7!Qf4k2a6(fCpc^zw@~2`T4F01rOE_b6T!thl zgfW;Tx_>>gNyu{g756ARB>Ra(h;)qe<+lx$7c4C-w~JA4@tb?nU}K1*L{!grFEu-V zt-R-MYay>(el=IN!mMUzeOp;t`MoOS&p@bc)=rX?obPdj_KE7oq?`JEa3m{tFAH%?JPc$Yojr>$fWLmT>-OmU+M!yw9-$V#dXOQ&_y`NV= z2b1PlBd;i_CbalTE7@c7U<1~B+Kab$cXZ$|>CE?Q+$W2C-gdhf($vgKDCc)YgHDbC zk)(~wzR$b#=L;o&mf!^*L}wMQ-zF3`@``2hZWfHee(@(c2d;MKY2X%{-3u07Ge(_)CwdN)2vq0y zJjS`a%mxYgL(4+MJgkPYv?ohaC&IAY-JymP-Az;;R72L8mc&JQ4_D{wWw*;AmtI}L z=3QfXyk+$82vW6<5^?g*V`bZPYZPU$o7>v`xY8E->9_&azaCBINVdSg{8CE&24$-B z+uzHT+SeQZoo1AQtnd76Z|H$N5J~fSJw(g%CZ)!(=VF?DGzsK|lv+?L@`|`mR-J)5 zgZ3<oFOK)1;>Zw%}ex?Z@>Vept z_)?i8a7SMkIfm zuY0|ILzbJ|C2M^o%X!pd?xpIF->kE#yP^QN5w<>zo<*C}Ul^!N{3Ib_CSjcD^^pka zI{8)=tDISyO-TK%l45sPPUwg5nn06}TdvA8pY@!eS5_5e6=er~Ipu2QN`<3p*FUwF zjJ3q9nl{MR_T-%E*;VAu0Cqt6oI${795EqEDNHl8L(P zrsEGgi@7rEuAH(t)OzD5yTWEVr@tjS~+pT3ZME1DQB zJ5+d!*_NkV#qNQRp~Y)6*Jgi&E9^lPjPf1`&v}JiK=cEHJ?Jw31fYTSEDD`DdYG}O zCTHVuXEOGwZIro`N~Ub#*rI1X=5Ubli* zn)i~6+72P$&E={c^_ z%l8^eaNH!bgt9xkdOUIEjEp<*{t9S#~*8roMDT02h6(vZYwntGZRuA1LF zIC6^CR%P?h%ySrwSMBjbk}@2W3d8dQn^&)<6ac-n^L~j8UF@;cg;2KwBVH@Jj~HZw zIBEQ~IjIRt*oK-{RS7pXr*7Xc?5Wsv8SF~HN%hjKreJa$JwNdhe9w6}S#Vr75Ovp4 z%B!}sow0={cdJoPf$0H-f(CyXyh@@rKdOFtiDRppck(wjQMTh)Y>~~{W9`uF?pu#t zlx;0(#C>%s>7OL2do@wyclk2tsu5pNS`@=2?#BoosYo=iCUgDFZsv~;s)xA!^+>#C+S*c_T=pS4dmod|Azc#QM@b=e36Q-A8 z<)d2=oKZ4hT<~jO+R~Pzv{1!V;*}NzkNQ8&^I93Uo-RD!B^n8#3%SB|$?+04$yJ9i zNev$gESLQ-w3+z=m)awq_zay z?8~?xX`V&hXu7GcsX0%%EnF=HKt-2QieHR97qpI2CDv1!7%QpcnC9N<*Hj7O9%y9# zDgx`Ud0=z5RBJWHxxtxq7hikY;b)%V=ljL^bXgRwe`J+BBzs2?{y&;~6aYIqE%E3A z5Sx8&UiUfPnD-*9m?Bfd-8d{A*Tq6Vr%TSNst}ET;(Fnh)ES7tY8^2a4_#T~J4#eK zeq$>#y~ml@zno6ce=3!W(>%#QwNuq#!3uu4CrF5QS$g;)M~zr<3e@O(nX=@Xwna$&<{=f81gmEroDwuS7g@;v7I#Mn zJIzhz99~|EkOO!LG@PJk+#GxTy>9wuI$!_hjpi3pg?n_DAI`Ggcp?4!0VNu7)%~9x zVfPXS-Jd62J1Ga3E3}>6uC0(swYL-DzBsHD^3d^Nz`JYwdNerc&;QO@2QLolL_V*! 
z0Qv5Z#$n^Dtn@#F4m-UCk`9x#C2M)_FaMelK>h`rxCAKgwk5?b_O`YCw(R=)4ufVy2`IWk*o(=10b`^=+eiGOemJ^Zj-5BA3?6g4e&BO0d>b053qLZGB z<66$f4daU&7nplDVT0W7!mwri@^f)LA}d1_!5c_9{Hyw7*FImu4M z386}Cykw!qT{HgZB)PWl*_PkloZCBm1{d;$-KS22cC)k8>(o&pMoJ#_qFV-4XoAf9 zBAI=$(C}&EWf!U=&!d5n`MDZe5uD_AmWA1k<-9Ay3ZJpKM!Z{Pxq6a;;g4kD#72Wf zma97)SCZ#hvB>UR=&ktPmo+ZxPE#4t0qXOZB|8(Pco8%G5e`QBi{(!b z&S)G?Am)~sq0jd*&YgS~*(VPa%;v2=Yb`AO-Ew_xH}>Iz!Y7MGEPI?dL%@dLQ%(1bewK)e zvWeH9CB?t-Rav$NgugCi^o7~G2Q?+nBu5ypauIcwm5DKWWD#*)z{GESF>s6`DIr}o zRz-Pu1AJbWZl9MPSNAWod6xa8(6#AAIh|=9gSq|Y7Kfvoo`BoMi#M=RpJ`~na{A2} zLUZIFJ~XV$vS@z}9W|6#$;?@apjBf?(Gx7aSuY?5HC%?nfN5NH_l^Y$58MjBvC)&p zVGFlB3%8y8Pes@$Ihz@BOf9ASB*kg%96GNc$(;EFVWQj9#s<91qF8%!#QAs+k)6CH zblOl@vVRIH56y1^*2Zh%AU@Lu$Pp+KG>ICef!P+h9qs;sRsM|DqdEi)`o*3TMxB#s z&jw5L{R1SEYP6uU5wl3XbqENo#G0{)Ba-DPln?9|`*Smdz&aHeH{AM3u}QO!HN#=G z2*amgtL71%t(MQG3z6_r9MB~`0*M`W}O2Ffu1{m3xnQRTaSijyeq#zZ*&l zul>1C+S=y3(fp^K%m|Yv)+4=-?u2fMxps!yod$ssB4)?LQh-(4PPf7!X$O0>tQaBhiTD2-m4iC$m<$VBjwn zDxUum8DG!fpY5Yv5Mk6be=e{*BalU+yEXj>$@YtYx`NyLX*tb3CPSFHpqeV_;GOR0 zG~gus;S9P`jwk{Aapno&dPnQoxo z`>cf#CVnRj=F1GQzFXjs_W3p1rRj`oLk00TUXweju~-k5l>HOR7qFzaU`44b+=B5^ zzrz4qV~})jejY<)hb0YL=g{~i>Q^@Y@+F_G=oQn(wMYCNfIO?|?8Kf}7UUA3vGtlI zoZ%I}9JX2v{M<%VN6)04jA4@9STz+FlOz!h;bG_BnwhuXR~DbClrsr7WDvN2sn2C^ zuvk#7sDO17hNbLPckL5p_v8QUL-Mb{*tGrxKd-|TSAR&_n3eWBUNvVsshvA&sa`M< zAbS&U;f8xon|ZLb)A&rW|A5t2(BkFEbHph$FUq6@*5I*qN%rs?^xwlj-yqmpaWVHl-`SB4Z9fe zNRX35FA^QLCvG~t*yXVKrlwiTR>~V_AyebH{%)8Sxag?ZE7Rb9J1#Iroy#x^8bp5x`9(Z|VPxIaN<}PZfA9RB*MlsLFE7nf%T`nu_QHqH70A1uOTDHRy3M-4d6GtA1p|+e z0DNjeO)os%$}{w*zGRr^1;OfUz&P4cEo*HYAnuu2hAreUph@apI z;~IYfsl!n|5U9uQ90qL~PwO0d6s?`2yCw77@sY$E_4+WnzL(+m^a?ekz3t5{K8Z6J zR`9=d!I=3bI1TbEt@9gLZ>Npzj4HvcV}r1NggWA`@9@eURdi$ z{=UaFG(iAxZTJ^d3e%_TWmp{7+U>%NU1%?lp~8<<^S=zk)X&g7<^$u4%?*Zw@Pc)3 z7;i)vo4h|G~bs|+{jT(=<)(|G}yOh$u`S2XqU5&IIc)ciYCCq#=J%vU7%vQv$MaG(*a}xM$MxSz=Q(Ikyl0$q2x>S1IfJrL%@L%HkB%fmo zBUZcuxwehDQ^xx^y-4aH=(%vZfWf6hrk0zDsa3wOT1hI1ZV2QVrSK!A679jti3yAV zb$W#T`;x8s#LAY74KCl_t>_+fx9YFn)a*O79!@jRS?nGOoRH1n3vA<*{5+IgzwTL> zdu+D0wsGI^r1W`ny>lM~_=~=Vuxf+!RfnoXjPB2R*i?l%^8qpKZP!=VE6~h+AereH z-ad6dQbfR0b@NO;TpGu^ELIGng}NC8#H3&jglf=|9)JBmOOl8oPkw`_Rc#Aptcm2? 
ztMcsvj&T$|3OK7A{-Xg|U;XGF1b=IlJK?XO*uF9T%Vhm*x1((L&)e1hBG`L%^F47d zTNoVCYMVS_CMUe1NyiBPu?MP~t*WkFpEfYtpx&e@F`_rK1QXV|sLtX zx(!y(Gr#;u`|F7_29uRCC11kiEdrna>FWqIlB5R$xEcCtyaSgKW~LVD!A;=QaU{RC z#xJxG+6&FOh52bOAU{~*V)JYxOv!75{%w%y?bwbpZ-9*AW9gRrr$egBNxX0q6u9!8 zE4nEmI4K=ix%3vN*&4Wv|3tcQdt9}Zd3}LL|4@Ww1iL#3C=5CL(+l@AXf40q*tK0Rn5J7GY%%4E665jKRbA{v{O_X#AqIU_HcA>itUcgc-O7aycM(Q z@oKe=F~@dc&1;X{k0J;$u*j3l&LuXY`O0*u`zKevPw7FN-9Lw7Zrhul|5f$1HThDQ z2G;)ynih?XhSKzX!SnYZ8!YO1-C?Yql=M%E=UJwHENI4s@D%XNFMTI`EDHVoaB?`j zC0y=Go@=7VRA+05^56EdEW`Z}Bbe~XgE1vL@$Rha`O$t7Y~$S~;TpCs=8;J}4@6v0 zLj|}YmWEA3rrkCLh?B$Gusyn zKUBp^?;^NUpS^uC8*R(3YhkP+V!xa~DWvkr(`o zZ*|oSfK5xqR^>+3eEiv}%3}GaseMYRx9L+A3`T#T>zv8#mKH?c6&M@&DdiJc5AbpW zk~FQI0vvs1+ts2LHHaAL@VbG|2Zm$0aq|})AC;Bz2%qg;(sevKx1-f%LKhq5kx&vF zVNo>c^-$y2+Oea%T-?50M}8NyK0g7$_)ACz|6r*$vr~=@?v%B?<1`x6VMwTo`;SIm z+mtJ=rl@zgiC`_@AyXb!c#Fyy4A*fcS&}^`{P%=d&;86*-(7GaBz#)@{;(C=lJMcF z^hJlV2PsMEyGRD8186$4-2LRA4^n{o{6x4SSdK}hfk)nUZcKgpMSfP~)r(E(8D=rB z((k6M>zw?7)T@x$mk5oNY_yxKQGY})pI%JI^%dKo)#>U@1-+0L&WS}{Ki`HXU9Aa_ zNjL}3VXu+sU2qA0|2-gmlqIRegq>mUvXI55+Pz1W&|vDCeTJ!l#_=QkKJkiICxA|` z<2IAwp{sW1hpo_2R2FTnFR#R{ur3$_PKz5~y+k~}uFv}@|obypF%2QGf;Xpq9l?pt{~5m!5pf* z8R}&gcW`FBIPbv&eZqx}K-q!kf4&L}ZkOyjflbPol7n`E#4T~b-9K+xW+9XIi`r`9 zx@@EKL_%7B)8smZi0TO}Y>MRNw0PZt(!nUCyL&}M|o z5erFA;f{D<3#&eF(Slni1MB#vcHNBYA>})-M3x0E*vRwT*Iym^+lxp5iQiS?hviYC z#3GvXcxhW56zU!}gt$}+XJD7guNPfAh0pdr%fBlAzta&11=PQ%tgm4W$Rt1rPJmnh zF*=7$AhKtXck0uvGX=$!a(mUDzdHtsFN@HWMp`Sp8^ue~jCIqgrx{ z?IX1^I^om&xTR~UQgwCLV$+w=LbPXfG>YvX!KDCl1|dUcZiv4<3D-q3!Dg+bdO6y! zer!&8?H&nhyrpsL(tSD)*~ImC0H9*taF-)qW$1~I`7!iltmtt>IcH7nC4`prxb_+9 zS7-wgG%dK2Aeef>FFa>dW2p-Z`<9JI1h265CDDR`gViL7f|QO){Uh$o4d0_?QZqO? z9Zg~vIl&>52rTn@P?LTopfv8_QP7xV{q01psVQ2H|3*#Z0<(~qIK6jH*rX6)J20?H z?5kO~(P{GwWe=jbUiexz6m5c>@b|GrY<@hl)+CuKyo0<*4#Br=Vy8y?ye_z9y&~z7 z6Ox*Yr8U|ryg1@2#cmAHxk)_iT7#5Bo#dj>4JJ_lF; z+0QA)M?k6nhlJ47B=Xv6hz+b#l!hbJ<(I229s7{2&OG? 
zR)FQ0D}8UQgT>K1NVfP>x_nEcLDki#SvyY?MWVGsv@Ks=AQ#ZkKy+t7C#mz-C1CdK zL%du<7N@*v_@<$c6OH_~oAI*kVYi)4(ZtU|*&hZr1_n?USCy*=7uIR+f6yCOqWR~v zY>D#*XfQVjmei>7|4TcXz{)zk4Ax12#;=pSl(No%RelYt!K6j}XvTH1#1|qBC>BEK zP2o;gx}4ajl9bh%>#K5Sh$buIudkcX1Rfd#$5$<%ad(!RT?f~*7PqI@&zav+@?`4p z5$1Ffre|ZJfrHqLz7^adfh-I@3#bEm5XvVM_aJBE?zYTd%H^l5SJTUS$_eq-42$yv zsa%(WQq-Zr6t?Qamy;E!Ysk(L7|s&9x1hdse;VzyA72+URIXIC_3`f8VFBfmLZ#RJ zEgLHfwXJtY9RH)y9iyKJJ%$n^`rgHJ7M9+LHgAaftx)oLUg27AK*N28Rq_60q6z@h zmq@o2e-cNr_acfGYD7IXl|7ZtSpFZ^{~8!r+(E?U50^4|chEG#$D*tIqxS#~)=mc|pO<{Kg#f0eLXZ03pnN5o zzT5o^0o;BGMxuVNi3~hVH@n#L;=&#?h0dhz-rs|fm^s}W>Cv${ZeOolD~p~qX%uQJ z0twRQZG~3pzeDcmHZDjK7`HC3c%hSx*KJ{Ugs*_5jq${Y!dF^?UA^9ndaC^hHM7{% z-s6Wk1OB{YmbE=7YEv!4t}9v2Kaq8zjz_WoRorboaPIFtPPpE7MaU2z1sX&&Rh(f|8M}E-0X@^e+WZvAs|DFKw;I_-q&rWug?L)#mtgEjo3zxOuKtpNx*u0o zY9sEU*ZM;;sbpBgn~D0t*FnL@8*2By$mYh-C7F}uDr&nqXp2Xndwz!!{2Q+*8XZG^5_T2(E72wqrWrs)-82(kNx3luu0mmk_#;Yo~Wr_sGBWMEWJ>yHdbqJ zJcPV)$nxFw=UL{Moc7)AsT)Ghl>Uyg{ayx z>8xlX-i8RP5?UaEX+($^v1VC|WwK^p{o(3KbrzMvv&FlcE!tkwOQz64tGUqkA;NP0 z>Z8D;`4pUpF>=fM50^J6+dM_TT6h*fXmrQG>c&m^=vm^fRTs`Z%!xl-UGhcIWQ4Dl zzNwf=*cud${`tTd@l}-`lj<^s^;?R`+Yy+^sp5Q=I_yH5!^F~|#sJ6(tQii^7nFw{ zjr&5(Bn>=;8vVc#;rFdxnh|6;kO~@VzMJtCIKOmzVIR;7d~S+GO<}tO@|c7+5XDDK zVLz)5o?_hH7zU+v^HX_1cWeS_pGIzusiJI@)e(!=12MjsD1);=$3B@wq~J@AidaqD z2UkvD+nfLX8+by0|L?5@==?KJ+8aetCss;|fr?|4H0gLd0Hx(W!n2z!7SK+_lr2^h zlHfV|qi^5TM8(wETYSm{@z>Ar58z~@)Zj~~xD_B3R`7>wEcy$SGI5FK_|v@!D+ev} zsyDN{NhHG*I1tSi4`hbedxl6*6@kMl6kb2q-wo!<8}B1RZ?LWOHLK*l(0N{V((Hqv zKs`Cinox&_9BTq`UR0N$CeLEJ%gJobwQT*j>PSM8dtnKz05*pX{4J^%yVZU+ zqiUPE9trRxiTbf;L+>c&n9VG=XpobFJJ`wS8z4KkW_N%`5CNQA6 zbefL3*(cf}8s^SnP_N#cK)Vh0WNASD(_Q^iXAPY8A7b+3hFSUs`=78MF5SPq8(HQC5r9 z#P*W*T{^N}m*C!__qv@`DOQm~?@}JpLctnlZRxoMp$tuC*8N}JtHmVP*OYg|T&Exz zZoSNKFQ$znj|0dIpV;7ZzTF(U)4SxmUzV%*9D^G|062WE^Z_H~hVe^n$L7x|<0 zDqC;@1sC3n&ikP`%Yt_}$_pS&f*POVi#wm28U!+MZF;$pi=5Q4rhXfQm3R-e)*os| zA;OwKuupEhd0)MJMq9m%KdZyYC*4~j9Ppi&?%|L#~S{?@^ z9zfIV6FnsxKi=M85korLBD1wRvN_nLdLk(!a#PBqRW&#JL&kkKk;I>9GdR(26}Y~U z%jnKSC|K1w(IC`+0JXJ#m)qW!b<*2Pp;4U&PO62mJR{n(H9T!(SPg3D;%P(;2; z#}61v_bc#bJ?zjtyMtrv0Hj}y>4a#5g1fBE>v z1ShvUFH|4=qZT)^L!V*Z+D*H}GIM>Rxv%EBVPTOF?=SB<9j<+WBnY^v(}$FGbvE$M zVdF1r`H;DohsR~pe&MLyGTUZ;hfaSy+cx8w^2;VGn;K%-pg?`ld_n%zM5*OX?YYubIRg9h_{M%R%){p%H_kL|*jeyW_O zIfGExTRbSal}vvglz*9FrW>o?$#SI??D^6+&wFM`3pODBJlWOMLk@oj z9S%yI-uyvBV9Q=>5E3$olwy3bVjCWo0WfRs6JXzeh8p!%_L9d3YTq4ZNXKlO{P?l@ z(_OE!^&ZXA^FP)P(=U^LZM9H|cOuaGn39~bdT!t-i~wSJ7q*Yg?R9n`oND7)sbjUt zyfuo%RcR@nv?S(&Qff}|iQQf1^*Hpeb^tM&T&CUx$b$sn=DgE|uw$!^rYkAs@x(zb z;Q~vE-yihe@6`CXGj}&Xl;zxrdW4b$qb%z1p-%EVi{ZP$vg5okROzbRO*WN0Dt4>I z$L-R)h^@%;+t2{e!ey{I5 z>r+7!2>)iVxbY!UDXRiQi_9l|Mdze?n%{mYn7JV0e~}4!S$YJ&drUXkH~889Fed_> zX)}aj+s^^sn>!f7Cgt+-0|L2o*t_<*3T3ec;65tCvPS1P21@U)xZepnhUas7PF|}s zP@EGXlil%?JRr1gJPF<;7qeO39Y`87{rk(_#BlsN-uzB?u?+yZAWM7TLRW^yI+Ct5c`{4WU3!G-;I}*e%G)Y;>PH*tvHV%$3+|$ zXuvGOS116co*bU(eflHdOh4?k-}lIfl=9g}>4w56N({f%;BPj8Rq~Xr?%nW*%K!yj zs~vR9LPVBBXj!vXAvEAF^({PG{!N!>;940NW7v;5$VsZR7MZ~auJGR2kQ-|iMh+?~ z3w$N!FUCQ1&u+G$6}A6cO>{Y3^L%enZkPP`#Q>>;7qjNJX8y1QbWbfO2x0XyP~%G! z{TxbU{VA+}iBTDS+i$6`zJULm>xxar4asFcYn7|Ggf`CJrP#SsWfOB>2Y(SYmn+Y} zv2=iRehrQ98}z50hAc|$&ZT`V_bCW
    M<|9Fwu-rj>@qUeEsXLCv z;&Z5a0OBD+3!d*+&k)9aT|80G7U7Snf920%d47*x;qflYHh!RvuKz0mPzoI3rN zq6QMwpv8v}EP|SpWA?RW?fz~k1Ea6fQE}tCd+(IHYNz}>Hy=gG-07?2zyDc^gGR34 z&|o*YbaCE_rH$<%MU9oy-J4_cf9n>nYua}j4C-!EZLBN5pUK=WBeYv>o_|=zYC3?( z7I$R2Lokb;7Mt^j%SVO0{DJ|xuozX*j z+0508sSFF_-^r!x_|l1dYc%H?+)Jd}5{I`YUWlSj{@1Yi#1HVraV!*SJu;~B4;R-5 z;<_QubJuh#`yds>ZHNDB;Eei;vtNaQVzw=Qn9IM%z#5xsiDBcffuKK=CA(=U2x|S6 zh@ZI!lI^}Iw5RwF_oghlKU}X&L&kssi=+uptWR4$edtYbQ^?9d$<7BWms+D>#J^rFcyF7V6<&c+75o z_Is%5SW!vRTy@IT-i;Q4Q)yv8%~fae=8hr>_)7b+@^Q>=;vHN#S+rix1to4Moh955Pv7Z~krI zJa+$?KwIr6P393WNRfx#@5mA!f0(X@WZnHvJA%hlboszP>dp&5s&u~C0*kB^lfHZ9 zKS=k2wHFH0e{#;}AJX3^bAWgpG>~OMNW;rG@sJVtNNbUohw@@|REO-1@J_r$uFdUL zWbX|G7W4v+(jh0}%d%oTJrD4TZFE19+kT#>{dGZ5-p~r2XcPkMA0@nX zCif4Qw(xI$t82YwO6*bJ-;xULW0?c{aqpAt(Nq-4nHfOOnxI-p&4`^1Uav{0nfSvc zYZW-I4r^%AGz-bCIFjWO5I@{mJQVbx!(*+b@3*Gc#`E^xdLmG!lfZ!Ra_)Psslj~) zPrEg8R6djx~@nEeNXrykVf&llT-dJVG>4SpR~5k_8TrhzxAg*lv|{T#KZB zn(es9&6>I|EBxL&W8qZpv3d*xVv7M>-MR0>ujutDR8`-zGqavs>p&pOoeF^#(0;2` z4t*NoKL_6FAS~#pH`rWg(R$f90%bK=u4b}e+4bNm`c#!+%7z&ov{t0R9oED0Pf*cB zE(0E?{a%th+y#xCKvzGRg^TF)J#$ zg;miqnV=k^RuH#W&SM`=cEy_CB zGUIjHFAo<()H>qIn;yhFYa1{pkWf)579=;VA4FGl{9datm3%buW7WWPRpzF-iaw*L zc^fH#jZ>`KR-@2%H02Ba;pQb(D`plbSuKm z&2W)BW&C5$Wb>8B;%k>NNnlPKkhqqif%f(ATN}&`_JPQ@jl!yC4BRy3d9wrIy!=pu zeK6@Mvb@qxLpOJW9mZELI8!j*9c3@s%ht|sdu=&#t}MDsCg&CJ4p~a5GBG>p=l?V4 z)T#?zp`KWGg*20$-Nj8j5MN7yD7C|9-SH|F^Oi<398A)3fqU4wjD# zx_{o>4$$>nP!9jUq^S2|$B31RQJWoQsSvF^(G4ps{;fwG03Tl-6w^}A$4Q@HY)dYJEeG`HKIyjHyGZI{ zJ~f@ZPcJX;{Ba?E|FxU9ck!urM5eExU=<6t@M#NhDFJC61e3&NV~U0XM7zt~)_vut zJ(-`WHKk*(??irkROfqE<G*^w)ubVP~^gDUxZP>+QhA!^A^dHdOF%7AOAhI|Mj?ohy zz^M+RKF-_krJtnlLy1d{Bm4<#%<#T6TRp3nUWVjS+5z=3ucbPoCUS$e?#`kS7@rv^ z_JFSY@rQ@qy@0(5w#0c*9VC-Rm=i$H?~psHJS}0d&L?!7 zC}eHpI;a=Xawl4XX3&oNmIz?*9Z7XB{T|;5Lw^WX$`0G{|M7$VGw`3Ila<9=u#)t} z$Nqd|Ubv=dNTr=xOIYcz2$6|<^ znE_D8dyx+DH1w9bYj>@$##`WMHNu}SP$eyrdMI1yhMfkk(!R1cHxDjnGK>-4B=BU! 
zp+;@XYv0S9LAjr!Z)jVijD952@@eykM(ev*+zIj0{cYmm>3_H^;tm#Q7f((NOADYZ zh9+E%eCB(hK<^~v62uQtDiHqiACvSznkL25tjW6bS@hWKhsN}ylc!^ZMlQ%!g%xZO z`Bs=!Md%fX!dIf_Qy3}Pm2@;tD<@EAv-Q|{l_Cio{X?Lv25mm$Ju91&hrT5+)Xi)u zWhv5tv}SX?P5|z!S>}bNgK85N3$6D=Y>kdTJ&EZyrU5pBZTCQzyk6t9Ujdu8`T-^l%kx8OpTq%K8|_qMeUP>v3qGJpLOnOQ1owbEZ8$w(`sC>uH(Mq(dlY8 zu6)Y9ivitx&p=~pR$fhu!jd31X}l}h9GnaXPINbonw_pM_g14=cLjcke5*5f=~PeB zOz#mwfq^*iNr{};y32yvg;y@oS@=)PIf)xa5UFArH*4KiX8(;%)}UuR`dG<)Zi=Rp zFdJshzoCI-g6MKg1lj{VY(Yh*8b~t4o7a3h)?^;c01=ia7kWDWa3xxOXRf8DJyIx3 zIbaD|SbKH{?=}i56t1Sj-)aS8($r)#;^+G?FK+hCKj=k7o`2rBa5ZJzT@H#a?XYpD;!MiI@ zp}e!vUGuID_WXXM$`HXuX_wJk4>Q=HoMl+2N5ZT$c$%Wave>F#{l}p|Jne?;UJtwwG1uYtBb}FK+ zxiEz?YkACEZrLX&1uPhH4+OeauuCk%10(oKYie-bZ{5K5KK3S*Qi@mg>DxE>kqu-j z=wv1q4t1tCk6XU1@k(~8PSMq@dwQ(ni(JKxQQT#VS9`cN{U%zUXI;KzRsFla`$)>+ zq=v5d{@DvTp~7J9kvt~i)0-x7RLk%DQskWJf-AJ+ypU^{Kj4IUw&R4Nwf@HCfV{@e z!{W)t9+yl2q1dNg!B(9_P{(i*h=fYpRq&&OtCc_NzJNJ41^>eGdbN+x@qU-0E<5HRZtl{5MdY5P>p)PV&Z+!O$uK*f= zW4T(}F@@4Uo_A~2O-s3O(M+!u5k7oybS2zL6gipdeDl?&E>n3fp2Cpn31~!-c>(s7 z0*DOp6BSR@i8}*}ZM&*gqknd&Bgah3D)sN)_mUG-JAYhP@mB^rX{F*MHSsL=8r+BQ z$+1M>n|dzksrghMd%Qz&k|>d4WoU|eL`Yl3!>XY960faA9|Q~p^!!VM0em?nL$=?n zjr_jz*_ho>7B2y_p2Y#&*usjpS43TWFkbK3ds9zwB!0B{drqT)K* z)zu+`*#|G@!G7j1Beg$hj?I0-jFx9LRzB0;pID`uG5+^rWHT!POn6GK?chMnn8`{QneYWFUeck zl5ZV5J$=Y0H;er)bQbzjP;X?MrgF=)!C}*9y9sC5EWNCv-s8_B~ zR_3=p;{G}}&R54ZsdW9_(Jgl78ipDdxyv9+ElZ)~2l&#-8*0K}DxETGBuEoUo2*WI zJrk@R?eA`}E6A&hA8qk5N|jdVnREShw9=15#`|10XCDd^y~@SaAX;!%RPoK<2rp!u zrO4jJc|mlxh_Hs5aKi#-eg4qqAzS<7nMKLR09xQp^;5G~PbCzNu}+&-2Zki1fI;V# z&=3Yog)PF#Ks$FwilO+R+l9(zJs|>)KKGcZ=~@vsTIuew^3_9)Kb^bPtGw22l*1nj zyq@_hmhg{Y!ZHil;R=GZ#eejA^f^!2Cs~I>>@7-8tZEIg=1Zt}kmahAazFwTAqW%<)G)?%y{oP&brL>7EzO>g{{5T_ ziFevOwH1=QJob#=_(dYlL$YSBC%7{YsX1oC-L!iF2t3(**gzf@_sKH+>wxW5_`33YuK4oEmV)w|Iq-K=ofhT3IiKY$;4cTp}ez2 za}^bS=J~UwqoHy7wa-rVoLDnD_HeWDrt{Idyf0H^tZsm_m@O`e@XgW95Z;rN``Eof z3Z85Xqtv=g!_|6w=)i`r$@WCnRi)GNcGgNFv5InB*}{jypo8d2Z?;q(_#)0dSjC`Z z3ND(>^E!mTUyJX{2Xz%SRm_zERfWeim&vSU&)UPki&7u$+1)G?X-@+?dmt!=33AMc za)9G5he*lH5jct}fPytCt1+bP;(oQ~vx>I;VrCDtaTm-}-Fh0-?x^lNW@)k(I;p)x z>t;)W*mSD{c!MNtXO;nY%|!ZYR><_Kkn_dvdles+UqAKR`ct~%w-R>Go0rI`>><+g z4JG5C%svmj;%R}c(E!1OQxF3KP-*(=g1S4r^mY7>^=9O`t z2rtvQDHysC0y--f_in@ig7+d8!dnVV^>@EU06Gk_OHFgIlV0=$bYOoB>guwE@Ca|9 zZ=fx8`$Nt7L`o`V#S^`Q zNKg08HMKm_7B z;eVI#GSsLhW;s8gI>(=u@BEfI?1K=F}Qh)O+mih-WGCZWjNH@ zw^N@$6QQ9u-$EvdxmT6cewwPuR zzIn~^0>ie$3E$L*OsJ6L~l-@w^Cf8gIDz}fqt6_zfX^o>3U+s#Y6z#KG?#Kp| zURm65IIzSca4i)TyB4S?KELQAQi+IE_NvL@F1nA}>-f2gO+GohE%{(*^i^-|Gds?N zb>iYl-N+8|-vcfLlSs3+c#JApD1MvbSv%49^V{eSWPQccG)9KaT>dB{QBlz#uo9I` zG=wOb5d8{a7zMlr-9N;Jlg2(#>1Vyg9CNyMWLIJCw+UJO+MD05+3-((i+Y{r?s4x> z12*!1s}S%X6$E&~kZw>-R3SRgS%{Ht1m(A24@s*sbaJT9-a>8~ZmKsoVowc-Z9DOA zeWH$~_Uhubga@lZ;tiF}E}YhaJs_>dPr7peszb_S8pq z++ykDBJh&l%n(mF&~=~n*UqR=ht}<2zW^;d>O!v;=&hFVrR*toI#is*=2X zbJ$`I5Ra|1_F_MiD`Ms?s5m|>sfL+1j@be`ki2)>y6Xk3RK!^5T++h3KHXm4vzRRQEWQdsoY+b#GWwTil~}aqrMq7a&Rso_?OMuV$I)4cmc|qIctlGNgZQ z9HJyhi(s|@Lr{X8al6*BSK293yKaah=|8QFweT6$+8cH!P+b2D4~m2R*VsZGNJ9R& z6(U%J0(Hhg^2$XXK&$y5{b#-+TL`FjxJ&Zjm~OCTZlE-O&!#_Iy?QJraN22q3pqey z`DocwNwa_ar{I|S4wqR_StrFUmJx^WOwFF{dg@K+t-~p^zg+hE2W@zUS@i)cUT*?7 z?nU%cC;j0i?piAN~*(OSpJ^Ehax?Q?+7(d27)`@?@v5 z>`sB>UFIP@Af8D?njjI##aj?Gm_-dY!FLv0vXuG*j-PHQbiE~*KK0wC%!Z?S39_q*Iu(QA&F2;xWaCJdMv1*J*~0V_ZU+|k10g%2ylnPP)`?aPnoGJ~~SN91hg z#8{gA?ZXGGBK(TSw(-l~@ITdn`aPXb9(JqP?H24Gul?zpy9!{T`b^RI$ds8Xx+bN0 zr7z%<4*5n;k!)>gZR~-FAUnWTmwWN^%Bz+)9sW6&RLmeG2Qg=#dlW%iL>cRB1)>?gzC-&rMP<1&Dmk$g7NvKNQnM(c$Cf zs0=d3!3u(?K~%#V1e?0RiJZJS~j^!3U<2=xL@50k%9Dwd!ee8R=o% 
z%gG-(G_!H44062g*}I)qM36slUcYTt#7$QvvDvwuT*|{14Mg~QK>62i+&SJ*A>=e1 zttGG*PBSS@2s!O)P;(*Q`$3?Hw}_zKiJMNTIx>=X_i#Uxq2NG(O6kSH^2SYzq-Cn% zCm<@;BbHjT7;U)sx6f?4Sj^q*>qXlAar!v{0h0WMqPdq_dW?MKUo=b00>hh)zO-lS z&Fy<+LiH~4Oy{#jCXgMM=)wrEGwZM$sw}Nt=PBcHcKg{L&l{GDmOpM=y3k&$HYx3} zcQuh!`cWZKkppG6KSopPfMdLiVvFieNUcRe4$Flz%$_MKdRK^E?>B;zXO&UOD0%(z zsG~ejs%g%{>h((+BKOGjATrI2DiDvMUs2U%dYHXInql+t5tFBbnePkSFTHu~r`u0U z?>znxJ4O894eyTuACy7_(kQT^Kr%yMtQk*$@QLCMB(%ut()QC2g|k}xL0>P!u|q3K(Ix54blC&*+R?L`wZM!ra|%TH3u>S&`H8@Tkmlj z)7oUoP9jOoF-vvwrPdJmnut$qz5bB>6LuOSO&IYy)Rw@# zT}dAEp6Z@b4;?S|vD;kF$et?Dx^Uxl;0e2yxb}Gz$(@M@*PX+HLN!<24VG?*3t({W zyA0^bwT4$lK998S)zsj9@~%_RJ-NgMBXnA3QvTYl&!UJkhVDgdJ${-AN~jTJwtQ)y z_SpwgnCnD(;jh%CZ4peVQ=|viqbC*Yb*h&XPVsI{G6D71`w)df9N#_Ndyu5bd9}yn=8lJZkW<8)0tu)} z@dgXhp+2erTllj(5B2;#-HRu*R1a#uZ+1Pj$uHixqQ=thm6x{?Z7vD(4@1M&KU~SY zFbH)Vcw(WIK|jnC2$!}+gKcuV%z`Bvu*ea5wrS%%IHz&oyMzM~MvOS%ePA z*0GH7`n&|*jXwaJ<`hE7zcb+AEmI~B33{@C6qK7i52ND=*EuIX`2{pp|5gXWO;#zTK z^q|Q{_du52uc?7OV!u9QAK9|=f^DkbWKlc22iSc?E7aH^cCQEdu|^cBmp%Dv^uq1< zYY%iW0<~$Z0>KL8n{sVJxDIFo>swVojiq?H>Eb4+(VXZ}Xwia^5>xJuLG6$!>o3nd z7|Z|e=V)EL;~k14M3Pu{C3{-+)s=<~=S`gp)nmFsm zgO&KJKm6w&G^MUx`m$S9s^uNG(K%J4TYN&vFP_f+QQq zk81HHC=_|B(0)}sm?qkUNe^tl(cdCSXGNr}`=oZyinFDTfnI|K-1&#gu1*_>LyBem zO*;3C{^62qr^)@{vNse3h}N1yE+`ZwT7Qsj2qZxJ>|p@xxE6VuJ{axc{G(i-m0Y3x zz~;7wshp$y_{j#Y(tTXKGFI!A*fEG6P{|fiW;VcJCv8Dfo?8bDhO_mdAb*a8ht|w0 zT4e4#twOUrr~10IE%!dL;ulxsC0?#@fRB#+j&{TB4mremXu!+TfZb+66A>c_=ncft z#6EwK#PM;%wgqbMX+W{tFq_W)t6>&-+8+aW0q0lg@ogYeLW2=u$wp}@_{DoS&`md~ zVC*V=bHjd?WL~!Wi^_LnKMga9PwDrJJ#&0dyuKh2&c&`~kXu2=MqiidPWK>}S+#_k zQIV8a!)LsUiWaF4M%+ytCv$5{lMdJf4ZdIho*ljxt-Mx?Y*z$kY82GYEQ1@fkyeO) z@$I|}XcFg&Wl1tTADcSwdL2A`&EpK8Q1B&x^*Y_3-3T8fdVB<<^FlE}nnEB#UMY5@8&ZoY1vj(?TL~(CCU=YLD zyuh+~0OhLz&&2M+S0Oj)7Za_n@lUpY7>YK{L6IK;$ymQ;CAlQbuP?wp-oSGcAn{jc zi(P3<$F<%;J{t-#oVp`JFv3R!Titz>XM-b6UwYZx@h;o-!sXd_9+tWl!wIatC7?`$ zT>o$l>9!Mhg4qNVAaXRP{NWP%ohoi9%aEG8?__#S26bz_Y0d8iHc9x!oY~Kn5@XWykzxW`2xAuD_ZM%C-Xf@K;g++6lBKX@7re691^ zZ|5<6>8TA9ucBuYizb9E9`}cG!jWf|m=fcL{j7u6v8{S*F@@z&jF_QxIlZx0T|7k7 zb>N9YozLfOeVN4tKMyxXQ%*J(wNqjmP~rGef#&y^D5!KTN(;X==uWe$G(Ok5)zxcH zc#YBZrp2V0*0hnX0elK z*>h?RRb{cy>4Hs9gWBDY9B^9ygy5q&V4^|<)1F2wCeU)Vh7kFYA;Z_N=I&i8OrJ}# z**SVr8xTRL%UX~lxvu|uWD-*K?F?~coB>e>=Od{Q?;M5)(Fr0pmAKl=6j${HsA^vB zSDCPhw|P!Fn4KhjO~GzY!N%aaGSn!p4h9xrwduxA#il^z`&(mhzrJTz#Wv?w44NCh z>?%6yZ*tjhWJ-mz9F4qDrm0bJ7#d%pq+<2Z23?dj%_u>S>nfJvCKpK2|0WG$J z3pXd!P%XrgF5W;8U9Ge;;#&Pd-hjVf2vNEARV~6 zPM$~3Uo6lcWv<7t{yKtj_7OOo5s&aHGL{=qe7#gpuj!^V-QA6D(WcX8I%Wlw1QD@& z>3gyczvJ~B;yys^%18Jgg9|f|Z-9&$GlFihZl_(2F{dx$%Fgs$X}Q!e$YYwW|1k4> z#~#e@y`E?U5>nP_nkwQ}++S^!vG{l++zwmrbf^zLf-3I7EG3TSv zb!0b7v(KIr?OTXxl}EQ;P-abm&r$+xbwWf+4w-&IU=z|0z=HlGs_HGIJ~y%82-mL+ zl9^g&sCGVnhe_9Gh3k}BG^9dm5Wam>yj_v3oMid06RkHpsxP3(AI zJ^Y;F+mig{760!7>|lYT=IlGdT{%ag>ZY{zuX_^1vdX>w3g}7;zz3aa1z?X&!wkBY zMGS_1xdYl`9kffzS2-%)hW>1?LY$kAO2-a`pTPmecdojgGM+wslegLP^AZ6*5JLkA z+twGH47M;eQ~==(t5@+9xLh9{>Wv7Jcb+A*sP$?T-!Cz}73AQpe)M8TQkQ@=vOuIP zsjr{82VzJ^B;gNNZsjUtnk~T(XxK^_%}86JK42^|=L)?90*Z&d~Z z)f_^sag5d{{R^Q&kWi4Dy1>UzPhB4tc=ZeoOEf;>R?=tQI#LaiKsgJAex5p>_t{kvOA%j~=z`rKZq)U+jsu-D3HR7b$|KDqmPA%me^y z@d#pj?KL>1L`n%+xa{o#_YUvv?%WM}Qg6{JX1OMw3JDs}WZ`<%GyDw!kBdA#8!>Eg zabyID3CXnNivopSc4uF z5k6qd7M=!U`@&Ytvvk7zIKBCNY2>A{lFGCe>%6*KPW`KqLh-dnem`0SU0Zls;HnF zQ)lh%rg<^{yXYP47MKW3@NekUxUc>wuRsEJbCBph*g1O_?r`-1jPS*%_>8|wacsC^K*F-j`$xHlKjw@m0Fg1E6U z6~8ZpPwpquNi3_1s&=xWsZC!vHqSRaX1-2Coy5#=pWSjbPon6?`y4j8?ku>KDX6gp z;t?{#J^aHZriBr06yK}bE2J3VB+(}*}= zS;uN2^I}DVz8_}bqWv>%pfQ%yGvR5&dIuf1{`*DwU#M)6@pMw1_9q2}jS)Kf+dmk5 
zD)KdL#E6(3l-Q6}t4hn~W{B^A_S=uV0_$WC-K5TD{_^7ax!Zk0KniHx6X|EVANjsmC5NS7 zD^143scw5Ko^N8>NDhh9+YY53GVXG~g5i)W6hB;U+MF1qEd6&Uha;`h2X^=qW7GF~ z%`ywpyO&w#T5)Zk+|P2rVeX6T52UY6v zzh76i&)G&TvL)@nJ&M9c>Y68E+BnU|+l0!nEF(v&YyGeo6d6#xXHY{n@(4zx4+->Z zBhJH|8A4Y=AhI2l>UmCwpevp(bl>O;xGp1^ezIuhTiC8i!hj49Yl!d=tI`O(#Q^7J z&QtHlhH7-97yGsp*bqh7dds|0b>;QK)89qE%pc^qDZaQeGn`3?DA!$n3Z{AlW`rxP zt=SszE<5K?W^Q=&doXXwC&gS4#x>w`Sw+?a< z+U|#hf^V|jb>iC(qEo2f%|(!xrdFXbN8`R`FhSTHL3JfB33Ji{TH7)3oANxs_MU+L z&@h?Sv-W7kpq}gv8p43h`ioc(42cyfTjV!%+`w) zV0e)@oMD4n+cv<1^lF9P*``4kh_(CyOfkHj-Tkw80OyE_`g`(i0q>N08u;LVWZ3Ht+oxbVsV0E=74*W72j8a=QYPe*DT!1@CxU} zD(ElX^}GpeZ@-lzrwByJAFXkFo#$JNbU6(HsbI;@s((1q1E}!`!5}DT0Mm)IRrO%@ zrI?97T%ZMfX~GOFa9cOWOs@}`Zz^{L;lw_LxBMGGmAYvDI4?c53c?({$T+MU16F;E z@psjS!@JKN!Qd)LAO}pKqSMa*#Y7(;)rIf<7T_GLLIRfa+b!bobX#b6uC)dYo}`Nu zpDFBF07r{g_1$7{y98gXPSo8#bx@lxH=&}KD`8kdV&7w&u<$#4&)_44+BZbn^u}5I zch{rST7gueq~RWU2BB>Row>01C?FV7sC{rR=)_2nJS^8LIvLdcPECX z09J%OyKBY>w_Zp8b{f=r!~h8MgZtUje2udghNc%HF?()+ zE6Xu-A9TFKa=;=^TB^5nEhnwjnuKRf^_lx~+zG(8L0wfD6AP??PmY<|lRdgt7tirs z10;2=-uT|MfkDTJU=rOIN5&piUJ*!^m%$uWdZBu%18u15BhMVw0 z%RLBo{yEo&A|_*r^J4Q_KH?z9snS?;J{l$jr!rB0OB* znceVWL#y(#<5J!#QLg{>iUYK?EV*_XaC#X8-T+C0v-d8cdKt!IjEBh_GvPw73Pc7> zG8Vf*yZtZe_ReU%{FYCG%vF|3;r;c{6ou|_%v-DwIPeZalq~BY3~XMn5X8DFM;KP< z^H^RXJNd+IWHdl@7YtjF*t9c&Dursr9^!qRaK$e5I2kh!&u_3CUQ_Xo!_6Ftl2x|H zd2+Vday7djRGsKn#36m7Q6@@c%qK{tvE7_* zb3$NZx|*ZRilu{T`6zHMU4R3XGETxgxwaoc3LBfreE)7ItB^B42Rn)~p$LeQMx>jQ zqZgh6aFKd5=g^rp`AYgi`qO>>3N4bcYx3#%LX(Ysx;-N?vurQl)u$X;-13#b)w#** zzZY)7f%MYL{qi$g(r)f!J-Yd90EuDUhZ?WcYjTlG+vhtA-8vMNZhl|=gu&U8+P9-Y z#CV0kA94`3qjcr~arWM_Onl({y}KuI`Kj@7i|_1ZL%VaDf(|xo1zdM09y3KM*aAyn z1={C9E0V_;`#p&dC(^Eb?A7ze&N*XKWb28n0eA=@k#`;%KF+|Ew3;3w_giyN= z!<{-6KYkr-`Wsu)SARS;#&0;~k7$8LdO3nCyQ=r}LFwFKTfyX8#PBGvOFokq%eyH8 zO?bLfOdDq3xiqK@M^k0SY|&IrO!0Y(x+`B-7tcd>-LRTzZ^Xlzx(M%m%E7`_R9w9r zqdjp!u06xs2XfCJ`m&PPHK3o;%?U*{5sS_p63ENHaM<>vr7`dlO(@HB8RO&uSgnyaEj~jtj^&ZMkiLU z3vMEqWJfgpI|%r{0fGg$u%yl~_Ys4?M9e z?si|C+#!FzKzBWvbuEtFX^mf^5p()F!XAK{f%AyyL5Lt%PS3ZGV@UZF)kRVnN|+S8 zQ{$%X^-;wRKPTU?%;b@;dv&=F4eg4`xqW99g~M;I-vjvUwzAFX+!Kb+(}_`Rn_Fs` zZ%JR+KIe|49M!*V)Z8}lEWmPKIA)s0+79%HYC-4<&Z5Ta+TF0?G|l+kr|AY%Y~|gb zvm2ugM@Cxr%{vqiB%@W%9%0&<^Kjo`m$VAr6j-^FogERR_R8$N)eYIC({cq{Z)CAS&Qc+~R9P07|Zd`JR%O8Aj1~*9We@zM45@ zmv{;I=iE(S2}x}}aA5gr$9A56FgMfU~oo+q4&FF?@QEbV)A~u@w4WeJd!H4G135dpAGQWNEnSLkT z_j$Ho&)_|N*mbCOWgi`SSZ3M&WbefU@gHdVDFUnujf2BX_|1R-DW3Vp9g++Q?w`>7 z^lhlR{QF*?n(OnU%TuGp4yE?^=SxYRr)cUcod0RU_TO9a|7y|uV)f~$2&_^N`V#zr zfif){EK_yZR#D%#IBpj2{>b)LPU=B!<$@L2ny*USusrZC@!&btL7=cD{C+~Tn=Rsv zY&R?VT@%puwEICIrGrOe7Jsc@ASoz^6oT|lCK%jh53w>>e#FR z+hv_C?wkp>4xC0`oNXu+VzK^?7o)8g{I1>N}HYQNk_di*IR)(KNObb1me z`}4MO>aWxT_0fb($QeCMXogC!;U9Yd{x^Z$LE#~;)J(J1G*zn9$?|PwRX@IAo$~&< zwDq4$4_yCyY21G|rSNYKiWue%tGCZ@V!Q_sIhMl=k!~jdD~iz$G_JZ#F|aSZ9*$VM ziJO<)$cxok=nJrYUX*im_%16Y=4FKX;#5`lqyIwMo5w@hw*TWJN!AG=#3)4BvX^a2 zNs>_6%M{tNg>0E-L?~jihoV9%WGCxnOC`yYHD-pSESVwWVrH)Ud%ExEex7@KKA-RJ z_x+=nT(0H3&hxyE<$WB-`ye9qk+i^94n3`lw*h5o9c6Lb2&hDHxct|TI$CsBf&vw9f40&j|(T2Mr^ag| z@FS=BKNgD!uK~%#aS`nLSV#uuie?CgmeTWR2Wsm-`M8MVW3P@3KONA@PP^!}Wqa%; z4hQ?1yRAFENN-eRWGq6udLY*(g>j1MMs;UytE4L?n!9&6sSNyFrCC;EIdej6`+wHr z;&0{0C?Ci&`>y-2Q9DwDeu&_sdJ+-leJN8 zwc8qaeeXcw=7$NE#l^W)aaJ{`&L+RN*b0cr*~xy1qxc0Ge=ol&4Dx%{Ps`G1!NcO0 zZOX^@al%6qk+;04m(VB_@>-zH zfQh=OE)w&4ohS6mOTs**@uggXtcAk91B9;6WT{M$)u0s`2q!wv0AiBufElObdOQQf zmmgtxiOxNv%dbe)n7C$bOz3c^&}o8gRiDepl#}*;%CJ{(^3=kr$)y0FfE#Q+4@KAy zvX0Ywm^;DLZMLB9iQQ8hNNeppb5ZJNO~2;&=^tUpOFWW~xI5u2b+*qpoMgahb6|I+ zqTLpuy@d;7^7DaIo>$jciGT=~{>*Pu+1W8v+`U}w2Vq`7EbiFsm*ND&9|(`vQxV8v 
zKS)89dCvmuF~&B1vtJNT8nOVn5)#tXgU`r4#t#>j;=iSqu86Sma8l>60CsH*qvrI6 zbWIZYXR}VI7)_V@$VKqpXG><8;}gNRB~VKr=mTl8XzCf#R_ry1^2DhlFBCUr`H^Yv zx%23R`_QJ5VmxsWy1}jz>PVrtliKP z2_&=s5K}Olp2W)67hos@US5p>=J`nrsdEx$2bHTF8IF!&H+>Y$GF2yQT$Q#(xR@f) zp*Om(;Hg(#-(9Nmwek+sJpOX+v4k)HPBCbb+y@6j=RO@Oo$OXnn@fDHNv^4$AS#q!hGz zQB9GwjZ8$x4)zcIeZ)r+s?b3hcxy&D=6oe~)x!J> zEMP!V?xV?1zzYzudY)fG@l(grq$K5XsW5j!SNKcgx zRz4;dh-drDcu}l@=L{529(*Kv`{AW?Z$`leemv_RH6kJ=_YTJ$60o&DV?8y1+MmV1xS`I!~+xQ;qgQ z3p#|=>$HqZHou;)J5xMoCRfs|(H0gQAKbaKXw9cnnE4htS~Rtlm5iJ>r-KV065`Pm zO$O>9{D~n-$1!Ilu!(AkjYI3;i7lnM<#?t#X#VQek~PubN%s@PS|?kG&l+d&?;@Tx zKd2ryM&C+K@nn=L(%#ClFZ65_%x;f53f|WaRFccGp2?TR+Ya6Q_zm9KU+7#metGo~ zGdlH|joQ(qF8_T+TRbLRPZu9XeXXUcQgWFhjxhgOXzhikPP0xkjXyiT!fCel+W7n+ zPPA8q@+Rg$@j@>p(S&>AtmUD;m&e@R*!XW?)h%Q^8`BFwUtAiZ@r`0$f!Ba#=kf%K zw~8A-neC114lC@^cpe$tK6L9nOF%~``Yq}MhAF-g86>do0t-7=o45@?-pJ8aVB6g^ zv7kL*y(oVYnm|qucg@-Az@@D6e(Tb)0waBOVYSFh{Ac#rv41Bb!GjZXxBGe@&DqC# zTveo)LmLhC4ZrsbVzG8W8Vyb30pLhA&1E;G0#1nAR1BqfXg7^%Kr`w&=@rqi`AHwM z^%7V5sDzt;%AKh_#Kux+FUZOX0bePIQ)e1(#|*=r4hzKBs9URL6`nP8wGZ{i_Y=38 z#n08+r62#e{Kc{I%@+>0TRQj9Hx?n8&Uh`N6d03d^JZwSy-%q=y_zO-ir5fj8Q1@L zBCKM+^(#yJ+lVV5caU*(2>KFAfNf3?0gRfS0RR*s%)B3FDrpkEX1@=FvpzUrSSLZ2 zLwOS(g{Phc!rqa7kyVVfFT(n=?r5z*#*+(H6JO^{lG*(!V@0uYnR#*g<`??Zy6$0z zO{j8!_ClutO#u|o&Hm3iFS_(RI1qSkt@@r(mcYLlgL zbH^_R98@cnM&LIQCD3jpNO6WK(M4Odpec<^2v8j2n_JYPI`qK@AcGrmEab$ zG-CfCS@G6=!81WgSVZRrtU0*EY93u-${?M|)jgESoa^pB_;>8Lv% zIarf(Ixo50B;dmgWGSM*$z;h9+^81c;!1Sw0Wt7fxII`Dt2{`NfDCo)bhjAr)_Usw zL)E#usa(id)>Z&?Qdr12csDQq;m)mgR24|65X9eEG2qd@PF?!rT&7L z+M7sqI#%$wYUpCLOH*~~jV1j_wIHb}M)%U@p44IBSD#4@N;>k07u2SF+2;9)sSCEM zD4q<|q!WqmOI@6w1EOK~CwZ!E99HzA56Dw`CFdXJMKEqil@ZAi#zrTvaA+!@LCt^oovM>#U@MLZNdzKM8S724a$ z-1&*7I)!f^#$O;nyPgojrU(*ql?FF+R4S`<(1u%GdrLkGUy091UZ)&Y+g}NXX}rYd<~p z)h@-4X3G%9y~`EA`&x|Fz5*l=rEOa$%-vtt=LZ{E7IZWs#C%`eN0Mu5P4@({2DIQ< zBGnWLY4x;_c=Qj8E`aa~hh05kvb3Yb=F4o!W2g6{F`2GGGGe^7OWiMLGoll}^Jd<_ zNOP6RqMwt>FwAWY^bNlq2ly^T&^{J7_cdb(C6A+y^oj2mYMb-lm3=mDq~dVmI*}%CzBnB&5F!tR6yKl?lzVS_K$pWj>X_c}S zsLU^J!E(3dM?L;Pya4+GJ@^d`DSRQ85rAWK2EvmK%;pUZp^h2I%T ?3R+a8mlNo z%FtDP45$Fok8Nh8B2e?DRD#gR-M)F%re6v~cx0 zzuPD82UGpHzPmh09w}nju7<7@8Eno4qZ?)A5f4!S`lovOF}FWQ1NI2<6wom`HVlxg zksrpr7LsvY)Hl$^we0`E<>iLFOx`3cxJAjqh;t>zhf3yZ9TKKjlb)OJbPL~lq1n&ywS-lTW^E|Nne%>c(YsGer5kJ~TUx+)3~*w& zUeeBHHKsLnJT?J~rs+=FF%2k;H~8L0y^Gh1v?d)ExLscA7h00o{q_Ca3&!%w-U!6j9l7&Qh_{f?!6BF{ zlZGPqv`d1jUjl=(+c0XXbkq3g`Ouf1bIVDWt(Ak)EVue8u=!!{h{;71=`PsE1LqVh z?Qs;ex3&!+^11UqkKm6^JG{bGg&a8g8*THBlC&Qb9gBGPyx1&0tOXF$q}&_xbp8e* zSjYcWt^K!PLvgp4LoZV&ZyfYEx4km#N#FK1FeoT+cL(qO_JS|TraPmL8gGbVgzSCW zy)=*r)Rr5G=C96;&lL?Svfzi@+XjKruABBVQZd{^M$A3L?+sg*;Py?&g<&kI1|+Lv z=wOngz@GF}1Q8PeYy&csMxeA?cn-S2q#>103#aLyE_IGohn>}M91%I~Q*5hYQ5%?t z{y{;k0Dom)EmMR+2Ud1u3G>QJ@Z%J|W&8Z1)^&~Z>ULf^`}#*7W}K3h*iDv@)mx+%+7=y^%mPZHPZ=@;qpwzCEH#ENvR=}s-Ls-ji!R`55!j;v| zZHfG-GvUqUF0J{JO@{Ct1IdLj1uZsGk0MWDx1o69tqyz^?bjY3o!Ct?|9ql8ro?~8 zR4`AN_M48zcvt77NK;iAeZo57e~)4O0e3_3;7CO3DT|RKFt&wt5RaNF*9)-K?x_}#?r1+hb(S*fkG!A#E*kNWDy z^o&8#j|Cmo?**UP@6Hlqmrbfq7PH%CvKiFcVW{Sd(7ud$5dP&bfy_|txg$kRe9s)( z6N*EFp?rs$G?^_`=ork6hR zSg*OYbbdTkz0WPY>#F`c&6?DgO*KgY6`rdS!#8ef_zDz$TVx8P!2B3Ui*W^GhwDPs zr0&P=ho(1^MjojguI^iD$oEt2s@0GjtbI?;vp+vB`?yqzkPAwOJrXr!rU8y^{HPu6OOWyS9(TrCkMQIf)sS>`Ck-e{w%SPyT~zc z{T=i5!MyLXC%anqwXFgFGAbAp=8!}Hd`_yOH-J0r0h{zrW4+|p?B`uxlV!8;-oz>tf*G5(|ZtG!&BiwCM%GrsGu6Z4P z*p2&~m%};U{-)%LiyfaDL~`EVkjFvgnbuPXIPYoc*G`xZSVNH?4ueJ3Vr65Q%w{HK4zvOq*A)%dEx!Njv&*`$wr8RZ?!@K!xPn(Eg)Yn}T zKfL{b4;HlC-^aayH$@fT$wi{5@{IXB(fM$y5oH=GqCFPV{3+6&S;gWvx>*0xGSg4@ 
zJ7L6UfIs8}Ip(%CTm>!zjyE~Y3{1u=R*d(2^DaAMKbqO3d{PNi{aV~Cc$nnUe=_Q6 zMdC4Dwte!+&IPG4(x{g?>{Qh#ct@y=3FJdvJr!m05sP-+@}SUeOy!MFiLwje=g^WA z)U!PH&aI>%qP>19P!~>9qW{gxiIcQfzoIbgxTanGXikkAfFmc&=4NlE0x5!j(eS+aV>U!nYL+p{#s6)WY)y? z%saK>a$WZqq!lfmoI`(TI3Wn1dxBX^pW|Q^FN)VxNHk^XM#DbgM`5wd$Mvhdqzl8 zU&~9rIRq!XI6TQ?*0jYj^o7U|p740fQu;j=8ILTzXX*_{d`FM)Zo{8=`Vje^2+84@ zf(>Lq17SfnET;;QMJiu-4eHXX_cVotl$!Y|x6~z1@c5LovL?nwV~=bpO1tWJo9zzs%=U%&Fcyf_C0#}z*%+# z-fqB>lcq-5AcUmrR}3U$B(WZ}!~(Y+B5|*WEGq7nUXe}Ld&o5(yz`nkd}BjMP>n%| zln6M*Enm{y=i4ZIW8)Tm#9z;~U+WmiSifI0|87CB!6$~p%~(a5a?5bbJlSO>1~fcb zs{~(q{I52S1qxCS?HIS9M@7y#Lhe)M0j4Tijs$*#x{3kcX`h~uAooDtsGh*CM!_Rr z)l{Y3<%-r%0@NY{56PpL;w21m@TO?ICz2ojbQ&VHPw3JtZWe?m?2%~kaxd)AYo~GL zNA4K4mt;@Qyf5u7V)Lr=2Wnx3A;?q&V~E4Ools;EaGIFQ*Zg9!4ock}g9KSCr*^u| zU6N6+Cd~r1KD=$e$ME*gTevn~k2gl()WP%Q@Z?*VN>!%75=Mf=WT&BSV@<=RK9wcH zm~G~syE~-r`|>Joau^+3Q1U%j7qHek<;#zEdkUC4bXm%f;UzK&n8 z096+8#+fhcd%wgf4(GgHlKAGZCuc*SI&Rv+C$T9zqD7hj`16uMX0GZ8C=XgwO<084 zAVPbyY$I)U*aWY7Yfn{@GvSoZ7U3JpK0fqx9hGx+Sc-owC%W>iB^#!va|5R511M9Z zD@YI1_j-ywH&o?gLRn0xHVIn{B5;`tKWuuiranH>!%MHw({?ZwJTQGX*~#dN$xkHk zNVaJM)9{k&d{sRmJBZfVVd0s=$nP(D8s>$ElAHQ7_psy#}B$kPMCe zRgMr;Kvwpn5C}^BRv(!r7Xu4swUga1(KC`XHQI+;+Fs8WN+twt?9^p|{BNP=hf{L8ff&yV>RZOYRCPV!(iT`s-Vc3isVkjpJN&T<&+{$VNsPKs6?_f% zU~_Oy4UMn7gLj|cfnF0f>qCg(P1R2y@?r!-1KVIVDzFZ3V(neG6c${rSsIzJA3s*n z;_s)sV>-_6v+&ic&c($*5FX>h%cHA6Ne1w$HjhC)Q{js{nIbgN+pq|$iP@I_eW?Xs z(AqwIN8tSHA%PfqJ;Oe$YrCJ{v0Z5av$_{xYlTd4;LvRzWMyIfJZTbBa`b9itYuk} zYh;_#l5}rd>At?4+CX ziX3^=VG!vlK#E=7w2w#NK&|bUqjH;(8Mq0>bxGi!!ybNk@*rXO7@)~Zfw?(V?_hoP z5342+2-&8h*YI>a{m}rr>U|i4Gnek-&y@1ZtejrfShoGyWQ+)nU62Iz%ShG>9Pb>y za%Mq-GUg{7if&gcwjB)`T(Mmmzmza|JX1FA_!sNYFA1M|BU~>S;}y_5@Z@);!x9}6 zyEE(|J(~P$C;qAjUD-T@^4YuXT#UO7ZM-dq$YB*H#=8JNqQ^tzms+|D(4e|#awd@j z>*6xycPDEo&Twy25+5t``MJZc1t+F9YnIs?OSVOreAaQ?aY?uAK5BHr4M_*sz`0AJAfN>MPSY zTdx&HFarz6X4h3}J+)#UxrB&!`2~6f#O!^PuFc_eP#~@%DUxdoF$puu1M{|lFDV>* zlGe0($B-K$b>#9d`27&a&)*syJ5yF;Gbda+D&v@Tc&z*7J;J#Q$a(cR#ILBS2@oE9 zc-K-$hUp|fh~t^lor%*oo!a43vZ8#(F&UZ8V-s?`}|2!z(w_g(%4tLwzKY? 
zsy9$ufRlrd9<6P$AT&#qfn0g5H|@6dZh70XxDM?~D+{wd#+Gcn3(Wei2$?}JjZ=kF zr68FdY_6ZQ#oH=cV8~8-wAcvcA7a^zgE?c?Rk8BBF6syrys_SiqfgqPC!SzI+#*Vu zNY0tqLQ~RRe4Q%=wRHxU?f09X4nMzd)Em{HaaicHunb|`TqtB5Gu*wMZ3+(nQ1{YE zRK-y1xCS|Qevxt=-2w~uZ7+F|{iJnQv{}H9;kEpX+XCcZg7tLL37`2Wkv?z^w`My`|;RpV5>YjH>`K&(&dn?Skj0B82$Tp6;5y zm^Sg*WQX*dZ-rV=6HxrX$&AV%x&xa1dr7kRGG%jd)_ zNpCV{_CTacifc-e`=(B9uFZ!&-$iCKJwY6bdl`g#9k6x-hT>SR%4~}Kd}lsM$u{uA zdG8&^T3Uz?w6%+HTVzEnb9ZlYA(bWRfx=Z1^(3g0N6rU<<$nfw>#H)*-&&X3SLxqH zswbO_Pn+9XU5vSKiG7=6hD7>=l5}hO#8emy3As>-l+G4EHcvbcPT*Uxp*VHo_NFmA z&w6hzA1^<_pLUaXIGh43#AK5LOtr>8{*_sX4HJ!_27K>iZevUVkyt1Rzz>V8>$uq= z@+tsXuSkPUh++nw`30e@Ty+*CX?)GW7a;tJR4*j+VGo(a3ZbcjzBo?O>`V<}k>gq&%KIkluH>-yS zuJ3LQ?yq7n?)BWO95xdn&5x0pkN1cC zy-kznQGtAX=|XqDpF(S~!xw3e+Physi*TMWfwV6I5pF4rfYLWT&^{tlAc}q%yjnR^ z`?|!GNG>aS>bl{kEcrWEhd*AnQBpsBFfh8lN(mPpc51&}JMH+m@ zliK+hGx~0+WhZx7%CUwFsiRK|H9Rx)OvDJzWx!f(EFE5^_p`Eyeio5gA)SjOejOss zd#EeWC~P$n=zaiYP(9t?Y!=pEf1FK*zac5@7U#eKp!xf2rPzP_9XrF=i#^E7Fx(CZ z+nGWP340F*e1wm$s`v*a-`nl=0UuV?wT2TOx|_F>-$))_J16|=HD_-c=ulEiRvn3= z6t+A-x1!3&6K@oTa*u{rx^u>yCslk%Db750J5OmR`rX;Pp`}ZTP%PuBIrtDR zfh9^{dI4&LZkl?h6BIt_k>Bs*%S^fMB6#BHySrk9w4gni8m7Vpq(zsja>URIZTyhL zBxl6YYK3gOSyB==bMmrj~4yzrTpaN6s9#kw~ZkW;sVe3b2rFz+;U zp0<105~>Cd?HzKVIz3bK%%=v|$|iqTKlmkH;=QTC3yb3gLMAEE9(Oq&B46OiA2Fl- zlYR>ZAbe+s+Np1bpCpQY2MInev=p0;oPO{^MT*bXqDn0$M0V;!ZLSC7xuG=zp}0O& zN^ytzc0ovn1q`UoNDDj+Y1}76-cS0%FU+5OTW@uo6%Y_^9XWSyOmnc7tDfgx;1xtV zmz2gxq9WtT-?Jc_!&o4YeVG_REO&p!@}l2nGjcu)kFA!9 zPB8tFk~PJYUU9<$I3yNmj>s5%%P_GT&D6A5Pz?zI?5-Zv1X{d#MK!Oq-Ho*c%@%2% z5d7{t=Vt{Km5j@tZ#VHpNK7rpp{Z6FNbd~2;3Z^Dbyh5mL02qB1#M0EJhZuL>+^sN z(J}54`m=r0l&WJ(C8-g1*#0ioXIv|)Om!ako&Xu)y2#LnyGD2PRb^w%!$Q;qF|)Se zMor`Ml(StLQ)Ool-%JodSOG%^KmUoULpO#F=3zIn9_SzRr1)`rSZyuwE)hlC=UaL= zqpH*=P;Yw4EyUOUc4^?>r}TeAkBI4nF*M9C37CRt_;yqUU?6pTB|;Qo+ESjGzo?z! 
zb8wGsy=MKcX7HB2xdiFJu8h^dqxq4&nQ4wF5S?vlLsi$_!CwM=2f2w%GiobkLI3=e zsW%{?{BXzWaou44l53J(K&j;8&-&iF$G871Z9|Wk(XUcX=1Zw4V90F2@@>k$7F4e_ z{_31*$hBF%j`y+24km9AtWTp+)0Uj$S5&tu4OTT3muwxSkCQGDg+QXmW_`JHRte8m zGGF%QXK2;>OuWVTWc&8e`tLWhK%c&8w9xS&4ycJb)|PXzXT;~KahgmIH74sruylCc z`p{JAMjUH1kZ_T=aGyc+AZ4m^fosYy))uEI%~Q0XXR7PsR3;tU=xAUl_wp`?E=1H} zQj^s%y-lbOL{~!enh^tp+BdncEUl|iAC%0`fV!^ADM#ces4F(@E56Y8lSF_KkrSg@ zaSB6igJV|Pe5_G4#cv3NH4*Z}KnES+M4?w0$ie|IC%$MV7SId9x7H~Ss;~UOeeqw> zP`aTI_$>ewQH{{RXRQfnZHv)gDf%41FIXx41+nibNX1D50y>TO?GH!i=GG0sciXP^ z_7}v=7`QrjEtY8}&-DwUrw&i{USo&FZDD>J5NN+iMZ!XZL}pRm9}bEBcBr)UWg&3k zx3$T`Cg9c3%gBF?2V|7|XFNq95sHh9V#+Vz*Q`x`9U`ziemhixU$ezvdRrcaUQ7WT zEWZvU9+pc1VT~D3Po|9p3&00lU`|DRZwtf&5(oiJoOSPi4z`B=esE}br@(?SipCp& zUjs`|m4|_Oe)|#yy|$vt}{eT6K6u%v-XRz1o zgrh z4fSOXJvs#QpZR#s33Z3WAJlZ8l{0lDQ41TaEPO}SeI)vN9YY!4(t#E=6vCQ*Y~F-# zac-6x#u{X|+bX&iU=L9TlFcaDw}%((7I&vjr@yTix%;l`ty`kPP*6au@E4+_AvY*9 zy$xBEFeRXb%NY(fb>1HhS9qQ862^O^aPvuqk~=O$D!%J(RCskNe!XDIFbi~pgWbDn zw!JR0MNzr*Gikcd!fVqeN$%H!e&l4&m9q+smCm$RzG~@xMQifR4b;ksW5JyDf4{p+H;H!wGiIAQG3pDk6OEs z2L#I(8ZPPL6k|2Tfo2i45b~BU`bz+QM?;9hyLV41*5HD%0G z4E1a-Xa-F`%&J;>;5cPF-}LM)HG5K}P_s58g0rz#OE2?YWAZsi4KbguNgo^>L%5RP ze{86kg@eKNPX*x_0_gw5K| zm%mxMxrrw_?&4}=(@R-)*1@NLKrj_Mh#oxz5--sef5S@fq z&f1k1NBtv@t+{%e2=Y4}vlf%Jts0^NgNRcJ_X#fscxFIN;f5xFb|L4(=-HXm1M@$y zr+Z~?xe>F}{JBqbtuAPG84ZOfX^1z6TwH)Hk;-5f?SgxV!bEZ%m^KWEmAetW^=wjF za2%gfL;cJ$W@>crum@*e+$yt$bRr^uDT$yyofrJs=^Th&pU37gkZ@-%7aBCTc5fk7 z-y^CbeXJbzW-T|FGs0cieNs7k^i*M3Cz83{QFn1_(0=(O%-e>PVV>uXpsM{aa@4LSyg57T~Ulic4*2?b+ z@K;OVPfPwE`vvIDeRFfLLdPGq`tRfU@u%?sBkd1*6wCwP-ruZ-|ESTSp`R{sm_OJD zf4bALcshtcMWy-NJB~A8qU$=T%{P46=xbLy&?VPaBY-G;iQlLn9t3=KV{??kSVI-2 zZjD5>q}MB)YffchM^ZCo!PU^Z-Rgy@am-HM%Jsa^i!4uZN4>uyRm-X~cx_W9^=sQc}Ties;~g^8W!TGu7Ul|s99648`+fG>~$Ja!Su z3(~BadQdl|aw_5JgzyQsA4r4rTZg96IMftt_z#szk39D6Pt@;HN3cZTjruVOue`Eebn z%Gx;MWh5c%Mt`F+MPg4w{Z+OnnYX$4n(8$a!hXnIircQluJ=@y_b%H{YhkD#Xwn-% z_`Tc6XpC$P#dqRVfVTyVTPb9pzCehpo1a#CO=ziGgXtv?iiadkmP=goc>p5I@RS$b z;m`RnAkmatp|slq+ULg6bczW1r7& z!B_S8iMPvCrzq{HnS8i+^If)eTVosm(G-c~=V&&tZAx<4stJT#Hcg+v!{r}?o?Vsb zGQXx6S}3YE))LsP;bYLMX)f7%A>HQl4q?P5A5im+Cj+B>6NsdcD!*nhj$*OBD@`G% z27}({gBBDIJ3tVw0W|XYronm=9y+*7vN-cXN3Ag1aoGrDU%le8B8Y4Nv26tb*Mq|w z;w#2tA+9{HEWX6360?UB4qm;E9S7w8uNmB1tii*_h`ZN|^J8+GKLlXg}= zbefH>EDQk)dNY6{BPZbZB&wY$*8d_^v@J)LIzAJT-f9L_Lp}no1S_3QA~zO%eRYJj zKkPhk($&8~-fw>VZ&=EI03HlrW3^L-l!t!piWStazs*nua(AHl7EKKib+6|nUO2o{ z_exYxdYF6CrpO_+=NvZ5%U8$Va4skduWi_0n?Hat-Nph@8~GUbu0CYJp6YdSpvqkL z2Yt@`W&et6g0Ihy`C+A74x-6AMq=kB9n&t}E#@Y4l#W!w{N13)+Ou@btc_T~d6UUE z*(FO0ZqAYH3!00r)L#1Bqm$$#dsp_W9VPQ!LR+B11^Yvs2-GL#KLN+KUkjPSIxshZ z3KmP5Vd#mkGU@C_If9m*?&qS~K*tGQo2AbZbqhaKqR^x6wF%Awqi@rB9sMo0M;=G8 z6dx1SiR3gA)fxrl5*2XEk9KzUL}Y2#Jskd~J5bLz{iI2_@l5=^1#%rptR{ydz9*5g z-%Qb$mpg(D{iF(EWxO3>h3Otz02+xCq*Jx%1LiJ^G4}JUJIxAEV5d&jYK$MbKSB_`mFx-_UZ@FMaUsO@2_^@qio=4q z9u&V8I-az%(c~iFQ}gH*E7ejy;TpL$^=g-znA^AKb#4c^xek<)d+4KJ`Dft?@GMmP z5Fq@|g?6K3f)W&4SAY1dV1yrfoj>;DO-*NxV@h=IPx`D~SzX$dOrr(%FpE~qh$zer z{6WqWSf_rXhu_!{+d9aeq5Hir^Jlo8N0y@JF8LpxA^|(cEp~q~EC{SSduf-ebp$>b zUv4tLivg@sklM)OfZfVUWh(nny2W`-gAPc!{&*C4N9)r{KT>_t?R(t^fh$SRk3ElL zgBHM+1&OLrSI}-2(4K1!%*#;YAV~`vuQ9wAw$JT|N83! 
z_@xrBLz1>k36vioH{XqF*$)2{2QN=@__U07V0|xbB|_Q{fKMr8xFvegP8ba|cqMo5gP(Kmk^%YCnog4eBx3(u8n12`Q@#W0Bhh} znzZugTyL9BU7lkhJ-705m%T9n5pm#qwMe4Lz@)7JUteI`erTWlZmN98*nvtsMQ@~q zVQJlU&NHygDhRo`00m&X#I(ey5qc2nvQn0 z;KRL}5P*~mzhTO+*TyUuqKy4Ry&qF^M~D^aZZGeShoLX+ktBJj`|8kljzw2*egn&pC_uHgk@e&Uc0X^h+>8geOHZX)Oi%D z(^exTKy9k9|6YH-(I&mjq_q=--8H3J4hh$IEuX_|b9UFJD9W(Tkr(q!c&3LHtU4&4 zF4wFUC^j_L*jAjcYO3|gO$Pw;VQLQ?N9<5#Dgfs!$z!7p3R45vV6ME_xjRL)(eeJPd4H zI?!fnLLXFI8zxS&Q}HYw$|`ypt(XmSsZlFTlc(XmOiNE08|%PH+0W10MG&kM>7Be& zFE|Up5cnTL?u4~J-fL6=9__&EA)yE+*zQKq^&VzV7y7RcJ(>qgqNzQ#9r!p5WCbk$ z-9xi>j|mIKw##LId5g;5-lCJ4_V06h!Gm)l_g@gkn8^%yyBBbps@JoPQDb(cGZz8A zg^KzG@ePBl1B_K`;H-^7{_)(ZTq?jIV(>q$;k|guF(NY_^DMQ=;~&Qz#QCSW%K!HY z{9#I2=J|OHFJb09`VaFbuLX+Az&^7O_7sBf9VkJZ++`b4X|9qN)!&}P94g+ynK1vZ zX2&LV-H2=bRcJ^S)k5I)-0vYJ0j1GYPnA`lE*o?7q8amEHgy)%tm1x{dwg;T9W zi*LV;85Qwa>JzbYW6cf2AWafkNQk`Ehcy9v|b;HF#21JnS z^`a~8N)Aig9Mdwt$g>HP*N*0X?u=**Auq-mZtXLaW$F)H`1Df}_kr)ospc9V>SxDu z50m%0_|ATNMJU1VfHkPGv4*=skDR5V;umaIPYn9FRW#gy=kI=qmb7?smU=Df%~zr+ zuy_a2afW+9&8^^PrUK2nf1-y6~>W-78!tZb{fMQnOLu=}rzy#d!#?m9v{) zOUHyt=)<(ki}Fq>I9F6()++r`LsH7~8fo(G?^~$q9~?yI%_=-@z6oFWZz$VekE7F7 zAw6d|%&;H1=^i5kBZ1WrB3Q#cpR!R|aZ9JQ@AH3}&7}qpF*DJxdKfA=F;Ic;Tkd4{ zS*;tlLLQ=)OO-P-S=~PmVdjSyX-Ags_9=Wfp%~*n;9a?|b8zNVo%V+;rSo?BA2U|f zM5J0?vkcYbb&5>6e|mY~bnrtPemyJGg;O7{o#Ej+rttmPF;8BrtW$Vy4dbncO{%4S z)=h2aJr!=rC9i39guM?>9#S%PVK|M*bBVrY1$z9XnEWiAZ6*Tq8PXd20S{y7jw|(W zu_Dx5WjTyoSs0pK@=DXadcvRUiv#Z~y{I*|qQf}7m zVAg(B9JhS!7B}z??|(VRVa-zh-TkC4L0l}*0Dzn%mNGNn;yxvTip{uIJsLXI;gAu( z!d`?i!@3YZ@w$aB+{s@P>6$Lh*u;GHc4xW#xBBIt?SIThR9xH0{|7wtZmc=qYRQN`}=@*d>j z0mt0ukQ!zFKsLG{-6kbL@1SYPnoDn3tt&30xu)E!omk$vU`&J8F`_V###M{$1mU@m zJFX8jOHO_d7SOn#T2pPEyd#hEap%so{?hn$+@ps7vq}41k=6bUt6R4`z1pJx^@r**j zuQC7_-G7sHKrZI~K_EXZ1hZ+zRII;qBoNda+`J`rPBHZM*3Yp4HGGBE7q%MQ>c&00 z`u$%D0N>6@{splv+V%tg6=x6hL?H||bFVE_0BD)TLFfojjO*elxh!G7-{rRL?0My1 z1+U9L%k4iYD+h~T6@g(cwfbM8_crEPmDR5=qxvU}p$Y-XYe@XpdAZDZ6Q*v|pB_9k z>+$m+}iq@jHTb++>L+Rs&0-H+bb7_WOHDU;@4rA;;l=f&bK^$sl zk`T;QOYKb!pv}I=5l2_^)@@e}7@(g2Z65Xv$Td6y!1q3`BIpuWe%)MXu%``3h?CVA zmcG-_oD@8S6AKHl$5_JFH|4so8?8`yKg;2Yb)MUzt=>y~`H z9QiX%D&T*+sur&uHXDpE)Mw?mg-Mcj%o6C8MX+B(>1ad6R+$(l9DaZ7tiqN%dvK-w5)C+@_5@QlAFvk0#p z273e;Es-f6B&r#DSf`y>pv2y)1RWk0&(l$BcFV)oQ`PbyTb|&{RRMAbDk3=nk_bjn#hc<)L*)OlrzvqA%0yYxoXVS+t8SFh z(LL#u9_y^*J8cbs%jg82dKzoZaDs>Sga01DZRytPjKb}hbsjq%Go&<1jI#?<$rep_ zlw9xWfMX!g$%TR%KfBivv=2H%{((;1n z2ld^u7gD6wT<-2`mf&fSus4NIFoEvd406aKUw9Fzc4Lb|B&_Vvo{x*oil!xRCvDE^ zI(jb6?V*w+ni$$UkXg2depH`jLkEEWfXRhq3gF!c zR`!CTt3di8H-UFn*1`|&9c$lrpLb}v8VnI^=BNh%FUVk=N6)9wP?YG>2-U@(;oWA( zo2ozMtogoBsxej0J0e#sE9C2X??B)U0gkXY2)N?gV`9=Gq?Eyw#!%&C9H@v?uB&u@|o00zgd0Ur8@V=n5McI&+^x)3uEaB_@zhbUE&45(dAZR zZRxx}b(P*k4a%jv<|bZEZ4O5rICnkqyV%2q>gCA7OnUge>3CfGc%`200m+^PcZPAO z1>Kl+k9nP85jv^iUjQMEng_qu+6q*^t%}Y!yBe@V@dL%#=}!pg>8^yM4xsWWqT(k{!t9RB(9ZCputNj@2WqcILE0``|79OHzSCVX0Ql*-$jqp>v z$a@HVRiSkG76hCI&PI4ZH>O;P$PIHsj>pSX8826Sj8?7g$>*6ODo$T-Wm~{taGd}u zy9Q`iv5r#-dcfbR>1lPL80@3qL zG~Hfa|3zJQzw5=8&EoJ8GOEHx#Nt0c|WKT%;5Hq$UCNq{X%*@sA^!eVO`?%kQv_R@}7(Y)?o-)u%sHAV)TKBhMBIM(&rIYYPOQ-VYKJvNrz zOF#lK)x(rvP(gL&+GETfy5RG9+mL4${mJGFSDxI1r4N*jyq;q`uR_*&^?^!0f_|20 zo!U}mYfAQ_=#8sB$=l;!M89eVY3x9@WeC0QJcX=9OK#Gh^R4c|&kfOMw3X@Q3Q9>`&o4%OAqSooFrZpIJO91eAJN~rO!qE6whTS zdhh42PalsTAqXHz^F13nnQiDz@+6*RP%+8|hagF9OB)hM55y*wf`=0Zy;!>!n6&@! 
z*BCZu3J05_AoCFr)FmY8x9PWa8j?CuNI}u;DT@SQrfFWs(MmF_e(KxLQ^UIg%JhRI zvzLSqiceg=DcS2LqY3T13iJ80%6FE|8B)9&U7O*cxP*(wf7qb9DZ&QVJ!Id_SlAk@ zfefG+{?UuU3=H#PzxFmJ`l}mVbGqBpva0_8jjDXt`}`!`;0R%vJSsfvBN-cHc`f~7 z`posh&F7%l4O=5gH3-TbK&nMbMpn8-6I{_e80ji1sKG`lVhpuk_`NwB)$1P|?5Wr% zRe}4>dQ|#|ph>Op?%dksb_3NfK{_M9^)!a;jG*+K%bY^Cu+Vy?gUYI+nypdI7eX1= zejgxS8Nd{dYhkbN+;!8H6vHj`>JVbkQV_n~zz=RPf%LR970e$_gm(p5{b9SXvMtn( z=lo{hQs-}uE`mo=AR#%)6Yd97%>-Qlh>@o-7l9&Tnt&*u89oqLd%4I1@2(NmO6Yr= zXZ|VWfva`Yl{a5aqHo>5cEdsH;#R&r7QXUV5kG)$5eE=ZuM8sH2qVC+UtwOSR^1O& zOswdgfLuL9{JW&`9)-PWzP+x}GJPvXz|r8_cMfio}p`B);%PvIGiDtbLXI@qPXzxsn+N5O~Q-##T)Llg5a@11#{@@>~hu5o@- za+@g?q+WM`L{0c1cobO)&_jD#KuaUQoJQWS-7q(vi`w}uV`$05Xe4GS>Out4JECLtW@{y|jUoCMBBkI4a7#EY0*QTs+W;g6 zk=@n{S)o*@ojVogDwC>5sd`j&sxai$rHy=>WJPi_vmp41#iOPZl0}#PVcSqsXr{y( zLyx#nUfoEz-yqs|_ai;hzvR-&m1oa?lYOcTb}!t^Q@&hv5__MO36W?lb#to2+X#j; z=AejcvhR^Mwe1g-1I>z~ zfFHk@`yHCkHO4Oxrw}d5>=L>M5+f$_be#^a3rY#+dr>Z0+n$;YSl(v`l#NnEx5h@j9ATkP9n)qs6lp#lkC&V?nwD%ep5T0Jx9 zmcZa-03WU+ZQD9akiyA48x8Mv0WBJelwuGFD}vxU6+}4E?|^auTJPBgKA__h;nfB9daHymZ&SEdCQV@v)t&=ZON&9jzw zRqS}CP0h`wO`nLx*Vt~6gtx9&%aCON9)$pQUeW}fF0~$h1OaTFJ33PPC_N(3=g1(_ z<8@H^Ir!^)1d?K?x8|U1-~VE8)#x!^QOV3XHb~q#@#2ki7yqNz_I*0^NEh{dJAoHT zDn(MRJvF7vcvnX^Y-e&%7NLFGXPG()d6cE7xb*7EN{-~pvj(~H`LAnF>vBgqiQO}Z z6Rm}(hc_1Kd#vkNz&j3@=4p=XEzQ6h)5g?n(jF_d06J)%!xDK zy`oRHu^i)yomSCYr}t5%!!F3(nT>xtGxY0(b-+rf@~OKTKo`U7vAul=A-Eq^3B_v3 z1^cgn?FgfB{M7j>!SQ|PEdidzc~AT&llyc_zmz)Re; zbzME6@7DGC2I2JNs(0KcPaoRXjg1n~Fk@B6ZB&h9>9)hJqE9a z$--l;f7rIlUs~c@ujKouD?!iZ|9PhUKRvUVaTNr`QLTT!6d>z=k{WqaB@0{>n;<^*0*5q99emo!*Gmx(%O4%F?!7YS-S;5-n>UymA8a@nUgzM+ukd?I2QiH zCcmon;b3uZg5%sq+*A^~QMAhF#)Zt25%%~xTVvFhFFjx8I6!(l`W|*S=G1V5GSg!4 za4$xE@~QBeq6tiz`irI2NHVbLl@@nO5L;?2EY~0jwl3?xoOufzKPo7KraaV7^P}_3 z8SYFk>2VXEi&d*}x*0Xgf4Tiu%5{?7{Ol@!myYBTj;>KYg^KwNPA%dh2&K5_C8@G9 zI+41^q|ZNh-)R4QfUa8#@$5OT%&JF^bXecScXKwcp5-&3G{m?};`soM;Eh5Z@sc6cGC?OX9oU&YPFb zntbmI`88B!RBlWRQ={9%9mGmFSFVoAM}9&bwTqtZ#>FWuPW3QONS=H}6P(gM3trMy z2aE;C12diIIL3LVJyj--d91e~?yYCeVfCzxtgoGOdP9MJEh~0und`AP{ehozyzvLY zcC)t|`=v%m7PUMHv;cBy|M+|VrA~GP^C`3EJ~a7=;6dEff=2fM^AIHg^9#^ zk3Vd$-s>~zsHBZ_utYCB0H$c?)*9UoY}H|GKVcPAbt;5lA)yNBpTAeG539O~^};V_ z5i46ySEh%H$^2nEcYMoSiNeC1TP>Qo?X2enN=^NiojcDee$Vm(xXwo+nxcA(V3=^n#8vcr++dB>u?WdAT5Wkdwq0knZhJp9< z?=ybi&OG(^la&FqEEy=0{(s#Nh3Q*~MGn|+J-MCP3?GUHJM&j=Bw|H-$3JEW{R9JX zS7s_HyZ`GSyq54K0?^;mj%Moc0vG%F!7cN}0|pwwFahT4zed{kZ^HiCV|#h?Z)qO^ zVdvo{+$uXCV4k{vgq4Z(hWNT!`J|+=3)8oRin_dC(^IA`?x}mzRt#QE4@gw5As0am z%u&p$MUrxe2UL*&{L;#30r8Nyt4=>q;;9Q2X;fjS*)2VF^0?fK%gy0rfL(4pNlYYgXqf?)d#eRMC;WW>XxZ^EVnruT_vj^Fc^m!8Wyc-))*daj z&f>2r%@gFG^+|ElP;L_6^lcRzZ-kSL^i+QU`lUzJ8%BvW3G&DY%*cw&vyFUE zth)bYhuFgl^10l_a$F@JZ3q-Q|kBleeAuIkdEO+Ky{K!asvtc=piDD07Wxk%p|;tIdWKnr2+H zvzFx1H(6JnegEXsV(i+@JL&sl_x5MSQRdqYBJ|DoZZ+vyFVBJi#^6=zByzD*^e#Hd z@gOJ6bR1q2MRgW3#lgNIRZO!18N~I;B-XyrYg5Q*WxdcX%n-@T>hwuaB$1qHsK_qD zR;1JC;h9K*iqNmy@8-tJg|33!ji!0%*KMtepiitXHP-eLn=NR6-GAm~EXs4|QQ;ne zinLvF9vbGPuiJ)WH-}*!gDbc)ee#aDub5EkH4r249BH_OF9F`t0DeOiKgtJp&cz{D zBMk8PNkk&{AuPG+1zJ1^+u$>>0lbB8!$@VW{A-o||Mnf3-b2^)f-mDpZeS^b!r*-M zHN1jPo^=rf>Kn_>M0RNL{Z@mBacyg`m$L|Ku8}|*UQ3$k`3=3~WW3d6USZ~)O02jO zlzT>yp~Fg19!g2;yW_ss`V*ouiQG=%q$$Lm!%dgWjz6c$)!HG-M(e*Ul|H;YRu%R= zcs_ouqx}L3^`j1isg3vtC?WH1ZTTQO1M2a9X0xZ7ovgA}gc4DubEUTm{p~~5w5DDN z_Z_EPhdwXvKAhf~-P(qPNvNr)g2Hs>r4z;Z&r*GDi^rvNhO0i{9wR6$1YKsG#jyr4 z4;VYOp6uYfZ|iac{LG|HY4PuNs#e8yoR+(_jgO$1siQqCg8X>Anmr zj1nd3K}FbA)bMZBBGphkVN!3&q`GavkLs$ZAL{x6mDV?1t~<<}o^gtx1K*890^A^f z9=Wpwx>dqASsh)0i(JfDmQsId`1VJ2cE-^U$yUG4n9Jo|zxj=DtSt41%#!GUy<1eF 
ziT2QPrj+Z(O|mP#d#2toy8kK_r}QGapj2$_Sg5RJ=Fu~%xw7Lucm->_55*-t^9SOs zS8}Hyy6h5DAak0taiM3lng)uixj(@%gS$po)j9_49VWi3m9d8%*`}&lGI{$~{=RFk z56GOk$aqQugz#e@VR_a)X;u|{>>Cg01+mj)X?Ij_R8v1m8kAt#dX_pRWdn|~i+7#R zIQz{+sd4COxVNp=ut(lhZSm^auHx2bI6+&B)HMExSNW%F+OF7rR{Wj$av5p%hfPF- zSAL@}iADj!s1ItNjK@{t!nF_31NfrNbdNG^F)t0#uKi9&v;5$+RPBKsO9s>2JJ%lK zL0inXVOl8#LB>&(nx|3skA$93D6>U%d$R7Y!n82c&xV&H$Z2l50ZWx762dfj{V!L#7p9!_4cv8yWAGDAk&2M^{=52d-L zG=#TLL>`=0P0X>p`U3v*efQ4Fz&wErLX{9JgXKeby$M?&NyF$5j2qAhdT3!2 zkDpp3vVP;~xF0>#!WiU?(?dX9O;|0!tTWDHyFeMA;Od1G8o`{wS!8Q@Plf5X)<)Cv znd`G^JXc<1dv`5P=)PNS&)j}vf6C5lsjEkWggyfPqDjKyxW`nda#WwExlBCM=v5Hj z4DKV;5G1bv+`88HHG#{D?HL~wr^iI@SnAv~Z5Yl8dhtJrW>7ys*uTRvAeDA9%azeH z z^$hpxpljx6O6KT2zL3kYXa8cqI`e$T%ZwyXHaQg|eb7x*kuB%GZTI%1e8r<|iqgJD z!}&+q1S+C3Q_mrUg3{8!%WlIq->Nr_UF)TX<$NZ5Jx}QQ&;k-MkMZBU;I?i*kmOt} zX6^7=g+Wy47u~bswjsF+DjO-&K;LeIaB%=&1LB>T0vqi#5c;I*(F;Lf8ucD@DeSkV zbb@K#1!a(1%oCstVvC1!l#3ZztWs#Ew?Pho8oqc7&F>gwo`f33`IugAli>9f{Nq<^ zNt}ckCu=6i6kUp8>8=`qN5)pOf-~Vv_SJTQXn_jrRpX>pRaE}=l%xp|RtP9rPAod42H9M+)^{G>w=FuLr z>{m|htf{DrkqLMH63`(UWhKl;insEA|6L@C*2mfuqkkH_>-vr?H!@Oh9?-Y&x?ygn znu!GMnJiEP4nqM8l4@;C0%;SO#S>KaHe3CB%rmcl;a@ETR?!Z;-n{#%!0X8Y+2#|Z z?Oi8LFVUUgJ>?Wml64bq#IHGIsR>;F(^0Ge*YdUTXxo!QJKIZCQKJmm9xh2ih36#d zBi_^aK@>Q`Zp(TS{Sh4ns~~s*RPj4i@gwVG;)oT_gJC-IGWE*}req6-7>5n?nLC5; zp*&vvQJ@_pb2>EU)P`gA2JZfB@X|r69(m{G68&|v2Ty5t28~{>uZBp(Q6ycbSzqQ+DG5V>(@jj-HovaJ0z0S!k37v&`$Lx zp*ZPxpdE2fZZ`C)4wZix5E1?zo_!v5rfcSLGyir>S33cr0rx>GDH<#VvO{6v7m2fYDC_s<)ro8mpL(n>!2{V0of ztb04=8TtejS99%#JImv9iS66MV}&Nx*e}MaxdFd3e$xWb7dEZtyT#=bx$6MBH$257e1r}R>myWe7SW2nl4S`q&rc&R zK>+)W^a6X5^|H=y%5Q8Tw#;o+E_ z=Tg*E-BHip+?}i+eW}KJ#y+VdWLV$Fdg|@fxV8(s4L|q$9Df>TF(>;w_w3W&4Ndov z&FbM;jb3oVldq2>Z(2Biu0+~+J2x#quUon1xmz2N?F(ST?HNgeTM+`#e7PllX*(eT zl*FOsGPlKcp#Mp+$kmb8!|V4^9q=LO(-h$~Ga0s^F95i8A9W_sWkVfZ0qb@|3h46r zk6CTd-SXdm7h8|R$UhHSXv)HcYuVh&thFllP)nGtdzLq}zOTZ(ZK5k)ho%OrKszc4 zpp4fj4v{KGFXqT(84xBfV|2sR%^xR}_~z-~9SXdoq-j6jrzzLQ^@&&0nsI)316ZfV zX+oJBQ!WXBGsZCte-2GprBk71%6H>K2dxJ0Udd3ubfWzL;pUx?j60@9Af0t(=0j!5 z07~lB*OS5F=XA{T+yZ_-Za*_03zF7LbXh_>8BH5w)itPM2Q)7F{nEgGm=eEW9)7Ji zZ?DJh+f^An@@xSwb!b7=VoFoF;pgzHMihuCDAWV08&};}?`8&kr?QnNnK`?Ht6`S^ z07Tcnk^R0SJ#3=X?CK{wvQ( zyokTdJzCkwNJE&^s`Nc1Fx8|^^k^=80J*3G@=hcAHt$!I8$^O){P)OyV|A-ICgfS@ zd{>;U>@OAP&ONIaMu7SFw$gu-Mo{DNTi9(LLfTeE)W8<2T1;|8W#W_keXkr&pS>-U z*mp{N=sceoVis^|X|wVwl}AEhkO_98*tKzQh84DSE1OfYXZAD070(k{r-nmvQZ|MX zd>W!|YR-gmG9`4ka$`*(#t2XSi7Jmu_WLnU+^wa)WB*0o^PX8lV+$!KK8^z@Wqtdi zo_CxJ)lA_tjHBQRb&CE}4;hcQyE)w|l1+$IjS;&MS&$X}i~4olu&^d_H@x;Vb2z!fCPF(oo4k_LCb?`{$*FekkXpfqj?SG(Y=P;{JJ}mjr{KRmVI*K zznGl+LSfNHfPKAl60~g_!MkTEP?H%Q0g$Qq*yn3Kb!W+q9$zC%aUekwAd2vtydJplFamkmY=RY^K0U!GEDAB?0 zeE+dkMOqAarTbk8Y7pry#7iK3XyBrUk{ATKe0KXoSIXjMLQ@dNOQhiGD?R0l=ekQ3 zl%$=}@?tgYV(dfrkbqJ_O?_Isev6T`ljMMG$t*{G2dLnHJ)+E{b=;f$+qkttCrR^g z_|)lKaX~?g+iZ9M0r}6(F7m?=>5J*cMdM*sQr0k>`fWHFRPvzZcw97!c6AZC+WF?n zPIaa*NQ%h*qT@haQUWY=V-fFLV4DB@JFEY-#DIQhf z)^3N>Fr!NR8otnb*x#<($;?~S_{8W(-rJ|gRr3U6k6#N9D8}w%o}h!J%>jbWh^G)K zPa`wdKQz4GK^0*8mghz5QVOEpP8iq8<;1Eh*4Lm<{c!kAPF3yPn9he=aTR=V5D8}u zRimP`#;qskBAG6}li7}=N64)7Kwmc}RqHb8OK+^PH|;8(^OWiGpV6FVXMi9r;tUvP z9+6FoZu1IETNPuNH{T#cT@?zW@B z#b6f#krtp%_yPb1&?B8jtfvH5vhre9Sb@xd+U}-GS~2GHj_S;md+ML*Y}+0rY;PBN zEz3?G1(?tT9wg}%j-msBO@XVOp@TuviH!5~RLA}j%>Kgef}VU2K5oNTA6Hl}1m{xD zNQj-eJ-hRVRa4n<9@Kk(<}awxpFkqoL`Ck43aOjH(>egG1+Y2%{ zZ>po2P4QZClieC)hE*6+OIA9QUz^HmuMcF;c{U6lM!DJ(1$tbNQDAL8#R2MSliwIK znWpBVk(hpq!PTc9(AApiBY}1!_8qdCcYIW<%FkcBw<$)`YhK;h-v)hTqNgDcIe z7}^QqN&l|7R!vEBM)Vf4JL4*yK$iP9mfdiiP z{8=*gD-hm4bFfux`wt{+=dDBw`vc~}I*5maeT&GyZLg3IGB_!9ekH^r 
zCnCJ56k+P-l)I^rkan>?Ls!g(Rd>nNIsEacF1u9}n7v=LGo+nSDw$BAearm56Tn&< zd}KM&UIs+qYaw*L2Y3ux&zQZb5L6^Mlb)RpC}+aHs?9JWR7zItLW2S9e#HvX_g zAMOdhqtqtKY>5UKxJ4$lLS1GT=wFl|(emLmW^GP>=f5KcY|ps8ivN^ti?(vYTL$t< zO62B^!uOd%C7ImnqR9}ZG=y93Qk-7uzu?$i4jO_}mjy9SLfoS;_uM$`CeHWq>#v%-KNS{Dy;Conxk#pGq5q%j`jX;&D~>kWZT*|{Gemo zwr$(CZL8x>(j8kJ8=a1At7F@??c}N6d!O^1v(I_&`}z3RSoNW1%~?ycYK&TAUH_jV zo5h>Y0n=ymX)hH7NaN;}Ov@Ywxz80a-lyjtZS1c3j%O{Q=?8i>HD1nUL-nffy3Cv5 zzjkJqm`iUzS(2085yCI*tbz?GB*Gb~QDCU^ne|B5Orhw9<%92z_8_@W+8{{q3NgS* z+zD0E;nuJC=eXG;>`>0;TVL`L4`N2!1Ht28q zh@hq`Y_5Fel2NLIbsU29;;K3LliW67r6RSY3!=a=6S)o#^wN)J+WUP-y0f2piI+ch;PCUC3^4A`SJ#gDjUyoG+SEKW zZXoY}X1t3Y;nt|U;A?Iq21R*e*1urUS*Q3(Tgd zI%eIMp!)>)hl0nBYK}C=3ZnE*a!SH+_7)~%=;Q4~JTTynE=T-Hy|K(19#{hk)R93z zS*m@_q%wz&)W$$^e@P>D?;dW~`X%b#VOyn!;*jYD=GI{LUBljbxmgH0(T?`sN zHq@d{(~cVaWtM>M)wEXjpN3U|u!Z zcPH371*Pv`xRmsaTANLqzlghlSj6^bcMhw>KGulyxnVu5IvtT30j6>D8rkRbi1j{U zOrxeLvtME?uPm%L6INg&p0&dRt$p_{0^A13@b0SL59ez@l zBTeLbaPf-Z{2~_Bv?n+FEi}@Q_6r|19X`U}n9Z2%cCO25*$0`qwazK~tVc!0gL*qZJ%|CAtnD)vNL(S|t15DH$9 znv%@0GT^3X_17Gtt*RLV=5|<&vemgbSYnj#1aS5@fnalHicUZD&-&*UYPT|WkcD+M z^jWl(l$Ep?q$#L?f_y49O+MO!i7x=8lXRf`*sU=RMGL zj3HF#q|AacdAg`jP67z6B?!uBpe2?)O#Y-|B72i)E;<#*C&Oyu+dP8Z5e|=6yM5yU z-tX*-o)$J$cg*JZy4wtEu3ihf82W7vN6TB4os1X3a5FN}Z6KOw{SfqP&esrrzFy9! zCt;lJCU!EMah9P59L?9;`z`{{FM|($UUSUuK_t*+(5u=|266daICphYR!Z0lPaT^t z=KDquxuYnlGf^#tSzn3`TJ;+B1SsLG9}e#i@2Yba2%`JGt*dgDs5n*A!@8BM> E zKdnp!W5G!4mn+;eW7lhzzk4||s)>}Can3~57*KDOl%TpK;@^62+&{f?zY)Dk#E8%> zpK1S^ON6Nh(L55M(mU4Fv4K}B{5of~>Sec-8>?~Ev4z$a(Xt8Yo1mX?Lx*-2sF;Cm9@lOLpQ#kX4er`7wJZ zhbeBSEr$1<{}lLAj{Zl9JCt8-10zSr!pdW-9H0_?J*1O!7D`OlK=eK%9t4Q<)8@(Q z@Yo(4VHkSETMb!4<>1hf-*O$li<6D>$iRgeb8##nIF^%yyKFD|xn&m{F5QDiyVjGr zc^E@ZzJZ>>J!l%kA)U9cc%Ry6b6${{|LMf01y}t$`sNB)o+su-UGV$yPw-XCU>HmU zU@(gcM}w`S;ZU`TbX^Wod&-ToWi@(FR{XHINRboCU}in6C~;6v?ju9vlt4?_blMoQ z9k$d_+7eUx!nNhSfOAs~^MpF7R;uRW8UYqbh{BcnMhXZT2$mO`PKc~q!F{Zc%GV;6 zT)b)dv+)AKdd`>F^j$~N6iCl+q*^;5J(Un%Er2NJ%ka8J9|2wobnJ6$fIa!Vr*WsFl4A@{okXD_0*gt8&7y z5&ML|2Ms^bFOEg!UduKsudA&a@N0H7>gwn7flzdB5p%}j#5@7SFg-c^8!z~4lY zB$Gu8yQlj#ex}Vhr7WAq+Nsgx8jC~7f3iu0T4f^J{G{Puo!(h(e$i;TCc$ymN~+85 zu(BKc_*!pJD8ev9uKnm^_VSYO!22@1l?!nJ@vaT|)j5A(KHj(`FH1E{qg1n4dkaXh z{5niOLpws$Eh(HTstNBt*!{{1Hc}OxCj`MBSD4m`q#Pex{Tn?x!sdv6Q+1^BMU~#y z=z23BS$rEVPoX`y-c!s4_Zyh6OzUHb4Bb$#B;sEX4zo0_G$ffK3rbU_y-N+^Wvx1& znDyzNku_(%5)CxouIZAFsey3Zl{e+*ikW2nu1%d5+J^gEMIN}$WgyRlTdkrkLaKTT znhgq$j?awVFD~v+?oS?uUTmMN+iAPi8}ppS!k=7|@`2a%ki1xP@X%bP8L<}$JgB=s z8Z=kMRP-FZ$5quA?c(KaVvTmO3X`Kw14?fwUo2D9e~1^Cr6i!u+Sd=cYn8M9f@3G~ z^@X!uUe%S=(GIsNQ8lj3Xy;9|)m;BoBv*>WIV|W!B<_6AU z$C-=N@7UKR-|ODFIV`F<3F?v6ghZ7riBDhuShf!@tcm4_WUr9vy&e)As4#Pyt&3+< z1sb%%Cx(6Ag#!5et<+1Wq$toEmh%|z7TDl*TMVVMTyp#t7QvkWr>Ug1nP{^l8v~Zy z8g=8P=QRuhoJ6f!v*t~@!AIVHA*QiTLji#`xRQpdSnpwYlq(~6eiN5gZ9+&rOfB?# z3l}ypL|5=PMUVh}hb|(>;*S!=Sa7_xjgJrfPs=*fkmnocv0)9>#Y6B5^i2(Gt>zPr zKdOb+HJ03I@4aCNZ)+gcKE!;YCDG!_#6cUA~l2X0zODB(WPiU z#^lS_E}zCyiw)L>kL2WRjIc;{{XnhVR7(Cclz{nPtMmVFD8c_rfQHonV}J(hpK%$g z4r=yRCJv@%ge+|TV}J%bAiCn80UE!| zv0kBno34L^YXCmuxAFQrfaCXP{0`{&SKsxIZTrY!XhH8Nt<>YV>eUPmDYf2cPMsgJT*;h!1)f6)GxV4u(0z>18^aTL)O( zuH`25o#S~UDcm1kjw~q#PIy1!Y9k|V#AcdE`2-&FGyNJYoC>4X36SrMn%7wFDt8v_ z&}+TCTr(i8nxdJqBG)3s>hswOlV1M^&s$Q#8C6Qd1NJ zTx5r~%)^GPWGo#Ea14L&!6VDqep9yq-9nvBgTkQQR7U~pp<2HvOLgo9QJG6Y^w zN{1?PgnK~*26c=oS05^IjMD#J20fQat%YWO*9mQ^HaV|a8M-&GW2OHDBL~$@PvjblJZ&4h7IW61A)6HhrYIiRZ`wdN$9_a%il z(`Vi9lrh@O56ibNLm`x<8WVccwzrko(LR?oXk$m8wG~3o6vS8jB&n92hY&}jJ}{Le zky3?1n-oWp?F9FG3&=;Syn#Lr>THt9QY7@plpVhancUse#b~2v=ONlt5gf zRuMRw{1+&#CJvYzcc?GUAh95jq>%tL6p9g|3JZXSPQP5G75dlv`}!vUp-g@98_*-e 
zZ0+3n%y**eqgJh-VS{SHaoj(i3MbBQR1>EW#nBSQ*aAnzR>FX3vik;()m)U*gobcY z#~t1(@$!j){HV7pnW*p-$Vo}MXIF?QffPu#OYsg{?$ULCAiVJmMN}A!T#-3DP-P9| z%&{p#LPsk+lMo--+@KmiKj_Ytd#!&(++dWSoCoLy2;>EG!C=W?Nc(f*46YTKA&wyd zfTR7?)fc_Z{q=;AM^^WgMETRtPouy9u@*D~C}l}P678Po0F!UegUrABxB1GDGH(#u zvIx<|Z%9zkvZRA&w%70NN~)hI-UDXUM<7jtZ{URCUAq$ky!jJT9UJ#9GFC!74w@PcZKD)O7h`H2-0J;5KlN5Lj?ae{TO#c(+Y9y;DO8TUn2~nD%ac(l3gyZujw7oxk)aZ#0%Z z4xdH_U8dfly>m%8WW`thjd@MIfGQGiUE)_{1e`HNsFY7Bpo_4K)xNZqCSMl6h46~# zcjLp?!e2LM35eq-g*9-N_5b<`td`X$t_uk}#O|gOX;oeRkTaODnCT9@BXyc22LN7M z?eQ41`YN;m?rZf^M8Fj(yA3>`fm{wWxFCY>()CNH<5B@lG52zR1@QO379!%-;6Ity zj_3eTp=$5?NB@AW`S!Xyt!3_@27XzcbgL7&IyXGK@|^PIsd-Mju^;8lb!z$Le=x5! zbD!S}G%LU?PiX4ysFs(7bxi8&{TudY)B)a$9_=&}ek;6IeU@AFetNVRHv=x%(T*{a zA}dH%j3D``h!6J+HkF{>UXS7YJgt;KaA6 ztO1d%fgN2*8ekWoHGdR;z}INqGiAX159U=BWFKfAL>@@?*-_q*GLlwoIHXC0KoeHW(-eYH?Z|^4X@gwC!gi0QmO*7B7cum93~clg3im6awy5umukpdftcZZ zFe_k17q41qt9^Fp0&qVB(TnEe%_%+jt;apfmirui+~>fL$TSM~kd5&Ux>skVH<&l|tEU02SBhizG`Ji&sj;1CmVD`@<(l?ih1PQBj#Y@(=E z8@o|OYh6d?G!Kqqx*<+0Wvcpym`apw=`XEhshHx zx3!v*0qd1r##1k0i4QgoLzNZQ6B1wfzkIJd^woYne)83)63hl6J>0P&t3348zpB6H z4^w(Jsi~bJ!+>g;BpjM=HJ!;>7bEa&T`#0u+iP&qv!B@@#mt|2x%?F4gnM?f!x_$h zfOIX8KW4J+w9Hjww0?TLZ|SB|*Mc>f*yMZHBbayRtY(+J(1kM_h-lN|vo4%9VJFWj znqG>0lc#3=BKg+gXO%UUE+JmJJal?&b$3=c}6{?L&p1{-+m(gDx0U-{hn{MbbEju6kV7{_H!QJk=COv*6 zSU0?>H`mW#igwp&5%!*H-FX|yIFU_F`$IKMa@rkvV`qm%qvQu0F4Or|IjdpI z%ov+-FSIuFNwpQmkov|%xELRW$(Me23T3xq7y|iJxDiLoB)<7kzz>Rh;OVS%%;|8z z^#by>f(9jRuuW`u>4;>v;tWrFxWoL2-c;ftD6Vu14+4sc#+ z2HS=6B~@UQi zfOz_JHgNW&FT`CpqG(-&d(d82Z#QNa)H+%I2fkb;sg@}sR?O2Wsl%i_(NZhD7F3H= z&NPs6bW-82!mgNesQ8hKZLw)`IGJ1v{T=&|KLiI}{u|!;YvPvy`<{Ar51}%tC?fgE zy@QP9UB5vCt620YDOYfuuA~}>Ks3=`I3b&93g`jr?mI^z-QumOnRux9NZF_pDbDc{ zD)MELk*gA4M6&YXM^y{yZY^%{o;lc)#*7}SV}cCAV7A6)0b{u+tK@E=2HSyG1^fLutZ;n-JSdigw=XF@U3*fxAX5;m_h5qThW+7q(L!$|Qt3u6RCbaFYmw7D3 zL`4XvnrzlwrnXCHkWr9+DNG=5eX^Lms?78)a5uczB9*e2wU_%$>i*+hnz=f765p|# zn(?{03EiZ<@$1g#*v6lDbuiNbuBh#w5>rJmX7+Bi6|d2~7~abH?*%3;gbLk{pdy#L zYwnl~x`Jz%Y>_KQG+A`uO-|_>654y^Va&V9n*wExav0?;QPYCH1Ekl|SVVmkeq?_1 z4z%%Y=`eLB+WjylCi-ZUgV54Gh`!&>>(X@TAI@vok-AFq5kZx;cn%pbU0M9IrXm8TBE_32&`tI3P7cZ*%W-+wJ9q&9@q8cJndnbj7w4w zwgt|Mk{=|n=sP{7yP#W;Y!L`?ouTT-MSQ33!gx)aG24{IS)ze2Lj-|)x8gMsYAD8h zBn}`13Iz&+^s@Q=x86ixEFQ#7KV=_rnp1U$)=Bsl2XKZek9?efn-dZzaRx1RnNGuj zr{{hI{E&TS)*)?RVpx+(dE_+LkiRJ=G>g@-%y+Cj`sAHmMhqcF!$tdwpD!@@PAH40 zAgyvJeCm%gRBq~Nji*I27vV4sCYr+{8Q_6BeFmPZ8A)gWQ_PIRY{&8WL4Pn{s^`<} zr!KB!e}*3-!mAM{_JdGJxOI@;KI8?9IHpBb*eAp=q!dB3LIwMCyQs`GcbLr-_5Qk< zEfJ`~__mU3{uK5U*|B4$JbKw}S+w71p7`(Y$&}%EDCY{GwQ#(%<;-;aS~_w*1H;8K zIKBbXut<9a*M(ig7Z)(sQ{7oOy8B5kdJpa~YQ`8P8EnB}K&=&!a>dmbSqGY>Tp%%X z-L*D??tei%X42cbWf2a_ZJy6I$|& zQKJ@J%@S!CNWQ6}Tip*SxxO@HV_L5%v7k2G%!u&j12tAMRn@uIItAgqhcU4k1uyGE z&;HKa*~qNr<}6hu8cDG&!g}r!a{bEi5A}mxXR-M`!skb`MHUENMd!NdQa?81^ zB=<5`mf3%XK~OAYyE{_SHY4lB;IpUh-3{yMLfOSzuKJA`KWON8wisC^rrj#$L z3-)-z{#JqAx+5*n7dTXRvV9XVbTV3#^yNgPHhh2)TPzq>V)sG1>2EoaEKr`BUcrP$ z`9azicQeGLE;HHe>p_gK0E;`Q)yIo&6V4PpJt6`FQGRYbb;u0%&f< zNDy`j*3m9QlAIrFnW^iLsOIeX^P8nuRk;?5!_m3TP{z29_KWuT+Bzy2#l`MB?U3wX zwTmBp|3SGvC$W(4M{e_yWo0IIc@W_8ZzEe_bHnFl1!-{}3d+Oa2G*p@=yD8UST|e~ zA|VGh+T9L%#^jSTsA?cT^337`_f#{-LVc0Bw7_ZCxHxuAlDONfCkGC{$HSRSTpioI;uiFNW>`EP?pRi`VKMIAcQXR#gG;VS%uhECH88H zK{#WW;#`LKsLSW$$*k;(oe8hRje#@B8@x-6OP^aR&~)WYEvRY}L^VnY#KUbNEZ}E# z;Y_TC>LAT6lEp0z&M+7Y68mi;eamgx#j7ue*7heshG$q7WS{CaW~e_`R_SDrf3Y5B zUo*`ze;=-yzHWD`9<%bBbt=yv@k|~$e^zZ)_Z&NOa=4rIWcO;cC>!-(2$V!bhQjz| z_{)D4$sHJ3F*HLh=)5*fbR1FbRGcBwmC^M>|54N2QZA&=IrllV0)$SltWX85I23Yz 
zhWvXCaR79eyHd$^N5!nu*bhou?l?XA0=?o&_vJJ0>nr&X5sC4AXzmnA2Pq3t)6jU)1jGKa3=wB0vEP^5549VL-Ss#wp4qs(V)oDx zwVoU}DS-K`QUoJn__}Ac>;0zoldF*JvQg)h_xo*~Bvu)1EQWSkVpI?Mp4kzYN~5!K z^^h{CZYsA~E?usV$;Yp&s2SpAv5-SlLwLzNOLtDQge6-D`d)Jg&sOCP8+zY{lBKKw za&pKv`h*3j63dA$y(C?!co(^^#SwHfw46)Nx}XfT?X$t0X>vD)P4f^9g*f3-}-a6o-_AfEiOs7>5|D88Mkh zESWI-HZs6p$%nnWcnW2qVnxgLsmV!;ga8=riR$vSPn$bC2xI4>R=p^v-I zs>q!nFI}W-*=T-a}i$zn0f>^$^H6d0zWug7^>$B~D(UrTKx@HDt92QA^ts`U<<#@+I3s5=Wc)pN%_r{|Bkh1BqW z?0;>=)zQ;oaaobm1+fZ>OQAr8(KZIPge|HTiqYgH1wY6{lGsX{v?D^NzT|>-c|n^5 z8KQ8KXZ2ugoQzLDL6J05FjK&lnTC|Kh|(nX^Hhp)3aab*>QmH9E>`2>EV3B2m`JW2 zn;pWPkcdP5+aG-1Cxp8vL>45f?Uw%u00>WV7L%vwW<^^qh5rcC?rVi`XGnP zPGFV_-*aE$`maZ(tc-0I=N*JLi2!z3ELrU2?1)eORy)oPTJ$tj`0TcpR%HgXod&e* zb*_t5@y6K`4$=L2oF5|Nb~T~(0_Bh2(IaBP227fAHbrnD}Wr>*_Cc2jkm z8q?sSYmeFsHsx8P_{Ehcb5Y4wO5cHVM$+hVm5{W?*B&+EO&I9abXehkDc;kqqTdG= zn*c)yg)I5WIO#idGsreMkQ2(+rX`WSS?^w)NW6X+3Z652`O1Vef>0@^78$et;zVsu zb2VIPEACHF{CQUNUzvV1(|P8s zD+RJNe~R(Jl$b;bxZb0|-U)Qpva?LQ1wU z5xn<3g<554T!O@6Z#=#H!l=Wp+ShNjZLxye4F@L~CH8AY&XjY56XkIGnW5#@HHh1Y(#SHk88VMmn&s_gO)>3^+7fV`^pL${B+ z=l@>z>N|afn%(=h$Y3qIgNz-BdY>m=Bvmd}wdnN9-ToXq3K(tdE@&jyWj z?g`Og(EHLWw=uypQ!+kqg>m8uCQCaKc)+8#dkFdTMnOE!{*2{*X00nKBL-d&QWFln z0qx4HQCEpRa=)mcO0G8srK9Kgw$TK=l=&x`r3yf^>i$BrX6+d!aY4s`|3I@Qsx7tY zPQ0*BwS|c;^JaZNB0yGNwwDU(9!9yH&W^CF?__DZW8yqxKM158dTHR}H0oOZgl~!b zC`rS^Q;cp_gtvFz*Tyk2zHSgeGOpZ5~I9gyuk?Qv~2a2D|TR-G&+JwhX_d!!>g;7-?ad@78Djay}b z)oJO{8d&M<+TNETS0*$cxJDgdUz8iCH;+BjRLgu>zWMrC^IGwk_1bZRH({~u^we~t zGC>G=!NA(M2B+~S4wk~O(fevLGXtMACeO^0=syQ{G~QU2U=j``p~dKqHNyeqPSdr@ z3u6^UUTa&eh~I>Pn0QH?c##vn?daX6k2!$8Uj_BKa!yUjY<1DgTu!qm8hG$!0=X)~_saLX^_L{peInU;i`fjKQZGOiuzhEHXak~x(77a2M47D+ZS$umOb{g!}x%Q|@5vfD2`B#ys zAF#1-qjp+F7z0K2G+)l6Po1lT8_~zXSHh}W; zFDGpFFt)mGL#Mr+_6u_}7cLgS3F{fJ5g_BJC@D*qT6mD>d;zURgD)SEBZlmCOq1!; zKLvv*%ZIm9tbG}VRWF@2*DcXaBU(OfrY}l(1Z@!`@-}8xCx09%Uy#xK?gQCGH# zm+q-SP}?p&l!}(pt@y1(cT2{{r9s|+m^w5M_<>t^wKY`OzT5SNH-Z1#fZn*shed%};7H+#_$y`21>=z13X z@nsRyvbgihL3xNk#zby(1jwWZnsR}G)8MzA8kk4c>>>2@gh6!k`qamgl}!)AAdt1O za_97~Vjrp+ZGs&a5eyk~Y>ZSr(47?&n%n30dy~2L_!QG(*nnJza*!VLOas|}DAC3r zU&_U}B$#|Tpj)ENFFBrxJJk91!Rzfm1v>RU05m@srm$tT(xTZyig-Q3S*RKH;HVLb z@$6!fSEYH2McBS$Gd2(X zj2jYZ>DqP9y(3Fn5jYTBAcKd3MGK94Z7Dm|?`R)@|C5RCvksyLFwu^YPTCjd50E-z zI(uh-GSQ|uv`nHwDYovM;4;jLOZ@$8LDrB=BQ^cj-&9Urw+X0e%Z6r-jk4Npd7LJR z@Zzc6r-;(#?5j)Ce9NPj?~UW-twmrG)8uNK5b>SqRDMqPoAZ`S%PE%zmX9~Mw=^52 z1uYVMy$7!9!1o5n=hfhx50zy7DyZ9hrzg}`?@on>+ zr`L0}ncYYt@nfe>qY_wsiCQ;Bv*`>jOJTM6!BSK~^e zowH#rlW#}2))K)lohZ?|FrkmyLfAur4y1|P3^EiZw!jPeG2EULU-;&Xc? 
[... base85 binary patch data omitted ...]

Date: Sat, 9 Nov 2024 10:12:01 -0500
Subject: [PATCH 407/410] fix/ego_update_figs (#3108)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

• EGO CSW Model: update figures
---
 .../Using EMComposition/CSW/EGO CSW Model.py    |   8 ++++++++
 .../CSW/Figures/EGO CSW Model (PyTorch).pdf     | Bin 0 -> 29672 bytes
 .../CSW/Figures/EGO CSW Model (basic).pdf       | Bin 0 -> 27444 bytes
 .../EGO CSW Model (learning and store).pdf      | Bin 0 -> 29718 bytes
 .../CSW/Figures/EGO CSW Model (learning).pdf    | Bin 0 -> 28390 bytes
 .../EGO CSW Model - EM (with PNL learning).pdf  | Bin 0 -> 40065 bytes
 ...position only.pdf => EGO CSW Model - EM.pdf} | Bin
 .../CSW/Figures/EGO CSW Model - PNL (basic).pdf | Bin 31880 -> 0 bytes
 .../Figures/EGO CSW Model - PNL (learning).pdf  | Bin 33803 -> 0 bytes
 .../CSW/Figures/EGO CSW Model - PyTorch.pdf     | Bin 33427 -> 0 bytes
 ...BIG).pdf => EMComposition (example BIG).pdf} | Bin
 .../Using EMComposition/CSW/ScriptControl.py    |  13 ++++++++-----
 .../EGO/Using EMComposition/CSW/TestParams.py   |   4 ++--
 .../functions/nonstateful/transferfunctions.py  |   6 +++++-
 14 files changed, 23 insertions(+), 8 deletions(-)
 create mode 100644 Scripts/Models (Under Development)/EGO/Using EMComposition/CSW/Figures/EGO CSW Model (PyTorch).pdf
 create mode 100644 Scripts/Models (Under Development)/EGO/Using EMComposition/CSW/Figures/EGO CSW Model (basic).pdf
 create mode 100644 Scripts/Models (Under Development)/EGO/Using EMComposition/CSW/Figures/EGO CSW Model (learning and store).pdf
 create mode 100644 Scripts/Models (Under Development)/EGO/Using EMComposition/CSW/Figures/EGO CSW Model (learning).pdf
 create mode 100644 Scripts/Models (Under Development)/EGO/Using EMComposition/CSW/Figures/EGO CSW Model - EM (with PNL learning).pdf
 rename Scripts/Models (Under Development)/EGO/Using EMComposition/CSW/Figures/{EMComposition only.pdf => EGO CSW Model - EM.pdf} (100%)
 delete mode 100644 Scripts/Models (Under Development)/EGO/Using EMComposition/CSW/Figures/EGO CSW Model - PNL (basic).pdf
 delete mode 100644 Scripts/Models (Under Development)/EGO/Using EMComposition/CSW/Figures/EGO CSW Model - PNL (learning).pdf
 delete mode 100644 Scripts/Models (Under Development)/EGO/Using EMComposition/CSW/Figures/EGO CSW Model - PyTorch.pdf
 rename Scripts/Models (Under Development)/EGO/Using EMComposition/CSW/Figures/{EGO CSW Model - PNL (learning BIG).pdf => EMComposition (example BIG).pdf} (100%)

diff --git a/Scripts/Models (Under Development)/EGO/Using EMComposition/CSW/EGO CSW Model.py b/Scripts/Models (Under Development)/EGO/Using EMComposition/CSW/EGO CSW Model.py
index 561f8a881a2..18d3ba419b0 100644
--- a/Scripts/Models (Under Development)/EGO/Using EMComposition/CSW/EGO CSW Model.py
+++ b/Scripts/Models (Under Development)/EGO/Using EMComposition/CSW/EGO CSW Model.py
@@ -275,6 +275,14 @@ def construct_model(model_name:str=model_params['name'],
                           device=device
                           )
 
+    # # TO GET SHOW_GRAPH FOR PNL LEARNING:
+    # inputs = {em.nodes['CONTEXT [QUERY]']: [[[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]],
+    #           em.nodes['PREVIOUS STATE [QUERY]']: [[[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]],
+    #           em.nodes['STATE [VALUE]']: [[[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]]}
+    # em.learn(inputs=inputs, execution_mode=ExecutionMode.Python)
+    # em.show_graph(show_learning=True)
+
+
     prediction_layer = ProcessingMechanism(name=prediction_layer_name,
                                            input_shapes=state_size)
 

diff --git a/Scripts/Models (Under Development)/EGO/Using EMComposition/CSW/Figures/EGO CSW Model (PyTorch).pdf b/Scripts/Models (Under Development)/EGO/Using EMComposition/CSW/Figures/EGO CSW Model (PyTorch).pdf
new file mode 100644
index 0000000000000000000000000000000000000000..824af2aba8154bdb724c126d87287928fe0d6697
GIT binary patch
literal 29672
[... base85 binary patch data omitted ...]
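For anyone trying the commented-out block in the `EGO CSW Model.py` hunk above: it amounts to running one learning pass on the EMComposition in Python mode so that `show_graph(show_learning=True)` has learning components to draw. Below is a minimal sketch of that idea, not the author's code: it assumes PsyNeuLink is importable as `pnl` and that `em` is the already-constructed `EMComposition` from `construct_model()`; the node names and the 11-element dummy vectors are taken from the comment in the hunk, while the helper name `show_em_learning_graph` is made up for illustration.

import psyneulink as pnl

def show_em_learning_graph(em):
    # One dummy entry per query/value node; same shape as the vectors in the commented-out block.
    dummy = [[[1] * 11]]
    inputs = {
        em.nodes['CONTEXT [QUERY]']: dummy,
        em.nodes['PREVIOUS STATE [QUERY]']: dummy,
        em.nodes['STATE [VALUE]']: dummy,
    }
    # Run learning once in Python mode so the learning pathway is constructed,
    # then render the graph including the learning components.
    em.learn(inputs=inputs, execution_mode=pnl.ExecutionMode.Python)
    em.show_graph(show_learning=True)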
diff --git a/Scripts/Models (Under Development)/EGO/Using EMComposition/CSW/Figures/EGO CSW Model (basic).pdf b/Scripts/Models (Under Development)/EGO/Using EMComposition/CSW/Figures/EGO CSW Model (basic).pdf
new file mode 100644
index 0000000000000000000000000000000000000000..6733920cf9abd636b9668042d8e4448e0271a05b
GIT binary patch
literal 27444
[... base85 binary patch data omitted ...]

diff --git a/Scripts/Models (Under Development)/EGO/Using EMComposition/CSW/Figures/EGO CSW Model (learning and store).pdf b/Scripts/Models (Under Development)/EGO/Using EMComposition/CSW/Figures/EGO CSW Model (learning and store).pdf
new file mode 100644
index 0000000000000000000000000000000000000000..c04a1c5b6ca70a97fcb5341e67d55f424665c8af
GIT binary patch
literal 29718
[... base85 binary patch data omitted ...]
zEQuu0EFLyCaLlPT9jy0_XslQJ2h9yeixMtMOR%}arkxqSSJYCTbX z0bT})Ya;o5-U2i@dEi9d)YyG-^Jc z0(juzD$qGRf9YYHS!;kdC2LlIn;x=q)lqQ(PCvf09^Vn*Ai@5slqk3yu}=Sc%zKrW<2 zTJ@JY_3jw_6Z9mh`u!@cY}ywWnZ8Q#T)7)~Q=P#@c34+8&5!7xayLm`%Q*$p;nK%g z+V!UWnQ4?}2Ck3dX1m}%oOHjU5STRsMLe#+YRA9oAdY3`zyf%%b)6qkeJHTSlvkT^XqXmC# zCEE8kO3w^tkSvwvFOMetL|5kO#&loS2CWgja<=1tgK<$}N7?$>9T(sW(d>-lv{?2l z=2bLu&j^9N{D3UMNs~QjC%5|^g%!p_$*c+q8F9v>l@y=3s zh4T=}N^9Nu0e*}A?A0X-zmj6p!j98sN%aXXnH&t$t9gD~X{v%jf`;R@iwubovBs^T ztAB+QT0vbC3g5LDEd?R>43+=!IFYfzmxqYR+l(&qz--}q?qkIqAmkIO_(o#4^ovUh ztUO3p3u*?cBJax%LU<&{ZufV#M5|xUkb3-^-Xk}hbM_WNyGXpUt)wg^b=3AqfzBKF zeU(#VY}>p)yNv@RlLeSDisLs*WgBr_M{kMa&{d1{BY3?jYknxHl70(@@eqDT)MR^fy6b=>i%Hn)s4g|L} z)dK>WZeg_nEhRH>M}^P^etg(uCOwkffaa4hbi}N;PuplO;)~Jb_EyM&geTHUF!(j$ zc1KHss&~1T68~PZmLFR)38rr?%Xg8~hI|rtzI+#$K4Qdzwl9dqe2hOP2IR8SE)dVO zI(9$K@r073MR-q?mNDuO7agA}c*Vi_9krD@j*k{{#;Irwu?0P2t0xMbjk-e&p@wP6 z@ar|UB<7jqJS=iXl9x+b!Mr6-F?!p^^y(<&oYmbS%Suk3MZ)h2cd0_>SS)IOS>3ZPmP?^G`BxUSmqY}NDOv9l zD2x+1{ft4~deYEuVG*+SFkBpvLDLkWlv?M`IH10l5kt zoGX|8nzuymD~dAiivXw`l?WG=Q|l~7FA4`hVeK^Dp(QvHOKVN^7Qlkl5TYTbN&vBS z{dRylR$fbD@do23)lIwwu7t6C#)0&8+9(xXB;E-Nw0_3nQ7~mDDL5PWS$}pGroB+{ z+u_9&Wz`U3t2($`*}z;MmWLF=D*yn^&nLO}WhwY2Pb!9<8nl_y85{2$GRy+{ykgi% z;6QpK$)jy;y9aD3Z2a0*)|Xd26fMg``}Bu5kb!WI5kU*XFfisW^qhpXkmge68g%6l z@{e{B5Fx!brNT1qHT6EYcUTQePt8t}?; z4cBV22wCyC($4HHjo5N^Ng&kohI7BNc}u@CbPuTGv#53_f)(Mq?s?K#jl1+JXw5u5 zya`*(CJwQuXCfz8Hv6n}l!tSULo@%853%k^oVZxOYh+?)pYIE(>4!p)lsO&sGm)5V zah^0N^Spx#HFC0-SU#%1Ei`vUaAGYR)mhZ+$WByXe0yx6%+jGPBME`md(}raHo6;% zLBDIgfs^)Zpa4nqh$b?EbyJS5d2WT1Oabu|?1&EZ4?4-DGV&vvInIF(>V9a_3bx!5%8NT8n%a@t_)1_W6(DXRp7%~w#sLmV zkf8uZ)3l5PYbae6Az0tmuUz9MNa-|dZMBbZT=TVs1)eykUAlzT3{__rZVNe5=e;r0 zsYBmx-Lq=|`+R82Whzm&51cen@B0*hCXFt#ed5ioH^cEa(7f~(<$HI0&Dx$$Eq0b) zTVD@bs$h0o=X|#r1iGs1C4tJp5AU0vPKgSUAmt=lXHt=5(X2tqEj8$@d7%z;SMxPr z95zvuxq-A&WP%S{e4Swgt0|*S{*8$sKuI83yu-E$+#@|YS;3Yqt{@L-yQ92>gkwp- z_nAG?y;D)8KzM6r0euu}h7cY)Pc1}0F^Vf=I7c5&Sm7Z=&auLY6MUjf8b_+l2P#;+ zva-zAd>&hH4uRMJK;C!7668yfv>|pVulikXz~cKVRLIM>RT-NJA+~)63qv)cqigvN zvYDSJq7gJ5qA2^1VUa!DZm?g5a-mOty&vxE?3l24*J^WP(a%czByJwWb3;H^>|NKNzERdESngmE8)+> zU3;hunXwB0F=;hH4l+VnI({}V?*4cNrL09=@#|&@qiC_UPX0^uC}{kM>wvMXN0gc{ zB_0iqRYgtNO3|_gwpfFg^7zU<)*br~V@>~T?9TDDYWRc8pt4orw+Gzu;q${)NF_(c zl7Kkou%qke=_s)@tUGrfl;D%+Qx>Js47qv!x>EI(v2dY4oYX>Vlh2w?cEmIu(v8#&lG*&7%+0GR$32-sLVe%3nx z{zQ;J8%P-$n(6V|xB@ijJ_Yp5YzzQ)Mg}dI&zSK)1O0jMU!i7Mdm94w2F!h z0M>uC`1e?B|1p;I=M-uA{)D!zKjYc}T0tXcGXo=e5&r)QZZojZGyKot@BfT=XC}KS zDlDUgOm?t#OnoH)5Z_|CLjz{Mi=u+bg^Bsff&>SWI17Rcqacwh8!C1Ki-Zsu2%*3R z`0Fo1Un9y6_Q(ke?XO2#Kwnfa-)yA64XJG|A2uy6n_PG;wgSn%`h)d4ssj5m<*K5b zpY&*cMIP;c0Kq^6hF1md-ZnM{6L}N_nS1TvN=qvi9jv=Qr|~sxTczrfC2?5z@QEW} z6(E8Df)Sc$;q)d!-v>!ls2jj!_yGHLn?%kKOwWLp3!PU^u-ji{d0%uu**21zLb8{O zr8a@t&Xg6|CcBYNyqzBjmaCacyk(N_32D@!D3%Vi|3Ef7`}ji=hz=!9XY7z9*D{Pj z2nF*z>>30JDzTrv-5x&TiQO~{$n~emUN~5onEp?s#ZUsYXV#Z&f16)j!R!xdqvtQm-+!X>_z{YsG8zf(|1A0`AviV$2cI%&`1XMKvD?jV zHa^lv^&`6*{VM24N$w~SIxx(%Er`=kHI~K@CeUYo+XsV%6cW=r(*niRNe9R)h+Vc> z4070p3rK(vl*pRL9mMNI1CbtPxU=1~BCg6IhQa;)@cA38X1MjXBp-po!984zp*?XP z6=wcspk)up*H}o$92h1)8afav-7h%4jJBW~0H8o$QAE&WP#RGXGhVP-KJFzT7(V(X zVCMjHTc9~V_bD)Ge->K^O)!=2FSf97z9PIZ!95amz#D#MQ2v*QsCj&)v1kPdY5WU_ z*y>Pj0(`P;D4+uZVX=tuyiZdk#TXO;rMUvKT(^XdSglZ60na&*QvfE2H(;+oq*Rbu zy)R2ZjlRp(-*KQsdPlbeT~U8PqIa)ui#MZj!#DM!Z)0BqQ3dJrCP48HLxD~Ut3qL4 z3G&CP5&d)8wR3$CvafW6=%7~)wSJsWu)u>LX0bSBN{f+}a)Q!1=W9!|F zp~+y)TnW~Muo&6qd(!u1o8-#1oniy!!XKf>;ZoldzZ1p>#|LjS=G$NwX)Lrln1w$A zelmc_l1LE|3+fQ+EfA@fNzSiW#*rWx(XC%opS&h|iLYG@heSF7MJ$*IU7m;j 
z%$z`rgo~sDKVgK!P{bZXjaOTyk|-OXLyQp5Bb!J@f+8c1BgDTf%`V=;>&ELQP$>VU z*m_R2G_^HTU8Y0OOWccAB!MyICS`Y|BB3nKBIzW>gIFeUF~ucChbEg;2j55h{pV{l zh+GeEkAh!*wQ{cTc6~2bkeE<|^cM{gQ|1rPF<6l*cTL zOo(iYjJaVtXqnudTxF>;)Bb2DPkN%TrtnN6NHR_WPKqg&RI-^zna?b>DwUa|Gbb_E zGha4;nWrg>nk<@h$(qd6Vfxs64LPoE4RddPHoxmT<~mNmO2_KN!o*U-N?{sg#!R

    Ks5~p8p5`uCFXO5(D6UgA$(OF1 zK-P+<(O~6k(ri+2;g^x1Ri)LblDo*|mdmclF6a{J((w7!Jq1|Njo{1ZDb5Yg?b6HA z%ilI2ER8FhrpQ;%|31iO&}bKa#l5RRQXQ5l7B3bxBBii2x1p%pI$1vnXK`Rk!c@lG zHk4PrrlwXdypY4*$2sho@0RgI3Ck6s8j*>%j;2F(Mx{V~LiJ6JTn${+s{Xk8xo@RE z-9pR2Ybe-!*Pw3V=lVfIMG5t$bJv;I_jIl5b+k6=w#j?=dx95|7kWsp5Z90n_=hD& zXR@|HuX&`CqwVI2tH@nQQV-IaS+SzcOtlH}i7ylRuyE*?=yTK<_S1IT%{caPG=?;h zIo<#+htG>IGGJ_A<Ic^Gx2^=R}O2bL088#pbP(ytX}AXX`cDz*zE!`HCoRI79Cw^x!Fo0w0| zSTbEQZWdVoSU<~8tQTHQsl~`e+CkgLluswn`pazoTnRakcREgz%t6h#Ej9XG@=i38 zD1$ga4Ytm_cEd!(DEpV(7}CM*UizN(DDKG3$cs0#F|&)4NvHFB@M{nAxdnVh&_bo= zYKydsR!{5pUFS)R6~Ib#`EpcvG|C8j(>i=(PA*5Tx>MJ*?&9Dx=EE~!^0Yp7A9&VLcwhM9vgHirvbO%a^+q*K zUrl>?s&Y%KVt?#9@4Bf?<+M{-Ps2_Z!7Rowrk4}AZ&dtX&92>$&D++UfCqBfCjrWDc?4s_XTRDRdi-{g1;@GU z7VcI0oAfRFzHv%DTs?PTZ6U0&fW^#1(qr?t?`$US1THmC#)s!^+E*?1C954TuSzLpCY9`3-@P)u zI?jUMq89MlvXXU9z0H0%Tv&Ddwyk?h-mQr41ot|8)_S4cxNr5^kN(KS291DtKBwvQ z_|RVC`t7yxHVs*Vpv^Pp&h}>dW^lTjM+zfDkyXl5@izasXk5~9dz!PTJ6RWM5)<4k zK>LyOR3*~~K3OtJDO@jnEK(sd8Tk~s?p|{{a5);8GP@hpMgHFSkThME?d|f?VWL0M ze`zs&lEx#xQQXP?yl`)IXn3?Y>tc5CvnYI{tdr;7?=APm;C3kP_o>XIOj33fkBcYt z>$NM1mGQymL*UKJe>b-O7_)zD;7_|(OhABN&%wwL@W)n_2Wb7vO#WkV|I1ANw>|Fg zf3&dw7|l+fhA};WR>I8C;nSr4W6^(h_$OWCAJd!tzx64g=cs3CWAeAL@9>Y6|L+I> zSl9piB>pnKX%(FG9slwX6z!di{+`{RMf_dK_veS7Qqj!H$bnkk#!AonZ<}4v$icwg z%+}Gy9)|vpbMWac2w8t-mzY_9jxVKW@K@7+ON7nr9UKKr_3QzRpH{h~-aiF;`p*@r zm>D{n{#hdfGabyIzW#477M4%%0fyzXZ#n=g!yn!M>tbX3qqTp_J~LqcsbgpQAQf7kNA)%Cafr1h*my_Ww`xt4k+pAOg`-{sRa z;{Wu%U^J*1=okUiObl!Qde+bJ=owfUKK&}O&)qgN;IlTdH2RzfEuVwIAK#9Z`SbqI zaDNKapDO)4OM14VMrJ0ajxa1NpZzL08d)g==;=OhRsWU)n3$RVmWu)y**?2+Hu-B) z0L<*4`y-|2`ge(*mEp7aXIh_GMMexTe<@i0KaS*If`2Xk?<6C702>?KXXpQ<9I-Ic ze>!geQ&Q4@dSxA-aeM`(^pBPY64sc5WtIyjF`ERE@wy9$85XNq7?c8D*a}rkKMEmA znKCi}M?P;{&;TEX5EAZHM?s-3A|C{}qU;(b2C8RtsHeDzu?vt1WcsO(4Mkpd+oosR z%NbY8A5J2 zt4Jtc(|PJ9>K67Wix}WyiUUWd3F%#1aJ^+5q5?a4e>xlK$(^>D<^!of;->Ko=HA6Y zq^4WP4dxjh&&cMt`-!Z|dQWQFRRdX*SmP zT;3=%CLj-M!Q{hTMvr=f1F6zsp}aV#$JwM=@W&Y?Ng=2)zdS`_dqX)924V%{ zcVue`mvdcjDMX5g7{AIMN%3~Fmd4}RkdzwBzjcOKT9Hg+fwx5N;Nv&Jwx|#LX-5Bo zUK(*vE=N!3nv*QRKzU^pGVt~#NF5S`*->@RjNkg*0mgLRA{u zE(T}b7$K)7L9Z%uFKh!YyC^nM3YK_9uDU;V&h5f8hRAVtQ5dPiIU{;4<%ou_*Gs>Y z-ciKr+u3&L_qi8nt*K$u&l3&nxS+1KcQR=Bm*bh<<3A4Y-b6H}TiSnf#qeiT2gXzM zjiL%l1xksH1-c->_wzaAoeG#o`=ZF@r`y9jkEu9Rfryyu02w|BXwc7b3$73r4;NXX z@=p!4ndOnaqpO95@Vrr>NXj?tOWi`j z7q{Lx+B4E9;RhCw7~vNP)`Ok}cDLhumH_ZC-f-q%>${hmXIsr!>ct*6v8;l$sp~jK zrr`YM5p@BcMTmcGs%Y&&@>qSw+B4r+R0?*VFu?B-!vw3F4;j{GRaTQG=`V;J7g#;XZh2*CP7B;dv4zj0G>Vi&wI96E&iD`q9 za)$*rZeJSQQm^M7u!dOcO`b1ZM{nsB3$1j_vK6H%P5r)p3ej;>!&>h~e`EE&B{{af zR(Zm>UWH|{Af=o{Vmw@!Btcv#aU|H8a(t4g7v3F+bs<$vI;6fM5g0~p`QSRAEy5MM ztRR1`8L{ZGADdd)Nd=3dMH$m}@Y19qE=UdG!CT7^mQR75xWy~Dg+_ZQZ9%zJ)YT06 zJ+{U}Y}JU$T@BYB6~YRlOKjB%DXgPNdByk?#<$J6bb!f88}jhcd8CNjff`YNekR!z zMm9w~C?3(kJuf3dHEt;!r{{OmS|k6Z<9by@4Za!-1lJlyM3jF2IZVD&E$wojq$2Kx ziyk;3L10zl(DJX~u1Jcm?tA#5vuVKDhgYP9%V;vbt!JY0N?%Ed))f=6{89va=iR%}TUX>z3IikGQ9x$S@MX?y1X{Ozvc*<1&9;ayV4;%N}=(u<31?nO-Jj`M}Dv&#Li z7i2yHjMr6N)kT(k+mT7ufXf=a!E_!bKpifz7^(ab>qsgVI`;YNCcbBv*n&QoZ~I>7 z+_xi~HO7Ef3F0!sEf83$fN|F>ssNc>l-r}BE+VB+gx;4+%*YVzEvYpP+W3@;t*0@o&{Bq8`(zScT61BlleKmf~1~^SYHWOc#Yhz>NQCrTZ<*E+hS~x z=nAXyo4oX#zF$cdd;$UGyfok>w)z%|9C$rq zy+@f2pX`bLipP>HthRxt$Wh+1@f*^^<{X#=i`L};6)=QJ6LxSE6OW@1*AjL^HK&G& zvL#$N@+)LZ7`KIr)_;cca7tj9;Xg<~Er`L%HUBLcjH;F0`b~tK&r3x=J#<$mkWdQ& zTP|d>XEb%gcQgN>FfiY|78R3mdtI(_Xjm5UJE-qap4>=P86s}U)666jEK5Su<>Sp*1mQB$`@CS-mB9< ztyJgT;`?3;TezWna82HfHh=e675&;dCb3UOxr^x%?&bfNLw))G8( z>YrzG@aD1NQ@kqEUmQt)vU&M(i}~>S2!B8t=%X<{&~+I_(-);Oj&(7n2%5`@&JmqN#RW^}sUWT*C%w-N9a_w?h_B{LSicd6yhk)mKQ}o 
z9JkjnOS?1f61o>tK5GcG1rBIDTQ2X2<4ed&b(Cj%F@)#5>!nnFv9& z*ppdiyW`cc_`&HLunUPF$;B0M+jDT5Y7ryRqHZm{xduPSE@_QRCnVJi{Waw00guF}-)85kJf)He8UZgmBlUp1vt?h>+?skCnmwQ|$=Gvrh%lKxnhMZ$82>0&_@cV&E zfOC6*Mz{LuuzF;7*qokndjxpF)3EKL02o@ISNz#B$t%`VS(`X^rde+0^;I-h$t;IP zn*x+%+pE54q<)_P;rBzq5ysE1isMGR8dy(rO-B- zkWi?(e3g#j04RD{$M;pY6*ZDiMHku1%gZxEL7iRXl~*UbC1w-ZjFWGxDDVghAGjUL zh61`a5Bh{)3&%LCo1G03zKl9pD`={{Q%AcU`r`qMrj^6GTrD!6`YV&>wNJtJjE`WS z(MN3XXjSyB2v%h`eaa0M{Ktox>l`x{8ZIzoP^TCZs!gblKi(Dp0!6pDF;acC1z-5X z4}N#*<`O!#nlYc4?JlxQ83^w&SKwS++8wiS-gh0tS6x%jn*6dDD2W}UNL;_)lhi6g zmE@vVKU(hNU`2ytQ}l=);nhT!OWtnTeoEHMYU01gjvF-Tj&1W&TlO|54&HQH$9^&x z{q`(j>h5X-v)HiI8YZc<0)1yH;C#X7ihRA=o|BGue#2hdJ@>o9VX!+}uu{T4q&x62 z7=Zz-5Rz{?A;qkAPZxQET^Hh0MOAEeI4CGZ+yghxXoGHUm-2JBar@=(5}@nGM=Y|| zVbZh=5h)_X#*m{VqEbxgNah(5L#2-HR>FXo{087UCfdnCXY2W{N}kH1J81Ft*+TP1 zVlug|QA|!}J<&`{p8PM6x$ear3-TO5TRMWi<(g?ABm zM;$M1I3$r!X61?VB!3Fo%Z~RL^eAfixv=*B!YYb)g~t`)dvO5u&zpP|I>Mt_y(hNoJN1!?tph4`9?0 zK5Vg1lPnb6Ux~xEEdz-!m&|4+9XgB4nc>1)F;%6qN~DHRNr=5^WdmUi%nAO9`& zB_V_a$Mxf~-}Yrcr35<@b9>;nHH#yR(#>2cVLp?p>wTi&WCB`DuF(29de2!gSJx$| zgjQ&E>f9eAqyvvRyD(Ivcin2jjb-tOPGfzoA$Uj_b~YH@(Oy_RBK_ASRG`19=aYP6 z0wWfuTW&0?4W-MEeE&7UbEoKouPAkOqVuQwXVt~B)qN6YXYF~m4#krN6>%!qr6O8p z9Fy4iJ-vuF$=Wt680)}6gS)qFOx7D1pzZW%mxD(yHT3U4$dli4mMxh^ETzyK=Be0Q z56cG+6(8q&j$4T#5r}2t{bwhZ-)r2QV9AzU4fyO&W=D`Oyjo76Fi&`Q#~MgUt#H~1 z+!&rbugmuM3V(I)$Dg>1)X=1mjdFiqPHRy&F0>q9JfJu3Y|_lk(L_FJmC&4NjfGvUE=ZEGyeDTNyW4i{VA)xpdC=C`q`)M( z0Wp6#SvHv;l*OZi$<$%Qz}vvhP#&WwLO=*YpnPUDhooF^UrKz`U>l)hkR`t}to&6m zUpo7YY`-GeRxqLPQ0}e604q5@Y7d27kcGCqLW|AQ)@=V0eDu;b*GqfmK(U@nNaS!n zOe>|!pnyh9YcdJ^rp!pXsQlgA{boOCERu34o6^}ZN1!10>)AElG)`;)=0)wKpai_d z=7{@{;0K=v#YtiqcPVE9cfogaU1>_0Qki3E>{O00B3-#@MPAwl)v>&TXvz36>B}Uk zYm@!N_T@2b-RLCN?t6(dfnM)@++1B%`y8h{djci>FeL+MNkw)# zwbT&?qJ6>gfk+o$eiB#ynP_oyNGm&}3!oOw_JzFFx)UcXW=iKv>sUk+`$-!9com4R zB@P#7gla?{n{A}`q#x<8o*y@h5~s5h$D%($sL_l_hJ zLt!f=(!?~PI-|=6t$G48%G9JKixeGOFUYl8RZ2D~;2U6Q*bkIBL&Eb*?dv%9-s)KQ3p=)J7uLYko?(gw%*Amlq42&> zcUm4r@_FUqc5DV32?WDJ%uWU6=4@f~rB`Di%?_Id8k?1JZcbc0l?8#r1~rh^ya=su z8d<}Na#_M6J<-=$Anhwopba}vpvHr!!Co~NGStMF0f!7O@B?XC3gU{Hs34R{W@fqM z7k>Vi6cnCHA+gbb(B?OtBd6)#dHAbP;YKE5Et;H@{Zt|eI>}6H9To?z^gUAFNg#?k z9{Bt43r8u)FVWGw>@{v`Ws5B?pPrX=9@ZT-lS~X>lO9+H!btQ=124f_IKqT1>Z<5S?u`u!f4OslU2pXWU8L=jt(TX| zDHVh|Qcr}?V}2llbuSe2^W`sS;41sH=U7vt?n_2++S+z)UboKZk?zgQVV|TLb0ebO zJPk!*A}A!aH0L;2S4iGKWC$+8kui~*-o954|HQ;3SZCJHnI|7yoAr)B2iX%TnZXS2 z_j{GjY8qGNUX!STer5J90?iauOh(3!Nz{;#EK)zOk!p$MH1uHGn6(cmo)!{gey38I zNFq{t(UPnpqCt-{6K^>1*UQny#HB0g;{e~c7h_sD32ySv@H7oQ?zm=i^Yav5qZF#F z=OLH;lFRdZfR-tL%dy_1?#1mv?qRi))(hQJ{|K7uOF1+rn(Oq7{Fc(iMt9qWfnOi$ zHxC&|X=O6A3s-~zvAv>Hk~E=C?F55GRyAsNY#gDWTtL2w{Y`|wt9_WbFui(La3adf zvYgV>6TBM@^4x*Q9hiaQ5-xcRmmDw5WtZ>!u<7l@nSiGtasT)+KAe&r=97YiDlWb+ zRx3Y^{#HL|4-bZTpS4a&)f%`StFh6>SM-w0$ll-3@K^zJ;Ae(>-?yNHfYG}0CFqH^ z2o=#L={ay!jUtqYx)Kr897<-sTGcd0#tedm(IPJn$IYUth%saia z&(v%`uxspPY}^gnMCht5Hjp~I4q2JMe$oR=hUdq578@2%g#w-d#R6?Z6q^(O*88|%AF=}x^V%s zYGz%s#yDvD+QoXwYX6-~Hk~i=jH#uz{uyWLD%S~~9!pSs1)L)ad=!jPm>5kNcv7qf zRmZ_-*m)1}eZ^P!d!F4K*U45D;{Mziav#F*{pMyzXZ(%l)vd+F`|rCH0%zi78dh|g z>si|Nnwzz-)i#Fnw^-HtcB$(@=On2tttF1n+-(T>MEQD@-AX$-50)t>-RadEM`x=+ za^u9c_!{jpJyVF6xf4TSrcS;n7EwlzOCMW-*qD8{Z}L%tEPT~8cS1PMdStjf*Dl!g zKWeWD)Ei;v&mO6MmxBJ^s!Pj-Nxn z<jBSZcTh?qXnFdR3ZCJW1560KR$4_iycW&9=7 zEZH_*nkp{6_bQ$yMVKLojfQsof;b529>f?>teI0(rzJ=@AWsrOOpKu^IHdbC*-YzE zh1i$}jJEgOlyzI5*BLG7p-H|Qzzz$F>8OIDqRrsBN2gcW?gVfPtB*?Wuv&j5j#2WQ{nt}}_W<(;zWpWp1MdFm<6nU7Kat%3B>NY_ zWBeS84(3mb{|(3eck=pYNB*tezt#O8IPO0;;=iFdfCe=S3p0S4k%1Y&K=+5o(b2JJ 
z!TcXk96KHJf1i$z<^yS?Na0cgF6{@T&@>8=DXYma+`K4Hg7vIVzMOyDTx6BHA0M5U7G=3tma4TK^s?v^ z;tB$u7sw*M1?d zgz$_pEu%5ju`m=9pr0u$Tc|f$Rk)pupP0);ROrUS46C_{KuStXXbB9xx75(4UD9?t znUm1bIVyij`6?e7jPTk}9|_u_@SuJ7whKBsR}=n4jdAk8Se&ivhh+@8G z$b#YS@i=;U0$RvF#?voW7*P~|$3E~k6f6=8zNJuX7^8+g@Jt!vJpm41Y#7aBt98ko zN311X{JFzK8C`7z!!kHD;VM@Zae{PaoY=`v!}O?eCTz7Na&9Ai5^CIJv2|ipTvxZI z0qtql9!l2{3#iC)e#vFVelc=#q6g>}qOfi-$9b}8a2FT5di@h^J|ab35Ml{2y;hkV zEoYShFj$E?+RzM?+DSE^pTT!7-!)M+NPkRl6U7jo3L!3qG=KEI^H=JJ`0}FKQTHkb zc0j!#b@Ik8jOJJ4f>d^j z3wMIsFMZFF)3m($oBRH-^4N6%4>UB%#IHA=Y75o~xTz^GVmb6tBGWZRApG@8t5`ZaOvzXP;v-u4`Dkqy~n=~<*ob^wBV@zc}WU7(|3)y5M1nP zX&D)HFJ-!0nyTrE(4M)5JC7CMB1}cItk@BsHK#kzO-0w4ibOi^O-F;QI43 z3poBR^J~zF=gc+E*|G@tte#N(ySS+c{n?ah!$x1PmO^!P>>Re(16XIJYV*t76{_?t zJzQik-+~_1OGoGfVL{Z7Xkm4^pLRLEMMCRF4Qa3B29z|Zwpbom2;5O-uIZk>0Db=~ z2u!Z16watccBDlDD#0UxM-codK)w^wV9iKqX6PuAIT9LyX3!p>>;1`jOqDYWI|E*x?%J{CXq~cioO{2%xzzPG zC_RQec1qG1wr4=VVlpeGwYiUeKegJHCxa$ET-ZefPEZ^@D3+KEz!g}DdL@f5y1b@{ zAHow-pH5;$k6~}i>x1zq3E*jHyKHOk3(DTVqw~!^;8J`1xj;Bm*~=<>eRSTtcwYN+sWj)^Ugan_tw zdTn&~uKuIf{(hFgh|&yxM4=jU#^nf(QuBw%?~{en=DI)Ek~!ye?L>BtJ1xINMYM`{ zD_r-T&349*-+Sk-J6qcaf<3r4TiU7tR~5Y((ti}*(>H>Nr-RiYXoSo{53vZM(J<+= zls1G?&-$dbq$i3`if4*p<`|TgmCZ-lf~7ekv=q=o4V7pj3Wg&}8GEqKoWSL^jfov_GRG8}^s#hR$qzfh7 zh%K^s7&*ummyIN+q*aHo6N`(PGu`Aplwc(j&tLH+P1&BNS?Dz}BwNW@uYM-AE^c7% zp2WGZHP$0_Jr!vBR(iHmZ^}Or6sZ2LZlHVIJNQD5dNFz#p^dJ~NbDN1lTfuM%-)(! zYj;U$%=?A-k$_)TwI=j6l9+PH)9e0f@*%BD7gc3tVR4@C6zwYf`%9Qh9uB2m2_|fC z4OxbAxW0{f?l0KXZyiRt=}BucQqeY)x)pPB#Z6l=PY3Uf7(-$6y*cr=nc|I-?B&A& zlzK{mAiyTVZNbc;h#jO|BCcIK89ky53{pNcd;<4x34C~LnW&sAhKwY$%ys+r5%|H> zAZfcO0C%s|c__r%GGhJ&blCe=Ph)ru<^}#zt+9Z&R$(Wr;#SCX#=8BCqi?NIP4R{L zY%g_vA(D+wyD^6xMgyr9k&Vj;X*{RR5c^(Bt}^}h!XA+e*Kok=$^_SsV;dKpy?cj; zco7%cjl|6Ia=F(<_@OS9g?oa|g?>HNfiBh+_e=7v^23G1m!|T6RkE|hXglPd`A*zhhN5uL-O7+4W2`hBJNaHIBfK3#Lg z+}JA_2&qDmK93LGwV(}u;T!Z)Vil017FI|`^FoZDJ#T~mM= z&Lii5oEX)Z-$Bv23V7uYCtBo%nR{F;4!9`N|aH!cn66eQW8EHFr(yqP$8edb5cne z`ia~x9=VmR?nRGU|2P|CE**i!#13Nuo`4d3a?fGeUi2JJN46KN7jO;4p8x_}_tgE7cG>|=n*y!Fw(C6mZu>Tti7|x)k_112nWCh7++7pY zEOO(pn>kfr+azb9HCi+cai zkgz3*O|UD?jLBy{PIu<`z|xVkD|VY!JGQ!40{Bmu9@WwZ9rRl6xa|tB*Y&&JV4rYW z1io98b390&#+KR5qMw`%!%g=s9DmsC>qc24uE-(me)@TdlUf!btYgE^aojtutIgt^ zfI8!N2VM57>Ha2|og(!66!F!M`y*}ovy@YNC%l?lo?cA9pb?Kc&Z6r4^47bmWQ160ZM|I?SKh*#bT#A~&6zqti z0TpObtvr=1gv~iej}moeKP%0D`3GygiG1wr$ut3-AwBaiNoJ2&(cQpOSxn9@RF9^DW*y z?58@HHAk%rQ=Lo=lkYF7qYD+?9WZgEc6O_VWseKFp^)(g%nVpCq8H*i(#k-&jp|hg z?hDrH)yF*rZxuYftr+zx--NLw(hb2h2WX3_mQ<0YN>1$N9y1Y5w-gFQ4Xy|y^KK(7bRcgo03Uv5~pe+!2Am%4oV0`PVF=2q% z4R(H^&&;gxT-4!ft33NUlb3petR{*^7K!#zIF`&X$sV)BFU477FHtRE8&U~?IakPo zi|x6>$nIjAjfL?^0r-T30rI9ouJge4Mr+|!ZQmzBojXVUp9k{Akf2GAFPLby%X952 z(yPoFuim_RWafq8hEo!)rP-{)vulacFw((NroFM_D$>d$ za|tJ7L*rjZ9F=Jq`eq5^?JVo)aHpQLq2krLXIUG#(BxJ^@hKuMzqlO5kH)3iKwE#X zx0pVE?-8R8T*gB%`%%3j2e#^WFP#mHHNi2@e#dS~d1Yh_vo;z7P0!t&FMb7dqK#GSMx zaj!6?FcM zz8HCAyoBqx`Q$!pE&II4y`%X^Me@=;{Nn!P4l7kxrmv|oao<5`H!k-1VM$@uJhewGPX&y0(>yTEOq4C`_0R&N@jNJ7> z37qmF87t}{q7G6NnLK3%6mRmdbh6ofp{#ypuC4&#`jE-RNfDltm;q#&Xy;3Vx8=@B zm4|IttlkreCtD~|96%$G(b$PP4WcO7tqHe1->S zJ@ndS?LRk(EoNzNU0sFCK<)-p&or$=C<{qUQ6FO?YGT&NBQn zEKX-K%0O8Qe^h(G%?u+Os4c4mK;ct4N~6BUoS?k;n;Q~zf{%rZ(^g+e_Y0(66eE<3 z9#-evr*hSGH*sCKrl;aAXjub(#jFe(jf9OYAfGasw6v&n=q^`u67o<#m5C|;A!_KR zu~1CaN<39-tUA}@7%z~ik3<08)k6`<+{hm$ON6fm`M~MBknsJ(Wdi_&nv-cEl&2A1 z*J|sMnHSJP%B91SNKnRsw}e*CP6*d{7c-Bl(>L^jnl5YrqU+Ox(q-iD_>h7ph|qTq z<|<_xdFLPj26fS^?z%1+kD1FV#R#a(8>igYWYexJHJpBaxm|h}h!FFiKD$+w(2q;_ ze-++twCJ-@B6ae4>ps!g$Kw=Jx$xZv_Rbc}avZqt#yuz#V{n1La?pAGK&pqb3zX!g 
zJF3AXUe_3#HIFkE!lPF3@{?JJg23g}!%{}c{8c{VS-YN&yLFxj3cuQa=TlTEo_=)t z^Cp)Y-JPWHU~Dz}&-MN=56d+u*0x_wQJBIfGL$9Ls=UShj@gYzXW?0}=~<3C0-%Xgtw&hh=AIm4C>H)Q*|3GUF+l?Ispfl4w74j`oXUp>2 zw&B%`GtyJPUjQ-gIUU5Cc3=KV9=Tc~5g z^KA8S)8w|%lmFTWW2_ci@74s0m^T-A+#f5b%$S4S>nZT$m~7M1v|bcF)}>aiu~h9n z;V3oDDplb-c^LIgUCDF^spX(&v-qB$uK|i1r8;OmS0`UxOiq;W#$4-T&`ev~&E&Ej zd!`94nO6q0))?*-G5M0RvOk4e#b23C52o5us-14Jz86kw;vU%Eo3h^U&uHc4RDLJD z%q@Yx6I;@kN%-hKCR@bk^?YW&(YTXeR9gr3M{T!v!A!rx{?N!#XIbZFGSj=+>bE%C zejZ%HUvdR=;kq5#{6?&?`OAizr3b!%%SvHr(z(S-5@Yvx(ihsVLY{9ptfY@B2wtEO z%_&E&5Uq$!b1%XgOa<7)A(gKNTZZ@(Mq2zRr_?3th4y?=b>W6J95sLvzQAuD^*5ha zBMJ>2)Zub>6lTT@gf7xJC3EjlRW<5|+a(w4!@bx?8QYadmh9>i9Uj2e?x0hP;N>hJ znLzJpJ2Wc#eK&M#?(XXzIlFFeTB`4=?ZAH?gN-l!wc?KQ8s8lxjD-O00_>7^cC0~= zQ({EEM2glP1zZO(Czy>IyFy4+7p#=DVwZ$jXjWaQZ_xRwm$=E6CtzgRiAT~Akei^J z0*A_%us)2ONVgyc`xEsMh~yk3%(0u`o_WQxhqD^W@h{uZ*$s@N_;qmEn*VUsesI~+ zE=-@Cm{ARrw%Qz6iG1$Tk;~B`!?c-1v!Qbczm!1!)9e*~wy(+9>Z3*K#`|dNJOXyt ziyHA+$>(jm#hMbmR7cB;8IA0P@8OQ}X}k#fnpkYb*ZC5GuaYVdj2BRr|J!lJK7dCp z1qN-vm?&U4wPiqCpEM#|Nv!kqFVn;i*(I8cY>fcdnRmUtsH(NZ)IT=s=~y64s%8O^ zxPdV-_%R>O*)h2O3P*+?SUbQq&&wTx=gT;~4T@Kf$`^J@j~8C(Hhq|j25_d1zwwy| zt4D-m5kZBTwTv>7Yw{ktj+<;*pQ8lM)q~1>&pLzUI7+_8AB?^b6IW>>CG7E?47Zw~ zukhZ&NFg)ayM}BD*l{{^!yCFFt-69d?)53ilg2cqPBbDyTZ4BEQC#2Dp?g^D@2zly zlSVs1LXqg=CXz&@q#{@ax|=1n(*1(Hmi5y@?hnuqwrv6pK-kMepYN5V& ziT)v)jDopG>f`B?=ANCyfPLi7g6b%uD3NxkA+W+hCk%dQpN0k-2FJ%jpxae$B)N*s3D|TyC^pLv0x!&Et zs91Aza3A*=we7qec2|AV;69+!mKGbI*qa_(d&9hpKrp8EH8t- zyFp@?jITl2TopR+_Lxr8B834r9rI3Eq8TS*G*zh%U`>Q3x^e>D0_vQv#NA<%k4U?^ zeBsDVq76PvXS9dh%p@Ifa0Jb4}#haeXO~9y1EzVt8a z?$xC!{o@br-`*bCd(%yF-|9JDrXs4Tkt`7!J6tFfVmd1>K{i1@T=9(^?*ra6jrZrp5#OPDHBgaghz^xl%6ivxPtc= zg#}+A&2??pR(CbJJwZk!=P>k7+@nidNBg_?$2?G{hN5G+Ap)2zxJ2g8TCI2hg=GS% zSVA0V`pC4YNhG)#hw!MhYI6oHNSz?+m4T7V5mfjlIcX8^NgRn}33dB8XRk1;d5J_K zP|+9XhUo$e+|yU2?88BxeqHq1;KCYB>=}W0#bs#1+LW3lJoz7NBvp8|lIDQ3ZJ57c z@M>WCiu2e-5T_Lcr`AxXG)sUj?73Q@LkN;7+>^!@_Rg4NUZC1jdlkxZp-LvF;evqg zK)o_;mN_$qshV4Qq+gSPfAg@A0tG_BFW)ANJhqs0551<=JxK*w?RlqN+=(_xpu@pf zrxbO#H3OuwKT9NrLDchERMk#?xg!lG1fG!@5SRLVa*6&`vHBWtCUywiuIH|gH(QF! 
z)-2A)O!Cpfs;7X_ZY+`l^L1r?#4}k`Q2(#gEi{-cX^|wv%}zB{JPNfa0`D zgo3p60jj$NQDCB2`vqk>YP|N)1{rJz_V+0Q@j+=&?-#i6ji<4*HtTTI*QpE zb<;+13g$VF{1Nq%T4Y#J8tfY5{kza1tZ&U4Od= z!AbDnC=C`r+>+&}4T92_P)3Iw z>v8LCtEGlHdDHr1>r*%A;ZM|%50xF&#gXZ2b<8~ROY3E7WE{S?*`mBLh}GE_yT^Z^ z40*H}nk1k!#kxwHMGIz2s1>i0bJ8@`aRt#9(~bgm3=+>xWkZdE`m0yd4eQp##9{ac z0COCPrwlUa1J*Tywd}Z5xh1M@p_wh7D*(e1;x-bQlYC;A$O-Ei=QR~o%ZmX*y!i<1 zMbN;-^Zr`5zlvq6eTTgH5|xUfU_ZthUD}$5p}K8(GD>fR)G-0n+uJweJ76^GG`5}d zkXMA~nw`^2dIp%a;-WF#mydZpuUv*wU;N+9E_QGP%tuS3*^mXke)W&X`z9{jAGQP7 zI7^TBXKj5BL)G@{o1oR4m!+kEZk;{_PVP2sE&UtA=zz>7ep7`iJ7-pSA$xh&k1h!!= zVqf&G3D_;5rs`f?#cif(X_)28Ug?0mG?Bq^G7&TujX{vn;h#Vn3iKwbC_#1j`-Nnr zEYu4XGK;EbR92xXwN%qF0=nLh{RxO6d*Kq16o@IepF#Tyv7jgYP2mKt=n=k+L8gH zSz}@a%P$H5yd9QL2>P7a!w7`SimM9L32ER5pB{ks!m@>R;6#DLoMeFK-!6c|ZUkn= zVs;%>h9!kr5xCaP5X3_TtroU!%uKUmnXIgeq=p{&Q#xh(?BqDryze)cQrpv|+=H!W z&SBRJ0=&+5AfT#v)0loX`98)~QPBJw1`4MB@2!JhvS}p3Rk3iSQ zSCSqdWAx`nEo>IWAe@PFM(Zj+g4lT8*TA$${w@xL4e&ZJ4X$fX^TFhgz7!K7yL69D zTH4$aU2z>bdf(H{ek|fDnGM1R&UKqMHQsb=%;!peHrOFXB(h>%8qEv}K@0w%wvOzY0M~HKXog~Z8iEtJ%`Zziax)y}X(rbuceQ(Gw!%y*iY2*Xq=`6A+sI#=6v@>i4hT%;#ng`KhCC?@M3mA)H z*uR~UO_#FPUw+-)9sQV`J4COHo1Elf^Z0dN+@4u*^}H1gQQ?yTE4qXp5lG(tHx_z8 z#5bl(8k;&?`WgXQTByK>UVHrd1e|n`S(u8B!X8GNl8zxgP$4$gla4zu33gj4&=DAy zkphE1#E~N})qeCLYcfDnLkx==R;(wxB1FcJwR8y^^Hu3IJ7eJv?vxdmLqlr{gZ!`L z1NcL56pJ(o)H0K~rwsHs<1>NW^`$+w*vT=CLmPXMnwZppZ>1h;*HaGU*R}R(*-&Er zC%EkO5u#VS&B5=yyTCF;i{p(0*h%X)a%3%nuJ>`?*6oWxXN#*Vr7jOowbMYHF;urX zcZ`Ph3=ihu!7??eA#jGHr@*Sd0f<)mf}#8BwbQ)InO}FXQ>v;zdR+!wzy`u(M77~` z&GO(-j!J>;kA4I5Qy2re$-Va3x>duR#9tQ7QejLTrK*;#%%{sJe5tzmuqeeoit;Gx zR>=X&@3Qk;IoqVT2yBkFvmQ#}rgAQ6{B`@B{S2!cdZFZgqGG|FI2d%2vTh?Z~m=LL3jJrbjI1Y^O*=2VQ;ieP8VB|j>Ud{^q1~0h4GRb90&}8Dun_X ze$PXUk|9b3$RtY3IH!+8Pqz(Vqu_Y5SqbmVF;+wYR>#I18lm*AhkqSBeP7i6_?H8E zd55NQn7#I}{f7Ru4eVALIZ;+`w;&M&*Umc$N@uPM@7=8l{pYXKI1aPI*vm|(&-R_2 z!LI4IC7bleF-Rri!p)N@J&z~<_k>*wjAGTf_$6ChBUX5A3T0J2&R;JsU{SsTuc$Xy z*K<+&EgmN*2I0$e73c#AvkHRTb_omv&Z0iv209KxCp7s3^@P5gaX8#DP+MR-yAVOC zz>*OD$At2nP+}xWzyIzxckFZK&v^uP%>DY^`u8~_LF-&AOUn_qcJ<}jejYCZ zVqzwrMRIsLPpbkGYXBJ_dfrD5zky_!k5c9BE?@yo8{aPP6LXrrkr}Xa7yweG&mL-c z4N1!M_h$~t_U+BU*c-E(IDxL>d9_v%!+Y&d+>P!L`1BMsjC?rR*6C})&EZ4qfb)}~ zU+jG|Cg!SC3+AYe-D)!v%Ede^Wu75-a+1$19$u{+k|DGC+SXjS)(C#TTeG_?%&S3+ z3KS})eKA#l{CJ^wfL3IPo~-G{K#A*Os`}^q@;J`dyMZmjt}3Xtg1@#IVq@3Ui-D?lO zaCsOb>J+Itg^&E!}-s7qnRdnmTtq5wh^oMpBzJeGrbILy-s)B){jlSz(B?_4^V(r)Z0q>I%23Jrjoo}wU?nV0_lkjf!zmN4h9_Wparec7Bb@%Tbt&N^kLQ-M4S zGbe{%h3q8U5YlbvJ@+K5hTxsz#M-_|<)+Fe!u#HcEj#baS_|x?A-jk^i76lh&QA%( z?~2(nWL2(g3sKPhO~5lIO8so|!n>%z$HdL8 zH)85?tDVJZ=#rj{cbDbX{)HOuGVKHY+Kuk%av+<+n_2I@k&dSu?WUL$d(3#86MT1k zaLknYYpYg){is7ULd?%PA_cPKuBV9CP3pg+lH2V+f;b(8#p&r%&9ShElGI0Sh^;Fd zaguPxGi|&ZiLaqoGOVS8=o3xYdr(a+18<_#78kMG65Uq~0ag)7pl;;2me`4on2F=h z^JW>~i{_bvYnaO7?B^li2&>N4EVDiwlI3Ho^iZzb!C`@E#hNpZ$aqE7lUuE^x3ySX~1 zY|jIHZTe9{v;hO*f50a~x{q;fFtdYllea&AgPjixOJS}Rhrnw{tOWTVeF;~w7gd|H zZ##$9g!KV!Adtu0uuJke%74s%h26y+nE7 zXcvq#$xY6Di0m|IHeqHz<<1QjTL_iwXAN>oyBx`|h9_GBU$VX4qMN&gs8U;uBj(SV zv${@~b9(1w;MglS%<^6tq_~@kRz$dzo0xg#wl;nkpv}hLG)?wZFfj;Ql1|gX3VG1uOhM6KfyMW?!>%5+)xGtY-a>1Xb65Q0J80L zSvg)NX*09+y#+BM(8#{^-p)4ze+1|AX0GfmSGq&l^7f!mw_CIFLT(RPpD{ zLP&P65rZKrPDA4eA9tEgjS@c(exnotj0IYrE;Y_6{wbEwpJ2vO%sb3C^&twkhN0a@ z`=**qv6zY~%+sc|Ko8KDZ3m{QMrc2z8^13PY+bXu|` z@Wq#yl}nR)8E!F-rPkh=!%q2j=^f6Vfy+pqrSR-x*)bc#*xR5tjT`U-nH$=Jygk1+ z)ElJ(rJWpy5n?hS9bQoQTB4U zzx~}}c`&!XLfZVvfARoZ0x~K2+9~;`-R-mHWK|X}9X+}2%`?+CsFt~ZP$#P#GHP+- zN2m3Cjf2#!v1Y~@)j@BI2Xvyp_L8%VnM&n~RNqhM=)l&E zmxwhDbGbA~Y*fKeVW0en26J?L3g);Am*E<<0PK8GOy?fU0Wl1xc(!?%Fk;hZ) 
z{0<+=mX%FrjS$ERln4>eweUxsu@AbXtq@@CkvFeg=fewA`aAwAAy=&xQwT(7d>?GU@!#~M+g#daogzwBi%scwr)9w zm@6sa(>!RogR~bnu_u&16v~9a$RFrES~%1eM#EyqFeWJ(>onR!YGArRxkf(EM9t)g zLi=&o4HStdVM7HMJ1%2mI<)*%__wRN(E?wGjp1S0ZJg6fSxF$mQI^WAC^b5#4o8b7 zfE^+5t#IvPFOx9$7y-x-D88Fmxntpi?NMfPb_f_Ll+ZD7KAB0XMMzsVb!@EiYK(}S zNP{umWQ7LSG!kC2Mx##MiRRh_wie5$vdV>?e&CWKc)Or>f@*JSEJ)K+ou5sMCXaep zP7Lwx%rx^?vF%Csqb}k`(ORWyrf5+5+zOeH*Trnjf@C*CU7MV$5KlnU;;?U;N2SA!H=N zn9Y6#gI4ARpKHPn#)e%$%lvZ)Bwap&x)rbG60GC+JQf9F6x6Z5ihrB6+ZsU)wZI#a zC(|?r5~x=&JoFFM18Ucv!|}Rl?H^+|Vb6DMCRPX&5}wX8YDBwVAWeQy)6JgyX;e9< zHSN9S6G0HpvIxK%2-GA}Btmc?l)J9bSSh5a=B0z~j&P7fY+A|-(}jE?&~`vudGrA~bdVBNS>`4MrD;fuB5YT0;(+RIVh3pc*TOaI_rq)A8rF1LS3Fzq~RN658 zq+CMJN1%YYs$xcSek=A?DlwIvesL3UI8^JxqeE50jMhfm2e7J^<0$ZaMs()0$19A> z_2$?0@{nJbo;2{4k!<)}I1mOpuSzw_xQKj4emQMi7;pLZ3=SAzklRns%63Qc-KF-K zG{9@dFDxd1*9&M8)hHyBITv{rxkmM>0IfJ351-((Aa~NNJF@O(uWFLi6H*Jwm&uz& zpOF@SHB!#aVB|At%vL8+R|^4SzW|Z+ax5)*3rA?7QFq)wH!NAXTaZ+6x5$hF7r};# z6*6wZd%`{Yk8^?Tg>n3L77m<_De203T`3vM$l%wCi20yqkmz$2k;<@vmldR~5!$mb z%~qkT^}3J23_}^?kqpwfv;2h4Sr6rgs9_~&kpN8UD%m;&D=%kiYC|Y`fWeIf9(>rxD=HAR(Po1>Oq$>DZI}=xn)6DL+duS;cwxMAYEmWS!)E4#lWZ zM4v6Mllm-okFkAnAST84nhW#Aksg8{5RA?!YeI5E%f~dg@4)^j(FR1$T+-Fd)Jv4HEj4AR8iab+Ns#1azEv-5nt4nFF*h!&4qwva zvT{37yv*7Q0&TxVL0I@$iNhs%+3ACIBjYiF*IVd=y^LotJ=8ks`$la#Jcg$MTgKoI z<|{&_^ojM@zj2HOtDuCQlyvH=XVst(h=bEXb*}0xbjy<%xiGQ1i%#s`h3I(NJpI+l zS&j>MYbxJc!`GJrD9-&V!(l^M=XAkDpkp@FK2hvk`yil$HvyT{O5OJLD|*J5mRA%t ze7@SIe1@EZVoFeslT;KDjh}gLI>3BabD+Tr_3e%hU10$K+S&h-*~b|0p*e<3jrzc6 z^4Ci=L|v>ZTiNq(eynWY*(U$Z#?H$ApEfp@Z%E%i<-f;Z{BHZL z1Ny&g46N*cfA?cx{|ArvZ+;w%4F54d4rbPWZ_D_78~;8RMgYq{Jiq^ITqX{NZ`I6y z+ngN@zFByV|H`$gWZ_}*{rtYc>UMU{gx>@FLq(LZHMJxB*LwNqMEDzX$Zf>P!E9h) z!pvmA%x-GL%E4-A%)-IU!o*={%*eqAFyw{#|GWH8m6Mb6_p+2Z?Ik&sc0>fwsbUomzTd@ zIxfE6QB@O8_tkbrR!@}eC=A~Rd>N7&pZD^z7rwe&K5|TYqS8uLEOd7D>#YA;$v?H# z*=*x{W8+5Axb?`24(6>acyTeD%pvY}f0?^l9|*b4u(5Hy(E&|Zlk@6!dp)vvlY5rQ zf46zB|5DNdA_q~)-meqR8=H2bA{4|*{Qbr)m(o}H+}Xy7p>fq|#4Fl_;)`f%Z;}g=6d9!X%RghgdYhi&s6_~v@7+qv|K-=(VN67Jgy_nF(18F%qFMaTvC?bHeyO(W=MVy(O; z=xCYCi4#=*%4?x~#+ImKi5<{Vc3gu2h}an-0uhR3Yn$0#8IX?#OPv4;%t+#%M zGmLB;nP2buv0@JQ;1yzmOvwBL&vpfP(KSK*klQr1(DHcv^CDL9`zFzlJKp-NSFm0 zZn2F;F6~(+vf>y^P#l47U)@p|gDRu>B&9J}c9>Xn{4fO@(edkwXtY)4%At~FRT1(` z*&`6qc-rM~+mQ^|gv)Pr^i{%Fg*oqgeTm+R{*um;Nv;)EJ8OLwTc}6)5>%($`i=ab zDV$YG7ID)Uj(kAqxhh6|8g2qQ%c)Vu(kBVJeZr<+T;;~6H}_(;d%BKDRn(UXDAIlt zv9uA$CWE8M5cnxN{|Kv@PXapAV$6l!QKK@5T?!+ZH5=Z5h9s{6rK z&Ve!K8Jz*kXS3#sk#$Ugp5>~ZZV;EHaZf4u_C7!F%=P->VdEsPQolR4zM+0(W0mtB zB+J+;)5couOOA(>9?0Q>s<8BNy}ahchZq-*o+pP5h|BVFl(0bT#F3`gzM`&?q0a%0H2o26z*= z23a{XVZIBfryjeM_U6uqS=3);-7rp5<$)}JYyb_MKQ?&gqB=PV=**(5DO*(}Cpz68@s?RW3 zHz6N&8>1i6C+@l3f*b&sN<@FyThIfO3Mm)Xvdg$P2^h_)%*MVeq*5XDl^CEJ37E{Ge&5Tq`} zIYzd~H0cF{Mj&wg?;%@<8NaJjQ{8!%2Uxg37_*fK+RLD)meUt~Jx2cm#N&!byk!q0 ze>v1AA3flC9JPM_Q-V29tRfQFo`h@6=i4V-Twnqw@F}`jG1~<}BuALgV$f7U zSV>snkU#VDAi%5=Cb1h@P+g6FQYOfXsb77I$`-5NA-;gry814@4f*A?s6Nt(f+_)A z#C*gDsAxeYD9VWYY$mt5!O(VM)n-`En6Y~4pJ8);?YBT`&xvU}JS^#4 zx{mew@~*Q^(iKFU85Fq|ey4!J0U;3NTrg$dpcId)C50|hbh7Vg8W~Evyw+|*K}uf> zJ&*-T!%(;7=w}syg86+OAC4RppM{VCIgG*DP~0+=ohZ)Pza7XgzOkRIj)+K&zExI5 z+E=|PoJXYyH+`7=6)9vrSu(sxC}OsV3B4c_w+UO(iy^=!EGxEw3`>lJy^TUG&yJrc z2}&Dh9>4j!zTNN#4l8E8q$_>g^Kjlxj#H*UK-7vO1 zSE2!(YqfdS@sxtme zB?pjM4t`o|h1s=IvC?1g`2Z>))SD-gyOLU&?o3 za>f6wgt=JNDh|}0`q=}dTVKy#F=xNwq{xR zz;IPvSXhNv4FDV}^LNRsCGp`qIrEX{qV}U`4`ehz$i^uM)UCs$K%$G2DE*J|!t`l` z)1c_a1f9>M7@cGV*|?3Fo0mkRV?5E?GZ|%~-o{}pH6}8eSE@KLl)ZH6YV~V-mdGUG z0-&$>U?OT(_rWP@)*~0UFI4XZWo!I&YvFrrFiYU2Xx)~Q{RUS3jd2Sf&&u^XUH$Ji 
z&zU{?rPfnZ$@-m#;g?zjy}ZcyLnjDzCQp!w^G=p=gLr@i75RMo8o_I(BbT380HlUd z*Q=bKFNNWLSNI}WCoz?99P;kMz~vz#gC|*YQNmpXw)CofDqz?2_7s)edt-4Tr zHIfCX?xpy-9Zq@bK*3bUtp149$Mvoi=xk)`e%?RKyR%NS+ePXbZzbt!WJMjVCXTbU zf)^iGC6b0Hp*?g=b)!X|DcM&aZxTat=$<<&EdVncFOJF!oo;dK*BB-ji|9MULqM6u zE^_-wuXNx^?nw@YK{uGB{uwn9t z{k6?TM7IrqNim(DRyG>I+ZO&(m70+=x$fUXn%nYzm^t=o$egfR@d4<5!h&|<`|m`z z`wmwPour>+PC$yO|5R=5;S``gM!^AX?Uj7d#>*1|IOij6zw~5g&J6$)$o>G)#1~DvnM( zmE0sH{hQWukY1!)D8p?vGLz z`Pr#gv78rdbQ#Y`3yt3dC5W@Ex6a7Sn=!f%rAx!K?gTC{IKp&csEXcGw`2XpIPK)K z8HPlD2#b#70zNxz1Cuc}m~Svm7ZR6)yq3&`dJc|r#yw##fhI~KoI5G|k!U&EG2#kNqIHu zjUL3jTGd*wRQxppDb|$U#KSxDG3JnahBK6ivnq0wTXAQ(QWS~Jr8|-UC3opi?N-t4 zZHhL03ipcQemo^&tRrI+amG|qKC0Ey)n!u;Ul+gk0Yh;Fw2gn=XyKXi}MVu-mjyV$mw~zeZ zPma^NYLcLB9+N4CO28ydsz$JmN4ejh9n+abNbjK?+qz0qY}Fy6hbyd;8J-6vs3AHa z9b+`NLvq0%l_j74_S3`kpg1kA<2V5Re8nPayAwwRYZ;omt?Ot8_E=gSy}!et_VNZM z-i4l;#DoMr=({q0C-eTUsMt6`a!z;6jwuiUt`|R(QZ??2(qP5a}QM0+nDNzkB1Z5g(F5uQasr#~&@IZtPa*`Cne+ZF^rMvoI6U_ zQ^J3riuH#@*+8M1rjN@?0D2`;XZx>C#-`2ymcI?c_I55G*PQ{M zS(c9*WKB&h4TbF80on{721Z6^763aVn=Z^pUgcjEeLnb4I!3|C-dNex1)%*=n}`^I zUd7bi1)vL{7q+*tcT#pRG&Ti%W^II>83C++mCwibk-Pb*@-MTB3L}8+(+oqeqQV4V z|L2Z>*TwN4b;*4+kzVjK{bTo$`vK64n7UdTn<|M5{Z|5riH(uze@+qoXI3aX#Z5(d z1tWC2o4xxd3K4+x2FDW`Fv~504yG6`>8}715=7=I0xpJzOsQs~@*P+_l*m{V4fb20 z(Gv6(lEQGGqKN3hMwB)5c{S_xX2$D?=GMwl>(Yw(xzADukittKSig$~us=(_2HKyK zJ{=U)iNSjiEF@qA4dCxPW@cdG417hbjdy?O{7pL7XnGaMoEP8y;)&RW zNg#k=L>Jh2e915mK$4Ujhp?I6!NzZrDVahTnK1I93#y3r25W8ZN)M?!$I?>C_VaNx zr?9(Na-upFHZw?f3ZuaCb<#+;%?rIDO}kYjGk^~6Ddy%LvSff5(9-oMkI3?E!kI+T zu>XW#fdD}z4RUrlAw)iMTI2w^mzeKIfQ3sMl^`#rSswfvHG8^jRDLvKq)r-;-3fD- znZ9X)i($a@ueDDR1t!(BO}B_YtHwJ!(_B2Xc`7JyYO;k8IM}I@KL-F^fde&dW?d76*ytq;?!uA7MxLJ9I0NTe z(^3Cof6fiGAMXv}yicF_^Q^{Qg2^91EQ!u+Dtb^-S|TknISGf5I&JcLkMzFxozwEi z*Z@sd?sv?~;9shWzmlMX!Yw+3c>*-z=uBXO{1$fnu-M2Uv3;|x(Ja2|1Nj7VDzr&L zj@t7A2@`{o*ztRU_?&1XF~W@YbXio#*E+{CdA=PzeSy`9u-lOlBvL-SgNrqBA}yf7 zF5C*T=>tKDgLKJ*VF{pP0HHB}$Ma`)0Nn%t1^G)Lfu?}cNq|@ifHes6Ed#*_GA;wV zezS4_nh)^&2__fF<^Z7srv4q?0T#|*TmUAdPnrREGr$rm@B#_FK(HbXqX;owXb}ll z3(7-SP=NytbVxWn4kpl~lm zgyJ+vfRPEUr(4Mmkm$3@eoG7WAEZQ__kr` zFxj!zfVCnnMRoe048ZS@UAlKsZK9nABKA3778N1U!U`mEg?}x{iNPXUfMtu z`vm%w0}AWZ^2K(V`gwyTMKcPLjlzsFjKYn+?XnIjl6EBoB($Q)mka!^n#R1*{&Ozh25=1|P!2`1rc5zf< zRA*G|HOpc9^xpIuTdk$udlzN$Bb6PMcQR43Su${PY=w-f{Q}xTc7<(){5*pdnU$f{ ziq-Q1U1jui>9kwUbhbXr`~FMlaa~8aXWNt2?Z7ebaUxCzP7e+?jw((n%P=c;hF!*3 z#zDpr%Se-j7Hl(^)`8YhvmQfo`kzS^Gt$OL&iv|{vr^g_zM_pv-fH8rMh){qxyC6} z-2^&qcEMJiR%JIKd1-nLdi`3(^L#$V-0Iw-Uh!USzwz%s0jma)f|-3~`4RcOhB=0X zJI2Hn@s%@Fg<6H&!yLvfjuDr9d)j1m;n|W2lF?(b%FFYcDh3_XP1A7JhZbZkm8_j3 z1$FD1npI+pd7J}0quzxcnUB=4ypbA_*%%ub`ZQ-W%CsjmUodP(4x6jXX}4T^&wRKubn7-SI^{a2?-1^Yp2?mWA$ddHL%R{~mt9;b zI)i)`kWYT?v`t+`?Lm@zk=M^jmTqNhPEk(5PZh$#VP0U)(_%TzIPSFJImOeN&_(IH zJLJ!+t|jkjojgs-E=3=?4%!ZGEc33swmnlLN+ZULM~)j8Hc8cB4?JrH@p=IyTy`d0z_KZM16GG#59`9ao%0KD^n_*teU&AG;oV_GL9=b^B`Gv(|mP({Ib89rQ8?h@-x}I>drRk&oxG{Mbbk<#bSN!a@?F!|#zVW-`S|i;^ zQ*Y&G&9-j!!Q@rJRcojEX^)zrwxa=}WvoeT6H{43w^Xf__(JJ%(QyDyY{p^tu~+L^ z_Ug#mQty_n-yCo>=ny0y0UBPH-{`%9b+w@Fkm34H)xh*g!Er~gxu3qD$eF}t{Iqzm zc=1So?AHbE`6c!;_9V9c=!D_=J;xFI*X>(jFVxE43e1J=4`3XGoR_;J4`0r6;CNO% zB77=-lfUNPwfxkI(8^z2Ukq<4Vzcy;@!Iy-3iRlcIm>Yx%qR+_w9-L*r}8UUf_lxX;m(?lb-7U5C#>%zHL2Xe7kbAG#i| zcfEDq-#(kKGmzzodi;}~9IqCy#;1D)HJcgWj&lvi+8q1CcpOQ+$_&aN+UKad-(4HUh|)gZ${#OpUOYTC+9}nG?msg5Q*i$ylmD&9o&S#t z`%^T3{Sd~C0D5Ul6Xy>}{i*0b3j9Qx|B~LE|E)}6Ll;9Ed-K1=zVp9S{=XmiRM-D| zBmNTK^vYk2T>gp?RGhw={@uFIPW*jQ@bgEATE)`V)R|Vv-qz6WZ_O@Z>TK*}>EL4T z1jG0l9DGCzqIMqy*3#~yepy4~zi$1vMaI1_|ZdkOA{B1 
z&ptA-GQfP6^?$puv3*1jFl--XGXU6`K7Ies#=-IFwSU`KSwEffxpA_5hBE)-2E)q0 z0ATrlTxVkc9AXB*=e7SFmA{`+&@llkk`Co$qVCDQ6 zA6Y~9zt0%inLbWGoAp87O_^Z+a@%+_?O*9#Rf=%5a22b>)2T6-Z5d`Qs!oEK<1DcA30pAg51t6@6P8l-u5H@ z_r0#S>)iL3BeTnX?hXK`8W8-9CU5q&pgW77a9r9#=W@<4DFzVP(cxq3P0?i(l)uFS zZ7Xd%XS8)Ja2eI1%hxHnJqK{Z6+Dt6N2Ndpd%5Y|&e^s@*&x!^ADOKEOT);mH!hp3 zv;5vsZLfDzIW~Hx3(UvSg zUUniWM|;d(O~!|^6{DgB@lX$Q$#dWjzmMMfOh%GJ(PPKGCF1(Sco2u;L=tut8i-f& z-EXMG%SM<{6n@DH^srYX5ZIGdm?^#Xgxc7W&ESByN9__4w!*e+jRxq%j6*Mvd8Slh zCic$D6k(ygFpC=d`V*y%NW$!DcxESTXSo_!LZ(nGOI_0Iccg-Ndr$;?)zAw)Ku?0o z2h&IkP4bXP|NJuCCL7GC3(Z6-G$b3DRISA_RstvKJf-1TiZl0Tn^x=3tdhRNE>1o7Q*P(9LA>Rgw zj5wdnxWuvs$xFk;f*MK1jUudtqGf5ZZ@4QiEECnsM`|SN5#}2a#de~#+BXTDgW zr{=*g>Wa@CLvDL$_R-2V1f}ic0M)lp$*SdO`v;(UMEba&&h6Ju7TQY2%zPMurGHU@7sQ5?I1g8O| z#>D}h6A=dZ9SQvWwtxYz!W&@FC$NC6GE$9%l;#W>F%4)o%JT@R7L$q)U!@653$mZ% zSGZ+pfQ9frU$tT(`2sQ+zRyE}v5bcXTfHA6a4FCTJ0$vz<7X1N;HW=sJC$JkM$1Is zSc`N3*tev}fFQ6w%p9=WUH`L0Kw#OXD;LMWo#Fz=TIO;;?hkXDT1flG?qgIco-ba} z=ioWSgjW_Sc3xx;b!Y5-3oWG;V0Vc_LSC^fum**Y;hnaX_35GkVCh^2+8UGhfYyx& z+NoRCRWriJnX++=i{!Nuu=`aqaLiS*5-%KFR4qJIZ`HI#{lW;G%4#x;W>vLrYh3(+ zbhzby?^|GP$&Tv+e}?Y{Obx&s?ZTqE78#2<#$ zHYp<1iqc65aN#mU@nNJem#PVr zfMn=SfElq4zXFcOo70p#o{`P?aa{Hrt7Tkg*etM_^>?oxi&t&}Ianx1c!=l!k3Jtb~2 zggr(j8=HPx#ykTArMmdc=0A1}&DCERYp5}LV$%jta%ymR;ATPdLd#tO;efiIW;7 z?)}D87D{fwzo-FntMzM z@p#@Q@fKj%1m+K_%Y{gTu--OY26N<)vDqiM;vkm&A^9V?oFlMZ!ya_%(B}c?E!txA zWM5)D0Y|2|&K`j(Pi@;iV8jrYXJ{HM#()b{*aRkB%-Ka!DxOM8SIh(5iWVl?fq3y( zKru(+4+p3iqbE48uZc{vLWhayMX`AKR=;IJ&~t4mn^}1uk&p!q?#< z^oO>o`2`sEqp;5GVL^WpvQ5dZbHO)oPspCsuXxvW7A+P5ztjtSFI!J5 z_N(?!P3)a4ubNeit(1?8=YjE}LB$7Ze`8@qHfDqUXbWT?*a)n7cGvE|JPp!KbKNVu z>$i4*8@U755yv((mpN6$N>O5-D1(YEY+zUK z(teP8MSCT6MVY~$>kTt&u6de9Lx@-C6+cCJX@}1XcDl>0jIj-6VpQ%5cYdC^4*Xsm zdU*Fsp{ZCtQX<^kYuNyZ4AKub&g(Xv(2W%1<}_KcZ+fZO$>j@Ws#2mMI%LUpM##d( zY5SE&I9#WW>f%|Bj>f|e9$6Uyg@cb#^Ymc7UqfhZBXr%UU;U+A!*xgi; z^LJ1pFMM9S=CQrFd#BJ&14U{ywb72YcMow2Ie?@kSX1y6e1s~4TK38CmA#??y)X8^ zvgg#dkguOdb11Cj5AX9G^Vs&i2OP^zbV5de=Pyxi9Z@kWV{tiYh+|zB-KJvLWv!}X z)A3{2dg{65C{XtI`LZXW~Wb{kMjCr7*X|74^@5SQ_8NuF$VBTmRfVov zcN4F%9A zMB8{~Io~-sAQ38QLv(|GcJ=7%bwR)1tEyn_gt(N&{Rd@>E@D*wA;<{U&76@38NATRaMO>i||kJb-))J_#c{gcC~k zML)ER-SP3fV7T{O>St*!ha(;uS}EVqwJXN3hsTA|{7w8p)tfZvhS@JR1-o!LdZx%! 
zaZ)qL2{H*;7EENTOzDvd7f)L;Kx|<%a3c%-^suYlLT?R!P3bMPRM%XwRSPMFV($bt zkE@|XwhezFJY>FS85dI#bZEF3E`;&{XW~vjl42%RC|n7waGq}`gGwujRsB8{i(2VG zaedjiV0>gcm$){9l$RLirRU|*?WWt8rbO4D=$OgLnC4D3-k7PdIIL)x$*IXFLse;w zN1ZIkS4`q`8@JK3T13}o5WLM3^1g8|%=JE9w|&s;9Y(tJ-P!9t4o5x2MH%QnDw>bT zq}AB@#L-%tNyoS9>+e;SL-=5he2rdbD!#!sU3Gw<^9lt?thHtJWZ)Ls<@Kt+D z+o_F(*_6+AteB`_htJp-;~ThkUSox7?jAO8s}*N)m#a09O8*^tx&iSD(F+64VQiIIp+XI3m1QPMWs|h>3ERJ z#ncY8gV(-m6a8(eQcQqV(~@=$p+H|jy)!PnfwYA4+nD{!ipdTA@Gl|4*RY?|J;siKo9Xi3FlyQi3aSCxDNx8QPm(Y2p{ z0%?mjEKR`gsXTOTI0USKMchkvYw}}PX#fWh+{Oh@%@hyuoSk@KJJx7x22e^1kVKLQ`mXh9a~jz1oShJAFjDyk<_S?BWcCj zH+SPh58zYEQ?zN$B^X%4Nd>1oP7?Cfb(W|7&1SU8%l zoGuK`;n&Az={98&Xy#+8id7LOA_gH+J2Ra}Rx7%zApOJTX9&gW;u znj~IZo#c1q?-?)N@Al=@CT3R@m$0e5f%`N2)+UoXx}AumsAJioyY_g5=<~S0Usn^J zA_#=KlLR@T#dLA(!cPf2a=YmFPP%HdYzm8H7k7*HnHJl%TD0nCJ+xf=$5Ke4a8;A& zVq4H%F_nVXyn&hJ>(f)jOONdq6+3KeWLlL8jj^=zcoNM(fGwK_bus`Ois)$*XjbXW zOSWbc(PpT#u9}mahiW{b5rq{_ja>V$jqC@--8&77>tLEsFlEA4QusAc1l%({Hou|- zeG2fqw}MQCLtr81eg@~~ZDS2&)Zrk{jamkoSyu3DO`Shh27|)}H&fO>i>~sR+QCZj z+Q1?|GB#QxAE-=Wj5<@HCxB?eUbYo8)yG-_hYl|a0qNR^;7gjTBUZ{}=eQLXleSSp z5vUiFnhuF>ebN8*b>?>g;Tlwgsd;$24$t%;jd-Gd3d?%8^ju8dC+FkRSoFgr`Y3W+p@`0ss= z6Bm_`<#DQL&B>car)*o6T@zi%YzfsB)yL-B*bTb%!nX~coz9)sNDyOs3wpQIQ|daQ z*`d1aLK8SYnX2Mh6>Y+Tgo1t3myYf+M@?n{Mgjg^ZADE@VQKm51+3P0*t%v%jo?&` ztF%Xx9^Hpd&cPr5P@FTo6_O-2+Lg6bEX1*F*}z}!6b$9^<*Ad$VP1T57fqNZK!Fj3 zj4c)8A%7>#76?HrBf~sIJEdz_(K%#D34^=0ADv$}6u_7h{K2RwW7tlOnLUx60<*+w z`WBvcm%#Y&Lskkw%+$3^Ls3u_YoHKiKb9N++8#})$~Dokzix~n%3;~T$4C8?20{zD zFH-cOFbL7EABy$qVjLQ{)+zld&cgKj1v5B(LzkX_M^DVy_pOUjzvOx=Qog6*Lcq`^>Y_w(9xj_s&h@fC2O`Uh7qS)iN;IpnzWp_c%s4ifI@Sp>&QTNr*J7TMy=kEB(&!hMb*bg z1P?mY`9twrFk_Wve9Bl}MFE(LUjMgIi<_x4VQ&%A!5_zjaH@{jkIK>-_=NsA9YSEe{8&;0cKYSD>)?j$W~Q4cnB^By{lB3RaK6ohpP2}9Z$k$IWAqkEGm>l*t6)qs za^Y*3MyitZCLwA%m(QZu*0)5(4ugf}QI~)u+m*I+2wP{K_7bX}p(zIyoZdQR>vSDD zw)8W%>;-Qj_BNCm%bs0@vb{z8X*K7TR5kT4z+b^Gfs0Q#Mxe*`IdGSS&c{=+Xu8Rf zO3baJ_Y)bf<#8(v?ec!F&gJbsgURvuGtc<~@(ALK+q$j(JGS6#BcUDpx~bK4Yk)3( zJ-c4Uxz<&4vEh_@T$)NWa^Flc^icLN><-LX?MXbjNQo%kR5AYy~e@_qnez;yd18N){n}~6lRok%Yk3{GnB2@ZeX@;?8|mo zhpi~R>=*1#+!P8Kf=OpA?F~&&ct0=mzalW=h)Au1b47zsfH8}aVyFR6OZK7bJDZNW z?jyae`ipTFIL`B)Y}XcH%wygN>Zo&K zeTTv==oy%;Et)4j)<4Nn=`HJ|K>ZDfoH@}p`C&Cf5uU#+QNL;wzMh89JTBTM)A^$! 
zO-gS6MJipEI8y`{1LOD^X&BNonE6|oPF`uFt_bmv5?LfEDVC1Nh(Sq;rS5||sTn^Q zeg7W|_8lVuSB&8MR;BL%PFPTE7j-msJtpsc2E&@JuK(=AdK7=e*BMlo1zhnmdwT1=Txl5_JMNqTiX@E-US!A{Y{|!IyU2 zZo(O`Il-D9u41vI6$bd5HxK{h)QG-~U3+rbzsv7?MBe^)6!^Jn?H~T}H%a^dir9py z1)VGnZDdvc1}y*PMKB-B*#7_HHMal6YwR5V*Ldv{t$ltz5E{%UM*9o30oXra*yq{b zQ0&w83D!86ng6nWUj6Htf8F(Y{0Zm&dHw^veSoRI%Vqdi8UNt5{{(XXlkFdX$NW*( zU(dq)Z!qq^bJx%D__uTacJ_b3xc?l6{|4d!+O%wJtN>bOCRP9w!zYbnVE9-K`F{X$ zoD8i03F1CiBmNhN`>XH&0&yRJPR7*m6P&_)bmG5|94pfYHTy3R$HMZTAdZRQg982= z!~s5-+UFGcH|qV%@Xtj22gEV5b8vplhkubcMkW@vkF)ZW?j$m=#O z!z50Yeq+LfI_e)lf&@Wi34k`*??Evk!9!M11i0%vqR;q8bTn+bpGBNhB!V%BxEWsZ zIlAeh$uEiyZA3SWcSM*RKC4fI#oGIwcW#}ZI7r&sN2)Em1MT^aI2&RYc zt;gAk(<8cswtB7YM04o_pEQ~F{kJp_Ix=ELHebeq1Q@1Rq?sJ+i4*WOYx`X9%go5E z=69vQh?$Fk-jrnLJp1E6)UmvbbvpQ!%WqRtvU_uLqa?|AUPQXZF}#Y2)g#)S zhaBWdP<&V5LJ)$0p)hKs5@H&mHfTfYK$)!egzGrH@H*Tbs6ZK)z9k>&-NyWN=n;9%)xxjwfxj_(|7nY+6=Ek8Lg`Q5!8+>THf<)Liqf2_n0lron2r zN$mj)4LFRZ(0Bgvfl3{rW;%7-F5w;=@7-y>C?>cXAq4$9pj zp~#ajsg$O;BCVm0=2d_q`uZaujLPQpVsnfbv^Vz6EA0y1b@G1 zV4r!+6cJ7f_!&`$BM8y-<)`m#KpplDUl3Ygl=4R=@o!BEW#SeSUdbPdR0;W8&C(5|A* zil)Cinsyx&QmoP35s=XQ#2=rArm8lxrxlgVZ4*oo>3{7g@w%h-SJk7vmoM#BtBs0$p`PxveZmu$zeGxM3{C!(4wg?NM?;l>N$ky8B{^ z+L7zpCBscnh=0~Z{2?-$3X@zdn#9gccm7anXh`5h6`6zRtJADIUp+{X_b|dmg$OPh z(mZd3-4mKbWyOeTNwzow2ImrTt~zYqit3Y(WI2L)zd`2qvG54ob)<2!=&h9N#8Jj{Ivx->)&J3%VB z#`A)3gZ{$+dm?I;kE1S+Y)4fzg_KP&9;5+(>Dl*too;AB`i%SC_Wf|n)hNfnjJ+XK z0-4OnnXbG1d97eq&(q88w0sx(@dlHGB=FcGaL@wnH`c7(zIeP)aH@Ku&>h9>_@90c z_sJD}T(xy^{Fn0Vp)Cvos<*1TRA3&R2bD{0ty+oB$oHrOaW{-!O?RJ5Y4mOwshpTw zB@&;E85i}PwA~RswhX=C2=30v+U>Ea-3eH7%v@ayl_pc_DGPBqH;m~BS_(e!*9A;E z%Eh&rqp97Fe&z`4#%i%kb61Sm@8pAx2Aq7Z7WtR65~IHy>}2V6^AVYC8Cf$+GQ~2f zWCvsnmFvq#(O3>B?L<}aB4q0+B8aF9`q2vZZ_N;n9qBZ$Op?i&pfXTq{8b&W##i8s z`pf!9%SjH9Gc@K-!<#6Z%_%Ll3W|Gj@P8Mlq{AvHnGnggXG+Xvg_kp4mgp&)sa23| zCNcfs=x&dk#no2GFj~4*Vk&Jp6SL%&~@GISu zd`=U~Im+-HJ+9fMlcBedw6rpPws{jV&MWz9fKgmtHVP4ry*LYLlC7J0i!R!9p)RHT z&nX)+#X{YA8@AQ(q}CHFdfT#dD$yV|zUU@g+f|=c#I-Iw(M$@Wrc2kj)Mz$(FK+kD zdH2H1ejyfULLpQw0hnmA;HK{uM&2H&Gety8`%kE{HSC)tCcVQJ1H>-I{2a@}zDTyd z{-f{MHknwlb2*N*(wJ;bqBvT_@U~FZ<r6A1H3arZ=WTWG5e8q-5^M)?>?K!-P?cT5;E?ihU8 zNU2m(JdKbcx%_(6nk)YTPkqwZ>#9Sc<;IsAVS{&M`hF>t7?RY}mSbHhTYj}7Ti%fl zo6#vlmejJ?nrCxrkiM|`Gjk=8mIpf6_+xQ7$4p%6!WB~^=hDS{3HV;70^I3oV8Nvd z;)1XXzNI~~Qho_1T$RBTNS@(N;2~d`w2nkmtAYO+i`*Kd*WO-OqD_p!=-D^JjYm$3 z(9o~#^A_V@EWdmRcXh`YQli^9Z%6`L~5_L0#YtNj9-hk@cLlDBX zQDYZUkEl~JuiwZ#w_q!xEvPjY^NF2fru#=Gph3$ZNjmaoJ1Gip&N!nsp=N&kgL|+0 ztWDYSH2|MM>DTn>(PWeE3v!<3oMlPqbOE!t>rwcdrpZSW!bgR!jQO~^(RLHovr!gv zyUFzgkFb}7Qxv^PI3#LHfV5+8%A<}a_cZ}_47nY#TO8tV+CzMi5pAxYR|E|y>y#v- z8JanqA@op*u$S;e=~8-yUK)7at?}~Zxq4(WLiC7cO6le^6>%hEFizyZ3k!dlV5DG} zm6+)f>vioB-FQO&CgXOzPuNOuE0g6)@St!G0vlyu0^eJa(<}7~y7{j9n`>JZBqbyP z%xa3bmzX||-6H5MC!2hwNJ=u5Zmdi4$^iJs&=Ox1Ie-4Unfi8*lDnSGqznnd_JLW9 z?G@f5ENvdT3s?3?NW=AVcfVG_b~LdgJu!8_`P$4gBp@Sua`+A!F(Ni%YfSX_H*82V z60tu-_i_KLuyc&g{{Cwte&8_ndL>-e=r# z*QhV`)LgS_thK(>s(OBNQ<+<=V#e)-xjpwIMb;>4S@(==`*4SBhe$RGaKb$N-3ixih#+Y%?!>1%fR+d!h?P4aPcTGZNi#3Tt(sve>#f=4RW) z`DNvfX!dhb#jJ*cSrA!$`_!K7(a5A@n9m91F?^5gyW$VmtwoFU$>Ow~uAmJ8qP*23 zG8gWR8>si6eo&xy3@-%1I;KqULJQk)@*&lse9z8q?5p9gh!4mQ)m!LNA+3y3Wtdta zw^yooArr0cF#f3Ue|(LgJZh4gCA5oeXA5Wbj(^%PzayZ%FYF>CB_sR9Qy!nrYR|48 zp98z&ea1;lAZ;qNeBO=L%niF*0(NqU{R`N(Y64Q{uJ2e3%usowYVyB9Lw&Xtq_ieYeTBt+zQYqAgIP2G9VMg99Twq}M*zde_R=%9iLC?LEUd8FFlb|SY;gQ z6x+h3`mtj-wePy*amKqxEKVZPpi5JY)o0dha_Lf;M>bAac))XoNpye2GBuyDyC`{M zo7%#|C=&_J`%D!?|Zs4LcYIpd1=Q%FyGYh8Ca9* z9N0Z|G=v$bxHZ6eAUA0;1%~F1YJ0yOOiR7Q#0SoDBdVy7y`|{LPMX;#`Bm+n4l^7v zIdZlosLYHFm+q-+UlkkcKuO{%pMnRL8s&knxU|u!7So&LX?GbAJwk|-}7mZ 
z{}qoMvblBoSU(h6zJqFsr5lLpz}c2sHKHR*pPDhiKf5oIcPpX719Ra9RGVYu)F7=j|H7D@RxtHYPk1zLA2n?K71S=nr&Y5e2Zce|~h|`E*uj|&a z2L*2w?eJUVT7stBzuud3ZOG4a;tO=fOZdYk9e<9#0k_oqo_#A3RgB5+JB(wDP0~h5T{Go_sk{r4tvdI33I%y&!{ppu#8Mb< z2~{uL(@_?#lqa0XrL@#7xCi zb$=KYlM0ati-Cx%h!%@3COM>OM?ulBj(F?Q^Y)geXEAS3sfKq@~2| zlfH;Dqg1;f{!6djz>Ikd?}Us_t70>t6!*#uycrxR{5oK9OJA|J*rIYV-s-o3sJXE9 z`<3CZF)JVjaNZ1uFWN=33ZyzfscZOz{sd1RG2j_@&<#+q4EUoWSky&=`EyIrh3sor zhe3KGePvC72q9cM3|V?cQkqO!Ddd#Lu@Dw$@|%)EUJ4k!Wbq@vx@cIe+Il%>QnvHE z<}7XtT@bHjS}`I-8g=~3YWZh+&b?2*sL6a?+b2U?g(I7*+If%N$w@@5lP=%{ zzXu!k0q?J0dB{9t1u@mxF&_7n<0|M&70Ncg($0}JLqGCK#bOjqhtd}G3#-j7X`D@r zOhgYgtJ5=rCNY4nj;$Se0-GtE5^%V`COIb(94hFsEj4xYms7;K%yrk$r<;1>tizB` zm2@@#xN&B2_LzN=OhXl!pCSxfOy-wpQZY(Dvx}7oluBDlv-(InLm1=s7d8WmoR6_( zKx(ieOO?hL$$lswNLFW`Xz0a~%qC-~l4%`w!%!2?e#Q}{(N3leE8gpes5^0S3R=;$ zd!Wb-?3Uj+$&r>wrYrmGdXVv3J=MW`X_7Z+g;IaH<0gB>AGJqWW@GnpvSd{35=|^nM&O#OP&3hD)-_={4rQk z_UK<`H@3(=5z8VtvVhB_?X0rOawy(r!z)gG71oZQd?RCp6@*}lZzR}THgVD(ESvYl zwyI==!M{R$V%kXwsbZO|Pz@+<&Pj~Xrrmb1n|QNFzBh}#DvjQI9dhcIh>1DmV|iUE zcjO=aJi4{X7MjDa!-pd1Y@E9(B1mkRb^*44xO7w# zo^I59gx+Ajla<;LubyHLUe=$92JXUPCR8H9SmPsipO;3{8=@BLS6gW4EGEoK@HUWm zXYkODMsdBOYk$BavXFf@pnPAva+03Gbrk&E0+J4~8csRtl5V2Pu7N|sV4){y87u*R2Geg!63C@Yn05jgS`Gv3 zn^(`77&t3hmThiy8e@hMQPs=H=J!Y8oGZPu>ok2Oz4SH;2^Fg&=fRv!!rJ>Xl)4*y zO+QBsv1|);U0_gN6RH26A&Ig9Z2~Zl*0JsE5AfD48|&Ldm{)ET1|R~{_HUU4(QQ3F zM)4ELDahfS7__T1BaB{U%#-W+tU?dRDmrX&Etz2(%>+6>6)@PMR6^Y4TPEwy0lpUK zY^!B4hpMHPxe>aSCyao(>dAL|P+RdNYcQCn=huagpn$)d@^7w9kngVHRttV_Y!Xxh zyJ?kEb{w#=Ohxnia^yM@f1vJH%n7FWIs`{c$i#bU%|p+Lz_i!sxgZDTq2c3Ax^RLk zLu+)C#xqt?wM5B;G_FEIHGno~8Vf`kx36(XYp$Q5FH_vO(r}eUUB)qY$1c3u0xf6l zS79+(Sj_^=ZwSo$VAUzgQ}m=pa+KQDH#&+o(c@8BIF{`C#;NL9Sq#-|E!wrIHO-@@ zdW~BQKaf46#?9imRrL5=sA*{7KNWb7BPR$N<%%&3o+k(E79ub8W2GDV;8PSv< zI>(`@G?^Y}2B^3-RXDs}Dz3CxpzrF7(dL>S@KLtj#qZ<$r}i0}*RQh2+Q%d6F`P~1 z*XEBUo5hurKV`KT-6m}_XX7M&={o*SKB=m%%n^)rXoTEEV%5mBsxjzDJj3@1~ zTmjL9Qj_C8-*t)KVHV`zr8s)oPw{4pQLY}@0{^1bqL>` z?6bWiX`Z+@hdxw+w93&UHZt{_<|PkwznBR}mMoJK8pxwEK@TFCsT#~GO-vV~p`(5n zU4Us-x0nyc(|PTwJI-+QP|^sifJIhKF`s61ax$3X#cYMt zj5!#+v`K32;e(oLELO`UFX#LC(ED-Ag;$W=-&aBcHaqu)4Q-<5(>~Q)o!2e#=$N;i zj?Fpe&nH0ks_RwC_T&1Ez)MtSu9W}cGm8f@4~U2sNTeH>JJBho79hLGq~MaW%lJZD=&OC82855;^dS!}v%^ zN}vhbBaqc)O;)zC$AJz4tmKm7qPqNA;d;5p&bq*T`hCk zEsxVUmv+T>smtiw6x>tV-H&}&>4?eeJ_8BYRTu0oTU&o0bnl{D9_Jp;$GrXdj9iE2 zflD&ocBhTU_FPy?3f~TPNvbB$f=f0wM9+G>0;#&SFoev;d{fzJ$4LQZs&!Z_Nw9sp z&fuFMUGvrW+e`|P>6dZOoI?pT;`Lc)b;{h0X2-Ge#gN$_@;5>)xM2P9!doT$8XX%| z%XOA+6)Wb;+UJ$$JFQs$JZ)wQKLa8RE1Yhz57EGrHPtgE)bg;=z<0nxB2Wb1ruE!x zE;m3mPw<{FwH>Mw?h#}lvD|`LYQeA|ZcdQk=CUQ9PI3TL>Ufr_u51K+q?t%K8M0(E zHd+{?B_Cj93uEV2O_a8&Iq?FO3f^|4j|mv4KekeHK))TKlD4s8*inLA@eCK71{(w223f|}=Y_f@Ahku~F4m9o zS0;~=5RZ5jH9JVMbqlYAj1fnX=4}qiOnV@%-BU^^72UQf^7dNO)B6bYa{{@H7L!*S z59}Eswq*V-j3tR8D%-FpiS}i%!|}?uso!D=kpPasnOBP%s2%<4w5htlCz*LIO8zCz zMIA9)4#&_K%czLxr8i10;M=9vqREQb|L#8{*M*OP+U&U6T9B@rGV&3)m*H0y_3l%= z=+saFX}I7t5K%Ua6%>VATW^#=>MtTHUeS7Rl^n>}Vg;0sD;zPcA|z6$Fbi0nObB(g{0058b|y ztsr%SR*;jnWwC(@nV!qhtGVN7LoLWrt(h5(UX$Te5pYSspJY9z_rS)AxYp~JN&>D= z)Z2hPx`FHQ_H=%h)LmS!Yi?1vpNAV!HEWw3$Sff;(>gmrxR^}|1@_5M|9+q6z)2Fn z|0zxEpExd+LVFx`d+q5D@$7ex-L>boTqWldF^c3`PpgzNzN9&s&v8Z{{BAr00usiz z6z(v5a1br(^cMmj#W-^1gg6qYS;eGptCmvB_+x%P`*(^e8XVb}E_;Ww384<2r9^7L%W&SjG-<&~e1WLPm8Oh)6=X0(;pThQ;s-66be^tZqUosezB z?!bjZOah@Yn})5!H>3ze#ilWHd&8L zsbzT_OSVPVvUrgXVOFrZ%&?OYaYByP`_L+-qSIz0DP#V-tAs(ljdomAR5S=}PvM9b ze@Lj|J19Lz0%vjv-OquEw3jVi5cmX!JH#|RN|M9`vO0bqil&Os2e@hXMB?K#fQ8JcBwC2uQvzEW@{Aq{EO6GH|x~b zTkbx9o!=!Y(J4kHY3^-;8=B9RPqum*D{E@7&Juyw)j*6o?NA_uARsClC>u#_(GU%p 
zoFWY3RG>FnLk?r!8IY8bJYS{KI9b=MrLN6gm8cq+1AFtgaFyij^IJ}Q(qjS1j34=> zvspo?{BmA=6jd8jNuSH1%y~#ixT9Q~D?KhV%dkc0crR72J|-gS6Ja`y`Zxf36|)l% zxP7^@lCE$!N~-~64D1lneKRlzF)+p2D@FyMuTBUVNeJKCelFzZ1&L}fK<)8C@|$xF zT0g*U=^X)tswSTj%)36qgPnSBd*GtGrFc+KR_=Bn+2PQIvb}ZPQGU^WZyNXtdn9k? zfB(*7z{U_&9dCT861+y;lpDH5>OH)L2C5Zxlz5lC-UXSJukh$+pK9%CZfWo3sx$Jp z&bUJ>gOdn4gjXHBrJEIOk%S|DAoNg_W%SgpZ^`(%q`ypgDh*7Af{861SP)aPm ziX$Pci73os9g|BMD)s4_vDf327U49C^x*3H7;rh8vQ3S(X22?O` zQI!v=ZZ=Cb55ocyqZ_RI-9JU2Q)_~0=5ucr=1n0QXY`20H6{>0HeFCYEI&$UfL*2^ z1;H8%sl$NMsU_49vmw7lhW9Wvb!v{GxVA8j&)5FuSHe)OqwX^&6D9m_N^!i|g^=?V~$S z{TZ<8UM%^rb=?_3bFNDGDAQ5iJwKc*tH_ypxkVTeBc3}Q=KHw6%*Ht+N|w}n80kv5 zOh9-eLjFfk4=9>%J%+(fp!fjlZvv@KoO$A$`(ARUkXoMNoIj9xHoW zDjVEIbMtbp+s9n|r62EmqWhc(d(&Em-;bEVbIPLX1t?L8}6rM01WaN2kYbohZ@9SO+>(80%it zQ(NU*BhdANx^@xAI|x#?q2hZP3WZwLrecs~p)l#NW4i?OjqOFiPyy=VfA-38@JEDP znrBaSd5w(uXKZ4|BNvJz9?$?mq6dOWt>S?_kR9U!uNkc6L*1ah<>2;d0$% z$QoCb1m1>LWJFQbwx$MLGkm=Yeh~O&pwj9yXn&{0Tn%+Cz^22aAcv!ZRhB8mWW`uY zV@sXD(vpFXIzVpQ*!_8R4pGA!eAOV}g7Om)%rqDT*BGfWrV#|!9F%1e%;K$SV;B#y zen?8QTmg&V{Ey^-h428Ft)7Y99sa52LnDFtLDMrsvz}4+AI~-~KTGdBt^7H!&dxc{ z;P$S&lOwtZx=Fg{{46&<9(-qLW5|PL1za){i$Luve4Ks~MzOPg1|&Tqliso=J{}dU zV#N{o{ll`Hb!b>b_HZG+UI(OsZDc7kU11){zWrTkl{@pxJU$++lRE8UMxHD|e7~L* zXrP$1v;tI`u7d}tmx;4|(An`&1XKTvsfF72g(CzT(%O>Dav5(+sYisv+T<%MpX>4i zY#ga@r}}e;x;Bv56Ss}I=^&}-h`~CcPeT<@kQd@RkiBN@HRo1ZO`HSVgth9Z(deH2peTpci-uv2E;N zEiPL3{$5^qlDsbv+|k{y;T_1^)Y@whUem=c@hrP-T&7j~+4A08+)lbXF?YZXHgWMnCoB`kNG&L zwnlvcZJOhfuna_!dKe<)e!%?v`X%E3VN_@Ew(78%j1rs~KiLEM+SFyn7ak^>hv!@}k(`iby+mNJzOQ+3+u|2? z>%SiHKBVV}+QtY>NMCONfXLB+#F{SPN98Ay<+}EQ4r`vKn((QQbeo4?le_0oE@5BH zRY47R+Fk+3EmMA!KF0*TWFGEw!_e2eUN+ph+jr&gHG5s|4bg9QE1z(0-21&e1+y!? zn)lu9u6MZ8u1Mc<#MW8=U>!Uu()ynW;FEdF3ZmcjlpXf2LX?5yj+I~|2_u&{o*313DPrpb zEo0B`-b`niR#%=+i(H$fZ?l#79@Sd6$DNss2?6Fli#||~GUP#55|LN1!<59EVCHoz z7Y&q#s2#Jt^<=_q^2pAAhZ*#bRPxv|W(Gl8FTKRg|6QIGz z7s~lvsIS^SHabBT+j|3L%|x1bnDU6k(ru_HR|hQnY_E+;%xQ=t`))32h!2Y#@`{E^ zq`7b%3p6qz1U(ucwjKLhwt7EU*i=o>iyuoEe7E~&g8&@4&n^6NaTrLSd%(Lk`C@$< z=I5JP2E*#b_nqeMG&mvzjH1u$g~9W_T}Gp%%wNqTo4o1T^~wUggw4W4aF!T(dOz?^ z2v2Z?twR8#*tgiP8bj3XF+)2K4lVVW;;~t^*eB)N+#mVKn*7j+BK`g2h49CrYFOyu z8DSb@1b{~i$10}9QEwhmAQ@#!=ptk+=f(anSsrQ5)%Mw$13vxVBHLWOp%?JHOOe?n za$~kewKqktp4Sxnbl0}~0mj|0%GY&H%hwU!*z?2P$Mkq>p%rOEYN10KR+~MhV+RK+ z(3LTJhvj}z@LrT@1V~8bOh~UmKLA&#WkEhUB!r%5^GSJTjli4TI53D9XlT-N71eY4 zmLc%)X5dP7(1dZllBOwPPWMBA^ z?x}+M+4&3=cyiA~{Up;hw zZ#eY@GY1PgzX|k1lW9V8@3Z;=mn$Qw*t%%7jj|g*u`3K53THwB2n25?%gXzZHcbES zrLCmq?WKWE3jr<^+2$&Zoed95{yLKf2dDKT_f>ZEF!A$Z=AQIR>$-Sr$GJUz2`nG# zHeK8a@A5%3d}?{o{zeR?2HfvQ|KyyoyWak$nGY#| z10n&hs3uwaJN^z_yO5sYy8sp^>PX}W?U=Sm((+tYhB9IVmEZ-TxABRxr7M)WF^wKTb&Wz&^{tBGEWfB4f3Jid$ny zu#Rx(^yeeKNyJ2!(d>i2eurrMTeCdQR?py^3E(5)uV6xq(@ZfdnB z3&ZY=sJ_adh*xa^2#YmlCmB}xdk;g$dov+$E~@YVyKu}zvP2T7z+{Jx5QMP=SQZsy z9?vlFgiN{$D~s9Oo)Go`V|m>6`{0kBxnFT1BTF3aNJysBN_J|TC4xd$bia!H7-{5` zdffSuOmSAPJu9TyGEGFy8sO5v97iUJKez)X?kX+tai_InYbw4NE#8W%u>BBt$Il$z z-yB`-*H36#UxMbapoUZ?D>hr?Tb<`J8Y}S*J1*;4n(0?u-ej;t?t%SAKn_B`K@eVV zbG`@RZG#>nQh(}*CgTz{ZGyZP(LER_sH2w^oK_~!&Z-!4iL3`$GCPt*aX!GsrjSd zfjo!Dj@KbqEJEjV`hKcYx-;#=cQ^d_Lw6mo=KMmlJ0H*gmb&9Q=uRI>@AY-c;^$v| z-~9!Cp2yKK4bRj1?>R#V?ZeV9t#HVyqPVD)1Cmd3v%noLMYax%Mx$nLov%VK7*=#4 zq#>pTgbg@UWl}82_jPFBoLpT@=!U0u4T z<=0U5{nOiOupDLS1$wZyiu*nK8l5VZn~Z^c@Iv2tbUA#Ca;d3?z$584cXm9jN~ayX zRM)YuKM0Z_!m++lJQpO7EFid*ts@lfPi4lgT+|sIMwMI>1ie@Xw^bod(yW1<1#9&n z(yLjLgaG@aVWn@}F(SHLrIB`E6zta?%M^Y910od!NY|8wim$N*LNY!xBiPnYBp}#d z7RDeSH(B7zvSuB>n0xlK@HhyoIP$g!CB#^Mp+W0=)_&i2$(}o9eJ0vTk9laNK=`0q zH+4itj1jGD89I(?w-L_%nzj=2AtZSpiKCC|lc#3rA~CK!|8OYwlGmQ4*Nq)!q& 
literal 0
HcmV?d00001

diff --git a/Scripts/Models (Under Development)/EGO/Using EMComposition/CSW/Figures/EGO CSW Model - EM (with PNL learning).pdf b/Scripts/Models (Under Development)/EGO/Using EMComposition/CSW/Figures/EGO CSW Model - EM (with PNL learning).pdf
new file mode 100644
index 0000000000000000000000000000000000000000..b45ce851f312bba904fc8b5c8158634fe4232052
GIT binary patch
literal 40065

[binary PDF patch data omitted]
ztpsJ>3}xPkV@&?nu9!Qfk7jLfR9@SUceyC!P>CEIkG#>zZS69KT1{lp3vXWvp#VqW zBAh{$rr8m?@7}<9EXNHnNpyqBcOg-F8Qp|0_ zWH8}J&UbP%J}(%Vo*^5?HLxLD>kct5VB2KyJ32mNe)cEqd432l$SS=kcRd7v#uKlKC;J`4}Pj$M>~Or5`)sv8sg+H6RBRjT#!@U zbq+)}z+-A`vfpDh)Iu^!uaA-M7*-DO%^B+Z&Xx>-ShmFOdo(U-5~Ju#)8=C|hA|if ze(!#r8<*LFX>~pRK0X`lGS$m~d!rMTa_%FF(1*H3Ol956OL$E}KrcNGVeivW&CtZh zT5hEbUtCpku@ioXhk9i_WXA~8!(EN-wB>>m*mnu`igR#0MJEH(FBM7`M1PI{l=@|` z*JXN@^Ar!$1m33m?0{#U`4C5nJBy>NQUO(ghG(Tz5dlGE20v*#UiN_D{uzKy#>fF$^ed3Z=v$lXMTPTD5#QSO`suT*4i2?LJ1dW>Xv&S~VQPZk0Vdyi`BZ-?Q}^wu$@D#qb6{f(=^lj{@CO zL{qHwkZzFHOsscO9^e=;P4o%b#WDr+yF@Q0eEM#$O_LRuVZ?xO%Mm$X5z9){AR;_~qQsWOdjz|)+jynqikNDFMtwUbNa}}MvRUvn#uZu$k`lG!hzHSc_J=T} z61#|JU*)q2)5#LUmeN!LO!KIbX*YQoAUY=!L(VH&_Y_ZVm(44ll+WRcY`BB24pD4gJQPvTZg)KaKa5hu*h$;b0PXe%`Zl@B&0uCj@)WnAQ!`B?i7KPjmp(6)+R~&Mlt2EE%%q_d_K^yC8Wc+^ zicre>cPLBHcFX!XYle=>hBbSY;%okz2AL<_`4rCD2{pRDi`vTj_n)^Z&FtufRR|W| zm*d+wHlwM;j&LZZ(7Ld(j8x|(#J!Q4HSSAF(r7j#3|Bn}8W0jII&FBow-a~qbm5e_ z>6aAE$Lc7;?}teaKiMZFn5F7jl#aw6!5XF-m6qwJ8u%&-6Acl0<_ypmSh-aQ8&HEH zS&$ELh|R1d?mMPiCaceKCO(gbg~+M@rq_staW2l4x-6_IxA6=j_7$~d$l^5N z2_nnnWF6DFb_zkk2m{^`_8H{EGstF4rewG8&Sq3ejcC>%Mr;NXK(a;Nj}`b@Q9Ml_ zK_E$_PfnI=IkUf9fjLaL3*#Dgsius$aKziH~HDuMKdpcD`qx$e>IL<0 z59~}#e|!4uM~C;fkH2649oJ`cpDM-QG5$I8?^qc=WB9axX7^{Te_Q{&|Jl3!w0!pZ zbNzoxNPkW1f3MX)6aV)@{D+?MpG)*#S_+;9H46(f9yKEaGaduoA3=qVjztUV|DvU^ z(=q?2mhxx2_J6dLzvk*+TFR%!B4woaM_7XTOyhr46=sG{t>b^R6egzs)KVDeK9l`N zOTnZ6dq?{}w3Pp>h5K(Ug@Ng_-~WgypJLGGYW_dfln4(Jc0*3-;l3g_7K=Rs-{Enauq6?l(R(B*Afn>rxvv^A?4p)HlC6))2f2NkQkh%ehc5A?w{Qz}IJ=a(EB;_FiqB6gN?2d5dqq?{d!@ppNTwl5KvR4zzdL4Wu?axt%1D zRjIbP$>;D6JdU@#*2LLCx!|Ekn(W|;Ffjy=;|n#SJYVm`-YqoAbll##Mm~F&>M1#R zY*iVdU@bAaUMy@{UvSD|=BUdvf7{6NmctM=DvGYbc=B>@RL8XaE%wfK-fEC&;1IG) zszbOJO@t`S17Sun%axxNeJ;p=Aev(YI$9QUPk=4LfS`5lyrx;AXbW6L>}#8eR}CF1dTK3r%i=5e;q#R@OLQsa_B`S33iB^lgmX zRWEXwpOiX5<8c=Q3B`eSndmKv?=^W|W%)w*3CdkNp0;i?fbAX8y)%zZLPWzReG3b=AkYpWlJc}E zqoC5%sy^;)fJit7Ucj6$HIxbdh=d4abxeZeyNFiv@Z2u3oN&Bj^px-uohu*l^K5Y@ z1~gXK6&3SuMZV$lu=ON;x8p`m`8~Dc!L%<|RE`tId|Z-Rjc^E43I{r0gH1{TBtOl} z&Nz6P1%;Eq{^e=wFN$Vr^-jkO7?@|es0N8NgH&4KYOTq`r~2>2n0F5jSkYj&!Z{z#R$t#*wc8xHFB-Sn1<=9-G; zfN$d`6MPBykwrdM?3WJ7ODxuqH6M?^|1Qd6Iz=_q=xTx4aeq6}SMv{JMi*XV zxPRY7^w^=QL^bHJJYEK$_6Ggb~2phYkC@-($)=7a1Rr+c2USp7>ir(_%D)JC>1>s3Dw; zy+b}lo_lddRs-d;)~@dDjTkG43)!{9?SyKR6lbot$kZk1?dSSMSthVU3;iB+h+ ztdzq#Had>yZ{it8Y`C?BZylkiZMiBUBtl8T{XW7^ASPA((#}FA`THWWeRqSxlOe|z zHWOMify2u)&mL7dujyJdJ!k?P@XET?AOyb)L-cKXmK)X`YP8MTCzcG;&9RKj8I&=0 z28(Nk(OBFRlS)gR7D`ipf<=iBS25NFI{ghOdQsc4I*GS5YBKQ^u^IcN0@%e}^C#rnoc zo0{e%nRmz`mSPb;oj z<7BoeVqdjFV{)KjV}cJ6bMR+`!##oy8H%U(T9LFWk(^_%^S*hy1dc879A6l`KzPce z4u8PWV8qGHO~y<>t7D)NW2_=Zghj6~Eb7#=MVs-Geqa5(*Bv+5C|_}v9s2u?-FvQ- zz5Yz|xNm>+Qe@2Uc5lwLUic`f@Kt_w3=`%v(;3u=6SR^jOQ~_(#Wi+4b?LLHbI0`% z_XVjJ_=nU%TIIeAX8!aC>xa_4Aks_sh3h}R($l2D?%#ZN=2D%C4W({W%W-KrQwCr> z0MJ8eLyz?dd0($1!@(<|E_?)PBUEQ6&@J?(1_S%A+_nyV@}BUgk$DY8>+)>KY!-%H zT;SK0+46IiQ_uuwEoe3E@gbRL(33tYdcdVEXm=%Jrsmquc)g|z=<}v#1#(E)_dt5k`D=bEyeSIA6}TiC=$LZTbWQ>#*i0z)4m(E3uQgQur~ zxmOOP3t>mWay>E!|KZy9m&KdIs+sA1}uzwG^j&uTP zoX`*nG{%q~B3^%8mb{9WgG@I zBnC_|?^%tnr@IHg#-1j6Wt|XyM6+QTy0)C&NMIy&%M}kr{gl% zc$%*o2H&xbA?OpVs}EK-L6ZuR)Wv*T~m^z(YKagh@S+B`Bw=`qbj)9Rv^5SmNXhPMbUUpG+-@wiD* z=%KZ^d#9=~$SrBFzORWZm-+Oqf$#pE{2!x(wXUaD_cgcnPx4Q)gHJBUbt86mAg<}J zVXcQY92uFysB@HuV9$(hq@D>l*+EODEMe3JI+9@pO0!ja^pBJAi^IH1l}eSYSfeaP zS!3_?FIAdK=jsgDkJWH_T?uPpq^r{RaZOim(H>GL<~dFt223(0b7NN>p&NX-*{g?n z_0Z1je7uEOfU|?~O>~X-X9^rpI=(Wx!ghqcD}=v^ zKf@RwNk20WS{i(#-~X{B#}X(#@L<^(T;Y4{$atb}U48LcIx7Y%R;4tWM66<#z^uZd z#33y{MmuIBi6=2qV(4mO#F(8TX_%ayS~mJ%1aENckV$NTrP#YKSkx<8w%e21EG4g> 
z$qPuOM@WxEmA(#Uc|Hh+JT_DArnui;{Vjv5=yNp2-pfc^ZOXG5t-DYv7}a9~mxLLC z?Ges(etoF6xKpi&3shuDn$OUWLxU~oI#ZN4m_K+4cRK4pwmsSpS3E~04%AAT>t-z|~)m?#F z9hNxwd>y^pm*u!w!>4KnKN>JlP4Py7kHF+O&9QqXv1_-YLdamv~vP5vU$$^bv{DS?+_30%+eFtH$r!LZIllKjIDsW6xKa z0i%4wo!5XAv$-^kp@5yMeV+t$4k!NK=`hd>mZgb)Xfjh5I6BEr~lSU&~fV9D#UuiW8am~sZdM9aV3=Z5@kSO%znG_!$u5ZlC zjZBI3#DhfNt<_fo3m^fb(dbEooC5fs&gX=P=HYdBd5K3p$$=Ck#YEG!13r-Bx5OJBjBzQ9!f^o*_$!eoR$;l8IG)NiU3DpKV=yQ)KUUeV~Ek`mKvp3AT}aJ^U;Y*i1pLn#eaSY*`W1aW9XXJ7c>7L zdc_-xLn=ai|-Xhb-3S%BX6AgKu_DMi>tP|2N0Fz8E`!K}Vck;Rk=1+ci zNvFNMTjyX0Pmp@KE%e|PnS^vk@I(wBgm3CnuQ|i;@rhVTpu%e0mW7 zdt(1gd*$jTXsC4@Ga=~}BSgOT5Ava)f(4@X`?%K7E9fx*${KHAX`J9ed5}7w$ z*(H2D*g-G*NzX*&z4$$ETa8sb&e&&Wxh`BA=t*?VpVEVvx)$U3pUiQx3#SC;APX+d3S(5fxJ^Eie}zv657TxX z7E1(UNHW}-`Q`MgVu<);Q@3_s?<_-rHiilP&GY>(lx+2jP@^>7I< zV~khFc{Nr;IskFSkxQ=`9a<0^rn1IFq+^5^;MW;0ygH>0&vEEs9FlhGP*w$wdgiw& zWhtE{y{-DDz5Jo=<{RXtKByT*_i>()kv)Mt(sD6a;)s>Kd&F2 z#G0DdES+kCnyxIIw`Q9?lLmT?K^_%Q>mIYt6J-uAhAv2~3gIzJ!kM@#PMNs?9{is4 z`st>C#>raRI+}W&{nl~Mj1%-l(j3{|Ai-Z9iA8HvFinmz%H`-@AalzQXMDnDqu9R$ z2aKhe(X>PTxnjh`BK(O)=jDWIQ7qO^b0$kZ)Mty9 zIplIhf=-kVCc%=MDVG+cjNMhqH5XaWnltm#BD#yWyUD4w6?aeA4L?I*Zl4agsl)KmVgw4R<5S4W|+ytQ>kmaKPEG#^wH05b`UaN7wyu0ynk1ET=eLpUU zc>s?Og9Dq(MCs1g;N-SJW9|%8rsjN}RtbCwKpg_A$C^oFr%5a4QW4@2!z|AzqG4$; z!z`d)2^+l$#5t=rmQOHlhAuP5)51*?3~?P@hUJyyoRj3pgd)JZSoINi|^v;H8nj`v{=j9AB) zgVc!XasovVa1@IjYrY0@OVDEd#KAJRP^Nxkn327}gg-#+tSOOz9*$V{PH5rwan;#& z;hpyWHhD!HV{SC{uye$?Y->r=0eBc-)tq`d24o*i)hNh_{e(K+kTxtI`5HDrr;#{B zx=%DIT~4oMz(575Rb0+gQS++!R5Bp9&nWIw8>+7pGqUT1qM`_c#z!DLklaKzoL=k` z-!uA@oWuE0qCN4H+%v$Q+|zBck$qhT(jSV+g#`~&7fWv8pQXoq{) zENE)aH=7K%imFbl=sLK10e()GVNQ|mcBq@q7Y6?o6p*}7^n}Z6g z+S>N#t|!(Ub#F~+bxqSh7GZfkXCJfH(ma!BcsZG!&V$vOtR{JKPR%wt4o`#1#xl)# ztwmQ6%mU|}&v!#8!LU}%>)~`*yao$wB;@R@&KdUz_L$2@EjUrD3xvb>T~k~-1)sTi zIO!FPG=Rd~0P>tIg{IIglCGN11%Re`BljMspIB7SqA_#l6n5WH#8Cq6OoWNPXXvKw zu+MRlHmj;BJG7j*Eo#kX+_w>|9tv8f-Ap-EqWd#NeUIf&!F7mV+|J~F?!M-j;4j8< zLe(hvycKd*A#zTfLWCSv_^l*aLWvSL0)tBX=W^hZbd$_^2JoQ2+2HFv%Pq%%Vc}dw zMOih8S!?PutX*zCGSUG>MP=-p$hc03k1IEgj^hGdi^XQivI+0eM5pV66Nl5Ld%fN6 z#<6D;*YjfHUY7RD)=)U?)AN$`5vcdG|K!Hi%os@1#w*|bNfvj;o5vg@Lg(f5HzOLl z6`z(DAzb9J*&1^k&J6V$zhbo75D~{nE)pL_62>92AAI!?^GHSMrF4NHee6!_~FTurYh(QZ`@TWuqZ8v@`hxWyT`fGgvFv%%c)6K z<&n`b0klAMwnU~UFi_#IgpScpr_0tE|s%CE=%P6~l}lnDpQagsKsIq-`=4k>AG zj2sbK49u_i#M{DLa%M=<&u?_03`NT7!xlA(;xrbj5E36W>jxgcXNy5e`)#X0fsgbe z*lkmxT}CTKI*r{7H(+Es`$TxYeS;~hr;976*3D-~7p-380H)#fQ|*JJK{Keh-jG zk)}&XR$Ymh8?A?*(H`+rEjbPAB7pJOqVJvY#+vc3sN^bqy!hNcUZl2II@(KW1}qqL z(sd2X$}77u6Fv?nrCu}OzrqSA$_X8W^}aLDFE+plFz#x$u0YxeNZ@&!;=XuocJlbu z7G3DEJiJ_$*I6?d>(N4(V;l1mRGsGAKrmR@Il%EMCF5i)FhALCZF#8i;OUUWKGhbp z>3|93&Qn^dOG_>1TB`gkllx^wpdlVO>=zsfsDmHT%Wc*-T@xVETMkYk^6`bcH0px8 zRO*7WQADki()S3|+wbxjy6}EfZZ_gr$(d|msZ!V4RFO-XHA@bJO-C-vaDT8}fS7^+ zh^6$Xq#EDpm*h9fFh`Iv!ZX7#%F<<#Yf*NPqth~UQFp-ZkC%uBhuay|wTrt-y$dLf zGhkD6;WHxXSjJIm6Ark5>8+Sm*kApUnO_hfzo{kD{>*r8Z|4m6un5d{%J5oo0U0I} zDxRh1z*3n=Et1?gDmNR9iIFdjIe8+uCYGF0y&BXoF&R`Dz>1l}sr$uBi9sUjI);F< zsH}a0VM)wjT82>s67062bpbJK+x7#x5%T*NCeEJxt|L^-UoorAZW>#*)hx<)0wZU8 z@#<3*XU}e#-Pp~(V*vPgpL&??V)Hw5?)hb)`DMC!wMsxIb341W)b;4LOj+F2qTV`u zclL4ybE)y7xfB+IMR!W?pe>{5$}4zT8K*RHa?_}8#`q8MW;n5%9vWOe9c<-ekflIJ z^N=E$U)+u!sm>%{+)iX{5*;vB=MhTEl$#16Rf^e_r`ZOk4%-2^?qL`&=Z3r^F`aoq z{NF*A(mZ8xy8Jw!g}?Iy2?SLF{US4Lu_}N`=f^h>PlG!BwytN$clohniF=krPC5_K z_aR5f1DF`=HTjS^I=gMk1ljk~_vjV6N@44e=0-_Vt zIoiXjZDsRQz*2_PE0SVXI~8K4uN&wtnE<8*vBT@7*%-_HLdtXYpy0788#Hv|zy@Le zSUSw7&dt5z1H!!qAzgG+Cfp0!#6M4VIFDy1zFRwt5@Qh1IfIlQn}X1YK%^trc(=6iBk{|1q>}~js;en<3_E^3yGs=~<~rsI 
zV&IVi67ETksGbN`iSJ=q7J88zCRxUL5~O1X(H$LY0paT&;B7xzCtT86rQP~XZRms6 zc~teXoeFz@FsRCZDgA{WiLMxAR$59@2l_f~wfM-<(6XX6-Vg*JWZ}pc3@KT62)1fW z9j9M+YY0%k-@-qe_a!XH1gjYNQtMnkPsW8{pBYLHkZ1$`KGx{S(J~CtYsEdQj{CD3 zfrWBwI%Kp*S+zPDOy>A)6X8@{<`Agq3AGp+3i@s zatL=4nX!23cL<`)1$$3mrbB#HIO||~A6=LJzU@f?LN77xv}jGn zow{!VH0z@Y^ssQ?lk+lSE-Kg$mkWUC0jN7hb@GRqA6r%CFzAms$>gl0)nHnsJe_b+ zn{^f*8Ko+FJ*6Ck&6H?E;BCI2MIQ02r6J^+h>pXZ;A7JkqfYHWs7C#SfjY2WorWNt zG<4T|;|w6n$QL<_Z!%Gd8V>??)Tov?p3JF~UuvAJrl^0iymnr5a4qRw$MR6`X5&ok zK)&?psr@pH8nA{Oi+Xlm&T5kM^2Qt27jCsLbuZO{XC{{}WNcWTbUkl*clBY-VLo{XwA%m)6clg^av89N1{C?T2i+y>!Ut^&dr;2(8?{~j zBL+r4)Sj9s-K;gpLBv-3KFpnk-3B7q$rcM8j9!z-ZzTrSWq)GLvY2Cv-{pFA{v*Jx zF>vlFk>2f;Z59{rr1JP$3h_^lE|))wm=rZN zOR505^G|o0wYOyGt>|t|pdiqF;9&zA@LQrG&4(i@2<~ZRcqVGN(iCaIt zhc-q)xn*^$Y<7_?xxKM%l`Uw^Y4;2CEGWNdfLT?^^kVsaToGNs4cXSiQQ|el8MpyT zrQQz$7aSknfA?&5M0*;$2eNbye|z`slx?E+#&UUgx;Mb^vbG&l3-g*u-H>yBQsb`Q z%V;|4FA1FNhvc>Ivhbj3tENrgM@{yIj}DyDi>4$AHMl>N8GtpfSC;5IWc~L&9$Sr zT!b<{QXpi<+VOkA_0WtCmgq>-3DOs$vm#NMsEPV|p+r&TG@2TnV^!;o!{+t-;=EPc z;rA3lzd$5=4lbFZzN6|?&0u8cr*SZ$tfg{^Zo}&*!5)1}quWD%nNly_kR%8UI3V~_ zbK6lb`1p0fw|kQfehQc=L_J@O$2m-Og91v)Rmp)D$C=*?tK{EXgi0Yu-7;PuGIH_JKuD$V;@ zi?}h9tQgK(>w8cWxNzBL=nn5Uzz-{LI!@JP#=D24jT;g9T8GhqWhIAU=CmM_Q#=eb^$up*EcglVU-5Py#8!1esZ6C?S$Ijhq=% zBXp9gYZ4%PDUCp5_0z=_Zy7%S*{9YED5ECuY0?!a;|7T1;EMxKY1Bc+D4;|^nIeHW zh#lc0-4ct5m4%d^)3wv61d#HkIhEU8kT_(|(MTQrge zXkxdz`HotD<8u{HnGThr0Ad;%{RKZslbI-BT0z7Tgceo@)p$F9ilw@lN<1zUest;^ zcbEGQu4zTh(>Q;7GFWdpiw)-fFy$I_=3>?l*b|Vr0;e?LkUm;W5it~kQWz#Rbzc^? zqh1ifW`Z>AvMZ{6c1l*kWv}hJ&SGwU|4k-{BImMgWT%Zkmjt>mrNBI@mT zz$x!}zO^~(n$3PP^A5(8Bt7VwH0MD9Sn1eV`qP#daG zt|PW0qE&Do+O#9{gij?510sbUQ5LkE-?LL2^{o+0E_6?d($RMZI+ntRez~rVtSu9` z{$MFz%5?et;(a-@0Ur_dvdXm!SWmZO8j(H@D?Qx_p91e_5y2^mOp;Zhwc{$svO}xz zXRldbHKKD>kYl>xVE^eWGwE*taTj$9agbWhyq`zE$%JchQ*qm{BQ2C><8fVPkD9K= zCag=e&zkzRn;x|L!IgQ4O3`lqhK3CY)M9<@c#q-<{*-QgG^41Z%>RX{>WiHURn6AeD&r_8s}^8&QeVN7Yes1e9iqBKp5_ZcAj;9Dy_apN81a;_Ag8DeH$*CJINbNfiBFosC(07PM5-!tgklg%HjN)WyVUM+1*c(`0 zXz8$d@gY>jMXb&cZROPoFVkaS-B{JWi|7iJ?w8KKrL`8;Y{kF1@B>6_l)XCvYCcA9 zl4G&DTZo~DeTEr-T4>UNrDYPv)VbK14GO75#exZ0%H1L!*uz z1{_$9_=NLYF78gj!rpEH&mwdjl9#~uz7y^E*q^zj0~H0ypZ8m(UdW*3&)FH%FM+`Cxkkn z`R!L%f&}huK&{UE&NI!CI9g3bnn4%|mU}f{4U!MHRpSp_AT#8A9%#%hj4d1)i-PTA zn>m|Rh!;7Piq%s)`=**OlY8GPwBwRX?5tD4UsIysYA(sHPV^m1y$aVr1}z1>nRc*o zqQ!(%uc1XHb-T(90}P8iV~#jlM6^mtYKcpzEj018KU!pLYEJYY8m>upNNy_Znx5lN zT%UyRq@Sqo056!8Q@eu4_%B7362&pW;Xoq@=(6R_yB$v28Vc6Q`_OHzMRJ*YK5pMi1ESWt*&69FA6)09_ zYbU9TMy{RwPv1fj-YcfB5Uk<<$`d7Yv?A+7jGZ{YS!~nd;k}0e5@*0$ zqWP36x)mvH7u3rCS7B!X6vhAdaT)|cx}*g`sXMw62|>CJBt6Oljygb)?xPWqkQ4+2 z>5}GXq@+vgXbI^!`Z<69C;q?ZH_tpfJF~m*{d~VWv%51Jv-^4}9_l`M5;<)=$Y6nf zh`xciUXAv!;uZR~;Ho1p=k;o9%yh;gel6a!eptfPul3&1 zM@M`X{CE|q&YU_4Y>TBeUSD>M28NGBg=L^r{Mq$xw@Ps3I(G12{P$1v>UhC@EFT9! 
zT-^n!vn#;P6ka;D1AE3v-)5?&&y+z?^d;2Tw|W6us-i^OkE`iCaW8E5Z7>(Cgoj3k ze$OoK6iB+KcU#1e+4ks#9Jw6U>h^-TF*_;AxDG0aP6-x`wD0Oau~)Y7rMCp=KgUO= zY4N{ZQoz*s?(vee;QL1@LnIy!PSJb2NP>GSBX~>%d93Ll4ugFa+e#Ysiu<1K_c5~S z?1hJ~lc#Y~Q75YG24lnrh%oOGz~Y`9lpRD!`Wrsud^mf4lp-Wk>M}ShbmX$63vCb* zm%NSSo`UAg#d(}9bQo-1?vhhu^a>;#dKo2;eXxyWRgdNyOc*`LD`BrpFhPW^i2_4D zYaMZ)(nK|E;75@xW;~iVxZk)%L5+Oh?g*Xc%$Qu<*&=pAWSUxJ`0aQ&E%oo~C7V8P z>dK^dLzLvj;Hh^IIZRkidVAQnlZWYDlp^a&hW0(&#rdGn_$_#O*i&UR`6zOHS zRhz;~Y94xiC|qEQzh2m_Gz~N+^ZmZLAI?Pn)ywi@!&A0bc!Sd$6et|P=RUFBQT{99 zasK0n)A<6~4h&-9aGEwUAQ9wUoH1_LZ2pQ;;if~tnB&`INB(h@m#W=5!Mbz0FBwyl zD(YWXv{ptLTNkl5tJhII(yJ}|66;X<+5NzlVf4i@_hn*Wf?U2g@f9(T9RvWt)?dz)ltbYn;s@8HRMo7(Qfvv?}Rhk`Ras)P4$YD zBZ3s~w*74Vsh$`GP1sY>bx+B4<3KC@F^pq1TBllqlA=&)d`+we$qcy1Kj3W-%cob5 zIdu}+A@K2p;-g>ell=27ZdZa7QyC&|vyz*m+dVM`=0+dxnK&aGW8&1u)ov5su-&?5 z8IS7J`t+^f>nO@*O5Yxy>Q1R2&N$?ZOZ~Z)I;T zD}4vKW1VjbMqf=JrndHV($EFlpVo`EwU-r`X9kaFRX4;zVr6fYNxJl93(O@9cX#W+ zhQ<|>cVur->RrnfZGhj*mN@eKwZ%Rvzd68C=0zNgxGAAZMaPp$sT_citR9!tFi%kl z;0o`8Q=YaZHWvTy=ETuKNjh9idU%L^7WYh!58S8%IW-w7N^mfZJx~72)R%pssZln! z`<1A&y>xhX#RltX8rj#dN%)(^?njoIhI>Bo`In4}=CT(WXXeGSh`osO+__+b;RV!; z{4`HXeA&5dZ8o*`aXFW9-OcSm)p54(?-*a%WfKT_y%un4P&M+G+5$5N8JZbVJZ6{X z@QR&R7nDl}WukK?0))!aps}xFLamWhU2QGPR&@Ieb?63@MEx)xzSrY&ME2 zu}$4FUTC96kZ`O@gOS+CIX1S6DZzKmD2`y3)gbe4VP~9`Qlfqyw^Y*n5n=r z$HEKn_tH9VU*WNDI%tzWh=N0!`JDt8NX-=R$>l4G;u{KlW6lIY}-*$Lv zQA)nFBb`6gs&!aiTD*?$Dp6n&w}p=D2dB&T1A4RVE#(>;q8zzQ*H*MGX_T5W29{&> z!5Qse!bRT&lCV-pGhDdHq!3`CC>>v8`rLhZefa<{Xm~`LT_biSMj6r&L@Soq30Ji* zWHplJ`EAD4Xu=yfxTh#qMX+@Q(FpE?Xtl=)*XKu!BMEeOX+b5%FUBi!4P4`mrEBoO zm6Q4`WP5W*Ao&Tj_AguVWIXBzIVgjO@_`J$ z?Dn~#w{A%<=9Su$YG_R?GdOD#nP3=^8lp`)Qnt@Yx4;N6hv$f^8Qa~~e|ioHfvYH~ zVN`Jqlb1)rl{Llno4>hgD$Gbp%!rZs&db<8u6(o4JuGv_Gkq$;vYXMHQqru3TJe6`LP@>={6U8YwY!=F_fc@bH7VnKB zZ-bz8JdPqd?Xgl7C`8s2x!v4b*&jbJCqKDN=9UZ~#o^h0LAHtYI?`G6(mrg`$_MTX z&37v1UmduxVm^Mhv+nj4d>?)&br8r9AeA_ZYI$AAFme<0vb*%&^Rs7_HxDnCc4Xu} zs=epfHPqUo+{fWry?294r~($1HJ$O_F*hTtAi>3x+n%=|ov;<1iBV~4r!Nqpa#QiN zIzYSPf51<{ipv&C0U@59ok(}bUyWZkb2&NPP3xgQiBH3AE;lx&;Cox?y?aZ}7lRMW zcfIL_8pw!3)TaZ-ncHB3#AWls@QQR)5 zS;XRsoO(vTf`l>{Dngq`7{8xb*H72*;#OJYefT?UtyIzGlt7L| zvW*}UXAy=0R}qYNvVsAt^Gt0~gt`;{+0r2w_hen=%Sp*hDp#LdoPMqGhSVAa^}1Uc z7^Ga+-CV`JTc4=(AoR3V4t&7zi%6gm$lM-M#cU z8VJSR2o1FenSwGccUG|8uht&k*#rAYU$dIKv#C5F?ROWV<{0EwoTa|7$Mz=YDbKZY%FXXpHmnZY?gU1S2 zD*X~qeLIe;2!GPWJ%v8I2I5ejnV@pd9?3F%dJ;EDSHPEE$9zGwjbWkMY|+_0Y|EmF z^;{%V?f$uebn{8>+-HLpw|&dZh-7VU|Ha9ggJ}0hYZIlq8=2!ZZ?4K(1U=BHSN6dbvi$;)C}a8@yg!=u?MF+j5pRIUS>BuP<^q0huj3WFcG*GDeaGt z;K&fJVZqc+XQX9lP_)7*s7?BU9X~O_A{D96Ae&gKqB8*HOvo_E_Pt%FgC=qR<)-+b zYyvUBU-)Mlw^GXSK2#B)LNo+U^e3GEL}0uz&*LoC2EzM>ClHtwz|dy=+${Sw)z!taS5fuu@#84{jXLKWN!^SUxF8ZOz5rJd4eYLgN~FvpR?33#aOg6S zP?_LyE6|Wq$ei7iFYQbPB-OeZ5!v`}7UYO2N}+6+S0Jwo-e>NsDqpY8YpR4vQ)F!MEC)>y>|X9pH>bjNIet@LLC zARn#WjW(m0k72YXh7Ti78x7;LxII4gDHdc3t+3_&{`2<_+8=pdDU@Yle@h`RW~AX< zDdk~<$_2@NQY>7@{YrHWM}?%0xv2agp`{~Vv1>dyp!`)&^U`Z)6vP#O!%W6IMie{n z=oF>VYFi*M?iLG+RZ*JBBDjXx*Rs6yj0$b^tI7rLgi!0waq}aNdY4+R$?PZ@5Mibb>}_0#kE zh%42i6)mw#B66QP>5r!7CROqR-9!8pP;Pfy%QmMx_E89q`)}&qBfiZec;+d*hmQS_ zHzW4l_|o`MvXSc+tsxmxV~aUQ1jJV{KTUp93@k-frFGlF<*1fT7HQ<%ilhy_+SG)r z9UX%aTCouv(Glxiu@hh3Eq!{$!@ivn7q!quF=&ZV z)Oz@d(@8fdvMs!}_0i1ip1C~jAOOb;R~lwEutbZ?X|}%i=Bp=NtP4lw;FHX}$Bv$m zuAwMVCPS}fc*=0g7UgCqjp-5Uk^S`UcRp>u7Te2{p$18&L8hkLXfog4Jh++f-nuN} zdMrI7r3`L*Dm68tVm9%D1g5KAa1Z@DHcHk?!oZCCp_;~IUK-T#OP>;Zl2O+hMRk!6 z(V`4@L_<+}lGxpy*DEswhSL?uBP%b3wM5amInm0%On9L8=$PN&X}-R3_j?eIp;=3mobR+JWB7{7YAl~@NqaEx@l!Ty{ql;M6>2((nn`u!wX z_q<%fq3e+~1^rLZdCQMCIheN@jH#jR3FZhBzQYgEfJ{ev1BowI+I>_757nnX$n5$x 
z!?LXeFXS=rkSa+|s3ZVe<42I&eShoVE@MAU#+)o2LDm3-8ntQ+}!o>7myWG#`ISOn(_ z-X?Ax{y9Ez>}#QVHWXMZ7SD;W3Rzw$VulPo(EBxlRe7pFhEcjKTZ^okF7(9yI=))m z=M6Xr!^rG^yK7|Mh3FA=P=~2XdF;4(^-EYo5-}KQLu5NY<8a;*unAiX^)Z z7=U1UST~(~NIaLt8j+GkA3Owx8_W7NA8BN3>dyA*!S@om?XoN#=fr~XVhu0;WahxW za1G*?;*fO8E`BU?Ps~#yS)`PIIYqh8mNj-11z=8BV<^8txKGO>Dm0>Kp0=R>nN>HF z-s+h`>IZfx$UQ(@ZD2snM&5`du@Kb7;dQId-&)%Yme;5JR@ zLE_Ptyu9??4*7;^W9VKTL*1JjDVW|&IWc4G4j*DAA*atc3*ox`Yne(m{oQ)`ri9>> zuxgp%_AxP`dG-03xM~&xb~kr2@x5v?swXpzfW7!}S-QaT*b(HiW?-0T5b_cIK|W=} zQ2ylSa>z&TrH_aeH>0$xFnunM6=tCWnN&JzC|kE^5%Laegee5qs`g`%f=}9$-vD`K zWokGmC_o{d+csbP^G}E!RIse;O3XM~ATW9{lE&358b2E4K~zsrAAu1Ng^TI)h~XDx zISm210KKL+s&D_8a%<9$L7qW`e{>n;^1wI}5(<|5Aw64j zzr7X|&dP29yDsr2a?9f|_XsxUoAoTkUOdPSgd`rsv0o#UQL@n|cXi6Q*@_zNck#rJ z<&~+59t&G>-GcL|?@G;HD7}wI(L`!HbHsHbf-=MW*`zYnI_{V>I{oK|+7Q7*J^p3c z5!|{E?25rdAd6#0^hS2&)@X6cpsfE<$sN8UQH=aQfh)j&w=n40x}k}sT#D*X&=^>l zla;%rHH?MJ0c@$M%L1~7x!O88qAd=5Ko%3O=kB&pD;5Ev=VF2;Pgu0w!7w*37PPwI zFAs#L+Y>^5d6+dAO?YMDdIJMHL*CkYu?X-90r-IEzW-Wu?3|H?$sqKJQFLV{?L%HP@w07V4Q zz}>%PKs4C(ADO72*gyLdL9sX)eU{x{B__2#YF(3gd7}-no5NK E0S+$bod5s; literal 0 HcmV?d00001 diff --git a/Scripts/Models (Under Development)/EGO/Using EMComposition/CSW/Figures/EMComposition only.pdf b/Scripts/Models (Under Development)/EGO/Using EMComposition/CSW/Figures/EGO CSW Model - EM.pdf similarity index 100% rename from Scripts/Models (Under Development)/EGO/Using EMComposition/CSW/Figures/EMComposition only.pdf rename to Scripts/Models (Under Development)/EGO/Using EMComposition/CSW/Figures/EGO CSW Model - EM.pdf diff --git a/Scripts/Models (Under Development)/EGO/Using EMComposition/CSW/Figures/EGO CSW Model - PNL (basic).pdf b/Scripts/Models (Under Development)/EGO/Using EMComposition/CSW/Figures/EGO CSW Model - PNL (basic).pdf deleted file mode 100644 index 8749d22ff57c602b7e3349261caf8fee63103afa..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 31880 zcmbrlb95z7*TANSN*-KW02 zySr*vcdcD#eKx6rh!`y+9V--R!+GT^6f*$>fvuqh6b}ypy$ry{)X9v1<+r2+MLMnvz$s%fKFebF~EzufPO zx*Sav=l!ndED&!BRy!y?tp0_dFOQy+Fp?u}Nf*uu7=ATax^S#}sWHqdAFL^HpYJ!j zPXqi1*F(PWbkU5fzs3n8V{&75>G>lsgWdOhQ=Bhv@QWwasD-V^2(V}vb)Fc zR|9S0x5P3S@|VHf-2iMO>w-`}FndzAL?&!@$t0CunIhNdDwk8PmaOf_6{WqS1BfrX}nID(iv?E2PJN^^HOmI zSB@8Bj^&-|rtrI?rMLSJ6&Kh*2j5}IyVYx5TWppdU^g@IvZB4G$`a%k?e3Q$)*NPe zXMZV;Qdvgv!J@_0lpOmeWbhHNi&F&RX`hlXbaT0V3wg_!?c3akaz;haY~TlpNc@RY zIS&`$YDeVu!*IC(Dw5rXNrOd9&37uVkJ2bb)cZ$iR2Bg=p_{iiSok3W!KJm0 zcI5Z{m~p0v1&{lgue}#->@lAYFHa9f=2FP}ELt9HpaGAV{ee7vlSol~=&+qJ0hlJt z$LZxolL0B;S_6Kl`I3ev-_jF|GxL-zK^Y`d;ZZX1<$gzvs6r5X3LT*eip*y{1nuQg zkZ+epjj4n>G8Xi!?g~b5!7d$L>9vN)gM3B!&>OkcZSL~%_PRe8-;Ta2!60dn7!+0# zVL*?u8?8$hlySKkCITzy>lHT|MY2H)U9L{FRE*LVff=u)O^4q|n?Ed$ zHjXD-rS5YIPQj;>gmcEaO%8aQfK{aFJN1uq~2k<&jS* zm>R?)nJJ*eS2vz`j>E_?QA%Hl86(e2b8E|8bpvw^H1BZ+o3VVS#-u4KVU#i&{8mFv z>`b+G4q?v@W8~#Ky@tm>-`-+1e0hC5J@>^&5=4z0x2QBPks-bZcxo9&M@=OdX-4X& z>(gR(Q6l`*fS4WP4XrEhJ`Ox1EF-Q#c=}0k!SKB|{JU_mESgsUoKuAhhPq}%$%k^# z0kj|D!JKD4SlDB1e_HWIpTmSxA(U_e>KWaS6@_BF^}w={$9mUcv$}%0Ov4)Ms-i2; zN=HwR=aX;}@o6@7a20`-Uu}ea{?@3`K#BfRD7Awb19&i8ncwLInqnYO@W3Fk{8T+g zo*haNJHQ|*X6*)_(6ZokvDqScGSidgCL#Gv9fX(lb3&#>4vhmf#HHR7@gP_+d>~5< zm-4}Kvl>kE?&E_1=q%CiUr3>$_&uBUzH=$S`^2FP6-xCtXYVZO?xx6~MNr*QL%Ys+ z7>k_gKU12&JA$~u@v!ngf0grp{T`GM*xD18Gh@&xSEw!nY znO>5NNdpsCRE^-cwq)*$##MW~MZ9?_<*9$z>t&Us1XG0QpBdn=IR9+0dWbc1gNJjy z-RqiBPTim^nkHURJjwL;e#&MlF0uC{eM$9Z!rmIPtZOEY{t!QS{1qK}SW@FN4P>KO z2Ui_^2Y);soTO`|6tgPt3o1PTXh!AlOidz^?6fS~Fy5+<=)a_^d|*(MQ(8{D)`CP_ z7KHeG8{cIc%|6x1gPX-?Tss$Ec9wbcf#XI-4`Y7xcRbejbR(~dI 
zR|q_II&{&r`mN{fgMk2jbds1RHLqAt!CV*2_E=Qu3nu9IRtV7D0?L|$0;~Y8>|b?= zrh(sGZ_$1b=2MErXcSM1R-UH5G2O@BMywrN!S$eJKA}v5O3t^=aKk{-7FE*`y33yE zq|LXH&Txtq#q;Q%Ps$a1-++CyB6N4ocBmDldIw7{C2>W%5V~2h4BVqdEBQTTX4p-c zXYIXbZu_A2Ia{)3L>LF;JGzT08geC@#$KO0O?qZ29QHRV*|B=xqM1FHY0qnqi`NrD zw_!38-c?9buzF05%EO0#eToE0W?@Q z6#k~6)w~*QU}_fIcx8>Ud(szl5GjTzN+pH(oF1+u!Z#b#NSRBxKg(HDp`w%*N>=qj z)#Jj5|BiA*F)UJ1u2Tw_nO*UCf~vbXqWdYOx(zxEV^60bZiV^BwHlWzpTq8ys7MQ^KtyW@?mp5sCW*N%(&&A) zj-JtvbDdZX82EN$N#Gq21=|Iv;orQeqOif)>ZMt@Vz}a4Sc|Q{Zbn{piyJ`nnNZ_z zn`=7?$eUH4#AEkT_D_e_xiFWTFZ>Fh6nuLN$eK3B4~NTYhyqbv5d z^NhOdSr4A!qoPH-K*67`sa}Nq(^Dney@ z8EdU>=%NdKffJf!UwmjnR~1^1ls#zWd9O5zVMKU@o?+ zM+af*b}85@4r%X|!2u_QVeI)VjI{%;3|H>SeM}*fVS-ibUIqy19^(dAg?xTNZjNe% z0@xV;I~M=_>rWK_Cv^X_{~f$Dak8-fG5Awu`cIJmH>?+Ra}rl}`W?~}@bLT*et+l< z7=Bj>=!FFd7zyZ&41RBa2l~Gc{A)`uW^3d0->gk&8E6>@SpQil{JDS$E#q&A?SD#4 zza{qnDKY<+IR24-U;X#@->u~Qt^TtVQE|5e5YQ_am;(N`Si!&n@Vfzw|C~oJ3ote} z5VCb6(E2Sg5HK<_u@kT}auDeJH5dNf+TZq$1bqg{kIjr<-e_WtU zK(FFpVB=`_=VnIk|HzUAP7cn0oAIZc!oRIV050Z603~suKPLZnkP^Vr*4e=b;7IVt z*-HPdTa14i_)ow7y~JYtub%v0Ypg#v`j;gm0V695Gu!`Ib*+1Pda2AeT&=vOa~{eZ zIGZq~rW(hmGpCLl$4fFLkf$=lLrS`Vk_vneM3RJ%ABYbW@A2gItI?TRMhZG|^kwpZ82>+8j(>*velq^9L%!-7wG$D*#~ zWgid-kWw}}hXQxi>5<1NJ^TksH!q05p=+g9UUcv#FLbOmIKrmpO2b*}CQp12tJg5H zR8qxS^&NlsVy8bXPCwV+zBCt_FTz9zayA3Hd*WD}!K;v-|30Ytq>j(}&--7GE%eoE z5&Ku8-!gE4TwM(~qxWx4xfWa@;i2U9pJ9^bD>F%T;^2Kvzf$acp@9{UB>PUM49=m6 zSb0{oNBKgHAdo)=^F10rw~K3BFnpLv93o%Z{{r)e@|FbO(MZFdg&pXARUUe-+Q?z9 znr^5N?7{wRs8lar+0cow2$MTUl|$d55!Vx%iVudkP&Uwa{_-?9Kt7Wya+!M4bKZDX zFuo%$3_mKL4&j9CB~oku#Egk|)im|R50{hsmT>N|gk9w|!5Fph5vB}c_i+=^%cTCC ze!KBLj@%F>QM-uL+BBr9Wy2Aw0Ph|{iuee#UkLBsmtmqCx zF886`i6F|ff;JFIi`1(@qN*i=_l&bwAb7C>rXMr_@GLW!%!Svh`kfB5{a8#8}l z_aiY;l?m^#ghJQ=5Y)gkb$PGqrqtmxcM~6wuP>|HvUQs}(Wv1lrk4!-4Zl(sFH-?q z{q@{*+fKpRWRET6nMGPxQ^Q<8EL!(zNvqw2Wjgg3HgaKd%CR&DGEEHrCEcMs;k1*H zdkg!0wn5e$ZF<2;YKM1yx79Kyy?)@Q@{{BM3okvf03bPCZhll9;JNQnGB%&s*{Ppw zspheUD$kA*w1C-&!J!7Q7gGkP?ZbUQemIfZ?%A-ys5vO-TMeAQSVqg*bMtOEu)`p( z6F4I~q{HM6unnF_;ZZoXYI6!@$1;OE+>sO;V=tE)TSCXuR)Y>t)@Y_#wOP#>rWxev zCs?OhLt@+RE=w(235PLo6J@uje5BdarcV@h}}Ql+_Y1pPG~ z(9@VnZND$n(|CcWP2HGnnr)JA5=i^=?YRX}yUdZ;D(8f)e{?>ZLb_8x*GE?nj~wH@ zg)d@=E3-J<3N?UIRR4x3{@bnF^dY2Z{}wa)7dApwdHf$b1F~r(+EG=-z@HWlP)|z zR#uzyR7)~MWslVW%W7F0Y%h6g7g=piTNlYi*6QC9f1VsTEk?6lX0$xalXtqgI6c{P z#aQWR?q0u+c`rt}$Q1K^j&;F)?66ZGwA{?L1DYRHv|jI;~>+G}}%`3CdP+p=j!bjxas_R|?o=Jchf8&VM+ zqr}NdQGX@RDwL&9+uU;`Hn$|EQU_cy@Knx(C72~HmoJw)WM)Mpgsri7ydRn7;m2&; zJuv%nzw}u*>9IeJv#k_WX&sR7(-BOC*7wLciho1F}b5}<|nk_Lqvn0HDgsvs7BOj78;E%tFO z2H&x6^DP~;l6vv4Vf(<~L0-~EC3o(4{bJRAX}pzP9GO8pEUJ{Bl~4A8{j~d_JxdcR z8WHcTk(%~orjxVXwLDqYEL6)FLqg3;f|#q&qg7x-jaA$W`lqtQFipXEPT%hqGp2IadyeJcXI-v0y@z3UJ}X*J zTtszYd%IrRPM({YHmYWE^qo321$-5)w;x}4BRr=`qHj(~ghXN{732ni%r@av!dNs= zNbOL!X5$i{->EA#Cw{zSoOn_}FUv8(JN$sr2;0u{paD=e5?)L(a?4x8tKc)ZvdnJc}j!5@O8f1zTCn@4J{1NVC1FHeP>}_M>>Nzg3!Qt^e=x_|MgC+IhyB;#zr;97j9zK z%j$Ip7@@H3o{@wx;c+^0u!yl$VeIFqR=!SlCkhrEB%_=SgrL|w0$L7nc5w^14dQTR zk=)?H0vWl~Gza`ov!0Du4jeA*{8FgWL$@6k45T6>SViYxkA_wjK!Ha`EBHfkOvF$> zIuwi#CF_*B<(OQB{Me}DoQa9)Jhl2nH+rFYT_H+*uTT&(h5i-BJYg-3iL|Bb9TyxY z$3V@PAa2$^J0!M z>ijc|9r}6nQeCoXaS!L*smljcZcAK@r#Bbw<=PJKHht?1f<>JHAhP|M#@8HcqQSrF z288OF=Vq<~o<{L$(wqjGrugN_^q!Xjl$nfe7n?45SU6`-&VAzA6*o~+Yu;Sx-zB-5 zWW+x?(;@?V$Ti#7UyxA|u>#EYUp&I*8~udOp+fw>TFP zL#qZKa`kSPT}ZJv69CuNzpj>7i5R&tpy}0?CAodUX?e|`$n^$l8hIBN&}ild!-)Z4 z_|i?ZP7MW3!fo%)a>Lwxvc~T{5s&wWTTE8M`6e?>>(NU7yf}FgiPv)b8 zCZ8+ujY9)xp>uVaPG$fuQYYmJuR-Z?u5Iw;*tWNZJrk zf*&qTwPty|(*Xwi7@JuBK;%=x`y2uG%X3WHA+&oKjLYQ8%=S9QjgLwnn?Z-B^XsOB@Zc?teL@)2!oU^Jk1zI47&Q+9&$e0o{CiT^MfuGI?w_o 
z>aYSND}^LKYC_Qxma-@DOaR2(J?5sgdpsq$O_6d#>LP}?`c;}ZLcXUy4#|#7?h7NF z%Y$X9DwAgATuhzm9N;n$)M@D?2UZS^+->P?>7I&;EC;3$vknqIjj@Lfhp86Ib?5gS zB#IJW;w_n?S&il@Em3#JTrnjR;_9*6BHJQ8qfDc?cE1~K89Y=3WR$0j7<3nhSr8Jr z-;u2cihi955J#C9*hOqnc~xONcp8bB9V3rfm_sp5K~)p$mMU*u?W)MoN=!^kL(J$K z@Zm~*c66!w8SE+ty}{`S86`Vsx{DhS+#Kr=v;T1xLJPkdt(jSmnQygY7caXOGrPcT zv-^Sr@9=1hvzNJZCpI}gTTZD>T?8#z%c}O;YA5pA`u(-)y7=WgUF=94p~p!mD@Jk{ zPyamLviXWpZXqEZ3iR^*9RY%n)P6unxk$}|dI>A>0lAO2iZfz_w7p_78mqRlw-LJf z!@B++ReQ^wlOCr-CHCWS(1oM*AzwLW%hQB_k77ig#ZN7U@J^Vnp35sp8*fC6Sp>=B zR-$&epW$?3%Cywosn-Oo6oM2EU<6Q4d{LQH}<} zuD{KI0gjKBi^Sk@Pr0QmeB2KwzoxBb8gy$}FNfu=ihbHynB9%(tiPU%KYZ>!Jts@v zDl6lZT-e=c$=mMm@<8iCe{;g~_-um?3sNuk1BS-`?xkY~mMVC^a6-sL=~7!;$`s&u z3#|h^i*7>W1kIH&2?N?U|Ay6vHH&r2C9UR?JcD`yWt(8bYQ<>HxM{VFv^I4~pI}|c zq})0db=;0`4dF)>cx(`i-rZS0ddH@srrV_xtX{k^)bQn_RX~kQz>TrpC z3B8WLfsY0E*18q6Wp}@9@%l|E5nEO9kc(j>ONs<7cQ=YAlC8MSafc7pJNY5Coc#Nq|v z1!dJqE|Dgl5}a|kQIA|xsL@Qr8<}hLfiS~L4z?B|Rj;qmI4ETU+dJEaoloXWkVM8v z_JTLelGtKF)Z!kKQThr;Ezm4l&;DG>oCF66<|n`QOd*l}%)S?$An z!2@&OHx-KM1H2--BKHc3koweRD4CM{dekw-6QD;!2h#Pn>fFZ!pdbMtgM8ZNq1r4; z^@@=qjNU~CCFZGXn0wbBy>uMY8SMiYuyoRa@LEIwh!xae^^UWpkRraKzSJm_3LQX7 zScI;n2HU(169%rs`6HMj2i*?$gTUHBS?Q`a%LCw|yH+eGva0p#gom`l zWIEdMkBOhS6RvNaL^_b9)|(k_@@x5NaENr=n99*7lHWQvZ2*s`5wt?LW`%rb2e51w zu>>fk2J8V?&%Kgf!IBc(49B1eWs96m~?3m%HkH_L3n0|(_JTRJ2 z-J|B1zo>SKeirLqK?{~lBQSs|0b@R2_zUPt79fmJz{{hZ42boixYkV*HA+-a(hep? zCfPZTKpE6D&s6gmwY;v>I=^ulWHi$OI<|)f@lDHJZAZ1MDq+`qFJoVrnt~gHn-bRu zH*A}k@8uTXF&|qlq1JF)soV8Dv_G4-uOr_thM4fDJXlyL;H2aWVE{OApKNbYcTwA3 z3+yNL`#GfB`=qOPUWuN72de!L(KjUfuzKw&){`P?<*!kMo%6i(EX?DqVX`w`z(WzS z{@9GOh*ZPlYEu27g1*<;(2dvg7u9?T?CJ^3r;U=iei-=@nNhV>ysT%$&$nlo%*PV_ zxYTo|CQ=^WQe+q7XI79PJHc}>ZQQH~qpsjqVpgQ|tCy1zKU7+Xg~KWZn+QQKv&#%0 zs!2-40tknqRxTpP$F01jpk7~o988LcnNUc_J1jMo+ZBr-h?0%KBHFkG130Sml8pXRJx|cM@nfTX z*y=QCTfl?hkYQbhDKQWUA2TVI1S?YIafoMGEBBXZ2l;$+NAFUsqm@;Eq-f)dfB#RKCI=cW#GJ=VIZ%r ztV_}+aV|e>u@Z`Mc^o7^^!Pzu-O_jje8gOgR=AUNyYIMf+CCc(KKFiE^46?gxWdDq zZNA0Jk7BUCCDTH8k zo$#t=W`vQ0Akmd=_bUM3298fV3ZYDSMJSIA@!TQT53t-S=y%64r=#%~9}&BObj!wV zyxsTG+tI{;7vGA>M|4!{1zX2{A+AQUgfZywv(9mWw&AQKd1hs_gMcg>aVR~71~ z5(_@^MW!n5bFRbB$TG0Oj`wb77{8|#d>ez3kaSKmgF{PhIE0g874o(EQ;s}Vn4F+u87@G*N(o9^XK;7wP49(ekAmK}1@C$TD{_o`zz&6r~Y z5C6`w_1^QnsZ$$VUf!4Gqoy#vJD__rS6Ys=m|Yq#K^%@?F9%o+*^QyN%iIu7W^zZ} zO8>I`pVyiViER|-YGt_Z7HdX)*>b5*=GLdj>3;M9(+%YB<-X#q1VVt1b_ad%!PQ)+-E)z7ObFOp z+;hJna*y$vK$_@5$xf^fUic;HgxLW)t8+)s*fkS&u(GFDAomF4s6SfG3tJw}stpA0w|63Vh!ib8DT6}8Cf&NeN1EUjyh+zkr zG)ekrbO+>?)HSh-g&FTBo7Z8ZJ&Ho|5qlY5lA_^|J>?rxQQ|@z%{qAa3_`3P_KLiO zV;C;zS*BJfW%To$Q@bF%f`B^=?+odEFKxud9%si#$InsTv3I}M;36+V{GGfvZ68cu zWM6;ZDBplj)uJOTr!nU?=P4+g9PJchZQ?a@93(!pNh29sB=j)s0sc_Pk&um(rl$>Y zI!AZHZ4ix$y4%xZ$~APlO6jJc@g7WMGv=oFwv1QTbtQ)+~RJ!Jg&&D$~(yl6|1sx$1byr0{il_DjY#=9?%`=)bH3jf=$BEdX=wes$1>+g<3JU zynZ&MplkR~>sMb63w%jUM|h7|bxCKRv^`j(Vu6qB>0_-h2-l}YFM=`L!`p>}zsJ6> z;`G%ZAiuamvqh~2T z5ng<6GsAnS2fHKdA}`qT_x!b)h3);*-D(}X_4of!@b*Cr(7n@QdchWJ*!cPuep)#w z(Rr{oK@1)6Y-AoIhIg-Oxu5wAB9(p!jM+_2J4G^6u)^O><8i%!Jqlg!x>gx!2aPuCnxkDI#GSfAF#E&6A)KAqHJ?}QSO?P|Yd&E@ zcb*RVguMu(HASb^qF8;{xIhh4Y=V*@gJd}lAe*Pso+yvpFD11DY@5Ft%{fJe(f5Y; zcPNBdvQyhD8*g z?S&X$*N=bTu8QLWvb88X<`em4@@Ic^KRzMsAu+hA8KT%PW;@(B^2t>c8;nu0s@(T5 zNo{noVr2&AYcnB2%4(h(B;L!}YWcaG%i|!cb-w!b^EV}|!Rv#7fWX)`fhKSQHhVe8 za+6-vU)S?-rS@(k2F0ie%*JYDpQX}gMvXXP@kVy$Dx)kQ1iwF6d6SW0*C z6i05+NEB9Aau#a49}gUtk8-I>{i7Rj#!g-_zwp%JSdSetnpY}IN|G%H6&#O03oxZE z2IW=AS~7x|GH)XKu*GJ$cjxt?g*5)OPcA ztV88Bo7$^#I`Gm?+4JYzPm}UdP=$QMztn+^4`ggq`s}0&)WbNlsXQvj%5W6B1vhmP zW?od75{Ei=21gHE7pBs}-rK|!H54<*op5zT>}+LVG=Z?97SFbZK|9+ko{H)8Y=AYE 
zy$AJqa|au%&*8DqJmv5Ikt%yXyK5-s>P@H_bO#J$KGc+_N^T7MCwA zHi<@EW-Q-$mxOFDs9F)rc_*yAk{Jwf+d*NQmMx)o6f}^^+4R7Dd%!!(h_`L(U+v~N zUo5uoaLsjf-()>+X2A5ePig33j*Ua)PU3*YgtgFfj95gjviH>DK4E@6pJuo0)Ax{( z?b23KS)$iUm|d}Lj08j$UYaIp$D4D?X6$2`>ikse72C@QKR{69{)QtNfxcN1o?NWsauZvAfGN(BWi-822WhOxLs-^~e! z{TurZsiga^mF@2MGp&WN-s{pD8=Ic0)My#q$e;rWWjAEZEnj*`i7rcr#;q0x@TXI! z?_^NZ{w zWpXE$m!}v~cXT(UwM~~8i@gnZ&aCx%+8;Un*XI{c`M4ENNMn+$KiPu9-gRr8O?)G* zGI+c8Qz93yDkf-we@wSNgjKw^P*`#n=K3x)|FFkQKvhxgP6=r<{4Vvg^Hm)tTP@)R zOPJaf4sqITMikF)lJMu=2)a9pyH`B=tlODEW^QH6{wrIyb@>9(xk5J_z0+Y;=qBueJmHB4dnQ-$#rf zh>^X`kgw~u22j9^&<HB1wC9!dBM__Z43D|Rlycu zdpKopID1(Ws-etY6%vM+FU$^Wo8~KgS+lqSbFW;WFa(b}0yP9-AZBDA?iSn@$D@Ha z^bSHVRbU1LkKFd|a95In4ZkaFrY?#NxGQ?5F^UbhYmu0*9}DpDR9uWJ&jle3bE|3j zn{vL)M>oc&4-I$h2D{j$XeahD?}{2^P@89Yd$d;BycVE8?UPU$h0b>x6)SAh8x3Mp)tkPf#+f{{$Vb%oNr7vyNm|FAQRdd!%N{1(Iy5 zv4XAf8fgHghGe+r;=<1sxhKZ2pxis!fewKlAepB)!?2V?EoEC=kcK7z=Jj#w1NPG= zJKh@JzC3qyCZ9_v)?O&5C{57;YMHegM$ zvo)9FhR&F;lRO&5bgO_R%TVlKSaR6bW(Jyc6gg;9&LXXk zHjx#ZBc?knj^%8u-r$s+k!)p=+NK9I8^qQ9P(UDEpbiAj^e7PLyQoz=%3nwMd=Bq~XR}g$47s}OUP_PCiX3;B_+MZ^`UvIH)WhrPd=5YUKB8$^+( zcnX$Bj$ru4kQs536(x^wdR+$q&jXk(igLzuLaV|3`WoSL-?9rc#&Kt!?E8yYKO!05 zJFxTwy=ve>;A}b%4i0u7(qblk`DjFrgr<8SZScTC+{*#|P(tXvgdmw|G3I#eF?@c< z1iZtjgEFM%P#icI{#L4zm5C*Y*4-ixmQQ@JwzGCo!Hz};=7m*7sXnX|PNyu&cqV2* z^u=Tj$-%sg!WGAM73N|UlwAvf6`Vc5WIqcH{0weVgR5rj48|`Qy>}Vk=Z#(>hVHV5 z=FS;)whiu?>@R;7>-2#IkMLq;78>}DXQ${Y$lRjLatx?g6171-z#+4yR~WGO?Tz&% zc|ad!5$Or>Hj04dK11PgvIrpo|I#GM1O7E4K?ft^O8h(|a3{BKegpeU28o*gopamIr1Y&W}vNHXp%@L_zp1?34cZpM#R?m1>Wc#y!y>?I4$c9Ex4P3SJ;*;j{xd$TZgTP+*&l#1#gmQl!?gdkD;JLZDutq#+&< zE1haX{zQR2a#w>_q(4R-Cnu+BgF#{3aH0V&oNv>ujq0sZH~dDuc_B z-R=@8yZGH9*Dmxan(FhwTgsi!HFO+TAs9M@1cgVLLu3T80H2&Qy7dRnH#k%}<;*LK z8E@SmJ4%;fXq5c-lWG@_2JQkaHCN_rFwJnAZgk;V(Bu@edEh@JI|==F)KR!23Ml3` z@JRj7N(q^quf8kAbN2@$4YI-Ee1obM?^44aO}8I12Msm0E?3Ydrb9XmXCAmVOjRc- zHB3sOTp>*(kv18jD3FLJk!TkojXe_%o2?<>W?BI~-=nyKxz*GygqhvLOh5|T*$69g zh^n`L5J75U6*9W*e`?m>jZH~Gt%713EGWdUze9HLq)ys#qR#x0rZp{qd5Xr~riTwK zB-FHaVB)k(+tkn)Klo%v&O)cOt2Z_HDAD8TKgszO9?iC=*%0N0lO!gy1Br2|$PVP$u9 zk=Av<6TQUZbEj#DFa2p1-I>m`-bnJz`ZE6I$^` z>D`JR+0)c@jI92P(_e1&gm{)!w(~yObuMQmmSH1PBJv`~rAG%xlSi3HImI^TE2_q; znIoKW!srL(qs!@a%%<{_IXHfhX!STzqK|uCaeWn{mPD3U=EaLI+7T$9>h=>W^Jibq z^jU>!+qPEpb)>D~oOaBUw?^6_ln~n?01$PEPFL9dUE*%|+f@)eYk^O4fgy-sm?6|V z;aJ+#CmJyof(K68+Q<6Mx%Czh=0h_i8NB?G*p*Twn89k0=Fi}*IA z%{)`mFpZO)HTPn}+*| z9xV)8>?>n9L+3rQ1hIThcwH)B`96j;(7&0#e?=+?8TdaO|4-WJZ^tvSGyli!|M2I3 zQ1X8f)_=fp5rCtSgSnlPt;3&z`pc5b8Cd`3yM-m?RAeQoRm`mcj&cB3C0lC)8wF)q zmH(KD8CaWJxf4+QwIKLyLh#py@?Qp42ByCeZ>IlP3;hcSr)6d1AfRPtU?X5+VEE03 zGyMMbFD_it$-v6oNYKXA3P8Z{_cTFAqd#Cd2Pfm->YtYWsnaqsvi%i=4eTTU=B8%9 z+4J9*`70?q0jyOCIR171pNhmkl|Oy`{b&K0ejEQU2>cH!{|^KHUyk|L^_&fz{?hI$ zzcKSa$Ns8929AKg(Eb01lgu3)orKK{9RBfxjKRO_jEw*4(*GO)0>=N!?iIE9ozQD; zV@g1;W^N;B<7obm@V5bK=EhEDj{oNC|8(d7UtweWlN!MG*D>r&e_qdj3J1p@@c+Lw zR@Oh3e+nndpU3R~F08*tmgRpPXJY@8Yw$ZS;rFq>1OEFVwEbpd{wHsl;O~_1Ke+t= zJ|_Qg`TxmZ_&;->{|A@P&d$N|zqovNFXe9!PdXb(?*Q|7GV_}m7E0^lpvGx1DsKU; zeoacx2$4~xFQ6Jw7t*+*)!Et6kz77LHV7psv%-_T?(epP~CCA=G z!S(oFaD-5!6xcl=RhFa*=h%zPp=(#1ctAyJG3~G>@^Op$}tn88=0^Gqx3(LnROY?a2wAPofmGNNi8w%16uOXgHJhavVUt~b0TyQRO( zP)u$x3oe~b%XMj zbh)HW7@K@a`x(Zowngd3x*b|Ue(zc_QW!UtPsm@4wlKsd>_(yge$04$0 zS)JCWR$HIv;TZPYjwD=p61?zlTf$M+YU^*a@ARZy9pLys=sTPr0vAqx4iGNBnLQKJ zQh!vN9iOtNuy45ZfhtQ$(_^x^90DvKn<}4~Cd!Brc08VSw@$QFkmcV596){?a_YdF zpDS^U-|N4XXX`7;FoJrK4@MI3me8vJ8;Ox<(8Mxr81H@ELyq$Qo_=QX3NL|Vf$`P$ zhs*kIG}BBCwJsz`#Sif4LH-2leJPc1-#8UG9Y7=&iuF~erOrlhDtV9hWw~9_pOPk$ z^T7n*((0doUhn8ES@}^Su(^Rf3u1>?)7lv3ozCzS7Yy7$xYyK+EYjF!Ywf8$p$G9j 
zr&z^4k?};Z3`@g6K5OkZ71~a4O%A7WFtZ*x^9*6inn)su5^u%pqX?qHFoG+In(GDPwY0<4l}tk?}`T>Q)@Q4~He=hbNl+_vwUa@r~NzNh@$&SAp5 zG2BM$>x~wBq5G-9Rt3F&70eMGD>R6Dya=M=B)swT=^NM{Y}`L`7l8_ZpLB#A(f6n1N+^j`V_buwRZth2#fZQr(YC?;{)Aj z)L@yweAEq0bkco&+8ZO_W_Np8N3vd*(Z=)2lCyAI>~}!r20#n?LN;?HaJDtNI|R-S z8M|Mfzk-`GywI93JG3mv#~MdSc(92|vS9?KBJB#&r)BRc@?d=M*vx*qn3oD_rNwn@ z^H(x>o~gXgu6z8o~07N z7J#`b_Idz97oP;qkuiQiW?oWUV8?WC-e&xj$}H3#ff0r%+lc?T)_s(Ke<3kV&jhfa z{>H?apvCn6ly}x)QEqLcA6i4u@Zq(izJ1u5z720^+(8l*u& zK|l}ac5nA~pYMIYKh9rgE@m#~e%5+c+_4rH&u`t}>b98#9N9VvA8=fivlAz!!D&OQ zL&8o_L+fViR3r0{n&$Klue!Ifd*`^}W#@H+Bo9es*wk9NCMzWTiRo!jmqIWICU2&n&P$ z9i5+)AC6q)IBAW1o1z)O@;r)9Dr-IxRp2|it{G5u-FTeFgtpeMsj2C``I3$o)RF_I zTa2JPv2IWH5F}(JC>2X@=-$0YOODfek;l97VyQea5q3h&&z}U`S-gdJT7Mrs&%mp~ z{&~fmYq(>$O?btK>yVjJ1iDlwR=~sY>7goqD|mGHvXY|lGaA9tnH}6v7a43!Voj>% zfcv^g5?+PdF)uA+>`-F}lL&))h-nw;tz)v$_~#!^8dSK!+tM%GqblhwIz|^H>fk-q zx)A{8I9skJ30*5;3!71@L?fmxntknmqEA5LB9GX4^Mq>*|^60D?yv|K&)V5)N0#(_=y~0h} zHQyj{s@hDMcKZZ=<94W#Ikyz~#v>&CpkQ)~ce|20EJQM&dKSC8BLxmBKjBDyJC}=+ z`Bz-51M#BKvMt-@gB{?+dlQ7_UX*5UDHrPOY@`r;6WqL|E^7Qu>@3nE*6e`{#<(o2 zR@v*|a(vl_h(~hKH4(lKLh;woUC%7*KMfpiyb-#JYgy3i1tW>HFEO_@-@MxpJ7Lm| zI$BPD9n_*TYiD}$G7Pm}Ic-W*1~Zs-Oxz;Aby5>!ReXy6(1?&hBaY}2n_{}dN@ zxxv=k`RT0Nx?g3#kWARjTe}aO;q#}ST1QP;4OVPzjEWm1*Nhgx)2En#O-$dt)$P=a zb*Qn^Fc`M{Oj0<5iR91? zWeZ~@G*oQIdqT@%o){Q^K(WCV+0ZJm+24)~YXk%3ID~aE=5|IlpO2qAc_ddK@dB1U zFmfz(Z^^^?BvA1x4$0>REwlc!grMhs=6H*X@W-TvL6N4*7{z-;m*ITM zH)?W`j$HXJ&ksiE+N^wpt!b8!8ifJZuexTOXVC&k*c&1h%3l(l`=iCRw?iHx%Ug$r z#6RMXx}U*luYv{d9X)bECA`jEcK_{iF?p@F@EgMNXJ$Yy5(x1;TC*4|2~I_aJaBeI zpt#}Nr;_Ox*8K0O!y%PWf_lDX(eHDAfVhQeu;<18Wc+G;fHwU;uiZ@6>&3I)jn4=td?I3Y|;cup}3j>u_3uJbKRb`a}PdCRYf=2_!2_HgSb7;hVJTi zPEo$;@X8ISXM^9vdRY79oKez>$gN%(L$v+K4>F+odafB@pI}!lVVxa;ToPeos+N^y zLoqFWOZ}pA!;Q%s9jlq;o^zRzdEE`&7!2rCl0fgoRyZFZErN9)-L& z;v|kyNe`^jW_$5WB@$k{-ED;F3+oqQp{dU+nHi^?^inU12pUxz#Fp5f67d(hBNz)* zenLzM#S;2f&(P#edH9v9No0uvY2!LIad%`*0zxAgv;h<+v-|f!c~#X;DfPVz1{M$Iy^e9j}QY@c>Cx0SKeNevl*%+QKn=)vBO3 z)J@C25^plTyxX5femj2w8?UD`usF~umIm)zfcUosb+M;SMB z>jG$k1P*_(?Px?l!J#PoncW}^wwmd6$a;ciH_dun6sZ9c1D6r&yZI1XAxFjrm2~%+ z7r}>*2H!)D#ZV1c$XV4QlkFx)Xk<~}Mfa(O%WRK0C%0YkZqlV6Dsb8=PhY^BB=BVV zYiNCrt?Tgp;vCGAbiJXxSX$cj$jRPQ1=r|N?!5(g@{*GRFfo1#a=~s5R@V59J4U$r zbpgWq3gakmo`1c-ocs3uR_=x~HmYxITb4ZeHtTJ$0<5U$U2>C6TavYsGPO;D;e?*r zzNl_qQNA>^xF?ct%#2v{c8GDnFGNPT!Pp;6gNSLEFN$|mujv~@+fd)jQAjF|i{yxa zMI^Htg`GS+J#q0)4BgmptMW#C2jMazxGpYL&*Jc?2I&Y)ip7g|cSt{+R=&i$OmN+bmo8T8 z5#xabYn*ZD@g2N0#iyH$8cvRdn5ZR6pEUTq(IfL%`6~VOLiyy<6uW*4&qP<3|9I}x zrlP;98{;MNC+~WrLj=~(eR+F^YI>u=A6k9xvG3kUobVnU$&_{i&q)wN z(W5X5mGrE(5?AOtvAgqcQGKDCliSt+zyA>355rPqo zmAK8s&4y%F7>JN*4|V7rq~v^(YVSw&U3EMftsGGYAa!rJ(xFltYFXWAjJ<|$r7R0P zA*{_rI5v{m#b&d+@?>#jAaas=!|O!4AG)sYfTA%(WZBxPfb>WM7QlUNFU=!xl@>Jl zY}EY`mFB>#-^|yj0Ca8~w(eF%#7w$DbFa!zsg{~`E2aw0=!+@Sk}fo~>iStmP_Bct zBPnbxR;^4U9u4kO^4a;KIX%6?Et=$y z)gcM@%Ld9+A$e{v_104?!^DrPDYKkCHoRGLz1~7#4u$dRzmjq0d&80o`pWEz(o;Tr zUTdH!+6|9brZHl{EzuM2?xF@XAtyKD>2GR!Yq%8)MSrgWUn5C0SdXki{Yd$As)x9T^L(91>20TVmDgZQm*1?vE6cEj3^PtoF984aA?eiEAdSx9b0^f_U?V; z!QsIQh`mIK`;_Dz^o{za>D$^+_YfWqL>@igWa1G{UoB&+MSq%X8hpxZS2Qn9xGD9} zlXnmGz2RQIIHY6vbVzppV#%+sKOLv$rDe8mo`mi$iE{sPfOZ6iJ3dbs5O1j<3h5;) z35^xk4u_Lct4S7AV`>9(t|Wkf*sSdBx`VYGKxE_%RZ>)WVGP-NGMPpNxvL zrX&yATklX`;dcvf8b9O>FvVoRJy6(8#rzcPwg>WtX*z(@tD`? zVA1>q#;Hj4WAd33phi7%_+V_E4RL*ycYW}j;8Y=dy{8Q+XO5NUz#A5qtz*TJ!A`*# zNRY85I?t0V>vY<5ns?Z#FMB$bWtrmoW_$Uh8!R{YwKA%Dg_5$0)9CxG?LTmRhI%*H z#56C~E;jUgfrrPDn|EOO1F~}3+Up}~k6FJMOSKI4g+j7AR{BqiBFuEB^kw==Rf?q` zbyAPOPd%)FsZMwA60JN_{=hXOT20os^h;Rsb<_DNjZWfA#@o>c!GYA7=UKZkx4W`u_6! 
zhOb-0Z=Ci7LdiZXJ8PVbz}a3O5X`=KT-NsKi_gu+8a1Q?M{nXdmvogIt~x=)Ao*7B zc%Y_;tVr6DJf`wT&KoLs8ASw>X>(e#UfZ?#%Vnrff~!!lv^^4Er=XE4?>25gC0y#c%1>lu8 zLz8Kj9D;~Us`aQ{6~X0MgauK@I?^Tlav#Mi_^Wp46p=m-UUVzG)icZn4Q}H2Zxfh~ z%jsW4>vSCMaZ)w-T`=*%K+ZvudIRxtbOH!-(&+go?}k=Z#daKgLt?cbsQNp*1UP@T zn9q=FQ5)kTlwVcuYmYHKQ}_$j82erbHRI&(S93MEHw1wQ{~Gx8NDKD~}=c z&3aqa9)_c2DnAO#%md6J0H?mYJiP{&#&RaMRPGPUQ+)YTdL6~X1N;jxAtg-9Fu>$(2dDJtsA+5}?w(Uf6_-k1{-b z*g+yNPC>ZNq708X>&xIlNk-34^Qil9Y@Q|-TwH;PC=>bW%f}*HD zD`f}dlg?ni3!H6!WGzqyKl4ji0>{T8rHR}BFV;}s7+5+9hR7Y5XZ6?w4C(R{zU{!I zE%V;7%PbIoq2uG_@u*+zYz}omE=#U8k-d?0K<#2FPPMIZ&(`g19Y*owXxU~zUooHs zfB9v#{Ffc9$3ESH4J|bh)7*h$s)((&MIQ=SfhAaLvJO+#C*lcrkPY+rL}unVyyBbrQ|(LnRP< znSyn?z6WwvYpx_mW2f}0a89qlY)#OdOZ~WGn5qt`>JRBlI)F2!rw*>7_6xz?qRuH_ zh{t)T5H55tgUng-xiT9C#p2&8M z-BtHU6;LDa*bB-<#fOiU7eH5Y7l0vfv5CF%8I`Z^YlTBpn3&)+(MZE zPn3#KAW~F`&HG4p&m(}%f$rkzAQtn&Zov|laaXO^0LA=Pk|A@h=Hi}Qv?=p;kGPd7 z)7N@_+0)JX`O%V5$sEZ0$NUC`rmStsGZytRZDW)h0#1_|dfP5r+m6m@ZRO?JfLBiD z?MFyscGB=PPouJDM4rlVhig>{!?UV0m_aJDFTMRGu1Z8`NIAaD;z%9Tkx4y%U#TlK za!mT|TaTh@?zbYF8ohYQy(^?P_499g?2ut$d}s6+2iz2#FV((*!e z(`IMiqth%~t;I3HScNKn_M)uj+?4_(Ny8qxnu#~0DN(S#)Xx_~_@KfY-n0^tL^>~& zUHLO#l(o;S3iO9>NYLd*DD^TbA~-yK@Tuw18D(ZrTF%Y9d=uHcp~S(rn?QpI%#SS> z!q%tT*VrI4P}hIfX0C5&|8zd7*g!6&UstHiiA@ei43 zl+|Ph?Q&xn)kp_DeJh2gqc)R~x0-E)ODDiP%V z89w(~V7yH8_LSJ6&s=upRyLf@$XqSR_8r-XUl5O?I5G>f*WkaC3~nHK0_Mlm34^TF zsu=FpPLdT$kJ*mee1Td3T>As?J8z_RnZLG72EV?SS&#ZY0ApRiQDF6cX5@VKhZ{5Q z#-v_wx@I~*4Cr3&paBMHk+kiKFm_;FUG_)?iJk;Qrxv*^o$^(75@ydBwj=v)Pu?*t z_Qr?&-KVwy7rY$imD11I3e~8G1-kf0_${sRM zF}Tqh2a$zl>*ZaIQIsP?idG=ksGfjtf%3Gc{bd6TXGx1ViwkR>>gR!G;6^;5(AGX4 zZNGvw)MIQK%TaaV?|6l_a&ps41Jajgma& z-iUCRaW;}3yfE_0cbM)P;!V<);S;+&NP!cv#@=4(d9y(S7YIke@B}IWwX6iqWqbi` zM_8>qpg_rflzih^&mMHMghKim%^iuv&zW31q;K`qm-FI3e>p+t+asJR=b5tFPvX@%s z{;dikxmlw}+p_gf(=$=+8nmB>L*we*7BvrH4jm31yxG1He=E!;NJQmD73vh}9$Vw9 z5KCw3?RmGx_ED#hsQwh88b`>ZacG_93{g|UC3ivQLDJ(TT3c9=5s7d!As7X^g^!3( z)qmr)U~&g(H0r3vCW%X~$d4 z@HDMao~(pV7+xO}MqEns`-hLuj9#4q@f(6ikw|AF>azqDNPJPf`y$s#o;85zh2P}l3%Psi?!x1NH!XvP3fT|k*;KSLr;TPN? 
zZxOfDX@|ZGb5~@U_L^;zY9=SR##q~prQUF?o+_ne0oeHiV$Dtjsy6cKx7wrTO`?q+ zVwe#H=Su5{!(hydXY^(v9;>Y+XYfd*)wo8UJZ2TVPwIdHXW2c?L;=$aCm;g>RFA5OZ8-4VLW*Ut~w_c)!4%u5|j%cKGqnC=%q1HA8g!DQK-#q zXcIjEI_I@@-f-deejLehKBP>IDpF2U_Ib8^A=I!_C$SC4cG~;#!R3eVIV8i zY{Z`SAbk8-C=*3rNHbP5C_nqdnC(OIcLaImeaP25C7&%O#q(cHk0x#?XLp%o`=d;1 zqJH^+@3b~Vv_4g2Z>knWhEvQ)p$gIDgg*SJA=#A|h|qOHv)Zg=qlA3n;Hj%vyMO?R ze{Yh&Y7#e=J!3BNyZ}CLl{;tNgbvGG!B@;{7%W*rnu>{cs#t8Mz^J6^VUqQHyu|70 z@P48ANch)R1*wOH80X!U%Ari*LCoA#bRVUhWOh0n@_AsrEuCJ=%AQ%ewglOZ+2O2o zPzq*G)+ebjBQKUr(ps=HQAeLVGEp9;S%&?U_)b-$sT?FoUX`845!8b zyR-ZGYxF!8<6<$Ags4@ogEV)e>TIxuHm)X@kBy=KuW9^lh1|J&5Jx0 z_BT7Fy7Rj0x<%#BJg(g7KCVpmPG|+NSX=nJv{%t;FD&Y!O4!0uPd`@E=c;^D$l*8B z5sjjStflJi#hOD5neeQL9Sx*^o>;B7h^Dn}u9$7EhA1FJ?&Pw2?_`qk07NX&@i2jR zlB;cgW>xfH;?4eBNWwlyD=f&Rf11lcU?>so8COPBHCl~%@^prQ(?k$s_*{f|oF5hT zTX6)FaYBD?)$*6GU~AY*d~NKw4P2Qj?l~Koi}O}A=#Y2q&YdC)vjoR~@}XYwRrkf!8kgrkA9(&ng|?XWK2syFNJj*yUKWU5V1fPD#JjcPmoOh8@#tQORZx?~ zRs9FeNNl@^>gq)8m?1Yf)Z?sh#T)m;gS=M4Br@iLqhDcBF<+EpB)6^;-$z&X#6NT3 zrin+d$IKilgij)S`-oh;!2t_JC`{>sQT)G^BP9;xge{kG~gcVcElFE^Mq&W1ehoL)2LDsYjHPcYe?maOhqD7^c#RH zqCO@dcGew zGO!g<21W_jx0+BNrCreX9bf?fUrJG|0b?iBPVQ@{8U9Eo!zI3RQ740>retr;m`>B) zyOEFg4PKK8@2k@m#5dOJB#>i=7tSRD&xFMvKi$v4Dz0I zFO&Br@FP0pJ)%u=-~l<}`BJZH-!#h%)C$E*Bx8qll%l-+!g$D~A=kSn8|E2l#VFcJ zeY1l&&b@yjLj)fnEvaANmzSB>U`ERHsv%=F#*cjWutb}9o62WL zIoA)V?*-*iEkTh?A&zM4N zeI0YzYpyxY%AZe$!&w+n1G770`;)6+0*ulsS;Z&&r#-U!WBaXL0KKvOkJA8#vKvJt zx1UCH#=aG_mD|tS)v;4Rn83t``9lD-Sn`whDz{6&lLtxq#?67+7%GjVuN}ckJJ4>w zZ>YRqVhkOwoJ_Ij*G)@#k{4-6Kk{TJSJS@W*cK3a69vI$?~p|Ck! z=fSHJ3TF1jwN^h6&Le#KzWzXUu@WK6txWl8W}mG`_TxOnQV9F@*eZC3SYn%rvs zyk#f&=<)5rXwo(`X-_b$x)bGF8&3PuMKN2OKquPzp7>w!P8`~u_=2eU(A*F#rrzd7 zD?W20I*DPRV&6G?=ikSY5wq&2Yh|K%fO5SpKzuPiBLTSS{H(o_J>IoC+@6)aHK60? z;#CK)``|Dpd+SmLGpIyutnu)ZD!g-Zky8r{@!qB#SDbazyCNd5I6^$#befA)?#PkhN+v2 zK~NVKd3$I>wK_EhLBKQ7A*_~MiAvc@qr`|<-B?<&HCk$_fezso>A8WU`&@9ME9!5} z>*b^IVW-jH{OMrXEISqUWZs`4*Rze)-JfDr!d`xid9E*{Zs=(St1!E(^gaSPG|Wur zMRIB)vIUNj1SaCsBBTQKDSq}-4`-T)(mk8=O3&Fwt+dqW1via^ zrnfx9#KomwF=ZX-g$@F$uh0Y;Dr#)p4cF?xk3+G8UP4oMv0XU(%y_>dQWO_1MSkn3 z%BGrj3i7lbiU~v|){R<9f3-j$||&2p$&7ao!pN0lPKE3?^6om7H&_o~;SMXF98Q|L~&>q2^a zQ&qlFqz-DF((*vSB*p2G+_D)jBLlg}ZO!(hN1xLl8&o%k6P~q~y<5_sfG=o*p-oMX!sF^x z)jF|4XIM%VzSVRhs9H{Q`WH-??fJNfX+0Lt(0h;KY^{@P=dP2B(j18KkVurUp^8~&OgNY+DHVi z(h(66K8;$7D?qN9rteqE7p8@@9Fy{CrA#<4q}1*Hbjdq?BF&P4jU9&7m}#+CG@S_b zF{-dqKZ4wE2o!}YAO3~mxO>+9Pk06l`ZHSq$K6x(KVgNR561s_WdFZD82>kTCib08 z0Pq%`aYLbKl0YyIUwj$WjFv__FP`kR8%=!EjHQ* z?h6eycH!-B%eNc1B1QsNZuc`vzC`El3$NNT-PtR*7p(`wp6>0l>=IYLaIG&f2zVct zg5-H05Z!_hVBknF>y;tET}u6^JuycUY2W@^)^@Ap9(G( z1;t}NA+gDhAQCKFqO~+-zdmjDo1d4;27Mb23)4X?Zo8tiI46ANMxN9kt09#b+-)CP zl8k2#o!;n1a1hhxOnNC&@%5oq3adF^R4!la>7%#IF+{QsM(<)=^~q)OH1Y;)W^Faj>2WF9j-s1^C|k!m48(_X$8r^|CF9Md7(Z+E(B6)*Et4I2 z>|lSC*BW$RD>;baJgSk0zj-7nOUUij5doy)jp)B<@J=azXb{B3^CyM#dm@FuLm=Pz zjyus03P=8-Dnw3`Sqy4!VD&HT&3E~KAZ~bgxNeCXPOd+R8}9!^+<<}H-@EY(ar1*$ z`5STbqs)Ir-26yJ@mDs9Uyb;GN!0qq`I;+1PDHySL~a@GAs+%CGa+=RiSZ!`6Ja;ht&tH3v}=Mwt}#)fE~BzkB};MrH*Hqpb*865=UzjeYO}+OF=dYEEB?i z|KUKhT%>O%@!tGAuQV-5OrjA>1u{;k!UOaS2Ti4RA5D+@c~v4oiUkrnyZMukYKowk zeZmIcc8T7-8KP5tFCAHh^wDoP;Z?G8QKnuCz<&2%RQ5CW+|iMof5zosr}6(9k#6}v z(4X=72hjM3L;XM)@50Pu14pR2>n)mh=Nq?xlc$=E$ zN09suVg4D=et?XBaf9Dm|CRn@2>>1%*gpTFqo0f9?HG7@f4UXuUm_6be-VLhy_vTT$fNv+N3%J> z?fu^L_Z$8ZNI3sQmO;M;k{{;rQ;NTfoo<0@j$hUu(650|;x?7=Ef`M8 zXlP&#wWZ`>5wE-AYZN26krVh7N4DP}2uLcA~fO%*e^`HaGGA zqTXM7L(tCv{THV$VQ|Za+nRnK#rN@i539e9+ZAeTf&!!j@uA#O<$t^=dAPZ6ZNr4} zhYSSd<-2vGyBDR+A2Q%Ah<+PY{w@OoIsT>GUAO*Lj)R-?ZZ-a!j1$bw^P7wh1pcQC 
z1mx!WtzFRVa`cb;0YO|`w<`Tx8ysA}*S(eT{Iky-T;RJu7XMKf{9C&mw_W~cT~1ye z?tj+3mGS*^9GrX{z~9R8aookUzqQNB$9K0W`G*Yrn?5+fz~Ao&<^bM?#J|@CbNn_} zoL~;{Z|#CP?_A^`b-6*eKJX72AMiFr|3e1m{=FQS@1Obr0fE2G3kdjopKoQiM<4vX z4G@s?4t@VyySKLSo8CY`KJI^x4Fm-9{UURO8r+_40R5gT{HeLS@trT+Z2^>RZ5=7U n`_zv}Bx_@0OZnZ)zx$7aqXE?MyZ3-NfFKYG9i6126w3br?!Z^x diff --git a/Scripts/Models (Under Development)/EGO/Using EMComposition/CSW/Figures/EGO CSW Model - PNL (learning).pdf b/Scripts/Models (Under Development)/EGO/Using EMComposition/CSW/Figures/EGO CSW Model - PNL (learning).pdf deleted file mode 100644 index 75f8931d6ed5f4b8ffa85c147a751a65a8824b36..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 33803 zcmagG1CT9E*CpJ#ZQI6e+qT`eZQHhO+qP}nwr#uTJ})M|`Tv-hii*mUtM#)J{`V|zBwcp7e1|&k+q4VDL&I*NC6TbAD>pl%+k@w z{%>ii=V&BkWME@x1j)k#>ELK@q-O=`nmMGQQG-2V=bfWNA2Sv(?9~In4}kl&s-W+W zD!w5PO$07OsKcjTXW8q+yQA`Fi?PGQxd{XU$-lQMyX-o>`s3)+=*{?R0&zm!T*u4O z>4CaRClPrC@7v{T>iuM>=JO_Gr-t{8N4*( zqVAY^sfr!%{QddsKH;X@R%f^A>B?n-m%ID3hPKD&$7ko~DaPmH;Kk?rq{T-acZbIF zba{nW=iuE1;3xR!r}6XIrD9|Aoj1beVxlRf-RN@V^W7V0$%f{osid~(a0PMY`YYLe zcc!F?xy5J}F88Rj2zzBn4m9#An{;+-xo(Q@!pk}&NA>}|ni6n=k6rGiv75cCA~ywj zS@~euchd^2lr(~ zb1NRIVg#*Ahb%GQ)Ah69XPUnWdt-=D+oq(CoQx>2jV z05)F&YXY)aMV6IAmge0V^fsIGc~s3I4LzvbLvEgW%dH2Ik7Yr%owF1-in(No;eNbi z$;J@HGMhSUz0ntd;9C8jb;={xO&bi)-67kpqpE$PU{5$z5PJa9N?;g!u4be++z|je zpkhzF(vH6J*gxGDFmK=IL$p3`SE3THGEc`5wYp+Bs;d%PdT`0Xz}WHN*AJCda{D|x z6eC^*)tv(p9Bv_1ee!V+F- zrp>urX|eQ{`On=p6q>jr)c2;d470F_+9pgaITCII@m^m!F*euxkE}iJo?llDnZx|r zkH3OJt^-@ZL>rpKv`?b4i=#_vcM;6(?(gdvYibMTp5Msgv;gR%)jyr#P1n_EyUF3+ zj|<+eTz?)NOsLe{DAe3^|LkS2kRr*l9qf$_`ZIY(j5kJT^pU;!UP#{C1i}RVvDACW zieC!oM956nGMeOEXW)GmbpkrRDM-rig%bfWf8<2&Q;>Gl+Au%B!MDB_zLa-3 z@8qNs1F?8#?orm;N6ez?R22jPKS-&yD*WW6FQ@a4!_6QN5Na%Fz5%+@!$>JdjMCAF zBE5NtTlm^dbNRzI)ZsqE?uja#n#oyjkb?JcZs6xWTgRX$G>lL;>J12uNt1-nUnC$^ z{tK9nnUBh^SLjm+5{x;N+8PBA#y8Zo>{?wnceLt@1qYoi_NgxAw3jX5!ueg!AJ}ZA z34i(aK>hZpLZ{RHt4k?&k0dny^s^wmf)leLXDT0@3;>r{YK64YZpXy z=y9nZQkwr>ZoFC*HBpS1hZIr7WKmzUh*0}2Q>$iz5Mmw_V_XOw3gq?h(v>CB=tY#Uv|7a*VQ6tbZp02d zzn9uV_+8r%iH%4bYNcR*-LB!nVxC`uz1rs*_)yi2$I9rXY!Q+hbkf(q)`Enlsh7B2lMRs+n`2l|%bNyA#=XPCVGg(+}`0#+;$SC?BzI2jx^F0JqVs&c3fEy8tJ2s-Fh zYuiZPO$0W9lB{qgaVrnWK>TK?BIoTb;h*SKlE2@cy3(E$YLUAJvoENNg|7dr1~diF z_HS%Tq&SH#clnKrD0@5x3_5!`T%Wv^wqOrjhJGSw43mg}Tqr_BJHpn0J8l?#dJ%kh z%7_tAg)5TQcQOKM?Sio)OL3>?e`mekSb&<&Ff(fzYM+^6iKic(AP?8k2o4ST)k z)Dj!7`Bpr;K|(z*<4r-mn8psh04@gbp|<=`@y=fEoZn&yuu*5Gh?0RDcN7`=NTa$H0TtRXx#Z95z3v_00%eU}w3MP%k_ z=jRd&q-^1$&0B(N>ayshiO6@(F`=jHXF^!&k5p*iN?Uq}-Dl!wBsT2|&I^H& zCsi*29gvM6GGEOm3l~3%_7>SO85)3~k4BV2JW7QpkEILBs3|`q?e&@PVHW;L}uByhLo3{7HNl35DQ865=WzN=zbp6e4p1 zkQeCKAMVJR*TlEsGIgvLqRp_Z8aKt|cAMhOk8Z1zuh;dsZ*v{*lib~iZ6Bu9uW$l-g3Va{mUF7&YJ>OMNe@?vvXy(&# zizjaqQ-wmOBUw=WahDup9?Sh5HoE_3Xrl>8v#_f$G6=s9`|`BqGbmdhn64!^j>`lfu5bJh@(4~8GHe{K8FyWtv9vRz*DG(3ne&^ z9KdvP4v!B1`H7cNhrSX!bE4Np;*;?vWQ-2HLhV%A_he739noK{}61=6bAs_PhQyY7ifo zV}gO}K7_JU`Vw1_zf%NeHD!UZC^Wu|K!*JkGC$)qsU^h~4$8?7hGaYw6{&5%cNw`* zjvs5fQ{6)1R(x~?UDx*8_i)K~6b-a@4OcP? 
z1YYb3>N7adlyFEMd|$A?V_P`Bp8eq?WNH#dWnHbT`{`P3*o{F7Av4*w@3OCbjsAx# z@Zz~t4@|imuvX%_Br*Sb_!j@e{jXzGGQ;mG26Mv549Ws%LhHIf^JeC76;PB8jmBRO zx=0K^Eb?aTBgOluS8PrZ38;dc3ho2A{jRD0TecB+v-#i1)^(5gZVKZDsbFV*p)M4d z4=FNny;y%j+$0KE_T4&|Ck4F_o_DEH3j6Ijt+eq9IJr8UXp6J^bDg>!IzxsyL1LFu zhcWvB4$z1EOrGc=M>?KMq){oM(GEU z>}?{6lI=wk9e1ZuF8PUJd(S?hyJ$R+u=#YaGOSyYT!k?y zA^^0!C}0Sr^u;s`=V6Oti6gO`c8G2Ds!JDOPE14Nbtl!BmT8UxE4NflgBnrPsNUvD zNdoLx^udj6Yns|w$*ub8R{*1|5BQ9n8Ch`BqBEP}F%zNxCLgTD$_kwYqHlmKdw6pN zhlY?{1tL61_3*{@B#BJ-+KoF@`vCplV0DI0cFrk_RSq+QWfz00U6)CFmzxS_Wi&xanAEU- zdd$=awo``J6ntN=%o9O>FwQBwbS6}^7=IjG3K|hyCL^lV;*1fTOMEpn~JLOcY1h2y)i13XAEGYKeNV=jb$b&MC%K z%B%T){fuq&spL$K97d+-bbnZPb|)TAQ}4kEslWvSj2I$V*wF^1aS)W<{$VQ=`K(fh zJPiYtzC9UI4zHnWYesB_VDFFN5+_twK)Exh=pk-hLFfWEl3%yzu4#gZm%@ zSSevcn&IGasWsaob3Oo?bxW}htc=>}N$Al>;}NT;TH1i{l7T!=HgU{S(RiDBqMd`J zYvU2-l0YGO6ISP%K$41B0X)Oa0H;6RR!R*g#=*@9M zh39&eGo-X(LC-ccbVl=7EYlYt(#$B19Jf+8gH*UsY9l>qYV}1ZQxLO3wvcaTZrRG` z$dY3Nabw3U>`ECZ@ScrKnT+i8GZ^;{iy^{`VFNx?Pt0a*_yW5eT!AN+e-)|Kx7+QMIORv4F!Dn|);&X} zssFuHjuy*uDlGy_FQ&gpSA5Q8;I@FX{p<8%&3BO=L(jC$yg0Z>E+G6`I4Svg;V)Ff zY(%jEy!Q?GHHy@YLBMK;xuJ2?YLkgzUqN;gdHR5S6S=c}MEHmk^-_303UrTOC< z`^d5a=O8-#)TXbQQ)}lHVq@23ar4sKl?5Cy(uj7V*)zy_!eQca6l0TgfrPCI7mKB* zdKu!DUh0!C(j$Z_6kMLkvUeo2*cRI~&(+)47bf!^U4?e^?M;IxQx19_cdp?Bpz^`J z*|X8OWBhHWYbqjUFt*x5i~a)#MGBy@4-3@+SIJddFojS43`O7y`XmJ@V7{-f1WuYb zY}qXWP6$MjCB~QVPD=32%+mF-=;0T#jzy)3 ze;B=g%D7@#$SjIpPq@C9iNq+ z8DHyP=7N7i`)luj{}03YZ#q-d-p0xHe}wyQ2meC<+h4`MZteA~9c=$`X5jW;SRCKc z-sxXq{L?4Fzg9v<&SnNi3Zer4nEYon6pS2foa_yZ9Ps}cS%v@6E%v|le@6U&u&ng| z#QcA!S^x3#Z$6eDpPq$|o#p=^UXMIHJT;c$ZZjQfaGQxQB$}TZt0=X`nyJ>}kJzFS znY*L$+*S#KJ1dcqIN|OM54gll1*9|}+4ws7{P@HL)&QvU;<7|haEFW(n~~=WHPTU0 z#nwSFf76h|SZe5BWH=a>7`Ev6bnNu(bfu*^7$0-pWU@O%qu>L?LUrN$c0LwgXSXoK zYyr#7gYuzKZ`F(AMs<52@OfkMy(~v@Q(C{TkVVu9y{!N?E>SL9cicT!38vHq2LKFx zsLCed5AVy-Q~{>u-9Af%Fj)t89aD;Zr}lSoTq5?7p$ySQi~w7ft&Vg->; zC|szO3Q92L4~!>lJzF3*fgB({?pz~3Oj_x3MH6MXHH|ydJI+Yo>BF2u)BCS!T&o!C z5^ocknr4+2jUG<2ksCWS4==zN&ci>C<-~Ju?l#)lWFu88MT~;wDRz$965Q%7)nfLF z`G;v!L(I1LO23j6NUEe3aw@iM*@U2=V39j$kx@`C9AM9%)&ri}vp66*#92kjbhU0O zt~#r!|9}k;$Qpq}R6F4$Zk2yjYS_+L)m)Wr=}<2E7f#KTJdhNuP1cSDuA<#ysj2ar zylX|C?QrgG+S!BUl*;{1MAs8L|EzR)RXBNzn-R#$8h|iECzE*>!xYEIxRTQC$Bfyf zLX2Rhf$;&EFCkzM5zIUP?HDhke>Z$iy1b;CIA1@nFl0eqyJl{#kAxw6yS$uht6ya# zpjMW@=>CwTu&6M~uTWQJ>CXZ`I?hl<>#B=r)MOmPTHO}hu{b9;PyrK2&CSNzJ zsjP}bsZ*UtgqBz?U2r5Bk$68BENtY=&fqk1+UKjKkOFyOLrD%7`E4)lk%#fh`jiLb zf>Vr|m7L zkJY>MupZ&uXFq2@n0I+Bp@$)im(z(GuUY0jOnFpv>^KM7atE#Yr%zD@ksv0!8X{K_ z6Wq0A8+c@@?z@~;2&Ci8LlnYh+XlWW;*exzUEKLSWks#HlwOA_=~j}d5f9FN4 zrGfJN#v`23zV0AA{qRIvS3W+NL!IMz2Y)CZ@q;ufhnNJCT9m67$m9V=TPcBXT2^}|`;4)5WVCY#+Wwz**E#;st0qH@A0!Nq$IOZmiP0gvrW%SuGpG|?! 
z$hw5@R%l&vj+r&_kaC@Ml5^D_rH6DQq|PQF{v|C$tM+LvhBPIAPI|~L230k3D9Uss z3?#nLAWEH=C{)-uYwOrzmJ^mfFCY|!d^j7x2OcI42B#J}qF|GUnxZZt;ec2lkcxu~ z=45-bmx`g<%=B@@$vTPK4chY={L~OaTP1ozSO2YWQuJ~W!rRUi>AgE&`)KaeXqQ2O z*4`wd)%(Z%chj%N>r4qcRKj^q=A_n4sU%;WIK!&F=8e&Jnbq3v1UjAE*Tz%ODwbo~ zOLh6U2?+}_)h}lH&*aI9P~HdE;_jbU9Gk0EhR@p-pZY0>gVUiCQ8=6%PLXq+P9K+` zC9Pcs)8)wJOb+LjQ!JB(f}F=1pZ49;1s|{Ldke$u2S&@c>)#I$$mxtHK?nT=bb|EE zuyX5cHGbmnOpO!lm1hXt$>puqYmsnePq{ee_@$Bf0JOF|xeHnBjh(o1kFa0%ei zXT*Ye2}|-XzR4*(aT>ltJJTKVU#GCEo?O}tMPwSR^!Ba}uQNL)ZQ7l1 znUyKsF{QJRUff10gKFAK&j}cx4!Y{XEkhvHV4fU=^1~0A2Cno|R*9Y?$mJ=$4VC0e zF<&ss>go#Xk~I2)XD{_Md$9zn%I@2$%APIIV9+IvVO{i>td#m1tATY;YTTSBDyT`Rq_x6O zy~I#G#8#eS*2E-bv+E8+xL`Vq85Oz#scK<$a71^vhETl7q76v;UCxnatwB>!Og$J8 z;(8~C(;_iknxINQ*^`H<+SX2}On=O)#4XhnZO?Axw5aBagQ^`OEMr1rcoAD51@=H| zauG|Zij(Ri$xQbrEqaW$0#rH1(m_OjigCW*M#=5H^NnI5Y?933xF(~=p_|Uf&)L0+ z2%hB>QBT?#Muaf#-GzfwkgO<<;Q5w$OvUi`<4!^#ny#F zsj3GKZU5xSUyhDw`2O%ynr!Fxy1KimiC*#6O{V_Wj`s!`vl+z6hG^|!v0;y7VU;NX zBZ6?bf&L_Ojk+B9Odyi#ba^s=j#H+rQ&kvx z#PKRKs)hrO+-H9k)2fd0Na0&fGM_f)i?DVTVG_k}7H(blOHW`WiN|Edmt~SRmABET zqTI`dFUMuKE8$cbaBW-$!F)Hf%v?UNHM%(eSVA@wtvRxAR2%$ihG^*Eh&O9Lt$i8G zie^%C_W11xbFFZ}s_W^0_e~-dnK=_b$UHTXv2Ef9$@BENW0_g;9ym0fj}pnsgVQa= zg99f&i-+<`r^)gwQK4r}L+F08yOR&0&z3cAtBHlr1iXdD5gmP9rZQ~pQqA%pF2suK z9vw>B)?nf6k_%OY|I)@XOZkQM+7*V~i_ftQ(H5#$E8LzP8uDc4`4z>Y4Qw%!w*pVwDMHw`!sth^l0&cVEF%h$GFgiRmLkz#hI~~R)a>LXGkNj0hq=_rZlt$hU9S?`fx!b?U*xJ|KRnWoOU59Vs3)$racQrf5N{<60K11bnBR_+oi7cbf~peYreQ8~Lx{@p zwsq@9it38+55+B;3$tBWOaMU>^xoET)tgbh8@v+bUG*v=;TtXL?8`QwN(ad*hh`;b4p-O(uD=p7g0&y*UuY+PRxl6jEnQ91 z!o|A!XF+j6qkws(FsyRhkMt+~tf>>}8_7?H!sOE?cn57qW&6bko<;RyAZyR3d=p_? z+IeA1z(%j8Vr5IiQRV2GA|Wl+j0V@dQMhdn??d~~qp*Kfueu?nGw9YHH@k;DUeb-q z^$S)dfuftMva8J0etK13)t=2EcNKPd8-AybA@q5L*)^N=m@B;!a!VjvVs>5$d`T zwP8gy8W45a_t{V0mnfZPd3?-|sp2VZBbzp;lV%L^PRVVVe{i|yLSB;Ms_V1sw;A|p zB9iCoPeP&T3E>P2Of@|kLRdBRBW%9JObkYJhb1`@?!ZNg12Fu>HRA?O9 z2MjoqxTEc?s&(W)Nd^7J3|on6C=x?PE((E(xk8+*cggB3)5#h{91A6;>7{+;;%QIv z?{h_ysnoMZ{4+9GN@;=AD9C}d+KNLBQ|_%^;V;wDAKI=N&lo38j`U3sTe_ZlQ>VQQ z@wUh=8M*Y&X)j%#IzA$vUvU+l|6tPh4o4x-fr>H<3WdG81mQjLVlF$9s;yd)r)1q_ z{-u&i+w8)3hr2xKSg-X>5KOE)A=UI?+3gCH^s8nPFa$=pPoo|7BR}XL4gt^rAwHC@CG}nKR_4u+fN-nho?(G?l7IEk^=5z+s`lz`qK> zPDmjv4_XXmD}zqkFF$8{Ke66vh7<-oG1}`DczJ~kXKIixOnX`$k1sS(N3(}sx}Nus z-7LC-oD)_w8ePO6`mg6)C@Jy2#s}A!QOC@YVWYlZzn2;)$dbS+ifO-|qq#}iXc)e( zJ!+y@cV>CnyKTP)oE@@(XX+p}6P?mM`T%%17%zsR1Mot0CZE+R!fbj-X%q>}!F67y1WnT9X6H7^$T)*~c5P88$Mm96B#L zIqk$>m-BS4<`JvL8yjD7x@Rb6-y7fH%XpC>ow6?zx6dZC6Gq_1=fhM~(IeXWQ`wWHOzMb$M%5Mf}}0?#We7N%92KpH=q5}6_KJ-pgISvqGH36&Yhz*3lZwxtt? 
diff --git a/Scripts/Models (Under Development)/EGO/Using EMComposition/CSW/Figures/EGO CSW Model - PyTorch.pdf b/Scripts/Models (Under Development)/EGO/Using EMComposition/CSW/Figures/EGO CSW Model - PyTorch.pdf
deleted file mode 100644
index 6d531c0d8fbfa359667abf9045eeca03b156a65f..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

diff --git a/Scripts/Models (Under Development)/EGO/Using EMComposition/CSW/TestParams.py b/Scripts/Models (Under Development)/EGO/Using EMComposition/CSW/TestParams.py
index 2c9bb768d2a..e9893eff726 100644
--- a/Scripts/Models (Under Development)/EGO/Using EMComposition/CSW/TestParams.py
+++ b/Scripts/Models (Under Development)/EGO/Using EMComposition/CSW/TestParams.py
@@ -50,11 +50,11 @@
     # loss_spec = Loss.MSE,
     learning_rate = .5,
     num_optimization_steps = 10,
-    # execution_mode = ExecutionMode.Python,
     synch_weights = RUN,
     synch_values = RUN,
     synch_results = RUN,
-    execution_mode = ExecutionMode.PyTorch,
+    execution_mode = ExecutionMode.Python,
+    # execution_mode = ExecutionMode.PyTorch,
     device = CPU,
     # device = MPS,
 )
diff --git a/psyneulink/core/components/functions/nonstateful/transferfunctions.py b/psyneulink/core/components/functions/nonstateful/transferfunctions.py
index b49f27c1306..5e2550b1509 100644
--- a/psyneulink/core/components/functions/nonstateful/transferfunctions.py
+++ b/psyneulink/core/components/functions/nonstateful/transferfunctions.py
@@ -3366,8 +3366,12 @@ def derivative(self, input=None, output=None, context=None):
 
         if output is None:
             output = self.function(input, params={OUTPUT_TYPE: ALL}, context=context)
+        elif np.any(np.equal(0, output)) and context.source == ContextFlags.CONSTRUCTOR:
+            # Allow derivative to be computed when output is 0 during initialization
+            output = np.where(output, output==0, 1)
         else:
-            assert not np.any(np.equal(0, output))
+            assert not np.any(np.equal(0, output)), \
+                f"Derivative of SoftMax function for '{self.owner.name}' is not defined when output is 0."
 
         per_item = self._get_current_parameter_value(PER_ITEM, context)
         if not per_item:
From 2255310285a9dc43277e47bf1f1210b819e6077c Mon Sep 17 00:00:00 2001
From: Jan Vesely
Date: Thu, 7 Nov 2024 17:33:46 -0500
Subject: [PATCH 408/410] llvm: Drop PTXExec execution mode

Signed-off-by: Jan Vesely
---
 conftest.py                                 |  1 -
 psyneulink/core/compositions/composition.py | 21 ++++++---------------
 psyneulink/core/llvm/__init__.py            |  4 ----
 psyneulink/core/llvm/execution.py           |  9 ---------
 tests/composition/test_composition.py       |  3 ---
 5 files changed, 6 insertions(+), 32 deletions(-)

diff --git a/conftest.py b/conftest.py
index ea4a1b2f206..2761218504e 100644
--- a/conftest.py
+++ b/conftest.py
@@ -190,7 +190,6 @@ def get_comp_execution_modes():
             pytest.param(pnlvm.ExecutionMode.LLVM, marks=pytest.mark.llvm),
             pytest.param(pnlvm.ExecutionMode.LLVMExec, marks=pytest.mark.llvm),
             pytest.param(pnlvm.ExecutionMode.LLVMRun, marks=pytest.mark.llvm),
-            pytest.param(pnlvm.ExecutionMode.PTXExec, marks=[pytest.mark.llvm, pytest.mark.cuda]),
             pytest.param(pnlvm.ExecutionMode.PTXRun, marks=[pytest.mark.llvm, pytest.mark.cuda])
            ]
diff --git a/psyneulink/core/compositions/composition.py b/psyneulink/core/compositions/composition.py
index cecbe74c70e..08f5e91e2bd 100644
--- a/psyneulink/core/compositions/composition.py
+++ b/psyneulink/core/compositions/composition.py
@@ -2053,9 +2053,6 @@ def input_function(env, result):
     * `ExecutionMode.PTXrun` -- compile multiple `TRIAL `\\s for execution on GPU
       (see `below ` for additional details).
 
-    * `ExecutionMode.PTXExec` -- compile individual `TRIAL `\\s for execution on GPU
-      (see `below ` for additional details).
-
 .. _Composition_Compilation_PyTorch:
 
 *PyTorch support.* When using an `AutodiffComposition`, `ExecutionMode.PyTorch` can be used to execute its
@@ -2067,15 +2064,11 @@ def input_function(env, result):
 *GPU support.* In addition to compilation for CPUs, support is being developed for `CUDA
 `_ capable `Invidia GPUs `_. This can be invoked by
-specifying either `ExecutionMode.PTXRun` or `ExecutionMode.PTXExec` oin the **execution_mode** argument
-of a `Composition execution method `, which are equivalent to the LLVM
-counterparts but run in a single thread of a CUDA capable GPU. This requires that a working `pycuda package
-`_ is `installed `_, and that
-CUDA execution is explicitly enabled by setting the ``PNL_LLVM_DEBUG`` environment variable to ``cuda``. At present
-compilation using these modes runs on a single GPU thread, and therefore does not produce any performance benefits
-over running in compiled mode on a CPU; (see `this `_
-for progress extending support of parallization in compiled modes).
-
+specifying `ExecutionMode.PTXRun` in the **execution_mode** argument of a `Composition execution
+method `, which are equivalent to the LLVM counterparts but run in a single
+thread of a CUDA capable GPU. This requires that a working `pycuda package `_ is
+`installed `_, and that CUDA execution is not explicitly disabled by
+setting the ``PNL_LLVM_DEBUG`` environment variable to ``nocuda``.
 
 .. _Composition_Execution_Results_and_Reporting:
 
@@ -11841,7 +11834,7 @@ def execute(
             called after each `PASS` is executed
             passed the current *context* (but it is not necessary for your callable to take).
 
-        execution_mode : enum.Enum[Auto|LLVM|LLVMexec|Python|PTXExec] : default Python
+        execution_mode : enum.Enum[Auto|LLVM|LLVMexec|Python] : default Python
             specifies whether to run using the Python interpreter or a `compiled mode `.
             see **execution_mode** argument of `run ` method for additional details.
@@ -11965,8 +11958,6 @@ def execute(
                 _comp_ex = pnlvm.CompExecution.get(self, context)
                 if execution_mode & pnlvm.ExecutionMode.LLVM:
                     _comp_ex.execute(llvm_inputs)
-                elif execution_mode & pnlvm.ExecutionMode.PTX:
-                    _comp_ex.cuda_execute(llvm_inputs)
                 else:
                     assert False, "Unknown execution mode: {}".format(execution_mode)
 
diff --git a/psyneulink/core/llvm/__init__.py b/psyneulink/core/llvm/__init__.py
index 34e4ade75cf..f46508e71c7 100644
--- a/psyneulink/core/llvm/__init__.py
+++ b/psyneulink/core/llvm/__init__.py
@@ -70,9 +70,6 @@ class ExecutionMode(enum.Flag):
     PTX
       compile and run Composition `Nodes ` and `Projections ` using CUDA for GPU.
 
-    PTXExec
-      compile and run each `TRIAL ` using CUDA for GPU.
-
     PTXRun
       compile and run multiple `TRIAL `\\s using CUDA for GPU.
     """
@@ -89,7 +86,6 @@ class ExecutionMode(enum.Flag):
     LLVMRun = LLVM | _Run
     LLVMExec = LLVM | _Exec
     PTXRun = PTX | _Run
-    PTXExec = PTX | _Exec
 
     COMPILED = ~ (Python | PyTorch)
 
diff --git a/psyneulink/core/llvm/execution.py b/psyneulink/core/llvm/execution.py
index 5a172a83eec..cf02c5f2bdc 100644
--- a/psyneulink/core/llvm/execution.py
+++ b/psyneulink/core/llvm/execution.py
@@ -501,15 +501,6 @@ def execute(self, inputs):
                                self._data_struct,
                                self._conditions)
 
-    def cuda_execute(self, inputs):
-        # NOTE: Make sure that input struct generation is inlined.
-        #       We need the binary function to be setup for it to work correctly.
-        self._bin_exec_func.cuda_call(self._cuda_state_struct,
-                                      self._cuda_param_struct,
-                                      jit_engine.pycuda.driver.In(self._get_input_struct(inputs)),
-                                      self._cuda_data_struct,
-                                      self._cuda_conditions)
-
     # Methods used to accelerate "Run"
     def _get_run_input_struct(self, inputs, num_input_sets, arg=3):
         # Callers that override input arg, should ensure that _bin_func is not None
diff --git a/tests/composition/test_composition.py b/tests/composition/test_composition.py
index 737496110fb..41b8127e523 100644
--- a/tests/composition/test_composition.py
+++ b/tests/composition/test_composition.py
@@ -3737,7 +3737,6 @@ def test_run_2_mechanisms_double_trial_specs(self, comp_mode):
     @pytest.mark.parametrize("mode", [pnl.ExecutionMode.Python,
                                       pytest.param(pnl.ExecutionMode.LLVM, marks=pytest.mark.llvm),
                                       pytest.param(pnl.ExecutionMode.LLVMExec, marks=pytest.mark.llvm),
-                                      pytest.param(pnl.ExecutionMode.PTXExec, marks=[pytest.mark.llvm, pytest.mark.cuda]),
                                      ])
     def test_execute_composition(self, mode):
         comp = Composition()
@@ -3841,7 +3840,6 @@ def test_LPP_wrong_component(self):
     @pytest.mark.parametrize("mode", [pnl.ExecutionMode.Python,
                                       pytest.param(pnl.ExecutionMode.LLVM, marks=pytest.mark.llvm),
                                       pytest.param(pnl.ExecutionMode.LLVMExec, marks=pytest.mark.llvm),
-                                      pytest.param(pnl.ExecutionMode.PTXExec, marks=[pytest.mark.llvm, pytest.mark.cuda]),
                                      ])
     def test_execute_no_inputs(self, mode):
         m_inner = ProcessingMechanism(input_shapes=2)
@@ -6606,7 +6604,6 @@ class TestProperties:
         pytest.param(pnl.ExecutionMode.LLVM, marks=[_fallback_xfail, pytest.mark.llvm]),
         pytest.param(pnl.ExecutionMode.LLVMExec, marks=[_fallback_xfail, pytest.mark.llvm]),
         pytest.param(pnl.ExecutionMode.LLVMRun, marks=[_fallback_xfail, pytest.mark.llvm]),
-        pytest.param(pnl.ExecutionMode.PTXExec, marks=[_fallback_xfail, pytest.mark.llvm, pytest.mark.cuda]),
         pytest.param(pnl.ExecutionMode.PTXRun, marks=[_fallback_xfail, pytest.mark.llvm, pytest.mark.cuda]),
     ])
     def test_llvm_fallback(self, mode):
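A hypothetical usage sketch of the surviving GPU path described in the documentation change above (not part of the patch; assumes a CUDA-capable GPU and a working pycuda installation):

import psyneulink as pnl

A = pnl.ProcessingMechanism(name='A')
B = pnl.ProcessingMechanism(name='B')
comp = pnl.Composition(pathways=[[A, B]])

# Compile and run whole trials on the GPU; ExecutionMode.PTXExec is no longer available.
results = comp.run(inputs={A: [[1.0], [2.0]]}, execution_mode=pnl.ExecutionMode.PTXRun)
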
From 86be704e3ff5135481906aa830709ab51c693708 Mon Sep 17 00:00:00 2001
From: Jan Vesely
Date: Thu, 7 Nov 2024 17:26:26 -0500
Subject: [PATCH 409/410] llvm/gpu: Use more aggressive optimizations for GPUs

Improves GPU performance in stability flexibility model by ~2x.

Signed-off-by: Jan Vesely
---
 psyneulink/core/llvm/jit_engine.py | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/psyneulink/core/llvm/jit_engine.py b/psyneulink/core/llvm/jit_engine.py
index 5f9be454ff7..06af278f34e 100644
--- a/psyneulink/core/llvm/jit_engine.py
+++ b/psyneulink/core/llvm/jit_engine.py
@@ -107,15 +107,17 @@ def _ptx_jit_constructor():
 
     # PassManagerBuilder is used only for inlining simple functions
     __pass_manager_builder = binding.PassManagerBuilder()
-    __pass_manager_builder.opt_level = 0
+    __pass_manager_builder.opt_level = 2
     __pass_manager_builder.size_level = 1
-    # The threshold of '7' is empirically selected.
-    __pass_manager_builder.inlining_threshold = 7
+
+    # The threshold of '64' is empirically selected on GF 3050
+    __pass_manager_builder.inlining_threshold = 64
 
     # Use default device
     # TODO: Add support for multiple devices
     __compute_capability = pycuda_default.device.compute_capability()
     __ptx_sm = "sm_{}{}".format(__compute_capability[0], __compute_capability[1])
+
     # Create compilation target, use 64bit triple
     __ptx_target = binding.Target.from_triple("nvptx64-nvidia-cuda")
     __ptx_target_machine = __ptx_target.create_target_machine(cpu=__ptx_sm, opt=opt_level)
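For context on the jit_engine.py change above, a rough sketch of how llvmlite's legacy pass-manager API applies such builder settings to a module (an assumption-laden illustration, not PsyNeuLink's actual wiring):

from llvmlite import binding

binding.initialize()
binding.initialize_native_target()
binding.initialize_native_asmprinter()

pmb = binding.PassManagerBuilder()
pmb.opt_level = 2            # raised from 0 in the patch
pmb.size_level = 1
pmb.inlining_threshold = 64  # raised from 7; value chosen empirically in the patch

pm = binding.ModulePassManager()
pmb.populate(pm)             # copy the builder's settings into the pass manager
# pm.run(parsed_module) would then optimize an llvmlite module in place
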
From f5064d154858e81dc8ac4b643e567c93fc8e4233 Mon Sep 17 00:00:00 2001
From: kmantel <1592123+kmantel@users.noreply.github.com>
Date: Mon, 11 Nov 2024 21:20:15 -0500
Subject: [PATCH 410/410] Scheduler: fix false add_condition replace warning
 (#3107)

Do not set _user_specified_conds in __init__, because it will be set by
add_condition_set in graph_scheduler.Scheduler.__init__. This second call
would result in the condition replacement warning for any conditions
passed in.
---
 psyneulink/core/scheduling/scheduler.py | 2 --
 1 file changed, 2 deletions(-)

diff --git a/psyneulink/core/scheduling/scheduler.py b/psyneulink/core/scheduling/scheduler.py
index 5413d257668..de1a59c4c0f 100644
--- a/psyneulink/core/scheduling/scheduler.py
+++ b/psyneulink/core/scheduling/scheduler.py
@@ -56,8 +56,6 @@ def __init__(
 
         # TODO: consider integrating something like this into graph-scheduler?
         self._user_specified_conds = graph_scheduler.ConditionSet()
-        if conditions is not None:
-            self._user_specified_conds.add_condition_set(copy.copy(conditions))
         self._user_specified_termination_conds = copy.copy(termination_conds) if termination_conds is not None else {}
 
         super().__init__(
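A hypothetical construction showing the case the fix above targets, conditions supplied directly to the Scheduler constructor (node names and the condition choice are illustrative, not from the patch):

import psyneulink as pnl

A = pnl.ProcessingMechanism(name='A')
B = pnl.ProcessingMechanism(name='B')
comp = pnl.Composition(pathways=[[A, B]])

# Conditions passed at construction are added once by the graph_scheduler base class;
# previously they were added a second time here, triggering the spurious replacement warning.
sched = pnl.Scheduler(composition=comp, conditions={B: pnl.EveryNCalls(A, 2)})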

z$fVdrJzbq?W)J zoL>5t1pbt*ughp&_cttfm0Es1o5|j!3|{#iShRVte13poQ#f24{10xs0z06`IoF@- zr;e0_b&ifiVhq#NSq_t#T2E6`48T^6 z@(14PWaoTz8zAzCCwHEwoT!?akpp2T?=30og;|9gL?Xii_glz}m71<=rD z!vwId0K2|(w#E}Uom5;_=L0FW>R7AP&(zJZ;KC`#Cd&DUi~Y#`$}iD24%U8;4~8%v zgQq!G_eVAC&<{CSlRHTq6Zat*TMB<%buTgqnLg8Pt~)2hFY$5xL-Dak*pU0@CFUQ) z|A9lYsr17|?Fh1eRyr+)8>xbo>Y271S<;(Rh{twUSF$JF>l89p zPx>Ik#Y#m@*f6!D$`PgY zrk@3oA&ZQ!`KP)b6^nPIpt~U8OK@aCqh@=NM0MGo-A#!yt;x0%H;P^p;sZ>bYNMS6Vwb z*oh^xWbZ!U=kD)dJPk^n#8cg%p5hs2xwqm_uGzrcAWj@UH->iFtSOikm z&Iqg$OOUU4Ij20|L%!l;BeRYk1_XMNfxTuq=z%Ol`Zc1k}RxTNs$3u2^*E)fBAD3k&RtuIv5 z?)6n<_*Md|xepwR*-f2I6PasAbfn^X#jyckBsROJq_j)F~rD&59HC$Myh zC^C+^^}R5A@UwwN(?F9&SVdf{&w(@39>S8^`jvl5C$(YG7#m>lY)DFQ4=f~;)uAe~ zB8tA6H=nGc%8UG=MM$d|7a4JS66|1;eIcc%9qkD2KLMN?bOj&;;)c)hr&PX0Ksm%{ zM&hiKAus3P_QjNrU!LN%IceTba+@4dSY?uaP$cm}z>0-JInH&sirhgdDt7lP5Wfx&g(~PSDM@wT$MqqkO`R zT-nFZrF#oV4i`(~kR_`5FE7!$SbvvQV2E4rW?47Q*@T{_dOW>&ztxl#G~ z)mw!P_~*!M+dq#Sr_1Km9N_?rcWyZb2>2NWkTtC*(yl>=RoBbd*Rri+=W^boS4)n1 zCGXoj+aj&39%hw_D)0*D9>C1~oFxZmB^xMA^Wz{5YN$r&xj%31!nG2MT0Tr`Fp3qY z<#e8$C3-xD*YS@R6LVu}n#?{XJ0rJAThY-r1a^L~`lKA%`Q{+;5?gKP6ipiz;5@RC z_9op>_@>*l78a}G4$R$Y-S+Ix_3edTnHI~XHJr2L@bSSH2S|-!ND;w^pEO`bcMTNj zP7EA)d%!1m(lZh6ju{{As023JlPuH`Dlbr18zzL2)@^)oEzfCsM!j)jvioP63IDXB zjJ6F836`-CbVRtQM%mzN`RRfS3n~>KLptY3P1$WDFb^H)e6s!fP`C2Bn&;SaLL!GU zq;Bt5-1v-J0MWT86$XThMUUgj)xg03@~2jKwIInB zKT1x>iG}<8;xO387=k*2VLzlJGT-O$$cQ?O3kx=}6R{KviF;#s`T|Gikxqmc3&RK8 z`~-$zr)LAq!8Pz7M~LPKeK~=nE6v6}5N*(olN!HhxTRg`S&iSyT1uK?m2UVb+8f;+ zeM1Pt1}B^DA!+>q*F6jQ)dr@^EXNx=gVBe`q6x-@jSD>^9$EuqD@g+vzdt#3f-rsm zqIIOP8R7sWuxYtQV{!?C4O$3+@Mb@e3?rQY#4+=ot@yZIOafh=9si8zYj1I#hapGLzI! z>u}P4$M%Bp9tf=$7B{<($>&ISW5l7=y`5vj8Dsa<@TQ!c8&zyGlI9# zOGi$7w5ZEp-vx`!QH>$EF%?1j+&~yAqS`TvYphD>S0L{Lq7Sn3ztk?@9FT|$b`;pz zQ1+Ne?>2lXV*6!k4jeo7K}>%D-+ybnC;?#Zs!qV#;hgzNkjZ>&$LPrc$pM3xT8&=5 zkphJMFA^3YQz6zhEZ26j9iF)3q=N(Z}FVyG1o#44ugzf=E0`DAouR3 z5PAy34<}}V0Tw-f`Bps3nmszhzij5urYkBapw!76)l9wH&EzE_FsA69mKw@ceU^{!1w zOexolWDlDB>e!#G6lSmO)E0B{@>71`GlAxw^xXZHQOX~}1)*(B=t7sT!GNbG^`+25 z)m;w$8qVQ6IU5ZSE5FHJ^MG@@_Gw=}Dhm4Y8&eH)Dh^AA+$u>}a5-saDIC%R?0!{h=*6HX1lLl$VJ2^-U1xH+Rm z?nu&}xSK6kAunLMlDGPvsyz0WCDZ>*!Q+Cg~LX-vyJmg?iy8ZWvi%VQXO;ul z-as~@e_2piPN=9VS2KAE?la$q zzh9bv;x4OJ%#M^~**h{HZTN97u{CgEB2P zwF;Jy*1`GF&sg$3c@p;=CgGWho}Iz7`L(lO>-dy^>n!C>ePHX`s%X3&P~lK}mu)2# zRIM_cWg&m!@r6{tO_lJ;G9C~*uU*Z2q)5{m@gD2Aej_ZbD0fhv<+9%Vrw6-eU0`Je z!o6u`&?fVsFBJ41J!TUeJS!Nuwt47h?(u9Opo!#}#JL`lUi{0DP+{L;=d0%BN@lXp zn5CM`Kp2-MmVFIm9@?jO9_OA@vTD;=YKoB-Lr?n7>AooAlyGy*e-nF$OYa*eh|kvT zJ4y@bC`OLx)rQQeN3qU6CQIjj0d#j^8UaWH0VfU3#{5p51Ud@-2(U`S z=Y-IzLrss!{2hW0Q_~kKuAb>Bm0QKgtyT_>^3-Y8+Z^kjIHSk>^@ROyfd%j8GUVeuQgt2oscZcMbB#2} zIKhHRQ(ZSE&oX&9_ilQLc?SI%TLDj2X%ZD}dldi-+!MTQirpc*pGSS^rHZ;8;RJEL z_pmH6D~-yFU*Gj2-h$E(VCyLa4Yu!TVyl?izbq}-L_oDvW69d@8BpvnW&81!>N7nG zftq6`{c2q5l$-U*wpTBYZ@tzYgps`hufrgvP11F>>z@KiNF&#$2kS*lf5H`*`559H z{{Kaf&}m5*?4e2lxdLK4(`U#BfTxc7mJ)3X~+2E`soeuo4!ouh*}g5|~Eg^!ERCo|s{K(D7Pi>A$BV@t?0 zMG+r;G`a3P?F8gt7BHs|#PSKA82AW=E=H5s+l3)jZr6B2IvwTN-ZQ$G=XvOZV55OF z#bOq^4^I`M$TQt+>LKiy;Yo&e(m{%1dcosnh9ZTk>a-|aZ;vbwPKbUzn{{>L_!7Ka9JfLr{o2z0-YJ|ot z)7}cYfINNeGsevl_{($&Z;Aue3i76C5cx@ozHdXsoypBY81?@SN2 z`lI~4KP}YalexmY#MaRjrYN&daUMHCWAXt3Mo!Qz$s(`vnbX)>{>UKAfyvpDS~X~}eYe%FbSHE5 z0K%tY{Kp^nmdRhf{XQc*bcr`)*EW0=NGn?b)+51r66y*Cq&S{5eNx`G5V6KyZ8U$R zdE=aNfx96^h_|9Px%D3&mf<~S<{9k74wKh{Y}$&!nbL)F7SN@yTjwpb*yi{+CI(5V z-ftDyYwlHcKbDC9~VITlZ zHgEtmg^+i%(YUjbVB?8``I1PDvnO7EjX)gn0>o#kV;m=6GoqN>tvpXIl0{>NZ<(;L zU5#SlVM@bM?-rghL00f;nj zUz5H)@uNj6E9hOu^kUQZZokWobnIPM?1CDGFN~8eRzSN)4~K9>FdYf2z;*QA-*Fyn 
zhRMBr2j(seO>(`tRF2+N@x7R0cBt5t*I0Bq>7^Q>g*jJD--Z)=F@M03KM}wrQ1QD6 zeHIsBm0j_~*MC`XqIBVYCQmR(lkIl~3l{2K`LmgCFz8eupS^jgRIjEurbGIgwUHF5QsZLrs+2a^M>6sQ=tIrL8AFS5y?n+cFVefuT#-X_ExW7=}!nfe4xTeBz3r` zw4>2uR(KMSKl?%GlT_jYN@&u_*%66*uToJgJF0dz+RsmKY(8ROOslJDIF9ADe|4BV zU95+89U7er?m#8alCSV5muco!x%kV+`S|!@mvKX9#zifMZ!bd-PS^B;Ahu{{8nNsI zo)Ax-9;FKuF%PQ$%T9R`giY>5RD5uS*KS3;ztlVjwyceX_I2c!@+f?3#D^-x$`AsJ zG=QnKVt#><%`ivqI>VZ@nq{E(dIkpusag_7509GvXjw*$&1dUTk)mTa;bd>@x_>dV z8$rH|;0?ibL|&h_Ow{IIP^>t2BCopfVhVu+^JKzQVyu%-k%uk|BtTmgoh4hNAeEvP zI4G{QHVy{Q;dxI~H!Rks)?Ey8H)j~$wplW;&N!ed)<1KHLjy6J8};VyjOe%ZCXKnt zyX(~q9WIiu@0?Rb`vC<3f%8O#cPs~6u5r}-9}au`pALom-}HB|za#&>VRD`+8^YU) z7e-62&GOP+M6vlH&#pFn-@L7UouEDiQQG;g$s>3te= znzY$T*_#nmLyehJGr^;Qvq`Am9ZkZxrwTFqb+9Snu;SGi=Gpe-bt>PxBi~i40d&eg z_l>13g#6=W6uxSy3Y@*?4-9z$niX752kG7BLzXm$@OzMEX$Ycah9%@S85dFg>gexP zQJhXvTDxYO^$BO{qqL8IRNO8cKE``E`MUsf4@B%2jmE)j0jF{`=sp}LqQ=CjAIZ$T zxtH6(_!(rq1CtWz_FhNb=cmjxt9w!EdHJ=!l&s#?NxbW7$DEw4$^?NuJ6MP)PXp-? zEHN{Y{1%gl;Hhcs$0?54I(ym6{+hEQJ-}Hf{;-?U^Z#HbwAOX)il>)8reGRyhWlWv zcjEEHrdjfRY?9t7GRF?FeG`n5Hn{(O2*B*eosqjYzI?@a*EfE$?Ophs^7*9lGq{>A zJRVaD2j@IjJlTQz7>MBf3K*D0?4MSSr61dMPa|j6gunED6_C zTbCrpi>Md6#`&il-G!~~r;Fm5T&!=Mne7-sCaV|xupWE9^Oap)aq-FGv-|t{0A%LG zt8_h!{e4Vig0Y7Qs!?3Teu!eT-U>cCAAB?zmGnk&f`*1#SS!htSVTZ@nMbOWcz!P> zDJ?6E@_S0|Y;KnMiJXqSWyIs_xb7^@kOm|6gfd7O6g)5>5Cfj&n7I+eN{(8WlAvU@ zs>*@poaq>cyLS>!M4F39vZ-ItJNY=ns`gTz&!K$S4O^XZ#{1P~$4xa$7X_ku#ou`-5}p!W8NvWM5g?Ao%^t|h?0Y1A>hR14l~7gEwv3^5%}z5?DBK>}LC4oc^-fR*sFp0a9dn3KV-zSroL zrfP5AQo{{#M5Qf({hty~Ty1r-v`0D-^(Y}P)H9Gt-;+Tq)JOwEQqCAa0}h>%PX-IQ z3CZJH|1eL|6)C0d<08B0S1RxQ0@qfU161)za}%0VQzj}xxSLg+rRXg3xMU~k9C!f` zeMe4%2*`jvB$cWK@zP}03`Da!8ys+R!m++psORyG5fzP9r!WvRvWAcQy}Ec#5w7Ta;$aC z^3@cRyT^uoOCQ(a>TCJ896RH_$n(XdgKDZ-_eoHl`WMn&fuAnAz~nV0axmhX_oMqJ zaIKsCA!WXqc>Ua0VMppC;`oI73Ld6iKzdp361Fr{yBy!Uqt=3%T;RChpE&^bQX0sH zf@Fq5>4S}aJHXA9gC`+`S8FfNT+CweiJTC*_wjaZ>XIQLSoadm20cKt00x*Bvv-Qe z(`=aiv*hck6Yx%q6#58MMK~&lbJu$q#KgayUa1{Tnt1t?OSUfIOxEI`W<#{|Lz^A5 zJ>+OI2+7!iM-vHjkYI@?0!B7#APqJh0H4wN!7AVjqTTb)Jn_G?g0 zW=c;^jM;}^9-15E6glX2^#(|bSnAAmYiH*Fxf|s_(=NlTeFwH9NLNO7qJiUcvwvKW z$vX`S@vdMU1iY|9N+=tPp)(5wD_J_z4VDMfvyF#qraxt>$aI^0E#3Mkd!zJew*j!$ zk&pL!Fd|d-1Od9@%8stV#0~a)&m4ttnE6tJ85jprhoi9JNv4*4bQSkVM^O6ud#7~= zzv<%KNPH#0mihA&Tz)3}Fw{ftVe&1IaRmN&=ffl6x(jFZR2O)WXC!ZQc>1BfxXO9~0L)%;wr~$;54hM8>lmbEtg{>xW>8Ul7v&-vADCEr zV&ldUKci~%APM(j-jgRTuu0>qz&!q+I$%oQGxyW|7;i!R`7B(Yfu&29Ldys#<$(=+ zl^(pP|Gjgl0C!Z^F`VhMI~(G0jt54k;bbGrSi~VTP~#I!4*FTB_>Gs;5z4`|xxXxO zH1pw7x5`g7&-$OQXL4uACYN7mJ$bfl703Mfyce9+pFW~7Spn3p31B6bgiFz(` zy$5@YQf~}bTZn8hJHEUfS;}bJIdqX?T&XUJt0Y;zAli^P=@KCAzrPzQ0$Wd;@mP=m zV{tL>hlPmSN441-zS#+A-z&m6&j*UR5`Cgf{anvHIX zEfd287n84I_qJ)r>?HpjhBQfYp`QU=zfCq2d8g_jluxy!$d7G3{lb+|k%7tfz*_$v z;e3AxW%j<>crOG6PKY(u7;rp6(Z zdHt|wyJTdyfK0(^#ah$>Y9dc!^&y$w4|KQcg9 z#7HhR8V46To8=k#&G;NIAU;KxsIZ*PPWDnR-?< zy;bx%GP>-8IJ6+5T{Wil+VxWNV9~0z{}-Y7GuJz0lZsD|HC>z|bb@DQK%Afj7Rdht zbOBECrL+=9YUVI(A*^#zpJzC}2YL^_bDmmNHx(Kcu+0b?rj3Zpc+h`d8p1Fw+_h8KX{OTh3bPzwggc$nJRJai2LL8}& zlr*|NxZzXsn-;bp=+xh^)054dH;$3L7_ra&roa)EZRXP%7P56bd9o#2zP0Q?v0Tn; zTL)xq!m`q_n&dl4hgp`rQiewc&P#B%gK1dLc5HOBEOe{1(bRq3SK*i0tHDhro2S)E zrdJJd!c2Eywo^dznjN^JQFk$SQ{&K_sYKC5JKfpHVbEUTJaR+ej1#uVyj6a1VV!YEUEOPX2Xy#AP^lb+>Qh`xlia9L^!^R)8S*OHc|2I+~uKCo`qYojY8dn76Z>Qhb z5S71s_DhHNl!0Qn!JvT>U6~^Pxzu+;S5**clYM0a8+xZH`pq4k`@(e}nR|hPD*srg z9oTO@7(z9?+O!SM)=3tj&C5Tb+fvESjt;g?Q!Ck#D`lsv8!LyDWu9KV{Pj=wrVkUtT)S^Oq%0A7D{k2CE7ZHliSL#}=a9jwwfUq()85Mu&^fwF7KU@A+h_ ztDh>S-%VK}xd(Iluq-HD}rL*aG8w6Ew1S_S2;VXll&wi0aYxFXX6;jGSgz 
z$g_FAjjZ{wFPq9ungx!lRSs>`oLh_Lwu^3tAqUQj&EpHp#;~moJ5S)QmQQj8DYu^Q z`hlFQ>iO!X>iVR1%_hL1m2N718bBlQR6gaB5&pdd-c|`i=*LuQlbk3tCJ&OP4ebz3 zTVWz0qlVu~e4gDH3X~K9-LqBh!{MLhvuETl#aX_HmErBb%m8srt^p!cvWdCemnXYlL#!zvJLGF5Yrtz z5f9+BQ(0}nXhk)CeYBWvR6`E;F1R~7n(eza#p`fSI`YWodS3R(7T4$KT-HTl&Ze+UOA)WEvDSwZW>l|$#0fv zpt2~g<%#^`8i#qq5Mm`_YqVQnyy#(g6(SYxffsvxreETx_O5K7N%)&k-iwtdnZJFE z-miD7A$}0RDRvM%(hNPLV?!4noXglDxsZ!H&|+kp&tb{tHln*fQp1o1yg92!(C%)%290!fA*$dNZA^I{@9xY|I4C}m=tAlCj#@e z=lw{6GI7l&zyhHkmTyfD>6zQa^Y#2JP$bLHtB*I^KyxlhYm=l9Nlp6<2Fhl@kXWb zou_tPnYFwMi{{jO-!*jT&2Aq>6@*jCMqnXslVNmWo4Mt9JuC1jtl(1|es0bu+sygY z>UhQlD%$o=cf2X`nzuV(c}MHhaHHnygHN?{Q!5r&KK}m8;%u2r4StwPK#8vGhFj|W z`k6&(7xJ>3p5Wy9ke6;oWJR>&=wfDMJeWDL&zaY)>4b%hX=T9Ya%~^Dtg&rtS;LYO^L4dg&CVy;&XfI$hFa zM#bpwe-QRe!R=X?Nq%L+=>zy?1lf0!E}#ukBbQSn(Z`@Pnh_%bm532wuWL>ic(RhW zQqDIp79ZQaC3fUft0}FrJE7o-%jB*TFyYau(fB>p1u%h1SL3D zx?JaDR$;qDL&m3#6wC24mk2QOg%atK#a;CL%%3(e=}DagTY0}Vo*aOqi&QZMu#n9! zX8TTM2xL^%Zvk_(4WG%uSJ9toY0%+Fx;F?sfqxCj@{{ny%}MPJpaa%r(8=6as?p)M z=KD!Vr8DMr-<1vLX-+RJgCFUg^!Gl(zL3JP&(Jab-MdDREdX99eFNiX3CRWn^kya( z9<2GkAyBVDfVRVfC?QD3rm_CTPaVx?tAZ;dxF_ZTh0`mPRCQmyO6KY%^#O&CSN!5g zqMfObswrJ`jrs#}52mRw`w&$I6+|aY+qK$P@4a?S65scSN!*XDjFbAInb4*FkC>a% zK+3Iw@>a%S;Q#%IW%BJgQ!sCUrw4xmN{DSBPcKm7$5ahr981{t$>~cp9h7b@P3Rq1 zM54oDET=PVht0WKaf_*R5zq|KNekKo+54iy&qN1{1%~Ixm-fa56?5{^xSy~q>c5+1KxrNhR}kQ(gRMzJ^)kMjnjd*DVxYu zKJw~s-K{S5+ESU-QS^R%Ct`4uqb~hYR||vNGypiK&{MO;q0$p(dbEiBC9(>B!O~UtEO$M?iS!v%mDp%9 z=;7Jw&JNXanib%~0)gi|du^g?Y2h2ym}-Y!Yh9DfF!e}}nuH5tw@YqnsS7O>po!qg zEugPq1rDZAD$&?Ee5~L&5-&*S43^8AOYE7y!F?m>RzqdRu<3{Rlo|ENNmQAAV+eteC| zZRttgT-c1-#-wVdG_U{RsxgE%H|FX|tVkucS7uy>DhKs)@a@Z$F-N&YH(TY{LILz8Z*p3q~CW z-{%K`0g*jR`=N@F+$_WTW-WP+A-8bpQ$t+V_n;o$phI0>S^*xd$Ms*KH*}JYGC|GA zeRI;B$x8!K3J8)$VHK!$c!V}ys9EZ+4d~;&Gj$?)Y5KX^B@)PKd9G$&uRP-J`AhHp z0#Zg^+;vHjar`8R>>okJNH#beV%ctO1(r06_-Wtw zt?xfHga84yE038(TnoVBR_H!|EXjsKqWLhpFf|y^Ftv_|fma8|7yy;H0foAU&1Kb7 z^b3=NwUYNuPOLxw@-R(2PyokQ-!TJ~ZDaz%lYhhNs{o7=#MRUQMmZ>{3!}eb4usng zEi~*aYg4Z_P(%`g3tAt1rg}o^3gDh@`zh@VU)T9WoM zAhBTCj2$e3%WAf~?LWrmT|6=D8}#5tmZtZ6|e0cY;Y5sZrMYyZ)o&MH`pT2(UCaUJ}JsGH!eY}{eS&$_4?tDAujU;dw zK@dm40V{5ZNI_J>+I$0;Jj%c#vvB9D;6GLMO7;@nrGqWYR`OS*Q0$<&4FKg5F)*KKqj| z9;J^fE4QWP|I&LBv6NK#+UN13P_UuB(A~}oP|m_;0)}h_PrBd#VfMlSDXwoQ6nCa+ zP>L7|5fJd=jrpbPvE-^USA;)!y{YY4pc zzPI(lHQLRD{gnl;Iy$!^=d}5={s};Q>!cqByZ9Ix8>PcYFYC;wl=&+vSsJ)==uV<&<^^D;|2*uz7;EP-eCUm z4URV_N<4_>BEwr#mmTi=Iu#`yxbvxY58LQJKmQXMu>BqV??t%(0(|=4m`ndp{H6c= b`@e9g{x6rq-*5lfGyl<%|8Mr;@A&@$Dx*1I literal 0 HcmV?d00001 diff --git a/Scripts/Models (Under Development)/EGO/Using EMComposition/CSW/Figures/EMComposition only.pdf b/Scripts/Models (Under Development)/EGO/Using EMComposition/CSW/Figures/EMComposition only.pdf new file mode 100644 index 0000000000000000000000000000000000000000..803deafd4e8a5454909f52af41893462e375bf8e GIT binary patch literal 25456 zcmbrkV{|3o_U|1h9d*)4I(EmlZQHhO+qToOZQHhO+kDdfJLf#-f5*M!zPP*gsH!<@ zF3nn1f;ZWmP>6${av*S>S8(13I8{^P_xnv=6aB!#u zP0Z~LY`>1?I`#(q26|Td29TVbkaqUA209jy&guOsW|bKI@b8?RIb4C?v0mPzbA&b9 zfDpp3+szRC%ZOV-c#hCOct$o4(@zG+*00MJf553MSl&A-C@E#Q-oLxJW_xC;%TFL0 zlwI462y)ac7i)dwb|`X53SMtgZ5Bs;=FdBQuCKou&V+te-CCz!-=VIPx-KQf0#ZFI(ANN%9{0f+YTC+Ui5%|OZ+9_{qc_o|S+sXUwV`!?+c6kO>sL-*yz)!Ve)W}2Um z3XWh2u2o4b@z5d57~-j)v%Fgtvjj}rDTt!aHypmk0-AtqfX8c?^&p( z*}iQEFx8xiEQ-;)d7yi+yf9r*J6h&wJeC9@eeUy^i8&SAVr9i-YWakg< z`>~!}LtHK&`^QgFvkKH-BXYhSXVbybHct}ud(J}B_6QX7_(A`Ou9(aQ0QpUX*ytvl zjEdO`3MS6aG#Imt`xcIr+iFmP0j!0RgIzIIlR6`6-5%Nd1GmBL;puMJXz}drzR=9^ z{rzeDd;kH;RW%%@wGIW{KCGHYE1bsaT&0X$@(h08V!3ZA@sir53D-YM)HgIf{#N}* 
zR`&Do&emZvsg%!f%6hmRGs(nKLmbFa4&oN6JP8#Y1t$0Sfx5cI!xf9mU={m?yPcSnT zwsu=_)BF|AKz#GJjjPX-gQt_@+oZ-t?TZ)Jrr*SnD?d*4#1Q>V;eT%z#F}x4n`nHG zW3;mbrRmX=StXFE3-~$vb&*RsPQ_m|$9Qm_pT^$A+)RQBh2@!ybO&2+p9?=9E+%|1 zMz#s3(Sjtvo(4a6s_U&ST}IZ{&RWn>Ac@~R7C13HwkDG=X)m?;ujMuuj;a-DZc zMAdYtkDNmJUeRU97<@Nt-s#Dt1%6SOfyg(7@kQfRCQQ}9sOA$dLrB{~OdmN}_HKlP z@;YD_$VBn`6Aesd>dXIHD7|2Bk#(L9LZ4#N|xkF?_#ft`aW{vZ#Xd` z@boS@lNPj*=S#yNV&Bk!!+ZG?a&Q5*wHgl;Hd}M22#aa{cMD~NMfq`n=X3KCp+hJA;61sYd6js0)3T_XCVAHWONFS4nRR&?h6Co-e~>MIMN?74C;9tZQk~S zR?y1ECNVfav;o50WPXsTym?UxUJMk&$#rSik{aR`Fju?QR;6c_bk%O%kE`-($az?S`5l3A6o`Bsd>gP~? z)4?YO+nZf=|2y@fgOl_9u6Fi%m>~^Rh+hPYWGnNixjg)hxI8~FxZgRMF7Ujo^@DT zu0BulVV4F^G#CMcd<~$et?4ipsSF|%`Fw#eX|n-)7`&w*s}(M7QJ`HOQg|){p*V5r zQ3bdTQ1`0#5Xs+9X`W=P4MGx zmAfc!5LZ#wt{`S~`cD61_1FNSR4_SrFpB#bpvMFD^6$fI1Q$|BF;^LLbvWL_cO3=~ z67!GdeZfH#tEgC6-ylORoKfVRCjNyIpzv0BZ?y_roY=^G zrUDeD>vjooTPZXC%#Lp}ntq|DwVUJD_P2LWCr>ZKY#x~IFg#B^pow|fq4At#RK_yT zAeA}whHqxhtN3!lA^vkLSe!WjEN6G`DSlO5G^&SH+ zq0vAoTuIxhp-Y~bjS?A%0EQ^bvtnq|E^y>yyR8!-zJ{(_7#M|4_P!aQXk52TO5Z{$ zCZR&#E{&<%0fdGvi?y+94Z`lGT>ymtF`=>|ZwNHG+VV1|`|KS!l5}#)P2p(cr5{Gy zEbI^+Em>t@UG7IM4jRIOf;%a9(&RYN3!XJMaQ4y^65b=zvuK#3*mR3KuXEbxjUtW$bx_Z+UmH2dpvcs+e ziIargUj6`Xn6Tl}2i?rAX`9d6gqV^kX~Pq6l6aEW9(B!e>k{N>(~$+b0!^HX z%ev9i@M@xAl;)C?OUh?V2sP(ad8Spo+bqzUDf1hhs@!=WE)K0_7(3LO9YQQJ?qm$% z3nQgDvyXLnRY&``HU&RZONW95$d`0#)hTZAMr`QMiJaY=iK(BdvtaY?D=Z;VlrxJ? z@2*Syw#G9}3$7Qy-KqJDEq7yA*_clQ?e?X+`b=wP6uPtvd#Otnr?Tkt!KbE#G@GV~ zZsRqF8hPA#JZHS<@cqqi=z^|W>h_h9%%U>7eD1AZIi@Pu1jy~hXM{Ac)c==u`a1ig zy8cM5KgTbzMN7v(_b1?wN&6pB_E&}pINJ-!*?&ne9Cr3Uj;{}u4)vFTL&e8~LxV%5 zr}MS{5@uf){@GIrT3OovSF|AoH3c;e!@q^yp9%~qXue#G|I#M4-@4vdl z{GTr2P|3Sk8{kk$>lhjQ?Vhxbt-)7oH2*F~C1Id%qQh(DjHCADqQ;@2WoE*mV_?A1 z_#4jm)wQp9JDfk#^Bc_Z7u&;Ao;}AS=ZCC*)t*kTtNga)$`^ z{3DcUa99}WX#a;GK3DZ}m!D7IdfZ^*ilt5w5vD~TyfQp6i0%3{M*Sv^F}zpE4cBui zPD+UT1_6<#{ezSeMP01lyNC2%38{)Q7<6=$-<-QKuc0Ns0d#SpL=04^N-;lQak+up zu=9~1%f9vf%#-u=up`ZGf`jX#{V>z*!s8JJ4nIeKh2NKb+$?ov!{u{WfxqCHzpcYX zBghEn?gA7q4A$AmWi^z6Yl-Ox#o21`*sfxkW~ciCRGedOkRSiZN1}m**rb3hHr~it zkhv@r$Z@9_DZr=DRWpSd>xJ;wT;W$?*$OJRw6NsuEbZ*dnA`butH4YLOz3tLR=VjH>elf!!{Zu90t;HJ8uLa=DbW#y zb^LbD&J$Q?2D7>CEvT}MIeCU*i~2i)mt0MfYG)RuaZ9nt-tm+~)5a(aDK}uf;vt#7 zu$L!v87mzfZzIDq%;OJgBBMNp&WY4-dFkEi2pb{0+3EB&7QCLB1rm3# zi#OW&)qdvULLVTto4-?2Ii0$@(jt9HuoyS!Vq(sy5So=#w^ac5Di`$aAQ;U&hR~Y?-WQ zxkKUj;hOjm@Zf)cC6zv?@xCoL(WpVxdzgXU%sIk|&>`&Q_#rgJ6(QlI2!5(E)=b?r z;v)1=TA{>i)v^JdVV41wzoOt2yLc$Gj^EZ%UDDchx?8e(GLKQhHd^YU;MAdH*P_m; zi@G;Ln0Rh`f4w_hoUJW5v=0rc!SmDqasqyNSu#_<{zm}sGw8yUQq^m%pPmO+lv9+_ z!kDehQ>5C6dh%270Vfx8=98(CPT$>Jel`9Vl?`RVG2pj4-qn0SR64t*xY!u zC6%$6(98Nrm}Ev)Plyv*(hBeEaH#73u*IG1`YeW| zvI<=zt(`p`qiVW%nX#!@Fx-Kkv$|AbqtGe8X&OZQts<$=?P&=S{B}ouUZCF6 zH84FoCq-wBoghPTo|kq6g2`)GVN@Bzi>5H+c|(U(swyFAm@{}}(Gp-;XlEY?SroWo zaC}r4xJ7tvu6Ea^W{~#uI~wlujKq$2+iv&!El8p`Jj1$@d32taahqqHimmg^kPBjFS8a z&mlK`deNMGzi$A%6qL7wybI)j*QKOz(X3`vUefccLe4}S0rnjp)MnML$^%(#Lalrc zU#A{$Fm-m9m@oDu`A0=^O#tmzO{fMg^W(|_`<1y~JC@8KVd4pCf24$H>3xsNW_4~# z>xiP^)WcoU2H8fy93jr{MeML;GHi;^%!<2XeRD>(`Aq*98pvuJyxh?U5eRt?vAgOQ zC6&l#k(Vm3i|i2H>?!FG=gQlx6=VOFNEELm?vzUT3ya_Uwg{tRGVq@Lt{tfqv5+&K ziDs1kpqsp)N`=`;vFiLxXJK%|(8`YP@uvO_KRax(kHjE$IG&hCN`=`#UVj+Rv>S3Q zz?due`JzzihdC+98M9CW8f(LD{! 
zoM#LEZ6|Ul&Tzw0Wi~hL%*(CGF!^n`)MVn3K6iKowLZyf{A;>>$9?acNRyW0Vf(Hb z`YR{kN+Q!ATKFz_Dcee%NHZX*;cqID<}ENE_1SBZ=tq_(1>n(C-)FE`ye%8#~A%=T_-Ekx5`B6%@po8_Z(~l~_3ATg*%Vz{685%L=ynfPD1V$X85Y7O zJXP55FfI~x5_w-yewCs!=?fW2;z}|rk|gnx;*mMs6$97hGe#+DD(XsFE>lkZk16+M z-G({UL}f;0PfHQycjodB&|6OT^%!LO-`eK|(EG5SXsqepux#0lgOq0z&lzJ zqsyS%I+s5Bp*)G&6>7SAdCpCKn8LD^bT#e4xUr0pR-b-V0+Jl zxf+sAJ;Z5FybU30cBBtq-lsk-t%;~Q0qutrfn_!lJykXK;bjYdI^lGC0g9F}Br$^Z z0G1I~-|Nxll4APm>=9W|JW@xI<52#RjCJM!}E zO+3q4ey#}YD{jys85U6tTh4=+D5?@IwY)oWiD-!}lt@$j95w}9#!n!|Vg`Nh?v zOu^nUyLJP_#L>?lT2^D!cEe$eObCHcNDhTmp-+?igJZpWyA$coPT4S*yIa9`!`zPE zqE+cf*Xb^~ebeH*qq1qae5FdW;|kL+0ER;X3SO`{u@_g@sj}Zfd{%66wJLDdrOy*JTUC3X(DKDZFjS91b&#G_f1lrtt6^|+(b zgVJZW&Js-&WM`U2a*0g#RmC1l1j)yOEi(=(9tVWNgRw-?v*8soFPZ%kh0&qSq)I%BXvZG*M{3Mia}AS6Rma!NYz$`bEm&uG9_ih- zL^1+dBw~!05l<{Lf#}$20u^6m%LYX`igT)0Wq_Lb8WtrygAR^A(yKmJo~0_1=wU?0 zJO>YdmC}k;5TM;Dox0*=MWNy&^vU`l1w_fgc_BS}=A=-47G{1a3kYx#i3^WGgYRU# zyfO~xE#+-qb$(>PWXbeO23u2xXpNmwdDl|MKH#l=Uv)CJ7@hD{_RSFgGwDI4Ojw(* zhkd>!*JcGLlEAI~vscg~1ga*1r^A?$o~ZqoHHTNP!<)ke%;`^S<>6h1+tb$8xXM|D z-Kvv|iUs!h^@w!|j}Qql`Y66rPP`L-4JS2)-TD1XF^u(-h0?};>X4s`zH;dK!^G za0KX=@PDh-mJbkYsa;07tIQxwV!J%Z4klQ@))e?k_`YvFvQ_o7NehHw*cb0k7Gi$h zw`wMbsYKpzMpTE#cP5}tVm4uq$9LYNE%LoauQ8VIC{ zU&hhip{&^j%AdiyCYu3F&)Lb?SZlkw(Ay!J`c%dkG!S59!)nYAnBMLTzT80&?kD)X ziaxNmGF43;S?PZd7UH@R=|b%S_2w<#K|KL72gd*+)(2MvBMKkwq< zCevo)=7H#hdA9fw_O6Sh$b+t;0wjjwFO}&5Y_Po?nhdd?B`ST z8_ktURut^C5RQ`{r}c3j9&$69>n56lM>y6v0fz|TN^m;dhnDL{~jZrExL%0X|F;Wj31+bF0eUc zm*LkAhZJ`@@_J?eQnCxy8ypAj(Xmfs#9_;`ON8%(VGGlnBNjnR07Iy^^tG?V*(dau z+O!%5Oj1X16iA?D(xYJ%wAH7tm}p`ML@F!)`HW*qw#Gsly|`%7eIyEtu#SV8N{Mkn zC%E+pzyzo`fIJZ5G{z)MiBLbes$RZv$x44Kf6|PBEN=q&doqL^g=3}YaZWHEHA*=n z|I*$-UPGRQNKee32qb|(oN1hR`P0T4r9Y`}j{Mk9n4Y<$qbN1efl)8jreSqjPoa@a zM2>u}ef>}orpGj?)X8M3WC<+0Aqjw7ZTLx(8UF-9;b)Wp| zg#M1%g~d(5jZ#AgpbvAUpkXY$2|o?ig-3Eo`E~ zd;%;gC1o{*_67qAf0E|~BX4rroEx~y|L5fJE~ql*jL5SkACZ~tWaTWB_?vc{{@Ym% zDzhy*Kr0}S2e$u5mJWw9oBoi)7G0qGL-acqUj>ks1-^CHJo=zIM}rmz?5C?KKpmjC zJui9Bs7}losxoLDAS@uFOjM09`OaiE#vZtU&Kx79Tz(mOoA1nQStKnub|6Mo^*fm7 zT-PIN(_B~c^c-qK7@hfMn$GJBv>GRA8vu49W5aCMhOubM`7`b68SUy3ExM~+Y?3+q zY?ook@*r5I!B&rP;Nk;O<@tBb)ywtmkZ$u0j0o#Z=W?G`k4{lmm8?B?c*Hbc`A?Z`grHm&^9N03t3VK>DZWPy zh1u@hvD6Ma6<>DxKU09bUYfhUdB42CqgFJz=2S&JYe{<*v=?>6t`7bjnE&YEBtsm8 zd}U@Ud%ziDHD__-;gE0Q^^l~ib7L^Bo@>PyIAN-Hy{F-NYI1ej7Mp=l^+TodXH{fY z$UtSuQha?ynVW_H3nMHgSUESP>63d|y4wIgX_|@Pc08kP#aF2PGUxY87kEb zTyDBfK?*_2z1aG3+ue$5q!`(r`yH8~%}|qNcGj%O(6gD7MS9jqWNf|=#MIvGT#>(9|g*?W0T@IaQtylw@mQTsvwmB>wSlP3?KE;El%+w zr_3S0V&<;cq<=oa3zPwNy9+sr>}r3ILDs9uP&4D>JcfpYoGc`!8tt9%S-``;a=%<5 zLC87nS9J^inPf+UX+?Z38=2nX*wEmrwjjJno(k1-9_q7lr4HjPHDBsW38|ZV2TyL& zvb~A_oy#({D$SA-Wgxd!eQZ~FRE~OXr8=i*DlRq{Tdh}ftxn&WpwWYlzI`HogzKAU zGU7|V$fQ0OwCh_O%)JlQT&mYp2Fi1U%^SI4c<&N2Bd8zakS{K}Wq`!i=Z%WuM<=ym zyF_6kT78Lux1h&q%g>|^laRsL+Pk@_?^rGoVySAsEL01A&m=0Gh}D0+q2zA0 zWSGPp;uM$p#uaW)T~l*MNp~ft3rNe0X%|{6OLGodsxMj@6$KWxj?2Ux6>$ewlx~&g z7-ZX}A{FolOJelS!!GCsN>bD&OifQ+%jB_ogQ|JlTtsOf_;Od#$tG>l1ziANSVTu7gFFJHZ}sTB4UyDGIPH>CFbQA& z7Roj3Xx}gzM9s~nZQaCel$BF4CN$o3PP;Z1X3WeD91cDPoO}e{Wd*PS{sx#e#nJU0 zFca)fG=%tvfI>w=f^DBNHx?@;`~{2zwA3-Mg(|1;;r#cR5*0ss*`JO+17d?jNy|3Z zhW3b0gWgU6*nl6<)FD+6=owD1h8ATRE5Ohfqo4LuxH4{8e#4)hUHVOIE5LxJ*l$$d zpveGc0a`gPL+YJ=@cMuP`o!Q`B#pBYw7&?0M(%uq&{v_n{Q*b+6L$RtEB~ThjPx}B zpq7}Q!`M~+fjeM95)ju zkSYz^Ojg@E^)u?cp7f?_B!`u@M41}V?ErHqChD#fxq1SouG_@@Z<6_MM);K?S~}`K z0e@0R^WW>a{=sN}P$0j7ot~|UwY`W}*3O{^{u-n}U{x@vnnV$6DCH z#K`yyFn(3@*Cl6fV4;Y^{7?BmhVZ|QKLh)kF9x(Yf56^(xTtqWA 
zDNiLeZ&++LuP*!N3NL!m9iRogS@szPqiiB%5rnd}){d_>4-Y1)gS@(tHKGJEwYK^w%%0PQ*a$7%=H=w(;p6M+qvmA$A>}3IVW!;$%VPlqzW*I@ zBuXB+I-^%VI1|TIsPwcFL`*Xjl}0}Mp2y=Q1Pm7xuIIrs*-t9BN0~*nsq0!q>!BgF~S3Sm{ z{y>S|1R#KdEUWXfk_`wO7eJqI+;bNWC`A#^fh(vT=X0 z`*ZJK&bx-f&;>#X1InSGSyVtQ1fGZ{tQVdAmmfxiMqVqYSFr_iAdT6kxFxRS3eDg2QtWra!?9s_;Ssh7?fX;Q~kFMT> zoTR|o`Nk4X(9r`2YXeJ}!dW63+c&x6PQdzmvO6o)C)FpoA1wkz0G2+{0&Ny5R9a<@biOl4OZ zbyOU4_KK)+Qk@yf^>Pa}X^+70-KMJqA#G_e?6`0znQc;cy7f}3xK}=s3d0R5!=BK* z_^GlHX;oe`K62)1sCZ1&0NQr}ycj?i?a()mUq8$+m|3$V#IeE!V5hf;Gh+Hhj8OW$ z8^SSsbjk3EurSn2O7CKBYNm6A6#SujK?An0ibAJzM@KPW3*lhFZ(ZCXXS-Q3VclRS z!Ed2@$|q3{+wI41HYbfuG1FR%S7!PbJzq8#w?uchlCFRdcATy7^HJCk27 zu-+=%ugmQS?dKLKU|A1^NU=?{J_-fIv|2anJ{nFZGgLJ_x2vjGsWjRf_2!dMH8FkQ zRtN6^l{RzGSayJ8@w3-1r`kqiVyYu3^p|@6P3P3~(zL$cHgbUNB;pVl8 z41*38qJaXkY=|vlM*C+0te#^Qw@8z=m(+~>~uezWm`a7uk_hw5WPQ%ivY z@S@z}bE;sc_Eu+@zwk-ro4)~gE8m|8=C6o(EaT|S?@zwTAwbh0yIj^=sWqZV3K5Fz zg89}P9_VkivUZ@kFc!?K^#Z|rj9t2A(w$TmZjeY4JZcmDjk*B5MD4XGN@S{qBHDM*&1&%Iq>&d&_Qf%yur_&wZ3}`v3d3sTFYEJ z<=?8J>S&$wrSOR%+IV*EAm0=N;e5P8_jysSSFHInBFA!oy=2?GzU|v&Vcj{_sqUVc zZ{iCrWz}ZouXSK;ws?7&7E_;B{C?O>$WDTFZpC_;*|pK&mupsp#fKjX;OE$}#|!$V z^fG;VBUGt2GRmWBs;wgwc~u0Ok3lKen;hLCkA_<>(iq%$#3=GU13#Q4Eb&7pgy$D3 z#YZ@-7`KaYO@ZJp)RSuEED2r`ekg`@0dErqFEgh7eSS`mN_|*UMzDQ8bZ?FXUSwDr zzc8>BS_msSYt&YRJ`}G1fihx62tIxjqb0)PB<40VrO3iP%1T2EUUR(FcOSyMtymvB zAL5HOz%BKWN)^ZawN#xE4DAt1{RN`g&!YtunutD(=+ny5656xnXcVw{LB9HouG)76 ziYybJ<^kz40s!b!>&OcK;BajBV+!|i)Q`6xZQB9e>*|}6mjg=bo-AtiWtlVXbXUeS zQ|Wx8^4|~=uWf|q=CGbKOn*Y$hNWD;;u^l$1fxDzmlvM2belfkDhDDEU&>spwxzX! z*4(Z$TD)V6dUfyjD%(zZDqu4*)sF9s;+T0Y} zDIvo}FYrEv2*^@^W~}C#*Z_QtU6*q5^?wg`ZIlIzz?pUpaD**+gKU~>ZUc|vY=1}= z3T5LS+)qaaT1evPUF$AEW1Uq;D$)113G`w;GDoQu$MFtK_08*wDA_^mW*GEN{Q|7Z zje3u6%XB^y`DE2lHwKq7nGAAdc;^&cc>i*C@R@7GQGXC+-%9!R22!aLPr4-2vCm(@ z%+w`oKQNHG+KVIKU$4uH^PR2(a6&*R{2iWx-M~|#!P|2fo5z`jCWfz>Ij|BFmNEQh zw{)l!$Vb^%)eqSh%^xu)T@N|jxiU;n7KO^grSGvBf>yTbENTrZRJ;oKt$gG2pYE{54YLvl5O#&PNnOCbt;^6 z%n~28BgA9$_}&t;U%d$tiNQ*B;$*VI>sM;;D&u8bP;2Djs9C0X+hC(Skruh8(^jg@ ztV*m-#h5SGzgHcGQdxQ7-&4APQuRJwN^(4G`^@>RYW3KCCcog;BP(f3eP>)td4V>5 zfIF(NsG_8dkBTY?$ADMnnVtjc}rAbnCN~Wb}V|Ps3vCy_~b0=Els|*08g7Y$&k~#m4m3@ zR?R8cQ_lYm#iOS#K}s@&xX zCIzKi)~x9bICfwp(BOVQhZe{;w6}0)nh)#Av|L(kmdfmAgq$W@oFzrFwh`W@Qpe>y z--S&<7Mbqz$u3rtkV%tkSnGd8wVCiG@Sv!V$17>lm<*50gQtJLaUpq9aydFbhZ+WT z0g*c~GI3DO5TXH1Kt;|$GFH&USdVY~wm!+dYj?0@bV#{PIo3MTvf)Z)7j>+8srW{`Z2BBF45qqFaX4i8Q~qRdy*DTI zQHGYBG!5R6gYkEtvwPQ(&h0XBnRfWPr;##zHSzRxaB~MP$-MvFYg{C78AKU~bfzdt z6L^v~(eLRfqy^s_TE(GWT0l(Y7M-ed5ylhwk$(il*XXUpVKDy((|0NZoQ|Pp18||C2`=_ zJfw1<&KeQv&Vc`pAxFvq0<>mBf#$w~uy(2}S`gBCd1pJx%~4I{8O;H@V$rz3Z1zqx zkWNLPlb(kQhb+JVK$pN?*8!%d@;d-DEj)_SAmRu9jYvmIxR1ivh10P1h{yM$$({)` zy65`PqIYlpcK9Ung~7`bBFvS-x}?VYVS!0%elC|-6UY%L9+!mciRYFjFt*PZC<7xY zq9t=*-f^3c4YZh(%X-+uf zkU`GJ9o39s07~0L?@AgK3`06?S)X24g;BMPF%8gIFdR&<<`^b9DMaFHDH38H@F>q9 zT&_$^GKPq5*M>otdn0N@tbd;@TLW8$rGP8}5anW<5#l?cM`z|Vk&W$ZTbW6k{Jnc6 zDbui55B zy#7b#IWn~_8W~K}%pV|l)1!rGfR;>Jg+PA$_GSVq3bAmS1nuG!bl@+!7q;Z=n{4_MPQNqNLFoZDKLT6q+4=+Bhn>Co!=AFF^yv=wA$?eOB;&} z+$K2Dxb0Ev`L(EELIg&2YUGDy70E?3gltDJA?!@9bdG_*8N@co!h*4D2%6SA+FY7U z)(kTSc0p-RbxZ8(`waCGwo7-Ha!Z%AIw~Md!<49}w}x3UzTN4+9G%Ooz8xG$)Jo`f z`sUCbcx7I_OgqZC$i`&V;1x+rJy|v|l&X_i%K?FFIbQWp=(P*J4VT8~-^rP( zaF;-bWGOqzJ26Ht_#xE2lsm;n5^*WI|d$txiZ9ab-#52oP%2tZJ5lCd(t%UvX%Fdtkws_)q1sMD;XF_&VRn|8x^9a!vUmN)h?kMWw0Cq5 zmbvW!&B5$2r7nnzqOLP028KlkqQe)vSHjwkW&h22ui%$WF{=oZ#{A$tNL|0wK1lqpV}$EBz_Uk44yAl2Vb5$unIcJ>aT1lj*}dd1&>d z-1$3gc(q)!$zXo|s!ir|=OA6P-%(CK4s3=us(B(IL$(rp^5;=XDU{Xc_%fbnPJ!WQ?eB 
zWrVb0i0h$g^Wl&3^w{W=xWQ~aHq!EUqxcwmufxZsA#*)7puFYZ-*3{a9^_Q>2H-Im`KFeZEt`iE3ia9FAj(t*^i1LA3S0c(>hHlwL~O6` zDFaRK!!f7Dk?HIDNqt?m`sOfT;h8ulsF*cxJ|CPXhtZsE2wz~wq6#@va_%{wGOVwE zi->@?BU8!OM}7boHKa+pPTmfZ7f&b_H!cp3P7BM#KzCS^ghs_w&JYUL6C$%~JS#ub zH3wO;WSp)&vng?*!w8kuUp{sTvLQM~I13TEXN>{x!zW~omz6`m&W>4{3@0;p45NLP zB2KGO$Y{1ETIQioM!OTUo{mR&EqvAN;3|+%0W2{G9!IYO9Pf>SBAh zT5Q?<0uU*@O!*QoM|!w72ssbbnOS114IrnJKfV)UW>B+{TAFHAfU*8V8`F$hRJTO_0U9wTMh z<7PpusbYNOls!}2KnAK<|Vg+ zvyfok9e-qMCzo7#IG zkN^q;5pb3khIsEu1FFO<)t^*~G?ha>dIxCV7!I}kIh@&q;Wv^hkDdL3O3g{OA4Eq) z?Xd^6*OS{i!PS2ZfL6xtDvXuo&um|(%C?18*3cDbN)nVozpEOQUS${9n@ip;v!5s` z*36O2a*0>-ntlR`4AXgBka@mp5u?)))G)NWf=vj;q`jRpSB?&LX^wGGz$j($LajCw z$tM^eMzyBnWWe+})-*P!dXc4Ep{R{?*F5vD4+kf2#oI_T(sU;-@zm=zP4E?P_iNtIzt`vc>XAbWql^vb6ydy1O-?7*HD#V zIQ5M;xSR2ZzE+hk$W{T+uN2}|RR+xjdd~Y9%4m^Y#khg29~yLfQ%i~ggzrUQ5R5Y# zGASG`lbpaeBf@8|OC;c!M3VhQ>}IXo(3XLgfQ{UPzSd@`^Mp!~lvI2vr%d{z*`7=s z%|nM~*zNn1*xG{%URqu_3{|ym=i}=fW($r8=#*{eGjl|Km7nimXT1Z9njYt!A8#jQ z#BLp95`{OBawSp2mEpd)az)tvQ43NtlRB1$8wUPj_pd4Mu7QPy8G;XfkoEr@8Ca;X^O6>bpz?4sSR% z2zNbx%DHk_t48%?KD52Sbh~{!J7vE;zjZVgjTMqSW0){kQ3IKY6&fLh@A=h7ifbqz zYm~cG95(SaY(1TuW&4?~DcKf)a(f9Ygxud#gD&RlFah2aTp^F{;IDoH0iGX8wVuL) zVc`fk(QDcZjV=}^$(>#)Ome`n8SP0Ft2J@P)q4!q7|HqhNzpp@I+Qc5M4+zPB7-af zW1`%xII=4B$ZS-w*7AmplF`}>2aB_p5hH-}r7%w>g=kn(D2C~@BUvIm(hpKYJ|&<} z`w{yr$>!w{+5=$nq$~K0Nd!-qn7L8f8Wcaw!_9YK<*YoGq&I^u6g^P4UdH>8n3^~Z z!h_}4+{B)ABcKIu*cw%MIi43R7EsfuyrJ`KaD4ZK@wi>+A!%Y$-aR!u%59+bR2 z`FEV(ys`qrq?cs%VOn#H>6esqHR7g!jq0ENQld-MGijKx8FmVDN~=X)Gi?rEZ-#dR zRl=ROt(Wz~978)+f5o$mvCsBEY7}i5u{=1N6RPcDG$Q=}dOPc|sJ^X_&(Pf+GANzH z3^h{HEf|D!hbWB_A`Oawgmi--4U&R@bcskK=@3eZ)KF6I4E*$3_qq4}^UlMZIkV0_ zyVhF!9C*&>%fzv#dicHMufq?c!+@1FKZ?UHKS=XT5u2Qn9xTx=JF>W@Z zob|RZk+(b$hv2cfLHT%50AXTNf`N7+CdT-hYu%^K%i@HbO8)&s85fB(CNul(f~Tff zj6TNJ5`+%iP;yMcnyw8RcnnSCghts zMj%!iG&7>V!v|jstDyAnor@9KzBSg10h5UC$=6DJ`%TphXkh;~Md(8j$#_pl1nwq! 
zexZ@LCcI?OeDUK<5*;G$!O4S_IqMsxRCs+U!*7v)B1|&=xU+}$d0zVdO5ytG9z?@0 zopLgxQsdhJ>D`aIaVP$h-3{R~A9Cv?U>C$}x!V>W3*WKC!_#b9VPRglG{G7ngGppQ zvkVmgGL^Q112z57#4xVmP2$K#FUzv?jR|>RE*#H%+3~zr${OE>?_`>GFNe?W;X`}T z!x#b2lhK5oc(al447e{MCy7>tf3cvdlGTffdPJ@^F3qN{q1GrkzQwug!fr@A2|ZD; zK}S8nIpHoaFU9-@hSDdOOu#PPm0TAdl6_Y{0GA1-`ieN?s--YN_wZP8jfc2be#4bFSxZ31_;{I}zKjYi6FO58!Ra7yP zr4o>cVe!Y<#72WJ$Q5H$%)(&CIY(g#B0&h*a|HJ+XB z5eG|3w(ZRwZlG^SCU_hB?y(*Xp6-9*_ogX($I^du*lhbE_kKCfcog9973p#m`Ys`Q z7+M%r+a=;&Ed_+)^G7qQpuN-FwG{G%wbO3h(>LBGPrEQ3UETypuo7_l4;lj)a-DA5 zCY_w{#CI$>Xu=XP1FN&{x^qro?S8U=gL$^cZ~eiMJpK}R+F*3 z;T(C&os#@jK4*zp_aLNyRr5Yk9n}sq_Pv}p7*~y2>j{mE{YKs^&NpWmMA)}4KG(_C zcJ*x_)yi_|Z@0mWc#%9*le4ZQ6?#EdEHQpCpo!JKl4NFV$k=5)M@o)$qup@nf-B%- z<|I+iL!waGcZUP4OGa!RbBH;F0F5-j5X;=;+9glMy%?OznTW5@ExZD~a+>QhQvtP|IL%WsVE@hBUru-*F`pBDGA zoq6lW1o+E#mT{Yo?fTcAO)R}%BLC8nT=w_$D$V)ht{wjJSx>e zS|6Zf!M}}jh;ax_kH4HC+#DJ>;Lv}(r11d78X5MRK6-fjogt0@rgBIwma4Vt@(8Yi=w0E$* zF5QRLq)DaRzPhG-tg{m0dh45&AmRS3C(W^;3F$6c`WN}bSqWdsb_QV+@_M{iZWS9^ z8%9qY+KK0j_noHj8|oiRwHwblY)Wmu$DjbtJ`TZ9lB}0@#;pxI3Y}8iZ8>B+2t8^O zO?<%C5@uGANM!{JyUkmHJG0iyUX(ojHrKDua>~jczb4kpfASsUEbA;*nTe6uNtekc zyp!{pUi?JL%GI4*DIo7NeDP264{s7?rxUMZmdzyl&8I0inB&?kcC@=QNMdt9iH&`A z{2w*|KVnR53EcgnF}vuivOecv(yK5x((wR`R-P>~e@szY@x1@6zDGj>0 z)fY7Z=;Ox%YrR~b@vNXRCPwCxW|EeT))B4ARy#%M@2p$z1g5t2nd?*#ml>BCI~GqB zPFdHd8Fzf_Wk2x2$qsUoi2l~`Rk^2U9E za0_c=TfFj-ElX3r1-Fr6Npr(<-`6QjsvUEjOE~mcdT*{&N3M74=8}rQX_wF-kRiJC z){_faw|6}Rntp1xjnKr~m4VLQ81T1-14x`M1Q~@&m*U=IxL&{A0qI7a)NMFDke4^(@*vwg%>7YENtey4?$v zfYflJW}Y{}7y8V;UGun&x2!S-*sSWu$o-XjR$)C$;jaqIRF%tE{FHN6b6r3b_=up2deVT=P`u-cWKH zC~I-d10^tN9yN-WcBZa6`zzkA(6gxM=AcDfUhCW@Xtr@TENfI(4xA)2d6nv>jjN8m zDBE##12RE|XEBm)#GYAcJMwJEu0V79$R|JF>UIJ9ZL?eX3L~tov2_{I?hV3uESpod z?|eV%(cig7hE6w=H=8Uth2staFmbq*uvV#{)o{8!{-zr5I$AX%<6?+)+y-yE=iTi% zoFf9b4d9t2C|PRhbJCT4RAH}1tqW72x!J;4RIIvyWw(q&rj}=ak|j4(`MFL{1Jr*V zBJRh|N5H3)0S^p>Wob#1SJ{&twFPFlW%J}zNUz40%pRxGM?>`bsiLSbsoy!=y$ase zw_%FILFgwt8hLcNL2$$M*@a6bs)k!#M{DD_1jol~wUxz1iw}ke0fhQ`8HD?Bfh&`j zS2I@$ue1fz;vp19D+Sp%=$5W$LNhCRdP^mA62~x52Kt< zGY=4l7}U`LDvMI2j#nJvg+nf%tzD3$hKAg=9(HzJ!k&-~IhJS+zFeM*YJoyQ0ue)l z*($-tNi5B9yjy-aT_H)n)!X_V&(IFgfptx*FETHB+3KEWev%xnH~uDzbm|KN z1SbxVczMB$aneuNg+D6W0`cg@Aq1JE`(uvo3PxQR&97+<^pmG983C ziA~&bI)1IR*qgo|mN7+kcml=M;WIK5I`XGNWSLE$a6Z*sq!16)%%cNr;gKUw!Byjn znGVe>^-M=5XcrtflL$wf$%KiIu*=EcSp@F{8lH%xn}_yG16nBD6t|AV5@py*y^4(0 zAoZ`h?--crx`A=^0JP_%@L9>i(BQq4dNcH^#E?nGCp+Ab|Q#A$Xz z?G9mVyKRw3+|JYcV4{TQ=va+tGHSaZXopFOMAwIFXujYF!ittf5}32)ysv4fOoPER zQ`|mv>CMv$s^jhw$}tX+ln32o7N8q4Se|Ne<-GIn+0wIK0Pg|83pd=QnA0;>yQEcI zaO?opfG=nt@HYqn8bmE#fSIS=on2~@GC2St!1md(eXJ`WCTWfsV$K>H=5{*(&H;%` z;se0YH-HS(gQ;w<`w(1N^O`%`QcQ^DdXoH6CeIwgg0wK3M|o!DBp*kVi-4PuVTUK? 
zX-B3r+Z!>#!E_6Q=@Hdq<*l&`taCn&M>tzVDgY9iZn6GtF(sf4nhm-xunE1IxXhb_ zs0VM zZTVBZQ8_7U=a>Bwuur)qQ}h052Ym)Zvk0UO(w3vy7bUerp?kc%NmY(-vlvoFk%48x z)g1%w{}mI4h)zG5obaZ++|9wheEO42RvQH)s( zT8DA!OP=s-i9gsE=V{$u0s!#w5#nV7+x=bJ{l2sRGmBM9%Uou_{Qg~c2^6V!80fjVndq@v>tg@+-z1<7= z*Dl3&%>pd%Ry~Si5&pX0ygyH@@SMK%QcYBNM$0jL#(FM%TI$n5%>LJ}8x<#;_m1oq z%>7m!DT17E=D3D4a}=3NmFDplST(K~jIN~;rYF8`Z9Vq>Aopfs_O#op>oiA186fe+ zd-Sv<+QO;kls|I2jp$TtM*oOEo=v{XgD<6xWQS94Bz1=Cv|6XKU`)2TVfw_*wzAbH z{C@PQOsRH%8mDL}`t^cwDGBj0AF4{7RYSOrSjIcX6S{SelNGh%YlvjV6H;)X5*BZ8 zGnGfx)d0!j=3BAK^>U1Scc_wr@>^^&)oA!IFy2J6y9Y81aP?#mh>nWm@wzoej4rV@ zEp>`7`6$*f*hMCbYcP^xaELrMDmbv;)!v1=qo(SLy?>Fa88|;$RJ~%Z zm%@;a*3qenL4Sh?RXYQrQKvW??=>HsiSk)aDFesU;6+wpkzSjtN0Ozi*A${$jZ|gl zuC9RA+CI)*a;p;Cl(HEftdF4$c~hKxbvx0iikFvASzNy`gyM)e51V0|xHt`m9$Fki zF%X}B-yzosr!mA6Q_$b@G9#Ytj?Cp$?WgUPMmmbphIe(O{j@vtGl+_>hTMQv^>H)j zk>lCAF*qSYwHGNBQ%04;h6>ypG}{+P3QR<{Zfn(lDAr2!E?2Rw?%`!Gn9AB|gU7hl zCtmIdkaY{g&b+!1FS$m3k#q`LZE4Ew#y9Ejwg$NScqg9EiFFR2BFD~Db##@WhFAAh zV8`|(IdAjsL|cnV@t{!kjub`B;M~jR3BF-Hk&4z;LiZor$l&io^rmD9 zJmF))8X1=UvPJ?|&1YT$$EwmK%e1R6qQAO#-BG3m=gJe6j`*+_@}L`)U2+;E-AFbbbj6-N^}1QFW?Hi*wxk-{HbEDIER^=MoP3gw`nGG`opvAN(~)uAL+RTo z;*$YdOvt-5y@bG0Q#PO$7`j@p$Vpi=1jCO75fF&Pb0P+6NUoegD+yU}|cmV+zOz;P&fl^-k4kY{| zyesfSdhM^z)FZ{rekku6MyuNgfJuV(B-?q0VM2{wHAYfb{Orvn$Z+fhD{`&o$6Kr-@?u9Jdri}1= z{0%;9?14N&>-yx2oopM#3j+lbz z9#3OmE9`vq*@Gb~uE=tDE7FxAOS_0t03C|H9POqYW~Fc=)g#uub%Mk^@#RhaS4Ntt zP^BJFln|MV#b>3YZH}Jot3?`Z6>ro|dR6DRPvS#Qau(cF^>mycCT=Z$iMf#~VY#(% z2V!Z?q%0ZgeY3~-WyvE`M(l}=$-cqSA;#MD{KjCCr%=P^t`V;O*{z3$+Z8WL5JzZv z31vHfaU;qzC~qO%2o)0e;lw{x@qWcUeh?t%w2hXmg4}gEZfR#*(>s4*Y|iZefvgc0 z7DSRY5Wyd0&39S7e@oWDz(Qx;_=&7JM{Rx~YtGC3H)PEj4e~P{@^d8rk7&)`Cgk4& zH3I(-LBCEhNC4%8|7W5`SXdZ@dW%3HXK&O#QaMpXt6!KNR1H5nP2exe zhY(c7=QH`ApgX80*teBJwnPRKT|ySgoP|7Mc8PYWO%bm}NO)7KgsR-AMYK|ey+TM_ z3B2K3;m;i3_(Y}>`G^`%{EH`O=;-l<>^dN7fY&WDpFgwhtRMem8E{gdVOb|JB z|8j`IQd+|XD5()7<*9pp#Te^Pb7uv&7qR#uiMT}q_Fq&^tLkNgoZ z{7GdJnXca#dpy+9v&lzJG`|1{axR z((-oM45Rz9OhIlk&$sMc2|zQOd=e6dyja7}oQ=FJSZvQ&OYk@xt1_LzkVI!Nq^iL< zS(xAfD2b|n@@Q%0t~C;d+$iHs{4Dv3qmF37nl!$b-XKjN0>J{f(g-Cx1UG*=6lTz9 zO&UzK!lQlGPltjbZx1FX;#vfiN>EKNuIZIYb&kd1OiYLP#ti_c zmA|+QH97x83j}f|vWoileZ@vi%kgy;0#~OVOFaDEU=eQzjsZlp|b+$!mnW8*} z^xfI^{P6c6KoXcp>v`s&SU^o@2Xmy>=+8p>-zmX!Xz>Rmc)k+<%=+;m^H&rKId9;H za4Axs9~Do?2>3ax&tm15tVVVNHTZuG8l}?eayCR&TMHCUc(z*pkYaU13Ag?nsXtGf zEK=qliHw3c%uQ{b9Y6wn0)l)35Dex&Lw(rWE&>S$wY?!xEbY|VLo?Lv;; z!p$7{e*^x9^L~o|Kd-5>rbuek!TM|zXX80@!q4M&ceb>`LoS*yJS1=W&kH0hBm@-# zS%J=N5J4e9r0-BKko`Y4Fhmdus{UpJgHdeb@8tylu)$y=zuO>C0o020%efFi2n-pa zzu3SK;lH#Y3Pr}(ujOD+=pX%qL4|*B0}K-u`D1J_VHh$1f2mIZ41xV#4g!IS{$3vh zA_ztX!Y}7SAfnK}*o1%YGg8M`2zhP!^<1dPALE5UMgFW0D*DHGk=^@C8!%LI{niHT z4__kZUIdw$zm^jeM$*W?+eDy0*<77Xkqoi(Sr*-}^|VCA6)N$rJ2<$4AZH15_FdWD g$^m(yK1=x6pMk6fo4cHZX+X;)1Yuurt)Pgm6yp&{&JaY)bH`swy5z0abMF`quM} z?bpyvf2~dl;t<#yjmZ$W?AXVOQFd)Lzb6Tm*%Hi4f*HW9ran&$5^@7OIF>Im&W)gxATi#*K#$Q zz6KayT1EWnp;$d_MUDki%8y;w&-%C1A?8D9EsZNLjgO1Vu%}PYPv%*RqW9J1Pm}zY z)wZtB>m%V#XQwKAuV+B+ckG{^A)lX(Zzrz{7Dn&7HAWsyM0G^wO$cXZJrlo)QmX}P zKW-GuhrA=1HVuAn=nOC^aj1mgS~vPPEKlg~7BwK&ZunA~JGhYNl#h&E>6CDD>G+Ks z<;nNf1{yT#$zQFevEBFKD8PkdZkS{8Hw^(_J-WO`jVAo!w0XB#$6ab1tiUz?re{x! 
zQoowmTtkR`+3E4RbGO&npU&*p77xdC1*oG0a`v7wcCS8aik81Up(KvB zA*@d9oE#s&x>%2^l*YdXmy*NS%~mf@`UwuD8=ESlwVJi>kC6#gRuw+d!ycAhKRxFP4Fc#6p~-WBn|FSl9}ICtg+z}${YHYfX%l!#&_dx-D*ANZ* z=C(LE=}5l6=odSv?`tHpe~}7oFn6aHPkY03AeuRLH1>~x$_6-r5$Jh!^9_GakOBR} zp5v)NRPs7EYgm7nP`+kZS{!P6l6CCEKdn7n1|h9;=}GKMU3Cw=Ha$5}V`e;%T8|tp z@Fxjrs9VSrx1<`HSHQxI)s~T4)xo%3URZEktB^nU{~m;-0&HM8p%oy$H^urz09%kb zhHCVXV@1?bFp)?3c*b-Z9xH&+nV4M>f>Gt^e@o{M`3u5goVe6sK zEYI##zpiA@8zJm7nGeD5!o;Yc~F4d#@f&;`RTL5co6 zCr5lwj6<59z04iT;-5BqcB^#N6J<|)OMMq*&8N5v8Qz#b5~6{phlyap)nYfmmv?&) zOS1r(mT)ZYFY`F}x3Y`<=Wz?Y6VL*^G4IF?uZuRK$eW=H?5K1=Av^>919%*BVM==# z0)&D4EjQ&BTO$hbqphK=WtU@@BV{Kyf43@9yct3%pw8E95D4ls!VjV~u2>?iy#*`V zNFyNS71bWzXg{%yYVqn&w(VsRaPsItLxRmE`ZI*H18w^zLbK`ceFFq!?w1&IKo~@m zjK$K&HtNluptqrUk(u4E{}bpEg_5Gt$BUt@i^-)%H!#Z+KaH(UWig*ii!%z<{**c) zC&0EXbPWa=JXDJXcYvWN{xp1(wTTNw2xjHSl8BntXooC=SQ@8B`kee8L+qXE$U4ni z1!b7pr$H`{$B(mD*sCtr8#0#lJH8CS6~uHza0q=;_H{!Ou}x>tHQ7rl0h4MyGh=wI z$ze!lAiM)ZUm~08f|5a2yI;PZRu*FIu&5YO4ZD z+R}QM6{UMG3N4f6QK#TcmQ^e!>AVUlfiWU4J1P)4Bp`5-vjZH08T~jBFYxG1{Ho60 zGU%S?E{Bk8wn7K2n|a$59U+*K@(fm~6S#nhmINDwD@jL!Bffd!h`Db(TwH9##v`P? zuL}>O_V7H^BaPhQsbx!UdSQ*5KH9R}f zGqTk3LnXA!$RIgQ&TW2Q1dIbmXqsO>=R>udzXspUbbEd+48ijD*Y{C-nxbE34~_+i6n_kLqOzTQWSvg;ap~;!35WpvtvQNg2*(0{U{eV}TC7hsFBI-pBc?i!|Z&HV#Vf;9b2Gh*t z?^b{OJI)`;g|TS}UpubE_+b`L^5iw584eLt|#f#)by9 zBLTmAv$+sFb=+*{+o;Y@!Nu+3lh_uu=U(V%RFga?;vjW&TmWaa`MzcGOHWb`t?+{Z z^Iy$9-WK?RGw)cBG0=iL_M8~o`Z}|HS3=e%=1Ul4G{TIMhqXKoptyRlY6h#v8QCCA z7}7Js8{eXSS!drR$^wxas_M#FGz)h)S;LIAAkNogy)WD+&a`EG&=6m7lUuL8cE5_c zYizcf#(TAUeeM9u3L?hkLg?hFzRhRLB2w<-ND&Z7B2k*>#CtG1o1{PSphhjp@A7{$ zC}k`{11M*olM!oMDq$w&r;y6TvgxmNp)_$rjFBBD-c%G})4qUCm&Fe1#l+gTM0yQVErfz4}3Lh zK*F6QfP)y_5(+S|p_4i_{a7l>zyS=*V6vPN(Lk`9c**y}xabFfY#>LG21*~y)3;VB zz^SHjp`~%l=65c2M)ZMae<4$p2z8PrK`-c|L{wxJ*lDmyH3uD0q z5s^ROLQ{OXwmBA}06>T`Hi{_l5#;@b%Oj4oc;b%fERLWX=IsLGJG@?qk8@lCICwF7 zBQ%}Yykm;KYFaq_p4;<0uXj)%h<^_*$a%mTeg}~)EJWxhc2}0^P@lK_?0^@Bs0`W~ zUz`3T)E^Lhq=z0E5zXTKVh5NE7>m>bcC>Ey7+z-@?5s~s=w*L*jv0nFISfaQx9a;AP`;{{m=DusjmI&uSh57`1f=n1*giVso;`H8GeO< z1k~FGUIK!<3Lj69>!gSnAdc{Wj=THUjDC*jV5FuI#L*kCE5LqHZB9 z0py*7nspr@Pbz&NQ5ZQhbCb!azeh2lR8a^31iT`yTc^h0I5!xvz--(IxHD^imd^1S z1+DtO#IN|}@-g=#uO0KHV2#GFNX+wyN~K(sKxtxb2)hN(^UCIw@k^vkpcRf^-Z_nK zOSfSw+n_t@{mK%($ntHqZB6vq&MD3IJ0o@9|Km_fRzmk8zTw!788g}-SEqg?p1IREC@G#0DN zAIHcS5eOZ4t7kh3javkFg7BOh;ERICCbg%c2jr}VV<;FQD_PWU(u71$*E?518XDlj zo*_t{#LM{88Y-S6gvh7Q+mo{Poy4prcp;Ms+DX%?jY1{)IMMV1HuUHDxVQfXS+ZIx zIa&E6m}lUgy__|FTht@X2&katFHAkP7rH*M!Kjynzpu9G0<2}MumfG2DlSitA9Y)u zhZCRgujZS-H#VHGt16|Zb{K`F)CyF8-i~im!_8)rU(Q$bD~HJO;9m(~hQR4SlCcGB zROM#FUTG)pt2O)Mpu};9Q~-G52;%^u)x#wfE!a+r>WcC4T<%K3HLVBg0|;IN?n zUd|?ga0~|5OVRaq9SLY?p*HWhV*&zn;SzPhyg~;X7jwxKHUb0Qk7onF`KeWqVYLtKeSTFD$K#O z5JdY#YA7wwDx41+DhUW2klN)X4^jR&NrM;br>mfeMo&Sp5fm~rRZ+UadZkqt#fL!_ zFo72y0bqoHy1*+lgaIvdLDOQ6BN>MvW`1`Nl9Mxy+&o?;Wqa<-5(0b7W0sSyz%HyIY;~D9q`&dj&edT+7%Vm^zC?eMx>7&{x8?Y!81k8r`TZk&H88GsS)RRBJlzhSaX*UXj4i4gs;AzS6o!{$8 z;z+?=SJu~!vUTdG&k+&lk&W$cOV4idQyTUrwhWg+Mf-3Tb&$_Tg5!8@uX;%mKAJWH zI>pTfC_6q)qDY3aO)vz4l68Mf()5w|`ADTwoPJXtV<`zL&kD|e^Pi%a{7#D0tC~(; zt^~;jm1h`oqg&O$STG!f4RzBHm_lmJzzEjFVa{oNoJOT6bgY(1pPzUMpV1A)cmv`; z#<~GVB3CmQY^J-Jzwts*46mBuE*{oo_Y+2m&c4!3CN#9r8pe8PDw+e<$c)Q(@>fRiy1!YDWemK;T1#!^R!!Dr1&&{O_8y?2My1|yG|+h?V63P8hHTN0(9Y6#g< z{LZFe5tw>flV0qL?Ht{nX0Uc1c;Ss+!8&RS2w#j+#*(#2{W=z3hd)KfeMV^7jjdDG zH61Wx5*3g51o>-)z{h2XD*=j7RPGdW-L2vB{i|>K36Q!-I6^>x;Gt#gbBR+Rq2vID zrp`ivLJ^Jt8e5Vmdta9hO3^L!S7^5I0Au|PZ^*yDx*PdOS`H!4grqt4AzPW@7_|sW z<7bQ^JI7otov)Rv6YwsO~SqyX3$fsvn)Xv97w+llupES+o6jUfoJsq^A} 
zYRHT&-|2&>3GHX{iI+ZC{l=t7LQq{n{uMZu?a)}FRroWS5c%W#_w7gdNtt>tbzls`6F4b+1N{5WSBkUkeQD@k|7xS9I!wZcJ zg)?%hi@|*xpgaFjZvb3M5CTzy5Y?8!mp8@#KF#qevNlARBcoLR2e0W+8NLLEjW0|<5UocM zR3M|3)AnaST=fUXJW2L1|j5N@+e3k=Dv~V2K6QXIfD>w-_#G~tv zNpkbLCFrKbw1Z0uu3kgt0X>e~JbeTFjF;KPXEA2ZN@3(F5j!o_tFNk*Rum_yl$R#3 zLwYjv57W4XM|i_OGsUyIfkenp4Y@5{*5D zw^xsr?g=aP7=kQf-S-L6`fHAM^Xe_+J{^tX4qs7d2?-0?-{SVwp^@I>Mtj+pBxJMH zo+)U9PEtX2twjkx=y4Gw3ik*bWJgR+jhl%>LJAZLz(fOP9hsBkWvnSol84dbVK84b z=%h?in)$V|v#$08unwpc9j>@m>uQVSf3UIj3t$;151;+}-=5rf+Bvm<`x(SF5rWD1 z_HLYFZ^gbC7zx+K!tn}dV#>#uvAn5Z$<`gXcyEpN@2NTU`{B=q0VG z$3NpgL`5w}?qNvLA8(k=0mo{JC`47mTXMcPuts(v4ld!$dTkr;T(?b$I^0U3jXPRk zEpxvl_J7D7x8jNr$o84$v%iy-k=&|9{c>B3yi@AVQ6LWudMsm(w}_&pCtXC`B$2r zk(HJ4kHMcJ-M`cMehf7T+zZ>{V{U-$SU(MR-TG`wDX-wbcA6XQ~!Orn7 zH~w^!|I13i@RzB+p`0+^ACrFtNY2pS+R;wm&>rVcu;uh2g)(R}NfV+~tQBj#E;~$VXz!$yd}YRyvgv4QnQuq-EgkTR)z^I4?Ibw~5e#ZyRjn~E`s#2AV|iH4;a;e!Xbi_Q^fBcum1kW)&E zq#{RerjYY*zyI#sdV1e{z1ldlpB!g3HkuqyXQj8N#Q7Etl+n#&s<}D)c)5kW4isGr z<~?bp*`^i(%*6t$Im+{Q)!kN2MHGu?(v*$|aBCJrf= zHCM<>M6&X#(xi3S^Vk^CIQKW5N?&`? z6Nw|?MCL+L_ehGmMF5URJZ77E#(K~TAbUiU{A5B*PY`l*0Gb9YZ}9NY05#-P zJ`18jsC@D6^9X?;0Q~z>LukU|gUI*|A?d9qo&~pUk9xZ4eSGLP#_O4CB-e7zrkz4F zRyA8ST|FYtM;(ma43nlVW&#L8CZ}8C;tV^)i!Q>Rg4WaX9OcQvs4N5bbNk~gn2cfk z-aBz+D3IoMxqb@`YbxyLHO{}MR7%s+(K-J1W6N-^mRrjy$WEw_Yk)fYEndu9VSW+q zC&<;Z93_}kfUTf`yM%^fFKJFvnulmch$W*^>e5ji?8dNiKi zQSe=OM9DGRolEl9Nt zfmdgs*2g@(fg?p1V_zJ0S!%|S%v&Dw0E{c9Tya8!_2lT&;75IZj}eHY)X)I;JV%Zy zL_t0&1E**)I^W#w>|M!z{A{rt7MV4bc9FS#V?hP^`o($%vaQf8)WAnWUAWRXx z`Z%@T$jgfBZd_fsyHf2u_%Ol?kb$8(1@z*El3Alvqg=JvB8>SunF=tMypp;0+qrI~ z(M8@?0F-D?poZRLg1xDg_M5xN?!vy9Mkafwg;vsBP~_zuV@0|@{m~O+LsfsvE}|q5v}gy}TLYoo`v-37dq})G&+f(AkgHa0h&7r9m}QV+CsS(A zv-{EA(L>=O$g6kd|?V(TXCH+=kQH)k(z7Z;JJX9I6cE4rXJTGeWXM?NSi%6fN2 zA~iJ_5E*d~W&T3iESFo_pEz=yr4ObN%Bx zMm|yvvwqbeatym@v_jV+z2}*Jjj?`^TT=|5oS+p~bwpK|MfiobZaHK$Vvh8LR4f#m zuw5$YmWB@14?wju=zS2c;}?r5DndUQ;?&(O7<{d0>^6Iu4q} zk#ipoV=M0vDaq~~9WIjYbrRH*Vw=Q*1Yk08q9h7m=<3B5E=$L3$VT=ipi4hDz^Fca zpUYU05%WmI$&c74j4KxDq;z}t%Qp_o?0xm;q&;kaD;By<*`qAU-pG^>!bnF`YBp`K z_5*kF>zcbgo?6vkCmSoF=VDIAurZo1mlkAg4pvbFZRb`|>c0+JINfmpMS+cfMl%%E zMP-6Tz&y);mlE#2x<}+*y%doax0mUV0)+$HKR5$q&N<6}2{IrLZCk^%eQJ|#16|K` zqn0uQ&uNE0_Z?+&ZmV|?=thDo>*LzB(ZR3ItSo79Bj^Soz_s%dY+DTw6 zTI8h^$=SEI-L;OLd{CqPA*5n&dO_4OeMYIMRNGK@M}OCj@+mJeOAfj znTTl^)D4v~zszs?a6c#`R4TjQGf^zWo7c*WM(77%+d7TXWs?l)f*0SL;#*3@($40$ zKcN3jTeUyRgL5S2YJb|hOjv<@(Vlfl{3t+^D2@eRQ-skj)vRBIRF9rR|A+H6!*xE6 z0a}C|@}<6KI%$^w!%Kc#XV1YI#DW^T<-Y8z{3FUR`1@VT)v@3iv8X|M=OWiwW5D^m zHFMYA>}XCb9d9KQW5QM3NLVA{2?IggO)L4L3NRL@yAuC_fsa3sUFsf4j;aabFGzUa zADs+=GN^Qy^m6)+N{$_;!sH%tlR5b1_Y1H7YU3pk2C)tS z%U?IGX0!=6I(i4aWK=uUsZR!_qGY0hO%I#LK$=Q+*AFA`R3Ngl5qckJM=ak8@c&yDf z6tPKJsx+T8m>P-?oxjSCS%wYO^1C%*{oW~H5OxdcKBHGR*nI{3X%r{Z(GuSIm)OdfKID*{9~m)YO)%dhu+K(ApIEwq)Jyp+kV z3$kRgh{Xk*a1ZsB-eB-GYgV61zVWe!J19Rsnk!{eL=?D6xr&lZZ+j@F(tXQmGAjFb z$<)$k=aoyzR@SxB7Lrqj*??R&D(apf{VJU(NgY}f%8K^_>Urtb>J2K69i%*~BG6?& z!~uff4J@gQp`m}yB2J*s^RCt1dbo9eIHs}dlwKN6|jBD%{LFW>USB>(oD8SHotE*|; z`SEi_Lx~{^{H5$5QS5P(np!LVMY10{#hgtDZ5Fl)B3tQg0-ZiUvYOnSOT@>w?Jcc< z_r?ISor`7Be3CJM(bty~X*u9Yb6^}(JnX6=ML+Kqsh{K=(nB&UW#{g!h)^5^`%_3rs@P`4}bloDL9eBRg=3SD!@->*~FRr6RlT$_fx zxM6b>fHJ%+8pX{*rFmPg>He8hgN7{q3V!=bQhi`wkh<%Z_+gz6e(#6@*+*g6APvNW zP1{(y!Dl913T%W-R7u~YmJ$w-gAAw|LnPRy7^Wyc7dgu~47iE(o8OrE%D| z9C1z4@pF5VNbE&=KvG$`e^ysiW~R;C9P!Lz;Za(uJ7H;s38~YwOsZpK9ccwxs{EE- zDg$9&ua6F_p~CQbY5pmA0UM+HSZUlrQt7#A0Kb@~UTP=p0rD%xkj0VYiCXV{u2L1f znZ@3~g+o#E!ZM-dh6ciNm`_Ypj*@O6D)^(W?6{vC}!s8R1y1+^lJUy?}g$4O2>h z3sXwVqxh%|D~$SJrFkm^Ib5A4TY33%q(#<7m^H37v^1T%mQ6UY$?X#pGvHY1Jks=b 
z%%8>;+xqTxVb^FgorQHrZDixWYI`?zw+xLRAxla!*bt^K7H7Pzk}ks|MO&j`+;YQ? zK^$@~Lk!Zst+i;^Cb(}^A?9@|pHRbA?KJ|;LLnGT7I$h#rN z+Dl$=d8X`(|4?2NTenLqMb9I*$R|DDQ@(?AA1d1x#zCb8gym^&!a(+sS;abRi}>9w zwyqTEi>yxXOctKA3xVhhqaJmrY=Y=798ydkV^3({*OD(lMH(|#)*@5e*-KQQm`|Gc zW%d)Q+iAiNT`4rVxtPV7A=`|hdad2+;a3`Er}*K-8k?JVjfLC6=DYUWd&#SIdwT*$ zVa%-BSkZ=*a7Ux7<5}zc1vbL%B#OZ6DIz|)y^ufC-pVV8#=g;doyC4p$b!cYj2mnI z6nD-&+7OeFNaB$0CS1Iq5y252H&rlqmJok-HKVJZqrj$HxU?Yo%$S16A z=4NqCaDku5`NhmcrpTLc`=VZdaARq#6xnojN1VExd=0mkwErO2ZYVhgiT54&Rxh9# z?{9}J`$S+h|I)`{y-&>YP#%z8w6K>rql>*BcCP&v1QeY0C&ol1(~y-gGnrWgu<9!KNCN~v6}ocT0CMKTb< zkO4;dV6oQHs8GTsQ!EI+DN`ew6M9?3QN&OEc`T1)ud{7p@=LpBT?LQ@N<7RQ@q>~-on`sKew$=A&a)*Tn}Au$tLqb&9-T;5zvgAVK4-~KlQNN(3ks1>3+M- z!Q#SEDlbA_?S?_RnSxvDYkZcS7xOiHX2ifB8)mrHcg3LWl{pR2bXK(G@q9CQvmECH z!;VXLh7IqV*X{;%X(sZ7P*MS`>Zi^rV5LF9B19iC-VQWc#*K&2yeXi8i0sA|7AYTzlob-9 zP5}4bcJMQ#5^92~J9t9L%5uJWPI7P`3oO2aA0t)sq7wG(?tT;sEhQWJJV z75_K(24PYNDuwa~|goi+rfUZZ1Q_8INVUiwpj z^Cw#t05AWi9r1Jk9IJX4+Qv?d4lBqO#I4t1-dlFxCC^*jm&7gx^euNaDRNWBiaf{@ zWcRDWq7(Id`NSSIRnW73Cu+CQ4t#GMFk4CR&OHWphN#dgv%4DzM&+l~0{bLL>L6+_ zI6oEF^TOrK((|Z~H zew#A>bxNXxu{Y0Bc=Fd7K35cU=v&BiNff51HBbfU~r*71bmfSAs906Bz z`>35>1jfVK!$=z5@sG24_mqjf z?d8c0)xUmWzcRW-c8q+G%H)V#L-OhtR?9SNvJkdYwNq-$52vb4;7w>v*yoB<47{Eq z+-TiIKbJUz!`4BswyZ|1GB?;}pyVxxFJIv=1#omZhnCOgL!2F>X`o0g7OsdHZ3l-q zu&EDT8hqxc@=VePw2QYc>oqcb(3ItE)gT!VaZ1&ul%tIxcT251!A`@T0RIlr!Jj)Z zCeVH+bxEz%Sx`{eJ4N@EY}ULZwX*c`jd4IcDWa}kTe-N|>K2*MvJVyfU;gR2l zsywZ@$D+6gZB4%P7RphuhezIP=B`u?+T%>M!Vfr?)TMSG=eRew%RKKXwvCm5vpc4& z_r>TAc1tnnVV#As+&_ZDn~HuC#fG~@b&HqC7fP=HV~p8pp2;+@x+N0#rD=-UmqRzr zK01MVEO$;#fcKT%<<|MGOYjZ7DpDEB)0=$TDm93<0y%F4Js&9D26pfjvc}k@f3dw{ z#mDmzj@-AzS<^hXs#in<9eZyLjN1o+C6KEb8t`6g8TE?bj^VI` z2J3Ev-R|%H2y0+Ylw&Mnz7%BSA1;UFxtazhQ-sbSv$LgUWLsweW-S%QQ67&*!R4E&?AMJ zBBwRh1*dxLp2;KESQcUX^q6NeoiWY(Fi)W*Lb!h>i6T&hQuhtNQk*ZD(L6fvB^q)6 zzJ$>QfX?m!)*<%X-z;ANh5Jdb?^3lQFXz zfwicRxa4HRY}Mbi2_2H8VN#G$%%6e0A*Qtdyt);OU#_9Z)l<$E{+9EC=JdFWB8;<9HZ@glTz&ZOg{aZZ24f)x&s% zx3NV3xx8Ywnk5mQl5o8l{ixn1`f|?wj|ZoV^Ee%=x+#_^`l^G&K`?XhJ0=P)izcGo zWt+tp4`lKtC1$G*rBgl4hRFelsb7wkig+9HKdTa2Sz&@|BEcRO8J!k%``B&4&WcEv zkv9;m!lNmdvwL5AUgM89+@&dx7^r3OT&GZ{aw`cZSZGf);gef8F(sv8F(Y7kTq|Z7 zJ7)GG6vx76^>I|JDDl-^EIhSsZL`kLUb=gnP7>+3(XPlsz&GIODE+Ynv|JvBZvWUL^nq zhrwppUT*Xk{%w1`Va3(K{+o{mg+6AIG$N!O&|Xs(?8RdT%sJcG4&=kM-7#}@U}DdvBqt&!{#58NcSj(!HCYYm_tk{5`C*C> zewFxjXzAzF+1%;0k0OlhDW2S2GY|}M%f{yfpU2Hzd%RdDHPdp@u*lZKi|o=t75&um zz_bo-(xPYgm{UfpOx`dA7_ z-Zj*vOEX!nLeXhkWZ*Y8#gc5gd3SScfP|v9Ui|4*VMLA-h$uX1k!wD(C~gip1!?Q{ zMbP;GBq}9X;J0f&r}zB=Z0jt27|%R$agAcH7uiGm^h`GQ`Z^F3q;OTvQ$$pX zBF7;!-9#W1<<$%PI{&C_nLC8=z3d5pi}v#+y!mQ&9lVO|t?GGW%*-^R zT`4Y@h?F44r|$$v0Ld~0(AQg#B@JgglIE1=nGZ8+k6!)-Rp~vC#8B`qB^HY+e)cPB88%1js`Cn4voY{L`j?N z@#W!GI2vZX*qL~$LwBhzj%&@!2YHPWY1fC`@s9BSk%8OB$d3FlInrQ+5r+?kn=d8$U; z76Wp+^uj`vL0gDeeOVozti&CJ8VRUOb2=?iwf5{aohyVLZ+4&8?6CDt@#IabBgX#d zsnD2+#Eh~+%&6j;9|)o(*d9&{-V6?C@#nI6u2ffzhMu_TICHlC%&4pq1 zw#0@)@q;dy9MJIsJ^M`ki0~f6iUt5~+KXlTy-^X+`;BTO&wn_DZRC13HLHuKL0bk& zeUF0*#nk~r`w8_^RlK#VvAV@Q0Y4FL748>47|Wo2bXB!F+~Kyxcu!7qfES|xRB{}g zl)me?5)gZ%dHTFev=*cN)R!gRevMi0>F{YL5~M1z%J`k%M=e0OumvN-l7lsvJnorA za6~$j{;Y_H62*?GT6~a%{SbWL0p(r3gB>#lR}BOD-5LSOCS)l)Ru1#EQ*`_=8#$xK zi2zOkve^N1C zwNCxa=~>0S)ukQHS*tvm^aR;Pgq7;-zFQK~n;t?=^h?5^@8#&Sm|7-x)`Gf7IM?)q zux~;!?-=)d5~KeWZW4zk8vYfY3|>f@pg)3y8IOdf=LMYiUy9Yt~_S#nkE8g~l zOL0(ML#ir{{`|S;cvv^Cj7VoV!>=0FM|T5M)>CFdDj%lg4Qv-6f!jr2&?gAZ=m!riz=^O&9My=yE*ufxs>pW8Yupt*wf zH4EATVT<}@Py8{fr<^mnb;Iwf^(O4GZMlMD(dVJAEk5G8J$x;re8_w-2dx#`1?#aP zew2}YsY$q)X(Srxl)D9H&fh)q*-p_y38K_rC~kQCR?{&aiZjN6qSORIoE7pkw-rt4 
zXv+rEuA}=(s7_bH8-jy6j1RnkvUV_otbXl& zVf9##|F&qmP1*vdKK@W#IF9NWp6HWU4vu81Y<^^BY*u6ii%LP z$p3y%8?)2-o6FMO%)q~jE7!caq$Vhuy)QXz+R_;>b2AubR8?K$4a?&P*o}2d-@wTr zrIUYOKG*CP#DjgpaMN5hwx~1qwDTZKPiNz-swt*rNpaZ=bFmAY_MNoGQN!oV11mkg z+i4M_Q06d{u-I3fXq8dn2h7)b^%<4L6|7=wEa1#vc?}b{3x!EybhBRN1)Q|ToS#6f zJ7yix6JLepDetLslcpenyd_A zWjWjgWc1qVDe%Fn72_5_4Yg2}^Lr1uVlhc&_fSu{d{Iu#WL(!B%X^Z^h#*y~kxBjw zzrbF!h`Dq8v}lU?9gFu~y`%Iw-Fu%n!DBQ#l*%;8XG07pd%o*pA4=rrMuC zxTE)(rYtr4O~>Ab-7;SoV_Y(S78DgcYz3>gG!MwneXnSkLw8f-zcbIJ%t=&}7ME;( ziite^eYU#l*-&vB=_RiUT2-Ff^dR!y1;1o}gWq1^x~arzeHJr~Z>IhrKNS5ta1aYK z9DGkut)OE*+W#zP z2L~$nq7dH%qwYhIw>e1G>0q=2hclUoG7!6OoA0Y{kZ&rZdGBx|))ZHQm)EKwJs_>Fz7|E}v`o-X$==^~h&qFj)M1I~a1Kfv+0^#`b~gC{Rpd z@)PLzgz#hj)mj|hq6w+#RdhrK6~$eFpA{I13WkGz%s%?T+PuyvJNEuwxF#y#9B?pCPFj6_G!oUH9v?Bto$QEN|;X zuRt`{3+oO>_+vL4yKb;m6(k#5^Jp-9slnJi@c5V;s|5AQUc}V?zD9v`w=zsq^=xd? z=KkuQ%iQ%~H^6WAlq67tQq`}mFbj<3o1fd^On~^Bpb(B^Pk3)-$?zud*sck?L{>or zuVc^a9K9vWL%#`6eo$$NK=n$As1352G+ctIJy1VtAmt@);YByPh^2%V5Z|=ives3W z)0d5s(N=k8zPFdG8&of>Of3!Vr3{_qtSxn|WaK3k{%t0tYiVlXfipLKt!e7g3Ae@y;wK{Be|J)n@K#Ko{IseyJKKvJ?$V|t=@?VhRhT9jUsJMEqdYSl6WE#gH z8kbfmnv;XP%7?CY0=$Z+lz&diB$o(LERwU`*^dTKiE|tt&PG;=AL&;JsZvVsFm0>s zjtSZiJ;*zNRu)c2B@%bV%53UmyZQ9+`rg^qsgY@)X+LRy#d*X!O~x0-FePIp|LASV261Loq*kbF^N#Wp~ftzO&ju^_2Z zqa6-D!(YY)xPDEEqP;ePdIy#_ZHC;kOnr^-wdlgDpsB;PzUSQHMUc#xmWh^ApcBAZu=fD2_&WR!o-s z&Q1QEQwTpr|5Orl32qNg50%fNqa>=;@{+DN%JB#(UR9O7DO+N4K2ZXC@5 z+SrV#QVN*BfNROEl5TAJTysaWqHWea!7$?kd8|qzO5K%Gcp~fnly{avaW&nd#yw~t zXmAP63=HlP+}#OIu;A|Q7Tnz-1a}P(f;+*T-~`tT$(z^j`Odjj=hvl%DQ4~N-MxEB zH&gwrRyX)KTBu5=rd-lv#`MknCAu}|sm~yFN?5Z*fi2qtI}ha-)j=j~KXz$Kg!L;p zioT61)h{s&=B+B*QUv=SRQvGxT6dU|nAah5dx5jU|5_~ZIqDck} zkl&o{i7?xd5>YBbHle}q*^k^~U1kw46}g<#K1WoxbQB&+hOLCt>ttjji{ z9!A#wJ(7z3r0#nvHae9=E?OLu%;XKoBZxct^u-NYcXM8DGS&Lo7YJ})c zoJuUX>{wQmMjG&v6bOqCQVUraD9s=lOBM@L6QGx2`yz~9jwzZwi(|u|RrA@l=14s! z;tqZ8=gUuH7m3GOON)=hCCZWRx&KY&p39;;^}BrU#ZZH}wp`|Y#bt-R#_+>rSO2Kn z(rH3@qob!K)x&K~zmn4zDLfja)n#>cb&tt7LsnAVc|o{$ulc8twxwA#mfd5>LV|3} zB~Cdqz{NdAomyfP+~~}f2W!EMUb>qHEq(foD7{wq>EQc@)tm7M z!eF9L8%PBu>m9{RIo{B--bh^C8yU?RBGUK%i`^IdqRJ2SrgI%rQNB@5?Bg$P({W!0 zcTPpuR?>ugQp>4*S6*ZiVqa_#Qi3-45j3~iu-=s0M4PKX6l=^{oCk2&5~>=w1n580 z(JviN9~=g0N45CrylEzLeb)BLNvhU=7$om!fgVFE^d?4q&_g-O3%hnMTQ2fj4*oC( ze;k=MN@qSWi4IFYe!*zKflaG?Z&6W+HBu9yBpgIjGI=!{BhAl6j3ABswaC~4BeZ(!5mbADP~zP zPA4<5f+NZ=LiG_`v3m`t={dB1YV3F@t6jc7_vTO%Xxrf#a-%Jb*4A|GlpBC{%VzTp z0D1Q#GCy;p$DCU9*)(_CJ?(TP&CKUBqpPc}tU?zLIL?;#Gx&khYd`x>x1Z=!7NM); zh_`9|h8E!Ru4$5G-8;d!jkb3{#mxvpQkmK%`rw*oj%>Z}1r2>aYlrxS3ro|7@OhQk`>6~ zoE;Hrp{QPPd&Ky$u86;~PKt<^1hAIZR5=SFAmyTtD+W1+`jXgyEZg>6y*N@v zVyMET%Q3poJGdEr4My!m(8ARig{TbOXGfPswBP>@Kr$axOnJ}J*28P*%qT#7&)CS!M+4y3=2);xdxQ8r< z>+34U2fN(Dje?zLKWPWEQBrAuEsj*|hdi?4_EaW=h>zF@l^jQjwhtxi1zy=b<9xr! 
zOSq?lVw@E-lfVJPd7~u^{q?NvV-3|WYyD^Euo~|gPGoBJc+YWKKN~I2u?kWkr3&CiK0`{WWXhq`$&1K& zDThv`y-FtC_pU)ph;cjIB{m&lHi9;Afgc*h9;R{-$zS3Grc%)Anb$>ov?%edWjE6% zRQolj`K(&Eqz`fB-A3{mtO{y9q31*Q?QY6cb%y8Jeno|Z!|L{aWJFQgd5OgEQnXw6 z;lL2Lm7LAe_ek>CNCw(jCQHeE2yuL6XnE`m1f?@R$Y1tM&?9YUM_aOf6ssB%s* zYl7JDb0_3!5nuGeuG8$60JSDF{Hlw`)Qo5abW_MM942&wi(ae*T#1_WL-pTpd>yN} zyF_7Km0=IZ*5K2GJ~n|~k`Bg;yL9i8y~Jp}^qt;7(XNY+CmC!M=?yzCyz1ei7a}zg z)iKAnFlSn%UMT+hW{>qH5cj#hZ?ttck)h<(IEOiuRbJ9onA*_vu3bpCv9}t`zARjZ zFkFSNihKUHo>CdQB{}+c%K)@d82H{iI`rztO$R~*Xj*(nMpfacoi3#aOYO0vZisFE zhg*@yTuTy`JD_SWxav8@%I?s~7UoJg1Jc39 zjRza5Rnv*M<$5s3r|)IU(}><%TG?WO|wA~O-6={Flb~0cKkf@wNlN2Me_c~{7t0J*%pU!1k?LV(5;2veb@T~ zt-A7uw@+iWTpj4=LO)_hBE$^5i_DLlERJ0x%_dSn_pTRQ=I2abHxc!tyVRU~+jPRY zP_YFpxyyKt2Ky;q-B%y7o^saXh-fiR)1yGvn$oq`Br2F^pP;UoBK?vL0f7pEt6-OW ze?fdsgNu$T@wtK|_);(nvA2^Zi-`}3VeWbf@yex%sHQZsn3%k6d=R(Xoc+a)4MYe8 z(?awNWff@U)SS2+Zy=lVjm)MEff$O_+=sR9Y11a~D~wjOO^MOBoJnOwT;^tuJ+p@D zTI%uTp&|vlXr$xzjB(|*?mKfEQ)V?IWC^b_+hmTP`w?ahZiC11X)+0kKHEm+2yjY) zNYg-_>|@arED!T=n59fzi-j!}w^FG< z$zWJ}b5S?kk>A72o8|Mm=p5)k1f5F*L$cqz$}Ah10U9d*%%8(YPXrHdlTM^qFj17l z7iSM=6c6h0%Y-jRa7jzSTIvW?T?V{6{iSP7S@x-t6*-Ujo*I?gC$4WcSeN#UqbgJ+G8<} z$AfSNS_}@x5iT>?P)NjU9W|2d-`vAKAAnX?4gCt{h{77tO%zg6j*GlAyAd!G;he9K zb(1){tvGV-Y9M(B2B;^5Si8dsH1PX?mMjR!Yu#nuRjY{aO_sMhL=YOxfsj*R$lQ># z6>A7Mp!HeImL&NKB}b@^32C8=V=?CoUCK`bbu`sv&C{w|O=?W5?R8}wW1Ark@}ar; zc5aL7AtfV_djAbYzoCnlN_}rSu`&F?#}m&m2hAzTWy|NNma+fcdt(gWgzvWek*ot5 zn9mWeNQ}ue`&HO`1Bcg!ZKa>}Ub1(5IugIfbzy0Xwp>3Hx)^yqf^o{Q%@7ee+B+1f zWywIu@9#!F+;hpPsnO*LZ%h1Iul@_#GZ}lSv#IO1zGo{9@;Ua>=Wd6$^9}dTpPQRE zZiQzP{3_s=He2X!(|7NzBRc$vXI+~HFrdBNG|G;I*j3_7q6mj={WwHq-wDlZEJuUM zRisdsdSAL_3=0Iu9b(uYR4(*XkMkH9?(Grz?JyGcGD}r{{EkntjU*>1dA`4eUMG*u zK<4jV2{E?z#tue`4vC%g1*`F`iuqjomCas?Db8cgjq-2cB|x*#Dr@)&C^d`py%7BDk2s=m%2P% zn6CB|;5BbfO6m`S2Vi(-TUUcic$)5XPe}KiJ=MNLmtFLb?B+~sjzp#`aR()w-Z5Qm zH*v-Wf-rjzdPeC>AB0c%(1T*Fo-1y8xjzSd$U)O-mKX~!&?JXC&T#meYN#u~qcrGc z%`Pjeygjq?o~AuuQH}O@hS)W+w5NgwPQG)w_K_(glnvAI@kh-1m6M6WzvZ zKWB-N)wN`9-&{9tGJW*!xaRl>CUb6yP6MiM7}6uk3=(Z*IYWJCc@d<^91%6k=*$G5 z_h}M4cFyc_@Ls!8?lVbU%r&N$r_{RKm?r9C8_OjbYkY$HxriSwaykn75jg179QSMX zHI5W0n_xYo&nE|qWEwn=JVkQEYSk+f={M(ga3c^g!}8<~4LFpz80w~UVs%s)LqWlV z4c|rP%~)a8@H;MJc>VkWqK;7I8U3JY>gl9XMFa#zo#0;S-j?alz$vh0tdIMQut}&@ zQYdP0=_KK$Vl+-zf7n6)F058oUV@~OQeLY`SutpZvLMNcRF}*_o3d_y#f!3_H)l4R zygzW+lFc`>3EyPD63rGiB3c(tgM%SbrW+8IG7_yZl zZFnnFasY-TQ-{e*vPU{Rb#&GHbVDvw;L0f08-)*R*V_ci2LK*;@;y^#**%@4?lYOe zN4zFj{6e^oBSPQr0m{~pmK_#Oh*;JP(uLwL zg-yxsnzo-q5)AfP2XOGMs8!+pFC;cI2nc_9m z0VdTvXkK&R4E(c#tLtjMLl_O~OVgYhN%>4KRk+#^0+5&3cJxFe6>XcRik70h#F&Yg zA`~7t@pfR74H%tRPAO|)RxvR)=Aasw!xT^J0J4cC?jSUCO4gD+GNI|(xDO*7OnWn5 zxlh?etEkvgC}r&Owbi;MSRLU5vB$SPUFyB}X?u#VedafH1HHql!>d)6ZPbaCwy7o8 z2K6mA99jm;XWC~zb|TH{u}7RGbnHJ7o|zWA7AM zfRb3KOvGX+mH`$N3UseYjGZ*9!}BukHpmodTJ(ZW znz46j(&Fdn19TWCNrprl;e;zzjLTW;IbdKzlVKXKnHP+gVJnd`ov%X6&|K49dvHTX zPBq!`Z(`@FOd@g@&l4TS79j~0YxA?K&Eizp-@kLn#A;fg!wt6De;p7hIoxrW- zt-#qh0C_)%;oow2cv6*Ft#js@<1mSr2S)L+OU%z^TBHf^Mr5n={M$DQPoxf#9!qDdu0reQ1#@-4wD8C>3;Yqbuh z*moTnu&`q5sx}{a87Vj${8Pz1MHj1T8}(jlv$E~4Uv$ed*LEayCjkjOC~lN}V~Zm< z-@hg%q8^FF{IpZBy)~M$15T}7&MS)Yt*HG=i^Dp*&r+ZLQQO8n{jm#j10u)#Mc_%=r8XEb1S4l>GUd668V_J0I5?p00dp9RbuplNoBn6t*TT9 zihI;^h^F3v0s4WY-@qo&FquisAms)_Y|ND7J~$i0jp)S|aYu1uSEv(&5;=GcNuyeW zb@7fi+A}!AhWa|jX5@`5v-V!Lc0{kYFUL|Il4RU+_nU%FvnL8}H&9bHvUv|RCFzIA z6Unh~mTVNY+8LqWCt~G*swr3_EI%*!9IRwyMkYIqzZ?(EXB~~6udppM(kN$PO~67R zE%n##;q75&eXD&wpK5qZ-Y`7_nmNdFOgTKZ?2U}tpK43or$TJlpq(9LVjX26DHf&| zCe5yb)*I5lz$xnO-C3{P(0p!0STOHltQuXJn|CH3VMS#-ik@I44NZV*WB`a$f>ZTY 
z(JyFEi%&CHg^x)y)Hk-)9A5kwJM7LC#dWk>aJ1g?h_=0V6~DM&J#J&ExLn^1m zV>z?$ICiso!rdfa;V3p&WrrxM6eGpz`UF@%A-<#9!cfyi9f^>eKh{ir3lF42ljel! zKfZTuO&Ik(Ron1#hzfht&e0SGu_GzHro}$6B1@5_5oXn~rg}0JJ6~h0Q)P7{!FJK^ zIs>UTkJk^mj@b;cRL6R?eG?divgZar{2+Mpb)82ufO}!)D0ID-F>fF4-L9&eKd1<8 zKjp+o_1lQCAzAPRwGb2PD(KelurEorFA;5eJ(7oVmnOyY^h!H6k#q+rGIBQ4<|(l# z&C7PhB!&d4JnUO@MAFXP=|`B^UC5*)Hdk3lrzo^!FgaF z>I>um&HQ;V8`V8{e*#W)Xu2uo>8v?wl z^ej`<#tdP;Nhq{zM?Bci%9eRs(U>W*Ry;@2HX1UlmQGf9pB()%5NPLWY!()2ATW2C z8t$(3F+|S}kCs;^_nVIW5BGZaxg}gZ*&FG2EIvt%Tg{%koitr`+L-&_IYHfOrJvWq zA6Dk%drUry2EKOZxP!cAC0^KsXA(t6p@026@!sIxhzuhNaqg=0ynu8rDaZ>a)L(Vp zPpUi9K)QDP!-EVe-%js`deSwU=NQV8&+@SpQiAp0Ih#*f8>HWJ#f)~X^l{;DX~EN+TLY}#!g1P3b|}{)59vv59GwT zvDMUQ>|1}I=bw}_hZOTcl+sHTDAYT{4x=lusd!;BCvX-W8jTpa+q_-94&_w&?Gnw| zi)pp*n0h}4K8PWxtfpGggZ-`mNK9$q9`*e3OA3jyjj6Fc-D^CA=!GpMx<s^pR^+wPv)?1PWFQ{M(*tIZLC0xALzWAS@vOS@>@K<>o4zQMB9)h_<%- zNCTrV=Gi>GY{G1>lWCa6B>XpG6e%L>!X3lxCqJLny1XtcNwS(6jMJ}8)X#ql-6l{c zq^+m1qPp4$VY~~qXa}A)I6S4J+Z)G zxI`*eDTks!`y9bwDtEFJzA$o6V+C+O-60u^1_H5X5kEzmXNnNqNH|#OQp7&ZMyYIK%pMImEO52y~zLy;eVzc3y;2{%? z#98o?cf!&Rs_33x3zHlSaZORM2>v8;6*xg zB3$x4?78SVAoEUDmjf*uCHf*^Ne8y` zlHgVGWq8f#fbI`@FBs#$KNyrhEZw&Uu7lNI^~xf@2+-myk^zsX;%MONxxc3ioLoQT zn@<6UdiMpn8GoP8bwt|@>A(eHUxa&`e5M!Ij-e6j4!ta}B^iagE<4Hkc;rDMHTD~S z&o=+eAnfQaA18`d|RA$XZk%8_$XO(71!sz=19F#^b7JpcSo7nc#d`V0zKD@kRb z2xNjtHdR=ukS8BQHY@SQ9jRJB-W9s`Mul|CvqnakLGM`}7)KAEHuC8%dq{Wh5(y~? zE!99v<9QKJ0nZ*w&9H*C1_LrRv4esVeq)i27X14wldbd za1H%kuYh}^*mt-Kf%idDmGaPaYZ)tdmsSP-1W!<`|G9NzoJQj2YTt-f^?Sm#b-VUfA9ndH7m0&nCL@ zT