From 661508d0b3095fe9a2dc562c7ab136f2b8e6ad95 Mon Sep 17 00:00:00 2001 From: Javier Duarte Date: Thu, 19 Dec 2024 09:30:18 -0800 Subject: [PATCH] use submodule --- .gitmodules | 3 + hls4ml | 1 + hls4ml/.clang-format | 191 - hls4ml/.github/ISSUE_TEMPLATE/bug_report.md | 44 - hls4ml/.github/ISSUE_TEMPLATE/config.yml | 5 - .../.github/ISSUE_TEMPLATE/feature_request.md | 28 - hls4ml/.github/PULL_REQUEST_TEMPLATE.md | 38 - hls4ml/.github/dependabot.yml | 7 - hls4ml/.github/workflows/build-sphinx.yml | 40 - hls4ml/.github/workflows/pre-commit.yml | 26 - hls4ml/.github/workflows/pypi-publish.yml | 31 - hls4ml/.github/workflows/test-sphinx.yml | 27 - .../.github/workflows/update-branch-on-pr.yml | 20 - hls4ml/.gitignore | 16 - hls4ml/.gitlab-ci.yml | 27 - hls4ml/.gitmodules | 3 - hls4ml/.pre-commit-config.yaml | 70 - hls4ml/CITATION.cff | 56 - hls4ml/CONTRIBUTING.md | 55 - hls4ml/Jenkinsfile | 110 - hls4ml/LICENSE | 201 - hls4ml/MANIFEST.in | 7 - hls4ml/README.md | 142 - hls4ml/contrib/README.md | 7 - hls4ml/contrib/__init__.py | 0 hls4ml/contrib/garnet.py | 370 - hls4ml/contrib/kl_layer/README.md | 18 - hls4ml/contrib/kl_layer/kl_layer.h | 87 - hls4ml/contrib/kl_layer/kl_layer.py | 184 - hls4ml/docs/Makefile | 21 - hls4ml/docs/advanced/accelerator.rst | 77 - hls4ml/docs/advanced/extension.rst | 185 - hls4ml/docs/advanced/fifo_depth.rst | 49 - hls4ml/docs/api/configuration.rst | 225 - hls4ml/docs/api/hls-model.rst | 102 - hls4ml/docs/api/profiling.rst | 71 - hls4ml/docs/command.rst | 133 - hls4ml/docs/concepts.rst | 69 - hls4ml/docs/conf.py | 127 - hls4ml/docs/details.rst | 33 - hls4ml/docs/flows.rst | 67 - hls4ml/docs/img/act_hls4ml.png | Bin 9354 -> 0 bytes hls4ml/docs/img/act_keras.png | Bin 10406 -> 0 bytes hls4ml/docs/img/hls4ml_logo.png | Bin 26453 -> 0 bytes hls4ml/docs/img/hls4ml_logo.svg | 514 -- hls4ml/docs/img/hls4ml_logo_lightgrey.png | Bin 25377 -> 0 bytes hls4ml/docs/img/hls4ml_logo_lightgrey.svg | 562 -- hls4ml/docs/img/hls4ml_logo_navbar.png | Bin 7506 -> 0 bytes hls4ml/docs/img/logo.jpg | Bin 64144 -> 0 bytes hls4ml/docs/img/logo.png | Bin 23360 -> 0 bytes hls4ml/docs/img/nn_map_paper_fig_2.png | Bin 98357 -> 0 bytes hls4ml/docs/img/overview.jpg | Bin 202059 -> 0 bytes hls4ml/docs/img/overview.pdf | Bin 642072 -> 0 bytes hls4ml/docs/img/pynqframe.png | Bin 335589 -> 0 bytes hls4ml/docs/img/reuse_factor_paper_fig_8.png | Bin 82654 -> 0 bytes hls4ml/docs/img/weights_hls4ml.png | Bin 10231 -> 0 bytes hls4ml/docs/img/weights_keras.png | Bin 10969 -> 0 bytes hls4ml/docs/img/zynq_interfaces.png | Bin 76029 -> 0 bytes hls4ml/docs/index.rst | 62 - hls4ml/docs/reference.rst | 104 - hls4ml/docs/release_notes.rst | 8 - hls4ml/docs/requirements.txt | 15 - hls4ml/docs/setup.rst | 187 - hls4ml/docs/status.rst | 92 - hls4ml/hls4ml/__init__.py | 24 - hls4ml/hls4ml/backends/__init__.py | 15 - hls4ml/hls4ml/backends/backend.py | 177 - hls4ml/hls4ml/backends/fpga/__init__.py | 0 hls4ml/hls4ml/backends/fpga/fpga_backend.py | 866 -- hls4ml/hls4ml/backends/fpga/fpga_layers.py | 84 - hls4ml/hls4ml/backends/fpga/fpga_types.py | 459 -- .../hls4ml/backends/fpga/passes/__init__.py | 0 .../hls4ml/backends/fpga/passes/bn_quant.py | 169 - .../backends/fpga/passes/bram_weights.py | 16 - hls4ml/hls4ml/backends/fpga/passes/clone.py | 92 - hls4ml/hls4ml/backends/fpga/passes/codegen.py | 51 - .../hls4ml/backends/fpga/passes/embedding.py | 37 - .../backends/fpga/passes/final_reshape.py | 23 - .../fpga/passes/inplace_parallel_reshape.py | 24 - .../fpga/passes/inplace_stream_flatten.py | 25 - 
.../backends/fpga/passes/remove_softmax.py | 13 - .../backends/fpga/passes/repack_stream.py | 64 - .../backends/fpga/passes/xnor_pooling.py | 22 - hls4ml/hls4ml/backends/quartus/__init__.py | 0 .../backends/quartus/passes/__init__.py | 0 .../quartus/passes/convolution_templates.py | 183 - .../quartus/passes/convolution_winograd.py | 177 - .../backends/quartus/passes/core_templates.py | 221 - .../quartus/passes/merge_templates.py | 108 - .../backends/quartus/passes/pointwise.py | 95 - .../quartus/passes/pooling_templates.py | 111 - .../quartus/passes/quantization_templates.py | 36 - .../quartus/passes/recurrent_templates.py | 305 - .../quartus/passes/reshaping_templates.py | 138 - .../quartus/passes/resource_strategy.py | 77 - .../quartus/passes/transform_types.py | 54 - .../backends/quartus/quartus_backend.py | 362 - hls4ml/hls4ml/backends/symbolic/__init__.py | 0 .../backends/symbolic/passes/__init__.py | 0 .../symbolic/passes/expr_templates.py | 172 - .../backends/symbolic/passes/validate_lut.py | 37 - .../backends/symbolic/symbolic_backend.py | 104 - hls4ml/hls4ml/backends/template.py | 80 - hls4ml/hls4ml/backends/vitis/__init__.py | 0 .../hls4ml/backends/vitis/passes/__init__.py | 0 .../backends/vitis/passes/feature_check.py | 34 - hls4ml/hls4ml/backends/vitis/vitis_backend.py | 52 - hls4ml/hls4ml/backends/vivado/__init__.py | 0 .../hls4ml/backends/vivado/passes/__init__.py | 0 .../vivado/passes/broadcast_stream.py | 117 - .../backends/vivado/passes/conv_same_pad.py | 109 - .../backends/vivado/passes/conv_stream.py | 52 - .../vivado/passes/convolution_templates.py | 500 -- .../backends/vivado/passes/core_templates.py | 213 - .../vivado/passes/fifo_depth_optimization.py | 104 - .../vivado/passes/garnet_templates.py | 249 - .../backends/vivado/passes/merge_templates.py | 106 - .../backends/vivado/passes/pointwise.py | 92 - .../vivado/passes/pooling_templates.py | 109 - .../vivado/passes/quantization_templates.py | 36 - .../vivado/passes/recurrent_templates.py | 175 - .../vivado/passes/reshaping_templates.py | 132 - .../vivado/passes/resource_strategy.py | 48 - .../backends/vivado/passes/transform_types.py | 52 - .../hls4ml/backends/vivado/vivado_backend.py | 476 -- .../backends/vivado_accelerator/__init__.py | 0 .../vivado_accelerator/passes/__init__.py | 0 .../passes/fifo_depth_optimization.py | 69 - .../vivado_accelerator/supported_boards.json | 42 - .../vivado_accelerator_backend.py | 162 - .../vivado_accelerator_config.py | 162 - hls4ml/hls4ml/converters/__init__.py | 481 -- hls4ml/hls4ml/converters/keras/__init__.py | 0 hls4ml/hls4ml/converters/keras/convolution.py | 95 - hls4ml/hls4ml/converters/keras/core.py | 136 - hls4ml/hls4ml/converters/keras/graph.py | 71 - hls4ml/hls4ml/converters/keras/merge.py | 32 - hls4ml/hls4ml/converters/keras/model.py | 45 - hls4ml/hls4ml/converters/keras/pooling.py | 93 - hls4ml/hls4ml/converters/keras/qkeras.py | 177 - hls4ml/hls4ml/converters/keras/recurrent.py | 50 - hls4ml/hls4ml/converters/keras/reshape.py | 92 - hls4ml/hls4ml/converters/keras/reshaping.py | 98 - hls4ml/hls4ml/converters/keras_to_hls.py | 336 - hls4ml/hls4ml/converters/onnx/__init__.py | 0 hls4ml/hls4ml/converters/onnx/convolution.py | 85 - hls4ml/hls4ml/converters/onnx/core.py | 122 - hls4ml/hls4ml/converters/onnx/merge.py | 42 - hls4ml/hls4ml/converters/onnx/pooling.py | 121 - hls4ml/hls4ml/converters/onnx/reshape.py | 39 - hls4ml/hls4ml/converters/onnx_to_hls.py | 319 - hls4ml/hls4ml/converters/pytorch/__init__.py | 0 .../hls4ml/converters/pytorch/convolution.py | 107 - 
hls4ml/hls4ml/converters/pytorch/core.py | 133 - hls4ml/hls4ml/converters/pytorch/merge.py | 61 - hls4ml/hls4ml/converters/pytorch/pooling.py | 136 - hls4ml/hls4ml/converters/pytorch/reshape.py | 108 - hls4ml/hls4ml/converters/pytorch_to_hls.py | 340 - hls4ml/hls4ml/converters/utils.py | 245 - hls4ml/hls4ml/model/__init__.py | 8 - hls4ml/hls4ml/model/attributes.py | 244 - hls4ml/hls4ml/model/flow/__init__.py | 8 - hls4ml/hls4ml/model/flow/flow.py | 142 - hls4ml/hls4ml/model/graph.py | 854 -- hls4ml/hls4ml/model/layers.py | 1385 ---- hls4ml/hls4ml/model/optimizer/__init__.py | 57 - hls4ml/hls4ml/model/optimizer/optimizer.py | 328 - .../hls4ml/model/optimizer/passes/__init__.py | 0 .../hls4ml/model/optimizer/passes/bn_fuse.py | 38 - .../passes/convert_to_channels_last.py | 135 - .../optimizer/passes/expand_layer_group.py | 46 - .../model/optimizer/passes/fuse_biasadd.py | 18 - .../model/optimizer/passes/multi_dense.py | 68 - hls4ml/hls4ml/model/optimizer/passes/nop.py | 14 - .../model/optimizer/passes/precision_merge.py | 40 - .../hls4ml/model/optimizer/passes/qkeras.py | 276 - hls4ml/hls4ml/model/optimizer/passes/stamp.py | 20 - .../model/optimizer/passes/transpose_opt.py | 21 - hls4ml/hls4ml/model/profiling.py | 684 -- hls4ml/hls4ml/model/types.py | 689 -- hls4ml/hls4ml/report/__init__.py | 5 - hls4ml/hls4ml/report/quartus_report.py | 258 - hls4ml/hls4ml/report/vivado_report.py | 674 -- hls4ml/hls4ml/templates/quartus/Makefile | 30 - .../templates/quartus/ac_types/ac_channel.h | 555 -- .../templates/quartus/ac_types/ac_complex.h | 445 - .../templates/quartus/ac_types/ac_fixed.h | 1546 ---- .../templates/quartus/ac_types/ac_float.h | 1196 --- .../templates/quartus/ac_types/ac_int.h | 3099 ------- .../hls4ml/templates/quartus/ac_types/ac_sc.h | 552 -- .../templates/quartus/ac_types/ac_std_float.h | 2318 ------ .../templates/quartus/ac_types/stream.h | 36 - hls4ml/hls4ml/templates/quartus/build_lib.sh | 17 - .../templates/quartus/firmware/defines.h | 47 - .../templates/quartus/firmware/myproject.cpp | 48 - .../templates/quartus/firmware/myproject.h | 48 - .../firmware/nnet_utils/nnet_activation.h | 516 -- .../nnet_utils/nnet_activation_stream.h | 676 -- .../firmware/nnet_utils/nnet_batchnorm.h | 104 - .../nnet_utils/nnet_batchnorm_stream.h | 108 - .../quartus/firmware/nnet_utils/nnet_common.h | 71 - .../quartus/firmware/nnet_utils/nnet_conv1d.h | 64 - .../nnet_utils/nnet_conv1d_resource.h | 241 - .../firmware/nnet_utils/nnet_conv1d_stream.h | 172 - .../quartus/firmware/nnet_utils/nnet_conv2d.h | 72 - .../nnet_utils/nnet_conv2d_resource.h | 303 - .../firmware/nnet_utils/nnet_conv2d_stream.h | 238 - .../quartus/firmware/nnet_utils/nnet_dense.h | 169 - .../nnet_utils/nnet_dense_compressed.h | 80 - .../firmware/nnet_utils/nnet_dense_stream.h | 46 - .../quartus/firmware/nnet_utils/nnet_embed.h | 45 - .../firmware/nnet_utils/nnet_embed_stream.h | 29 - .../firmware/nnet_utils/nnet_helpers.h | 140 - .../quartus/firmware/nnet_utils/nnet_merge.h | 249 - .../firmware/nnet_utils/nnet_merge_stream.h | 357 - .../quartus/firmware/nnet_utils/nnet_mult.h | 113 - .../firmware/nnet_utils/nnet_padding.h | 99 - .../firmware/nnet_utils/nnet_padding_stream.h | 83 - .../firmware/nnet_utils/nnet_pooling.h | 319 - .../firmware/nnet_utils/nnet_pooling_stream.h | 316 - .../firmware/nnet_utils/nnet_recurrent.h | 583 -- .../nnet_utils/nnet_recurrent_activation.h | 53 - .../nnet_utils/nnet_recurrent_stream.h | 65 - .../quartus/firmware/nnet_utils/nnet_resize.h | 38 - .../firmware/nnet_utils/nnet_resize_stream.h | 56 - 
.../quartus/firmware/nnet_utils/nnet_stream.h | 121 - .../firmware/nnet_utils/nnet_transpose.h | 50 - .../nnet_utils/nnet_transpose_stream.h | 32 - .../quartus/firmware/nnet_utils/nnet_types.h | 78 - .../templates/quartus/firmware/parameters.h | 11 - .../templates/quartus/myproject_bridge.cpp | 66 - .../quartus/myproject_test_parallel.cpp | 112 - .../quartus/myproject_test_stream.cpp | 129 - hls4ml/hls4ml/templates/symbolic/build_lib.sh | 18 - .../templates/vitis/nnet_utils/nnet_conv1d.h | 68 - .../vitis/nnet_utils/nnet_conv1d_latency.h | 89 - .../vitis/nnet_utils/nnet_conv1d_resource.h | 104 - .../vitis/nnet_utils/nnet_conv1d_stream.h | 34 - .../templates/vitis/nnet_utils/nnet_conv2d.h | 77 - .../vitis/nnet_utils/nnet_conv2d_latency.h | 90 - .../vitis/nnet_utils/nnet_conv2d_resource.h | 107 - .../vitis/nnet_utils/nnet_conv2d_stream.h | 82 - .../vitis/nnet_utils/nnet_dense_resource.h | 250 - .../vitis/nnet_utils/nnet_dense_stream.h | 105 - .../templates/vitis/nnet_utils/nnet_pooling.h | 341 - .../vitis/nnet_utils/nnet_pooling_stream.h | 344 - .../vitis/nnet_utils/nnet_sepconv1d_stream.h | 99 - .../vitis/nnet_utils/nnet_sepconv2d_stream.h | 133 - .../templates/vivado/ap_types/ap_common.h | 376 - .../templates/vivado/ap_types/ap_decl.h | 212 - .../templates/vivado/ap_types/ap_fixed.h | 360 - .../templates/vivado/ap_types/ap_fixed_base.h | 2354 ------ .../templates/vivado/ap_types/ap_fixed_ref.h | 718 -- .../vivado/ap_types/ap_fixed_special.h | 230 - .../hls4ml/templates/vivado/ap_types/ap_int.h | 330 - .../templates/vivado/ap_types/ap_int_base.h | 1885 ----- .../templates/vivado/ap_types/ap_int_ref.h | 1346 --- .../vivado/ap_types/ap_int_special.h | 223 - .../templates/vivado/ap_types/ap_shift_reg.h | 138 - .../vivado/ap_types/etc/ap_private.h | 7199 ----------------- .../templates/vivado/ap_types/hls_math.h | 27 - .../templates/vivado/ap_types/hls_stream.h | 263 - .../vivado/ap_types/utils/x_hls_utils.h | 80 - hls4ml/hls4ml/templates/vivado/build_lib.sh | 17 - hls4ml/hls4ml/templates/vivado/build_prj.tcl | 250 - .../templates/vivado/firmware/defines.h | 14 - .../templates/vivado/firmware/myproject.cpp | 25 - .../templates/vivado/firmware/myproject.h | 15 - .../templates/vivado/firmware/parameters.h | 15 - .../templates/vivado/myproject_bridge.cpp | 66 - .../templates/vivado/myproject_test.cpp | 92 - .../vivado/nnet_utils/nnet_activation.h | 777 -- .../nnet_utils/nnet_activation_stream.h | 777 -- .../templates/vivado/nnet_utils/nnet_array.h | 52 - .../vivado/nnet_utils/nnet_batchnorm.h | 124 - .../vivado/nnet_utils/nnet_batchnorm_stream.h | 123 - .../vivado/nnet_utils/nnet_code_gen.h | 32 - .../templates/vivado/nnet_utils/nnet_common.h | 76 - .../templates/vivado/nnet_utils/nnet_conv1d.h | 76 - .../vivado/nnet_utils/nnet_conv1d_latency.h | 439 - .../vivado/nnet_utils/nnet_conv1d_resource.h | 103 - .../vivado/nnet_utils/nnet_conv1d_stream.h | 89 - .../templates/vivado/nnet_utils/nnet_conv2d.h | 75 - .../vivado/nnet_utils/nnet_conv2d_latency.h | 89 - .../vivado/nnet_utils/nnet_conv2d_resource.h | 105 - .../vivado/nnet_utils/nnet_conv2d_stream.h | 112 - .../vivado/nnet_utils/nnet_conv_stream.h | 394 - .../templates/vivado/nnet_utils/nnet_dense.h | 49 - .../vivado/nnet_utils/nnet_dense_compressed.h | 90 - .../vivado/nnet_utils/nnet_dense_latency.h | 72 - .../vivado/nnet_utils/nnet_dense_resource.h | 263 - .../vivado/nnet_utils/nnet_dense_stream.h | 68 - .../templates/vivado/nnet_utils/nnet_embed.h | 45 - .../vivado/nnet_utils/nnet_embed_stream.h | 33 - 
.../templates/vivado/nnet_utils/nnet_garnet.h | 816 -- .../vivado/nnet_utils/nnet_helpers.h | 382 - .../templates/vivado/nnet_utils/nnet_image.h | 41 - .../vivado/nnet_utils/nnet_image_stream.h | 66 - .../templates/vivado/nnet_utils/nnet_math.h | 178 - .../templates/vivado/nnet_utils/nnet_merge.h | 256 - .../vivado/nnet_utils/nnet_merge_stream.h | 370 - .../templates/vivado/nnet_utils/nnet_mult.h | 116 - .../vivado/nnet_utils/nnet_padding.h | 145 - .../vivado/nnet_utils/nnet_padding_stream.h | 85 - .../vivado/nnet_utils/nnet_pooling.h | 375 - .../vivado/nnet_utils/nnet_pooling_stream.h | 609 -- .../vivado/nnet_utils/nnet_recr_activations.h | 56 - .../vivado/nnet_utils/nnet_recurrent.h | 571 -- .../vivado/nnet_utils/nnet_sepconv1d_stream.h | 119 - .../vivado/nnet_utils/nnet_sepconv2d_stream.h | 143 - .../vivado/nnet_utils/nnet_sepconv_stream.h | 306 - .../templates/vivado/nnet_utils/nnet_stream.h | 207 - .../templates/vivado/nnet_utils/nnet_types.h | 64 - .../hls4ml/templates/vivado/vivado_synth.tcl | 6 - .../krnl_rtl_src/krnl_rtl_axi_read_master.sv | 278 - .../krnl_rtl_src/krnl_rtl_axi_write_master.sv | 276 - .../krnl_rtl_src/krnl_rtl_control_s_axi.v | 422 - .../alveo/krnl_rtl_src/krnl_rtl_counter.sv | 87 - .../alveo/krnl_rtl_src/krnl_rtl_int.sv | 415 - .../alveo/krnl_rtl_src/myproject_kernel.v | 169 - .../alveo/python_drivers/axi_stream_driver.py | 101 - .../alveo/tcl_scripts/axi_stream_design.tcl | 109 - .../templates/vivado_accelerator/build_lib.sh | 17 - .../vivado_accelerator/myproject_axi.cpp | 14 - .../vivado_accelerator/myproject_axi.h | 10 - .../python_drivers/axi_stream_driver.py | 75 - .../pynq-z2/tcl_scripts/axi_lite_design.tcl | 26 - .../pynq-z2/tcl_scripts/axi_stream_design.tcl | 59 - .../python_drivers/axi_stream_driver.py | 75 - .../zcu102/tcl_scripts/axi_stream_design.tcl | 58 - hls4ml/hls4ml/utils/__init__.py | 3 - hls4ml/hls4ml/utils/config.py | 363 - hls4ml/hls4ml/utils/example_models.py | 186 - hls4ml/hls4ml/utils/fixed_point_utils.py | 138 - hls4ml/hls4ml/utils/plot.py | 224 - hls4ml/hls4ml/utils/string_utils.py | 25 - hls4ml/hls4ml/utils/symbolic_utils.py | 211 - hls4ml/hls4ml/writer/__init__.py | 12 - hls4ml/hls4ml/writer/quartus_writer.py | 1360 ---- hls4ml/hls4ml/writer/symbolic_writer.py | 114 - hls4ml/hls4ml/writer/vitis_writer.py | 32 - .../writer/vivado_accelerator_writer.py | 430 - hls4ml/hls4ml/writer/vivado_writer.py | 728 -- hls4ml/hls4ml/writer/writers.py | 20 - hls4ml/pyproject.toml | 10 - hls4ml/scripts/hls4ml | 328 - hls4ml/setup.cfg | 62 - hls4ml/setup.py | 4 - hls4ml/test/build-prj.sh | 155 - hls4ml/test/cleanup.sh | 64 - hls4ml/test/compare-reports.sh | 123 - hls4ml/test/convert-keras-models.sh | 81 - hls4ml/test/convert-onnx-models.sh | 79 - hls4ml/test/convert-pytorch-models.sh | 79 - hls4ml/test/gather-reports.sh | 78 - hls4ml/test/hls4ml-keras-test.sh | 27 - hls4ml/test/hls4ml-onnx-test.sh | 33 - hls4ml/test/hls4ml-pytorch-test.sh | 33 - hls4ml/test/keras-models.txt | 53 - hls4ml/test/keras-to-hls.sh | 148 - hls4ml/test/onnx-models.txt | 27 - hls4ml/test/onnx-to-hls.sh | 98 - hls4ml/test/pytest/ci-template.yml | 23 - hls4ml/test/pytest/generate_ci_yaml.py | 36 - hls4ml/test/pytest/test_activations.py | 55 - hls4ml/test/pytest/test_batchnorm.py | 49 - hls4ml/test/pytest/test_batchnorm_pytorch.py | 43 - hls4ml/test/pytest/test_binary_cnn.py | 101 - hls4ml/test/pytest/test_bram_factor.py | 56 - hls4ml/test/pytest/test_causalpadding.py | 36 - hls4ml/test/pytest/test_clone_flatten.py | 60 - hls4ml/test/pytest/test_cnn_mnist.py | 92 - 
hls4ml/test/pytest/test_cnn_mnist_qkeras.py | 104 - hls4ml/test/pytest/test_conv1d.py | 107 - hls4ml/test/pytest/test_conv1d_narrow.py | 61 - hls4ml/test/pytest/test_conv2d_narrow.py | 61 - hls4ml/test/pytest/test_embed.py | 51 - hls4ml/test/pytest/test_extensions.py | 180 - hls4ml/test/pytest/test_flows.py | 121 - hls4ml/test/pytest/test_garnet.py | 105 - hls4ml/test/pytest/test_globalpooling.py | 124 - hls4ml/test/pytest/test_graph.py | 226 - hls4ml/test/pytest/test_keras_api.py | 479 -- hls4ml/test/pytest/test_keras_h5_loader.py | 39 - hls4ml/test/pytest/test_keras_nested_model.py | 173 - hls4ml/test/pytest/test_merge.py | 155 - hls4ml/test/pytest/test_merge_pytorch.py | 72 - hls4ml/test/pytest/test_multi_dense.py | 65 - hls4ml/test/pytest/test_pointwiseconv.py | 156 - hls4ml/test/pytest/test_pooling.py | 124 - hls4ml/test/pytest/test_precision_parsing.py | 29 - hls4ml/test/pytest/test_pytorch_api.py | 742 -- hls4ml/test/pytest/test_qkeras.py | 572 -- hls4ml/test/pytest/test_report.py | 76 - .../pytest/test_report/myproject_csynth.rpt | 196 - .../pytest/test_report/myproject_csynth.xml | 878 -- hls4ml/test/pytest/test_report/vivado_hls.app | 15 - .../test/pytest/test_report/vivado_synth.rpt | 184 - hls4ml/test/pytest/test_reshape.py | 49 - hls4ml/test/pytest/test_rnn.py | 126 - hls4ml/test/pytest/test_sepconv2d.py | 63 - .../pytest/test_sequential_parsing_pytorch.py | 83 - hls4ml/test/pytest/test_softmax.py | 98 - hls4ml/test/pytest/test_softsign.py | 33 - hls4ml/test/pytest/test_sr.py | 71 - hls4ml/test/pytest/test_trace.py | 56 - hls4ml/test/pytest/test_transpose_concat.py | 56 - hls4ml/test/pytest/test_upsampling.py | 69 - hls4ml/test/pytest/test_zeropadding.py | 73 - hls4ml/test/pytorch-models.txt | 18 - hls4ml/test/pytorch-to-hls.sh | 98 - 411 files changed, 4 insertions(+), 78750 deletions(-) create mode 100644 .gitmodules create mode 160000 hls4ml delete mode 100644 hls4ml/.clang-format delete mode 100644 hls4ml/.github/ISSUE_TEMPLATE/bug_report.md delete mode 100644 hls4ml/.github/ISSUE_TEMPLATE/config.yml delete mode 100644 hls4ml/.github/ISSUE_TEMPLATE/feature_request.md delete mode 100644 hls4ml/.github/PULL_REQUEST_TEMPLATE.md delete mode 100644 hls4ml/.github/dependabot.yml delete mode 100644 hls4ml/.github/workflows/build-sphinx.yml delete mode 100644 hls4ml/.github/workflows/pre-commit.yml delete mode 100644 hls4ml/.github/workflows/pypi-publish.yml delete mode 100644 hls4ml/.github/workflows/test-sphinx.yml delete mode 100644 hls4ml/.github/workflows/update-branch-on-pr.yml delete mode 100644 hls4ml/.gitignore delete mode 100644 hls4ml/.gitlab-ci.yml delete mode 100644 hls4ml/.gitmodules delete mode 100644 hls4ml/.pre-commit-config.yaml delete mode 100644 hls4ml/CITATION.cff delete mode 100644 hls4ml/CONTRIBUTING.md delete mode 100644 hls4ml/Jenkinsfile delete mode 100644 hls4ml/LICENSE delete mode 100644 hls4ml/MANIFEST.in delete mode 100644 hls4ml/README.md delete mode 100644 hls4ml/contrib/README.md delete mode 100644 hls4ml/contrib/__init__.py delete mode 100644 hls4ml/contrib/garnet.py delete mode 100644 hls4ml/contrib/kl_layer/README.md delete mode 100644 hls4ml/contrib/kl_layer/kl_layer.h delete mode 100644 hls4ml/contrib/kl_layer/kl_layer.py delete mode 100644 hls4ml/docs/Makefile delete mode 100644 hls4ml/docs/advanced/accelerator.rst delete mode 100644 hls4ml/docs/advanced/extension.rst delete mode 100644 hls4ml/docs/advanced/fifo_depth.rst delete mode 100644 hls4ml/docs/api/configuration.rst delete mode 100644 hls4ml/docs/api/hls-model.rst delete mode 100644 
hls4ml/docs/api/profiling.rst delete mode 100644 hls4ml/docs/command.rst delete mode 100644 hls4ml/docs/concepts.rst delete mode 100644 hls4ml/docs/conf.py delete mode 100644 hls4ml/docs/details.rst delete mode 100644 hls4ml/docs/flows.rst delete mode 100644 hls4ml/docs/img/act_hls4ml.png delete mode 100644 hls4ml/docs/img/act_keras.png delete mode 100644 hls4ml/docs/img/hls4ml_logo.png delete mode 100644 hls4ml/docs/img/hls4ml_logo.svg delete mode 100644 hls4ml/docs/img/hls4ml_logo_lightgrey.png delete mode 100644 hls4ml/docs/img/hls4ml_logo_lightgrey.svg delete mode 100644 hls4ml/docs/img/hls4ml_logo_navbar.png delete mode 100644 hls4ml/docs/img/logo.jpg delete mode 100644 hls4ml/docs/img/logo.png delete mode 100644 hls4ml/docs/img/nn_map_paper_fig_2.png delete mode 100644 hls4ml/docs/img/overview.jpg delete mode 100644 hls4ml/docs/img/overview.pdf delete mode 100644 hls4ml/docs/img/pynqframe.png delete mode 100644 hls4ml/docs/img/reuse_factor_paper_fig_8.png delete mode 100644 hls4ml/docs/img/weights_hls4ml.png delete mode 100644 hls4ml/docs/img/weights_keras.png delete mode 100644 hls4ml/docs/img/zynq_interfaces.png delete mode 100644 hls4ml/docs/index.rst delete mode 100644 hls4ml/docs/reference.rst delete mode 100644 hls4ml/docs/release_notes.rst delete mode 100644 hls4ml/docs/requirements.txt delete mode 100644 hls4ml/docs/setup.rst delete mode 100644 hls4ml/docs/status.rst delete mode 100644 hls4ml/hls4ml/__init__.py delete mode 100644 hls4ml/hls4ml/backends/__init__.py delete mode 100644 hls4ml/hls4ml/backends/backend.py delete mode 100644 hls4ml/hls4ml/backends/fpga/__init__.py delete mode 100644 hls4ml/hls4ml/backends/fpga/fpga_backend.py delete mode 100644 hls4ml/hls4ml/backends/fpga/fpga_layers.py delete mode 100644 hls4ml/hls4ml/backends/fpga/fpga_types.py delete mode 100644 hls4ml/hls4ml/backends/fpga/passes/__init__.py delete mode 100644 hls4ml/hls4ml/backends/fpga/passes/bn_quant.py delete mode 100644 hls4ml/hls4ml/backends/fpga/passes/bram_weights.py delete mode 100644 hls4ml/hls4ml/backends/fpga/passes/clone.py delete mode 100644 hls4ml/hls4ml/backends/fpga/passes/codegen.py delete mode 100644 hls4ml/hls4ml/backends/fpga/passes/embedding.py delete mode 100644 hls4ml/hls4ml/backends/fpga/passes/final_reshape.py delete mode 100644 hls4ml/hls4ml/backends/fpga/passes/inplace_parallel_reshape.py delete mode 100644 hls4ml/hls4ml/backends/fpga/passes/inplace_stream_flatten.py delete mode 100644 hls4ml/hls4ml/backends/fpga/passes/remove_softmax.py delete mode 100644 hls4ml/hls4ml/backends/fpga/passes/repack_stream.py delete mode 100644 hls4ml/hls4ml/backends/fpga/passes/xnor_pooling.py delete mode 100644 hls4ml/hls4ml/backends/quartus/__init__.py delete mode 100644 hls4ml/hls4ml/backends/quartus/passes/__init__.py delete mode 100644 hls4ml/hls4ml/backends/quartus/passes/convolution_templates.py delete mode 100644 hls4ml/hls4ml/backends/quartus/passes/convolution_winograd.py delete mode 100644 hls4ml/hls4ml/backends/quartus/passes/core_templates.py delete mode 100644 hls4ml/hls4ml/backends/quartus/passes/merge_templates.py delete mode 100644 hls4ml/hls4ml/backends/quartus/passes/pointwise.py delete mode 100644 hls4ml/hls4ml/backends/quartus/passes/pooling_templates.py delete mode 100644 hls4ml/hls4ml/backends/quartus/passes/quantization_templates.py delete mode 100644 hls4ml/hls4ml/backends/quartus/passes/recurrent_templates.py delete mode 100644 hls4ml/hls4ml/backends/quartus/passes/reshaping_templates.py delete mode 100644 
hls4ml/hls4ml/backends/quartus/passes/resource_strategy.py delete mode 100644 hls4ml/hls4ml/backends/quartus/passes/transform_types.py delete mode 100644 hls4ml/hls4ml/backends/quartus/quartus_backend.py delete mode 100644 hls4ml/hls4ml/backends/symbolic/__init__.py delete mode 100644 hls4ml/hls4ml/backends/symbolic/passes/__init__.py delete mode 100644 hls4ml/hls4ml/backends/symbolic/passes/expr_templates.py delete mode 100644 hls4ml/hls4ml/backends/symbolic/passes/validate_lut.py delete mode 100644 hls4ml/hls4ml/backends/symbolic/symbolic_backend.py delete mode 100644 hls4ml/hls4ml/backends/template.py delete mode 100644 hls4ml/hls4ml/backends/vitis/__init__.py delete mode 100644 hls4ml/hls4ml/backends/vitis/passes/__init__.py delete mode 100644 hls4ml/hls4ml/backends/vitis/passes/feature_check.py delete mode 100644 hls4ml/hls4ml/backends/vitis/vitis_backend.py delete mode 100644 hls4ml/hls4ml/backends/vivado/__init__.py delete mode 100644 hls4ml/hls4ml/backends/vivado/passes/__init__.py delete mode 100644 hls4ml/hls4ml/backends/vivado/passes/broadcast_stream.py delete mode 100644 hls4ml/hls4ml/backends/vivado/passes/conv_same_pad.py delete mode 100644 hls4ml/hls4ml/backends/vivado/passes/conv_stream.py delete mode 100644 hls4ml/hls4ml/backends/vivado/passes/convolution_templates.py delete mode 100644 hls4ml/hls4ml/backends/vivado/passes/core_templates.py delete mode 100644 hls4ml/hls4ml/backends/vivado/passes/fifo_depth_optimization.py delete mode 100644 hls4ml/hls4ml/backends/vivado/passes/garnet_templates.py delete mode 100644 hls4ml/hls4ml/backends/vivado/passes/merge_templates.py delete mode 100644 hls4ml/hls4ml/backends/vivado/passes/pointwise.py delete mode 100644 hls4ml/hls4ml/backends/vivado/passes/pooling_templates.py delete mode 100644 hls4ml/hls4ml/backends/vivado/passes/quantization_templates.py delete mode 100644 hls4ml/hls4ml/backends/vivado/passes/recurrent_templates.py delete mode 100644 hls4ml/hls4ml/backends/vivado/passes/reshaping_templates.py delete mode 100644 hls4ml/hls4ml/backends/vivado/passes/resource_strategy.py delete mode 100644 hls4ml/hls4ml/backends/vivado/passes/transform_types.py delete mode 100644 hls4ml/hls4ml/backends/vivado/vivado_backend.py delete mode 100644 hls4ml/hls4ml/backends/vivado_accelerator/__init__.py delete mode 100644 hls4ml/hls4ml/backends/vivado_accelerator/passes/__init__.py delete mode 100644 hls4ml/hls4ml/backends/vivado_accelerator/passes/fifo_depth_optimization.py delete mode 100644 hls4ml/hls4ml/backends/vivado_accelerator/supported_boards.json delete mode 100644 hls4ml/hls4ml/backends/vivado_accelerator/vivado_accelerator_backend.py delete mode 100644 hls4ml/hls4ml/backends/vivado_accelerator/vivado_accelerator_config.py delete mode 100644 hls4ml/hls4ml/converters/__init__.py delete mode 100644 hls4ml/hls4ml/converters/keras/__init__.py delete mode 100644 hls4ml/hls4ml/converters/keras/convolution.py delete mode 100644 hls4ml/hls4ml/converters/keras/core.py delete mode 100644 hls4ml/hls4ml/converters/keras/graph.py delete mode 100644 hls4ml/hls4ml/converters/keras/merge.py delete mode 100644 hls4ml/hls4ml/converters/keras/model.py delete mode 100644 hls4ml/hls4ml/converters/keras/pooling.py delete mode 100644 hls4ml/hls4ml/converters/keras/qkeras.py delete mode 100644 hls4ml/hls4ml/converters/keras/recurrent.py delete mode 100644 hls4ml/hls4ml/converters/keras/reshape.py delete mode 100644 hls4ml/hls4ml/converters/keras/reshaping.py delete mode 100644 hls4ml/hls4ml/converters/keras_to_hls.py delete mode 100644 
hls4ml/hls4ml/converters/onnx/__init__.py delete mode 100644 hls4ml/hls4ml/converters/onnx/convolution.py delete mode 100644 hls4ml/hls4ml/converters/onnx/core.py delete mode 100644 hls4ml/hls4ml/converters/onnx/merge.py delete mode 100644 hls4ml/hls4ml/converters/onnx/pooling.py delete mode 100644 hls4ml/hls4ml/converters/onnx/reshape.py delete mode 100644 hls4ml/hls4ml/converters/onnx_to_hls.py delete mode 100644 hls4ml/hls4ml/converters/pytorch/__init__.py delete mode 100644 hls4ml/hls4ml/converters/pytorch/convolution.py delete mode 100644 hls4ml/hls4ml/converters/pytorch/core.py delete mode 100644 hls4ml/hls4ml/converters/pytorch/merge.py delete mode 100644 hls4ml/hls4ml/converters/pytorch/pooling.py delete mode 100644 hls4ml/hls4ml/converters/pytorch/reshape.py delete mode 100644 hls4ml/hls4ml/converters/pytorch_to_hls.py delete mode 100644 hls4ml/hls4ml/converters/utils.py delete mode 100644 hls4ml/hls4ml/model/__init__.py delete mode 100644 hls4ml/hls4ml/model/attributes.py delete mode 100644 hls4ml/hls4ml/model/flow/__init__.py delete mode 100644 hls4ml/hls4ml/model/flow/flow.py delete mode 100644 hls4ml/hls4ml/model/graph.py delete mode 100644 hls4ml/hls4ml/model/layers.py delete mode 100644 hls4ml/hls4ml/model/optimizer/__init__.py delete mode 100644 hls4ml/hls4ml/model/optimizer/optimizer.py delete mode 100644 hls4ml/hls4ml/model/optimizer/passes/__init__.py delete mode 100644 hls4ml/hls4ml/model/optimizer/passes/bn_fuse.py delete mode 100644 hls4ml/hls4ml/model/optimizer/passes/convert_to_channels_last.py delete mode 100644 hls4ml/hls4ml/model/optimizer/passes/expand_layer_group.py delete mode 100644 hls4ml/hls4ml/model/optimizer/passes/fuse_biasadd.py delete mode 100644 hls4ml/hls4ml/model/optimizer/passes/multi_dense.py delete mode 100644 hls4ml/hls4ml/model/optimizer/passes/nop.py delete mode 100644 hls4ml/hls4ml/model/optimizer/passes/precision_merge.py delete mode 100644 hls4ml/hls4ml/model/optimizer/passes/qkeras.py delete mode 100644 hls4ml/hls4ml/model/optimizer/passes/stamp.py delete mode 100644 hls4ml/hls4ml/model/optimizer/passes/transpose_opt.py delete mode 100644 hls4ml/hls4ml/model/profiling.py delete mode 100644 hls4ml/hls4ml/model/types.py delete mode 100644 hls4ml/hls4ml/report/__init__.py delete mode 100644 hls4ml/hls4ml/report/quartus_report.py delete mode 100644 hls4ml/hls4ml/report/vivado_report.py delete mode 100644 hls4ml/hls4ml/templates/quartus/Makefile delete mode 100644 hls4ml/hls4ml/templates/quartus/ac_types/ac_channel.h delete mode 100644 hls4ml/hls4ml/templates/quartus/ac_types/ac_complex.h delete mode 100644 hls4ml/hls4ml/templates/quartus/ac_types/ac_fixed.h delete mode 100644 hls4ml/hls4ml/templates/quartus/ac_types/ac_float.h delete mode 100644 hls4ml/hls4ml/templates/quartus/ac_types/ac_int.h delete mode 100644 hls4ml/hls4ml/templates/quartus/ac_types/ac_sc.h delete mode 100644 hls4ml/hls4ml/templates/quartus/ac_types/ac_std_float.h delete mode 100644 hls4ml/hls4ml/templates/quartus/ac_types/stream.h delete mode 100755 hls4ml/hls4ml/templates/quartus/build_lib.sh delete mode 100644 hls4ml/hls4ml/templates/quartus/firmware/defines.h delete mode 100644 hls4ml/hls4ml/templates/quartus/firmware/myproject.cpp delete mode 100644 hls4ml/hls4ml/templates/quartus/firmware/myproject.h delete mode 100644 hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_activation.h delete mode 100644 hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_activation_stream.h delete mode 100644 
hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_batchnorm.h delete mode 100644 hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_batchnorm_stream.h delete mode 100644 hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_common.h delete mode 100644 hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_conv1d.h delete mode 100644 hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_conv1d_resource.h delete mode 100644 hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_conv1d_stream.h delete mode 100644 hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_conv2d.h delete mode 100644 hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_conv2d_resource.h delete mode 100644 hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_conv2d_stream.h delete mode 100644 hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_dense.h delete mode 100644 hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_dense_compressed.h delete mode 100644 hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_dense_stream.h delete mode 100644 hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_embed.h delete mode 100644 hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_embed_stream.h delete mode 100644 hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_helpers.h delete mode 100644 hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_merge.h delete mode 100644 hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_merge_stream.h delete mode 100644 hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_mult.h delete mode 100644 hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_padding.h delete mode 100644 hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_padding_stream.h delete mode 100644 hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_pooling.h delete mode 100644 hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_pooling_stream.h delete mode 100644 hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_recurrent.h delete mode 100644 hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_recurrent_activation.h delete mode 100644 hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_recurrent_stream.h delete mode 100644 hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_resize.h delete mode 100644 hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_resize_stream.h delete mode 100644 hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_stream.h delete mode 100644 hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_transpose.h delete mode 100644 hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_transpose_stream.h delete mode 100644 hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_types.h delete mode 100644 hls4ml/hls4ml/templates/quartus/firmware/parameters.h delete mode 100644 hls4ml/hls4ml/templates/quartus/myproject_bridge.cpp delete mode 100644 hls4ml/hls4ml/templates/quartus/myproject_test_parallel.cpp delete mode 100644 hls4ml/hls4ml/templates/quartus/myproject_test_stream.cpp delete mode 100755 hls4ml/hls4ml/templates/symbolic/build_lib.sh delete mode 100644 hls4ml/hls4ml/templates/vitis/nnet_utils/nnet_conv1d.h delete mode 100644 hls4ml/hls4ml/templates/vitis/nnet_utils/nnet_conv1d_latency.h delete mode 100644 hls4ml/hls4ml/templates/vitis/nnet_utils/nnet_conv1d_resource.h delete mode 100644 hls4ml/hls4ml/templates/vitis/nnet_utils/nnet_conv1d_stream.h delete mode 100644 hls4ml/hls4ml/templates/vitis/nnet_utils/nnet_conv2d.h delete mode 100644 
hls4ml/hls4ml/templates/vitis/nnet_utils/nnet_conv2d_latency.h delete mode 100644 hls4ml/hls4ml/templates/vitis/nnet_utils/nnet_conv2d_resource.h delete mode 100644 hls4ml/hls4ml/templates/vitis/nnet_utils/nnet_conv2d_stream.h delete mode 100644 hls4ml/hls4ml/templates/vitis/nnet_utils/nnet_dense_resource.h delete mode 100644 hls4ml/hls4ml/templates/vitis/nnet_utils/nnet_dense_stream.h delete mode 100644 hls4ml/hls4ml/templates/vitis/nnet_utils/nnet_pooling.h delete mode 100644 hls4ml/hls4ml/templates/vitis/nnet_utils/nnet_pooling_stream.h delete mode 100644 hls4ml/hls4ml/templates/vitis/nnet_utils/nnet_sepconv1d_stream.h delete mode 100644 hls4ml/hls4ml/templates/vitis/nnet_utils/nnet_sepconv2d_stream.h delete mode 100644 hls4ml/hls4ml/templates/vivado/ap_types/ap_common.h delete mode 100644 hls4ml/hls4ml/templates/vivado/ap_types/ap_decl.h delete mode 100644 hls4ml/hls4ml/templates/vivado/ap_types/ap_fixed.h delete mode 100644 hls4ml/hls4ml/templates/vivado/ap_types/ap_fixed_base.h delete mode 100644 hls4ml/hls4ml/templates/vivado/ap_types/ap_fixed_ref.h delete mode 100644 hls4ml/hls4ml/templates/vivado/ap_types/ap_fixed_special.h delete mode 100644 hls4ml/hls4ml/templates/vivado/ap_types/ap_int.h delete mode 100644 hls4ml/hls4ml/templates/vivado/ap_types/ap_int_base.h delete mode 100644 hls4ml/hls4ml/templates/vivado/ap_types/ap_int_ref.h delete mode 100644 hls4ml/hls4ml/templates/vivado/ap_types/ap_int_special.h delete mode 100644 hls4ml/hls4ml/templates/vivado/ap_types/ap_shift_reg.h delete mode 100644 hls4ml/hls4ml/templates/vivado/ap_types/etc/ap_private.h delete mode 100644 hls4ml/hls4ml/templates/vivado/ap_types/hls_math.h delete mode 100644 hls4ml/hls4ml/templates/vivado/ap_types/hls_stream.h delete mode 100644 hls4ml/hls4ml/templates/vivado/ap_types/utils/x_hls_utils.h delete mode 100755 hls4ml/hls4ml/templates/vivado/build_lib.sh delete mode 100644 hls4ml/hls4ml/templates/vivado/build_prj.tcl delete mode 100644 hls4ml/hls4ml/templates/vivado/firmware/defines.h delete mode 100644 hls4ml/hls4ml/templates/vivado/firmware/myproject.cpp delete mode 100644 hls4ml/hls4ml/templates/vivado/firmware/myproject.h delete mode 100644 hls4ml/hls4ml/templates/vivado/firmware/parameters.h delete mode 100644 hls4ml/hls4ml/templates/vivado/myproject_bridge.cpp delete mode 100644 hls4ml/hls4ml/templates/vivado/myproject_test.cpp delete mode 100644 hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_activation.h delete mode 100644 hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_activation_stream.h delete mode 100644 hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_array.h delete mode 100644 hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_batchnorm.h delete mode 100644 hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_batchnorm_stream.h delete mode 100644 hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_code_gen.h delete mode 100644 hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_common.h delete mode 100644 hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_conv1d.h delete mode 100644 hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_conv1d_latency.h delete mode 100644 hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_conv1d_resource.h delete mode 100644 hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_conv1d_stream.h delete mode 100644 hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_conv2d.h delete mode 100644 hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_conv2d_latency.h delete mode 100644 hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_conv2d_resource.h delete mode 100644 
hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_conv2d_stream.h delete mode 100644 hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_conv_stream.h delete mode 100644 hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_dense.h delete mode 100644 hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_dense_compressed.h delete mode 100644 hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_dense_latency.h delete mode 100644 hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_dense_resource.h delete mode 100644 hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_dense_stream.h delete mode 100644 hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_embed.h delete mode 100644 hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_embed_stream.h delete mode 100644 hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_garnet.h delete mode 100644 hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_helpers.h delete mode 100644 hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_image.h delete mode 100644 hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_image_stream.h delete mode 100644 hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_math.h delete mode 100644 hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_merge.h delete mode 100644 hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_merge_stream.h delete mode 100644 hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_mult.h delete mode 100644 hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_padding.h delete mode 100644 hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_padding_stream.h delete mode 100644 hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_pooling.h delete mode 100644 hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_pooling_stream.h delete mode 100644 hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_recr_activations.h delete mode 100644 hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_recurrent.h delete mode 100644 hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_sepconv1d_stream.h delete mode 100644 hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_sepconv2d_stream.h delete mode 100644 hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_sepconv_stream.h delete mode 100644 hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_stream.h delete mode 100644 hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_types.h delete mode 100644 hls4ml/hls4ml/templates/vivado/vivado_synth.tcl delete mode 100644 hls4ml/hls4ml/templates/vivado_accelerator/alveo/krnl_rtl_src/krnl_rtl_axi_read_master.sv delete mode 100644 hls4ml/hls4ml/templates/vivado_accelerator/alveo/krnl_rtl_src/krnl_rtl_axi_write_master.sv delete mode 100644 hls4ml/hls4ml/templates/vivado_accelerator/alveo/krnl_rtl_src/krnl_rtl_control_s_axi.v delete mode 100644 hls4ml/hls4ml/templates/vivado_accelerator/alveo/krnl_rtl_src/krnl_rtl_counter.sv delete mode 100644 hls4ml/hls4ml/templates/vivado_accelerator/alveo/krnl_rtl_src/krnl_rtl_int.sv delete mode 100644 hls4ml/hls4ml/templates/vivado_accelerator/alveo/krnl_rtl_src/myproject_kernel.v delete mode 100644 hls4ml/hls4ml/templates/vivado_accelerator/alveo/python_drivers/axi_stream_driver.py delete mode 100644 hls4ml/hls4ml/templates/vivado_accelerator/alveo/tcl_scripts/axi_stream_design.tcl delete mode 100644 hls4ml/hls4ml/templates/vivado_accelerator/build_lib.sh delete mode 100644 hls4ml/hls4ml/templates/vivado_accelerator/myproject_axi.cpp delete mode 100644 hls4ml/hls4ml/templates/vivado_accelerator/myproject_axi.h delete mode 100644 hls4ml/hls4ml/templates/vivado_accelerator/pynq-z2/python_drivers/axi_stream_driver.py delete mode 100644 hls4ml/hls4ml/templates/vivado_accelerator/pynq-z2/tcl_scripts/axi_lite_design.tcl delete mode 100644 
hls4ml/hls4ml/templates/vivado_accelerator/pynq-z2/tcl_scripts/axi_stream_design.tcl delete mode 100644 hls4ml/hls4ml/templates/vivado_accelerator/zcu102/python_drivers/axi_stream_driver.py delete mode 100644 hls4ml/hls4ml/templates/vivado_accelerator/zcu102/tcl_scripts/axi_stream_design.tcl delete mode 100644 hls4ml/hls4ml/utils/__init__.py delete mode 100644 hls4ml/hls4ml/utils/config.py delete mode 100644 hls4ml/hls4ml/utils/example_models.py delete mode 100644 hls4ml/hls4ml/utils/fixed_point_utils.py delete mode 100644 hls4ml/hls4ml/utils/plot.py delete mode 100644 hls4ml/hls4ml/utils/string_utils.py delete mode 100644 hls4ml/hls4ml/utils/symbolic_utils.py delete mode 100644 hls4ml/hls4ml/writer/__init__.py delete mode 100644 hls4ml/hls4ml/writer/quartus_writer.py delete mode 100644 hls4ml/hls4ml/writer/symbolic_writer.py delete mode 100644 hls4ml/hls4ml/writer/vitis_writer.py delete mode 100644 hls4ml/hls4ml/writer/vivado_accelerator_writer.py delete mode 100644 hls4ml/hls4ml/writer/vivado_writer.py delete mode 100644 hls4ml/hls4ml/writer/writers.py delete mode 100644 hls4ml/pyproject.toml delete mode 100755 hls4ml/scripts/hls4ml delete mode 100644 hls4ml/setup.cfg delete mode 100644 hls4ml/setup.py delete mode 100755 hls4ml/test/build-prj.sh delete mode 100755 hls4ml/test/cleanup.sh delete mode 100755 hls4ml/test/compare-reports.sh delete mode 100755 hls4ml/test/convert-keras-models.sh delete mode 100755 hls4ml/test/convert-onnx-models.sh delete mode 100755 hls4ml/test/convert-pytorch-models.sh delete mode 100755 hls4ml/test/gather-reports.sh delete mode 100755 hls4ml/test/hls4ml-keras-test.sh delete mode 100755 hls4ml/test/hls4ml-onnx-test.sh delete mode 100755 hls4ml/test/hls4ml-pytorch-test.sh delete mode 100644 hls4ml/test/keras-models.txt delete mode 100755 hls4ml/test/keras-to-hls.sh delete mode 100644 hls4ml/test/onnx-models.txt delete mode 100755 hls4ml/test/onnx-to-hls.sh delete mode 100644 hls4ml/test/pytest/ci-template.yml delete mode 100644 hls4ml/test/pytest/generate_ci_yaml.py delete mode 100644 hls4ml/test/pytest/test_activations.py delete mode 100644 hls4ml/test/pytest/test_batchnorm.py delete mode 100644 hls4ml/test/pytest/test_batchnorm_pytorch.py delete mode 100644 hls4ml/test/pytest/test_binary_cnn.py delete mode 100644 hls4ml/test/pytest/test_bram_factor.py delete mode 100644 hls4ml/test/pytest/test_causalpadding.py delete mode 100644 hls4ml/test/pytest/test_clone_flatten.py delete mode 100644 hls4ml/test/pytest/test_cnn_mnist.py delete mode 100644 hls4ml/test/pytest/test_cnn_mnist_qkeras.py delete mode 100644 hls4ml/test/pytest/test_conv1d.py delete mode 100644 hls4ml/test/pytest/test_conv1d_narrow.py delete mode 100644 hls4ml/test/pytest/test_conv2d_narrow.py delete mode 100644 hls4ml/test/pytest/test_embed.py delete mode 100644 hls4ml/test/pytest/test_extensions.py delete mode 100644 hls4ml/test/pytest/test_flows.py delete mode 100644 hls4ml/test/pytest/test_garnet.py delete mode 100644 hls4ml/test/pytest/test_globalpooling.py delete mode 100644 hls4ml/test/pytest/test_graph.py delete mode 100644 hls4ml/test/pytest/test_keras_api.py delete mode 100644 hls4ml/test/pytest/test_keras_h5_loader.py delete mode 100755 hls4ml/test/pytest/test_keras_nested_model.py delete mode 100644 hls4ml/test/pytest/test_merge.py delete mode 100644 hls4ml/test/pytest/test_merge_pytorch.py delete mode 100644 hls4ml/test/pytest/test_multi_dense.py delete mode 100644 hls4ml/test/pytest/test_pointwiseconv.py delete mode 100644 hls4ml/test/pytest/test_pooling.py delete mode 100644 
hls4ml/test/pytest/test_precision_parsing.py
delete mode 100644 hls4ml/test/pytest/test_pytorch_api.py
delete mode 100644 hls4ml/test/pytest/test_qkeras.py
delete mode 100644 hls4ml/test/pytest/test_report.py
delete mode 100644 hls4ml/test/pytest/test_report/myproject_csynth.rpt
delete mode 100644 hls4ml/test/pytest/test_report/myproject_csynth.xml
delete mode 100644 hls4ml/test/pytest/test_report/vivado_hls.app
delete mode 100644 hls4ml/test/pytest/test_report/vivado_synth.rpt
delete mode 100755 hls4ml/test/pytest/test_reshape.py
delete mode 100644 hls4ml/test/pytest/test_rnn.py
delete mode 100644 hls4ml/test/pytest/test_sepconv2d.py
delete mode 100644 hls4ml/test/pytest/test_sequential_parsing_pytorch.py
delete mode 100644 hls4ml/test/pytest/test_softmax.py
delete mode 100644 hls4ml/test/pytest/test_softsign.py
delete mode 100644 hls4ml/test/pytest/test_sr.py
delete mode 100644 hls4ml/test/pytest/test_trace.py
delete mode 100644 hls4ml/test/pytest/test_transpose_concat.py
delete mode 100644 hls4ml/test/pytest/test_upsampling.py
delete mode 100644 hls4ml/test/pytest/test_zeropadding.py
delete mode 100644 hls4ml/test/pytorch-models.txt
delete mode 100755 hls4ml/test/pytorch-to-hls.sh

diff --git a/.gitmodules b/.gitmodules
new file mode 100644
index 0000000..b560a90
--- /dev/null
+++ b/.gitmodules
@@ -0,0 +1,3 @@
+[submodule "hls4ml"]
+	path = hls4ml
+	url = https://github.com/jmduarte/hls4ml
diff --git a/hls4ml b/hls4ml
new file mode 160000
index 0000000..d03b3b9
--- /dev/null
+++ b/hls4ml
@@ -0,0 +1 @@
+Subproject commit d03b3b946e71c9ad0a0ea27775c0398bd1945da8
diff --git a/hls4ml/.clang-format b/hls4ml/.clang-format
deleted file mode 100644
index 17cfc24..0000000
--- a/hls4ml/.clang-format
+++ /dev/null
@@ -1,191 +0,0 @@
----
-Language: Cpp
-# BasedOnStyle: LLVM
-AccessModifierOffset: -2
-AlignAfterOpenBracket: Align
-AlignArrayOfStructures: None
-AlignConsecutiveMacros: None
-AlignConsecutiveAssignments: None
-AlignConsecutiveBitFields: None
-AlignConsecutiveDeclarations: None
-AlignEscapedNewlines: Right
-AlignOperands: Align
-AlignTrailingComments: true
-AllowAllArgumentsOnNextLine: true
-AllowAllParametersOfDeclarationOnNextLine: true
-AllowShortEnumsOnASingleLine: true
-AllowShortBlocksOnASingleLine: Never
-AllowShortCaseLabelsOnASingleLine: false
-AllowShortFunctionsOnASingleLine: All
-AllowShortLambdasOnASingleLine: All
-AllowShortIfStatementsOnASingleLine: Never
-AllowShortLoopsOnASingleLine: false
-AlwaysBreakAfterDefinitionReturnType: None
-AlwaysBreakAfterReturnType: None
-AlwaysBreakBeforeMultilineStrings: false
-AlwaysBreakTemplateDeclarations: MultiLine
-AttributeMacros:
-  - __capability
-BinPackArguments: true
-BinPackParameters: true
-BraceWrapping:
-  AfterCaseLabel: false
-  AfterClass: false
-  AfterControlStatement: Never
-  AfterEnum: false
-  AfterFunction: false
-  AfterNamespace: false
-  AfterObjCDeclaration: false
-  AfterStruct: false
-  AfterUnion: false
-  AfterExternBlock: false
-  BeforeCatch: false
-  BeforeElse: false
-  BeforeLambdaBody: false
-  BeforeWhile: false
-  IndentBraces: false
-  SplitEmptyFunction: true
-  SplitEmptyRecord: true
-  SplitEmptyNamespace: true
-BreakBeforeBinaryOperators: None
-BreakBeforeConceptDeclarations: true
-BreakBeforeBraces: Attach
-BreakBeforeInheritanceComma: false
-BreakInheritanceList: BeforeColon
-BreakBeforeTernaryOperators: true
-BreakConstructorInitializersBeforeComma: false
-BreakConstructorInitializers: BeforeColon
-BreakAfterJavaFieldAnnotations: false
-BreakStringLiterals: true
-ColumnLimit: 125
-CommentPragmas: '^ IWYU pragma:'
-QualifierAlignment: Leave
-CompactNamespaces: false
-ConstructorInitializerIndentWidth: 4
-ContinuationIndentWidth: 4
-Cpp11BracedListStyle: true
-DeriveLineEnding: true
-DerivePointerAlignment: false
-DisableFormat: false
-EmptyLineAfterAccessModifier: Never
-EmptyLineBeforeAccessModifier: LogicalBlock
-ExperimentalAutoDetectBinPacking: false
-PackConstructorInitializers: BinPack
-BasedOnStyle: ''
-ConstructorInitializerAllOnOneLineOrOnePerLine: false
-AllowAllConstructorInitializersOnNextLine: true
-FixNamespaceComments: true
-ForEachMacros:
-  - foreach
-  - Q_FOREACH
-  - BOOST_FOREACH
-IfMacros:
-  - KJ_IF_MAYBE
-IncludeBlocks: Preserve
-IncludeCategories:
-  - Regex: '^"(llvm|llvm-c|clang|clang-c)/'
-    Priority: 2
-    SortPriority: 0
-    CaseSensitive: false
-  - Regex: '^(<|"(gtest|gmock|isl|json)/)'
-    Priority: 3
-    SortPriority: 0
-    CaseSensitive: false
-  - Regex: '.*'
-    Priority: 1
-    SortPriority: 0
-    CaseSensitive: false
-IncludeIsMainRegex: '(Test)?$'
-IncludeIsMainSourceRegex: ''
-IndentAccessModifiers: false
-IndentCaseLabels: false
-IndentCaseBlocks: false
-IndentGotoLabels: true
-IndentPPDirectives: None
-IndentExternBlock: AfterExternBlock
-IndentRequires: false
-IndentWidth: 4
-IndentWrappedFunctionNames: false
-InsertTrailingCommas: None
-JavaScriptQuotes: Leave
-JavaScriptWrapImports: true
-KeepEmptyLinesAtTheStartOfBlocks: true
-LambdaBodyIndentation: Signature
-MacroBlockBegin: ''
-MacroBlockEnd: ''
-MaxEmptyLinesToKeep: 1
-NamespaceIndentation: None
-ObjCBinPackProtocolList: Auto
-ObjCBlockIndentWidth: 2
-ObjCBreakBeforeNestedBlockParam: true
-ObjCSpaceAfterProperty: false
-ObjCSpaceBeforeProtocolList: true
-PenaltyBreakAssignment: 2
-PenaltyBreakBeforeFirstCallParameter: 19
-PenaltyBreakComment: 300
-PenaltyBreakFirstLessLess: 120
-PenaltyBreakOpenParenthesis: 0
-PenaltyBreakString: 1000
-PenaltyBreakTemplateDeclaration: 10
-PenaltyExcessCharacter: 1000000
-PenaltyReturnTypeOnItsOwnLine: 60
-PenaltyIndentedWhitespace: 0
-PointerAlignment: Right
-PPIndentWidth: -1
-ReferenceAlignment: Pointer
-ReflowComments: true
-RemoveBracesLLVM: false
-SeparateDefinitionBlocks: Leave
-ShortNamespaceLines: 1
-SortIncludes: CaseSensitive
-SortJavaStaticImport: Before
-SortUsingDeclarations: true
-SpaceAfterCStyleCast: false
-SpaceAfterLogicalNot: false
-SpaceAfterTemplateKeyword: true
-SpaceBeforeAssignmentOperators: true
-SpaceBeforeCaseColon: false
-SpaceBeforeCpp11BracedList: false
-SpaceBeforeCtorInitializerColon: true
-SpaceBeforeInheritanceColon: true
-SpaceBeforeParens: ControlStatements
-SpaceBeforeParensOptions:
-  AfterControlStatements: true
-  AfterForeachMacros: true
-  AfterFunctionDefinitionName: false
-  AfterFunctionDeclarationName: false
-  AfterIfMacros: true
-  AfterOverloadedOperator: false
-  BeforeNonEmptyParentheses: false
-SpaceAroundPointerQualifiers: Default
-SpaceBeforeRangeBasedForLoopColon: true
-SpaceInEmptyBlock: false
-SpaceInEmptyParentheses: false
-SpacesBeforeTrailingComments: 1
-SpacesInAngles: Never
-SpacesInConditionalStatement: false
-SpacesInContainerLiterals: true
-SpacesInCStyleCastParentheses: false
-SpacesInLineCommentPrefix:
-  Minimum: 1
-  Maximum: -1
-SpacesInParentheses: false
-SpacesInSquareBrackets: false
-SpaceBeforeSquareBrackets: false
-BitFieldColonSpacing: Both
-Standard: Latest
-StatementAttributeLikeMacros:
-  - Q_EMIT
-StatementMacros:
-  - Q_UNUSED
-  - QT_REQUIRE_VERSION
-TabWidth: 8
-UseCRLF: false
-UseTab: Never
-WhitespaceSensitiveMacros:
-  - STRINGIZE
-  - PP_STRINGIZE
-  - BOOST_PP_STRINGIZE
-  - NS_SWIFT_NAME
-  - CF_SWIFT_NAME
-...
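The .gitmodules and gitlink hunks near the top of this diff are the core of the patch: .gitmodules registers the fork as a submodule, and the mode-160000 gitlink pins it to commit d03b3b9. A minimal sketch of the equivalent command-line workflow, assuming the fork URL from the .gitmodules hunk; <repo-url> is a placeholder for the superproject URL, which this patch does not specify:

    # replace the vendored copy with a submodule
    git rm -r hls4ml
    git submodule add https://github.com/jmduarte/hls4ml hls4ml
    git commit -m "use submodule"

    # a fresh clone must now initialize the submodule
    git clone --recurse-submodules <repo-url>
    # or, inside an existing checkout:
    git submodule update --init --recursive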
diff --git a/hls4ml/.github/ISSUE_TEMPLATE/bug_report.md b/hls4ml/.github/ISSUE_TEMPLATE/bug_report.md
deleted file mode 100644
index d0aa96a..0000000
--- a/hls4ml/.github/ISSUE_TEMPLATE/bug_report.md
+++ /dev/null
@@ -1,44 +0,0 @@
----
-name: Bug report
-about: Something isn't working as expected
-title: ''
-labels: bug
-assignees: ''
-
----
-
-
-## Prerequisites
-Please make sure to check off these prerequisites before submitting a bug report.
-- [ ] Test that the bug appears on the current version of the master branch. Make sure to include the commit hash of the commit you checked out.
-- [ ] Check that the issue hasn't already been reported, by checking the currently open issues.
-- [ ] If there are steps to reproduce the problem, make sure to write them down below.
-- [ ] If relevant, please include the hls4ml project files, which were created directly before and/or after the bug.
-
-## Quick summary
-Please give a brief and concise description of the bug.
-
-## Details
-Please add to the following sections to describe the bug as accurately as possible.
-
-### Steps to Reproduce
-Add what needs to be done to reproduce the bug. Add *commented* code examples and make sure to include the original model files / code, and the commit hash you are working on.
-
-1. Clone the hls4ml repository
-2. Checkout the master branch, with commit hash: [...]
-3. Run conversion [...] on model file with code [...]
-4. [Further steps ...]
-
-### Expected behavior
-Please add a brief description of what you expected to happen.
-
-### Actual behavior
-Describe what actually happens instead.
-
-## Optional
-
-### Possible fix
-If you already know where the issue stems from, or you have a hint, please let us know.
-
-### Additional context
-Add any other context about the problem here.
diff --git a/hls4ml/.github/ISSUE_TEMPLATE/config.yml b/hls4ml/.github/ISSUE_TEMPLATE/config.yml
deleted file mode 100644
index 907ac6d..0000000
--- a/hls4ml/.github/ISSUE_TEMPLATE/config.yml
+++ /dev/null
@@ -1,5 +0,0 @@
-blank_issues_enabled: false
-contact_links:
-  - name: Talk and engage with the community
-    url: https://github.com/fastmachinelearning/hls4ml/discussions/categories/general
-    about: Check out the GitHub discussions page for hls4ml. This is the best way to get in touch with us, in particular if you have a question about hls4ml or a general problem that is likely not a bug.
diff --git a/hls4ml/.github/ISSUE_TEMPLATE/feature_request.md b/hls4ml/.github/ISSUE_TEMPLATE/feature_request.md
deleted file mode 100644
index 1739f9d..0000000
--- a/hls4ml/.github/ISSUE_TEMPLATE/feature_request.md
+++ /dev/null
@@ -1,28 +0,0 @@
----
-name: Feature request
-about: Suggest an idea for hls4ml
-title: ''
-labels: enhancement
-assignees: ''
-
----
-
-## Prerequisites
-Please talk to us before creating a new feature request, so that you can check that the idea is not already in active development.
-
-You can present your idea over here at the GitHub discussions page for hls4ml: https://github.com/fastmachinelearning/hls4ml/discussions/categories/ideas
-
-Even if an idea is already being worked on, you can still create a feature request
-if you would like to open a discussion about the feature or want to contribute to it.
-
-## Details
-Please add to the following sections to describe the feature as accurately as possible.
-
-### New behavior
-Please add a brief and concise description of what you would like to happen in hls4ml in the future.
-
-### Motivation
-Please tell us why this feature is important to the community.
-
-### Parts of hls4ml being affected
-Please describe which parts of hls4ml would be affected by this feature.
diff --git a/hls4ml/.github/PULL_REQUEST_TEMPLATE.md b/hls4ml/.github/PULL_REQUEST_TEMPLATE.md
deleted file mode 100644
index 88d445c..0000000
--- a/hls4ml/.github/PULL_REQUEST_TEMPLATE.md
+++ /dev/null
@@ -1,38 +0,0 @@
-# Description
-
-> :memo: Please include a summary of the change.
->
-> * Please also include relevant motivation and context.
-> * List any dependencies that are required for this change.
-
-## Type of change
-
-For a new feature or function, please create an issue first to discuss it
-with us before submitting a pull request.
-
-Note: Please delete options that are not relevant.
-
-- [ ] Bug fix (non-breaking change that fixes an issue)
-- [ ] Documentation update
-- [ ] New feature (non-breaking change which adds functionality)
-- [ ] Breaking change (fix or feature that would cause existing functionality to not work as expected)
-- [ ] A new research paper code implementation
-- [ ] Other (Specify)
-
-## Tests
-
-> :memo: Please describe the tests that you ran to verify your changes.
->
-> * Provide instructions so we can reproduce.
-> * Please also list any relevant details for your test configuration.
-
-**Test Configuration**:
-
-## Checklist
-
-- [ ] I have read the [guidelines for contributing](https://github.com/fastmachinelearning/hls4ml/blob/main/CONTRIBUTING.md).
-- [ ] I have commented my code, particularly in hard-to-understand areas.
-- [ ] I have made corresponding changes to the documentation.
-- [ ] My changes generate no new warnings.
-- [ ] I have installed and run `pre-commit` on the files I edited or added.
-- [ ] I have added tests that prove my fix is effective or that my feature works.
diff --git a/hls4ml/.github/dependabot.yml b/hls4ml/.github/dependabot.yml
deleted file mode 100644
index 6fddca0..0000000
--- a/hls4ml/.github/dependabot.yml
+++ /dev/null
@@ -1,7 +0,0 @@
-version: 2
-updates:
-  # Maintain dependencies for GitHub Actions
-  - package-ecosystem: "github-actions"
-    directory: "/"
-    schedule:
-      interval: "weekly"
diff --git a/hls4ml/.github/workflows/build-sphinx.yml b/hls4ml/.github/workflows/build-sphinx.yml
deleted file mode 100644
index aaf5487..0000000
--- a/hls4ml/.github/workflows/build-sphinx.yml
+++ /dev/null
@@ -1,40 +0,0 @@
-name: build-sphinx
-on:
-  push:
-    branches:
-      - main
-
-jobs:
-  build:
-
-    runs-on: ubuntu-latest
-
-    steps:
-    - uses: actions/checkout@v4
-      with:
-        fetch-depth: 0
-        ref: ${{ github.event.pull_request.head.sha }}
-    - name: Allow for file ownership conflicts with Docker and GitHub Actions
-      run: git config --global --add safe.directory '*'
-    - uses: jmduarte/sphinx-action@main
-      env:
-        SPHINX_GITHUB_CHANGELOG_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-      with:
-        pre-build-command: "git config --system --add safe.directory '*'"
-        docs-folder: "docs/"
-    - name: Commit Documentation Changes
-      run: |
-        git clone https://github.com/fastmachinelearning/hls4ml.git --branch gh-pages --single-branch gh-pages
-        cp -r docs/_build/html/* gh-pages/
-        cd gh-pages
-        touch .nojekyll
-        git config --local user.email "action@github.com"
-        git config --local user.name "GitHub Action"
-        git add .
- git commit -m "Update Sphinx Documentation" -a || true - - name: Push Documentation Changes - uses: ad-m/github-push-action@master - with: - branch: gh-pages - directory: gh-pages - github_token: ${{ secrets.PERSONAL_TOKEN }} diff --git a/hls4ml/.github/workflows/pre-commit.yml b/hls4ml/.github/workflows/pre-commit.yml deleted file mode 100644 index 5788ee3..0000000 --- a/hls4ml/.github/workflows/pre-commit.yml +++ /dev/null @@ -1,26 +0,0 @@ -name: Run pre-commit - -on: - pull_request: - branches: [ main ] - push: - branches: [ main ] - -jobs: - pre-commit: - name: Format - runs-on: ubuntu-latest - strategy: - matrix: - python-version: [3.8] - - steps: - - name: Checkout - uses: actions/checkout@v4 - with: - submodules: recursive - - - name: Pre-commit - uses: pre-commit/action@v3.0.0 - with: - extra_args: --hook-stage manual --all-files diff --git a/hls4ml/.github/workflows/pypi-publish.yml b/hls4ml/.github/workflows/pypi-publish.yml deleted file mode 100644 index 9d97f37..0000000 --- a/hls4ml/.github/workflows/pypi-publish.yml +++ /dev/null @@ -1,31 +0,0 @@ -name: 📦 Packaging release to PyPI -on: - workflow_dispatch: - pull_request: - branches: [main] - release: - types: [published] - -jobs: - release: - name: Upload new release to PyPI - runs-on: ubuntu-latest - steps: - - name: Checkout source - uses: actions/checkout@v4 - with: - submodules: recursive - fetch-depth: 0 - - - name: Build SDist and Wheel - run: pipx run build --sdist --wheel - - - uses: actions/upload-artifact@v3 - with: - path: dist/*.* - - - name: Publish 📦 to PyPI - if: startsWith(github.ref, 'refs/tags') - uses: pypa/gh-action-pypi-publish@release/v1 - with: - password: ${{ secrets.PYPI_PASSWORD }} diff --git a/hls4ml/.github/workflows/test-sphinx.yml b/hls4ml/.github/workflows/test-sphinx.yml deleted file mode 100644 index 57a9a89..0000000 --- a/hls4ml/.github/workflows/test-sphinx.yml +++ /dev/null @@ -1,27 +0,0 @@ -name: test-sphinx -on: - pull_request: - branches: - - main - -jobs: - build: - - runs-on: ubuntu-latest - - steps: - - uses: actions/checkout@v4 - with: - fetch-depth: 0 - ref: ${{ github.event.pull_request.head.sha }} - - name: Allow for file ownership conflicts with Docker and GitHub Actions - run: git config --global --add safe.directory '*' - - uses: jmduarte/sphinx-action@main - env: - SPHINX_GITHUB_CHANGELOG_TOKEN: ${{ secrets.GITHUB_TOKEN }} - with: - pre-build-command: "git config --system --add safe.directory '*'" - docs-folder: "docs/" - - uses: actions/upload-artifact@v3 - with: - path: docs/_build/html diff --git a/hls4ml/.github/workflows/update-branch-on-pr.yml b/hls4ml/.github/workflows/update-branch-on-pr.yml deleted file mode 100644 index 7290487..0000000 --- a/hls4ml/.github/workflows/update-branch-on-pr.yml +++ /dev/null @@ -1,20 +0,0 @@ -on: - pull_request_target: - types: [labeled] - branches: [main] - -name: Update branch on PR from fork -jobs: - test: - runs-on: ubuntu-latest - steps: - - name: Checkout repo - uses: actions/checkout@v4 - if: ${{ github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name && github.event.label.name == 'please test' }} - with: - ref: ${{ github.event.pull_request.head.sha }} - - name: Push changes - if: ${{ github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name && github.event.label.name == 'please test' }} - run: | - git checkout -b pr/${{ github.event.pull_request.number }} - git push --force origin pr/${{ github.event.pull_request.number }} diff --git 
a/hls4ml/.gitignore b/hls4ml/.gitignore deleted file mode 100644 index 22c8ff6..0000000 --- a/hls4ml/.gitignore +++ /dev/null @@ -1,16 +0,0 @@ -*.pyc -_version.py -__pycache__ -build/ -dist/ -sdist/ -*.egg-info/ -vivado_prj -.vscode -my-hls-test -*.tar.gz -docs/_build -docs/autodoc/* -hls4mlprj_* -*~ -*.ipynb_checkpoints/ diff --git a/hls4ml/.gitlab-ci.yml b/hls4ml/.gitlab-ci.yml deleted file mode 100644 index 5a491d2..0000000 --- a/hls4ml/.gitlab-ci.yml +++ /dev/null @@ -1,27 +0,0 @@ -stages: - - generate - - trigger - - test - -generator: - stage: generate - image: python:3.8-alpine - tags: - - k8s-default - before_script: - - pip install pyyaml - script: - - cd test/pytest - - python generate_ci_yaml.py - artifacts: - paths: - - test/pytest/pytests.yml - -pytests: - stage: trigger - trigger: - include: - - local: test/pytest/ci-template.yml - - artifact: test/pytest/pytests.yml - job: generator - strategy: depend diff --git a/hls4ml/.gitmodules b/hls4ml/.gitmodules deleted file mode 100644 index 3513213..0000000 --- a/hls4ml/.gitmodules +++ /dev/null @@ -1,3 +0,0 @@ -[submodule "example-models"] - path = example-models - url = https://github.com/hls-fpga-machine-learning/example-models.git diff --git a/hls4ml/.pre-commit-config.yaml b/hls4ml/.pre-commit-config.yaml deleted file mode 100644 index 80ded4c..0000000 --- a/hls4ml/.pre-commit-config.yaml +++ /dev/null @@ -1,70 +0,0 @@ -exclude: (^hls4ml\/templates\/(vivado|quartus)\/(ap_types|ac_types)\/|^test/pytest/test_report/) - -repos: -- repo: https://github.com/psf/black - rev: 23.9.1 - hooks: - - id: black - language_version: python3 - args: ['--line-length=125', - '--skip-string-normalization'] - -- repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.4.0 - hooks: - - id: check-added-large-files - - id: check-case-conflict - - id: check-merge-conflict - - id: check-symlinks - - id: check-yaml - - id: debug-statements - - id: end-of-file-fixer - - id: mixed-line-ending - - id: requirements-txt-fixer - - id: trailing-whitespace - -- repo: https://github.com/PyCQA/isort - rev: 5.12.0 - hooks: - - id: isort - args: ["--profile", "black", --line-length=125] - -- repo: https://github.com/asottile/pyupgrade - rev: v3.14.0 - hooks: - - id: pyupgrade - args: ["--py36-plus"] - -- repo: https://github.com/asottile/setup-cfg-fmt - rev: v2.5.0 - hooks: - - id: setup-cfg-fmt - -- repo: https://github.com/pycqa/flake8 - rev: 6.1.0 - hooks: - - id: flake8 - exclude: docs/conf.py - additional_dependencies: [flake8-bugbear, flake8-print] - args: ['--max-line-length=125', # github viewer width - '--extend-ignore=E203,T201'] # E203 is not PEP8 compliant - -- repo: https://github.com/mgedmin/check-manifest - rev: "0.49" - hooks: - - id: check-manifest - stages: [manual] - -- repo: https://github.com/jmduarte/p-clang-format - rev: "v1.0.4" - hooks: - - id: p-clang-format - types_or: [c++, c, cuda] -ci: - autofix_commit_msg: '[pre-commit.ci] auto fixes from pre-commit hooks' - autofix_prs: false # default is true - autoupdate_branch: 'main' - autoupdate_commit_msg: '[pre-commit.ci] pre-commit autoupdate' - autoupdate_schedule: weekly - skip: [] - submodules: true diff --git a/hls4ml/CITATION.cff b/hls4ml/CITATION.cff deleted file mode 100644 index fc3d8da..0000000 --- a/hls4ml/CITATION.cff +++ /dev/null @@ -1,56 +0,0 @@ -cff-version: 1.2.0 -message: "Please cite the following works when using this software." 
-type: software -authors: -- given-names: "FastML Team" -title: "hls4ml" -version: "v0.7.1" -doi: 10.5281/zenodo.1201549 -repository-code: "https://github.com/fastmachinelearning/hls4ml" -url: "https://fastmachinelearning.org/hls4ml" -keywords: - - python - - machine-learning - - FPGA - - physics - - tensorflow - - pytorch - - onnx - - qonnx -license: "Apache-2.0" -abstract: | - hls4ml is an open-source software-hardware codesign workflow - to interpret and translate machine learning algorithms for - implementations in hardware, including FPGAs and ASICs. -references: - - type: article - title: "Fast inference of deep neural networks on FPGAs with hls4ml" - authors: - - family-names: "Duarte" - given-names: "Javier" - - family-names: "Han" - given-names: "Song" - - family-names: "Harris" - given-names: "Philip" - - family-names: "Jindariani" - given-names: "Sergo" - - family-names: "Kreinar" - given-names: "Edward" - - family-names: "Kreis" - given-names: "Benjamin" - - family-names: "Ngadiuba" - given-names: "Jennifer" - - family-names: "Pierini" - given-names: "Maurizio" - - family-names: "Rivera" - given-names: "Ryan" - - family-names: "Tran" - given-names: "Nhan" - - family-names: "Wu" - given-names: "Zhenbin" - journal: "JINST" - volume: "13" - start: "P07027" - doi: "10.1088/1748-0221/13/07/P07027" - year: "2018" - number: "07" diff --git a/hls4ml/CONTRIBUTING.md b/hls4ml/CONTRIBUTING.md deleted file mode 100644 index c68142e..0000000 --- a/hls4ml/CONTRIBUTING.md +++ /dev/null @@ -1,55 +0,0 @@ -# How to Contribute - -We'd love to accept your patches and contributions to this project. -There are just a few small guidelines you need to follow. - -## Discussion - -Share your proposal via [GitHub Issues](https://github.com/fastmachinelearning/hls4ml/issues). -If you are looking for some issues to get started with, we have a list of [good first issues](https://github.com/fastmachinelearning/hls4ml/labels/good%20first%20issue) in the issue tracker. -We also welcome submissions to improve the documentation. - -## Pull Request - -All submissions, including submissions by project members, require review. -We use GitHub pull requests for this purpose. -Consult [GitHub Help](https://help.github.com/articles/about-pull-requests/) for more information on using pull requests. - -1. In the pull request description, clearly document all changes made and the expected behavior. -1. If you are introducing new functionality, add at least one unit test under the `test` folder and make sure it passes before you submit the pull request. -1. Similarly, if you are fixing a bug, add at least one unit test under the `test` folder such that the master branch fails the test and your branch passes the test. -1. Install and run `pre-commit` on the files that you have edited. We are adiabatically turning on linting for the full repository (see [#678](https://github.com/fastmachinelearning/hls4ml/pull/678)). -1. Submit the pull request to the [main](https://github.com/fastmachinelearning/hls4ml) branch. - -## Code Reviews - -We will review your contribution and, if any additional fixes or modifications are necessary, may provide feedback to guide you. -When accepted, your pull request will be merged to the repository. 
- -## Code of Conduct - -In the interest of fostering an open and welcoming environment, we as -contributors and maintainers pledge to make participation in our project and -our community a harassment-free experience for everyone, regardless of age, body -size, disability, ethnicity, sex characteristics, gender identity and expression, -level of experience, education, socio-economic status, nationality, personal -appearance, race, religion, or sexual identity and orientation. - -### Our Standards - -Examples of behavior that contributes to creating a positive environment -include: - -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members - -### Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, -available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html - -For answers to common questions about this code of conduct, see -https://www.contributor-covenant.org/faq diff --git a/hls4ml/Jenkinsfile b/hls4ml/Jenkinsfile deleted file mode 100644 index 2e53e4a..0000000 --- a/hls4ml/Jenkinsfile +++ /dev/null @@ -1,110 +0,0 @@ -pipeline { - agent { - docker { - image 'vivado-el7:3' - args '-v /data/Xilinx:/data/Xilinx' - } - } - options { - timeout(time: 6, unit: 'HOURS') - } - - stages { - stage('Keras to HLS') { - steps { - dir(path: 'test') { - sh '''#!/bin/bash --login - conda activate hls4ml-py38 - pip install tensorflow pyparsing - pip install -U ../ --user - ./convert-keras-models.sh -x -f keras-models.txt - pip uninstall hls4ml -y''' - } - } - } - stage('C Simulation') { - parallel { - stage('2019.2') { - when { - allOf { - environment name: 'USE_VIVADO_2019', value: '1'; - environment name: 'TEST_SIMULATION', value: '1' - } - } - steps { - dir(path: 'test') { - sh '''#!/bin/bash - ./build-prj.sh -i /data/Xilinx -v 2019.2 -c -p 2''' - } - } - } - stage('2020.1') { - when { - allOf { - environment name: 'USE_VIVADO_2020', value: '1'; - environment name: 'TEST_SIMULATION', value: '1' - } - } - steps { - dir(path: 'test') { - sh '''#!/bin/bash - ./build-prj.sh -i /data/Xilinx -v 2020.1 -c -p 2''' - } - } - } - } - } - stage('C/RTL Synthesis') { - parallel { - stage('2019.2') { - when { - allOf { - environment name: 'USE_VIVADO_2019', value: '1'; - environment name: 'TEST_SYNTHESIS', value: '1' - } - } - steps { - dir(path: 'test') { - sh '''#!/bin/bash - ./build-prj.sh -i /data/Xilinx -v 2019.2 -s -r -p 2''' - } - } - } - stage('2020.1') { - when { - allOf { - environment name: 'USE_VIVADO_2020', value: '1'; - environment name: 'TEST_SYNTHESIS', value: '1' - } - } - steps { - dir(path: 'test') { - sh '''#!/bin/bash - ./build-prj.sh -i /data/Xilinx -v 2020.1 -s -r -p 2''' - } - } - } - } - } - stage('Report') { - when { - environment name: 'TEST_SYNTHESIS', value: '1' - } - steps { - dir(path: 'test') { - sh '''#!/bin/bash - ./gather-reports.sh -b | tee report.rpt''' - } - archiveArtifacts artifacts: 'test/report.rpt', fingerprint: true - } - } - } - post { - always { - dir(path: 'test') { - sh '''#!/bin/bash - ./cleanup.sh''' - } - } - } -} diff --git a/hls4ml/LICENSE b/hls4ml/LICENSE deleted file mode 100644 index 261eeb9..0000000 --- a/hls4ml/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. 
Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/hls4ml/MANIFEST.in b/hls4ml/MANIFEST.in deleted file mode 100644 index 549cc69..0000000 --- a/hls4ml/MANIFEST.in +++ /dev/null @@ -1,7 +0,0 @@ -include LICENSE README.md CONTRIBUTING.md CITATION.cff pyproject.toml setup.py setup.cfg .clang-format -graft example-models -graft test -graft contrib -recursive-include hls4ml/templates * -global-exclude .git .gitmodules .gitlab-ci.yml -include hls4ml/backends/vivado_accelerator/supported_boards.json diff --git a/hls4ml/README.md b/hls4ml/README.md deleted file mode 100644 index f528368..0000000 --- a/hls4ml/README.md +++ /dev/null @@ -1,142 +0,0 @@ -

- [hls4ml logo]

- -[![DOI](https://zenodo.org/badge/108329371.svg)](https://zenodo.org/badge/latestdoi/108329371) -[![License](https://img.shields.io/badge/License-Apache_2.0-red.svg)](https://opensource.org/licenses/Apache-2.0) -[![Documentation Status](https://github.com/fastmachinelearning/hls4ml/actions/workflows/build-sphinx.yml/badge.svg)](https://fastmachinelearning.org/hls4ml) -[![PyPI version](https://badge.fury.io/py/hls4ml.svg)](https://badge.fury.io/py/hls4ml) -[![Downloads](https://static.pepy.tech/personalized-badge/hls4ml?period=total&units=international_system&left_color=grey&right_color=orange&left_text=Downloads)](https://pepy.tech/project/hls4ml) -[conda-forge badge] - -A package for machine learning inference in FPGAs. We create firmware implementations of machine learning algorithms using high-level synthesis (HLS). We translate models from traditional open-source machine learning packages into HLS code that can be configured for your use case! - -If you have any questions, comments, or ideas regarding hls4ml or just want to show us how you use hls4ml, don't hesitate to reach out to us through the [discussions](https://github.com/fastmachinelearning/hls4ml/discussions) tab. - -# Documentation & Tutorial - -For more information visit the webpage: [https://fastmachinelearning.org/hls4ml/](https://fastmachinelearning.org/hls4ml/) - -Detailed tutorials on how to use `hls4ml`'s various functionalities can be found [here](https://github.com/hls-fpga-machine-learning/hls4ml-tutorial). - -# Installation -```bash -pip install hls4ml -``` - -To install the extra dependencies for profiling: - -```bash -pip install hls4ml[profiling] -``` - -# Getting Started -### Creating an HLS project -```Python -import hls4ml - -# Fetch a keras model from our example repository -# This will download our example model to your working directory and return an example configuration file -config = hls4ml.utils.fetch_example_model('KERAS_3layer.json') - -# You can print the configuration to see some default parameters -print(config) - -# Convert it to an HLS project -hls_model = hls4ml.converters.keras_to_hls(config) - -# Print the full list of example models if you want to explore more -hls4ml.utils.fetch_example_list() -``` - -### Building a project with Xilinx Vivado HLS (after downloading and installing from [here](https://www.xilinx.com/products/design-tools/vivado/integration/esl-design.html)) -Note: Vitis HLS is not yet supported. Vivado HLS versions between 2018.2 and 2020.1 are recommended.
- -```Python -# Use Vivado HLS to synthesize the model -# This might take several minutes -hls_model.build() - -# Print out the report if you want -hls4ml.report.read_vivado_report('my-hls-test') -``` - -# Citation -If you use this software in a publication, please cite the software -```bibtex -@software{fastml_hls4ml, - author = {{FastML Team}}, - title = {fastmachinelearning/hls4ml}, - year = 2023, - publisher = {Zenodo}, - version = {v0.7.1}, - doi = {10.5281/zenodo.1201549}, - url = {https://github.com/fastmachinelearning/hls4ml} -} -``` -and the first publication: -```bibtex -@article{Duarte:2018ite, - author = "Duarte, Javier and others", - title = "{Fast inference of deep neural networks in FPGAs for particle physics}", - eprint = "1804.06913", - archivePrefix = "arXiv", - primaryClass = "physics.ins-det", - reportNumber = "FERMILAB-PUB-18-089-E", - doi = "10.1088/1748-0221/13/07/P07027", - journal = "JINST", - volume = "13", - number = "07", - pages = "P07027", - year = "2018" -} -``` -Additionally, if you use specific features developed in later papers, please cite those as well. For example, CNNs: -```bibtex -@article{Aarrestad:2021zos, - author = "Aarrestad, Thea and others", - title = "{Fast convolutional neural networks on FPGAs with hls4ml}", - eprint = "2101.05108", - archivePrefix = "arXiv", - primaryClass = "cs.LG", - reportNumber = "FERMILAB-PUB-21-130-SCD", - doi = "10.1088/2632-2153/ac0ea1", - journal = "Mach. Learn. Sci. Tech.", - volume = "2", - number = "4", - pages = "045015", - year = "2021" -} -@article{Ghielmetti:2022ndm, - author = "Ghielmetti, Nicol\`{o} and others", - title = "{Real-time semantic segmentation on FPGAs for autonomous vehicles with hls4ml}", - eprint = "2205.07690", - archivePrefix = "arXiv", - primaryClass = "cs.CV", - reportNumber = "FERMILAB-PUB-22-435-PPD", - doi = "10.1088/2632-2153/ac9cb5", - journal = "Mach. Learn. Sci. Tech.", - year = "2022" -} -``` -binary/ternary networks: -```bibtex -@article{Loncar:2020hqp, - author = "Ngadiuba, Jennifer and others", - title = "{Compressing deep neural networks on FPGAs to binary and ternary precision with HLS4ML}", - eprint = "2003.06308", - archivePrefix = "arXiv", - primaryClass = "cs.LG", - reportNumber = "FERMILAB-PUB-20-167-PPD-SCD", - doi = "10.1088/2632-2153/aba042", - journal = "Mach. Learn. Sci. Tech.", - volume = "2", - pages = "015001", - year = "2021" -} -``` - -# Acknowledgments -If you benefited from participating in our community, we ask that you please acknowledge the Fast Machine Learning collaboration, and particular individuals who helped you, in any publications. -Please use the following text for this acknowledgment: - > We acknowledge the Fast Machine Learning collective as an open community of multi-domain experts and collaborators. This community and \<names of individuals\>, in particular, were important for the development of this project. diff --git a/hls4ml/contrib/README.md b/hls4ml/contrib/README.md deleted file mode 100644 index 5cfa5f6..0000000 --- a/hls4ml/contrib/README.md +++ /dev/null @@ -1,7 +0,0 @@ -# Contributions - -This section is for contributed work that can be used with hls4ml and is potentially useful to a wider audience. Examples include implementations of custom layer types for use with the Extensions API. - -## How to structure contributions - -The best way to structure a contribution is to make a directory for the contribution, with a README inside to explain what it is and how to use it. If possible, there should be an example script demonstrating how to use it.
We should be able to validate that the code works. diff --git a/hls4ml/contrib/__init__.py b/hls4ml/contrib/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/hls4ml/contrib/garnet.py b/hls4ml/contrib/garnet.py deleted file mode 100644 index 075819e..0000000 --- a/hls4ml/contrib/garnet.py +++ /dev/null @@ -1,370 +0,0 @@ -""" -Excerpt from https://github.com/jkiesele/caloGraphNN/blob/6d1127d807bc0dbaefcf1ed804d626272f002404/caloGraphNN_keras.py -""" - -import tensorflow.keras as keras -from qkeras import QActivation, QDense, ternary - -K = keras.backend - - -class NamedQDense(QDense): - def add_weight(self, name=None, **kwargs): - return super().add_weight(name=f'{self.name}_{name}', **kwargs) - - -def ternary_1_05(): - return ternary(alpha=1.0, threshold=0.5) - - -# Hack keras Dense to propagate the layer name into saved weights -class NamedDense(keras.layers.Dense): - def add_weight(self, name=None, **kwargs): - return super().add_weight(name=f'{self.name}_{name}', **kwargs) - - -class GarNet(keras.layers.Layer): - def __init__( - self, - n_aggregators, - n_filters, - n_propagate, - simplified=False, - collapse=None, - input_format='xn', - output_activation='tanh', - mean_by_nvert=False, - quantize_transforms=False, - total_bits=None, - int_bits=None, - **kwargs, - ): - super().__init__(**kwargs) - - self._simplified = simplified - self._output_activation = output_activation - self._quantize_transforms = quantize_transforms - self._total_bits = total_bits - self._int_bits = int_bits - self._setup_aux_params(collapse, input_format, mean_by_nvert) - self._setup_transforms(n_aggregators, n_filters, n_propagate) - - def _setup_aux_params(self, collapse, input_format, mean_by_nvert): - if collapse is None: - self._collapse = None - elif collapse in ['mean', 'sum', 'max']: - self._collapse = collapse - else: - raise NotImplementedError('Unsupported collapse operation') - - self._input_format = input_format - self._mean_by_nvert = mean_by_nvert - - def _setup_transforms(self, n_aggregators, n_filters, n_propagate): - if self._quantize_transforms: - self._input_feature_transform = NamedQDense( - n_propagate, - kernel_quantizer="quantized_bits(%i,%i,0,alpha=1)" % (self._total_bits, self._int_bits), - bias_quantizer="quantized_bits(%i,%i,0,alpha=1)" % (self._total_bits, self._int_bits), - name='FLR', - ) - self._output_feature_transform = NamedQDense( - n_filters, - kernel_quantizer="quantized_bits(%i,%i,0,alpha=1)" % (self._total_bits, self._int_bits), - name='Fout', - ) - if self._output_activation is None or self._output_activation == "linear": - self._output_activation_transform = QActivation( - "quantized_bits(%i, %i)" % (self._total_bits, self._int_bits) - ) - else: - self._output_activation_transform = QActivation( - "quantized_%s(%i, %i)" % (self._output_activation, self._total_bits, self._int_bits) - ) - else: - self._input_feature_transform = NamedDense(n_propagate, name='FLR') - self._output_feature_transform = NamedDense(n_filters, activation=self._output_activation, name='Fout') - self._output_activation_transform = keras.layers.Activation(self._output_activation) - - self._aggregator_distance = NamedDense(n_aggregators, name='S') - - self._sublayers = [ - self._input_feature_transform, - self._aggregator_distance, - self._output_feature_transform, - self._output_activation_transform, - ] - - def build(self, input_shape): - super().build(input_shape) - - if self._input_format == 'x': - data_shape = input_shape - elif self._input_format == 'xn': - data_shape, _ = 
input_shape - elif self._input_format == 'xen': - data_shape, _, _ = input_shape - data_shape = data_shape[:2] + (data_shape[2] + 1,) - - self._build_transforms(data_shape) - - for layer in self._sublayers: - self._trainable_weights.extend(layer.trainable_weights) - self._non_trainable_weights.extend(layer.non_trainable_weights) - - def _build_transforms(self, data_shape): - self._input_feature_transform.build(data_shape) - self._aggregator_distance.build(data_shape) - if self._simplified: - self._output_activation_transform.build( - self._output_feature_transform.build( - data_shape[:2] + (self._aggregator_distance.units * self._input_feature_transform.units,) - ) - ) - else: - self._output_activation_transform.build( - self._output_feature_transform.build( - data_shape[:2] - + ( - data_shape[2] - + self._aggregator_distance.units * self._input_feature_transform.units - + self._aggregator_distance.units, - ) - ) - ) - - def call(self, x): - data, num_vertex, vertex_mask = self._unpack_input(x) - - output = self._garnet( - data, - num_vertex, - vertex_mask, - self._input_feature_transform, - self._aggregator_distance, - self._output_feature_transform, - self._output_activation_transform, - ) - - output = self._collapse_output(output) - - return output - - def _unpack_input(self, x): - if self._input_format == 'x': - data = x - - vertex_mask = K.cast(K.not_equal(data[..., 3:4], 0.0), 'float32') - num_vertex = K.sum(vertex_mask) - - elif self._input_format in ['xn', 'xen']: - if self._input_format == 'xn': - data, num_vertex = x - else: - data_x, data_e, num_vertex = x - data = K.concatenate((data_x, K.reshape(data_e, (-1, data_e.shape[1], 1))), axis=-1) - - data_shape = K.shape(data) - B = data_shape[0] - V = data_shape[1] - vertex_indices = K.tile(K.expand_dims(K.arange(0, V), axis=0), (B, 1)) # (B, [0..V-1]) - vertex_mask = K.expand_dims( - K.cast(K.less(vertex_indices, K.cast(num_vertex, 'int32')), 'float32'), axis=-1 - ) # (B, V, 1) - num_vertex = K.cast(num_vertex, 'float32') - - return data, num_vertex, vertex_mask - - def _garnet(self, data, num_vertex, vertex_mask, in_transform, d_compute, out_transform, act_transform): - features = in_transform(data) # (B, V, F) - distance = d_compute(data) # (B, V, S) - - edge_weights = vertex_mask * K.exp(-K.square(distance)) # (B, V, S) - - if not self._simplified: - features = K.concatenate([vertex_mask * features, edge_weights], axis=-1) - - if self._mean_by_nvert: - - def graph_mean(out, axis): - s = K.sum(out, axis=axis) - # reshape just to enable broadcasting - s = K.reshape(s, (-1, d_compute.units * in_transform.units)) / num_vertex - s = K.reshape(s, (-1, d_compute.units, in_transform.units)) - return s - - else: - graph_mean = K.mean - - # vertices -> aggregators - edge_weights_trans = K.permute_dimensions(edge_weights, (0, 2, 1)) # (B, S, V) - - aggregated_mean = self._apply_edge_weights(features, edge_weights_trans, aggregation=graph_mean) # (B, S, F) - - if self._simplified: - aggregated = aggregated_mean - else: - aggregated_max = self._apply_edge_weights(features, edge_weights_trans, aggregation=K.max) - aggregated = K.concatenate([aggregated_max, aggregated_mean], axis=-1) - - # aggregators -> vertices - updated_features = self._apply_edge_weights(aggregated, edge_weights) # (B, V, S*F) - - if not self._simplified: - updated_features = K.concatenate([data, updated_features, edge_weights], axis=-1) - - return vertex_mask * act_transform(out_transform(updated_features)) - - def _collapse_output(self, output): - if self._collapse == 
'mean': - if self._mean_by_nvert: - output = K.sum(output, axis=1) / self._mean_by_nvert # check if correct; from pre-commit - else: - output = K.mean(output, axis=1) - elif self._collapse == 'sum': - output = K.sum(output, axis=1) - elif self._collapse == 'max': - output = K.max(output, axis=1) - - return output - - def compute_output_shape(self, input_shape): - return self._get_output_shape(input_shape, self._output_activation_transform) - - def _get_output_shape(self, input_shape, out_transform): - if self._input_format == 'x': - data_shape = input_shape - elif self._input_format == 'xn': - data_shape, _ = input_shape - elif self._input_format == 'xen': - data_shape, _, _ = input_shape - - if self._collapse is None: - return data_shape[:2] + (out_transform.units,) - else: - return (data_shape[0], out_transform.units) - - def get_config(self): - config = super().get_config() - - config.update( - { - 'simplified': self._simplified, - 'collapse': self._collapse, - 'input_format': self._input_format, - 'output_activation': self._output_activation, - 'quantize_transforms': self._quantize_transforms, - 'mean_by_nvert': self._mean_by_nvert, - } - ) - - self._add_transform_config(config) - - return config - - def _add_transform_config(self, config): - config.update( - { - 'n_aggregators': self._aggregator_distance.units, - 'n_filters': self._output_feature_transform.units, - 'n_propagate': self._input_feature_transform.units, - } - ) - - @staticmethod - def _apply_edge_weights(features, edge_weights, aggregation=None): - features = K.expand_dims(features, axis=1) # (B, 1, v, f) - edge_weights = K.expand_dims(edge_weights, axis=3) # (B, u, v, 1) - - out = edge_weights * features # (B, u, v, f) - - if aggregation: - out = aggregation(out, axis=2) # (B, u, f) - else: - try: - out = K.reshape(out, (-1, edge_weights.shape[1].value, features.shape[-1].value * features.shape[-2].value)) - except AttributeError: # TF 2 - out = K.reshape(out, (-1, edge_weights.shape[1], features.shape[-1] * features.shape[-2])) - - return out - - -class GarNetStack(GarNet): - """ - Stacked version of GarNet. First three arguments to the constructor must be lists of integers. - Basically offers no performance advantage, but the configuration is consolidated (and is useful - when e.g. 
converting the layer to HLS) - """ - - def _setup_transforms(self, n_aggregators, n_filters, n_propagate): - self._transform_layers = [] - # inputs are lists - for it, (p, a, f) in enumerate(zip(n_propagate, n_aggregators, n_filters)): - if self._quantize_transforms: - input_feature_transform = NamedQDense( - p, - kernel_quantizer="quantized_bits(%i,%i,0,alpha=1)" % (self._total_bits, self._int_bits), - bias_quantizer="quantized_bits(%i,%i,0,alpha=1)" % (self._total_bits, self._int_bits), - name=('FLR%d' % it), - ) - output_feature_transform = NamedQDense( - f, - kernel_quantizer="quantized_bits(%i,%i,0,alpha=1)" % (self._total_bits, self._int_bits), - name=('Fout%d' % it), - ) - - if self._output_activation is None or self._output_activation == "linear": - output_activation_transform = QActivation("quantized_bits(%i, %i)" % (self._total_bits, self._int_bits)) - else: - output_activation_transform = QActivation( - "quantized_%s(%i, %i)" % (self._output_activation, self._total_bits, self._int_bits) - ) - else: - input_feature_transform = NamedDense(p, name=('FLR%d' % it)) - output_feature_transform = NamedDense(f, name=('Fout%d' % it)) - output_activation_transform = keras.layers.Activation(self._output_activation) - - aggregator_distance = NamedDense(a, name=('S%d' % it)) - - self._transform_layers.append( - (input_feature_transform, aggregator_distance, output_feature_transform, output_activation_transform) - ) - - self._sublayers = sum((list(layers) for layers in self._transform_layers), []) - - def _build_transforms(self, data_shape): - for in_transform, d_compute, out_transform, act_transform in self._transform_layers: - in_transform.build(data_shape) - d_compute.build(data_shape) - if self._simplified: - act_transform.build(out_transform.build(data_shape[:2] + (d_compute.units * in_transform.units,))) - else: - act_transform.build( - out_transform.build( - data_shape[:2] + (data_shape[2] + d_compute.units * in_transform.units + d_compute.units,) - ) - ) - - data_shape = data_shape[:2] + (out_transform.units,) - - def call(self, x): - data, num_vertex, vertex_mask = self._unpack_input(x) - - for in_transform, d_compute, out_transform, act_transform in self._transform_layers: - data = self._garnet(data, num_vertex, vertex_mask, in_transform, d_compute, out_transform, act_transform) - output = self._collapse_output(data) - - return output - - def compute_output_shape(self, input_shape): - return self._get_output_shape(input_shape, self._transform_layers[-1][2]) - - def _add_transform_config(self, config): - config.update( - { - 'n_propagate': list(ll[0].units for ll in self._transform_layers), - 'n_aggregators': list(ll[1].units for ll in self._transform_layers), - 'n_filters': list(ll[2].units for ll in self._transform_layers), - 'n_sublayers': len(self._transform_layers), - } - ) diff --git a/hls4ml/contrib/kl_layer/README.md b/hls4ml/contrib/kl_layer/README.md deleted file mode 100644 index 5d306ae..0000000 --- a/hls4ml/contrib/kl_layer/README.md +++ /dev/null @@ -1,18 +0,0 @@ -This folder contains the implementation of a custom KL divergence layer. -This is a custom implementation and not a built-in layer in any deep learning framework. -It was developed specifically for the [AD@L1 CMS paper](https://www.nature.com/articles/s42256-022-00441-3).
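-
-For orientation, here is a minimal sketch of how the layer is wired into a Keras model; the `KLLoss` class, input shape, and latent size mirror the example in `kl_layer.py` below:
-
-```python
-import tensorflow as tf
-
-from kl_layer import KLLoss  # Keras implementation of the custom layer, defined in kl_layer.py
-
-# Two dense heads produce z_mean and z_log_var; KLLoss combines them into a scalar KL term
-inp = tf.keras.layers.Input(shape=(19, 3, 1))
-z_mean = tf.keras.layers.Dense(10)(inp)
-z_log_var = tf.keras.layers.Dense(10)(inp)
-kl = KLLoss()([z_mean, z_log_var])
-model = tf.keras.models.Model(inputs=inp, outputs=kl)
-```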
- -# Files - -* `kl_layer.py`: contains the standalone implementation of the custom KL divergence layer -* `kl_layer.h`: contains the HLS implementation of the KL layer - - -# Usage - -`kl_layer.py` contains an example of how to use the KL layer. -To run it, do - -``` -python kl_layer.py -``` diff --git a/hls4ml/contrib/kl_layer/kl_layer.h b/hls4ml/contrib/kl_layer/kl_layer.h deleted file mode 100644 index 0435b9a..0000000 --- a/hls4ml/contrib/kl_layer/kl_layer.h +++ /dev/null @@ -1,87 +0,0 @@ -#ifndef KL_LAYER_H_ -#define KL_LAYER_H_ - -#include "nnet_activation.h" -#include "nnet_common.h" -#include <cmath> -#include <iostream> - -namespace nnet { - -struct distance_config { - // IO size - static const unsigned n_in = 10; - static const unsigned n_out = 1; - - // Internal data type definitions - typedef float accum_t; - typedef float sum_t; - typedef ap_fixed<18, 8> exp_table_t; - - // Internal info - static const unsigned table_size = 1024; - static constexpr unsigned exp_range = 8; -}; - -template <class CONFIG_T, int N_TABLE> void init_klloss_exp_table(typename CONFIG_T::exp_table_t table_out[N_TABLE]) { - for (int ii = 0; ii < N_TABLE; ii++) { - // First, convert from table index to X-value (range -exp_range to +exp_range) - float in_val = 2 * CONFIG_T::exp_range * (ii - float(N_TABLE) / 2.0) / float(N_TABLE); - // Next, compute lookup table function - typename CONFIG_T::exp_table_t real_val = exp_fcn_float(in_val); - // std::cout << "Lookup table In Value: " << in_val << " Result: " << real_val << " Index: " << ii << std::endl; - table_out[ii] = real_val; - } -} -template <class data1_T, class data2_T, class res_T, typename CONFIG_T> -void klloss(data1_T mean[CONFIG_T::n_in], data2_T log_var[CONFIG_T::n_in], res_T res[CONFIG_T::n_out]) { - #pragma HLS PIPELINE - // Initialize the lookup tables -#ifdef __HLS_SYN__ - bool initialized = false; - typename CONFIG_T::exp_table_t exp_table[CONFIG_T::table_size]; -#else - static bool initialized = false; - static typename CONFIG_T::exp_table_t exp_table[CONFIG_T::table_size]; -#endif - if (!initialized) { - init_klloss_exp_table<CONFIG_T, CONFIG_T::table_size>(exp_table); - initialized = true; - } - typename CONFIG_T::accum_t kl[CONFIG_T::n_in]; - #pragma HLS ARRAY_PARTITION variable=kl complete - typename CONFIG_T::accum_t mean_sq[CONFIG_T::n_in]; - #pragma HLS ARRAY_PARTITION variable=mean_sq complete - typename CONFIG_T::accum_t kl_sum(0); - for (unsigned i = 0; i < CONFIG_T::n_in; i++) { - #pragma HLS UNROLL - mean_sq[i] = mean[i] * mean[i]; - kl[i] = data2_T(1.) + log_var[i]; - // std::cout << "Log var: " << log_var[i] << " Result: " << kl[i] << std::endl; - } - constexpr unsigned table_scale = (unsigned)(CONFIG_T::table_size / (2 * CONFIG_T::exp_range)); - constexpr unsigned index_scale = (unsigned)(CONFIG_T::exp_range * table_scale); - for (unsigned i = 0; i < CONFIG_T::n_in; i++) { - #pragma HLS UNROLL - auto data_round = log_var[i] * table_scale; - auto index = data_round + index_scale; - if (index < 0) - index = 0; - if (index > CONFIG_T::table_size - 1) - index = CONFIG_T::table_size - 1; - kl[i] -= exp_table[index]; - // std::cout << "Exp var: " << exp_table[index] << " Result: " << kl[i] << " Index: " << index << std::endl; - } - for (unsigned i = 0; i < CONFIG_T::n_in; i++) { - #pragma HLS UNROLL - kl[i] -= mean_sq[i]; - } - Op_add<typename CONFIG_T::accum_t> op_add; - kl_sum = reduce<typename CONFIG_T::accum_t, CONFIG_T::n_in, Op_add<typename CONFIG_T::accum_t>>(kl, op_add); - // std::cout << "KL sum: " << kl_sum << std::endl; - kl_sum *= typename CONFIG_T::accum_t(1.
/ CONFIG_T::n_in); - res[0] = res_T(-0.5) * kl_sum; -} -} // namespace nnet - -#endif diff --git a/hls4ml/contrib/kl_layer/kl_layer.py b/hls4ml/contrib/kl_layer/kl_layer.py deleted file mode 100644 index 52990fa..0000000 --- a/hls4ml/contrib/kl_layer/kl_layer.py +++ /dev/null @@ -1,184 +0,0 @@ -""" - Usage example for a custom KL loss layer - Takes as input two arrays, z_mean and z_log_var, - and computes the KL "distance" between a standard normal distribution - and a Gaussian with mean z_mean and log-variance z_log_var - - The HLS part is in contrib/kl_layer/kl_layer.h -""" -from pathlib import Path - -import numpy as np -import tensorflow as tf - -try: - from keras.layers.merge import _Merge as Merge -except Exception: - from keras.layers.merging.base_merge import _Merge as Merge - -from tensorflow.python.keras.utils import tf_utils -from tensorflow.python.ops import math_ops - -import hls4ml -from hls4ml.converters.keras_to_hls import parse_default_keras_layer -from hls4ml.model.attributes import ConfigurableAttribute, TypeAttribute -from hls4ml.model.types import FixedPrecisionType, RoundingMode, SaturationMode - - -# Keras implementation of a KL layer -class KLLoss(Merge): - '''Keras implementation of a KL loss custom layer''' - - @tf_utils.shape_type_conversion - def build(self, input_shape): - super().build(input_shape) - - def _merge_function(self, inputs): - mean = inputs[0] - log_var = inputs[1] - - kl = 1.0 + log_var - math_ops.square(mean) - math_ops.exp(log_var) - kl = -0.5 * math_ops.reduce_mean(kl, axis=-1, keepdims=True) - - return kl - - -# hls4ml implementations -class HKLLoss(hls4ml.model.layers.Layer): - '''hls4ml implementation of a KL loss custom layer''' - - _expected_attributes = [ - ConfigurableAttribute('table_size', default=1024), - ConfigurableAttribute('exp_range', default=8), - TypeAttribute('accum'), - TypeAttribute( - 'sum', - default=FixedPrecisionType(18, 8, rounding_mode=RoundingMode.RND, saturation_mode=SaturationMode.SAT), - ), - TypeAttribute( - 'exp_table', - default=FixedPrecisionType(18, 8, rounding_mode=RoundingMode.RND, saturation_mode=SaturationMode.SAT), - ), - ] - - def initialize(self): - self.add_output_variable(shape=[1], dim_names=[f'KL_LOSS_{self.index}']) - - -# Templates -distance_config_template = """struct config{index} : nnet::distance_config {{ - static const unsigned n_in = {n_in}; - static const unsigned n_out = 1; - typedef {accum_t.name} accum_t; - typedef {sum_t.name} sum_t; - typedef {exp_table_t.name} exp_table_t; - static const unsigned table_size = {table_size}; - static constexpr float exp_range = {exp_range}; -}};\n""" -distance_function_template = 'nnet::klloss<{input1_t}, {input2_t}, {output_t}, {config}>({input1}, {input2}, {output});' -distance_include_list = ['nnet_utils/kl_layer.h'] - - -class HKLLossConfigTemplate(hls4ml.backends.template.LayerConfigTemplate): - def __init__(self): - super().__init__(HKLLoss) - self.template = distance_config_template - - def format(self, node): - params = self._default_config_params(node) - params['n_in'] = node.get_input_variable(node.inputs[0]).shape[0] - params['n_out'] = 1 - return self.template.format(**params) - - -class HKLLossFunctionTemplate(hls4ml.backends.template.FunctionCallTemplate): - def __init__(self): - super().__init__(HKLLoss, include_header=distance_include_list) - self.template = distance_function_template - - def format(self, node): - params = {} - params['config'] = f'config{node.index}' - params['input1_t'] = node.get_input_variable(node.inputs[0]).type.name - params['input2_t']
= node.get_input_variable(node.inputs[1]).type.name - params['output_t'] = node.get_output_variable().type.name - params['input1'] = node.get_input_variable(node.inputs[0]).name - params['input2'] = node.get_input_variable(node.inputs[1]).name - params['output'] = node.get_output_variable().name - - return self.template.format(**params) - - -# Parser for converter -def parse_klloss_layer(keras_layer, input_names, input_shapes, data_reader): - assert 'KLLoss' in keras_layer['class_name'] - - layer = parse_default_keras_layer(keras_layer, input_names) - - output_shape = [input_shapes[0][0], 1] - - return layer, output_shape - - -def main(): - # Register the converter for the custom Keras layer - hls4ml.converters.register_keras_layer_handler('KLLoss', parse_klloss_layer) - - # Register hls4ml's IR layer - hls4ml.model.layers.register_layer('KLLoss', HKLLoss) - - # Register the optimization passes (if any) - backend = hls4ml.backends.get_backend('Vivado') - - # Register template passes for the given backend - backend.register_template(HKLLossConfigTemplate) - backend.register_template(HKLLossFunctionTemplate) - - # Register HLS implementation - p = Path(__file__).parent / 'kl_layer.h' - backend.register_source(p) - - # Test if it works - # Create a dummy Keras model with KL loss layer - inp = tf.keras.layers.Input(shape=(19, 3, 1)) - z_mean = tf.keras.layers.Dense(10)(inp) - z_log_var = tf.keras.layers.Dense(10)(inp) - custom_output = KLLoss()([z_mean, z_log_var]) - # create new model - kmodel = tf.keras.models.Model(inputs=inp, outputs=custom_output) - kmodel.summary() - - # test on random inputs - x = np.random.randint(-5, 5, (1, 19, 3, 1), dtype='int32') - kres = kmodel(x) - - # Create dummy config - config = {} - config['Model'] = { - 'Precision': 'ap_fixed<16,6>', - 'ReuseFactor': 1, - 'ParallelizationFactor': 1, - 'Strategy': 'Resource', - } - hmodel = hls4ml.converters.convert_from_keras_model( - kmodel, - output_dir='hls4mlprj_kl_layer', - backend='Vivado', - io_type='io_parallel', - part='xcvu9p-flga2577-2-e', - hls_config=config, - ) - - hmodel.compile() - hres = hmodel.predict(x.astype('float32')) - - print('Compare the hls4ml model prediction to the Keras one') - print(kres - hres) - - print('Building model') - report = hmodel.build(reset=True, csim=False, cosim=True, synth=True, vsynth=True) - print(report) - - -if __name__ == '__main__': - main() diff --git a/hls4ml/docs/Makefile b/hls4ml/docs/Makefile deleted file mode 100644 index 3f73575..0000000 --- a/hls4ml/docs/Makefile +++ /dev/null @@ -1,21 +0,0 @@ -# Minimal makefile for Sphinx documentation -# - -# You can set these variables from the command line, and also -# from the environment for the first two. -SPHINXOPTS ?= -SPHINXBUILD ?= sphinx-build -SOURCEDIR = . -BUILDDIR = _build - -# Put it first so that "make" without argument is like "make help". -help: - @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) - -.PHONY: help Makefile - -# Catch-all target: route all unknown targets to Sphinx using the new -# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
-%: Makefile - @sphinx-apidoc -f -T -o autodoc/ ../hls4ml - @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) diff --git a/hls4ml/docs/advanced/accelerator.rst b/hls4ml/docs/advanced/accelerator.rst deleted file mode 100644 index 7a79d9d..0000000 --- a/hls4ml/docs/advanced/accelerator.rst +++ /dev/null @@ -1,77 +0,0 @@ -========================= -VivadoAccelerator Backend -========================= - -The ``VivadoAccelerator`` backend of ``hls4ml`` leverages the `PYNQ `_ software stack to easily deploy models on supported devices. -Currently ``hls4ml`` supports the following boards: - -* `pynq-z2 `_ (part: ``xc7z020clg400-1``) -* `zcu102 `_ (part: ``xczu9eg-ffvb1156-2-e``) -* `alveo-u50 `_ (part: ``xcu50-fsvh2104-2-e``) -* `alveo-u250 `_ (part: ``xcu250-figd2104-2L-e``) -* `alveo-u200 `_ (part: ``xcu200-fsgd2104-2-e``) -* `alveo-u280 `_ (part: ``xcu280-fsvh2892-2L-e``) - -but, in principle, support can be extended to `any board supported by PYNQ `_. -For the Zynq-based boards, there are two components: an ARM-based processing system (PS) and FPGA-based programmable logic (PL), with various interfaces between the two. - -.. image:: ../img/zynq_interfaces.png - :height: 300px - :align: center - :alt: Zynq PL/PS interfaces - -Neural Network Overlay -====================== - -In the PYNQ project, programmable logic circuits are presented as hardware libraries called *overlays*. -The overlay can be accessed through a Python API. -In ``hls4ml``, we create a custom **neural network overlay**, which sends and receives data via AXI stream. -The target device is programmed using a bitfile that is generated by the ``VivadoAccelerator`` backend. - -.. image:: ../img/pynqframe.png - :width: 600px - :align: center - :alt: PYNQ software stack - -Example -======= - -This example is taken from `part 7 of the hls4ml tutorial `_. -Specifically, we'll deploy a model on a ``pynq-z2`` board. - -First, we generate the bitfile from a Keras model ``model`` and a config. - -.. code-block:: Python - - import hls4ml - config = hls4ml.utils.config_from_keras_model(model, granularity='name') - hls_model = hls4ml.converters.convert_from_keras_model(model, - hls_config=config, - output_dir='hls4ml_prj_pynq', - backend='VivadoAccelerator', - board='pynq-z2') - hls_model.build(bitfile=True) - - -After this command completes, we will need to package up the bitfile, hardware handoff, and Python driver to copy to the PS of the board. - -.. code-block:: bash - - mkdir -p package - cp hls4ml_prj_pynq/myproject_vivado_accelerator/project_1.runs/impl_1/design_1_wrapper.bit package/hls4ml_nn.bit - cp hls4ml_prj_pynq/myproject_vivado_accelerator/project_1.srcs/sources_1/bd/design_1/hw_handoff/design_1.hwh package/hls4ml_nn.hwh - cp hls4ml_prj_pynq/axi_stream_driver.py package/ - tar -czvf package.tar.gz -C package/ . - -Then we can copy this package to the PS of the board and untar it. - -Finally, on the PS in Python we can create a ``NeuralNetworkOverlay`` object, which will download the bitfile onto the PL of the board. -We also must provide the shapes of our input and output data, ``X_test.shape`` and ``y_test.shape``, respectively, to allocate the buffers for the data transfer. -The ``predict`` method will send the input data to the PL and return the output data ``y_hw``.
diff --git a/hls4ml/docs/advanced/extension.rst b/hls4ml/docs/advanced/extension.rst deleted file mode 100644 index ad86051..0000000 --- a/hls4ml/docs/advanced/extension.rst +++ /dev/null @@ -1,185 +0,0 @@ -======================== -Extension API -======================== - -``hls4ml`` natively supports a large number of neural network layers. -But what if a desired layer is not supported? -If it is standard enough and its implementation would benefit the community as a whole, we would welcome a contribution to add it to the standard set of supported layers. -However, if it is a somewhat niche custom layer, there is another approach we can take to extend hls4ml through the *extension API*. - -This documentation will walk through a `complete end-to-end example `_, which is part of our testing suite. -To implement a custom layer in ``hls4ml`` with the extension API, the required components are: - -* Your custom layer class -* Equivalent hls4ml custom layer class -* Parser for the converter -* HLS implementation -* Layer config template -* Function config template -* Registration of layer, source code, and templates - -Complete example -================ - -For concreteness, let's say our custom layer ``KReverse`` is implemented in Keras and reverses the order of the last dimension of the input. - -.. code-block:: Python - - # Keras implementation of a custom layer - class KReverse(tf.keras.layers.Layer): - '''Keras implementation of a hypothetical custom layer''' - - def __init__(self): - super().__init__() - - def call(self, inputs): - return tf.reverse(inputs, axis=[-1]) - -We can define the equivalent layer in hls4ml ``HReverse``, which inherits from ``hls4ml.model.layers.Layer``. - -.. code-block:: Python - - # hls4ml layer implementation - class HReverse(hls4ml.model.layers.Layer): - '''hls4ml implementation of a hypothetical custom layer''' - - def initialize(self): - inp = self.get_input_variable() - shape = inp.shape - dims = inp.dim_names - self.add_output_variable(shape, dims) - -A parser for the Keras to HLS converter is also required. -This parser reads the attributes of the Keras layer instance and populates a dictionary of attributes for the hls4ml layer. -It also returns a list of output shapes (one shape for each output). -In this case, there is a single output with the same shape as the input. - -.. code-block:: Python - - # Parser for converter - def parse_reverse_layer(keras_layer, input_names, input_shapes, data_reader): - layer = {} - layer['class_name'] = 'HReverse' - layer['name'] = keras_layer['config']['name'] - layer['n_in'] = input_shapes[0][1] - - if input_names is not None: - layer['inputs'] = input_names - - return layer, [shape for shape in input_shapes[0]] - -Next, we need the actual HLS implementation of the function, which can be written in a header file ``nnet_reverse.h``. - -.. code-block:: C++ - - #ifndef NNET_REVERSE_H_ - #define NNET_REVERSE_H_ - - #include "nnet_common.h" - - namespace nnet { - - struct reverse_config { - static const unsigned n_in = 10; - }; - - template <class data_T, typename CONFIG_T> - void reverse( - data_T input[CONFIG_T::n_in], - data_T reversed[CONFIG_T::n_in] - ) { - for (int i = 0; i < CONFIG_T::n_in; i++) { - reversed[CONFIG_T::n_in - 1 - i] = input[i]; - } - } - - } - - #endif
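The behavior this function implements can be summarized by an equivalent NumPy expression (a sketch for illustration only, not part of the example project):

.. code-block:: Python

    import numpy as np

    x = np.arange(10)
    # Same ordering as nnet::reverse: element i moves to position n_in - 1 - i
    assert (x[::-1] == np.array([9, 8, 7, 6, 5, 4, 3, 2, 1, 0])).all()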
Now, we can define the layer config and function call templates. -These two templates determine how to populate the config template based on the layer attributes and the function call signature for the layer in HLS, respectively. - -.. code-block:: Python - - rev_config_template = """struct config{index} : nnet::reverse_config {{ - static const unsigned n_in = {n_in}; - }};\n""" - - rev_function_template = 'nnet::reverse<{input_t}, {config}>({input}, {output});' - rev_include_list = ['nnet_utils/nnet_reverse.h'] - - - class HReverseConfigTemplate(hls4ml.backends.template.LayerConfigTemplate): - def __init__(self): - super().__init__(HReverse) - self.template = rev_config_template - - def format(self, node): - params = self._default_config_params(node) - return self.template.format(**params) - - - class HReverseFunctionTemplate(hls4ml.backends.template.FunctionCallTemplate): - def __init__(self): - super().__init__(HReverse, include_header=rev_include_list) - self.template = rev_function_template - - def format(self, node): - params = self._default_function_params(node) - return self.template.format(**params) - -Now, we need to tell hls4ml about the existence of this new layer by registering it. -We also need to register the parser (a.k.a. the layer handler), the template passes, and the HLS implementation source code with the particular backend. -In this case, the HLS code is valid for both the Vivado and Quartus backends. (A sketch of the ``RemoveDuplicateReverse`` optimizer pass used below is shown at the end of this section.) - -.. code-block:: Python - - # Register the converter for custom Keras layer - hls4ml.converters.register_keras_layer_handler('KReverse', parse_reverse_layer) - - # Register the hls4ml's IR layer - hls4ml.model.layers.register_layer('HReverse', HReverse) - - for backend_id in ['Vivado', 'Quartus']: - # Register the optimization passes (if any) - backend = hls4ml.backends.get_backend(backend_id) - backend.register_pass('remove_duplicate_reverse', RemoveDuplicateReverse, flow=f'{backend_id.lower()}:optimize') - - # Register template passes for the given backend - backend.register_template(HReverseConfigTemplate) - backend.register_template(HReverseFunctionTemplate) - - # Register HLS implementation - backend.register_source('nnet_reverse.h') - -Finally, we can actually test the ``hls4ml`` custom layer compared to the Keras one. - -.. code-block:: Python - - # Test if it works - kmodel = tf.keras.models.Sequential( - [ - tf.keras.layers.Input(shape=(8,)), - KReverse(), - tf.keras.layers.ReLU(), - ] - ) - - x = np.random.randint(-5, 5, (8,), dtype='int32') - kres = kmodel(x) - - for backend_id in ['Vivado', 'Quartus']: - - hmodel = hls4ml.converters.convert_from_keras_model( - kmodel, - output_dir=str(f'hls4mlprj_extensions_{backend_id}'), - backend=backend_id, - io_type='io_parallel', - hls_config={'Model': {'Precision': 'ap_int<6>', 'ReuseFactor': 1}}, - ) - - hmodel.compile() - hres = hmodel.predict(x.astype('float32')) - - np.testing.assert_array_equal(kres, hres)
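For completeness, here is a minimal sketch of what the ``RemoveDuplicateReverse`` optimizer pass registered above could look like (the class is not shown in the example itself; ``get_input_node`` and ``remove_node`` are part of the hls4ml graph API, but treat the details as illustrative):

.. code-block:: Python

    from hls4ml.model.optimizer import OptimizerPass

    class RemoveDuplicateReverse(OptimizerPass):
        '''Two consecutive reverses cancel out, so remove both nodes.'''

        def match(self, node):
            return isinstance(node, HReverse) and isinstance(node.get_input_node(), HReverse)

        def transform(self, model, node):
            first = node.get_input_node()
            model.remove_node(node)
            model.remove_node(first)
            return True  # the graph changed, so the optimizers should rerun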
diff --git a/hls4ml/docs/advanced/fifo_depth.rst b/hls4ml/docs/advanced/fifo_depth.rst deleted file mode 100644 index 44f87b0..0000000 --- a/hls4ml/docs/advanced/fifo_depth.rst +++ /dev/null @@ -1,49 +0,0 @@ -============================== -FIFO Buffer Depth Optimization -============================== - -With the ``io_stream`` IO type, each layer is connected with the subsequent layer through first-in first-out (FIFO) buffers. -The implementation of the FIFO buffers contributes to the overall resource utilization of the design, impacting in particular the BRAM or LUT utilization. -Because neural networks can generally have complex architectures, it is hard to know a priori the correct depth of each FIFO buffer. -By default ``hls4ml`` chooses the most conservative possible depth for each FIFO buffer, which can result in an unnecessary overutilization of resources. - -In order to reduce the impact on the resources used for FIFO buffer implementation, an optimization has been developed in `#509 `_ that correctly sizes the depth of the FIFO buffers by analyzing the RTL cosimulation. -We implemented this FIFO buffer resizing as a :py:class:`~hls4ml.backends.vivado.passes.fifo_depth_optimization` optimizer pass. -Through RTL simulation with large FIFO buffers (by default set to a depth of 100,000), we estimate the maximum occupation of each FIFO. -Once the maximum depth is determined, the optimizer pass sets the FIFO buffer depth to that value plus 1. - -As an example, we show below how to use the optimizer pass, inspired by this `GitHub Gist `_. -First, we can define a simple neural network in Keras: - -.. code-block:: Python - - from tensorflow.keras.layers import Dense - from tensorflow.keras.models import Sequential - - model = Sequential() - model.add(Dense(64, input_shape=(16,), name='fc1', activation='relu')) - model.add(Dense(32, name='fc2', activation='relu')) - model.add(Dense(32, name='fc3', activation='relu')) - model.add(Dense(5, name='output', activation='softmax')) - -Then, we can convert the model, including the flow: - -.. code-block:: Python - - import hls4ml - - config = hls4ml.utils.config_from_keras_model(model, granularity='model') - config['Flows'] = ['vivado:fifo_depth_optimization'] - hls4ml.model.optimizer.get_optimizer('vivado:fifo_depth_optimization').configure(profiling_fifo_depth=100_000) - - - hls_model = hls4ml.converters.convert_from_keras_model(model, - io_type='io_stream', - hls_config=config, - output_dir='hls4mlprj_fifo_depth_opt', - part='xc7z020clg400-1', - backend='Vivado') - - hls_model.build(reset=False, csim=True, synth=True, cosim=True)
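Once the build finishes, the synthesis and co-simulation reports for the project (which reflect the resized FIFOs) can be printed with the usual reporting helper (a sketch, using the output directory from the example above):

.. code-block:: Python

    # Print the Vivado reports for the project built above
    hls4ml.report.read_vivado_report('hls4mlprj_fifo_depth_opt')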
For more details and results, see `H. Borras et al., "Open-source FPGA-ML codesign for the MLPerf Tiny Benchmark" (2022) `_. diff --git a/hls4ml/docs/api/configuration.rst b/hls4ml/docs/api/configuration.rst deleted file mode 100644 index 1a275da..0000000 --- a/hls4ml/docs/api/configuration.rst +++ /dev/null @@ -1,225 +0,0 @@ -============= -Configuration -============= - - - -We currently support two ways of setting hls4ml's model configuration. This page documents both methods' usage. - - -.. contents:: \ - - -**NOTE:** - - -* - One important part of ``hls4ml`` to remember is that the user is responsible for the format of the inputs. There is no automatic formatting or normalization, so this must be done in the training. - -* - For developers, you might also want to check out this section: `Detailed configuration in converted hls codes <#detailed-configuration-in-converted-hls-codes>`_. - ---- - -1. Python API -============= - -Using hls4ml, you can quickly generate a simple configuration dictionary from a keras model: - -.. code-block:: python - - import hls4ml - config = hls4ml.utils.config_from_keras_model(model, granularity='model') - -For more advanced and detailed configuration, you can also set options through the created dictionary. For example, to change the reuse factor: - -.. code-block:: python - - config['Model']['ReuseFactor'] = 2 - -Or to set the precision of a specific layer's weight: - -.. code-block:: python - - config['LayerName']['fc1']['Precision']['weight'] = 'ap_fixed<8,4>' - -To better understand how the configuration hierarchy works, refer to the next section for more details. - ---- - -2. YAML Configuration file ========================== - -2.1 Top Level Configuration --------------------------- - -Configuration files are YAML files in hls4ml (\ ``*.yml``\ ). An example configuration file is `here `__. - -It looks like this: - -.. code-block:: yaml - - # Project section - OutputDir: my-hls-test - ProjectName: myproject - - # Model section (Keras model) - KerasJson: keras/KERAS_3layer.json - KerasH5: keras/KERAS_3layer_weights.h5 #You can also use h5 file from Keras's model.save() without supplying json file. - InputData: keras/KERAS_3layer_input_features.dat - OutputPredictions: keras/KERAS_3layer_predictions.dat - - # Backend section (Vivado backend) - Part: xcku115-flvb2104-2-i - ClockPeriod: 5 - IOType: io_parallel # options: io_parallel/io_stream - - HLSConfig: - Model: - Precision: ap_fixed<16,6> - ReuseFactor: 1 - Strategy: Latency - LayerType: - Dense: - ReuseFactor: 2 - Strategy: Resource - Compression: True - -There are a number of configuration options that you have. Let's go through them. You have basic setup parameters: - - -* **OutputDir**\ : the output directory where you want your HLS project to appear -* **ProjectName**\ : the name of the HLS project IP that is produced -* **KerasJson/KerasH5**\ : for Keras, the model architecture and weights are stored in a ``json`` and ``h5`` file. The paths to those files are required here. - We also support Keras model files obtained directly from ``model.save()``. In this case you can supply just the ``h5`` file in the ``KerasH5:`` field. -* **InputData/OutputPredictions**\ : path to your input/predictions of the model. If none is supplied, then hls4ml will create artificial data for simulation. The data used above in the example can be found `here `__. We also support ``npy`` data files. We welcome suggestions on more input data types to support. - -The backend-specific section of the configuration depends on the backend. You can get a starting point for the necessary settings using, for example `hls4ml.templates.get_backend('Vivado').create_initial_config()`. -For the Vivado backend the options are: - -* **Part**\ : the particular FPGA part number that you are considering, in this example a Xilinx Kintex UltraScale FPGA -* **ClockPeriod**\ : the clock period, in ns, at which your algorithm runs - Then you have some optimization parameters for how your algorithm runs: -* **IOType**\ : your options are ``io_parallel`` or ``io_stream`` which defines the type of data structure used for inputs, intermediate activations between layers, and outputs. For ``io_parallel``, arrays are used that, in principle, can be fully unrolled and are typically implemented in RAMs. For ``io_stream``, HLS streams are used, which are a more efficient/scalable mechanism to represent data that are produced and consumed in a sequential manner. Typically, HLS streams are implemented with FIFOs instead of RAMs. For more information see `here `__. -* **HLSConfig**\: the detailed configuration of precision and parallelism, including: - * **ReuseFactor**\ : in the case that you are pipelining, this defines the pipeline interval or initiation interval - * **Strategy**\ : Optimization strategy on FPGA, either "Latency" or "Resource". If none is supplied then hls4ml uses "Latency" as default.
Note that a reuse factor larger than 1 should be specified when using the "Resource" strategy. An example of using a larger reuse factor can be found `here `__. - * **Precision**\ : this defines the precision of your inputs, outputs, weights and biases. It is denoted by ``ap_fixed<X,Y>``\ , where ``Y`` is the number of bits representing the signed number above the binary point (i.e. the integer part), and ``X`` is the total number of bits. - Additionally, integers in fixed precision data type (\ ``ap_int<N>``\ , where ``N`` is a bit-size from 1 to 1024) can also be used. You have a chance to further configure this more finely with per-layer configuration described below. - -2.2 Per-Layer Configuration --------------------------- - -In the ``hls4ml`` configuration file, it is possible to specify the model *Precision* and *ReuseFactor* with finer granularity. - -Under the ``HLSConfig`` heading, these can be set for the ``Model``\ , per ``LayerType``\ , per ``LayerName``\ , and for named variables within the layer (for precision only). The most basic configuration may look like this: - -.. code-block:: yaml - - HLSConfig: - Model: - Precision: ap_fixed<16,6> - ReuseFactor: 1 - -This configuration uses ``ap_fixed<16,6>`` for every variable and a ReuseFactor of 1 throughout. - -Specify all ``Dense`` layers to use a different precision like this: - -.. code-block:: yaml - - HLSConfig: - Model: - Precision: ap_fixed<16,6> - ReuseFactor: 1 - LayerType: - Dense: - Precision: ap_fixed<14,5> - -In this case, all variables in any ``Dense`` layers will be represented with ``ap_fixed<14,5>`` while any other layer types will use ``ap_fixed<16,6>``. - -A specific layer can be targeted like this: - -.. code-block:: yaml - - HLSConfig: - Model: - Precision: ap_fixed<16,6> - ReuseFactor: 16 - LayerName: - dense1: - Precision: - weight: ap_fixed<14,2> - bias: ap_fixed<14,4> - result: ap_fixed<16,6> - ReuseFactor: 12 - Strategy: Resource - -In this case, the default model configuration will use ``ap_fixed<16,6>`` and a ``ReuseFactor`` of 16. The layer named ``dense1`` (defined in the user provided model architecture file) will instead use different precision for the ``weight``\ , ``bias``\ , and ``result`` (output) variables, a ``ReuseFactor`` of 12, and the ``Resource`` strategy (while the model default is the ``Latency`` strategy). - -More than one layer can have a configuration specified, e.g.: - -.. code-block:: yaml - - HLSConfig: - Model: - ... - LayerName: - dense1: - ... - batchnormalization1: - ... - dense2: - ... - -For more information on the optimization parameters and what they mean, you can visit the :doc:`Concepts <../concepts>` chapter. - ---- - -Detailed Configuration in Converted HLS Code ============================================ - -**NOTE**\ : this section is developer-oriented. - -After you create your project, you have the opportunity to do more configuration if you so choose. - -In your project, the file ``/firmware/.cpp`` is your top level file. It has the network architecture constructed for you. An example is `here `__ and the important snippet is: - -..
code-block:: cpp - - layer2_t layer2_out[N_LAYER_2]; - #pragma HLS ARRAY_PARTITION variable=layer2_out complete dim=0 - nnet::dense_latency<input_t, layer2_t, config2>(input_1, layer2_out, w2, b2); - - layer3_t layer3_out[N_LAYER_2]; - #pragma HLS ARRAY_PARTITION variable=layer3_out complete dim=0 - nnet::relu<layer2_t, layer3_t, relu_config3>(layer2_out, layer3_out); - - layer4_t layer4_out[N_LAYER_4]; - #pragma HLS ARRAY_PARTITION variable=layer4_out complete dim=0 - nnet::dense_latency<layer3_t, layer4_t, config4>(layer3_out, layer4_out, w4, b4); - - nnet::sigmoid<layer4_t, result_t, sigmoid_config5>(layer4_out, layer5_out); - -You can see, for this simple DNN, the computation (\ ``nnet::dense_latency``\ ) and activation (\ ``nnet::relu``\ /\ ``nnet::sigmoid``\ ) calculation for each layer. Each layer has its own additional configuration parameters, e.g. ``config2``. - -In your project, the file ``/firmware/parameters.h`` stores all the configuration options for each neural network library. -An example is `here `__. So for example, the detailed configuration options for an example DNN layer are: - -.. code-block:: cpp - - //hls-fpga-machine-learning insert layer-config - struct config2 : nnet::dense_config { - static const unsigned n_in = N_INPUT_1_1; - static const unsigned n_out = N_LAYER_2; - static const unsigned io_type = nnet::io_parallel; - static const unsigned reuse_factor = 1; - static const unsigned n_zeros = 0; - static const unsigned n_nonzeros = 320; - static const bool store_weights_in_bram = false; - typedef ap_fixed<16,6> accum_t; - typedef model_default_t bias_t; - typedef model_default_t weight_t; - typedef ap_uint<1> index_t; - }; - -It is at this stage that a user can even further configure their network HLS implementation in finer detail. diff --git a/hls4ml/docs/api/hls-model.rst b/hls4ml/docs/api/hls-model.rst deleted file mode 100644 index bf0d8ee..0000000 --- a/hls4ml/docs/api/hls-model.rst +++ /dev/null @@ -1,102 +0,0 @@ -================ -HLS Model Class -================ - -This page documents our hls_model class usage. You can generate an hls model object from a keras model through ``hls4ml``'s API: - -.. code-block:: python - - import hls4ml - - # Generate a simple configuration from keras model - config = hls4ml.utils.config_from_keras_model(keras_model, granularity='name') - - # Convert to an hls model - hls_model = hls4ml.converters.convert_from_keras_model(keras_model, hls_config=config, output_dir='test_prj') - -After that, you can use several methods in that object. Here is a list of all the methods: - - -* :ref:`write ` -* :ref:`compile ` -* :ref:`predict ` -* :ref:`build ` -* :ref:`trace ` - -Similar functionalities are also supported through the command line interface. If you prefer using them, please refer to the Command Help section. - ---- - -.. _write-method: - -``write`` method ==================== - -Write your keras model as an hls project to ``hls_model``\ 's ``output_dir``\ : - -.. code-block:: python - - hls_model.write() - ---- - -.. _compile-method: - -``compile`` method ====================== - -Compile your hls project. - -.. code-block:: python - - hls_model.compile() - ---- - -.. _predict-method: - -``predict`` method ====================== - -Similar to ``keras``\ 's predict API, you can get the predictions of ``hls_model`` just by supplying an input ``numpy`` array: - -.. code-block:: python - - # Suppose that you already have input array X - # Note that you have to do hls_model.compile() before using predict - - y = hls_model.predict(X)
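To verify the fixed-point implementation against the original floating-point model, the two predictions can be compared directly (an illustrative sketch; ``keras_model`` is the model converted above):

.. code-block:: python

    import numpy as np

    y_keras = keras_model.predict(X)
    y_hls = hls_model.predict(X)
    # Expect small differences due to fixed-point quantization
    print('Max absolute difference:', np.max(np.abs(y_keras - y_hls)))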
This is similar to doing ``csim`` simulation, but you can get your prediction results much faster. It's very helpful when you want to quickly prototype different configurations for your model. - ---- - -.. _build-method: - -``build`` method ==================== - -.. code-block:: python - - hls_model.build() - - #You can also read the report of the build - hls4ml.report.read_vivado_report('hls4ml_prj') - ---- - -.. _trace-method: - -``trace`` method ==================== - -The trace method is an advanced version of the ``predict`` method. It's used to trace individual outputs from each layer of the hls_model. This is useful for debugging and setting the appropriate configuration. - -**Return:** A dictionary where the keys are the names of the layers, and whose values are the layers' outputs. - -.. code-block:: python - - predict_outputs, trace_outputs = hls_model.trace(X) - - #We also support a similar function for keras - keras_trace = hls4ml.model.profiling.get_ymodel_keras(keras_model, X) diff --git a/hls4ml/docs/api/profiling.rst b/hls4ml/docs/api/profiling.rst deleted file mode 100644 index e403717..0000000 --- a/hls4ml/docs/api/profiling.rst +++ /dev/null @@ -1,71 +0,0 @@ -========= -Profiling -========= - -In the ``hls4ml`` configuration file, it is possible to specify the model ``Precision`` and ``ReuseFactor`` with fine granularity. - -Using a low precision can help reduce the FPGA resource usage of a model, but may result in loss of model performance if chosen inappropriately. The profiling tools in ``hls4ml`` help you to decide the appropriate model precision. - -Profiling uses some extra dependencies; to install them, run ``pip install hls4ml[profiling]``. The profiling tools are provided as a ``Python`` module which you can use. - -Three types of objects can be provided: **a model object**\ , **test data**\ , and a **ModelGraph object**. The model can be Keras or PyTorch. -You will need to initialise these objects by using a trained model, loading a model from a file, and loading your data. The Keras model and data each need to be in the format that would normally allow you to run, e.g. ``model.predict(X)``. - -.. code-block:: python - - from hls4ml.model.profiling import numerical - from hls4ml.converters import keras_to_hls - import matplotlib.pyplot as plt - import yaml - - # pseudo code: - model = load_model() - X = load_data() - - # real code: - # load your hls4ml .yml config file with yaml - with open("keras-config.yml", 'r') as ymlfile: - config = yaml.safe_load(ymlfile) - - hls_model = keras_to_hls(config) - - # produce 4 plots - plots = numerical(model=model, hls_model=hls_model, X=X) - plt.show() - -Calling the ``hls4ml.model.profiling.numerical`` method with these three objects provided will produce four figures as below: - -.. image:: ../img/weights_keras.png - :width: 45% -.. image:: ../img/weights_hls4ml.png - :width: 45% -.. image:: ../img/act_keras.png - :width: 45% -.. image:: ../img/act_hls4ml.png - :width: 45% - -Plots are titled "before optimization" and "final / after optimization". -The "before optimization" plots show the distributions of the original Keras or PyTorch model, while the "after optimization" plots show the distributions of the ModelGraph. -In the example images, notice the "bn1", "bn2", "bn3" labels in the "before optimization" plots which are missing from the "after optimization" ones. -These layers are BatchNormalization layers, which hls4ml has fused into the preceding Dense layers (labelled "fc{1,2,3}"). -Because of this optimization, the weights of "fc1" of the ModelGraph are actually the product of the weights of the Keras model "fc1" with "bn1". -Similarly, the output of "fc1" of the ModelGraph should correspond to the output of the Keras model "bn1". -When optimizing precision, the data types should be chosen to work well for the "after optimization" model. - -Different plot styles are available with the ``plot`` keyword argument. Valid options are ``boxplot`` (default), ``histogram``\ , ``violinplot``. In the default boxplot style, each variable in the neural network is evaluated using the given test data and the distribution of (non-zero) values is shown with a box and whisker diagram.
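For example (a sketch using the same objects as above), the histogram style is selected as:

.. code-block:: python

    # Same profiling call as above, but with histogram-style plots
    plots = numerical(model=model, hls_model=hls_model, X=X, plot='histogram')
    plt.show()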
When different combinations of the input objects are given, different plots will be produced: - -1) Only Keras or PyTorch model: only the weights profile plot will be produced, the activation profile will be ``None``. No grey boxes representing the data types will be shown. - -2) Only ModelGraph (or ModelGraph and Keras or PyTorch model): two weights profile plots will be produced, with grey boxes indicating the data types from the ModelGraph. The first plot is the "before optimization" model, while the second plot is the "after optimization" model. - -3) Keras or PyTorch model and data (\ ``X``\ ): both the weights profile and activation profile will be produced. No grey boxes representing the data types will be shown. - -4) Keras or PyTorch model, ModelGraph, and data: both weights and activation profiles are produced, with grey boxes indicating the data types from the ModelGraph. - -Each box shows the median and quartiles of the distribution. The grey shaded boxes show the range which can be represented with the ``hls4ml`` config file used. - -As a starting point, a good configuration would at least cover the box and whisker for each variable with the grey box. Make sure the box and whisker is contained to the right by using sufficient integer bits to avoid overflow. It might be that more precision is needed (grey boxes extend further to the left) to achieve satisfactory performance. In some cases, it is safe to barely cover the values and still achieve good accuracy. - -To establish whether the configuration gives good performance, run C Simulation with test data and compare the results to your model evaluated on the CPU with floating point. diff --git a/hls4ml/docs/command.rst b/hls4ml/docs/command.rst deleted file mode 100644 index cb9d346..0000000 --- a/hls4ml/docs/command.rst +++ /dev/null @@ -1,133 +0,0 @@ -=================================== -Command Line Interface (deprecated) -=================================== - -The command line interface to ``hls4ml`` has been deprecated. Users are advised to use the Python API. This page -documents all the commands that ``hls4ml`` supports as a reference for those that have not migrated. - ---- - -Overview ========= - -To start you can just type in ``hls4ml -h`` or ``hls4ml --help`` in your command line, and a message will show up like below: - -.. code-block:: - - usage: hls4ml [-h] [--version] {config,convert,build,report} ...
- - HLS4ML - Machine learning inference in FPGAs - - positional arguments: - {config,convert,build,report} - config Create a conversion configuration file - convert Convert Keras or ONNX model to HLS - build Build generated HLS project - report Show synthesis report of an HLS project - - optional arguments: - -h, --help show this help message and exit - --version show program's version number and exit - -To get help about any particular ``command``\ , you can just do: - -.. code-block:: - - hls4ml command -h - -For example, to get help about the ``config`` command, you can just type the following: - -.. code-block:: - - hls4ml config -h - ---- - -hls4ml config ============== - -.. code-block:: - - hls4ml config [-h] [-m MODEL] [-w WEIGHTS] [-o OUTPUT] - -This creates a conversion configuration file. Visit the Configuration section of the :doc:`Setup ` page for more details on how to write a configuration file. - -**Arguments** - - -* ``-h, --help``\ : show help message and exit -* ``-m MODEL``\ , or ``--model MODEL``\ : model file to convert (we currently support Keras's ``.h5`` or ``.json`` file, ONNX's ``.onnx``\ , Tensorflow's ``pb``\ , Pytorch's ``pt``\ ) -* ``-w WEIGHT``\ , or ``--weights WEIGHTS``\ : optional weights file (if Keras's ``.json`` file is provided) -* ``-o OUTPUT``\ , or ``--output OUTPUT``\ : output file name - ---- - -hls4ml convert ================ - -.. code-block:: - - hls4ml convert [-h] [-c CONFIG] - -Suppose you have a configuration file called ``keras-config.yml``. You can use this command with the configuration file like the following: - -.. code-block:: - - hls4ml convert -c keras-config.yml - -**Arguments** - - -* ``-h, --help``\ : show help message and exit -* ``-c CONFIG``\ , or ``--config CONFIG``\ : configuration file - ---- - -hls4ml build ============== - -.. code-block:: - - hls4ml build [-h] [-p PROJECT] [-c] [-s] [-r] [-v] [-e] [-l] [-a] [--reset] - -Build your HLS project. Suppose that you have a project directory called ``my-hls-test``\ , you can do the following to build the whole project with all the steps described in the arguments section: - -.. code-block:: - - hls4ml build -p my-hls-test -a - -**Arguments** - - -* ``-h, --help``\ : show help message and exit. -* ``-p PROJECT``\ , or ``--project PROJECT``\ : project directory. -* ``-c, --csimulation``\ : run C simulation. -* ``-s, --synthesis``\ : run C/RTL synthesis -* ``-r, --co-simulation``\ : run C/RTL co-simulation. -* ``-v, --validation``\ : run C/RTL validation. -* ``-e, --export``\ : export IP (implies -s) -* ``-l, --vivado_synthesis``\ : run Vivado synthesis (implies -s). -* ``-a, --all``\ : run C simulation, C/RTL synthesis, C/RTL co-simulation and Vivado synthesis. -* ``--reset``\ : remove any previous builds - ---- - -hls4ml report =============== - -.. code-block:: - - hls4ml report [-h] [-p PROJECT] [-f] - -Suppose that you have a project directory called ``my-hls-test``\ , you can get the full report about the project by doing the following: - -.. code-block:: - - hls4ml report -p my-hls-test -f - -**Arguments** - - -* ``-h, --help``\ : show help message and exit. -* ``-p PROJECT``\ , or ``--project PROJECT``\ : project directory.
-* ``-f, --full``\ : show full report diff --git a/hls4ml/docs/concepts.rst b/hls4ml/docs/concepts.rst deleted file mode 100644 index b788d5b..0000000 --- a/hls4ml/docs/concepts.rst +++ /dev/null @@ -1,69 +0,0 @@ -======== -Concepts -======== - -The goal of ``hls4ml`` is to provide an efficient and fast translation of machine learning models, from open-source training packages (like Keras and PyTorch), to high level synthesis (HLS) code that can then be synthesized to run on an FPGA. The resulting HLS project can then be used to produce an IP which can be plugged into more complex designs or be used to create a kernel for CPU co-processing. The user has freedom to define many of the parameters of their algorithm to best suit their needs. - -The ``hls4ml`` package enables fast prototyping of a machine learning algorithm implementation in FPGAs, -greatly reducing the time to results and giving the user intuition for how to best design a machine learning algorithm for their application while balancing performance, resource utilization and latency requirements. - -The Inspiration =============== - -The inspiration for the creation of the ``hls4ml`` package stems from the high energy physics community at the CERN Large Hadron Collider (LHC). -While machine learning has already been proven to be extremely useful in analysis of data from detectors at the LHC, it is typically performed in an "offline" environment after the data is taken and agglomerated. -However, one of the largest problems at detectors on the LHC is that collisions, or "events", generate too much data for everything to be saved. -As such, filters called "triggers" are used to determine whether a given event should be kept. -Using FPGAs allows for significantly lower latency, so machine learning algorithms can essentially be run "live" at the detector level for event selection. As a result, more events with potential signs of new physics can be preserved for analysis. - -The Solution: ``hls4ml`` ======================== - -.. image:: img/overview.jpg - - -With this in mind, let's take a look at how ``hls4ml`` helps to achieve such a goal. First, it's important to realize the architecture differences between an FPGA and a CPU or GPU. -An FPGA can be specifically programmed to do a certain task, in this case evaluate neural networks given a set of inputs, and as such can be highly optimized for the task, with tricks like pipelining and parallel evaluation. However, this means dynamic remapping while running isn't really a possibility. -FPGAs also often come at a comparatively low power cost with respect to CPUs and GPUs. This allows ``hls4ml`` to build HLS code from compressed neural networks that results in predictions on the microsecond scale for latency. -The ``hls4ml`` tool saves the time investment needed to convert a neural network to a hardware design language or even HLS code, thus allowing for rapid prototyping. - -How it Works ============= - -.. image:: img/nn_map_paper_fig_2.png - :width: 70% - :align: center - - -Consider a multilayer neural network. At each neuron in a layer :math:`m` (containing :math:`N_m` neurons), we calculate an output value (part of the output vector :math:`\mathbf{x}_m` of said layer) using the sum of output values of the previous layer multiplied by independent weights for each of these values and a bias value. An activation function is applied to the result to get the final output value for the neuron.
Representing the weights as an :math:`N_m` by :math:`N_{m-1}` matrix :math:`W_{m,m-1}`, the bias values as :math:`\mathbf{b}_m`, and the activation function as :math:`g_m`, we can express this compactly as: - - .. math:: - - \mathbf{x}_m = g_m (W_{m,m-1} \mathbf{x}_{m-1} + \mathbf{b}_m)
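In plain NumPy terms, the layer equation above reads (an illustrative sketch; the weight shapes and the choice of activation are arbitrary here):

.. code-block:: Python

    import numpy as np

    def layer(W, b, x_prev, g=np.tanh):
        '''One dense layer: x_m = g(W_{m,m-1} x_{m-1} + b_m).'''
        return g(W @ x_prev + b)

    # A layer with N_m = 4 neurons fed by N_{m-1} = 3 previous outputs
    x_m = layer(np.random.randn(4, 3), np.zeros(4), np.random.randn(3))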
-With hls4ml, each layer of output values is calculated independently in sequence, using pipelining to speed up the process by accepting new inputs after an initiation interval. -The activations, if nontrivial, are precomputed. - -To ensure optimal performance, the user can control aspects of their model, principally: - - -* **Size/Compression** - Though not explicitly part of the ``hls4ml`` package, this is an important optimization to efficiently use the FPGA resources -* **Precision** - Define the :doc:`precision ` of the calculations in your model -* **Dataflow/Resource Reuse** - Control parallel or streaming model implementations with varying levels of pipelining -* **Quantization Aware Training** - Achieve best performance at low precision with tools like QKeras, and benefit automatically during inference with ``hls4ml`` parsing of QKeras models - - .. image:: img/reuse_factor_paper_fig_8.png - :width: 70% - :align: center - - -Often, these decisions will be hardware-dependent to maximize performance. -Of note is that simplifying the input network must be done before using ``hls4ml`` to generate HLS code, for optimal compression to provide a sizable speedup. -Also important to note is the use of fixed point arithmetic in ``hls4ml``. -This improves processing speed relative to floating point implementations. -The ``hls4ml`` package also offers the functionality of configuring binning and output bit width of the precomputed activation functions as necessary. With respect to parallelization and resource reuse, ``hls4ml`` offers a "reuse factor" parameter that determines the number of times each multiplier is used in order to compute a layer of neurons' values. Therefore, a reuse factor of one would split the computation so that each multiplier only has to perform one multiplication in the computation of the output values of a layer, as shown above. Conversely, a reuse factor of four, in this case, uses a single multiplier four times sequentially. A low reuse factor achieves the lowest latency and highest throughput but uses the most resources, while a high reuse factor saves resources at the expense of longer latency and lower throughput. -The reuse factor can be set using the configuration options defined on the :doc:`Setup ` page. - -In this way, the ``hls4ml`` package builds efficient HLS code to implement neural networks on FPGAs for microsecond-scale latency on predictions. For more detailed information, take a look at our :doc:`References ` page. All figures on this page are taken from the following paper: `JINST 13 P07027 (2018) `_. diff --git a/hls4ml/docs/conf.py b/hls4ml/docs/conf.py deleted file mode 100644 index e4d7f39..0000000 --- a/hls4ml/docs/conf.py +++ /dev/null @@ -1,127 +0,0 @@ -# Configuration file for the Sphinx documentation builder. -# -# This file only contains a selection of the most common options. For a full -# list see the documentation: -# https://www.sphinx-doc.org/en/master/usage/configuration.html - -# -- Path setup -------------------------------------------------------------- - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -# -import os -import sys - -sys.path.insert(0, os.path.abspath('../')) - -import datetime -import json - -import requests - -from hls4ml import __version__ - -try: - from packaging.version import parse -except ImportError: - from pip._vendor.packaging.version import parse - - -URL_PATTERN = 'https://pypi.python.org/pypi/{package}/json' - - -def get_pypi_version(package, url_pattern=URL_PATTERN): - """Return version of package on pypi.python.org using json.""" - req = requests.get(url_pattern.format(package=package)) - version = parse('0') - if req.status_code == requests.codes.ok: - j = json.loads(req.text.encode(req.encoding)) - releases = j.get('releases', []) - for release in releases: - ver = parse(release) - if not ver.is_prerelease: - version = max(version, ver) - return str(version) - - -# -- Project information ----------------------------------------------------- - -project = 'hls4ml' -copyright = str(datetime.datetime.now().year) + ', Fast Machine Learning Lab' -author = 'Fast Machine Learning Lab' - -# The full version, including alpha/beta/rc tags -version = __version__ - -release = get_pypi_version(project) - -# -- General configuration --------------------------------------------------- - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom -# ones. -extensions = [ - 'sphinx.ext.mathjax', - 'sphinx.ext.autodoc', - 'sphinx.ext.githubpages', - 'sphinx.ext.autosectionlabel', - 'sphinx_rtd_theme', - 'sphinx.ext.napoleon', - 'sphinx_contributors', - 'sphinx_github_changelog', -] - -# Note: to build locally, you will need to set the SPHINX_GITHUB_CHANGELOG_TOKEN -# environment variable to a personal access token with repo scope - -# Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -# This pattern also affects html_static_path and html_extra_path. -exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] - - -# -- Options for HTML output ------------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -# -html_theme = 'sphinx_rtd_theme' - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css".
-html_static_path = [] - - -# -- Extension configuration ------------------------------------------------- -html_show_sourcelink = False -html_logo = "img/hls4ml_logo_navbar.png" - -html_theme_options = { - 'canonical_url': '', - 'analytics_id': '', # Provided by Google in your dashboard - 'logo_only': True, - 'display_version': True, - 'prev_next_buttons_location': 'bottom', - 'style_external_links': False, - 'style_nav_header_background': '#2980B9', - # Toc options - 'collapse_navigation': True, - 'sticky_navigation': True, - 'navigation_depth': 2, - 'includehidden': True, - 'titles_only': False, -} - -html_context = { - 'display_github': True, # Integrate GitHub - 'github_user': 'fastmachinelearning', # Username - 'github_repo': "hls4ml", # Repo name - 'github_version': 'main', # Version - 'conf_py_path': '/docs/', # Path in the checkout to the docs root -} -html_favicon = 'img/hls4ml_logo.svg' diff --git a/hls4ml/docs/details.rst b/hls4ml/docs/details.rst deleted file mode 100644 index 7508330..0000000 --- a/hls4ml/docs/details.rst +++ /dev/null @@ -1,33 +0,0 @@ -================ -Software Details -================ - -Frontends and Backends ---------------------- - -In ``hls4ml`` there is a concept of a *frontend* to parse the input NN into an internal model graph, and a *backend* that controls -what type of output is produced from the graph. Frontends and backends can be independently chosen. Examples of frontends are the -parsers for Keras or ONNX, and examples of backends are Vivado HLS, Intel HLS, and Vitis HLS. See :ref:`Status and Features` for the -currently supported frontends and backends. - -I/O Types --------- - -``hls4ml`` supports multiple styles for handling data between layers, known as the ``io_type``. - -io_parallel ^^^^^^^^^^^ -Data is passed in parallel between the layers. This is good for MLP networks and small CNNs. Synthesis may fail for larger networks. - -io_stream ^^^^^^^^^ -Data is passed one "pixel" at a time. Each pixel is an array of channels, which are always sent in parallel. This method for sending -data between layers is recommended for larger CNNs. For ``Dense`` layers, all the inputs are streamed in parallel as a single array. - -With the ``io_stream`` IO type, each layer is connected with the subsequent layer through first-in first-out (FIFO) buffers. -The implementation of the FIFO buffers contributes to the overall resource utilization of the design, impacting in particular the BRAM or LUT utilization. -Because neural networks can generally have complex architectures, it is hard to know a priori the correct depth of each FIFO buffer. -By default ``hls4ml`` chooses the most conservative possible depth for each FIFO buffer, which can result in an unnecessary overutilization of resources. - -In order to reduce the impact on the resources used for FIFO buffer implementation, we have a FIFO depth optimization flow. This is described -in the :ref:`FIFO Buffer Depth Optimization` section. diff --git a/hls4ml/docs/flows.rst b/hls4ml/docs/flows.rst deleted file mode 100644 index 37b8b44..0000000 --- a/hls4ml/docs/flows.rst +++ /dev/null @@ -1,67 +0,0 @@ -========================== -Optimizer Passes and Flows -========================== - -Internal Structure ------------------ - -The ``hls4ml`` library will parse models from Keras, PyTorch or ONNX into an internal execution graph. This model graph is represented with the -:py:class:`~hls4ml.model.graph.ModelGraph` class.
The nodes in this graph, corresponding to the layers and operations of the input model, are represented -by classes derived from the :py:class:`~hls4ml.model.layers.Layer` base class. - -Layers are required to have defined inputs and outputs that define how they are connected in the graph and what the shape of their output is. All information -about the layer's state and configuration is stored in its attributes. All weights, variables and data types are attributes and there are mapping views to sort through them. -Layers can define expected attributes and can be verified for correctness, or to produce a list of configurable attributes that the user can tweak. - -Optimizer passes ---------------- - -To reach a state from which the code can be generated, the internal model graph undergoes a series of optimizations (transformations), dubbed -*optimization passes*. All transformations of the model and any modification to any layer's attributes must be implemented through an optimization -pass. All optimizer passes derive from the :py:class:`~hls4ml.model.optimizer.optimizer.OptimizerPass` class. Optimizer passes are usually applied to -nodes/layers; however, a special class :py:class:`~hls4ml.model.optimizer.optimizer.ModelOptimizerPass` exists that is applied on the full model. An -example of a layer optimizer is :py:class:`~hls4ml.model.optimizer.passes.fuse_biasadd`, which adds a bias to a -:py:class:`~hls4ml.model.layers.Dense`, :py:class:`~hls4ml.model.layers.Conv1D`, or :py:class:`~hls4ml.model.layers.Conv2D` layer, while an example of -an optimizer pass that runs on the full model is :py:class:`~hls4ml.model.optimizer.passes.stamp.MakeStamp`, which creates a unique number (stamp). - -Subclasses of :py:class:`~hls4ml.model.optimizer.optimizer.OptimizerPass` must provide a criterion in the ``match`` function that, if satisfied, triggers the transformation implemented in the ``transform`` function (a minimal sketch is shown after the list below). The boolean return value of ``transform`` indicates if the optimizer pass made changes to the -model graph that may require running the optimizers again. In that case, optimizers in a flow are run again. - -Optimizers can be general, independent of the backend, in which case they are located in :py:mod:`hls4ml.model.optimizer.passes`, or they may be backend-specific, -in which case they are located in a folder dependent on the backend, e.g., :py:mod:`hls4ml.backends.vivado.passes` or -:py:mod:`hls4ml.backends.quartus.passes`. A common set of optimizers that are used by FPGA backends are located in :py:mod:`hls4ml.backends.fpga.passes`. - -Certain optimizers are used frequently enough that it makes sense to define special classes, which inherit from :py:class:`~hls4ml.model.optimizer.optimizer.OptimizerPass` - - * :py:class:`~hls4ml.model.optimizer.optimizer.GlobalOptimizerPass`: An optimizer pass that matches each node. This is useful, for example, - to transform the types for a particular backend. - * :py:class:`~hls4ml.model.optimizer.optimizer.LayerOptimizerPass`: An optimizer pass that matches each node of a particular layer type. This is - useful, for example, to write out the HLS code for a particular node that remains in the final graph. - * :py:class:`~hls4ml.model.optimizer.optimizer.ConfigurableOptimizerPass`: An optimizer pass that has some configurable parameters. - * :py:class:`~hls4ml.backends.template.Template`: An optimizer pass that populates a code template and assigns it to an attribute of a given layer. This is commonly used - to generate code blocks in later stages of the conversion.
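As an illustration of the ``match``/``transform`` interface (a sketch; this pass does not exist in the library, and the attribute name is invented for the example):

.. code-block:: Python

    from hls4ml.model.layers import Dense
    from hls4ml.model.optimizer import OptimizerPass

    class AnnotateDense(OptimizerPass):
        '''Illustrative pass that tags every Dense node with a custom attribute.'''

        def match(self, node):
            return isinstance(node, Dense) and not node.get_attr('annotated', False)

        def transform(self, model, node):
            node.set_attr('annotated', True)
            return False  # attribute change only, no need to rerun the flow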
- -Note that :py:class:`~hls4ml.model.optimizer.optimizer.LayerOptimizerPass` and :py:class:`~hls4ml.model.optimizer.optimizer.ModelOptimizerPass` -also exist as decorators that wrap a function. - -New optimizers can be registered with the :py:func:`~hls4ml.model.optimizer.optimizer.register_pass` function. Optimizers should be assigned to a flow (see below). - -Flows ----- -A :py:class:`~hls4ml.model.flow.flow.Flow` is an ordered set of optimizers that represents a single stage in the conversion process. The optimizers -from a flow are applied in sequence until they no longer make changes to the model graph (controlled by the ``transform`` return value), after which -the next flow (stage) can start. Flows may require that other flows are applied before them, ensuring the model graph is in a desired state before a -flow starts. The function :py:func:`~hls4ml.model.flow.flow.register_flow` is used to register a new flow. Flows are applied on a model graph with -:py:func:`~hls4ml.model.graph.ModelGraph.apply_flow`. - -There are common model-level flows that can run regardless of the backend, and there are backend-specific flows. -The `convert and optimize `_ -flows do not depend on a backend. - -Each backend provides a default flow that defines the default target for that backend. For example, the Vivado backend defaults to an -`IP flow `_ -that requires additional flows and produces an IP. It runs no optimizers itself, but it requires many other flows (sub-flows) to have run. -The convert and optimize flows defined above are some of these required sub-flows. - -Another example is FIFO buffer depth optimization explained in the :ref:`FIFO Buffer Depth Optimization` section. diff --git a/hls4ml/docs/img/act_hls4ml.png b/hls4ml/docs/img/act_hls4ml.png deleted file mode 100644 index 8ae14a8d38fbbc9540c9b057d080b61b6ed6e1e6..0000000000000000000000000000000000000000 GIT binary patch (binary image data omitted)
zYR1}QwpGz5LI+xm`70lDK$_J=hM)Ak{s2|eK?#@;yaoQi0xQYtl=-}mk-grq<*qC} zz0E*-4^VKA`Cd!4O8hxQy?0MgR8;;SXFWDHmTz!jPC8TCrVrTBd-_c()!T68TdDN# zCM8y}yuuz**Ci}`OG<~d;kne0kFQDMQgNhWxXaUT>h@PsZHrGxsrOm@Zjw#2)p{pl z|L?4)ft&Wgd}4ZRVC7Q!^WA6Ce)n&CY2`t|ap2EM`v3O{oe~J4=E<5l;8s{J0BD+0 z`w>mIN~Tw-XS&xB5%$z{iNMC)%;mes=kx(nM2;N&N&aM~`|&wXU_5D!3)tloaS|uo ze6rY+Ea7O4)qW0a&RP*Rq&3q%eHLSPg=3c1LiUHH#=NDF7LGetJwJyh>BY%=Lk z(CYwSrvo2L5?be^b;A6PR7Ay2hRan50ZzWbLDJnpt*2m}KRkUrd9_nmhH;NJj}MVz zWLmUox-cs`WiIxtMlVgl#2@Q*e~!UbX^PxvQWa*e_uvmO+*8*P)hKDt*(>W7?8Q}6b|fybJom$x~kt6Nj|LyvSE zERZ_d8iafeG#hKGo*ncW-X0V9uxc9Y;K!`Rq*hgCw5e^rlOcDpB}05-2D@{GY!i3X z2Mb>0aaa`VK_+*3TJ^C3Ww<;`n`fIb&r}P-92$AOPaSY~PY?ly#OF!XS@drbfw9o7%kMUnZTh)`)B^J0TZ2U|-COe^q+SYBholos3^utNq zs`x}E{I+mBuWc6aU5fr}C(4NEW3|04nUez_2^J&s13uHcdJ z&-0es?`Z(N4m^xi;Xj09fBp|=@B!t_k(ox>fy-;iOO&Eu$oOK&Mf2(tZid3d?l4FN zynM>a2j2dR;pcv;zm>phShNmqn^5xzFM(?7d+c+^TO0_8K4j7rfr%6Y=GImdEbT)W ztJr#JKmFG(B3IsLyc6)Njwh|-#Fz`yw;_Mwwp3ctM#AEE>k{#of~!aR$wLx|xVF^q##5c7yu6S3&kxE@dX!mrHGp zt}D*S{$*AE#{7?G)J%Rjp8tcjSP5=MbCkP7Hn=yAn@njlq8^aNVSc}nFl1N4O<|Y-pGcNnE@;`olU(^Rx-b4Q%>92` cNrya3rGCYeA8+8nX9ghv%EGWr-}(Ok0%+Iypa1{> diff --git a/hls4ml/docs/img/hls4ml_logo.png b/hls4ml/docs/img/hls4ml_logo.png deleted file mode 100644 index 5d1d4ab6a10f0a0cd55db0e782c7419ad51b4a02..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 26453 zcmeFZS5#Bo7YBF&kxvk?qjVJz73oDfhzOyBA|g$s8LG5|4uXJ%X6U_z-lY=(1qFf7 zLhl``XtOG%3z*lnU zzq8;Mb0=#p_;tql5$xH2;N$h5`3Latb8nUPogs)NnDj#yzVO!qT)g3;_`>C>gQW}7 z)X4%uB9XjsdmCpn)3+A94o+5y1Sv)cVuxUl<)67FtxdRl8M!8({@q&|=drtMW1!L( zV+rR}|DG`&Mmj^CKj2z!OAolvFO*j#1m&O-%yrk|8D>Y1YOWv%lU2<+%I*JDL{r{i;CkOm0 zUW}4KkaAyUxQqm1Yq4`E9AS7psNmzf_Ik5$~BOxdASk;BdIjwnygVkQ^!>M+ZT2301Zu zItv7=P=w)yGke!=Y05a&8Yahw9y$w@zk4+Q@m6dH%cTY92;obfaUZaSqUW2rQ=5&- zg2|w1asLX6e|Nrv;EfaE8Bmv{$qu<<}?Z;~z_d*d#7{0rYQ@r9v zo}x0mcnhd6+(D^vm5ZEj*7l{d6xI45rENP==ZdbXLiDEwpo3r2|4)cum;YBEiw=?= zeRtm3-DSOaMqc51P$Y-CaQR&n7fj+Owx#wc9n}J3Tq01xbS|8ko2d5~x?;u=s?I4D z@huePggNmpz^wBN#coHTA6QoD;<%7g+{@>LJh2mEHAfyu6C(oyNq(Eb!ccf16~yrw zfFAo*B9TZH8WQsD5w2sxy9%w3Ibn{o7fhQijNUFq`@rbmo9?&ntPV~2;$U||(MdGW z1tEZKevfNBH+mH4>wyjnan!%W^`iWv%*cl8i%^8w8m7j_kF`eMLC^t7pSg1z z8XD%tiPES=m3&o6dOu}C>~=EyfA+Ta_QuJG>yVtVFyL6(Djr|-#U&x$TMM_^*G0iD zDh`v4bg=-|SAwIXfUdKUo;@tJ-J@{1ncr}lu*IPc1H1XxY3eMER@yXn`jA0djBmhJ zn^mh}zLyt{ZFNM{F7&1-LO z@57E~hx{f;RDVF1l$1o(c-hyA{n4loAeD_^RK-)3aN! 
zToFa5E=l8;(a#VxGor-iy={V{BZu~xNE8zXr>1;+Ha6Z?qn&YGb-fo(dog1<-$%+%hZe8ict-5-z(-M)0ULjXwQ>in;(Hd0S6E89x%Q@T8 zl-SzYc^ArUQGAzuQML9ZwkB{Y_F3xdkR5xUuEL<8)Ew<3psv&Iz^^%ZT@PxE7M5Ae z6RSHyHuE2E3bZo!F>H-P)M+Ip>kpBQa=4Lpho^-T?r&SeSW4V`KO1j9!)^hdyr{DM zwK{Qp4Mvrntf-^$^+1Tp%Im>$6N%n#kBuKxiGuOi2^80_hxjK#N8>RM3?ypD=%cc2 zMI{w*zA%sKS?|;R)gpuHhK2pPaJDc<4ahAei-KZ3YK-KM23RMB!(JuiHAk z3=@G{jnE>ODasjFOJMozKv;epcN05(jB%U->eWpXQ+=bO9(rn9WZbt#YUA-6$X*8Q;Z@9Qqheih|wzdn#BOO!X9Si!E4fI-W|WXfiOIP__z zMpg&|cXM-_q%qlVx?}d?Y?`w4{&YaOP&~GplO=K8kkBCbpTC(^cM8$%d@m$dU%%Qi z6gJ_JieN>)TuTw|W*O`6-&Rhixw`p*0g^lSXLU7knjm)X^z&&FdJ8O*j{WSLcFf|8 z74SA|4f75}uP;f)#;embztuip1`Otzr2VR%E+fM!$|bjOC6QpfvFZ#UM{8!BxP{7I z6z`-%%3`vz7FHe#l5x|5&lrgo4Or|V*e3Z)r{&Z&1s>?C{qpp*!Mubdo;h=7bBKq@ zZQg|1C|C)C()>x$QK0GdlhjSjQDg7KY5WrLUHjnB(4GMD)HbDW8*|hPxTa9{o?OfE z@aV|q!PA{I^YYh8h$6MUyA?`?Gkxzy++sCj{TW~~(k&9J1wx4akQ+kORVYEimLVC$ z$#K4!?{4GgQ&S4SaNefQ6zL;pAevVT)$xCLgd|OHv1Xqr)0)o64;)FSd_L_& z`8HEv?4NrRiJpgP@7}#zOd1nMEy5BatU6)M;-{r(|KyM+5a;J=0kp`eIkIzCn?X&E zfrzcM1~9^oU=JcWlC{US3Tq~e@Q9u{!~c?yqAyxAd$TrSGifD}TIs}Y5gWg{iUqLDjPK1sRPmm*C^zW&(rd}ZrJ8zmkohFQ({5YQ-8o_=B^poGIM1eio^f8jPWdik)Rtl`e0 zauPWNMRT2R78UwD3N=K2_Jxk_$95Q@+RP_;@3uRgWPTl4OXQeH(L5{W+U9L2;|y{8 zxVunS>&DA4o1hv;%IwB{2Z5~>quVlBY&M6KKh=|czYT0dp4EH1x6Be@|8pii{^~pf zhu@|PR-m;o+6CVK*K#-g`WyiFeX-Tm)uAh|7Y7HM#qMt^dTo4q(JUoobCZh_x{?#h zBz97sG4U}H%>?Yp+a$>yifW{wm)a1772Klp-6HNy`6`2p)9#Nj4H11*)DBu`N@|&+ zYUITJ{uTt?3yM!>akag0#^jR_jbA&z_u;mk3V;wOuGeASuu$qk10Er4NuZ?J2+7lVr@s_e$-&1fmoE_rj#)~ry_J)Gt& z<06A#yi*BUdCIPj%7+tE0cxi`s_{gh^^5mj{|E6PKx+rNl2bT3>tb-DU>6S{E_?;OtxECAmE!$p`OREMAGsR}#(P@3gW?^`Je=#nT zbhk<=z(NL<{7`-uGhm3Rvt1Nvf`O*{s~;4?!$tms)?5lPR@BD7GAG_U!tKVjLAWPJ z$zRU!nAt<~5@wD(_x$bKw@>fGIhuOfI$|D3J|sv%_a=XI#1y&d-=yI&Y48eyDJfoG zX*Qt$a5&u0s{2d&CX~kA`AP8i#QH}`<-CIqlM^Su_<@=uGC)bux#qXuFnX#Ui>Xd~ z3LN@%FzquzSliDW@gvV+lK$+&uPqfM5o^P<1ci7tOI46yYPE|$6|(9|dH^tvOwo;l z*3$53?qJ*q71e<^l#6UTwhM%dCQfiH3&2)8ZKFZB zL^Qo_$w{mwXFQ{?_y($x1S<7cNEZ3_?MC0u&W_D&N*z#CH~4DPo6LH4^>ZYLLWZ?n z6k_1W>}Ny9RDc9!P}!qAKDjVi@^kmK@SivJsR)NctF^MR7)IDw`N-O9d+gPDR*q4 z{kx$5S3>Oc?avv)jU#p^=ehkA~G!*PNgSedIqGq-^lmB7yqoT^<96c#c=ocENGz0k9u?Ck6uqeIyCjpG8K zT^|-i5C@wJd@fsT%AZ`C{5lBUKSFMNF}}YS2;e@d)>hOr7oE@>=DgPmQcVGp>r!4B zb0xU7nFy!PT5u%uEXRV2jVG%nrxJJYa@v&g7j|A0G@H?qr>(t6W7@iIyt*iBTxH8& zqC^(MXYw~l>qjz*?xRGozHw@+F^R2$db+w-chtA)UIcKUL3o)3QVL}4k)7pW9Du&p;B7l*#==cSm?T*)R%Z8M?9WlIYHD7(- zZYX*l{U$m5tPUz!v-p(-jOb159^+B|BN_iTEXd_htyB>6U~xdnrX*9R#S*|Q*i=7S)Me7>kFJO9mFg~S$4 zJgVd~lG%?_JGCl)*W+Ct9~x;>r^eS+8y0eVkC!Q5=$ZnBSvTpV2Tb@xTNn!C5F)93 zb9;BUVPv9_ZRRyO_noC@dU|X}v5m(IMb3*@{dh#2_c{MpPv1vE{#2}PuaeNHH%^dp zYoUHQf`^6qn^w9H<6MQ(@15sEAn#bnyqg~phX z5jCU~1iL1xXzK)7EXJ*@tjI<;oqcC|{fW1?IQt+w)Q-a*9;UYKOjJ0B1hx4L^}n4L z2`p9sc~rXzfM|4vJ!D=!60TBbtm8VW?{`a(^FRNo-QC@k2t#px|i`O=Rnsz#x6EeF`bFF4i6Wuea% zA+qNc0DkPM0-)yFnx1%m98Mb=w<;YP8hRhaP0HxnP3)rNO|M^D;KSOA9?`*!;7ib_ z@pmZuZp-p=9Nkx{FUBSyYa6Z!fVfT6)s=W-Y>Lq{AS(Z;${;E-&{}Kq!?^4mJ0KA*=)u|d9f6mkb__tU>R0tnID=f&WiJWiAY6V?QL^j&LdQ7qf)EYR z7yk1ULXyK+a-{4F$?l+(!aw(&pO~T0soOg{^{;o=M#ArCIS6MoG;I8)_tS<0P+9(J z5wPyu=X$Sr)dZySowjkkk>ABr{B6<)To>)wFAApibbj1~Nvgx+68DtmuFKm&b``6di`~Z9K542$z@Gt;PE* ztE%HBY|z!Tyu6c^g$}(!ngy|fhoW>{ZC>(E6^I#V8epG(f=h&fZdQFEYRl)s9Sh?V zK=&JN!c{{0f(R#(pdM3a(Gio%=6n2CaqA%CSUpw7I}uHVBp#%5kcv%Q@Z$K+*zKko z>)NBd5nF^0sJIL`cETVI*k(W{Q{F!zT3o7yMir)8y%~X;)=Be$JRn_ z-cJB6uE0{m;dPSZ-Wq_^gU-%Q3W3gZKZ`-8gM9Gd!5tNei3zmXLt4=sjM6ur0ir+!Wz*B6jt>YZC;sYu3%ctBfQUWVJq=gWSM2b2AaD3^( ziK)aT;l$3h`N6?KI?)}4yVlm$9(CYAwy@J;DPP`Z{%;kHjg1$W;9;MXMxa+YB)i<8 
zXJllo5)%>jI>@^tiWMHhEF8uoWbVa+nX7W{b$a!;(PqT*P*N=r`V<;#~1Js`b9V|*1a`WANace_61y``!hSUfA`3`=c( zzDZU+*4F_bB!no)&%ZZtR!%fAd4b^2ZuY6j5&;rd?mHyiuog{}et~1i?3y7k)Ew!; zc@=5>*{#FBoQ3@SC026lXh#RmM{pqN&r(r^GskdQHBHO4(W7k5;{HAmMd_dZ$?Q9; zDbcoRBoc9*6s;^`u^HcbH9RT(;wx)C=UW-ZiEA-2F&Hyi$}8`{7uBP~!-R2futV$0>}Vp+~`l15_#OLLp&e_{_d4R6e}kH;f&2<&s>QF ze$j*@Pi)0a$mX*^S8f><1fc`|dGTB%7H1399Nj?CeR_C2160LJxJyc|EMCU@P!#DK zj?QH=XmA|5m@gI_+Bq_g7ba$DpDUpQQ8c)Uj1XznNV$bFlU3blq ze%V<@mgm6LQ2+Bc_w%kopn4n0L45jirqOCy_8#ndP-;}ggZ39fo=RB9U4wZRXawNv z=A-|?y%-jrOwKPXy!}#f96g9_V0LCs%OzD7Y&gB|%x=U2AP5T=>OP_N_XCWGApgfl z6+3!J$kXCydbrH)u-Q0K>6hbP@dpJVlrf`Kj5dG;2<>MEDdlBWW?Ih1d}QF%?;}5I z_FteOsC?zx)ztGI3g#(wgbi#Z?Nt`NQQaEmipoY=L&_eyQSz3%osk+6dFEX+l(RTe z)eXSE6|6N30dg1>oWrbUC6bf_E~k<4tMnK zzE9Vztf-K>;K_caUq2GV_-F`efq0c}{eIH|)!slhO=$c){3lW0@8OZI1n|#ue|6bm zgjUQ5=f^>2)-7F_Fhu>HK^u}2_~;?E+!lex zy>-X+wXQqFkLWYq`zEiWKC0iYg-#9NKd(=!7Wf0jOnVbIg65v{XMe$lymqCf#%@;} z_C>?v`(*q`9rxULGEf$+oBy5+L(|SQ@uSJK$h(qo2i7nKbaJL)Eh!hgbuvx{sgP1v zx!$*X+)9uNZt+C`JHkqYS7MmrD8_AFTltF*9oANDTpogceKcu;{9a$mBjXno{3=ag z|Db1o^fnxQyRS>TYg`)FO$Ii~WhR~C7Y8m-8Dv6LH3950Ihw0JnD|Ce#aigX-Xb!; zH@)thWrkaz`;ktjh1N)()U_g*s7fjYl2rH=DOQC(J^3jOIo(Ni!6~nq zUZAL6(K3k?zKQ3EW?!L(gv`Kw&;A28;g@mouLcM5^mG3WS0fsZdJ#NxbkoFbB2SIv zeZZ{$3GFf?WHDO^mlm?vt*vl=hT=b;dBRf)l=j^I&8+u4ITI5Rbjl(kBBaZniU6*(a1SZjrE@g#Nn8h4;Y;5&Y{~zFh8e(- zLBj0Ar{w?;Vw)v)t!&W$hD}Vl7sh21(J`!&0R+Sw1_8|5DngswZ z;y1|zKo7$q)oW))&?E{0`h5|y|Be;zpZ~?iFd^yxajCj1Zgl4+?9M+5ev-Tkv`=Nl z?5>+>ZlWd$w)>!|OX_UPIf`qu=(aD|N4dNLohnGq>jk(9rC81OpJR*M-uy?I8%X~^ zjb5U@_~;ieQHjJSxnmNw5De$OGZZGUwaPhwP6lfJ^WXU+IP6$Qj}>7`|DJy3EQ#}_3XfZ3=Jf)#E*Kf>hM0@mk}`z{eO?1A=z456J@imo@zphTps>hKe*tj6^($d z#D)!qCBO{0D&NZwn-fA&>d2v=--X*Of>lr*HtJOh)C!T(5Bf?m4k-UtwVB(+)7pU^ z&u)9T&EyA$=McryrCik4^E=GFo_E>SwdKT(JVSzmL~7Vf*75Ymie- zq>*XO6x-JwZWzW%YO|(r(-`L$`Ggp}Fyoajztpeh1nWL7kv>|l+Hy!CzKC5v%pr$~ z=pYq_oZ?T+aj8-jvi8@<-#P?KG;zo(Ma9s$o(UFVMyRDta|*KX(VOs$Xrt(;ExNt* z+L1nzGDp%iTVp+`LXonVxRKN?JxL|iVlCbFTui!6;=Hd&`O!FD#@WD7Bt0KF>Ae+1 za}-a$ENL@3@Kk|qSq&WrH)UsrnK+9?;PvR}boWL>;;8YFUrN%R%e_>ko6PP5>5Q!6FLj(>?UL87jN5)1 zzoW{SnS3XmnH?Uf*6ie|ve=9gjq_5xKAMKUN^Kke+`&z5+&; zA!n@GkGBmUZeD_Mjk(USHa@9kH1f#7ilm!1CD^Zj6FKkFaal>mQK^;X+p9-)9^eqp zBDGJYw}%p!AH#yBm+~8W7FtzyFCpt6JJr?(rCuMMoak|Fto12o-(NmlSY6L_4BVVs zG#whUK)m>&8gF0kZZTHfl{82!p)8?%mS3khH-Ee29M)u=wc&~8Ff(2P*XNzusG7Pb zv4NF*mN1Ojz~H< zE{`x#k9s?A_AE`oBaY*%JG(>T1x*Y3D~HF|>8>7YToyf6sb^6LB)%t^`m+yR@yV6) zym70WzGX?qTh)5o=JCRcmRl4<$LH>I6)xNVSz~2nJ!t=xwXJ%l>`&gh_^pZ!X|Ckx zzTc|zg7;JKtu4wPfr-X9=_R>D)m{u`6^|_(=II@DkH%Ixs7bO%l=S878cu%|yQ5`o z^w3a*@12C4OCK*=fnhdhnTNQi%IawBf^2VwwfYSi@wdLgM?H5@E$*fXOYhJl_6c@` z^{2%WIkzR&-!7j$2urNoy=^ed8&|Esz)~pCyv~1s5l-@qHrJ(m=_{;(vl(9#;c9%W zspi(cQs+QB(U2pu>icU2x&nIG7WUe683Dn~GxT?t7*u>7|1~thTc7=Xg2}EsoSUFc z9y<8oU4`$h8h|f7nyDGDTg6Phv7|AaF(`B~4xkfTHjMA8OtQn<|2av&`^^;h{bFNE zL2YeCGg;=&?p^6}^%<+k+=b1()3^9zS6DxJ29r=nltI#q+K|(5Rfo?~KC@S!`X}Tc z5ER%VTpm+by9x?=Cc{c1Lzlnau4}mTd{yOY^HB{5cFvl*eX;q&12cQ7{PqJT)oOD~ z*kU%#b0{mHvEliBzR8$J*QVK`+7lf@a2Y^xf^34ug1rA+iU+;>ZW(6w5%L0@yHTi- ziHFzPTq^$6@-zK|hK5xAp;Cg8^vcNxSnHKPj*;k)$m0oK&%L^q%7VIqg9r5Zx~d;l zoNy|N-PVsQxzzsSJE*J2p=BM{Ccft5?*tZ8JPY}#r*E#BtU&i+ipojcwtdkmG?b*it!Z=ye&K^)(2>)@F7<1L+DlVjN-vc<#x_eGZptG0i<;G?D&!U>0jzQKVK;^%oWWeG04RGE`{ z^cbgQJ3`E5a#T9==qg-^e0QNV@3QQJ5!FlFm0sqpG2t9qRNTIieA05RMl8>b2G((u zDob6v4gGbiQ?}Ti=KNJD$+CP`qfXJRt>?}YoaIBD&yg-%iN}J8f-WUt)6%>+MT)c# zm#(#>L14TXY-72zyEj~L`H$Q#CMBqg9{82s9pN49n*E>Sc&tNoo(Nuw(h`-N{rz+p zEYQQ&UC{>_2v(ZJ2;YCF zfG=ap-rt6=k)VouYAHFfznQ~ECB89J*Q&!dX7i}e=rm9BLV z(G}%aE;=u>um-6D6PI;AsOb3oH(vdjU*Bq-hDf=udRl+jcNc=X?L75kv8+|Mj^PQ^ 
zBASJhD>yD#cBr3loYI}@_Up=+HF9l~BHfU+Jkdh@w1^8S0;;dk0U#W#Tg>HSe)vps zyYG_6;$7L|YI<357m99OhMbg^E~G*3l(NJvJddJv5?0pesgd)kuDTv;EaLjJnDVec zN#?i=^Zb!khnFL}uLLXo1chr?Q`HWO&@i~t$B~CL#ODL9jSq2u>be;8bq8M88nZeV zZ^y4-w`tT@W~8rNlBY;JVv%2yv z0-o74EiDGOFP^USb8>lX_1S&na&!ytX>a_B+RR=S(W|h$c??ffzeNWoV$sH70Q<*DIx2HRcEg++8Z- zd{jv}klTy2j>~r!2Yosc0`FhSql`)fv7kV?f$j~4TGZ;okEnEShbt2Je1?QzS)E!j z#gyjGsgk;`^1iNjn;_kI?=pb&R#q{;oby`j0d{`aR6<8D=tmH>x z?q%KGWM15j)|TZW3n8cVyVNV5xus}1JJQ}5KQ%Dysy2G_p7s*93uZras{C5%NUCPL zyy~hQ1$;@)-Q(5DPt{AKcPxn;S`SJ7);$47l#ka?mwZAk$aVAyfnn>)u}ob!&-%FBhx_(O!k_RldWHM z%@d6$0nzyCD|K{Q#aN-(JTK(&6yM_EdvJry5rb=T3DVu*)0ayJ2;i@9%L;>xZhU8v5I<1_U&=i*h6p)|d$wQx z)2vhhr!{(W_q7se7K-98aLZb0EPGX=X#o76-Q1g~nn9_b>nCr2x*#)kLXSPZm3RBU z{1#%c+s_SmY<~*kY*cDKn4EPNTxg=<^ai7Q3u~M3Shnoh1I~(Po2Nq6wr|>>Q<|Fa zVr{9NezdrTu8O*j_X^y?k5dviZqv&SKVphyij@56JZWwpYCma?<7o?zw0%9hY$Vog zBI;{2>#9B2Wo_v4w#Mv_fl5Z%y3ewD!RR`V8w(WuI%b7Va!UHkS<5_eT~H6Ih*1x? z$i@gopm$KB_+zu%lBezT5iSvk$-fUaqi>1sjUL*JO^WR|;Nf@f>WK{KF5RDSc4O#R zyVfyMMDv{I&)A%j^k%g$UP~!gX&a5c!&!vtKiXsS(QS?mccM4Gm{5;1cZm2VvpetG z>OZ~mXh?3M&5n&ApgP?ZU$FN*fkKknm`YAjpjxAB+ZQSL?%3upg_i+qi8b`_)2|LgJ;QHJ4qg3p)7ZHHYCzP>(@Ea9FINb7v zog!TOqLqSKdEO3r>PlJt=)Owp=O5Q2Jo7j(A?EE!rQp#}AO($Yl6mj$Rs5c6oeA!a zWcZqG+p`g>3D@G;(GPtgn9~mfd3~4_p6wD&9W3#(Sc$~xyT{gK!)*GIJ~tAG&#z79 z=-L%XIKy5AHKvDVDaX#61^|R+bz>RLPQDdNOFGHcuDk@9-i3Dplj3K!yMm; zyYfw)P@?;{(9cfGXz6oB9q|N6LJD|V6Y;hNP@1&B@LInX@*Sb{jTZLp4Y_~f{P3x3 zt>Joz^9eV0ZN4MNNM?67n!!O;Qngl~x)x_Vsrj>OokiQNJ+Pxs+Ta`U%Wff>hA$Oy z&}v%Qc;xm0~Hu{{q5mmMKuydm?o`FQr8Z9+@~o;e*OHKti;Ey8e6{x|vVqYu|DT{(L@NvSxaNX;Z(W;vtcjfSKFW%h7LScI{fT zMfFE$)#fyP*Sx>@d46{R-}v%j&xtktwnU_lY(g1p=`q`1ahsRjYVqE@UtSaNVnv3f z!e3rO2&?ZCr*{qeM17}JM{c{VWyqTd9k_CNi|p3Ybh?Bm8Z#H`@1OhS`uEhZ;F`=@ z!JX7sI)dRZ@%jZ^Cf;uCH?+7wctFeK(sz!AYE06uq}%uEld;XJU+KqJ@KOQNR~Kat zF<*!8cZ__#T)=ty_z1Tf4IAiC8v9x`#dH0QXK0(|Ko?P9_s6=m!Q}3%9|7sNl5)fH zdzu|!j3$3xa!nw%R(2hLpYW4D%%gWm&TOUj-d-Fxtg)O4R;$Y{b)7XUwl|SJI8$VA zpfjxE>^zeYxTRxH9w&G^ulSMN0jQs_9yY1%uT(AAeeOHoE7No087qS_CigtZn7gK% zqt|$R2AA|+rHE;X%11i=Wtl#I&q*f#oKnZy9BS7sA*lR-?X|G!sA+1%_SZ*4jt6H8 z=y2j>XZ+L-6H45Vh3%BBEt1^b{P-fCMMNTB&+{5F%M1=?|72Acq#x?%iP*-NuMjiY zxAn4hnHB=i$}Jy#4q6-d<+S87^S#MF*5`}P0fm#Mov*1%Wyb*aaef?e!<*Bi;sxLn z3r#HFv7u$n3U`}NX(GM;(z~f7H~y%s@@zx*q;Z)Tt=a=IT3$Dv^p{XdGkX2K4eMz{ zzX0mZOcq{p+-b>rsmxumO`UBhq3S-WM9N{YJ@b=2Xl{e?Xsss0muB*B+naQ0 z*fb?Ogwz)0uN!jT9Wi-R2QuZ;-?eTu zrlGf?Lb_7POiM;1&p?>>yuX9$y?vECvBmWkx<>@Ymk@R5cH&$0PS30`{^-L3KCj>C z45^)FH9ffN*wRy)Qp*`?u~S=>f$&I)8ic~+KG!`XNuG*G1|EZrH>2_=lm1s}4(AVj z?){pu%F(IzQrE~(=FK6OZH!FMA4y}}7{$`LMmSp*XU&yN#9SXPHS3;OP?Bdgq3=9geZxcgnxhJjAsn55ZZ)c+Q zLoK8#JLR1AQZE?gU7_nwQkmF{{$2ZB{({t($H{1(>xLV{P4B9nj_=eR1TfHMCsSpu z|EoKz%(@@%uF{gNwcP;?lf zGnc=?K^g)(bwvGvNMRu;TROUjF*77m_;x#(y12 zECs};R5-8E(>Te((rwkd`%{{Aat*R=ZEjgK8QR=(^}FQU(ofh^5fyu<(=1rOS9eJ| zl!DuJu}<5j+av`M)@Wr9PhQ*Ri8aa|*h0C>3Q}_srIr&2#;GpqQ6PBXY#Be1#GgO9 zZ-q!j|4beDdQTGk*@EYbNsT?i>5{9Tj?-a(7aVoRxj%Y2QcQNV>LI|kla%1E;N6un zNz=oeZK=GO2Sey2(`uc=Q8krxuJ-dFtlZ3bJ-NTWj+Z#(`TNaMoDmWLlX;J_Cf~Xe zukM^1Apc|>EJ6PCZ2($<#-#J)dSQ|moTOa=k~_mqlNAbUF=Iq{jM#`+B?t)NmcyC+ z(IvFB(0u4wIapaqRx-OkRU~hm&~xbL8^uSi*3R++OPBH~uhdhm(jAq~-{I7<{>+1% zRL?3cIj@y%crsBTwAMS?dKA@U-DUMY=E(SES}sOi4xtAvU$bxYcBk3c{G3&t9n%3R|;}9`WVU!5(KM>KXT>DnEcQLbnR) z#qk$B2L&?b?av<9D*@+Eh_YU%a&C*^G~8zITq{(s`5#vtyT1i5*(po@d{ax+#x^pW z^1Ijd>P?1Y@ir&>!M=f>ofHlm@?Y~0gd}aEx0pCX@45*Lbvqc3?C$svk(G*_<_svF zp4hX8W^fUmmJ|^JsvA$*gi_keMnpi>6z+4NI~CQczP?{ZA~ zY8@ziP;nYEDNoeke^_yJ-gRM#b@D!(GNe}S(>xX}`K*5AP z@KA0RB&QzH<9r$!&)FqUvwu*26njG^WfIr7j_WGbOn1=8Dh~X0S}DMJTG=*leW~Q* 
zR}$&x$KK{+N}ohCwMnPAs7LnR2pzJ!JCJOi%@@L=Ckbjs10y3n2e>FFh!Qynsy~)> z|EtW@%}^DL+TU|wfsSrQda~pHJBn&iEMA5eHB5i`xf-U!Z3p}Ih8J`$-1ETOUc12# zHE*Yz&|zku2X0E3AH~f3I+T*LKOvkb2vz39e&dXN!9Xp>R=avu>e^p6&MD?| zZmFM-LG(w)t`(4F`4; z--9r@IvP$j%x7sZf6gR~$`tNpXWk}De!(e|=3s^?mt()K;`01mHA!ewXuj4azi8z$ zicUkaLx%b0vUz;!8iCrWsv$8`W9f>Z4t>YEXLg@63`aqkA7uKYcF;u=xI!&FDF;5nje`fAWfhicbZXaTVnu zxQCI(_YZ6o#0H+}6@b1e zuIALD`R+sADgMz+Q;ib`#zE+5{d8X4bQq10=yr<@9S%0PR*_UqVdS)lI%Zx#0}1X- ztkjKVO(l-4i}gFyl+1;NeW_5$_?GNAuN&GPXrlI~OS|5#O6AR2`@o8}Ea3QK88*&1(_jQXx^aV~*l`B|h!bS+nLTxl(^R{J(zxandm zDMwV5WN+^PMP?mdWG0$|#;79;BUHfU+tA|*J-)!vSkYka_I>CqcLX~g8Wt-@BaO)r z4nH<2kuXvp`pU2y%pqd!Gwb^i!eMdNL};>}%OJ8r3jd0m{&J4SejJ*kA+@S*?OUR- zSghOwD0AU;wG#IA3n7ZXB3ohPc zpQ_$fWTtN?84nR($DS%ZT97$z24Rmz?lr$&!8ylr>2pqAc7EihV46%j9y9>bXJ%@7 zCn6CsSHS2}4o?p*C8*eBSJ)+G_9Ghj^=o~yz_rnZ3!)e>TH((emQLQEb^&HNW9b0B%baPq|}P* zy7u;$Jo6=0@2o|X2BIT{(FK(8e z4=-QH3^SI0LP>RlftUS506VYz`LcY8<>WKUO_skWCtMr$!d_R}qbKd)`XiCEQ5Mg$ycxN0lDT_WmHHrY z=T{$2BcR>oq}R?019!YsKE6{XnPRJ-`V;i2`RhoF3&n@N?TjpUH!3cs>OJZfDlkOU z1^-aM@7Re|v-SDB7=>$zPfE|V1X%2fz$_XKe@@uf&*gDW-Q8mbTS#bwp#s>dra|zv zy$ZwCY#I*fSVT%xygA#3V}r+XNY+v|%h~9g*9-M|;V;QEQ?O?!9-PC1PIBfIAg~ua z%lezC6afLPs)tUa`Z!CRw&kHOUT91PnP}euUb0+vUoW%sh2xGcUtdV4WUKWi>UQ7Z zr-6VVuRzdi=Yhk56q9j`x%L~MF!S*2l44y%%4;Xiex3ipN4CQCJ?zA*=u(nAj`4M+ zFT(1{AZ{w4+!m&SdL{PG2Y9goKIzu1`<#+`=f?jl!qvF6q|uK%6Z==EygFNKs`TM1SaKc3x~Y0h1Eayx`^2=UiKhlQEtQdsW{C{9o0L7mMTx;SKLT zw3z*Qj=4QU{r{Q;sIE*IQ2G0k66=2sZUq|T`%k}Jpp1xHBrLk3WY#zNc#_MuqvUoq z#Oj;TwQwud_se#{du)g9Fwl(vEqZ7mm^3P>wz$&A$3c9Aa}{Gj_F?%ir##pbYjcS~ zXZEST&_FDFz0u(_KHf?@23ADyfK%pSW zO0>7%lOesm4nmk>O(u~RCZyp3NQ;FVsEY)+eV}Lxc9@JVQPvq-tD?BNvcn}gVR%PX z!U*>RzUW0e_gQ{%4ppoM-sXj%d=)Pf#Nt)hb+c5Vzc7C-b*oam`Z7c%;VYuYi3geHyNEXXXyFJ;ULjx2ge|bY zmNYoTvt-it4+#2RL$i~=e4}p^#=~)q$|?)OUdZ}dJH*&KI2O!v$hoG21p~Y$z6X(= z^{T};bpE=vIlyi<)u?IbWb-qCN9zUwj&gWs>|JI8T4nfpk>UPK2l+S!kR}+vCMmeB8d&@H)Hej1hA}nY)M2?T{g0hsC%J(PAfS4;#hRrP z#cZ!z&4zuK<4J=Y`A@(q&)&nn!JG6uq3>JN>t=?T=+xf;Nsh+1UeoSsJ;dREl@rK5C{>MKln=Dd(5-1>kImMWssnq z%IlbpJ6r-_ZUq|nOb&hb5zjd3DK|oRdJ=JLg>gP}+VCYg-EGqsrC0I;BEB?+Lo zd!5A)F|uKjB6hw1*d9*Ah4U>NZFW9W;G8Sv2)bhP*#H$6VD7co_DeR(92A#@*QKk7 zw+ScFkAo9Tc2idkP|4>Yxxm@;5L5!+zkG^pm}g_L#ytR_iR4XgsE{c#To65WW<&MV zFkDU`apImL_#YWWXwSy0at<_Xo|Yl?(;j(xA2B?~8aist{%E)5{+$%rllyK7lQNai z1Hc)2b{9yaBwx4a-4(BG-3E(q^F!wk<*bgdyB!AI)+|D?*J{^r=8JFa3SKsI{F9Cc zkdB*fu)6G;VVl)jYc=c5y~MCTvOH79Q$8a@R$ktZYRjXaf~Sh3x&YtWwqPV6mUGR7 zppAV05MEFG`r++ZMh=s*&Z^zYp=u*!pQ9M3E3LO}O!rF}#my2fk?wJzhd{d!K-+iW z2W1`MjzjJjV>P4K%1yKPf8q|aIz}xkk{NAk1oa({KQ(`z=?UjxiS1VtJ95_n(@l_8 zeG9g-4w_{cr1MtJV!OkY=n@!j)N9=6QUsei@AW69@gxqN!1XmLGukdK(9ws+>*7w!GYTKmATQ?KDzc+wY^H0$~6_I#TRClF(6dB6t#hwE;~yGJtM)H z=gu0I*5Qjmtww+t2$)u%?M<)Kt;^avA7Qg(zl;%`iV9hBjq3m8DUNjr8=&T$Y*I&j zzCJRt&8Ek=yb@o+OMdX>i>{5n-_CTA8j{0(|L$iY7|*SGmqa^oxWffK0gC4uxTKVe z8zYYS%yHBE?_PtwZLX|T3!WBXT+hmHn5#R!B3ckrhhKfYNM(~afk+EPd^2>$+RX^s z_+o0avyTV=#!JXfS3SG@jFlQ<&tU)WA<(jod?1}~O<(mpvfZyYJoxqBjoPn*7Mz%2 zH79zJ>-}%^y^mgDVq`es0WgC{a6NWABpidDdAe+en!l_j@jDBKTG8$Q;ydg;51LcM zdc1gyi`D4ed*>;k3r6TEN@Ph?z%4MqOnS(pKQqDVpIlhs@3v}i@V<$^aMk*!!(%sUsMMf; ziW2z7e_ml)bKe4f=la}u)~_MkZeYU=DHRS64$PD?5}>srfPe^ywyO2o7C$X?d&SmG&Y$y9{YhG**2dYxQBN;ICQ(CNN%BY$DyohA;_nm( zbpBuPM5+=Rf_|&EC*hU``+R+l94)u0fvK(#YgCt|i|%Ec{*gXS2${D~;WGRJEMhzu zVkk%g_WRk<Lpl^79x1bXE* z41;2&z6pVhE_8N}VC(Cv8^bS_vHGO2@j+*$qL?&l+>5h!HFI5?B7;}3G3=`zrlx+# z$r0}dNdrbG^?|?}VVM5S^el#1<3+)~6rn5sv6W*UQcz_6{;%J>M@m;Coug%_j@Sw}b!J!5p4glrYq)dC(0orxUXVev z`(PB7Rh9KQPuW#jwM61ZIj}-D17nv{r17K^=Cc>58i!*`Os_O;b{w*~D+?T~m>v@j 
zHRHAl`M8=l*Di#V5cP8A-ZeCCtYmz{X~OiL4=c%U3pAdJpa&CHQ}D=6UprR1jb&5R zwk428$$j@DQ@u__mNuT&MC9cAeW2v0Y$OnmN35H5Ny2wU6OX%UWJQ$voj!ABH2tI3 z3#F)Lq7S)T3?=KXk)`1~_-j(Hr#Y{Qd~!ib!#ur-suY5wEk% zKVJg|u6w9oO&kRE^31SbtC`YeY%L^yQLD6dmBbt(nWjbBrvs5w8_l}#vVj`(?JzIHQWGBj#_=X!`RB0;2;u@ zvk5`EUrnETR||X?c3-od94DQ&P+@7Nz@;NxI8xiI?bLOMNubtzLG&k*&-xT%nW#&! zz;oF36MXF#F1=?3PUf43P0C%+FUoWQum1@1m?)JxqDJ6d^r=CP>IPKv^L zLGI(fyCPHE3Nqi5&*BCuGr}zH&xTy1uIvOQD<8Kx#bbW5d%-GjH|K8(fpb^v*_@@I% z=xx4}hp0TE;ykTYIK`_RtM`E6@LQ0mipv>clvDwBKBO^qGAcUlEiM0>cW8_kQlz-b zwqy1PK$FOgJO&Xh#axo5*v#2VKGFQII>Ga$RrVUKVI4|W*%vEwF!LW@df(L{!A&6u z#3DHk5S5amQry%84jd@H2n`PppDm>XS-sdcdG3U<$n2d@4x`h}oPNbk2x-~^S5Bew*wAT;mhogpNdfD=F< zG4utAzBQG&PXP^Ja=`B|*n`KND!v{otvzNmGX zMzuXha;UNW+Q_XZrN8>hvn@H)G6KeG<`c0sb|XZzb5ij6^85eCy)=Bjy$2)R$he5` z=$MzK8t^`N@2h^EQ^B~}L8V`<*a(@>M03u%1X$S_E6=o9Tz@@o{~%Lji=EdX((I>V~;Qh;;p0FKl3@Ox+fb$9T7W` zk0z|N2$JA3mr+k6+cl&#pXzOmw*9%gJAu^~ZI58RE-dtL@~7I7?eR_##oMDoYB%mZlN9+f8TXc84)&9k=hS_{tHY}tP6vT>!UQbCA42`OFlc> z6yCyZ^z3IbAEY+Uo}!2WV3K=B@!RS>k8W(+d5?6}QgX>R9SUu?2LaN_r>pjqAknz+ z)@6NM%qI#-k(wn|H|eG%Qkmd^Ye-6L2$cFN4B2}74gRxw6__5A#rc<$45J_CE_`S< zs%e*zD5VUvNTk&|RJ@dV6aGisX?3+B>39yXV)2#5@5S^8Qh5kOM-$MCz>wOj3y1fI z(@|Yju3Z#SU>J%y^?;-1@Bzxe>({T*D!2hw zeKzwffBSQMg@kUz#wFeYm&_6_QvGc}y8azdNd zH-~M0^vv8pbm6uZZs3|U)eSN0Ikl^XSP7=nmm0O5TwU|PQx=NV0-OWXw356Ap^tXF ziLvnAtSlf|hwr>PtoOdh?SgD?@>1*fqc+EHAny?+=V%YX#(hMp(Bs-Jiox?w9y6#F2oMzFjJM?xP{KPU%(cZFF&LNm1@`-sJ|}d zG{gb~NL-@}%@{^Jr_d-jCHW8mUn;gaKCwP>cs0$$UJAu($35ZQ4qdO!ZkEaI)5pzy zVhV3=8-vM-jQOT4hITwGf*#!@YwxBok;}^Dm5%B-{no!J63v3&y93$ok2von%x!cL zXPaz(>CzCL5*0kIgW;|3q)bnZj*e!sP10d^ftwo*Y)b_>_{&gOc(^r{w#-V&W~Soz z?|*7w5_oNeMkO`_ul=|CK3s-_b?05Qb-oI#1AFHV7H{*|CzcDn9!n&>DJ!E^*TyX+ z63rAK08y%IAV7wih+^o+(#n8BsjECy*_S-sUQ8G976=Ddv4rh~(#EnBAvW5`ZH1PZ za4Gg)W~2gt`vYrhYk^{kIZYFI%hG7|$)CUZp#ouEOhrY@Q27r5gxiszCRb}-zv!<+ zL!rjtB`mNy(d?eQv)>h)latfpi)05#;0EVi-TwCNn|$bEziWELvk{Ph@SN_qZao9x z$Ku%yj!%^4zLqe<%61~zV@APy&z5LGcVzFCjOxTY$ z*I!@X=xQuWL4v1HPdBIyM9G7wjF68=0y9@D0B$+xBXjstGM6;f2^Hh0mD}vK`}a=~ zb!S`t;Mbe#Fd75Uh0O7w1DB3T3Nwk=WUK3LUL{Kvg)_>uenK9UbR^qK_NS+fnKgL- z`9xKY0c1_gA?~&^tO9e#r@-D6=By6%4oGDi%-Z&(j;8QmvKDZu_k&M@eK;_FspcH+ z+2g-t^3poWv5iw-hv+7j1es|%#SHD?Cx=mbu~VtF)2Ro%-)D@4)HJAJg>#P+-< z_d{ysCk3;Hk`v^IC`vHMyL;dk1 zAtWcT^GcO_n>%>ymqF})F163?6!kSW-$qwdjT6AHR^p_xK97#x_Rz|e*z~?E$Zcof zH6{qmYo*4T^OJYSlTPVN*WanWiHscfs=t3P`YfYi7EV^0T#wOP#f(_xZgt${*%d)# zn#1+u@2xET{N;QG-IcW0Vww-SNXO$n&o`A`QR>gwxbim`s|`|bI`;BHNFx%aYc_ZX zJ0(|!kUuLTI{Huo8?8+B!P0<7lZ4Q*S(=QB=qR@77Uzau*^co-sDUv0*0oD(`hyC5 z;a`@EP`U~X9d*C>fcQzC z*5lx1;=)F2{4J)@XmJs=$wotB%-1m^L5&86J;KbN{zUK;UPQ$6OX4^axpf5bOD5dn zCx3gD=VE^z$+gPeXwpaPa!#q_aQPEW9>~71<}jqE$6U#f_iHD1Z|34+wqPg!2z+17 z=9uoA*P`fE1TuI-U5|w~eC($#{n9P-=t@%GoDntL1X1P<8voKqvoo)Fkes9GPrx%C zlH5GLymCk--+WHA$vG1|(*Ov5aM)4Fcn`oGmE6?|rCUUim4)1q=dg9}<|G3&32jpQ z>axdLD_n_njSNHC)G;j=V+X>`G*jf)!zeI%THcC~@zAx*Ti;2!==E}^&EBHGPs1AC zuZb)NN1JIljA1_!u{MDOH{t?WifzIqxo-Ozv zaMQqxBMXJ8zL!kV2kb=feVz+==VK<}r)*O(-D@k>2;ki1>2VukdtU zImuxtUGa^GRS}~$#mC3H0;Ob!u)9DdnFG0a-5q&SKl(r=`V5$H+CJ$C*^jY&Y36)m zx)hN061DpR2}tW0b4EkkA;_K!#<-IfPK#~>#7j=)tL#VT=Bv6q!=CASyj*7^ctD~beK#jijIw#OOwAeCE+gyefi;eaOA+TTdf=} zAV8}pG()9%8on8LIqjsY(`uewK{Ng97tawRC^EjK&tXu(MINJDB)o*(*PJnI)+|)Y z_QvW=lBXoS-()kr06Z}z*%iWMCN@J=YR^Ri3ZoB)ra5K-YLQ50$b3E*lzyZCxsLNB zvm&8)U1f_Fvj_g`Fq6rMq;NpZ)?`u$jzT+*b_KCFJ!N05*Or9$?pSkJD+@$YV$<5cd8-G zUMLjM@7`LjCHUL)F0QBK&mJh)@j9KYY7Up0Sc|c(SGoZfIxasNI?9M|I~d78~=dC@$_oYF9P1w5F8hue}!uQV`}H=}O<* zI$9kv-vbzz-+?=Z24Fa6()EZUIF#7pXCLdi`m)g!fa>ko~mX>X2mt 
zsOVAl1FXK&j26pQPFP>e>^S5ZM|5^|r8r9GIlb3Xw|>*`1s)FD}#mqWTZoo3Wc2Bd(C`9`8G8L*x1RFfJaJMDQO%=QtUcrxq}t zT0Ku)W)fPL4{;222bb24;GA+&4ZhuH#ZR zp2`FgA{blCp`uDx)WpnWq1N8?c_B>v&d$TR$2;+VR>`)%NpLv)h5qRUZ+QOgnIEmo z7fjLXsvjj6`V27Dud1tq-x68qM-#`~kp}(qA);MEysf1(w2a>QUDIOHpG@Ml_S_gq z4p$=#Aw_zGnHtd}A2nxXCnv_n?wH8yz;$t14lw(_Wq4{zQA2064ku)fmD*R4dLpw9 zouo}*eOO%Y3-;nvgO>yF=XHG?i9)VW30Fn21^`%Ur8|WWszS{-?k|vjkgbYP$f#;B z%P3=7qfo3dI}sF$?s_x(RjF1Ku0jwsHmXjpVX*N*7X+fhP3am5`YnE{J_~rKioBHwAk^PWdQOzXxt*80<`G?N&knDW5Gj~p2syAj- z$FA06ca-_#r6m_jva!m~{tn1U9PU-}CA=@31^B&Y`4T2n@Ir63l}q!|%L~kG{3XwE zKni_LjHv;0pmSDePIEgmv+h_m{-9^VK5cXX(5@iQNddqy@m7YU%ZM*4DaCdu>(cCN zIYT*jG&e7_%{mzW-KTHqL?>hC z_Y@!kwnTk@|KOZD4FEkJ)`+s<91r;8Vu<*~|TaFv!^~S9^(oc{ZxUqf5qXJlE3X7>P z@IKPb>bZRp8K6qvv}+nK!3imt>f=}m`1!}I;c3Tp zJpKZLLfiEc)3Gg~DA&nz0pGHzG&5%xKVPe-|2L`}+PoU6eyCj00*1&5)MQ?0u4}7Q7~KmrmJH ze4N{&h3?vT3MkCL_O5^^w7vb17~_1=b=+yo!+bFRgj5)v*_eO3m3(FhN~gJ_V!n*{ zAv03KzO7F0vDoVSYyL)^P!B@vRx#i&zjAoNgQ+EQix~v3^)G@|F0O6;Pi!ah+ z903&wfoj`G?wEj#L<-;*q|R=oK+v8F_WuYd8m#p5_aE@TapSfu7)IMZAyo)Zd07&^ zdG>cDa`%zO0;%H}QU8mYf7b!|MX)?k4{}!*K!8Rq>j#KITO$&nl*MWV^@+Nd?1s21 z0NI+?+`NLF7_owD{GrzMyNC|tXfg}ue7IVF41fL{Y$z6QDdVulbODSy32a3_lF&N@ zR_;fYzkn0~*By~O>J=Y&iyC|XyomKxyVPnD8(zb=zmFTEUTiD(e1&!`CCnTw+1umq z3UZ6~Wy2PK*TzB6uG}W4`%^H_WR)4>zR=7D(&BoC20KBv0U^kUa1a&seD`ud@0uPfL%YM{ySv#& zV#iel#HDe5C2$l@dQ1d0Ey5jn)#HnjMq{p*fUBCT>-FK^&|&KGPTGCbV*UsYyfTQ1a+x)9ZKjc$fi(f*Ff*n zivb5*?KIGjm>Q6&PZ1f{EShWw)xo?@@M96S1x!lW@VGk)U`qz(22j(`qg(U>FcN{t zMIs;#T}Vqq=!#;R$Ni9`5IWAhNg2F;U&xk6u(%RFqs%O5XN)R(o~|5M)>TL1Sye&7+Q@u0pE$uJ*z z;4$i&e75+5*J+zZbQ;b(TxZQB<)c@$>m3+H$nGJ diff --git a/hls4ml/docs/img/hls4ml_logo.svg b/hls4ml/docs/img/hls4ml_logo.svg deleted file mode 100644 index 0a01891..0000000 --- a/hls4ml/docs/img/hls4ml_logo.svg +++ /dev/null @@ -1,514 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - image/svg+xml - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/hls4ml/docs/img/hls4ml_logo_lightgrey.png b/hls4ml/docs/img/hls4ml_logo_lightgrey.png deleted file mode 100644 index ae8deeff06acb7c39e994eb2ed80917ebde206c4..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 25377 zcmeFZc|6o@^gsL&QV}hLvQ;xt*@_af)vZht%DxLDOJtpprQB31Y6^{gne0nUvWzvA zVr(PHG9<}9D#MhCdCqiyzrW}C`}yPf>+yQE7@y_3&ULPH&ij4dm%Anx&TieZdkcaf zTQLUzTtpBa`0oZ}(?oIhL%lRwp4s)Zd0I%Cq&b28 zWARdT@-~@tB6&7=pWmi!BPQ3zuI;&YRx*9N=(c3j?M$~1y<^kUCsb+@;#gbc%5smE zeU8t`iK@9@@M1UW;Yd+k`qm9Z)@W!%B+<_b|FF!qdR8%$IhQ!Ve=2k726?72e6H}) zlmQIy|Nr`bT0qylWEQ?R^JtCT%6gEHkom}EIx|Nu)M)&s#>2cBt&uhJdy-Rh$HIZR z%-J(}Z#-3ZdqvA!H;m8LiO-p)#qhQ7HR|K0t+{ZnYAPR?D_=Zw06|b5#Qz@q9*^En zUwl?cL+H2-d23R$u8sCcB$`nDF@~MqF=VC07iQGT7-v3%-S)qx(D;?ZnrbGG7Vty->>H)Tbi5ta z0;9QWo$iSl?(`-R&m_4SsTk72C*Hk#xBCA5`vr3zKI939T3Y>UijBzB21NKi=PqHolxk`xUoC6QFf@v| zAH^%uw&8i$z`42}O5@>~C1{(qTs;d{~A84MEhr=;gY3RC+A5d8W?a(; z%KE{$9AArcUgSBB^B_q~AL9sp-4QQXs=*&M`*aXTk`D`Ma9{`ZTO$bhEH{EY?8q}p z6)fxb%zViBTtq^q#jj!(X%#MQI_37{mDQqF1o4RjV>tEVQ6V`*A&iY_&@M!421j3i z656m{TojEU>q0P~X6@AUbo(T?!7$pIhq*gf&r(@3qg`2G%pL0h^MBV$i#AF|OT_hi9alZ}n?d4;3O}l;8P*m|UNgtsHcRK#Wh;|N!wV~S z`}+Dci@G)-)!6ZWa&H@I4AB-3&jjO>=4U2o;a!w<)Qmg6tGM5DjI)T9#F~3&U~bon zcRzUB5H0g7g;>a$nxQDxI(US^2)r1y4XIJbNnbsl%6|U*)sJla8I7lkj~V+_G`%7s 
z)=T<}Egfyow_ol78_t{O_FlK!#*_Kzg=4?<%4biEntTJKo%1j4ufCod5gQyR7MmPYu@Ra7n51Bv)ZN`3`43gc%w(N~ z4ZbWEyXSC$!d;HnsDan~ODs3oz3_@bAO@h)2R*AA+j(_*Y^kdD3NzPnkx4{NTp=YC z!@QSM(EIE+*XLa+o!_)b@~nDd^x^xD>R5fV%d55K!ub$uYVN8UEsoR2k^^;HF8Sh^ z-5))xa7IQ(Yh|BnxdQcI2PQLi;3DHRhK0Rw5h@xQtIZ;ZExZ-3J0LYNC*l*1k(4kR z&u8WFL)hSRVzJT($;uqdJHGL|Q9xKTyJ+?K#V|U3$g^s+l@^hub9JQVhwquQXW2BG zvB}Lv>CSobrAwC*R7t;czVSq*8ul^X(yFa7@eox#j9)>nAX4UHn)2{!Z-D z4O|mW?-CXpoJ4Mlb8BpNGU#K_>FXCTLh{#!*mrWK9k-+jA?t_YCtT3sG|JneuF5@% zGwPjix)l5k}I9c;llap(_Iw(dq5CWUfzt z`0#R&IQYvq~r@x_O3gZXGl^X5RyuFg)$pdLNG&Rxo)h#Usbjk1@Y`Cdetk5<^ffB)H1 zyV{>|KBBYVwksh~+Seu%EicrutY`mxX=T@>n_#zzHdShfr=CH zi{J9@jAVew|zNpedya4_HK-Ha$u;ie)9ZGG`RBugxwV4 zW3*AQl}cJ_YFG@hEdBl~sKv!DdZcK-r8QAogz0o+sIM~R<%OdfztNNOX4+6%q-c+q zl+Mmlm(#k)y-9kNsu5dersu-9BP8RO!wZoq85tK=f++Lz^OY9G%Ntd~?rMa6h`2{s zGZKrn#3{E)RX}UYp78k?O}00KECl~!FeS1EFP6$ z?k>vKgwX9Qmhs{#tWio@+K5B>fFqrr@Z?2lw7}t!hQ|kH9U#|5?fu|=TpB$iD0$w` zK`ho+;gC>Ni!*jfZ(EwoL8jylm9Q?(kT$q*iEV&rFj_&^o}wodb4pCqX-Gav#M=eY z2cpQCJ($>rY>orGqd9(P`JPA6>Zeq`+Q20y2iwfDmKC7!^h-130tV-^>#1tK84no+ zf_c|3G&tx~-;p;Jw*NwB%N!gCL#vS8CE6sl-ac=uB_BD6WZ|B9gw~MR4!bvL}Q%2({ZcZ!{NJn8VJZ8K~Ywf<5Gu^`bmP@($%ZakoK{xzQ#i>YFVg0E< z{9X)R5}y=&lrQRLa5_P9ckFmxj_d{eR|FIJ#H{nNt`yKI`g)1maFyhxIyXJZxRs>oJ!&mY{>+0zwMZTDT(#3`$$OhgFo$*A)4YB zdh%C^WcS6Wh8)f!=k$B7l@izd1tTLLStA0;kYv@+;fhn=Z+nl_JTm%&2zOD+9g>)XnjQtdHBR_unL{nZkzx8n=C|e zu^se`YOe3ZgC%cQU7<%b-I_q6Zpp}b1(}X&#Hh(#ftbJTDS;rK z99VtQg|XCFs-#^>js1@0ejO&^a#mL&6y&vsKL(}&IWqK^{#BuCl#8CQR6+HRS^dY z6J4%^FMaFWsXxV~%s7BY)#B9nVeUVCQ8dLi6U(l)ycH~QdNUW&Alp$Hgnc>fM~~1G z{lZ8*lUu>YqYd9e;j~_A;dP$MrMK<=ISrYPN>j{H(&SK(gHo(jl3M`wrOUWu`PN%b z`Gkcl)x!b!;QI*)-A3on&nuH^4+U6wgocK~UT?C9KZ}QFX<$S}GPg88)WlGY>yG_~ zvbpccqSh;OQ(=+yO$cWag0&XN(?Q>Rf>f`U-h-7le0=Cq_>UMHckCRh6fca~f>t1; z4hJ;^pWgX~=KFIo3dP%EBx+Gd9$70NlMM(6IM-)}!P_Ym?md_om`D`IsOW98(@_MgLtYWm2oZVERE-1EFio z3M}dO9r=!U{#&0TmX?-q$|_QKPZI>~ev#;JBMEM>a~rfV_!gO$)JyleT(RW=%+RiJ9eLPZKK9@p2KcXs zFvX(kluqw`0oYQ&w81~$7PSfkPR_4k<%?=| z(R9qp2Qmj9oJUs2zICR`+LT(Hrs7Xd(AH*_L)R!dKUH@>aJeOO!t!E<(MB3g2m)iI z;-T)XH4|J2cNeXbmhUfM#9I3@Mr(uO>2gZmSTK_xtz~QXC%36Yg%laYJd8*E_LOLA zvyd#fv=FR0uxChS)6Y?TZe<<|C@77n155F0Bq7n|-Fw=8*x!=*W=!^{GAA<=JW-Lp==Y6#&9Mt$*8ymuykWn+Ugh?5~x%A^b034gabw@*Zb_CP9D z#W1exVl0*vQjWrO;h5zH8pPR?y1kTn-_H3jC*pM^+Wf|7BED5NA>CDZT!>S7j@)V- zQ71_GO$|9S5%pgD!5J6Ra&-=N`*I>8pOBs_sJM>43fV7EIij578c`jU^4&ipYr{y> z^0cq?Tnzq?JJhd@A9fhISRSS6^Ppvk_e>GYNW_9zYZVL{5u(s}_ee3;!C`9=)c<2jqq(WOy zA5bWM<2UP^`tpqU20W{b@A5|q98_Vsl4;s*oIM6WK_8Ebq<*oSc6Anue@ejZ-x<}S z{6Ce4*W}3!Aws+H5qmuvh+Zg#W29r?lbsYFTc`AIpFVvW??_e}yZAXxt>uv%qWnvc zqV5xxeXtIN!Jjo)t^XOBp*t49%;(j+_5t#2?g3fZ@)C`1?RNpV!*?dG7ZMw<_#*{x z>w@Nj4`6;?#Ei*Z8}h&;)ctU*AyeWSGJUY+3*X)}z1|OLL_qMJIz8por9eH2Hsi+( z5x*Zo2Gn!_T-_|!=;r!zC6;K)hGyM3s3Y;(wn6dwtK(2=w$<7H%SnihH@%V9et!9>xOs%)n;W}@Ern)B6))H*m)E<#WITWZ-y-N?!#nc*%#8pCcf^kOV}oynvMaKah&wII z9oCYM#&K$p7PX83JLC0N=kZ@L*kGG}^(ZM^7CG}V>U~{N^n5&FGBn(exfa*8!RZ>n zokzq8bf;yyo07wBebYxQk$I6+WJ7b53h4;*jc-c2+n}B3mq3(JA0L?|eklLu0zL(w zLXQAaV3^iWYM;9QB`usxsCwe7q9P!tLXz)DO0|~pICaqpyVE*}7kfP0-u?7|c~|

T8h z#2=W&F`wh9ZCL9Sbqt@S*t3H(oD%brzTXP0BG(kXmj0AiuSZ4nHB`cmR6ky$0YEmc z*1=Z7`UzWGzk5O06Fq&iX#+Zg2_2EYPQBr*Z@_YlL z>jPz@j!nB? zp6Ny>rYAC}$^r?5l$5$6VhUNGxG2K)Q}r0($q28O~Ge*Z%2WnV;um)SbXKzQm z@SkkI3S-r$>Ua&5_0|#whp|{}UB#nsmnYXw`Zr{7!H(l_JiyV2==VCqHY56;^9(w1 z^}x(Yl`x;;evK0Xt!e=I{B5T^?zM12;IE&J&SmvnGBajDA4@KvwMrby*_Bx*b=@E! z8m8pgj;uwuvjf84y?*^V0M$_4d?f@Zlpd8`D;ri{5RH9DYWzF{0~yYEsn2v~a$wC$ z#D?B_q7J4`mz?o~eWAbO4>J}5i&OQexW z_A>gQgPGOS8<3-Pc{z#da|e#q^y(}!vbI>|b=O`hdEryVggF%2p-_5zl?4ECI7$2T z)0zLQw?$6$fGCWyhwJY^HLhb{wx2~Z@VG2Oga5)%)qtq@#%X;RE|sQ}6mY$hmT7e* z>&^S+RlJ)xlv2}w3%48LRU0_jDVx~Tss87z{94;b+dRDMQ9{q`8A5R0RbHUVD9+Q0z~-TJn39sR8dCjjaWA{{#?~lu;NFhL56fw4OCAV<1`@0hyHa!1 zD?TBiDd_U$%K?OAw>N`5b-O=Rx^?K`S2~UadWpq0DQXDS7582#Nn}`K_=1aL$!rW& zSrv(R8ZhDh2aH9%LHBVT>A3i!txcXCLe_$KU~#_!eliA*(K3e#@sW zAxP*h&U}>K4H^U^Ay z^1pRM^BE^Gj}X%)g2t@*4xHo7$>*^nKt^fV_cJVdOeAKx6s-uJF4M z*~+Wf(pRlnE_5II`gY7?b9eC_B^>rw zUC7>3+*y3UFgzt*D(*UR{+7(UWzsetuEVfv3^+70LqKIu=aKu9Y^}Gnvy}Nuc)7hBguumy8<1&plWf$;<&maE|9I~jfMLg06`)%nW&M32DX;pJ5sVz04l*zhg<*{osr+Ps{D1F1SsD(_um8O}Ft#KQ=4Z z05)z~;_r>NU}a;fpZOI0_zh==AU=EJCr(r5OaJ@m?-YDqaXdvH!^dgpAi^Af1_m7P z?`{mUkn=D(&VEL|TLLmw=%|eR?=vaRXEGR@riis2IGt7ws|;h0`#TZ?{K{A)yZW3) z&a4EinI>oX5Fo!xh@uvp$p>;8IY{gXhdfFhY(raf+C;icjte-wgWVXZOHJ8v&ff79 zT=txAzd4_-uOFwOwKz?`eNN*w-ysZxm<*yb%6YYk5*-4v1 z+ba}0V=Ze~8*yXGz)B73@ky%M_D|hv!RO6*V&*F4ZTEX55@>H}Ocl~*j0-k6i>%aN zw%C0H6F_h5qi1|M_vwzF2&n?&$CtQ;y9)&wjIznRG@_J(ZnY8$(-(zFn7qNL26-NLH zx?^8B|I!dfVg& z`xbX1Z~Qr|Mo6-JhmVqn{WczGw>}ZZ*p%5P{E^4u)JUcLtuGDZI^_ZQqdN^!#Kpyb zFSjQit~UR9H?ZYnsjVeY#FEa~>g2%dNp8iR-ihRaPVc+HG=ooLv~q{*I?v6i^~5}4 zQw?hpyCSN>>H`Q@-dSQkp3g16Woy)4;kF;y*cBoAI*$rDA4UEV6V@X5DDVzhM}5I!)2fC^meQEA`A`!27(~!jwA0F~qJ*HOjKS~s zy>ZFixU)Kj9r1YJW@@B&aeq5d9nnO7%DkvxOqB2QDEpzUz^c4MEA7coGQN%MYVRQsqziVQT4^$a%oZbdAJ_10z z5whAu=e#sp%LJbo=ob`FJhCRNqKP)@Gs58OaW`)KiC$wE^1Ly?yAIKepD<=?1>Dno%_kP$F6ei)(i$G&hlH?W?O^+%$vPRIv(Y^CM zR-@%Rx&oO>7mSS;z=jk0ZdkWOO?8XmOn-_I}5mdZC{oSt-xzKI%yrB<1Dj zKR)Wo74qQ0gVF%zz4jteo))&jD!G0FKvtsL;6XrjZIq(xk(L2W7s|YtiYD+zLgzwp zO7#hb_^)ypC9f(Q;}2!#5pK4MGvfFR@S#aRiKeTtq&vMnmeiKrl5bYHwxv002zugF z6vlz(S@qE{loY!7h5dFVnXv)aBp*MaR>QLDziA?mD}Uqn=Y^_Kq#JhX%7A@bMD4Gp z+Gl`l2oy>Z(CRs)b)Qim6i)RWH1><< z*;u9eCfjfHa(|`{EX7Ms!_{p2zW50id?$%XzGB(e7UmT%e}t z5O)H9)X+mKHVM$V@PEa+@R>sG?vButW~gmDZgm( z2%Za#&r<88wIQ1HK&b|==FyJIOdyVeaf<_q52piEk&XWV|8%9wrU}o0W}~p{2u&vq z_5Lj2)wjih(c#tC?vE9t*UH-mok|8q#%N8DEMlOb$eLAd-U>bbb!>1Q(Ps1HsDys6 zkYNG0KuJuSy3V1QJ$ONwhhQ7Dr`hsGeZwMJeiCg$i409#P^5X5}R{MrcaFC{OP@kP2M~;1}(& z!DXwYgFh|l5tOcUO(n1o%-gA^2bp$(3d;`Gv8nz!%KM?#jmww6sCcJQhr#ZVNp7N; zP=kK!&^wsWi)7k*cuHRR*mLL8O4_8mdqL0Tg4QDcdr^}luX$JH`OQDD@VB9@O?_nU ze(;??X6pFv2k{x*#(jbPfX{_qcnSTh7eet#Cr35`o+;JX1l4eqc4FSlO=$IwP3O92 zXe|FyHR}FaJ3IO$Z)P*TF@N;agBnqVAf_6iBu7{T8m!KErAJ?053t^66jZ_%7z~DQ zdjYRN4%8{t^!0FzbHDYLm#QFy=~Hd6&1k{kM}o$mzGVrg6^oXDQ`=(j-=(%Z%i`*} z%~?qLx&jB_7{1Sd^*>xBcrz8kxGPSlk8+~%$46QhP08xh``Y_c&$;?NtAq;DzK#3_ z(q~Z-Z2k}|TnpxHDlnV;8>PG{8#=o$+zf`#%IkPLC{AXo;-rG^9>CR{}>b=gnRo0Gk zSAndSO|d5bYt!6JNKbFbC4}3rRdhtkT0ptby*THR4zTMgc8prqwrL~nk>0lY{Ml_{ zH|jkWRXkD}9jW_^yG*mUV=@PUosBDs79gWA8PoK|zRxFbTfV#v#la<*a!b|s^D~-; zhnQcWMaF(cv}x8I1PJ;QBpEMbC-zz5=@1x7eS~)dyN?Ptj~)}Qt#Aiu_@wBx)AL?r zdOxSQob>+kVNNh5HPz+z%oe~UPlzgJU4ECN4|P<|=bGPccJgP+Y`SG^VDoXo2D_qT zDxBWCpwXNKH)x)oOzY`2bO1-tT46{-z`9DAm)L52>1#RAJ84q?`s8r+$U^t37?n}_ zM<*X5nWm-WWibVBxfLD!r`|@Du`~{!7PLtqPu0k=)PLnJ20XI%UyWv6Sj<}N%BP|L zv#f7)=@DAQ3CMwNgYM|kA{k31E0A+sDAnNEC<=b)gq^^5IX$jQ72AC4^IF;#lSW9f zTq@+N{puT=S;c};El+^;f)sqzvzE04oQ2&P1*abG(fZ#nf!K#POw%E1O?-eD+zTczNXP_H8e2z_^u~!zP$Fkk<&hC;@x8}6+5mD7WxeE 
zSUlP6W1)m}DR6d&I4%YfGELPQ&Gp5dDq*x&b4jR!9PLI!!7GD?SDXf;kf;E_fVG=j z7gw4o4h})F&XCV;f`9=a(H)@_YXA=d8Ezd(=|%p#-_C`s@%{{8hGAB3!*DxkY%DBu zH$J$b`yt~jm|!jb zV67Ucc!$kFh`{Y24%>TCd@Chl=@WD}lZ>KGQM-|cL^EnXf20k6WOY-pr1Y`UQOAS+ ze^!_09A5-449yh_9xVu_kNm!29wxY`k8cE=H3+-EX*ijS-*CQK{!i^Ws;}?*5x_%T zw8c3(KnQ>(HO-W;_T`&C1I^z-hZ^Z9j_ab#R}a_)bndCM=WFf>Nw8XN(yWr%bRmh+ zZO+&7Ig3l#ZU`Sir|TRe@k0d%^}}xnYw=oJq5Y_8C=haRD%683^PoSbxhOy0IG zaArR$-9G4{-d8$^kjy2QmLJG$8a)9zm?fX{;7-!0H9hJKwQS-z4H)y4>zK2F*vVx$cDbQM;XssHTV~DcfH?vG4k#OGC!_c_F zZyCDNN2_HeoB}K`MANx=MgbRT&*}V&S0N)B9MxeXG*Fn(msfJA^`A7yehd5kt5?-1 z;bCATrVXQE(Gb=bnNq} z{MR3p&Uf|~M$LlJK?C_iE>6*gqN|cRP)8ZUz(I;J{@cT3EN%T{H|E% z8W3paUm2h=wY1Ne;JGuU99s;N2m2AS_On1Ysw8(4{{#&_<40f zqi5ZyI>7lv4eJge{2Y}F{bBW-u)g$Sup|=3*s7vgIegYP#ac5iX9}}wr8Xx#56r%l z7Hnje&o1jKt_b!XBk9prpx@ebDJ;kc#*a)O0< zq^_j(#dO!Ucu8r6LJMa=7_(4j#y@1(EKD8|i(fze=zkc#e=i!T6wmT(bDFMYoqeS& zf7LCG@Eb05_)ND~56Rc{!74dg>m&+rLRlfrFRRA;<6LX<0z6F@3aAoC-v=qbsHb zDmz-|{4sQ*CeZI^FtDfZfN&WMK(H>R)XjY?9Tw+l!3-_ig_nn36^mt?2b-#-a>^h~ z(X6TdZbXh}Yt;J7LNLR(tZ2cMQjK?8DHJ$EP)NKO=}ZHvRnx*07nSJ$$8j6aTK(^b zC=XB1*&?~Pi~{@o%Rq(voPc)}&-aQ6#6Q@hrWnLDHaz6=tWP#aZpi*Lzix6ZiwVu8 zI)a<|2M_}a;X{`{BrwmKGQJhXl{+-7D&f~jIX~eTN*S%^NaNkrfeR{{KK}mxr~CDM zeBe!>Q~rI(Gj&k1s6exM)=k)tS*G1&nI{*gEjzObc~?)cn(lvdmX;5Ddyd_+Vxl2DC@8 z`G+@7*<3xDig$7fER(SqJm7GpYs~m==gylV$eW?^`}?4VcX9i0&{ix~r$B>yb?ohf zJ^7URK+NBI??R*RP0y!)=@=5c!|FP1W;1>LrGrL1l*E0-1zuRUk`1j<;9rp^J1UbC ztL8jpDWK`|s$=b?k1ngdKTRxIBwds`hOG92Y>0&_oeh@6H8E1W%0lb`NY|ionNVno zm`~MIz$VNzoQ*NcD~E*QTd48+fzgEvP;>~1JjvsViibp`*6jrEGwp$lpMPK2;Pjf5u!^ zb1jk3-~xiRXZ6z7#DEUPX*yrB{m$sy8+9{*?JLk|Z5(P0XT8Cni?r~9qbsI^$M_Nn zi9h)VXbFdN1$&I4qb!6s`)F#((VgbtJ-KQQ`c@Aw7HGVt*M#-wGrS)$D&kb;Ekqpm zsPK1`-G#y|E1`zf9Tqatlxq8_;2EQyet|pdgdZkIst!<5v|$l1}dVoGW?vZIIyNV>&wQH$|uC?-HuaMmwWYuQ3lpx71*fm*3-1Gx2(nO!8KV%i$5=^!~6D`JlAH3umlfSs@;cY_VXn!7;Nt&{NTG1`~OJY8t zJDs&RdGFp{x(=>j4j>hGA*aTZQmh-DZ zPRvy`8Zfl64cdhPHJ}Pfls;0c0t}KGKEfNT<56X!3c96>H#K8d+_$e>=w`U##<>NPu8p6SHi&?jaM-jrLQp?uOd$gvY4Yv9xrC) z-?dSO06}-t#vFo@Qa8WwBE8s8Tl-yQ17xi#${say1EVtvNi*heGWm1)^GATCa+_`m zC7O27#yAPQ0cgsaAZ)N37&C^ja80S&c$^;oqso-Gc>gV#&#PM{$M+NnJ+;0V6e|el zl62kAoH?U?9GqSQv(wzYR-_LfZ~H43*dVXRs9F>LVjgIJMQ9uWpho?-`2mmK_am6) z9p~13fVgBQn(#Giw^x=ZNi9+;=R;KvlfB1oT)h<~Th!|a4sqKH;OqEHj+Vqvf(>oF z(pmX?aMURprS*))d5^CeZ4QX{pwib+6x%z6h;F~C#mt8CiIB$!lo49c;AWm$U`${ z>Fc?n4G|TDsv^O=ExX!&>;+8)zP-WO)3Zn^0nfY94&64leD~0bBoe#9%VIT(+-;uD zWpzDiT^v6oa`vFj6)c>ks0IZZz#Utl=q)7L%0W7ZE6|YG2-VNiL2JCAeD;WOtMZKi z*wONMLSn?li%p-sxS_=qwch;GmGQ4!Tvu|SXTN&gJc1E|r@`4A575kkpsNX(pztEW z*p?n3()SnMU(EOFoEOI)zhb26B=b}}^#ffm$?e9~C`D+KIhnpvz7b6-pK%KQUB34;SToY=8F9H>~boArv4+XsK9<-br?0&bDdATQMnN`3G=)_7bu8Twn# z<**+|0sa2KD;8)N@(O~^a_gHx2BBPR@Q*gTA*#$O}R`5WfUVhmc8ExJ{EI*Bzl~D;CZr+^b<(LpUiLhQ_9D zBxtd-_G3SSq%V`Ub{CH=?z(gY)Pz9buYuKWTS{(haE20t8~^c%$!rnYFsZ%<;t-N$ z3=L$HJCJPO{P^*smA2Mu?moj+OqovwzI>48Wqw;P2)b{2)W?Q$llTlR&c^uH=6~;d z3f|ZW!X#H*WC0QAhf7BuGiCwNxpO4&=8_vHy+C&f4LvO=LjeQ?oclY?4+i~Qh(HFs z6KV_hlsscq(g2y2UeslleKr9Ovyy>aXe;69llq-a1aZ~SM5`l|$`^j!36ZpnXj5Cu zx+(JEPXY?FUb`m%5Je54ihM*3L1?<%h_0o)gx*RBj(rM*3&%*wn9!PYpEi~9X=TxT zGKa6I#uG9!Moojn0-%yN zoJS;A$+UjlbYDCFb`TA0H2p_7hPOqYa^AdiKBl3$>-@mtpCi%!gTrQ)mO;khqve)O zN%kut8H>-)`!cIDi)2Hc0oj{TYw;f~(~G_75#FG43B<(>?A~({No@h8%)C>7v}Ye*G62u!5+5@1>Eo z&DfO#ICcrKqG&1TF`+>VgTM5*wH>wUeE3jQdzDRTrs3+RKc|8Bq|@6BHAX4%$$>=T z8~?Nn%<5P0&z*dHe2e880`P{}Xc$TN-JIzQ7~Hu!D?A=G=t#YUPM4V(#Al#Ao>xYt z@9Cy#wK#cT6t-dX4Aj-f1m8>92gjQR#@LM*7TJkkwOs6^Sd&r)w(E9g11zD3}ogfSM7oy&EA7sr2qpSMz6V9&(*Saw%hQ-=gayK zr^p~59jj$sbt`eh-cG9E9NnG;w?cwjC|GoO5pmK2!i}2|#tW{^%7a%qsZr(+yOmaj 
zvsSn{&pGa)?E1f20C?s>-fja(<4j?UMYj1(v%M}0tJqU zCTKSL|H!T@g-lOjU%sVir{~SM?cBN3>+t=L=1Y#uSv(x*BXVRSCYQjTEA2E#Sq~@C zrgbV*qA7VZd}`3yuDo@H(*RxSuxg{_50qGwO7DXh0Wopp-0_aajcIDSJy!XFHm0tr zN!E^^U@y?`eWUIJ(E?^}grnti3?T zn_l@5Zq*Gp1@RDYfejiuioo=_^XXkXjYm~Ob2NWqVj^Mjr2P*_2zi6vqcSryXhrhE zI%L3?ua`MS;FvA=J#7zkV9me3I9tPVJhuBmrYy$)v_xCv_O>5C%*Re#1NmYxJYkIH z`!Mi2!7FvJpTkC2e@nUd&|DIk=EWv4!&>;^{LQrWy>C7=B<`5HddNA^>XuaE1ZDmN zCNd*$CJBXUM9-vvw4%Lo{$p=%ucW<=Ega804ws%!a&rffP`w`YCe6+l2c_~kk-3#a zDjiE_{@s8)eLNS|LOTeHaDcN2cyd$W>q(1qs|uJV5uTg3WUjRN-5?XCyL3LP?S8-- zSsR5ddH1C6j{LklK`8p*j7o+pRu-1(GN$y8^iz?2996Gqu|QAsu0QU>E%_PEIj>+J zHIKr6zJ4EK)M=oa?;X0|qfFY5mOMBkfN8Hhj|X<v5G#lS(2=g_U3Lh4YfqB^v)ESbyL1NWLR{Hwnx zfbm?uO>7III}an7m*hpF98=QM!*hua4Zp`OUpo!w?aU=}fFJUXcjk`a<61S@##7rU zN|EnoN+MZ8;GMtFxUqeO_Hb-rR#7D^hG1kVNlYeKXSi%n7LnqaJ`D%ZlLJP$_1(D; z#sSD0O&>g~y1Fapc`!JObhjgZOtw$;uM9Z9Sh7V+qkKe_T1H_SsB!Q0K;e5#SX^y>I zu3h`SE*Ix;ct#bIW}Z2=jL$d*r(CU?3a1zL&_VCT1j&9rt)O47FL z_wrHZF?j958*MN8-|xb>gN4bU3ZK+pel@v@@`ngKkLWJchLtUHgCYJVUKUEAwF1Tn z>E|@G5d6G9xY$eaw3veO*sEV})qKJ{n=|*U!?CfkK2HL3i8!K%#us{bF!M25YnT#Wa$>Y`2sPu0 zW7~iL#hbJIMcfL(*x+jNlsNOX;%lzew{Y^uAE$$A>$zHLad@$752BlyjaX#?;n*qf z*!aiAtqho0W`|ul|9U^okQJ%0>DF9_jUm+?ofypgA+yOx&Z~4^jPnV_8S3xbc@T!6 zma4_a!(oQhK~s9fm1ldWi1dhP;#}u^tg=9p7gojmcF|)vWeL0-ztj}HoAZkX6mBR# z-y|R)aI%%QWH%I&Pf(~2;zk(gEm6EFIs+^t*!!};LO+`opzYA_RX2B^J{uh!%{*7< zy1o|!0&9(HSnRiVXL5hIVvH%3-mfWw)Dff7Gw4$J%U5xk$4MAY>DNm01qyEkqB5v| za3U2Oe{v3&G|{d0t{RrDQyw>(f%{U~SM+Q(1zhApAa$0D;IvIV5xlLL?Iv zUOp>v@5A9Gfukg85lL6+flk&RY9eimsj!iOM)jTmCRr}dXIFPAS@pz;>#4hbv(=#L z+lUFTTy8#J|azVJ9e$1sxB1=CT_o|-&ZIVa|&$N(lhq}-4j>^BXkH(FKGb{JCA`Ms8hu8y?5~Cmg^p=RcU;eX$hYNsh;Q#&frj=wgEi>R(9UQ$) zb99x;8sMyy4vu}Fc&6)MSxVDVI68CzUR1!(dC{L$A3j~NSX(GYqYsY0dWKk0lH{#_ z8e@Z9`7Yh4d9M5$(yT5o1Z<~Ab50%JiOGloRmZU1KmM2s%Nx#8ii&TV<^@Td7q)4v zMR*QQixfTA{XaJ$WV1<_g|L2aD8@2a`Ar0N#dm1{!+r0RM4N9CqZ&@v8)K8I?GFEw zQiIyPfRUqAb| zD$*^v02%MU8UE;4-dsmmZcm-mNqkY&s-A*fnes?+P|?RaPPypq31kbb|R9c~B&nK>j|MBbmS{n#C}{g5Bu?~lvbjMJ&kn#llU>~nYG7idhR zEjTBp;7lZN(E}-i$tX#)<|mqP=87B@vc(x|`13yhEvLF^dBjAv6Sz?$DAwug54YX zr)X3}Op|?0Yk$~Z_x|lq`?u!?S)yyVLrQuKK2#)AoA}M))7RD3Rc1!Ac z%FT2E|Vsf9!+k^tr+H!tssEM^4b-Z|DQCJ_}WWE%pnqA({+3THf9?W$0{pnt%2B z%cE()woegIkPe&mRg#OX{>Xsn`{(%6E4I4n(pKvlT)fl7ui*Pd-`nImDDr|mp(o1u zp+{>2|0WzJXC^wT$c5EfdZ=5MW^_5xigoYml;|4>K0vd2asw^!1NWXa@!3}(Ph_;I zk{+Nt^$J@&B*TIr&GEx@=N<_YrqN(j4d(%WdY?RT^!)l8(HaM=q(`@3gp2xd1JO(m z5PkO#^Hm#9csLWUEtyWqv>EImn9=;wM?Mly+GC7j0G7MQ{n4Aq)pM_oJbYWe>I&vX zd1r*G9a9BVBMz*f(CPHFTfZ+Kxd}Z_B>e1q{)`}6m9+WmN^iR8s{^-`)rh@{n`I`e zN?5*B2vl5N*oQw7POPsR)3=yMv~Tg3Uc*L!>keIH68P(y)OiZ@!0R%kHHVY02Jx)I zXsAk}v~>qt8g0OgPPbIm>`dLt^2HX=v`euo&Kpm_Q4=c+M$;ud0M7xjSC7ufrIjzE zGxjt$H~S(5CCu^BI)a_O)CX6O-Tf5P)-CkRdE ztSr2+Z!rO3tcW*TD|uv8kCkl}QTrjnXFGqszgP6snb1a3Q`BR$gai^~EJZwk-Qe+U zjGYG4D?XE5nm@M0)-$e3F<{)TXLNO}sU+1M1ke|KFF^-lLov(%KDX8kRi&Q2Hq64c z{Q(ik6&cp$_bT7766?FjB*V}_-F{**QFu`>KgKdq1+M#*wAzR! 
z$)PLHtM)gh83h)>xh5 z0>IJ0PfTea?XZ>0J@_O_rv)#Ht3O%eqiYn2+r^b`0igXi@SQmO->)Zq_x{^-* z>C11DVm$vbLY|+eO03*R&zo2s1Kyh2&4@gg*|c#7BLK5X;U09Ht>U4QQoltoQ8Q74)@6Bcl^4h+#TD08*l@=}7KiZ^lifSoVPEN|yCc`#iz|ZaWIgswvzXoJe6* zE$0giIR}A9Z7sM3%(YWl-sJWwvDPLk@RY>z+`%D}$rEFDx-k+;v}LaiR|Gb2h+9;gc;ya@*AI z;w2`GR>WQe```+<(VVAev&kbdK&b{J=gyY?L?j_0?&-St(`Bv8K?C_15d5)!UW94h6XHIxPQWVPI(*Zi^hG_KHFj)>T`NWQ}{V)@X= z$|ad3yILumMvK^*cbh&{vO#&B&UmOg(Z6rL2?6?N#k%-j_e04(^XiWkbMhIHV7WF; zw4B-VPV@AB&4%hkk!%cP)ef}A5aqew5Te6wt~ss}Bas#v<;~#(3p|Ls1ETy!>$US@ znM?6zHh43mbVhjdYimUX1!Ke2Dnz>nWq!;O-0nz^gS;T)0KCOIDD3t)*4QF9$QA(w z2j(Zr@4cw8Y}tu;PTXe{7KVsjXbPkd`i`$jcHme_Z&SEu3bFQSnv{qF+Gm=Ee*ydT zNR&79WDl{%6bJOJ%&41j1{J9N5|otWiRAXPvdaN=2Y3(L%K>o83nr*Jytq_Db@&IG1&yr+s9jX%B!F{$-uvank6l!|Yj)l%L1h3-{EfhQw>2sJ<5J!C{ z;*I+bih@NizY(o2{8czZeNZoSpl?=@Y)335Nu`J;EXIDtwj;3Qp7h(12`Wl3_87%_ zNoMY)1HJA+f~`~&a>1*9K{)2e% zYuLcz7&t;wE=@0-*q-J;mv$|n_tyj3NXFS=&f7B6`^{PK<=%|qE%${gDt+%obFW@o zC_2v4Q57w@_>*1qaVBITdb%-{7>smX;>_i_Ea925^DN);0hPEz*|hImqu0 z%S?cDzT!oZxs(~T(UlixkQiw3vn>*A-cjS)ENtreL99EaaSO7cct2>hOogxs{_fdr z-rf~jVZr?SLllSJHE1}#$kmf;Z3MlN=ndU3Q@6GtW<->mmv`DtI{p+pkYuN}iRm=! z6yK(Kgx&`#qa0~{o!!CV(+(^h6STKInROclMams@$-f>V+U=3;5z z>4|#DPjj@|B4}H~)Em7R`|=gyCjVAnUr)22kg;`e+UKKsm>vvxLk6TK{Na>0HRsv8 z?(DyVXTlc7)1S4qdwu)w5l*YuumAoPHA*1;dU0n*(jEYc{pQ(5$F{+Q$*3hd@12}@ zKQc1Xt+S9BSt2Wh%GA6{$tcDOwt!{KIcAekSm!v+6A$4ewMAJcx$~c~jt!1h4MLan z_Ejfl$uLc+O4}({`%hx6J^zAFiRF>U^DB8{Od7tX1qASSAb`qUTJl8E;)YMb0Kqy7 z8T)3p2vPxXQJ$>h$;R}qMgt(3mk)ba%H$LL_H%;?2 z88K$U_3ldh^zA?~FZkjen|ZNX`<4Y6nW?v`_t;3o2%d%WYGK?OU@1Q-KjP@BCU38Q zV?dOY!l)WdboTYwfA2UmdLOF#tN&UfLrma}b)E{-5miw=D0%xGE3gxzE8R7SS!kmbtHid&mE%LQYznnsG!9Rh z`ffZ4rq=~{P^6?ND19=(1S{=`^XxD})q1&yUleWCE%}K^aDs}(l$)SaQi`k83Vj`6#3k2vw<-Me>lXztWzSw~#c@D`-9nh^yU@8c;)96kWY=qEgUs;q4Q zV=;MVh9j}a*_wO$S6gNQ(SI@G8garg>sm|dB&bi{Lv-i)86nvm6Zj7;LllS$lxU|1 z@fJ+ZvY4#ZTzCN^74v81?8e_$0}3Pj(AUPsMiN3pD^QbGue9uR5+_jC#|&NH86*WK z8gG;*_@UFf*zdtbPXBwOY>cNFH=kekY0g3F;*NaDB|W$T-C9Pb){1~!P8!c7{myTp z2W^AXEkvSG?0GGqu0_SG4T<&_wbQ-URg4hIY@Jgi(!B!BNBXqzSawAz7Kl8 zYz4v`CX0GAP!n9_*dCis`H^RA&N2?2PJ%3;xL8?^a7X_JN(-bhqS;n=lzyFNRrsDt zgv{LMMX#6_>-?vwW-+7z%gNFoGw88FxzNS(y<8p1(=@uV`sBG#mTWObRqNjITt@{$m}ZuYnLt zy_Cl6HH6f+ QApLjRrfnNbf7=`TABIA - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - image/svg+xml - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/hls4ml/docs/img/hls4ml_logo_navbar.png b/hls4ml/docs/img/hls4ml_logo_navbar.png deleted file mode 100644 index afa0a1f50c7539105f429cd84550f5103a24931e..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 7506 zcmeHMXH-*5yADM_1Vj*y0&3_T0-;DCfb@VMRhl#*!BCP&AfXp2B1kbv2~{PCfJl=j zM52hO1UM%mRY;DA5ex*66b}e~@w;bz=ez6P@5lLZf84dstTlV~-p@PFo_S{0JMYZC z=ImrEEGQ)i004wxc2=&O@&zY{2=H;PgIOtuIK{PicduyIAUqHgg+?GF;K1lO3>*lL zMIr!z*jc~)dEr5jKn9(7M}D0-c-_tW+D&0d@?{x)`+=*A<{wkL#@k+p%dy^TLc|zP zU%S!a5bf*UH{p}CArHq_$WG65cSnE;t@PZrZ?}#^w$=sogoK)-=c!H-!H{mmbfO~r z*5}<7qhUJN4)-2f^O&W*cPjuO$cN-K%FFRABp4m36%>NL2-k{@#Bf>*0GOD?VuFIh z;nBd0@K7WQD#z{{kOLw^pmLu2jvz;jCHxZ7EN)=Ip9A{qQjwbUXIQ{OLP<*sHdf;1=6&RMdEbjOa*}^ zQ6UJ3tCh_k6r3lh+@4_!CL63P;DI}BO`5)uC}hOCdWb(8;6Pxiq%A6 z<$qB87l#!b8ytniL?h8C;1ABAi|CkWsGJ-J2mV3+V*viej>7&S0!I$*ABeV&7D)Tw z&7+Zs|6u+D`P2MIWr#B}79Qbcg^Yxwu$(49<#a)yf0F%)djA`$^9TBa2?C1@3Wa+j 
zL!vMJS?y224vq}H#OcX@t8y+13Fm0=qqiJHPWy+F+J8vN85!+=4b2aI{_oS@ANflf z|1HGkO@U%8H=mFh2*&&KAh-VYryXkDR_Dw?bt^ zS;f)*dm+Wc=E8r74X3i2bH-WP874w{PIk!T*rl#O)nH@m_5g4>UJ76LJu^F>i8| zpPyeD5kM7UC(TEBmwaXoQYx`ADTW!Kj~!xJS*~GMez6P>Z%VaIjiFRI=8W-H114dQ zx?sJFiyf21nF=Wh-46P*sP9GbL9x@a`cG8GOzXrD2GY|9r!R}b7DT83|DRh6C!}ih zz38LdIKo6#yezCRBg5){$qo7MKb){GJ8o_WIx#&>s^pSRJsId$WV`h|3b5z+D)RbL z^;vRZVIe5Y?zWPAhNy+AeGbwkrs`5;I0f)?C}Nx>def?~4VcNZz~6r$R&_{i`}(~{ zPI|jq1z|G?u&?&AHYE~)tJ;37zKM`~VJB8A{PjLD$%n>*p_C&FqaPFf7-aUGf5f1u zh5FBI^0@dJn-Uo;mBk0UdR}@Qv8B?={jp?1+@suN-?%#c48h*zPN&PWL`^;%k5AJ? zO8|h48-p5U?xK`1f|&b6vS_Rg4N94+!lu~q9#7^um&>!9_t~|Z=m%VWWioDT41p*) zKmn56r*t}AH<1`82>EKIBYSSm>~;-xo^DvYV-<|_)7PHu{l06!!SMGT3RU9LF+@_g z5wpZ8ipDSH&rQdMdW;6 zgNspJ+sSctzLo+^;tapyDwoVnxOc-W=G<*1OEg1XozDofboBna;x?3XPjh{knW@WO z@4$c=KV)(Ec&CLttnfDMu!TT^!&7QWJbO@dq6Ei?M0C}V4j>G~FE!7)nfkE9%v(tH zs$HKKLJ{5ZY(tSKdK7FVtrU>pnY2l#;FaQj(&Em~*))NSv?ug$10Ftn$l`Vv+8$Qi zRFV!+|K-gaedzd?wVtKT(6Qdy@?*a#!jyRe0I$R|7{@UsBy}mp{A_k|DQPfulz=E1 zSyKHa#k-`FXGEcRG{T(LomvApG|H;H=g*ZmYlIIX0Da9U{-RT2ppG>q*V+(7shfhQ z^P9aS^8!*^RLxo-B9d^|y7MVdDOW;^KY6FII1eRbT(EgzTdV zT+?IE3=M=Lcml2yg2C&PQN7vg~iJwYxnIM++Cr;(F!S{ zg^wC_J8a~8{E4--xktgfS=;+l;z5DbD&tZK_4D8QJ3BY&3Q4Z6$778#e&jIs9bDsV z{a9}vMc8uEqkHq}uoi}Le-gL&nmo~WfE7>xJkN{L8FNMZ&VFc$v9K%QQ5M3B zohaeug2e8th3^U+9y@;YvSnwP!4uZa1D69gxe5x_0n>RGe?`@owb3Lj=NGGSu-JZ4S2A#%2n+IcV&r>+!2M_e6krh zaG$+vXHwZjZNnu}sP`ndj8MNsY$nObynxtDh(5I*3~RDc57QgbQ{-4Li1(j}N`?-H~N7u3fTmhtp{;8J)3b^KJQ1Jw~PPp&^~9xN2?Q1)+YK zO9isq1>AUhAJ>+e#r?0)#cSRP&*-tbcNHIz>+3_8-VNo}OP+bRe=S|nPlo^Q%CUC# zfyZG8CLV`NuB)vL4piWurBEnogfX543eaB~>?>jM_2kg2jq=Yc*G`zdyS)D`am?(nq`P&QH8H-j<2q+YV(ir=V!Zg zbq>tDo}9#aczRBSzFPXt&CO;QP_ldJo zH%D2K!Se^3JX^H5JOOXiuPZHUFyC64W@GsoK7re^TVEVf(Fkvx==xsc$Mh#_#?Ppx zhKlYQu$IQ1>2O2}@4iyQ5Y=CExh@8=DYi~jiOG`hy-S?Qj6sluDQ^&4HI#T*2Q}gx z0|Z0gFI3g~&Cfu@zo{s8-*i2ItMn{JdeZILqK8sF5Gi4uxRCn^#s^L)B^f$rEO^R@ zHl+fn2XIK>rr8bnOy!6CgkJC95;AT z#UIn0V5j&|nx!LTPVk}I;mK`iH`|Qhj=cSXWfx7T6!JUI^%T`~BjJor&l3!eYv?Yv0RcVx?kpv1>N$q!xur><4m+GwtB0j64EY zhF$Y#zBPv@o@OSiE_Vo{=2B>LK*VGyqF&!VL&h)3EDR*Q3Zc@wAS7=P_5<~9*3McV zJ0H6y%ibFezJv!^!Jecu%1|W|C7{pWwI4yhGDpvefh12g_#iE}u$S z-Skt2o>746fk3Sxg%=C-dhqo@`-$WbR124uu&hS5>Gr`i*M^5F3U>N&o(?y)uhEEcg>^>UVM-hdhgP39s6LM>XbM0C_Du#weAC}2TT!Xf+;kO5v@bA zGRG{A6^X7N%Z{xkk;5K866?u?@xt~lclI~1Nn@RPLMKg&&SgOI`OZAoM_t<4Snjrq zM_1OB$Dm-eT*V}zo?&WoHGC%bxp^nVm3B?PV@a0*4ld5P*uU#bD7G(>7s9-XEDJ-U z`FnISFo)NJz{Xi}A`w?VB;6fMOX$!ro=*5at@8L???g|JVhrH0VcQL<21#a?=-B

Z83Rq%U^Tb&(bj#e1@w47(- zLqA{eWIO99?|T&HCvld|usg@Bx7I4sDyCWj&IJU}^ATH;uo`}UzuGmKSvq~dxK%^A zAY&C}_M*}Viy4D1k-zI;T8Yi~T<|`ro_BBG04*ul32=dpJ!mnOmrEhcYC&0zAcv2Z znF_T;Nv2sW%K4o$BW-)4cVC_HYvXv+>p~}9WY$lxM}X7D8JHVHzZ=ZB^C#9^^E5{M zk3|+qX^vDMvo2;k%$a{nk`{d*&wRUtJf?g_NaEC8uZYS+iQY7(LmiFmT{6$3abp6B zNFsbFo7w+m-lq@Jcd{qNHz1I=WB}yos@Q{?E!PiMx+w10w1Fp^ zQ!=hu7apm3(!UKwYy~lZ3>)xVd1PZ#T9HE1CExA9GVi7Syb_b~(6bNoOZ3R?Y+u@4 zy$xIMA*o}>fYqDl`(7Cz-AgJW`nfZAPOV%>;O)8h1Ofphm%?X$IXV}5xM(9D zoIoUHA`H~mA`+DrzJoBzn2PQFc%K)Pz1d{| z(iv&5>d^RfSk|iq?y0AOy&&-9B2n9svJ>DHHz4{{NBC8u+2il#9H&UTHAJv2RNZPqCawL^V8Jpxw(WMVwKFf7e4aK^?8luLchz3 zDQG<+pI+JMP=JOZ@Cj@~G&(N?Q5?*pHkKOC%NlEE$uKNn;8E6==u<;WfhlL1d#}y1 z(jRb+@Ci818n|aJ^ch_{&4@;%e>o`2=kmOY$aC$auz=WMQoj|g?G0R^SxB~0ZTWda zRMwWLq8<7Tqk4h#b_lT*1sfiE^L2c9z%=LW`QRmJxg;IyQ_Kqn_dTquTgp+|?(~pY zbKJORS*S5GOJym+u1=^9fH6tc%dhiI~4s)Q}z?X7L_a@SYN?DdP@Q&}9Rdh5w`*^DZc_JeE6$TT7uIQCI$3Ez2|Qj>2~$TsZo?cbgs zUE}P1NbYKyb#~m_Wmr_VRe{u%XK9!L(jGMe+*+`mj2u$pdVHW#W@G_yzwF&AHhp`4 z!Rvbf_f=Pg$NsF!Pv!O!qwNuGBwJOH-}{FWzHf0DB-As0wm%yST;_r~G?C3^Gd`*D z-q;vhy_K}j_f}dtV@~me)aJ%w+Lyateg(=8JnbGqUF749!!{=O)#kDOCkpcB{H5(_ zZ#dTq}CnO|{&&M&V#EqqnE${Md zw&h_gwY`} ze&PPyjUu4(`qv{028`iRR)tQfyXL-7|BO+0SVBar=+g@fn;ffG5=@)J*4&*EN9*OG z;-QO`pU2tmjXU8yv#L1fH5dJZ<;3{|{ox?Ar`!w4n0Mx|(6X6N!In7gDf>qhqi#RL zizV<#!dQ0v?tPel6$;!3I89Hek>tN>U%2w*$L`1=AO7JBOwQ3{eZNe0sqQikRl6?Y z^pI@OOWbt3{}IoV!iC~Vo>sXQ{Zv$d019cpnR?u1w(BgVtFz3Zdx~m+kB)xdFeN4? zM&Knq$bf2}m`I@7zT;*>c=UHC*EXiD=*>?W0RsaIiJ zoS<#1f1F)d$9}tzW8$^y`06F~gSA8?uE^FY)WNA4@ua|bC<+wab`MIaJM^?6!EdO$)gf8P?~{6?ArW ztx0U(Hp~r9D3_Cc-l3W$#YmtG@YscwmDtjr9L=Su;VEa0LK8JwONm01AK@LIx0n6a*jw-xz+U!2vNI0LjmG0FVOj06>&Y z^hXP^MKc9Y zatY#bbaC_WQQ_TukKpC;a8}_pm(`Qd^HX9FN9$7INF$rF9bbzy~ zlF=oN-^YUgsqp^JWJpMeSctTkZ-AS)q@tptxP+9rl$0pgLp1QFPmn{Xs88UTKW4b( z66h4*;TPoL>%((Aql2SwaF7ZwuQ>P(;=g~zG5L#ERi3}Dza;RN1pbo1UlRCB0)I*1 z|2+x(4ehx2fDk7H#4-S315hw@_YLw5boce+krtBx6fSD(ksO08@cK#f|0GWgpNbz( z1S&{YA}OD(qc27f=4rlZtE<}^8X0J4U%C7f&(RD!`1xHYIspLQK0yISniqL2EUkFR z=YIk@3g8T&urA&>Hk3odKv%X z;g-Alul9goqhCC`259_Rqjw-U^7rp`a`@YK1-YAmZpv@`xCC1K;>X$L(&b<6KBm8T z^9#E8YfiuGKll32wYXf@{M8WNK_aL4N;E8eJ(e`l?6XB7PkW>JGMIf1{6Yzz2XxBsop2b60=0{}pI|JKG2t{EBy0Iix%!2vgZn~&u9MhZ{?P*5^# z02g?=&jF$!Z*stSKn>(p2QUCk0879Qa01)`Zy*2&0m6VgKr|2sJO)yLbRY}J0SbXK zpaQ4`8i5v|6X*p7fDvF4m;)ApH2?+d0~iPdLJpyYFhbZM+z@_%@m7WF(9vJS3td=Sj3kOi7$b z{7E86;z`m;3Q1~6+DV2;kR&J)JSimt^H+AIQFtEs*VzlasTN3zMsm8<0DY2a!jUKPN9G zN01MaFOs7vs42K9q$o5ftSEda?oy;u6jL-)3{osnU?@*e@>42N8c;e@-lR;V%%yCg z?5A9$#8A;w2~ep}nNWFB-JyC$^@gg0YKm%ynwpxIT9NuHwFh-1^)u?X)E}vl)Q2<- zG(t4$G&VHBGzl~XG#_ZjX?AF7Y0uKC(OS_4(I(Iq(YDdf(4y%W>BQ)?>741{bQyFt zbc1w1pp;O4=mn@9^cFM~S_$okuAiVharT7z35OFACo)ddpBOu_N6$zvNpC>!O&?ER zOy5nv%s|d?mfGR$Voq0AY~P0VvF#4Kl7v{|mRB(hYnjItb^JazKINtcrkPrf-hc=CXigH?^y zg*BG7oOOith>eHsGMg9M6Si8mX?7BJA@-~6A?%s#9qj8IOdRJqoH$}RDmW%Mi8zHg zjW};}=5T)E+&jf}O7oQ8spqHKPOWpXaH(;5ay{j0=33!q8 z;D8X3kgSlqP=-*yFp;pFu!rzV;V&YjB8nnDBCkcpL}^4Xh~5w_5k-ozh+Pr8D^@G^ zL!3|CO8kjD%PJ}QzbUQ~oDHYy$}$teXX zRVZzq7d`KFzWDsIvVgLSa<1}%3a^TT$}5#QRUXxAs@bY@u+uOH*lXCl+8H%xwS2Xu z3xXFsFO**RaZ%!8z{To|2kJ`d;p!hQ5ns~06nm-fGQ(xl%juVAGa*%*9q5Y*QL=l)=k%)zan%c;7WrYk)DoTvfh+FzrK%t ztpQ-5ZIEm*Z75*qZ}|Qy*;T`<8CSm>Ng9P2br~}n+Zz`f@0(mSNi>->J!cwh+G++h zvoR|&J21awo@72}A#M?F(QC0w!CMQ&wgm2b6Yec3w2deKJKCfa7yR=_sYw%d-~ z?z&x*J=EU8zVaIBHS=r5*RT!-4zC^d9JL)^IHH`?ozk4voYkD4IxFoqOxvID( zxh}h@xFx%-xWn92-Pb+TJ<>h4JheQtJP*7Kyb8Ps*Uhh&dsBE{^RD+{@bU6#_vP{p z_5I=}>=*4f=da|S;*Sc@4ag6K1lk7H1u=m@=Tq>x;HY5a4b>YNH_#zwA(f%@p+2FX zZVKIecysBN#;v^DB)6Szw}zb#yAy^CzYzW^0utdE(F#8UzYkxG)Ql{=Lv_dN&gZ)l 
zca!e!-ZQ_~aR1c($omUXno%XubkPCPV=?DrvL28;@OaSoQ2OEXhxl0M*q%6vxYRgo zyiIFR zv(l*2g45=nU3pgbobUPL=h$@j^pT8<8RaiHUp#zq@Y4C^V5VB;o2*k=aal*%9@$@C zX}+p?efD)~4q48PobS12xm|e*c}4l``EmKU0-u7pLZia=BKe}CV$R~k5~7mel9f`M z(t)x|Wp!^v-@GbkDUW*#ybXT4`tI7h(F)y))=GuSH&y&qFRB@96!+MKb%R;Mt>tvfn+i?5U z_P!3?j*p!hoo!tgx)2{#J~nnMb=UVO^wjps_11in{Z#!~_H%WgTwhJUe1F}5;y}X} zHu)&t0OGBN*+QYpg`XgUPO-H|u*^JG7b^f|Metmpv;>HAKGIEM^Dt7wBbm|P} z%{4KztetCUE*FUT9#UFT+vt=ShZeVT=QGQ ztVeIqZ@l<%?nl+8+U6&e1!{3CV2iLFzr(guuq(6Mx@WjIv+uo+IfzBGp$iWc4!e%b zkCreYSaR$$oFJ|NuZy1|coPVI4gn57vA}VtmdF(h;BFTH0IekeFoL+5itaaj{Idt- zH%t%Gkl$a&&Hsu14MQJ)0AdK>btC{-ssVuM2LQ+bX@2lU2hxUW0AQj95J^k_dI7&( zfv(b0KPd=xEeK6g34|RG7m^DC0B(#xz|;~5xO@=DPXNG&{oio;aqknQ0HCHTAunP2 zF{Cr{=lH*SBYXhpDIg}q7Q_&GfQTMKOb;Qn1Kc3*q#!T>uRr=ih=@r@$;c@vsi?sY z)h7TV2r)4c2{9?@u`>k;2R{c$=t&t)OI{#jyy`&C>(3;0=gBLIGZ){rFdKbA@ku)d z+@+*qImyb#&VN?moS=}5tem`pqLTWh%Nm+m+B(K2re@|KBy)0hadmU|@C*zJz7Z07 z^VYrlQPD9E9>yjmKTSzZd-gp2bxv+xenDYTaYbcSbxmzueM4(odq-#2$L^lNq2ZCy zv9IG3$oYkDi{F=)S5~*ScXs#o57384$LoTCq4z(o-&XdY)&&XyLPSDBOhSIVE(lQw z_#&n!Aw4Zg#&F>(xr09=uhbn1ri)Kry=$R7BW;9Yb`1DJ#lk0p4DFb(D>ya*Ej4KbM6peLpWU;rL}4#*~oqyoY7 z|9v+YVc`FA{=fYB{F7;m7`{1l)=K8_eKK-VAbYBvCbLa>zmFDBtF6)RDGKrY)Gky! zb;{-Cty)Qiq0@XCePbGQ z1?1#3x%ci;D5z*Cv}BVHK-8GOy{~#0s~hOc5%0p6DdX^%qNnn%6vde{k1s660%682 z3TN3~KNDxOivTc3`{_O?|j(fyTj(_QWj(GRh@u4H$IcZD)9;dz{0QO_>qpSQt_%td6 zuZ@U?ttEhs1&=O9sz`x7(eev!*r#Q1eEl*3IC2?UCLV*kZXS}NbMT1WO`Je|HZ;#2 z$cCmMJYaixC;@n5wTX|(H6%ZYqQ+}35`edIdDx9Y;*E(v`J@vQhW?V?|FrTXCJg^2 zy?;Y`BmWDe$A=!v_f)FA%U3nHE*>nLQ|z%#?L}?*<_C?t%gj7S=(oqOoqfFPY_>JG zfUvf?ezMT!u?{?QLj$V(!Nx{eqvmS^e}C17Md~Cc)QhaOMTT_ti=T9`3o9p`7vdFk z-b_C}h0wlf%@25)@Qa5-t*U2(ZujPE+EN?4xOjXM>S-O0HX}K4p@lPkOO4w- zOz2(HP$L96DSC16d*?Pq`Tfzf$GYBul;LJaml#^4G(}u;f}W_K>ydif^VouTpb(rq zPE|bAxPv{boz~|-5B0sN(DGBdVeF4yZA3%O)2>{L4xWTi(QglXv=TWsoT^8jB2(^0 z-b(y%iqY~*&?bHUyWCYP4CmM7j6i{^}8ZW#)~lprA~Rfuy}ykDpFN>O~Ljx4-1&4Sb8B z40UR6FnE-@X^O(v=1&j+=h?3P44l|e-|VMA-wK%0{EWguGu`lX-G)LRMK<)U`=5+W zpoGysb>9fitg^6Qw~NRLcXJ?DOyB>`VNth+>7HgxAOI2vD{7qK{p|!GX8%!*J@bm# zEqIR3K}i?phKC;_AtIyB3do*ijo>J-XKaN}5AB5zfB~5UJUsDoHU+dZEBYSZ^$JcV z{5Andxwm^Zk`aH>yv^WEwIq@q@?Az z)ukbusXioctn8}Ai*HF6Ul+QCxLh*3VR{YvRM~rL=W5ff4nIx%}!d~p69T3HxH_u(t6y%e$<#GC3HlSX+WHGPUi@+R10TmWW2uquyD#Y z#XEPxVyeQKOQ&1vLjq#sjMUpjZC25+q-{(&dZ)a1Qy^0n5wjxRyIK1_FE30Jb>p3s zwY3+0oBwIZ>E>83(e+S@iYZ>}s`yZgQP&#+R0TYP-;!2vx4dv(`EB3h){ahfXtn-h zcl6Kn_K4^0qN5(u#BiflYpewoiI zEhea*T}^Z1T6MzU9wnZyv6|kdh@70ubua?O^F)nX46A__y@|Yia}NRSmn(XGktWS- zhg2yc8JYC=biE!6p= z*~Dv$_`q|5yy?I)h*SQB*aOwADBPPgr{Kbwp3_y8cXc0rQ%0U)YfUKq;rArF_k6|c zyPrrY?bTEzx8U)zFqTyY8=G!EGy0FS*M{!lzmJMNu)HuS+AA)t|DjTvS?_VN#{1l~ zJpuqdu-}vj@Exk%OJ=8H8YpjiD7H2lB_entH6H?^Zn)E z>`v_-E`%gw@(7P$DEpi@^7OOsO>ch>rGNpOHvYyQsNNTy;spg2&Bb`bGvjy`tXh3# zQ!Q$yGGJ%r;QCvSTE=&7@9KS1Ih@YOEqM6(GkK1)Y#%zvUEHh?U>X@+JNkhTfS=LQ z%{dG{^e&j)UJaMFWdDbIfSd5A1rBx+WAUex+akzEer$Y`w3`tEGuU*amYdxP+Gox@ z;gfS*4gJ^xWq1+q8mUTNSyeYIE_z+`jMTK{?Y{5=9!%KMzivP7t-eVA$?AzKm?BN6;5CmT{!l12$#~@IQMj$@>%=J4T#1py| z7y^KK25=DzR$1HXx1<{Gq31MTfp8~$WSECE>NMs8wBh7pH> znw5zm2k-;6{iO9~0)YR{je6e*nqD>VIKb!5<`ID2bd?vk5uj;pAg@YuIUoQB68}Xe zU)C3>xpn@X zmQR6eeK0brtkKA3>b`Q+6J*rFqX#@%%UahRGj8aCajax)GxD-+9xlRb2h(4(zFsDS zalc;bJzrg2dx2Bp;+->(G_R2x8HjX=tm{gOw2#ux8V8lV>On8H?nej=l%Gyw!Vgs6 zwSFYG=Dzc)?jd#QMT4eqHYwZr@jouIYMuNXD_0y>X^SYgU(*x!mknMT(#4nTVAjLT zD2>>(&5WqtD)1y+Ah~uI%5$%)`uer-N#y%OSp(>DLwuLCoyQG%#?0rLx}l%iZ?lEa zZ|s$_S9*P#zTQM%t>y4v4KC!*iwzzY>_2IC9tso9zk9=G*3-PNsn(*!o5VTM##8D} z|3Sk5c_Nl=DD5gb=G9?b!+Gu+PZ7zi^wB|MpP5OqO!9tMNIU~A5WaEDBqXKOD_t{Y 
zeTD#74T}b?U0V2D|M={FXIPyRWU=mPq)r=^wlL&z3{j-AxH;~1Bg}J?>(T2~r8k47 zZVl<8QZoko3moENRP-MiCO<5_GRdaXCIB9^Hpz>jPI{O7d$^9iUnGAe%yc2KdE_Wq z2m&H+_gVTUm?q-O#!n8uAs#!O@oSyZ?6!&2ZcSp12J2|#@$B;Lh+W740XPrp6*G!*YxXSb9lI?@8Tf=pdjhRXs+Gc4E5FVJ@6@KJOmXo>?LsBB4SW7 z)pSHzXgRAxHafooRDCxl=0|zp90;FVh2#}zgL#AdSf?6MVZ&|TkKKdSKutsBhwkkK zku6(NHj&Xl{X$jzr&wSUjP4Lcs)plU2Q6l`HfZECu?1~-T$`Tu-lNvWPI2Gd@RmBi zpi)S@m%Oew%pjy&&*Tbv85cgwgr&8)9^5`1i{Ex!AHpL94#T@9m3#^uN-aC!`O}v} zyriPF9j>YI5gA>^e9?K{{zQxAlFuoVcr#I1Ff~%X<`gva^;=l@JcgTQxz=O0IZFTQ`?H^zM9J=`v9v|*w(xngGpXNk8D?tk=z zOH*sXEaZmCB}3iw?;xgX-6HLg02|+&o9w$g2O%+42ZL3j+B6Cd zjuh+8G@5qjGpp~7)Gpyd@b9k&i86qO3)a6WC-{#tSnxkDmvVT1|4G;?7jt5TrDq}m zfD2UG<8sdtr{l7mW*17L2|%VD_b?yV`1_&Pcq7M1)lpDiDZ*LNb_V`<+9b~{e=%>^j4=Kdp&%XR7f3Yg%wHW$~g3cfv^s3 z#>80rDn7<7+N1-!3NW{Fj$YQAzF~@7; zdFmb)Zk{TAMEk9MU#l!ree7BXaPx+k$PdMxGM_S#Hy`S_oAYoq%@bY5a*S9)(}P1b z)ef}$);2Hl~j z&>LJ};5u!C04P_}ie1>qsE#|-js?&=E&Yq3?^_4mxa=S7%J=7g)*q=nG9+IS{g)z7 z5UcIJNyDo8ca0C-s^3TJf}A#N>Ezkmx_2+dA=(V`G#VIqSxvl<#U9FGj7nM0d!4$Z znWVlis%=trn-%-mRx0vzgW8Q0zamLvK1D;*5F>-J+T9fVhVmyDmL?ULip$~Jl5a>t z?fZr*$NgG)mQNbg)}Q%AG_SBcdiwan8Bneo&QH!azOfzu7NY2S!|jG@UV=qdQU6#h za8MpKOZOvcii>Zo%x*^5v0}wcp0dKDfU_%H`HK8yCUWTYB=02c8oj7TC;Ayvsy1;q z;OR9-Q-k!Ycq@1drYf8OwC&nDyZ^%+HT<9F`;>k6=i+W(yyNv6%y5-m{{$ZUoc1_0Q$AMUqv@X&!&&(B?uRuN;TP& zQa&ZprPL#J^use6Cew=wet9+dZcvBd_+COvysZ>v@f^J zX`So3%oMK7K9IUz&Md?bFWg0&_{s09&X4#-swYvVI;Afabr|E_T#F0NyccoJ z@MV46a{_>xIZk(HHo-06?WA+%N0jqWtXK z>}&gRl(UsEJjX1!!*<4U@I2zA@BxDpaO4eY0`T$`0bsa?^u^IA^b!ENaeD%wkhr>p zr$k^h&ar@9w;NFK`vcg_w1(P?UXT)0_ z3pNDwQ&@FIgu69{7u?Skd>VR!m7YJKSPY|_7pF(%>lw3|i}bjqYSJ2Wo27)=q)PMm zR#G)jl(8$^GV*K|D~=ImwMvh-3eY#gL^rEphkpXX->Yn3Pc|oe@zhxuC;TS@P`%V; za0do$lti%~v}DmX+R{a3+lJ<}*hvltAC`U_*nOm(Vca}~=Urh5Imzwb{9Ju|FZEZu(iMm|>XyJ9C~{+(fqt715(h^L6&Ex8>1H&-7NQEU&!Rjujm z$o)vxI^_6Azje4KR?UtI)Q6co1fX^OB#8VC8VSI|3#eLrG^Y!wH?EfxfESwU2Uz+N zJQW;s2|qidf}TcZJ!%kWZ4xj*2P2_WUJcY(Wz_70D&eB?;3hYtp&vs=$<|vj4;Sic zEkt=G_~@;av}`lAP3{|XH=3b#c;{Yl8Q7mId2W&kV~owQb$Z7nd|Bg>ka0|i5Iw^VG4HHPVz1?|UHM?s^UdnQ@4> zDCqU%*1uSk8A-Z2V~4cp*?&j%x4ef0=&+j(z~VuB=N&L^VFs>OY2 z?76YU?rlflTGIiH(s8jy+kWI0$rxVb-bG7_s)6fB(GxIX`&G%jsfxGcBypORk05M% z6_4S!S|cU#bZ9DWHm_vcRsXavG8bsZ3}0+CH$2Z;uelP6>!#P09Uat8kiEZwQF{WY zev3VaOi*xm#2d?JF%P~L5@ZaEINh^_c+XK4XAAv&j0iaTZz5~Ntr4a<(qn>uoZTx*{#e!43Aj;`$FQ^?kSa?$j1!^`M0r_%|W z`VG-P$Tpf!Lf>_5Y4VUM8h%s0zWgDYHxi~RgZWy`L4E2X&Ed)Y)UmfqB!}jPxm({7 zD93qLy7B<)Y$!i^4mLmKr8=erdYV0X*?7d#*+^AURCm`yW#w?$&wP0E{awhB<$ra- z{{v9EY2RhQzDfXSn<>K_(Q(V*S6ASTADCX5mo69(}d|gfCEZoB*B* zOk`fCk5sa8ygt1 zFezNVs;tKh^7~rV0yK; zyVA_-EPc`~d9}_sN5NvkyywEQ+7Ksp?3h0mFO;~{iHMn6#qS6W`?J<6``E^Gn3)O< zv>`p}ic_4kQ&_91&wkq!1PvYZJM)fmO{zFUQE_FFmH7DOPL}&qrpZsI*HiXjh9%oh z6Ko<^4LI6lJVcBHwRuxmw7zK|Q@&J`=K7gNL&FBh>2IROT26j$)+3u?=?ip|9ZA9@ z3JtnDbm1x%vV}*B^zA|Dx8!yhJ|>W)g5ZcCOGuk5r&_MilzaJV{IarUkU4|!^pwWz zOv3HXOcUSzC&LNP@v8y8ociK;WMngD$J4EOTI z$QdNs9~EQG?LdDS-hH(Vz7Ojr0DDf+s$Fe}Lj_LIXt^ZPz*jek;Ln)huLUsrOb~nw z9HmYG4lj?bg8@~$*^`L9aI8@|GVvth+x!+cdb=NI?9z28y_-w`lE*q{_jXPbfKhus zn7P-E6Kde^c?RvDFIpIZ%}w41S*l3%CIE$5F~XSM*3Cm61vmT$Pxz`GsE6$uNx->a zAK?1}Y4`%i{yr+q!rsEsJpypg5%H}YKLX#^YvzKl8oV*zDIes(Uv88o01o3Z_#9;a zJR%>CeW1XN?_#UPzj82GH$$Wv?5zL2={hc1`v#wY;nnRpz?w4KS$s+v;EeAh0Ndw1 z;rqg~@RhO4+0e?ueU6VB%8v6GlKx^Wu&SU&S&+h}c)Ud#ZmYa~!*z5GPro5JD4!6Rvr^(4JP%_spw6wqb9||m_UtcINdYZX zAaaakLE#`)LXnGm!nk#I&9X)cv|~oS=lWn4#TiD4HASB}RhhMAMf9v}x`&MY_tCfc z%d%X9{A3Pubns)$mckes8TS3;jeg8feh`Mc->7 z0N7Y~){^~1)|OZ^=(P;I2TL;An#IAN$Ubc<0)#dv4x$mu+{|E@B$)Vb<2C;_{-GvT zw>%oQD+WRZ)F!AS7VrFrQP5FeH186Yg?xV!SB>zt=Duss)#xzS-=t@ 
znLG4$Tga06z)3Bo>p4b4-U|&UI(-GC)L8lIZ4ar4Bd#vU-?w3I31d@UJKgH!zOayf zO`heqRU*1PuGzV1ygwRPmfVh5{78m0rVwXA-rddbd%opT=MQNa&i>Fh}|7IEpk znCeYgfd_jH;2XUT`LNkPw+&B2ZNm2G_ra^r-ejlGTo{w==g?Rb2ez$&LONdj<2AO^9nm`eaw!kd7I)c=p#P;OQHXx6qTqN*R4vDxX1Ggsv+ z&z+k+6h@old#)rB0JBizI4Q9B^#%<8;hp`F<14(hEPkH*E-3&nD93VuZsoaXG?V~1 z^>^9htns4d+c7J~1VA9?0es!Q4J^;hYsq@+2Ogzm_}IDu{-xXIOSn>&;eWyu|G};? zLH>D3(qEeB-_b;$6IqhNOy?bEL(wr-qI*gyeQ+xP!#!tp2eqkpro`T}bk@G62hpur zu#yVw|BlkZ8&T8rbXio{=e@npO(`iUjbkBcX@Q#FULIcF9D4qNf&S8bhSn`q%9VP; zx&9iZKYEJwOil|W#h+%=4@|m$nq*P1NB>K_4dT9XNylM+4?CgY;=s z#Mr`n47JYQx>~0ozaH1r95RhUAlA#SbNnHtc*B z()*hD5GK$F_KGip(~!K_kURZ=-c?VbOU_am0DMe{!9{>&ek-F9hhsZMd;hF_11y{T=S5JE^IK^p0 z@~`$p@NbzG@UQA71YocR46ETAfPJA~juP=97!keq2r-unL*+!!cVL*XXU7^7V+p`1 znUn9)l|>$DYIdOla~)Gj6WZC@d{@|qEYUeK@>VvF-`zZo(#vcVyA~Cu{d%T+xBtsz zTh_*>@vTg6^$ekpYlSW*t6fu_OAv<`DWJ#Ryo`7^3tAVlR#*FC z9vLP^b)&UaSLbU-agmp08%-G)Gl^fB%t-Q2L_M*eTG~K@zEK1i(rl@=-%h3#$A9$g zNW>Ahji9*)HSv071{f|EdrU=`zyrj#z5%|?5AjkRM^l2uz=(d=XfUbZtV0@4#6Cka zD1{<-;ve>RLExsREW0~G0CsWUxhov_f8pr;)2D|1I(q*Lk6wN@)bS5Y4^2Z{M@-Zr zXV;A}ieXx~*Ae_zDm=I-23eOgxf8yh_tx!fmKqN9KpK!PcdC6`<>DEcp}r}g<)$aB zXRB`0_`qYr_S=(i7`b1KLuc6KRMfsTONmnnzt#J=6lE5=EJR%AXf+}v=5?N!b%9wy zWe)Xl38}cjxt=YyK8KiIRPSsVEFwp*^AYQsS=GkAN9SgNO#QWT!OPr>IT*FDAoN4b zl`vOy+}7-T^V^Bp#OAZ;R7`vN-oOsa;BcSOsq>cyuO(?)^cKRI zO0R!;?m#oUN_XMOB33rpWt_!S^Xr_{l#ImgnF8gqO_MS+ZGI(m?3>`V;fkd)qkxL0 z#FbJqa%k!t3e72^c+R1y*w4Cq(o)}7-=k08_(fT1?o66yqWRn4%*rjVxwLkJI|p2l zcj)BTSy0a`dOTaG`TX~Lxi&KQjPA8W$cJlV1w%@#l_kTYH?0PD(bbb)mv7Ff2$&3U zEh3vzzxRxF_XIY+S58pPx3*qms#x!5erjIEaa+*IPbaD4VtjHKD#tA6mEeTnNZwV= zbSlQQvy2=uoyFU2{S)fYhb8x4jMUqD3aNg%kbjuNrkjLwVY6Bh5I+Jhp;iFCQ;aPWnuW{pk zuGEi>mbJ%F+QU~&svrmAE%3v$ne0cNAf}EOIY&I~PTU2xt@y_kq`#2e;lVx_nNBvv<-^x!Bc$Q0^;`Vm0%YLK_N|D0 zBWL(gW$O#eG;AE0c;6=l&A<>dbnAKf!3pe%bHq3m_}qB^;U)PGa12+35}3Z3-SUP% zh3RA+s&J|k07pDGh!n(VaQE(k!2?ZoJi-yay9NteI~+cW{A2U~!?H){KQHV1-=&#U z^Kb`Ku@ogL53ltF zMN==?iucLg9WnBBn88*kt_++{XeZxGF8@z_zwjHzN7wN8!Z}(hne)l4^ zV3&)0*|&??iPDwRpcO5DuA`6DC4HeluR4Tm>2AwKqqr}1-K}fd+LpcYZE#ee!TRSQ zCoVBurmB*=L+oK05`?-yXhAP`yP>Dm!F}baVBp!#Q0yS$%g%!wc!}F{QP&#Ng@m@* zk)hUk?*mii5_do`-mmUD>o^XfA49kw>YsP_3$97Y`8VhVGFPAr(b zLm^s0q#$Qy-cGCOXo*!o$2kPg8L-;hXuVJEy&PvT`8xD9Sj;$H+9*)wH71!Gpv$Ly zVrKTG|H&9m`!=S<2cIg6`bnX2+;?91*IDrIul6Kv`k_`n$L48>R=t_7M=&ke6JvGe zl$a&Q$DqUIDMs&$G8UN~p3ezhfcJj7@^)=aLCKc+iVrs}*@TYG%sS#y2{N(!=ME>v zA`x?MK#-ASFfoSjQK`khj^Hfc6sJafi$rE&AVmbADD)og7K#xMqI3uW_?l^d^fC^n zAiSA4mbePV(}IUD34t*MA-o*=CfHlG-o6i^<$K728pFl7avvqez+&-W25OsU4vyh; zojr<*g1xiCTf+L^f@e(`x=_BMi#Z?hsX@vM!PP8@h9aY0pt@zF${z;zhE{Jl@Xk2n zKcJe5>bJxnIO`=goMO~QJoQ#IykNE;5OJzs;cn%tWS7vXPJa(QFIN3$zj zCu1nu&O|Ja?I}Yeu`aRs#g6aTtJ-e`T2>wr3m%YH?Bc|5Nlni=oJZZ*NtX^op;f^m(^bOV;Qif%T*LdR#9*1;N%p%wW2 zmAb5PdtBhh@LDVe-~UM^1+lg%uX+S0@5dB0g@L*e1l89Fz)8C(Ks0<+w-I{(8a5>3r*ko%wh*A?0lFBmKe1y2z@C# zaPq6uQ95)~@!oH?mp|e~XGP|(x_o`Z)-QV7u+`Nfo{N9dH$?gaQlcayZ|j!IM$qEX z5{hI09WDdzu_2~sJ;&V%hhX&*W=QH0Lt(J8OU?X)JjuRkXX}xoY8L2%iZR_Z z)>-H3{aO(ceWAXp$ko`?;HL`#OJ^4-$6n*2%xX>iPke71t$vA1v3j{#D?ZnF^+TgW zOK=LEAaZKxY+!Pf(AQGsOuELDA8)@N*$t6C^h%li>L_IC^%~tARes`7A{z>pEx3bG zEZR_Cd`k%Y$gJu(l>`cW13V00%fYC6fh|j>tJs)-Qmk-T=L7Fb*8uiOVtg0N#}jCgE=I@qY6W8E>H_ zgZdpA!~;0f(5E3~`-KlE#oFPZm)y>H29)VX`4cbYP6gTz`8+C!n_&Wr8p24y>Iehm z?8c|Q7=aW1%%&S=7AN*vW@r}XeES%;XQ}}?R-9q3qx*Z9OnOmG zdQaEPl+~6$??Mjw#u$6oY@zcfrxDYu2O~$gr8(LNboe+1M*zSlmwK+qATXy9EU;ZD z)O-%E&3!0W4O_pN0{_vC%H#z>i6RU`+lmi{?n;&KtYayD56EM?N8#Tji@{B!g8_F! 
zUN*Eo0?a~m%b>=Fz%0_xevukijvANl76Px`Sq&EEvV*Zm@X7P}SsM7qUgRc*4veAN z`>Jv0J%u3ct9&=aQLue&v}G~Y3-?<2wi{{;84**sJfUAQ{~jmYuJpM&|JD4di>HJ& zYrLQ|9_%yaXY6MCnH!XqZg+5~9)63J=(ksI;}p)?%JkJdjeOB?GvE6`o?W%bDW$KT z3@=4w70t?JjZa!bFW*-35AYb#oA_dMi}Tskjgw;8TM9kRJ@GEA_1h*zn|c$%sZWLU zruQGE{ur%41f%5iMDP|6R4wm@34&XCVF3>2nw4qVkjeDYsMC~?m8>&lSc3xXf z%Zc3vlLmNd(2$X=A#ywA9i_bBWj}#x1Uzt>s<)u z^-wKdoiLIu>2@j;$$I@u+t@7nUZI;ax34Zf57r4l zdpRr!nM$Hp#qDQZ(43~i`7g?U^tOCbNE7VM6AJcf*jvagbTBG05rUx@ zVBz3?qucy5ku1aE2#!^RFDj%RCp8MFjt9y|Hd=e-pc4-C-`=ozv-g>$%>;F%_2uwZ zRgnx>a@ODIg){*3EMh)_$WN~1l!C=UJ36ZUr=E_yDDS5DuApevI#hC1;GAK(l0|B= zPm;fX;-JgnsP%J9-4zo7vp)U%LwUCRK5pv;y@H+cI>!0M7v#5A6B|o*+D51V>?2$` zVy>PWrC0v-y~$`8{}#tWM^*x!f7!@p(i29lO1Ux^rneQIkmHt8|79;@T5Nu{g^PJK zJMFr(hpNO@V$2@vH+bea{aI&R#$7CAOEba4u+!y*iZVi|{hWTPZ|1P28C9MjR(APf zsary)VbVd4Iq98tV^1aRS}tE=?)GsFdqLyO(_>{EGYJf;6WRrCZ2G8Il=-DdrGfOd zQ(_Sp!N*OuFZi;Z>4J}O@O zFSrM4;{|*-=p>M(@;f42+%lb$Uq;&&55^kIx0Qt&L!#_kJ~#^994XOHLvni$^=~#U z44}{8>3-m1tF1H6!@2RF!G|xq?it;5jQxJl){W}#98KlbFuncoa(_bD>2TRbMRD=* z()@bKqHS;2wCj4G6OUN^r&;?fLzw*D$acuHhh}Zq(5%W<87B~P=;>ICc@)}WhGH9Y zg6h_YHa6Z`G=$@x7LL~TnVyqB2*+^OBi@x`nW6jaxTc)cp?xp(7P6l?v~}?FW;HC@ z3fG)7m-TQWxJroq#~S`b7W2f)t>$+H7Mquv#*}5b>a(r2wlt0kZ=Qh7arz_0xzRU< z(FHN@EUqwpFB<&pH<*{$Tour1(6+7ChqqsC`2JGBjNYeDdxaaZwsUV3^l*unW+#U5dgl8eD6R}ggEA^! zYERjH!9f7{;Zz`w)&>E{YCn~~tNpfE7ic|LgbBbE6IiD!VoDvQmJQwA17VvRZ1Kb_ zh{xA&qSpvO3B02kZ-^6uf7|tk;^NStZZ)Zs-lqeV?UPf`BiuvK+?3*W%9o5+|Dj|1 zs}c6kpQreDJzSg?nix-&M!pV)97yislA77E!b(2n7~QZdsDn-~uwX%S_o}@|77dnf zwu-!?OaV;*tcSkMvg8y_&sxt|pye+$XuNGg4DE>;m@}?SXb%RLFLC$Q>OQU|lfE`_MHN$iZEV|V$oLX(@D~GGfPubZ`ck@+vU)H+zKq7i# zLczN%Cw7MQwf*uH7HY~g2ls1lSv?i5oMoqaNuxsLU!e2iq*kYrZOx+K`5vQyry0os zY5R7TDU0J`#MCK^3L1+%Zt`8>^9aH6?oKHU9R}MH6`EgZU80fjcmOGf_pJWuDY!qj% zr^4)$sn})VRkEms*_O#4UrJiNksUL7KGLOhv) zYgVq+viV@e(yEevj+Z}j_WLX|R;X2-GIl9c(EniXy@Q%;zqL`UC<+LIRLP6<8WoT#c@+f-(o5(O5osbKB0-V7Ql(2*DNzAq zXrXtA5Q>6;^d=<%M4BYjK#J#f@0tD0nfYd)GxOWuoZlbk562-gPhy_?S?gNsy4JO- zgbBo%8@{H&-EP2_2w@c6|SUpKz$2q>Sa zKBHb7*U}$%e!G;4bB>p{UNR9RtW*SRCuW+2kad#pu=CN%XP|sf)Ei zX<*&p5(2sjVa$HU3)m?B0BpXG0%1mjN(?tvu^HM-W1?7Z!RY!Jeh)mARY+!vZxm)f zWhITVvIQuLBwtSS*kb_xKhpeZ_$3T*7~%rdQ(1=(c*)k1&W~S6VbD3)SO5ym;%6v! 
z;&i$1UYH8s)xp3Ifbo`U4@XlnQDl_Qwi9G_xCDEzV#3be)}(yg8^WW!ij6Xh9AL`y z16J{J+6dMYH|?aH?M+XNnC5Smvf5l_sma|j&)omO54T$WQGu1l%1GAXZn>y^k!$Jr za1Q<;U*0dS05jsu#uWR_Hu}nZj_X2%m#Ca@Ijg7JRj$9y?+9GQ4`-8ayD>yuP+h)0 zkh)}u=opH>VUD|+b=IEH9vIIEoI^&3$PP1nsod>3=ct`=YM_!FX*4A_-3IEG(Ey{2 z6{TNToT*=+rg?b0vs03OA)leB7Y;d6t9XW6nP|~6d%p5K%KpMaeiYFnLRfWl*X3A; zT+?LL{I*YZ-c`AO~rfHZe}>-LOX)6Ee|_N=3+@T01F0kmLSXuZcT5 zqa*%o@V%ENaptd&PqeA)LXSsux-63})OravDtT;J6Um-etAvxD$(ZE1ABS4!4&U?p zArx{*t!1=(Q|FAp`A@gsF5P~Bn&2zb@Y;^D#oLW+U4=XijwUUir1MZXpf)y2uGj9GTTfy)UOuIp?~U_xc;156E9JXsH+rq* zT^v)huvfZl(VLSXSL8P7o_-fI=ggjKPFa;&^Uq4s9Kj{ab57(39j$>!w%^Rde7Wuz60fT55%&6Yb9HE zv3pooELc9JXHds!?k%NeLY3)YKjlY)9QmGD@wH}SrJj}f2&!}LTQ_~?pJq|unk9Oj zS@ZhaBI(!JVtAG9Q2f$eq4!me((F$17P)S*9R|~h6;>WriPCcZ&t?ujSl?-O+5UbL znz67MH;*B{Xb-mfX!jxin5RuSfw(%mTJRkR=es~DZ8~F8_-XnW71@RtqFIs_pR0n3 z|1@f_Q|w$nLylz1wUiVz5m`e%8^YpLF&tEqkh1Cvdgg!lm{YRqOf%xf@KliB*s8Q$ zX!{S1h&x%!lQl?s3w2;aWUbVww6|BXi)*$>J{$+`lV zg`1qabref4Kz zl-JcFN63(~x6&mJS$FafHIGW47;}|tMMV0&F;DiYKJyUKO|f+D=098(Fhm(7w&H*4 zn736#z+c~WvTikNynuRhf6!vW_Q^#;ove?29zqNAi##WLlXb=|-EJY-GcVcmL()RJ zol{=A-JRPThLUkcItgtR4ZL65k_{zC_YWt>n8%pMS(wMz7%K4^+DWEIOJS4t`p=%{ zU|VkFp2G++RH*2vU^Hd3-T5zyL4uA%!Nf|A0M$1lzJxdv59K94rp*5aSj@Qu_Ieqng<&sUV%|)yQ zuLJJ-%H5&?x2yhGjUlY&Anv%_)dwlzB&mW_Hxc+nQ>(%HCxwtDPJlG|K@B;Gp~*&t z2AKXssN^3>%V&*3ID$3Bssalq^gQ#HkRpsjZb~H>|G6IZn>9RkOSFvq~w1xalZM`xgqUQIUm|&Qpy8-0X3lt{?SYTMb-jx3JgBD(F&s z7qE6a)m3uX-p?q9j{q{jY#og903XMaMPWK-$^}*>=Dhok2SdL7s^aEgSkjmzquolX zdVBoU2}8vy3@1Z%|#NY#1=eYYX78_IKyr#+N z106Zyx5VRn%TDFTb!qqu8JnI+8A!I4yw z2;WdU{KE;Q0#dkfYqfM{CDlnv*q^T9UT|M))WLMD?kawR%}^2?j$}hk$s9w;4a3WZ ztSaWm;J;JZ%-Hovursv6PtH=`81A!Z-j!)CDmpSi!O5MNBS&e*qursnxoVP-#@_*e{{k2rjsx#?$WvV z>z)JntCeo>V$EWy;~nPS0gPcGIBy&YoQLF8nd2H=7h~)H>r8kpkHGpBSe!fHOpJok zspftXE(D~2Dz0LzG2J})GG%cGP(TmQRSqO}njZALt?&VR#AeMM1owLSaeP_ zikyb>{#55AEBIUzsx&?AsJ{1Z>m@~ko3!oa=+yeit24Yp5iv}yJ<*HbC%m$0{Medp zSZ5V<_tHS5(6B$s@tSV2Ru7`}6VoW7HYO;7BDjQ&3wQizO>>sc3eo6Zt=}4LdLN53 zed8EEh|@%yRxxs(y)|R+BUE7-Z}4j>zuE56sUVVD_eCo#X{fW{EO5ljQ$STUoWn={ zN|<*_1SnP!!Fu%(9oK`djhcP_H)IL?Z;$q(yTqFhfvR$o3SyNFh=SgXcl!X_fB8v< z&;&5?v*~ovja=iPryW!*7xw`tg z+L+d{@v%;wjh?e<@9OFrFI7iNT%2?eX=rH3JuH4sT^*s4#@i#I({U;+Gh9Nz!f{*l z_WMO$TVjq+u*H11>(^gyv2_sVs*)6d7^zuCmVLQ@BO| z%q3tQS_Dt_v-jnP_MrMYpkUynqSX(=L;CpRg1N7Ji)HcayJnUnx|1hDBRpNKa@X~3 z>s;n!R|FI~F4B?Kpc(KKJ)3L)IJG_k6UpLT8I3FIygJY<=lOE^h1J1RJ;mc56XNc@ zd?lrykL`UH^x6{+(R?7e{19H^t*;fb|G`Pd)VVs}rq~I#0>-?2du5*~FeD0{25_-` zr9*>;x4rMAVXw~=B#0FUUo{GRB2fKhIq?fV9(|zs0;@MfqmkxIC6Y#?n}tyWU6OS^ z0&UgO-=}TANR|={Ja#FS`_2uy-;EAa9lW)qcHZTG|8ls$B~YUM9Db=zKQMKXrZKk> z90EDtYPdnSKTVJvFdi&4fvd?58-7@_j=P&;Tx-{epITE{dvU-r9_-&&$#+ z3&ZuSF6ZUTYs6TX6-Tz|UyRK@d0i{ueO}hvx}^EEWihIxijl9PWsBlV)5FoKDp%J; zOS?tb6KwOY;zg6{qYU(IY|Of@unnYIrNkwt-pO(kk{n90@xwx%7;GcB@ycG6QIxTWm;Ot6zW5-o7XqMEI!KEm4$<6WD}qbHgU*LcXf&Qg5Kl^9 z||Z*Hs&)Fh<@63UvUexVbm)hJHNho~|y$`z$xZt12xrm9_krM)v%{yJM`Rl2q1 z{DY%t<<{-(`qrr?({Ea|+>MuSxi>*M8rlG6c?@CRNV@FK%ziZOA)&Dm49F%QibSs- z#j^6r+}kUkO!rh600i9*LN7m7WX$55Va~9RFxn*r795a>fT*}LNm^v(?JuCUnBs(a z2Or1UgcJ*Dr3$|_d+ z!*^%bgm+O{BU?W;{cn2}e#p#^inote!z)$RZD$=P@}v6VQ2r51a5L6;4`3gM#i{)* zn{>wj)RN?a-Vu`~j}Jvt+S=0J(O>o2SxQt$VlaY?6h#pcjiU^^InJjxRQ2e9o~x!> zbi`;t_tLf!!lEJN(4*aq8kIir0|@_@1qBC3_eH{6NfZr7-4n;*s^@Frs-?6i=WQb7 zRd3-2!x0|sQd1M_Hhnu`&`-BEinzEHyMJ1?&OT%)ofPBdgzHrPbSWL0(~p-`PKwab zS9m(H)@0W^dSxu+OUNe}1nYX?~UFu?b4WOFG>u*pwca8R+x2n zi=6GKELJ2-Doub>BcXi51{}p|P*-@oXNU6k<+Ry+#x@x{01aoTbqsy}N%E%3@LGJp zBB3p}j;%Tlpl^iXKpCom9naF zx5iy>2-tCc{6v<;*8F-8i@`rC&iZc3laok)iW*PUCg z$@3h|y)xvZpP@4&NkCC1my}vWpT@mQnZu2cZqiSYPF^~#g4eG`CKO*j13C0YUsYUD 
zFQdUE63QKpbGC>(zg(qHf_(0aLUIKMQ2jetA~au0Uu%6|d}9EaX~HQ%%n|&iB(`fg z3|;(koX@*j<5s9t^778k@{N0s=(6{r_!6I@wG!oXOg+e}Tjh;n66e4Asg|O2l+=W4cwoK>NPIo9N2R;~~_fIxv0Xp-#D(YG&)n9!=I_s@!S&Qyuvu|J%wd7un+ zO7h75!3<|Hio(rT^`mZ~h)ODZe4L--l>Xq77Vr4a4P;LVS_G9uj#Q6bvFd&}ia0{7 zEO=V!Fpqx?=WX;892QWv4}@Et&+v;#gO^wbSwlBvI=LYgFy0Gl<%*HEarn^EZ69II zdfTr%kMdEsH`N4wxum`?aCx{OcQrfLfp?kACmzMD#(&}dvHAaYpsDA-mlgk)wvjqB z!9Is462>!T32nZK5dQ6eew{;oY=ap}& zzuDA8%tiu4MAROsu$#L6^?6fFs`ue)to-|xxlNs$=ZC}#;)3wQ&+UEsD_1)r#vkyua@zEb~EsvbsG@D;y`Q>`H!W~x8=}~JbbZf>nW);k% zFB(>9+EzCnkSOZSHE}8I?AHuo-%!R`?2p0LoD==A;pSM{Qv;)?HrXx_+s4d!@7eC!r%LfZXK^9>4KeXI?_R_<#dO$IHxKeycfpmq$1SaXw$ruD zU>_a^FQ(6bAPdxinK%$Jx5$UY>xka=kT z5`eMQQOrbtWH(319;uI!63Zf%A=4!GnD`mf#(yLEBMgJ5X?C*AAyhQ03ke80rtSV^ zv;mWkFuyTR*pcr8qYrkrd(Ia@Pg%;8evAm_7ViCKy8sFm8`ICmiQL{3H4ym|O(6Ge$LcO?EUKHyouZ>MrJC*!hMxE^YnHo131sRZ_Xes&uu28Do0V z%A4sjvX3u<3V9Krz2=Lg9>sn1e)6LX_q{s^=e6Ha9H_K|#Vrf?tMmmtLqGMqJUsQa zWTVedG5Pb?)s7fGSr=Jzq6-c%^h9&))d&uAt0nVy`W)HvRwTY$jz>!!=fiI&$K6gg zR5;{QBX?=i2ZSvk(C9K*tKRl$s3~m5t{_(dS$0?);F`A~!_H?089}qA<3tNe;B&QD zho@5_?MT7KD{mQ!H6ukO{ce?n{w;1kX9rrJ<-oeq`2C{oOz6p;+^I=be+T5(imQ0q z4F5B69U0O|Cr840@)Oi*YxsgmGq)u1wu+}2?cNSlkRBBy-ie4UIw>eXoYy3jy; z-rVXe?WsoRRW*-W1@aanY6|e|V_&q@WIHM%3|ozCdrzeb55p0P8+@(b3 z?Cz@*ArTl}VQou)Y0Ru|+%#8|nMKlTv2k(gQ1kIf^3vb(W%l1qq*Laf=P72gkkEYu zN<01|ZnVcgxMXwQ`of;{C99lj%ZJ?~njPGYb0SJ^g5%j~D1*x%@S=(eo{w?2J)D9< zHmov3;Jf46fnV6`N?kU`X<55yS$j6y4dMijx7sx23)!1L_8c#M z^F%t~T*GJ3SB=E*vBVjoR6W`#2_0L7KSV2LsL#QU{buw1Dd6LV!$vj#MKf@qKx4Ol zmY4}d1SFn7#Uci#QPocN%p#|;KA&g4KK|_4Zx)2^2Zq~d`OyPTs(zoSw-p%+P3MqT2J@tt%stM7d{lal=?q%EAqBV zm9>2H1ZNrG=wwS+%*#JKoLTRMbfB%UI3vJ^EJbNws~=ItM4^v2OBi|H^@UucsCPg( z!>fn^oE+^nQ_$w3_6@ZF&IG8Ba zR?pK=>8?5J9-k#Hvy%_+ciuL+@ga8#5$-?X_ai|#+qp!wj4vqTnLIpuV(rZ2gnCn7 zYhv`wi4e))Gt|)+%|gKr6!n-85yn+Y_NM2wB*iyw%K;F(v?zlHj`DL(OZ9Q*9TgF$ zbilGP`FNja?y3Um4|}|IEC^d?EHf8^ejy`!%I^`6zj3SW3K>0-@M#nqO+-YjewQCe zegFMVmW+VsV3ynQ4wbm1%`{J|T#HAAmf7-^3*A});<6^Z{S#&xO#*jw8a}+~&ke<2 z3!8%7MNY7HV#={=2PmxjMQUD?rx4H7jyX- z5$)G%4?B5G6mM4?wY!1GkefxiYY9;*IYK?21+Px796%nj+rV|j6?ET~l5 z+ZO5Ie<5;B;5`z-%F-XWPwVktpC}P4k|#b@7YMPlYp?BbRzWy^duXbUE+e$!U$BH} z?Gz%Zv%T-AquNZkt{jTH6~fb~@QAMWo#x*$4H;w@y*D8VebcGGi@o=Ye_W6Ez+W7| zu|Orbfp*URNU>!pVD``Y}C z1m;*!zo`(uE{}zAC!|}pJIDKc=Sz#9W^og0Wo9hG zjr&z6IU!(f}A-SZ(nuy`?6J5X6oW4Fd{qYQ|gqqe-kb>^ zX^d*_FUo-uD08t?N2xZHR##;@A^~r3Jkw?Jekx_y{d1X4gZ!8}%*VuQV8L3>>1xps z;SL4XmW7Ve@l8kUf1M-lbwE#~Q_pZ&q7k?-ytPH-<(6h!^20B~`@`~uGmqwwoIS#* zXdF7L9gV`Qfad8AXjUBULQOB4Gl6K?6B70C>xKi$XpTK9*vb3!1(yTV#W0tF`-g8W z-o7!zJzyML6--o^ZA=(uyxR(E)z5jB<@LTvIkn>maNv6~Swv+?(XKIT6|zuATwudtd>Kus)-{s(keOrcDZc1#xk zZs$tpHt5$JZAX7WG7G@JWCrVi-^MuN7m<1Dj98R6i&(+UG$3Ny(Ll4mcj^I*nV{m! 
zyxNKxD@H3~7RPA|0GBK^HGta|>c;Sl9G|~Y1v$+SFK^~qyq6wJHE69N0fy@X`d(S( z{f8%G1%Gj{u(Mw!pr;-NDmZ70_C6pOqVJO5VKIse8+AdTR)!EitKqe5@d%x96@ z_7ff*e6hqgiII&eA3P=q5zwOdceR`gNj+Bwo`JTNNvcANsz^pp=nZ%DW_N6zP0`7-t9TO6vAt}aA00kJl|zK*T4Z0(jjPx7=w#fYLS=i)O?1eOXV3;b zMi+{Z8btJ`q>v;MMl9HAM0#ku$=pJd zOj$_#{uuue<@(7cQgD`njtIDHe|(#%XE2b)yJFe6A?BLv83Gxo^!?ad+x?<~(V*0| zd9$NL;z-(n)lUl_tiAa|vo=?$uM074gL%WptZ_x2E!tXD3Rak(qL$<+5x;b!B+$1% ztuOy89%EHp84(teg!tBYULv~IL!?tiA4~>2NE&m{G@Uy6`ugHL(PoN&omG4zO4v_y?;wn03rvuIECn(@ETxiwL+ zu^_#&0Z4=->S)e1<6W7$)pK{P&DfajPkIPkylS=eKu`P1jXkt>p4>d zJwK_(>a4>rM8JY6i!8%-F#0ft?@D5@M%S^^5W)<_2f@fA8Ucbzgz5^c%qiO;>{~$N znBr~8>U@o0tzv*@8zUtJFjn88i)LU6j{Kh<@~1!29w4RX{+jL1(i6rr!|{Zz0^kb) zx;BgyQ_g2~fq8-mnB>dyMs8F8n2d}J{Bf@8WW+TCnN#TxaN+#_^sa*R zjWR^%fI&bajR3+fbQaAjdL~B5WyK&jf*vC2{2)c_7ZnBL!FYNymKp8>rmm~euNKgS zGV|hpLnaa#B0wE>IE4aA0B%DNM0uIH!6F}M1k)O-bS?>KH^W+Tqz6eu!MbWt4rEhQ zjPAvmr3~|K*FtAcU5Tm&3g({y>Yb?TBU{kZsPJ*!Vjq%|r7??d--h<(m0N8Z33Jlb zYqr4Xgwg5tSF1_}ro2n8a`cYjb(fc!k;pSLPI z{-d_bD__IvL5sy3ldJ;E$2sTBtFWs-vbErVN^!BdFG|Umpl9G`CAQwa zq`!L>g*X}Tz?0)c$VoB9t$wBO)N*`$S=liLhEnij4gCbX_%#KWpJef%Z|@>HG_XUfK82XEewOY8*)&F$dqD`{!fK!GeFl0$Ah^|NF*W#-QoZWjqBs9yP#xq>KtZm=)NTcx0uf|84s24GJsK?-`X_kM2Gpa$_hMhO_sKcjJyx>%{D%f7Xyqv?DZj zkF?fiS=&8W7ev@SqNLzt^p0l_$Co#Th-G0E6>fWX2;G-R$(S5y8_0GJayh>Y5pUnK z)^R>)yZpe2Kh*J;=hNZyHOw{5s`|XlVs>a)dY)_d+T8_~*ETB;lLDN13V<^YovOP? z#xr=ecVNQ-Cn9Ty7&4?pA;$SeuJ(!PNfM@wAcRsUR@0+pyii7l6uN%nvAKZgD^`}s zdG!Hp`2^sDB;1eX7qr4#)-W0W8bZ z;1ozEgecIpGYW&G`akP)#@s=- z5N5c+?kNzxrf6ugkpqg{NW)W4knssscGiEVlNWDNpRE(M-P1}1R|i8Nc9OT|4-=u& zJ!n4KczljRV%e6IjI7(3Vhy_?@Jv>4!Gd-huw;blJuwl9P`A<;am;}-gDbHER1ac>eq7;6@ z+hvxJ02Ud^xOv6!C>85y&#j;GJhSsNEaHYs#f@QoHfy#6afp2-qU~!}RG@f=@un$% zircH`uH7GL_xL#y5G#QS+uMP4tTBp-ZV>h;CEeY|BtV5r=yTby^wjsom@Z1y!X?m9 zv=#|$ZgTpCa(Zeoh#BQ(2=AlG26T^vnF#`0f^Ich(0^|>#Ce!O+yWXg6bZJ!nFVr6 zkPAa#3!w6+loeJB4GSR99rkTsWDk}yX2!lpM7KBr=PyZ26*xpseVNx7n#gHq0rZyx zMA*(DOW<8bW#8YPm0`SQ9RdO~9m2NunUDvF83-OS1Yw8(@rD$7rx0XFAIc{9?`ZT8woh=NZ&xwO(HQqmG{ua2oV>VGz>H_X zf=>)eBp`WE#7~=3G%Za#@PjDI$gGTt30=#}{(R=!3<^WscZwM9_2bMRTJ2hDR^LzH z=o8@bGcn9EuKm{Pdb`Y6p(?CR-`3FqOfUEmM+;?3%HboA{g{VZn-!xD(ZX848{2hh z+$5-ktFVjIh}N8immpvDT$kySN9kBUDUF)QI+tRa*2(C>uR<#eSEitr6?H(cf9cl` zqFHHEsF6dr$ckpp^snAu6K;0JzkIL>H{3(5?arl%Y4T~kB>Ys;dRchP`j4o~f(K`k zCz*e|)f0X?JZ&gfwo3Mkw(20F9ySiQ_C(~HGzq^f)UVR8&bP@*T^i1}kbksnndK|g z>X)4ACg6GfL&BGepezgjNz*OUkgtp&d0{f55{6+ za)rExPdW*ho;JL+m*qJU=cb@3b0-*mhQiM7HzKUztUzKM^%Z`QDtlYOGEMRE8I=*E z1|pHh&F;b)FRoftq}~~Apra}7EG|1{UNDqDJE{XDI@rk&CHt0gx$VT`%l3V( zGpC&lJ|+L4#yMZ2eBU)n?iRz4ri3T{F66N_ROjD2>b-^jT*l@9WiyMYlw#bGAfGMT z5$@K19bbSjf8x|zY^UU0V-CyIi2do`DmZRkNOUyJnpeqQIC)7c5~AH=nN}Hod9_AJ zQ2ox7qm8uR^yYxfGNFY))4=Zt(w<9YN3|6zkUKw3>AMO$&Y)6SzX8hifuS_pMiu&i zvv-boy5#L^$=_*O?K}_WSEmu1_eMW8U;KnKJ)l#g@gc=atlMg@s`St((T@k zRW5s;BxKr;*V4Ypi2Z8c^tp%2y^OMU(z3ZV@3a+G_M=VI?h^dLn%sT#Whys^AM`l6 zIlry$jA!y~k&s@Y1g>f^{?x#9;mWyHzw-!|v{=3^I%slgM}DIZ^FLlZad#%tl%Nbg z`_Y|4%=CZ*vpqJ`&p~2&F9Bx)FEF{7z?&{@ zp5-MUJmb*9xU5K5Nvw`#^|jR6bigO9Mz%`7?BZpceQpoS4tZ`$qTg&!O${@L|M;x6 zFRU%J{fEz*pMZ-^h{Kk2i_rG{ejw`SGG=-2bKj|Qy0nRTpKxXvtA5BmK2ng+*z9P$ z!mN1%&pti_{Z!Xad{{kaA6RwI)O}4#%>MpNYq-<<1!dn!VTL2^8_?qP1CdrbY(Z*t zX6I@MANpsAG=7WwC+PY}Pe67q_n;N!=!;RX4HGDxt3AXjadC(6OqcSnnY!Isfo%v5 zqgSQCzy@BOrmiL3uBMJhMc%ddqG8$MtvSH=E>~J%B3lG9af_15aV!NZ7m^ zWtSPHieW-)-f|Cgv9Ur3vx|_uYiLm0(dEz}g>Ae3+-5?yEI<|4o&2Ych5_lJo9hCq zn{_P>wkp95-n4?zslc>@iT^=PE=ZaTck7z^EMfIHT_jj#U~R72lryAzE$%F4e7az`UB9TTbfzPMTatK4O%N(=>XzQatHC z5kSFy;5)GQ4is-G6_w54@Ysb+l}=CQf0d(ap*E-zFAo+5V#f{8*zauUgb^07a90_= z6LDjngK61vycZLj1A%8{NzyW*VctdZK&u8Z988^MNDh*|^QL~k0j(xmOGr(2 
z-;+SlWkb$^X5R-G<^E{v8jKY62m3mEd!xw1x)L&NzA?JzYUBAiS?|dBXlMCY!`ckd z%5)aj00a8L_wL<0gWuvmTBw3Mq?^1~b#t=gzLG^snrPX=YZGCmn$;!J#YTx@{&6D_ z2LYR|u5In9?j2p;_qaQ@cJ>2!o$H>Hg#pW!VcR=aOU1r5U9jqH+J&ume~XFGO= zov;eT8s}`WiDafRHR{t$tn-;6@s~#v93ZDq>}|rv-!0@w$v2ee_1Q!7!ls+U!l?ll zS#qSJXVP z#^^fs*(*SO3c&HvdPgI525(eL+2{7;!8v5dQ`7;?-rctzz5sWOEFQjucP!AA;-Yo z$sthgEi&GV%)*$7u(RlGUs^Sx3zBcj$Vm_fU=0n<@^Z$}kd_u;Vj=+!4zdhbn_`*+ z7|YN8{%@^J+5g==Lt$StSR5yWuhRor(ThNrp5caV%Zw`QjsBSgI5#Z}-foo$+U=A@Dsm9U zx+V&4uWK9##b%}09R*~!%-#;)0mk$n`<~gT^CA7y5>)7#2wm1F7v(*7sZMgAkk~A+ zTyyFti;tFBSbEV36-C^ySgWAaBm@{7sj}Uc5QtIu#j`W)rh;5h`bia%J15Y!)p@_1XAZHb&+Cl=!A>+)q=2Og3^ulI*X507Fe6}S@xdos{O2g z?&ui2r0O0O%HY1c7vdlK@jY11&~DMD&jnx3b~AaH%~9ytUo0Hp`9-6b!|>x?#1n%` zU^>eFjM7cUwk3v%T^tAz50;^JMmLMmM)h79e*oC>Nz-&9?vr}1p^xN9a) zEJU|Rz-G(MSYwE4;B>Nf>WwLgTOJKQ7s+$2IkJO)Mct&VcW5g~_x}ozN;FDzYYv;6 z6geg{?7J0z8C|wrDDq6tvfpPbzdYkc*q%(=I!AtMX--g5eQjK%wS=g?SZ=@tA*}vI}7xpdLq&QheE@#XC7n~l4<$@?@m7EXw1In4t$*~ zlVbxif3&-$d3Ev0Aqo~=5UtbXHC~VIB(<6DAI7)!Em;1~D&=6nO+_mYL-Sf6FTFDx z(uWA=`hilT9O?)hQWmNOs4ioe)Ke`isg$e%jq#t8b6E`8;>*cfy`=<>xy)p`NyD z-K`?^+}1W)K`HAJi#@d>F)I3ne%=?|6jC71iFZ+o&Q5XhhNKH)Pih@+$|3L(I4%C! zrQvWtLHo)Je03zb>%H+_*1RbTS%DYvS97G}9WT4dSrnT*D$rMxfm?)+DkkILX1EjW zRXxY`;rqUxA%GtMF#$GPjM)m9`^|O|u{!#jt@klO;Wyih6C@auHx?)uf(?L;M9KnT z$e9mV|07t)yGX_e1W*TkGzE9&SuoN|(bQaGe>TxZ9kE{omdxB#1RI zQw8nxojgRogAqh~;6;IrN-Hg1IU^ZLRVGTEo~=;i~fgz{i+64=Aw;kw$f zvB;Eq)Do!vnw6x}aN75$sCw(3z_A6wxaj5QtiJBFC$(u&`@K+pukAg4JG$e2iPOmG z`NW2`ApWMY4%RLq!|jzQmUH`NbJQ?p*&nyc%=c<$to0~A*_O8oEFXN+{oZ~vnCZMX z{pa^Htg3aUnDmix{XajWUCs5;Bc;QNgmN7}@ELi3 zy02Xwm|==n5(Bw2Scy6gGl9?BvY<)S$Mzcu;vf=wxb-$E8WEZ90ufDKqSHVZa^wXO zu^Y7Ti*C)14$`)r{rd1Lmro?>W^2^Q`Ckt{F45sbO*CZM<~N+lhYVo8HDk54zyt{( ziiK`*fc;$QWC`NO!ClW<3qb?wOrRCLFD1Ydqa^~XEt%)6_Y^FXgRnsd^1B}u+lj1W z05c!C!zBD>^F~j0x1a%(_JAe2f1f0#tBEk7U4l`{W2Z@;gY< zQU<(e0LwH$0g)id+&#dW3k5m)raJ5O^R?~Svo3!_G9eUO{NDANwHYw?H2&v-GUi6& zP*PxC)0a3ym%rG@)Iog*55PIcZdObFUM?fCkeLoU^{4Q#!_&?gFy2S)fW%)egGDcG zI^wB)otQlxQE=m3mkIl>Tp{B`%wlhGT6TrPV3wZ8EQ1F3Q7x;3{ zfcIUXW`N@AjWs$QW`x|+rV;2e8!)DtBhx#SFb_ET)r$mH(hXLu!d~kB+`qRy$S8{i z+PD9k7WSVP|5b6$zrK$H_BEHqaT*+qIl4Q5jsuJ!aI7%oCt$=Ib((df8AR^#r{D$- z3A59oBz49cu!aPD|1l&C({i>iv2eNXo9Rjvi!&II;xcRbe+t)sko$(zNW!a-B*?m` z4DdMc2X(O`ER^*Iv+jHrJq82UTL!xqh_$3{S4}INlX<=B)Wfb@{jI%0p#^zB0Y|30b*F{MJ4_)8lNGptMAwUwgW|8q%HgOfnex zxlWu_(KyF_0$s6KD=O{M=er05?RIe`HDIrv6AOxB;Al&K#6F&;zE3}f^dKt}-uXTb z)i0%BI@Y--ryU$mrwBWuc0W~*Sd2&fC4Pq%^o$Vrn%Bxpq8&-~$6CO?&ZXbAEEJmZ z!oL};D&9#++88h6-yqy24CR2tDnk;Q&(?8rC4__etSRY*V-Km1 zcWF&2ZK%5Ya$`ol^~_jR&n4|lo1v`+Xyqr!Zio<>5fxin(Ki2T*lA$FHcRbBy+)%V zHpJRFoH?I2XpUpoQt``>G92Id&F1(3`hp=&zI5;dEW+Q`H3a{;tX94>N94jM6Rf(V zA4;nlcdVxap8X`4=GDt<elGtUv)j4&xlL&i}qd+qNQ;rb6U{0K29;%bH9#aVu zM0uOX)zU8k{#Sp-vdq(3Ohv6I#i0n1UNmhzm6zghQ&=wBA@;&2mkP(DDoP!L)(>h| z)$S>u%1n*^7Z`dY(erx#^j-xZffeCltm)> zXIH$DGo!*hGffl!1T3->fy-f*)hJKNx@pu{5|qdtYcCx1L>+vmQO%sRu>AHP$8TN| zyFmGsO%}Ov_LI}V6|T2QhG{ljaYEM5?SHdrRovvfiNE;u;>1jQH?_8i-%07oEBnoN zGWRo5MZiU1?P7m*DNtBXv+vlZPfJMkREQQ-cxSvRLBG|>5`lhX3Ow%iqsQm}6`c$>DT>8~1I;r$GZVHteIJ15JG2r)4J#J8VMK*86T$Ia zZa5h-3OYtHn-Ds8A}eLwvK z?)gLLLi0e+@cK0A4>(NrA}}LdU|UP9m|<2Br0+l6>i@l(gzlK}43=@j4hX*G;uP?O zn_WIgAl?TcJUKTSwu;U|FL&~W(Es_(=GTHAlLmM2=BJ%~3Rme{?jB%FgkZd%+*t>v zTKE3IxDQeIp+;Wt71xl~nQ21qtQ?*D;$WIW*$wm$1{nP;gJD)T2a5ga*WLcu1P11-b^(*QBSw|U6oAKr!cch zbQ(NPzU(gX9+g!sKG!IqNkEbMsw=wa5t3?}bMhDocQG*OSt+E^>eI=?&w zaE`QF#YU*U6HbY`LYD-_97hR0+~LA99gr~Hzhcb?@vf1hZOy_B>Bku6vryg_yhfm= zq*dHZFnD?v-t#K+X~Yb6c}JSl!Jxiz@c!B_2$wV|YcxUyK5pZ4Ww7w*!qrBr9Eea< z)4S-_A-@=$iFW{XFdGb)%U^wvC?obj;1n$oLq?%rJSfk%&&t;kyU3H$a`%<4w?*eI 
zZ`Cxo8h_pF8iLzm%e$5Torb~E#NH+uz&siBY>k)@Ef5ddjevymII;fX%$r9QD4Us9 z-M=VO%-Lw)N<0MJ>i*7SpKo8Wk3P0ub*##Jjdx9HV^>%iSK#pZFfY7&-!%MHTmaYQ z&!p#R*UHo`@LhNrvFCNGx!5K@!1^AE+g-TdmsDtym5zhdKNF$8t`_Lv;=`s{6${Q4 zxR$=omJjaD4#v}A{78P$Yc>hlGFQ{jaoL!)nZHw8*Ox~wrhwHX`=3Dp_KH9;QBY`t z?Hpmoui(@F`A557V%ZNnt!UZl;C zngTyLbEbUSzl;ZjISq1kphWexT9cl$=eDelC1s|g#^3AUrp5o&-gibtwPov;q9P!a ziHwvAC=yjbGUZVL0TGZSu~d{OAOccK*ocyo5|t>DL6YQHl8@w!WU9!yidayAx9oA> z9UV@0pL@HmeDTo%Aae$+IOVBy__6UfR6mZ2A)@oB?f4pvaL8rF;}k9FHF(#ZI=tney!kvX3z zASLiF&pp*KhR5EMBjjUb6COE zeHhN7>N?<%Ea)I?DcC#nZ+DQm3m{2!PVl-v)WHdT%@g_z^w?L2Udi)K&NvgCB{7 zHg6yZrvL-Lcen+@C$a)Es}|5F0C(3aAm_8FgA>GOHE{48Z+*hZZr79(5995P2|IprobYrsiz(* zBic_vtTjpa3EeJn`;&|s^){)TNA-3Z64(eYVuYSZ_PS^BBp0pQhPITyVKUl4uu64G zOe!2ta1ZbPrr)G|_1k2h7>C3X82NE=n55z=ryxkH9q%g9LMaufta_5PVQdhG`zn-n zpUlOt^QGYQrRO-FdrrI7j*hb0*{a4Z!>-%o3qPot5DqX5#EZ=oX4LRci^~dL2=!Sc zxftk|$>V4#3{t>cGM0l^bnD{mpTE53esC1+$;Ugll__tja+a7P$vQZ`(FLpVc1yWDU|h*nGSnb|fA&C{k5h|&3n4L> zY}72xY=>ya8yr5y{EGPW*5VVQocl4=1eVX)>Pl%ml1s& zZO8Cz6>RiQ7Wd+nh2({92{_)17Qg3wMc2%Hr|Q~Jc4DD35xf3#UYEze0qeg3>pyYE zTlc@&tbb`XOW`=B208f&c+v+*@2lt$9P{wfxXd87s)T5U&%RUTgE)y^GAO;y5cXZV z+f6u4O$d$?J#9SFYMXwO_r<_FYs3E8*px9!^kMv6+otlc+R3x?SpG7#%WYYTRSHbu z9;n6!9aU8yZqXQ9N42mBi+jj)v> zOuRj653#7D3!*}qY4h6(W@2LEgL|n{qt+FTvq@PdW2UW?%6hpg5%>gsHNh530jFfE z!8}>DQ$cw)8YlcSAJ5%0Iaze+M&E+c@t}u@EsNJ>tiDxpzg`|i)_+h0LaN1g^e&_e z5z3>48($;1EBj8g%aH0ObS5dLI@tQLxhxs1blXQ03r`6?SI< z8f@whx7QemnA;&JiA@A6@zo0wQ+cXtW01|&)a`gNN~Lm5uQUIrYOk_DpXYW0-_%4p z@*NtwDbcK8-uRAA7ETYX8`r(O z6}?(ecI;-TK;wfe4#mb2PG4dJBV`IGUcASJZ7!W!)-fkNfjA%JC}ogHAH4PN|9M=70F0oMLiE{+K#PkH*V&Y%f*O712Oe-;~7BOZoRq3-IbfgG|Z0AJT5Ef-ty6FYJGWnH4nisDatmGk?h|%{Xs^5 zXHm}dLRFt@Z@SvywVLjDRkg~Hp4~5)`PVqp^HpY{ZA5*MdPl=l`#dv3qz*DBiaW&M zXkYjO$At_bkSwcJ1g%QhiCo}R3=Z4-O4y2YVqIzxjElbd__exVTJ5ra@74VG zo%XgkQBGojjfpf_BWKufbkxWynFG|4_r#hpEjCT7_C&&YX1)(| z%nKxep-5|o^yLLvY0cX1yD0rsY5s~3?M%bufZ2>BOXa`qA$=*=o9^M*^C!fscd%$MA!QFSE>p60rsa49tLfIvbLEv`bPOr)IL`w%RW!0BXC+q5Vr@HMLhrBU?dFF0d$>?ZDcTg%Ka>_&S z+cRbQIXA5j9rQlhud_|#FR3Wv2w|q3G+ycbeNB(%l;I3d7ae7lM6^I@e;z&BLJ6<$ zO_rRTU>eQ|^(FItvZS(iebJeDy6H}Px1?vYRafq@(U4*7ajEGG=yoVUDqzduegH9R zeJ;m8N3YrnQ><4sb1wz6vhckS;2^Ur>>-luA0ExEINT@e@v!@6Y;J!D(zP1Qku@1f zNL5jDsabQJwIBYIVcn0OfBfzKJF~_t{YH-q+Wl@fb1rpe^yU7txhjto_&Fxmhz5+w zrO85BXH96k3Oa&0O%JKJKfxdTNt%Y{e?~Ip)s*1Ut9sEfTW!rJ%{%I&DX+E_F!3<= zTD;B)pi5f5I$Vp;SZTivU4{~0!M0Rk>!@Hrjop>V+avaXv20e?5iCSAsuBtg{cuzP zV=+muU=*QG@VO-_lfynNLiy+6{2Gl%0$~(|P*gVvUqpB1%0(#wg+NqAPftNV2s5ps zO|82Of)*oW+jC5R`1nBkDzl6W6?=EkDy0x(R2xtui-Ge0#f1B=G|uM7l(^$ zW$+y?R*Kg{%w`+hJ`c78QmP

D36e2x9<_}Wd%gcft7c&Eec*p_Cn#n0}F@!Ft zfnsyq?f{9(_uQc1%@oA9>DGwM{$2Lf!?2g7O{iU71T$ti7Ci@QD}wJ;-wCBE8z@tP zL6VxHmhv!QANHaF8Vs1&vr{JK>yt7dB*2QPb7YPQ(D9anFQr6_24VJ`da}s{dw?AO zJ^=#8A>?~NXJ^eV8G~^9JcvLaMuI3bKzY*u)s{Z^FFJ|4=ijL9UmLY?Qz`-N1A#n# z>0n(rsF15#*Zt3p2)0GpTg8!cS z-MG2Cc?+g&#N-B3mbq++?`Z{WLK=msagTq1=0u@hMmiHEI{^)NU~>DVhgXWQ0ug+d zlr*z%8w)*6 zVaLzr$-LvmPx$NK8DP|F;sQ2xk|Q)4Xq*EbR$akz09uOg=gs@NxXTy znWfZj6{kF3=Sw5F>DUM!>L;`dZtXBS(kos6eA|LnyL?QP=-FnS^qZ1(>VkbJ{pXNT z5^IU*kwxrqsd%Q&^Hjv!r4@MZA39=b<~4>|xAll5(tRSX_Z#AX2PfLM`Bg*jNOPt& zi(aTbHuRX$-3G{tSlY;X;|#q1)l=(Socq0P+KR^{iMRUEUvJuqdM6S@RO+Qv%pAYe z`7p((_oiOD^vK~PYJH8-CvdxecC#mrkIhY&>tQ=TIg!<&nI11I@#1Ue*~Yb?GSXO!;yHub2qs;+=8 z7%U}cMX0<+FuI)Xodo3P>Rviq0hPLs>Z&CdD(-o;6{=`^bf8P`-V5e9K9}SL#G_MX z3ge9xQ3I;vJ~IFBZ;G$P=_Y+8tOcIqIlPtKnlIy4y0$%oyis~K)NJiK;*LkBO2tq|0rmWNq>O9rf67g-%O zkN9J4^_|Xts4SG=}!<7tx0}kc%gQnF! zLO;YY;?Vmo=xZ|$aL`K@H=*Ei?)o4(`LJ_D z%7%LDr3>X{G&MfMBVB*mqU`j6*$H-N`kk8|z%8~@i4-LX)vc?@-7`-R+u|SpCrvQcic*Ew?6Bo74CPd!5i zyqHT0sO$H3lecud$o)#!s?K3n-Be&92mTs^?Lp$z0qPSE^+81YLA$Of0Gdt+h9c)P z01=OWf~4wsfKz;8s&Q>kR1AY*1PZ(~)H7gn9Re^c`8M(4F4}h=7F`52s7`h635X%d z>>B&9NMEvYC@W^Zs)I_F1e;Q!m#Br0#ddz{grOmjzo9jn_HT|ds!UDHj8 zO%*BlV$FTFud#G^uDq;qwlHfEbm>1{BQ0gVu)Usvx%AQe{_+H;(UmrW?v)SA8im`$ zn0A>+_ecRJs||6WPbQ%0)oUp@Rx-0Y@G@bRtQLbStlB{siM2a)1U60;a3u^^)?*hQ zT9&UXU7?I~dvjRL?BsfYK?>1hGZfuPQf{d zzV`{JfMOCMH+IaXLE&tlh=hrSZsmG$1`c$tCpkMfOGcj7+6^m><~XZ^DyT!hZu-C_ zf}nki6RL-M*J-8t)N7_o@dJUt6F=PJ^eiH2!mj2S+8W> zlie4m;IF@7t>k15jaRV z5Vc9Bp4aDwy0tz6Kh*+}zbu1v2m!Z4Vf{EU0gS{9EeIrFn8y=Co!dr%jh8(-C+Mog z@Fk+0D#>cN46-yOxzs4UzBMx~Xa};dsopKx8gN$hZCU>+%C{kjy@oazJZVvN1HUHQ zMvxA#6UaVBV7lt03z;uwNHxi*_oRU$lJ|xaHq%n+A!U`psjs7%Ey_k$4XhiK4-yhu zh))8YOwVa=J7H7xeLv5%&5)&rhZAq<`?!73q-{W)geGHA`JGL0E@CK7zX3B;`i=WC zVInZkCl#l0axvLR#wX9M1V5b9J~*`HcXs1&b=3*D09zlg^|jfW(gV$I4@u z2F{eJnc9A7{bo#HWO@3rQdIOs=}Q4{II%xvG)PiJLm+ z5bB;0)^3m@ulgLxQT?Hc?tIiwU;H5H z-m`6|$u%iOC4E~7=HmC8{b<@IZ!d>3V>dI5HP8I00z8h(%dwz|*HYS*t9TATKM3LA9)y$+&$OM{aJh zy7D&EtAMa6HNUZksXJqFpXk6Lo>Exl!QL$r{cYTQqe|tXx4}l_xYkFbO`-HgJJ}W< z4yVGFvX(2wndT7-W}Ak5D@%&Fo>E0Wm9+}x6)xFLKZ2w0r8;kJfxH-OW=a%7p&q;O zqR&ZX>dKXukvXYR5+ao;Zh|>bZ4t77(%Ldi+tfKMtHRXQ-JH3Y$mm>#J*IM0F4Q+h zDA(_(?!<-ln?fD% zBR7QP4^)Gu0Wa>MVQLEZVG~pe{8S6I{^GBhFShfW2L`EQ&6;G2i_(Y0G>tPIVE!F8 zBiht)gpiMSEpi<0&;KrX@MI23_#?5g`C|m1x_QLcMaOYzbCwWpl@}rB zq5F+D2FKa_)GA$eJZKd5)X~)m+VN}_Kc0pJt7YCi-Y+$v$tE1_b z?L1cWI8J|-_{CiGj43+KPVhufL!V2gU!z{8UykIsvvrGh6y=Gi2@rB9yUEPLo% zL)+G{Un%jwa!dAAldrhZ4wAH$kiT-kND4pge(&~c{N?~Re+l8Bl6pd=)#>M|v-`^P+qcc{=BvxLk@oVh|>J z?S8KA0kdOi*UwCbT!fL*=q4+C1Y@l}mzBn|+_&($6~&(Xj9!Jy&)^K(aovbWp7#Tu ze1A-95euhE$)~cnFHq9RFTfY5k)w7{j}Sjj4T%TK{sNsszb@|1xE7kSJ9jsqM`J+9 zmK=N285!*-N!==ybwF9XCwe!3tWE$_jfQ$hOU=%%A>7DE))lS zvmcuHHgSyX&Jvz6uSHC(U&)RCMLgK}hJ@g82}bkniSLyx_?P9LnAJfuQNAj-jRp~M zoH0YI*!a0-b2P1Cyo*nRE5dx~-t4xse2ZqZxOA$mZEiKw9Xlv)R$L(`r0oh@43k6R z*Gzt?jXLtzo`^I3W5v6&<49}$CT^)ev^1jOO&GtDn7iRFSa}ibg#qXLh9g>+%j}oi z6(0KcJW6@(;(FpTFYTO~6<>mepmTW~&2%uUO68!lEmFDQ#*Sg|=(T)LI{GJW;&~yr zG}9JR?GD}wnf4R|!;D{CdvCR^_u}}$vtNr#i{5g77PP#_R@-#s#@Ts5XBPh)hzwdj z8p{UUSDnDVj8wZxL3S|0+>EfV6HlViE;6%WNLdn;zCq5Q+5GA$XE8Omyn#2`{!FyZ zB%dV$5VmuRn1~-V!1OrFYt(U{wVCM^72On^FBU$(sGV-|W9(&=WLKjj(}*|f$`O2P zq`jjrI$9np)Dz`g|1pZ^QoE&gj%t)7>COLV%}N6m={$i#B%+p;^sss zVhK(^(-w~>XQW77E`e2i?3N@Ix7BVuk_^<(=s<;pk~}_?N+s6s!(sv`r>Hz@LC_}3 z0`-F7=Ns49%Xn0~ssqNo zaj6%d*i;KAt=64LKdS9D<1N*6JYe`rz)XOCWjAjz^fKpu#OVWdtHsf{ znWrt^phi0|^L5+TWkoqX26pA>VJzWv{6c?vM=G~vwfIc0tmA)7Y z{8AF}c}G9+o@r+KP)=t%hUfsage<5*u2=+xLI9{*f_V?%=m|DZQB%`VvkZbFemT?@ 
z>KrJwaYN!?Tm-n=_qW~;5~o0H2F&rc^Uedz?sX_@7fxXS$(A<-NOBd03qbK?EI5+B zrYDIi_}ID%m)BG-^eI(!c%8zj9>(vzPQn+YQ{#o6w$C*7cYB*BSkcBi2+nl22ho$8 zr8>j_$1B!kA>DB`7b)H*D5oUJZWKO@6O_uR2%pyS6$`qzbI*{w0|8fNP)b0+58n;u zU+?vLZY{7=JUw$U+is;ab6$5g_ApGVU%Zvk=1!lJt=_Qgn3!HcWv>LMw98BV7p&Xs zhT}#m1@Fn_D>Gpihv!G`uRM_(h_LQU(u;g0=Cs(INnezfgVssslTtU4K!3lG8`G<^ zefo1c`wdkBm;}0-@@23c5eW{@6pF~(Nnrz-9ujE^PPG=?>LiBNTmiVwJ>LC?C{+`U zT&%gd54%BxCYOQv5{3WeYd8%xxD!IDfy+y)wa_}K*VSD|?6HEQK>G;VeYxYiOd?5= znJAGSXGX|;l!O{COB90w*YM(ATD3hE$y7|TDzM(`d*7Ocy406vzv{L$bk2X~$Yh%4 zQ^oIjo#^lYF{)_WOdJ^E>es#@-JR+iCC0o=$xi-7LMe7z>#g*IST(gBd1Gz{2G8Uz zH$G;iQ1SHd>~~E!OKm%}!|hsK(#AsV@(wLtaZ|yLu{KiA3|3vlnJ6%#>TCGBoi&)2 z_S8E+2p7w<8Wtp~_dAi^oNDIx+HRV4wQ?)x32%L0B2&WdQirpdERb?DXZezW%a5GL zJ?l@hv(}y02{x24bIrAO=3&pyHnDNPoq1IE?LuEbq|CdBi>yUS;#2&UHs-O0h-m0H z);jUe)*AXp*LnhY6Vl}1ay`CRnJ9~gf8!9XNBF!tJe~(76Or{RZo?+tv(=fSOB@aO zc%{|7C3kANi{r~{Bp=i@^B>IDvZ&4ta>@w~rdCF>IH8sr+ZfzKrFb{Hi$f&qclABN z^e3VVgU(qyC9x^BT4pDdoTByl))j)FQ_)Ihy`)619d_3~=I%yBYPQRO#?0eTv%KRA ze_Fu)=RfWLmi#YVg3#O$jI2{$zD8hFHbmD`Vm*EcGdG!qZ7c4+kvcb361tO{C?iBKV{1B7D zQ$fbZW!WtvDUW&estQbys3rU8w)PHj*OsDFKC9B# zsdU`8sa*%=?`Stn6(x>yJ=$|`nV5mpDBsfg2G{ms@{p*!#wRyM!uk5f^kvp7jIxG0 zqT9AJIcq15JPzwn+TA&Oc)BEf$M56gm)*SR!U*B4e)kEIQy#@0a=G0?#X!^%#8Pqf znpy_`UvIhk^X22G0X6@_N^;iRr=tsp77OO6z6#{g8hN!+d})N9rFqH{Jrsz067+M5 zWjxvKY5C@Rfv(w307_{=TwRAFDM@nAWrooXnDP7?#2!-yX8Hy7EG<-j6Fo(Ba|7_o zQzmpDCY%8SMUFm%W-^foyeiBYK{e7m0BYbEr$ToL_(3Xc5lJrjL__7^Y({KMVe`lb z`k-Cg>$GHX$q?jRb3KYklL1nY!=7-Wr9Bta8R~<<(`o@ypcsb0ca4Ka#Y6-nG|7|< zJhwFriB0<1_ef?fp}}@8=%BGQupo* zWa1A-hE}~ztuCmC9vj|PKz~0Ry)sC15F1e%&blX)FX~W5GiC|2+0GDGE_C`vFU7sA z@QG&IY3ykYZMh1;&8{Q3iPCbNtY|&t?gCDo5xG`Kl{UCf4{TdVYu7x{iZMR(gcYGRI2S!khvtf z$_qftW^kR<=__h4;>=JPxb)rBHM=||;bq)RN|@4UbJwvrfgphl`cDF{jXrR%U^U)o znclC5a7LAtFGb8$KKDKidiMR0o#ffd81w}Nn@fi$L&Ps9aNHxkf7mhN0a23Imv%V# zUC0=c^IY#*evJT8j5#%EXPl$5GQr4l+(3M(m+PzX`GXz87krLt#APYm=JgM1_Kr|E z5taybf{@nK-Gg)}so#BooG3;vUN6V8xVXr;_gAl^Fzv~S+6XlUVi_DToALg z9=qOQ73QRv7kwm7B{07s=ca0YRnNW08ER%3E*vHI$)+g_bgKi!U4{yIayU zD=;pqrIBxT-3cxc9H{Iyo@2LZRS6q%-!TbvZpLuoJQaw4panYnHsi$%i^A&l3X1!L zW=PvR&r-Tq+3PbN+<7kYJgiCYmRz!+*)v*y>xn`z(1kb5L{zKa>G^cz8z&j>6q9E> zYTjyJX;Y%txtw^2p3RLSTPm)OTbKq9YDMnNuTUp?o z>>%O{@G9+lrIcgCE>zg)K8(-;d`j$={T_SK8u>suI3*Flp-3)HY>HV;5NUv$Xy*Uv z;q3b{^X9!6{E<0O)%VlE9h=C8y4usBNk(91xPXKE^GBganp5=Uh>03BefN#SciHuK zsRr$^JD5V-1rk-L7`5Z%pSU}5ad(S)oG~8gDE0E3T}-%$sbHTG5)xj9cd0mBOHRyG z+;h0j-H3%?X#LtnrrJ@t=FlnqFC#afLP2Eysk+X>r!b8$|q^qy{8_>#tAxSs;cYbCcFb{CD_~zyA^FH-wpJ+G<366^>#A&VQJ z4rL9)zVCL`T05Vlr=P>mhHQ1%9hwUN#uetONn0DU8P&5C(^g{S(R?90}B8qcOP3+-StIS3Cdu)lSC;J-p-OJ6vy-Y@$=a{q6BPzzVeg zmqG|Mg2p`$2E~JK-Y%1UgeeJQBz_OLA$jxi8HSrzn+4v~K8E$)ZAUJxL(!uk{H9_w zI#(6A`kgA8F0PxS9q4W5L|m|bVNcK*`A+%prfKB)_9rLFt0lF*sC zRLYWe$sTzH{M14%B~e?5=~Il`Xb?Y%wsso1CdY3YdLT(pAtVroQHoA8wUP18{rK=i%<8&s05AKu zPGR1+G++7OiHDW5-R}P|Nu#H#|6+~u(gSkX9tg&^iz)Yf5&w0?HjPEEyRP@UJ>AP` z8nL^ick#WdotM}rYmUH}1HI_b#()_XyYnuQ0 zhcqOadaBz~02(;pR7u5|G0G%jXaEEP7QjAtL4*oXk+n70pWd%(RoK$;9}9e?J#2_^+*3!Rag4E6)W;X@-rQzZLAl zb@0U}tuWZ3DLJMOoRK3FWB}cl@=t2gHe%Sp#3gLIs{KhTmg#wpxS7>^e7jEByRo92 z{4iw;=X0XgQ_}%@HusJ*xKDE1PVU3lmrUvU0E{T|*ZK0=thyl4(GYb#{k(Ly!EL~H z(M<^o3AHf?s0gSY*qZRBUPDWw0>m*D+-LfZYVnn1U~L(=(-?2_msr+LNg^MIhx@4p zZys1=c~ujc;ps%t(5N}|m{WGd=96;9I-gsR<0vEp>-w7}?x=a0*U+)(Q}y<2OT0b2 zw`JGYnN1U>Ha{^*8W6)LgXvkrHX2?%{mL&@+`G!tXE)}1i)GtZ?3~#*EAoeCy+|D~ ze%e;d2?a501raY>gtC_j&y9$cvhhXdGpfklIj8O`0mDBCH3O^>--Fa0cx`7iX+IFn z>J8HaodZo+RRY@*u%&8ZN7jW_B3?_(w0geyT*wjFajb>A@p8HHbKG)wY zX`w~2I_|#J{R6`N_3=Bb+GbS4&Px_Nd}byE%6{F<*CjLlzA^tD_J04jn!}lRnz(7Y 
z$q!FfcaY>&PhYalG|KwOeD$^uPRzU9^NP@kk7wxY1@7$N+$yDxh%QxAh8*r~ zQLCnQDHAre_EbhTJ`Kl_*nwCgy$hbHQX8 zLMD!|0r~#CFJ?%ZZyJN3R3M|rAlGp>#H28ymQWNmvLhH(l)+pboeS zj-Yc_L_VYKgwunc_*lk047)ud(DKW?NAe+aPlfx0(r*kIrhxW1d`INLqYfvv?_S3B zeai*ak#Ds>&A%V}emWz&y|F${leS|xDm-(3htVo)VJF79CG41F)cQzyZS9kj=k0IS zGR3}sefYi?Bg~@fke7k}YU_E*8Q(2xVyHDN!qg;htfCyDGK2UI4(wbzOmrjc8Y1sUKLKr&s_l7l`a#x_%bb2HDZWZnWf*b$XUivZYGBAput0rF3f zUd@TE-!nRHxS``eRpzv6>x8>Et9RB%>4T9`#d<02Y8UNb4HI6$doqy5BIQQA_y`9{ zvut>$wCBQ$kB>F8-YZ9~+v0S5E;p8ZG*#h)E&q<=^?N=-v;Q~y-To7r{hn+6vYeQw+thbHmjy^CBX!;zXi(N6rlJEa#$-9X#0YC_-76-XiZD~W{nPsiE89+i z4)Q9pUtdkFT|Rt-sE^;OAnJqcsAu3?KucJlW<8Q=o*fd#hhkfu@zv?%BXgP>l!I$z z2#$kUkOdIFp!eAbnzYLV_3KVBs4j^JxVIaPrPL#fiR5SyZ2f=!kz_1u{rld!ZA|cZ z%ETV?IP&JE_<^hBOGI?FoNzjUGFu5z=w~pUW<8!l%J|N2C-R@*6GBusH-xMD$$EOu zc9;e@Dc$qanQMtj^A(yjYCYoZ;s;)Db21p+g`>7kD7{=usNQIVS^w_%;_t&{X#Z~( zWYsUu>GRu6RzjyvkP`Yxc49Y!r*~@DI*tDXFDH8lZl+pEuzC_@gl8^CxqI=)Z=s9e22n_@wjm zOCjQ7LIcQ}G2VL@{Ux?^t1EPM2P9xw*>au&uN7c4p0u!MGMco*6Lh(`c^(M#^TqX9 z*UFENFjq&7+xZ&SnfvYE-w6lP9wQZXOxm7hoqljkllF5Lxb8{vBLWlVA%VfTxL{Bs zP}Xv)C+Nj?O#DpMiM8%!lkJ44rtDF^?_&_R2mkEsuuow&$_SK-TEnMJttDMF z*R<`6$(gB6qvf(j?8bxvPle{ciE92{Q1=^_1G)b<%RK*^yMu)Yf`s#jFx#xXP+Z@E zb^X`S^b0&h3Fe@s#@q7KY`y|@Dxk)fRT~JoMI`~z=0Rvrix1{LhIgT=V0@5EA>cpL zgKv1sU;RTR926fB**w76=?HI0S-waDux_aCdhI5Zr=8fZ$GWcXzko?(RN2_w#)B z`^~I1Yu5ap;n$&ecUSG|tE#)M>K!5{BZi9n4jBpx3ROZ}SOE$ORt*XYS_uRLwA5xI zmqI}ymzW6&$w>$aLFDXhjLj^KprFJ<;u8>+6xE4(Pd)Y{aNJQ_$bLe>{K9H<1jk3q z35Jk>1uP4K$vCotgMFDZzrBSQ%k9r3%~HdIqnk%Gl#`$B@JEgJYR7bT9y>{Ywzt0E z);)P_J6*Xw;q-y(uT%H*My7)npkWjOZ|w_)1&3#?r^A5Sq0m|U-iQ4hbwPV`1s{y# z^IC-N60;C&jXyT?aQpgNOwMVyZwZwt5MaQ$f20&J2Fd<0f@=unyFu1!tb*VZDG5%% zfk0!FrEUhmpQLTh!xJTCU}0{Bo)JT>4bUY(prIz;ZcH&RAc?vm%)3-nJA4^CzFYkwjlr-Y z1dG2V?58@uloG>REU9xyrms`t(@yHZd3m|ZDZhu^*ni|WS0J+Ud&v_=J&Ew3xu33M zvS#UxaX%@_EJDb^rNWb!$n|CtX{4kJy3Uqs8KOgs1Ex?XF6R$|owok{fet=N2Q86S z0X*_SLdG+2DHE#~H9=~PV|jQtxIFg_Mix&R2fItm7z*~tu1S5c`Gq1(;zhn|=+o7( z-b4+4KT2)C1l?(SE|*`{GqiR4`#VzNcV}=r?U+f>Q{nYT;SKF&7Wap6O99<)Qa;)1 zZYY5%^Q^es?u=oRhv{C-_d8TH1}E`K5}_~5MWR7KM2@@J#w5%s53%s0w%x0##v3ljpz}ASmT;| zDB=DCzgbFP4%>(HxQ^aDwv(LEa6rp^b6DeXW!FNOYwufExg+(2ZH0|$FJCizD-a`B zN#zvB6dGwS0-u>BcTNRuj#UJu99s05^%s|1OMzwq#x(iCr*~m_S!SjN)x3XD-iI1z zPmQOJH;r%aaXHd9B6^|lhNcVp^vQi3#1jgC9~2VOPOew^@jgjGiarnC0p9`fQ{=sG zkG@ewqZ&a8E*oZQfOgb!Aa8(Guk%;L3WIqwLbQC}Pu-krFt&7B2n@t7U>8CcX!Zbt zKCjLnXXdUB&4_%+V_h~qi5moGyXQi8yLb5haGz84#6S>R&>=`9AUgo9Q@WGmYY>&h zGzAw}C|IJKzMEMe>zw%<JpM0OjEbc0qPJ|886q`LDcwv3P zWRH8#cmF<6aN^fTD*3oKgVtYow=uVowtwI$4-1D=_hKy4sgTUlu#w+rq(RE)Pv}}T zc*L5Oo~rMp&n1xj10n;8;Ux8c2nTknhkOk&lPJnbHuN;Bo88}Iq^O&1&m}5w1)M;30L}NHQ z5Z<3<=wz5&NnI&YnQwSH=pCaNMx5b1jd=($uP- zmB*07G=Qm+bpw9}50&6Y#LtLN5d;xp(uUHW(zPko$;~=>HFV|+D@H9tt*IKxc}Ya9 zUZx+6cC2b=O1h3p8~ivIn>(Ls;d?r}^s7XgdI%(ihB$`05^hThRJ>KVOBzdC)fd%J zN>WQGEvqf{EorQR_9w3d9vB{!HN#88j*S;(7RQ`to$;OXk6x}^PkDouop8RSzREdpUw>(VtBC6-88#%Q z$~_lRrfp5dE?Wmv=j}p$D|c&uOG)&O2#F|yNHJX{{gAh*`J)b>jz>#SGK~~lRXXRk z)n>~?Q{VR^tRuyf%jvmx)wRWS#7DQs<4cdN^5d~f8x$z$VP7xkC|D=hDHtDZKW&Wq zEd8D;NL424=a!D?HP#N)4q`zq!B9b>po2hUp{B0xuB@(uF210qK-}+9!gOKtcy?q8 zOtAzDM1f(*VMP%sI{U5muY>2N#d}YCO~sC?M&*sQ8g@>D2M!k9bH8WPB0IVR`dGR> zjo^Qqt4&o5&kxR1*U~;hJz{_Fpk9~i=IqAq#j~dr z-L<)k?h9^g2JDge!FX<3exeP+cU;39OG>#4%4*l!lqa&cMZCqD7LMj$qV%E+iQJr7 zj!aL-t}+fSuq{w5+TL%zCn!WIIxMU!EVq%frZNv82#mn$%Z@@%v*n&xFGIu6Y^eN0 z8_}WCX$c3VL)jkBx#!CL0tzQ?UelfS4_qvUNT>3BZ&&J#$>Vfv-seOw2HlnoyKRMX zhtt4fG*#~M1#NDRpWZFULDvzpe8sH^t0mqR`*kNaEoKf?UhU4A2DbNW_aynh!s?c+ zwHrp7y*49Xe|9weaydm6Lyub8T=KBv-+oC&Ohz{*+TrWup>d@r=D?PEh! 
zLBz>^!1m{sRi{6lHs!4+}TBJI!OO$9%+RSchie{1_nF7hmKfm{y(xt_eo zt)~hZkDZL1Bz{p6fE5{b0_YF@uZc$-sZ*8_f3oKe0rEfp-JsI~7Jn(6%fC|X_#OHd zZ3FUz>j?aTY7W_Ho({xz=mPJx3zl!q4zaIbw?)`_>WvCWWR_9osEP23UJ+C0pzL;q{>VeSM`ex01Q3ghis|( z;(YZ``y&^eFJPAiMos_BbwS0&fdvo>Jjw|mdBI4p#tI-^A}UEmyy=J&edZvQAq^(* z6I7~&|B}#+?ZeH;3YjHsY=(4`Ysru2?jEO4OHNehfCk2}JzP9cE5xZFI6Q3v>$B*t zF1o9W?&_kuy6CPhx~q%s>VLQZyy&hXi|*>8y9(Ce7TwiFcXiQS{e4Al(Oq41R~MtJ zi_z7Efc|21buqfS7+qbAt}aGb7jj1yqpOS2)y3%QVsw?W7+qbAuKq2*YB9PBQUex~ zfftj37n6Y(lYv_nlYtkLfftj37o)3-(bdId;5UoW)x~7s#bn^cWZ=bQ;Kkb2#oATe z@M7)i-|EX3YgZR*R~Ktn7i(AlsLWrCt}aGb7o)3-(bdK1>SFEcALtc}(bdK1>LS$A z|0L8Br{ZjG?{a88WWD73e~p8v;59!46xGL}-E<-7fi-}zc!(Qp2M`Lv+IRo}677ge zfiwD?GF6EAwHjQ3xk2b!B60wLk_n)=P!5ID!lCn?0)Q4im9J7Q!X@rR`{v1=LI|0P zsA4AnA{N2Aa9nSp0P6|!?!$+X@#f}yUOj+_!XyCX0|Dd?pM+~20rtVw83L16h5|T> zKCR7n_v7*mxzqE(+n`^U{v-zdzjeS2PotZ9xE>3*YXHUu zKm^3weu3%|qKHwDBUo|xeI^(T0Ph1}f7W4uM_>grz7v8^p-Q+OLN~n+o*L!f0ALQE zoe>Q4U>lb>PPPZ|AOI=`#G4U#Ad=WSJrz#y;o-v5(#W}*yJDxMd8V&+0PwYV080Q6 zBvujt`dTFbKGANO10VyZB7?>iXyY&dfH=b+Ddo8JF~d#ZVD#7V$(GUA|5C#sc!T~% zWemb4EkY2eudF`)1uqC-qO73sQ~~T6Val4yPzDs%LnQ#@TMP07l_4htfj|}Mz0^Kj zH+gE7pUc5~E|{V|n4%W(@#BOE1^QTjOZPEc@&o|Dgm=@awO(A{yumCC)KyE@z zpL{YWv=c4l-w;3n)s&g!`Ot~#9|eUl_O$kWm z&ZqdnRucIz3lq)LVx8||a7n*VB8&$nB}uAvvYnO1ZV`uAOhbd%`gHr0n5Rj_#&1Jk zb-vyvIt8$GBnsggTlnKS^cfZbxS1!Y=;bj8H>gWQREYewxNZ?3oCy;^M!q@vy(rHN z;Iu41B!F&J_X6lg3{Mv>2PjzkLLe+#wvs2MhhD-m zmjKip3MXCweZ2Rp2)Y%Z?VJab;f)CYCmgE-eFO8Q+99~km(H@I&u@nBi=wjBUcl+M zAOD0w5?xR+FzS0T9PlKXBNu4S&yLoh#l_AqQvUxowuz(yC{nrrx=SF(S2`2A>8Qx+R$WAt%c0sw);qEbSrDk1hR&_%z|#&NFm zjrZs2K%Vx)Va%s#C0vD;tM5!YDR$iN|Ful3wnaDMANNT_5TH6PjKCw(gxPQ$_v7a! z^RNK=h5=NWG4)465{_kt>+Ty&YoKu^mEKjeOseAj2*lak`;>A&oIY$csFI*BNz z@weiI%A!t>pMjg}dku8O8sJo%cY<4YE~qF|xTj43Wjuwji}x?m2g6(Bhkr9)MCSrY z_}VBbmNZX(h(TV8NYhNPEW-H+Uz84* z#FmHq8E@UgLSydq^!l9Lm6!t#y>1zP{vICw%gZJ9{)A=f{^iQi5?(b-dxASj6k?77 zoN=%=!1je16+lS=xM$$yi;FObysLBsECd7W^B)%A##?ybEr2Z6H3GCRIW7@BiYqgr zqw$0#{J3=3pC2?v1@O(iR3XM9hKDKUx<%prCUCKn0G#sk@Q2qZ@Ek6MHDK8ia3x#< z9;FJ9M}^?j7o7ltq0m3R7wOR*NgT(1qE~U;n))J>y<0w!+c!nd@eTjQysP%b<$?cj zRqgLp7ncuI*FypHLJa3l<~}0xPvUyTe!{_d|F}N<`#W*JNuy`d0r;{2f?4~viSVQT zx;X&Z%3x82E-~2L;T3(sB?dbHWPtSqfS4`xi2?d4K#d2k5#tE_)|)GWuG5^41tuum zGy{C%W}#2qL$?w7#QEVwfb89FexH-s0w7yCv&y(MT1yg;xUTl?L?_2cCWN zQDl~jt_CYD*VaeTaI-UfmeiAH1PZsXS5rYE&=PsxaWVb~K`2S-B;NL4?(E{uqqC^02|L)+vAh4(@MEqxK zfvrOt&^iDRi)Xgnhi`YK@Z(bW-V1BPC%|g4Q2I;oV4sPwIb%v0s{FL6>3LgPn(3eb z8dNw9bEp969LG};ZXmL-*@O;Q4~^}NdNh;lK0^h|Y*pxVPq?NB$C#}AII`F+pL6an zwb(yLvp+`ZpP~H!*2)q==c4!W_cHje;w%3*s(`c>B-DE%5#$bU{DLbk2BYeD9@stM z_vxVyrbjrik=p*PYcEWt`rWnn2aBY@x9m^?gRk5vvV_4 zzn13{k-04XCpWjKTUEW-mjvM5)6gxN3o}F!zDMn)0}SdW+R zZ0ZRomN4h|ZSxl>R9x z!HcHRUnCoefSCMZQpta0%>Gj@gHilYY~$p4LYX;@J9(L2j;$vw;mII=cYS5uQu0XS zgSfchmX(NGi%sXJ*2euMk2-b3{I84=&_lG zuR)@gBuTF!Q>uNUg1m-Aru7!B47%&*tW@YV#d z_O>h6>`$tnedu~fsk1HXQno|l{z=(u!_o!bH__LG3Qguat}^0i!ET(;Ww$XLpAlC0 z^)lTyzUJ*?do(IwStcn($Ynp(W(#A<1CP5Vi>@B-Sd)=Ur&pxhNDnS8d;Iv9bt{hl zV&LhP;Aca7aYyRx>q3VV1$5^DkkFof?{ds=pot*Z3yA;Ee zYu}C9r0Cn_69aXMhM8H-W%-Bq+{r%V7MN>My!YC+j%#PCzd>K)mf?KJP+x={KND3! 
zea;_-o>M~u1Ai#`gEyt{MyflMYRN$y1Z~W z@jlV8z`EFCy^39q;puvtlnA3-X({IfEvfsZg@>L5{cvnY$H!@PgV95(J!R<`XLWNg z3Q-lq zO#p#7xE3r}f+V#Gif|KJutq~=SaNrd5vDiIX!_5P=Pz*-+IqkSYW?p=rbf8tgLF0s zKn!UJH&^SJoXZnH9Ri3V+^DX-1?&a{+oLZ9&_lZ8d?SB1hklY@Oew(m{FRIUk88aD zgLh&CZHu<}f0r!|@1(NMRPwU?hV=L;Eg)Y(06Dl_C1{dCUP9k_95b&T?3XHCKl0`{ z2p@rEK(6v>`h4fiP##zgj@Gnq(aJ~3?T#t!DD|{St-q7(v)utTX z;+7;-75Oy6;Ab?%v1S>c;sOHv`Ne~qT$pW#jHdB^*+{7QgZ2o^!K3CDEY8(@Uk9pp zeK%OIA7a^UvzhO7kAJQo^5cDrD6rqBiTrc^gRK&dS8_W7dCKKEYF^rQW8s>9=9t** zgqK<*wKb{?WViUl)LVb&3ge6GV%M@=>IPlI<_r*u5hwUW+lSc7`mXosG{&{;mghFb zz7Hc5S6QjVPJ45|jcOo2Ao4b;bAKfFO>?+ub-0zn9pF~$PT+q)4zp;q`J~+3(I~|? zJKr5WG+WbhK2&F)XNYlhS9)X>0SmIiJbZi#kz}_rcm-n0G`yJTbCm6p?zRs% zy0~t7+S!(pD-LnR9^LX#DO`>9uqrAXHrgE>Sg5A4$^OVKJC#>QPMU3%OMZ!*-*h9@ z+hs=~KP?3=wXW3@tH^%tY~bo!yY8k<{-HOG-pC8zlmiyUGS)O(j(&oh!bULz2Rz6d z`+>TODzZXr{Jiguc3wEZBGN$OOQr}bGre{Hq#DIL`=jAE zaXGzBA(4@xJ2!Ax`vM{(Ph&E4gS>7OeY?uR@21DL*V$%i<#mH4k~y(7_xqo>H{9AP zfHXe3-+$=4BSv=p_J)*I#w%>H_N>iavBy~Elyg@{>Dw}K=VFT#<4o-8J-Q`FZdq-4 zYHnaWHW)yvvOjyZ(7ED(J!nu$#i7`+qQEK$jWpQ;*Ywa?W^^cGUpsG!AKP}2UZ-lj z)#j$fQzV`>Iq>OeAGA?IotT41x4$_h8EfSFQPEb~%%LYq1YLuNAC(InL+muqMkhYH+VkL`~i9z0}Vb{8wLqCA&5R`ZZ+VP&CzSu&u` z-52K>mt#hGc-ozPO-sAj>Q=67cUWLomcy-&ow;FFYj5V-Xa!~2*cF;xR7x|ElVBDY z8T*#R|H39D;|3nnSqGD2P!)cWku+P9bKz=D^%j1-?uuZ1QmVpc1ZIm^`Qg^}`?Zn$ zELoIu3H|b5k7J%?k#Fa$>Rlg`<+AUmM<1m1a<5n6a(!iUsORwdP2KKZ3 z*s-T#c9NO9r85fVKFHpaTz!5vKx(_Y@97UjqbP&6BoU3Q7*f$0Rm{a}zM0Jtl`Y?Y*)i@CQw#cYnHACYwAV7v@o`tr-|(*vv-zt+Knw=<x2#(nLndQvw%XHIt?RFralpb~QW^{=@ORogQioZMs@Jkvd$ zPWm1z2{Th3d-&>zzD;zhW9+K}2TqEC-HvwcmxqkJR3)5ja$>S%E40?-TBRIloXV~z zvqIB&5>#50CcJ9YO`HF#QOF&|-zrT^SmbBTZuHt!Hy>2=x{z9quwoPYd!pEl7b0fe zJkC`@Z_AY@uDNCz8D6az&k2#{49i*--7co6y1h9$a3?*@P30=h;#<64fA{7HS zS3Bb|qzlZ)QI8lcl8AZTjt0d@$p(d0Y_A))NfcUWYi(3VZgqZkUyt9J<+?}3Chb~3 zoS)&c_2tbrWQa{$XJXgq%tRa#`?+|}o&1JtV{bUuhBZCovc*0tU#fMmDcU&j)8&Abc(VZ~w=+Cadm4naR#uCV42K%gZ7lx>Whc`f zpI|5D8v}92!!Vj{#__3id%`N;D2)sCZ$8dGb}P~NDc!B)jvE1KFZo;|<&9r0_VAKT zv61n~LyDM!)?$l0`3|`g1y*8J_h;N~l9crKT}oAWs`Bvci_9&zMinYfu8uI-u(9A( zf^q59tQgMcoWw#Yt3-PP_pQn<;uaOaV1MY`JsgvjkgII8$(fz&m^#ao6hNr>EYOx< zBh~Z2MC^Tl=&kH3=!E&OzbQfXjwmY zHtNRm&XOQTvv_@IzjCta{f>|prsW*#mR>vx3o{Iy`UpxdOkW90FW#YzUrH|~ zh+ykiuZ7BsE|U_bNu{vzqCZ^IlNr@m_I?)suUC126SOZb(Ea01$Un6ZflDVKyIoN0 z3ieI(#RyYqGZ$(eprnC~SKs^BU^#Zjz$Uu!gVP^N!H&gX^W3WsiF`kOS5$!q;^oVL z5q-WIuCalC#33gSdE+~IKX47BY6K7zp~s?e91)KKr?)41$#pFkshGryphpu?nWVM!b#b<{a+#BbeI!B+7T7f*1Mtd--R!iVc*b{pqNLtL zII*7WE03pVG}dq`3AT0j z?wu`Q=g8W`s5L3vf7C4Hka1$T=QFEGa$8%puf;vN?alc#iOav`4{m>LHx%8h9iiG_ z^P6%~T?FUQwqf0_)yE23LsE>+;1q8bZa=28Dl=8(a-HSc!jAmQbzTPc5ww*To3%Q; zFQxv-Cds!}tB{y9x!BqWB1i-IHL4BOL7cd&dCT}Wd5RRJ_#CtyyHVFAIc@`|>#$P( zAQmh}wFSfLXpAA><4*72D6#36jHK_4g|6Ku9?p2D@FMIA-8xmA^1LKvYr>GmrWXMc zb+9+1^ez38mi-Bv5T7>@jF(1du$>D*EcW~T%g5LVgVD{#!VNfI}%86n;38|%okTB934I+$r-qOHRzS3N%W z!21FvHeddvGG+rm5gsTYu>64D=PHprI7eD{M(Q;>OLbi+pU@fF@Pw_(*~#`!1S>*? 
z^^VTI%9nZO>@D0nlW#ivj{3XUq|X%v8))RRW-so1uFf-Ngd5}bv@wgfaQnB^4NSYb zsU%5bR+${KsA8o4__05&gm!p1JG$;wu_5Vfh0cN3*?aD7-FeMt$nTiPL|^u{)?CYh zKtmE!K^k+pp}jgNuC_JP$7a{*#Ix<$YF0}V490DfTw?Q8uAkN4XV~s-nVxg@Vb#8q zW-3(Rl(J4y8z{hEt--QR<0o8L*&K+CkDCMwCX>BbhGViWbqNww$C;mv?R^)aE_wmo zdO|`ombZ^cEN;@Q<%ioN$*k#`F2m1(e#$k@88Z|8S#UsjP-tj`%&Hfz67@0~etMUj z)O<#+%kMkG*6;g~lr2t^3Go@s@X2e@{Y~fPuC2StTk?}%?r;jRU}qbtZL8I9lsZq| zz`E;`#u#NNryuh>-krYQ*MY{^I@2#%@BQfIHc$4I+M^?H4b1MX)5JtHt9&V>Sz|ZXzRUB?=Lzw{@zyLf=bjUOs$atM^HxTeBzx1@ z({(LsL!XqM-`77|@*xM0%_N;0{Kh2H-C2HYJ zqdWOFTSx9NJ9hPZJ8y>zFA<#>E#&cMeh=(5Y$Q_i*^hC;F>!wf?HK#W%P!5s6sub~ ztE7x*-JP{VZWsy77#Cq!6+V>Ob^)%FoUQc)|w;HiRQimPn#8mtt$4e5GD zUWqD@Y#ABt{hK<=^9R~=Cq;dnD{01ZHk~||L3P!dfFtjE$MAzkJ?dmdFYfHKJnH+t z&!=p-&bBZo7H3Qrt^6R*f%-JyPXzH`_?vh`Qr>KzC7_#8HdODMyB# zbYArD-sEIc>+V-j;pvGlQ*n~v%+ff0euygWaulc2jeM3zT?}e;BYIF$JEiQNTyWM** zeP3cq5#bQo1J`L(TeM?tckO_0SH7v@Z>$+l0w%@X*FLR%inqzp;jVV^{-7W5@(iO! zY<$W=nzUi}{LDDM{`qN3MVoq?#ytiZ&Q}{erd1BtSp#>Q=xBR$w&Gi>TOO-SL@`G8 z&PUWXshO;u|wL$`} zCzMIowyO$7?2y#KP>xbi>`ScxN<4g|*ip`Q`JN*Q_9q%8<2mb<;{tDO&T7gW+;;5Y zEGDkj)^5l3s>%ed_1ny1|oy*SC9B9K}vv+Zg!uYGTMUAmjkF^-R`i7;nMorpK z$q{`oeKq5iZ|b97ew20OtyMUv#ysCJtLA!IzK;q z;PuXJYEM7O*A7mt`cNHmts_YX1HvAH^>epr~tPw5d5u2wt#u`U`jg$jI~n zi}ynKg5nv<#Hz3G1z$?LgzyEz%B}~XvWxugUlG0l2eN|~BbHRn&8w z3~t~jEXx)bLtr^jx-BhaHJOuacuEeVK zSkB1Uy9gma`=zfw02>{0b5j%+<3u_$^vq=GL-Ko)j+Owj3o_*GPO5#~czk1#l&i*} z3y1TgIaP-F?j8iid8X&37`35`?DmgdG71_b-{8zOnfQtOGD9!)FOsN_25&u}Y#i3r zzaZ);fyXtCwbhT1x*6M!xu%-1qFte3Bd3??&m33qi+3L*%>Hf>Xs%dcr{*uBvFs z2e%F0pG$HyU2-?=eTUuTnvD&sBp;{9M1n^%Qib5R-wuG!8TUz+*Rf~Sj~NL~F=>IB0N z*l@Mp9XcnIicZwtjXXyi$Vss&cG=bBbIjy7{?F8b0X*_8r5@8GR|{{bHg=0xiHNXc zYtdD$Cs7UFTDIu3_I4vDzoA4u{N)x$!o>#)GAyMEY%))i;%g0DQI{5sWk9G;c7J#M zAQvM(cG`E>27+bXOexPqQ|kI~V4cVPjc+XrFVwciIQOI~ka`-5w3)|MY=IGfpesqX z+E3YXdmh8cuB7}#!;`Ej*WEF7H1X&cT7lV?5@|nN8m(rhsBwgSmSZPM@ z>ZQh1OOQ%&=hKN7qx5|H4Z2R1Z{(?i{$J|V$iiK$Fg}G_*6WAvL~GL$p?`4FXnq)2_J%zh{L>hQWsi{q0pEyg{&tJKQC?AhFld#RN`mGmPpVyd zjsol#U3kN*V*J?D@HT@+<(k2~L4N9%V*QS}Aaq4HJd{U7)XDsKc!N3WO^k+{qy|Zc z1&|Ux5oWT1e(NMRiog=Rpg1e1CY>_3OJ8c*RgrPDtDuZ}>dB^1oKJyKwMdm3gM zVb-=lH?Jg$g~eEu`?@ont>;~(qO8^pI&bv0(I1;jUh{%d%Bd=So=4j~|5hvWg$06{ zD7dxT@aKZ3@r|Y$SNtSa;WUeRU?9$S0@+mw-={J?2Qacx&AUMPLzQl!Pt?*9S?n_^ z6d~T;IAKtP^zD1?adBu55Fm!8G!xbiecI za-{m#%_VIyxM)0_l;mM?H&A#B^Oiutd7XLjxusH?CNWiH05luYU&1q@(v1jK6(`3yz&^sv2`@{ z+e4xB^?0&#T)zk$i_Vi0LB$SdqivFSu5r|B0aS^vi`)tpuUDh1Nlo9g zqYaDbYiE+<$kNrDNx2M0`Xr5`Jv!eQJIMZ&!Eu4tn=<;I9&1o7EIe^|;@<5GS7SaUP+{-59phm9 zhOH?U50fR9*CQI=)-%R_)Gj(Dr)9^luveCDkZBQfWQ?sJdu%rx=aixH^L*;AOa=Xu zdw~~`BWpjpJ#v=239fk-CEH2$(d^}-D}gt&{PP=hS%f;<{HLfbuApGQKWC6|zAF_L zi7@xP-Aw65V$G2_kHJFfCBrkxn7GeG#Ej>`5a(&wYFvaMqB8*z9&DZ5U=5W(NBXFL`8zVb_RDJna)w9!{#CRj;F$>e;TPU6V9d`6`dLuzCDV1(PcYH_7hOlsOEEhjU-nE6Cf zZSaokx6b(7@{&=PFR&Xia?XzhOG+Up8jftU*dTIJK@- z>4E;LN`01osvnDs%cr@`qh^P5taKvU626srF)@8$-}c?*H=jIta^PvNS^7x;E` z#1Y3zgKL2$BSu4qaNSteFK|1K>xU8whv7~lVuQY#PvbR4TJ}YHqqmJ?%Q>(km_&~Z zc#qIGNF{Hd079Cm9mJV%)E74ckJEu*oY4VaqIbY$xKDZ=Xo&!_;#cyKj15K5p(>V4dX+7_;F%|GOjZB`mRDdmu&61Tl})kmJx0QsAUU999-qb02IT9b_|%ul>c+1;ej zT9cJ5hjTe3ms%4k7MgM@HQg(Q(X#rZL#phq*zD2Hl16{`yFRH74;;>X{JHPhF9ijk z?Fwy*t`)Rr9N1=iC&wo6R*KL+C-J$ zER~c(5a0Hx@2IoSI6LEd>`I8LW4=v=5A|%0)2lmux9$$dvGELSZE{l7iVjpJ!XNQq z%??S^h-*#AN>5qbLGpQ?9$U0V_HDXVmS5p0u@J%6KlUp5bqs%Z?197C%*?Lzh-U9R zXXpODlaJ1Cm(0u&)6KE4KhavKW=tUNvEN=BnC)bnZ`o%Pc&M!)-@XvvyQhs4;=5-| z=Stckm6?y>cz5SwtiMIDYPibP+=&x*tvRx{u7FpFQ#1_x5Ir%uue%0O=Nj>r@NY(_ z=aJ+%XAn6SJb*nP;vYCtH&3~Mibv?M2(@%^UpK|4_uI;!w5%Ni8IilxmfR597l8CW>?ENJu~*IV%09$inHAX z4vy!fA5G+5t1)**D!tg{b^Jj|S!ML@Cv)!CQ%4J(i^H~e<{Oq; 
z=UQk41`g*~NaR{v(`2>gSZU>2+_H%=3`qEPW35)g=^V3^6%XI!iUnL7v9XXiJd$tY z+M&{1>}_-BW`@O;su87OrM|#4%mBToj1!E6uyKu8xhQPJQbcutkO0zfZAjJM@s6^H zwJl2#k3qO@+dYbUh}?Bvz|cF%A4=JT_;o|prDfF_x)W}E&#Ll@$3M#q#I>#Z&LeH0 z(DUJn#^q_p0z(zhrt^x8OfFFRAC zDwlMtg?eRH4NX^*}(m{SXt8bTM zBav@$N8a1&&bKz}5(aZN`Pk>1MYUcVwu&f=8Jr;5N9T9sDwgMW0I=&d0)N5g$Li7SqFbeRT4l) zEOPWzLe}N+?$+lM}=lA-go^jZ5RIxav$lCkdt*Pe@nFND@oJ&c&G8`{n%X?(1 zljEG^x%_!klYDLJR>@Vt?oINp0s1+YM&_G69ZIg{d+h)*6^l&m^}auX*wvO>s@po6 z3J=!}<}rjr|y~@uaS+{BW3if%`{`fBo& z5M-3xEU&x5q3}|Ser-jFl=JDTO{exZtW7!Pkm6LkHqkIdMT2DP_Db8@-@IZdAdq&r zrbWW$uJme~V%J@=g`D;sGg=BQiwm$joKA4CKaD6dDA-vu#jV%kn=@>YqK3dE%bZ8!z^Q9Dddym1%Hl^LfVS zeVx)4J_8p$yg!)A<^Ec7J&%!lA?EBgR)wd9eV%R9tIRFI2Jco5df-!zd7JjyadapX zo|e7#!NV9IUCmub4(?=IH@u25yk2m)P)v7Zx#4w^<9iN^W?==gcF-4Tl`$AkvHllV z^KzL3L!6wTP5(|!_mAk*YVifltZE)?Ec-X#lCU+5qu%kx6qw#0UGLlZ`_$tD6T`uG!5@ zSa|Z@L<=%Y>k`;zf#&!+P%)vXa!7o4c0! zmTnnRdm%Onad$JicpD_ql}Fi5S+O-ZmNm8Kn<>uwya%ln)MP9sLaz92S+J$Qc*EHb z=Zahpv^2}_!fX*h>(rshQ3hrcf*ss&Eq(H*2u}uH{=}7usTuMLQv80lMv4;q(Jo&@ z^>-6DCkZ#sG*7QF`+n|w!^eM_`1uzC=-dDHm6=84SC{~n7o@#B|7Z0GJJjc|@4!wgK1YNMet4d3fnhr?79>kUwn zs}5-Y@GF9EENdWlKYmrLmifQfd+(^G_HSPlw_*V#(mUCTfOKh6jLA>g!j=~4ooqlr zYCuGS1mdQJj)kHKM8!~~M0ybjB$O>xL8=NQ*?=@jRN|79{nozYopZ)J_nmvs9rum* z-Z|s^!yqGTtdVb>bAIM$ex@wzd00T$nIJ-3TI`#VTaS;$MIsH)5g)ly-sZXZ%3_^k zZ11;v7AiUi7SX=e31ZjqJ@|ntn5%=0QhF8iTIr^1juckAWMFD{G;}4zU(@Dnh2}Tq zgT_Sr6K>JNt|P318zbWhp1z-<1~sW9ld&Iu)^=0%!IrpT^e}0moiDn>E8`#G`T-mU zs3Lr1`(q7YvQkWiC~-YlL1nZ!=FV=WUbivxD^;G5Z9MAp)2ItC=cbRrSC7ZGQ;`+Z z5$L^K-Km73bgkS=Q)(e|;Z&%FHpO{}ZuU{$>FF_!X$_KB0n+-U3rd|F3a?Mx{(8gG zJ1b^2m83q>=&=@7{3ien-W!{gu{fCKQn@izq#|Z)oulpmJ`}4e8`C zx7#(YHF&z7<5p_PhhToJ?`$y9om`03U`{4?OR%h*fh#W-xW7$t1!*KPBt1ffvLwz* z{5V1IqvP`dJsAB3J1D(p0hHY{N!^$ky?! z!Ic8_Ne2f?0Lv`G$`@ASSkUNhy?J7-;G^l#^MU|Jf(afeV_m5yk&;zXn@UWpcjzG6 zI}CPPSJsT|bXsjrvoouSIUMbjxNcd@YOZ=fnT_>K zQ}Nxv`P-(Ujgukka^qO4?2n$@@XB7{lT{CBHRQSp__S~~HMlU8nEK>hbiv~R|M)_f z?g`?~SU0Cq^r+@{jqnDXZ zVoowdTdP0aUjQblOQ)w*k{K?{&zZM_lSi~>ijkQrUGBbg4XpMpJu#~be?%2*n$lPW z&G?L}z|i|t2g*dw<936aAJ(sxAj>|Ud&c;b13Qo*(Or1J-j&8!yu zwW5)b)xvA^=M~1tD=Y#yqeJ zsb1=9{9wcO`0S8Lv^QXa@4w1uyfsgWJOh#XtH3&JCL-Z+chx~#aOKa>g*k7cq4GT4h=8;4bIgWXj z@z2Cj*AvxMM-z>i7HkmfPiZwm400993UTQL1!T#d+02_;?y2gxUGr~ zN$)~u8)Z)_UB7*s$~mq|8*vNU@FV8^2esbda8WQm73Sat`DrEJ?dex-XwiukxQ<~NaaBpq+b2G zf-{UNkjJU&(QXgoBpEkX)izwb8{?{sKP!*cB`qUb_o}!`7iffeO=#}o7ychzgnkW} zR}9nb?*<0oVemiKO=!04VVsQT^)5-G6$1F;wMUJYHL&*C?EXLh40$xYoJ|Gq*GTFx zbmv#9Bw(?5mG%RE8+~A>7Pzu=x+iC2sO1CZfoT=iHte#~(3LYalw50HywZCOB2A>} z)3C7a={$|#8}_lvhy@1LCiV1;$GbGS7x8K?Hs4PWWsrv-=eN~3wvbhDwq4@^|Jk=H7~zx*B^=`R5%#P)7I&y8cqXC$Jwpx<`iCpNR&dO@t> zU!B^2zn}mA#ghoI2gZGHw*I5F5Qy>BV-ZsL%0P@03!mTZK_eXmY<_7sxjmPKMKx?B z$urmPg+4#r`EIW#^z*Ro%l1Qeg1Wl0&i<;<`2^BGxk%uP-Ue?_7=M&h9TJ`NHV)Ae zoZZ4WgBD5}U6Loz3*ru67=gcLiLZU7A`X8px+3|6gBZvRRQ&Z#ovKnFqHKB5f)By4 zJ~B4tgFz0p7{-apI(hFVSw874;e;_-8hNAso@6`uI9z^s?i~a}+ z1e%-*7rP-diTa5v%_0n3&MA!)gOeG0X+ytt@8~Hiyflz7y$Y#;8H388=dmiSzv6Qc zD^HijS{Dw()|hkKUuguP(|B@0N!1`y?qSTt!7K0!NTUN=u2AMR+%`)7K+WI1Qd$N7 zEDhW?(n~YH0rj?urX{+hCj?pK-xlaG;mWCVjyK&ygKD)iZ{emP_3#rDPkXGbRAOdg#`uQs)~AJe#Y4E}_8}6!RY@EA!)6V~~Tr$`R)x1)DamYgbx{dm6@PZ6DZo$LYWnS6!Bs^RZ83hQG< z=u}sI?aQZ^2L8AwfU_bryY2LY0{Qb@`g4fD!nPwU>>yim!Is~GeFN(1F+ZmjF5+&;X%H zsUQabC>Z=6`UDJD^Ct+xy>cPaw~KeYSnnxwo=d;eukQO46_dD^yU8q6dh2;TNvHsz zIXO@+$rOFOf;jFCxT2w4#~EWN1Q;FATY0~wwFCKDE?qwe9}0N!8xV#MeB>A6 zP<3>cm_OjBf3qoMW`GvB7ayQMWB4iH2KSX-kbnnuIE zQW<3e9{#2kh@1MxS{%Oizr9OGzMtkFTL8y?fmegIW9ThRUWA@VGTW*5mWGvM$knZ|3 zjjL(b=U!{Ax!Vec>uRFh^Q6|Kq*2MS%oq5^u@HK6E7M)hxdN$6D%Pw$OLWmx1d?o( 
zk?9kk{G)(wZ|098=z+X34S$WU`P-Pz)oaPJyEM-B-^E&QhS z!YFK_X5R&@85442?7k;}j2~cza|4s$@s582d#iZmTZww^9yCo^Wsa+aqk%361a1JJ zDaz_IFlgQN{W|z5=GtR=T4VO0x$DEO@$scH+OnkAC%47C>dfevxKaA5={3MMMd|L` zpkEVrH>2BRaD`y(?m|o(H=R=(`PyCS5>)eU;7vb+R(j2G&|2RkPeFLE;a95IA0f2D z_#m^Qgx|+QR#=$s$LcJ|!A$QKmUFN+;-_`9wkx@%p3`Po=0Sv3>PEou6J2h>cFm+knrEgQoz#iFiyNXIPCdIGbV-FEC06J z_#Ao9W69xC>dQwB8eQ>gAq9aqs*0_SXI3PS+~7*1(np)H*Q)(Pr*mPq;!~(L=ZEUZ z$1llMLiI<##;?uS1i*=RetaOA*JNT!s}7`B*{F@gD8|BEuQkRDFbwPFlyXQ9*S`7O zzCYknuAo($O)R8UxJm87UbI0J3V&)d9n#N zn)kdro?{KtCZW`EoGa7kd-fhueCfJtoOv)c0hyb02*_T^6&^DGo}tS<5L*p+w1VpJ zUnzW<*;pmf?*j%eUvCu?MV#XP}6)m2_>RvTf;*s+>I_JjnUNCIMma0ec^gt_^zsXZ} z2{9c#0+W%LDfIT0E{16Yb%Hwaz*w*8=n64^G}L);j`G^y-~(I&bS#qZU|oTa;3@M1 z|L0=BisfS#EGT=6g$5~vP^|paZ*RUqj{znFju`klBMvNvkXk{>ge~g4gV2vduCT}h zlZq&S;Dq{#AY;>T9WKeVN-J5XCW*Xai9RJ&W$}Yk&tf7^rzbvIiyrW=N*;(luJ}qb zrAkKi-g*=5lM`jSUa0Yb4J3DHf=z6YFRJKkr%&%$xW@Cq5SaytNH^SoNNYfvF z%(r8%SG2dz4Gc-GyaavwE!w?|lL&Nqmd4cYVPCRynp< zdpTXdKp*Psu*&WTSD!t5mSk~E`?4i7e#<>;Ast!b7wTBKNeiPHDjG4x2WVfB=k;GR zJ3eGfdFhP>7P6G3E0xpT#?uSEPZfsiUaNTMw%?050CN@A8?-uL-)rG%_%xT8Q5Kg= zH5ZHG)bd3^7a+L~Y>n8=FXMnYs(u8*FJauRkI<&K^Ik0%ok$9aJK=%741}#L%VF?H zAc2Mu0SsN^@4u(?ngb6Q?|EL-TT2((9xelVc1(N*$w(m=I$*LdlXVj+Uxdn}!_8~^ z&3sQf_pO9j_6C$VdZdXMWBeOUKlz1q`$`D=616LIM{m@P!s&8;n-&VmAVV(DwO*#! zZOCchc1O62?So|Ihq#A8Ps1r-xtJQWV~mpe)wcLx z3Wa|5;n-na)4G0F=ddY??GG#d$wL6XimTTJG*{sIui~zs#{p*w$4h{=E&8JOCL1Se z49PfQp*kjSv_II}m1JD^>y!yz1*kB|i)_7q?@yLL_rl`NO4sIiC@x52GWE<%X_RDR zm>_;|wyJ1$c%)Sk7RHh9u)a}o9~S}+@`dY@+2ErVkTeQiKbBvQR8WQ;S}ozZ!dx7Q znz8&^q<9U=G(J$qZE-F&g#Ln7QIhEE(rBzPoo3r6QS0dFm|d`rXa>t+34o@|jupo> z1knoF*5|~cHp|cOY7lZ z%lsD4_5a+3&ozn&uzmGzbvBj<9Z*)C?Ky&i3@ECI>h)VX6J6VsO?z#n6q{13gFA1j z82!;G8Fh2rj>d{fKVnjmPnjBJ%7#m6>w3JF@2GP%rVf<}eJ~p|1}@cO7-w6eK)cn^ znll{a66|(z`0c3S!*@dgHQJZ4N=VY5^L5PhCN-XE9O?e*l7V;EeW8B8Uo zmPk$mOUnc1vuUJr#SNQn1d*1WgnnP?*`e%)uq5NTU4@Ixwt~L|PWE*s>1DvqGIui! z>mtxE0y}j*GMx%gZId0@a9U0gG4x@lWtrJJM6;^MX+YC`4E@f)^U|63mOv84=_7m9yD~ zV>NzJa#+nbuU{@yvH%(kD*(Wid70gzn9)ykWP*l(?iT=~VaL2o87qbV$B z=DmK)^ot_Iw>dNi`${XSoV1;r^s11NcoDqeC(Oo0ic^)gMV0w{=8(%eRb3uR<0KH+0W+av(1_Avqp_4`ls*TcdEK%=)6&*8P&r9y~BPw z^yP3LVIhGX1*p&o3lG?h@4!}faVrRlURv*|cSgdYXwgeI33eryH)l7Tk_}$q-pSKA`9^@PjQXwc%&j@biRWxG~ID zgDPg99`Mj#|7Oixnr7>UWsAmaPN-(=i+($}$H=*03Ukd3mjCnXFQJ7`GinM0J7B%; z85gJ1;!>XqDK8okHS_M*zTUa;pvGz8>y3mf3-%}yfoTT;%)HnjqLciD|IQN$k`5A9 z+;*f^^yEVMA}oCFGnUN2V???uUp58F3@qze-D{bo5i-hEW^z9+dTZZGRx)tsF7HiB zdi|L_(YM+fJs-}HvL~8asE&pNxiFjvC9RsBy)F-i%9Pzl;BQ^Wt7nsDD9S^H&d)c^ zqX?Gg4T*OK{+#zL*nC$W`{>>18Eet|Y4Qb4*!QM|o*(v3n%KP^34NP$eW)H8=2=P< zv^3Uq~u;Wo6dXte;h@1S*-LIgbHygvSeD+-E_jnPfg?^%n!2T|%X|FI zkadLqow_U)Du0du&FY3c{ZLs!CrEr^0f|PWLt$O&sS#cdEcFb&G}fAl&M`tuV&Y>n z4>#*pRDmg;vH-*F$jg~eebZ!JMw&%#9n>DA)CFYKq!k&YX=iJbP{-VaGbn5Ka@`77 zTVBB6qVeaHC8Jwjn48Ml2v1t5^|K`PZ+_K|4K)KjiQMr|{_j}M@K;%*i{p)^xCUCI zbF)Y<=7nr{ctdSKNYKMaj-&c-(j^o!2P_i!LeZS)q$vW^0gULw*umhSNkxA-S=INyyM1s(l&;JrN8zG4zfvgqf6( zxu@il@S{2Bk4`xgIfb~|?WaDM;` z&Y!+I^)WU)%R?IQ;+V+~y&sZzkc|me(t;BL_T$72eIV|ydK{ISFh)5+T%`Zbiid`6+#sjTQ zC`pMM$ifdUr|>T@A%hn*lHdSgsP`n0{rtMJ`I}uBl#V)nMP@*Yvnzgm5sf!S9mFI! 
zM4aSLhJ__EG(KPMLRMRU;n`tUAC`+%Jp3*C1j$X6p0leuw#gB4zK|~6YVI_$;s3&M zy<%@9zQ}3tU3ACohcAE}!EkNn3{mz{DN@u}-!$orv(_Z9_&hC#Y&23`Kkaf@dodw^D~|jjRIOBQDi* zbg?3Ht+*6WP7*GS7{JMY#L9zkgb&NR{4f)h*@6^2*Mb#qzsEHPG-|iz8W%T2SfS#_I`)lVr`3b+DG{tzqPY&bS{4t{@F|Qk26U%;e0IM_Vkg`GULPnd;^%=yh*Drsx z3$7M4)-GJC6spushZg0A0+@u-knIY(C$|C~Gd=gIoN$*cT7 z_T7QMrvB6Mw+^z}*g&+lR@Lp}wMkYQ^Hm;K>dq{1xlG z6LXpRhtd1%#E|E2dZibACKYe&w7gZlzp7Gj@cSIw5d^nS_x`+@Ss6xX8{WyLk&j-Y zXtYA{i00t5NUqO|Zb#bULazqavX%L{@cHt4_0Lsa=yYYxmto0ZZD4Q7QAe*QkWZ8B zFz`kGxRu=H<{Kg3BRKl)!^@KH@$oFwjs|@vn4Yp6?}g;r!j36m5@?4wAJ7lbRLu9! z*I$1*ntlfn375kv02WNAi%21^JlLgqnWmn`5X<12Fh~3NvP|jZWGStqc2G)}Z(&t+ z<`wt8b_L11-Qto5r~bPX+fm*62y}geHYwssck`=sn}k}M>VS^6&h|$lF&Co`jl8uJ zX)L;O>Yj&^Y^}x-kpn;S|89)jFAXkqA)(a~b%phqEei-%hsob9SOQ-4Es3F0_~x%l zYzZw(7zXY5@b6qV94tI;Xg|I2rX-i>d+z3Mko9IZi1(&wk^{&}R@Iwe)iv16omI@n z&8dBj)alk|>Dj&En_}U47mM)r(@PM*t@r$tj_MEp2UllcZVa1R?BleSu0@^FgFhO1 zmEcdA1GAUf_-&6mlL5U{zF-UQnNhxwcUx@*)`E8*7E!$n!Z5#HH@Q$aL!j>1Lj<`# z^52U)VDQ6lu+BxzpYwDauU!u+J$Rn&5B{tNxRquE6S*sypN^8@#x`K1reG>D(vRqn z6mlCrwUy^KTw+q(8mUvs`ncUI1AHWI35QB)PY=S?=2~9KNox?Ob7z8*qaa&2wiNh2 zGH#`u&=T`$NeqNt5e&$D75&1G-_wz4-)d1OFp0qrjhe?9 zXaOLOy7&v^L>DV?9+j>)FZb*0~{?k}w=n)>W4BJ|+y^@A<{g1G-ras2<2XY?Nf&L^ZN$@8+F z{I`v;J_(#r-yUWm*v-HmM+x(4cu5D?U_jH8E&#@KVLbuvFRZ=Kq-sEmj!x@U^xC}T zxWV)W5oEwFu?FZ}Tr#cdnvLKpRC<4`-Gtw)|A9~UxK_(d$H@-$DnJX?KDhbz3O&fF zVMM)TaA%7U1_RFJnl5G7%b|ewMj#M=nmLlEC6im_rq{pf90ib?STb_@(r0LT}*k9*p2Dh8jR~I zB;i{?-rcZYqDcn2RUuJR>wIyS`(u*+;BPPARVUt3o;A%$LZwsRIxDNi>bEu)B-Lo% z43K->trMsiVZC{-?+w*uOmm`8QG3gxSg)U<-}-_oC}&=9)}0#|0y3P6AjgtPCIMg$_#KpIfhPGV)!AWDX>#*N{XVXr*rTCkAG{F7Wy zrXd&33L?g<52KD^5(cGF^WxX{74f#yFdN!O$Eo3~34$hpYQid~t+nuj3k=*$?;#2! zs*%F8xxieMwF|2CKkL3M#a!qSaXnRNW1UkA8$pW}1PB3wsY6cdRhelXyR2e^!+HZQqq|hc?p_#p4=1=F#U<_!aAT@9@ypzG8|N>ZdGx# zb&+%Xp-ZIky`ISlKtr`CsOSoA2sN~8XN>hKZR2@G^zQmr(RVY*tFh!mxP|t~zXa;i z=pz8~XcFW(8ThiyyRc%$R#w9u=|kJb@vAU>2~|6BoFlmY6S$Rba(nQO1T&7xhXG_J zo-cHgW9AX+=>{VFTr=k9iWReNH$UdIZ!6%(CP@Kt^x)K3$Wg3a3v+^4bKTR~*N}Nl z9+9cnBF#)AuIG@jdJw#hM6ueavfqOsnA_cVxcN8Z)xQ}+ZW*5~Xs#ROcxsG_e9)H- z_g<%ZJlDUeF-LuyZcwC&8B=E$o$3jAGv4SDCA-~za-#ZOk%5PgwlB(6x9U|9@_5Sl zSs*YstM(3WPlR2hcl=U|YHqD%SzKB*6C>Y<{}Z}1$x#OF2a3G%9*JLufI$Offy@`e z(?On7^X2vdv?$l03-Dq*#z}d7GQT|AELvax$^2XuQ+){3w|dH)`c7Fae?*J0_{U7Q z#|kJNgFz0Jc+cIced4(h{js--=(aze)wd?+deA&i=4?%=tMl2tTFclPMq`JfvYtLj zTatfJ6j*q5fOZz>db6Jw=;)YhewUO>%p%^Y$yg&&0PkC<$Npcknn!4NSN{_5s=1W= zD^1gn>0URfJ#u!M;zT?a*jcH5EPL#1{sN;3n6A3wQfnjZ27v|2f|a#W5O(g=O#+|` z;hs?73rVqdFSjyH`pMaZX2^CGqX+Fk?wkq+tGZhA#euD%{pIZENb$|P?#Eh4X@*>H zW`0)ePO$U!&qg05JvQ0d5oVcM+Z}OQ#Jj*U-Ad4}(X^x^E7p*kqE2#xWX67#DT|tu zhzf?#p1mwkzcMp!Sa;l8@>#_=zCNqaHqrK+m-YpB;8B}Fl@#VAjU5BO;;&k!dun)9 zawEa>Cax*^!+`-{1hDoUiBDy@k6k!Md->3_b@L6oGf7%9#9`2ROv@kf#&?rHG(urG zclWfkOzY^VlDpnTMxIO_S{yp+Nl$p6-Kq3j`jo=WkkyX+m(xvMp2r;(c6&{)b?kQ- z@Yj!3Nq?FO;;U-)!P~S;g-5MpU%1?8KOJ((pwLSFbM>pm;aYr7Dxa)Q{8@m$i91GShWCK7K%h+(d<;6AZ}|(L^5Mq>X4oKH z9VkT8auXxkLp%pbL*zvpZ`SwZ2Epz7eOrlfG?c)H&FlcFvbhHlSi6riPo=|Z;-ZS% z`T(5_3InY8Rt;Y^q1i^r2Atiic1a5-pQLYFqVIImw%0R90tY&M6o-moVONW^j+htt zDcVX#GrDbKkkU^qq&Eq3Ep{-O=-mEh|>omy^^$UiM$2BhhnSV4t8o#xdm2y*svy&OeNZOwR*l+zOUGz__~Y%zw|3$_ zd+>qEGq*VIoL;!>Z!yP%2918g9+hM>a|{`}kyP z*VN#u*^pJvSX4Px_W4V$Sq}ZPtxda@h!gDGa!5C3OnSNtiUDbEV~P(~-O15)EmSzhm73BUInzCWJl5UILBGL@a=yR>GZnY(dQOqF6 zqh)6F?M=J%2d}1#fBCKnc)q1DUbdd0pKYCydt3ZvhXcb#&ygeZ7YO`g=Rx1$Z0{7z zLb+#U8PG5p@S4o-l56Xk)@?zic~*8-Y(_0#c(*0k*z+cWug0H_E9pr!d|h+>Enqzx zQZMaeVJ8FD2}1mt-gd)qNmiwTN#9z0-0`Tx2%|C8+={&q($Q`z!~UjtHOkdu?P_{d z(@K)WYlHAZQ#ENM#n0u+t=Fe0bjbi%dH|){n4W$ygOoJ_;LYjSTHO(SQ*{do#irU( z)bZJV|BBHps#^`eb9U5FQxOjk 
[base85-encoded GIT binary patch data for the deleted binary files omitted; not human-readable]
zIbTCO*r@;|vu(KbpICWW2pOo2el1>oyL(+Q2zRXY^}Zf&38=C9_Z)i5=q6SEy24q= zicS>!Kn4|o&~jn@3%co*FW&MEuP1KA1&}-ua=WmU_5dxi&+%ga7rD_6>STsLT&Lok z8(R_HuTx`GY<{K`WxWS((cq3&9#b${B_MqjkjBTeks9zLB#_Cb*|icB1$mp zkT;ZRE5I>oYTGr(*P%BYpJ!bNwlWxPxfq2KUp6sFd!|N`LkXuQc_ff(&95~|BkO%? z`W>CU?p3FxQhZrWjteRh2)vLB`?l;;iA!-hkIN4T>BDa-@@Kj|D9a#C^wPB>8(xHj z<@dK^ye7b0F&?jvLWz9&D9?)S5j})Egr2@x-!i0Nha_k5xkA{U z#`Ek$w*y}e7kjGJer`uoAN2UUMGIe@PCC(g>81k(A>-5NDmzkbW0F{ND?FXBD%VU;G=TWJQaDmP@k}kR>F-T7mbrz zw`yyRge1C7ucqHVdgGW(=?W(K?JspM1v(0*1SL?#e^}gS{@dcdhz4V)hJ#ruuYk#q z+xsQXz`yVg|K}Nj;0_?|c=!fsESQF-xqyV&hGYJE_fU=gZx?|9$1x{<=-3tc_S%_s zqU65+WBcfLNGZyEVtUMS;uF6SYQyimxBGwMIq*SteLr*r&G6%-qJG_;S*Oga5kT=k zF^%aYCbrI=PuPvMAcK+)j-^xFXAV)fBom22SlGv0a;>_rSLd0-`;LFP)Ca~lCSu%6 z+gu5EuEZQS1k9EA$`vL8QMUW`DDo|U`s4u^k`UP{f~+*axf9g|D57oxyh{8xBpz4< zU5R7}%V-_8E4{A~ypKLeX2^`qaA4epPpdntEDOpaT@1j+)q#SAII;Z=z;q{odI%K+ z4B&=YMjI$cF3=#oT68|>;}|g(Z*y??Dlq(SouK?nQ<=Ycknqpf=6~kT1;3B}o%-~T z}aU*9a9odMHq zhNl;~d`E+6&BKf~rXHwg0~XI!YNKOFF23~D?(H8nl8h}YEih&=06mk2YYSRhnWVnT zv>F?=HtM!;8vUb1yy2K->d+5D1N!;$6t=+UFfqa2mzUCdtn3e71_GYjL#WMMj~fE_ z=riiB9uVO~oMN2E=tGAfiqeG3GNFx)E<|oOSpCJT>&rH(Q>vZ*_d~pL{Bk@M zGzRY=3Xhq@SGzsLC-jUKoi)=4PO63onT?l2fjBlG4Mbzm=A~aw*+jmNri6{wk<5Yh zIG9)-<<4}Qh$CO*_Lt-_rD@g3YbwLOD@NES)8VBoP}H*}`AR zYEy5CGs5J%Y+0sj429#_VcnafwONm(89*9RE^UitqmWVs>A$r z9$@Cm3!b~Xcv{YIK05iN_mX!$sw=;#zN*kUKCrVMa;9Yew%l!!bisIOIp3Wko7*I< zK5v5!E8Mk>*06#yz6kOsa>J&KpwLqaZLi4KJUFF1t#5DwXFo?#{N>KGV5MFWWt*bo zeLN3HP=*U)^yRXRV>yo}Au#uTP$pBsb~8AV737NQyjE)>P=jvPEh1+xvA}6y8amd_ zUNFhM%h~g&E{OIUCOw}VkrAsB6ZtZ(74~CCSp$p^R|C5c1@Fx6`4zlzsXz45tzXt4 z$@`H2=fXqm_ttv{!_5Z;spPcuxx@Po3w%2(2a}rKMq2tcvCUj?~vYqYe^ zrff*gh$ZN||8g1a*Rb3hgTSP;Hj>DWf&j-N{x{S=IItHJY4%d{V%=zh#oIZLP{3td z`*TyU7-c(NXAnmI+RcjN@R6 zl9vrFU{lI%m%7Fsszy@VHG%*n+H|NUC%_w;A)hM~YZlnh)r+4{cBr6KcaszTyBwsA4JN94fW)i! zyICaga-?u)I6NxY&{Cj>(0$2RtMigEw0rw(@*nRBt4?JW!sfLgA*-Tjk+tzM&xF9R zYCttsf2Z0Cr-b3lSvQ3yScThYrG{m}o1#Xmq3KqUmE%P~#13Cm)o39Wcp?jPN}z?5 z@e+MYCDX7#yDV$%QUBd{bV05F47eH8W3MrDtJv3Hah|iSm*};DUkz7qOQ0mQSjTyU z5xJ(NrN#Ds%*kAGoSxh8qo&6ddDoh0U*osgdM^Sd->4dcE8w~p-QBzEa>^d1B&<{5 zJ*?k*CUKs2=>?x6=gm^8p2vRNuO6?C$Tx}))9$4x`}|sEqkVDza;~xsLRDRgS3>z> zf}%C~^GL>N9S_4W$3W9ZzMiqnGz&tHW3?OV#o}?)(i1ezCiFM3l*tM12MCT!I=ObuFgcd-y1+#Rf9P3t4gUqifIZ;hLJc#e){|FobyMR?tYqDsxj^m%I1 zrjm{C*RDrzO=4>hdAoLym{*q*3RmoKXy*(iEL~lg=eMEL7>nttw@WgXs-l7dNsLgz zJg+8(w%olUuNu9L;%Vl6eSyGSZ7IZ-)6lL>e3 zrc^l<%W789zE$Yv>-xBhjtAN|c1ZDWUKV340%tuE*}F)y%S@V$BB21<*-~aAR|ReY zdezzv2!>P-Fp2EACHJ|Nh8R}nkL;gm?O&b%_qIS!mC5r8=_tt9e)68}>As3-cSI&+ z0;hKFK`h_&*~_=hjD5=?5>_X3G#z~3InO`}_5!zoRoS5bPTAvf&s;!UN`oNGc{->UB-AD%Fnb|6$&ncs7w5l} zDLSkCM;}Y8w?HttsZ25DUPDmwPX!wOho5>kyth7a?}svy4=wi`j5odFQ@#ETGaXi& zjbv0{qH@kCE~$GgEr=W@lqky&O$cWe1GLRgIph{)*N7 zetkb0kcs`F-27$+;F#%s>Aj!KnN^zzvgFgVj?qFnvJxZq_9cdnUN$d(;yg$BZO1P* zD0FHZ<_@77iSbk9#ZG*plVswvrOaa$L<;&$`so43$$hX;ppZ1Gw{`Wgu|1YBM5qaL z9yY1REq+v)7teBR$q>()y*me58jx=}$#SFyvUt+5}=HFZ{w<&ZZ z{Q}I|`R+oi?d%!3RSr0%Ay1nl>Yd9u)i*n#5%8&A>aJvcWRO#MuOFiSTcR%ha&)eO zh_hHS4ys+II8q{^tgSR!VpF8gO21Q7@^7HeGla_!+mWXEt1c6GwWciV~Kzu3kc{uujMpwEP5l(#Pm(7{fiW-k*b<1j7VD9~)opW3rc zfYk_+O=lG z-t=ntZeOHV72K_;#<}%gnNOs!wzF>Kvjh@MbfoPcThbZH~q5*&rT_1sIV>MTlr)%+>0+H4Z{+C=_-nC?C8-f6R~b+(gUMt+U}c+nM|- z5@?RkPvYHlOpqlML4IOp?B&S&Mn5Hz-WE$W*j{W^K7{0=I%`2K4xnZW_UU0V`T_eY zE*(Ny+(g|)H|A#qe^FSh93%@0y7ol@Sq;ZT{hz{HU;@6Q>~kWULtB+W9Y^S@zv1)j zeg>Bn{6$^lU%1r&^jLjAq_xO2e#2OQNEO_AK+9(QMDuG_XUnpb041z=ETp9otvkjJ zVZQdMSig7Ru_J4&XOB=Jr?(_j)V(?(SKrQjKq*oWpcY6t{RXawG4)6c5V$UieVWJ! 
zXGYb+d9E`{=qjD8Bpk^T01xJ1>@D~3c&Xa?^s8M{ z(6ZqCz~7LQAO7+P|L+FgfAePcA1^;gzEAue^88206Hr{fMbR8=r?W{wg$p3#(^nZO zob3JNP47vd?UwwqL9QhrMZ5uboJBq$jhQQscu;%q+171btWP=IX_ih1sA#7RuC$a( zI0=~Ec?((r#8=0rkn_>2Y+(qOuK@icS5LQO36=zE3c)LEHquE~=={0`9fz6q<;+s5 z4HHfQ3%R}bPU&ZLE(#HwjCpYLut5P#QkxMKFBX`8jq@Pj2**F*zj)EVg)*i&H73$X z608B;2c9KTUiKS>#*~E=J0hr)ZQ)dbtERM!j2&!( zj+)=3W&hi()Od$wP_2n)=MZ>m`otamBfs$k z{?!$XiQ!7mu+9Q|FF>+N0Js)>D6V=7TQw6HbI@{8U2p&N^q1r3o<=cm5XnDrflK*l zl`BO2CIP1SaB?~6oX6h}siSAmzkEMrhF4(;s~ikZnN)K}wzz-p`?5^#*zE3349uTx z8(PVk80#4q3q(PWXllY2#1N|NETP28L(f zW{in#1k0`6e(OJ5asT6I^1pa>|GNh}@_q8}y}|#>xfk4(1Zv$j04I9(6$O~&hQ8n~ zf=_7kG%}4??h9)9-H)5zgo|W80>guOtl^ZKgJ=$@U z*=X#WhUe)43rt_;mLq&xTC)3EH8NFIe{q+-H2?9)*NXqFc=-=M>Jj!Q@Dd%&RMtib z117#;E^jU^wVOX|2_IjOkxSQNWE02KqvkeHV*3K}7BmeKwxT62dp6d9*4(I<+;%0v z_f^yLo?_K8RlM4k&hX)jHw8am7WECg!PKp1`DDnC{S zi?&3Sx?7g9otSpG-aItz$(n5_rY8pe0x{sQ`xZP^_SVFh7TTk6gn5t=+mj^}RI(z} z1rWg=AB~*j$3|=bQt!{FKOREW&<_9XcnJ79afs*8p^Jx({C0?4|1A;LU9u$C;(YrN+TFKzrMBagB`Tj!MqU|G(FVf9fdzC;lVw_YZ&XQ2x0? z`Lh>BNuz0z`D`Ho4-X2B-vMT?KNI~?{5q=u;AX_Mx{lO9_*`w&z{Ywr1xiwjOjqsW zCx2AKA~wipps*lGo(*2YN&E)ZATEvJnQAyp7bhH0-o$$#$34|q zfDuX%eh6U7ie>DxJ^WqSz#lf(5ECpVj1|NB_aS+}9XTytUzP_f6cfEht233#~d zw|NmkWS)e`Ih;{Xhl1@xSOd5-^-Z2T_z>HW^=p1USA+i0mD6H7N(lBG>ffbU^n%Vj zj0r7)N8cYKHsNEnmT-c90K{-R6jL6|7M>#Ov4y1=U}i8&g{|yDFlZ77b)$sYu?)CB zmTk`@)XoB6VwXR4(B`LKvyN~3Hxfo;4Jxd|BDf$LGLyV07+1T?Qe3!o2>IJzK{)=z zbpJjN`O65iJ=$%fEX80$n%W=O}WfM}lA?<7t{70Tdj5yP0(xcedgFb=*!JMRI3IF3(s; z!~bk>tVWZ40a{II{EY)Fv#XnzkSEin$Q8K&!9g^c`3Nx3 zV_Qa%w}E!rRPaF?T6VM5yqZDW5@bS_ca1xFo-h;{-#WtYdmg)A=+N&ZF;>RcF+WK( ze)&2szk%>Nir6%{4T*gH4VBD}OI=FJs!@j}_Mrk*-wkhw1dcLbc8HUnw*Ex$l-ytx z<~-rcFc$o4XvWlH(=f2OEK(E8$!~0rFEGDMb&Clj?)BL}cOi@s>tj9e%s@@ivdeiz zYJIb%BhY=)OwT9FJB)ST>v{KMF2%++8h2#qMo_IB{U1Mk^y{+7rluoptztCv85A1j zrWbrW&i}Z7E+RYW()%y%e2BnTpPh3)LklrB==z>1Qmw1Cl1adbebmgm>Ng4+FIC1& ze*c$SBv=LLB}+jxJKIy-ZkY>A|B?VAe^Y3(5cB&XQ@RXJXA(<^nLoNW69M7q4(aO=pP&{T5}!uJZ(71_=;TX ztYz(IH}iVAQY(Lm^Y|VsX0pX{R?*Ej0i&skJ7NCwoA=k<1Vg!@;FDqEY%^v=vbx9opMs6CJ( zj7I!(>2w(Q$gFTwrz=~7b%6~Hjpqt6eHD7ZQfr20dF9lY>g_WSvVdThSoC~u297*0 zda>i|RH(59*7LJo-A4422Teub8;#=;*Kzv-PTW<;h7ZUfeTt%Q?xbk$(d3#J7{~sQ zQ{{s-=X`k$hU~sBlEiy&HWrraK zOljkUozUF{iG(sH>Mf1AeKY??%fud_X%;Daf2F*k>q65`LAYw^$;}r1u9YnOS?wT& zB!@yyjqHz{#;US`0cbE`vwSd8%2T&}2H3aH{`%3MQ^Lu%m9}N+trJ}iw)h-E0hq?cwCp z2vV;-(=A587S{UVbgyEB(UxlnrCqm3F>l1d5k3N=)y}hl4&Go`MW2{6x ztk$#B<}4L#UVg#hwtwsZ5WHQK5P~Y!sUKV0EOsQRw=FK#TB*j&EoVZ3$p09f<~rHY}XO^yPlYrxQRj(K;NqVRB!DEu+tUjFF1)9kOU@me}HdSh-g z17VWEf819q^?Ktq-x3qy>oVh~&nNzFEZx8{GW(Hxm}h^xn^MIlXW80v(Qm7Eb@I++ zu>Ivf?$#_2#BWwV^-`kvn-`Yr+vjY5>4i2GjMcX55Qc;B*S-qo17+X#EV>g_zuoBp zqkK(^V9iI{Rk|VkI~Rn^sE3GB+Ljxnq+v4)no;jBg0r8NXOyA~r6?ib z$IRl|tI7Ser)~~wK2III~~VH*qyTG$+vTeXZ{YB9}G>P=JisKebk#W{C1}fgyhAVjnpGq>vAR zDf&`}D7a;n#8F`Aa;lA;gMwIa_C?ytFfXFfr8Q-q2>;WquTg6Mvs}S7F|Zh0siJcQ z!MYBeLMn~^f=YI~6=kX+>CU`;~3H=djt^HV1>jTd>I8YJ+BtDPJ zF51ATOo|-!5sRZR$0w6A9oRcYZ1c-XYIAS}L`%oz!ZAQEFy7 zlOrFL(@&P2M{GC~p4-2o6+a`N5?luiGMa!&M#dclgtzm8X7zv|h>0@f98sEWaJzLP zee>Q|d-frwFLLrAH@~Y$mmJ%Z*(>?JZh8YGqNC!!Gi6U0ZIk|GbOzzQ?LXQ$}DQ~84crN{p=dILBMHUs0@2La@}nLm(|iwAzp5m*5fh~ciq zaMJ0h17tlrp_8~11votUEXV??oY-k@bM;>1!5;SxYH0~YWC1U}q3?cUi{O)~Y7N`B zVB8JRkO`O_E6bJniM^xs1wE}oS&TRN5qO5wkZ0KL%yeu=_R`{y2w0z6F+TDHEa1(v z3g&C78rXVFdn0%x73b_WfmhXzlb09Eq)!RAd-vzl`7i*iURoMYUs3h)wvI z>Cg1*VI@O0W(Vc80IIMrjH%|Iz*UaDMw{wFXTnZUnT|DxG6w=}GM|d$aciB3y-=)7 ze9e+UV-WeK<9i!%{#}NVTl6G|M?fI=LUY}?hT8JfnE8hn-}TKqREpVqubFN{=BWCh%fbIz07QQ zLQ$yiKz}xoR<#S{g3dQTUz|T!JcZ!K3UHnb>{0}t>c>=UT=RxJ9o@v1w+hu|5g8Z6aMex*%wJ2IX9$nO?-XP=ZO9*Oy8Sd1WJJ-+ 
zo{T_N)bo0E)W4^n%y+BK>re1dvyIrU9qL>8=s_Xah*6ZEo&KXFJzf8o)qSiVlw=SW zSg5SMrg~B~Kl^zeduL_2ov@tw5k`P*+0zCXSD3Eg9?;|TDo^*}DmMg*@vJWbL$X9h-7OnP{JHrTZ znCF+@#w%D0lFF&DGgN=43Oka%LQ)yrA+6-;idGp9(%Gp)f~?fbfLl3u7d^Mq{o2vT z0TR~nGQ6BhlXGwskI8u^KUc(}TjSJQI@8F$erm4U24-XEHi79;+649>C>WXgf zc`2a&z&}R5#jEDs(V;?}H`al1f{@Afv-lJsk$bF*QrRa6tp*(Wgxi(Iqn0s>19=V~ z(W*+bjs~mhu*ez67DBe?y@IgcnY*`#kDSF{?3S;~#X3jUxwL*r!zk+(!pFNDb7Wb` zLylxOyIgRPh<6y5YBcTw8a!-GwjAr08(@$7Up}IP_qCU z-P7Z5au5YO0>zl=od}W&1Tp)m>G3 zm7GZZcAu%r84Ylgs^T~BHl$x>`bV<|-CYvf0$x5gfm#lgoEWukmQpYjbPMrn-05rC z7gMbk4P%XA(8BOOhyF(H&=zHvRPF?bRMn*wpM`Ko?g;q#`X!EwUM;$Y~k&4a#V82?6G-i zAKPe&RSQ@cJctJ)rwcSvKFn;?B!6npKqx>Tf$+Mcc4-)!(kkC;{1V0ONJfY_Lq08>z z{n~5ONPiP6%|efYc)Pn**(=t&riPxvg&x#zf@csj@dNd_J6!3vV}q-vcgu|X3HR;a zTD4s$lY3NdrB$6?T0SUPqdXn$D0ebeZ}Q?;xhGP(0@WF6q5}l1dZNpaBd~^K+E!v& zFqzd$7*jWPT>^{DeExcVK>)~jN2XJYw4h1d%~O|1RSQg%wI1>;;f~r~Dqh%%b}LAu zQ%0?Kq|ix2BR;K6EP&-E9Qe*EjL|V+rM7FOJOH`!jxdBkprm%&MAgqUawW@ba;WAu z;!&w@DP)^lRgq*{sV%CfT2*$L?N_1vH`Wx#@|zqldgxZ?k_9J&ZNd75EjF^kB$BsI zl5ARGJp3C(zKzF7uEI#05k-KCN)kMMI@eT=4drk8wfTo4^h24%RDug&hwK5Bu{YH+b4KB45-u~+mGxBS#7;>HT6 zb!KRdahWX0?dah3pRwRom-n5NUE3{QuW?jdLM3f9yYSb#kus3K{1RTWD!=Wu8l2#l z-_9<18g}^xDP6WF@)FmzW=i9|jH~iaW%<5X!l$m*ofj`BT*%auq17=}P7VbrkBBxM z20*Fk3wTpOmS8{-;~4jyAvz%#seq8B2$CKfTZGZU_T!WfV(zqVLys?bxrc1oktHH` zxP`7Q2KM1jHpsP=gk5g4@7mrIFIFy!l|P@!2v3?+@~*436;gONY^oXFQfGCms`cIq zOrYp>Hu(B5+oxyMK;AzWlCW@EBI9I-5T)4Hsj6(tXF!Rimj|`K1GUYcyGlv_W`FA+ z{IoiazaI(}uxH4^(!zn}@aPw=dJp|P`0(YqIYbw=%aWW!yH0Psko`_Dp8ZS9<0_4m z3WFSY=?D1js~)_In;p8=&j*z90N?mHFXwbXn1WbifxXw8zLXeYW1J&>X=`z|nM)^) zah)o93|9+A>HCh-Puvz`WfI4BBaf&qEHb=h(x~>)APznI<`buv35e#8dV_&ky#~_{ zFU>yDSoh&P9YxxbXHniSim2b-o73Q7ewib+rWNak3Vvj3{(xvRDBoy&SG6{Rc(^%O z`MEWtNT#9KeI7GTVV14y=bCr$jlaIQ@9W_lG9F}~U^#RV5xI3aD~B3_z}g6eTZA(T zr+TWMX^23Qv`%ECE1#iHD~p7S6hQKd`Ontvm!w5_9vcMqzjIDFIoBSCM{6;#$bN7s zx@oE}7{u@gf)8b>8L!I+YF(N9X7M|VS5vl{7?b-rcgHdIW%8f|;Y|JboOO~T{TRN& z6c+S?!3PJeIPhuLY$YM@yXFp*F^7x?5>1c88TOtk0 zkzy>+lQP--;k0(72s5+uPkTVua#ulVkQC^2KxUORs9VfE^I`0H?fMrm7&B1S$Bq@n zV4y}!`nXT1zp_mmZ&}~a>f(6Xl_=8#@dRj^G8Cf?^eK?sg&N5P?pGfw^GNs8uv)E) zZ^s8@^Sm!U4MeosW#~}2^%k=%$*n~|g^28pytuKo%ksE?-x;vD3MDd?LQ+|$5RK%o z@oF6s7If$CVolnYY6{NX#>6o6$9LrPlhRgdvZSdR6q%oXcl|L%g#{LV`DD>@5}n4 z6a7oZEGS8P<7v%H%THOmW{<82%JVH6Gx3_(L1@OY%=(94=MtjZQlkc)gfZeB z9+64KSWqrllG)r8=yS19FTbrdr=Z$#G@Nf;M_ZAN}jLt`UE@dbS`R{z zqq$Zi2~iLp#vXuH6+&1-Y+D+dgl9rE**8*qOpXxfRT~x*d!{Z#Nr5hytgWP7F$N~X zm{|YoQ)Ve!8olMhR#RE-gf!C$|JIt-%lEAVwev*0MchI#dM%7w&DrU%;}YEWqE)gdQ%dS(`pY;5+D+ePzuLK|wSn3feUo^qIq zbiw28Ub?m!V1CR9%2bf*8M<`yEt#yHbE|5Kk#Q+RFDyZyr6ec1HLUx()!x$FqLY|re}u~ z@^UyMWy+S*Kfxvc6eG}5_5i}bghrE0p|Fp(p__;}8C&i>I{k)_frGd;QE zl|y%zEq5?OaX`i|CXd#BLi#v>*fj07Iq?goVjq;bPkYhqx}gczA!5m4I7OR48=#sl zHykY&fi_jzy9Hxh{%{tA>9Qtsk(tXg4zswSzJ2Y)} zNFXC2yo1nPGoA@+V~@k%}Y7NK!MgHlN9{51Faof#-C&` zbP%{+ucsYE#;tC0K;3E@-JZN-#_&>RGdM-e5I3 z<(fC=(l&&ygh$!SH7@W+e0xtdDYj3>Sga!#M3j{4v0L+U$osuFTPD^O6nC&j5OQH? 
z&`ac%J{d*tS6W7{4m*5^b;mmqny+G^@0XKut#o{y(x`?oyP6WiNZLNt*U1^PS!Lph zwBO72VB(hMg~=!D=_Ho54mhdRf|P!hQaP#pA;kVBEjTGrj(%sniT0p?9I|)4V=O?( zwc@n-snc(r2`4-xmZ%xl98VYDRA_TY=P2Xjkm1JWK7qTtSK}F$K>GD*Ay;E=snR#H z5z!jzHXVqtNXu-zmq&ohy$cv{RZ)lm<)i9hY!Q+L2iIW$6i7c3P!E-84J=Qf;fb^r z2s%Q6ApY1eTuj21POQ?HzqU;2vz)$-Plbh_m_%_!}QU9DBw{OOGXDAn^Q(i@n zyq8e{(vCWvazTEyr$`4H4eyLDnwz=1(-G9SrZKm{*c?H55qS_i9?v_4*w^VA-5}vW zL-TC1DBT@l(3?~O2TUIYlgAuo#OZGD3#>VS#i{l??Om_KPnn5Exox)fZDKMN;m<9U zN~|yqj_Xdu3)XDNOL5kh;Vf6#bf zB0e4~SH;|RA0?!RpoD7^$nJ5@&=wlE{A3CI#hTdcW{3KS+FpA2&tuxdfPIX4Qfa+p#dKdN#y4Gia|=8mTetq zRI23p_~(s3V#6NM?{w15?a#Iwv_^(ai+SE5IY8}#gz_ityUhzPuR2B0MS`y8=jppP ze?vit!KgWzF#t1^Aa7?#>OE97QD5^sPtQzb>-hTnQUlmJK4BGz{3}!Y0~$i`Sq@+w z0Sm^u8%JMmox3Vh;J)Ct4h~MWbBxzl=O$?l%KrAYPul8K&sZT=@vf8IBeJ7``AcQ< zBA0$O*WUio)YiPmkiBAks!o@Lyr2PTY0Yn({*-NUvd|~ZB|0QemM5);By7{B=3~w|QfaUAMRL z@R9si-pcl6Vout3_QNJrZaz}!*UsgOa16LIRSng8g!K3`u_^%Y4?s>3X||y=Z>S0x z+TfE+RJUPYAMq?)&k&X1J-#$@&G2LIFjcBeKhvn;XX zadp9}gRd>GWPQoN6)!T6h#L+*-}`Z@a)Ln^&D_FoM$&mYQGCRuV$=^nc*8Zi%yY8F zQ3PZOlSBwTg?Q+cTh>(9l%b(?=z5m>^MSd6XTQ{q?!7s#PPh!SZv}h|-$2^UcP3|1 zFUjlUm3O$31bA-|kgbuks^EL3{uTOp@XUqUPqUV~qu|J;&AuZ!h^Qwku6gps!bcvx z`#b@hzLtUGyZSwfJywy*4uxe_>5_K2f%_PnbaAU1GH)!32IKOr0{Hc;4^@jT$3X+5 zpp9*@9urJ7qeGdl(F@QzA~<55_W;1q-&TsrRS`@uQEH~8r?^=Lhs9xB*O78<+z+o( zNeeQer#GApgKNwCu0G!I2@B$XFd!Xe0LgOHGk zYG?G1Zr3M)ZtWBx#8w{+cXK%})ERtvziFV7nBV=Zt=?ox4H4>BH{GKVx^=c4(};26 zy=!>WZq?C&tHf3wvfKp~A+UWSyX(^#XR$eLBF35EK2v?b5z#m8pb&5;jJ*7)u1OX$ zJCQ>SyoDPB zEUx3WPqa&LV_jD=(^3!f!S<8kK`jD)KvwY%x~kd{`#AIr#$TT5F1&qphBc_DY`Oc! zRMVj7NkvY!oXLh$18Nd;+r>y;i$+&v4yp` z&@y@%w-s=xJt!pIG6-eo260J^8XV1Z6aTKzZ~D`C7S9lBZ_oCl;G8q#E7Od1h8cAO zCEAIi3MMZoH;esxTV8}KLH?mNck)%!IKgObmd1=C z9RNO}>aSWuXIGweNGW5)fc(Y46r0@RaHX9u%kl*n{-(l$mH93a1ApPKNL?x$6JH5f zMrrl=J*~8=5LPLHYOh=6xTkhgs&(|q7i&uyH?hi&fm4+gb>k}LfIScR+)Lo>e&mkM z<`vb%{tVcbZeqw(TAAfk#GJ}jv<00}j|mw0BU^1UsRnV--)BSnX!xb%@|Tp^;P74i(GcDK3h18&(fWw=NsQbfslG( z#$DR~Asv zQ+mr|uwrJz&^!~naWm$%?J07j0mt*sL9-G@(LQIK>|v&-;bWhthZV2A56!aw)V9`I zSMKIOgf5{@@6=u@tg}hmX``H3c3#&BtlFzT^J2^JsWCJG6r>y$R^%pJW>q7rWL1E- zN)H^_0cC(_jzF7w6HI7BH)>>b@VL$}k#oJs!y};`YJ+r}l>zG}T2#h48si05KUDA* z`;v;br}QY{-SR}lbnLasKD!ZuwC-hiY(<+*Y~N#4W7t@ibLT>iv=Zc0wGQi9uU78$ zQHIQJMPXTgRk>1UUs2#Nzdqf&!lH+uA9V4LHTx0^%}#RF3PdYk^`gtLms7FM2ZWTK zEq`*lTa%-|2vn^@>PZENi zZE0VVwEf1RefGh+($3wI9nZYNuqc0+dQqrx3m$&wcWvu!pZc)3WKHqcwZ7)hvob!8 z1)CO1S?*xRbIuL=2Q3Uug3Ty3T=a=wB{J?qA9mNAP`c|iVe7k9MbSJ{*YwMJjA5-% zWKidjRpIMj%2fhp=$V`RKFLNjg2#L`1;y;+BC|4)8GDN z$0C?8m7g09X0U)qB=0k^o+s4=gg8G(&OwR9T2etL*E^IanyAzY+KpoI59IOEQRB1C}F>zghGxn&0ZGIKMAcT&^5G{}xC zju+PDXmd0B#5cxq6b}==zNNeC_ubyp#cM9q6LRV^k5$^`!atCjCZ&Y7JX}9??`nk4 zu;l3B9k3C>B;@(YHW={SzFknGD3NhO>ZQ$NTEdA4RdW@d#O|ri*DOn~Am=i^+>1NU6HM;g)joG{B0c3>FL!#*|1Pb^dmgcTEgH}QC=N5#fQTRzb&4%y&&Z&yZppHA zHWm!C8E%uvpJox+OM<t?z-M)RcD+82oH`+u>0N1G|Py7q!5eAKhO!R53^MaQm zf*2ViP41>&k;cr(+Z9qqwvux*SL9283Lr@|tb4N5Xi*elI0@rZ$Con|j~x6pPnCbo z#rh!;R}GIonU4)BSSj}cRxzD~6=MvnW(@BOZr4>R50V2CX zAVLB(ARs%kCkO}#Bp~5YlJq?NepBDnJM+%WH#JpLGgb44s!$a|&N#Wuv{j*-&Lf$b{8L0EH1PpvIC-bR{VcIpwuE{LNnbMGJwL}q^rzrz| zzEJjR-8uBlxApZv{7+oUe9lqy%>Yejw@wIT+nf64kRFO=)U*f&s9#ba5ggJ-M?BO`8^8_&AHY||aL zt(6{)LTmZCmv|k}Vg|8CR%G~RMC4xK#CQAB zR6F~*UseW$0<>bxC5q$F@@82K;p3N#f<3J5dtvEyhL1&%*p?q1(OzdEw#7A)SDojU{fh1{7nD8n15p3t-1m zFq?{sQ>XsWor7yN(Ye0Ck(PQ`p_%rIf@nI?t)eXkpXK59;7|t@^B^SkZ=9R3)2Tt} zY<3RL!6p$koPR0JE?0Tts>M=dVL16UW5obEXn2)&dWt%IZ}(xa*;eIvx0QsGU@H5& zr|-?Ph&*ZSLI;0`c72-!P4il5T|`jT2qM&UaSXg@3Ii7{w#Ha7Q9GGXezG0&fH9S( z!GV^|q0)Lm4?psGy4N}6qmQ+D_~`+Lc2X%+8nWO#-5CI4C#MuLzL5laO|d~0&KQ5A z=BE0z@lt5g-eca_CF%rql5o^*y-{# 
z95QVv@qPQ1c%|o%KT4fZr~d|!HACTG#qo82bQ_0ZFM3PeyKH~y&+OFIqLN}zVb_TP z9qC-T1z>$_!~zW{ZkZ}fIZ1-Vz9q`$z8Cj`aw$kM8B_urSv%Zi1tNwC$9~trd@ADg z86;u8-JBv&p}QA5|2dKdE;Pd1oPClo=5q?2bSUT-l+sP-eDCe1d^dE?`mvY$&ykI5itIHWzYEg`L_so^vW}pZ*&QG^FeEWv z7>EEFck;6Gf)mx=SlZ*MOyT5vg^RqiJxq(ts6`Q^YoxqvX5ht-dhdUj=7llj;Pyi= za^{`m@?#SRh#zrII}(>qQX#u{vU5%K0dV+^ zM{$#&4=72VGQee~AAPY``z$JizMXO+?G;i775M(K96n{+$4(Or?~AIr0T)?!0-e1~ zFO09eo7Cr8@XoXyjjHmTpX;u;E`uX`UXgiLpWY&TT3IMbr(A8%H6lau(9*I+ZOFaf z29)=XRnA;VwY9wk%u%|B_~~S#K+fkF$7s)_6WKVJ1L5wafwOuf6SK!}XhyJ(3uRG( zxBHpe0I6%ClM3?PFGCrX_(Fk<&7{GOD>yXv6kN94Y$0Q=H+N8X9%>BJRu+^ahG!dat2iPORQT*swl8a*lnj6N zEzWfa%8$yQ?Gcyu#NGF8%A)zX*LMI;jIC2!n%es(gBeV2GZD0+o?l$=49VWMvgAsW ztqN8}Jr#5ZHV#ewt&Q~+Z3(?wM6Rd(w{UniqTnJzc^eq)DG#b$0(`n96 zVL&Vq2<&O?$*J|1S7A-!s{){+CN6({@e+O*bxMilF-crj;0FqBV|izdUX5xmjk;Oq zFZ3ebHjmNCSVV|t!tR{1RTj-xq)A2gx4AM%X8GmJbTzX<>(<;2Gl-yC_=5b<>n5Wf zi6GiWt=37YH_9Vj=>lcj)qH7>lTwY{dAy`Aq` zGE@*oA5^(L8Kh$MC31FHGytnvK?rRCWEB%|Zeppl@hrVJfBsd8_Y*VTZqc`p@vtfO z$Jsaq9^?e6nV&ONe@{+6l|m}=0}Ayvo!$=%KN-O3OL1Ls&i2GKkXLEyI)t5AN7P72 zbrrKNIXSq1ao6D-J|<;L;P4X1jp^C;Y8pASP_i_979i30x2C4IYQHHwlb0O{X@Z_c zUXgB^5h=$Fa!!>8+I4IEyhm-#BvrV7$S-IpJiI;32bX(7yHl)aWE_!q;%l*bkKkzW zeVz=g-!W`80e|BA{o!f!^}pZ5?C|@E)`|Mqoqh8D-rngB(mUzg%P(LeB&+P|Wo>GJ9bIo4%&o+% z81SUPG>ii7pBQVEr4pYaOrH~+M-9ht4gN571Gzjo6E}JNLvkF5Z}PX;LqAO4U%c@} zcJMz(z_)oNdtM;5eZYM7cmbMsQ9$IovMK9J2l%dR-4x1K>r9US-yCP~S`}~10}W;2 zzhe())IS^Q0xz+^C9fZjmpIAq1>ZJIfo2Q~ojKr1u?^^7lwNxe0;+uQhE~w^(t0BP z45}6wIQgcrb#zA9$*-PYGTHcXr zbsWcYypiw8^d+eOh)nQGOPI@pmj%sqa#O6O@rzoR15U}$X+r{^k~6Q)cm+dF2I(EX zCR;H`TVBF|&gB}&i}Zs}%VAAwZ8xIAi*=|zU&b?A;JV#v*F9nZ$F1#B!9yiLk*4ql z`X9#7U8uPfctsH$T`S#nZj;m|V z4JS8p6emBmaMZ&@Ly>zGM8x~Id$C3ky$};*+F~L0(0zGgH>ZfoeZ-1ea*Ya&Yj2HdU)$36SNa!wVri}W8WQ|*X%QtRu%))Una*KLBwr?$suf+1cNC~Qus z*u!9nD~!1{{Z1eGpCx7G`7{VOqSQxTd5Jw^*HNv0N-I`^yeRxL|3VYYFd!U(|4^lF zHK@Q72n2}*3SB=ifACIm1OZC5f>6MurzBG)QRw;7@NrOg8E1U*+dQ9TUE3Jhno;9t zQ}^7bxvANitQZw5t>a^p|L|8%D5JPV;6|weUqZKy-d)02dLASU2^k^h;*~%1)C?nx zJmD{!V}^S=B;R}cFNJG7)hEj8t9rjdx%Be=POTh+i3Zq32Kpe)H7^z%n&79ht*1Hf zz`I2dfaCapQw(QHq>WLVJx_T_f+D^@SfZtZqvx-yzUN^sBRV0D??1sSWYXdxi;eU>( zXfm^SfBR-c7-4~vWZGeNK zboP`bv$iCJCDXGYz+6n}fB>}{n$SBe@fJooK4 zbm#LhO3kNjD#{}Sq$DLL?{Z9KG`bw^mmvW8nhBWJkeX+)|I8d;w~C&`!_H+WNrrc! 
zYs>%Iun8~NEj$!;m5?U!7QGsr3qtliWeOMIj2>36GlA{>j^fZC1axflml6r`s z(jOE=mjMM1%>5enA9Uv$wB}DAqpV>w3)bqtwT$VjTf;hozr}(EH>9*zPFIO(J!2sB zx8Gl1q8>Yjv$W3?$79Q_AGrhbLWQ|?N6K^XWwDlpEr-(utk(Xr?XNOm@e>Xw`CRTKS#tk zm%tcFx8=1IwAAvo3(FRh-->ql(K^pP)nEfbv3}}kgigSZo}4(b+L~9J;)(il5-#<1 z6OEt1F5uSog&&XS?D80D_SC6P2GW)&5BoFMzj1lxa3a8L=1#8f;He&+gfdk^FI#r8 zM_yo9XU@~%#r>Fhw6ae6x_tt{cj!(~Y#srI>Z^+S0?)7EA4w~LyvewAIAW>_E^jf^ zj_U{%36%qc)CcP5!*ZMp6WUGJAZI0}*I0Z{$=Cmy17e-qG&)dm+E$Zj7rbi>;@I#r z<-ONA2O)eZT|kQMAYs(T{Y8}n0cLOza?<2Ch^2CYo`cj4o#=X^UiRv{UDFfGn;#kJ z4}P-oa{QoE)j+gFp953j5T){s`3GPE7o=G3n{$f#)fRQwAY{ z7H;Nhc)RCp6S98Py|K>u;Wg<%dwhC++RKJD(%JaQpLcjCF|XctocCq5Z*Auts={V& zdwNYn-$cFB9;43u{u3>L5I_7_jBGG=Ww8@LwtfiyKY4a6ib8>l+tn>CrcHiOi?!j ztETnMgDBs52e97vD{oynA~adPVkd&J{ho%FSMfc_kQxH_w0AiSOhA#evQVy1u_f6K z&{4!d(dYXZj@UBIcnTCbgAP;N7i2|3FqV47^cWBu=!ZG|e0|1?7LwjNi6ty-XqzBM zExipA_qCI?F~;gj_3JH)J7%h+z|>8+$V=}xdO=2`iN>LLmA^~Xr3`neFTW9li5Cwk z7vn6_#5P$H&m@K9y87LxoqgX~s}AzkeHvhY((J3-%tpKKu1`MR<+>~eT5K)?nvdG> zU~4)%{sd_4^IHQDL48a~YxIzwy#XpBG8Z=AgF2RrL6QF)5%xTuQWK&#c-3*-M5+?k zzCUWkEqlEloj-Y?9iTT^ia)ozRR2)^=X9K;+Q+|gg5G_k+CEBO7;OYa06*TDuU|D# zsA1KUb!uhk9rB$EN#`og?~Wx@kP`adC=Pe-bbIdfJ;dX_J%nL9x2@{mM6Gn@XfGKk-udOfPw{) zJ%jdSHMcB>02eIYOHA!(?aM6kb7G!$UYVL|fk&IPsw8_cs^w)Hj*r|HFO>g7tB5{m zJmLD=Q|pnfvvN*nRh|%sTKN0Vwf(5|`KqX*qN1u-$A8eCD}R10!SQ**g}fRchAue@ z9r9H6^45yl@G07&ZNiF*5iChTpj5*o3h*&cf0c|Ei;|9fUEt3JA-ZF~aq~J9uu0$} zaGe3>Mh5&ipb5J5Jf${~$ukECl_6400Fv(}~p+z@Jh_OCl-s51Ad?frHY+X-BN zzG~z(>HaB;xGTLbQ#qbHblKzlZ)nRzy9`(^OF;0twVHWhBE$h?7p!loKt_^vv-I^X z4#HE3wpL%sB6Rmw7ms-59`AsAVf$fCq3_U}Jz`}ERPQo)X|w#fH2XSz;!uszBzgk; zzZK2K5|JAuOR%WfbO0|V$MVGPV7eAM9;zM*#_$C`>^*u^P6rt~ zy);rf=2SdC`gB_^5KH&#gr4;OAzwiEFy6JSL0d55jAvFS*LqFQM;veS*r=a%AMh=o z{#&`8xs^~57C@Uv7PfD{Vu^)KRNZAxADg=bgal~)wzCBpuW=+2u8A*9<)1ysMYJ~L zz6659x{XT1@;<-QbUBnIUF;ewOX?iUVMo(rzW zp7B~JOT|zjil5hU-T+3JeErbKZ8-Wb^sl3u`a2>3$-SvLxrcXw<%`8uY(6Tz92i|H z6SyO=d1j+kR9d+tI_j@=<%Q2*T3pj58Um&9Uy2QG9*1kxZt3Von_>(S#_ z{U^@H6>dc%1qj>$)M<+#E(Y7NWj;l_Fr&K&Uwy~-WgJGyf=hzN?>RR{B4tu)9D}q1 z2WqbN8t1*9ndN4XEvx4i+~(kdBb5Azg8l9DKgf7(KMfn z93zM+X$4?hA8RLp`jKJb^$rmxFS-0nhr~!@Tad6Nb2|_3>g-<_oZ3Fx!W_u?Zblm>p@ay{`Klla~S#qsyIvW$?qD%^H4Vvhgv|%OR)T{L_$iE&yzM zrbqQ+y}z{AhrKE*)uFg%Y5wB#5XlWax|Z{o?^SjOL47t+0TWpbmP#=96hpV7?-E9x z)TbQBGB_mdRZ>}ruXC}cs=ck1;EP^C6UVdRX2z)K{d5~4(V6fcBuGwyZf?iF^q2#* zbhco`5@zct{?q)mj|CHqHidd-Mfw=nZ_!Y(w?M*J@r<-4o~%nrV{BjozIxRQq9o1)BO<|8sL zi&~{s)H03BBn;HdvemX{vy!_cT{(h`c#qtBRW@h4>6hM~zP({)=0GU&bw=OzA8XDO z-NRc6oPurJBz5fBsPq;^a$7JomT4DGaUDi z0yL9hZM&(+tdukB1$@Z4xlM6zImG?Pvsb_yQt*XLLC?quq_c#4h@X;q##)AL?4BJ6 zClo*KKt!N0wF}y00h?N({1CWy38enO@#ux8F}df}15W?;=EX*CBYKo(KDBDGpXgPN zo&4Wg&BSE&V4i?p>R?AKE3woN?Ax`y2QyF#m_&y8(H2xma821dTu2+ z_Va-ZiB!z}O0;hUkVEh72|2y60b(NO{yFl1BGFB}8BHe%am*(hk2kcBs*LT2)Zbi! zr%o!|9gAwC)s5|+_{ee6%*(I+i315rhV0=bOp{ z(X3TqohJnZvDjZvZfQhjiJxU#%g_(H2hGThE%t68ZFBEVbo0k4YW(@uQM4xUcg%zT zB4G4?aU}ciL;=1R9A99BiDjcexd7bS?h0j@b0nWc8*u1qLZxAE{PU zmyT&MI4#X!vsZ6D)T@E?U#)R+8Q%`lYgg_+ux)E@EI!Hs@1jc}q`?ep>D~d(Wf3MTb3MJXExxTTZ8K^r%Y*^gMoV{(TI`(Z(8TT4E0jBpJ8Zjwd#PiB! zwI3#1@tLkWh$1T{Dd9ad6uPtk2d;!qE$IS^>{W1hQUI;lJHWC?01pC49O?t9GcxT4 zOXGk0kVkmGaGb|*E}Bi+X=mOf9Z*dL-kzIA>;8GOYLgJf5)9JdDg2HJ8UOD?@E;a6 z|K+QnSa8nO2DTBnVnGN_V3`NDt~eecfD7pK0i}bpmiPzDiU*V6Tm--JOH9}Gf4Oz1 zhB=CuT^$Uq_fY*%=1_#^0c|ktg6c^bk%1`*blNwl)KH=_u3SpCxi}|1Fbem}jm427gUnOP=JBczcJ#zMx&iG#g#I z&Te{)pZPE5_J9A2{g>bN|G{7K{WJdmU8l+n>@21e%xv>s1XOMCEc~s8*8Bl4pf39G zSB^I8MA>ngsXWWxb@-V@s)-X_fbfm@r6POEVp8A3s?$?x6w{~?Ma&X%pGM|%`}r{9 zlo`vroIOD=ts$k<)F69W? 
zVcds9dC;iU(c$o_2D={}ki>^pv|$0Y8_2RmDSJWz=qJqmsuCDu*FUP(Ty< z)q~;1XHg4KU05V4Mzq^c#tV>fU_NMLTbYq3@S#!02YADU+#y{euE zkQ>{gnX8AeQCFvyw2jIzPZvaXp0As*gINS3a>cb<3w0Fu&?-#FSK@^{tx2$C6!(}OAO*TM8b{49ysYp*)=vkB zYx@_@^YTCv*D>k5A-piQlI*2Y@_=MS`tR7Suj^X*k zA}El6ch)`7L{^LFJ^lo&eEtUGNkRel*Q zGcSx%8Zd6)C753I;6(rXpf6$NPePAApFC?Ig_A-DCmRMA8^ESKx>*E=71|xduy`eCy>~fcB0AT zlphgoJVCZ%w)L9Iv?gwU#l?!hVP%v)ulllU7L7+X$195GON3rYiz#2+kZ4UJ-~;3z zxzSMW9@34vqAcb8HGRl{ z_X?lNn)ahgAZjW2?h2p*&h$%+nm^hJN@7a3aY9)7rtlGz$eq!~dBlKIzQQ*-o3ZO7 z8fYkSZm?|NwKKeGu*M6i04=reC*BJffF!;v(+>H%-d}RchGjn?jYyZhU!a4%=wp4D zd8%)4U{~uJu`4DCW5K=EH9YN9W>fMQ zTfH0K+#-lh`$u^K$C+h4lZ$m0UrQL;Vy+v;l(Y^JYr5A7&TkXv+pw-ya&|PVQ1!#8 zEhndrCnNN?O=MO`!o!Dr&w^_;e<`pVuhu+gR2wxA36C)8noJhqY9Dj4!1Ir;x^}Vuh!;<0EQKq6xA8a7!2wv)V*|tlSRk5D89o2tIh{&@v z*I7&7^qo-LPd22M+Ar@6H5$c9@J?~TO4pd1N0vEMLz@7xtO5o<=L&18D^484k9_R0 zlZf^pxCMgvUZpV*>%pAwP1d8F%dp#ed~FpQM3}znvKhMBZ2WcPi>?2u3ZzpD_4|TX z;y?iXvO@{~i$N>^vW}1962|)yXbO%v^Sy3~q|MF?xLWV#BDG-mJ{q z_{E@Ty?>6V(_#{IAwbo)9DDDiJZT%Nd_=oPaysSHpE6*4=Yb*OLRBAl`uBHicz>bKWFtmQFBYF~R4>K=2wo8gdv<7K7C;hW#j zQ+&T}?rLgOMcA=3qS~_R+rKqBH2Zy#H_^DqIGt}775_@3)QTU6@!tP?IZRk~J(Atg z6BFIi(3(846G%J6v%z6`@)Xl~nJ2&z1UKy^(%Aqkf2{^t_mvqV#tW79_LQD)&7KNcOid!h`bAxQJ`jz$m+sxN)gOUQ4m?3?$Cqw@k zTo~a{KNZ8)oO04h-Z_B8E@KvS{{w2yRT81dIM12oP6gp| z?&}T#p!V~|^S&6&Yv3q-=-pC=vZjwq<0!OoZX<&p!f#Q%Dg7XGrfPO^APT=8EnOVu zHs_A+P`QjxA68q{gykvJF5!0qRXhTgF!x>mX*_~Y2IImI= zb__v$9V_ePnXfG6@mOaKhbV>IDZ$D6@7-}~%)Q?cpy0FoKv(d^D%v2cbDTYW`@zlA zj?(oB^eulU+5`cHSGv_xZbVnoa_P}=YJ6S#oR`m&#`GC6`M4>jVju@gEHN$~yenO( zZs2tT+FbK;jLnMIuKEDx>rwF5Sov*eru`_H7mfzum*e5!rXS0$oiY{VO)tnW^Rt+T zR8w`9#1oDO0f4@bR~&8P9u;}ZoXP~PI(E(kN)#?8kTiQrh$p|EZzoTfNL7=bnV2s8 z(rT^K>jXH_P#pOHjVpIw?IRsF>}vY1cfGC{Z_hxYN88fcA(Vjrr#V?B#j=#b-5qh% zQwYQL9b9^p+Iuv)CguIAfWYseHOW<2O%3~Bss^X1o@!0Pfm@+b?d@VIg*d5kE~NwP zDt4-NsB?r^xPw15P2@=EN0T)oZcODiCY260zeA*;skQQkzl@`U7-}3P`mEed^VAbj zEb%{Jx%bdkptyRFo4Pl;Ij$c?*1-o>%M9&t zDeQ>?<4AA*d=d-_TpWH6mq*)kQz#EZP9l@meq)C8UJYD4fz}(>Po`v9$ZIx%41)4N zPe|h!lj6`*Z=uZ_||{qzw2a`}IN_tE`;O z>ytDcvY#|!FOY5^CdWpN!|Taf^tne7@8^7@#i|?cNZJ_o4=0%KF()s?-TPLIb{_>j$;&B8D)nZ`XQ{#%tgXO`7vlG^PUFH>9d;JWqY7B?w;+UunV zLH!mbtXKxUSW%8H2@rC7i`HKj70GCMTJd`D~IwdYZRX3XO?U(d}hdH_ycd~R>Y$#ChH zu;whrH6QmRDr~QY{tg3}1vi~U&uiO{ZEG2!Zi0Z6-=*Rpsc!E#yU{kbc5Adgn#m^Y18!P;0!-D~MMw?H*^ zqlPg}^k33-Hr@``4B0PxmIzr^+LXss_P6NWC-g)hva5?-(=y7mV$3SlKmZYC)+5Eo zKLAaNNh3UU238uV_F#pVlCU7SJmDJd?COBL5$;@R9BgYpll~nqrD~2LD=_U8p8m-R z<)+5#23%>LERf!LN;aj4o2Z9H_+Hx@tSw_Qv1c~|%4fSwKeqO?EdeIhbhmTQJ}A%x zWqSVJRgBAzNX~?+(JEtyr>m`{3@!V=6*Rlo)n}KuhUFK6Y!Kam{8d)eS|p2^Psyr*Ir}@o%=gXcvDhCS>Fzyl>a=* zYrpkfv!9%HG@`KEx`jlQxc_>1G7HdLOxhn#!b(7WwP#v7Lp^RXyoBbh>Ah3R8Cu^w zR6KwXMxZc=ot>r^4ms_9ZtI{-qC+0BazDicyq3hrz3FM%FLs03SNQ@z0tj7*GDuNn z|2YTd#{BnoGI@*egOt&zz5*^#fiP1cyG&)#c}e>f1n#Ct&a&Dv(=Oo4hC}Il8Ph_$;3N3H7wQf$i>c87-a0WjkbW*CuAOB0XOcYkJS=y@wT8y|nVSi^Jt-Xf zLSfntim*$8w#pgwHD>ZNlQ`CJZ-+|jnLz0+Y;th?ML?V3H2n=I8;zWu_qvm)zbG=j zehDsAIMnsqyLYK49sSalnUX)eWTc%`!A=?Ft) zO)N4!&7HV7(=)BbL=OKOU(>3EYphPbV)^UCin}DcG@KOUl5QH=FEKoA!c(VYtf9fl zI+(Q3I|h6if7_s$Jz*--*?5S5`Cu!gc7Aw$hmPmuV2@y?bLd{&%g#=pe4)FL0q9pR z5Y!$=TI>rsfaM>#h4=mv=110To(zf8NeqcPG`w3tlRp#Z8YuJ1FZ#6^5tY^Tn3CwnI#KirIdxFObnZ4O zdgiH&g9cb_>WAIQ$gA>Z!K#twCbQ#Z(qm7^f0ei*x9mWNy*#4{<}59>i3J!jE{U>G zRdvY?3AnVr27OmOmZXbQ!g)lKq&*TQf9lWkgq=1Qf2*m4xK=dpNG?dDlyx7qa{Ano)SS8_yCi>Mh z@MQO@wGOD@d!`gtv(FqU=0E zgiW>GDt^$cf1}zchNH>IyvS(%l&O2U)7SYS@sy=EVXE^k(z|Nv1H>i|?#EwW13G`* z6qd3&YptX@d^Rz&MMbLV5%8u8}xCjE-&PoEJe&!^hC3AFGvU*}whB%0Fqjf;`MrdVeF| z_KCfZitM|QS5gK{?-C`mcOj29?5M;AUMu#ar@{S5f99Idy*IgQ(k{v>hsq_Y>RtH) 
z5(&u{H%yd2Aln^Uao}woQ&HN<9deTmb=-D{-}++YWE^E%%UY* zBYKzZy1dsdukOZg*IsBpE)#Ad8{=wgTd>K;5f@>{vHW`BpUj%He-3Ea^i%<^FAFF< z8}-k(g+~xJO~sNeCL^JuGXajzZ`BRQ;%AZU4wWeX)ptd3<`y@TZgK6Su- z6#`iuQYdWpijka2^0PXT_vH4;ADITz_rmmu^s31cjJK^f{TY$}8`6O&9rDRM{S6Ce zZ8-iU!wYO9J*ft-y1&ND^Ck*0f0!PLAGScSu!(vU$xo;@H&*0haZ1H7BTKxZpc1sp zj+bIqJg>Zo-!#a-LZv-^Hz8yDabiCx@KEAVfGhoe9{G|bAD!H5hfnD28^=kfHn|Sz zKFKYUGS^$Gc%>pXsAkx9ox`i)f?%M>!e5J4;sgcMwV|UQG`%dRHoB|o_-?MmF(@5|gY>>x%0#E|GxsUe14&*#C6NF8k z$1Cut;MQ0XsGzpI#z_MrcvM|su5Qvb;iciP7REHG}l|r&Ck)RUi(8MLt(1;p>|wXqdH4#wm+E8Q!VDL#It_-Oko=M%JgC z<%h{s(_^gOj9`uuLnh91t57(}OFA-e7%-!c<4Z?l+b=BMp4@(TkBLd!Ka&IowI}W$ z);#Y0l{xv>LA2!!FHu`P4#1f z@E}KnF*B!vy$3Sl!>PUi=l+`}x~Dr)eJB9FQH8UPQR^ySdV~4|x*lHIuX4a!3F<>f z6##8vPGJZYerT9=8a&mTfEq8W?!{dQ4rDL#EBwevhhAqb^DMs>@p_?$WvTx|+#|jz z6ewB*wI<12T{zc$RRwf5_+J^#c&X)sE5MQR^(t6Pj7g5Mv`R;17k> zJoz9`_K5 zliQoHvt8?VKiOtnk#*YmcAt*B*Fr#=(Cd6JON&&Us$4}xjmt-3UkrU7GNB};DXWMO zEiocMpEzYO#wrP}wpm|uP3|qFzC?%R)lH|Hs!zA(49N3I*ojwl2PP*v+y2O1F>*mv zvhLuNlpyuE!@sF4fu#?ghTtRj9MfqXWUB3gdSI>CCGK?=t;ka7wz}XxTmiu8SzX|H z<>n>i-`d{4@Wunv-HJ*4MiRai`3c>I72SNC-x7fDEOS{DfM*>5$ycrIka@^AQ5U|~ zSRKEatuI@+cBgo~7o5_gw25?w+yVpoCpi<$vxSS*vm3ktVT}R7m&*@v5a-)1n(Rwd z?+yD5l4sLt&C}8ZG2y_JVq#U~dQ~y@V=_l{Y-t=L1HCt#A*~_aoU_T}|45++qC8`Z zj7YVbpsnF;^v$Z?a%#xSp1NApjYZmmXq|r^cv(54qhk{MWih7IJH924v0h$GhtH0o zYprdPcQlVRtq~-LvRn4PV?WBP-5)Sos-C%=;QMUgyzH`|T3m9$9$ZYdyeLYkz)A*I z?6^k|tJ%_C31r)6^-w_nV(9c5JD4Ri17y+HemB9OBC!Gjj4@_ZdE>)vimTK=Au zAX&k^P_JzyvLu>LNvSqf5x$UV%RaORowpQ?+!W5~EkyyL-*}jgBiHNqK7=O&VZi zWB9@@(%TLM2~XR=)A3oI*7!1lBWh+ikMvm5?aF9srZDHsBE6$+nUsoc)ZVgNmeRj= z^UfuOW6qmSf4W-vGK9r}0~fd~`o?(2$NnkXz!^PbcoKRn_-^7?bVOu!%~!({6*Y&1{YJV?iNCd$SM(WDY`Ndk0|z^?mwsf+l(ixp%qUK} z0cJ}~=^1w>2bmef#3C=1UAL?toqm|Jx=P02__;@TdH>m|$?TR# zqkX4YCc&G(Dy)HxfaBmJ^VH77m-!eY#hZXKZ)1c5p=aXHiBnh-;DWq=cr&JkV-5uH z4M)dSawN0uBC609Oo!I*)o5YedGvMWkBhcnLY4!iR+6cI{k!dTN2 z;7;q7B+kt!+`2LRiThFR1EyH8_S{^K(EE5)TY%qdF>``A1$(U)*Kj=8ZEizT`3iy%LtsrJnNrjI$O-ny$1frJ+6~~5` zq2k>98ODOZ7mhX_MRaZSI2S6s=fWjHLok5a6i3z)gp;i33Q1%o23*W#VKe$1N1U78 zai)C<2x0!{7XI{rm6aAkT5WhUzVD$6Z+pOs?twmQDU2^EbMsilJq104B+mL@SyGrA z=A7B&3BCpXEn-JnK4SdgGZ(y0b7Hv774?~nUeC-vQinA9r|gZNTORv$xp_EFF047S z!d&h*PMAWFY;ohd=AoB#O?9fc$Yuoa;^>dx|Hrb)pasJ~wBJ?+s7a6n;Q$uMlf&>d z;hZZJj^;1`qtNf7HNi3o6fy~n@kF$NThKS4U>{r-2{}A7%KyaPdq6drw)@^Vjv`o) zj-X^zkPf2smKhNc5RhIaBGN^=1PO^sm8w*wMS3Sn6$B!ojUb>@0VxRrQX&aR2&8y! 
z=6&C__c~|qb@o~3ynBCpeXKRJW){m5c%J*Zuk!!@F6wiTDxsWF-alFTN+@nqKUqF` z%|TyZHX&m7luc-macB)s0kPwwEv(nzdtz7#iSgah5t-dG3HbmcbEtGM&&g5NDNwp7 zsv0{v3!IdF(62B?6WFR4%2CB|W=w78_DzKPyk6U34DW_xu+Wk#|EH^${{!3X|7pME z;Lp+jZp*kdW(g%gaux+StsxpP2a7`up2mdm#%c0um9Mf^5%RJDJC(kY=cp=kB%r!NCJANoN)s6L3 z_xC~bzWxn|3N4)B!R%WXsdOnsZw}niV#WUr&5rjr-k!r&*DSW`O5jRquc^7gb^6XI zYAylxDlxCLYOCZS!YR*QN@jqpQF3VYL1Jyip}SmDiWWXO4bAvpfc5ll zvDspR+K=&#D=pcmjgq>I!K{S>y`}n=Q*#f}ZeDY$J*^>xeO2(>GEJmz+Z@9_nz>cg z-&W0BAA=l#IzuP2D0kjqN03Jm7e2O#ReL_Iuit+1y?P771FtsJ=NobGpfE3x@0C=0 z*}0~Kh~`xFFUA$leEv4B{8T4cChkjjvRJ|6-b?gfn@FGCyNjxbt_xw8KDHxkeDs-=k!m-QBzrE$3E z`p3J7D+`n|RK6t_^qiA^_Ge|X?#h`nr@ik4-K~(4knHXF_4Mg*$7er=;@wVN@dJLs zHv?P-rm1(T#Cv5|Oq0D8Ml(Onnr}9O4T1*H&(I#Ver9VBNo%|#Grc5;^0td)5=(c( z%Lt*;pM_Wn^L^S{^({_=l<+Sb z8BVl^T;a4^{X$=Y;FS+LuNq@1*Ixxm8k=XnyG<7Gl<5mPrmR33`K_&ALsRlb(lrqx z5L zc!vaegofZt>$0*|RWk=RY>9ryy(9kI_`ox2r{doHn(Aqpy3#A7UZ!Jg9&sVlDx1f( z-r5>5)1U()5S#D!b1E&h5{0i{Sw&suwQ9T6s$9PGlB_7HecZI$_)=L^q(tv=oKaU@ zm=PiADBP=i1~Q6}n1^tDn~@}Szx5nb8;@?$2Czdx*$UUS>aoeFEEEqV!*~7aQ;|<9 zwp^@_~l`*(FZNLnIi$Us~*M8fl!cP$7sIG zodp6?S3+yf`j!t-I`~yX?z(b4JnI2LWP3X|yRB;;Llp~Nb0$N~NyrKU=FtP5TI#`$ z2f+~DICPK`EoL!FN@S z4_FGG+0Mdjd*BV@jq--y2)=I+E@Il;2E1kZv*}(QB173CRpLex`V#Hk9tQdfMY299 z)jw+z&$hQ54tP|Mglj5)7U>8dNW1o}{07FrVimt)`$jEQ5_gX)N4cMEfQuS!4DyUC z$al@iJkcNM!&@@?$MW437(nflh1`K_njPAxBqEqQIAkL$HFe=V{cNwnJHGSaRA6#rk5@3H)JO;QgI%ZcPc;dEZog-8jFkF%iUd{ez)(o6AQO0=>J|pFN`V@~G#@njhJLd* z0(i#l=A}rw`SEy^m%G9QT_1nOM0>3M%(*eI)xHWNZi}{-Peh-4B`s0y_gq7=!iCc2 zE}yWD?dUEfR0%#A$hlfyLrGm=Gmv=y{)+Jpv*3ldcX#fB*Yb_?lM=y+Zc9DRClMiM z<4VjLxOob^bT4Bff!Gp%e0dl{EF1QIb4AvCac2*uO}aMt`FzK2WJ59X*tA#5#Ed=6 zu7ki41Lagau6d#Eg;=b5jts)|a89|MdUr)Y$qHqh!v8d*;ca8?jQ)9)+G+We_UVsh z=AgX{(n66kJ_h8{dcY#7?IcN7FuzQ?rcI*TG}S4#2kZw?sT;)`6bXzZj25w%`zAN1 z+TDjBYzpvFx!+fsbd7uY%`PbSS%Lemcedg2Vsz2+1B4PqEi9o@ z^0mF4-5)-4Yg?AfPy%G&KNmhc|YaiqlGPPX=L<=b7ok4%d0z>`uX zoL<$5<6r&wt?W|=e)&oQyTW{-$54@xE)SkJKb|Z08)AvPNs(o|fO_=3J!3wvhYREG zQe(dGp%_AVK)#RUBOBOB_R6f?YthDQnrn{+c`LpDE|%M)59EhuBzqS_zRRo?WQN?N zzHV|ncyR$9dlD#5>>vC0D~vt(o>XqT^Hsr$Tz>VnpBcU~KXrtO zkxZ18k*xPL=D+EnI^v3+j33Egzh{M(z|U!j&D}i&zs+}p93;`p72!v1F_wIsYKhho zG*1+6xx-iRr5B z-pVxAN?#;dUrMC1>JFlSfcorAu_b5p9QB#h?nEDrDkA3&cD8W>n0|axg4Wr|7`o2f zH3fSgc26iNaRS~pg^Le-j?gS9pKN`sy0u{|FTK#(%_T>0S}l=4jW-q~6MBP@8<-V3 zZMPA+TB-mM0?UmW5_q;xI9?q_sN>P6zcGbB-5tsPqr`pJ6ih1j`TBx!c3Jb~2seWpfeXflnU{*^d-0-j zv9fY$CC#6{BfLn#Otw{;F#Q+u_cXFL9@^fAItE|YZX6*7h9OacbUm_k^W6snIRS;9 zK5j0A!Bbw%ca)XwZZ0h5ZpYn{9U9tLIZ~Xc{$#YJMN2Qu$!n_N-Smt>`o1 zjl&ojSd^&UL)2u(e@$&&9K1tar>P@OGl~&sOQ;<4!Qs&fEfPiW#KEpiWO@*=?)px_ zA9omy{)oHzhYww>+FdZ~c^xs>vO#HRrZxNV+IyMWe%_X6_ivMc!B13=-*9p$epE;J z!PC0=Q#uAeme@Q_z4!46wb)L2jrRR+)cM)K=JY!2))$%6SN#Bh#hEwC55sQh-i;h) zvy0SCQ`shpK@l}SP8+~-hTAETY8_taOH?mss3rD2CI+1KhxnlS1HVTG-EhNT2#*2IOvRR z&!k1!gT#}?3lD0uGn$Oq-*3$05At(I-75WLk3X z`Z~gq>z%02f}@Xqlio$P7OgY{np=N_1LMhC2N9H#T+S7XbI$ESB-*^w~4#Fl!^K zISaBLy=LhX(UA!57tTx1t$K4L)9#emMR9P$SR*Plvvb(F;yqMFvYD0NB>FN7lglIq zkhSD_%6Z0fs0aF_d#(g^n$(5x!oUNxVaMHfACi-j=t`8#bB@!V61P86^}2F}8=5Uz zTbpw(v^*v(T>l<-F+QTY(#kYrJ}hI*mpS}dd%}!Cg!{LLp{GZqmHKQ0pqjR8ZrR6i zU-X|8#1GV9s|oH-dErIot4&ctDaVbA>|T}hMRtCQs4}a!4d|8i$s8~eFAe*{$=Rdm z^MpskNztmr91kC77jNNp*_ojsp2mhp%3M?Dq7)K>xjsAUt13<#q>2xf=%$893`Izo zKYnW(sZxrQXqZ}vHME+&zvVUpEI8>4yo*15bwj8o`w0uH zIIPR&B^4PpnFXoh?ZQ*SvpNXw#_8vx>%KlNEwg-FULY{C82g$WwWs8L%y?9(cwq!G zM=2}qS=@QGoEhDTd1B_#uy*DQui0!@`nEOa*_&;ajM1-#jJ|xl5xADd)vaxRb}vYGX`%_WTt;VM_;V2scIb= z0|cu`K9X|uk3lx7RL2AU2!u>Njxq_2*0@~w+1*Y!SujvMXQ@i1+(a^Wp-588iR1v;_$RvzEdy&FmxJ*jcCzJxsQF 
z+Uez4{^*`iSh!Lg*$5FDKvsit8O=ju#_yBiE35Wdoj#Zjd<9mZAAij?+TlgLx{yj8 z&v4*W>Z3ZEl;s}R>&2ZAhu2VtbUtcQr5-;~v*A30plAsn0d~xRm2&fig|BVd4IioH zpiuEXHl{i%oqqB|UP4H`6eB?1H%fjCp5%Q@`n5lxEpOPVHPiB~z4nDwM49K&AO|2jMJHd}P5qTA`r758!owh@O%%kWWvx52{Zp=--Hc ztJ$}@5K=9)3O{C8xO#(9UmDQNTSkyjwB|5NaQ7cAVhc?>|Dm8y7OskO45LAry^vZk zUu*Ox#F;#TaNHt^I4Yp^-;mWYF$jBtVU?W`yXF;{Gp?;g(S; zo)Ez{r{Y{*OgE8iU0G#=98W~Q4jNUo-mWhXkaS=UQtQR;!MvbRaA0$XT^v8Q{HCpgnXU4r@tyU zxM^~fDyGyq$PW`sgm7AU29-SQv~D*q-`#ZjHn{RBAUH_OAVSh2B_Op(X-T$V&WFxT zqv$bmG_*P5k^FSwA!&q*H-DNNVV2Xg?Kg66Q*pxtA+(C!htG4ezcC6^+}M;m1dPfZ z_nUPVrc-NdzYHqYYFOG_Pm*povMh))t2pCqR#{?Y>QP*DNY&tEriYzxyesyFV~?vg zew)pA0Z!QXQjQ+KRdvi&w(73S10NVNdQ42x#uMnqtdAR{>HqOY)&G52D6|L2sM(+% z9EW;H+w*!6*4?=$qwQXkdF%|mkmpG|Nw&kcS{rIwjr4Gn-I3)L+J_55IAdUOA!fnc zIv2R!`hmTEN^yCyB}5M(qi7k9ZasrmRJ~#_V{-Ea(XB-@g)Dm__n>+^Bbd#Q^Vm%_ zm`x|X$&Y6l6mGf0Y*q<+o6@TvQf>}V?juUTd)fuiCyRQ?nwjET#kb9<)HX6JVBW3K8^$#dH z9!(=D6;H~n>aq*h*NqCR1STaU0${?R6Xx?6T1R=54~NZXJl0G2A$MZ+IOf|JjP#7! z*x@>86)N##v6AUBUT%#j`CiH14!{e5 z9Ve;`?(BHHi?KEOAzDUyLw?#06tEWq&qj4KdfgFf(2%~_H#v{iTP zeJ}Fh1rip~`^x6-+XH%^Z;=8E^p5LIT2`zr>*15?s?^flys!@n7sN)fwVV84i;@_2 zEsRX-$wYUq$3YGgHH_w~T&IlcMz?4_rw}@%&gSd~I;m>+azwW>B3hhYdNeBEpA4$6 zdOT}%XB*?@;o&XQ*Wu-J+5ujfCDT0yk2mJEs1onKlv&}JmHc=i=~9tdW{PQ@MP_P~ zv%qY+yy*^Ns$<~9p!`;?#_)y2^Jiix$e9+uiA14EwZM6U6Be)Dlq)o)sp!gjo*r5- zHa0OIM#?h#&s|#dt3CHTky2zdTA3N> z(VM2UQXX6R#bdzqUO-CPPEo%oeAgDsen*vZkTrdgDlzX1XEnC53zGj#1=KrJINZ4S zLiz=P?5m6vzf>}>XK!IvnQ3pqk~$^=&88(xKMIgU;Ke|((={}t=H%EDZn&!*0aw5s zo1ZM+UUo?_^-$^yYP8wtR$M_)<;rB_s^a$oe`l}oV;9uj^CA^-c=ubS+Q*}!YYU9} zai=$Pa`bx(87s-D(@UzFcZf(!_rfGSiC%wtl3d3^h$Cd>%`v%p3-M69(N=Url{u&R z_M6x=FO$rAyOixEsQ_C$NqLP>JbKNPb_sEYT2YPgruvaSzyk=uPdH){YY9Ho>l$Gd zM{5cFe*_t!?!{G8F&3mP%+&fb*Pkp`hGdw3vZgpw+>c)UQdfg$PLx|6A$@sM{h&S! z(@@RW{gmygrEeL?STOValI}3GV&%|w!TjA9nP-dU{WUF^m8QsRV-c?*H$O<<2BFcP(X7R3lec8Q7JU>D^FP{6*l4 zzPInIF9nR;-QFO>Ds|I(cex)^f9AEmZhE{TP;9(1;&EG|HvwqNfFC{bTA9w4@c_kR zqT6sLJ~_EoKli&!+QLpZSMfoG_K!VIlJr1%p3!TYwnb*gCoNWR#NjG~8x_WmTdLiL z&*{tzCffnp0e!e^%G_ebujj78Xlin?5T1k=Ht9O>>nZSFZ|%M&S&TeC)3eSrF2qUu z95jmREau;wXC}G4QNGm_*p|DcC*y-@4dbmUhiYYne>t*3(@7^3oN$qIIz1?~R*8W- zT8i4ITN`1$2ftTawSEn2C86d}IM_oiuIP8OtQv>F+rvV2mEa4hth09mDL?#G&pkyM zlx<7;x;laqpxaF zIyx(-QPi80mhyT#L`yH#m&tjn25}g2Aj%}uw198P>V9Kgr*-KPjN3Z>6QzE|xa8ao zBVBpZEs67q?q8$ahir;;6Kx7_klzcHStopYWTj^Y8XU(8A0JVvE*Q=}YsOt-CZD7B zELDsRtV!l_sEN=2;RhyAKfLss$mn)J*TJlXl8*K?TX%6pfHf*Ibou$#6SL+3*RwVEhhL5Z^F^z%GN_NL5HxU+@_bF_ihi@Pg{MSGJHVsKUyo&FsNX)p)(QJ z?)3Uc+Uc8eUIv#fc|umM49C0hN)2a=7P^VHwi3-}w7SDOnv}VnByI(?K*oLR{5ZRY zO19#yPIb3PSQeb3T^IG_6|`6#iHj(b^m(D*-g_}yj4A|*lHV9v8mIh#cYh8Yev1T+ zsNR}d=bB#jU72JyiOv9L#AllsPis5$lX`C->!w_L87bphQ>8o+-^mI4KR{RRTI?(M zX<3+Y;H%Wu1l5rsH;9K>(Td8IthAK@f5!zCDY3rqGw&M~LLeLqrXi zI|q`hVyYJ$3ng$tqSoA2Rkr$|6~3x2-Y$}SjbgB>`R^zXI2OTbUtmc+JdS*iCYgOD z^LNWdhUFPdCHX z5Dh=K;?M4q2k308Ob*Z~YX=IIH=vQerj~Csk9{aOR`Eclvc>#m?~Eej1-Lb<4jQEE zM@G~PdWk(^{s#C8chbJwUm0HuoQ*-U8R@Do4I_lATU&7~C|4BeI*h8`NYQ5$bN^0t zq6NF}`mBYJ^B26=_z4Qu%DZ5?Hh)~Fp}-jNM~LWB4DzIYfYfD1T#x?>lWUlme`$tp zw7D4j{K>KL`4!%>k$Ng~W1rp8?%`$QsioP5eb_?kI2`hyYLF}av=Ki8$Ws9;si zvTG)*?DI|CtLfY6)|aO~B($m8hP*sJ$s+#mY^C9N0R<*8Wk7z2VV?PdFlSU~snVfT z(Ya3hXHG%$XI##XA@qYhxcap+=CEuwJz<2|)2mOorLmtKPkvI&ap=6BKiF#>#KHtB z*|}2C>xZMeIG#L(XUqYj4fo~t=IHjM!_zWjDIv7$2!r9esr9(b{(|p~xl8!Qhm%I; z9k3I(nslB{z~ee=!QN;emmuEh+-Gw#$T8J?7W#YeRNTQ+X@!t(3{s_%;?FS(+B8;k z<*9Ncp`PYuYDE{UuA~P>9OR;#Q~%7^sOq9rk;XfO&$YsM;LJ^{gira@M}#VEJ<|0u zkw!HnmS9iR@;>3q@dL;|ABUmZfjHB7oqE9<7sKRb_Rb!=OIMZ6JCs2eLvaUf`c%{H zs6}NHd2HoWT6e#PS5T>RfM!>B4t`p8_8M@Cetr8EcAOfZ7rTNNWua~S2jwIO0ow-x 
zeqN`7fpM)0{7d}Fq6`85rGfU*ED+1=mSIWND7-9?#=w}o(Tr}?D=7XPuwbJo<8-Vw zP~KP0l8P8PTXH*zyBQ6;gN&=893E>-%uf~{e;~PI6p($WaX(qe8L0Bvm2LV6YUL-( zIJO?hU){j(!j^-?74^lZg5k`}q4_i*$Hqq?b&)JQ|B0ZU|4W1r>(8n9USI$4r zKUU>Aum}?XvX*mepHs`!2rpI|sp#$qqz^hux`CUBOzvQS(4+>OB?hLz*|w zBC5q9!H82x@g~fQq*0TzsTkdXjzd~i5ilptCj2URFOqz?uL~ewzxcEO-C6XHKZnFw zpwCSXA?|}?K7ne-VtKOKxdwr=^;YAX(WP8e6qGX%PWPs%(61+-x?Bc>-|sUMK1Veq z0QhIO;N*HY`nV#BdQE12mbdXq+6h$q^6X&^Tb}~Z_ho0JX@GB9E@t-6 zx;AP%O{JLz1YKkP28SBQu+DNs19N=rWCUj6c_l&D<#&!%ZHg86 zU{TCYNW)@^PdT#}ThGzH9zSg2Dz`}Li9*=Bc$1GB)JrUew4I|mSDz6BOO@l%N)nBuhvx%@6 zu^<(h6JP`GEL9Y`20dKnOqQaQkWig4$Z{J(4==a2quAJuj^QY^|JqmRsgIYMpr4)V zzj5H151G4L_=N74hlVfCpEy7dWzL{TA&o>$vau$t9R#TM{(}@(Qax_ai?s{;WVsMw zN*xS1dhg_lpmLP!W3catwMm>N_IebMQ(`NtpJ?X3c3-;v5NU$^fFyuKwL+^6gTE>Y%COp(BojQHDaJ^4$JXDws zsXKmEI7_+>Qu~?Af(n3;uKQ3G-%?l^;deeFMG&s9qA3H6{JQQ2Y)IG~Zm!S%m$EWfZcwdAzq+${`RQHi@|D?Fc`g8ohE%NQ(5OM$0^rQcC0?5J1 zD8#y>BB3=t?@&PD9^DO+V{N=iB#IRQDTfbs3wAydc|+sNjZM!qe&)V`3o_xP#+hgK zbj%2Ra(Z^Je(>ZL_;=A%a# zpIrIl`3@!`R(uUs@>7#n53K!Qxxspfd>IexkW0`yI)*%rZ~~(2q*ydZiw^5V{raz< z6;&NQ57xY&y!A-92*1v0v!Z4>XVls4R`;}fRwLBL^&vEUeaek_i~)jn0P(Nybo~lj<^^ zHtg|GVi}2ML$1u=Q<5%KSoI^%c8o(#DkR9xa&~c14LctQJU+m~x6O99H}wEbh{Vt2 z2w)WcI8;~5C`R4JhC{jMZ#@073O%d=!0zGJG?t^3@gWUoA968nclF?h%(0_HuCWlq zBaYoNF%Kaim1RTC{V<_kD2bbYJWFV7_nE}bQA+VfU7e2H01d{Cd&>QAC9^G@%xywe zSc^yjZgUOKKQ9TWa^7H_WgY@{190ol#pEhSfo!)yA@>+I*5v`(ne8DR@t^3*$({BftW%&QvDe`O3_)Eh$2vyu?ZT?q(XU|nfsb{d{yzuayIxTz;wk%TV2W~)l|(+WUcF| z=gs~O$wCWY=e9W}IR3DI!~BKWjjGe@TqbadR)U2g_N3LhuE>#!r!^tD0xi_(urn16 zrvz_m%3i+{lK$#XTb_gD3JQ?yg!hA=ULlsVU_@c?5tJS32ASwEXtF(&_?kaD2$1>E zCj*BjIBUF+N@H`R z=cLCnZ=#>jMQKW+og3cP*l>eJ5_FA--}suQkFmaPdhA}sx%Q*pEQ}yV68HptgHuq# z^+{ta?G7T4YEQcizwLZ%H~Mi4>muSFwWjnV=NCziX884UTD1OC3uc_VQoKOAlE7{?76s z3(H}agDk%Cu`G7~Zejdy#NmTG2#{OeM}D3qk&~z}2&v>J3tsEU6WW2bE<+IE{nJji%~DzuNy~d6Go2rYKWp>CGtDFf4V!4 zStqiVs0moY5PM1sJ$HN(#WAz6u(aIQpGWTk=C93oC;^;_dw;E5z*+CE{*B#Q%LK3Q z6xw08=hg1)7<3vNveuf>KocHBPeV)9Xq&r%|6bw$Z+w6M^9}Kz|4X1hKmK>~IQ*Ah z;hS3E(AS~nF#R+GgpUbTOza($I}7`0wiXkM94F7(eUk07rC(>ftQ%B&-}2kg)N6jG zAXRSZrib~|WtrCP&bL(|)s^3c+K~dIrw{Ie>pR>9JNHxylwJ#r5;(XaG~Y-UdJ8_2C*$Qpa|;=^CQo|XZdc!)d~FN%Xd5X-J?8C}qF#OO zBa@RFjuYl?LexT}%t%{|hWVDM4EEjwJDOyB=CKA4$GxdFUU9RL=+l4+E*Ft;nO}an zQA>>KLq6KU_hM1r$EA|43^*3;`W=jlZfl*7;)j(_d_O=kNdR%JhYhh)LiGXhQ8;O> z|Lxi?41Cq~@Ft<>3%A!KN6-1wv_}VG;r_F(UmnsLUa}5k_Rvr9(r&gZ#JxmLqHCi{ z?mMs8dvEK9VhM!^beXhHN*eg0C!9b*o{h9|+OPD!0bA}`&xwII2Mz0gi z-FE)Tk<)CmhH+vkcQ5ijgzD^dMcZ>>q-&UH-7z1C98ro)A!sHAvx0RvH{fpiZ=T+2 z*7hcO;ZVmE?Y*otq`j-ZIOJywjr-(2ggR+~!~_DJ%fQyMy++Sr*Q97F)L`r3V{XP| z$5zs&0X1p9f-mln@OaNp7H{q{Laj(M|P!_3Jq1!E-Sm zXg|;OoHp&wGgFzr!OO{W=;{q#3l&4UITJ`Ib{!7k2&|3jnj z>R_q{?jB2JChs6!be05qXgFX}doxI3rHjC}IGDivkg>7@+sb0{U~AioezM5Vno(wL zL2E9_iKvMl1$$S&8>3`Vbx0YYgfC@7+i2>W)reGOhpj+G4xCL7qq$O;j zDpA8&Z_4203_^+_g@)r{?&!>^Alr^k&l|obv(&<(IyS=V6w~44<<52Mq4E2 z(34Q`F4yr@kcM&4!wP-i{`!K#>653l$D|RWC9BPY-QKK58NDO*tC~vdj!m_76VAM2 z6(`~}mieARyXI>QY>pW@Y&2XdEF0?47LwMKmUft}>%L-;>LzXBTv(jHNT|e!9%gjx zY{s-`;e5xu;>p~QT!JV5qJv&EfxYP-_k|*jt8KNpAFGauYUtc0L@Ilx;f>)>uJAoh zv(z;+h{xW*I?qmF@%%)fTRFb3bJd@AC^!2G ze95N1$f0aWS!>DH>CZBa$#x}PMFE`zIX;!(sM>f}{iJp`OFy_=1#vvu}6f086V-z03937^g zcPoZbqvn}hCOqGeoUfUKbz?yZe}4^BzcYs`?C!pfUAczX8Itl~INZOKe(udGFc7tSNP*r++>8*u047 z+62!!iMJLQHj~UsnL`(v3R7JOd(aAStSKPjihKeLsudkrE(H#%6^axCdgmxy`zN}5 zQ5c5F4W;UX#rNPJDE>4V6-)oga-PX)q(}V8a+k)MwqzTICOUfeOzd3xFn>HN6jd>d zFj@R4K5isM)sb-Bp!K@dUk&B5Z!CE#d;1+1qS5$ zAdVx{fOm~dVKA&)ahNYx-@iIe@4oJ7+4L@)*d&9lP{aH_h3}ATOhI`J4J2Xf+*g 
zXhmYT**hms!m2Jf`(GCmMERA8l2O*&uU43GD);M_n73#Gs)di3+Ta%33#D+(u3%D@uQw>DZ<Ws^2FGc|)E$RcfK_<`^ z6!6pFc{S3+Y}&%OtRXRRT><_KtCG50i4rzbONRV0QnXPw{b1lIwr*J|OD?}T z5u){L>>|=3nCq6WK=~Vk5kLPj!<0;mJe_u$fp2M(LwUEIu7`S;XHIq6Aa_iQ4Sq&& zK>o!A=%F0TuDLc0(bMT7^%Q<#D_iE6gJyPxE03hQIp)Y-0tg@SVh+K`9+%4o-bWP8+Juyl`vOG}N}}p(nCDS-duUq2wqpuslweY3Wy-aWP&~M+apxJWAn3E|k-F0;SSq-Z@nXzA=AYoR2Xrx|Ht9)J z9YshZqjpW$I>&{Ast;*|xP9%r9Ixa}WMYh4p%pYOl((QCww{vR(vl_?8mj6@nYP>3 zaO9Y;EJ#4rp=w@Dq(f+zS8``*SJDMDpyOKgV5i0?8D4!4Rz5<>rl&7P%PB6rC!p#~ z=)7o3060AI)Q-96Bn^Ht#)=I4Tg)8MIFBFvWtRG2FK8ZhN6Lbr`F0F>{+n8An^hLZ zwmq1Vb79!!hqYn9HKTC>?D%34bJ2LQeD8>ojdFTYfpP2aVCF?f}E z{ZJFZk3a6TdWUWIWs#n_+VVFo{G*Jj>lYvgH8k34BlF^-Yt!!HB7yu4e`?b#vbZoq z!7R0HoRN>zfNN7+sjBlF2~3UQ7X1pic{`K@3VzRJCTH!i99`QFoF1{*KVk8#wKW?s_|(>2m9cl71_` z$0-g&2A9YtpSLONR2|w~@QlH|8EJ{1eIm$8mx5BSq8b5z;2Tq5sBIj4Nc7%fl?l`Y zgp>%!b|F#EzyT{R04KB|EWCHKvTcxpQJP<)zxdx@>2dA!pj1B$C5d7sbO|Iqu3_d= zARwA`m@R>oDyciZH6`}?-+a);5$Up_7@wlo2X9=LiQ*D?oCwaVR40mMo2(qpFL z%8b-MVrFy%LUj&w3^*J>)`a)w=o*7gY(E(63}ZFR=p7DPRgq;57f#-7i(OGcP`RXD z?D&1rfAsElIbO=NtG7tj*uyN6pOq+q%U|%XIOTDwaGkgOezP?|Z;_p&;a)1;cb?Pv zf~@%;?_%rAB0A>WqJg5mPTrT|U-kS(Dbcv)yq4#Ux{>6?ShEa}@Yb31BppG10J$lPeg@G9|sWf+Fj?TxYE zd-97Jt=tRM+DTXeO*ThnlN)3JVLBD znree4mA9yq&;JgdKZxc>k|^7B=`)ZHL2K}>;4gzSj;6`cz-2zjPsI#wK=C{y$XUwZ zs}RSaj0xuG;+HUViuAz6iN?m=R<$2Y44_5Sd2}2u)4M3iHW;4g1W~E#nztPne&eBrDX! zAUH!k^Bj+bHAt}TR{JjG-(%(tQc|?WQ51f4FKEi-QJX=~T(_kry<9QdvAfsmc4&4o zFv}?8(@i}N^e;LAP+9dS7gdf~I*59zG+7kA@0YxhKgTFiv2duibe1`k>!wC<6^`NKl!bq!~Lp8l<$gW@AF{RfXotv69D3yRnnd37flp z9E=xRCO(Wj!{dhNRkR(&v1f(aVy!GU>KST`Gz`$r$lUl!hE9qx*#~K=%n!2ybSIK_ zPwr9bcm*8u_qe$t%t=~$j`B9M9YgIa2XJ%J^tPbyDYnLdWN`z=IkG{V5W?&yi+Pe2BCZKDMsn)A9PSc}F z9e?2jax<+mPy*CRIF$u30#+USJ~Lqav5O14RVE&T@9(+$Z#p{UiqWouwh72H+yMDw z9ulv01ismh65R6PrT#r?YU8v{lR$$Yx*CK{QbROh6@<-JH-c7pnc;#*^Hw%L%45h| z38oWtS_{F(`+@%EyU4pac}w54Qo@26D2K|f#D!B*@uvPwmhpA*L&345>Nllw#W>-w zJ6aU8oiANS9#yM|+slzga89Jl9et}Rb^>ogRjodVTih>kpYYs^)Yy_Z<;@qNd`8l7U=7M#^lpt5T68=C5qQY^Y6d4(i)Gc z{UUb!4)w!2#Dv?LZSNK;iHMXSh;C(D9uv{b_Oi?~nk<4FAV!_sB^3Kb3gmbgNhr|y zj({}`(xpwPmX2qr&t9T%wcty7v`{ABi6nf26cI&w0q%VhMh0W=I$w&TTQBWENU@$! zADY`|4UAgf8c0<(?`aLbPj?fzJ2k~{%z1#arXZM;3iBv0qH0Kpq%no!-%4z*{|;~2 zuhjP|_5DhHzf#|?)b}g({YrhmQs1xC_bc`NN`1dl-)nL1SL*wf`hKOpU#aiU)c0rV z`!n_Znfm@reSfCDKU3eIsqfF!_h;(+GxdKcV)tk2`!n_Znfm@r{r@L3bp$X6AL;_a zzPMG`>z;Qvh353$$?;KrbQz5(uReg16QYEA-(!BCnZ=JKy@cDFjxo<7e1K!5R$mqN#6SsQBm&6gYl^N2J6WaNrb4Hf08_Q0}AX$WS~?ztcU@6#KvFIbts|} z!6X`W5-G&&+oXmPEQHxV)k8U{u}eMJQ(FuZt0roIa=dxxjKIf&x1{cH6n;9iySY3o zYiSJeM;;ZWKk6-8chlTD(ea-ntwB$5>?`B203i=z#lm zH?=wEE;vA6IBh!AUc$MKl5L};cn#4PyKnp1^xDKKT-@ot%pvPw%vWTPTB4i4+kZqi z6)$lGQeVHNM?Cv4DA#`y$p8OxCCtD6?FW7iG3Qb4=!eXcjAA6f(gCuTB81CBi-+9! 
zH!NLrgpWcW$4S6y!YKnR-QNgH14$T<*CEa6;eUhev{Eij>v23&?+Rd*RcAGXL;~!J zGCS1;95XW=`mX6`Oi{6r0c5c>Xn`zcVOf+C%`ogj5Ut2ZOQ_?rcdDqn}0p zD+VQ=gFQqTMHhYW7lIP34tft1cN2(;q&k0>7g|CRdlZ{8$C5EkK(R?vwVw#x%K&`q z8HJe8wJI-RIaZRaX*+C>Vw2s+$td}YVq<;WI%Ng*H;T=5IXUIAazL>O-RQ_jA6oj0 zVgr_3t1^e7fMNp}P`L`!@j@!5d-fmnFEZrOT@6h@#yv6%ym5cWe?d;b@1iz;trwd7V!zW)p; zHaO%Ng2O!EJCd75r5%;`IDg!nkD`R3R=AIQ2(Sj2V}7gJNq~YH*!1^Gm(hjpQEU#- z&(f~b0mTNeGCj3Kh7c0-KL5Zb&UmB?pKS!-5Ffy>`5QGc2jVzz|k zX99}J=k|AoO;FM?PwXA=2`~Q(!{#jBP>EEi+#bWmm=9Z*wjP#N}3%bkc2)U;`LX-DtrdXkO~nKe%67=Secr(N%z{ zB?JgIe+PfzyIae27MGik|3!SO-kvoC#J8&)ymhDFd>48*Pqy?6s<=IGh5}z5KU!VyE(f+%45qWP90EiiTgOV&gruO zl9@r@w7hmjItdeOT!utgE@e*iWYEe+oWgD?Bq z5GL4k$$C~`f{ojUhm^)=*N-EoGJs%{ewKX*6KwYP`?_I*jbTNbE<$wd&4}Htb3THTy#ipk{Z=05yvt zQ1qrg=XfDc2gaz`48*#UIV(pr!ILdx0%}%if62_-3RiO9m~B;ZZ{5=OK6hAuoEH{_ zB`oFBb(yOkniQUL@;m@EPs7GN&h@u|{f^g@z=7Ia_w3=J!bNh7npF|8M%QE1ERZtqA@Ms! z8L>Q!ngwiP@ZCf7h#qu44^Xp8HStVnh;4LTbAgTm&wDeLdmZ3;m%5E^Y;pQU{W_!y z(8L4F?7+y(G|%L??*nGyRkx*XUndJc`n38C0%~^qMWGWLfLcSxJY3fDFhwmsecs=g z*-;8H(euFjtmae<$Q)Doq2zdT8b-~I=mTnY#TRO}lJ>3X6ahKR{$kCxTCTk>k4H5c z{>hpZvBE9mAoSZ(B{d5tz3m8PkCYv#>D;vL1zYW^6wYeq*bf8XziZd6?2%H8nl(Ry zzH%s)94w3j)U5W(UJgdh+POudO$xv!DJIs#>$7V|Q_HHKtpvQtfEM?5|z>TG!ZiQxA?F9|6{^08$s5 z_yOM$c|CNUtfO%LBA@b!q%4<{nEvHR=InJ&o(&)<_uvxK+J$K(g*E%9P3l>f%U{(9 zJOxI(?s&8S2-!f(bYy57Koeuq9^8)SI>&a_y$06oA33*d;``R^4&7~QyaJnp?tXhF zH8QRxcfW#6+*N9N_NqRhGLw7XCKxAPWnk9qMaq0;WslODjTY|Uf%$#?7i+dA!jzT= zS1kS~YF0y(@!KGX2M!8r)_0td1gzPF=Z~b;D-OpC^4P$dy*3o;3anWV`)j2Mm^GWB zO6%P9sqA@cF0f|5>eto6tl3L1a!H-Af|}Jj-4aa6P*SrOD?W^edID;8kY~Ip*xLkF zP_yx*eD>&iOqvB$b00PKEHz5@C2Fz-r1{7~KEG8FHXXmfU!p$e=HJK9y!l4Ky|smm zr33EmdzGk8b*CV;UO(jiqsJ||ewiRc+K{n*Yly{ZK!0WKI#NC7$+Mw150c}6G;1K} zUJj($Zb&D2UsWH-kvs%z7cFw>n>e*vfq%J83>h`0VrKK0*@}{5&90a5-Yrf336P0Aq#Ak8jZaLJzme~5}MXMBGVy44g&c2HP$+YL{Ug?kD7l%o+IjjnDdy-O`ozM6QD5HWC z@E2(obSTS`P!TKWKcLwPJc zAkD7wFV4fH*{cFC=}zZQ9YpRLz_e8 zX2Dw=3*&QvG<#9Dxw51hz>a+Z_YZNgGrc{^a0hJJn**7 z_(%*BlV)dvc1SldX%;A%&5WIQ5c}y|6Nri2CSyx0MXCv;*=aD*f>L_7YGCMynnKR= z`P5zuU_%9^xK$`u&!q0>X)a!}08**-7^UsZ)uOhyO4p!~Ku*AYuA6tg@ zU_xe4*cGQd&%Fw1Rs%@03t%NQDy(VOp9U)?3uVdOPzT(kyLN->B}*6V!<3p}dwMhs=pMfXAoX9h5j@c>Jx^u!m8%)aXbJkY-h{ zo_4^bS%YpEPJG$(aBAI zug%Wa2=5Dr4^~yDI|I%$S!>qF`wHN5ryF`LS}|sj^-JC_0GbuCS@Z;R;5y2D0&TRh z1S#WU<4eYt} z>4ej&!Yt8^6$#>#KnFmx(_*4^TR}Vbjm=gia^Edo?32d&BUxAy)>8BHdsk86>1*lx zHVl0T*cBg@C<)XDUgD0ut$TKMlXr}s_&b1Rj~}vI4xrf;6wq&d0X6~I&#~B3X+6|_ zU!f(YJ13Uz(f~%Loygzjy6r9s2F+HGX7i5uDxKNw;$<{2y#se}+L?9bFX41Rf_We~ zePL#2)9wEl2Vb_4!kI1pTxJWL+1h@m(6o5tXQB=&sL}75pyXoCZ07UsTFjZX59f?7 z59x99Is}~A^Zu9TV$N*kz@fNNNa4&r^5nAZmCS5~OK@;mAz)_5d#>wpixMCOGrKbm zFtax?W)@J*;*9E2gDU29z@&=eB4w&Bf?+IT0%lfeb0dBn&%MEqeZrI=d+DcFZ*~%Y zn_1-zR=yD;bJ+o>qn_=Ln!zGyiT&@lOOwrk&b>cb!-cIwQ zhnI1LL>4cfH_~RvD2(w&_7-di|DxU}SYs!7tR z$&4uDGmdfpo>T8rgnQ%>52*^nP=0B(>?}`>K^3c3w$Z*#6?q~lzPg@zI!}tD`pP|o zdtFV!yl*3}KgLrd{!Y~z@(ubXHR!>kpB}hI^e@r5mlVRs5nVeo+;_sES`y#V@Mj7gc@bsW>EsLsB>-g+o#&*$56v{XYar$+Mj^CFlQl z8IesWx{`4Ft8;S+f89^S@t>y%Va2@O9FnMvxjGJ6k?<7K1nip%Iup6F)LwF>ZG+c~ z4%v^$`}BniA@#%%kGalBKyTv}>gbc5OSAu8w(|ASF6VY@STwwocXj5ZCivrihI-=w z+%(4Sut%3z_tC;ZlhG-HS6pW?e{x4*c86yX^I)Nf)mt$7O<^x5yn1tFRrfWaT4-5K zAAIQ`Yb5?|UiT8w-K^te##aeu1XA-+!vB2pld$KX1<-%PNAOhhUp`=Fib$wBki#w% zzXxxy4XwR)FOAjNl8Cm#1*^+SdWjulyVSn^l%hGuV;*G#(iGxu^421V?+xuM@K!U? 
zFDY*3hCZO%RS#5;Z$BD3N`PW$h-nu(NB1hG#05&XDGLuJ_anlAy7@MUV3kmRM&r-sxd_aA5D4`cTp1>oMTqZ3kTf(fT&8Gao2tsOv(WF^26U4Sx?ps4K6Pw(w zdDgUH={NN@wQ2KaFA0(F8u|Uu$w}L*0Yet`*@1599z~zbK3$jmeFpkBNDp1bY=K06 zH8QOycKMj(7WN;8v5Rin>#V@uFRloQei>P7?ucHdN>kw)TYToZ^~|;i7!I<_BY=X^$=p zHSHHa%sErh3D{;6Y!;wyg$dy)vuYAC<0lGN!SjEL(~3wg>@1CX93`uY`PX2!e6 z8PO)-!!=@F1b60L_;@?Qs?=N9IQYRdPV@aIvz}9Jrx@8Ab)IFTEoXl1Xqfa3OVJc- z$D+$bgW)p6*k}GSaPoPIP>80+2z7UCo)h1QR{aq`(XR>(n18*$qi7?$pxn(o@eu5q zAl#H|pEft;`PB^Pgx}cihn)4E`yH;{nDYHY9hH$S57y*lSbUhQUU7M$UcIU3%e$ld zxK1VaD_-vNO5C+Jp$=qfR1UIA`zaYQG?1-sK=zhtAmsuoVh@SV!xei$bfo<#a%M?D zTsy=TCBnzxpWzDqzA1vPJZpp}x-c|F=wx@2I+)g-8^-cNpVXH~4c_ zu~{Bk|7gA06yfL;p_MeknXp>huohfmpCTOO<)EeVRCzwXL?utc>q4OWp^n20w06O0 zDQh{|_w8xU5;q2I0?gR2DK1vh%|m5f!_7G#OUjA?Lpf5$iLMIpmmgusWY-5+WYhj< zDSZ&!GTlnecqhGbBS%A_a4UHd)vbX)mKvi3)OfSVY)a(2o(q(@MrPEW14Wh=N& z`_&JU@S4{!tZs=9E-+7%N3%~bwnR-1kXH9{yJk^4UHxy_IzhQF*6Ow&ZcL`S9kfri zscGQa{Xq}eSalqEy~5XFL6}6ZmF`4QKeK!y##DFzmRt7+G*@l5=V+ODD!W6)q)%>@ zI-$)Jf;zj`I8O_4C=E$Yb$hEv^k*>h}$$47=$ z`5ksf=G9G=2lzkBS%nb`H3yw(hbkK@_#M^RM>OBucZg0fJ#=KQZNSbQ>pt_)IpF=A zwGEjC{`~{cDNCjZbImG9i!2Snf4HL>WMmg4@Or1G`V#F?^c`409ifg8k-u5OeeA-} z1XSZFO=>T-l9KUV!c=iyN5b- zg`T~xmtfY4=8nh{TEz5+KC~<@Q`pB%30ZC8=7h&m6h`{EQ*}^QTK4MB<6|2W*{-bwk3_OnzsBDyzKy zVp(oPS`;h)no~u%wP&C(y>8dS#4ApXd$00-g1p$Btf!>hxRD95isTxSnuhS5MPxpV zY=??BunF%cO)`bGJY=_}uYf~%&+gbr(HEppdvc9D#1BR+V+g-3tb z2p-0D^i2Zle0q-%Qm4}@ZoEz)Hy~^&ZD>)wex+;9%`K!6C)RqQu0z|}#Xd)qTs@?_ z7usJ+2d{;>EJ@2^P3B1EuE6)9%+O>PdWd0_`=U;I)T2|R97#?^#jWAm;YEq&heFI- zvbKSaB4Yo6EY+<$A9}2;rh#{$oWkq9M#1y7W3FPb^wn>{Wy zrWQ$bua@$DtKfQE6T@v_hClr|oH#OeU+T-*$A2|2*#nwloddxnU`Uus_w1dDFam+_q` zO;DaM*GddmbXKA};)KbRPU&c(R@}8rt6()=XZAiF!2jBc0whd&Z6D0i-6Ukq^)rh& z%-lL12g=3j0ab@~3*#gvp*KYmqv0Ej;tS)av>=bz=YFQA(V8`t0WR)40=HZ{Y!^-@ zQN)||1tVQr@qMRnmO9xH+d~cuP9vLMCbX_;_S+-z&$_Ya-l!dSe@W)HrhPVvtJWpF zR~ZeE?m=cs*2+`LzC&N}G&jJDyt-#07@nLV8Hn#)3T14t2hR8WyG(9kNdj4s~5%tx9xXQg{+;Yi{Dy;&Rf^v~KyL+5s>(8nnr@RC~2$ zn(^quLD|Zl*^Hk>88SmQ@*_{M5(GNVZ93UX3S%7>Y@u8z2NGzArwEssEfN~CH4q-9 zjuJZvK^o;t%EyvFpmnlc3jx4{1%RJb8lVJu!6X ztW%F8XviU*nWy+k`c#4>^~Z1_8DEY(EM$=9|~9c>K987 z+_wsb-j}H}8yXffd_{dM%RK_S6{1@5ALfccK+&(KXU(~_7p-c@r|5z7hIn1%?0q3) zbZv>+hDbk}Q>9zUFW#{kqiYV^t+-BV??*15+W)R1BQM>sEq&s%Brses zlA(3W)kC;SkOLw#Uv+K(hy38!NrB5q-@)v;J}pL7wTTZ_&stG(Cv7nU%!pru_wqt2 z<16s*D{#eYFtBt4#27tLSPfqfo*Zr2a+0X1G@<;m ztkJSk8=_6pFnmbDtQ|tQ4rb(*jE071J}3Rhot@ZbXd5JQhB}BK2Bb2-L+fao6kX=4 zC7&*rO=mY*bd?Iyyg=%lH&}Yp8>dE>g7l_LOay<8s@HNOT|RWH z*5&M%-!(I=Y~f3F`Sa_-lNUEj4!Bupav9Rf5f&@|)In-&#fb?&bI4`XJ)GY=xQ%v* zzqiV&5xicXBBxxXF6`hvoj$9C^2SA`6jk|W2A8g9&e0W7?(-h&RrLHI= zlItrbu`J1g-99!6gEA^uFV1t<5Uy?CodSZi)lIQF2+}ddCUv<5^1G^rm9M~S;mxm$ zN?x>IvOnG8V;PT9pTpg9RhS>+ZMofPBZ$$5TwLq|`zK;-b&a>&@@z@9?M$ba1RMyz zeWz{1RaY_yIK6C|Xg~QJO|*}SII=})41IA}1zU6%7JvbGF%4Zo;%B*=r#%$i{Sl}#jT&^*A2Qt)OR`e{2_4O@EqZR za&X5ZMg>a})Dl|Umg(|7Ak-JM9_a|BX-5t8PZoUs{CRbqy0cepJP6O7gQ#kaR3EK| z3dohDjw~UIAJ=K+EN)ZgHm1G=#7b~EQ?MCJZsrR(BoB6nS+_pI6L=qkmsyML*xsR% zy16yan^UAuhUv=^D#_PBEQF&Azg$v&@^BVTGSFK z#&ql}8OVIF+;gW$o&EbziZS@sUgTG!OGN4*^>h`IH0X}j>_1v9?K58YgytX3{ZP>E zy8;C1S61%OxWS*Z25lScXw_!r-8$$On-Q56fyL-~#)kFWvfN8);jH}FBL#*}_H(pN zJaY;(69=xuxQ)ZI1;gmvaFHrHcPDtV15i!hi#I#SA!pu{2e!O0#duTtvk^30~|v#wF~ zO`5hGmYNZ7>_O`%IYpBb##>B$v*oTGQgW71?DU#rCO(B~sp&3^5Y-*3_9vOo5yK<x=~s#F2n`)*Ba|akuX{aP{3j@%Y$+JUzX-+`|;JIY}** zy4-o)hY-X01hZUsV!5_j2=al}+N+)uum`{GMjW{FT`T6%x-uuB47(A#v15 zY-KHuDD9<9=bj#v8BHIL+E*oDsb>%0c?L*5Pcf{7MAehypO@5t0G%{es~+E zSluMKVVtqRhB)Fcv);JsjEjB8zN^EpCS_8K3X#M_~cW~i_c&Ar;W6Hpkcp(%n-%~aNF5d|WeBg>$5v!Uw7p}Moo)cB{ 
[GIT binary patch (base85) payload omitted — encoded contents of deleted binary files, not human-readable]
zVgR;df--HA2uyr5UZiEz({s35EAYj!BT+RiC^>h?ghMh)vDPVSyf-ueVv4F$JU2nq zp6Bo@;MneiKs*n?%}Bn?5*^uE)WF^B1q)+BMb_-OH>u^PI1<=HurqA()mV)`IeS0p zGo%bm5G;zb@S~YwFIG78XJXO?y)g z8F|crgZM7bBB#mEN8`3)!M+W5)!YzVEgaeaIjsbevDSH8Q)98}9AK~WKB)94%uZZt zQ*k)w*3$?a&_YfdG{(F(?&JPgxHrH706C|1DqnvX;>1+@cX7}5IJekP0+;7C6Z>gy1ZwqEuNJ_*1*ix$1Qnkv<+VucExI7n&qa9Nb2 zh;jitJ6S`t=25tYQd*y3d^POEu>yk;hvEug{`0O<)y@rG8}6>=GmnzfwK6prdkN7y zg?{&;+D&pbLfgtGS3=*|gz8)d3lnu`jN}k@VnO`Ff%o|A=P*I+79({-D z??R3B+?y_kNtkz}S7r3LywP*gJE_PDr3kmfCBv`JMrG%$ge0yi$l54ug$*c%G^K#( zoH{=-hn91GlK1{@+{%0o3~-XT=w)wjcgmK=XjcT1r%`l25y(gEIe;k}VtZ(;-JErO zl`q6XNTFZ%U=JqW3R!pl0P}$}C^8r~4k9;2Z6yWDA>vxjw;&EKPwX751Z}UUlJ$~x zJFI@%k2QDL{L%TUwc{u6d#6Gc_vIDi11s|@Vb{$M2G^srepL4nBt~#N1cb>CK!!Ot z7y3(?d0)6HPcRL{X~-6^qer$MsyYQ?;Dy~TJsgH!UpYh(b|F!9D7(gs)>|xVSLa2) z-Wieecg2K3-H|3eeXv40gSoQj+t{~RY6ezFExSJdHAIV-whKVqx{d!sKS}c^4(79~ zC!oy%bb=NxemnS+;n z&gE8}zm@s>$MfJoGUaIeh9&Rg2(p2^8CYloq(GA5eNu!}{FLq{qmsC;M5HW^Ui0Iq zZ@czIqB+bI7VChHxN)U4;tt~@x=k~K(IfYz((wmEaOmH;=5b#CuBHGW?H&P6x+VQiMA3syBNpPRehRck28| zGgK6f=$@0rDD08vDdN*BzM5-kJ@>x_p)4fwj{-19ThS3pi8;co_hxio*@XkB(M|a3 zvWcetHJdK(>3W9M!YXDjQEed(gV<;P=)@D z?e>-Jg$sx7l_g{A7-r%7LwH3jc~(X+zrLpDO60| ze~k^>drs&;e-b0m#2E{Cht{2c_Q5pX397Z=B)e3;Y3Xb@$u7MimUAV%N>#UqxWQW5|2_FVHj{rntXbrxr#n&F^D>3IXDwNwr!vJ~A73AUdtpRBFRpm5F z7XK8*r2O|$m^pJ@Zl$OwE~e_!*`}yj)v+Z7nX{6WIj@O>Tu(&SkP%->K?DYaFww=S ziCK7AtAcsXOOWh><)GQ&LWt=Xt}Hbu#ztK9tjThSrgNF^+d1o;eb3!{-*ukzyT51s!Gi&r$(-|>;~V20?|5G;0T#&= zf)+Zkt`DFLq4|>MT=Ghh4FW!=YOs%PzeHo)LqO%`PEeN<55UDtVW^cjQu`?m$f#T#u zx*8}>Jj&1N^S_MGZs|UgdeC?5kaMs@>89?4W=PHQ^!XS_vCwYri9{VF3?WT?fD%w7xbh>^;qr~*3e6G zJP=a?{XGu)#3LdVCF$W*&c#uu&Cmwzs+8q46vvxr4Qt(p)JbC52*S1E} zXabgbqWo76x6X$LK69U6B&Ga5oCtKsoM>gCU?pg5T&4$72d{v{cw_ERA5~=FI|{((uLo`zSe;b;xvq9#2B#Ke8I>Gq zkb46TYtBBoo#oIVe7vUU7SG>iE%Vq0M|s>vz^e7}wH^P$^q+Z}pxcgIeyp9r>@Lwa zkiGh}P?4fF{=N6M`_?Y4AuE8&D81~&)*gD?QijTQsVYt!k<-yc&96b0w;TKw)rDvp7``EfiDRqL&!VS3oo7F0fM2P0>VY9!?SX z(vh?wECavf?S%T0Br!cOoHvd9Bxr}-&+Yw%|0U&aEbUp&hP_)~`CC3T{qjE9ZG2F3 zUC9n-W`D=lUI-D5Zf`A0W4aX1dJwZhKSX}@4$p$0wffqq-F(VGU18cNB`}(c-Y1pe zmOVjSlM4@-LRnj$T>$Dl?`K@r#aU*Znehh}d z!@>wsD|soFdmCTT)#~b)1emmk_B|+ZVA?Z(NWb`K zS6iC-ddjEXu{85?zTo`HvBmcd5Z^7gOLga(Y{sC_`!O8(gKgh7ia`UBdy4}_utY0` z4DHRJ2kkq}wiS3FZNe?Il1F&r<^j@1+u{vv4C>U|{U3x5V5*h86TL- zoR}ZGG*wMcqA1=T(Cv*piLrX?=hjcH#=EY_33MM>lBThvM02~bYZ@(sI{We)t08s`;E3xH^|}dlA-++WUX#1e#lq= zsGQ)*0A$6>Da5Ew(7aC{9^=;$(%1v`P+x1wV*-}f>th&xl8;-yn~z@}gi&eex5k$e z&WyA*vF-Bj5Nw-CibyDzJ{7i@;4|RSWqiWOG~HG@sqy$NMT3nm_3ZMAU3KSctcG2j z#*gZx72g65hxT_iMI76Z))92%bwtX*mdNlyplV=fxuU7{hJPZJz&Dr zjeW_R+BQia>dyLsGR56kXdyc6ucmL`{$;%S%bQZeCOZrLRa;jl_l>LzH7v>^%0Kq; z0N`%ZghVNsJwC$sL=BC2T7Bde>^NTWJZx}`8Fe-ET;i+#fhDiMbWiTya!{eVuZs7x zhL7Ao2Q;LwTdP6|-_#x_SrqG*}X*rJq#mJ7gX*TZ>alVET?(?AUMi(dFKo z5QjWeDD&e>Q|m=%QtlROp1A!k$KmQLYv1IoQ`dVk!Vd#Qd8_*|3~+f9!R3`B28i!t zL&ARSt3Q*uIb`h_`cSrD7FtC@O9s^^=h!lULGMFw=`;0KY6<3dp!UIwKVKPLOns&;UrtTWX4zC!`H9E=1B3jjaziU9V>)m{^mnbW7Eaeho}4@V)r3#K~!# z|D>F8Y(xo_oa(js#P*#%MR?^)%sp`yA67EQOiOLGTuxI;(wOgu`fcK0e)Za1U}bpS z!v9&GzTH-Ek&|ye<%aj(5%%F1TzvrF!koNPf>XO9{;;IM+C}Sqbp9FTTo^cA`6aFKho>1F!n zfC+M-Gm{59ee?_8QZkF1+T13zfB)nI^-9x#1Ctj{i`?A4tsOEuef~^B|5j5VxaA(_ zs_qQCg}lzv@1%NUc^2;LNp-bUBdN-2o(;vxrzK=XR?&&h+ajQPD?~x2^kymD>U89}v5Q;hT!7$&aCFw;p0qJc35m^NTH^b~v$uvtqVQ>Z=bjS|35E&EvERF}G*F!+svQnk+ z(ReLg!P;?j&W*Sm8aE`(evZs{ZN@0U90oSg6)itq-G1w_S!zTRTje<{-E_lp>T?O- zdr@ujY(tNZg8orQZ#x^MXHv47v(Ej|OzWi`1;0a%h|yQ0;|L3lQLREs*LB@B~vUuy-^=j(@B zN;yHK<=v4AM-h69L=)m%t@qR)@*h%E)wtQ!c&3%` z#@nl1E4g(bRn33slQKjR}Q=AqaeNkgj%p{o5M&U zOlN3st?La$F7e5+1?!Q=Bt4)89{vzyf!Kw#^@xk{@}RQENDqS~3n#=AJqsrW)C2A) 
zPhOb5Qrtg#64O40SPdV47Jy3l8fdd3gBPLstUq2oD~4vS|Dq_F@&1~iJ7ufIiL+<& zP7*g7ttL8a8X2faDtELRC}*84iTHJ{6R7K|t1%j925$ zSq84nqZJ~b;2TFHysY78#PS{fmemz?t3n-$9My8j{>n%H$}IjfXP2B;E7Bek9xi4e%MvO^CR*4o z`C)>vWrPei>tH2xo%4w%7p{nF#%x0lwh;HGNLasG_%TNo@-@GrXNfMx<2x|6E$(*y zw?;9gmkN_w`J}ST4A-(*qP8|ozgD-B(N*ekB(^?nP4ht8@f{=b@;Ej>{MLT|I7at} z*queh{fgE4rr*X7netc;;pt`v_pfMpzhTuI@?8pI#^Q!Ct-ed8YHt@mo=GHX@zFzH zTR4@}6pThN$sDR}LC1~g)1SbSLfDu)om6Ix=1(smM73H{m|+VDaWkl86PAyFHx*Hb z*IpWWz_4Ik&(ontWln4+zD+)aUuYc$g~k&cnz7i2_sis2 zxTbDxZEjbZIver)>Nv`hfj)b>}hEuk~BGBkOsSLe zhwqi4C#AEmmgwZv!#Cy1i(kYs`yWv4;@l%F9vIo~*mOvrJgh}dFaG>nU!y-cGyZQ` z^Z(Jg>$i{1zx-;!_krJ)9)ryQ9asQBD;Wz$#Fg|=RswbvHh_AW3{_am-zKI5WBIyqLd;E{!C@s_PU`4ca`TO-+mflm7ShLeHhdy^=M>$ z0Og?Tie*kD>Xhz&(~wS(o6EyiR|uJj5tAE#u$i947iAr3eoY}q--=$ z+Fw|0IaBoNF8AByz4vN*$20=&UX)kanaXjrxSd( z9Jsz9OKG$kv3cNi==q&1xbEruPj@96t&xJT$hPN#XLxPWOLP@^GMGAk**i)=LMX)| zXUH)BRihnALljh*!*T05`}_>EAw6EQmax~o-)x7o>x%{?J)puQ=I)w{Xtww_UU29F z3gnC*%m%f?<*Sk=3gCSEr7IG5b<aOQfR16t1eskwMG3@JWTdqi+N=vNsp-1Nr4QDSlHU2J2ZBaoVJPsHu92MphlmrejhkCDKXxSLn5e4=jcHEZo^aV)f4QIvIB;=$X|dauYjcWwKb3)iWu zTlDz)NBtAytjrH7``I~`=`&6~60*%{(7(}w;kR+K{IEFcBsX< z{)AVIC6&vTC~avv7CtDW*W~vkYpdxv|8$x|%BXTup<366EL~+*l-l|(5g=VO>5{pe zFfg`>nkpQnAbPq}(ZW*%Dj4#;CpyT@cWv_6UjFE#WWD>NTzqwp=}FHP(14=@Zyi7= zP$4j!YoUTv;9V^L>0zDXEndFD^vMLlBi#eFT`{7H22G0Lt0!tPJN63k3brySSvl_s z1i;u}*WZ6PKG5Q~bw@n@QkkQMM}5e_y~|Ipt3$p$w$cUuX-fHLI#RKu}xT@NkM5X5a4MEbke-kyNzvX}rHA8L!=g-1v1Io0OP*jVgJlF>mpjCcRK?@4saL5vo`#0~Z%kVjFmmHzjT*M~hg zHYc~C)b6oyX;t@6?z|5xmfa1$%4<)rf<|Mp{542Iqt{~b9q~2EbGTR@N)S-{Fm&-P zNyXSr&0OK$ajbijIBFd4J2b&fD9Eh2i1enRq2z3LK`HrX=EFP zsU8S=$~w5t{$YT8bsc_OR=Pa_mT8`ke^!P>x6-r&dhYG(t}fGeEq_+$jo;tUJ$u#4 zYLH$@yH4L7hTOoSww>Au^d^fW&p{OdS|QEoA^bRQzIK*OeMj5}_Qjx>Xz@u_1GPfZ zIA7a`DaKJINW&9b1llj@jSxPOGeHVYY$un~D;we$KDHVLe;^La9Xhtdx1r?@@4XGVnTPs=zkVsIo`10sOSizHCOxwLdFoMlG2^sJ| zS8s18Wb-&F)z6Ewn=$M9PQ7l&C6KHdgkA%xE|&PXde6}qyYnYcl36`7J@a>dbL;=! 
zO8xJo6aV@BzIXn7os2=oy{o zExsa*Xr(N-UMs#NPWmn-SKNjT5dwAyosDKw*a8VzP&|aKydy-h!^oY+ce6V^B^x6c zug22J{7vK%^pIxoO|W`h=QAZ0F@ED}XvHxs8}0L5%JU4`e~MfyDVxQW^uC)fu9<%& zrdQngmaR@ml{^TzNvsuW2yKSBoBSH+g7m%}F^_eW6998$+@jzVzBJH%i~mI0eGOo39t&zO);5{QA5s44p7|I4CE z|3cpWf92Wx*Ia)Amr{+L{KlPTGX$rQPeN1Cu3f~nFpC$MN>U3o#~gZtE3)BtESWo0 zH881j>~;uEv)9PbY--)nUj_5`^Mk`R3YlrQ(eFV(Ut}UKB)Ws|cR(ENM+_P{>cwG; zKY=E|N@9d?1*!X-xk`VJVCoY~Zc4>31cdhE&pD6AE$NHAzoPb;>za%9YJELwMd z)0yrEGV`j!^DlOMDG z4FM~#L1H#0aoe9KF1-wLO7+QE^0Gl(@cP>f0&yp>XyC8y^-VOLKRKO5G$c+h$QfXP zE$Z;N)NkX-sAMf7(@a(-E*MRZlVG~Zwe|9_PLpIKgi1BV)`ocC1*}++@7)n3nt1^y zD7bpQI8;4Kke2>n#9jRSU@u@f;H;)p%x~1%w^&AdnKO5^ErnI~wh(uM!kUHnS-wOP zblSvt`K$A3;TfY30^9>k#-EN`+(umidlB{C5jg!vd)*hG)bLS+q8c4TL*UK)z8 zUP3KIN`I>EA-diUQ9rKgp;B9XsK*poH!z6Zo3i(qcF6 zo9w}{)juox-Qd9#51@$qGQ4_cf&BJYdSai)>j>^Ker&sxQinSh1U2I&5C?Vaq2C!| z?KmOl>3Lota6uK>n924Qxj22>>Xy^nH-4nZ$Y0s)h5rV%gE#~L=9g(8At^*F(G4IudHKCSclg}UfY%hT+O)6Ju<2;f|Xy&&-{)a1p=)4=G)xGNIB!K`L za(A_?&OZe8eajS84cT;*5bu5+iPH%EA_iq zK1$UpDEC`rnP27S)MYg5tsTAd5LbHQ;eS}3zg?{V{ja`v{jOW#ALBLu?~d2}3)qzm z1X3TF*WaaH8__=t$a7sErRRVRP&k;tTt#AF=OVE4gXAh@DL{z*FSe8g5?=~Lz)#9x z8&Shonm9Go%cka4NU+eCK(ZkEMfojAW7vpqW7I$Ot8b@Oev>ntnS<$yK0Q&(W}j-#g=(QISUav8zGOo=Q9?C~!VHQ=W?uhxaxCbFx_9vy4D zX|zU&DGr5o`2K0RQ8IC}ZZD@3jv&$8t7sV!rgzQCj#H6g%f_Q_OxJvSAT7A@SC-6Q zM(;3T;tFeE;FL_SKm!&Cz0ndUm`kIrCv|+6(j-cA8^G-4DKLA4e*ciaPg z;|bTCbLqw8pb7QVe9Cb3$vtg{tG?2@V$O$#YE~Bi4?)fVTtU_V2bmw}WePczmB?9z z4aNiXvP>jTC3Q%QA;ru;qnD%TYC@$Gk`IxY+rjw;UL;2I^N*phHc(9iKriz#THB4E z#1w=w>*^{llV=75m&5?Qgy~I>)p$60WI`Os`tn^$>CCCMtw+Hm<~A^iiHqg=n6`(n zW|f7bQ131D3s@St{xc-1?FwrrTKUx4{Z7HIdDwMO8$d4;Dkpu+Ex;^hd})?zbMp`M zGA-n5>rwqy%0^WskGl<)%KoqTx1Vi(SEN_+UCKjQf}zx*!IFYp!VvR0!Dg_yAAGW; zrpz|1!(KvH?+KOwcasA|%dQjj>d5m`Q%n^Vv}*mvicjpo7H`GXqqViRse8U2f@^LC zhMx4hjkMeLtQ!N1*a35& zb;t@hcYiXzrZ#T2^1&ea=*037n_xkMClA|#M|q5Q2oLwJfBNB76E1Lh_#V6KmDqP~A3F2ER$f0>L|?h@@T^M1pEN}24GmJBe= zGwS@T(^yw%t*)9ztv=~6z;NNB`{yg_N(&7NANWVEy?#C3`Fi|r2gThoi_Vx_+okwF zSyO)R{#|#_KM?po5O|RG{VoW+p7fVFi9D#V79rhO?tStTnO4qF_KTiEBz1ePD%1Gl z{O5bel8qHbCm~bw3kjF+M-QFJIYr?U#`*wUo9^)CLo`;7(B@sRx`4O#HtK65AP)ut ztfNpJ#8_L2oH-aGBA$CnhisZC>@enb@&e4(Ae!FFU|Wbj8kP{*VY43zEYN@+X`v|1j%?1I#NRb~Y{05%?2R2)D9vX7N0>Jjl54sll zNn2|e8^{&hA3-Yjb(4835M!R!e(%yzdMau2uA@H*Oa+GIXvirWnT214Xj>z`5W3YyP$VuCx%2gC!)T^) zvLz>J7PW#P-8BEIOvz(p9J`#R(?sU8mJfRGIuRMR1hHwY*_L?kd1cxW)NJ8@icZ#K zLEI0NMNWgO3TA(Ppe)zP&9@LNZ2VV{WsnSIkei=FKDB?tSp0-&;g2t@&{3%4Bw-0{ z*kELbN~BcU3JNq&Cy0JRFGu7r(yOo)*a4C$kF;FUH(xve{tvd_>#KAoIVi?v!UFqP zY&$`B2fx@taToZsO_=TwKe8}65hd_(y%pfo4sgN1qg!uI`?4k*YWud#Zq;sQpK%h9 zdWOD4e8FYr=4QCO#{&KsH=F(g@H(&C#!(gh74X^$s4>(gG5K$R*RdG75Y56ek?WdW#57PFcdiA3|$!CtFY zzkr8MMk`o{-hXkAKy4h^(Rp@yB_YuJ$u9}ZIiD==sj5_1E3J2Rab2r2-Yn3Fp4e12 zZO8-c0M?vDP6*fm@r2SQG?Yb*0o%1(`vYXGd8@e61Kg71c;!%qs8Ooo@J8X)OAxNL z?v+p!S?t;Je3G^p&Yvd?t*hDcaYP^PFMHtr@fRu5G4h}Ek4LG)WQnXZ-xfFYTyVXU z7z|GcPC<5$Mpg`v7&$@KiE;3Jh>uNj?o90VlY^hG{ zn%P7`vm(i*TV&?#B|uZ$9y_Eek=qI7pssQ%$SwKkwjAgBbK%JIra6Dpuq3iVWazKA z26hAIrqB|*a$fuNHjSH)rjxx9Fu#^W|DdvCwH|+PG-*+4S z1JnI;wDJ3Mys?#Z3b8-p&7=D1%6%d~K6WXY35>eZ3GzbF-4Y)dwbSE);_G3|ULj8G z!P+`xE=cGlRi?~m50HvxDGy^JRA<9NCrOp~Og3w}A}>mC-}S}^pY}m=5O)g!gqx|D z^nPdt{t<2*L0g4=CgRIpN_K9B`TL!hX`842wuHV*t-&K^Z4yVSaM^dMx39iSO=y!k zofb&wPXASr`G5M|?}F+6coX!8B2fJiz-FRXgrMNj!{FJ!CQ^sKwm_MJYXi4CF_^(% zm*U9+psJwK@~#3-fg6)=T)?*OMBC+j5xiq|QJLOKO!K<@xnWVnJr4_+nL-dsz8IV# z@`s*0e0aOkL{n

BUmyOsYr|RQ-z&3}CsX*xIlJdLV14ZrT}UO-?~dE!%*Yty&?$ zUbh#F{iV=0_LTvPAZj8TL-8BaB1=|!pWBHA$%VY=4#B8?>wQMqC;32ZT6OexP@n8Z zBkOcUO1gV~#862y9wm|S**YkKb^agJ+h(q+C{9-$N$)NQpEDn7m2#CYooRWf*+mI6-GDG zR{}Cpm=w~pR|q6LzDu1zaINZw(+Tx9Kg!>awK?a?LHYY}KJ`6ixGH~7o5PRt*YA92 zq^4tWHG{Y}?{xZ#LDr-Q^n*+uQ4`Ntll!2j^ao>VMwapZH~cWAX@yBm$4F(V z3}BZ<4A7>j0KOH|zI4B1>@%3)v3k--cX4v@)ak)jJ5Ly`%Qodr=^fkj@_Gny=Dqqa zW5qjca`Fl-M&OJLJ1kn#&VhcW4<-np0EX3-GkCu|-z_`RJQG@DLKB{nH?{T(2%Cqu6wY3Fn#A*; zO%Jx*R(2?s$30%J!65ETE=hgyg;T;0;f=-R;~)FMn!UOX*WDf8wHym|bS>GF^(@P2 zU_(}x{|0Qm0YGt2|7kN3d4StqN^X+F9wQ_Atc0-0^dxO{0M6osHbb9|wKrp{1{kSE z&73xu``svG+iu#LPbW^EWhECZuReZx?Oi0pt-4`*OJEg5POe`+SL?d+A~_E~K-5-&$*bBG;LgiN`5UI)I{#>Q zP@y+s1AE$JFnmL05nQb$Uey;a?_rkW=^F|TRXM< zBip$3YOv?e;sT%!E^~Z2t>OHGw>aG+ zI`#PEwBgkB=Fe-NSJlNE=(8`wK6!KpR>>|6R{8YU3x62sy9b}jS|D0TX4=ysZ}L1Q zOw=DXIQ?Dfj^H8+Nj8_rgQV65z(zS8LapyoGnh_rEmrIYS32^$)W>i1ukLg|x+H-u zh2`sU5F@%6Z7vBlpN#r0)hXO1Big3{A_z#13IQ#nBR~xLN%QC+aGw$j$=o_{I3nlU zBThNM!1hi_q2X0wB2~O39Y8*2xdJP2{FvL)l69%zY$v`;b$um&s3f(K#i%N&KR)dr z*Y>}0C7{XVCIH8AKj099C?-r6lT@wj~y^<_7Vi4z_JdkJXl zKrU{lOa(a;Em#@ry&H>psr$#RIsomVYHV_dKk;npy75Zms(rc%fRZdcIMWfrmNhd- zmm==#Iu&dn;wy9%EgT?Mi)3KkeYPcMhfCIIuZ3{Q zC(2LqDN=;ym%x+9yoI!)uYc6G&#>=Z)pNJ0>eO`;M>?x_8ap&d&C~vgd>WfFjaXJn znx8kE)-foPl zbsGYETmnYptxqU6pt1o@xKf=cg^>o(9N^ogMi8!gSAzpSkaW>j_(Rk2pCb)51eQ<@ z-S6fsXXTy&OI$K(=L`Qcx$ArCjn{DqI9?gERG-E7O{Mz+xvX%VaPG@3~2?N-E3#T*9;Vpb~At-r0Vjq zn?QJkUrx4v@x@2~WyEHd+N_s?rk%Oc>d_s$;a&I=6`IxB z*N~eBX+SCErO9R^?QGMvQwIDkktO!UEC>5y#S57!=N}&0k~+Of@BXL=WD!j=WPlsi z%uV_UeTe&!JAkpG1QN?Iu6MK`3?^SZn;#)jv1!Jx7MHf<6f>uYp~6uh(XjG9^Z@6X%1O{l!IKf1*}Yogu{Vs@S3%g18_To zWP+4K(v9q40=^YrG1p3;qRWd8AX^RZHSavc;+?AH8|Y9)mh_(VX-cEIB>w?H{bTO+ z>dyL|4?`*mIZ~(@&>p)?1a}h$Y~ztlwav50b)f;#z5-VMkQ*APnS&inZO;sN)FxL6L!>0v9so%HoxB9)0$StlMDVl%g>iCkNz;^fJBQ>ySvU(185?hOJCo*S`@+UOR;(AG5C^AW|EiK zh%@Gt-ZXJL$h$!f?`|ANUq#XqokSwI^s5wEqTg-NZX_61Wz+@!h{U*M9GeFpfqS-q zda8E2dTLxR;pIV%$sE}XgXdORIj;rs(e3BCyA%cgV45KfU6sg}fio#VK*w&;b#cL1 zEhO$w7&RV7@n0uBV2c(ajuGod_T4Krt&3>`fjP0}$ji(juwRt1+XeEQ)JLY1UutJ>?Its4#EKNi0p=-DQJo5e>0Zz4 z)aRJfDicwZ;b|zhIp^2q-)N?A1#Rlo*Ppkri94X5+gyeNViKm!Rg!We_Soij1p`*do`drZr=_p@7D zyB8fOo4TF6et~n4bkX05a4-OLR6$EhFYF>-leB|;l-%qLps_&icPSz`~@Z;&G3mou+Cs>eYR#vA^lBdAeKk6J4 zJmGefbB50Q_}_{)4io2JZ1rnMz*Y!2PK;h=v~lnY#~~pu@CEAy#_rUqgPvI;GT7(< z)w}5g;4i)d(yw^PT%wjMS=A<5N0$LR&yO?7A~yr@BrZt+h}l?mA{LOvWdyn+eTsBU z_7on0`Drn=^;XnUvOl>5 zJI?8o(67NN5#T(gXKOJ6TDwqsoZ~c~k0F-{!yYjPqxFH-XA?<>T=wn0j-jMg@>;N3 z?&)-xxNw#h=AYQi6K7Y>r)gy8PFRaXpWo!wr{~@jbE#iv#HRlSHKTr|Bl>933P%9| zEiXSJVBBct1QW?}R%)F~RzIMi-lJ);2ZQmeL|fk#=3D(da7uegIg-F_KmRtR00k3W zZwPAURHt~+;0BJ*6?^VNeLXD(H;{V+Tofl`WQ}oKjQl9jX2XptST)rxp{+$+g@MF{ z6xVVnpEIm0AIhVmhtjLO2&!%aCvRBo4CRfb^Lni}@cS&&t`&xmth=VFWj{)PspO-k zGKITEHsZ2b>~Oj(P&;s)GAt3fcGI>~617*CS9c!op>6nBcRjxRK9X>t$52UV^|MM{ zL2hW|lH8LEKb?LUlV-5*iTsJ%=*dX%WZnhm8;bw|{cz!O;#zS@y@5FYK2qr3lO9Vm zAsFLs{)*8S1w+*h`SdiAk;6g%r&oV2;PZQ0w3R|@L!WaN5z-kRshw#bJ|Fl+dgjbU z>@UX1PEU!wKQK54X$&t=Qc0b11_+QE$SU+IkZ&Mq`7QpuoDP@L%>-N7UFf9nqVN~C9w3q+5|O<*5{cJ2Jp4CkJ-6+9H*G&MBYD}F}OL?RvL4#u)ST?m^p;dU(P zIaz$Ues{#C`0fGzsW`u-)kyfL{HJ5_61Ii44FCD{PF8$1b)lMHxK$eCc;MmQ{%X#U`lfIJd9-xX{NHFkVStfzbe)@Sbwm z{EGES-cE;McW5qg#&Ey!_8$8lGYuO;KvL_+_bJy4`akR_GrAi;E_-K};^%+lk;N)w zxs`OtgL;ty9pGFpdk|_|L_8>dKqTKApuPY%JHiAzzjGU zQV+W7RQdhG##b;Xg33V;1)b)CRo#7ex_g{059bwVAU06#p@I?f_@>97fw5A55>P5~ z0~{jV$vB}7Q5H-n2XYcPmFAQ0xfA3i#4SkF{g{l_Hx=HYq_zD<2Pob^#Ln!}s%;aJ zE%5%k{=KtP#|V5Sua`CbF8%7x_bJ65QZa^qZ@tK%VN@eV2`S~GG>OKrAXD7H#V2W_ zVDmo@kvnvMm#QJ^jw-0kF-CN{$yGC+9ae5V^s2Kl`@gWJ*migPN<7YUg39Zw>(Vy$ zuz9q#uPtm6E3^%GlK}f5Gt~NBnLD(x(a`5;4)7$ 
zLE+^+Gl%$keTAEvy^wmtW#?3tF1I)eo}LKMu2EfKn{!%_wRP3|L)DlIXHpMD^yC$G z*xBcW5ndeBMW+LhZ5r8#pWG{44H{Oi0QE2<0?;BZY_u%8X+Gg%Y#eU6fg|XqXDkM^m)+8W=t?VBc$bu~e_WWcEs8%AYD}NsD z5BznTXadCqPs4n9(=+4p*IRCCwiz-;G1ZJWSRnElZLMOFExhbpzkchKX*+6JY3R0}>ii}Ov~4pV4M~v;vPH_~5?OG0WcHAocGZVtL5dY* z8y3&eA{qeK8TEwAGzF0Y@(mI;f=s(!Zndqtd93vU*qDh$sf;+PykW}N7C>M$XM_Q*#wVqrzhKlUgPR0#ZW-%UvWt zI4MP5gNH8Eite^^(P9e7hFRi3Jo_Q&>}@Ls`2_s+M?S%)fP7-IbQgMH>YpX-{d-QI z%(sPLXrKo_N12u^Es`Qr`~I1u{obkw8&Zb<dL9KPG)2STd44_{-a4B!9yy%oU{6p|WQ z(=B0J;p7OPeebl~0rTR!(5+y^FH<7FRd^g6U5>UBsRV<$J=`!Y!d0$eLh7j*GGOhK z(u4PBkzb4Boy4cbx|F?+y6^F8b;fxz#&ti=Ok#j;Nk+wJ#u;R zR5dW6;81|0hH#gp9lvrWJ#j>;zu7Zn#qg6#*yeKmPrvNL+~IQ2h)iyU%WI1#SJWGGQ>BHPA%V>_HA!1MS6U1J*kjbeLIFsiE|WjmWa9cC)Q^X$aZkmpa$8~k$><0DIVej(o{TK85W7H+OlIL^fL(Mry@Yk1@lN(N)wYEaD1LX~gV=?aE~+g@~t5y)g1MMKM=(5W_O#10luT*!fVfIO8hwr6*HnLyF(fe7$n&Q=GMrx;@O;5g>M|R%^P^jo{cxG`NoDpv`EnH> z!jE{}IOC87mO`1_cPTXP?VpO=f4820l)OK-t}A`?_bPaRTuuJKyvP%ll0GLXa?^F} zm#fC<6aj#0R(+S6%_sYNNJ;4b$EO4w^|$Zw?_OblRh66rE3y8&6dBs5CoLvD_;?gg zupk}kPhT`ozkVXEUwb)HbnhpDAK+|S3$THe&!5-8sJMF>p;Xz>3&+hb^xt|cMb6t# z=iw0pbCE`v(06!zs~6!4my>+zeNYu=Ww1gN|GU%>Rgeo|GSAaI)ly*hAZ-lv)#hx* z+n}84nj~1Y(PaGG0OvU5npDs{zXml8JY3PB5HOXr;URm1$P7v!HGF+ds7LCreGy!& z3r;{S^hq%Tovhxz=pr4IEo#6Ghgk+4o z$l$p!AeE0}UvZ5;j2bmhs}i6>!_mzhnw4I}b&z<5bmAAn{n=rD%-gDd$H69Oz`m-k z_PNxzVaZZ(f=il6o#@(lDv!>N5KN}Gk{8nbQ$0Q#tGFQ%E}>=SqXSb)i)RXt*p^ui zJH>nNZS+|LOCK1>9>KzzcqoIAj?39muUyCqEb~N$4}n-}OTWZfNJB|F@bcDZ$d+r& z0KH&+#F49Iv=iM-S_;MIYUdVyI+xtB`RkS?w)r+ry{GQw9_g>{aQHCiF%apQ+u-Tm zYg}#6Uixg;&SU>OSmTd&!2jpl0e{a()xMqC)hXvqlH{`>#UBB_IBtO{``z`xNq}R*s&}2<@Mz`2UM({dtN=4 zq52JD{R+Niz6d@Qmfa^E{zSet3?lYO1_sV^m@JFn!WQ?Xpq!KZfH?5~Lc z4A4}Hu8o=s6vmL+=v6k%boSR#qjI>o6(piX8z7tb$hU#Jp;MPInY`nOH&@|o@K#U6 zgi)YTQD8jfjXzRv@^;wE`+W8?8X-mB_+#Y9Zg*|(WG8;Pa)j3e%Hd(RV_`z`JLe$B z442drrxw1y%H1O2fU54&=H-ao>YOk9%>y#}8H5luquohG`yPDhp(eM5Z`i!=r>Tw$ zRS?1L#SV+2{YLsmj%P$iI_V)S#KJ%Tx=fb=t)pol-OqS1N+5zL%^mWBP^3k;Yn+7c zq8SBYQKsZ&AL+}iHQ<*(6bcQ!PI~x}UOB*|uN=Ps9_R9Vp2PXamK`j1_c|O?)&R#_ zaVR`%ZND7IX1*(tdZ@@+#oVBMYuc%BW`9fIa#kr%-!WEChzgY3vBY>n(fPqkrR$6L zUw3<1Tp`ppyWyG<4jgoz9x^Mzned1dtHM?Jhn9!NKXuj0skJSox& zadrz8df_TBXNlugZs@9dW)Jb6^Z^I2_hUS0LfQU^U?%8L@pD<%QvFr$?M16b*6!=M zGqoWoI^=6%LNN)*3EQWPp?bj7cc?@j2kS`W@}aR&Y%_};+T?+~Ob#_3y8K;=A)xI9 z9)hW4UcP+Wh%&78y0$2IWL$KG+Je+tFWHzTZ{9fiq$}0f z?`k`2k+MQ_wS%%Foe*wrYhPKY zJ4>uoJ-ml%dIxg5K~p zpEZIb&A#x>x~J16-G%XU?-Lo4X`&`HmP$kekMO8i$$A7}!F((ew@hRW1zZ_rC0Z8U z&-8tOjJA5<63$Mbl#rVFw%t#jQ4navhhvy zzd=2ts*~5@2p7Zz)^Qg_=(HBT-qW$Uwyng#KfW54_;oC z_2|8tfhozIs0N%ExYMXKvW!HT7z(9}lnn%apvFF$y9IJBRHhUenX zvXq1RbKchE3RjKSMbfBY#UcIB_>tiG`z3zL{KH!J3!|mdKHw`@+fs0-F?ZU&lzh3T z_1PwE{%#L%20P!-zOMt^9GeI7WnGFA%JhxOHd&r2-IHpgvBTl&IgEzhE9hIlYjyy# z&&9%vC6mzRTj(Q~AGufQuyfH=aLM64g|V4gJBr$)cRFPebp$oQ3xGQg*b^g}JTx0G zxQ9$ZRsy;>?;UKE%llvKy?0bo+rKaBmaS|+42bj+6)Dn1x=6OVg$*c3??gnTi4G|FJ#=Ya+_wE?yo-y8iW1K&X z5Ex|5wdS1P@BDs0g(O3Hf^q5W(1i9TRR)Xoyfi3gzFJqLeSbqp;|a@f@6Rr~BT^$z zPGOZKUTK!xukkjhuEyN59OI^&h1~93#GIG#zgz8{XmI^tt;-7O;AdiTHyx7ttZ}Am z(Rz=Cs0%_4CnlB@$@CDD+izlI!YW@G?+b@#t95()9&)uLCGV6;H@hQ5pp|AJIMcz#`>5#}%OB9knE3Z19nt?qk$I>JF;sdq8+4d(OunNBmI>Jg-z@Yy7T{F#~7D@L~?&;U#H8q6an_h)ZeP;}~XTxI7AFv+u;C1)8(EVpC z`UB->Z+gGY@0&4v+i04f+GyfHHTkhuMUkF<)k1J|`C^a$!}kM?`59DtU{8BkbNvgg zU`kc9V9YZg^E?$qa+%e2JzR4d)2@u#bH?RxW5^E6mt| za*3s1mQuY-jo3AOT2zl44+B8z=_##_a4Yh6oVu1k|oMKYv-koQ7 zTG<>Q>kKxQ(3>|Wlua9ogC@FH{R}B&`0P+y$q@yeodWD`HYz~D@Pq96;5}pZ;lcoF z;{G>f_eXQrlF)99f@GIA>@a{UtsAO&V!dzSq3@j68GUtwk7k}0>ea_65A>F#k6q{q zPiowGl)|G@m-?sL_w(y?`L)GA-DwXuT2uB(r$me_G%XKhx}~?5DbtP2byL~*B6fV5 
zb`pTRuI$W7k4ZAE65UYM8y&o7H2n3K2TG152Ma{mxNc36d;EekM={^|TFmWUH<8R8 z)txDWL^b^&yPPKjr3*IK_g@S#4>TYt_z8GmsB_0RblOre16s0@mKT6K%IC|0J*9S) zQZD6!^%;LY`%vVo_bfZdF}vyMgZnC62_}gCApSq#bs^ z0fP`WS~8`W>kR5v_$=L~1}!9o9%8I{mPyS&uq#p2?i|h6^XO0zFe2(}FE1WBAO0Z1 z>afaauC(c;h*!@BfLA1t!C!hux`xzE=<|nIscn6Jx1K3BTf-7hI)eaFk}FUX)f%&|JwcXB8v_USSmesRXN zJo#>U3Q=5YEKlJg_RlfbFcbRcDly8LDyF65(obImaq5SspVdYx6)--lP}5Q9T^YZi znCJ7RaPe}E!=I=ByF%(Oyx+;2O%ppy95DZm{f=DS`tN@DKb)ffo7Uq0@FxC0So7>Z zQ78Y2I{9~_PX0ABF^20(939;7d<%aUUIe>;=NSMQpqR+6`2!Up7F``WBj;wFC+evh z&V`F{m2cSF!wK9A&uY8ZpJK`=?wQ#Pc?CD`kP{BgxoPh}!3W>zOs2gfWg&mn!d_qj z-LS^W6>^hH*Pnyqq!M##JLec&{*M_8fnXwKABPo#Eohh6;# zjYw_zl|-gX5g#27tDw1ZKe41oz9QmarQ+bw(bNy>pR)WfhKaTlG#$)lNa6e^RzWL#YZx{F|2~XQDBEI_b1VA;0 z|5p%l^-#1@y8tseh`;9%3hcnLQ3)-rp||nKqa!>8%m&k$a1B4zv48CPmQWTS*P<|{ zd(g+9hCdjraQH#r;pj>jZ+7GMO-Y4|D_`@hwn!1f^8Y3+os_Vxpx`}#@)dk1)eo?zp$Qx-jy}z zT|tC&J&$<=Cd-mH@;+2=-PeZq{{5D0KdLp*Bq6gra4EL5XJdTDh-e#}2qziQ6^{Ik zAtlulra<{-pT8SgIHbUaI)Aag>Hzz&MX$4ZIFLpcRs*V0w?$bX!Y!i{CZ)_g^nyGZO z2#4CqCj<6RJz~w5xr1gTt&KspJaKYn^H=^Ag}42N*p#a^5Z|VDk3S>XCmg$>TKU}im};LD9^{1ryxZtPeXYnf!VJ~#G%Rjpw+yL<4`gsxj3jaO=AR&FK9wM)m(6e^j!ozO${D-*bKhrwD%gGn4uc(_|(FJze6p@bX5Bic{a1epEC(#|Zzi=)c`jJ<6$2$T)u)!%NXnQw<}Hy1IM|iyI$l; zWHnZec`7?4Fxw4Gh|THpQdSw)mb9(JRiww9t}L5doR$DyzBYdrF6rJ8VlSG{}%!dk{38Iu~@axcu&{ z?K2GCv%@;pfSDw1dPUXIRq$XnW5I@4w>{{8mS3h%cw~FbDy%j+Lq)GlC5ggNsj?AG zyOFh5;h>5=%B|Mq;K53crG5GO>dT124BP4tIiEjNI*VK{1&OjkQBI7LlZsS7Z64NB zry+Wg;+Q2i4{T<+ps4`50hD<@-WX3i>pbl1ALUmy8OYP=7&VP%45#2lo4*R+L#AuL zG}7?%L z6e-$D+N#oP9?9n}Og&Re51zoB>pn2Za!pr2i1w$rn(mS7F&ohu)SY!!?;O~a@wDxo zXI zhH84*`>*H}S3&jzlX6EwGnzKfJwT+b5#+J4fk#7?ipkVRtjCzez5svEPNgmkrJK;0 zTT87;lI1GZo}tePDB7NrdmFwpXrjAfsVfqfYD!&M!%8hce_lPSyy_u-)2&%TNd zCd}nC5n(l}VI>mfGLe~G;8Ixl~A zXsV)xWA@9w{p?tF%v@Y&){pmY!ThP}WmNsjo3bC^G#&TupIr+Q$lu6oY@8H+g#dp? zpD6fe*IVl@&_!M{`)AkawZrW86aVZw`Zbx%KJSO#o((HNu7!ZuX^HPV+_9hcH@=~_ z%T^xi0JlDaXXw#uOOP$l=&FQc$!ad|lR^4rP$BB&|yVJE-w&JPX-7 zZT1Li>@vYl`*(W@bt}k0!wGbjVX|iPt$DFv@8j>e#!pw=0Y+tB2xEBey}r<^h4HUn z_=>gZemRTAb1{@rou=h3Bh$HZceV0AZ(%9YY5nz6T4KUyv@SkLq>Sc;nr!x54nMf! 
zP$FRz9mjSp@YVTem*b`>S@rGafQ@dHrh`VaWtT47NW(xRRQoq-PWuI|}SO#UK%Qqj*oHZc&5NGBGWdvbOh zx>Lf5@#nFM!hN$VtCxyD0@#`d+GY2aAax*AsIbf#R&IpbK@fwYv3cKv*({)p*& zHRXbBvlFI6{#Fh|dsqB=`;z>lvqHunimmJ`6Nu>vf` z{6T0aMem?dzpZ9b$UjcGMUxVd+vDLrpToa%3+4KGui*^vA-%%LxpKwdAMe*@Sac%= zxJqyg69~MV8ms+=95b*s1dc02rI)}_pQK@yAcm8 z{&LB`d;{CB-B)*UiMr@IYrrSOA6br;bU%3PnBF71rNW5Z*OMQYui^=awHr_wPw*VL ze`k2_<`n>+2%So9QRzVde9s;@m^nk*jX}^`yKE(b$Lr5koel|o{u#Z$U6wYejsAc= zc7s9Mk3o67Yd_6GGhH$+jWvhZx=l1`A}Kc&%1UR2QS{yy`C+pSJefEC+_Py#o3o3x zQ3;)PPrN?VS|cvJ?_HGA@K(!H$dgQ}n<~n)%dy~3pfXOlzwo|cYe0$vOE~;Rl2%~R zNcYv;hHH5QWskcHHPtR%6k#t~uyeI7zk?22Km0(?)wZ#?%4AYfOafNR#UwjVa+0=m zx9hUm9uO=TI4ZJrf-82b2j4(iqq4HuRFJJ+!clkz@F-3lH{|}kd%kI9P8rh9GSyeU zL&uB3>TaXhv#3=4`F*zKnBeZ|YvVF1iCq6+Acpl8Ux{h8g|25Xr1bl}3`t$wp_Y?9 zcmyOpE+%ZN|F$hDaqe7!9m$#}-k>p)(qLFPoyK;ZNbPSlCH-P8fA&D5n)FpMvHSgd z3Q6nNY|!zWMql?>Bj`lw%NvCcNo%uzSr#}9={OxWKFe2hNmp)pVN1!mSn00grknvv zzw%BsqRRJpme4~w&{_JnfmJpD=^A)^hwz;)r~uo^{7z(J{#ptLu^V;m>x_T*2JFyf zvhXa|zb1H20J@gH?Gjge1@ddD)D1Ksi?-vLB(cvOGN98~HMKI0yYKr9^AdKS3ce5D za;f`qE}@r&68FKN)VqiGF4^*@4<3)KuF(u|=w+_ZH`b0&t%}AGdr}j&t@@jds>KLWbGlAe2xoH=2!V&Jwu5%x7f~sq5xPUu&0=sdw-pn99SfU(v$PMCJD~ z#|F`CD=NM!g{dHYIkvEB{K5|Dy8i`xwpBy_sa8$(SpU_&>jLB3w#k=@IOi z6?!GZ|FmU~&Oq?Os4;VZb&%*}KcMbF96WZB!qE9v>6G7CmO?#~6Fjj^<_V90`Bsku zuT97<(u^$|I4_iUun6m5fBE&x%h9qiuIO zz&q+hCQ_?s=dTWv_TWo5QjHC6E;xvu5wG?eReE@(t$nZBYv8XaYI@xy7elY0*Igvg zdx57kI|NPuQ%whZ4xIB*KUokCsC1qnOY$!E78}opdyD?HJ=llNA?(2fyN+l^X{uAr z1fZmp=UempVF$W*<^yZmE99E(?jW;6Eald+pzZk#Y_N8+f7__(nL0NKN&b5d1LY2) z50?q6VKkqM^4>Z$Jc!lkj} zTnp2xL|H`M6+N9`**uzBPw{yhW&K7CY^4t~qb#Za^Px)ga=jW+ z_{M;XUG`L52RIuWqI^-X2LDg+s^Ft%yg>_wWDjY^g(u*^vZlCn;Zt!YV>5Q|al*5n z0cOf$?ea1ElNh$*t1^T;+;DojD;#mDqz`Ss>YFjgq;oAPv_^J|vn7{N*XROXO z*{8g~WzhJ>T8EU{ltdt9vT!mfr7|f?#4lIXs0xEM_s!~G@mY@Wv@>{Pw0RdCmWJqQln1u2=oO z4x+T0XLVXN{u+we1DpE;eLfy*V}?7<7MpDOpe(x0fqv+L?ee-T-ArBMY|5E~eh({r zxz6FP%MR%(so~q1d)1=KjjJ#kno26>@yC$I@tx+Vz~Px0o)9>Z?ZLB>xT3Gva)9me z9M_L(#IG#{*VuB-VqKuCWeiPOHl>nLMWj7z!gQa9pO8i3qaKca{M}7#&CHvLl9mjBT#|p-7vA+{mUZ;SuhZ1JpQeA zH!9WaS9J#Bz~{nnplOk~Wd44JsvuPdWl=*D^9s{jZyIJcdm&s{8ZRvjLW`ZdM&Epb zs{}q7_XNjhXheoPer9a6Pwa8voIOTy>gK2!6}oDSh$dFYQBC(63R@Ub$ND4A_p0>z z9Dh)(l)K=_a%5o!OurWU!E)xxm;hR_yIbBKdNuuq)o7Wt%k>tIJKd&Tdf^XmehYIw zFJXAHynt3;a8>Zcj^sPsDQ@IAiZ(oWmN{MtCf?3JyL!#xJ&f8wp57eRhBY`$Z5>;~ z2dz6;HIEV)j@#cl^n(A6tPaq7xL`s*_SF34nUD`%Q^acK)AF3*#5tF1ugCM>^Yn4$ zzxg;h716n7Zmms@iZ&aui45kZeqh@;QGif{`uRYVmbeFB@r0ZJAL|4# zHwwXbULXe&66PBvlbT}D^zUl`{LoprSRTVfCc8(+w_DasuAs%RF_GInX-|VLk5C1w zV7xU8ow0_EW~ul5_40hRVY2B-daW$QA(+<_opbSthH`M~^^%xEKWWd(h4+-p0kno9 z@}7Kp!x}+aO{%boX)9lH3HmU^s@A4lObkpKh~Un}w5YibS6(u0Cz({HV2X&pBoeKa z`)TUH9d1 z@3$Zlp}|3g1pWv97kAgV7s0C`a07nY9s`tQL|915Al@1B`kuCBdtffWIoiaZa?3UY z_bZ%6%Mj`@`JSxB{Uxdbb^{eB%w`u)ay&|Y|Mza)x>qkJh7%$)fv zhNef%Z~2?j&Qr8W?rx#7Chsy8oC1yd`AsFo#&SKqCGt&=U43yK-vtkJ81>W2v-*P^ zsS{mqFYC^liE)NVM}I1={_oOz|6`@`|C6TkpER9+FHPsab^w3*kq=_xFJP&j#=Rj! 
z_B(rTp{H-ra9Mcf~v3eEJE4$T>CZV7`*`Tp^(lW_-SZ0{)LIc5Bg(&Oy~>C+`Vtre&veUqsb!~dC;%B`ykl zxt!|<1b&0rAX|=`>JVOS*}k5_xyE}7Shg|Myv4cipjy`W>{U7UG}Ju0amlj66Klr$ zQceR}op7*C!dqRBNqD$-`%bcCf^z<~eZTC8%d6Dq3o?ICh1+#P$l0rGlYTOokRmBHY?yR^Q_=9y|~-7Bl(UzJS4UZ9gD zmn8t5A)b4>6jOGZY^&Sr0ez|Vsa=-5PZMDm<~=$!SllJ@=1Z!-)S;z&u6)=38;sWs z4^MDhasA}RkocE`bpeh!c0YXB?;tR^WuY0wJKWtHl@~teweVCX^k&kg7NsP{$M4O0 zemr^fFP61p=|z5e=(-_DyV3a0Ln!~hzt&)-S+)%RbqhxI>7)2bG&mPgUfXY4d(^l6 z-iTRc`~ehA+C`%;uQwArpz&6QbK-$P`Pu<`$V0v##XEIm+BftJlHndSC%=<{OBAhq=TDi=sN5Osl%oxby=$?MGWTrV@aclbb%w+M`&r>Vc-pLx{jzLdW7 z!BjY;eEN!QG#wo`ZBTaWfJt*};zG_5#c@ux*Gg1Ukdps|$jS3{l zxIMb{1Gf*Wm+cNA-@FnUogVai4i^K_eP!;S&54n2j|-4z<5s3?kBp6Kc=pCt-yFwnGLLZ>nDA5p`wc+bO{hy|%4^1l`$z zJVtQkv~^nk1OMnS#F_tDKkolxupqz{U(4DBH!?_TNgPM+PlKMF*Gy6hvJu~l1Xkog zeECuNPx0l(9^^i(9sTS6K-BcjvBl!D<@sroo-%)b_PWQ#`jHURuT5xg_vv1Z`-tOGh9UrV1&03sxQ@zt;VAH zlSrwxZ`Yu2LE|}xwH1?fnpso(qwjW&u-l5R+7?x?-bCG3NX<%ZumV{ljI%ya>+L7LfRVAzmTQ%yyh|C|6{NF@r3^e zulxVg+e^?_?g4&w=uQbLkOUhI1N1D|;igeE!%lJ*@8CNwn&Sv({{hYSkP>miaM|O} zbBEH{FWyB`!2to;Gi}!TYcpo08UMzQ`j+I-BxoCF|wI4R>s;L|GI;ZVto4<;+Ath&} z7HKCY9%qeEkTZuxVLa(``o+4#CC*T4B?ONsWbqxWV#hGj17 ztLS4{r5q9I6TZ=PgJ|SPGq?`7=?C41PgB;H**+%}qImItK z#s@i6N($a9ax62lX2h3%3xDw4Px5K$tRLoW5Bd^jvj-;#YeJMBv`D+d4I;*cKHrtK z8xz-KcDP+2l(=jr4~5=o5_%o%)aLr`Y_PvidB980FZGX8e>-$rt*W$&^r+-2c`D{= zV9{ko4@nIPG0Dta)fbN4%wT(>Nj>EQyy8stRjYVk_AAl3`DR`G!6ntQcW*bUrk?xe zU3n2}wBvichqf^*uX^U4Q+U)ez2e*%p+74fGw-`w_?2fZpm@Tz5F(YQ4-BX-5@5Bj z)C(r;ncn%|?!xNAK^J=^VWAb#3YMkw>eRagEMxVs>j1E_X zXWkr>zN|j+r2AWyZ#`U9bug^NIjVpTXJ@q~>37|-$o-TP5cb7V&gr*{y)-EM$(?S8 zH;mUA&{iHY5h{|k(hu9HMAW?Rsd`xC6lF}=UVqMa<9nlPbDN793g{ns< zb1gX{yuZBp(E(T0<)s|rWx5Nv~DVwDdh$bnNiwdibo7uXl+7Knr{dZVY)J_A{V zd0~c$5%!c!SoxU@)t+LtiwP!S>55Vo8w$UX(Bhr5+9_0*+LUgFu0}&M*8IrRRTq-` z^8-06PPNWuxmL|8$Bb>BE*u-W(0Ac&3r+&xIZlSGhbN=yz;dW`m0b-V;}~KUU;$Pd zU7Jcsk_qJ=MePBO;2!iG1wDw=E$ocq4>pr$wPe$3Po90>ex_w)HlT1Qqc&(emfc!9 zTv}{s)zL5#ReO2*`^L4QsEsvd0%3zxd7uN2o`C;d!FY;D;>GKp$GtXost zc{w(xdbD$_^Xg>9Sf@#SnF-W&*?peRO%Wvq0b+#PJo((HOq++&j`yC*MPA~^q2T}~j?T<`NG=%Krj`$1aHVe_2B zh`&^M4rCe}3WN0Gu0MV0Mgsn$Wra(6BlXVV$k_3uXqgEDB5=hbf z?&4`r$&h*2FGt~Lzwi3=0B6coe8cxbxs7|$g;lL(?L42btTe&X9nv_p9~Fo>UC^?Y zixPbldWAfeDLb~_N}9#rjLg`5+e{W*Z4qop#ft!{K@q++{|;7YKQW4_Ey4R6+2nnP zo+dBz4jnX|*IbEJX8E-co-U7Hbu}A_srLM{@R~@9-KSgSjG6o*i|}pxyl(F(HB>X3W_kkW#Z%?V7+2=%*xZ{o;@** zH(Z-MP~oF9m8EfYW7hlljthT*xmdB0%`Y~NE`WU5>8$5ZnCpAhs}bungOUuJEc(abPH)4Gy4y+{%Qs&J z7JiS{o8c^EuSa!4t{-K*=>sM8jJY*DQRHEfj-a|6 zSYwce@SQ3O22b6Wz-O0!>eu zKxL_Uk_$-qq{kbbIee!*o|cDGgtJ((R02QfoR4(jL?EqNRu+XZY|{?gNrG7#psH=A z2p~#b(+pXUzfCXDvi4!-DRb%;?XN`wBPIh4Tg)5=Z+a{mIqa&(srx^)8hXCpEzhCz zV&~Ny;%{Tc(OJxWW(dBUlQjmC0tsDLz57~Jbg`4^;u?$=wQ|!)orfCxUSl_2CM8Jw zSs6KHR^J(D8+xY558O4{oLw`KbZPoACyGOQxi@AjuE(w{KE9NdCSaedFRj*$mv`L!&Y&_In#-p3&d{|FEQh9RUVC?6cUJvj`X7JFJ^AQ8 zet5@rrrm|3*2S?%!DP+Gq9@TaxA1}eYq41xuZc|yY%Hm89NTe0z(QI^m!hAVdCVbK z;9^jF`0?l2Pd~TaQ0r?b@-p1-XJ=_NxB7^JqQxuh0+Yaz*GE18uIL%ENqeoFa}D<% zJdtk%K{H8o5UQD&+;NnvHrjmQROD;1Zk!pb_&89KCT~89}*r{eX4WFPz}dBmfjsCyBwYEX162yxRi||?nVz&2 zvwwbaE?9CO4k2v4m!90BKFz2)?UJi%V}_t-!Qc?d6W{~4^fm-MQ?F~oM}at`K8wh9 z=2&!-MFZ6N6S4Z?6`(P#O+6*v1YAN_FO+iCS&k*Y!DTdq2Z)2qH!}-LKhKNlq4ovJ zoLjsPT+{qPS@gikU^mN81rrF`hUd9`PF7OaMt)`exZsIy>UJ)1NE`^C+J7WN!%Gdm zW)(roxhmz9+%qhn#?Ew2KhLUrP^RLd+uu0g>}QLN&&6KY_lFtz=JgB{c=}uA)tqo% zucUeNK1W3lF-y+**Y7)H{RPkhr{7V1!o|uz5q5RxfN10eo=_?Zaz#$A@&sl$mbfWH zLd(S36vrJ->)dv}YZ}YYJ^(kYAb-Q2VflgM@-3vu^3B%nfadvRlgv_gRvn zey)X>=`hL!)if4|r21j1Vsg~1h0QU!=4lxkjdC$XPIoIQZhPIR4+XzG7t>{2+i$sL z=~VUN8H=l*yBJx$u$gxxQ82?q&N89*QoV7kyH$%eVm(~n7}Zo4h09_|a-X--vN#4{ 
zbKSbv?&gh(NAZ6^b7MkzC6L@V?s*>K_T0m-J2tG_oAdAypeLdaMf3Jw0+Y8Ndk`)N z454c$&&!(nu{a*^rI$04ZXZ|wnhx!af(v%;Jb9FnKhcWQgrgZAsN_6@o453$OXQ$h zc4hd%O5f09yG=}a@(q2o6ymTOFidH!xK z9=AQW99crgGgbIyCaqix@6B`r%`T*vk*&dEXa63TySnYEB}z`&&l^o=;<%_mohBc| z<;sfjCzKaO#~UiWi>-`cADx`#%f#fOWgIw^cJ|?$Bq_+-psbWN{h^;B%ziLE-1Ai^ z5Xg3RNMWw>7=!wVgYfsBX=)NE_19}^wKgwI#5KvIJmI*=cPfwkiLm|3%CRGD)_;-G z886Y^<=K9E%MPRzDhr9YDw6PLU)YmG`oLFhhTL@%lBU{FhKXLoB)#IYij*C1tY_h& zAU*Y02QV)B5E zyH1v{ckz5uk^oNeM%zW8e!D+#1JQokh zV&@SSnA802twD;1l4#oi%aR2?ab%8<;KT5TuzSDt;#Z>6`Poy>hd}w`e_O)iKiJyt z0cibeOa99xxse<^oP;`vx=I2r$qlALk9mVU5omwyzg&`OUmM^?Yq*#d*KG(uzW!Oj zP$A?pUeSVO69-(9L(D=9(5*Wqvb`^xU*AQrh1j@Q;F1iT?R>8MFBv%3;Z20tjgrHi zw4Wx)L$6zG)9Btp{bBHMZP<6j;-ngGL^fyfAdciBy3Y|uT30dG3wl=spsK8sz$G~u zbOa4m`f)E3%X2RNl!DWd!ns2|zAJqMWpy$0vSyQS6)0_3dt;3h@6k36E4muIMB7*iXIqU)RBol#Lrg zwrKaf=40@unn`1k;G6*T7zDHufGi@k*vfqM6?R4bf>Y&cz9-i;4NCy;N*?kB2Q<(^ z%e{l}(+a8|xWx+d7u%XSA#2XlQSbP_$AIE5u-Vcef4;+;2lE4L+Wxk@)D>XWq#p_^ za=i33R92(;d5q88! zfp1gLJY=oCeF)zQBCZU1Y6N`NOCU#=;lUFC;pY#;zcJ7#E*um5H^Tbeftz6A4J z@B~kE7(Pd)?}AAz)H?Yxc-xPJ4*?80ywA>~^+JnDR8u==zHS#jlmG>1aUYklrT^LW zssLCg4{n?)K&`N zH%wa3cggWWvvQ^oa5`okycyE-B+xe|q|E(U7JbKlWyy2m=t!iHa_ZcNO~<>Nb|WEQ z-pE!3AnOTWkwb|pV|w$zskRT6uC=})Ui|4k))3x=rdOcT0&O8D?6Y_W@QB{38Oo_9 za$sG_Df7Qy*L;TlJalsxS74_Sq@(W4odxya+NgLbMq ze!+&0Xbv>Y1fmb+&ifYlw5zAWQS-219!4z+ywsFMasI9|C`hj z@?{2oYWjD*UrK!(fEZfrb? zNGIlYJ{BZFGraw7J9VSet1^;Y@fpTn3AH}rrol0s3oxEe?XwAc%LjL?{tn0Pd38lP zk3V(KnOcKUwE(_ZoG{2ppMlE*u`K=jJ8uk=9@;g3yJfm*KB#S?!w4G&<#IoH8Ww0S z@Cu+zbXv!;%!U3F?ULJnNcNmhVI;S#?Am$#Q<{(Vcb4F^t#1`?V!q|e>*uFAWfib2 zI~>OQ(w{k_3!PfvCoDCzm^3r{;V7!h+E~=$p^^sY2=HRTU$KP`0f^cSwsyA77*7WG z)|;Qzo7uFO8qlGHjoAd!^HgnGZ+6vkAai&fIG|fNWCct`(?W${uhD#;#2D!}s4JIK zRgYx;rkE+^lW`b`HPYT3p6y>bD&Eo2S}CO=lI5PbYg>aGX>tHsFI3n~fdRB%I{ zfO#1XFYJKrLBNH3i~u-r2Y8@J)}_5uxdZAA0xDc}@}5izKNIVKa1dR4O;ob= zUq3}qK^^^J{NjbubqChZ6Y(u@A_kX|^Z*;n!oQh^uZF@7NGO$yocOm<*ny<(5l=z| zWD#{h&mN2mIf`2d{O#qY>e= zR?S81firlmix1OVIkKFSlb6((U`ScxKVIi43MH|44r~^y0=-$+FPyMO8Bp>5=oNeJyuX=OLjO*2M|H-)@sf!Wplsy* zB<~^{TU4Dth4;U4W3!(y#^%+$OVOd^V2zXIZAHt%aR5*W_y}^rHwD68d#By`}D=_#RIJ# zY_nwroS%|oWO6EPh3#`HT?$q{{m77O6ie=Qels#4ey!TLztXeq)-v)K(uLNbL;KS5|x&CUNkQVL@s(vXRbxC`YN#kFOqh)xRT9$D|A*I$N?RL+X zhoBM63^o7z#N1)laHbLjjWvH=GsgNcPx*SY)ITqIj^i81pa|AD;n-n)ZaM;bLD4>#{p$*Y}M-Niq$i}tZt^0R?t}RDBDQQX_=-Ixl^5p+>=1@ zsp!fT)T{Ww5o~w(fI5PwHK8D@VffbkT0I>xMc_%k*eM~?4+6$iRyrOk%u3{A^nx9V zE(_ta+EqYZjt))1anR(9&@gCxHKCe&$i^)uuFbr`BJYBA?yQa1R(ik2KrBgx9eErA zyUC-O*qg;N(N4z9wU64q_`9Wr+1{&G7R`D7$f>Dl(Mx8}SMi|>56{1Fa>~_D*lvl* zSu&e;{3t^bdDcL#=BSQR+WMM;Id0}Tbz9(wgZHIv9Kg`IBJtYh1H>3bYhnu z%R12aZ0G?Xk+(NsPE>~V-uy{*#-SCah0L9iz15XzeBo@n3hy82!2H#=N5=o`a)q-| z46rxaa-vNJPh?fy3ayEC;aJO}rMK`kx6~BoCpn@1>#q}#f&Ig4m75V3OblKvM&exd zr(u!$P__QBm|Ijel#6Rp{1cnBnhKNVkK%TeVuz@z4Rrpr)ogFyJmiF&l$qxVhHwyE zkDUxHGc{YtgC`OD6INr6NQN-FsAR?8+aUh#P(kkR8CM}}i3)^f56BR?iWi=~v|tn~ zSf9OFcy};N)4k{Gr}`=U#Bphx!%l4bzDDV*qBex4jO*CeE%DJv{o9p1~VPhuosvBUR~P@4766AK3M6OpQcPow~>0I1L0d{L!-Z2RlO5 z`z2g<%4~9-G?cYZP!}=RP)at}|~zrb5`z+nlHH5XRIybC+DTw?iul7A)>@fI@a4YdErIP2{TuwT)61t)r!8P z6P*+6kRFo8sMoRBph0^&b~wrG`Hx09rrMyh;f$8Mx@Q08tPrJNXzfL{$*@s|t8=E> z-O{=-U|Wu_IrB{NdtTS5>*SOhzi!$|>tMUXUe8X43jQHiar9X1qtv_MMOT>mn8JD2 zZkO(e4U?^^@lQ4@X5$eAwd;d3g%iPE-^`-<61Ve&_~Vw?WXCQ#H21zYd-|53PWf;j zpk#eoT(~gJe$_m@6vq<+nbfZ!-A(F)H`#br7jE2^V`vF!ci{}7wvpa9WY}XK$_9yu z4`9E&3=+VY>2S3|1itmWcjx23UlZH9uC7}6_e@UPMxD`lb96W6xndWy`d#zOThDuFvmW1pUOm z5xJEYH*Pr<&>PbAD*Cb$N-H%gtsfP7kF4(35@iPtZ_I*N{1n~%J^BD2QfjG^{QyU-jV`$f-fwU}+G5=>6KdPG1M@xf2WQtaNPiRyl<$S~0O^5U7jYX!=JMY1_Bh>bB@gRL? 
zQTtANd!kdHSoxCEo1teRcLws5!-;YIjh_4J0xd^sP5Z^NCo+}o*;U6&B#a%GPn(G4 zl%+j#3RJZc$KOicCs_BZ$mw6Nh+q!I-8(HOVzE@XvGfy&GWrvUB7LUZm}(;yKcVr& zsE?K7C&5&G>qhjC9JVwl3~U;C8EjrSMSTAZk0b!f9c^Mbc^)j`Yba6NkWnLYiY>&F-@sS-

WGa9 z^%5&;H8!Z_RIg+Th3Tyu8F=xHPo?Q8yfF*Sl~JtWhBT!yme!Af%vuXBV@0zkCUCm& zSq3r_6@ZZD>W-O4KemOwcTGFj^I;XCrMM`3`b~q%5G5Wd=E>YSW~?pMurM^KNn0SG zL=1|i;P)+ORlTi__pr!h?bl0`mXgqe*T%*D|1 zWQB4WI=SKE6<6E1QP;5b4~x(ZUGxYTI~0>J(3Yk=98p9$p6*-V=UPKJmZLgaZLQSF z3~O7#B)@bZ*%-C8U)ObA(bGv&p}b40O^U2`s$mhod4SP?fOHql8$v=(_^EfiVQ_Fc zYVzg2L0k=$@ihTxXPZi&Dko&(q%hNSLm{TYChGIk^MQpe_p6sD#FiD*py$p0H_C$x zJ^S@ktXK;9^x*>mD5Zyf)zT@Ht5P?s1qx_oDdh>&msag&wHdhr49K<6^jk4jLHU`a zfOD-$RE0CU?ffY#*HrtsrR^GmUY<)L^^&?z#iH?b6v8Uga1XjrvN|c@PuixOe1M_D zof28o6mJxs`myvWdo4M;q}(fPf2dgfIyszpp6a&kr3xcyiN zEjW~q`)3TW4D>U(32Whn@Li)Vevk$GhAaYhCNi9c?<&>{UCu-KIi35)dhYho$|!v! z>bPsFe!16BRMq8=H>)2_IrR*cQQCVVXJ0JG;-s>wM$3~blfp|B;^~WYHOD@G_n7k~ z3Q-FKD;D3yRr~BStQxy)%%)uhbNwY&ZTF~&byu}zg&xzeIiim*L&3_xRe-?01g_Hu z*Z-2RMu9?Y#<0Q9FiRK5K#c?7Qs7Lzbq6X^%GBa{pk07_e2HRnQ)A0}P^lG~3J{^v zL8;?}b$yz}hbG5DkFVo_z z%KjMx^``G@A6}hmo?s`M5gTwld16{`?BDoFUw6r)wQcn2Hw@eutYpXzDVZLeGt1RK z<4A6-a;m*UOia@MFYLX0TuklTHyr7T!iYB74njyqiuSY6MG_5?RBDj4laZvMX_j53 zQXxdta3!HKZDmt4)wGe+&{jpWL#akH(zG-)%X?1l&GUPn`}w`U>%Fi0b8lXM^r6ykT5hpwa zoTATUBe|08h`$*BHR>s2Cb^Sk%Q=6@($3kS(f{^9`c+2$BgBty#*F>&gz;Du5HBN$ zBt3Oh{WphyOIuNzH!vp{o!?P@%kWS~EKzl|U$dzF{iw4TlCe5RUvZU2!Q+WPtiIWv~+ma#GM)qde>2`P4fsfg@Zyr?B*p%}Na zX0K>;)sSxx(a`IjscG%5%0_h;WR!AaQ9ahtd6^gCSTp{YEKfvdQia6`UPz^i<$a-vI zia_wD4#nqPOSip( z+hlx(5e1>R>RIO<;9cX((@HB3Y7Um~Tv@z(=&+SWf1l^QIqFVJUm%&Y==MH(x+F{k zKMTM4L@e=o7+&A(%89Vu%bok12z(*pWza+GVw^1Wq9yf{R# zxI$nT&JDGy!7mi+#D3GKtH0e7q&JhEk7VLB5)W?piX$N19p3Z#4rV(!_JzJL3l}iEkqTg&o=EmMC23zZ{&b^GwH|W6vwCyH0|D7& zDR;47O|X6xx%rQ@;yc&t_4R_y-LvMlxbJIzSTE5*r?PX zOs&_TIKhb_BA4}I?PThL2HqmkQn7vJd@(a#7G(BTbQwvjQ~up0h_ZQ{a=gCg*>hVj zGuEEmE&CD=&ujLLN@n@_>h7b`FCXdMI@C@KZGI9JSM(}o{~mdUsl8X@uB9y><@Kc! zqn5H_No&3FOaD|}*zO?yu4TJ#_2HW;?grZ1<{0$Z)E_Jny-R*P|K;w@>$90RKQ*xn?Sy~QQc2Iq0k&M728Up^r=Cid3Ep$RNx{ZN(0m@uAC`uW5VH5=tYB z%QSi%ZL2iHZmUiL3G;S>hXO}Bg9>m;Bmx3{^cX$hM^~dM{OCGtfG>g2q8x=E{dE@b zqpt%9>x3Wu`-C4I?2pj6W*{hYei@)462B24g&W z!i~NixY6f5081lqqf;i_=mS*XMpv^0ZgedWk&7G0(G+fULxRGMerF?aqo2u{aHDS+ z`RPV~pm3uvdHt&ayk8^mU*r>-)BuD_+!eFQ{mo6_ij&UUwac7YOXzxMd}`g2Pf*tm zo5=;=z5TMd8IST~Z(~09N!;bv@fd0ywz#vJ-F23$6PE)LkrK}t8NnE`%jWVnllF+E zDxY#-dPsV6;e&U4A4eZCE`D{iUroBOKY8lmh$OG^kSK4L$b{~BnA?9M)snJ|%j8N3 z-I3^QW_O6hMSiIy$?;VyVU<$nSRr) zXopTitL#|IZ}4NS^UC9((Pxjt2XTl$5iuScis0GFp&=tvgvJFv(?D3axb}n3`(UQw zYW);!zw)+oF@f}wl(=sC8QE(49S@syB(v*n@4o!J7W??hq&lRBZ*m&1?-_V9B}GQ$ z4Xb*RX?4CBB$C%i(+GZia_7mhWOQDL1g|afIA;l$HfjLH9zHy4V3*gflx(kQB;1Ms z`MV9Xoz~8L34ECw-S@hm@;9EIH))E|Z2;JOtM6N@HLrEZ%qP3Fu=w$%2HZmJ*VSJL zQ;Ij9f3<1wa`en2OD2E(J4%v3g7|O5dBec^DO2V_2@EB1J+M!)K~PVx3UY}HXtZS2 zx^!$0$m=EzsHU!bL!vnF9ZaNz4dtz+Np?~vr#8}Z^!?odZQH9k8InZ(M(_Ll42MVh zd;`Hba^ieoZ-cC8eKPY5e~RNJ0ZNall{;BCgtb{LFV-F16Jg-0K4?+deD?hb{-*6kbI7pD0%2NUF|0f^;peK||o;v8&V+P8hw ztsA>=<#HGh+K~8yh1eghfeKSnM&Sf3h_g{7M6>NI$KEHa?wUx4wnvc;&E#Q-FNgsI zd1P-0hCvLV(^+QA~?*g<*$qSWobpV#j1=2jwiT zxj^Q4wi3=Y(7!A-5KKG$>gYSX#o!(X)_2!6ImJ~6yJE1p?KK2-Twav(o2$V)O)$!f zxV`$kA@?nZ3D@L=IUX4MU|1vg=!8 z{Wc|UDq6`YwzI8(oLN#ZpCBM*^p-Xv7DT ztrFZ1WKG|5lFBI>^?cxFDT^LSE9Q?%hd5S5agV;Evqa-&8iBQH^K&GKj<(Y5N7&9VeZu z5!~6aTC?y-+t08;FOy9Wg z+%;eE+q@MskM(VpyXJe%eYQQfL65Vew)(E`ZxIjNVwUb-*>)!}TT8g|$*nbuHO)1d z`Jp(M)?-^OT;CiE#Wy;?`Ie}^6}P#iq-Xc?V?v9qtwKRbVe8QH{H%oT=9e!!=H};z zhVqlu$;Qb`vVPy-URQDQZTe`Tb$Y2O-&`x$gYH}MaYt!z`@sSC++BBjGB#f|S?U@N z#LDk}T-@{Y5f~ zFACAgHQxKvXaDaNt!V#C&HrDb75`O5EB^MF_8JGG z=*6aq1(FsKZHUeF*;OkW3?al))x;)os7fHZyl{*CuI_CjI3`>CQE;w1F&w_{W6WN} zTB$0Xguv_G$V_kdXU-K#Xr#OSMg}dG?^PXJ@>&~9S}miEnhl{XtwO&(|B9LP1!0PT2e}et#Am`{6WmyK3o5vIpX%}c^W5QNZqQL6Zfle7l 
zYEFH@(Jfyj#!3z%+gHCSs%VQ~Pa@~Kc7`%m$kxYpd*&a;8zXl=o98E$-Hh-9XcVhV zotL5QeX~KfosXS!3eKm+l)Gesl=gT7aa35Z#%#L|J7wY>wy z9f(=SW$h}tGwp=Ix)^%$i|}%7ZCYuKVAX|2^|fvgGSn}1hF#j3XFm; zUoeh(xp^s&kFWxJ7UmQH$ao_ z&+f6P22y*S<80bTbn%CGju#Z>hd(esXw$08gRb$-XS+Tfu!^vaI z8gJCDJL=i6dAl20Vk(Py58MDsRgbo@^}4*JpYmu@Z4uRxCf zUV$8cYv2CI{-#<(6YU^&AB5ftBva*&prET2E;EQclS(=^1jVFp-04hR(A(b{2u&aI z>^Y}Lt1%?9Y!7n{ue|?ge5u+PBv8f5^mL^b&f-o~jYSgEyo#cRG3ZNJl`&7Y>+-nR z>r)5O2GV<9V1A(UIwU_F*8K!f|PndTSYcAd0S*yBuz|2HDDoO2tu48o)3ca z7Q+%%2}g3gP@W=J{LlbT_R>Q@Aj}fNU7ZXnj?lhTws$e)#pd7OX{ht2|=Wxz>^apRJTBq>GU4hUN+(djWl7kkIxgS#_dlY) z#ZIt4I{(_;MHN)zK2hWiA;&rOoT-!29})eq^lmlhU1en3x+rGLk#;_3H{>KZ&yBBti#n=02`Y z<#00bE#RdC59lWZ619R>EC8}Vv>uu$3H+@_CEdTzJ7y-a= zFqZNhn70eAvSnydo1Xb*NkXF}50Jro_`-dkgyAfO{n%pM&{e68Oc%K!-6&g!fP6`acU|Tz#G5x2zqseK zPFfj|K00A87WUqV6x@7n$DT~Vcb1#p-_sjXrcupUzUe|@ShU6wOrH}vbszBTsEC*{ zExKf(yp+0(oLXl~XJO9w&PC$6a2&CSQ?tlBkv{D$BCXDx(=PK7Tm6olzFl{@;Sort z^9)XA%ncgzc($X8j`)7+kp^r|-LacQu>;v!d|**XCyUsvBiSjx3I`G2>PQ}lHs*4% z7vQ24;X3*r&mf&7Nxsf5#7%s*x?AkFMsgb@nuF=~`4>;No z;TL9@S)3CrS#ulP>i=i=vR}!7{yQfFlFbKr?Max+BfVv!(^gA$WUjyg8OV%9Pfy<;SfT0JnBOg!WH*U3M)q0U)sLQDD^l-`L4mn7Ay;FOsZ~?nU`=smh zAW~j;(8*85Io<>2?^u4h!D||{KHSC*mHwZp#QqMj2LE{dm)f1b0P|mfxzb13A^}Mi z`O4bfZm+`+#ngIHQ}`rJ#KPnG>rzu6S3_KF@6^swj2cqU(o6JeFnJp>td68Qx0N_V zZhrceeyqv0?o;!Zx%c8Pt%Vw;C}yt0f;|FP5hzw7%;q-y8{Tl29XIkE#SGqCbfTb# zvI(Kn5~xedFFY{6o93(5f!7Y|oOPOE^UcX~&(p8B!OFEBi!4$d0ygJ#z{i=wktaa` zrWugblqgcIq^s^hKCxbY-C9<|(eH5S*FzO zcN5&p3KI-t5|QJ%tmwNkhjx1{-c$aSX?&EmyKmG?O8t-?V}Z<~y_y~Ud=6EhPRYUZ5JuQE^U9uD*Rh$6a1$7S7%-S z<^1~P{Q5t>vI)R^5_*&fG8Jm(N>TxDY$jY)36&mxF5=tKfQSjPgpn(HF6gXbcJ+9- zQ8E9gAd7#J#C z3uRR32-wvrJ*lX=Lx^+>wG}mQ#p}r?A&t@yXK@yvS}f><3lMLvYnq_T{s2XX`gqzy zm(}mE(?Oc8xMvR8SG?|adZm8uIR0i}cB9_rN$3@#I>Z%_5R*=!h4go_iNsr;I0OY^ zdzAU?d)q(_@&#u}OO7~>RFpC*xaj;0`{PBNVzN_>!P$?X1~V7-8PxW}oQSd2Tp+FNHN zKOe7$nAT)s5sfUQsA}ys(@;}X1FP;L=&9EjN;+IRWBq5nfG5i*p_NqZ>tnE5zpY7( z%r2+rGaf7Iy>zeN*63_)2ZUb(6!YvEo@2NA8Nr=xg)^a5W*VF^HqoV}M3P zU;R+>41YCqtNhL=Hjj%wAH_YA2Mo~mR7VGNMPc(^_el!4O@*AU6Tbd8qHrX6#;H2L zia6k0;_PI#)kNWf8r-_1gn$Ni1K#!UVQ23vG7tuL2KY0o4go&K2O5?_6j`CVH&M-c z*#f5OD4@20mRy0WKLPJbJYeGwL3$JjctlhsCbDHT<>)jaQIr7(!ohIJyA#+h_yF2` zQ^*M_LYv2T$yJ#A-7@=zCu8V>wDDm%z3xWjRM;PW01a^3QRXGFb~tFq@Msxc!f2E# z4{&ylXW-#{DJhTjHfkBr0Ii}L!};Iu8JnSXXtQLl+LK|DJZqFe@;_hAlcyDrC+nx( z9JH6ozP!6to^tcLoJ$}1Cb#_4Bl*|g0R8xD(@Xsd)cNJ`QkM0@+_99?#J7NY&IId9 zw^s*ysaTb#Qv;vzY9HFp+ehP>T(AUrkI5%Z>*pFp$gsK;N6D(5%uOXXvtPIz@KQp} z7-|%4BwM;cwp)a$96m!;>p&M!+>s2v3r3A>T*bha>F8Z6&KsC{d|r@|skgqm-7(ts zj@3yUqkK#pTg-5!N&6*@E``sYpI4TjU~=PN(MDVhQ?v>A^4}jDyz>XrCwYYA-us*$ zCsY-m>3G*L$LDwLFReV7B~5%XzoGzS`c}{JPjHIu_@PwyY=FRTI^O%ndmT8<#jeFG zG(s++t>)BvY=MW6B;AtuVkm-i4xU^d&Bv%VnJ#m})p%FvMFT?KSvwK4n>H(gH2w3< zf}wjun#oRT7bc-cu%f`op(b2C2{F~8*M$Z*yU3M6@3!tpEDGHt{G2_;?r?6gRp$#W0y>+`@)lKXFO-+$k#RK9gdet=&Av{AO#X^F>CcyrBRI6=vync+}V!0k+;AVq5ryG(*Q#jpj7>`t3E` zLTFbV@BXfFh6RSG?w#7jU(%knw0aLxZZ`e6&YUx~ZXikbL8Hey8ZPNaBv;HeKW;9? 
zpFEnhDZKSoR+H=YTO@LWvk4ZEC;W#2QE$jmIv2zWcp!JAt0{B9%1;vHM^%l-or(wz zN41@5*>mN&)hF+j^1w-%69jc-;>E)W$H?|_zv}K8eLeXlNW>CGm4r;1-7z4U2IN}B z-k)6r%p{rJ*`M0G0WVg`1~E0(_NwEBRZnq+%_~73PBWGOU&?w1T@eIF*=vVh-#%Wk z=XX)jczsER?yl_hM?|Z9XYd$CwL9xRd?&SuS05Qs=$$-#3TY|eY|b${XN2f1x` zHt%`4EadXb&8sfcdbMP-XzM>kRrp)t!e8|p{Nu0HN&W>y{{o_w&|NThn9!9{1D9Zc zVs>Swi3l4Lk)jP|F1JtUE;{o=X=4k;CfjEaBz{_sU;absFl9Djdf>SKIV+vcPKL3V zlR`DD3h?)zp62oF>LXgvWc$>ID^;TaMRgchgj4MSvNoj}AzeCfk6=ZKX>Vh#03MB6t{-UR8IGO07=$YW7l+?TtFW>KO( z?#9Fww4C1VSFEL4^)*;Pnr>b3N8*ReL`>z^@+5x5TG6<<0pxH<0L{W7qm+pZ~;FT$zt^yfHi(Jx5>^4tnm>^8=Juz z|M-sIy>76^GXPCJ*|YTXn%0Uiqd*5C-k4+Lzlg&cI-ZT40&y^|$9QS?}vKYhQ?<$Y59{CPx@%{VHo!HScry zdZQMGv8vZqJ<8}Jy(4`Otn1V7W=-w=dO6`R)C9oOS&`_pG%1N%2bK2KkP@&Z^od9? zl}<&1J{?5Wc_J3fSP={6`H1nrSm{>b%9XSMz;* zJl8?vA=0(z6~+`Yhub+_loKP1K#Tjn!|_k8W(ihe2kLbXFzQXJkI?n}pIiv&coG-) z^_CVPAgQTn9P$8CM9YY~B^R@nS>`rlFOmJK%AHwV5}f`mf2JGD{iP{ypU2?!=}xOp z2v%zO5mMFT^SAqEs12=Ji-cM5lVlMB# z)IS-(9m=u_nERET%vg*7Tta0j5-B?BW93sI!VQT<>MXB~m+)El2DG2yOF_!Zv|`yh z@x5Z6aQ&yLhay~HWFv{87pQkG{aBiF$OGi$K+C6yr}4u4O6avRI?X|94zm>Ct`+PD zbu|kJ=*!M>WszxBr>{-H*Yea*u~ohHSIT&buIW7^cW2#}Q_%YR z|GT5^ADGA(to#nu6o4(Uh>EygkS0-;Eh38UQ86+X1-)`AQ<5XW$`;Z@k7PRe2ve#o zB}wgeqxI-p@WV#i&=>7cG5SWr92(@947JE};tT-$NJF&-KbeaYVun&iIZ4*Ub5JSu z0RTmY*rRFol^;0W*zM!Zm_g=_@!<||;gv~8bXwtf(@Z#I=JXyzdD_)ao@qq@HR*ag z{h;03a49Zybc@3lry|L%s}8ghAUrEe{qX72hpSHs^&m!M(XdRLjT$+N$G{G!4D)UP z+BIBlDuRiMCQNgZtso)0X(W^-&S8Nj0mNxPT>U^~)kSRwmpdhKpp6yvm*i;@!q8{N zd&y1Q&~P>hFj^O-uwLDqV6;90$3|ID+L51>)_gf}@Hd79Co3Uv&w9PLJfsG|oU3V1=EGtc2f{h(9pLaaBSz`s8aB5HoW2 zJd>NaT|NwiS9rQVl)^V4X*e`dWE${Q(r{Z1oX1sl;3_R+DW|NjT9ERE?9G1#?&l2# zhz5m~KMNRwg?3IBD8Xi>qJ5`PO8__jf$FV+jlU---3L`T>Lk-x098n%$g_SZJrxNA zB{c8|?|>1RIOb;ox=ce26gYv{D+**u&H?Ad2AMI8GF{EDP812(kE($h$0)Un%Y zMQ~LMC{fbP={X(*^cX;W55nC;0fuomMSAnuPq7ayCv6AEpU+HWeuA9SW4Aq3&hE;U zJA~gF`TL z8qvNXfpk-fTvzl6YNPJ?ZK6jaK&Q>X4-3}`mcs88wAA@D>37j5Xyo&oQ_CQkAvi@% zaV9*MZbBr)F(`nP3`j8>$$C&d>t1kRd;;^UHqhz+- z{fQD?VDL8Vlrjy_rHVR8#d`u!luU)%Fsz>=dz>lL2*nTA5fTBi^i-tG-^7;7TIY

ThP6Nr0Z=@@kxB?vU~l&`I#7cM!WCj?naJ7L5w2c6o?jO63$ouPM#I=>J!;7A6H z>HOJ;!{GZ5mUHxEFzCb6w!>&!`vS1_jbhR#+6VqSC49$vBu$cdaneOMuD?i?uKY@@ zd1&g`o}(*Wr*510dQZbuMsvOlfH;>t<`)XaSkVVhp&WXtq5}3fbSla`E|A;?Z#c7S z;^J3mWN@t(7R6`+EO0&UeVP2>2QV+Vk7A*30X64QT3g?;i4hdDk%c>=-y2IFgSMSD z(gNeh&-0WWM-B9pxH8cDs(@0Zi8l$HgShcMsuVjGpAhrtKynCe2ALx{hIsJbPv!@P ze{{ho@d*i$&yKY?J^6mr`LO4z%>t7^)h(d&PejUQd*X-BFb_3=PufAmh5Ok_!0$tI zy)aRS5fZYHFXzM&AD7dS=x$#N z2JZhtQ~nK-83E&9%42LeS5=5`hGjT;M(Yoyb@GP?ekkE@+*Tujx8%cB=Ii8~pr?q@zES zEGC8(oGs5#f$~TZU6}AgY4|NgD+N>*d5ZfAJ@~nfl%Q2-!TmR+K9KaYC>WBf@((fl z#M38wT*7`E2X0B|p;^|@v zlReU;_~INIC01DQHTvPY{h1swuKES2ogtMl@;O~82PZQUN{wSkCX%J{tOJJ({~+qh zEMi>zPQ6NNy*g!dZ{FGuOYOH!bW$(mzYe8K=F4w$K)fxMVy$@1D)3=6DCUOInA99I z3K!JZ$ARyGPh|Cjx9IDd14pQMq7$g{TdL#Cw~m4kP)V_0e+QTgh!?<{qlkKMeyP zKNHkIL?FedC<=i1B1H+AMH;s6-qgKn0VrVId6tBN!8lG?Yj*kxw%dwz$Iv+Tcx7p4kLIwUL}Bn!6)`nTnsa%Tl&HNa$BaA`u$eaYt>=E@AF30(?oPs zed!;RBZ(}mO1Y;H96)?szHi?h^6sYNID3

fGR7pUz#2RAR^ z2RTnF0denGGKhP(H=-8uLVp_(c63v0uPEZ)t>Qx>qCnhxF_Ql+Tg<6L4XZ>s2?hXV z`JhXUh~pa=Da&_r-=)3no4aME-!&eHjpcp8z6>jM$W&kRYKh$U(E_q32zqzDkh|lK zHCY$;n}~zi|IqI!rEGwl{^SHN06;7#uj1{_BYUgJ-|LI$-@5qW(V;QJ)!7N7ZgEj{u|$EyTnleVpSgPKhn$s0XKh zjwJ1SP?bZDb9vT6I@n6j`pw0zU-;-_luL<<@Qb$1745YEB(a5p;4a)Ac`7rMmVhl} z*D*CPzp+JGf|&a1VNKcEXKx;n^35oEly#vq)5JSwOztKRN6gUKEn1D<6sx+l&VLdM z_FJbmv(T1J0oS#Z(Bf~y$SNyAbFP2DgSRMN2hIcQkp}Gd(aPHsw_B5CK+gi~9w~?e z^iG~O`T+WdOAZSd!DcIY6A<&VIum&SNQc9626k)qL&fWA4?{8!&Mi?f$8rF&Pz50gfYl3@dkKu_nGA=F->#QQ3x?l z-l3pvtE~Us;T&A(y$PDlDZSv-TG$+YfAULk_cZ}{vDRc@W`s*tYy$f(uKV#8YJm8~ z&x%`oHda(jupRcH6}FeaY)R(NZB@~0dgW^%&}x@auxf`(oC?6EzFwa7vHnA#qGCym z=~&QZcY96XG01=GGDxm2kUjaKPJPI3WAMJNQ>CG8Lci2G?1rrT2X;==M@&C0?q5~6Jle4GDEET<1nkE~0_nXpV zn&<#=hzxa5yRjv?pz=Ioj2Kkj%aYlb3G7Nm=Kym^U3n(*uMBxlY z=dN;*%mXb0=2Cb552ben$ObGL!#x49?@bKNWoE%e(DzbrY6lzTQw$WZM#6we`EsIF z=F&-^-%CkC;1T=cL(!SI?1uZ^_hh=F+Z&jO$$iD`JJ7Bq^b=jQ5p+AZjn?`7@I=kQ z{(T*^I|m9qKH9}6oDKsWetL)B2$Z~aqHb~Mv;xqy7PxE<{8WHFRj3%2;&&6nb`pcE zN<*hzcyzB%5-%@bw3ZrimX8^d9v-sc`4YPR_^@=$oatC^9H;{DzkgfxR2j@i#Z=pk z>qW#;s`C(7%Af$6JV$C;FJt1@oKo~pD>`i_ElNNT3UE?z9aT<|>quACe*+OA{u6xw z4n$M#5-P+Yl256?WKAThIH8I1TYmDXl71IOGc17UGoBX3Cko3HkIrC0~t@yD-=#VGOLF&00io^f(oNwqA zMHd5q6S6QkS{NyLmImH_p%;S%5X5r0@AZWO6##agwS~DCUux>JyPv+iFq@-S4yD<} zHy%O`oHCfjT(Z5?Nu{h<$qQ7Hg-)AHI{^nfgO_yO12q(lLw&M-#r&i}BBv5*ccjol zQGb0tI5RP+(^I~~wu<@%>c}+O@ndm7rt&km_v0}OaDI@jqP>Vd@pEN}fvfa8hsluS ztfGd=&nnq+>6b>{7yMM zQNl8X-kAdPlu*!8?x*3mbaWc{GN%ZU29b`$SAL#4dmwWrk{!XVaO2n46K4%@wD1dM z>!L6iBo^r>PPfOB{P@F~gI>O=_k9iL)G|yufjG9i-l5(ZnbYnRv}Jq0#td@M8EfTd zVcsS1jvMJZbu_JhP8UZT3e7**Dxj7wdS&FA6(CA~+0o~O=-zoD-MKaAJrXR~joiHV z^j$wqGvLg|-!$};V8Xw;HdMjK^4JMvOBQ>8Y{?I)3RfiBEAFgG>0$~}PEm9KlV)p+ zf=S~kz{KL0h{ob9X+nqK5!W~0hJ}WH+lz~($Mf&+{`@#IvKvP?3-!BpOm0=>I==yN zsv}Obrk@X^b$wTzPQF>W$1&3nr6&NfO_K(4?$TGq_5tljEW@nd5!wrO2ahu7XF<~U zT^}UmldD5E&q}*gGuI0linFWQ7bM-|YO&;1!IP7FUdNTt&sTM1Z=^XmZ@gtQ;A;Nq z!IEmL^1FZBOc?>>$|;6m?)sxWoWd0039&vuPG~A3pjyn+5yM@~T69uN1RLb6n=12@ z(6xhUa~Poa>z&b8B1{U!EpJ_4KdRkH$G;EHVcn~hV2Yc_X&$?_5zut>HI$7zZDfV4 zX4x<0!G>_&ca+FKytT&q72wp=dEyEiM7K=qZX7n&9ep&PC(BWB>k@$;+f=BXJ*m=ZZ+Pk4T~u>-uy_ZKC$03CM2u@lVPcO}^w;D!W@hRP#f2dgQFn zdwOf9&Z1javbCVW_00O#v9*6nVCqjB`2Wxif3>d2Ww1{DN-OEWU@((0L{!m&~9s<2LXVO_j z8B8cX)VLg|$r(D$NMc0?sKnm^YI3g}rn?)#0q3(emjp~jpR*7z#2{{=;yk_F6|r zCvN8Ff<3ai;w^6;fA8+xh8*jL&vCXrz2=?dU6VOQykRI$_xx>4f6IS}^UMY|Ah&{p z#tfMbA0Q2x)*XjrHeC87GO@BBPJ; zcIU0!RB142Empwc8Cvm8WIBVWdYoo@-pTgTIH8|(!@H1lWJGtEp-+9qP>B;->{c%s zF}uSMVJk_h-u-%ennm8hnTtzWxgIVV?pmjyw=gTy@U?6{PpDc=o!gREf}24mZfud+ z@q}}>o;Eus)OUPyXm9JW3{%46EsH9yW$X(j1%*tWacSN3ut$>0cfANe-_bs3yUtp)z3w~WuWChHFW%>SgGO%lDbKaccN^m6YvKV z(&1a(p^dDK~G`*=<#C6l=ynwIOyBh14e(Z`Ssy_?4gRX_LZaNMvdJ^cS@+OU0=h$ z5>jv8yu9lpga!L&8dD)?bi$;bqa&N~Pl86~z|TvslEa`CKLw5UM>D0EIvPgyFM>vu z9Dk{Y%!D{0Xf$TzBq0JpqkGcG?nBIC##QDz5^@c~tUBxf#ycB*4w7|?@A*_?yNCmX z@v>2v{(-XL7BGsUqvWCN3VIP*IbPO|&mqtTsXc=$B}Olql_KJK&LB1Dd?Q@QEUt*1 z{KyV8DNs(j*|mS=6WD(U&5Hf_X`Br~c4a0w<4>O;vZ+iCZT*K9@c&pO3G)}n|NnCT z{*QG3swv5U(S9bIS~HH5Xk0NRO-eHt_@Ea`i5N1ZM0m+ zRZ)W4zy6182GCV3VwWNThmKfZP|c&Y(lv;ar&P5T>Ituj#Lm2My!g9qOk5T2H{fL@ zpB2o^(RDI;(Wy5KoM|)Yq4&<$M*o3~_MV8U|Ngg9=CztFvHnARN;pU!~>D_%*49ix-Uj2s4 zr=ap@|4TI1Oo)#?1ril_R2o?$M1c@cE-RU{?Xi5464L4@J)RBlk3$HdPS0#OuH5PF z52c{~vh?GHIdEYAc+y8&Pf5o5`W+=Bj5mHKLLv;@a!Oeg$6H5IG!EE%u->wV0FD)F zkw-s&)E;gZJz;_bQ7dd?{jd4qbCa_G%1_NtVr|PGr5rS3XqTB3*KE(-=yvX@nCvxo zXoZPNLXPmr+gA>URxC)fjeAyMkqKzEU7UtVH ziMbf1iybAw2slluaq7Ls&W>V~2DQ)^D?2+EUAf}F2r4)Ij*K7q*PZu2ZI%Cv zV|IVx|NkTLe+6IchtiT^#1@tqw=1HikYzqVf<)BZJ66q@EPwO{4x~>BNnb3tCPxa6 
zpUw4LCr)m3sW|&G-Qg%smk}rSin_mPO|fiCQS!9N?xLDW=mnZ+Gu#snFzqvO)JnKG zQs^EMcA(cp+180X)$Ky3l}~~?|u3H z573R<*>8!G9U^$hMmi5=6N@5GGuUh5MUK48(>(Qx9o?8>Tzt95NBjc;TetDI_OFj* zp@Jl*8oU`JW&f1&j)U$S+}*cfa{K=m7ayJ^FR}-b#0ktItk4ZW&b9=}STbWF6@Wkn z6lHD&M%aBgOyRp$fhSl=voI^HcEe7KMaHUXuT|xt$)-18XOPD&Xo0ByEIBo zvA{#Nh9jFj$S0kbnf(El6WM5}No|4+sf!S+?QLdexS>3XI~R)ST`DHT4IeI%awUc9z-0M7hztCE1y)ITd$a?;5;!X_P!|Vb zOZsYXu_t|Vc)%hh2UHz@09@r5iUZaT$&rxzYS zV;a5|`+C!{lxm9q`}zu(nW{jbS2$4lLGuV7u41q&Jqy553QT`{Uc&zZr% zI`7j-Vaz9Et7Z>8@@o-@;Y3U94Ce>dcAIxEF229JR1jNzwQTl9vi0IqcKY7A-h+X- z<&XB37e9-&OZ0y3nf2uH(=+}UO%Y*^y_@5*S>gu$BHN2EH!V1|@bd5RMxH*#$(BJ2 zFK?WB>5s|HUxwB{Tz2NwtF_yvEu56D#G@<~yYb?Zhev0aR)Va$_9_B1H|O{A+9S^&OzO1&kE+^v)bqSjwDUZSf=3r_ zWGZH!k*VHIimErliI`_CjqTU4yu`lDY;vTtm6h%+?<6OW=f%Y%pXQz4KT_7vT>19) z3fAWeiH6yS8WKw2+HnHkyYI9NBA(JO1&k~OoeZRQn^Ha~G zn}U^48<_hEN~V=YCV}%b5V=c{&GC>NlRu<{NLP>@CFSA|Cp~I7nzH3UMFfMS4+!ev z+4f6CDXeGo#7`o6yuB%(G#fFk_VT3*4BcE25;no!j+bJ9obhDx7F-_N38ulhQNpxS z(OboU4qFChin_>3HXCJ2%;guD-1}JV2u{iyv=by0=#gAOy2LC2v&tu&fOtZzt&UYE zJF7D6YGE}T)eNbStzsrk7a zVjxNnC1kFI&x0JEhO*g+JC}GK#ix3A%C?#$+n?ZJCkNW9Cd&4i#M^t&G|8uBE__31 zd&f<0@qCoi?)8l`sM{9sjFp8-Ik?`zdfz3=8De6Ry&>0Orl1D376-(Z>+*8fpBb#dn|2;!&g?&4M;zTR z*wy}JXlVYby+b31J&xn|aATEvo-&GxOVYMIe_mQza7O!S<~T^MYc~I}JIF}>?&aY0 zBM&J;(`UDd*B{$#n`u`~(`4w9zn&0MFk=16OpB+bs}gU*YUJlEbV263%9p9}yq4Aa z4f9tPKWzN8_daWV?>cY$hXu=3N!@qVzW3hOwXZAZ9rJef zmbNWnGXM?S5e$U3s3WE?2_2;am&GJ>_IV}!9M^1up5fQOrrRaZbSu)8G9j?Wx=@G4 z{0)8Y-Dj?q`WF0-ofhS9twE&$o8KK0Id$-)M++0Fv7o%LCmLa75AW4kc2^0Zuctmt zl0Kqk3LLT6(}@2p&2>18%&}9{r+KWP+QCg5k2(Vwm&AC9x{Q6&rpcy{4Hk^F)Ql?JMz(N{Mk;q2s`&kvvuj$8y%@e_p=BJVQ(p79R#;mnxc{*J zNfkfIvCQk~j-m&(kLn6$-a8aUkzFNLtNwO#oh|~{1GAIaO?%nx)7+|gM0dYi`>jra4ZGELfvtV}huS&$${t_r z6S^8WmJnJg&zt^;tHL5h-NXh|^m?mPy+4%U!kr!X)XG|Y>SrCzwtrs=>1&~Y-tw2C z&|iu||E~M_Z}V=@KUqQIFDT=GD3q~m#*;V26`G3Y*^{+T#x9TXyYq76ei9##jEg*O zz4>@lG-Ea2r%z(a2YIB&iPq7a^75@(FyM8Kl_AwiGH#qk7#zPl3>p0Rp}dBdNVdV< zP_^7_@AKnPN0qWOwkta!#gaA;>*J+1J(iF5i_h}#jYvRiT)J6*e>l}4$Mu_i%XFv4 zSmxLhIYX)gs)?rY9C&W+{vN6=>w84^tY8V#aev?$9&62jx4yX9$|KK2Z2y$Otn)*} z31RInxp5huw_fF2>T3^m{@EO3&w?A9V8lz92qU{WuVk%U3-(H^OqrL$ic%Z1^fY&= zYjCTpiv9|PwIO*Q>VrEaH&+z1Hs3=jN&G!Ijqf* zF5NxA0^kJ+w*rKQnQYA!VHiD)4RYb6LQEniF9h^C4!1gOj4?ph#~d38Yt%B)NkAGt z88i=LAmazx2T^MBUEHCU`+vEXJR6+o-W4=&SilPqP%3O!mj ztkt_i3wVKe4+Q=OpB)Q1bk3$+bYoE>v;>@I4}YluClUZa8kc0PxX)<<$6Lv!hI!}U zAGq@u9dWq)L(b6ohUn*X7ZmEL>0VxNpOHm-SKQgbCBh8cw6m z!$SJ4kf_{;N@F83VOR*C6W$xg&1r;wFj&)A)Uw5KJVQ2{YQSVPf6+1%*wz)tR%kAI zJwe=BNZMGCs9CT88SOdoo~qvo%3Fs4zqCmxGk$Gh-f~JN-}UCk;Jd@Qk36Kg*R|26 zHwWRyr(iH^A4z(#8E{7Fi4@_XJlD!#JrJcFW^NDK6iv{yyQ`*$0<1{EocQ2 z$jz>xB^RNzYz4wSN7&=Lu)Ip_w3u)}PG*$9hi2iPCNEtzb}fY_TA)Nq0TGU(&7$yO2f@94*Op6J>`!pTpJ(({7S64&<6iV>s_Ndp}7-d zxGKhx#DQny+9;K7L zV$5rFiP1*5xMeiR5S4UnX+3TlFu)j_h4iaBXjosR_jQ&nMuHO#2jA%n0~ zQp!d(0p8s)p4&bt2G9P;qNC)U?H-$NMX+spYT-Gr!mP%!U*z2lY;GVw|BTNYAPf_+ zjcNw*ENGU756Mg5g~D+1Ifx8{$7r@c9(=E=o$VlPmQs?ai{DSNi;){7g8XXMI`9c;nYJ6{<*;Dn?OO~kd&BYoe&7m<>2pL0S-iJ;cM(nAjHO&3 zqU(8tDup^sBZ>j;jacx4l}`dhxt1#G{C0tj77GF&VurenIf9>R@L@8hn9wsoBm?ve zwlP}-RmJ8MokII5a|2uJ3hf?ZhfgUem_Jau;`=D#9=7Y|uzX7J?@>w$RiCw{&Gj*F z9`kgg zMgWLl7MIHp;eOpzavz;}ev}VYU~3u$GkXYIJfiDebqym;upFtpQH*VqHRrmsIj0rb zRyN0I3$Pz`o>E{rXoZ`AN3&RlgmcCuN(sm6EcGkwGeYvm5|A}Yde)JswHsvVZSTKm zSqVdB^O^SSfN zv9V)IQ;GidPAj#b-H==XV-6BvfwYv2TZ6bH1X6RelyC;&h@HAc$k=nyv;LBniqNJ< zgXb!1Df2PXKuF1*fc|0Ixd=VcWkl+}08gg$*gH+tYwE#l; zW6n#GMTM(dZ^(sdQ%iY2OvPn*tcpTst6PW}oiX(J%z!ODE{Xbp$rOh#s8-vLkgMy? 
zm$Idh73c#^^T&ZpYmDKB7Rb!wDDqhxG_T!b4d|38XVDtW-T^o{xZ!2h&iClnq@TiMxrJk4}10foY&bu{Pt@7+XIfvhs(9+e`Tdb_^08e zw_z}vBX1;q0v_?#NfzKS0v=;?1*^dA{3Hy4d)2Bk@V7K}Cjesji`1)F6f7`Yy_PM8 zXR(d8Dhv$cDXGGl`8|rGgXf$kA(L=$4CN*MIF5VC1L?1 zQ=~L-nCbP7d_^vUki8IQ?+2(Tgg#i_S-x$A3DM?0&LQT(jSHx{m_t(>ndISvj4R_r zb6n}^wktXWmhYpP$@|k3Es}Kn@ZA9XhfeAetiGk+pawfLz&-0r*dpU1vhJJhH(u4Y zR$XK^>!1w6Kn$KR7PyfgzCWkN=zv&7tCvMpi#-;$Tb1N4K8d0H(AJwqyGTM#eMQi?DDd!+N;T z6^Ws0yT9x_R!hrfLxQ%8O}49$oQxuRQ1}Hy{ZF)x4=xqo=uB@@H-y=J9(^u{dPh_T z!QE87?_Y-vo>FgR?ZFsmebXcG4!JT+m@{ZeDvP}=Uz8_075WliAC7eQiP!d&-DKK% zUvYSXT`b))mHndlM0ZAbG~;%!7K;nQARFI?h?{7$zzT&EE3x6Ra{eOZ=>g0WwQh~C zIXQCtG(*9eHAJtaWlXAtihQ)f<+=ORJA=49M*h){D$nN12J$-KPX`~S0q8iUL_97v zAoM{hILjQ9p#dUURgZ|0>0cs(lV!6AD`v^IRN1C(MvTUw)lE2uuf4YVR>TzG!yC#; zIuVfJVN{&q?4d(CNBw%Za8Yv5W7JicHOk*uS*p7qTyr0|{Y@+o_LAerS-Rj4(-F`M zXJSIo;GcnQZjIMXVy|bdMc>s&Dvv8PjxAGz__t8~v55*tkvpTAmPlRRQ-N=gL-Ux{ zV&C|Zj2PPd;UE%faarjdlCYJ+woOcU1M=7n?3YsAG0>)({RdkN)q=;j_~itTL$`*H z3bRfMY!kNi`E#4{*lw5!n&sb#J0HI*6`NV+b5d#zz_DvM?6zm)k4N|&ACmh5R-KN~ zN>;gmFCEODPxM38<0--@7+WD8vj#`Oj~X5nDNKtew?yswETVbFWhG8}Rr z7YPlAPR4)HN}L=TMBy_YZl^;-m!>(_a+@f#k{QTg6Mb%<#L(qn6+1@_dtdXy!rO0IBr0{p9^{?}bJPq|5VO z(dM82DNJYTcd#~Kt-qIH-^17vNrOa>$=OEy3JlYK>m=H~Xf0@0%6{prVELescgIS; zt0H^Tddo43`k*-&BUmocAC8ay4e7%!dZUJ~9Ze%v7i+QK{(}Ubwiw@xfOmZbrXwaB z;0YkNU@$F}+mmr`IoT=n`QZnKeZjzm6T|sf zQD%(l0V6#+$3*Fr_AlgcE*EKyy^I=d2dZ|KK-F$wiFo(3s%N2H$2*U&bE0+Ke-L3V z;GGloG#y$dG> zmwocN6l&eSdhZS~$P#-`Pb=CP6RlJr0kDkyoIVqj2Laef5W>D8w+T1|w&FY*4J^iy z!aEgAxY=m1ie48Zp&s?E-6@ZGw(_R*6hxEJ>Y_Tu9Mu#p_|X)$sK zV`3lY3f)Dh5lNdU|LK_5X!Nhtz;c5AQ=BNOjrdGqzp++2OrqD_B35dq+e}F58>8Iy3+Gstf0C zm3&TtA@Z*O4Yd59h0c@z$wGF25v~5m60Mf$VBo+^eh%Uk7X#;T_4Y!uZujjBo%7L^cZTX1*cToj`{>eE$RalDVb+pIm%!vM zwkB;FT~Cpz=d-qG()r&Sej1bmO6MOPG4V=tJg(cz zdxD=g1Pu98vOElLZTj10n@3;ksyzSnjni#~kUhVLL-Im;{e{KVj|m`tIdAb-PA{G# zka*GleMhQSeWf*f$GQc|M5vI2t)|zcu0czgrzm4tNL+*L9^)(&{4E}=DDYDyVhWU@_?#_MRMeHII2OzR z`RBx|&G{{#o1a8_S9x(py&d+Wl~+L~FdGXd=2^6Z_(;hgXz8)FAOJpcmedmbt=1nD z!r1f=AO|+b9wkDx!rp`{fDFv(eN&LNhGl?#`|R0{kYG89bm7^q z9ro8Q?Ar1BAkcYDfg4wT(RzDKf;kRqu7);dI$@7(XQ1vqO>AfM#g&1`1oK+!vePoN5yAzT z{4AoSf6+23m+kIxr7XaV2K~`piHw*h?w@_0XlYJ?Z(RgF%NCZureXGj4P$l#mSeie zfGQC+x(`5k(^pJCg``n7gZ=VRm!yS~o)_h^n#u7&K4evlwi>9aN-9e`5eZ{N(du+Axpyh&&6 zYp;EBc%?E|wDaY}7_B)kXj53y$JaU&y2KnGbr8#1Qwf$lPj;WUkJPtF*VZb@XUSS@ z6Wjn3DK;c*>ZX}sI?A{vct%+N4Akg~($g%$a_V+=I4_WSjA6!fk2;teb-GbnmtC>t zV^+x=?N4H(cwht9|P)O)c2<(4T<3Sow2H4?K> z`Z@gY&1?e^D2Z35m4{1b7af&zQ~f#Vt|S{QzK@30Dj)N-gZj2shVKrD&>~A@qs8b& zfPZ>XF!yoQ=XUrx;;e1qOAQGkk!#qNWk_!4d|{Ni{5?z zB`xKcE>L+m>z|jUy*SCk?&EKkgzHMgasUIgQc8Wx{ zHd+3tI8#{7w^Z;2PcH$gdxLU}tETk*M-VjAwBbMYqYG}b1!=B8zzp0Pbj zNz=vGU_4;pyjtCozvZ{l9(R@ivR#M7lq=@?^O9oMHJNL_)=`Io2U)+`4L6YxdhZTq znW){FfhU?T)>2}(?*CPye`Ejlt-G}v0MkTH6T^Q5MytLCVfRPWjky1LEbto}OWbmR z@aX{W`y@z@Vlfy<h?S_Y-mA0C#()_(0Dab7xf%&mu_%|L}eBDB8CBt zH|*29Ba(+zi~KF}63oXle615nV=~oI%@g>jL|UfWh4hRSfZ}!VOu1lA1XVYymF zGgx%c-s+H#=K9;z)eey7vJQ1ty6(Bfv}}e*x#3a#U`B;Z+7}LoM12$JeTx=ax=S11 z4>wYTk=#^nJq@?1S9`NNWV7Es;$sH~cqxDhvR>{PF>H|KqvRRli?Gc|p1hSiaN27_ z-)Zk{;+^Pgm;@_UwUxUDI2wkCzwL2~a?PfS_oaS#@mgmnlAB3rVIX4Z7GdoJN+Af{ zI=As4%s|db0TiiPiMh9K8V)H*3zC2Wd+A&g91NB-z|!lOD|K z){hbWOOQ^wF>8BqSa*1IX-S%3A+~hT2c2E74Csqw%sn{bt)S@*TRo|KGQgM{K2@qK z2fKOMOae+QeEy7O^;tAXTAT+$GJF3|o8b_#OP)yhbm1-}qc+bRaU47Np^^mJ}Ttwmy;}XqjD9*5?&hccAZ=9jg{ET%DNU2VTo4b`M>j zWsU@>vcSa{xOdkQ?5KJlldige@>&W&OyT@~RYYBco=m%gks1gCG%c02{yH;N<$M4% zX+nfZ(V+)eiu}Dx!O~CPw$~PHI1sp6b(*%If9QL%4>$q)x%F@ZH-(Js;+e5UJ+@9S zW5)plq8Qj@Kdx|7)aBjnJA{=0(8GYRVSR^uZgHk5>$6;5Me(e+An(d5anBB1Tfv+R 
[GIT binary patch data omitted — base85-encoded deltas for the binary files (docs images, PDFs) removed by this patch; no human-readable content]
zdr)t7Y--hT{5Zxsc|v_3+orSquL-djS`rgI{1I;mWr8eZ^YlcBVp21rvazeFy0mVY zgSx;g618_43;)4xu_HR%6aZIvw9s*hYw$sHsMS?cwuk z-_3J2c3`S`>1_`W`>kTPy4D{|H-I{@bEd=B#4n(|BQl{`(@OxTNHwKI5z|(E(tUka zJUs(4hP0`czJbB?8GKEAIyT~!lKpLWJQeg(Rzu0;l!dBGW28o;mCKs8@N1Rpul{s} z1Mr?54z}uYC&wDju@hbRJYb#a>|R(r>wLD zp}sT!^S9x8Rl}ElA%L;nk36VT^)!S5k72!&^}`!PWkrX7UhXAJ3V5vXn1l{vZ|noyy8JMNgi?@9pb zmU^0VJt)Sp(VuvOOmgEKi5&)|gt)H%mZ3vIN49m|kozuN%+H8x1`8Vf+1=d{fzV{@ zY8bYZ@y!Z^Y22k+Jt1OsJ@le>PCKR|BL3M+8D4%LyFh-V zs8eNSetC~FenT^*Giz;?mqU2ixQRT~oKwn%j}W2kxJ}x04!Qq|%6#N7$n$pGTa>?6 zI9RQ$hU+O-1uNt^ip<2Zcc8J_fG^)%&BDeUFH_)m%*yu^MFsgU%7!e!N<^>U534ek zC@QDteBwfj&9BNCHbx^XnuDw%wQ z+NCHz1hWv5#Umbi&NCOkBMn6|#`?MAe!d$E^ISiuj$5BSxJ#s&v`qgj14#jvO~%Qb zA01R&?Dw`zzDgdq$Y9}_Qbx6 z=8Fci4!_OS>Zx5^-3E1?XJD%Ay`torX#zF^Ou#&rHz5Z}0q!(?Hrrf8-})OJooU`a zuIL!K;0!HH>Gl40q_NGzN=ji5)fXSHzT5~33JT@sC-LwfGc#?*HPdD4$;rv`kQ6|6 z1NMlDcLfdV+u7}IZEXSTRXf_-=kb@mgceK6sOmkT&Kh6t%-NqIjFI-aFZlVjB%qv6 z`r_878-M6j27whG;r+}XPiIH+RMU99cCQ`I{Pwj%Bz0s)4w>L%=U>ktmh$x4uuC$- z023FckMl&ua82RGu+85%k7IsF9J=vy4!@x!7JAQ%jggk0_e_00(sS=p9|Y8w+q}!$ zeTq5)`l4q+5H|gS#kT(b&Jqbx--G^z!u1^OLLTe!U%Jh10h?s2f{k7-F2fkcPpF>< zIquEwZw{%2dO!M}D)3rCqhCMMF6;ghdYowzV;bNvIY?C7Lx4zSqC*5GJ?u@ccHmVn z+NJTh4A{2hQ5#sic(vy4=hpy@Hk+FOho2R4LZtb$fNh5*@gz^tIHL)Y7A1E z+R?X6dJ30Gzp?4gYe>yxsX?Cg2j9()PKK+?_tAsv$s5kHK?tq(bW|S%Z zJKzJEp6(M0+b_-HRekjwbBQr|=qmB#&$c;wHc<$}UQ=xk6Xz1w7 z`N(S2FB}2fM&W{Hwy}CP8o;E}^V;6Mu`EzN`Z+5{yY9-jE+Mbv6%z6)7Z;cRD3qd znU~6}Y9~b%B}NZ=*wJBb!V6?}J`HZBNFN1HJ%<&4H}^Ib>nl zU0eK%fSuIeX0N+>M51;ui?D<^fFF|W6|=n_crU05INpE4Q}5P-4S>ggHrD*)_;U)K zczW#bE2%EWzyTeApWb$SsLXq5^ovk~YWC;Jp?7N_w{zN~hQezBGX9@8H+uZiK95=T znRzavq?h$RWUE*aY1#E{XVx;0tK)yPl|{0azxG$Hu)CG)1PF=;>=pE_`J-(_wB4=H zE^9pXcu5l{)~r2oz%h6pbUu+S#{<#GEYI32+q^dt%pUGkJT$ruv9G1gs;tD{!)qjE zVaX}<2HrdKdc{(+6rDOBkSV4uGTjWVZ!GAAPJ`G@)$M^UV|mi-?5!lELt z<#UAWJR6xuS??I`pE;xY`83NLz%T7QeO?6JUh08+3)uG??WBMOr+|Qf^Y4@H9tsKfZB;`<8YUp` z`hK2XLUG zMaeM^`vShzfIYvem5+^JdaG{Xa3OzFhX-FbsMvJF!FdQ*qCNw9)60lYKmhQ_f^P;O zt*#qy7>I0aZS@aU0ZOc==f%a|48(7@5UyCiie8cHzchVY*`jwaL6$6a=pNb|l1Mhd z_vJo-GCF{X-|Y^eKs`0FYr&DM)MKT1=VUAj*rJUzINlA|aoEEKP@N_!_UETPPcJR( z#HB@E3i9zM8W`Gg$VJ`nch2@6%X7Vg)_Zh7@ySpo>;(^{&zla|ZSJ-elLv+`cnUyW=#J zuzOba-72z#Lq)-AGPbUQ851I1T~xmKEvxZt_^LH{eQ~J$tu$@UPOFSpF>^6Ho~#|o z*AeBHdbPFX2bB+pM@~z12SDg`%hgdr!YBNtmOQ2jGBy2sadKIzQq!HLzeLjI=7hv^ zVH6FH!KeFrVLqXTs|o{Y6d!uL4!=jv}FDxDE!!ld1OTR1}D z1O5KIL3a-_&F||kD(Pq2D(gJdCT^YQgiV@nvNmKyx3Bz3B@*8aZYKveFJG5)(@Z~E zqVB}aL~p1Up7N7t-<;bXUkJFn< z=Y??6VCsxVYlGELQ%FX568QOl&n^q0kFe)d*iaPweV6BgQFN}#oxMNxzyq#yM*81r zZHypy>kj1O@IeGiEklZRnZa0KGCInr+~0Xidr;z^>gy7T4PHk(wiB_O9O$Xo#_rr!?vJ@=A+hym6esr$#cm` zF5;I8xXCvXGbt%|cu!bFQS!rNphKi&8NC{cWf;D^4pw!gd6jg zi((h=J9my}&y93kuQ?GSWsU@*WJ|~3X_3m9&p8r>3xD5y2AFkEHvF{?h>PuqK1urM|He>N%195!1^{NP_ea zjNlm>AXzhkV6aVwvyk+_J4({f= zP)C%=&Xuf;k5AZRWxcDkuco_50f$j7&Q^D6eTd??7ly0bpvtnyzT_A%Y;9E8$ezd1 zQpmTQTj<^1#Gc$g(oSy}+Z_wf-Fi)IVF0%5sXWRbPNKz{(xw?SAwqjKv4jtHq*E_0 zu)4nllCvsIpBAUDdEe~G6V{|y3>SXA*uSz-{UQs+M4jSwmeSLdmLyVPaU>|NoAIR{ zu$uv1s5pKEItl{nY<}m}VeIAX1!x)TLxqy2Q!zNE!BdkrO9XG~7(map=P$P_zM9&C zW9nR_gsl5Q@%sM1j3S`rNdM+E|7*8DnG@jUF4sHfEF9>NPS*xK z+&y(ZU#4~*-=5gzA`*K>u*COs$K(yESxIQ3bO?oXP?(gc+l+3ihEP|zuPFQ`z^YhA zM5OBS!r7Ata>pLNv%A~q4j%>>a2;!WAb_vou-I_&Hv)IfrGag%!sS~rV!GTAh!Zu` z)LiqBz5}bG$HHzNh7oQ>sH9on_i}n^P}Gf+wT7S-E14<d9Eo|%u$eRw6i$e|~UGJzMhKvM?8t96v{xc*aDm(K9iDA#0sc5KXOLO@c0 zIxFqjGs3D2$H*)f8qWZp4#1z_nR! 
zhQt7cX}Di=rb{f)3@lQk7>&Ih<6ftQs!WaSPdpQP3}(hz&9(a_K% zk9VR{x0&Pu`n_1gHSpC9jTNO>!2&nmqRL26CR9F7@iwW1+B&`dK*(7whm~67V6NrD z%~w<*{iBhDIM)naxYo(827@DAMUjU*^(Ql;sfNQy3&HJJs>FJB6ULx0wgjCx0%if& z)tz#{2C?DQ9)~0XLs>o~y}s0u8xv|z9o>b=C&$iSa`6NfZ95#njs4 z&xXhORlBPj0sH;5 zpj1xNbR5811jPNUcy@zB(<2IvlUfC4-Y99^G_#8>p+=TU=g7?QM!&%;L1HpZi4`|R zc4*5bxxok;hCBBb1(Y^Hc4&7#srzHS(a0pXLgFt^f}$=JmqU!~Nt7L-A50r8AoL;} z1frQr@7D<~HYFDK0AulIwR_?ZH=U|VrVhB~6-SQj5`2#wPD#N3zZyysVhz@$^-gYE)6$?a%a@!H9xcQ7Y_2The)mR{af4m&8Gn(U%=Jjl1kX_@$oUx4PWig z)ktcBU%!4WNG%0yBLV(1nS#D61+bDgsq}Vnsj8_t^DTySlSY;JX2$nYH%WQz`}ME9 zx4b_8Dt9>?ktg9=Y*RkpT#(;h`aLoXPxeXKqmeI$N72Ftt`9z0B76Kxg)zKT5hD}e zN~-qlxvab8uPkxNK+C5pc097-ZW{xr6FD^WO*1l$!D_qr*Q=|eokn5J0|N%);(IDt zIP-E?MER6GpQ?_zV%(kFPfRb><$FihTq024C5IEq7ZdSMjjCdTa=Cj+hrYh>#!-q}K6|km)*XYpWX# z+h)UU>W_&fhRb2S%aqwM4`FW_F~4oW>M|#jm(ToC@}^{k>kNPb8i)ckv8OycK8S~) zv(|JTu^AqB25*`X6T8Y5b?mwMxt{dnv`6^7jon6%wMyCf%r8Se!$d5ljfEn(MCHZc z(jGxnHEPQNqZ-e`ViARRioFE8hkIJb&?jOM#VzEGKudVU&j*@y{##Qv)7IxE!5E(z2W3Kjsh<}+t9ZFn87`L8{>Vf=Ry6Eb>>in)Odfn;0zq1S}1fI z*s#x3LV|C$Qqe;HyslB{7IvIJU-mx&R^yeBwjd>ppdYG=iaoXVGl;!CU0nM>D+Z8h zFQpb4Dn~s2o4Y4pgQ6;#hX(?o5MqIXm=pBtzlR8^LRy;s4BmGunH1!qOc*s&JnXKf z+5OuGip!#-&n7Wo=`ao$EaP2*3?&bCBmN6fUTQw3kRi!W_S2I(kVX+niVRv{$>aR! za;ArT_?gy7nl7{@bYO3YO6|J$RP|Kzl^|)IXpLg`cUVdKWiAL6+l0*`1|5Y1R$<=+ z3t3ee$^H)8g)X#HehjP9x{+IB%)?X#zXU6Mh^v^&wS(sGgv4S0ve5c+pYQatI@PiT z5}U(IZA(r=Pm$lSNPF_lzDz!I;eJBUoFokQr`sVLIs-{&+*G})aD8fO@MbZ(E93QF zf#j_M23du%Mc5mawakP-uNyMM`Boy%u8oS;f-MCH!!ozqkT_Sw`A#zn`4wwQMV}q` zw-38cf-fuM^}3giv^Gh=E}yHdeb@pa0xQQtBB$ymgrPkyMa<1L>c1#@Urkp{NhuQ- zemFMDSFyVV_>+Nv&;22|Act4=iUl&QlJQk~HjNo}Cq4P;2G5!{zEh5~yrB}iF{0JU60I(6ZOF|jjc%n5L`JIBkRIg6 zVwMDZ3G_5g=@rWIEe}#;S&fC*@0r39TkAwj=6|p`0AC^?b3{y)K7IPs-QE4}>L^%V zn;Xao1#tMAGdLi42k4}+5;##EfUIb6u*~QENUk#DTP#q7t@!H@A4&R{d4rg_(H*x% z6-)jIpXx89u`@>%>@!Y1XDQsw#4_7%IHsRv_0P6jqUyk6I7SR0CM?DVz*@4p9Fv3{ z#YH6?u`o|<*VOEP#L8rT<$UqUW3hRPK23$qN?$?kY<~N_j;iR{W~e(l;V!2ED(d-Pp($am-SQb<2KzrJ!%t1nz>y3Vq#*#!gaP2&?!|BPvaHf zMW3Ia11ibxPc%|aSHRx0h}z^}q;Yrb`BvGR5qZ`p-2PiWIr5UUYD>K3dFu=LASsGK z!)f}H8!1pO+rK^yAb6E%71`O_4-5?GN^Fmhk5^Y$(}*~g2``L!wv?3Y{_7&|J+M&F z;c1!4y`cO?C&i^p0tjW91q-)od1I;7%=T%EM5##a4W}_43Ro?2MN+9!K5QW(AWx$g zUOlJ!;@dmx>N%r4ja50x^u&yX^yV^OHG5-6Dudm*nCuN1|Etqg9~Q;sQ$*zFX@j;8 zab$)}=J6%5`E2ol>vpfeLa5J^Uk#0>y?xHwQ`BuuQn$uOs-WqqMLf52F7J%gUY462 z{2KdZ#hyb}4uSTyh>S6OU>6VLBOq-Yey2Fj{;ty^;rUHrP?6bu^&9iVliA+`TcIOCHMa!aM{GOHGXCaq35rr(f;JkokKErks{73w|!GwXpAmqq z3dyI7e?qs(Nxgtn_aG#2H(vp)xZ6E|s0W01mo8(+JI?>I1@tn2kgb*fuV1JS5#-z*&pssX3N{YFXbcSPmc+TFMCB}Cuszrlba|b2 zj?GhFPDt-U4ws`4@yjJV9bwPuqFo_~Idzb7fMj-oinQP(M z)zd-%L?g}G+mtHVg~j;nowcogiO4E_yz14ksuhlJnx*>bz@0P(3nNqC^}{Z9dsSoM z^VKVXZj0Zr|J{NDA8+HcXUv)+Fqzo-&|h21V-XhR_*LBpzigZe!vKw+cJivEReYfn zGQh|mNg~<6Rp3cgVIeU#HZCQL?lJ?SEBB_$ONWdji2gNqh3}&X>ECpg>Q?>(N&keR zq2$y%uIN^@N}Qk?3#0^+Q=y+d1x(s>cxV%PfELr=-@jn2cm()W?`zJ?*V2>lu|PWH z)_xh6j!PALxak08as_BX0W1YDLMvhG2PU-E^Fm&+XP?Ok2l?UwC=@ue%+Cl)5jgd4 zUf$N$*VpIguj^-HGNIO+3!E5=6w!3}Icf;d#wL%bed;+%fR;)~3gJItLe-E?OQ(w= z7@W_PRVK^Hwe|~h%M=LKchqusSL@44OWfJC`MNKOt34wqECfz1X26LvCgO+MHfc?L zL9;9Q8JYoq7j0v#pcJ?;5BC_0A z{|x}A3S0UYx(Z=t)r$&=NmwDgv3X`n&d*6yUc%O2y~AJ%n574iYjJQ17;wM%VaydW z5|g}oecdp#cX9Vns%GZA{97LaIr3`OU69kHTmm>Aw&|cMpf>`}U5=!$+tG4MRu-*$ zBBagL$tzB}UzYvFeQmXva~^Vh?i<;ZCr_$C&Is`IY@B$A8}}}-2L+p3r-#D;Q;tpB zGM~I^7#=8`2x$9tD@~+)j!pITshH}1{Q@oyU>!^^>S9`Jx&Z9IfWxT{m8a4wN|HugjXHNwR3#2Jh4=Q#VqLLX|Uy5x}R)1F=o1V7#d8>E*j~&+KawG zuW>B=^}8;wq=J8w#Ytu-?>kIFiYzbJuq!_LVo6kNB6!2EBRvqpyX(M?Nz{d=H;a6Y z$?dSfL5d@6APkpB1(7NGEVXv{cQg3{B2h8-qw%Pxua^o@QhT2>)g;Q#kE4O{X)b@? 
zo@S@OLfT`c7~ucTeS{_NK3Xczz+WD8$yWcEO^RT15=Jw9{hRHf4D~>d)BDwj3-ASF zYV+hCGo!^|?j{9}xhm1ckZRUmn@CD@h5o|a^7NsHcp-Mkrti^rcP;vdIeKK=QMMW` zXcUof-`)nh%h>BTR^=%Dhq2z6ojM?3VZGa?xIe!*6*hz+LnB}yS*G0lL_1B|i2N#L zygW9TbY`ICt3=~lN6mMI`Fk9wlB`Pp04qmATwDh?3@|WCd}{%KA*Ez&q&NW85x_J9 zm6ViDfbg6;rzJlCT-7Ricd*hHJX2wOq>2|2)n;7O?VeUC)|&ZD zC)H#*kdoiLcd+1Hk&sDjAf6m~{mXFBdG3DxN%wc6{GuW_@D_MmugyfCnWV2d0BpIB zgwi_)2gV1afaVGipY80}yllNVL4^Vm+hIU4X|1n|Asfl!6Y#?8bgB8{7J)>@r>(|h z^_3P$3(Fc>Pr6EiT^qM58Mh){eUSFCn7Y0~bT27*LR-xS2CXXWL&=BPro&vJumO{x zFq3%P4t)dTg?R&b^P&J`)#yZfT2N?8=gBi0c&glPgzOX*8)Tuj^}+AxVNO+8m?kG} z=!?G^ha!4(Y(6X<3j9lamv4nQ)1feGey}DbD%LxrD%lOFZdvB+1Yp_pi0?`8;w=_~thP2F0i+cKgXyHZx=Q~Fse!lRl}<}339;5L z{AS7_ZH{-J3lwDn;M2(@_1+NsqSrHxNjlHla*s4R*Ne>qq}}e9hXl~1-c`c8C=#^6 zx}`7o7#VMp*2@BqY0*^+uGFl!g@{* zkJ-3NAp+yM2V-_DI@eYnN2@GGT(FBd%hzFM$?9=>ayjts%RVx?p^yi>Z|{l#XdDcl zvcvH6_b)9i#qZE7{{yh!v));|yPus>0Bsf^jM^GVRR<7(~@sN_V#wbO9r4(a>u|_ zBoO=izk?q9fL+~7gUgdf4FEEntFh)DFwoSU5-k7m!y+iCwY4?-@!4>AiD+h{&WA}JZJ4$zxknh*wVX?Qo_?mA*$x%rE=+fHHG5Nh zajY9uWbWMm1fpQ9Qlj;ZF%;vBH8a(KAxrVy~Hj zHsbwgq9^Iob*T+tCwC^zs-)ApEJ(S-Ty-g2=PE1oXIhdavt72SLf~Dx=iannl7C-a zID;&x>lp0y>}H<+*TKlwW|+BSmM+@D3lK@BM8_xp&jGc+JSo_-t8aBGBN;3vVVkM` z{y2GT%4%vu8IZew4pGU8uU=1)DTs|WO;36b)L|%jf=?TR{Fz{}VXA4LhmfoDjX6eb z-$|zvjWG%7#opuTBp;}qO1}1jUfphm=FMU9Dmbu) zYS-rlI{H5p%__H=Lg>DyscglntI~f5{rPy&Ot@Q4s9!66H)LsH(Rp-L`62_bvjd(i z3Ga%_6B|KsK0cS#KcPKjzzPN6tLwxY;77>CjHc6gtrIm@prVGxxmpMCk;nN-5$*#( zG4nS-+rrBia?+}yc%duhG_v5_wlq3AIx%tf7nfI(2}WL0S_;fkO#qJ(X!CU?dtF5L zJ;YgGyr8C`(aq$=_}C7}HRkHd_ik$>kI7iDJQD0X=Bq~f-^D*yv~usH6s29L*VRQ# z5BA=-v|F%nX5a14v!L4!Yb-@R2<=^xpzwE-c87Bl)yj5cr|yL_gqZaUZOW+Y$y>xi z*;U5`(`nPPDCuH~PEp$-OVf)QV1$svU7j8*)2nD$smH;gE{(_!H*!n`0X1NE^u^Mb zb7r62dEq%Zfr`G%D@K&TwaWSGwt$JhKiO4ZwPS!ni0Yn`MzwhjrSxxm0+cLkS9t{y zTBt2$Iun=uPvp~(rlK<2kCJ(Q)u zTmn=hO~4?&Z3V(c$~E8@=?Nx5S3P;sV(2~PDVFe^mUt{X+Lf{|on)JP~h44Q$565X&$xcovEYKL2a1&o?? z+N3OP>Abx1f~xK#%PkRdYY$h;doA8aZ6=aSB0@h~p?R%8$~q4`X`m%`c$&hReMKM? 
zZYS$~;Rum#>C!At#18ZK%<9ANcXQS{;S2VwLxt%lz5jH z7eLDp)P?Hl>AAGD)M(_SPzkDEa7+hMkz5*$MqpTspiC3%y{7(;5->Yd0~3{%6We$c zY(Y<{RqybD%+FtT1vM<(YEtvbL4VH5%Sedpaqmw=!X2f4CetyYRiDMQa`y0C3@KQ5 zE8hC~V|Jg)U4Xrv<`(sh*QdVhCnw0d7)pMrPh~p5sKuwC@RfT@ z=fNkSwdfy5oPnx-Wk#UlRxMbGOU6QDh7cInP7Zq2Dpdm;E2-`oS%+fG^;a>( zN6Wi7BzJ2Q%dqs1Kjd2zn2F_tr(kR*Iip!#?(pB_ZGpIdzTvi347^#A523{LC#``i zLC_9)^K>q|>^T2<%eJQLv4>MqQtp=_mg=^5j+5Mv4R*Tcbabm~V9@fi-`#sBon~jT z`2hh9dw`6cfL?n4>W&v+4WInI$*K%`I3+Bo7(B@}OWnorhp~R+B$YxZi2=^#Ww)`{ ziW>7Dg8DiU0A4d{blciWg%CXXNWp7zzf+Jv@Juw_}G@?#uf?Pe=t4U-b4nc5~SL{o=SoF-_Rv8u~ zsQH}6=~=X$+bA>L!pkam31&nt5qJrwYy2cXUh0BAznR497*LZ290HFnxyZEpnBlZmi#5Q+j-%O)Q z;V`#ECF(6?H+DdOf4p9adS+kGnCk<87}O7R(6+H%f) zP4Fo^kZKAtRUKrkGFwhOqWID zsbaeVW^9um|Lc0hK`5~7!UPz zO@#Bl2Tkl^CxL}L9K_ENi77bspZ{#}zmM6R@OHA$Px(MeYYz;LB1bE2$RKs~;9z8c zhjdn-MYVoX%lA#K%cUP2u@k%J{(;HK$!bGG7x?(0vLT=b$A!g|)VHTe-;kw8^KhP^ z0IKO()5rFsfXJ-lovEDGU{-99UhU0*1cFM7!$8D&2iD^HJEx=f1nGAxQs1I)UQzWu zVL!ZicQ1I)8M%E}$3OILN&4m>_@VM~f7*dD=s^*A>kpX&xTX^Hh~KzMrR&E>Nblf1 zd;k6x11rQ~!s90Liq+Y8NN&vv1f~V?9qB$j@)fS)fUX40 zkN-wfYwsW|RZUvxwkuPj=MkcJ*WTUyrQr+}8mOK13B3C6@m&_!@|6`LH019KMPOfybSQi<#yu!l+E5r;jtUET>;kDQxiD_OjF z;{-vdIAL${vx7iyy>bGtFx6U_@9OFRlhAUp<9g(p6)x$-8Xx}aAo_n)`c3Q`)E(GK z{6%oiPMh>I0dSqe!J}ONu7a4E?N*THVSu)_@pfvHMG6NN`FkBH?J}F<+v9oUmf=%e zud;-`TF!nJS*&nECcD&pdldJjBx(FZidG^Ne)*(=^8M{Y#^!NCeEjE;C6DG)!7I7kj^M+^x%E?I;RutNrtYUMvpkKRlL9bl z*U^=_np$mL9XXmQefxnI@T)lp8^${Xq9^ophyloVX(N;5Q+Ij!Cm`*?S{5vFfxZbe z5qxxc`4< zIIJ6p;7yclDUiNAJII{1R>Eb(L4zaDJMlmv2F>Oh;yEQ)uX6on91b|R0fEO?<>3`6 z-}PY?T8Y2}s*LpRrYz&yZ8MiohuD?2&XwcCmj-{BmpW>uJDOmXHGsr=b|}_5JiX2H zVY%w;R26l9b8ym!zrNJcM&|x) zg-$yq51L>@UAuQRsZZc)5!?p(JgTuEx_8fNj}t!n||FR*ZRn~D6Bu&3J|s# zE=Du>F;Gg$u z;9pZe3`-|U7JF6i*Y7Q^oq|2n>xa9hzK*9mRIKt{6sp^%*8w#J*igJv_2V5)M=L2v zX`sKV2U_Fq>xVS^R!@BJ)zQjAjkUU__NSihCzzNBLFDmERQqyNs73ImpicRL- zs60K@>`(YRw)h0m<3R5l|1yhY9r9%&PRTx}?_N>Be=lzPGLF{V)FXFS4v_F*S1W!J zUMR)T;L>d$J+P*uM^>)kD+a;6gZ2h^ol1^(P7PoEXFmF?RZ%+b!F2Ul&OWf-l_Zvc zor{a6Z##l3PraijUE>dIZipXKxAB8uIpG@@)yu6($^M=AX%+FcMJFXeP(tXes#&k)sg&gb#fm-tgXpHky0oPb6dGL)D?Y#LNh@pH__V|t8*73A^c zR%`oOyT5nTwtLse@RHA6Fp}Omz6*JUT&owk(-v#^Pd_&TXP*I|oUYz$u#_`Nlaw?C zFr9xv`(u8VzqOS}S9TffgErs}Mv?qcJf5G%!Tlzyf+#*D;Tksnu@;H-Ycb1|F(4To zgyJ%|gx>Q;u0SAdc#$gZ z`^Hk?Ri%*a;q-W{P@YTS#E0$EMzp(2qu)Pj;kP6L8-4PEsqVFP+8FoQ86uTAjhmu# z+)p-yTt!zcnO_%}+lxCWOaYM=M8KzpjvCrS*th1w7W;Jst}Dr|ps5YsDvdKthXP^> z>l$5I(mJvOaa*S%*e{OGho_!PrP9cH8d~^NNRJl=Q*> zzTSVg(5xVXh}+G6^6pKJ;b``OjQ2^muD6JG6TR{``?-gQr+rd|?dzuB_W#~Huoygx ztclINN=yu}b8rIqtADq{6gdi+9t65fu_g{qX{gaaLz>mQz0MwozMer&R&K?}$JJiF zqoD%Dk#lucpP*aB&!?}Q9cGEfEe@3aRXO;n_8u&3M=n?%i=l4w^Q{dBtPny>_f?X< zp_1p#Po5_=JQR;9F)(zMl0qyjdLbMT3lX8)Ho*6;!_o z?a2-@CIji<-WyS3&()0xGNOTwqhD*CRKGOpYQ)xbc~3^$fZ#7F7oRW?OBzh*9JjW% zQa&P>94_qV=2{r7RhyutP~Wa;L>-v}$ORiGURUtpB>}ViJdhKamgRoBdRk@%=WHe? z>)85tEAt%}ZLSppaU} zGD0c-oFU*-^KiUYxp>5pMX$0tGLg!7iw0`={rp16>80;u3$C}HpVN|>SLpFueygv)i@t{9%fC;>!ZO>=*i$^4=DQkq`JddLznZC-r925b zfoxFlsGI{!h#q-FQ*JjCM;zxBF(oQjUu9T01B+Q0ZYWFVM-Qn4>-6_z+g>{$P(G*= zB{dVf^JDQ8=I7F9pb&IaY=1f&=HeOY2gk0ugT_X&x>(ylrbG?!kQT5`^WD-kCKH8Y zlrq?K9jjaElk@BRS$scP8KZGHoZ_GV%+GA>UNyfAzHi9BOb|K2V?o47myjjtF7zQa zg*t*9{xBXwhtm*-k|RUp;UA!-HPL0c3m`on9v(uzg5Ub$Sw%rxuXLvum**?UgB5yR(qsP;MdQrfFUs#<4#nV#K#$Z6(m6iM!$N;4TT~F&!bNxXr%!8&tWQaAYbz&_a|etlIwN!S!}y2JSd!YE%GnRY$XjRS zBpDyy7G3#w0e2AO9HFy2NTc5b*^vrdhUL!q!N&!t6#|dHY5eo*MbKv zioygPZz@$5mbbu))`%}Am6gNd z(QsLpa{&>Vv5%g+YJ!P;vflI_fT}}#;&f1D3B$k?7bcpLFN{P9QDoV0l8%_KvWp5g^T zqRE+Ii^?acW;iiNb$sI>A;HhDqoZ>~h#tpY(?B$4;}9gREFVpo&_iWjYMXoF6X4+? 
zNFG=2Q+;-JCKpWx)|FQ+d_R(1P)rnN^AbY}RsZsSb`jILsA*tn>qFE^ISem^jdn!! z9h4PbeF6wqe_SQ~m-P{|>DoJdX)F;8RA3MYK^W}oZWA#F5}$`$BT{5;XDFCK-{>E& z3&up65h)SBIFyU!bCB&aj~|cjaVG}1s=IVQnck@&hl2g9PJg-<)3ITXx6nV9&pei{ zyHq($b2zb#y6p_DERU_enz~3v?jmp6S8jAE?yOmP@3)Y9R~1L|(-!YPIPExHwTy9v z)_8T~IUiQK@#_cv2lOJzHZe{HMZ(7u;K^Z%&p@$CrEewk>v{Bx*Vm_GQOZ@EQ1M>9 zg7Rro^0P#|R6~ixLqR8|Sgani3UjY=K$SD=J(wDNT~ar>=Sd~6zd1ozHbx&_#*_4# z?a3(5+fp`kP^b`A)u7>nt-{1FAh%$CKKZr%w>}Fc+Y%C2F)5R+H9;fvB<1JNpju#E zH+DHyV%tbt%Id^$w(BM?DM9LNO4b1UYfR^I%bkZBw_Xuo zT|rF{r7H>Z5MLS_)6~_iUvDnawgZSV0Mj&~%~F3FATj+@14*oWV^x>7S9(W8n*7oT z=l+etD~A5UYV(eWIVEUG8ReLxp=2sWidt@NMabaA|4JB>Z>F3d0x?&^3`9>(pLFL5 zg(kB~8XUA%(%z0xv!oAm<96_wNNa0XB~!?{$ysvnxq5Be z_1RIN&*km);F7sB_x7Y)O8G+5UGmC(aDch@C&rB@|0%E*k}XYkFPsSne~=m-!fwCh zA;6(Q=TC9z0QHF!hd|!w72}&o09B^RVZ&jrCiYa3h5h-ebfxzrKpbpca*$4(IoK5m ze?w~kt*E$-c+Gpi`Hd6!eWb*nB0fadAU2w^%7pe=DIR zipwj9!U!O<595+ca;b@fDJTH?qJyt`uFSowQyYZU8!<}P7Q!aW2xUtQCrP^;=`3}d z+&Qkqo)Q)kXprIaeWXk1Np?#$W4q0J9fifVr9zfl<-XEBu<}qZK>wgH7rL??rZYGV z*vUi?(Ir@Sp$)^>kQxbphtzqx+eM?YzKOmLx19GWFj2Gg94| z+MY7$$;-9k-+VGuzWyxPRdMxc`lp1ws-`+HzXL8z3wir$y> zQt;p)2NrA9Pi^Qe?09(W^Lud^(&TpGQ6K0J&Fhx>U1QA0RsySNb=$(>tLnG}IAjGY z$vg|A;@7YD3S`97N9cTS$PRw)08CEJoDplvZeTg~t_Y}Q>_wSp3A*n96B0Ya$Msj} z(UB9yC4|$Wf6UUB@5U^a*(&j;B4xa6`p)Q{AL&^9`2YPP1vV2CN6rsPt)!fKg=_hY zr2jxiuGt30WZ(2T4XakyFdhjXS|~r?M$7zJZ3NCpo-{izy(rQ<0rIA{ ztJzKe)-h&l<;Q5MZMO8AC)Al&qhnPET5|+TP$1x z5m|t);MY7A5c!wTQ!~$OyHFRivr(W=OVCp@NrCn1hKp5;UGHyo^nwMP8>Ci7QWYB= zhRZq9c&__zVEiw!`rmMLf0BfNp(hO+rf|o`vj0I#1&3H-=h6*Z)Sjb_|0TqG`wGgT z3f3@J)zsAe4y!V&0ysihO$b47RK@pnsQvfvbQx5KwGO3u_6hmlv1HMGGpuo8f6MZp z0O39i?Ii`m=Mub5qE)Q~Y=miA-oC?!L&I_`P*Cfo&9d3>o{fDQ-TiMrC3q- zkrmkOtZzPD$%uLd*R4;hdQMi92s{6-WB-}g;$`FG0PF?y!7?w`tjr065ob*tW9}L1xWut zuHFJF$}Vif9lCWuN>CXP0qJf5VGu-87(%+cyBq0JQW~kD1O%kJyFnTRB&Cr)`}@BC zKkNKw7Rv>QE`8^H_kQ;C-1l{toBy;RESj?-g=D0-A&jsCdh$1y@84b+(*sv*u3^#V z-ZZysmzXbMYZBoJspvQvaz=0PU}CH(@&D&)dmViTS34y@4uVV5%IP{D%s;h}U4)nO zFbw13Jxohc?J-A4YCnF$cDR9s>;$m{LRM}J0n}&ag%7ly$&p3o`Q7Amt_RvFPpQs7 zG70C~5iWMHd2&DP?Vc27;c2tnga>O^L+#% zVh%$djux#1oE4%9zy2)cg*<_6*JsEVSkLPysW~?+_KsGTvmWSvR1^m!^(+r#DE%HMLT2db(y+VxJx zKrsr&8eQ6UZCh0*;S&BZ_~!GdQFUz%k=py+q~-Kiu>#L1t|HzoXTEK@X-I!+E#?H& z*`O|)djAge*1!wURl$Xqrd@NWbJEs5S~TOfAjdS^oBNFcC6qQ=+tyU8ibSzz6d4cg zuDWaFGdOs(sdfZrxl z(IN5}^&f!5ucXe2;$jEXq_TJzDxI_)wX`2J@)2PitYQTn0uO?}ds`AtLr3g5x{K>* z7CtcAk2ahbF6KP@)QR$eHiDef2)AR63RE+_UzKZRy)j3(M`va}YCkLQ3bsCJ96BLo zQ+H|G#McNLQPN&>q@X~-vLxc%f;nRP5pwxsLm}8WP;>~^FBrf6m{YYGdpHC=3kGAS z6{Ag;hRi9rYufWasnZ&+EFoFvCXp;+C>JA6V~AJE#o3Z= z+IO6R6|ji2E_5yo-TBPJO*cwJQ*H1FsX;G#N}{QBegmRGqsCD()pajx-P=)iSD!RfYV|dJ{il ztB2&5NMOnCr)RYd(iChzuK%}3k~BO~R7?c$96W?U)`?-S6hxBy`v(*r z(D{Cx5j{8-;-?bKVZ@_9(#(@!xhvCuBd@HSh-V$69{i>#>m@!!CB&yzhv-F`OQTrss#Kf6tPO!=e zq|4KMU~x{!`IefxvuZQ)QBA0F^njpRjk}r$^BpQmdB-FQ?34Y{&OvmSfg}&tvk0H- zYpD@gtC(ZV;RWeB)tRpJ>I+_H-tCZUV4eitGA58T*c{=;Y?DH2C~MDjB;FXymMpKV zZ#5IWZ~O2Gu0p{(eGAVk*^!h^oRQ%qT~A=dD|2B5d6qsc%Nq`*f6JgW&th!nDvhgS zq;kgqxyLl04EDN^;lR)IHe{TLou zAVZ~fB$&CXBmmHy0Gs7K@<*7X#! 
zLZ7o6*r1d6p%g;;+Y|l#tYBYS#g!loek(t|*+Uc|2m@R{%t8nXqmJ7S*?XaM5NanN zD*ET*TCJoCSZ12<0{4Z^RaHJ0A(0II4%tK8L%>xG0#L+YexYrOKP;^~^7`)!6D0rw467;kz?3N;mno~$8`j1lMK33F7x`R&;|Aicb^ z^YPtGeonHTEV{c|B$T;kOil1QLB896UB>?k{9o>CFV9fKATbh5 zM-*Dv!x&^n_WF)Wpd~= z=Bb+R+4`5-E6d`6w|nr)*X$5TH-%pxR5mAIGz_*!_Zq zh4t7Fa2Y{#704)ya~q$#)Tq{Y0aM{9*b48<4WCp|9XxP985c5a*p4(1q%76H_SbC=}TcN=iPy-ea&S&Ctkav^_lh<&<~ zO&G%`mH)T?{&(>Z_7yIzXUv(O46q4wY^z&=*R0gYk&-2{Ft=e87z`r2+}pqz60ZVU zgo~cY;!>OBYwnmnFe&}D^_gTp(eF??lud`4c4EX73W4}jLI|NB%5lzDV6w*bZ)PYV zp@g9h+g8g>DFJvSS}cgETq-o1u7XGQ55{%=w@?1>9o%=)JyA?hCs#db zN%{ktwwIH~NFRY0;ix(K-p^ub6BHV?bwiS4SbmSVKmTZ{tm>OYtl?B%K}jI=oR1W- z#NZfNKTdTeM#;1kP*6(N6oMdGFy%0aKOt$C>YFTxhToIq;7*iI@hm0=2xAs%A%s(m zmPqVLFQ$H=K_CM}Lku+*LatXKEHR2rf{JN@<3s2)6(3IoPs_+)Wn=UGM;7Gic^!;L z2KZ&fk{a37MLR(AwdAo|3UHWf`8zv1Ae}IrItS>CypFoa-pR;7^gv|+QVHl4GVn12 zLF#RrUMzo?q}IjurPDK#U(cR}NitR?Y&&^4I2<1=Hh_8m@>Tk>(2?N;*IBOMLzDL} zxp*$(w>sxsyG6)-9sgt}3A55~H8wOfl$F_3mng#l-)!gPQngB_+S=2@qrS4zq3h92 zKmfLwFbAcmizm%P_p(9Z;2}}&GE2-cjJFo>3=SL{8mJP+5neI zmmvd2bmYq*GmXTRc2Xn9N-}&^oY)PPjKNGf6Oa z0FkUJ2taf!Eic!t@SERX`ljCj8e1S(LS>LCI27FN0NSze?;w?>SzS|8^ZonrnVFf1 z3B!5bS!mA>eM;V1yx@R5P1iq5QIZB2#VaS5=Ld_Ls;VKOln=bNHx;@EaH;AR4xAVz zi#l~&Z7-rkg%U^h`Q2#0$N=snpzCneRDLWp)&R!lTE|7;+y9l#XG5~KWAWB){d;bo1hvQHXQ3!`J+9dOT@6z7)ZWl%tmjjg&gWPb3k1_IHAI>$My zX@I4^V>Pq_Y$9;cOOeQNZtE@&G|n```hRk`a6o4NL~r z|2ni97F{uL25SBpPKZ6|Ki*3n<^i3UeHS(m%{4hCZ+qY0mD_P0<&!j@EbPtAKZC+o>Ljw5Ub}Cg=LHa?*pVU`#>;Nm%0BuW)!_VfeP9 zHtUcvBL!NUJEwVGm5a!T^=sJk|99%}-xiQ0%mRdwAif)qjMeLOOscA?Dm;fjofUxq zUR2gEb@q2o^YsL5ji;%iTI~Y#@=tyrL|OJMUn*tCE?>3p`Xp(5vx1?vPWNYJ7X{GU zI=kM_{PFH2zg}Fv{W%=pFRt_CmB-a_0!VNyLQ+I^-`$L~$ia<`^L(#u?^&uQ>wtI+ zWq4RpK26yE@)#UVWZ>f1G^g&14Hs5kmwC6(lwYr)E1sexr)Bv5SyWCcTj(QWE||zR z|G=6g>UlTpNu{&cVA_!_A%v>)5Qj1pf}#{g%Q;iN_{#HO^Y?5?zWIZX{-OU8R?~&u zD*;OYNYG8dayBt%Ap9(*Z1*3_s&P58q@)q}7(fCm@YaN5IB73e81`KMY58;H#x2+2 zcC^M+-dOp8^d*E{Zi0KzN2+C;55!a2I|!zSGNk2v|E@A~_L1Uy&fZeRerZbi@{h}c z-5~)$_LHaMJc1X0YzVo(X}oBi9K`g6nf+!mYp$`@{b8pMA}+ebev)WzgZy7$)ysJC z$WKcB{oAlmY{d!6`S$%G|5S&9<6z= zOS}*X+K-xQC-@j~WsI+DaUhV#2>p<3&U{WuEJf$^2YbsC(@XZJzboeE>J-`Sejnn6 z=#%HKp7BR4l|cNkPfo^c?>BUZWk*1Ynf1>fB3eYLU(#C~HawU!ggs6hC$@dgHqaRO zQBf-4j3@iRK{CPji#-Yr%k_H=FL0llaeLhU+~4Ip0HGfO6ms{&x}Fn?axiKrrWC*j zP9J4tP}j~M<*A+X^32pN`n)a!eimSF1i`SN;PSm_SibELz3H&yzag$6*Pa2zYEMtk zKLdf!WCR_6z}W12@1w1~XhB_UQ+o|uw7|MRYG?)gc2v)iq!o2fM>-QcBx7MTM4f5O zd0#8H_ny}?LSSprudWs3&Ju=Nw6iy~p4o!v3dzj@tzok5@jBPi-i7l0xdt^~tGxS_ z6&N89vy{4!gC{UWHovCkeC`i9I$_rHmBr?k?Uio36cFKRRKidn5B=_A-IX6VH<0v3 zK`SkV$*{Q}vFLFSp1kh(kaGY;`u?2jlK5jYqpk@u5)y(t^SLd!8 z>te&iqQm;}!7UaZ1|@`B?zNcMs7e<%w-F5eGOfOd36B1melq;102|-02p2TR5Hml? 
zhINxuQ(gGefgi`dVNnd#9|Av8hrrV1EmN<+m zQvbu;P)$ws39FwsxJ{# zIeEYGxg()&>xCQ0lo<6{86fi&JmXAP!m$6sC0WgXnc^K=rX#|2;X~7WKN?|^4Bjj3wrHyGJXR z>)CBl;=il^-!7kUTSol)N*Wr(u;lAk5L7@s#VV;U{mY^5oXSgS!kNhF=w?n_#BwJ* zn==6gf)baXQ^X;Ga7NfuP|%>xLV^mJl|7&k!ia&ii0mTt);KHl2QUaaghWmB<2!NR zL#&nt+xrZ^kvyi>P^X0QZLKQvbq`c&I^{r#N3AMiw>l%ET@f=ph`CDl#c38wf?z-F z(AZvMWO<`QVSc^1{$6L_$@AQ*URl(1Z0PF=?v~|pYBW@_Cbt8??gPCfB$8B{FbJw( znu~)O2r4viS#(PM^36fcLNnO;7Zw)Q|KL&suj)vefa9;;xFj8qqT1SvnK52zFnDap;CWwWDx{e<(VOT;njcr(-M@K%(H z;wf9m%STFJgJdB7$Em#!>@}d1cbRN9ZHh@F_7A;prkt%9mqPGS#rEtzrD2Z(exs|^ zO5i7nB%2YrZWi{_24vwkXb~f+uNcHmP_}#~ zLWRL3l`*7}5g#-yco5V%vVv&6%yn2(g z@B~jyPg~Th_HKO)`RFK6v7&vrQV2<|_*#tWm&2foohws2zc@SV1{jopYXHhuK~Sgh za&bAXcSiw_J&A%0HU}rC1HeH8JcXKy3b8J;M=8*vJ^|rUz}%dd*R}5=S~;XbXI#f3 z>6xYVD3)|Or>@X?4tcJ~#Y&6|${@8etug0Yrk1nonPq46tz}lVW9;9@ZWDcQ!4y09 zMi!4o=G&!@qSCTf-rg1t$1DD~|C|d?hKxAD6$k$=&k!zEmWG& zoQ4tj;Iu-~j3E)h;kgRxDl$|R0kj{OHMB@|ZgfY*8yT4ysvdfWYN(3D$x`Q-lgk&p zg-Cz@9=O&m{H(mp{8CGVR6#wl)Vy0_9rUQy@pj(L{ru^%))}_Fy&WDNPJ&1NuVnC{ z$Y0o#2ViL@9BXZD1qh~pjvg8sb&X&1%>f^qgoNZD7L?D88-Rd-ui449k{O}!9Ku!N z!jTTY8@RQ)@!*jl#dvB~M(CcP9QlO&N3uicS zYJ67pX7ue7IcaM^%UO4>ovHnqf=gYdRrg+5`NYYD==z4<91`Hxw&wmttiqH|`NUk4 zottbu7|;D#=D?e^%O_C#j3M0DFnSu@&)c;~WGF)rR9rgY_ak3=61LnNh#%-H?T;7} z>6D}zcA4Zv|o*At`6(4JI{_o#oXuOvP}e;vyrOblD%? zJ~C1lmVM0?wacFK-YbU@)c3eMJ7C|RoSaN*x!Kq<%`hPYPg&UAs+DzdZsYjABVE3M zOSJTB*lYJ;B-4U1^~cmAPvaIo|5-_uDI*33`(U6~KGCvN=p!0B5)TEt=vZ_7q9oahKR^q)q;X;21&?}ff1+jik7*>!a&SKK`uyM$aA?!p zS}bPPB|WkKIpw@~&NUHYjxM~!!oIhAIq}f+sA@awMEvNXQeEo|=5N4^Edd$teenDht`QmP+4d?A@)^K8U z%~MNW+r$#XOG*sU65eG75jweF1r@q-&#m7%VY!2FBmu6gK$h0yuA` z!Z#cQmyv9<9T z!P%n=vz1Grx(L>flUQ0g)H*z;oH;?uU8kEfjG?zla; z0cSQGfE4zXXgVs6L8O588~$IT#HT^GyL^@#)!CO{5zwsdRMB5$49*?|gRvTOT1W3S z<^n#ml+=aIR^ut(zRm76Ybf{{p*Cg3I}fh*-NIbN)^JR8G-+(yfg-AL{d}OG!}#B~ zbSr*-3pG_?W>yX^ZU)EQQ^l_?4i09kJ@@OKvlT-*Hu^|tK1yS_eW z#Y!5ZriT9PCnca6$8*eYhN^#11w@uc>&oL4XiM*6pm1Rtkr(MvYLPh`FnEsZTN_1p+7`RQEZjJ})%S5G` zPzl+msLsDvf0U4yRAN`(eE9iq5LD%RuQC+HulMQUcaO!V?}(7S50|?tb|;$by*LX@2HX@q31kjnwGu!O-yS@gbG4WV2#d1g5iO?7*?7L4b3^trgQ=Bsy8OW7D9 zV*=vD9Eqv)>*P|MmV@`nb_?H6U)QVK3;OQvoVea-?FL3hrp$%WPaG!m=Kz~0MK9lY z4cyBGCX-@ezC%C7B#H}4#<3G(W9xV5gQ)Fo@;+^EAN>~7{pE8J%%ldG8z%V$51$H* zPfv$1WF85RWnmKCQ0A_S&2*?E`s^{y+lO0xjV^~c}mNgGd`_6 z3C}N}g|mKn0fCE)VI*Cai6kcz!E_ZR%~wSs5Gee7O~zN9_*KMvB*u5WlyLMQ2x=lO znjK_7zw$hKQ?vHqm&2uh^Ic6M1&R5k{7KC#Zg#7=8uQfBQkHr;TntJvVKj#w2e8h& zf>2D*(*hW2dw>iN_4oG&1g#eEu6IQ$fIKq+P+HOwY1G=JcdWV{YZFh@K_FXC$k~Ip zyXhYZl62QNm5M&QpPn>N#fhTIP2})dWO2u`;x8uuN^V$OSL&x|q~H^?kLTXL}|< zgpKK2=F>NCZYG;L^gN_&JUlvKG zklUYKhYA9|nN_Itnwl1v)R#x5dDtEfpY_K0rpeOX{rq$5V{UGK)Euj}v#x2vm>}rA zGpT#qrHdaGDOkKiQTwNSp$72v^Bc7MWNU^-MxE7DdiT6WO2ZVl7siaUyFRNcGyaLQAU_l|YL|TFo5(*^t$pb7-aEq_BwkBH7^DILA z3_d@hAu@mXN>Ard^@Uh=WzI_rjxA1>-=z;r)bu&(NOCdcNFyXZs2{(1feOX1XsWLM z&dm)>uT|Ge(dl2_N$9|uR~Gv}U(Q`Nyt=J1=K0RY$2*CN0zp&EsNgjs2f5N;03V4E z9}{#16LkQ|q#2yW!9*$|hY=Dao()DHy7f-mAUF~%PJk%UGPwRL?b^Fp`GcuI1YiAe zmxmOjpyyR)ua91-rNAkrHH%zVd!5z+Xo7T|QjDzb5%_V24fcw`AF}%;7zL3eC@UpD zQ+R>fDR#^h!jaF;E5LJh`^RVNREoZh%EtF{2+$HX8B1ZqK*S=h(=U-JxiYGKFg_gB zmMHJ5+|%^u?xW|T+HWjTK#|b(xIK%BX=F0_vi)cGdKDz(+zmcH{W!04S3Nz`vW=gB z-4<*;cbV4Mo>^oeyfjXrDe-jIJF4{6^0fjOIzUBl{Aad2&)(^J#Aiv%P2`OI4m0fW zIc@qQH#Ere)TimzvSrKGOo=;P6GNxBk8-O@<)LzNZT^_@BGk-c*K^BLLdy5*JjQqL z45RsIXc-$bkkjJKkfBWUceDG6=*7?QT8;I^ z#l`)esipc9>ZdT>&)aNGu!qBFl#Q(14I(|PdPa|gXVA%@+xAu zlsPE$O)e<4YpkzV%5ST`NS##8UvaXuU&f4 zI)}S}H8yjv%b8Rip9w;S)l1>H*oi+QcL>P^y5YEremNn3bcUV7#a+?szL;GKis>?K zykg^|_piCw%L&eUmkD#}4t{_~N;nK?30O#82P z#7Gj1AV~QR{^5jfbh;x3AQ7zM9X01I2-6_+OnM^`Qw8J5fe 
z`h`P}ra0+e0G-w3?Y*k_`)o!Rw4yZxB^eJ46Sf}{dcWq1O$6x)Afw^t@&pu7K-87Q zsx1gmkCKvI>o4Rc7QX3A0B|2bv|(Xlg2Mw~Uni+v`8H!y8&u4KUd-sIBA90VthPiT zs{fW}-fhp~xoJvq44iRYMR$|=7UWi60(SgA(Yj#R1*(7Wr+^Gx)hW_jeucLdx5hZ` zVD#39>o;E`tDKtOybE(;Ot-_45w-0jWpd4D+fjb_d4*R@nf_$B!Txv6>r_6m74B2h zO$qZCLgFuI`Elx!k4}_B{luV*7JWbEwu{1+dS#byuTAAMS$*6~$=%gTQkSkTMluDo z^*&6U-%bhX+M#!d&(f-i`9>oo(51uk6KoeUA_h;pXaF-QO~|vR(=TF_X(dYJT1kk5jZ4@aK1A$p`@7uuV=jW@aP`$B-1+u?Ze z%S^dznWmbmj(_j3yu^Qnre!8~V`SkbL>gp;0=QTHrAP@u{*qCS^AMEqtuXpw-(?b3cbWw&rpOcH)(DgiN zZTOQ;MVWb9{HIY@S^M~-?`@P_YjCV1IdZ@_HAIXC)CATbrVtX)o~tOf1Ue1C)fyNXW5-V#*l-e58?5=t5I(RW z-pQtwLkqVuGXtWmo){`VIY%Js?P{Hx!bV5^=h*=FJ!qYR=jZ4Ayrrcj$;JuyTj_!c zKAQzFOk!kV`G+}bA4Lr*`D1pi_xDU_hEDVBGUjtB!N=yQxv{dM{^4{XHtqUTfc+ZW zZ~#r(KklRmGCBw3hYy?kW)FF$-DGY;I#jsPsoeyvheqf^FgLG5h#rlha&#xsH00%6 zIW0sMsT_miyyNyZWBP1^($%ea*MBAAC6b0Sh<=#=lw;`eW8v8cG&E&byY%s)KT3sO zVwZO1hl&81yixw_G7fm)|74>h>2DfPBCPrt)mH+GKDYCFDD8>(@aBK83Yf>sY!M= zfUg=HXU3mQY0r09)JKuOD;)1#dlateZ-cQd~yFs6&<#w{BrIQP8mghOLLw> z&s~uv8OY=18C}-ZKmY^o*QAZ`$e5_bMxWU*9aPLB;rHKPy{XV9na}=f z?ED~eO9y z{b|y_H|jCJLi$57Fah;P!vd8ur=TE+qJ3-#qFCzde{P+;Vy)-s2Fv#cO-&$HFflbn zY29$Z2z(7BSM1dXnu}rugl`d#V+o81HKZ|~mX=i`SfbcQWTkQ{BJ*E9n-CXToxZef zsw>jlK$u*AR~q~v23(oYILPOk59b$mc6`}84gw7GSgnx90c((jiXw-O5SCBM+jDa% z;@Mzv1>@R|)o&fE>LM*Fh_eJW9RasPnBgFR)B0e5we0?2`L+T6J@xR4JC{KQ=wU&7 z_~}z1rxvYkEZYU>5rFRi^Ao_|0^E2&5hyP#v=dC1Bnwrr$4@))5}}Ie0n(xTf&!4k z3L0T6XMo_Pt%}zl-GmiA61kN88?fk>f16n=5v!Iw%^cz5-~i*58WPK`OMmoi)Scq~e!ar;PAxtxlxKVv&h26J>`sFlgNFJ?Lr%Vt+>F*mg8$NWN1y~E z0TI))@|=duz{ltAYN}Y^%S>(g#S|Y?$jg$OExR5f)b;@3+IH%8Hv1HI6vDA^jWKdY z{IKw=UN#>%z}CA;I=jiuEDXJKwOM=uP<<>+j)LhinkdA^`CcN+p7(cN+?$1#1m)YD zx3Bd#MDL}idQz8r4h&vMo`q$gD^@UOOqV~Vp?0dJpR8vcUEr8=EiMP7=&Li4Oobg} zOpLQN;HW)2*xfaJpw?B>G#u|>6#19_LRMW}Nu1WIO#lbgsE|QcDHa9$Zt^=Byc_!3 z?u(VYim-gbOZ>RR4W~tJC7h(Ud7x=rSUlTsu!bR=Dupdq2{9!QukKE;&&Qh`>E4Q`R@_5RbQX7~@b@bYh*3gui>!>I zb0seRIwgfEIJm&uYh-;#b(aOb_r;A^Ie7#?XZod-z<-*4Rk$~=LWCGJPx&QlP)Bxy z7KIWD^+TF>G!ZmXpE2~&pyKu>^bmAHETFqT6jprJMni;--5OzD3~>^Fs)V9QDrJS6 z1C+K5a14PJMtKMb%aENAcHWr;dJC|XXC5k7S674PTc44GkB^Uu$tL)IotFHJbHM~S z!7SR;PHd|nc>c|#7SE0y*ocoXs?vafdKDabK&!G%Zi7UzH(}m|x=+D1FGuHR@F$32tR>|8s((q9HzG0H?^7Gr3 zc*hCL?!{7$qvvv5J8e?qF8O7GE0fk*vv~)w1WS1G#I)8ryU76sP6624831 z!yQ`{TbrmF zWYvQk8*-)}vno5?a#5UT=Y;S5x|zD5D_g7$+)ee_x6Jm7#>RWy99n4MKFrh8X<(8d=``suZ zLAk`p#lL2io`dQa;uCkQB>ci=0-@n_@!p@LL_>R`T8GIj`FJ>rA^7elB9scGKeme* z3>j4UDJ3h`bGf#LM;>hS>s)&n8z#3+Y|YD~ho*L{mfE5(M!v~Ca*K3iWl>ezo0{k& zW<(ZLcH;*{C?EmttmtIWDrc*IGH-&*)wZVx^Tht$(P#eRFSpj(d0#FNE~OmYow}u6 zqMSz3!A8hG{eS0_tD0PiIy&myQX~C}n_qo5QJc!}UhSrsD2mX^XZP8@dl*x7m5yjQ zPvBL_Vh}bUg0wTrx<0(Y%M*GFL4iQt!e6T#_Ncl!cAYnEuq6C7;;2LS`+INtr=??0 zaQsxBeXwH2GimAhqm;u8h_qi8eMAYe3|X_2_+fE^;%#mscd@Ct&2JjK=()MISy3J6$r>u^kp5Jldz?L!lG%Lx1fT6t*0=7FhqIXTgqs z?UimLm3`NH6WU=@Db5sbLs{N;|FuQ=;O>gP$<|dzVK|%L^jfKKlgSySi8RRYuNxb7JuW1S|_gE6Dhmw`bOkIdyr3G zgi&YO>FDXMuSlcHa^s)6ppgPSdpy@54m*ZHlVP2eJ|2Ck1#z^FWLTk5;sf7{@ggU) z0Y~TEKsxFtI2q~vX}gj|Hp3`})FlS)7TwC40`+*Ef`Ai@B9?ZCcVK1ML1G#!y zQxDkRpr?7+?`d@-$3E2NZtSnSR@Aq>{A_wPA<-mU&-?xneCpL6`g5C#!P5mztgYdU za?edlf_c8nKsqlJp=K0zYLzkeyU&2``#HQTcuwPi3Xd$(!ZkOI-A<`rEq`k&9xWMW zSCZ1Vmp4#0YXU<@lTyRr(u%5eRHw6Mdpu~6tXIOIn3j6ILO+q~{9xyMu825#C|fQr zdmDOM*Xm_%`5EY;VGR7S*3_PF8nG*C@BiG*HspIqmXf{n)0>TFZ+6f5u?Is)66**X z;j5)z<#NdsC^c)<=-uu8CTvZj?j>yBY>U@i9z}|FjG)3)s8!+`?Ee#-dWEB&`^k&< zjjpr?<9SDS2>qmNvcYZib0-X=*~C~8pp}8BPGLTv7Q=?v<4?sRqJsj12wI_#H;M$t z7zPFxs-s`n*9yG|`;(y9kuUWyo`;9hIZy2-q8z#1RP;O(^11m7Oms7)nq>Hxwzr2! 
zaU?^yJ~>kqA^#RgvH&3^hO|U=&yObnM-C9hfLdgxMB}-ob)H}z16&yjGyUOuCHrUm zu%!CN17&rt=yZ;&%xF+K37aT*=L4cFc3{O5B&tlg9pvKDPd)vF4W)6qgaJ1SL{FT( z&Y;JBqht3$PqG(r1^%}*aql@SqM(GKi}e0LDF_{I7df;$NM(X)-FSD3GW`kE52}{S zRxX1+Wkg5S@FEghTrWXt#I!rkm8BzG276PjjS;&CLX!a5c7!YV{ zm$PKVQU4My44sII%dU(3QNWmUx<{9sK>*HcX;|xk=_6FJEaW?dDb7u#;X{>%*&!rR z+4gWerL1|F^?ZUu3SKrjE7Br!-(S-?;@g(V^6hGGn{v&x`o%(-(TIiXsMJfsK?hFd zl!2at2hBn!JG$>BY?rFLs7|Omq2fO*I?=z~B>xyU8|}$J9kAXsxizSJr9w$@SBRl= z%_GY&62q3^VQO?Qgz@vk#ah)-G_UJuc_Fb|SD!4FrH_cm;ou-v{@QVkm2PS9mv-$T zypN_Q9THvD+9nHUW0oA^5G#72YPVzl6TT~o)CHdr>%V2KEYYz4%DZO<&!E8ef&3%@ z@?fsly6{AA`xKHAImgWOww`ONe=d$ghLW+5z%nEh+uJ{Iv=J#TdSRhHj}Ag>g&GW8 z^2I_*vRe7#T{%bJ$NmFgTmKo80YM08HJVvj z-Q3vTu0Aj>Wg(3K`IDgM19}Bul2lo#y7MsY#G&iuXN=<K@!aRH`s5a#qwy5 zF$%2ksDCIUnT_3eii4nsMTGkCY)*QOdC7EQ7;9BNPdZdT3E9prq0`+`c_ z%4eo`j|;3&#)xaxF6xWvywp1R(dFm*P17r1pu>=i`I;A3h2jth%2tAxv3*(E5FU^2 z+YzXX$kBKwGt#+a*L=H|3o<{S1C3$mYve{iJk-IT0NZpTW z8Ezh%Kto4b5^IVd3J!q&91H$sc6frsIz9wqHppyd(+Gz>*`KV57+58&u8+h!6P7}X z-EZImmK%73l_l;&higy@E z_be=n>g@s$g})pID}%+<$B2`_n~OsYAY5cF!E+B(wLc5WXSWLE)W755)=xkmM${&?Bm&7#=KwPQP_Fp{PL19D&vImH^dNq!n*S>|uz z6~3t!?~>}^K5_3cflnc9#T*|8#cuqun;%w#P8H@G=xe0B=pj|-;)zk=3l2N+Zfp zZ01r@_Ex&~1_KNQ#IeznlYLxok=Mr@^ zXm6;;ASsBbs!H%8qz?8lXdA9>Hd5i5jH!Jd$=(iQOj#JM8=Q3$&?H7>N02BmkB+Su zzaa+Z^3zZrg#yL8`hv|y~@jkI@SIAV`arW5Oq@5q}(t9uBo-+s1b#P zz0>!`_Lck&yno<1yVLICI^o3{G#s1TgLQ_I15Q)bA*t_mGdO-%=RWSnMrYf?%_SS} zFia2+#`7=A_sCgFJkayjnwUb`JS;I7iuroYMn=jRJp|34Bsr`pA>he(O7eHBct(VPBU{h--TLi2iRUDQk!ABMZ}O~cZR_CLrNt5igcaP)v!1McwMJX~ z&xColm1r#a{u<~6`+T76LU-%ZN$ro)8jh3`kcEkL;@7_)YTHy*m<^08oF@7fU(KJg z-IA=^%l;8>m@sGZ>2ZzI$J8BX<$@pYQ>e6eC{cWYdz}j{kmQ{!pR?r8o%C!j45nZ5 zk{!i)6? z+az7my_|3>k&c zKlwskvUBvt*HJ2qx9EK&Hio6hvM=kaX0hEGzP+E@c)RL$6^+%IdNNUyJDtxJ#LO^* zwL-bXSY0P5e21{A9F^x*?LD0X<-b9vGVn^iB~u*?{aSD?P0t9;e2*^6EBfC_(=`Ju zDN%5mRXhGqeG3`&{Q~!Q7cHW~!m=;V++OYPKc$?RF4LSLK7HH^7{YMjk8o1>vk2i( zVIpH@EA&R>Zv*rxmU{?lth(xl*DzXOz$V{j2_#!!n{{4TzfKfUc z>5z~d(h|Z*3CRJ{-Q67{lm-=P5RlkFLZpUtNsDxMcQ=0bJkRI%+p+(*m`TMf+tTcEwTZf+W|33O3J*P8na979q|=wcpG|-C4WB;wkT}ffp?2#Y;}h6sBx#r- z(9`bU9R=PlQyw(xQw|@WO_VEaPvqa*g-l^>_x*D(pN7_SPwA|_i`=n?7oBmRyFbY; z%^l6nV5P=`!(Pa5c)3{Ehg_q*Lzmr|4njGA76q^zRe0)vX+Y@lL|)0(Ko{Mgg-i}! 
zavRUwTy`4zP|lezWE)-~-$W#{)+l(0Ob!0Z+LTW&ZOi)=p>#Nsa;dN5@_HYA=5v&H zsAYgm9F(_D6NqfY!0~|onNl}0;DtuYWPjff$tEpuezC!)HMyGiZ-vWyH~RplXnw^y2|%|4~kb( zDtFj{s-xcEW;96X8F1_fIifcX)X93+sHf7De@Mm0?|<}tB_D^r5Pj!t5w=4^3JF0K zqFNyH)W!`!zT8xsfht3wwOdyb{7vVUrWYnA6Z~fN&-&c!p-+?FxX4`!(@z5PxA}WI zhSQ>xh#rrdcCrg@F_DS>A&6f(-)iUPS|2a;wY5aZs#GSOmb$V+1ww4(?4&xk+ROD# zlsjVDz%8IdZv+ow_&a_5G6>pp=A3fU8PkK?HemT?)v~=Mi9H9$#y#JFDqp z1sc%%>%U$>;2Kh3ee@*lop(qTSItAsdlQ*`kCgs0@2KdPx2 zD2fLA9#|-8C0$5z=CBp7sD|(Ds{KS8+)WoM7OIP%GljKtJ_!=NTr@nB9Wy#>>ZH%j zdtsg2N^*=P#L)y-6lLH=)V!mT;Cic^DQm z)y(CpRdJ|N*KEXrTCrg-<6Q3l+@wt%lKEv8@&5^;qd!wqTUU3kTeckimPwM63k&EH zQnbhh&Ht)M!ClVMpaF|M%%*y0T~oXD&ZkjcU+8m!uf)?rPef#&Fwe#zCSeFgQ>&VY2w4NG zE4{sX7owt$$ZALUIv`B^ml4IivIqMhcHX<&W9j=CS0O1Bmk}j}3RVrn{dGrDi@^SA z*~sf>&L1q?4vnqJrQO0yr&{fO$1E940v^WC^CL^p#7CBp&R_a%bmoQ~jlwg{%Vu-) zP)uM4h>5uzp}O>qQ+VR=ZsV57_?w`_N&gkwlU9*Gfq%yu^Dhxg3H5Al6`{Ka3`sen z0=Ji=R(dt+tt3ft{Kfd1fR?cVB23t#EF2g zBr7{;Y5ptA-X4>i!|5CB=CeMS=|5%BO>0lMA-M~2I~7zZ#%topKiZnorX<~QO|OZ5 z&Iacm|G|#yA>k-j%W8;^{^sVl;d=rF+q2Y<)qCp(OsC*Gg)6|=?oE|3o0(G$o?6hI`;E`1fz%WPCN zuJNschYcd*^kPmN?rD6Yn0GjX>f0b`^sIdPct26}bZNBn`%1JTSzCFP#;{%(j>JlO z0drL)K?e>%&b!{iED{L9$c}h+TJM7zCi=E@J7yVIEyrVlNs6yqAtCfrFvX`<>X~X3 zc|s8JyVL(oCnY}5bDbcg&Cwwv^k;r?B{Er&A^1GAFHIff0m0l^6psl_M{UzYyr>fe3u$7cik)`=o z(bttUJrg4@iU4jW_l+8&_gyW)E>{P0bZ9o0)DqgP+=N06=e_=SyI0w3QLV#?PN>F{ z>-~yw8y3hIyuZIkC&rr~THHu#PI;YSkR0Ey%R^JG61q zGPoVTW;k|!K9(XUJ@!FnU~+$Y{N~ff40bO~kQG=(b3$_o&l)GBl&!;~5-N>jvTR~+U} z@VZ@PN=v+Hrab_n+2qk5lR|g|?iB?Jd)c2%-hCXwjkqPcWoAp)2+Cnl5t{f2t(42Q zc%Dw;Naz}jycx|wvm*qPr~hQpJbN*Mg3{#L&(_MYMD{cX*)xlzm{2irQSwx(D}tB^ z3mQJR9=C(&5CIMr&*fN6Gw9RBPF-w9;|~|e_Uv9t;FQmjT+$7y6RuN=H*4lWoit7K z8~ldzg;r2Du3bb%^IX9|Q7&7q3~c)HE0OQ`<5SOGt%Ot_UP^z}MwE>blJ6V&pPmlR zNX?%coq3wRshilz#FS}0)Z&VZ^E^}ftQ^0iM+B>+5Mql2SM?+%JsejS_ojyq5UAbd zg5*`1M0L*MHQ^A2u0mrA*LSC^BbZnuc=Gx9Z?dFHZ54jWzmqC#jB<5 z&6b63o`K;)3mTP;&*H%zw7qpa?pOQ75X^Qz)}zl{<9phKIU46DZEAo>e%*PDp}p=n zVXiO0O6miTKUm=xfxA8jZv@#>zyW)g#kRKl46PsYL|?!DB}DeYYIr{{>s!mk$WMtY zrynoiz_0p)Zlvb_y2s7_jqj+O^Z?s|cv+Eq5r(-=GgmqgD-h%6m*9<2_pL5Ff1i#Y z)x{AUTT#}Q`?`WT^IiJSfbWwEYGfczliomwq^*&mqPx3tA|g($lneg|?nHR0M40n*(r9xHnk|S(oLXE9-ye*DuZdgfEKe)vXI-1VoxgO{2Jx z7SF+}8WO;n365*#9x^agB95Xp?!Y~sAse$?r(7kCmuGNfhvAbbe@|xdg>lQTfQ#M9 zFPkHkivG=uXfqujFYnv8Z_)7=<6nOKBr$@5f^vnU;?~UuoT+GGu(zkDf8)o}(b3`I z+vg-N6)ptIyCH1A7gmkzVCCP6e8gWfhcHOX?YT^NK+ryq$KycSb}>uUiW(`TMtJF= zXZIfZ9UF=b$mh+YCv+GE+uNN*J;Xx6%;U$qk5$oO0&G;B6?S$DYZbzZ1ngv1;*BI+ zX*nH1A#Pu_>XP&L>Pb~Kgzf}-K%X3nxd8At+~d_fc^{Cku!s34F6!+Hw;+0mqz+t3%d7NRPx z{pqL*adce^8P>_I|uIW`@1j4ukCf^g!gSVvmy)(;!^&azuC7$NG>FhqR~fUWIL}6 zse-y+cZN5PB1pA!Yt>79_VK3_&{b^#MJBq0LlbFGDoa%e>+c`?K^bS(+HZg$P5HCi zWh5PY50&^vU$})ZdweAF^zS;TUJME(-2~)Qlku7I4sP8uX`f6f)Jd%wVkvOH@$2UN zqTje}RU**YDZF-F*en4e9aF8tcKC>zlgERGK`@tFd?PobC zhMh~_PuwUVbR@iv&GI#*0qHv5v%)+VXC_jS2(-!*TJCBQPuDBvelHz>Ml^vvA}ILK zB;}ss;^HqhYGd6AZ?#gTl3K4!*m9zGCtSathl2~g7Z##R1f$WT<7)1`!5smCu`$rv z?ob$@N8eIg+f${+9(8R@YXSf&GX7yN87Qku-@qUR$O?_-?m80+*4~j4$A)8JS_DwP zYVKpXS#b)n>e$sed!TeK_cNx}`P*1cYm2EJ`>d0Cqe!(Pw83a*Lje?7fctSVGr;#x zM4L=ljJO>Tj*{xjz_z%Vc!K&^`o!f6sOpvr!iW(D5G{H9+=?)Fw_+1_dy%(De*OZ; zxsj27zmFLyzIITHRBkEv$9tTvOeTH$-+|7ziU5WkB*wRKmODlg`RDx~(vmq?ssK)e zgvK*FPX{oN0~BR?QgSd`n9hZPJTQ)A@E_CX;fOMt@F@KEZa1q~6#HKWwiFZ zQuKEE3!5WizO*Gqyn174-fU? 
zk{SxA^3Peh4!j@(HsVuKf}+tSv?W z6Xv)Z>#krx%$9&|KH=kro~4pbnSB7O5((roUq*f!5+kQE zCuIs5nPS16VA%G5F2%1Y7UaI${ID`S3Fz1sMm+~b5>9}&>7ObXf!+e;KbUoXld9bG zHXjaS$!An0M;h&|zt!Rvh;)YS01|W(nh6RmR75dqFrt1zwOaj0EJGFk# zA5eJaHcU4ha-bK=L!7j9Yy)D_Ejn$#J7SRX0yD`W;NVs0XReLPU&pC(FLzG-j=ogj zqlfaDZ-mrDQ~h0Iyr3~ZkuvhdN!99`^!8*24!AoQL{-Aux_MzC z<;ufrP^9EF>@W`lYs4d< z94mS~L>7KzYO)OQQlEe(-N1+~CWAz```v*x`;O*h}dmR4=fFR4*s#q|l8zewFyTfU&Zz~S?NnQWP z7J>h8H=Rxaqz6@j=G@u|qU^_t(eM*AzXKhjR4#(^3#*NxP3T6v|LI&qIF!RSe(H1U zM(OmVTfakFhm3gV>b`R*!``YDX%*Y|(}#$sLLr#BxkEwMAuGVflyCS0;(_E*rT7%X zupKN>QgowiBeyp|KlWzl7r2xXQ#y$RR&#@ ziO}@$_rT9q^792sMyBh&JM8whu8Af}S9$O0w%@RCw`8B8FC>8y^|{2tQW4RomV*|< zExlI@_ckC)Cnu|QbmkkBj6_Zp;urnxA)=w{ZHzN8tQ8Ajsc>Wbl*?sXGf0ej3ntx0 zrO;*f-&?U7GvT$;pK|H#qF#ZP%Mf>8Z=K;7ZPnPmetj~a2xAWqdy^akECniJU(P>= z!rI=SL1tlLBn2eM@@;HAY9^Cm%CZJo)Ra7~G^H9+8x-S6*Kh8JKs;Kq88zdpt>0~U+#szgHPEc& zY`_0yDZ)+|dmYSIF^}lmJvsgfZ<0p>0ljU+t5xL%ytd^A)mNrwUh+20?Z7vN)t5jF zikBkHLN{iENgPF}ij7hB8;i_nnL`5(<{nWr>v7E;y?yDRI8${wQ=NndsLF7PsH6FX zgo-%u(My0XK_u)vzI(_lZd`Cl>Sfs~_XXMrlv$W&9c(Epi9mIjiVc15YO8QZE^WU! zEd!`n;`xi#7YX#_Beh_f9Pdo14N{dGjlQ6EYdPoCgx{MMx|%#Z?fG1F{^$OE<o_%`Z^BxTPhh}+Vkhl2^muQa5{cZx)a8XFrZ{jS;Z-%Osna}@mC_v+b=a8gr)>W5$;%t?ube_e zNKUH5Pqa~Rf`TSPVYd2_E%sKCo8709R)sL1KSG>yFa$v*2gmiadikY&5LSYFl+_ci zMVSztF`dH2^lIuWtPHUC^KR87viEa?((M?oVlbSc=f1_*pxbEJOCx ztnZ|g>6#i_yvF!3r_-&`(ao4VlZ(vPj_tR1j$B9sJyX6&@KjG3&V2~J94Q`>kkG>)5yewhf4YZCk*Y_4#_ z^hr1vb7^iablydenLaBk_qkl-iaJD9fH)}>S=qwvLX!7-foTw!i%0y7)r@K^s_Bg;js#l$QZykXMrBAD7S&o{2U}FrW~k`5x))E9M5h%o~{=3idkWGr?p_lfnUy>JL0_Fnw@{$pUbVeKnMX3grY-kJ#6FZE)h zjs0Wb6QS>FhvhoO!-318J9Gy_Z-*3CYf*e(!8+7AKIv`gW`rL|x>R;7U!(39^|z}l zi&2Hwv(;K4<)3I2-2!RMb-h$711;4n_mKMq-%cK)ln?Ho-qUdVK8r+D^4C^gP%o*2 zsR~a&G@K3srFjLO70y;!GIghc>VVllV6{>{UwQ;0nj1fg?(Aj)h*a)o-8aQ2|8>>d@5dty)bsqg|aW?%#PYGG%{Ay#zPXF|z#%U()-PYsCt!c!if-gq zdXLkz>%FwjknTHk)aRjB!VU8}fD(M`?jmJ29>ImuH_u#J%cOeL=fW0jevIz*%x|V^ z5T@QAM-2{=`0XVqol}^&V}RZcRn^xn1h4#Ezv=6rz64(NsfWmK&iuoa%V&7zSlGmx$2*J~?#_IabrV|X#J zH5%S-CQ7X-RErwV$WBN1Jf8R&O`aZHssezFz*S6O*m4*iNzv+cH=@&(FL8vil2sH7 zC-PH46 zRC^~SmvwIr7?wsUjoiE-`Kgiltp6=fG-?k9n?9qoRS!<5&s%>yru5GPfwxfgwY`Dn zR+py;#Iwp{Aq8C_I~;lVk~l^OmAPEoo20?!E0MxDqV1zcUj)oM8o|^?MNrZ~Ci!(2 zwTk!AN(Dkr|LS7*{6^T>vZU7)p2Me}O%r%Gq1wXb)BrPnu>|?`QNTq!wx0j-H)+(l zdNFUS$n~Mal}c0Uo(B;|hYnL_Y-BgSJTJH%&=KyK#jd&C4^{YpqMp2Fu`PM8p4fbs z%Xj^4?mJ0*&iV3ucplNgzpu2R^JBAX-B$ju>y`nGqU==RpSa?)b0p4vZ1@j+IN_ea zT}rWaEHp1KVQ2^hGaX+}dR*zxnxdgs0wK!Enq~)F{{lzJErT0oCh(ZNFD?csTfUzG z05H3ksmhb%%QSSXkqu>?7|h6-A5HEfH|@S})qmaV6&Tq~*DbDHX~0vezIW%J-$bmr z-ungg=lI#X{;H7|DP1OHlGEw`ffZ+)gp;{vyf+J@0~_j#J|6z{d;7*I@#iEL^4a#{ zq0~wL-;=B2)s&v09O@!a+~4JtmArd17p-fb5U$x7AICsk(Xc~%gFJna%q&04W4?bZ z-|rn9oJ-i(3-K{aoQ9sLwmvlH%cf~~sSO<(=cg;WnGhwRTbxa~|16r*?qo?G7Yj(r zT1tk0p@_gY54Fd_v*pfttEbG`F2>Y#;DN4(_?Y#L|9{*^kAJ5c>rU+;b5`KvR`M2c z75mqqC#i2&JQzrZC&ppdnF%Rw-j6(h4g@v{Uw@;rk$bpQ#3xL24t(mPe*SR_&D$sAKGYtSE5RwRyi8f4< zYxWj;>+$C6qo(pIjjdREiunP*jx?sXSJd@kN=Yh0E{8=@eHLXw9Kj&RY& z>2Y?gRB6?i30Hm&$NTwGE?%r$-y$3D?S)@qYrc#5?%%F~(6^f?qEE;3PYIGpUEnr`9sjlQQGOOe3ww!xh$M5mL*NFJqhKE-wNj$kyT7M0- z9Yx;nu96X;YqHjgUizjlOV1OVP7Qx)4knE_B6{$W8e2TPq>+$p@jA3CrcH2K%Gh?A zgvF;aVXe6Wz{R_u_n9uH4f$=$svIHch%hbusD*o@Hu?%P20tP)k2G_ zr}j?nRwsie1x;lxb1eC&2I#Vh){WGpAzhrIb;or-E|3{T;f$EOg4#Ga?5>t-bG!2b zl9yx4s>hbxJbcc(j-P(_aLqzio$mX&^AaE{IO7GvAvo>+MS}<)V0l%m`8=<{9AIvUaxr~UW0c!VNliSbPzkE18p_Ki_+T7>suz#W zo~yGiF9Y4ZveBD(L2s~qx6j8@PsTlq7?azWAV%Ujh)XS?_R2D%!hCwbHJA)2eTb9XMF5QXVb{fo()6`SdT{XVMJt~0#PdRm zdS@5n<$_o2Oqq%Vk?#I(Ckw4&c{Fhc)1>yXj?~7)l<^pv04ArnG#)0PTfV#=tFTLL zECVfk|6m7bMX91Dt=wg$XHaxy5cY1@hj~h 
zJP6aaGERQhPCjE-$u0Y*kzjg&97p3Ju1*qa?}rf|FOg(XT~Ut7g>w)71TBA*?5O@qy~O{ zKe;f3x-&4Y@5~!Fd0U-!6jh^XBkm2od7zkkoz0HXq7(RJ)r;B5QY}Eceo;pHT|<(2<2<@`kQu08&wLiM%B;(OTM%nx)?a^r@XRXpLCYO&VK|7$8i zlr4_za_T0Nv?~J}C-aY8%OT563gOKwrWH=C4^E$$2I`XNUq44mOb-HFf6y*@(@*U5=>z9bmFwzfT%%*_CQVc`@;lF5tV>f)T7di>%k)+aCF~UQQMg&ZQD(4wWGi@gr^Mo9dz3}-uZjwl zUF5P}{&^hjjZKsgeQOt`guM`1cr4-734%H+lDc1Z1U`;a9LG7`4 z_~GtLFgkwC+47e8JNobmC7jo?vk};KY z0jnpO-er%4Sab>H=(4^AFh(!MPu=}uQe)Bk zG!5=_WOI0!FGEscm~|H^?gn`m4>%fSflt|5rQIwPBx;2!%s8}l9_Y%9T@uV1dV?r| zP&tB0Md8;I*;@pbH5n>J1qgHVYW%D6K+amrKt_5-P%=A((V4zyUxwIg4O%I~jK5WG z`>_NlTcr|fG#E1+mPV}NPosj$4_j6u zxNc%VU@O1P<&4kC8c@^F^id|Kbx`+lDVv+O-Uhmtvx{3AesS6|^Vyxx7ASPUFGlXh zMTrPrvuql*CxsJ)m$GkHIBJo~D8G(Hb;a60&k8co^4(!*&RfwxQJD88QWh-N2%4k7 zs25RQLR3E_W-A88Jf)U~nz2lz27KOKAw5gu-LS>w8k3bPYJ0|vqIOYn*?sFYOQ946 zOEns*#-Sxs|E{T5R;K%SdIE>P6OLoa>GKv}lcYl>Vp9tK`8@x?{qhMPp_)}-QYyQo zhJgon_gu3;U5H{ie|R{71~N=izlGz3cA?U58>u@aVWkaL_~aV28Pq4>nEKd!s>Il?DaI0;2o`p;NqX{m zq(w%}7_m@Z>pHb7ZeJR`z3H8H7BD|o=i=;Sm!6Y@PE$ia_;F{d7Iow&@AEuuXv5JU zD&wMd!Y!i{Mph^&HsNdQ zRtyEnuzHDoLmwUFU1EgW-78;3?ceXxIfl2AS9~XYZAEVPe5f3UX46}5B0MByJ`PoRm@&0ZE=o$ zU&%TmZb@*_UHB4A!?)|gHeoh&bTs?s6DuvE9sd0{J1fA_g3UtQROh2(2KDK4xDo`6 zNt1FM9c<9RD(S9xt*2H>XC||E z)Y#b?VGN~IP{PpnY@(hNYN5Kk$VJ<6#lA$q(x{cOb-!QqM&Qf0J?lVHyCD1}wkssF zp5>L2aq3@ekM{i8by5E#`vP;tL+CpqR0=7~s)Gq?45Fjiz2^~3VTNOoV3P98l7y~{ z3BCH=%-c;EC46wH-=%~N_Dlwc5xEX*zS_MKPBch7jJxG%w!jX!aB2K{C};P;62RiA zXrvDbkD+#ncR6WtFi5+-Q*LAiXa@1C2M1OmW;Q0m`P;(-NZV@zf0{WUDqFRFrZ`Yn zzjBo=6InGExHx>aPS6O^J=ak@bx%7Ux960ZnD-2e;UZhSjDL4>yNcn#+LYlIX)^X{ zHmfZ>(mXlvf~fo9IR4S7t~Ly)DCaqGwVh%RdY3t9hPF))HMYeffgi7q1u*jt_`n6#LQEj z<#rSNYFdjH(mvQzsx8(7boYM8kjZZ^~1 zf6aO~KQ~K-wuk~YlV^%hb-4PindqGfY?zGwtGmKH7M5jGJP z?cT9-l7~Yyb&%kn5W%}en$BL_vuDv{QKkJ4qf+xOzRVf%?4)M|XPL<1p^>DdB-@wI z@(V)E3vJFxz;($JkbI_O%>%Wo`)P2h^9D*Ve0H^vZ~m8`a8# z0CH&c%mt74aKGbzxZ+d>^6fS#M;tQ}P~W&_q`d`zvq(;8&o$6vQPy>f$%3%9#_5PI zx?~r`vDZF0M1sI!q(teAEBYPPh)StPB>`xGK@7;UD}&l8wrBSE^nDlviiyZax&0DT z4%6d=i+9BXQQAzgDVF^2gJZFY7$fmSXHExy4waS_aitSl47N%*CZ?+Y-8-a3kExOL zhOke&WP=o{j8H@|b34#-Ab|o1=&=#}af3LhK&3%7nXO9Db03}HK zO|82>zzD3)-B8D-%3sgjF4_SAk?Kvia?)?l4?C+cMseb|Gm4VN8{Z`8pLD?`5oRMU zE^{A{pS6Irk!|6Uj}uDMb<5D0`r_SJDQ5Eq!>rgKb4jwrTPrdGmpQmzAPXk$dFBLy z4O^5n5ldto#@|BIyw)BdXZoOWG0}G*_XuBRgrKjJOLTfocV#-8017MMpu!ubZFl+n ztBhK9p=PG}k@!EsNPd#GoTLs{Y|?bDNjYwPaBy0iZMGjH>GmVRYGb7ZZoQj>h+F!QmTA>^Y0#ZR`wGqc zld|&b13e)3p__Ym)kni$4ia(MW%5KF=Hy$MAA6}H&orx-%cgASQL6{B`~ci0Ze4mm z&(!y60#7I}uSZWl-N*J{fCC|EAKKK;SmEGuW~v@4OE~fB zyaj_eAHbK&7s3W&c|IKr?F8Yuac>5V-%ITV>-GCLrkFa1W#>B%kT!H;H_g_&l``-%Vj#$YIqFPb};TtyKos{COMe zyHE0^Ttz&o6EsOLkOg)~Ww6@-$sk+(zx}8UnI@D_vq86Tlm9F)22@ALJsiW5@a+c9 zQKmG=29_rp$qAx$5$946HgcxfzBNb7#toi@)dWDd_QnXb;sv|QtjboWc@}AAeNw%0 z=owF|cjg>i_az4J1zFtINFE;#G}!5QrVp%>Xk!PRgfWk{-I~rCunjr0?oqd=ez)0KpLdissPa?mvg(Y1h3Zc7_DfF_$?Oem|Re`B&<>-9_aLgn9x;F4zmfm z_fSn(<%lCywDF5-W$ei7m(7=7#?sdK_=U=7aN#{VKO-%B+&$2IBy)GLs4t?BGwzY>)OFdkD{ z{06+`btK1U6lEeKJ!K8BSj~&mukpI-!u+_RE5Z>^h>wW99s$AMr{OUnZ~x>W+D z{`>B6R&;fXOOoE`#NT-xv1Z0}Ss@~(kb1tLvL#YXE_yv#^-h)IRwlG+9;~Z*k=ylR zU8P1FeqshG-Mj6DJq2ixb+4V+WqvVDul->^obEtnS{Dm6)m7=%&#wqOeA;9P9lg!n z&Lom*#kha#m;xx*=NM5V7)B$#0!VGM9MH8NEjrEz9UI{uY|fNtE@^RbxD!Fo4vk74 zCCSVg1Azr!i`+}RBOCw*wCnt0 zzaoM25e4d=(0ga9Tyyi%`Fw^s;^{0cAdY!4!0lgRfQT^Q!QT2}kdC!1g zeg2lxK9-)?@G$W;O3#{qJ=CJR{`>Xgvp(geKW^!f)-<*SMk~t|INX*K8WEbNj#*z( zn-^qpUjw}0%e&NDEk;*pVjuc&ms{(IhFtYmGYKC}tB9n1t4X^<)O?bWr zH$8eJ3VF@%oIUrW`uiraJH|G7*0eWIFC9Lk6D`29 zXD^E?0v7HyKxqTyv2sSyQsR;+d9?0|q;jxBA6)tIbDslMf0B(YP(DX^?$ z#ULf>b?%n~8}lCWoOYR+&*Mm8m8xHf+4Q|JJUL5DhacBXzl^k;rQ~O=x0_0v0SmJE 
zE=+S7GJf+|mE+6uw$?j)4=px3oBG#1beCi?%#pa3x$o%V#3MImgG*$!lsi*?PdXbu zgfQes67Wk3d9R{rhch!-vo99kbGa$oi;@{k%*e}pnr8nww-NfPROeF~J_5l3jD0AIBCdL;;q z)$uu?Ab&Wadibp?_cxB~x9<-vp!TdaKLx!SB}8CL(>jKxm3y~D@4q9N-~$^DF81Dg z1|dPBl3p3&!o~Pd53Jq=|8sChxw4IyS;dtTQAz25y0imJ6*CO z4GyE0`M}3{v7UDq-UjGbHv9jghpRnbG~Le_I`n|76xFt9E6t zFKOSN^gae%YHMJZ(c7+j9kxEWMqT~W`b_ZBX?;b1-hz->JS*hPb}RJ!b{Uu5=C=2Y z;9Z#op2kl7b4 z#eLz^8$vX~!oor?Vwt7bh%o^@(??gXv%h8-@OdtB4I8{B`)#WWB+4hb&>WG(?y?sh zM_!g~pF!;cvT0~Om$a^b=jITaRDtes$||j&NS>&eSV&MClBj`rZrZtceCEJs)6#3^ zlEejb@(mAVi>A}iDNRg{iwFlu(4(K+4b~P$dZtT_D3v0w)j#4HDHtw>>3^DdZn13P zHSb?&k(lIk@K|Vk3;74VaXakRuuFW8q|^4eRkoS4`B{ zsCjDFIQ9K+-zr7#TPitRo-PTwBM;}HmD!KjW^lx%J?8{$h%9;egFqLZ8ATPoXt(3P z&tb5zxeww>Za=g0Zoi!CmZRjw^!zTCC@=qEj0_Jr1So;CDgWLGFeE-6zu$U17cXhO z^H>UGw`Cs_UJC>3!nz@3spvEEPu7;LzAx;)MYMCgGCfeHVD~X!hJ0su) zn7Ny;3{9ElWo3~UN7c`e+Al1TX3Xp=zI9t* zqo0$J%60!@>y z@6El`JJkQTNTCyaq=Dx_hZ~_A@!4Gp>zgOadL(uDIB=#FMs&_l6()xVrDCBmj||r< z@dhzH2bdEo$%KI?!I0;GY!b{wNV|@_Tq6m2VCM#_nLMrw<L$4cBEYIsB7w2>-?F-r$z22A7h^3 zc0GI=P@cNzy*d#0-qn%HEBnkAg1a)R__V@~SHJQ~``*Zd);ZD@C2wX5fGz;-t>C``pc@@ByHG>?Cyf%L znTS>NT1;-2_cl|4{D%+>hg?GmhiG*dE(GMX|VfMecMrM+(rW-(`I) zg%KAkOj0PWZLZ21DU|`=x*il!{e&GJ`5@T&4Jn)uQxQ*k0;mdT$i0YmKofP&)Lwd& za&g!k&}(ZN>EVTucS1n}&dvZ5p<(|yKr;RJ)a1=x;*wb>$+=CDu8@Alh=PDww!eq- z)SbqF&I1rVkZ#O-?0?GoM7@86^W@qWRe?ml}>_B`+-iFQzLh--xbauAp6 zYwoS5QDYSv9WX`Fy=jf3}dwEt%tv?+Mx8?oPuD)OE{L*n%P8yoC_LKqJTAIJqtOmLcMLq!_== zj^eaVjF!No(oH5`dh0QAf$xb$O7wV=3tU-|dI9XSWhLh`PofO2bzc=SyhPk^~VsADfb35eZ}m2FeOzO91sfW0^WbbP6Fi8kppOg`J};k zFWfv$TTnkEryn1mvpYYh`CWi%tPr`29A?<`XZtjZj|f{VzMh#)XNejYI3Rp;yPzkT zRuJV3NWgk5c@ONJ{4Upd#oD$kfr73j3>^5I!Qal;Odt^moG|Zz)9k|4z}G&-4MnO1 zA<4S~l+Un!NES4&3cE|{X5{ug)X$L!U=Z?T-Eg;?uU_vf&ar-Phn!eog82P_pB;Gr zRvo_9b!c~Lf;9-WeWE>#|5kM|F|p^bBha8Dv`~E@Rmm@uP1`E&a-nV9$VK~L-+zD9 zPg!#E*Dpu*iwF??a8uTsymc1o8ms)>Y0_J$~jZbK4Y2m695LTG4 zeoBoW09{Z9y`(JWOK({6J==&RX|g3ZR%ESku=2dPhTB-%nDwyz%b9F}oQd&!bkkZ( zfXOB8ck7kMtR#e(_udli(bl~BGExGuAOfHr%w=`q?*XaD5axsE@%HT3 z?z30ldnSmB!c0E(b&68b|H}MabluPPl(gr8+9Lb&I}s`?-&|K5BC2|7&(a7}X*?wOV-%)#w=hoNa_U`PG zNpVRiRPE;#2MuN*;oBy?j7&DgIE}5M2>ti3i`*qIW(s@&A+)+us5g7Z@FesNQBvkR1e%;%$>gM(%UqJ1m$I^zEU5=O3-dOV$?` z*4;H76lv)$@$UHqCY)*ZYXJjvw6BWe{69&$vG`nPrN8#F$%s)2vA(GR$8V4qGhi&# zv!xeyPG~zhW0N^27&$IpWXB))`2s3;Mejx9o>JWwGlV=z2Gr_Wf7ew3!>q3WQxKgK z66Fzh;Xkyo=j-B`bS05Df3sxWB~3}c5+kyyX6*chTXO9Sgu3{MXr$utp3QgwR^A6HkOysQZ{Iv$mXKbWa5u-@bNs|T}jM;16G8fT1C za2@xDm`Y& za?)LxhIRdz%L9WM_(HK|KRuBmlKNzwugB4Pf+NpTSTz?`YRgAZVD<9-q#qf;Zp&dv zOQj>e!$;!eu@DXy#*8^e*2k$s+{ROf)W_8Uf#6y`zUCsXVXjsnuGv>HlRgNQqVJi( zdQu@8M-ttg8l^yK*NmQGDg{Zan}`&wBU897SR z^%|U(0p+==a{awaL?~g_*@zjWlhUP0k4nhF-r8RBxTBeXfVR~Yb$i>bo_@y6!&tMv zz$8V85#QTl+#6>n=3zh6SY1?fkKUDiD@-uq+#K|OM5SrH)>M;?cKB-8^W8$|MxQ{@YB;Unl{9Pswh zo;u52`iE&{KpqOcRWh3r`{&ZGUt}-3hnIz`e$%q$Ua>xjxi15{(?C=k$pxJ(pK7^$ zY?m1Ue)*8ZFYqv2n=e^gB6h;6VqPzrD|v+OD}vzbj{nzaZ2_Vp?MWGX=ekkCmm4I(Si$Om6!Fj8Y|KCv zZc1e)DnRHeGXq+)Sp3*y>8mw$uPUiI#M076cOFQH=^p>V7$?Ah%D|WMD zW8b1)^Jlz%W=-`Ns7p!EIzaZ@+m0oAD0_q{AG95(RM`UZ;jU($u4b<878b18^=xDq zADOxvmL_<5ir(J~3VbcBrfax7wM6?M6dK3Q+juJ2U#nJn<=ZYt>t!}HS7%yK5R8ST z5Dl>G)pnb_1kLh-)pamCo>4yNUaQuC7Ufm*=%}wdHwl~OTJHuCs*&Es7|xPJ3N0g+ zMhUt@34R4GAmErCb?WuTZ-ot%V?K`%nm5d9l1en z*5^(Y-i<$AyIAMqI>TT}zLA=s(omSTPN5NZ1W)^N zna6W$9$jzDt;P^Zb*_FLf!iEuLn>$h zE$Gi98UmE`@&I@BWIG^M;%1eSziZbe(o5O0co+vM1714k09)EX3=gomHtW)$oo4%t zqP)E*D6!NPXRBH_rA>zB^E9xy^Ts3~gW|EpL62=}JNwo{`I2u$qQR~gpA{tNYYwx`^Yv~TY zvEnBD*!?f}dldxaevPbOCA>RCVRV5QM*Q0FxeJA@FnF@#8m-d3&}!m}ll>dAiprMX zs|4(p%NQ*WH*I%TJnDLA89#l5uz7I<$_4-%?i;lHJyYMq{aNSI5W>fgX#F=gb0KV- 
z`Yj#}-|Cwe>^rWpR_;3rTxi;pBB}QrKkTB-CKr4b3JHIf`58_P!)@VeDX9fF5)l|B zO&~c2WeMbOQ=iMJ$;rOmzkR(Mt1p4m zX~KjH2Ec7%;zIFc&#rbk-frX&0rnc zn90U+Zyf<*9WmlrVa5hw$oG&ZAJj_uMI`+54 zJ{;8eS_flh6aV>ZBhLZ}r9gdVynuCj?F|VL_#VF$&7R_~u;!EYYAt)^c1cT$W_+R=kKHt~`NVDG#7ZCOsy$A8FdmnVrki zej!VqKUn(aRXYujWlpL>uhoTK-^C76%NW>{6!-PCa{OvJl%bR<=yvH^Y;EnPF9*6$ ze}!~tWBZ%@VC(a!RvaDz3f`QYcm#Oal72Tos!gAEbZop{E>c(y}meYS|s%EH+Wek~1D=G0yauYvu@>&!#aNg?8Q|r9U zD-M)C1(-tteEHEa0ywKBJ<-8{qbQ9i&XZBm|NJuEq-SqyyMNo{GBh+)nvv0bb0f__ z^f1&{GS`?AAWm4<1JOH!KmUE{mxrXt{x}l)kagjDyR@ioTR}0{S8g6bVCY>^l$V!> zC*e);fqbZ16gPeH73QD3Ue5hsuFglxMmKYMFNKly@3plHxV9eo6X6uFQ2KrMC*_gw z_A4%5K#)9=_QK`N`Us$FmIb3*Td7!_oZFc^;EFVZ+hjNeh9fmVIH6y(%AIsRrf< zq4jkQy=y0&y7h5!aTofBRF5CGdag=6Ph(H(-*ld>;dlrl2_lj;({YwjGx~>g<@h!i z>`Kx;|8O@6#8D2URsr`3t(q5f-;fC zO0V}xv!z6Jo8&nP0~8Bk^a_gq9t97m%hGh!9Jq=OiClV$`{!2O(8I+Iff=s5`qROI z-`yOVXnJbW%guG{8|L9>#8Ros<~q313QHf^iE-*y+@dz=1MLo{`G< z#HJVhi|4sq8@M4KUwn7Sm=-Iyl*GNAuq%#qYLG&t-u ztO3EhLb6{9Inh4|?Oa&6U07&65Y99DTwl}!KZ8P>Pl;oy3ti`o2anF1^(&pbAJgUj zG^>rpJ3sMMtdI4knH#aKY*Vc_iiwtsKh-~#^>_)lJhJv zQy#kpbV$0sP6BA)p*!{$ZmUqI)3A#%T!=)d6KX5dAppUJh5|%LxwpvWin0nFub*RT zfRydFa$L3udXgdswOJJ~x$dL|2KyZD!>+W@xn@cSV6-KA#yyMf_IR@^96ybEpvG?8 zm2vIFGS~Muo%!O%cWDxWNg90ds(Ko0Dg@>y0TNJ6ND)6mo3QL^6K^yd%4q31q-}Aix^kTF5kK$jkO7a24W)+r+r6mL84=3v3rgh;|bI2KJK<&TgXj=Y0!>TB&RM<>}SN@1?dDjL*)=kHP)HS-xVofX-i$ zc4BE<1dx6~6Ec`CS&&w|vn~AKQu3+`ePc=S{Cs-inTOr7G)TnlqVr_DSdO`eJT=w- zKo@34>3u$+7g2o@>C)CV&c{cAz(Ef*z+SQ7he_2R{0yV_Dk%}IejK)X?rr}5UwHbP z07#!=0M{|Gdp$Fb>9j}ZsTWbu#x{er(r7A1Ss}%)Be$}sAkC1v_hCfQ-7NIz>v%Lr zHv4Tn`q%@8T03hl;u!{5IxEj6QFe zuG}G#q6|AM6Q#uP^<3M4U0()p^9fxNpqG|WThj&)!h=AoQq{-7bjaWFsNY=Zy<8+j zPIqAVK)0+C-CP-p|SUn*rSXURTx4qguQ=Xolp8P^wWBh;j00=^~;u~RUVh=Ea zGHp7c-D@nqbPD`FI!A4LC4Y`|*Aeo(;)Kqb=#*64(D0n21C0sLGow z=sr`(Ont5CK#zXBtw+lu9fY!wN~nCk-Tds=&zT<+u7&~V)S*mnStY!4{cIYnt^#J| zT(6tRwjrh1$s^^rO1YvwFQ>n`*?2Ro{_HOU-GP)+U{pS)(0KGH*B7~n<#w{26b~kU za8Hw#?7=-D2R#^i5eo;O#%p!9SkvvBe$)F6Veyqqi8==d(^r1ywqv?#e}g{|+M3E< zly0E)Uvt>8ybA4CNyU&ch3n*^^`{NHE!b7km$vPBUM zLlBsW{N2;jZop%FNMA%a0f8ENy6q+nXgf|S)(i7U=psYZ=Em!w1r_JNMoD_#X8Bq# z8>9_vWWjv40qPtYJI>OK2~LB#`h(Ex(9gXBk|M}}+ZOa|Zi7;oNlgEyyrALN=#-TA zr2sjVlkm!=w^=joztI2RSK5aa2i$6(*gp*-S-Ce=23fGDV0b{1#p8e8b<^C;Ni{_$ z;k5gE7T4wM3^5MVeuNi>>p`_q?Zg=WbOnq&-vH#AKbd(&g@mBaD(Y`CZ)j*v#<= z>SS4&F-Z^P8(d|aHc+>Qg@HkDpV6%FUz*-lfPuSsb91s-w;Ljy5UAKOB zw>x())w#I{leV7wO!0F*juurM92~ZE3B3@EFPHoO?scRYBVdRvB~a^Tz4|V4zNYHP zzRB#eN@xkjFR-w+?!*Ovb z=ndW>8eJjSS;y*X^$AvZ+!qS?BDBeET()8U{KTgV;ApYS6p3?Lln@dWRMX!(pz}Qn ze!4gHy98>>On|A#ojH$*;m70EjbR)^mcOl$6w384bpP9`jXSS)%{{W`%y^0JKiFzL zjTREBFrC6f9>!jlKPU5YQVfa?{ZP9h*N)kURXKl1>fTE6VNBhoOhk8IWEK;BYR z(TO@gRB!g`nroiRuz{+pPF3lTPiTYr#1Ke2q^qs%=}s}NoxS~!tD6Wd`O^6VbLt^Z zj<^^l+oeY1!aFwV`UeWik|-n{YQMOub*>E%Z;!LfVHZ*fvQOFk|1AgVk;qG)- zr7iHJZ-@%0a|^XYp}TIEA0y;)7itk;N(4jA3^NW+$PORex z?ng8Anh&$mtwMoYSng`#MX~{`bT(rGKIo?+Oos zEU^Ik%mE*X90%Wn?(F%7%0oT#wCHANeM5uIoB(Yy#!u(042{!VaJwaZDmI)TKv+VI zB3jnnc6MfRvdIOJS`>EAHR(!4U)>GT$Bp_;;-emUHDo^nKS#}R1$1pX>mTmvYU?qC zlpBs0S`Q?|MCIEq0)(%E*V_eRi079|ii+mWBKqUId)B#g4GnqK#!AYUazn3G-R&aF=dKC0py)Y`(@d_m;~xbr90Qmkde6s? 
z&h(i(E5GPfQcE~lTliPEHNBtTie=YV2fyVfzR`tU4A~0W%;OhMtUFOPx1ZwHl}>Q^ zZ8rI$Mx6`SgyZsAqNJ3%TwCF z9q1w{!k0Nf(YflVq_i8x<4xOuXx>)_(U0>hjo)r0*<_H%_u6j?Qku%e(V_>AP~YvW zM$gq18S~e*V^VwUEzNZ`2T#R zvkX@ZEPzIP?l7YheaOA<=69xSg_VQ<}zkq4s*%4L6UY+zAr>sYPLp{m?eFi4M$jumo zKIrirsid(R!mF#R^Upo%X>fvL43{h5+%@fi+STAL-jf+%omKqLG5x=`XV|*r1x_=K7rHsPF>rpYTK8^;vf{{M&YBn|N;p`92J;if(1OWz@W84$B zySCtw7@<$W^!pQZM-fiRdt z)S&Ml7bdHDH>?vM9#&P`Nea9A^d_oA{4lMN`D-oCJ`0cZAzCSk-K6rZMF!~i00kkoO_yu=!hPYlKp0~q>uF4e{OvbyqJW!%h!pwtf!b+Q{&vQp zbIXE#{D65g;LMw2ZrNi9CKYMLk*(a@e!I$h9^kbbEp(9JM4#rrMIDI0Ir^z1{^vm{ zjrAe6Js80MZB$4&Wk=Mzh0vy3E$>H2V3w}7U2}d#MRzbp2Gf*V)n?Fi5$tC6o(bDU z<+Jl2aDS$;he3^EKCKinEx%l_1kqFXmL{{4mGO#+i3#wC@bO9T@relt2=I#QJx{v} zkKyb2MEJxF^4Dq)`${>bC{>|W8S=nHo517Psi{fEq)1N&RY2oMk*Vzu02*ezAcwhU z_k9Wcb0jCJ`5*S(&T zddSzmdAj{`dq=(axk0Pv;X+df3T09qK}akGqIV<?VoEPa7Xp{lyNdc8Le=MQK*>?CuKXpx*5MXr^JZhujZK8g|GJ+x2}FMA#FV{Vs3qr2yO2kJQ$E^c<> z@|y+0HUzYF&)f0bn|yzVYcA7IPdR+r%hl)S@gR`h0);S1!|HNtNp=ow z4B>ix*0e1+oXeo`PnRDam5}XBMX|N2fRZ?k-QoC*Ju;t_RyxE=7bk}wfq6Bm25tI3gnVFgN zaiA?y!hLt6ue7w(xoJ>D4q)elKv)Y=M%>i+IGA_hdr9I#o+gyM&j7fPGm5M;9_swV zYL}!z#=5EII`wvzyy*bo?kcJelr$(P(#R<|2TbAs9?hfyGUp}$kWW}$J%bNB%rrUy zC^t=+Ccyb9^@TO{@3}=F<{_(v_sN=JTSA2Y{9JBoN(L*OD|Ua|IyKaXhLWV#Jb31} zi4!_Ns7*bx=_$B?&~8RK}@k@x)V{JX(NXP>^4cDe+Cf_oMS0==)GInY<{eP3OOtyhI} z1gEH187UIwfmA~t7NfBcW10Nz?;Mst3pk8tPLnubsL}-7Nyx+a{-{C0*FG^eyXIb; zwqI`^ckDTan%;onD%lsK%XL_VhwUphwj1A8w;N|&wy|VTz-3#~LFsEYwx=yeB9x{8 z``=0+7I3eKSI!YmH;`w4AfQA7kZmE5fz4qzknC)msmg=xZIE1Nm@!+F6e}4MJAYpT>SLnoJKkE*SVI#y>Aa-`>9_N6dxO}x1q%^!*zF&rZO zD5;4+y9w&Z_@^J#j|Z>zM}A30$7)|>f0k*g?8p*2G~H|0ZIYirF) zgEq6i#QZgrSWN{;3QrHELZRBnd>}$$G%OUO$bi90vd{~<;WrFIVQZz-?ySC<{%I9KY^H%&OF-eiqq6(qXWWdrIB^-yxmp`ci ztljwGrn9G;lMskPDyNw5@k(hDGf<4>>3J8cshW)RxwvN>BgbA068Plnv(*P)trjMBbGYFo zB1#~%6erj0yyG^OX62W%@@(u=;XYFbgk?r=W{a5Ln}Mpj=K~`rxj9D!h~yb4;^_~K zACHRJwA95Pt@m+j<@0Y8=RL{3D1BvFT9vIH)i}s16Jh$Yr#9{OjGcfp2*7p zRR8+x$`W(`i~{o>9uJhF_P49k41G@kpMDwe0fbqoQ8u}? 
zxmoyXGAIbMwwob}K`Mgho)a6eOh-i1t%AB#1jhh=vCBy!qt5IsV_~5jK8Z#OJ4K}j-;YUqJ2F2s)lCOc!VSLXv9hO8WIM?>`KCH?<&};j=T^YKGc;|z| zgbBG$=l-D;diRZPQ`jO>Zk1-)Jb@)O)?t|cy89G@xAxGfza(P96AB0Iio|fkKN%}V zOhw}1n|w8?xD$=Zz>sS8Lqtn0LbZ}joXFIeF5276I*c!j=;dqaESETT)WA6bgjF|>pGw1i;eaZ)w{s*4zYJe{9>v*tT@=& zYv&Jg$~inNwj$SiiHP5}N1XIpJxJ=NoR)wu zQme(nBoaq!0<|$|%G?spcDA4r=fSC+d>&nI{Dxxb+V62zXa0rtETbHyDF*|CLGvBs zI1wr8>Jqw8X2>w%T7yc^1!b z)Z7_EphtA@ZP$^XO-l*6{^w64ztq6$j*{4+in3Oi+;rr4%Fl@HDk?@K&Ia>>aj^Vh z&)!hemrXXC&DP*$3JM4c%-Om_AY?LutC&I;F01*!Gwvgn=3Q6~>(2u3A7V5@@X0h} z=tCM8?8_!IQnKx8=P!@ug#>w_Y^54(lZ&(08zjnBb#v!bf@YSMxI)HjI)4mgkif;O zqQC!ccTMA^2hIp`88OYlIf9<8Bq69M;oIfs?w=NKN~~)Ry(A2N`nwZWqTW_ZgFzTj zOu{Sf^DazA`k8NcZ9$pZE-FLK#gsizxh-kn8Yn8BE?`GR z_b9c9jdHU=43=o7wE2K9p}>bu`J7rdnSNDYU*bpA6&legqGh68HqPli(|D*J)P;p> zRhe}}H}BGppo+Jhc(Vq+wI3NfUC?VF2{V_>dKw!akG|1@Ewxw@Kr~cIAaPR`VDgj?{2kuOAi-u&V_?IWn!68No#aHol8p)u0ZO<*rVlOWon@<&p?N6uW^6RxwTWqY|Z9v06CZ_dIs zt?S-VrV}juT3J!xYHeKKJV;hk{Mv6-)LU!c6$bQH4JITcfl4C%@U@%135I=BXq<4THem=*AnyzcIz2t@@9*!-XLysS0SWDd zU~GYhoAOy6>lidylXZM@@n5@gs?Y?+wf3&h06;6h+e8QW#Y56RZqEE^U4-3+U12({ zzTNY-6R7|Nw>gcXk$q=ev^L1?X4ybOfT0rHKyLedJ!~YZa&6|MI0ZEhB9actbR}Z3 zj9}9?{iRto%@v^WT>HbS*oBD1)zbKaTaC{Z6`cWHBy*0)Tr-pnR{D&h|CgjA(0?lz zM?G7O+)Ov(_NxvkVs<&7tGo2ueg#D99ThTOe8=@attX>a-)aj?(I}+laL)akTSZmS zrszvo$`}SB!XU`8AmUqZaJ7i`9F%;J}1YmL33g7UKTiozcBKod7+5Jv$r z3ABay`CMMOUPk1>Ewh)FER&N;}cE55^9@^PCo#3=DgR zwJh)&G%SOF>ktv2Etd+}LJ#%U+aeIT$m2q+W#lauHuMZ={9{Zim1cO$`V^CA8!z#9 zk<15~H~nlp%Sdy(gRrYe3SzseGpi5kVKSVZW^v6WCLzKmY{hRl`hxfjr@tv_wWzvo zeIIKg%2^0-pPsEm-}HAKbddl&&wENThdx^TFROQP{`7qvcLZB1=jO4I@nc#v)>qmu zhT%=(LWz2y`QHfxspOwXaZ`%Boo={{NcbF1?C6!5o(F}Q%WRq-+7^$NCrHmIEcA?R z9_LB7PQxWcez`Ovq!fI@r^ON!nS|_?vNAKHdBRM*0H5jk^aS+0!3SBSL}GAMsbz5g zUhvM;fmp+lm91@gIdp6>j42{?7W z)>o3owcW!B_xAQ?DlS@!4ACs;8yMo`Vs~lI((+A-WznRQw?aY{V zbv`dzO!*)i6b3`guzl)Z!77IC<^_kxQHzQTh_DU-v}httO&)f$YKDeYqgJFKNC%!Q zP_?eBtG5rh-W=~$PBpjibDnK>+Z3>m#xqtd?H38sKDb~4NrjE>f182UoZ7gY2Dq$! 
ze207Zfb8C(>+-7TC2>J~i9~pn6_9I=uynRKAE;@6Na!B>Ut29!-nl-&Tw|_rUZ2Nj zRj+&{g@pg=>O%N>&4T#g5D@E-dGpV~;^5;yuS;JWM}J0?igpAD9pWNBqSFoW-g12* zs9s*JrP9Gc-x*oE1c~&g97C8uo*uVnj%-QZ8x;%nF3!%tMAh5=styJs3Jj0(?Nfv& z@4-*^(jjXnFEOuoQF0sy*D$?yi&qc8Cyp)WafSw-ElL76Hxwsct|?LBaK^H-&c8Xv zy?_Jq^14k_5^i{{rOo00Un3N-_tX-FLy7QNsn7juDb^$kxNhgBx4SsLgydFsE%`GfUqEQAqJ32CW99HYh zMgRiV-&u({T?c?flb@dth&j>uX(Jf+h&|0nPB#GEdH*`+Sy6*7rlHJF)5o0$=Q%n2 zer*e;Qz0b`|MYs+Eq80%s(dP#z&kJ;t+ND} zOAeiU@pZ^F02(`z`h31$EX^?bd1Ob&8P-}V#!N8<tcC(;tjrjgWE_7g5u%fq0h;R$B_THdLuB%V`_@v?pH+6-gW>tS3p^=h|kMUSAZ)C z74m7CKLixfg~bM~8@)R|hWz|%ia{00+0Zu~9aG%We_K$^I0OJ%UY+ zoLlVy7tp-se}BLB{FDihIO50xLM*64mhFMnfKtFtL}wQlT*;x)(dAl`feyeKJU?Co z$>d*qt?qidYX*4SfWp;BzsuRurmA}w!uJjEh)8g4RMjDnoB)QZVFA#7$KggbMi$MPFv;^*;Ht~wHBuYzQr?RN8r2} zyTxEtk{^7+-yJMCT=1E+*xU}(+E>Q;4C+jWdP30wO0r16(@d%Am^r7|@6!RYlamvV zR=p5N{lWuqg2E86R;5Pb&{w))z=smY4G6uC1K3|;K@=g&o?9`QF0u)NuP06do_*>U zd`Fm@^?(pJq_XG8D<>z1$NCZIPXj`6EA}G4(G5@TX`dNGW>uq==^6mzsMKweDEQFd zGHFT2%4*;ZYn5%(|Esm~H@3&Z$(2r8W^%Y$zjtu~rkf2Ms+mRrt_2X)8R(fSOG+UB zJIutP3&4wBH&**LRu_9%YT8qi`2T*&dECY9ngE7lo}`mfQdso9{Vuo`A&HC+g**Yp zNb60)`}cD1r4Gb_Sax(af=cn1g*EOul~&PMcnK3Yxtn!({l0%Ew$C`=e#C&T@czXh zL=RX8V;-fQ6k=k|v{?Sy`J`#EY#M|N22uFu0>YobH2sdVQ8bnq@~3X|dZTDuNhe;C z1Zpg?WqS`RtJ0-o#n(>!Sx87ofDsz98Abz%IT|{8VwGCl(FM>w)@4dDeYNGvZ>A0C zfdN?3$7*1Dn<0`?Rz*sxA0S37+g~e}r+QlbXEM%RJv;&~Ho{g`R-#ov zuv1S!@Jh((%ryE`#_r;uTP^x$fyCU@)Lb#^ZKZJ!BGCGX4e42GwqNP6(KOzExByc6 zAj2O}J?-1erOu!sVZRq9fHz4Q2zU;|is%S}v~5H^TX1Bk!~xS#f=~BraYxz6vI#)R zTGtM1mHv?Y6+tT6>AlOjhyAmLox;n)i{w;;za5}2CBp=abWDHqv7VGuPl5t}&}Tst z8eH}GfgG&Q`$ybf)UB8zjhQ6RuP{C5C&3LzNamjs4HWp`)#`oS8NYJ#!YfP0EVBMKnb;F)?v@Ss6eekhuGs=cOT0 zxbB;!>bv*tv{hk}Ql~utQavC2c9OB6wBy;F_oMaWQx|?Cwsq89m;D()PTv)u6=oa_ zXh&T(uRJNp5-n%PV%gi<-mNxwox{#{lpdZi%kqx|pz9f(7jIHW-VA?tD!c`*7s7)E z+J{S5>*_x;bP0m{tF9*#s4Urm1R#{$z!0x3dMN`yONCT`=zy47jCT*vQ~`MOKxm8b z)C>PVGk>K&4WJz%vabMl;VSn=?7`mv7%s>@u#V>iE&^K$J5+w{7UqE-g+^#WU)lDw z{wQK&Y03L^&lu@^UfKM$u(0s8Y4qb&?Eug<;Hp~_3;(Ahv)kMyNvP1qLLG9n+IZ-* z_M~VbJ@CauN^T#>(Z;*}z{lSY45eKmkmJlwzrYB*5oSO81ux=cn5jC>M-FmTv3=nN zK)eW$wgY%w+$$sffWQpV+0qL!*O$t1TKb z?IAc!U!9!$sH@7j{PLfoaUd(@Y8u7goQo(dx)~zYZTP2Hq;q2Lob>r|Adc)gxjKLu zFYYDjJMb;C?^^4lF3{AXx@V5e2Ug;;`aL!+{a#%~p{TI&wr(J<@4LtnfYmJ6QXa;= z5;R}49YhGMp%Um=AD~Zh5hV%2JBbUuCXeL}eq~$7uf-ASGOG$m8!k>wspzUw$zq#~ z+P=ld`5Qo^@T34Q0~iY!17{3xBt#b2cYy+^?q0ooF>|!Ab$#FWMI88reqNW|@yCPt zu57L!vSMu7x2FS2PhBEp&ktlKzAJ9?S#EQ{Q2)#)p#9eDf1A?Q))xPWC!5fDTe;-$ zIIM}WaR0l_9}wJ$BNO>(dUwhx^#}sLpPj}IkrIWI41^3MU3<8S%%1o}90-9xwBz1t zNu#RCqU1jpCi&Rd0#WB&g#i3RL}U%-2)@Vq_qeHe?6Q;OMq9oMD32*q<1Mo}?qSN#cK z0DW+ji{OQE0@Xi`$?!*i7@#+N?C!JB;#8JT3DTcOzPn#qT>SCxsSD$kUyiq*3=fUT zCg&N~{sJBa_ZEBGf`5K~`dH0#`9zSIQnahvd9m}{abrvwPf+Tx4*->Wjt<05 zSt;!pDJmdE0&pUrj#mu^@NO~yp-W&YY$1`LDv?vj5Ps~=5wUGty2AVwE1tKG3G3WL z@mQ<(xrS}|O7N2S1ND|e%hlp_N>|3g964&g=ho+!Yxh?kfHg_DlZlw{+v9?F4jM@8 zDc1*fAb|)dV$oXeE}&r3uO4pK55?tsYo}VI(zeP?q$|OnRGt`lxqri8FiQueA6p

m1_KGX}6TdoJ)({hu88sd*2Cm)Etu9;Q`*&?n3%4%J9NUG-Y1EpmFl*7axE1s$K6|N7w^Al}t19Rrf6F#Dt_WxAQw)lQ&* zJ?T6j2L?w=6j3R_#}=}!M+s3_Vt*yssE0u0SXQ?JC*&c23Ab-Ej=y-AN4t4=sOn1- zxb(l#2AEku#ZEwxv63xV+*~|h$fd)SGGS{)7}a*ronIDi>gL@&>13T9jsOYu1Q~%M zA-iet#!i)0_4l$FW>wQ}H1XfQ*V4TW?#`B)e-#m;Xf-9uy#O2OLH!rS5B`ug;utb& z4(M>Fnn7L038Mz`Yqds!a+I6`uvAAmpjbZX+{2DS?2(QXFwV3dh>k3!rTlHKxA{8`dW8ZH#*9cv0~1ZzFDh9hXZu~tt~A} zs;X97O{B&B^r=absZlAC%8)qw<;|y$*KV(7bDjk)m~hZKo>zScjDYos-vG^&1g6KM zZWK)VXaFjN35>M2w`o-CG&-Id11mSboliWkm~^2ce-gP}c$6w&hk%!z&r@03AI=WD zJbNvgN}?t8)sIGnV%~w@m`!fA^yMbgeOM*|!~bC045tt!7|Fc6g?IRIsS&oNSKvf~ zj7xz`0*5XBYl7?4Q;;a;pe+`%3rs&)a}f;<9+ZJMI!HGhGao2JK3A_WvrKZ=#Au>7aKS=Y5rx_%{<3CLM3>He^ee5BXv`@Q!erOJURr zFu-*jV_(TW{k5*W@e^Mjt93piQ!@*Zv)=_Sdt>*#4gvFviwyCPT>P>LpxpBM@`8@7 zW1h9mO!`R5p}KBEnIErEne*OYXMO@QL~78N>$r+OK8t9eVYPLC{p1J`#)o9bdgC58 zVJ9o6urrTD9@?~TcTP?NT&LefY*b^F%Ov*8cX8n62@>n1{Ibw(*AKu!+!nsn{ ziMXNDcpw69!LJG4wkjLc7!@=y?_M2Kd08a;>!IN8>rjQQUb^*6cDYOHi!Wu{1Uc+5 zAHS@rPdslaqp{X}5{j%MBn0!ORck0Y+OEav&GJbBp90@q>=B=d0P5GGMErat$&26n z0SHpOjo%3j+;4XPrpIkbTU;+e0Bo4i!1K1y!S8-as(>P1hNL1D=%`D6P)86;JK!Qg zvq}F9qK_;qW1yJo7WyEFH8D2wIO{;>b3hi5!(}z~rOdo}iOhQ&-y+MUsmN-GDzZ%Q z`6z(wVF965>*yyZ^*>Ch;kB;Yqhw@ZG#VhJ$bvIawz&6$O0*P)i+Sq z8;ERZIImmKBSI<=7kDw9VnGz+6jov=dbBbqtDA?9N|YZDU9+jmy})oNvMu6|@}I6k ziN(Q2Vp0|A@4zK^afm+mkNgM{IQ3)p_360qVHu(nKTM3CMGXuB*!ttQ|Cko>F(X4W z88}&@wH$Mcn?)dzRuU>qib_)H;xs-P4k2tkg0BHttgEnm)L}SK&4Ju?+mX3sk7J}S zO^;UG9RSSv1%V|ayAOuPUFo&BsQv~d-pF?jawn|bkf8N?^ev#=8rZ7sIM)sz*ebp- zMmJ%49aoSPfU)mNg;B`#I_iU!a@Q31qa^}ADJIYd1+x|1scJ3Ad zjkeuI)^86}BY@OY53Ec-lU+6;uIywz(ji?G|`UlaniP#*?4I`vG?$PgZB^ohQMgN(tf zP!(bLs{(_9@rjf+~37zLUSe zP~4ZrAd@Y?ibjpO@=yynJ4szV!90kh3fRr(+?&WsuG8JV z$D6(2_f`5$md4q-MMR|MZ_p{CO~3M-Xr12O+ms*NSHwo6T72}LMRLjHAn)gsCNR+! zBW-`%c{MDsbSUr%^>f5fyvT7~IZMsLC^BL%$8S;uGC5Q3DHUL!7_bCl@fCIUa_OG$ z?c$q{uW0mc5dN*efN24wzP`Qyp9A{gD|)9oCV{MR)$1sRVtKFKuIB}wq0QxeSB6Lq zqKWh_!>54k0E|LU{d4aQ+EM++fi16xI3+G_?n2>sgruaT=!*x0)@9gp`+omlQ4v6) z8Gx5R0jef5vvWuf{&MtUX^>!u*u76q0U*;)Ek*+9acl1{d!46dw{!8ZDyh#>S=wln zU#50F&3bfQRsw>_v{{%C`H!A3ulC;4;}ieAFV;s-s)m99DcC^p;^)g}4=Rcrx8kU& zU%r(T2)U$&}1f-dwd0xa3xGgIJEif%PFD$-tF7x<#j0oEnnf zvA{;^U#6bf)4>-_6$TyojXdV#CGw2f08+Ez&5js|Mcw1a%)O*p{fm&()Z0QIXHt=( z+*bmEdEZ8C?QXZQ2)n>pphc!v@+X8ukmDV$)$cC^JZ!DFwJ+U?89JK9Wq4fmAZ?vb z%X0{;QxBQ;iZv>0+StuDF+Zv(jtYpIkcg@U0Jr0xG=I{lRtrdP{rLUjdpFVq99AQ| zG|~qWcTY2a#ElJ&0Y5@U_3!H!1pWFiN~epYD6zUGH2*3gXwI)L&aJM(&ToMXHIWlL ztasm)kF&nc*|`pY5C9>CBaAQrNZtL$!OVOd6!zNtcQ?x6;enI0((5oltPhx0aGjmi zERGz+jt7(m6Qm(xua(-UWr4n@-sx#1pe+VWv98#A_@>x2bD6I((O1G&)EJnwl+YoY zfmqSX?vx>K@Za~>3TW+!euv|w1iR>;i0O&TkmeU+Zve6np`nkGv+Uv_fc9xlOclmF&{5M@=vo?$d#y|Ru zhsKBaOg6-TKB^E_9IcQHIY<(SWyxPnQZJpIUC6vHCJ~m)5j%Rb%8EsZN*_1H zt$JU^#PKv=hPECtm1Z_?_n@a_voF}Fx+y4m)o-U7_tROBKqy1Vf0S0$BfQYTiU($( zKwC*F*)PKT;7ky|6K$cyj?^)Y zjl)Q*tmK|6P z)xKh`0;cX$4GEl-peA_%?8`vN*I~j-{Gjmpvl!S4DmSQDj0|{2EJ2wx6$}X$bbh>5%Ietl8yyby41h7`{HSTD%-W8ClcnENkMF z$I-zzo3;JI_PUN!lE7** zmlY?LmQF*hzR#FiN7_P@E#GL?wtlHn3LE{d05ulpQ=c_^sS4%#;mIN9XOkDQ$n~=T zvR6S9Od)B990KW=i9-J5-z7p2+2=-HH)&kIbZnl;u%${;h6+~cKTdKs=MibIAVZuduW(>Rf+fTn@EUt9JkL^tZ7Uiq#i-vOpo(= zD8|2rEi~HusoRx{1ewmhwI7Gp)9eZ`Gvt2=P@4W6C%ihjh+q68UjMC;+|Ep-AQJoC zzQ8^^4dkFq_r80{;oJG?xJFln&J=oamdjA#dwd0Yd7kNzY)XJk!G(3!ID5QY<#TOKfYIIHZsxhGTR9eM6q7&G+If~?|34h0^f~JgWy0QS&4UQWwTl8T=qQ; zfX>TD0Wn+4N(_ps&^sFs+toG`5ApXFyCG;bNjoX$KNeOQxb>i z*vzp$-@kfx5I+|c8`p8%cMDA{BBmatzrUD>Hw$;I^13ffHF!3BuitE1BT!GH@4cmf zpZy`;O0%@6*uH`mvF}mHN7(Bm=lSOPAS4=J=U-WBvihEl-je#fNwXl%|62m`ub!0 z4KBBg)pn(aaI;?->hja3&KP$;x?XBJw9rk5u)!mPO%$mRmR9{>PzxYNtFIkLSth}w 
zP-aY_G3;j5+b&4H-&JB!X2a#y`zQSW`x2!^f0d&t8fw5h{$kCaa3b|D z&vc&e4s#MV%_zG5{&C}y*C_MUH2QUXLGTuRhlENrP5BrDz4<4~HwbL-JqgBKOPS#K zIc7$0YN$*mIi>2!&@$I-udR8NIg^*AcrX%-j0S$yXIZy+gxY7UOr4;^ zQTvr3+B{>nNGG~11X?u``_Q`gaT`;bYOy`WztU{_EKZp4q>*i=ZRc@^?BtYvC_$qZ zz|iqoq1?hzj%8%Ea8d+t`KSRSC=(8zC&#!xi^y#8>h;V4bDMoG{XR@+)2fo{-xI!u zE|v>>?R7o-UKNlst2rf4q^0k>F>p=IPpYgfckUnWvByJ%1G={baXHsXuoevK7D$35 zN=zP693&CmNq?+mCUL=u%i-VxF$&yYdsjt8K;IWDpbU`1E?=MSW|Im+<_p0e2_h`zdQIPjsn2`25o^+6NhDtE$nQPzc zV>h`vcU2`53{RdO?mDN*O9+qf7~MZKJmezw^9r%BcD<8_F1)-MXX;1ajm;r5 zZDgUJZ9cu_&wlpDuqu6G;RQ7n<+Rj~>E?m&SP=+Jel}>)*)wJOobsXfKL-HNiD>K= zKjf*r3?p%Cd~`R{Bi@Bt6#Hx1rxKZtJecZlvqYsGJ%GV)E^L=nv09e+KTVT$~6_IXO`m=4zP(`sUSr) zdX)BUy)le1FURSFbg^pO!`0J+I39rN?uO*>ts(gkRLsc~+Yjid`*#cfT6G$c(U=XyZO_3uwxz5sg_LqUlL zBR{cn$1Qt;L19^kUzgeb>l49Veg#x`kR+nv?H>jTL{t!3&j&4b9PJ)^|73pnK@bvo za-RBrhY56Hb1apgM!PO$2%zK%%1 z@#jfc_QuMw3Dt2}lhl%oyEoI2T~K85wV&zO=4Z+-Qp5op4UIyHH0WfuJ7>l_bC(uZ z%29b1A*ZYFDa`2T9fw5keWQ(q8|3H^sNOEmdll9D2qIYLTHkhHRv2`BYZ@#Mmp99s zAUZxGS`cJgonD<>;A|^l*+%3broNfIuN;d}3x} zjIf0od$TCjJhc#)0)2hbV^Z;H!PG3G$RY)dq_fcr8H0hzrfdlezc(@O=k0#AIGO4H ziqWVl`^7o8#Erc&&9mrpWve@}U6${xkEfdQe`lOfwXGOko?TuV8NR2YVWzo)JJ=dp z{^zbB0qE3%M49Z}U5|Y2M&{?LZx<30a!8?`EKng)_8`$A+kDPU-Bv*HsX*^5ol6d} z8V?=WHU-B8Ye(@H^aGZXl8P$&xR%L5v(nu^(mkPYsk#HG81$T}Z}~v0-wma3WLzCS ztZdF=Vq9YmtRBV1U{XT@e^)7wZMu;yG}N{IEWm2G$?R_xwKsQ$w>5J+FOND}7nyy;qF+Mx#6P6&7mLC0;kLsY*EVlS zjZbUiI`1m;6$YE`QjqD%3wro--wIFMD^Ikg%$I~7KVC^6O_bVk&( zmIy(RGOwugIgzv4OXloa6WvYmPqb1hCRz7Q4(0T*hzRJ6c1ezKA=p$I^8K5wNu_jX zjZ#;gcaP(wZDNq1z#8Xi@!A*C6dbjlwv^eDKUqCV%a%ws9DfeASfSy_;8L6%VwB@K_G9&{g?!wbvnb&qp_@OH z=S_#ngH7{Ke{1#ROB5Bp|IZI-Iv3Bw4dQEdERcxre>&O5n((!5HXR=ur=fktsfbqv_kG_kR*l1drZc z;E3Y*Z%eVHjzaki zaV)KW|BAKmtut4o7E7~s;pbAHR*U_@$y$0Wr&iR$wNvLnisdThDn-bLZ>N#gkHc*c z?L{-01s-8@9;#sbvVd!nfB)Ms!zhc&vw3+Ke$2nKVO+Ztv{Zn zi?ZG$G8|d%5l@^g78e43uVujx=-}O57x8t6vy7jpk{hL0c9#5pU#n#z_=LAffQFb5 z@GkgAjl|L13+iBwPz7%?@{c;UL|E1F`H>*%oRTcE@<1_Ar@(m<0~$Maft{{_kKa6W zNFtVsf|6Uv`5=Z&$Uyfh)Jz?YxL~ePRpWzJiKlOj4VkYeunwx(T6;!Qr^~sw?UVHQ zYvM72=?>|uV7C+BW1EBd})drhUd+C)P?l`iQF{c@($1lzT0juMvtK%A>j%h2a zXy@sq81Bud7cflgV-SzAN_&J}wf(V48?hC`RhZW2pAEN4TP>Qsu1GUbJu0AsUG7!X;k=e>hNs4wNATvk#vou zAdJ90lLk&Rns}A2i7we@TqVUWOH+PAEE&j`d^ToA0|DL}(=&`4;n(SwAnN ztfXyh!<9F}c%$7v+I*b)yTh>!JjHjqqB@O59C$iay8Ph^Q>JXiosgCi2%|bqb`}w@ z-+FTDsx?{qZu-)RdiO9*g6gBR$F~PL@pY)rL6qR|k+-U5yBLCU(Jh*of3tqxcjhX7 z1l+OrCu@!^&wYNAYWCj_e*_)(sqYkGvd|+@x2^18+}J9{D7n}#j_7Kv*5@)A7VCj= z%#Cfi(l^S0@7KCwpBf22ky9xHR+a;|iama^$%zdQwsub~{$9O=^SVCprfj$JS4+j5 zcq{#vU~WFabdLA~_u`SA(>8kiGhLzotZZKn_LCp9=d5NXaakpX2;a3DJqxW2{r!8f z)^4dwtDo28MYt$D;(4O$ek(6CoT-ExAD*R7_sqq(=fg`RXK*waBK0EvRsQP54K+9f zls9N*sov%qts1OhZroCQ{44%zl-{?CSCrVfSB@WyzW3UXHNK2jDWc<7RU?0)nrI>} zZu06>#Q_d$p>|v~EUpn#i3aKCIbTK6htZhJrsI{n@z?_W2$H0?tU9YIbTsYP$HH7( z+${kkhqLNf*Na{|{=``GI_+qvAhVS2y^vU;NprSL`Q^YL#G9&Xm zgy7R;FTzXEjH9&wFncZBkf(S-Iv9%)OS+$NB;0a*T-F1h*a4|tO zJ=#Q42n6aKMu_=~Xmp23zraMAwxlPz-0M7sKenyE=5BDj&4-p8$iG8|wle%@#QNK@ zRn^`s)D$Ka9_V7bdRaJF6ACv%e(odVE6)i-G3@;i34h9G+d(vo;&(Yqj8vjQU{K$o zzQsIav2isGN4vZY3H$rWpuiCFWl!$F2Z1jpDs8TVXcVCD!}4yg$%OpAx44#xI8&%u z>rU02uFkcA^2n9JEWA0)8?xqNp0`~%C)uB3=|a{wgUL|gBFA)<7Z;VgE=3(ihc7aJ zy*?JHT{%?#8U99s?#ScbncgJQg#`pQGwkP&gRMS|UlnATsGX-YRp~cdJH1yWym6{4 zK35y~DQbK-3S(ZP+))16oG&aN?zRv}36F-Ow;lKVKoc$oDQk(+wEeax0c3${zD!sd zzXczLqa0?FQFb*$#c^N0*QkWS4t%ym_XeT*tny6`*X?e`fH1H$jd9N|y^W=vbp`WF z{!j=G!RF}FZmJCy2uI9u{Cw=JsvL|qjx_#6gO=MQDto+me{w#+?fUnX{c)X+$~>+XtD<~~z4GeD5?MJdeJib!_}X}=Dbfs5-R4Jj3oyKk1J564w-ULT4?EQb8>tD zj|`emLtpGV?}qLupn~=^fQ6i<9MFnx3%pwV6vNoy$Ye~RRURTgzxmVd)8F5J$dHx5 
zWX2Y0h0o(PnjUxX?|o3}341qll?enIPsTJ2mg-JCXJ#HkXMyQ~-h_23p1~rb9OLxV-3n}wk+#&^3|Wh#g-ok6^a(JICA6Z%Yc@T3 zfZr$VGY#^K5#EbA7uL6G_t{$0%Gg!<-X{aNE3k(RfqO-aiyYE}ZEy&~+psRjr^PtI z+tDo>yg6jsADd!SFOYbmZD|sB7b=W87`2#Ntbix8Wy#>Ij5_SjuNlOBO7)iB=B>5uaroa_4E0+@jCgm}K%Zq-XRf&W>uAF)r6!LD#NWN5H%ERh8uz1%2oA70AC1QiT45 ziuO{e7QtR{&)&O%#*xs-M|Z3BE+U>cY_$E#JS+v{cgm(UwKN0*-o00bV%+z>f2$b0 zqN$?!Zj8hwxx)8`6L*B{m3;ke!o@xopKFMujw_s{ zQ`C3VS=L7!kpI>GcBtuTqgIQ5Ab#InGq>bhUTbMS-Fm~MZ}iM?dgrj2Qov;MSp+XX ze0&*=WZJFQtpxaMS@GA)eb#5#aB=-I9#eHG`a-ys6KFfd40CqnetT~OU&u6%S!-Es zTV+Uon31Ba*#5FWM0>Ge*Vf|s&o5Vg+AsTx9!6gJcKUAGAAU-P4#gWPvRbx0Ls{o5 zeHo;)Iuq^!r9h$4RdWAbQpbTR-Ahr)j&ZGVpEvM5lgFXF)}Lg&IXZ3mX)I6W^#_< zR;734eI*Q1E-GR8^sw!YLo%am)lpre111jv(m;qr{2D7oB(t(k*QmjA=6D(jv}}o# zB~_f((m@YWVznV!l5b7^^Mmr-0kbupC_RPztXncXm?99;w@YuOr=n~}2@eke{%^ne zZEuhS2A7sf7njNyQo=+06!a5cbIke=($aJ?I=fv#gS9|5^=TXk3F-&is^Gdiv+jfx zc^V5H_|p6`fKb!0go75&$*$8qiAQIn|6%5bWT!0MIQ0au8KBH;6gb;ZTTy^eybVl8JXl6H7NxqeP*tGpLjvNGuGPedt zv&25Tx=foKyR3wWN+Jc}%YRo9wEjuOK)p>@S=9Zwr$`1d-9oAB{BE~PoX8{zp0ZW0 z0&RTkFo*CRh>Pr2V$^K3Q|&C;4C6y&i1Ntq9H4rvSlKU`yL1-Q2S<~`QLTeVA&4R<1>q12HOWap>^Kn3-X4+!FeJx{9Y)`}n;bgi9 z3)GWhiO$-+6(~p|M-#p$=A4(4$c>yPcJ!L)z;PbON?X}*RzV|2+{43PDs-Fv(5Y-HmqVuH)^c(%Z|WS)!H&gbxNO z@BXyK%%yj$DxZFBE=SWtZz@A&~|mzDNI z4?_*m+EXi@KRLU@!U}|>W1<*1g2F?}-iF0YLUXh=KlcB>O7yO#)#b4KV_GglqWTH8 z=Tm?tG(-u!Tettpu9_-FMUEvF`tcmsJn{wk?&z{9gAaqnWu6SD7@U?cx;n++t&AX+ zI>(B;ZgLTR8Ix#uWS|Rl^*tCusF=L!HzsEP&8D=h9IFSr$9vu%k*B(WxVv>-(-#$I z8JmQqq#)thW&}HRyYR1Aoy z6o#k!rbS~i*I)t^5Lz^ZLqLES6`-dMI@!=ciNb8Z+m|~o9MHK?Or3{$^;velD3kNJ zinw~$?zwt}kV3}upR1U9c>|i!On$Gq!us5%T{~R}b!h_+i zFGJ$7VvztM)bu;}_w9T;uhq*5O;yQ6-GYc2M+Tr@Hp_Rs!*GtBKW^GN{Eqg&rvd(< zKh7Lg?!1DOuAev4a5EP6j`h}!P_4rUl7LyBumHMC=D)sc%T%UtURV}oa0$-X0;Rwo zEnBN;N(U(`L$(%1jX3Acrbk?LORI3YkrGQ;UB2+dubHEEhmOVpEZqN|;||o$?W8Uz zn{W`hLg=`V2qnQUsmX{5877%+$Al^{3#B%ACu8*6oWzh*(9z`&?%X9p6?;iQqsrun z1nbEM3<>Y?#wkU?yX7r9bTq0^Et>h`pEj<#t@)q9H4DmYp%2DdKj3=;$%6xl(}gqN zR#aFGos$2*&ri7McC*XLZqFVxZH1r}@O*1V$JcMw>B$Aerybqz9vcM` zN6kC{oaJ=;B3li{i=7yV95OCty1(q<-|ei9h9vR&J&lq@>&`hZ&H*3UR#vGoxw!|6 zeFZ5b{sVmEIP}uBfJEXGp|WhiR+q@3%j82EIK!%bo;fAsf+E zn^ZA2%^pu{uS+3_r2>`Q@8-+G^G7!1a0p3H-kWRthr(b(W;hA4BkxuP#I_rbEfw70e0blXLFe$MFGD;FswcN3 zVO$}laO#oyzsJ6B9~zwT*@!^XTWs{`cJ9Q_X(Imlq}uVa^qd` z#6u(F=^6t+J=KlA7cumzo-XF+lHmS(38(bqel0gfX*T2VgFwmz`=+i9)yMKmOeXz{ z2De=Sxb!!@Iocm~q2i{=UL(3E3rW#CY|3*)HA7x8|JaKQaDCj2Ccgi09p@q*L3_Sz z6e8R*pXZ;LxJ!-FSTRF{gAaj+e7nPW^zA|?Cjm|1!-@8Us%y2FPWKyNhFZgs8(g`X z_dJbS)>>J|m*M>)-C2-V;$N~c3I?(LSRj17HKZF0Ggfb$+b@~A1jEr%LPzp-z#vh& z!jPZD1gYA+f}CtuGY7YClV8gBYra(*4EjI&zbvW#vzqg?Vz8zx1qbnB!ObBMJRiS) zy0lqaOxRg~AyjugTpjExRbCy;>&?w-bY5+BwQpJ>k|$ZEGc0KUkJqMeub|gBDhm$! zM2gS{*DOks8Lln~|JObAmWI=PD)V^x?e}w4_^53NXtT%bfh0!8^wy8qp9QB(3MUC3 z)V^E}CRbCZOlP#6K2%u{?mgI=qkE?q4pKve6D_LAUD4Qbg_dyin#s4fzJQQN?a{K-;HQ;DZX78%wc_AKqsFS(<%KvVo zZ?y)2jmP!w&l6P^Jz|IFLdW?6%%bb@u{riMe@98={0E@>t73t9Bs`h8V|bm+qTSxZ z=WIC=|GHP~0F70zv#fK_O_(bxVmf0@STG&Y@M(BX{e8OcTjanV`IniiUALD!JPrc! zYR6`kHUQ%C{JGHW^wsMc@y*M{n*_$E`!-YK_89%}HBAicJj&~p8tU?uz0 zXC2t|n{tqGpDUD8%^+Uxu@N(K`5f7&kW=gr7!DI%36zw4yN(KlqN@!D6j+ArSGKmP z4waXFyF&Ye4jXRWi7)-wyD{JM3tf2dD22kM2H%RnLBqaV9}oQ|lGa56@4F)xNetQ9CEK@c`52 zjto#{zb`PXzWL?gYj-J!7AT0nbeSdc{MdDOvbrrlJO3)|s^hNmS@-dW!{e4_KwbZk z`D+oUqe_w6VUJDFOQokbe19$tZtv^CAjgum)~oo&LMi(bkIt)Pkw<$2*aI=YljS_) z^JSd(uc0`QD0u(==VRC&Rl6M`R)A4RVJq_-vf$q1WxI7_@*ob9cm97J1YO_B0?Pq zl2;EdV_dJ}F?SIvGW$jPrc6atG^3P?O*Ig=npEcve7Go1IHG;SpOcKRrGxik>R8V_ zAJENlZPT^*~=-wp&MPpvZea&Te-H3Y? 
z_Pi!yxBtHVk%F|CRv**f*DS5)XJ7kI#uPt$ll_go2{G&SpWWhHm~RCh`QbpL?k6iv ztl5a50I$c$$X4SP_Cc?jL=Bn-^ltk*tDHq)fPqGX|$Om;->%7x{dNdab)A3D+YeERY|BWUvOFgrd{(hPrKN?)14^>gmmbC7VsEg z@U8lLx93~;-K-P8V`2w$Xn6@yQE@%pKSAB2UH6sdELUYK^`GU*pnCCF#5(0&UB`d2 zn^p%jnpS_UvXoXc2<*c+!xY~D=NaoVdtwK4&KD-J zDzN5hzw`wzH-#nOrdZ@M{Z8w?ifQfAa!A|BsOMqJ{EvHf%J75ieKIskHr#~h#= zBpsD;xZ-~7`mANfyWVujdMp4PC*tYS<$HXta=o-Jk_m)}(QO2WP@xN-tvlTMz6qg( z3kLIOfZdvQMW4D}tlw>w-KI(*%YeWTxOr&BaHyxz1S-7abN=TciN(C~sDtut@c)n~ z;nANQV~o#g&FW0LbL$%PAfvY#-l0Ag`I^hur}qe;K=EojVJ-^D*vx|8{X~-zWuV66 zpHCx^IPl~}inan~@F2_3yj)}I{sG_*ElrVg_Zf$}CS2w3-}~J|M}6(r%(~4jKgK~| zbv1RPkl#BB=b0c-DeQRdzSrO$Iw5%bgI4&S@pfLrM{PVbPg{mWo;x61dvxeF z$s6f=4Sh3S8@X3po7Q>!lTgHik+V+(wXI}oD~zBQ3Ck`gper%o^QlAQ!0XPKIPk9J^8z#gGxf7z>OWJqsT$wV$Y zMEtK~ouw$_Ygt$bcNys&4SZk7?K~Phk;OGWpC8&0Ql&=kB-9SoZgg(7e|!+h)wI3q zad>zrKCxo1WmFR$_-iIB_S$;t>a%gx`Zq3Xo|5Fk9p zpXXWc`M_Fy7}m^i&i?PX_V2P#xhM-_0zc#8j?FU2?`N))6R7YUh`aM`wUu02=ltp_ zZzzpJ#IL~p5sXTM#uT`x(}MX73yMW`NO2uw{N%=L=O{LRqJH(aK0`^6`TriToMsgd z3d~RCf!{jY^LeY7E(8x;^$a?wtab1H!DgOT4}J^WB9fKM=Vk#DH%8FUl@?=9pv5`X zGEDxCg#)YT$lJg|ns#HWCkBj_tlnnMjnIC=H@+E)+oEo6Uhn_6bG`HLSYsXq^wr=f z{pr4`Fy>)RyX_X~PS_av=Rco)DnY-#TysS8L)|*L)BfEcBVTE@_QQoX)X%%?OI?>o zmvHm@cILbiYELu*8632}dJDza)Ub=202PV$mlHAvN>`0F+0*XC68^#-1MMkMjq42o zcqy-^vRJq!*pma~aU?)h5r+xg!!dGV{b}L}zVy>C*UIwN4F@LCo=dG{IM-(l&t;hU z-yHx+q_V5-M*Cc=^_z#|2}?ny|Mw+rgq0-+cmit4gw7tt3)7j`M-euM)0=yGSqm*t z0oUCI?In|Ox{v5zOAk*lh%lz+te*K)Og?M*1oAvse#lB=J9M z#O1%Z-lHRdTYU0lU)zVZ?7KcIX!fDM$tSpbK;xjqF5(AX&rxYwihx<{IGmEB!Pls* z{0C^|`At!E*dMg7T#~GGyk~`}C@5~0Tw*Kx%W4qEmt6SUBK(fW-|W@UKsdzrq-Wx? zIG$q8X)(EP0_O~A-_U+#LW@?uJP#p7oqeU7Z;rB6E%%5+T}zkdZ$0OXs13m zlZ9w*Zv87j@H0>Hf2GUqPsyyPOZ&%Z@rRil7ZAt?IX}hn`GG$+;bUTMsPs#{60FzR zTt3-P0Z$!;pmxrqJZv`S%qE+{z-gcKfl)$kG7!p%f{Gfdh@6`}Zw@EetPn4=*$8Lknow5+4mNr~005w7qJn-0YxF4qRu zZC%Gr1xLsb&mS9hNCtjHx}**T8vzO~F0I{L+ycU118BnDCX;2d6zs$nuD6eZb}0cT z`wLO|tIB2i*UJmh=h4bfy=Eo$eChwMZu}lBCxzX5o&DrCDEkZ~ZQE=&Q;V>=?S&Ab zFj<_Xnt8Ae!fP+6Z-h=MO(8r;k_ghn_YvTNcR{ z9y`VTp^wJ*6CcHT0m)3;MO1+I=Jk73ikH+ki1LRM_)LGgW+k#>&;ZK6-%S}1JLrso{(}S@sf|_R=XTMY?QOEj;s5Ym ze9`^h^@`2n99^8|Ka=(kV1Q*{tk;}lWx}nFkJ9%l2p3JBB##gHa$z`p>~0~}O!gp; zUwrZh2?8V|YB0LpAAD%9f8zhefAf_FFR*(!`6NF94@Hs&l#3%FNDH>c{fjn+Ih@n~ zj6~8Lw_0)o_$?Msq^tC=g(d3{&Ks*V(s*)%6bd#VAcJWnaG2;5R2>1`67@b(@sAE? 
z+n;9n83$L=E+;CX>XOp!y6gsiyOgJvz3cgcQ^Jc)E;{}HbN3&&Pi_QKKkfxWpT5px z?rHl!YWsUg=WlO?$KpvMM(nh-eYZzCMn8c-HK&hLEOd?soenz4g6yY}n2ejIx-Y4* zYynR%$&K|Ib2|`F^QXC0G|+|AmiX=5Yx-I{krup`)l}oBVY{}Q(nrtJN#G&M!&qMY z+c@Y*FC4Qk>Sjsu)pIXh%R!Iip9L;}EBoEF$QE5P53E;|8=vRViTccYQ|r%7=-@r} zlKV`Ll@znTGFtwA*b|{Ou(ONkK%VkWI|(hg%=f$5rO5=UO1{X~cps{O8_vKZF@hqj zPC>LIWc!0JFdU?UR`iNBF#KEi8v;j8N6rux$rMRaT8UYE7f@FDpTPzR+F%LxdK<`) zWJFL;2N&sf_<&+O0OxnyP5Pt|ff?_jr1(-H9`Zp!1ud8UT47vEiCewGX}dqm5U}1I zU+a0fw`)hZIdAoEM`JDB>ELDke)fEY=U(My^y&adnLhsc-G=A?DOSXqv2)Xgh?&75 zFF)^UiJjdjmpfq=*U{{K6)GVTZT%UO+4nt+$pm&>F+kiymOTkHZTc9f7(WaoMm=5c9=exK1zd*6X5X&|Jl#<{JfuIJ zOkP-S5B%6gd{@z0{%bOD+}C=MZ7lA!dw#B==KR63Em1~Fo&-g@GZqIW_!mw4X}yY2&wO883FXn_GuYF{d$B(L;dgg#o&M_u4&odUDIzM zeh--cYX0Abh6J0@?GDG2qu)gRMrCwSqp%K17(tST27wRz$lm0Tme}dbHe+EniRvls zS&jzR>Cf6?0G=~7xwf6KsO-|{T<-IEr_WPg`Tb)q71}PFje#n@FBI#~RRnD_sUy`n&<%-pJFntFqtfl+ zS*@hsnJN`l7vvmfy}sh%;~%p~$l4{pY2!HXglO*N?yi;l?j=giU*}^GaWgQY{qL*( zM2&9y0ddN>mt?$c7DVJ|@Tjw%=>PdOZ<%+a%T!{PGwSO-F?hw{u5vnzPNi+XHBEXC zBKR^8XOKtHBEiTlNcm+_z?!SJm|G{0wl4fM?v}p5R0ri5*dn~3SV2aE{!^S7&r*E7 z?Wf-Wxs>fO&#LZFVCpO%Bl*a2?{P4o_4LB{Uh1Q+d35&sAD-;i?uW*srRMIdZ3fry z^vK?zngy z)r_G3%h7Z0udS=c2ubK$38NTJa(sO+SvLdYI1XJj*$Y#(rV?D!~G1_@4Xo(?JZ zJal(56*g$$a4b0CRb*lP&($2ETq?zIl|1G!=LNfK3)p5f8<;%0?<*;>>&(`l$=y3G z_}s|n%{fPtY*Y+V0n?(rfdH96mE!+-*Fa#tNn+ev$noAvR2G%w(Ce?hri_jnUS z+t==_qr9#5>PoE9L)W#`K4amYXG(CH8^Ktj4G;$)_$F0dFH|4JEYAq>^(<4l5eoD347E3Yqu#iL%eRi-znfM|BiS%zy^L=$~fP4 zxd`mx6yMCwKZo#LIY?Q4@K5EDYVLxNzsvuTBKOw2u?d?}pdw%yKsmOWGYsJ>3^i7i zRRsSC#I1u{ze9oX5ld0pOJdUfr71p4$a5`6Dy z@~lXogsc8IM*9<@Wh17JnaMX_g3lnQk){6|h}hGyZ~WiLG#4B@B*7iPAN3WS6%()Tp6y?s)xZ{biwa?d;59;?(^BiOL3tJ zD2Lh>dl{JBXsz7Q84WW9HuSyvWqL%qjHNB4vE1%-|K#I$EJ{IVpn({KEqdC2`JCOze=6{$E4*bL8eFzl0iu08|HOYDKeuEraY&5; z)?K#$*5lJncKXwfkCSVlxYsgXVcApHkZ@?hTEO|-ciZrfRph_18)J*FvmZ>(ptV%K zkj>_7e`kxnmD#7;%RQsQ10WEzy@D;hV4@Ar-PoSTm#mtzXU=TJUxNkX7|sAT=0?}! zF??;8^PW6f&0ju_bWxwp@XY%@gV_e-f!?@ka)e5N;XLVNlLk~!Rj*%V4bY*Xb=j+y zi`^2W`DujlBg9k{*DTVNo%}y~0`DjB#dowPNMu;p3%cZ?F?p9JL|VZBOy&Q4tn96J zErO7?%elM9?Yiabyd#HN){}gUr00m}^2e1Cj;9Cg#~oMfQroiJ(;Ym1O|B>Hb zVkx6gz*?(?_*MUIX`k@Z#$7rB>!4h$&c;Zw!9H7f<3%6Rtgz#>z?|Nek~iG(U0%)!C*z+fdwVcm4+F*FrYz+j3d^KbmUj%#9ZX zDP3L+RrU7vP9Xo{d{f(X6JlXzF|Ybh_<4))TbglFJn*=CdOQ!pT^@C^7n|k9d${-e z+;&rveOsvQ;lCc$0!(^ZD{OAoOSa~CH7~>6wz*kbFLe6&muAmyb82Oid7tLh$qVp{ zpUx(t1N_~`7?U)f*6_)L#DKQxC>~ZwctixRVl`_#P6}|PrEw{qdGu(Wm6J!vEPGx| zaThM^Yeo^0Q@=B)3bi5QkJ7*EejN--9r14=cY*&7C$moJQ>5b+o2>QOt1UfBNK9;4 zbWE-Co)I7xk?c-gYpAj+pEAGk$V6t zRe?R3_mw&rOJawWRJX+v6*CYp89J~ILTj;mJZ!Eb|TOS`& zfhTMB;BqYvk5n6&uK_`N>`s)No&^VvV^M(6*b? 
z@`r_&%U;|6oJ3uZ*5K9OW2ajeV|0eudl#)9}0u{Jv^X3JdBKtC>5U7vN{6*dULMK zOiJN8hnf_%<}ZGpk7_-R>kW&gzNC^b)S1kJL6{Ngy$CW?+@#_i?_aI?Yps2<$`oR5 zEA12cpCjbu4cL<{()e$JS(UT*!VQ{beMV)YgK@&7zIb~JWh%WKwOBSl=3#<(6kUB5 z{R7$$ot;M;ED*j&8$(iR@``YU?)CBFVgtSiNW&Qm6pgmqvfAsxKYOOcmaL9-^yal& zFwUDyrK^jJmYc0~fGcNcXh=ubRjeV;cdBTlX~3?n4meP|4t;xmmY%K;6grOnFfm#w zFz}y>rWQ^bF$dE|s?`l?YG}Ou@AM+h?W*CmrhkS~pX>~qBm3)zsj~(DQ>WH|r*DnM zeh@^!{XAn`_WmP~kx}0T1?VprT=ZaQv;P4Ai^uO2mICB}x9H58Brw5yjVS$YB_gOf!_wiWR>E2|fQaV3S{3ca)hSfTOe+0^qJ*KT?MD#Aw&E35?oJ)vj zasiGX(X)8C5U;E74#3{`#ZVj^?Xv1Na4uH$Z*kz%c}xgdNSUkoAx>JC?9-~h4Su`z zPFHNu=N5QgN6^H*SXJcNahem#f_3#04}Ah{lk5GaUTYvh!>+4u#HV<2{#JXx}5TViKRuCE6;VEGwwpcpuX z#bsq>%>aP}fYPR9Wbh~gWT*!A<j5NbBvs*M50Y`}r|HGgW2 zJQFXZp|!QO8t*xL;t5*}KUVzHl%?DP9FPT;{)OLCEefvn@=4nanIzHf` z#n{IdQ4sPvort%&;;I|EBzUeKaMj|KD9y6VlBL^ZVx(DFHpqN5;a~=FfkCfjn_`Yc z;m~^5U+wGnJ$*9)5Cn=;Wnt;|)#Kj9xX#+p701Kk{8sn`-kbrH6##$B%af;(EhyF5 zz>>Hlvknh|bgZhX>M8&xKpX5TCdfICn4(W1v)9{UWz7kH!+NIGf`TJSi!XXF^K@B? ze;`V9)~I*rnlyvttJhzrWpZec`5zn=+n6#<_ym^2-0>?z*LpQuo`Dm_Yuf0`N z)X)%_5u&jA14IyP|HxwS31~gk-97OC;|Ru)R)vJR ztX-cN3VTp2wFOu^x95h3{Ez@y6zvQ?n0&DHg;;<2GIMz#-P7|~`;S43qoJ8u-q0pj zWJ8j#px^Cz1V`&}FS%2stl3idd{wWO)2EPXnp{D{swgLP{>c4v4T!Hftk7)ZI?(U# z(xQ4!f#I&7E@n{ylz?|s0 zOsn@`1FN23 zEstP)Ej!AIQ0scnz{n6PE3@DFtGv<3)eGcgnBVDp(RF@28+d@yvzG;Aat?xi^U6j4{2j+N&|(=lY7G+OjhDrRK&^^z1Qy{lVi zz8{stRj<)+C?d#_2$PFa#C`>~`nW2pJpISU@*950Xju-*jkR^_J@p5vW=~#1$$@=a zZSHist#ueNrIkOoJjli4Gs;)zJ)(gICAoL*GU4`A#;ihIoPwrl1qC*)N5oPY87Tr@ zryJ1@`qMED{DK&jG6eiiH^O;Kw9E7;F+&KX8!DgI zzo2a}D9dYkxUtGc)Ipt;Y6ypojhfv`%L@jmSXB?UyB6hYVKhYm!3}X?K+m{ z?$NBnw2@%iKL&iV7Zk>wQRt?Dj1S``=#!HWdJDiZ*);YLz$xY%!kMXDTc(r#X3L}x#a<~+9f z6dt7Cc^~Sl)HPP-PTJkoD4u$Xifydwht2rT`GXyzae<;_!QP zoko|`4AQX;kc5T3six|ue|IZu1yvPo`K)UC6d5FBDg9xh4hUhTi)Qz~egb61TH3b( zt>^#38>_y3H4fDilK_y|ejgvIHitsd(LprHBLKpaO2qkBO^v4N)KxW^gjg9_jIy$H zVTOff+R#UWO|91Y_g)N{l+=8#=r8Me>*C)9=PG?oFXmARVSZI_TA(ILqj?aH=+LlW z2v2Kc3c?9BwSa4lf&e$LVCWL(^qhN50c%O=#64Btg4OqVTULTuoxqa6sCgkBbP0~a< za^#!_ZP_Y1Z9+~0m%`$|_bO8_)S7M2Ra*g(l27zH7GMtd&R%30+<10N0Ar3Z@ivPy zeHe#EdP*+lkR$_*)-JT4i7`>M%}T5{&s0*Fx5yl#bw^d-@3RD~%Zp z@-43+=BB!4$n_5AP*vxuvzaS*-ubS}6mUvKMMl<#4-F=j$A6f*ismL^*RAsQt|-47 z*ds^ul^_1O-vJdKU|UmGG{m}zWu`(mlh zV{<}b6Ppb-mX>6CvN0(D*40vM>~T{D zKCYsvhaxFCr42Rd=)}F zGE2v;6S(*U1uLOPONk{~HMO-{>jFm$9W=QXx&@{x1ps435=`t#clVxY_&@` z9-lKyNUVxd`tQ_CBQrt2!oKj}!E?Sg_AiuqvI`KwGSANsiV;B}M62ZF zBHu2{p0YNop}xWF?xeT8X~d9;IozAO|MGMQeLTp1n&TPn%l55m&Rz3X*GkQejEOl~ ztX$tDHlo3@x{%E(n?J8F{2S*wG-zoaG={rP1gi8xNxTw;2^kh<-A&JRHDy0^T$R|_ zF*CEw)v$ZtaFEN$%J8*aY({FC@@wKLiLWr zi0oBUnB>SnS!GK(wCzDj#O7*B14dTKAXFHAu(N!5*&SqTJO6jsQQL8W=OC3r2x2H& z<=Pj`Z{(LB;9TT+GT0mAm*YGxU$6`6kIE2%ZV6nvm6w}Op=+y#iIN~aZ!nyiZ*7?< zAPtBP6X(N{^9|S5Mj_#={rS`pOLFQ*&zsQi8L?j+Q~NDgj~bQH9k)o-KLYC%W~$tL zE~h)OqQcPIhert-=bqew8y_j@Pe45uO@(Oj`@ZX}DSoVBzN9v?if4%hh;bUrKlQun zO4msfl`6>gjfsiZ9vqASRyq1jj+qPV4TJm=EiHZn=CJNPD0){F>9RTuk<%oaaR&6) z_a3r%+g1Q+76R}h1z449a5$zqSL$%{CXi|C=^ClBr)$S8?b-dYNBb1#m@Hh!{=3W3 z7+5q(lP(Q22QqVrn{O^-+HNGN1L}I8w`}W-xb47q7NpiouS`J1@~Qo7CHM9=*CZsJ z5ZyP&q4{NV=m z^YGH9%D3SxcsjQ~*ZyXEozTPKWV$X+$Zy{1%Tc~MDdXMk!Fnx4o(Dl8iZzYQ69~Xf zWDZZfwq=YQBIX;DN#Bd@Kqcfk?>4C+;%sbc_>M4DV^hKDHlELZ|zq$t~cv@k-Ji`>qf}KmSWU+sQ|qz zqZHm^(*a6iru;QbE9=J+z$Y>m&Zrz286~^L#%@%rboE^22RSATj|OzS%1#ETWa@~e zI?s|7vjms!E{DEjQQ2Dq9c%+_d0pvlsW+x+p<8ZqB4S=qUQT0X@@E%D0^2k0j6cWr zEh7&Q)mGb=2M>OJXkg++BxI@D1=#lw8ez~T`OiAe@22U@)+H?3?sX5Z#j>=)@TsaM zK>cn5gRvXdx*gosgh}~&TK@n^<;4RjzaA5TUL_=dY}+S$rotPW3K3|_F6)vRw>M59 z%fBFm3ZqbPP&6CAn0pHqVnMCq zb!mTAV7D)NN&YY;!N8^v@Hz(zwa^b8y)$Uma~Eo`8T_&99wSEkl?Lu+Y+*PRq}(Hm 
zJ}=x9O(irrIZN=4HNy-zWQF*+Cg%M4k`%O94Ufug%+o?87Y7iW>I`d2qLNZ?L6;`= zzid7j7ggltnK}PzgEh_{co-L}gK0C)fC3sY*_m|c8JcMd|NKtfk*7%h^m(fIIU~3* ze3za5C5Mh!Jyc)zZf-nwH7*Vhz(ps}3^auNNO*T1npBv)^xajwZOc?&e}TNb&Hr|? z*q-4d7)L&t-MH3v3MOZo^M~R;J&8|H&eU&=pC-QV!(?sV;x#zI3#3022^%Vn3YzUJ z=r5!LFWxv_m3ouIG-!AXmSUn0{^={vN*fHUM@fJ`SBBW#yN@hMimwvs5xL9wx<{4$ zTp&RzM@m<(i=K46AYwY>=V|c6!l6KeJtus;B{KdL`;F)g2vcfGxBB<~qb!RShl! zyNUvTtTVd zlidkDC!;Oizf|n`v{f6*8vzz|WK^`p1hRyJjkddL7?`__kExZ2nsT8q59C%--Iv$_;dgJ&X+zO zM_J)Lps$=jxpzCe9y2pNHFw}K_rC!FvAQ|FT--VBU+2}%oQ{sz)!2_1SU8fwIKZn$ z$JX_0eH#k)n7T2%HHipNtcZ4Ha8Nk2_xf-t8il`H-I9DXg-A)1F&%z&gvM^T@d>4l z_eGM}TKG3DE7sEogCqje)-Y!u#N4@}P;doJJ$zI{lCtDqyS>id9ifM7u{*qFfCpv5 z5f4S$xVIfY6t}6vWMyT9=I4us@a;2>*xzAvH?{b_4fkD1$C2!3%QlGlL8Id8g_%Yi zO}QX0CNvL6w7AVxm6VUUvMZUU0qtNO0VtZ}Xq>z!cJwh3Z7>eZq~NN5rQ=!t?_%c* zHAS=jqwYLuDaq8s3on*E2CSp#o&w;Qm|9LZO^L_Hhr?Y;D~5G(8Y*)nnebD?io2Q^ zFyM#uSBV_ws*b)z_}Z!}(lvKMmnPP08>>6rziHQD)CxDhU>3SSuqki8KmE+nngaBG zOH{o5TBH8s_Eex~RIbgJ@ZV$vVQ!!q_ZyooeWcd^LG%Q9eY~RL>@h~V|B@cuu9oro8GgP+b`4M; zJI&U2ypIgHn+eDyq1GmlcJ^PJt#o7N<>ah1UGG(4`ppz>lLj5f=Xl2B$!@U;Ny#pc zL<4`|W@N>8Z`y^}@zvQ5rOy41<+R)e<4fYYlCq?3g@WXNz`~`jc1DxEp;QBnM7&QE zGRCHIu1ya9*wn(@r@h#DerD7uJz3yY$+v4KIy}3MD!kw0xiod4t_F$krGKN#F8g}@ z=EnEGW!!u7>^-FJUK2+Goa6Nn-5GanZOn{2X3oW)yyuP1@{FJk3e>F{D7p34H#Mo9 zWMJTV;cstQ8Bgj*-^CKzm&!2KaIKw1!2!`)PV_om5rX9W8T?!2`=?w0N?`E2@GfbB zO`$kVKm>v-`GUqY0Y`=)1jgLL6O{>~P2l|Md!iY*EqNb|BY76!V{zyqPf#j3hDEU!KU29Iqfd^_khCgSCD|XP^%uFhI z*}NppLKfyX7Q=&4*cOF_scnJsH3gf5m@NYnUs7~Q?)lJ*vT7s zoCWn$t|sc7A8?9uxtfAtY1a}&(-NlU4IxshOlH-~Tp&oiJ`qZOxI8H?3Ze3>1q-O4 z;_L+?60|+{J5@=a2v2%ukxE(<)I+KA1;uJ)Xf(!)Bv(5t*;s*68+nTf+VO7DOeSsat_sRgwuci~EZzyy7ct_;51A zBisNCFg*lLGkOB&4hR%kV!2!y-UFj30_kU@5Wzg` zg!}G*-V_U6z)1{?Dr( zKXEVTJ?B6+11g3$$%n#1kZTirbCl)~ZebaAR&^Z-i5!@W%GXq-*BO-JwE?+MiHi4i z0${XX#~*ZBcMH~wxl`?eaR_`FB!@_zNja`Ia20GIQlU2qt*(Jo^575sPBL<0S#B0 z0kW)6wmdAHc2i33KtvOalCyz&S&BwW@K2$zDLOhcswoX<#zeS9y#ZNFrtFi6&s!ot zD4UgV?9-cA$c`#`$mmvdhdsnTee=Z2NVS7se*u+skoeI@&IKEvx35#iQ!`k{Eq z=ka%nU2rCjjF+v*ID&6#Dm6_B)l{!&aC5*yO|YIuH&G=VU`A4;IEr&liKmU7Sd{&& z{wta|LFN#j6FE8tX=?51bUGRu_#N{mCMBbgY;=rGdMh~qp~lF)evIXa07q>;_J0Ka^>E>(FdxC1?m5c(r#7YJf+Ql_6G{Hz9cyK z?~PnmAIPxxk))H5FSokeDMO)mbJc+356sCLjE({rJHdH+NsP4BCYm;gWdg>i3siqq z{ET=RO%Mp{j?DK_eswevxeC$bj61;SU68f@MO$T(1OMrkrX@O45P3ciX$!pM#Xyp^A;=<@nVB}6UmZNhV;pEX z(>|Cc`1vHnp0Af1-e8VZ1QX=t=1U4=4EZ&TtiD0vOAgxLek#fp$oTa}ozx)Z#J0fo zq{$C^HFOorae7CY1k6yp;Pn`0O+fQhIJW+{+uq0_# zGmhMmT=ZW^h>Mk?L;FWG43k&zN~ycR){%wD{Mb(V?zG)(feN!+@pWUJ3{nay*s%sr z@_WX4A=i^l`6@q{RX5iw7Q!LL!V2I~&t@rOnQ(x;SDVYJ&TCpG5g32X7qj&@Fp(F| z*t;MUwMZvD!mE@`SzXyZhuTt}@kycP_!^D>m81C|Pu6(BxRKum^BT6lI7A7FJKy)B zbIB@OM8za?=@pmQtb@T18!bt*CIWequ<`c&+pOU zK`qDu7sR?NQSdryOIYkPDYsC}7J5RD02LjrKjkc9i%y$WXcAHhm zffHnh5gneocufbHbYqy{kl8;EC&p%Pp4xOD@vz$+Rv zc_tj4O506t>ZfbKM;!2%X>N%7t?x_QIPHFc{kVc{&H~7_n*Ns~cqB{!Zg0x|yYV^B zJAfagn8$`SSE_krYQY>}3p)e%0Ul|um&AO0Jd;yXgoMOCB2=1i5;JY=Swr6GV9=2% zm6e!R?0ZAkyYWqpN!M!Y=Xz;uYWe7yGEc41bPEJM{q(G|Q*mp&SH%3)v8WL66UjRGwnX%>_Aa}j zj+P!*#ojNu!VvcbyJssDcc_+_%1b6+!-1XC0rRV{*u3v}_%CPyX5}KT_Q*HD(6z-V zOQZ?woND z^KpO^WC5ZN)2~=chnOr85&v4chz15uf77dsR7oIqI1Gfz++6tS<&DD1m?MyTRHV_IDnOA6){$ zaw)wLz z{RQ*jxW%1;z&c~9gRyyXZS6n0o0nDQeM=pgM{m*ZtSiD)P(Z<4RMc6EZ4JFAKaP~w<$(uP=ume|uP1yNF5OYM?!%2zB zwjR8Ft_>%L#c)4ecNcTyt+Sz^;zi`aqKN0p!DWYsm9D$>BXfXmN_SJ=W$RS+cIIOo z{o~rN!^zUw_Kgny1!JG|X@qvAM`D|R6a>E#?sK;^H+_OvRHkfQCXe!skul!zj@!1t zWT3Hs!ty&@A#)CXvlinMAn=8w^qiml$DeXnq2Y~zy0PZ#+x}Kts7rBE`^OjAdksK3 zusHDP;hX+R$I@tNc}?4m*jXfeYSkKbm=W5boh?FeChb#y_MJzI24?(#V$OxFxRwq 
z5M5`KMzv3rB+c|%gMSie&K~&10J8A~5O!Utf+@_9&}-+u9WxKG?jRW)a?vAGnGVk-(zJWeFHo|m-Umhh!3)`kKi%!tSm zK#che6x_L5nC(^2kiQwS9dO>Yos~{OZ55tdOa=lRVBOr^nm11XbM55BL`_ZYnaOD) zV&W!X4R|P@3Ba0$^C+fgL>8zOU+ssffM{OUO#7cEhpbf-UyrYG$AlYLcQkgsz6u>e zPKK?|QB)Jo+`?=PpJ6{|ub0TGmT;LWFDcg1X*PW?DE7P-#n>=ps-Xqy#som&8`0aJ z&n`yU*+uNWDH&6`!cNUVu3|cA2C;0=&z(q|iB1~Akmr1(P?@jxeB_xK8YJB1%~**A zHYZ3=uGspMf%@P;UP|$nyuJKwbRS4Z&k(|tF?%ONs3O4RZn}79jLqA&M($F1f7yXZ zin6A3$uD!ky5ZEt+}VhNyH zVud%3aQ}Wmt6-5PPulXkFAS!tNn4G-@ItdF>4}_{D}KExfM+q5W%fJ_@CwCaKg*;4 zQ7fWDl(T%_w|e-1`h72mklfunU0tg>bzj5H_KWW#&&ZHFnJNK2S(GKWZg7Jp;MDjY zU%1kunPc7B|JSP77=b2w@H@#UMc|YVBW76FYHdZMmubFR>96G0&CfYx^e=Yts(H`a zXe@D-wMC6Z1W6~4A0USHB03lL$0gB{f?0FcQer=9J+G~kG_!}nNb(e!n}2s*89E2J>M@|=2rTD3R`^o^l!v3j}{ov zl%@MHE*+>v>LUR9ARaYf>wO=GAh!2fQ-^g4&be=GoISa3O<7rux=G@=R8=2bS3gBd z{uMcSH?rAk=ma#y!683n^88v_7^A7FNl8gbNK&F@p63?qd89|VW{p@LVBO`?M~L4J zb7T8&&|!TK>d(cBJI}gVXnOz#tP4OB*=ea+?(M|Ce>7&rfUcGHKMk?BHlZ*kz>hxr zsdJl{UGKVg&xvBOEx^q!!L^JjybCEsi;jUV6Z$=dT6ha0xFzD8&rCt9*81k$TXHD(L7>V*p$nl18?K{%a)fQZ1Z(Zg)6FZi9--vHRzhK4)` zuP*YQ;9`5qn5#e;c!$(%OdM=Y_e;(9g z`&+#@i_9D^vH@0z*70L7z6n8CTHI8}l1eKlug_PcOx;t+^45KpNo>dg6}-LYezBYd z_+P&p^-jQ^tRIFFq%e~+94s{s!cR)1&#^em_U)vGaQ1gAI~@7kjjU2~xXtiRag-No z(56Mrd5L|IBQ0pcdhM{lB?t~tezfgEwL9<#`Jqtd`t)yl=I-jgBJWS*N!D|$Wb}5Y zUmTGcE<5dk%Xfp?OlC7m-t=fV>+0(8u;9|>OH}t4v^1 zh;C;q{lz!C$iwTxHwC1}rPGK=92b2{FLikWX~BvB!1gTJvDy$x0;Wx}QH+j)bP|M& z_ zdX546YQgl}g0C=k|3Fz9A4{B8qF8ICyu7Z7r=p4o{I-#f!o*^wxU{OtA?5SSp|>za zS1-~J{WI!W>&~=Nnp1!mJ%y;Z2K?`k2ilwHsOY7#XKZNVXT4{6h0)i8cD6@;zrS3_ z5PUg4KCCW(V&dh)+uMug;plftO{Ms(tVl4be|wz$kj`SjBl>UQaxfq!ODK+n%9$|9 z{ytOuDp9K-Z=EvTcz3SGYiFmm@s2tV#^+|fd|ZjzF<(Gv{OGP-|Koj+9KH8dGyHKY z1|#OMwB<>4YV|F@n{KPu(>eCjFS|5T5~Q0@tQ?>Nffa0?=|pAZ6zxuI4)HBE0Vtpv zcXxM>CPQ>hdZg$@J1Ta+4A6%i{htPSI~(G_r|QVKaGPd3SI%S z>g}p=7w&k%HRgl2KX+m9QbEedIDHB;$&BcpHNe{)a-l=@OoD*l=rjqW6WY&Es*y&X z@!rs7Hg$;5pz71>scw9abR~D$sJ2{l?_3|)o&p16_aDJZ@wtiBo3vljY&kG6y%5N*d@317r zb&_$UBCOW_KbEdCDyp{a4hT4eFtl_d4bmM0NH;@wOLuoj2}nt|v>+i}f*{?}-QC^a zd3?WFvsm+kALpF=I#=(#)7E}!5Y*oZM#+k$Mu7AJw|^p{a#e6}31uq~b({7sxaYkh zrzVX9*25ZQ!?EBathAe1MYk`PF!IxeVrkP1K3;g^CR6f2Fd?bG%Uc%H6W|emtlrh0 z-9jP8a#HHvaO86tG~#h8G(#=qte?~DU5vVHm5HWy#XDyP_v`MzeMt)>HMHIvzY}R0 zne57D)PZ~!OwKKNGcTwp8U!B%f4(8jR(^W+7KOQ`NfWIGmzD*yNlv7CrrO|Zv& zhjlB{M~=aoTUf8vsL9%cS3FFujimb7Ub12MWfnh52k)c(51x#X?tHZ;{SUNZPzaPk z!~QfyQ$_L$M^ozqxA3gM)ROSB`=WHuuq(>lk>0YMk&&o58XN#p4DI1V>3pm94>qT( zIm7Spe2zP4ZDS+8Rs|61d;A;4UFOmJ+o8udmOp!V0@U180hRF*2Z5z&;IlNvB`BCF zPp#T-#+`v1ajB0F-a%_1cNQwa!vQCJXSj+Xt(X@jyZzoaf9(YRQ^N{PjYdBTfxM8a9K zje8*)7BpgF_&Yc_ouLQGv)r-;Kv_0w(7;Q;i|)lp^^1Y`!Mv2_ias-!%RBQ4cG)#3 zn#OsY{szo@B<<^-budwy)s>d4(x>&I`&X$*LKQ-ZVwfONve&S9y3O=Jno{=O=84dW>?XlGMOdcJv=dv7Uj#!3I`irC*va$@`MqG5S+-w=#DS z)VPZ1I4p6=LGXF%<8O_PwEG-htR(=>3DkHD34^<=$%Ci@RJpgo`%F2_=(SW>R_&x* zM(ww?Y+lmP9* z^6HENBMP^GGG$9cN(b)p(q01G*SuX3zVY;=PthgYdgq6m$! z_LvVrr{({N_?uO5IY(bN0B;F?zSk2s&qnY|MD;p&yAT#}ocTUh7jX|;PeSKB7k z7V-+1qWYN@n5NkxHL)=dt`kFk+=!)vFzG(qPWKU<%mmZiX$AHQ1e7W;yWSIjiQ`%C zSs|l@*GxVUt<#`IC@}EP}3|8p` zst>eOp`flxpBNGeXoP+3oE{5wm)JiGlpjB+1MhI*E$k_#9zFC-WoUkV?otm2rim)! 
zrQ=~={As~0m4LB*rkCy_rv6&ZVF$W%hvTk4s_@m-yq1CW=%fP6ix^gZ=I%^~_5yYn12UFQXInLtu z;-4$)Y4(1j<^r2(fp^EDHRgyx4;}92JXkhW#z7KTvu1?2kTMbFmGE)0y_eYw9G5Qfv}1ECmi$*p;*+pVS|2BRQIIllMj@OG68BH16dn zaTErjH0qkJKW1Q){9gBFt;Is~PTtK&zjlMNvbXEDGP1uLsK}jDP?JZT$P&`rS#36n zsOEkiGDBOxo6e?3aoTej{e-?ZB(d%CF#zyeNo=wgPGv;aq!e#Ww22*Ux+=tzVv@M_(StHyh3KK~Q=S7Q}+xBK^8Co*-^lwsOae!%*^g$-2O;HmqC^(25>u>Z)5-LI9rqF zskxG(^@o{R38x}!KnJ|YMo9bdpiSmuR0hox8 zzHT^Np1)HJN1+3O&oc>*h(so1V&S`HZL@{irToGI^&-;u%l6ZCQhNOg@5@#mo^N7G zOBXmE)1Y*?!5cnR?q8D!-R*n|ENAdMjAibkd;}%8vJ=>S$T?ljPO8ZsAM9Q(4Lgj* z-Q4PCHm*E{I*qFGYideLq$fH5-!%8I{&`c*hH!ak7qEFsY{2=@Xrqm{^N2+I5erKD zr`OQ;`Hm_aeeD6yKnzq?Y9Ig?1l6Mvj9P?3)0TEzk(QD+Q<|Zj8Pom_<34Q7N zW9L89BFx{tE>=7yUVNTjwRtJj<)m@;>2XlR|rb@@~Rb20S5ESfnRn}WRi$+B>TIdq0QUwacqV(i8 z?N-y!7*hA325D%RVQIu-D}y1_2%!**X?B-xB6T~4LdUfds;DB~w_lRtWePyhi?jd) zWk;&4L3!OWCAZmBXR0Yh<`Ebyqrs@t;c?rcdFi+*lO*DHBAV5AM_}bZhOptlSBhK| zx5q;KeM0tIE_OUd6wqko=C94OR{+ME`S$o113V1Zqr1Tf&j04N`wTm1-`dhn0yC3= zbKK4KurKoa8nac26EBzOhWiw}sDxaEWHi3t`ecs1{l}((W|*m|>B?W0A537 zlNuBZ3u&L095}0(ERK`Z-S)v#FNyeteRf?io*nxgGZz8EXRN5{RbXnkFk8A55}xwF z!)$>=S7705}@1B+XpD$4Hn0Ph2KYUXAx8K+NKY(s?w$$)kc=@C1NLktPtUn;kTx4@{ zGhUENf9N?Jq3AEyIXdjt_I&!Daz~oC>Dv94^eU;W>_7SDT75sc422oD1)9lEJD*h% zYo3b##FE6kyFC3<93`72UpE(Q9KO=|c~5IcE^QCM(!dLPmoOp#PTUL{-Oy2bdh2mfi--HKm5(L zZ)n_!#0SU)67~g%4#?PYLx+acEW0uXsJ%_Zu_X(+aOCsp1cft6_hn*e^e)ng?O$_& zvwy@7mhRW3;7!JE^vA~T_5UmwLjHuP9<(Uo{*7^Zu3UZD;iie8rL(RF3@`=oW+`S2 zbEljlz+0vNdE2v~@d-x^q>_1jAJ)EZH~z$$HT=0Jy-Z!Yuvt;!_4==p{NSn)1p}TY zWAG$aFhoeX>T&O!W22zhKqQeg8yMn{{M5zHq7qp7tT@{meL}B0TY|-MYgbtXaJE%0 zRQkK~5~C|j_7RVIAfT5O-z`t{_&xPrd-(FC?HTMbyM7bEh{+NZ6v8S{?0~>S5LBh6 z*7LXg=7tyU+iyIk`}{fwOso}XMh=Mt+!+d{G%NMjuKDT2D89@J#STp89)z_gF6sOh zkF6n0(Z%i>kg8L0G*0beW0n1lr$hkPNdER2|Evkhyy2(;G@!5(%gTOcor`#qJfm?)KVO z1)xhfiX!Rc>#)NVf=^NbeCbN|^|Rl?V{%=55h93fYGLYnKcF}KUM}Ukz1R9A&mCpy zS^T(~RP;8hX<51)ZL>$K*Y?y-&7_Ky*e$mRIX#Lg+JBK>CZW=6fjmPL0L&ug@H=cf z-M9-e7)s@$IPhhw061>-3{pv95!C4VpY zb+EL4f>OU=NS{QfiVi?e_3N{G2vn6+XX->s^aic@`4sn>a011zJDm}&z#@0ehQf~& zFLBZV?8)8+bP~a@Dhv!?himz`Og05P>UlwKjW+n+qnB?yj!(SG^;!bzmdvR%*ypMp z9Ct=$e-@vPZyg6_sKE1R{#cVm71nQ_YYJEtj*yNhN@1c4*p+EMF{<%+`PE0?v!?22 zzLtEYIQsq+5jPngMZyTZLkMf!mqiu!ob9OvX{GscA!z%fTZ?A=? 
z{j;0wx)r~+%;AvI_2^hM^PmR>lh3k!pzA6QyZ894c8O{sKya@fZuZc;!nnu5+-=7qW0 zeP(9Wx4B_=19j>ydx#X_%q48jl{iS_S>Cy0lEEm0n#rQ@p(W_LC4C?7aFcT(QN?ss zyU+ZU=ww2Ce!jXj`mgLyGYhBene?3kjP*+^RiaDEcy<{Wr)>!-*mRpEfbpzH_t@e6 zT?e)NMOY@LzRR2gd2h1b9lM+EBsFs0cK|xQ7nD7o7=H6SXvhnX2>KGOq;%zW$7mze zv&B>EAw|9k{_rXG&?ev_%xdQ&lz=DZR@u`%-)ucn4R=sZ|pNuiP)&(J~i0RF+nE(>BhBsiU3xwU>d=(0vgdWO$LgM zET_KMh?vV9vKu-jIxrBz9P&`_|)G^RW*NMtH*n?iV{truEUW8ZwFW%$F3Sqs(AeR z&CX%!Z|<1|D|HVIW~TawOq!#~-=@TA8p>-GboqOYY3#`pY+!7lnQOI{KIYuOtj9OC zV7bYJP}Yd5N5j=#W*sgSp4Kx72~zf9YuxX@?@Kb!J9y(GEj)v-;;1anwZ?^>W9-6D zfD-@X-FL)taXAr_!DbS4S+)*ZT-a0L+M779s$W}fkk0sh@5Um>ie3XPAt6C)Gc@w3 zbu`{Vq9-m~?hILFi0-e+n&v;thxF&{Ur)HcHP1}7mR1J($f4pON<4ofcVAh5NVc|M zWKL-(|QtZ3l@Y=J~D*3WLDcpZ%c5HQSA^D%LvnNJT%rbsy zy%L4hyGYgSe0zTkGA5t3g~_36E)g+Zzn4{Z>{9PhTP?3Ja>#Ml_}dgsnf_sX9TZX0 zyswesRCK%|G(IO3pRJ5DIDpKhPNq}U?CGB+D{mZ3_>oRe9QGq%^ET2<>2+h&BWj9g zG!-&XJjte&fhfp%jY)l|wZ@dQ4tNQ&4!6^YT|%EwQ}BN19i3SrAOhHkl0{2%5_^&` z&`2oEy7%nb3h?buACa$DOu)U7XzeL8x5xs|%OK zb%}A@40O6oM4G?|x-Yt5BgT*POjLoXtaxbT5&N#1tgf)1r+U-ud{e|_3hxk999v0j zS`0(=42eKP-|9y``g8*8jfKP4>&?n6Hrge;dY@IGpbLr--%^brQJg75!poHV9rGc1 z^oRl1td9(0LBTm>HW8wBvHQumTp&8pRohpT&dRU05D}9>;99N%OwqHH*YL@3!F@fp ziwW#X#y~WMDuYAQj7rLYiyoANZ$jgdr$(8yiz*{`#-VG><<2R0k1q})---C5G)9W^T3R*o^6qh&Y5{XnbW4-7Ml zy0gs^9zN#_jnT5v_)IrRNjcF$SmeiD6J66i?hdQIUG#9_9l2Frm<{rh4JyF@RI1yd zThg%$jlBZ78?r67j%h=(Np}f^+$6^B&M*F1%r}9Yf75)KiD6M3^|RzKjZbYcjA~TB z(`Zl6@n#a008tv*g4A{I1N!DGP^K6kNMPn9!&b+e`} z6;m1PEMta+DF*SqS%7djZ05hpRj*SUevgg6v)x02>bW-`8bPzp2c z#DyETidHE5u;#bdvcA07E#ef)TEW;|>hc(G+OK32!b9YaEA*yU86?ROoTt*lN&ewhl$Py*!<7M7+z1eDf z^0Kt3v1fn&DIG#SdFIeD<_OGCA}H!z?zpS(OGb4zoXdT?s0wJvbA9NzQ{QKDkxaX; z+q@V=+(d&Gf{Gmm`AWlNLtE)R&X`v<^yt}az3r8R=?^UWfmo&m=a{3%OkQ^^f!Hiwr$gRj{c1CruQc)Md0Jxm z@w?&f9*xTn%g1&n4}zmZkI!1Gp5(yzEp8Jo^9GlNfXeq0YjzX zU@)!}W?PHHw^fu2{2Ix6l$}nQJr+}YZVmoF6^kc{&>%KI8=5%-9* zCd6)#)6C?{gDKX@;dxVfqGZ8A5#qtIDF~uxbpxzmawsviwZY&HGMw@kMD1WLic}B? 
z4my({h`@(HM`5io$e09GoW95hp_(Uz@nYKGfI=Z41k2h~r~!_qXTAj(0wwMp&)Di~ zh+eirCj>(Xc0KkCrtxzBJie-M7|$^C44I4GRTwy-%S}bm@Og-}#rs5SxfAz9 zPJIz*#GD1cZGG}7hQ`LmD;EwLZF#aYp5bwkF$Ii^Iq{Dy zDMj~+y9z0Ss3jX%FbAErXs1}z=Cu|gto()_OhF?d(~YG@1p$TsHXb9gf5H)iERx1e zuBFNqPIMxMwA@anXFfgaV1mo49Wq}yHn{%_aVi3mv%Q-}#S5%`DJ^I^bd{^0l0Y?pI zh=3T$Fs7{xii4R8f=ZwFFRt5~SPPOUVZ~s{#^wzO+`HM~@Pfh>HxT&M zSr|r+d<*#^8mOlB)X%nk2~SylJ1e0WZwIW`n)f6aATV;i#QnO!E1xrIFomhGf6On1 zbS7t1XQ@JSt-c&dewCg2C19&SL|llT-SImmH&nS(xv5@)lSobqE9gtYARINp;L)E6 z9XZLmxsTu8V#^|G*4ZQ~ifd?pNb-#;;DLfwgb7Ba#KGlAi845no(qwU3rF+}48co6 zdC!#z%q`}%Bv1)@~6**l_dXb|$j8_V-251=W z0{iKnS)`(1x^nF&Ft1`vp~;5$o^qAlMNxi&N!GDOQ`zKRW_r3Rd#NvFQ7xa=gP$+m zABjnFgaqnN+~>Pk;0 z8k#n>q__Ait~)RVxj*WNQ9Tv=oyERaG!Y@3InqkkOnSTo#OX-#QQYNv zJJW?0&}bt!=oMKcT9&h#-(DPk9~oBMP9#tm-msM&My(%KQny_(bJF#SnA$XHJ=y#l z$uTDw7#u;~)ATbz+F9~o;97XI^v4*1M6N~h>gi#;CSET-+bW+-;1_XIQ>5Lv*SHt+ zPx*tEY~*Ml#gQ-;62sj+Imrp>;vI;FD#7y9?(yutogKT5IYRQ^v8K67$e0T;S?z2-bjU63Ht!$K`#t~nvG9{bIJqR)Y%ix5&KQySWr zrF)xSJNJm4(*9+CyK_u)f?obRe-dbhJdC9I9P z@zHhMn)BDOo)wYenONvQ+FJ9wY!5+i5LX;K8th)N9?%yZGF-L@MUjh&SvVP=4HVj1 z84m`%Dl|85P@~meznJte~X%fyVcs=>KT6Tax?j~!$qa8gIm9~qu`jAv&E|HSJt)idMtPfQHTZhl0!_# zvqn0<-O)l+db#8`>igKyOd&oJ1&E?yTb=eQNHJ{v$&y2zO(TVnK{w{FmFv7$yl$tj zLc+KF^@M@>`90I#f`V7s({43WaDnyCtYUEMo*@csxU@+x;Emr{^2ni?)N_~Yq9BMY zm?qWggc))MFKS}kI-^vrfh$_F#j%#hC`+lMstH?B4v<21qRF^^Sb#m zzQK_KgQ`nMY5(-@Eq~>lnIO-E?#iksSeVjHBH-a29cBM+kA;E3+Qv}#?6{?EDa&PN zIeu89Z4edmghTb<<1Hi_I?;I-R_U_#EZ1IB!@N+>mpP&ZmF@*%Odp1-X>_U7I((PBDl zhvcHQotpgWuv)n9`D%HyZw5zCk*M7zB8<~Kkt2IFb09M>~APmX~J4^}SebHkVt@*7iG-{dQ+<0iVAzY7qI&&=+x`zvEY z%_W3&9ZfU(V!O0_MVkQ!5Bga2t%;K7ZiVA{Po6Vx``e-G%x1M()%{tsWzN-fiVXZW z*$Lf~gZpxVfY8^P#U&Gtu4ebAx*v569#?m4uI=CN^U5lpHP;~xihRx@*5dUTSXNfk z`wSVyOvjE|@Qw3tvT(sxuR%}?Tt(3sq6q!`!L;}OD(jt6=<8;xVVhxd81X2!-PWhs zf+Q0_qecg>Gig=#k-&F;DilHyq3mkb9dL#-RLE#!ZEbCH;#vsc2S|K(^Rfq1-o>>Q z-QZ}Fc@F5Q##YO6abK=wRiG&*IkIeYI65CzkeTCI)Y~r9`;3ydEcpwJP9I#oiHT{s z795*d;<%o_p-ybF-4Sb>RKN*?Xw-RI3Lw7gpR@A3zKky|_;KDe)Lxup`_YaoO}@b! zFEoS40YFKP-N~iUDz)pkx+nIl4G~^!1YX?T$yxTU@R@ztdPBs$d*wY%`|qq5(f^l8 zVbc}nZ=P#eTfaeyw?YZcx2ls_Hi_g}*td40%uSb#r+aIc&VvLvYI(em2@2 zknWuxacgkDWkQnX{J!bLovmO22`7jD@`@K*Mk%h4J=o>wLv$|-c`ytm{(HIB!N-rW zhEbwWP-v1_ZEf4Qk>2-mPTUsWgpnS$LWoTMvL*#VctOzKt$btS5Hj7?C5xX|McsFa zKy`8A;G{}dk|M?8g<8we{*4p8z=Y7o&CKJr%oi^ zNzF9W+0QoiZ#ggRIUl+|Y!7GT_&wh_1Z?PHJ3K4P6%M$Q{G4e(;eVw96Gi3tTwUkF z0ii^|d=2VjVj0Tf$ccg_uGog5xf#&2sx#QPR0j1G3cV-n=`*w{Ctf)}5stj&kJ z{>zkyj{cXC2?YU5@BA6I+!WGuonIUw6yIi?G-}Pb#G;Uql0uZfc#1*8szNBoH9l{e z3D-dQFRZz-W|A_$%I!coBFBw!&j(vBA&%0nyGI(`RHEQ8h-j!p+J>=lkq`A-Gfe`A z&qX`UJIx_?>av47f7LmOG9x~%x8hUAuFJC%q1mMOs2}2*!$IWVBN7MKOs!tYe!>X_ zgi@;#Fqldrajq~Bls4pZ~FGe<9<|S>az{bNruC|RHhcPGOv9&XGrwMwgo>8g;T?M+_e4&~lf}yxH!d(1b zo5_(L=uJn{Sw8MRK00(nccOS@-z0w(z8m7$bKUM+2rpjoqT0)$*J+qinfdO58|nQD zOA??g)o=jYFpOASL}~#hlK0U1vi^}!-_~&QzI#&Xhi-%7*R6$|uTlwH`}_Ml{_D|q zzLhQIK8HtSJv&9MM;YH?Zv5fVDOV9YbJQX{67)1$lk91>yl-;|FARl&a7=E~_!H44TM6(Ep&03uQVsHeNmK-ZmeqsR@ zjK_BVvXIrUf8&gNy7hmru|>mHwtzScmJET?_t?%OSQO^jcxGD`{~hY};d~W1AyC%x z4HOYEc@_);`LW7EAyOHnFO^ALVUr z!M%7w+y_O&3uL^GM!!G30e087697ZW_C1#g>2%!xTN)V|SsG*ygK59|(ajJHp$M*? 
zIjEq&c6W6>Txf9UjU_om`|>XhTkycxEPJ~l%_Pjx0g3$sk0Me9Ig?6=l(*-{u{?;= z)+UV}H-C>ar%=ZdtqEYPGk)|P{M0?MBDDF`G<;0F&YQi1oR{>aoNqgPm3*CIz0${s zVo;0JiYcQL3{S)5?|*$u@#3g_J*7P`aA_a6tVVsF=DM;swA(Txgb4+W4eyVT2kS6E z4*2-b&kwvxXVo>#S?I@`TKQWf@it7U#OV>};J)m5$$-dDk3=SSr#3(}9 z{p92b7f1=>1(DMTc3!n7ywR;coUbizD(tc4Ug@DkHHDna)dIBTUt#Lm0)E!6p1fXn z>h7ypMS&F&y5CXIDG=zCann}szZNksG@Q)VmAAJW;PfmQ#*%|cNJt6`3xQYwP*aGD zi{JTXtCrEp3X&>yZ#3C*rwndgcqvS3ZJoYBR{@<0vJTdKpX{$0F{w+Hr&Ym8X+Hr> z1>o}Kaadj)8~c?W{m&2t4ywM-dwc+=oxdKvMsrs}>@^wiWW$P}sg2Um-MrL_aZh{} zZFvnMcPx=E%KvJ7kkBNMZk0I-FZmk;l0WsOcFK1+&)LR@0~utLPS};#N7xK3>CO|D z`xB;=hobE@5KafjK>iWla?(iU&DzKOI@sK-IVrJd#StSZdtZ( zUIMKT>D9kR0sdbY=3{7!I?mQHLwP&cuml6xAvi?-d-yioJVZnMU`Q_C@Jb|CvXMOf zy6cC2<26z0J&dw9h2Cu-`p_?3zDziK~Qw- zv#?7O>CObJF>z5*WAN|-u{1V3hD z+1$!^Oj+HUiD62cq*XDFH#TO^E9675gc>eaviy)@;nSq_a0*PF*mHMnROsezeLX>7 zAkH3G$tr4X^?iu_kJTEW5UAytbm6Ft?lfY_F%91$BBrOS)2Zm8p((`01u$k6HuHoT z>BlT8ZT1S{v%mtFzJ9IGD3I^5#KwGPx_#o9oQzkNhT;6u=zf@nrwAtJ&8o!8`7yi0 zSY9&!(@V|E>rP)OV$-<^;K|GbJipaP=5S~pAfdsywB#~5c~M)7iu$b1j!dqLNw%wI z1Zayz`w1IrYe)Y0*=A~|=2H86&1ggOzZj2pF<#NE$CWWGX<9up-gJ_=OQ02jg~FTf z{uf}fZ+}y$IC#Cy5VUb#_H~FM%rQtcwf|e4@7D^s?PJIMZ#-`?5kMe>criTcpczhI zdn<42_wyWE9c(0?F2G#TZK`=l$-#Oa@ZPmLSKcH7B$mP`+FzGQm7HXpUT)R7@#?J@ zun<6f18wjK5G37Rb|Rh`+~n{@Fi=mdXdzD zbwhdvr5psjq^h!xlsN6Klk32HJ5jkT*7T_ik6#>{7J-u94nKF zG6GD*uaN_#r&G9enM6=-4E4U3)#3@D@n=IKM7_@@$^jX_9oMU=oEILvlxM{o4q0gy zLKRi|hT-jNPgnk>EN(p&m2qq(p}P8ljj=rOcPn0&{_fmPg)ZErN;GOrN^N2xuYOU` z4JJ%kq+B)Mr(CEe9xcY|r%=;OGRLKLFiIlm4b!>MJ~Y9YIuyDw7~@Q9EZlz2_Px z6wHP5uiM6Gg1ZH5XSid;K9|_s4-bmxNfgy}J)Gn$!nS|P>J(MBQortw6urHY`~9Sb z8f0bS$X#L#4;o|3j-#IR3j6Q;BLsI`w5Wq#gB-ycjYownx^|J}i{?UA_0Np2d6*>< z#cMm~93OjkyaP^Nzk4}NuJu{4&bGg>)0zC%anbk5xYFJeC>7=e?QfbmEb~o6V&`ec zjasKl#*>s4Xwu#Fk!0U@68gVng<3Oc0E_$++E;~J>yDP|kK@QW!!YaD*6bWYko}go z=e=Iq!^LV^zcnAS(m$ZgEQK5xLWi-xt1^JEDNVI8c?8)!Ho1i7Y^$VoOenbAdsddB z`DE)d(}%Il{-!i&9<)Efa6|?nCBnMS!gc|+90iJX2q*~x@>-r#zW;SRl;`r? 
z&=G>|lKqDvUUSlP$1;vg!OX6HxXvO4*(l5;#)_eB7_N?jIvtCIXo$w72gVFEdpt1T zQG{N4`@!6dFoxOcYurb9~EJ<>i{9y}I#&tXtyD-Q- zuc{!JV)R0AG&&zZrL+C9mGs?pA7oerT5fMYX=|6$Xp*E|zg#TrEbnI^7qeqjl)uCj zyJMmpT5SBf?1~TS7x8Jaa^LQg?f%SSTXnub{1^pjn5$7#aU;8+PJU^t9Znh5{?F;T%LSo zRmwTE=|Ee?i^Ka&=$pLAx!EmK6*?4+hCJ&m-9gq7YcMndgc#^d`|#=Y;Dw2!5w-W; z(Po4sPTk@*%IcqYErnye+G}H;zSC`iG@ijsG?hcbr za`yVmXRZGx3se~LRedz|^AazW!b3zCixuD1X?P>n7tFj(Wf#7=-!qd0FAt`rF1Er( z&5Ne0bySo|}F?P&$5IoUZ?^PX$cdul94=E{BT#LiN=+dsGnS}Vr-Wssea~jDfL-kNp8sa&fY@^3WN+TFk~JH zTtzLLts_$eLWA>+F8HR8AZ0Xd=U%xZo=uYc<0D{7?`)rYrawY77|p- zCqc%+?9CMwx(rNFbCgtIP$3QkgxCpFJ0DB*RY+#KkgiW7YJfT2udYf97mFES$t(If z6;?DS{B4f8NUxlQlOf$fX8VeBQy22%$HUIF2qa3BD7i z`BQ(|>%k;(a!x%KYE+*@RIYPacSYNfRe)gAr(D}EG1S-29{(z7u@wgu+DAc?27YsW z%bHWGJ5+adMcVLC;4gLF+HcFAreN!04s=2P_g!AW26Q2Erfqi4?^4in9=Dq!$HPuX z$rgVWR}@utFYyUJt@j`QP6=;wzxetm>b1cB%uMoKCU>@J$`M-*JoF=5daEG!!}V6) z2P9E$^l{JwmKp;wVsOZnublEbmp?;)Jqg6z7WoXq0WJEa;1!kY= z2ZLaE5I`^l4JFUj;VvxVz&Xzxq1!b_pr9e_d@G7xQmW_3FSs@X`>7jE68uXAMDh9y zyU3~Li=V~BuF$FaRC@&yoGBDe0_+rM&6}dZK@daUX@q4)Kp{=v{x#pg?+)XReS1fxzM0rkUb5V>Sizu>Ty%G z{K~mgrvL+oB7R*YWMjPpJsVjo?eM$$>)`*CTpyKh=;PaKnxR9cr}+Z#;XTBy>~fZ)MJZzq-3YZRdGiCj;nd1GDpv*JX~Bn&-n7L7)S_Ne%aXACAi zE0!u9nr+f(a|a1=^Pyw1d*{eM0QEwsC^8&KIT%QE6R9&4;X%q!dYW`3P+&xO#6z zWLe%+R-E@IoGz?*wz-&D*tzwyinJd6YDUsFOHt=2Ps+DG(Ia@OC2MoB_iXOpx%RQbf&?BBb)!D`=3sT_&QDCFrg=ZZG( za)(z7w)UTZ3TZ&(acRZxCSwJ~Jb6F}V@BkOMoyAT1r}B3!3~oxu!V?^xYsCu)pKxZ#j;Y>GFl`E|2Sm ztzLP37SBpKE43A+`#H<|kM|2%AG9Cu!t4F*eOe|>jBLWRpSmJ1ZL<7cE~lH$!cJWT z<^#5>kNgs!JN%w5!^Y=w=9h0#G3=dMG8wOQ7!%6jLC!iXrQf)MMe>4mP#~fF29^n* zZ9|&s01>?t{~ZB+BAOzxn zrul5=<<&%8JxBuBK+&(D z&DS}g_5`Ej^2_9!Ry@8=sa~>nojQ!rC%KZN8S|ei|rR%#e!n#zcOG^S^CBW3~{*JRLFN@njb}2 zX}Kco{A@E`l>8GmGm#7R4lj!?qZRrsUw6E?bJm{TCH9B@Uh@>JWI)Nqse<|b_6t?b z-g$!*GG$S##bnEg_UTSOGihXqCg=8XNl5dtf9LAuSyP*cJ#ee+Kesaj_uTC~<@M4< z_H?>LruX(~*z)>uF(f0-HSVz?0O+artlTfYQnI&8L~vgoT}%Y$om|I0cY3TGXBgbZ zDIMK)RbG>2AK&G$66ZzgRmt<;>1jLg)xWF-2h4_u!q zjPmw}-Nm9HH9Abdtf*{%KVd>OXS8^7Z}tWZydN$$OqzhBXvz@Yf0tN(j}QOyB;e3C z&U4c;dg9|Pw=if4AXMa}h~$=DE{aLDHFXE7UHrC5c?5|bPgeLYrwj9PgsS#{QX=b+ z=wTQTkZdp74DfRbfJN?5v19^wkC`;l)6+BJvV-3Ce6Ji3;OK;TkznR>>Wz(q(08UO zCw7u`f)A!>{OYeFt7*}w1%{h0Df*bEE=)+d7dp9TWo18C>v8hcZUJC*n*HP|LJ?|K zI|qCw0acBYLt;`=WF{8cgpcE9(1@0q>j)*Qj#m(?IW&xtVQYF;R$x{$UuUVL@7FfD zha(x!1^MrmIsX5aFHV%mH5CdcI?k>%SB|qD+jcMn+nct^9kQSH^JKDxOJ?5Br1>P@`BsWD z{$gh|N5I9x)sYPc+?^jkwr;|mQ4s3;2O|{+gc8_q_EUfuuVrc55x|z&F0}jo{u5*0 zkYWDeW?{dGGfn>b=H_LPr`_p(2|IeN>d=v|oapm1R!9R{Bqh*W`&#b5+8w)dv1a6K z&%n)vmy=O6=V~f9xYKdEnBC?x2t24N5Mb3q!&Q3!IXipB4w2)3Z~SoiyO(3fnUjFC zUx~UpFQ;KtBK$sEmKe6raXCJ}Ovb8le=RDhzj-+-fA_pwSkR(V7Vz?pOmK?ldSQkC zXZiHggQ1|-X4^v<47i4A2vi+5%y_58Nr;9Zo~LYNOpz?B0u|XX{e%}~z^oCyaOBRf zk6!$H%`qZ@8VnDOcvIq?6ZxUQQZeVbrQ`WvxsoNTrNrtfc3nZ>sBXBX2Z;|4-M_Cr z-(rIP<{NdBT!L}#RBAVSuD+SOla=x_GmAXpn7RyOn`wc3;u;7LS#8E3%1@4xl8!^h+y-~s;m z#?Yt7ROljO^ccuwzDK4m1VR4WdZlhtJaKJShrw_VZ3t}$KtN@&)ze~R^fh>A0Fd3dGk0<89=`DB26=NFSE)uXkn>S;K zFEMV!R<*|dzijiZMWJtN(|&b!{L97u9P6>|%T%xX<3@(%^Zpd}%kf#(%X6C02krZA z;k*yhyjd!os*RE4Rr}_@hgtDqZ0OVz0MmC}n�xD($4#8`dS)j7zMR?h~~_>@XnV zDVBNn)Zow&N&nQwdK_oz@^TSIRVaC!;7N0!^Svf_i~FT)B$-kWF`GU@9e+ztEi#*)602sqLRJ+(^av^xH3Q)EdA|ybu2Po z`KP1W_MPEU)3cH3@Gz+eLF~(U^bgjKoeINem|w#q+1ed+8c{3vh;s&uoNLAF*ZN-6#LCAkXXnBd_6a|6}Yta?FzY@6H z6|St^ccp#U`H>ZCcK&koj_mKj*30FUrY>XD^M%WUJpUf{b0`5!KxCtXmxykWGoou} z!GWt$i#_mY8yD>?nMZs-@D6=i@p}ICr@6-P?tyGN$@BSfhg5`%|An>K8&}de z=08`7`pVC5QcW#?YVT;)nt`6`Rt*(~dc6G+T$ycXR$7EoGomtMQepj>Mm5WF)MDb} zc_}CT|6j+V;r~(f)lpG(VZR3)VMJm`X&9s%q}u_cLAtx7J0%6AL%I==E@^2&I;BKJ 
zy1TpXdHue7*ShoPti>>M_TJBa;uj*xdfbHJ_p1ZT%_f_LH#H7qEuPj!CzpOJOMk2> zZ0t8)6-=4m-skwdgU4S*Qnb|;PFx-}e{>)_Bu-B^XZaVZgX20oJ9~O0XAf!;HN}JJ zuhgtX87K+M(0WrBULP|&#WPj!BraTwS>QpmhJA}Q{|!e2dbAGDep{26htjc&QlN#Z zCt;L+|KFWTF+I2mPFkwky?9_skJn?uB{(-8RaxHlmbZa1Z+OI!Pl=|KS8vaY+6k`!)wL}#C4yF^&;?ios`E<529 zKY#YG-&wPb{wFr1#@esavtQ^fHjM=SB)!5M1YT0_jCAX zLb@lplXOp2%u37IDIp9Gan_n)0qKLISn^pN%<6|HQyPTS((L-jA0rh(rFOY*uHILx^`67R?bz$nDATuz2*)QCyu1>!dd``G`L7b z_oxoeEFI}fpz6D}-N+E8%^dwWNtCwPMv&j{RJOTl6AZ!Gb~Oluc@Lx1`tYhI^wc)> zsPz`6kfGcw%<-qO$`7lXydsfUHkcKXB~8xx)3VI_u-&Ig`vS9Wrroh-7j+Y*q3)M1 z%Z3t9C2%ZeeRQVAnYnzWwBmtTjy*uBQRl41=18@LJ5Tz)~5D+41cxgeo#uqPBL^rBLCc42{;iMrR4C`ELQ$kv*LDP_BXv(1)&d-TH3WDjkwMF@b<*=Y0tdm{AXtvz67-q_}ACqx7?} zI&L4U)pX~Ub&JCzUcajgF-pCwMs>RTg&!@}`ZEf|?q2T89tOhguC?Qz z)9vmXC%gBiAsNV_4?nUM1g!_3^QuSnA;xEJr8*X7Yf}f`1>@a>l70{f6xGsf>d3YB zunUUW>owgL(dIos==^Y6s5PobnfYah_xOg`hsj6<*l7u$ zK(NPxT7+7~0eEoYJI0=GadwDqc;FM*2ZWgHGi45dbPcB0j0?61#@PXYGOMz+?SP5p zEev~1Im%&`PY%F)>ft+`QVOt9W}_0S$}$wds6fCYV`u6_nF9(!ngIwfqRGMQ6>fn8o!&=hAcs)(_<9YwV zlj!N8M?6{c1@iQ;%_;+*%bu7*RovRg-J@+S9>Z^RnWOE$xsRqmyO2t6XWN~Q_T$mn z5-&GxeHp$iGH9lssnK4?-lBVL8O}(})y>khbYjrac=Dgi^->3nC>CJ=aCG-R@Syn9 z)H=l#Kj3%h{1_fSTk!PZe3QJjIgjJv_2rU7hr16mY0Gk%|GD1f%-E%my~64t>F)(S zSN-~vev`H6PNyUoAeJ#{yw{ZpXO)T!nBWUMA>TB4773sW~bu!yRfeWCzbJge3yZR-3^0=#k)R zA5_B`Qh*4WvNUlRRSf;W-inXFx74w6sGq5w6)3{9Qv0hGW zXM=r9AH`j;Y|rbrSZFJ}c(AD{k-igc_gVWpb;t5i{$-*;rvBAD??Q#6*6!b!4B@@` z<221%sT71d$SZO*^evFVK>~uF=Neu==(atNmNyXK6hSwL|d=rzmyrMNiH?Pd+W>WW&zc+HIFph3+wxB%T%-!{lB^}Ng zUj_`D&HTo~njAbwu@d0nNeic@SFVdbHdy>*!nN<-f;&t}xd_!H^w-|lMKGHd>Uu+|K$>i%7=BKqJlCqE=o zFiSM_wUuHnBRwR#PpA^I=iSkqLmjob${BOZX%_)qn-d2YR<;u-L`8gV&er};2;1kk zkM1vFPe8S$zgU=rx4NHOcP@nVhzG6Kv-XWtUOenH9bDpPa(yEcEF0nrMjkkGX!ZHx zpj|xpTFc_q`;j*gWxgz;=Tep?;TdgX)1fTP1SYZ{oOSLWn))_&U;8Q6j+}qTm@M!q>J9+XiP_t>6R;!{X7uT zDF{FwJQez2O9}kn+^blFS(7ICZcER>uKHz>wSxg+@!22V=@fs?$(Tj#=(yF@SABh! z2M61$%UdKtt8%=%dKZs14!bU7BQ@VQxhmefFJmo=-0ys+r^N%}G^Usyddf?895@GX z3h-Wk8{M2&SG7ni$PI~8T-z1Kl-&lCkI95uxqF)MO-wKv+#E!c6-XlYVLV0npC4Qw zx?wO&<{X+%59l)b9>$E}QH_c*PIB)q>S^m-*=$bv9NZu@<+s+G#NK!vR*>slaJ13> zC`2<{UVKrwnPQmKQJ=RqmLu@<;1o*wVq{}fY4pB@qMe&C3h!Y@sW23InPn#XeLevo zgox#t=ECQPOIXrbkXx5?444R1-SxC&RqOec6tz`!UACY$+LazXBjqK8-meR0vAJUr zi1_I!KOR=sK3@HOc{44XrfynNRM2$`&V3$_)<&c2+ka^G`X5(EjeoWb&Ap8(v`Pnc z5U+h$Lpe}j;Fz9~5l;A(GRW8-6YOh3Q9hKYMZ85j^ntvEQYzP8zoT3pBP1qgoUNWz z-dX0Co>pt0_(Xo2arReD0B_pb7l%!brlLKoWumH`|ki85j#7#Fj-{ZGviuKjvOL9019~xdy0gBz}KD{6itTb^`8ov8U6$#D#+4G z!XHxav)HH_?(qYuOPneDQkyqCzmjH>za4g5aE7fMlKpjZ_VK31Bhra_IO}cLoM{Xr zMFrx#{7gd*i+CE^1x0Gr%+rR~ozvYTTOCxj$8+?DXiU@eG}IO16Gxd2qZE6RldK!? 
zTOPNLQFs2vAOZ)7b1wZw+j$d5+b4LPf9T-LEX|}4%$_^bXFUas5HZp6_|EER->vz(+yhxlv$uJFI=&(6O7j_=Q&KJqvkxKA<| z`P{!|d+i=WJ;!kJNOVD@?d-1+r|+)8+2)^}$Cb1oo;FMb*v~+Z?|(VP|6(gM-H>sk zqDI&6F_)~xh6#^l47T-e|@Rsk#UB4 zQ8)c?xSTc=lrQ+JzWT?{HpS^WTiOJ9THJ66)(o&Wx$VkG9vSju*@qPiTMZPs#fy5j zThB3E(T+_C0jO~YW@ojTi4Rwbw9%jeqz(+ndp@#@HS^l~%n@<)Xt{YtZo5&<=%d!x zLY;sk461S4eBT2CkDjPLtg=~>ni7 zdl5^3`E{NdyBrFcSlBBq>PYn|xuzvs=Tgwd>xL!f>(`Wx-!s5kjN)?oPpLMmM-q8J&8a=s0{sh*3Q>*2xtFRV2QG9{N~ zACg6xJ13FrKAs^2iiFxrW+@}VcAnQrFPnqG>H3Hb0HnUfUquw_#Hp(ROD8tF6JF}$ z_J6E6WL8pm>g<@=SY2uNQOE!RK#Z!&RdIyvJP8)u@vR`gb5-w`-4oXQhedls|99_% z{PWkvfnO;E(|r56XQTk2dIL6g*fviHv?B^@Q#O(1aEI^H;#CxG+a|yg!7Y1 zrD7$`>0M>I^GrRQjJR#|&Ed31{0w_z`JxdCP`HQmA1j7t_COkJ$8Yb`ZS0slP#e76t;Mnlb%URLK6A$Ww-=Y zlB{I@IM|YAO<}Tav?Y}I*i>=g3)k;SFQ~!H70fx0fzhW&Pwy(I+ayU)i;Bh%f0>eh zqaaz^IeujVnJdE%r34aHBDfbc^y_>bc#iM4UrFVsD>Bb&G`NIT^p7Ha&@7BeKt=<8 zZtI&1)M6q>;UmL7XVULrgayIgiqX@g!q9;Nt(U}wkX1d--5iZ*#r!I*iAXWkur~nk z)=K%X^#ym)e(T4p6VYqQFtyCd%R;tGmmq*iUuta<{?V5&9uQS#fdNd}}O)i_Z%hYpJ zKV3<)x3-siGU8;!^rnzEROEd*@_L!}*ND9w8^OTG&(Tt+6)Us2eEQdsk|%r<+_-Te z64vanoy?QVt0!5i4|9fOb~Fbnx3@ErZq8zzaj^ z4NgR%j8I4VW#a{7&W|sMfS~~Ek~1l zb_@k*UMQSse6{7njN*Q6IK5GR_{>9J9wp%Y5R!*^E}rYtsu(FCto7X*-@zXT=DN6D zWT*pY7m%yf(U8-{fxT9t}Rwda@vk_-V<#xK--#(s%y7gbKOH-kAEkPy>|eW$(9v2vD+rz#s(ny12>(*s_5f@8WnA`EcjhKy zq4B8oW6*#rSPX$?C?0*58@MG=d9>vJmpa)QCS;QI1Hq}q4)5-exI#bJG;p6;bQ;eq4&I%e-f~u0k84wyyr*Ajgm#p zw*U zw5YDS%(=CR%K(URR%K^pzOQS#n1~W}v70D;a;{D?7!f(rJL`!9{N!JcwQvp!Jkp}C zsf+7Nw`Oj>{1c4KP8R++6vFiAQ~CZ(N2Dcl*oA-JT`rngU>;SJn3k)aSpx-NE_@Za zJzVi3X_Se$9d%>zcWvQCfvPZW=hjX7#9>ZT4Pnfbn#R|c<2zDNA_|HQ&*iT5p%he9 zluZlJp)#tpG!cfN?^S zJ}-Jc?1UZnZ*9#-AqkU*JYlyU0zRx?Yo@IzoE2U772r@f1b`b5un@$jYOYvCJrhHw z#6v)=m>u^V8H*-o>I7_tyaEo-iV@Jx)N(7k^Q5F_AHHu?n24xK5z4-%O&Itx2?W@( zzKHFKAD|xRlANqbi;5pp1w6A+6KO%N`TztukV#l`f&XFj4@hDn;ROkF+7-X1ueZOQ zsLTx?Wr;B9==AZR|98V*hiTAiDo`gpNY^x#zoW&=de*};l{8gT(wK9cmsX_a&w7_kS zpU>HP`*OLrK8r!xbX`fgr@r>FE%WgV9ovG%`v}rL|0S2#P|1fFsajbE#`YICp)Bi? 
diff --git a/hls4ml/docs/img/reuse_factor_paper_fig_8.png b/hls4ml/docs/img/reuse_factor_paper_fig_8.png
deleted file mode 100644
index 664a7548fd3a407ac54fa2204a909b84e5b9c2a2..0000000000000000000000000000000000000000
Binary files a/hls4ml/docs/img/reuse_factor_paper_fig_8.png and /dev/null differ
diff --git a/hls4ml/docs/img/weights_hls4ml.png b/hls4ml/docs/img/weights_hls4ml.png
deleted file mode 100644
index 3b0c244dd87763287236179b6080860aba5a5ab3..0000000000000000000000000000000000000000
Binary files a/hls4ml/docs/img/weights_hls4ml.png and /dev/null differ
zVQbJXr{#mppC%HE_1x(~wfZ%o}siZFeHT2%1mRE=B%6nuT zuASMojK%lC;$VV_iwzSDhj|WaHWrHqiBWlwP{5k3IG(cbtE!Ov5hqnZc+;bx#T!1{ z@j(t?>>$!a9?aimU12CcSd%GdjV;zx$o&@2SJ*-9^>^wxQY~lg!aD6O?^IRa_S`?4 zHNF*qIL#kyxbR}~$i3LnIVn}s)vM>=V$wCl_PX}+?7m%Iz$LRa$>GRKvY@?7dBuW0 z@{K6jimx>d`Q{Qzj*=R72lIsu$rT|T;nR41qt_EVh zKtoOj`jiZLAn#)_?l4t-f2x}IIjJl@Wb;#kERf`64h)h)Cr6#IHG)_^)aZ;CJ%KQQ zd?SNm;Gf(Q&aJ0dW-b=j@uA;0Mi)GQ1?PH}>r!>H(MnTwR^$Vss*M&dG$-}&;&9qf zoS(+Ex3i?(AAW`oYjBIh2>I*&cW>j{&Kcjujrp~%atuelF)F@m-iq^hXgaHJ_^iY* z{OGF5b+S@N$+?MhNW<5k8XzgJqXQ1_otMgq#4fyA;6wU0Od*9XGYgEdw zB>9%xB6)J;u{-Waj(w25qon1EiV&ZjVnWdGuey(Ba#=_z1JzQ<`(l>nlk5>%Xt89O zMonv1kN&SFo)sQZ#AcGbZax3t%&hTIFm*XU0YK{~uWMjgbgXo%$7}dLoSYN~pz8Nu z55*yVY~70r4p&2LJL+VqU)h@8mkW97&ZU~yyM1w9m8bkor}w}j^F_SsaMGSzv_V;a zV%o&>&)Jk=jnBNs*hgK+rC}y`J$t)wi?|)DyvL3_~z*BVfB+9KE3wRsXK!%t`kd6=H&x6 zhLq`$TS1}x!^3l3{VTWh9yx9KL^-7MA_sWI4#h0Dgz!6^EQ4P!>=|NSvLg4OKJE4Q zj=W+Ynv-abf_V1wl7(>eyx=%*ZA=lD59#4i`a;2vO->C}$O4r646?t&fR`h8snd^) zu(5j+4_bx*W@*Fm_rPi5kB+7Xc86ViYAOfH(mnDi7sO~)!VjCGdrFx!%ZCrI&@_mrwf9CC%7fVq2U52X&q_o*6^z?YKW#R=|Mv01 zsUqIB$igUEYqn-}?-3f0pyu#@Hs9erR2^>GW+7JVgi8=B%~jsLZdoy=x?@MRN1_+r z{kk9QqJ}Ls99=OfXHCs;ZIKoyCv8kS;l3vc&GonQezf4v&v40V`TQ)uck?v+pd1k9 z#PZ;CaX;(O7P^>UX;d+@sBcS5w)2qLn|YYw@WP56a&<~((VfblU--51w&>B#@{D&! zx|E~PQ`hJE!{jab9;gl95SY+dUtB+8Ar@pth6xOW!iQtOwnc8mp-SFvR!q&ztCbdN{OpjeM0)&o8XJ_3Z?F^My=$Nl5|9L10*AAUlDFdrL4FRmRJ&qIzUm zj_~Fc7;Q3-y*1%HkM?`aW8ly7<4*b3_anzO){ca?2@5x(qdRy%?%c!JBxZzN8-66B z&LZBZI>z#-HC~v1N6pDRH@l-zf|7?nO3p34q=c=nbQn)?;LLA)|4`N&hf!8`=-?hu zjg;b2x;t@em67`V4lmPXF>IfpuI8TEc)QmfChO;!RPJo0eAk&D=bx*@2DnzFm^$tx zwx8|vWW&9GeTk-5o(D6$teEV7kv8jOC_f>az9Tq?a4*&;SLX7DwDLOyxgaR55*R}lKzn1%Wgl)U7ixY23>cZTNlSvlm|%-yG-&5%!loC z^*gbG%RGxb!ufKWDF<9iPqP%>IR(};IvppE$9zY=$)WpgJdT?S!y3_doSm=LjQNk| zAm8-23n(c>_GGDKSbH^AeV6R+7S%Eukf8fL*O4`K*kXNSLrTG7c{Zx2=)y~&n^RIj zq5WG&qL%nQXZ@*Yh3@X67vS{YV+c!kx1w2JN%;H3a9)l&s=~;F`HuaW!De^bRiP_F zlG5`QhKCO$aB&A;Qs*D8WGojpWFeY&nxrK9u0Hm?q@$=ozJk?t)l<_ovRevp{n78A z?CZCdBO_hmZ}wTGEC3#@(XuL=8y-K>JgLO8eaHBfEB`QyT1Hw`%F@!(EA{P|WFbjx zOnX@Jt`62kThWf3Ni@?|JarM}$vM-;GW+^r^F>;_$?RQX6VbNWlx$giFD5OGEpPgy zD9S)^uY>j|G9O4M3@M>Dp(Q+?wpeReFew{l$Iz{xSpRTWi*=TGu-=fCrf;vI)^h){ z_m_{?G0Ji`ci`<|kI$FW`4^C`0Xp_kq|A=6@5?V-rSr0H+Ce_{_e)o|TLWVRXI5h+ zzxPR&pH8}0&cMx~7)v7`Q+*TucGRmX@8gPrC%i`Fsj-{AOG*%)nOi(vSo18}Uhw+xy)>|>?JtYhPi%#Zu4D6kqd47E^ z-a<&tyA(Kg?amF)JeeY3`*sp#;r%!k?d?d>LNRbDQQ(X z{`KMJzU;5k(k#_=eCVB>DNIuvWB=NqR7_LCg8bepM5bA`x#lfNa;f~J@>(Rgsc;?R zry?j0hht)@+>AccoCkH$&%4 ziFo(!#t}IG?#ttQ|1R0ZG16`YxU4IGrFG(WJ?FonJ9b`sUYt*v6uK+2p^h+SJJJwBHti3 zP3+(6=`PAV7fQeBNi8G%?x%93Vt>ThKLb^B*D-OXpI=%MSES<-@T#DQh(=1YL@+qtq6w{)QTpi^!?S_}=mFDTz-y}B@&ttDBN@b*3CbIee8c#@!jE?+2 zV2EkylyMEGHUiw#I$%EId~I4<)nosMr&e!KYk!L*1iJt02Lhtj$2`G}+=6S@a|8-| zW2g0q=OK)cprD{uj&e(%I^BZ0w_#nSxU)m|dJrX@%vvATJHNU+wK&*IUH9_x8o27b z_zFbq`@mQE4ZvC%_}ejIv!Ab@k+dYM6MJ7&vaSSTin@Bj8NJ2JPHoG2TgFU*c%B!j zWRyZV&oUSmT|IaNAl9bPnGl;y5g(qF@)-ncW z_=3uo7OO)m0lJGq(l@5-Vkt`a4B;v6Cv5LPp-bqH~$F!1QXvA&d!dMu`!(Q31+v4u*Av}L(H{C!cWN@ZSXM{IUb|pvVtw5O;rid;X>SO%L)7~IRg}wH-jvh(rieK0c&Q1OX z+*gMqSBE~jVG|=_B7IPkhcdem>MU*JD%Aw*6OjbMp#+y;Fvzc##4<+ELsV2$98Zsf zifC4PHDI8s@D=Nj_j!2pGeX}?D~KkHKsW@wbo|0oHw2Z#J`___Xm$r9YOp~b9p!Rl zmUQSgwZInJHjdZ{30inEJ2)?r z+HeM3+FyzK_)jP|8&tdu8RUz&>g?h5xq4HFdrNqovBh>Ugo+-6^HVJ!)Of4A&Dpt? 
z9n!4U{LcPE8U;*q^^<)clY-G>Vzeo?{(xlPD$*l#XtUpKP{))m@19y11MVR2hs4sd zpAu9eLnBkP)3Y<=z?5A9m#LuqC6hOl+;jdon@Z9EJb@%)?{nWKl9E92b6*n-6Q(5| zikj>L7J+UXwzUY6{&^bBbdL5@of{E2T1E= zWtTA_8rH_ljA?OHFBQ;-i8J&*A?)>kheu@3&NM%~mrja_Oo64Ch-1hwd+2pT!#)N1 zcipYVO1`&~ZVMIH_+#zgTQcomMX6?R$@A4-k>_SxGdb43$&w~_Da;xPk%&12ERpZrBu4rZ~2LOI~wQ6 zN>}_*r3foMrKvkR^3f5n8Kg-sT5%wJOO;i}Yd^RbOh4+~G?4$~)xzQ%}LOP^EjU ze8a!LSOPs_(Zbvgc}%k@wZ^TpQu7aCF_5c@^RCgsTC9)%K@KINq(q#7f#K=t_eD8i zW|p<56j^FS+f!EmgWaFa^I%Xqzt7J*dV80FzEUxr6-?m_2t=Cl!v~-4r25JH!w+=3 zzwn=Ml#na21NAdw*niH~Y@(syyQEboNTvG=aK5V<=igsDxc6z0XUMyeFLW$&id%p0 zr-&O}0-N8$L^G_Fk>jo@F_Ip%>XNc=Al1>R1eS+f?C59 z3MbD{IkD#2BSHf^jM$@e+i*Ryk`?757?7AQRtwBaCS@jWCOvuzrez%YhQ)jF)-|je z*gvK~Q%LufU>PU}<}#*K5xf%hBvk-UsRNkBT;o6|O8TC6hJ`+ki<3ZTdr)d8jOVgH zxpK;0*jvHd?@Bq70zH^P>Ah1)9Y6{4zE{;_tQABQ@|{Mzu+=&=RLAwH=zj4+lN&-q zsO*kL%M?)@VXK}D%)qk^61|{7k$e4>JdpvA8fN2(zf%6^D=r_NTyAr=Yz(Su)+u;Y z<@W0P^0I{Dzn+A$6#ZdDRZ)i%owd`i;Adr&Pb$D_wTAO|Q~@3ca@FP6_yuTub^+5CVVQ(n$`z=Ui1)&Q(#o967Dg@(1Vzq$@UW|A*T& z_-O(f7C+ObxUqXaVAAJ$?H_0}_9u`6(?3xJtEK;cV%nNjGi&Q^QB#fIFiexn+*`67 zsNBssuDgn2JuGUHOK~-COUk`eztj7r^z<2<{j?`nWFoQuRqizFkRM$_=wgwCyAMW|Hi5cJCFAfVTgG zu;~9G@1IUeTvw+e?RmVhoHhJq!YB&KVGpha-GgZ9&Usj>gNJrz@yFkJ-(qP)M8E?s z3blob(w|0@^!7B%pbjAyX-3r7KyeadLM6oF&obz+SW-kdfZxBm-1F#INS_kg1aPO{ zMu|rDAXGCYjIQa%2Azk;K!bvV3zMbhF$V_+pt{IbqUxT_ZY~po?b(uH%NIDG%czr0 zuu6Gfu>cZKAz~<|fm)u2@6n4F@z&HTz|e{fbjNHtpwGPt7Gn))F`)(tIr{Aq5}Q-U z9MT?H#;Rr}6CHX*yB$Pw}_a!`UUE3`GB#ipY z{nI@~ja(@OWH_#~ds1Pe1vy((k|#oSjV<>IS(f#kl^@cl9!wc(#hH zhQUux$90}*k#$3qn`fh!$r|^_-rL(fKKGW_G`W(eYk!*3d{9TR#4PEk9T(jo>Q`Hud}n6b59dY>Eu0yNN7c5516!Zk%xqpXzz4ABd}&0maK1R04><990PmexCE_a zJ;1eIr(Q(xd>LGOD}vYNMrrV?4%#+eno~7YNxyP*N%)i zmw+VrF zFz$MZ$2tqt0WMk=-=ZV$;t%^s_tzo{BklrUWIV-J5ZE!=26GNR1@!rS_p)z>2zy&% z+#%0>g$;!gf^aYQj?%uKG9y;XN=l=mN~&{p(iys8O{d?Ae(=G^?y&zXp)@b}yF8G8 zW$EJ=Bu}+?*5FKF!u|-`g?4WD6$H;8;M}rrt z_BIX-=|!SA#M2FEZTX|Lv$>j z^#;C4=L(7!Hp9*n4OTeO$%18s)UnQK(2lg2ri*|V+sy4-m%o;DUnOlcwmvKv^bKj2 zY?)!<`AqvB>)`R}9Aj*z1Inn|b#rreI2`_O$LmHZdZjKoANU_7;Gc!y4;9G@hCka` z3KoAGYS1kog7d%pGl1kSEy}u_b{kY)Ya7nnN z_Fyv|C!241?XP-H;C~MtVr7^+5^Tl|SI(VUK>zxcj-i`DPS>93Pru9m2VbjH#zio0 zgw}t!hseGJ(2aIv9BuitZ<4RM=Pn+sp4SLviit z=sAx1@?8Z<`JN*~{1;_vfz5EAHfVa{(Tkt&HGl%m&8cHg+=P$AxFOsp-ojIF7rSF@ zU|&vg^C2ak5n}OVu+PP%z`nh3B4%H*Otj}HO-4K$$c5BswoJeV;d~w>9C(^}V^tq- z8&S&%L&tJ%zjS%NR$=Q%`aN3__$+|EGv-USrV4$CbYs(r+RE0Dze>9tronEj-`K{2 zmK0xdw600*E)MTM^~^waY9$RCG7Vh9GuqE{uJzJ;7F8_eX=#cwoIh@OnTa_%f0$cd z=1U2R(lt`=@9t$hp?1+tpR$_Y%07jYVlIwLcui@YC0ihq6o08pv36o;J0 zj+}U_5SwucPcM1mtyHWMORWw0LHV9F7kQfvNW|~c12=%?nYjtbf&`5p;8)J%?&)^_ z#RxMWOh&6f)-_vln7cvNB~UZ*0#W1Ro%jueq{rN=6j>xM{PhaIp8%Ju{xhy zxjHyD7HW-Irx;iJ+6k~BS)h(Ow%`d4dyEDH0N3&W-1X$;Ka)sfR6WyLu2&6i9{wiR z8wNJ>I=1QrRl~U&|J|nmjXCGXfq@SIaQ#m+Ed#Q1YrYtDvr?%huQ66P__);zc9+fnjK!Z3`Cp@l0ojw-BuXR7ynRfn zK!@6rpJ=qnTbYhjq>wa-;tA^|uco+)uPmmaXobYyMBk84tFk)eFO|3^G%Zh~O`4Zn zU1v%zd>VJR=*m{ZRpR={y6R3Y={;Be`;$c<&^^R;N34hdSb2uNPl_k(@lSfx-7bYX z!!jbG(=WKGw2ovEEZC4j@HU79-v2HCh3Lh#AS#g%i12nYe#G;bVWdjbBV`$G=b`^% zgUchB$O4^k^>>fuh(0Ls7;v@5>>CDUzhvSb)gC((Z=nRkwy%Q(;W@oCNUy;=^TTX+ zCpeMuCeKFU+jvZfRA%VG?>m=R+=>=&kQ2cB_KX?4i|ybV(tu3bMq&@Mp9!bOU`B+$ z@~yZ66@U@!g~$~%S9m&x@m0Mk@Q5KK;diwnBxH=R{0uL8Lhvh&sZ`DJ zh$lUY5C*vIb4Yxc7USZWz`&_hRaf(>k>ta0?%Jy;PcM?EP#FxH&b+|5ACh{t57rCY zOj_>)W+VD^(;2^fe5x=%U>_W&Nm-=O@Bd1Fhf@4*^CkSH2<-$lxR~HmIfpm@AxGWM zMaXDwR0^RC4DTAkcs=sIndT=Ph@-BQl1Gd*M#mYsPq_=>w+I$8bG!RDW*oHBPuZ}% zWJ9K$=jpSkQ<;|hG%~&ULQ8stB8TJ7#E{K^fCvHBJ3@ZJop)YUbu9|VJ6H9X+lvWe zoecdBq)-N83NNL6{{3Zj&oK>+=PWcgNh7z^_M0&n>#?K!lu_f;ti?D_!-p~z9zT(6 
zLi~@P=`u4S+M+@K?D67YWMzz)W#?<-Ts-MDDJdd_W$Xwe-M6OxR=!^pH5>>AC^%;g zuYttF!`YFKkJ|Gs3qdng^d*a4k3Kg6_ax1a$u2T2DV4Mgn}GP!`^OQ&MHSE$$!R9O z5N8Sp){8+-qh(OQg|JIuX6XD-|%KzbK@)6?O?!C2A UATb|&2oHkQFu;`DwEyLQ0k0&QH2?qr diff --git a/hls4ml/docs/img/weights_keras.png b/hls4ml/docs/img/weights_keras.png deleted file mode 100644 index 73707f63162c7228b83d008e0efc550894c6c7bc..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 10969 zcmb7qcU)83x@|}(A_7JPL=Yu_C`d2T*(xnmMMH0b1nIrEWV?|lA{uI_mI%^&3lgF< zfgsx;y=Bv4q=WPTce&3w?|09=_x;{|f2^5gWzMWM=lsSuzA>_%+%_}dJbC^k2n6DU z8|qqsKunduap?praAxqzWe?y*Em+Sw_^ux^IP89)8_4v2u)nuou(yZHrBJuPAP+xZ zMHxjIrE8ZS1_%2Gsmsdx{GSdoeu3_?r|*#oz>l!|8`=bcKpZ^34kl1m&N&cBQXj5+ z<6d~q@?^vZAq4Jty+7j*X|el*P_WpglQ-^tJMl*MjsQ!TUOSWC#0~uisdo$lvYGU{ z@@G|lZ$w?8_9(y-A(AgDPcYwR{&YqK^8VZ(*G|{3F^*IVU;7pwOc8fN!w_4CM5l@a z!X#pF&>Ug4GeEny6>%1tiNg){op)n%Mj}=C%z)O?`2WAvxK`qbK+6nykC z7{0pLN!Z-~^73c7Z<*iO{-{6j;@Be!@@NcOo>A-3>bl4VKfdJg8ebR1&^&;vH?+BN zzX-m=?Yj30UqKtM5sSxnyV?(Di%Wl5O`?S;|0&!n@2Az-NG<+))3VE&8XQQGG#T3w#zj_n`N#xPP&>>Fq!ujCZ@zwLSN&zg4@}>y&W$? z2T(ZD9g+lUmT)NMdKhX8u>7<{+Y z$6PMh3lVG)7>a=gsL44{a$>b!AM3BPO>iwuDfviH1Dq40kC~~M=qMhl2{o3wN{XiN zC-7{#T7soe$&eZpXo`cuo@te-qE@jArW%vxNeWbcl77ULFDbWdhGiQh%p}~`zmxE3 ze=5VT;>Lb@G@C#7+AZfp%?fB&bD*~|%AC6x}K90Y!j?YINAmF(w+lXmNnDgNOAK%cC3t-4B>}W zT3wI?{*~+7!)&PLprr)fg@%b?(%9Y}BplQzE~}%_`ei5O%(!6I^IShBcUspHmp5yU zk-D}YgiBz#%6`P1pLSSvwvj2^tvM#Wkgd{Gq}~-3w4uNcF^3TIoE{0HXm$IH?(s2I zvx}b>b6LC%_sM&9j$Js?!dr|7e%UAD*;yBc%vuvMT6kv`&Pq?_W4~s8%;SIiuUFn; zm9k7-V6TPNCX{ViqQvw!9p)j((7O=eCMP7rT&lzjt;a+ET{2u?>9NS0~ zN{=)|x<^%_wik>Lvu7h^*5u@x`(pP+t~QzVCbzJM`!%a(3m@202?ZV!FSI|0!eZ9) zPcg*$$FH>Frf{^wyTsOj`+|No8zW=mFd5VeQIUvw5i7X6E?40& zerOZ^-0Mp8NWE%-;|ss(6BA=2(-Bjf8_NSh*A|(~#bcWGob$p!YUr0l6KTj4c4xrA z(COh*_N6=2Ly{Qvl11R1`#Qvm)6*qrDKw{Hx3@~AN48HBc%-?b>S$JIrv`p8vpKo8 zVv{k$wvrX06L->@BbV9?YjD<@McJ|2IF?R8TpOe;O*F6wbp6V1RbTCI7 zdq1e{1wCNL%RZ6?4Kc+U8=X9$4O3508C=?NM8kbb?d3}ON~@_a9;ANsvCmeXAJ2f{ z`1tm@?%wFLs%5hIU5DY=8~cI_p#2L?dY}Ge-d^2v9zo(m*5|S#7x@N&Nxc8WScXfN zz@O=$4_S9_-=C`AbI!HLxNd%8^r9OYM#|x81X~1MIb^0b$46Xb<>On?5o~Oobl1 z-SFiLdTJl~Huj$`=9%oj@V?@xynCt0?W=c+(A`Y}GWc9q4BjuUZ%ivJXre5yV%~L= z_pQ{%bm4gp!~IKFzpe;ElWEKoa{7e8)?CE{d6wjP;ZZC7Rw@3}G*b)DYl zV87#fD&|&*3;cOeghOr~sg-ra^DKXrQrG1ArXIm1*z@QBhQc@DgOGmDFmCZ3{3-hf z*RL?jcy7s>Tv?gDzvDhd^g-#=vajB>SafNKK7d(VAL-pV)HmTy`9fEA$r6V4dsY#3 zQV4UO`zvM+xk^JBy6x9tGc5Q#lsou%Y@up+Zm%yZdA@mb*=#$cZIZ;-A%(U%+>-4i zo@*@gm>hu3C`%7bc2{YNuJ&4v%TuC!$zoc(yG~T%`|-;SYt z(oPFUkyO*9{3OE6dG?YA0YA*7BNEbgSk<9Wky2}_P0GL$^;C#gkJ7#!Yum2AsBOVt z#mn^Eg*hPOcq*UchmCAgf;Z}RHQu&@YqF-k>*=r_=}|1P1=#(;xbx8Dq|1#$z5OUr zanlr~lyAJDl(ppOne{NNZJ9>MZmK(^Kee_3-;LcCiFuW$met54=JV2ZS_kIqnH6)* zBOAHkmJg#RwUzqf51V-C;Fk+Sq9AC1B3*D-1H}=fa41D*bjC;EbW&uD_Tu)!?Qc;A z#9K-`B>Ta6(sWa`2w{^Sc}w43%pxcp;>$V7b!>+GteR5`RqiQg{A&8=_}XevVGy0Z z0fU+fS<$*;9T|^4hDOnF%2Y;~J=e92pX~5OqbxCx1cqKPhAVl#5S%bao67#l6dRg% zeQ0O5a$_;nJC1RML{s$!-H*JLL6 zZ$}?)OE^69*D5MmIZIJ{E3$Dkk{w)5ceKDd{pR^GWTq9}U_>ydTu0IIiIFR!A&Qg6 z2h$OUXGY8{f`-T9@v>5_OvLlhq>Bz=A1Xv=pcbFTQojAJ`uHcy++&sA3fKi%4Xk5^ zIDUt`@8fcjZcTjlQ`_k#p=Y&hFVoxGARs@acL6`N_d0$ySnnulei+UVl@?iUa8lL^ z#bb{OHlrm-)|hgE*-Sj)im)GIfXaWnaP`6 zrKSD!v`D-!PLEDAoTfy{z=#9Mw5ko=cE`hL?J$un(`(HyVtE?=__n-VfU&mmHAK43 z%IGR8mWVfy$mG)WoWI-4OL{`t-qmn&mU79G9B`i6BUNF425K%4(aoMiLiS~K{LaGB z!`0^bV11@Tli^>egHx!f3hz|R`?`^ESY|T9j8w7Z)?vuqEie&?xS_bnkq=Q+tg)4R z$8wo|+TsGP1m@Dpx0x))nOt*U*Tg5Q;p*C>?-saVMZr^czIB`>!t{RgvCCz3GI0c- zJ5G7MV^Sp?>$mV(Dt+&S&d0(fq5eUM5mBJy;Sv(LG|y728jJH&b+KP;oPE}&VcT`V zT}Zo}R<-gyivJDX6)q~0FG^R$5@qSh*z%XP{J-fkd^6Wqw?8Lq*-`{I)Xg)3qlt%k 
diff --git a/hls4ml/docs/img/zynq_interfaces.png b/hls4ml/docs/img/zynq_interfaces.png
deleted file mode 100644
index 9b8dc29c99bd7b0962edda2a1bd9b4361e9a7cfa..0000000000000000000000000000000000000000
Binary files a/hls4ml/docs/img/zynq_interfaces.png and /dev/null differ
zMmR};{LLu@da45r3@R2tB{pyCvrL$!*WtZ z1B$;TKWWZy+*DGF7+!xX431U-aGYZmeC`hwW>n)6F!;_Fr8p+D-O;}ahBsw@5 zv3lyE7Ms4elUrIDQn;5=~fN?!=o5%aBNdJq96H+Enh>g zazy&)KUx4g32-jQm_~5*Bd?*Vz6C>v=fFnV$$Q@s=@7KsAft2rfeDwA88;$-|LB{->^wO85#vJ} zHMAn(5XrXA8=9T6hRYWeZM2;WaScAW{2`3Uu;EvS4iWp;J--}j2}Yq!w1>^4!JJi9 zL_h%?I4lVfQq{-i*u}9wzPbYGiAE~N?xoN!JVD#6+KWwkI|1&`h_Vcm1iKc;i`Zpd z-jFbNF-fvH&!APqq|@%m6%$O%8;9?aXwkt;n$(*Mx1*8HVGP=C6)q*UMzoaKC!s_^=~mx|W>3!f~&Pph`UZZnJe#2!>NO>TTlt$oHBM@)u}>RPhRf^%k1 z5phfkMguiy=vuLoT(hklNr`4r7uifUccs#x5ugimd{Tm0?3PvY1fGhP!QZ3Bs7JGF zSEqoUkd$B%eLfS2#CZJ4>vI-k(y#8thT73{q+@CR@5LoQMl8a=g#8sC#<@&X{)r}gXPf<`-Kl2 zi_uv&VG?6gQ*T;4Og=Ue=3lR@5^l@6Nfyz!QrYAa_vH@!OO7lMX?xR=$R;)>5;oWI zbG6SClL>1-oW@C77LSGD=WB2Ukxln^SQTxCtHNLvr79v7f3Q`IP2rDdFt=hGnmsu0 ztXUXckckZ?)p+FPImk?NM2{-q!Gr&N=LfMpW9(4mrP+~_kwm0nAwpS=sz#?61JWL1 zFk%!`9Yy0tAX?9(4&DOXz-j*mIBhI>ym*ncC6)aIsK)*p_LqPw4m)9r`N!e%dt z?!e_8ChfDKg|+wH$-E0qh142;No~D+Q@;>!XZv|G^O2WoL0P>QwJkx>iGJ|NR2(vj z0{tF7It{tW7Shby#Q2ggS1FxV(?%o=bmC|c9}n5bQgo*&H4}lHJ}nm?+F>Ms~fO-YdP$sT@>b}WAf9ME~Fp(&z#^jz%}wZW0^&l1PF-!u?tzUIn*@ z2jcbaL$edXsYd3bpfC?Zh#+&*9YkzB)Nv-S=;mMsmqKe>7`|Y*Q%}22Nky1K#7iH1 zMaY7~Oaku2KX zV+!%+f)X)`zsHY~j$8H0N7AuXxC`ih2CYgE=-p>e5Q8mWzIG_wE@>X zzf^1wdQe%lu@_tqW&cYAdr5Hv=}*5q5t*rpc<1Y-c=Xv%#DK&4W;batZt>gf@`{F5 zYIX|#@Yvhp++${p#VLnPM4i)xPv)*BQZU6&0nvnP?pj+^A+{$J=AgO7hdR1nX=Oc5 zopk_)6(y3!BQ^{s>lXr}MWp(lHkFT&`_mv_21+#(8CAj6}(Za>@+3$+PFOMDQ2!7Rk&0@;m z*K4Xp9}%15I)d~5&AJ+VysR9hbzb4(yrQ^CJU7qz#hv_2j_u78gALqDw!$rDYHez2 z8Z`5LwFi51%{pKN9=PT-+Ed}B&wj!IbUtKZwDQVqqu}@;r9rnzLr1s-XqX zy!Rtgk`nOlcgu0xZ;!|L;n_$_N)(fPUO{Odx$X>V+(ktFewp%B+t?yT#did2I)_Xg zhM5%Dbob_0OJTS1G*r8T8`eB9^U5<06Aef9V4E>+D5j3h$8>tm14iay)Akx79MW6| zT_Zz9awmk;^b8zzz)+-+*4G}LPAK`$cjn@2=sudAprU}9TC=Hb)nXuCaZwTUdcAUR5Wz_;^UF!VK62fe z(U`6=@q)jgwy8zb+Xt>X9ao%jsEEJp{#Wqk*GohvIajHY?X{@*>Tg62c1*hPKGchm z@fs5T5u`Y5SozAIP+9Lp_R%+ENM4%ozPsY|L-Cg@P806HF}(8mPZ*V-DTwXpYyW{Y zTgzcI?D~%Q`}74uxcBmtapjqZWB%%GIOLK?FeE!!e1jdqB*m`AOJ2M^8nZhNxadA2 zFeT4}4S_%q6DLo?V^#1UiD@xoOzP)*v+S&z*Vi(GZI zHFpQTS0{B_AeIjv44FVgwcx&^MWvs1$5&$DR{IZZZ_)mlyUNdC0+&IIRD47#MT3-7 zLSbt3*v!bwOh$<6TxYFwj^VzJZRPbs+wge(IC|+MCh_UOt+~=fv_n*boQ? zF=>)QDx*4(%I@|Mi5*~p(p_Ni$Kx>TaP2XLV*lTBHi@ZKc+5v{5W8My*l|y~7dsvW zN&8@}f@Ax-Lhrbj*prY@z(Tay?ZdM5+p&6E1y+&Z?if~5SjXfL706tNPCk4hv>FXQ zTd+=OF6~XQOcXrOdfA5Uly+xb7Q{mTIG&GlItF{)@pf5F6IN}C-mj!eXbSOY3!INS z(kzy3EJaaeeV2s7Q1SpQ-&Bg#TRT2)g}BCGC6t~ZNo6m=RVqv$(Y^1k!TJB3k1tnM zBh%4)udjH(KKVQk*{`0MkBgsLL?3CZuw6??3p|dz^ z;*`TCz+yIFQ%SWjf5jun_XKR3;#rFs-FiAo3J#}C7hybNx6MUV6cR2VWe6GO)pz-I2;MOb3su+P1} z`1VSDqdyvIeXF?zKKNYpBz6T?37$rsM}xMtg?OYDRTdF5pc)$APkMhzNbHL4yubD- zsR%+L^;M-cirHU-P1W$!6{9s25VK(R*L@{^4rqf~ql4CDg*ho3dPjz6aI}SkR9ATX z(7@CUNh%T&yJNp36~3)ntAj>wM(xVaQM3GWgsL_oEy)6#%?hKLX9|_1GB`nXgBlw? 
zj|)wWPBeNV&?e;||ESAg&7UlY1w32u01%5L6$y#mvEPvj-;&8hZM0(j3)jJ4^b-!8 zJ`-pD_F9Y>I~nGLM3_t_N*6srV$gwS2KIT0RO)I`U0#GY|MMsoFPslc;o%thtG^-a zZ33^ts8r4VBNs_35)!*(zZ(1~G!(rN0cSOqKX5dRH7hV@O*LNqem;&o`CO!C=fZ5Z zNJ1IpU=x?gWJ6xTNF03Bsd)8=d3bfsGC16uu>9_Wl>RrJkrFbY$jTxivAH=gFCdgh7bt&;C3~mWJ@E4kDG-5 ze!c?Z#*fCj=dM9($VcR&k(?qSu^(Z;H5IKj1*Km<3!}FlxBuf6Bsv^uaW>Eq9>}*d zNJ#8jCjeKolNumt`1|W$AyTse)jz!hwa!GzWPOp4*k3SUq{1yvway4v>1s4CdIf*^ z@4FZ~aU!)smG$r?kw|ohSp9G{Hz7SU9d|wYEb8XH0AFnpX)8LeVB)xhgv7qZfRPG| z1hrO=hBaSrc{j%g_EWs(pVzQs*+PAB=!#s9H}sPx($SonVW-f zkjKABNbDJSWlCFuBf2f8^^uU+UogO=qWl<*8nm{yN=@Y#h=J-aXHwDWqsuk-svfn8 zYPc^H8?N&b62Ei|EU83CK1)ZPUncmys#I;zXjCxhwJ`D`OZ^q}yxL!Ht8W`3ROMA| z5fKKm8Zx2qNPPZf^Ig2)w2??;KtgZwB_Yuj25y=b?x+uVuySLqnBbUkTee2q?uUm)IV>`&llF_8AKt*izMS8nXp8vVUP8%$O^ zHZA=DAN=cfOn=vqD7N}uu9uw&E~h$7t^$`Q%e4jd^nxW zXlZU1bDPCeKQtN*l9Q9^TM2S?>lbi7)EX@s8XBYpK_V{fA9H{0$D|S~0{+nTdLt5& zjIdaF`BX9UI=pVcbd*$S0At5bK)IB9feF>!m-1QYGEh`uKl%yij z6C|m0gV^Jn%x1Jk+OTQu3VinND_Hp5XK*&uLuW8x`e8@o;3JO5%%e_((P*MpCw+L- zf_(Ju8}a44uR#|TGTa)?${T{&r<{Y?C!RrsF%Iq)=WeY(z9+Xpt+qtW9BP7&1PzlE z3re+GOTu^v&bs(o9C7?*xFr>pty?R1}k!wk-sEsD?BtJ$(=%ICbenDw+W$ zm7zn2z>?@dTPP^545{(vmj1Qc)Hh#P-%5-vUUV7&LzQ>ZH2E`)hYQzO3m@J-zF z``_Za^NvMhT`iG_0X}yN=6>Z(%Fx?>x(!K5r*YPMJ~^T^{d z`=qmwnL9+pG5+?aYq5<;#$Yf;W7$XoT>I5ut|e&)Nq6#z-qBoOYy%p2y%;yt1#RXEvj@x)Luu@gUxL@hQxC|8+Q$ z_2OEw>+HBj^gcWhC<~t0J$ix=QS?W}|LDBF3h{$YUMeD#2&}#`m~06|C}YKYW5SE( zaA3h@M}%g}7=ncKe59r(leTCU8g9=}^2{P_DR7ibCR-i$O~&EF9VNz|F@y zu0Bt!-NBeV{ZRa{&4m{~pNnVS`w-84{3YI*yAiK^xeSwL91bH1qFr3Ksj&)m)!X28Hw!ZzYdP@}OM8g>`on1Uh0x*)BN*l~ z?D{&bl-|`YeK^t^uPMdkrD+QzF<5~_(g@N<97yHIrk>clBhqP8iTiNBS+{}8fp0C+ zl4jAF=TuKY^u_T%x<-UF14qtCBxjF6UVg4vJBW#8Ps%k0KGz?Nz(wW7WyXQa&mWRr zT@nL}L9MB<8EezpWmvUnzSti-X&N4V=@Sy31`?!=C@ro-RargUEzKA*Vl4js=2!GP z3ZV`IH-iBms>&--N@S!Zp?lD*W1~l)bk@i5O}pJLl-B{JMfFI}QVUHf2ELzGr-zkV z+pj+TC~mm$1e`i;1kO5S5>A_5i0dvm7H>TF1d#!~P$CvvVj@0y`+1ym_yM@{nv1aL z`#HGc%voap^Y>oGjlVt%w_Sd=crM1rZ@z}ZM-0U|hmOPWp=P1Ief`0kbf4pK`izk{ z`_M^r?szG+Cva*n#o=zdH7a<*)nHkV>zasaE&~voW^{i>R!W5R7kw)* zBqyPuFcm{bq#$>wodmssv;l3W1i{`&TCJX1@dgl2p}e#>dW3~Mn~kD7-9SwOi=BZ? 
z77OSS*syAua)7`@r6-~K!s8F%@q2H^>Lm-JM-;4Ez6g)rc>~_~-xDyIEvy)k_Tdx9 ziZ-snA1^(de%FfqhT3X48;HHSfQ7>HpMJ!jZ@d_5mM&Bx2_9E7K7H#YRF-V((Q9cJ z(6h6(i7VP!Q&Mrx)O4JEU>bf!La(MJ*lE#tf?-U_x8kzHvPJqc4oHQ~Kw3tR0yk#z zYK7HX28$&T_PnuF9+Y1e*d%>58a)y+a*>jhjMQWYg`^&WL!`ANXCW~y54pp~BD-KT zg)zMdqmeUw91=6~p*NYSOz1h7XemDl=|hp2o`hAN&hmf95d}&wVMArXMN< zXHPBAkUxl~`f5~_7U9ZYUx|QUA%)>1$C7rGhb2FLk7xdNkBCdp%EdkZdP>c!PD>lfGxx~%NG0~G_0hQG}7EY#9MRM;?@VB#No%E4kMkjCvL(l(tOlnZY4p= zt?HS>6EQT!1b+u@h095+R^j+@DPq6DqX^k}2qos_l@O?)_h~`eh-re<+hXh4(|c0W zyGza;3w!<;BAj@`Xe8y1f+Ke%60=9_ad6NzjJ6~}vVv@BgFDRm+DSQu$jQlp*=iN_ zGG2{^&4}4{J$`?5F1>RVURY3t%U;}wD_-7+C%-Efro#lIj>q$uLEe0*AFk0#3 z4UoXy38iy75(;9~l7*t9eCviaB5dEb35$N1Lvck)UralcY}ttOXV1Xw(K-0f-|hm> zqRY78%0FPt#3{lD!n!A`|BvJ`V4{{7f{Ku5tCvBxHew-+i$|nE(EszvIkb zUxbn4CgZ9<-j0i}zg@($X=ml4pGaF_fhD2@F1z_2{Px;gFn!iBC>%41G_Ro;J#nh& zgJGm*WMBr}cf!XrbC>l9b#p3p{BnVqtkP;3c_NG_N_?;$AO@s7Mk zf4NU15>&jO20AGD5!s05i?l0>v!&(0mOca-sVT4$vBcvQ#t28+Fe%px167jk4L-cQ zuv!pty*q@j*EiwP7dB#Ztp~<_8;B|)u`32GsfZS_PQPPPLa)b-rp9VvH`h@7L=v#s z`9p=^j-}C&<`MIz>JAoL0?xbi_qgev`-H~Pahys#$j%>zN1l5RC!G0fG&KZJU0EjL zGjsDX_2Aj0)p&#!67aQ&aWpYF>+6UtqB{ic9~s5O<`0RL6PXbF-T-`F_YR>I42J2L zV&dYMLykTXNog5k9MQ9nK8&*toq)?uIRQnRHww?OJp~tmKh%m9rHWI;NfT08rV8yP zh8BNVm~vQKd0~DvlFd6kCpuhj=*YN0z zAK~RWOY!BJYW&|rkD;cjQ3!_J1DjrM zeex_Z-X*V~5Y7fCnwuKMG48`L(>)lqH5FL9VlnK=R^bh1vZ%3g(U0O7BZ~-*ozr@} zEt64{7`Wq|FVAGb`3=*v+Y-#U`|*F_tsmCo;pg5J=@@K=Gmy~QQ)pCbgo(`OtW%zC zQm$2aFERY#2&U&N&-vB526#v)c2|Be*KUXCLrFyHu*ql*2So!R-g4hxpyXFEg(aj7 zfh8pySy>sv6xK#$+ZA+LHJUsjyu6@B?2pPcij$8(SV4gY=g zK5;BPD+l8yPeZWBZV0|6&)$CxNhuEe@1OVKuInzvPv6ZaZL1t( zCLJi!F?>EBNx7Okmw2TE=0u zk%-WT2$dmEpCj-y6r`MGek7hPAVLkE5Rq(<-goRdI|Com>(uyWVVyQ9wZGWjjqM!JmuPK9E)wo5DEp6J9Idb)6>QN zszvi~{*e=L-|sKRkQ89^n$`IDjpuOLX@}tKLq_4`DfuEDgXbd)c=vqHHqMWcTG!j^ z0$5q@5`6$COh`sWQvfrE*-?;c!j@VeHdc8_ICfu-v7Q2_E)%Yx8rAC;BINUuW~U|2 zq+>rLV&S^T{dMf*(NMewP1R-CUb3AU2!4u>Ch7w0^_4){yQko)cV@S`*#o;8> zuetR>6pI0ys=c`DgKhX|Wxa?$v@j7m(q1~WEhHrR1#;?pLhPe9^1wk7 zP)&_>q`@>&Jbi_fZ>u#`^t*811AB>y^z^a2nrlgmaKda!7yTQeQzm6#~EG`6502vbUPvXhMs#0q&L>ijPp9tfZY}2+uS><&Cuxc6pA3 zBXBlV(0M8(CS^yj$x=np(0Ck+!E6?qT4Q|)<;8%cWKKuIjm~E>YN7L6+rlK=^+-(4 zArdr+{-v(G+4#D#S&Co%Y(B=#JQfim;0O^051V=7e0t|p+pd0Pvv4mq>Q(sX`GrU{ zX>s?5#kl&2Y@{dXaqiP=DIIB8l=hj6Mj_Rv!}YIi7B0qm(sI-qJ-p3z*!cC!yUbD? 
zp?undWw2=5;21iIo{!#1I7oTr-0>f->bvKAUqZZFRz>Jxn<_SRRF8p)AFq zM|vu0a8pmDa%at0d57&0>oZ}Eg~5N>i*9VD8TXw&91Wfze*eZ6K`QJGwgY?~)qB#p zQM%+yRMj<#!IwSNUkM30OK}h2kBC25FSYWy6>c)wkeoIYDe1$ILcgp+ywC{7SjB8(P& zHe3-sYB&Cbkl(GC!npq^N+^6yZY*&5?~5Sv5q&P+=kkw`Ag?P~gXY>wl$BM8_t$YO zR99d^(GyW{|5J-E1h+o|cOU{!uoYPjc7dlfL|CFrW$#~`ghY>!q!I-S%9yZZf?ivjV)kycQ`5dopPyk5%En zt?=kl;nyUhaoY+6y)F{mOe}nmwB@iLwG`i2)Vok$v>Np#YvC;0u*adkcs0DvdMYM`#HmL{P+~i7?yx3c{ZFeIRzKa zP|UbBdXZWZpu5)%ClNh%Tp0+y3zy+(L?v2bG<&zh#Up;-m5AeDHlt6~b{>F>R|WcV}*Xewe;n41K8Y~UuZ3CaC1@r0e`O&0un8U&fD zwIX*-rR&ksSci&ACRuu3I=(01bCXCTsci}2Ln_-hmek;tpK7R_>rvSpKt>|Zc`YF^ zaFC=T(JxRG3KDtI1~PMq-J}(vW24!m*NE11$4L)ah}$TAvQ@WZ%nHZxzjzQ!<T?doyT+7^%xtGT9uo0 zWT#G`FAI@Fh}Q(VvX5nQz;aMo9$w6p@jU{KLudVCI*vVa#oH#=!`6(W&c zLuY9^2Kqxp$4vmrLyx9K35lK|$Eis48MrE0%_e-icrE_@`W&RCrJ$9BTVF$HD_T7o zw=BZ;um8JaoQfFl(FJ0a+!|?usnv}JZ8ln|PWDu@`8;0qE48f}K3@PuMMYxlNN*7e zg)n^hFd|hID$2_Qp=f#Gn{L46X)qX&m!Ai{*~Sq{=tBrXqrhHr#lu!C9fFLt<+qe6S*{)(se`@Ys`#j0`xP zPWZ*tD?NK-#oi?o3)j8=C``VhFep-f0)c>t+tc?iF?b*cKkg~$i{rs_ENFSc$Cv=- z-}WJ9N{H=uhTf#z0Zv0dyb`4?(GH`@-0eAhj0FPEHf&8uB%#jZGrCXD!mW+834T8I zj$(66kNN5bY^pLFjL6SUMP6na^0N9Pq$58k1^0`~dg3A!Ml21J5P!shV5rYSQ2YkrCqj{ABGDI+no1YoV&P?aYpW{oz%7@fym%|_ zf9iFNoiamAZNku#@U+>?uq2W&jR0PXb~ZW%X(-<(i7_r-aV-O^iB=N$A{stQ+uY>p zdPZY9hbKkkC5cxqo{PrXD&!0;#E3By5e)cuJBRPXgvZ3Q_=m6IaXT^d$P?&@GFw1L z@cDdBLZSmNKKU@-{@=e)FmfDT{Ok*ql_=BF_g2X8^-M-RR&Cmj55HRii%ucnz6agM z=0Mr{#hCl@1Ei@Glcv(W`HRnCe=n-=ND7nIp|-)Nu~F%8*>wG!ocq1iRJaUSQyDjI zJc^2o;PZOp)>PO;mYJD}aM0gRWl7J+WhATuWcVNUgM{fuLbJ($`;i9W<5xQb?BTrSL+T~;B*TUx}+U+=Wc!HQBW#Y6$ z@Z869DSeYT-Z4GTUABJ3B3yp@;Uez*ORmTNJ^VQ8Y8u3Zm#p!Sd?dnY7I|Q>MlO&4Zxm%-iL9fC8sySuwXAZT!R z3GN!)CAcOKY#?}Ww+!yEllR*_`v<1xRCm|C_pyrP3BJX8;dJzd2hjwSz|9C8v|gu( z(1ajH99nDa?lJDV-z;mr?9K!av?rW41#BA$f4WyDu+-c>Ro$RU7-Ueg9YOWI3h{SW zDCI&}mUvc<6{JEe#6oQ^fj zrT*gD+Jqtq_FC)6bqvGIs;{Jfd%&)o89}w>zp$;yl!&IX-oI}9%;tLS=v6+( zzyvsl&3zeJ@S>qB2W;P&7oDzXg3R0LVh58&!dhX!9ptogoDCuikbG*r{2a)fy9=!< zVdGwAF#-lyc&O0b`A%dlJ#reP6?LqbH{KY}{(bPKz$$|`5guivoP&X|g-|Xlt|lE= zQ^4I1Dj(l17PRNbop~%LyMHftyVr39D;KykRFHAy8oYc;NxZ!Ce8-|g=e9?$i4iY& zT>C=>nOu`FnT6WNsXL6NbCg@MLOPi_^f~ktEzGNKCe==Jc`CB5KTzXu;QG#YL^r9gUg{&@M73G|voW?P<)1*n4orxF54`zaj z8|&Hjwe!$e*w^{LK~eo8@5Oq$Z|tgl&6Ynm9YDxAM5m(*WC^$0ZfcWH34fQiQr>%$ zf_ttA>!0U`sYpwU>my;`NQzfR!osD@j(*T6k>tiV;Ves4{$>gvX(6brINVw*3>vtf z#M}CTorq7`RLUI^nlda+G`8FX@tN64PA6MYDlMa?i8-YU=olJ@IG^gqsJ!(Kx=_W! 
z(xc3f?rN3H{VDD^_L2Uz_Zz;ZANVUUgU3o(1U~~82}Gc`aNYJD*;rz)nqMMsWT*%2 zt`?f6Y@cds`-@Hk{`~qqoK%6(x-QV7h*G{B=0S3gk^8|j;4~sQu2VAUBi!Ad(%iQj zUjvTwS!y*BBumf@p@p+o%V@NF8U|HJgB|nFW5PzERQXBwospK{xY3N?Kc-3P`ut4r z-yd4ofG!fna+6z;p2BF-7vb+uyMNu}f}lXh;Vj4Jb>s;1O~-7 zNCnRe)3H1pNMk0==#z+vi_9J}X-yq!SMHM^qlL`Dgy@UAX?GC~sz_M(|oglh>P1*7f$9?^fod zoFpUeagx9rYSu42N@b%dWrh_H#gA|}zL{q&W1ZOMk|`=@EPGB%I#t<$8Fav(QoCIu zBk#MEDkk>Cu@8lPw^SEhZ~RhDHlTPfM8BRnYVp~_Qh{zud;9cm~I7Icj03o4CGFJo~^jj9)Nm?Z$bd@1!4MdE*0c?3juH^gzIwkWVk}iaip*;Xas^}*ptT$gH zG8Z8UB!2R=s8ShC$q*0+Kmj5}gPbtXbvj{@Pehy*U(8h%LK0y=orUpdDJ}Q7@VfNv zp&Gr{?7EF-9PWOf*$5l78xr#5-%TZU1%wLHWp(Z53_f>BqCV`MGU8wC{}08%c$B?y@^qL_uvg+#qwrH-rztGDL)5F=F@V4XsueL*vCZOzMG4Xd)>++MYpEQ47!754z{(}%a1yF0-}W4 ztg*Wivc6$;0u<~}QTT#VtLoV{P4$5@q3V=4i%3p=OawPXAowxKy5G#vPbfeMzvxLYHTY zq=JPi2#PgM7^XX^1&{8LuC~za;r;FN{YUHqz!PymAxt_9tZ+^b^N!t*=yVRs5rJTXUOb|CJH=q?k^0Xw!lm+Gk&Ro<`XZoBSE**u5PIJ_XbMlA-ViIn6bUrU`hg zctC(zdC4D<5HR*2eZ3R9vaP~uFf|WJQv?2&KS6D^z81UJ3kMbZOOSyiB@G5)(!#nr zfeA-E5mYaC1RUe}4;Jp}f>cN>P38B(piL~2n*j}@s~7m%Ixo#G*#S?Qf4azj~DR4BQTkrw51q4I|N4Gpgs#fxGnAMXP}PT>L)w%n>j}y zps|YN!Vx;(>;g$+aB5F~m{nkG{ zezX7+?Tz|L9={8o2(D_-?S^?lXMI_;}ORN0HWnxpr3 zzy!M6dLQ!lke$vCRXy0;C~|QVK!s))_+IlUycN=i)QzH&^KygRAMONmd;7HYYp^L+ zG)LyM6Ej8@TNI9l^k$o6O?Dbuh|xNw%R3Ai1PfKak4I!@Y?|F@P``&g8+=MkzUyAM zi1emz6VzAym&hW;u&!D>2H{gjq?e3Xe#Ep9<|lsb)>g9qLwR%$u?sdY?#eY=yq=Up z4rXAeB;tO3{p{Q78PJK(u$Y@5T;Sl)s)2#;nDgP$*BcmNlFoI+oQ3AV!urWvvkgHq zH9^4JgGm^w9B$^c%68RTAjpV|DK~7{U^ha2>H77zGXSKBCb zXCXn#`xgrNa_q)NRTcTDpB8}Ub&2WH9DTVal*_#P%>mE%PSsyOwR@~#+UpG?WdD9n zZ|{h`iH9@*go6}nZM_8jf*w85me`}C{4Yq1)j##XQ(T;1JWii@A!#MpWVRwmgRG}I zI~$v(%KbHs7IPXe*BgcZE4-@dI#@PAW=~#Uh5fva)KD7Ohn?G(NLKob4ZOc912XW-;{2E+ zh5$hJ?=MdhmCNe!Eh@rX5Ti%5urGyI(z9cc@rU>D^cWe?S`_RK&1}UyNTBbp6Az<@ ztx*s2S0RlxRQ)l|DK+iTrr;YXFNccruMS=K+9owDU$rHmwGBzenm1Kl4&(pbeH{Ng zP9EgRR4;fcXXLrT>H9JhjLB9S--Mtv-dW8Ztu^$pJ(;AgUeWoXg%12Ssk(g=Mz@vY zX}g=_X|vlrk3Kk$@h%T2Fh(3&UPc}hVi$>Eo)HYa@P|G5#w=x|V1P$Q$^&sGlsnP~ zTa;l^ePXt=&3V2L>)7s~R~pWJ_lGt}Q0c^5(#jmf%@?8Ic17H`K70&B3IdWMb@SpX z1W!{ApATh=BdR=!cxa{C88_6sAvGWONPUtHN7TOwbo;1IIOaM1hN!+8Y9!}KdH%Kc zk8e{Q+qY5lqZmB}qJYlbUgrlh>AL9Mw{OholS9tq7=Qv(%HvmBPVwY25Y)@-6_bCh z_Ak+WBS6PiF1wszH6EEUnJY2m%%`HEqeY<{3#1~`ZJ~gZ^&pylI_G`(`|N4Xxh91W zz|bud@j)JKQNf5+8OXdf0Nvl$E`zC3*&JpCrFu2)dsCj|4o(@z{``3)&v^Z$P~g_= zrBnM@wcF(rS^MuNx3gs$E1}M?jW`d%X+=bzTo|1xMCB)^ zR@emw!$;QgO6by__w@nmsZ}PQgC+brX+`6Yv2n5K`XpK2-=X?FOj5h%u*X`&bao$t zYljXUPcRc(%>xqQf(02(jZ71@QwR`$=*J2{;m5343`3?p_m%3{sNa?i0kOwXq{^^6 zq122>tWtEjOZ8I6?FlzcktumO33F)`=0Ctr3%bmjtJeW0(@%md1wv56CCQk_ZGal(hUxkv0QfGAZh*RRmDW!=iE&m2xjDZtTyJDT0 z4=8+&I5;n@RO!0zw*0SIMz3{U$7%|v#yhjeseomKl-pxh?=jcM`I^q(4>G2J#M|yT zK6wJL1!l0eqFs1C`8-mcgqx2+fXs|;>EH=&`~=`PWqKp)8bjz3?fS)|ZRh&ou?Ewz zZq>puFHXfvKfR#@xmj2UIe{dIJyUbbeXP5!swQAirS{#K)UbimWXK@YVupquHwbg5(!kK4@ zLj3WmrhIWspuh+J z)EL+&II4>^)eQ;da?=|zj!x?snNikh9nGLf5onv+T6 z47V3-he@@H9sVRqsR621jO$685Ghrb4)k;GY0O;1Qe%#iEUqlj%G{p?J5RnfIzNA! 
zD>9$rBIC#gJHa5yB!EjiHMgM3UxfJ9sFb3Gx6*l~2Kyjf1*$xgF)$)hke{vR#3r6Z zOn-zW`#B*iHewLEqhhJgtTLR)j!-*TTV{oh%&2HSjPltuDVe5y8J2WMk|N*zyWOJS!`9GcB(fMK#Ml2f8OfsB={gNTggOO$OTl z@T)RryA;CP<0}4xn>**2asgdytSOe!)Re$Ih!{M#LnVBB(ZRrM54hw69W#SN4Pxk6 z+p5}FG~uj|cpaP?Fp5^ol}4MxFG!Ko8WT2hgZ2E)M>;6*HBn5nmCfzJ5xj1hwGskb z_?E5&4*6sk-CR6;JVotIgx>Dqk#zJuXc>0(tkTB^zpIwJ(f4~%%MBNYHuYG?O2A>4 zAS6^e0ae9bi!0>jpKL)ubGvGztSo)SkLExw$Si<%sH`|VXPLG|o3Q`*S^L4fMN{dO zol)9?f?b!ZQs-kM(@Or;m*hZ*g{VWj7Cc{4d> zN6Z>x!*%hvm|_Hb!tlAS(QQb`0=KXBs0Iu5$*y zI*}Kmc~qTgh{$OzTfX)|RUlN)F$NymTZYj8!3hDy=0eCoD67I)slt$&)Zb1+Ns~*v zUH5&m7mXFpE7X*9qXBZ_WJvY?#ftLe~Q10;zBk)5WZdkK8$SCnpdyW13Mnxd?uFc+%StJ zX1Vjszvq5tjq`QR!k4+5KBEee)?;pftXdio{()y*ee=C+XnzPDlNwpJxYIZH2o--C zLU1!U1Jkw%Ny3-(#4>0P!)!mfIM`t(VnbL^ztFR)uLK-!5y)wH+r^okU&9-hRQm%- zguRS0Ug3egOQGqXf@a7}U!e|!Gnaac;qky{zls*!W^WX?tkHenI^K#|Vg})>{~tvr z^U@hW{dPZi|ASn#+w_-dZ3;Ng-deGL;DBAwlW~JoiF)1L6>z{lBg!4zWnffQ`wd5t zZ`|>1VtQIi;|{Tjy){ErgJz^$PqFxXEVhn;`Rf6D%mc-gDTH2@U}|z_Dr6rSkyORO zR=A^GjvghwE2wa&ExZQe?`o}53)++`qZw8yp!T7{1=y+6Qx*}LX=Do^nRinpU9mGU zLaV~A<{#?DYk@S*AP&bI4rRuhWgoLjxQiM_lLw@4C%Jn9xG|mot%hDu8c*-NXD|Q6 z1?K%2?Jg+-JeH-LOBfM{s~v$mwnWzu>_H$>N26wRjo0H6D8%35B8V84pg-?69^fh< zHmft=UwP&M^LDw@|MOq`_gYg(;k=sf#&R^>s2`!}Np5pHg(zg*ItWCngzGq#!9ap6 zBm8tj0MCC=)MQ{ktGVqZM2@B;wzg_PG(8e5m!WY*EgmbSMMml`Um@~4IgOO%N;8Q` z>SN0g-7jaw+hhpKd!u4ZdN8~k?T^V^9!xhfo?hRuA)&wyoF+l{oHySi5zDvT?*q^e zGp6(;G+zCQfyIrOd&1A$QDu~t$yqreJ!dTL=Np)DzI~te7t<<+kDUD=bZZ2JZVe3Q ze{Ota_9f8@5w9;43Do_>(0XG=;(hCF4dE+sv^Vr}4Xm*sqsLAiv+n}D9C2Zxm$8|v7D zk~!)Dxe3vLh1#%E$Wqnc`kn^@)z6lTj#crvZBkfqQo$0bu_K~Ts-anf%z|VYQNgbc zGzHJ+3@?Wz>b@^M00GO`W{w=aj=ryIbg8f`wZ+LFyiv0mblUgYED7q0DX`|FC}LW} zTNiN3#sDA<4Rhrw2@4rf57~IxyGBl&u{)-{+0O@MPTN-ESozS7bWX^`NO2+=S6C1f zB`(|-z)zYM_WowT*ussWB+UWpf*-Q}vE7b+by;YdG%}B$I6^smCUxXR-j^IMMJ@^X zuV2D!sSc`_|9-6d5EU`wuK( zVY&a?%rDfK4#QQbC>phbENdH~^MyuXj;3qzil)oa*9H1vmB9>AYn#Z<><*PX`{h|Z zPBOS$W28(M&DME zlk`sb*ndX9Xq8e#%`EXylEeg;1MyN=D)l8fPqp0kR4Ed~JBW+VlFTgBd*)W<(ip=> zQzCEDt&VQtw{d3kUkMAm&`|cSXBf>{@95_+TnkN z4hA{1SXjwQI;J`F;5cnQ*?FmS8dr*=mSxtO+hKCK|yQf0dUfhW81i=i%85YtOLZU~FmGhH!gB4og01L&aj6dO>Cv7hb3o-@lz9WWDvO7Rl{9W;@uXCF%BINNn1Xi59<4Ze?c4h z*<~KNL8`l}c8wOm;dje`mW1?G2XWk75Sm2T(1!F3;P-;>N%qB2%AfEh-Omw`ToA2! zl(b}7ZTjW{CDP7APu-N8>{pV^FzE(2C|OkNEQY`9i(|$b)DHb{ zT;kVp8ot}X;>0TK?daCjULeYtRoE~wV82^IiT7@w)3=G2<1UV#_#~aJnA1lCj|wTH zcZ{94PK%ByU!6wsyb~T~lJ==Y3-6R89~K|XY%T7HJc&Wu7?E?Wfpu97DND%5>3RY# zrV)v%Bkm5otI=9{8HU^S4KmJw*wBMtvWWWW&qI^0p5E4FM`sMOTqHu}<95dwT}BG) zoWtd7juXE^1duq8RpCikbd29PgrzQ9`(yjsOQ-;hNMb$x?It?Nq^ zkJ^~DK8ERO7T>7;@%3UGORg9q`jvXl+L%+8N%f!VhqK5`f(WT&=fPb*OWzFts;v!o zD1X^yaVzFpHpUBLgezIW*ZC#pE&+!IP-I53lwdLEWzq?ZlH-s98gfRsZvoDycxl~! z;|v>X;@C1&+<2*+fAT$^xN%<+Ygqh2jyH)8i1QTiO&r+>VP+Nq`v5tK+x! 
zo?+iD250c@II9YQKX;jlxHN9gk1*Kgwd5GHDzt>PB?>HLM2HrW z^ijE-Sk2HR{#&ev*4MAMBuo=qwaf95poHZ<7bFFKmcM~|{W#O#oYV39IecQ^Mi~mH zGNi!^+QsdCP?S^y1v+8z#UjTXx*YDEFLipR)ppEh20hhdD5XJ_NGYz2K^f#oF+m}< zknRR~`)8TMFyy0@a8|^OAhaP#b{$TCfwdAs&Fc6aBP7OvHQu@&x9P~ylspDBoQg#C z?G;^cHHL4SrchF-C9-exgGFNmXL)RM+G_qd=IUhhO~{@lRPs^=1$}c+GhCWIRnyi) zNXypEiGQ27o6|5x?QbE%&FJ|?_=L$*^h&;Q5jCy8u(Y1*5VhOkLWn1 z>38u3oU#1PCzei08Y|s^(%OEuu@<+#D3!5o+m9XB0}#Y|;v>t0G%DM6X^k~bB^`DTB?gq=|srY|V@MO$oD-cMm3F~H*6nh1k zyx6%SPg0a<6jxh8UQG%G7GxZd8f=hlF>@<@d}pXYOrc`A?`K}EB9&SsRU-X7x&{KC z5tcJje!+PxTAXsHhr;0!{L7DJx7EHhb~gmBEp$J|NoP^~$ejEM)`P0vulQIU{Y#aL^88Y?h!Zi(3h-g}w) zEsZGPsHUTd(lDc_f4s6ciVTNww0EdeEg&@+cf+}%v$IONo5J*c)HB;#fL^D7guM+f zMl|$4J=~1>{`K>mns#JGH!{!ff07Vht*?Xgz`B=CJh&BmWJl0pQS}N8?hN5#FUep0 z#=afzMa%gLWyI&*`zz21Dp<&I+p#)q^LIx`1EF1Yl#FUl!X89u_vKqcAhFO^;oo1w z!F@&u%TaR(gk@Qx575V@iLkj>j+*=w8AV^+36)m! zd+`F1`;w+YK;CiS;kbMAr_xcdPAzubTm7IPG`Xv!Misnh8(I*WPu?|&rSX#MbbpQ< zLtBE-JPZR9Q>e+zuJR6oDK!?l<;@Z2a$m2M*F+0a9D%g_EL^AX3QuNA3Z?L};G@BbO>iENE6)h`_Vc4-VwxygYSQNr zrG|106#i7+k4Otohh;LY=lerm?&ib4HUhd{L8k>0g=i2)Z}Lo%2#Yd$%fYxu2DEJ~ z>9V3#=znCl5!OLf{d}K;tm_d$v6@WuO+1bdDd={EHPe8>*!w!=l$q*bu-)K1{E4{_LaF zmLXId7KG70<&Wx?ppR~x2+OQj(%VfIDddT1mD{M=JjR~I$E0!wPo z@`qA}iSU^52TR^JNEA-t=|%`*c!S>e&xA}hjsWy1TF7`8eIX}((hWfmDeo4Tvd--} zXv=P#<&*$RzX_)-+D3c_s`Zo%0x}v`)1+Wzf9`zZhP9Sx(5DV*CSFz>`|(3g|!I5kl#ON>bfcd4x4({FTT7SG>I;Eo0(#q|CA;sPTP z+h&PHC~=njo4Qux>bynMd7ie=z(m*n2o-4Fj>If)Oj#^80+I39m(FudtN=UA(E4%A zM0nA->q%zLr`NX)xI%KMCu@mmSX`v1XuU`(O4g303J!Ss8VB@ft~HHQ+?J0%2qp#D z^RC4=YgO@|SD+v;-+ya|n&}Z@xF1vMXZf6l=y{EY==rYU{OU5tqB~f@EZTLSST-K0 zbgLB3?MV$Cm20J^wA|Cmto0J9Ai_GKvBDnGmcwu=X>40d##;{6>Q>1L?;9+_&QG%( zi@z`?5F*+gPl}q^1sFi(!hilyAySDMqd%OSmKFl*Z(&ryQGDJi13vmVES>%8^&BD) z=z(lxjET~qjh)!KO@5d-pqd^8m2o*&N(zRvCXbbRhdg4)5ELOe*@B?LK0v6cQC&_t zRN;WpzW8J|TkU@TQxa0q?qsd}O z$a5>KOq0U-yA)G|ZNSTJ$RS6}9-$(0t3|`Yglg5=KbAF>4dO*H#;fw4wEp-+P|7)w zjZijd{u^2O8?|rM5InWC*N4^a7#9EY@!& zI4^vIi7#Hvh(-1EnJe7?o(l+%?%@JVOz7f}_!!`F~W49-Xv0+93(Gtt$ zh;e{s$`HCzmd)`kUs(8qztkcY)+O-YhJ}t2Ix^D7t71KGx;hO-GGVk5fjAbJoa!&P z`^9tyRY!)OFH17m6;M-gmJ?xRu|O!5B?$(vECU%3{G*3exFFgHUNl^O-i(zshdjl0 zwNiN1Gjh1#BkIZuVdDxGrcFY`?^Kr}ZAb9gI#W~N5uy+PRkfh&nk1Mb)V$h8X`&^@ zTZhOO?E|DoiG;lGP;haYnHj`2Jr?hB4$>I%oXFS&eU~rDeB4s9i;;eU12%=4r0v0+(T&z)**HdvunzRDy2P@XJ zWqL@!^RmYIxU#FuD6^P05<6{+L>RN}{?@(hek?B9aK111IdX)6uf-3w0hs<8z==tH zNvCNDxokP!bxuSeTT`^|EcsC;Mb~D`ctQHYbBe{BZ$1 zdz50au>6rBoBIq;!+@x)*vf*CzW8BWx7_#TRKwj$MuLjPug$IS z9O*3Q6jV7>>0V^B3;hs2R-y@eI|G%cY68q|O+QgqoYT_`X>RtAi0=`!@-f?&tkgDF zPXRS>1v1#vOD*^36tLZ8SFP~K$kRZIVu1TQ_Tl0p7@-<_sL<9*3 zKlA!+zzLZ#azGs&PeyG-LqxRnQrFm!`!m;BEFU=MB}EPKcC=>O#RaJBoKFBtxD=}I z^^qFjsoqP@JZUtf^=WtRBSDG_mcf?pX|5T+qg3|)Bt`$gj818)I)XxHi5%@DIam@<`))4LdTl}v+ev{FhLmx7SQ-nwefo^^(DA*fta`l>Q=2#lcz!_Z zkK@3{inoOL7y#tn?x{Nd`m@w#9*bu&)6zfN;^{O-?E!Iyk7RAT(q^i@Y&r@A-;X7m zqNr#P{`?7*(`CiIK3q!dzv2MQ_UlEPnW~d2=l-HjXd^)*OqFLGr<>jl9;l>ca76}I zo;x2lD`?r#;dihRn&{2yHKV^}%Gj0EuzdS12rDvWL1AVo9w*b^9aSWeW-4v96}t30 z98@w@wcLlc--|YCI7f7o+IV=$ori%ow2gTZr!#!6rDP(UZIwjVzz{Q%o=70Vf%F-= z?tyRByuW^kRc0}OtwcXZSMrC!aw%% zSXiiCd5t4MaCiogJSwuX@m)$>vmdm1~rQ;SqHvR5#eIQ4gpN5X(rBqvk zb2p&3w=Cx0nwYZMnxh@8X&>kcWQD$nW7q3CRr|0B! 
z^yr;T*#!FL<%Qp%(KrKN)?N6$Jqdrj z&k+`v+Aevq#PuScH_*C3ROJW+%c2=Y_@{5~@Nf5*(2!_8B&hy|aSVl>Lzw@j1~Hbk zFSeVn@Y!8mc5$}o4JN>2A@x$5^ZVtS70s--BR*edzB5oI2%x~G#SxV!XXiOott>6> zTuyMIdEAYDHD2uEDD7a#8_@S5Ha7OIN;TpuN&(N~2Ohq~(&lE3^tkS*kOZTGfQGz0 zC%rUo`I_zZn>Mg63B9l~F9RzpBqwaMOiJ~rUe#J$zML0pWOaOc-1P0zmBe#Q+O18% z8tx4aLXt^TLLxU<@#&^;DybZ@Tb9z1X4Pel=fwS?zLK;L$wH4YTLX_7%-#1YZeUdugqf& z2g1>DK&Z}3vwrX!RvlYDnS2EIaY59UwcPP^PV6o-1L`OD|T~yYx`SpWgA-@|K0;P>j1tq3I zN5UYWNxU8^+B0t0+tEJItBDIRr!K4 zY+OGuQP#BPU7mkS@0RU{UVTIvJ(a+ko&JxGVDQK6r4ju$E&@Ea=(U5O##mX~3*hpe#o5*60{WxwSiCc+l zt~#IiG`|Ufk0_gj?i4j*LR@DI;>PTxNB#nT{Gl)?ssnrsyEKc6J&DMvr01jsS}KSy zAL4jgqrqm6K=4MOh0j?>8P@kUh0+Ka+fW5niMn!(?28Wnb{crX)hZ%HRV)Xy#xCZa z`&pG54Sx7&JqTDWcb53N{8An{W+pSBpHSgEsn&7j8D6u9C_yFiMC*Gagb;WK$pIse zT#z3J)^rqU`v%Mb?B7z8m+6<;g-*1I&|1e}7g(P$tqVm$(yh2FW`s#JlY{|@>0ViN zF%$T#oD9VpHCy)XB^J**PA0uL*ZnVu-a#JlCM^ELu;Y(*`qlDr3(y!RHF zM00v1`kL%Z|EFAmjY3?R)Y$Nmi)B|iYlLFG)W*vUv4QX2q6M#RWQhWAX6YH706_k= z@RqVXJ0;*W{gC%yp1bbhZQd39?3oy94`L+n*dd=Kx>3(K?g!jLx;L6nvOZ;8TgLcA zrI4|@4=dbbv)^Q_>=>ccP7!9voQ{30{#xWGN2esJFrlTiJvO?a1K-8_roUVL3*xO- zk`~L>T6tP>@^S}(fCj(@wVO2X-Ma7zFPPs>P)L)Z;6Ncv_QfyigUfVBe& zXETkI9LO%eeF>q)t(qOnzZ`^*&*s6EC}byMr<@aNtd7I)QJ9**eiqJ;+k@PS_X-0E z5QQa!Q*NZq!ORR~?AL3Yb#*@Pr+8AWI)Ru|+{N^hAd8d#OZ@Ehf?OW@@6JBxO$$B> zF=nJL!dKi>M!PLJ9;v@XVx0$!%DEwRVLh|U1p?TxVG<8=HTUI8Cv_p z1*{JPgH*N-n{pB!f{q1+A^CBQYrk+RqrswZ8DFQJu&+m*bFWeV1JE7?N3DB_8OrNI z`=*kTC&Ud&=7)t!8au+sv(=PrxP@K#d5DzP|MCJ?16*9#u&}V?TK*PFpDs8gWIcYK z%Hic8N|@6_in(YCj<4FpkdGkGr*Bh|YaOj*FAfZTHo{^~d!xkMB4l=w{j&Hy0P?cB?W%4-g%b)4`p8q{a5XDv~dtS~6{*n|Eg??Pch{A#yL503UHGc+OfnwbbZ zqxbF^lERjbvY}NO^KH2W{)N(&V;pb#0-uSp)RcnA_#Obq>iko{N5lev@R5CZO*=F( zp9>`66TlBqGKUgyf|0{EgWg1SSl! zKh^%|$lU4oefM_S><$4PNv*muguiT;{*&dFnpvSc)EHo*U!Q~rc-ay%g_p>b3@0MI z|7FsvMH1R+ODFFece&i-R^#B{ zh^w+DpPVqYwssa3H($uN?A)goq{jEmX(lK0q5@o;Z$>*9iqcDK`bf}dPaECmZ6I^3 zyJV^YJtnkKCAKBmv|uOZlroy!%*0@vDGTlSFoP1_I+e?(#oYz_;gOGBa1p+D!(EN5 zSWV8mf-e_kgi=KLV#8&}QQLE0MJxQ1kFa*#1#}o?`+Q2MkqUcXxi0$NmZ<8vUk-BK zhiUo$&`!j_DVbuTY~~_GT>nusl%64!(NLgMvZ;w)%k>AhS(aT**j{r69W+TXu^>L< zv7q+;FRDW%6oY6ofIlzL5fEf0Rtl*QMAYWX6AuCT15fL4;ql64c<2!%EO8 zJ;ThZYQB!5EY->ob}gOjk_ z==X72%!4Y(djY2hJTlx!zmmSo3I2Fq-?S@;vm>FTrQvV1*ZaNb0#i^C8-lzZriXFv zNC2zwV!BGr<$q!^$D#A+t?{hQn^7lFb5oAj>!5>ZObkcd2%_p3qgao2=5eCEN?r6LzmRx^q zAMEyfddz>y$$25BCYWM=!stt!CkD!gVHlqFFnqf&Zk@w0Wx!Hy@-agjCAb_sVwD6= zLo#zbqK)qNRs*kllvozal=NDVlTYNtfzSXHUO_*@06>5kT}Q4 zWHk)Vv^jU&UM%GFii{3_ROI3gP*?5eJ3fw|o)r(Dsmf?qG@4V4H~@r%$8I#VF@9Jx zEgG5IC^8(*dQ^BDqHtpB*&^;6h`umhNo>=uSCQ^#4>T_i<1j~gFz!n+lGRQI64H?3 z$qq3;px50D@-JjUyt{s~cC|GS^&OE@(9Rno9tGvo(W{AxqA>t^XnLl69kQ#eIu`}@ z39TB6`IhO}Sq9-qiHNqB>kW{M(K+8sQ;g#J3LuGVnQ-6TuTA%p*5a(^{c#KNBAJ?s z2M_mv`PKc-lUi!wIL)rii-U17j9X_)lhy zxRJcM#`1ki9B@oC4%@UjdF`7*)+?_FJjy$~qdXX)`f`@8r%59<%?}qeuThPh3@?8h zca4euV6Hq3p<4u+xmx64;x};)(Av!v3b>Ba3cZxtab=U-2b4!ZfRshPqi78FoB}8` zUIKz)Q2gxGCn3eka4Az@s)A5@hSP9{Em@qRiYVgO0*~mU=>Cfr%@~^XIh_3A%&h_N zkW@UA(m-tC!9OoO;Bp<1lVLirfoah%1pfB5R3qf92Uhrf#B)yX6FPzXI7wAK!>DA3 zF>Zo&+S<0jt?t)ivxQ|lRPfEP50b9oL6hF5incydyOUmmIyU;iKaqdyFOs~V?R^38 z7Ig$RR9NqnN~t61=Nn_OK>PRDEB=~&K?h9G^Tnx(wrIGJ4P7BCv5?d7IePrSkabS zSfs+;zi)%=Sll+OmxisN`+V=MwK6iJJO5c>8v?II$R7U&gExBGyb7AlSd za8^|Gt_z&(wOmfdD)s z@NU6#F=tn=3`)XRYMe`>2Vc3MW5q*k*2x;~4is%wk=XYiaAHP!O zyW3rWHS>Ye-v^(l2GvP|MsNmMR-?WdiJuEVqo+Mv!g&xI$l$ynbIBc93W`=1#Iz#z zXZ(0tRWB~E4%x8j6szHIW~)80(2r#YSTD=K)t|$*zMIY;e~LDW{!&rB64LJngawDy zv&3|Dczr$Qe*4=!LaB+|7F|08bzSL4&X7mO2%q^b&qb)$1BY&E&n+6u*w2>Uu$bkh zn2`(ET34v%bIq2=Tm;$dbTCs}IYthBDujsh$lz(N{xkwo08SIw#v2d)-{X^(qxX0H 
zCDeL9EZ{tLbyd8*;j+8bB2E)F3N8y-8DNRu16Mw1gd;q(F)D4WqSx4uhZ%0OUBnXv@%ws?d z_JZ9g`-1si2pC7x9i9cLl;;}F@&YvA69DWF32Uua+cmwG%9!D}f1asG+W*+T@AA-q z3FJvK@;%yvg;c3uV4AY=#nlyBuVNr$rW^C8iGvnu(%trglL9Vfc$&RUZ__BqosjM6 z+KQXqcDafC9{q6m#R(Gy?AV$oO~Lf$^n|j`8WJ!udVfih%go1H6p~RY_E_4q7VZ~> zI3_e-Ku2BqM4a!xCNeP9BLW?Qmr#FJXIr5lQ4A?FOK0J@djxcQuM5sg(Qf*!^7en~ z|BZ=UK*h;<*lgQKB|C{&nC^gf^1ns>xH;Wuy-n)?+dI&3!T^{BPq$6f_^onc-Dz2V z!A4KP<|x4h>jq~=U4V(1bbL4kxd*Bm$*Rz?S`~r0pvb0Vo$wpkqnyYq9-fH4{03!V z>0fu(_@$M^(mPtxzdt^p>s`oT+Zs+oTxKbLB4R&utyArCZXho>(HtJ1LcO zSOz@2tOq=bmlo5@orOOphYmddyCc|)fHIz$8JZGw(~nO^M<+QsOqN@O#A>R+1y}cT zQgJF(P}m5A zvyl@N<2x7k2ZRthH$NXz@15V?X=D)m>l;4fjwS(DSUd^KV}|v|TZ>3hXR%IJcdEOVFAcB+=e5)q zOcuw`ItG&9Ic*BDF&s;3)ypp0H$OD09t>}nO)sYzItgz}Y7BWyicPk8m}iW(O|6vn z!H-Z54zQH4AT;7N0VA?Dt5HX9J}3!R1eyP(Tpk(ui=rJINnfqcKy5=S?fTFaklaFm z#I2J#ZH-dXRLiJpz2U9|tiA z$yPA3!i3) z(^OM1cbu>zGq^g26{@|IU}_F+%KEIoqK?EHu*wQnvjH07grHzT%}+o&jT9WN^=y{& z5=8FvUZdj-=>1u9xs2_Q+XIQ25XrK^HsK_Zx+8fo4U#AM7w5j0c`n7p`!Uhgy{&W{ zD!d+?KS;i&r)$34W?hwZ8UqDKS5aqzc;|A1V$KyNv;7!R(_MW;7wj6Gz&}TDTeY#7Bm&7eqI?Sk4 zQdbdb*`7{Z@6s8J!b~Ghss|ydOk|G5hfVI7eGJEH=~`2P7;d=6&A&jUnA~fQZ*mplv-=W$bM&0td~>t2WQ{l# zLotIX$as~g-cA_LK@REsDG`(d6hCZr4K^vVIW|AEh}gyhfzhb=k^IQ=9Q$1d?yD+8 z`)L|@nP!9cK@z-?ojt#1fv3$nV!cN}dE?c&mm?fb@-l-^B*A zZMldC6KUz6a+G2}lR?KvH)3Gaz#EfHIVwhwJcaMNbVvwh#Z__>`bE&s1H{D4BXV%j zc&pN&WrGH&iz`PiO3tL;p!gw5t(--o`bHN<6F%h^+ehuuY3gJ}{mKhyTlhr*7~Atl zH_EcmE`?AUm>H(#g0ex(X5K;Sm!By$h5J;5AXTKa89eps&kwlv36)whB=J=6`@7~F zljX(O<@!_PBI!>3n(LMbmeYG+RqG1(OMgWmJN}mYtqPIwJh@<6?sGGdCH+&}%O$V; z&HYH!ZuROX*9xS_UD(xdr4TkY7>ByorQLVGNW+P^Td}_xD(}@&jT-g<^?e>rPRzBW zG^v9|*R;KY`)Vj_XKCl9N(FGXfRk5a)4RW&5~<`xviUrmYM#)06~nxK9=$Wn!#yAl z{Ea5fngxIKy4FTp{(Iaqxhyjw^y8IYDl@sglIY_?E7(m z<}-403~5L-!`wBcs7R4JWpXhS4VRrZr4;KX>^bQvO^Yl%9Ik7=>kEIy$-lC;N;}-_ zHp!FKl}Q_$46Pw8W_m3wLYNFR4qvGEiZ4ywwKD%2`Ku_hy03VRxL^?JQGo(KyeFHD z=-USw-RBKt?HA0n6s2?G ziFcGhc!weCB~X{U_Exy(*SU4gTACr1A&8Ni5?}M~R&GY6ofaB}9-f*tE~fNq>dTq> zu!z%v?>YOn=V_^7alpoPUew3=Sp?J@D_DU%`2}^Nt&yH^CC@2Oy+6d~_m9254O9+= zGo*N}W$m2=y`qget{;Y)d%>NjqM6CL3mygRD7&ZWsse zhOYy>r`%TJ1FmoLL#dHeZA{G@Rtr?mNDF1_3F&OuW8%YzEmqV5q=_*VUy**K(7Gep}Qs>fiJih z&{&B&t%?f5V;0i~v|ve^4%^g5Ag{>-b>g(k&!9PN>g9FN`t!xEZ6_pe{80eKZP9p(Sy+VkNr1Hw^ zE4}eR`d-4*8LWb5@IxU2#Z4(j1uPUm`fnVW@zV=hM}57=oUTd62xQ#U2N5Xi!!wf` zuooP~8O`d;FCa)DkZJaVHMU-dpy{l@0V&X$X}Z_Q57%!J97^hccz6)X?8K&>nYQS)0KR4wzFka&O|VK45XTRV>xdPKfORflJts9q z)z+$0V5qC7$=aA%_M5igIyNVu3VX0{-oqeNU3SggWYGT!bu(|z&gQf02`ndj0q0`m zvttuG1$3$uRBRtSij6)2@!vi?2C-)nuLa($i%>nufP zb~aZ`c${qmioPDStlkb)gLRulv2G|r_64x3i;kGZ)Sy4Y;I8Zb&y9DNvJJh&{}*l+ z_}#+_@^S}^q62x=<^ZUL%|8gZuyuk@P&Z4HQd1*SVu-9`>V)%!p;K{L@BqLmn~Qe$ zdSyupN4J$y3fU64b4HSn3qrKAKPclImw)bV+>yPBqv&nxtd}hD-fX(*y5rX@M~11k z!DOnfu#&f;jFdC@K#b((h8tg~ZL zDEvH+++ex^p4_h6!FuARXw?{K&vXt5+`eE0SnEqtLxe_DgvPQbL-7mIWo)g40jrdr zO3h4B`>%$4z`-_Lu^MkZIujG~yqaw<6rtqHql)m}9~S}&+=Hn{AqmuDl5A3c&ws0z zH2#l5Us|^l80sYWro5QqGy|B z5uHB+)`HI(bV43%3#|mdR0yq?ai5tu*aaF};24<+S;WaZI?0A|p@vh+g1jGw2M3Q1 z4i5S+WuzDm3XIc3qhd;wh&6$b35!#78d92Ug8 z_w1UGV7={dful6z2j_dfGC~}2d$HkseU9aF)nkM)fx|H3sW_wFu&AlIcj{_I`Pr-U zQWL<~mDHKsp!fgW&&1@Ht8Pjz`-*6^yg>hT&5x`5u8_ zuZDU3vcVGBx>-QUX7f5q9AYM4JFv7jXi-YSuw2q}g{HmZ=c-#>3g|T z&V~p+xf3=QU}If%RYWUuq3+imMIFo(?Mj|&4RU_Ga;~3Lu{0hic_aU9?jmE(1?&C} z`u&xDS#u9+-3o$Xo7o@YfUQ^?D2s>kY_3>bH&T#Um%0}%mJ2aMTM^wDw_8FsSUP&D zS+IktbMApj4ox_{`8JVg)E>vl!Q}HzHRE3dJGr-{0Dv(&b1mRbGfWIM5_h$3w5B{D z;!lNB2b!XL6gmeOZSA;(neA)Rtz)}#ZP$ z`aVui%;CXQRz<*i@Q8)MZLu|Oiv9S)Sj;>4s=&+I?^YFrVIIlK33TJ){hglN+i*v- zO8k8MaO85Vf@S&pwAOEfn!L>Hs2HPwi1p67PUHy;HPX~$y2S6e%>CvQ+=>Gj%e=-?x!J 
z0?V;I*FW{IXI6wqIR4Q#g`b40TMuUX?Wy#Np#eLB2){d6KJ?E?SzXi6?~yIW<3W8K zgivEPsK3$VrIh^H8Jh=*#R7BSrD8H7+J?BEJ;d3jUw$n~&@+Fr`k9cMfujh6OefQq zfWZwcxfN5A>zNvLfr~K5DUk&GVA0ud_#7c%Phbh)IcK=z=#<@XQd|V1yX6!g%nHmt zltopXcrSm(sf+`r6X^m~_H~`wocjDI1eto6xCnJm@MjaldLJnL_q1|zCce$=HXWNE zEY-#Ehw0!)yB1n7kk1+a2UdcpFPE!W*lB3NURWu+Q8!VR!xH;8g-g-`ch~)@LnL4_ z%((e-Oo<1!R|^Bg4NX$U54e;XlP#y<+HEX6oXaAld|*{R+f?%&&HVL^gdeAD_JJxI zLr%s4eUJtW$f{HCc+!xDKNPc3Sj!DD~dv9Y&S}1o)itfquuG zNe~QQDe=Kd#}Nbvh&a;-#m|ed+Ar$NE8X|oGTo4h3DIlYc!?9^0o9 z8&l@qt7{0~k0zusoxV!#-N2ptQuR}_@#hnA)R_DPbqyF4z|Al}LZaZ{$Ni@^tq?N= zOL=FEjxVA@%|9LbnfT60FmRzBjz}a*jKo2PlnW=)W^Ht0EHGmx{Bj^FlmJ(gK|DVbhigL7W+zYS|6NF6Z~U8W_5!ns)h(?kbTtZ zbyT7yFq_VoR+~*zBdnZ?+KL0;U&4&{+g;=PaIB{L#s7NZEHfH^!m^?7_n9yq=;AS= zn!l5aUO*F&bGyEZ1gLa+6Zc^BpC4RB}s z&mG_iTbgkK(*1dJlvjA24=c;QA(OMw!C2}C@lKm-hVW#rp814OIMgPQb-ooB;AL3! z4b!6?Ryy>qoDr(-BJX5FEes-tbzaI{6j_JPd?jV6&hNjsPJ z&~Ygzs;$2Z_EFV*|7vYY;p>|Zm2Q1b%>fua3kUmDxeR5mPBezBO&7xsET6*{0;z51j%pM3khD8^|vh z0UV6E4y-=SBRC3M?9W|}WfY>tr8JaOYg-k)N9Yv@rSOUJfb;u#Vz1EJ)#M1B`H)hw zc&4a$ofJ-`c`-aV9AJMLRqW4!Hi-7E-v$hz@yUwPB}-KqYd5==}1FjWw#);?~` z>Za~C94~qY!Vek%$>%lT{5#TWr=!o+J9^flvy0;kYaxW{5u>1P~+U0=hNhm-ZYiuU;X(At60 zI^OzvWB>b)#<*0gUFjGhlB~#e7xlcLt0F2$3oO*4vgL4ZZJEkEGj{9Qy?7Ic4Ee@N zA07&0a%^G*r;Z*X?kt!LEVR<_0z1}%Stnk+&vKWvF`4edQbb=ZYs7ZRtxN{{N{gHi zPw?388jZqL!}FpS3-$-~=;fe7$Ga3mW9Hz%euGF}US6!3Q+qjs(aN$N8(TXg*7J{H zC0RRYbXQ4z45GJ_u-{42La9CRTzR+{Dn381M5iW1fG$=UgUQQ%Sr=ITwD@A=xV+v>v_j&0F&i#DcH^uY=mie zzwJEY;K`(!w~f>i4a1>|fr0e2GMK-AveYr!*!rd8?DmC?eGP(t^C9OAl~GyOo_X~r zqo2?F^A`pDq;Iy=`V_)KIGF{}FzxM7Pd?69(q^@YY0z*vdM+8YlD(nizRZyOO~jv}QB0IG8al zA?_9L+NNn-A@Iy|5BRjhKlVYJ3upQ8+YI3j_`B z?Q$i4cY$MLOSyrc%Tb#BK<_|rcnPh+@?p)D6>i>9Pp{q>Ux^cTH6HjpSod=?l=UFP z;o~Vs>)SSQQly?Xdg}DIYo$|^)!HFymaG~Bg^YbXcB#4dbqqOV#>FR#RhUj?uiqda z5Aw-K*-<%V9OmPj81uTUru2qtS$k4WMhsM>99*Ujvhq^%`V`S7W0lNf#6fVM<{w^- zOkpxHEe)YT@`W~M8g}lfS8e&|Wx>fns>@tN)1{HP95Y=e_7eOWl!bm!s zLggPK>5}QDGUGXa7WG7#I|KUek$vyN$6DV~3e2`~)dYX+K`2wD;FWzFvj03?Oerg) z-gFtF@+jX(nKsGBk(iztO#afwiXV&pOQ_AZ9HSRFsf4OWaSHVnYX_}~T;jpgItjyy zG#$1@Bq&rrhxO|}LCmLrpiLtvU|(XVIsJ9makk=S`%!m9_Pc2@+v-pq*oZNXVLClF zi$xekJFlxj`JfSrnUbjl=jr22w-Y??1@_;axjrAsn|mB-eTw`J&BhA0m_`)5n{W6x zeS>1!vTP5xDfZNMvO=>zuWn`bt6q{K?&zKWI)U=UzjNio*CMMXE44^rp#S*}?EU3_ zL>#Q0=cNE5v2oYqT-4~>S5t5wD+l}ksu~aEt0PSve<(Na2P(_gmg{AF9M*~ne7NnQ ze_6j@)Vne&tLGk?Mnc$PpWDr0_l;uZ_+t-G6-HYJ9r-)?cBD5mJM#S8-gN7HtSa*@ zr&&@_|42z5%PD*0gGg8o-{4JMm4m>>xFR*LeX#fQa-&i&lSHn8(`W>nS-@GlCb0IG z*iMU7|9ye9^O?AEq-4xhAtS2gxe4r0qY)+6Bl*`@?R7U6H z<3~X%s&0p5@PEYsb1JbRW8prhr8_)yQ8V)$dp(n_e9{!KjyzDv^e zS2^HinD&uNFkcAddcr+1ys$lnFcjP__T>txog}}uf%i|g)jYdblC-`QL=1qCor8ma zMQ}q@*&!LHgn2G6>HC-7V})pEDq;JO`Qwa+YK$>}!=_$d$lw+@KEl&){&pZu$Pj!sD?ZK;=DVHSoU^(lOcyJG*>tAcJ%~ zsgSY0h4;faUhi{;e}SQN>DR@j;NZdKKodbFdAd|z8$qY&^M1Z5HQ9tvcVRxT@-if3 z?*2f11a9{3%M+BfQg#dfeWu={Y$`F_s58J0Ir zDFN~{gH8mGSQ*C%kov_fVcb+^X|v$kfKVIQu+WNqpK5+!f*Uq#y-1P}+j9+@x61&G zfI(}4b0Bgkl6@%|J7;Fy>j2GcT0(G`!-TB-4PwLA=iePQ)n9ML#Y(( z@9=2Vm%ZS$0PDpzryg~>*Z|2A8%QCf8c)hCP}j69KRA%r_5}w_ZvMyqvt-UiW8ltt z^~~!Emk3vVc|O^9=gS&I?m0^S9N@V{(fK%q*81AqQQmYVW~dlE>)}C(xNemkyJkB? 
zInxzG+L9V28k4I8D}F0%9Nd&cv;B^KHK`xdSV`2#*>?TbaWya^ueCcp@FQxkyIvNv#qdXOGLHxA0a8Pm6yYM%dGPN`pfpX`@Y^|Sk z&@Vhb_Zyp=kxy)CO&@z+DZZ(XNHA&68yu)))TA+>eGQkoz9{6^sK%V9m@`5BZh;*a zc2&Nex?P?X>jxhO(hA)?a>gQ|+;Xq3ad1=={TzCvkh%6w{l4@c$K-j_fXUS_*&^Z@ zQNAv!(atp6RV3j27>pJI1HSwDUTo3@_f_itu5GX+_5Oe?LKBi=-1M4(>}o87uoyxq ztFd|i`#25B8fAC3|JQwH*pRFh$2O(?b546#duR7)#ZyH~jh}l!$z}Cos5eq$W6EB2 z-(LONRQsX8(KK@iIt>EEC7Pp$)#3DH&RWJ@&(Y6o#8vdhM+$~vk(hC{@yP0*!QUk) z$qlAd65UkXbV`*#1Ji)`~f}oF&9n)Yv4&OYHzAS>?2ZCWscbd)LOqVZwcHhpV_Dj#C7mo0d z$JrAD@AnQ)EN1QYR#s3qbvc&Hq`KNjz5IUDRcBKTdQ=p$ekcXaJVPM|i`D)&T<9;j>A|X@Q*Yx*Z-E z7AEv{b)7`*ps;){w_+~M%i6N^n!6CXZVJZD?V}DVcO7HG&rDT+O*6zfIV^R2L91DM z%J785_iwjy<%~%rVoEfdu)cP@o@0J0ttU(?1&!-+c5tCJiB`d!c(L%XK{o(%!wz9# zfHIAs`L0~2fCa47r6=&u;r%S)P6%Xe=5Fyh0#X-j!q z-}8BS-Uz@x)5&(-MgW^`mdzic43AFO!HTzd9=^0&!*4gj1|x-&ePU0Mmqi`b$kQkj z2X6gIO5~+jmlW?`U59FB_N9Uw{7bR0!)n5_h&oQsT4YuDq4N4GSEAAs?cvu(BR_AW z=xJ62&0~OOXz>k8+ammV&}_A4q=0zXLkhlTn&v6>vU>K5Xkww&@CWVw7h#Bo5ctPZ z&$Q}nPSZq;x~=HZ0(iX4SNr=2TZ0xZH$_sDJjVL$c`@EFa?8n90G3if|2{PWWFQBGi5P4QePGWAWFgNHtDI(x~ zP0Zvu##awxFcUQ0o{An{K-7ctT%hHmnqKh9C;*Sk?l8exGL%ibIT_-R*G-Lk&;dpX zcf?}9YEiO0^mE-OH=wh@mUQ`mTAb();wC%XeqH@wl#-I)qSiyN&8pSBE@-@l3U@w5 z*`aHl{tMjeWX%rkURm8Tz+Yk5II{7*um(=`qE_`DuC)=$xivjZ$)jiL6Zw8)SG)1jy^DUPcE1wVB zm`*h^2i#F(V+o3(R|e8jpqsXkwH5q+uyA%#_H@VBs?mpI@?o!nN8FZov<6$Rg%<-m*hdeSYficQa|xcpqs+#VsOps@ z=S|mtswWmwoNzy@&fipKFr#s6EQ{hg`7Qq8jMAmIQvL%uv+0HM=w-&_YrzzaIMCm} zC8btN;0=wgO1LwNbN{pK{%V_)I$8Ign7*dWx@FCIujw}|w!zD^pHsVfCcbd+b>Z=^ z`_^9$Kcp#aqAJV2-xDB+mn@3jtB_HS=Fu-5mM3on#j@eLwLP3fSWk+XcoPBy1A61f z$K}XrFO~|fg|Pek15tWp+@D|%rf2#y(n70)Q^WDmAa&j_g%>MFu)i#!% zGpSYLq`mFVyfQ23YiGhSHhrQSjhY5f2=ef{`Ic*)88l;8H_-J5X61|@0 z1GgX~8_&MtG&44bz%4B-BjW|YhKf03LmDY_E7}18(f4sm9&@ajQc|+s5J8%77e#6Z zK_y?=n3dk!&xt`U@jK$Hlt^Wu+G>5Y$qeD9BvSjui7XK%T?J_+e(31bc*v41j-H>9?cm{v_90 zC8CIbofIRPbdCi*<>S_$EXQ z2#$k}n-GPEgCTjOq4W84t_w3dDRX6@iq|)?$JY5sXg$&j<{y?2%zb{UO+^{4HMD#<}KD(Xwe>`wen#BMq0BFOal3Sr0$?_GfAaU_@qtyi1|B}BJI zi~8ggc%COA3++~ie*S?XGd(G-H|H1;=JhDgeR)4CEj}yNH7m)F=}W=fVrDK1ZYvjx zZG2&Orv>g2H1V?^o~I^|g$B#e;YFpqo82=oqHnZixcHDDt(@^ZoAbIT@H-0sgl{2Q zK#R{~gBNdLyl&a6;}yg(;1DMDBw0b@Ays@M(g$Xm%Nr)%V}sYYODUl){a)91+yt5is~(`sOoje^tn^6J0kVjYtLe|bKg;-P9C+puqaMe3Pm6~9Uqj$fuEEV z0V6RZV)SsRmAK!x4{o!+fnz&fTA4^{DcBK5uU7QgqPs7Vc=}SwGVM6vch5)?TclhA zFU=fdg3a_;Lx$Zp!G2kV*?{t}^r$d5&JA4wozlGOfTBi)?p*gp=4?dL2x1ol7)NO< zBjMz0FcDYz0TgH#f^Fg#VgfEbp%gDfHHpIc^rBBP4O;TMWE!QeNl z%x3aebj*3==-o=^vzwmDUYQ$a3c9~Rf3k=ByT2UJP%Y@xEGPkXP>WNb6`>Sje^(l4}=p(05a%oXi> z3$zlxLfV*lqJpzsEmNCNaTT3KW#lE&SRVr>QB-0ANK@ag&o^3e_^HhFzuVaTRem7A zp~^{>Fhua35t__TXdBK8jM6C98_t$3)t+)aTYI25rgsu-Gge{|zsx&$Z@L%+#L(3` z+SVmVWnm$`CT;|Nlzm#1_tFqwv1@#`e!31w6h?Ri@FR*Ti6(~MXY7>01JDnEPqp>r1j z;8d8i^dp-ttIG1{@M{mBl+o)C``Y|5C?L={^}5gjs=18=pKUDIes?pHlV%fY_iA2! 
zwvY@h`|X4@Z^1}^m;aMI!oI^OqAoHd^oq0v^UcGBSZ%WXStwD)o8(_m4KaJ>3!iSY z>yU*D=ZV@7)~9T~G_n?tDseC`Q?nbg3TDGaF-4(I`-qgGC4+Ue$2PYBn;50QgwD&^ z#DxWw>D}CUQid2Ml;90Q#aBC^*D3rSj6B&BcFZ z^1&yL6jD?ED*RC^<1jOeY*bR%CEEl=0-1Zhrs-9+6FL?KTAJ2 zFbZjX@`OWX7%_3ZShgC#vJENv0?{wZqrrq7S_g6Tut8Nj>kyRn_tyED|F*hy)DHjD zu0mB)mulIyCoRrwv7)YhJ-ms_pG{1(`d5US^rz_Y&cYynB)SbVQHm4);xZX;T(wUm zAG4J!JJ;az<8M-8y6=mOum1t9pw}?jT+Lp`P!s5_MvgrJ_-WkeqMw*>DWbn*CRCtM z@}Ry);?lTC5&TQEXW?ra2rkE)t~IK_WMJ)@@KSH6&L+YRY{AB!m9ZcUC*RLDW=>K1 zzM9N=q3b`tvITx`hf4N5*$XP7kpvKk2B=l>cM3GfRayzB8&bi>{Z8}f6sn7#>m_Ct z@YbeEng*Ze^A)v;3#y>=@P}s;t<>X_KX>)s?_#^9C8&aTcT%12`Y@H=Ro74=3!`5%z!Izto?n6?q_B0wPg<-;m2X;HOhN1JR zNUGi5Y@w3G$kD1&d74!6G6v%zwQN~YoulG5*4F6iC=GQ^GvaKg8af7t!;_e}fbTvg z^Ys1wC?shbpqoE?{O@(^kuQXTtk$XH*u+C?I;6dgI7;u`{V=&3lBZF{rT@KXoCG-8 z-SE^i)x$^&50x8HNe4ZF)tSQ6O^yEhVM81_J?!ootxgM(CJM4SU0S_Y=zfs-Ju3I*{=o5+xN z=QQTfkrPTXb!>|Jy&n)^JMmm5;+*!aw_?*0UG3!N%llw7*a2EHyvoWoUctQ$3S?w=#trbn|z5 ziQUD6h{D<^ivaq5OrF!MO8U@Y52%qtEuBu|g+fXtc{nlO${JANsNHF>HAW-=AO|_C;c9yuhTJU&m?SBeXLNw;!_Od8O}<}u}z1>d@?vB-NOr7 z7foA-P0xOvZLVd;SYIC&l^<{Y z2u;&X6IITS3Hme#q3T@cWWj}M!x4$&Xqpcb&~seWIij!U+6|% z7WgYL>MOgxkb1K42QkZ^T4))PPbgxIKM23_+<%p%Yia#mu6{k{_cbt@tfw0#SsO;SzsXY8hUa;mC>z$mfsWPuuvOdYo5vsd*|?aV2VTV zN63UJwa_lfS#>(TaBNPl0|{iwg;my*HqUSZSRmgc5|NaIdHjG7Uuu56lOX8sJohc| zzQ25S=jXty``N~W3aF>E)1bK6Dw024G4=fy?3gVh{iz3l#fXs_J6VQbj1{{-oz49E zw}rHcb>e)8(7&^+r7s?{Wp!OeGdz@GWnh%4#+#PZzJ9e9bQlkMn#-!4!Lr+Bu+!DR z;~qLsUM*~@l`?QX;n-29diYtkdhUUJcvMu~48TcE%@WL5mg!6r^!hGZgSCM71Z~Fa z|Eev1GVgf12TW@ISFz!MD>k*u4MO<*tplv8IB!UQ6DDp3%lB(KlJ;Fp%vA8Fq$&!r zNbq-TZE~%I$v6GvO`k%{NHRVq6mHCj+j+IQmxd_{7K$6a`hpebh22%H8k?ujxtKok z{yoMZB}`fAf4d6&HTCH+&*ocSJ0I6-YVlT`J$l%F-=*8rl`&+*QyB~Y0vB28yi^9s z{mEXEOZ>0HcHTxBAa>^9G3cHZhWXXpd{*Ds%^4sI3z}sE#;(a{_|H#b8K0^|v8$L2 z>m>jIvivds(tzIhq*Muu;h@e2zfVG+ESA^gj)U!u$Fy(e}gqnvP)k`|pV|?O5Q$l|*#ahdn}$ zH2x9^@}t`K>{D#SXE^goBz2XO=N2Ioa#JTEAugE5Vjy&I-*Z}AA+WQ=XFfH*HpemS zOQ{q%A}G&d9me`WI4apk@+V@Ji|PGUWu-1*fbIUid)NzWhpG04B5nHDUq)UTYe=8? 
zQAaPo;YmR*qL>G~N^6d_iTk^kc0%MD+#xC#sC~N#l=CP|Ihc}0Kdhl93kX(`r2CVy zLx-yl{f=ClaMA~lnlXx8v(7VWzhF}aOHff!wYf@~x`6Hv>D!HcXF#o4VD~Ho@u%i_ z9I;^Ij32s)G5>|EHRf7!CRf;m8XV5>YbsU?ErHb#nf-$}9t`qNKC?RG6q@pz_IZ5J|ojFzieWBc0VZ=t6;t35yBSgqV?`3xqAlp6AC?1K)VY z0UIPEkf_9B{#Z<$hsS&rB^c{SYzSy!4{MSu1t6DRBnEb!_+JS0#zmuz53m)f?V{RW z)Lq{Q2GTn6{rEfy+otS-IZc#B_lm|PrFCm z_J*MINt-Gy#Y+;dZ@NFJ&nO}hdWfhpsLPorJ-K02Ts3o#J4tm}R~9{Ka~8^Of+puD?}k{lf@f?tOul3xsO8F!(a~d&D&Ez_ z{@4V(r;1IIh~Nsv4;;sCC$dSbyo|&wd+eu-tGMU$^0*XC1afe8aOTYHm1oxRENo%Q z)Sa#!+|&e7S^2n|+_nUI?s&KkF*`O0_hr7FPtI>OMOdQXb0T9b=L6Cjgy-X_lC-U) z>3&-*RFz>Snk!GxW5q6i5jUA3f%j3Ofx1W*dt0x6RV7G?-s5yxpQOu>86%EZY{UlA z&`@IA_OQr_VWw$4528Q$VC(YK#03Xl;U&#xYauRE@|_d4 zp(+FAR^MX5hOhb>~SEU0hG@NnU0#S${rt>v_+d$z^SGusfyQvI6+G5{c9sR83VP& zk5oYqB~23~$#MRHZ^?!PjAlQJGAivi`jaftDYC-J$wHhgkPcIfk2IgZx$FtsPfw(~hF)iZM1=E`VIV=Nu%nuPix>N<#70qYC~4%ErzmNf zNLG0gI!a3t`X1!VRw-W`OMdl&+2+3TB!4=1YZ3lCu}osO$cKl2CMvNFz{qg7GlPjc zd1Dh!m6QvPlV3V|!%{-On3ltR?`j}Y{%b)0_1EMQ7NQHD7t4pUzU1fF!ubtr#a$=2 z&cW+zfwX)ey3G&pd1cf1*;vDjAn*~ElRH7WzIKv9FlFvVt9U`k{h6ZBhMU7770Uw) zA<9h?*cF0}Gxiu9Q#w_Kvv<2+@4fl(qW4~>S8liK&tT9p-`-Lfq*p4v)?W_4im2LS zir_mZo5-i1gor3UJp)I5TgY#7Dd)!x~G(sJah2 z50O=u-wY7O)N?E_3vNdmJA`j1g|BnD?zn3t*hIvD`o3({%zejl#89fH0@79ah~U^H9BjZy@Y zvf@s|JL1pc2f|BCzzw+J-}){9{DElHQH+}py?sn`4fkUI5cAqvXiksk#WCkpW}5gC zOoAL$w%`5f4A;=c29u3KK~h%v*w+lD#o%7c*`G^-)^Y8k>gho5a0*)Dwo7RKSi~h1 z^2(oThf;HMA7^Kfg*ug`Kitw}^u&c-lD0zaseI^_obaF;CR;7VATIg4MK1Zu@s2LC zDCx6})AgeA()di4o6a2?k)O08`>W)|gZW{&DvtV%jTOTTq>nU^-C{yYyM1wfYE6qx z{p8C*)T}rx^nMUrtTz0%o}>i39{%K+QVmp%o#B?XnYV5ood`Tly#qr^)(R+CwhE(H zw$7U51eX;q8*a}R+3O}dZ=~BOn%~2l?<$jHR7YY72DKxL|4YX-L1oyN(O+j)&aSs> zmiJ0d4nSw|h@G;#B_|fGA4N;gX8u*j$ZLH2r5ZLB7{PghY2Vn;n$8XxBu#B4vo*dL z3^>vlpcpNB^<#?>Ba5mAUV3Pl?w{h&8h89uFKGNibvnLGx#yIxG1x`j1S_n~MMT&- zS^*K~JtS`9^x>WR^tUBd$yczHVBuup=aC!;K!9`Fh0!Tkl?`2@2BHspu7zO)Zq{Gs zh%nr2P10j>x8vSUeZNP)olClhcUh2~<(Zb_K`?O1{<3KDA%)g$q!K;WV0XI7Jf z^b(t|)%@#j_&7XmhRO>h|P`8#a2Z+v%fy&*jXrTBJ6y1dmek6 zvu-ME@pgAdIVD0}^q^Q=T4qdcDv-fxZEcjH2=~0U|=w;7_p?I zc`-uBL0?-8W+CPpnXZ#Pmi#ql`8iYyP4~PzieSRu6K1Vt2djkZjj!gA4x5N(|!Sxhe z4-ckn%K&V(;i?HY8ZoO&|NroGR#9y(1)R)kJH8|xOK`e~f?HYZEwZJ1H{ zxJY14b?$Fzl8@LlUrlHVbbmMjEJ*c4%xh+x2(_dBpkn@NmEyxsyD+46MvRb%$6t!>L5L`Nq&C`tH zL=^UW0_L_udbBqy1dnv7&0p!F*)#-kjkJtV>%^^gmP~sqW&WxuNjI!3eS=m~(u;-Y zn09lTG-#(ERLWE$8vsSl6;e8udvh{Eh>AmW=NyJC74n$HCF6k$}t@cM5q8z!Wiilm*z;_r(pI31{M z=}}=n9e9ZT;d+2}Uq+K$pWdK8kDfe;T{-1X22d)N=FTXPW z9F356WfF5hI-Gzhu#XEd;1S_H`+D@siaPYZKZAFBm67tfNd!{Q4GX5s=R!<4Wn3}v z>eO3US_r?)0Z;J?MJX!4M-=}#;Z!n&QnGfDBJo9df4@Pm(%%mrHA(Gn!0eZ3EQ2;? 
zNw4?4W1GkTBRk#;q8j#85=$Fe_}!<{8mX3GZ?9t(UIflhie9^^D^es9C+OXI7(+47 zNdP6xn|Bu_Fa7&|@Je{j7S~DfK?_scB&)Eax}W(yBrGSu$=(}qI0nU`ty)ngUe7$# zI2<4AlC5H&G*joqK%p!34W!`a+LLej+-beOE)GkLOW~}q8b~=LG>Pr48VqEoBn9U6 zyy9&8ydfeD>wm7EmBCo6wNI0x=n}0p%X@5&d)$=Te#0w{6(nwbJ>&3x9ikr@kq+sz z0Q4uZd}SMnM~I#^37yQ1M%VL2)W~r`X&Baz;P8w!O868J8BUfU_57)8xtIqkPrj&t z+)9z}&1K>32{`rV6)xKsyhwAdkC4Y5N#nt>AuZcrJ;2Uy)kL6~#IaHb(FR;Rb9cY( z8B<*P0wNVUu`z0caxsCxp5sm|U*48x>3Xq-##!#?3XTAUQ70)26r6?HSK>3FPFVFI zI4@!jsNrl^(uV7ozTE7!sDDlrK1iNI?p2JEE|g2PS^@e!&tGoLUW4HrWsu}MVJeD- zidaw19KIVuZqgJw^T%Q#(%I_PG5}`V&B8n-t_QC{u27%R)xH?s{LS2xEI!Ru_LlrGqoE@K#`{uP}A@xkPfv)E* zL8fTkq>DrKcYh`76qQx_tGGDXYNil9orvOzN(r|}HG71it`*LE;9AK%hO5mdQ(4O* ztkj=?W}LNTfrE1&0p;gQG*pkK`gq(JLto%&!oQMZk^g;o%VX5A)g0b03r%hk!m_S_ ztLN2`u^$6wOjRcQA?#cgl8lE6Oh=Ey` z`_oeu_WBvSPuk-_`hZc_0tNMjN?vL!X^GV)Zl$Mjrxl;+F8iayR)gD>AcF=?F-+y4 z?;aH?!ihhjJr^D$P~Zc;L`Vuoj7Nqvf>#L@jE8RZ6WDi=OfLPH$htj!-Mw@*A7_&a z$r5a&^8naF#K#hgM-(>MY=|y?e!*UO9+>-aAMn3aNr})RcKa}$gm3+mC{Aw%!%RsP z)GHT7%I2fM6N?V|+iz{ksQUIj$@|WQ$l+Wbs)0ia(`f1z#B^;sKTATk+0Ro> z70^Gc$c7sd0!u@Xj+B?yi4{?ddF6zK@|>W;@<~)OHnh->+ye^@*oy6Sj372{dd{$!3RBH zd7V9nj*?%Pfo@}vr*->FuLg26*KC0_Qzq8rtM_GA z{kR$;*OUG4(Y28>Wc(G2w2U6l%Cop-WhFB}#(x8(-(}Y`Lt|&=eB}me5QKRNfEH}^ zUm=m@a~A_0xV0y4^*h%DgT1c2T|HHVfejm`sVoHwAMOCe`E+d=y3{q!~SBj2JcFp8V7Na_3s^IK*RU+dw6OAt0h~mq+@xnPt(&~9D zmvWj-f32Q3n*NT(aqIw#zAPC$2QL{q6gW(EhRm>@k4NmA_iVJD>8Robtm`3X5&Jfc z$HG;=olE>4qp3U<@pM_R%yKoxOT!oN*Goo3nA&~g&oEFE+0SpKZ^y5!*vMv+-+unW zPCOr_2ps}q;0RH1@5YGJUe)mm+M`8?H8{slR_)sA<|n1!)`ViK-QTd((aY_Afg#RohQ>uMZdvA4ZBU~3gTKhfh%iw4(*a2-C7 z_>0B~;UB-fsIO_nS#J9Uyx2bCA_kHCg&@F^QdHj?)>N)G4)PK-(u3B1d)8N1go7Qm zCFpWHUmz3a&&eIY2K!TLNOTnDCPr~SX#GDwynYTocx-H{7pF_fs@o#5CHjA=*3sA# z=fOK~adO6%&ziIyX4Y32s!q#|@X4hKsI{rMTI(QuuRd6TgqPIYjQXBuGMjvNu>*0Z z4?tjlotu(c3x=@wy-;G}KQgp|Jga6n%Zv&h>;BKWUKjP6R}UzkUxj9`{6kDPe;
  • BWV5>@W(`Tp*>{IT#tC{R>HNgv&|uB4~EU?XCu!oBRd4WyY`8`GSBx}{<}!iqzL zg+Z8)%ivMxECK&f*arEAIOlc{;6-N%;=E%82u8Q1C9I?xIAuSYC-qv+CYHGlgdBg3 z>c&+nul$WAZrk}2a;RyG;P@p3{E*?=seI7Kj9P_#8(GJr>Gr$77-!KMT$T%6$Tb=c zWA5?q@q5u?Xasj14E+JmjFapZqzUz(;{K#uf`>WSO^~o?BB-Ar@)>J)e~nSF|G2p{ z+7^wAtcC4e+I)U7^hy5kdlCA8o7NxB-;$=kb-wj*!N_w#1FnmwETR;dtM+ZP}1nvKnAF4*1Pp?KD$KWkbb5~3zRCtRu#(KBSb_yeXB zc-iq2Q(6a66UFuTX$&SwCivd>@jHM{&5fR3SB<6!X&C?!#k_wy%Z%!^!N=3{ib^ed z3?yuZWDpNGHgHZ*JrAR5jT?@JOWl0?`KmrzDM@({9!0L-a(>c78pEbRGqU6+!; zcLs59XrMYJulP^2TQY&)dc#6Jg>{+G8=R2ilSm#oMtCHR>lr_nb9Uy!@kxAP3PTrl zBP7rt7dQ3=8&j20?H6g}4X1Np+x|Is{!ifhc(mVY{}GdBuuUci{IicUd;OzG{ktUD zO&@&{%i(@2m@&i$xsmm$+5Bp3{{xih;b*kwrf+!HAU*~b{qFs83jg@th<-I$X4juO zxRr#34G9{Qf2ztUBK=nKKVBzI>TRz^SV#V81a}r16&II8x%j<56APfq@0Qdd{`}(^ zqB)I1+|KU-zfqvVo5};kxbgr!sy2x*P}fmXlJidoH#yUr$$vn}9!+DNwZo<}j>c*- z(50@Eagry+8h+C*#ZTH+q&vw7d=D~^XEVN-z=vqI=?D4lr66c+s4on6)U?jDP;`ch zU+}6C<|$_0;QF*6lH@OJq$qeq^s&rjXfHGNaHh8olM^2Z&-2E1H$-Um&P` zyA%>|9{<^Pr~fgSr67!_k4)q`F}kDN@v-S+?2T$aPCmWASo<+}rt;qJ;wB8#M-~9* zS;r@p;b1??O}niOs$5N>JAuz7g~{m4K9NjoMuRaGu-B_7!XJbm_^3Ww@Fe5Ij;N-q zI9J?%k7CZnf4P&IDKA*G=(xSQX2uICT{acxn2ZgZnsM&QMT2mXb+S>BTNXpx?7n@W ziE1;1KTN*G!B|T=HlT$Oxip3EJ1*=V*9vW^)+RzaXw!WuTg0TKtWF>4k!g94%tF3z zysD|cu+Go~_INhlM`RJ;VTi{@eXp0IoTI(Khww53&_@$dwS5WH#4#zd%LO(A+V!@3 z_{aMYVGhQzS=`}VR?YCmJ{n+sY$+|jCX^5(zHX4&Vi0KTFggy_BR)JOTBS z0juYFD_}l7C5xPmJlo?$D)R|==FErDq%9W*YfHAN?UO4ur{#L;ONXPw25i4`^nSjN zV$rg}^E;b3LW zG1ved6>~jl(rh?(zzyz=NejnKjp|{Lb_GK}5OSatJ*<)7Zrjs$+UA|?tZYa%QYWY+ zhgMGWU<>=*L};dEhlcL{qRyb@Upd7?1Z+kA=)6FKXjHoF z`G}k^)gYNwA1ZEcsJ_(KjMc@M+kKcaA{Jc^EnwFJl>1||f>@HFt@5PhE?*k;hf{TV z+OB-+wER0hC@6H`@}ZfA21VZ-E$1wdKUX4%o9y|o^Pl=c6(=7J&-ZT=-mfUp&(k)a zt<+UAwWrBxX)S^&{|53d`Sbk}G7@ls_@eT4>bu>lAchU#@wk85)B)VZ5MsoN4LZ#b zc{=$k0pkR{slibHLQKk$uQ#m9eCQ*v>FUL=#EM%?UN+$sk{iAKWb% z3^A(=Hf)~_Et7NwFmHLm=JmX=b$`4HlG04{Yx(83bY$!C+3ERH@J{MGHeU3K1g z8nj;Vd##5!_emyKrSuuBECgo|qSTr0?K?_Jk9{S_S=Ch?o=sMz4SyzL%gv5Vo$q&% zxNV!^+S*E0ylPR+!iEWr9Xxr9>n(Q(Q|77C8L)N!@n-0+6c3W ze@j|M&Kd-)CNsohlLZY__dOB`1ev+|?v_SAkc^Vk{%rkppNut2W;5DJng0CG+(<`W z4`W0cbBMy>$gKHWmJ|nY$EkdznN6?`hje&+a?ro z>E#CNAOK8Gx~LGF`+4qInCKqxkCxzv@4gl|=f%XE&6u@xA}5I1Y!cIOtdsZAV$G;N zC*yU^>FXat#-M>!s=3QDWIsObD zkqxy-NtLF21{ETamV8)@-BH^C#$x9MPmjs?x%MoRV~dkfOw7$Ao&iipR@m<`gMtTf z7hQ4Q0*C3FUb>?-lT4=-_8E^u&CLfDG{i4$tBkn6sExS(82s?#!)FFGZ(Wj{t3<+4Q5lUf9%9ZE_m7matI@3KC?|H^fS#9b_!(9~qda>l zEt0}%&7`GYGJK0GP9LHv*n|CA>|z+Z4Ixh0edh@@Tz8fWngmIJb+dc{`F>CWS!ioxeLQ;}w25QM}kXQqIoor;JOwo7K zRk&hPvnU~aTshr}07|E&w{ zqhl91iynN24u?V{yA)@_e9(o6NaXb8hw)?Iu_Vrga6URNm=>0QZK9Vo zUS*I`m~4IfY%2`=d^=OdEf(4q(d&Z7wHf)vJ|Ru+pXgG2Jp~Qz0Y>m+qWI4=uDuYU zy?`2+@3N!>DpcvRdlm2uW||W@2w2-z&5i{*U*aVI_|;S4O?wGaQzIh(5w11v}EIz}Ubi+l_0A$wgWuKfb}+pMr7Kn`@!yZ_1V)JtPRD+TRjf)8*jPe&fic)22o zZCST!*h0g3cISdh;hIoAm+8OX$bY8z9fz z)Af&TAq|y-jS!CCG4N5n15R&JjS^Kp@5)A72Iev;CpY?x#20mK0lN4YOT{x~*Ekb1 zdeC65!r>$yt600Kf5*vjJezca!GZM0P~7;NPT-kM=N;$cMJ##!x=$2Fxt93x4vsPs z)wSX|op555TTq`3E-03+84d$5w&oI5u*%2#>zkfTXM%_hW6N4l6b<94M;!&dT#NVs zd`I_R?8Y5OK1CHZM(srkZG-v|6Vu`%gAu%yhUfYB?+!i!1pTZxgPusKP^9eRfy|}Tv%ck-?Z-ysr%n5zLzYqNiod@9B{+@}SNk$Y4u35b` zk8{z=8}RO zH{P*kB}rw4W5@jw_spuY_N+>sYdO^%>vIorB*_HH_JaAz=os+Si{O@HnS0B<0S{*0 zY-Mnl?`nvR-KOMQiRB0b3YJa5X9g&|@tj6}01ZO?WBa6Sxz*Og1vh`DOQ~>*D=3;7 zzitiPVF2P2q`k_!;VgK@ZL@K}*h)cn-b;zU=nn2Q9Ak|*rJz%IQhPA_u>Kyt9?8+|h4ZH9Zm z>O>?@XZL7J>G6tl^rH}QXk4SE-{H!?f^40YI=SP`PT!mNSHb!mQQ2Yn^MSB0;JvdJ z*sVWHm_s1=Bc$(ITBo3pT1Lo^d4ST6ao{xu*j*%m+Qnn{LX%)LLq@BLn6(x+VfaE5 z*dI6qk70Rats2SWv68_A3#eg&U(4&J;r8Eo8388KAf0g|(c*iI*ePQz5kw^R!TE_{ 
zh5TEbmdFMEYgD7vB=;`BLnEm{Y2HVaNPZc8P^*HVM|nThRnvp}Hf~c}!y(JUbZDih zuWshi`E`l(bIou3rlrNsYaP>>Lr*e1(X=s(124M*W#I|oFq?Rb1;2|XLhiGhUp)@V z)bGIW<3pNamhb>Uq>Zuhs)(;8dCCWrpF_~=PQR|_-d}{y58vrnRm0JS3>Yd%ORB4f zTCi>D9SR~3G_d7e9mV1$z?Cu2B7E;t2xQ|Jj)q{iyBQ0f?JLRmNs(ly-(4*s+%w5a zx#egF$a3S)Q-@_%~gbxp~U z%}dOiMigiL4;I>FB5+2dA?OyQ!?5jYBRpnV_7yKy$(Elk(v1%Ssc;vvlk%!YQ2fGM z9e4~qR8XNz zu#n@CaFP;Piv3oLiuhB4jBfV#+B03M{6AVj$P_}-%bA3##kHEL8!1(=*{(54J`8L^ zs40h{`7fmP2=Tp5wldglTjE9BB7`@h=5^o9o5HiZ;uDFb6an^$THiRd1)H21`MP3j z;<7x3$(#J+1O%AYjO4fZG5g#>k<3J(t+(@oJIg`W8b8Zw${ew|oRP905TxBo^(y{* z=W(UWqm6BIxjit!8dL&*y!(y`|1~A6r$t4#NZs-acwxYDB53_*T;}2pO_^Oj>aAw0 zX~fIdNbW*_@o2j>SFesvi+Jle0nPvpyey0$)8zf(_}rAxS6|C9`{$+& zb?=KGX(@T3o&6A5>k*Yh7G4X_Yfe_plA4CmX{S~)yK7$^NJtPqFDMKh!?MuS_Fsz0 zoRL-uZcnZ1UxfRMG>S}+>t|AinO@V8pg-Mc&1X2_9f%#I-fWc0uSv^xwcmvKKCqw< zg>D)L#Pv!n%C)0@|0Upv59=qJCzWDoE4OCyAZMTgol4Ahy4yGaybBOYtDPg&jUpf! zgJ29+iuw-jmPq@9*1T^&epvLNIOeb+c|??u&@GFdd4RNxyy0EI@;|YlMmT|DL3Hc6 zjmq6i0q^!clfZQ?csEgE-8)VrV#4@R2YfxV`aRyg%l2n|9Y2{x-R@YaI#rUdVHNLj!rz@V#ay`y40UX=kgv?0f zabI!J29#Q$YZ_8=5Gob*yE8k(IW5X{WebXB`?R`|NVz?*yrr{=o*6m#$cepRiAv}d zTk274tM!8Y8)YBZsaPo4oVX!^mq%%`PNp6Il6jJ*#Jg-I<$0q7HDvN4~hgiVle(?*Q>Cu*Ay$y{*Drl<#s{a?LiRus1cnU zfQa0;v#iGpjB7L<*0}76-5*BlrF}skMtni(jnnr<(4ij~xe?MH>w^?#>C<11IbLt? zOiu1H)lt!qw>C27!Sfm^y0y#ld5k*!m$FUHWVerk%w;j z-Z^tcx4%wkHKndqP~D$i4clIK1ZMZW^vASF*eLK@iGGGpPze6sAg!tZi`Je_kjqG9 zUT~vRoz`RZ9=rB}#0_H@7&eM>_z6OHe{06AWm>5&FXfl!^g38sSEdxw0+QzzqZGT% zq{~ECe}gE1XJ2mTZ$zP|n~(ZMEA-H*OUgQGfI!a{cbeC!eqqy0wXj;8g-FwGa{-j8 zeq}&84jgY1fc#V%@>xMIv#Y8t?55czpSmgVe>mEgrkQIVfD;OD=e8Wd6_49~HN*Wq zA77G8!FVc^<7qo@xVgbWC<%%CxPPJw%M(-Rul9=EW|fQZAi=lQ6CXw3t0|cg^3_gS z^j0y-kBl#$-+u%@Mow-cHgaanecpAL%zhS(B~lAE9zyvhC!Wy;M}>t_;69^>Lc}M- z;K%+X7c8no8ZH;Nm}5VbAxUXAgsuQu+()**nH6tKa9r_vP!NIWk>Go@;uM3!tWG-- zDg1)HaX59K1vnO#`|8zck@h=~L*Y|z|FZ4OnhsQtTd~^{p@U4B<0Vy@=ov-DP1VXo zwZvfyDQpY1nBO!K9tK4YYxKg#=m3mqvhG#HV8zVTXv*K)Pi>1AEJsxu&+p0nqMF*f zK>_d?(I3$QpD2sSr_JVNAg86}wYyAa;W8ok!EVf(enP1%JHj)Jjr)%vFJjWW7kR1A zN5~47&24Vm^Hi9_Iyv*s`MB-i@s*>DXJ-&LDNRsNJoTa zjmpp>8A&4vBm$KBozC5T`tF;D8wT_t%k@4+-gud?ZaeLhwqM}*%BX^_5Tt_gpx;vP znW_@0;#7K%M^|z{-0>w&Sb`|1Oe*(d*;%^`K~o`r*-CNo%Mk4}W517n?LZ?0L_CCW zP=?z0!0ZMz+|F*4Gh$!1z@NYB2Z6FE7%6DB#z5UEIMv!rM(sqf^Rx_BciPya(J$Co zQIfsxoAG`Zvuv%M?WUrD{Tru&kH>EW)fnbC{6h5bk*D z>F}sRugveE1X5Sip^0{n7QmR=4THw`uL5qAB}j={>vKmv_3|Lca~CMzHq@JNs)G5_ za|2<6PzjH#dUVcvac``_lYxA%%}X(=QD`_oCzCA2$bNSr&sts*AveKn!qhn*G7<^h z<(3T9Zf;`Kto%#egTo5FxX?({@`}nc=LB4mI7y;MS0&I-p<+&cUf?|3`3=`4pFi6z|vYIjDF$$nE z9{!D?VjwS?;`KhdDBBW|Xo*FEA@o^G(^H@Q9Tn{@!V`(?-1gSs7k;QR^@j)~8rpZP z=9%aQSJW3wuh8=;E!+J|Zu_O{hSNWBo|~D?t6}DC(u`oxw=Ff$GhoAK@v!aT*_zd5 zOd~n7?EcpHz()OeBem#n6`Wc20p+DPOASI*zS1pw}Q*`+^!d}bw zp_+EXUx5-u0Kx90N$q0A9R)(hR{aCeaJ1hqc(%Uq#5zZ%^mVPMXVmc%$u;%9zXaJP z3cqLtIM0;y)I#9W=Ef?UU&wdNr8Y$075BhzyEs2bu`(~MzZy!jo&|?CFpu zwB=uQkG<;vMafoiATjRK@s>5+*zOHYsIVYC{ImOEOJDgwwy-buSw>S8N2ky!wt)Tq zV%ym^?tWI>R`B|M;8Z>qNl9th-s}ripOQ`p)PTh9*gDBKIaBd8J!7U=?-*5-7L@SldAZj= zEtdhg9;2cW%7m>4agO5}_eGP`Ssc}3`JOJ-#mAmrKZaPeC=}BCfN5odVNFQ|zFu4n z@}X^G;`IOiYvq2r<@k}`BDLW%g%T`)wvaLr<}^Td-Gt+&v$MZglR+;a!N^S)k*5Or z_f?Z`*8gg)VVn8dpmq~eCE-(6rtXf-Q|0#^_O@%hlGJp(VLa=FRb_K*ID^|C?~~m2 zyqYFpHMV23NZQa_^*mXH{2tWKoo063kHbjBtmi@+HTI)}LKDBo?Op3!4E;iXhp$xU zP`q%s;podoV_LEeT!SnSBbli5DM;-Batm{f%({riB zkk$IhoJ2^X>d#Tlu>BS`xBtHjYP;@$4?VX6&5zYQ*?1KeET|q0{MocA%SP(`bHqQ! 
zhlx*kY>sXM;G;B_DAI!`?!S+h)2B-b*GmiSBWEk2_&DqSB$~((JNi8z;U9VqI733+ zaPQZq{g@W$KwIT?Jd9xrRm(Lwl3|s3m;djF@%b5zvt|WwkD}Qm+Lzz99kJcc=$n~boK02?d7BR@$jagDuuqkmW+hEg9%flI^M`&2?@;as{VL zsYZ@e!du}qdQ0+!%gTdjy+gDT4{>qxjyJoO zp9qLJjoJkh&o@lk|2e*2qf%B;j!a04e;Bd*zFjmv;%KMk$c6^Yb&qbZB$hhmt}7?T zry7QU%y5Td&*WdB{)N@BpAO$h{q@7Q1h{{shJXTr-ZAIF~)9MnyrEzViM3JA&> zripU85uOGiqYDSeZM&;;e4F{(R9?*xwq6nKoRyd>e#$;5*t|70521gE@&8K%fI+Kf zMH_zInpI;UmiIawg5$N=33}We>B&9Y!_^JF~=_YjmSbF4B(&z2l1$#G;%h^{9FI{hQd7)$F z18i*46ZELDrO&hBY{*7mPlhjEfW=i5I8*P$(?ag_z8_%gtME|)ipkJKYzSa7c;8nv z^bO>LFLh1qOW+J$#_p%w&Jde0zXcp&LmM-|7P9>saOJx2Uglj0A?#dr*YfP^@?!c{J=~8zL_er+2URKLSW_m^W2BIm;TLC9@yVEjYDj z$$Q$5`^If{kYCXIpNH^%Tjo{TelT18wz6DL6^0!)tsOYEHzEh^7dFCB5ygnc3gRQ5 zoq?W~W?82?53C%sM}i{lqnLynoUTDFbVz0T?!3D4jzI_)UXE8v)W*KQs(F}6oc8c> zBb~2D+UL79LW_5g2477cnrFc?RHb7XdeW$_0T5*fx$v0ruY?oNMW6WW+=M*C@B>6B=cbSM^ zK<G9-EozS5Vl`;f`dXDc^ zPr`LF0$waF@{WKqPMNvn4(D>mHoM+NUIK!0%j^+8Po1x$R~At7BgpjIZSoBIzTxyE7Zd2-7#!hS`Tn-MT)!Mt6v?b!0DTh^br}lmKI3&bpvLPh> z<%M}<^TAon8&z4EB2h?$8OIN*J|CAh=mDxp+TD7t%jle8iuFOwaM$^11GVi)7=0(I z=g6@3ZFqQiaE{5suVMJL@R3;Kr?AhXQ;XwFtu!vPC#-;EFOJ^tp zp2|%!A9e-Z|Dc#v@PsTtOSNiF(#v#f)~Ca=dYktuIhE6UaYO0i_Li}L80)j0hkpWr ziT<_2YL{P`tK^&P&*lR~Ob&_-?+O9lFQDNO{+%Bu6Rv9`)%(T2r6_y?^qRlEu1P|p zj%>ZSpGXjejLQo^%xb5wyGCDr5BE!?!IasVVv`VDkg13~Rsr@bq2I#8BqA;;R!t4!q$?Li%QVOtgN6l=cM^1% zhL10MpyvJGSg(@TN1bL#aaKE+72O7yCEZYbm=n%_G;!X(xnf{L<*65ikkZ0%MwJeg~# zI5kA!>a_`i2s?|>)(A%j&HE&v>^2R}{Tg*Je-n;h~*{?_!YF!{ZN#pkJz`bUG3#FEffol^_TYo15U8 zbVZRGPBB@zA&PthgVQ8)Og6lWJ7mSy zM~#KrY{VEZ@@Vap6VB@747(Hoh#W))#R4Be;>%oFZ#w&D{6EC8O$qy_?W77^gEv!G zPO~@tWdF-`Q`33N#7uo^rbQ&|@gm^#>x`+2Mj>*TTgCG_Nq`J77j?AC{6ZW_v@P2( z{w)JBZyi-qjxu#3BcP|$H%!A)jUNU> z=NxbO_c4l51V3TqC6Vj^v2keW0HaGylyqHdO)?0P67&M`plN;o_fgg#?@pXUByP~U zcT5ZFi+yL{a7z~tcPxC@&EA{UM;|@bCO^3l(H~6rv-*S%Qzgqrn_=92D&5cof(j24 zfn;4;5yiDLQHA}?-ALL_Z0g>(`YURv?vMX26I5mtP>jka5F73kFA{$y4LbD(ZQtdA2kIr}Cg4zwyiI1P_cj@hcGs|8GFZKPSU!aYPAynHunbH9Az6Ba$A^Pqy zTY+oh!lc5~@Zv$Ckz?+(G>KFF()rD?7gJ=K{@1{VD z%o_E9+{}WioiWb{u{mmpD1}t(I|EVhtPKv_cTYNq>y$V|2A;3d?Ph$e3kg)eZVSs< z=)`}xDhuvfW0K;)OdHUe)gOG(-^S9DAG9RUj>-z3HrD6do}$WXGINMiCrNR}&q)BSGdnc{ zsYtrQN#^tY`5c?F6316m{8dXGch3kO?P{DB3Vt?A{$cv!-fc!_AnGsKL5Kh)F;^^d zjH`brGo~d5GJxk-@9|}4^v9naqHNb0D4w?+7_c91UKqNbmxS~60%1x*ng^3FkOCYn z$}yiH`;MHH?{@l%9XVYi1q7^Px%p=4-?EANS-TwAu)Bf^KLqu+z0Lj~S%eJxX zD;Cmj7&G<$g*EeV#7i`Y_T?Xax5CG+tXL5SDUXPzE0n|T*^l6Eq%wwGkPHPsrm|`a z6c>Wc2Cw()vlGulc-K|0T-x3`1SjtAF*oR|+6Mm{w%do$hW)UTmoXmfWeNFs`BbEj z|H|wv_=htBp8VdJg#N?7A?Ts+>)9X-_;1of-#4=2SqqFPv8#vMym~Vb+3W41$zzug zDFXw|QnAS^=bNOi^EuCk2Pot5wi>t2xo=xjz^(Mr_Xbq9Gna(c2f+m;9IbvUU$pyVl|HD{wG-w3SN?SSrjeZ{~=LnmQY*c)}Mml*%ZK~@R)WT_ZYpT5v$%{eU%P#1~ zMY_ysE0?j6HVGIn{@ai#J4kXHr{4{7IVK94yR%OIN5$@l^b&Q`xWa0k%G}>G%>&>} zNom?_m~ZJmOsF21#haia55+$2v*JPyI8B;~<8i(WVs)Q*?@vO3^WKAG{j&*$4X7(B ziQw^MlB=gb%%J5@3Y~Y@zCU*dFA2GJ9$e1AH)8Ir&61{~`T z7BkOqmII)ZSUP6AszFR12no`Zx6F+G2XDLz~o00FT;fmb2CP*$UmPhPf~L zj*!Jg4JPzbNZrQxq6e(+{Jv6J*q9T7fQT3swrJBo67Ym4^p%9xm3}iVU3o%md2KPD z=-bmyj}ft-HBK#ifZ?TB*|Oox#mn9fCR1-d>r1~y3mMTZxy)XAICa9gg_P1M^lQJ< zY|?0PKl{TrKwoz6YA-AuGO86qwnIRoB1L5LF3gZOcnB>{WxRKKOb*z4vTP~--5-H% ziUIL2LE}B5C>|8V0cLO$9L;8Bab{3@8FOT?*ezYsY}ct*@m)a-=D~Mvq*zEshl$!% zKM(z5J2Ux#UgGW?D7aA$-g2F@y|MEBSEXx{G)yByy@f=(CBfuM;))ht!QG7~Vk6;p zvf<+Wh2?Z24-hy;m>Kf|B!7 zD64iLEXOeDHz6%zn;Lo4!k`TVU{V#fjM36vv@dGd8uB$bCK^i5eHC1Xt7MEo0WE=8 z&v`9ev`=E-9=m{s5T;kTp~Ru!m8nX)_fpsgQy+%N^_F(#^j2iXeZhQa{ac{A07B6N zuN1>em)?SEs_5pz&8kv?4uCQM=r|ZcBhOCC38MP@TT?8Zzl(O$`-?;Y& zG^itd?EKCViM)nDnqBUc!0tOFh74iZs-B(g);ZS8u;F?Cv#A^!y7%_nqZqsQ{+ZcT 
zo_u!}ndKUQ!}jBa9SSik9MEk5=V8%+WB&JD#U4Pzlk^LzOD`)&!tKQN5&9_Ro zTNfXrFk{9obda$1*l?J(g} zf*P$Jk}(V3>TfJ90s47AV1`BJW$#pTWzTni9ghxokSkdg+u zwq-(=f8pNuy(G)OoONY0bMa()amX%<*hbH~ROHm?b{{Ob9^!8pD36wtOD|Uhwp>~H z&i@Dui!%p^DKRY<=AIZ+DH7Cd`@U})&JKmF?V}qlK07k}Y{Xz{P5bP-4(ojL9$|Oe z9`hi-;pNLTV!YXAMO*3z#!+W{MFyHf+z|{isTg4>6Nyo2>3&jRp+GV;!Gz3Q%2eDp zK}?}qTqr0$+ZU}6TTwiB}{=7zWxx%gzuImDsVFX10Rti(U z$~q4AF*&0OM#|zi?jA_;;v3lDkEY(A-%!?c0nggc^CE}dDrmwN6o3>$hi6!guk^JD zYNmG(_u$Rm@*M}5eKaAqZWKsM0zMqnUl>IGFnFx@;t?YKxZC(pXS>LTQMT<*I&#MS zB|%D);WE2iEJQF0u@!n>XXUt)jZ%riUz-%-t-CrW+i(M^79!Wxo0IR{1UoDz*$pm3 zC+Yj)9FuslEmdhF6@dcth5hd;Q|(+)-0LKjq8myyIoe*!D2jil$ zkOL~V*Nn^X@zcepHEt4z8p(RfGUILIcc|(usp-fxHoCs_y70eMWsCm8=-aXvg2)ou!7dnF|3k z)P9Fs%hLA{Mb7ko9TjDS9J}v3WQEP#Lr1^ucRtbQc3qm)=kng@FB|R&%ZILOF{ic) z4aY{ckz}uzU20WGu2YEc@O@hjp4SeB`*}0-iIafxT&vC6@ag*T4!Qk&(Dd$K zei8|gMA?d+IRJ}F20F*6)$p4y8j=&cSY@>V4a0>c3la7$A=c{>?K4md$#9oCBDXf*D*}-L8 za!vKJZCnT6EXxR|`1xRpmphhxv9`rXYh(UV$bP^j*ze50NOw_CTe!_x#euy2xB8SQ zTYZY*m4K;Y~e9-#!X7MZO8leucgOkhRW95t*%xFyV4^F zj@e?bIZ)j=I4SzwxzRezf|$m6Gy#H|vB?J%cy|<2&ayo6EOOgm& zw8EO4rUZNHHiR_Tl2c)_*hEHwJxRaEg_6ayG56Q6uwlglDJYU17Wvfn00~2%2rbdx z_U3dXe$zPZ#od|UX1UzTi^u!g7#EkaN(TvwuFD=u0!5OU_%{~mxKMW;#dGAMPm?VP z^Jjc7#ANN#c_L$=)0?EANOoA{lPlRtP&z-V=#ge|DXpr-?8Pe)>7z|_H6$o5M-9Gs z^)yD2W#qnxi75*F-UhKwc7BxX=txnK?C@|E z2X}ttSv74o14axi#6F`2!D=>wM;7QFy4j0pj0x`~x8E?FeA!(Xxz7ZcElFZBFwvK3 zo9k^PlI?>O70C`ylydhriy4KvnHW@<2baq9?3N-m zP9x9BV{9@N0Z-J$9b?&3sjn@^{Haq!)8gK|w-QN5*yTGviWim)fm*Aj1zJHl+Zb7h zrkB3yJAXDwS9hjyi(8%e9zXup=V3#W_}X6F%_Odsdt&n^_mknC{;ln!#OJ;eO3&@Y zoGxvhgSW9k%Xu6Fyhd&X4;~T8ETy6_RWgam}1CzGEyX%SuWSZfTzX z4#LOLsA9;BmBZ)vW7W!4FzEDR{kFh8^HVc(vB#muE5f0sdrH3p&wpQAz5ykRW+6!5 zMQbpLq>r&{cYb8cM+U`UGUM-QKjFG_kHu5>-Gne195ys;pty3Bjc{6KDzfv_ke-z) zhCWet-&XUXkT?!cMVp$goF_9k4JNZitl#oHoF>I!^YAW-xYi_PSpBxr#Mi{vvufEw z%$xN)yzcl|VJ+;#>qIYo#IRz~TrumU&7RD+ONmc`Kb!8$$LGrMH=lk8*Pe3>zM1?M zQqodrOY3RsX@bEBR&T7pinZlvXz+@<6>eV47oyc_pwo3d=qYehUkwR~PG=Au74`IN z@bk1yfqr)lJqtWczmA?g9s{gm-3rY4`AaOC`J)gQ{@o;!9gUq{RM^z9QQ)z--g)71 z%$qqK)#YU{S**fX8Vn{B4M-Ec;9q*|e%yTVd3g4Ld*G<4L|##<$oV!ks$GHiXTwp@ zFO>{_6mLHJIBufz+;_uOC|h5G{QjvT9h(if3VDU8FjE@8`ru96a>)gF@aF3<@2~0T zKZxR*l(@|i=OIH?wxI;qpLZOtJZ(IteEcpJ7xy!6YL7KoIN7s<3)P z1?rq`(UYEyHVa`_1N{+xy5i?-y}he}pG=;5ntSK3T{aK%rhkK_e@~}OKs`MRBsj9z zOR|HqOH|bJta^X{_A{2mXFxputn76=myj(!TrP@Db^F{-HZ4=lwQbe*A76 zKej(EKJj=mpe7+oT{V~qLtk52hLiUnheHP2@zgyxVhWw-qt~9n=?4$TaeIzIO=Y}YQ{$d%FTmFyzKQo=`X4So`yTi(r&;z0TTdvFvs` zX=TEYZH?BT#?l3|gm=TNSTY|vqmFXrdQ_Edz*6#Fmd=|kJU2cYpObUYC!YCF5~L)Q zlYo8v`m-d`ipRD6P`r{DJ<=XufV`Gc%wN7n)UotMG%5{zV10j1{Z>rglvCJW3>WY9`os(T#?uc_?tGRQHh#d+kw|Sa zeEf_bzar!R7K~g(MaPLnVoUFF>oGb|o=X4n{ zNfjk4kw*S(Dn=oH82e9{h}@z9NYBc{f#WAqZrOvhOe>za>snlT(!sd&l!K9#YZS#w zv@^otF!K8iMDdUj;uv%O_*G1!Eb241BYa)lw3x3sJ=Kna>I{>2lOz?*qr);H%BGTQ;(edB0T=am*O0+e>D^TyzZ`e3}D&9*&;ox1?yHVMQ&j+ zo__y3yz=?)B3=6&e0V%2FpIpd`r0bt)wBoCe{QyD&~KoU_Wnb-ZYG(wD8H@O?Z(8@ z&&14it1+jt6e$@wlw;Oo7~O)5JT`U;1NFej;b|nkX<}}dl(aN4WIwZX!QZH@t`Htf zQ^D#j_>_F#eSsiS?N$`$XQQ|vTQoCH!0GBrNWl38u`JfL9Je!p8;bCV(m6$gvB$v^ zNn8$w*_tdyDVCieC4Cuwenp0s0yi*ePK%k)iCcO4F#aHQ~jathLM+WA++ zV*qIx+3aA5Nx%Mm=Og%X)*6hVT&!B7Lq={M&b|C5QRKzA>S_^=1e71P9oWz>m@^N@ z?yPv3qGg|w}RkJlK*qIrKpL*KuC|JC?W6mjPL-+J~5 z@}|D#}5U)l91z5s7gpLKYnUy%jy^ zY*6-%3Xg|9UVf1juXGzjlnzeE;g)mkRpt#Cii1u%AA5~IL5yC^NMVumRk-=SS+9kW z9gt?UC@MCyUgWM-LRfZexNe}Lv@*WLvoH=4#b6A-v^)g8le~za-$%xy5up%JzJ9IP z&Wze?Khf%ki-v}c+C!}S?vuAL|L=Lo@1KO%{`UZCE6at~F#gz6;qxg&(H;PAXdl)c!ALI@gE?!O+t#B|XIz{Fe566MW zpNoOCoxN#^q|d=G7iF?E8Xg^d>x^xSqLO4HPlf)`>B)swZ7lwPM_a8hO 
z7hnG$Q3RLZpV!`rulN3s~XjG3=erA4l&(uqLbmemCNwXbB|&}$x6|Dmr10Mi>K1FQt-hm zPh~WU>+U3vNZG$V4nF!M{Jf+NcRlq4UjF z?ek-OSrz_Xw1x!0EnFB4&!k9!J3fZV)8mP|x)MUPZ#dvZx*b~7!CN=6Fh zg6V})bD6X-alLCKx*Hz)qBS^o%f{PiHsOs)4`RwE?}+uI$#~xO=u0FT4dUvRqOl+@ zy5G44RIo{OwlSd^xIn~y_zW-`P@(!MnnEQXt}v8jG;y2rgN zC=3sq!ZSVcWL?||a%ss%RMt9$C&zUme4F@Qbw1F+`A;N>0ePvS;$KYH%-iUk+Y;yx zrA;GR(yd%D6MudGA(s9w29V~gi6^W!T!lfu&C6Gp$N}dR>Ijt60m<$@)Il6<( zi9&0Icx=%51<%S&#_LZ#i02=^L#)p$EXLiF-V_z_ECvZ8;Rp%`^g~igYJ81CR(_#q zwZ&cv8>8m%dARPTc<^vh;eOVo|A&9y_5j}fX+b=O;!Vx_>vx!ix6+8D^qd!0{F<|=_s5V4u{x*NZQ)tfgem(@z-J^>GN zq7v>n*Q=CP)nILD1tOw&Y1@dBfEVYcBGqb91fd&X5e|6Wm^<|=@=)f942#ih6&W0f zq%YH`qKr6s*GcBBy%eU^C-~TLLlN}3DL2J0WnVTXZNnMo=0#o!_ytZ%vg3nSU&jlN z+$q-O_aBI_X0JhBQLz}xg^R@!FS^BM#iV<0$J!N3ML!YIe}#DAq1&-;)mo9mWC3YP zVws0y;eC7_FMMtfd|od`7j9X+QMqx0DDp^3PQ}7G({cUzCm=wc0Sn2iU(di1Cmav2 z#|@pH(?o*M>|8BEs$sHdV5GEZ=`yOyH;UEVl8JlDi=Vd@(QUe(@)%+$n?#We-_gb< z?F3k$w)az#@G#hO;S2$fFwD28C82L07GV;qSP)5i6-M;S!R|wIpwb!?Vd==dZv&-h zE(kvfi=I5xP4~4+X_ZKN2evr|@>0kE;h@p|;%m30RZ%&5>|jyutKue=@papdQkd48 zgvqcJ#`{w+)fJ_}ATw-ezy9b=+i~5G}fR+$>LnQ*7hryjdG=vOy;Pq!G9lqF&;x< z{|QHn&o#>y;o{@CQoT`>x8FVKZ44T|8#b;jCsA_4+t5HiZcLo7Jz@F!qc`x;>#tHy zIE3GS`5s??_=Z?-w%Rao@Q4oowsU50Ky6hyoQ@h{0C`-p^3o0Qtw3%;KZx7PjR$(m zFt&3V<@bm$bOCOvqKZXeH>okaePjM4+jm=kFZlnFnsc?U(-pmz;Q@IPSlW-wzMpa#=hUFz&D;k(QAn%Gh~~ zvP~8Kbf|}Ky&NZuEyjft$BA^Eap(wqNn*i+Kek392`r-b-*7R`o-i7>Tzsk!24=5) z4@J@7;b`7!Rmq>L5DL;WP&_2Q7C7gKJ#g0e(fIY7Phm|;B0)A{#o~GKtw2G)fucEf zuL%mJF}A7ft|!jPccGE)j}HU$QpMO}McFAxOSXu+XC)@T@vMk9@$`!@ctjC&Mh&jK z?IE$w<96b)JFY@%rggJLC-^GK;lcRhP7@06sI5jNx>y0Z+dEC3#3* z(Ey;nesez&u8Uc>YMDq+BDh^ntY5hd?1i>ORP@zQ)Z)RHC!?VMKva~jhtI3j#S9^j z?4IYiK?2wH&^hQHB}nv;bvr#c?yQR_?ijJm_a@-?i6T5kt5;#(%xU7g)n>T5R76~ouaUJ(mQ$;zczy=)$8D%R5$HPW*3 zu+M=KTv%6AqJPV1sMf04od9=cYHSBSi?J@-ExBX=Js41LpcanV$Rs~$gp^&W|# z9~b}Me^FglfvMk4CNZi-LGf@LFyVNy4-c=->0;rW@#E*BSnh(W?-53t#n@;x!R>b8 zr!U@v*X=-V(O`@_yp*JHHNJoJB3Mky^YQWY#dzTDpHW*`N#|2CE)vPE0NVr} zO}(HnA0PbZAIM6k40H>$DJrYW9QgO+lToV5g-W9%Gsp&`^DmJwta>vVR?ouPk8Z|S zf0w{&&pz}A`^kH__xcOPAd-{bpMu@@IS}3k56m_z-Kf@^Z?cW_xILn> zp9h9yZFSRrVNuq@<@OpRr`g4Nj$2<>Cq$$v;`&LzxE~Bx%4)UBHAfqlazk4EWw>X1U<*M>z{lbG4r`K47(HjbQ!JZF^tkw*TY~xMoZNYl_vQEo5*CY1 z=_BKGC{1x0-p*pXY2R>6h@VccBadzst~hNx#jnMASKW*&Z@M2I54mK$0i}yu!`*Ps z;aK7{VaOTxBkZo(>RxOMEGqC+;eY{2c>V^ZZx2&i?ZO2Qe}$Z4y3v^97`H zF%i+!nD!Lhr(@qkj}%@C4~F>T=O5FR(Dl&e@PLYrT1TtF>2Qesxyh%4JP=;Tg2LCt zfqTCv)^ptEg2IS+r1C5R4bJ*a$L8;RT)qd1<1ySZGI7k-LQ8_e@H%Da83zfRLwF8+ z%{)FA@8@oCZ4#73d>(K!aaIU7>+vWY;n%s_$~Py zJU`6JrSq|H*6+yN{a8eU?k+x=Eh8i~7KT;j!?{N&#eWI%GwqnLXE7|v$@H7{p0_QL zY)^!Zq6}0{b~cXQdvL3Q!n7$W%s-FWOH|ftV?HJK)>Rr~&=@SpI_NCC_RJ%wt#Y+F zc~p18mEWnESvcp48^!X>so$Z#wpKJ8NEp!0h6JsxZ%*@g%|n8gwzU>oqougbLsMGY z)_i>;1cN@zoc4o=bJDq&V)*D$2*k%F>q*3eP3KQe)8n;A??Av(3~Tlv%CLkfvGGXT z49|n9fyQ5ly$0uEpW&N_qDgSV0mG0&o}kHW+T_(qBs()a$)P1F8M_r{Vcdw#4WC+~ zT~WzNOTvMp`oZtw;st);x&~+HVv!)S#+@dXAH3lL=uJk^4}^_HPv!X72=TFbsFh<+ zJr5uMz8Ft`Fh$fQ^j7)*&V=$isc_d5ui&Gpb8-32_mW4%O`H{j-xF+lKIr%vIVt$% zs~<39+Rx}W{z9mY7J9I_p+WbYn=?+@#t<~&1{}NZFo^kRnlY>}1BZ+$Mx)jwu2~9; zWM_s;C%EXoQDeYq2akY3*LIO_yQ0F}chTWU*GHiWxG5jUFK_1sKp1z*QF8m{;a7i* zAHMz^4_tSl7{pP`(;heen+!-d!sq5nCa$|k&n!e*MxMxzO1cLBJ~n$AGV=z3JBV^F zrn?2D`Ln>C8Bi!((o=_P8Y#9_M+z)@odi}fvW z>zM~2Gb2qryWRb~N+f*+<=Iw2Z?WNkk^OMcZY_6sY>MV?BL2NqOh&yJE`Hwf5=0Hj zLKrr`f*ptLPK=JDx1~d$-XDK`^dy$f{vG>II8IFZ#K+^wix|%RH8m{T8W{d$)fJ-v zH`^{l=K}veO-FcczI|H*ixk&Sal;DksA)1A@!}(Q;nCZ!fN98h6i>Vn5ua0do82X7 z?4@W(q;$c0Jag^wu$o$FvrX_=W8p{?3)a;j5O1p8-NGu7Y;Qy>P|7%^q^9EWD~?85 zQu|dFZMObuipv|ozaIGrD{3j%Z_R?H=@=cIfsF>eds?F%zRHzY@$yATvzu}9h1cNN 
[Unrecoverable git binary patch data for the deleted documentation images (hls4ml/docs/img/*) omitted; the beginning of the docs/reference.rst diff is lost within it.]
, in particular, were important for the development of this project. - - -Contributors -============ - -Thanks to our contributors! - -.. contributors:: fastmachinelearning/hls4ml - :avatars: - :limit: 100 - :order: DESC diff --git a/hls4ml/docs/release_notes.rst b/hls4ml/docs/release_notes.rst deleted file mode 100644 index 55c4090..0000000 --- a/hls4ml/docs/release_notes.rst +++ /dev/null @@ -1,8 +0,0 @@ -======================== -Release Notes -======================== - -.. changelog:: - :changelog-url: https://fastmachinelearning.org/hls4ml/release_notes.html - :github: https://github.com/fastmachinelearning/hls4ml/releases/ - :pypi: https://pypi.org/project/hls4ml/ diff --git a/hls4ml/docs/requirements.txt b/hls4ml/docs/requirements.txt deleted file mode 100644 index 37774f2..0000000 --- a/hls4ml/docs/requirements.txt +++ /dev/null @@ -1,15 +0,0 @@ -. -h5py -matplotlib -numpy -onnx>=1.4.0 -pandas -pyyaml -seaborn -setuptools_scm[toml]>=5 -sphinx>=3.2.1 -sphinx_contributors -sphinx_github_changelog -sphinx_rtd_theme -tensorflow -toposort>=1.5.0 diff --git a/hls4ml/docs/setup.rst b/hls4ml/docs/setup.rst deleted file mode 100644 index f99b2f2..0000000 --- a/hls4ml/docs/setup.rst +++ /dev/null @@ -1,187 +0,0 @@ -===================== -Setup and Quick Start -===================== - -Getting started with ``hls4ml`` is very easy. There are several installation options available and once installed, -it takes only a few lines of code to run your first synthesis. - -Installation -============ - -The latest release of ``hls4ml`` can be installed with ``pip``: - -.. code-block:: - - pip install hls4ml - -If you want to use our :doc:`profiling <api/profiling>` toolbox, you might need to install extra dependencies: - -.. code-block:: - - pip install hls4ml[profiling] - -``hls4ml`` is also available as a ``conda`` package in the ``conda-forge`` repository. To install, run: - -.. warning:: - The version of ``hls4ml`` available on ``conda-forge`` is outdated; we recommend installing with ``pip`` to get the latest version. - -.. code-block:: - - conda install -c conda-forge hls4ml
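A quick sanity check after installing (``__version__`` is populated from the package metadata, as seen in ``hls4ml/__init__.py`` later in this patch):

.. code-block:: python

   import hls4ml

   print(hls4ml.__version__)  # prints the installed version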
 -Development version ------------------- - -``hls4ml`` is rapidly evolving and many experimental features and bugfixes are available on the development branch. The development -version can be installed directly from ``git``: - -.. code-block:: - - pip install git+https://github.com/fastmachinelearning/hls4ml@main - - -Dependencies -============ - -The ``hls4ml`` library depends on a number of Python packages and external tools for synthesis and simulation. Python dependencies are automatically managed -by ``pip`` or ``conda``. - -* `TensorFlow <https://www.tensorflow.org/>`_ (version 2.4 and newer) and `QKeras <https://github.com/google/qkeras>`_ are required by the Keras converter. -* `ONNX <https://onnx.ai/>`_ (version 1.4.0 and newer) is required by the ONNX converter. -* `PyTorch <https://pytorch.org/>`_ package is optional. If not installed, the PyTorch converter will not be available. - -Running C simulation from Python requires a C++11-compatible compiler. On Linux, a GCC C++ compiler ``g++`` is required. Any version shipped with a recent -Linux distribution should work. On macOS, the *clang*-based ``g++`` is enough. - -To run FPGA synthesis, installation of the following tools is required: - -* Xilinx Vivado HLS 2018.2 to 2020.1 for synthesis for Xilinx FPGAs - - * Vitis HLS 2022.1 or newer is required for synthesis for Xilinx FPGAs using the experimental ``Vitis`` backend. - -* Intel Quartus 20.1 to 21.4 for synthesis for Intel FPGAs - - -Quick Start -============= - -For basic concepts to understand the tool, please visit the :doc:`Concepts <concepts>` chapter. -Here we give line-by-line instructions to demonstrate the general workflow. - -.. code-block:: python - - import hls4ml - - # Fetch a Keras model from our example repository - # This will download our example model to your working directory and return an example configuration file - config = hls4ml.utils.fetch_example_model('KERAS_3layer.json') - - # You can print it to see some default parameters - print(config) - - # Convert it to an HLS project - hls_model = hls4ml.converters.keras_to_hls(config) - - # Print the full list of example models if you want to explore more - hls4ml.utils.fetch_example_list() - -After that, you can use :code:`Vivado HLS` to synthesize the model: - -.. code-block:: python - - # Use Vivado HLS to synthesize the model - # This might take several minutes - hls_model.build() - - # Print out the report if you want - hls4ml.report.read_vivado_report('my-hls-test') - -Done! You've built your first project using ``hls4ml``! To learn more about our various API functionalities, check out our tutorials `here <https://github.com/fastmachinelearning/hls4ml-tutorial>`__. - -If you want to configure your model further, check out our :doc:`Configuration <api/configuration>` page.
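The same flow also works for an in-memory Keras model; a minimal sketch assuming the standard config helpers (``config_from_keras_model`` / ``convert_from_keras_model``), with the model file and output directory names purely illustrative:

.. code-block:: python

   import hls4ml
   from tensorflow.keras.models import load_model

   model = load_model('my_model.h5')  # illustrative model file

   # Generate a default conversion config, then convert
   config = hls4ml.utils.config_from_keras_model(model, granularity='model')
   hls_model = hls4ml.converters.convert_from_keras_model(
       model, hls_config=config, output_dir='my-hls-test', backend='Vivado'
   )

   # Bit-accurate emulation of the HLS design; no FPGA tools needed
   hls_model.compile()

The ``csim``/``synth``/``cosim``/``export`` switches of the ``vivado_hls`` build shown in the CLI section below are likewise exposed as keyword arguments of ``hls_model.build()`` in the Vivado backend.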
 - -Apart from our main API, we also support model conversion using a command-line interface; check out the next section to find out more: - -Getting started with hls4ml CLI (deprecated) --------------------------------------------- - -As an alternative to the recommended Python API, the command-line interface is provided via the ``hls4ml`` command. - -To follow this tutorial, you must first download our ``example-models`` repository: - -.. code-block:: bash - - git clone https://github.com/fastmachinelearning/example-models - -Alternatively, you can clone the ``hls4ml`` repository with submodules: - -.. code-block:: bash - - git clone --recurse-submodules https://github.com/fastmachinelearning/hls4ml - -The model files, along with other configuration parameters, are defined in the ``.yml`` files. -Further information about ``.yml`` files can be found on the :doc:`Configuration <api/configuration>` page. - -In order to create an example HLS project, first go to ``example-models/`` from the main directory: - -.. code-block:: bash - - cd example-models/ - -And use this command to translate a Keras model: - -.. code-block:: bash - - hls4ml convert -c keras-config.yml - -This will create a new HLS project directory with an implementation of a model from the ``example-models/keras/`` directory. -To build the HLS project, do: - -.. code-block:: bash - - hls4ml build -p my-hls-test -a - -This will create a Vivado HLS project with your model implementation! - -**NOTE:** For the last step, you can alternatively do the following to build the HLS project: - -.. code-block:: bash - - cd my-hls-test - vivado_hls -f build_prj.tcl - -``vivado_hls`` can be controlled with: - -.. code-block:: bash - - vivado_hls -f build_prj.tcl "csim=1 synth=1 cosim=1 export=1 vsynth=1" - -Setting the additional parameters from ``1`` to ``0`` disables that step, but disabling ``synth`` also disables ``cosim`` and ``export``. - -Further help -^^^^^^^^^^^^ - -* For further information about how to use ``hls4ml``\ , do: ``hls4ml --help`` or ``hls4ml -h`` -* If you need help for a particular ``command``\ , ``hls4ml command -h`` will show help for the requested ``command`` -* We provide detailed documentation for each command in the :doc:`Command Help <../command>` section - -Existing examples ------------------ - -* Examples of model files and weights can be found in the `example_models <https://github.com/fastmachinelearning/example-models>`_ directory. -* Training code and examples of resources needed to train the models can be found in the `tutorial <https://github.com/fastmachinelearning/hls4ml-tutorial>`__. - -Uninstalling ------------- - -To uninstall ``hls4ml``: - -.. code-block:: bash - - pip uninstall hls4ml - -If installed with ``conda``, remove the package with: - -.. code-block:: bash - - conda remove hls4ml diff --git a/hls4ml/docs/status.rst b/hls4ml/docs/status.rst deleted file mode 100644 index e4cac5e..0000000 --- a/hls4ml/docs/status.rst +++ /dev/null @@ -1,92 +0,0 @@ -=================== -Status and Features -=================== - -Status -====== - -The latest version (built from ``main``) is |version|. -The stable version (released on PyPI) is |release|. -See the :ref:`Release Notes` section for a changelog. - - -Features -======== - -A list of supported ML frameworks, HLS backends, and neural network architectures, including a summary table, is below. Dependencies are given on the :doc:`Setup <setup>` page. - -ML framework support: - -* (Q)Keras -* PyTorch (limited) -* (Q)ONNX (in development) - -Neural network architectures: - -* Fully connected NN (multilayer perceptron, MLP) -* Convolutional NN -* Recurrent NN (LSTM) -* Graph NN (GarNet) - -HLS backends: - -* Vivado HLS -* Intel HLS -* Vitis HLS (experimental)
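The registered backends can also be queried programmatically with the helpers defined in ``hls4ml/backends`` (whose deletion appears later in this patch); a small sketch, where the printed list reflects the registrations shown in this codebase and may differ in other versions:

.. code-block:: python

   from hls4ml.backends import get_available_backends, get_backend

   print(get_available_backends())
   # ['vivado', 'vivadoaccelerator', 'vitis', 'quartus', 'symbolicexpression']

   backend = get_backend('Vivado')  # lookup is case-insensitive
   print(backend.get_available_flows())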
 - -A summary of the ongoing status of the ``hls4ml`` tool is in the table below. - -.. list-table:: - :header-rows: 1 - - * - ML framework/HLS backend - - (Q)Keras - - PyTorch - - (Q)ONNX - - Vivado HLS - - Intel HLS - - Vitis HLS - * - MLP - - ``supported`` - - ``limited`` - - ``in development`` - - ``supported`` - - ``supported`` - - ``experimental`` - * - CNN - - ``supported`` - - ``limited`` - - ``in development`` - - ``supported`` - - ``supported`` - - ``experimental`` - * - RNN (LSTM) - - ``supported`` - - ``N/A`` - - ``in development`` - - ``supported`` - - ``supported`` - - ``N/A`` - * - GNN (GarNet) - - ``supported`` - - ``N/A`` - - ``N/A`` - - ``N/A`` - - ``N/A`` - - ``N/A`` - - -Other feature notes: - -* ``hls4ml`` is tested on Linux, and supports - * Vivado HLS versions 2018.2 to 2020.1 - * Intel HLS versions 20.1 to 21.4 - * Vitis HLS versions 2020.2 to 2022.2 (experimentally) -* Windows and macOS are not supported -* BDT support has moved to the `Conifer <https://github.com/thesps/conifer>`__ package - -Example Models -============== - -We also provide and document several example ``hls4ml`` models in `this GitHub repository <https://github.com/fastmachinelearning/example-models>`_, which is included as a submodule. -You can check it out by doing ``git submodule update --init --recursive`` from the top level directory of ``hls4ml``. diff --git a/hls4ml/hls4ml/__init__.py b/hls4ml/hls4ml/__init__.py deleted file mode 100644 index 81b2859..0000000 --- a/hls4ml/hls4ml/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -from hls4ml import converters, report, utils # noqa: F401 - -try: - from ._version import version as __version__ - from ._version import version_tuple -except ImportError: - __version__ = "unknown version" - version_tuple = (0, 0, "unknown version") - - -def reseed(newseed): - print(f'\npytest-randomly: reseed with {newseed}') - try: - import tensorflow - - tensorflow.random.set_seed(newseed) - except ImportError: - print('\nTensorFlow seed not set') - try: - import torch - - torch.manual_seed(newseed) - except ImportError: - print('\nPyTorch seed not set') diff --git a/hls4ml/hls4ml/backends/__init__.py b/hls4ml/hls4ml/backends/__init__.py deleted file mode 100644 index 6396d78..0000000 --- a/hls4ml/hls4ml/backends/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -from hls4ml.backends.backend import Backend, get_available_backends, get_backend, register_backend # noqa: F401 -from hls4ml.backends.fpga.fpga_backend import FPGABackend # noqa: F401 -from hls4ml.backends.quartus.quartus_backend import QuartusBackend -from hls4ml.backends.symbolic.symbolic_backend import SymbolicExpressionBackend -from hls4ml.backends.vivado.vivado_backend import VivadoBackend -from hls4ml.backends.vivado_accelerator.vivado_accelerator_backend import VivadoAcceleratorBackend -from hls4ml.backends.vivado_accelerator.vivado_accelerator_config import VivadoAcceleratorConfig # noqa: F401 - -from hls4ml.backends.vitis.vitis_backend import VitisBackend # isort: skip - -register_backend('Vivado', VivadoBackend) -register_backend('VivadoAccelerator', VivadoAcceleratorBackend) -register_backend('Vitis', VitisBackend) -register_backend('Quartus', QuartusBackend) -register_backend('SymbolicExpression', SymbolicExpressionBackend) diff --git a/hls4ml/hls4ml/backends/backend.py b/hls4ml/hls4ml/backends/backend.py deleted file mode 100644 index eff87be..0000000 --- a/hls4ml/hls4ml/backends/backend.py +++ /dev/null @@ -1,177 +0,0 @@ -import inspect -import os -from pathlib import Path - -from hls4ml.backends.template import Template -from hls4ml.model.flow import get_backend_flows, update_flow -from hls4ml.model.optimizer import ( - LayerOptimizerPass, - extract_optimizers_from_object, -
extract_optimizers_from_path, - get_backend_passes, - get_optimizer, - register_pass, -) - - -class Backend: - def __init__(self, name): - self.name = name - self.custom_source = {} - self._init_optimizers() - - def _init_optimizers(self): - optimizers = {} - optimizers.update(self._init_class_optimizers()) - optimizers.update(self._init_file_optimizers()) - for opt_name, opt in optimizers.items(): - self.register_pass(opt_name, opt) - - def _init_class_optimizers(self): - class_optimizers = extract_optimizers_from_object(self) - return class_optimizers - - def _init_file_optimizers(self): - file_optimizers = {} - for cls in [*self.__class__.__bases__, self.__class__]: - opt_path = os.path.dirname(inspect.getfile(cls)) + '/passes' - module_path = cls.__module__[: cls.__module__.rfind('.')] + '.passes' - cls_optimizers = extract_optimizers_from_path(opt_path, module_path, self) - file_optimizers.update(cls_optimizers) - return file_optimizers - - def _get_layer_initializers(self): - all_initializers = { - name: get_optimizer(name) - for name in get_backend_passes(self.name) - if isinstance(get_optimizer(name), LayerOptimizerPass) - } - - # Sort through the initializers based on the base class (e.g., to apply 'Layer' optimizers before 'Dense') - sorted_initializers = sorted(all_initializers.items(), key=lambda x: len(x[1].layer_class.mro())) - - # Return only the names of the initializers - return [opt[0] for opt in sorted_initializers] - - def _get_layer_templates(self): - return [name for name in get_backend_passes(self.name) if isinstance(get_optimizer(name), Template)] - - def create_initial_config(self, **kwargs): - """Create the minimal conversion config for the backend. - - Subclasses should implement this method to provide the initial configuration for the conversion. - """ - raise NotImplementedError - - def create_layer_class(self, layer_class): - """Wrap the original layer class into the backend-specific layer class. - - Backends should extend base layer classes with new attributes and variables as needed. These new classes are then - used within the model. - - Args: - layer_class (class): Base class to extend - """ - raise NotImplementedError - - def get_available_flows(self): - """Returns the list of flows registered for this backend. - - Returns: - list: The list of registered flows. - """ - return get_backend_flows(self.name) - - def get_default_flow(self): - """The name of the default flow of the backend. - - Default flow is used as the conversion target if the target flow has not been specified. - """ - raise NotImplementedError - - def get_custom_source(self): - """Returns the registered custom source files. - - Returns: - dict: Custom source files. Keys represent destination paths, values are absolute paths to registered source - files. - """ - return self.custom_source - - def register_source(self, source_file, destination_dir='nnet_utils'): - """Register custom source that is not part of the backend's templates. - - Args: - source_file (str or Path): Absolute path to the source file. - destination_dir (str, optional): The sub-directory of the output project to write the source file to. - Defaults to 'nnet_utils'. 
 - - Raises: - Exception: If the source file is not a str or Path, or if the path is not absolute. - """ - if isinstance(source_file, str): - if not os.path.isabs(source_file): - raise Exception(f'Expected absolute path to custom source file, got: "{source_file}"') - source_path = Path(source_file) - elif isinstance(source_file, Path): - source_path = source_file - else: - raise Exception(f'Expected string or Path, got: "{type(source_file)}"') - - self.custom_source[destination_dir + os.path.sep + source_path.name] = source_path - - def register_pass(self, name, opt_cls, flow=None): - """Register an optimizer pass for the backend. - - Note that user-provided optimizers registered without specifying any flow will not be invoked. - - Args: - name (str): Name of the optimizer - opt_cls (class): Optimizer class - flow (str, list or tuple, optional): Existing flow(s) to add the optimizer to. Defaults to None. - """ - opt_name = register_pass(name, opt_cls, backend=self.name) - if flow is not None: - if not isinstance(flow, (list, tuple)): - flow = [flow] - - for f in flow: - update_flow(f, add_optimizers=[opt_name]) - - def register_template(self, template_cls): - """Register a template "optimizer". - - E.g., function call template or op configuration template. - - Args: - template_cls (class): Template to register. - """ - template = template_cls() - register_pass(template.get_name(), template, backend=self.name) - - -backend_map = {} - - -def register_backend(name, backend_cls): - """Create the backend instance and add it to the registry. - - Args: - name (str): Name of the backend. - backend_cls (class): Backend class to instantiate. Class must implement a constructor without parameters. - - Raises: - Exception: If the backend has already been registered.
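    Backend names are stored lower-cased, so registration and lookup are effectively case-insensitive.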
- """ - if name.lower() in backend_map: - raise Exception(f'Backend {name} already registered') - - backend_map[name.lower()] = backend_cls() - - -def get_backend(name): - return backend_map[name.lower()] - - -def get_available_backends(): - return list(backend_map.keys()) diff --git a/hls4ml/hls4ml/backends/fpga/__init__.py b/hls4ml/hls4ml/backends/fpga/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/hls4ml/hls4ml/backends/fpga/fpga_backend.py b/hls4ml/hls4ml/backends/fpga/fpga_backend.py deleted file mode 100644 index 8cfaec8..0000000 --- a/hls4ml/hls4ml/backends/fpga/fpga_backend.py +++ /dev/null @@ -1,866 +0,0 @@ -import math -import os -import re -from bisect import bisect_left -from collections.abc import Iterable - -import numpy as np - -from hls4ml.backends.backend import Backend -from hls4ml.model.attributes import ChoiceAttribute, ConfigurableAttribute, TypeAttribute -from hls4ml.model.layers import ( - GRU, - LSTM, - Activation, - BatchNormalization, - Conv1D, - Conv2D, - Dense, - Dot, - Embedding, - GarNet, - GarNetStack, - GlobalPooling1D, - GlobalPooling2D, - Pooling1D, - Pooling2D, - SeparableConv1D, - SeparableConv2D, - SimpleRNN, - Softmax, -) -from hls4ml.model.optimizer import model_optimizer -from hls4ml.model.types import ( - ExponentPrecisionType, - FixedPrecisionType, - IntegerPrecisionType, - RoundingMode, - SaturationMode, - XnorPrecisionType, -) -from hls4ml.writer import get_writer - - -class FPGABackend(Backend): - def __init__(self, name): - super().__init__(name) - - self.writer = get_writer(self.name) - - self.attribute_map = {} - - accum_layers = [ - Dense, - Conv1D, - Conv2D, - SeparableConv1D, - SeparableConv2D, - Pooling1D, - Pooling2D, - GlobalPooling1D, - GlobalPooling2D, - SimpleRNN, - LSTM, - GRU, - Dot, - ] - - for layer in accum_layers: - attrs = self.attribute_map.get(layer, []) - attrs.append(TypeAttribute('accum')) - self.attribute_map[layer] = attrs - - rf_layers = accum_layers + [BatchNormalization, Activation, Embedding, GarNet, GarNetStack] - - for layer in rf_layers: - attrs = self.attribute_map.get(layer, []) - attrs.append(ConfigurableAttribute('reuse_factor', default=1)) - self.attribute_map[layer] = attrs - - act_attrs = self.attribute_map.get(Activation, []) - act_attrs.append(ConfigurableAttribute('table_size', default=1024)) - act_attrs.append(TypeAttribute('table', default=FixedPrecisionType(18, 8))) - self.attribute_map[Activation] = act_attrs - - softmax_attrs = self.attribute_map.get(Softmax, []) - softmax_attrs.append(ChoiceAttribute('implementation', ['latency', 'stable', 'argmax', 'legacy'], default='stable')) - softmax_attrs.append(ConfigurableAttribute('skip', value_type=bool, default=False)) - softmax_attrs.append( - TypeAttribute( - 'exp_table', - default=FixedPrecisionType(18, 8, rounding_mode=RoundingMode.RND, saturation_mode=SaturationMode.SAT), - ) - ) - softmax_attrs.append( - TypeAttribute( - 'inv_table', - default=FixedPrecisionType(18, 8, rounding_mode=RoundingMode.RND, saturation_mode=SaturationMode.SAT), - ) - ) - self.attribute_map[Softmax] = softmax_attrs - - def create_layer_class(self, layer_class): - new_attrubutes = [] - for cls, attributes in self.attribute_map.items(): - if issubclass(layer_class, cls): - new_attrubutes.extend(attributes) - - return type( - self.name + layer_class.__name__, (layer_class,), {'_expected_attributes': new_attrubutes, '_wrapped': True} - ) - - def compile(self, model): - """Compile the generated project that can be linked into Python runtime. 
- - Args: - model (ModelGraph): Model to compile. - - Raises: - Exception: If the project failed to compile - - Returns: - string: Returns the name of the compiled library. - """ - curr_dir = os.getcwd() - os.chdir(model.config.get_output_dir()) - - lib_name = None - try: - ret_val = os.system('bash build_lib.sh') - if ret_val != 0: - raise Exception(f'Failed to compile project "{model.config.get_project_name()}"') - lib_name = '{}/firmware/{}-{}.so'.format( - model.config.get_output_dir(), model.config.get_project_name(), model.config.get_config_value('Stamp') - ) - finally: - os.chdir(curr_dir) - - return lib_name - - def write(self, model): - """Write the generated project to disk. - - This function converts the model to C++ and writes the generated files in the output - directory specified in the `config`. - - Args: - model (ModelGraph): Model to write. - """ - - model.apply_flow(self.get_writer_flow()) - - def get_writer_flow(self): - raise NotImplementedError - - def get_layer_mult_size(self, layer): - if 'Dense' in layer.class_name: - n_in = layer.get_attr('n_in') - n_out = layer.get_attr('n_out') - return n_in, n_out - - if 'Conv1D' in layer.class_name: - n_in = layer.get_attr('n_chan') * layer.get_attr('filt_width') - n_out = layer.get_attr('n_filt') - return n_in, n_out - - if 'Conv2D' in layer.class_name: - n_in = layer.get_attr('n_chan') * layer.get_attr('filt_height') * layer.get_attr('filt_width') - n_out = layer.get_attr('n_filt') - return n_in, n_out - - if 'LSTM' in layer.class_name: - n_in = layer.get_attr('n_in') - n_out = layer.get_attr('n_out') * 4 - n_in_recr = layer.get_attr('n_out') - n_out_recr = n_out - return n_in, n_out, n_in_recr, n_out_recr - - if 'GRU' in layer.class_name: - n_in = layer.get_attr('n_in') - n_out = layer.get_attr('n_out') * 3 - n_in_recr = layer.get_attr('n_out') - n_out_recr = n_out - return n_in, n_out, n_in_recr, n_out_recr - - raise Exception(f'Cannot get mult size for layer {layer.name} ({layer.class_name})') - - def get_valid_reuse_factors(self, n_in, n_out): - max_rf = n_in * n_out - valid_reuse_factors = [] - for rf in range(1, max_rf + 1): - _assert = self._validate_reuse_factor(n_in, n_out, rf) - if _assert: - valid_reuse_factors.append(rf) - return valid_reuse_factors - - def _validate_reuse_factor(self, n_in, n_out, rf): - multfactor = min(n_in, rf) - multiplier_limit = int(math.ceil((n_in * n_out) / float(multfactor))) - # - # THIS ASSERTION IS FOR THE FUNCTIONAL CORRECTNESS OF THE DENSE LAYER - # - _assert = ((multiplier_limit % n_out) == 0) or (rf >= n_in) - _assert = _assert and (((rf % n_in) == 0) or (rf < n_in)) - # - # THIS ASSERTION IS FOR QoR AND EXECUTION TIME - # - _assert = _assert and (((n_in * n_out) % rf) == 0) - - return _assert - - def get_closest_reuse_factor(self, valid_rf, chosen_rf): - """ - Returns closest value to chosen_rf. valid_rf is sorted (obtained from get_valid_reuse_factors()) - If two numbers are equally close, return the smallest number. 
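        For example, with valid_rf=[1, 2, 4, 8] and chosen_rf=3, both 2 and 4 are equally close and 2 is returned.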
- """ - pos = bisect_left(valid_rf, chosen_rf) - if pos == 0: - return valid_rf[0] - if pos == len(valid_rf): - return valid_rf[-1] - before = valid_rf[pos - 1] - after = valid_rf[pos] - if after - chosen_rf < chosen_rf - before: - return after - else: - return before - - def set_closest_reuse_factor(self, layer, n_in, n_out, attribute='reuse_factor'): - assert attribute is not None, 'Reuse factor attribute cannot be None' - - valid_rf = self.get_valid_reuse_factors(n_in, n_out) - chosen_rf = layer.get_attr(attribute) - if chosen_rf not in valid_rf: - closest_rf = self.get_closest_reuse_factor(valid_rf, chosen_rf) - valid_rf_str = ','.join(map(str, valid_rf)) - print( - f'WARNING: Invalid ReuseFactor={chosen_rf} in layer "{layer.name}".' - f'Using ReuseFactor={closest_rf} instead. Valid ReuseFactor(s): {valid_rf_str}.' - ) - layer.set_attr(attribute, closest_rf) - - def set_target_reuse_factor(self, layer): - # TODO update target reuse factor for the RNN layers - targ_cycles = layer.get_attr('target_cycles') - - shuffle_cycles = 6 # Number of clock cycles to move data around - if targ_cycles is not None: - if 'Dense' in layer.class_name: - kernel_multiplies = layer.get_attr('n_out') - elif 'Conv1D' in layer.class_name: - kernel_multiplies = layer.get_attr('out_width') - elif 'Conv2D' in layer.class_name: - kernel_multiplies = layer.get_attr('out_height') * layer.get_attr('out_width') - else: - print(f'Unable to set target reuse factor for layer: {layer.name} ({layer.class_name})') - return - - if targ_cycles < shuffle_cycles * kernel_multiplies: # 6 clock min (6 * out_height * out_width) - print( - 'Latency can not be achieved with current target {}. Mininum {}.'.format( - targ_cycles, shuffle_cycles * kernel_multiplies + 1 - ) - ) - return - else: - rf = targ_cycles - shuffle_cycles * kernel_multiplies # subtract data shuffling overhead - - layer.set_attr('reuse_factor', float(rf) / kernel_multiplies) - - def get_valid_conv_partition_splits(self, out_height, out_width): - """Generate valid partition splits of a Conv1D/2D layer. - - Essentially a list of divisors of the number of pixels of the output image. - - Args: - out_height (int): The height of the output image - out_width (int): The width of the output image - - Returns: - list: List of valid partition splits - """ - n_pixels = out_height * out_width - valid_n_partitions = [] - for i in range(1, int(n_pixels / 2) + 1): - if n_pixels % i == 0: - valid_n_partitions.append(i) - valid_n_partitions.append(n_pixels) - - return valid_n_partitions - - @classmethod - def convert_precision_string(cls, precision): - if isinstance(precision, IntegerPrecisionType) or isinstance(precision, FixedPrecisionType): - return precision - - if precision.startswith('ac_'): - return cls._convert_ac_type(precision) - else: - return cls._convert_ap_type(precision) - - @classmethod - def _convert_ap_type(cls, precision): - ''' - Convert a precision string (e.g. 
"ap_fixed<16,6>" to the internal FixedPrecisionTypes etc) - ''' - bits = re.search('.+<(.+?)>', precision).group(1).split(',') - sat_mode = None - round_mode = None - sat_bits = None - if 'fixed' in precision: - width = int(bits[0]) - integer = int(bits[1]) - fields = 2 - signed = not ('u' in precision) - elif 'int' in precision: - width = int(bits[0]) - integer = width - fields = 1 - signed = not ('u' in precision) - if len(bits) > fields: - round_mode = bits[fields] - if len(bits) > fields + 1: - sat_mode = bits[fields + 1] - if len(bits) > fields + 2: - sat_bits = int(bits[fields + 2]) - if 'fixed' in precision: - return FixedPrecisionType(width, integer, signed, round_mode, sat_mode, sat_bits) - elif 'int' in precision: - return IntegerPrecisionType(width, signed) - - @classmethod - def _convert_ac_type(cls, precision): - ''' - Convert a precision string (e.g. "ac_fixed<16,6>" to the internal FixedPrecisionTypes etc) - ''' - bits = re.search('.+<(.+?)>', precision).group(1).split(',') - signed = True # default is signed - sat_mode = None - round_mode = None - if 'fixed' in precision: - width = int(bits[0]) - integer = int(bits[1]) - fields = 2 - if len(bits) > 2: - # only if the third argument is false or 0, set signed to False - # (default is True) - if bits[2].strip().lower() in ['false', '0']: - signed = False - fields = 3 - elif 'int' in precision: - width = int(bits[0]) - integer = width - fields = 1 - if len(bits) > 1: - # only if the second argument is false or 0, set signed to False - # (default is True) - if bits[1].strip().lower() in ['false', '0']: - signed = False - fields = 2 - if len(bits) > fields: - round_mode = bits[fields] - if len(bits) > fields + 1: - sat_mode = bits[fields + 1] - if 'fixed' in precision: - return FixedPrecisionType(width, integer, signed, round_mode, sat_mode) - elif 'int' in precision: - return IntegerPrecisionType(width, signed) - - def product_type(self, data_T, weight_T): - ''' - Helper function to determine which product implementation to use during inference - ''' - assert not isinstance( - data_T, ExponentPrecisionType - ), "Only ExponentPrecisionType (aka 'power of 2') weights are currently supported, not data." 
- product = 'mult' - if isinstance(weight_T, ExponentPrecisionType): - product = 'weight_exponential' - else: - # if binary - if isinstance(weight_T, XnorPrecisionType) and isinstance(data_T, XnorPrecisionType): - product = 'both_binary' - elif isinstance(weight_T, XnorPrecisionType): # data is not xnor-binary - product = 'weight_binary' - elif isinstance(data_T, XnorPrecisionType): # data is xnor, weight is not - product = 'data_binary' - elif isinstance(weight_T, IntegerPrecisionType) and weight_T.width == 2 and weight_T.signed: - product = 'weight_ternary' - else: - product = 'mult' - return product - - def compute_conv1d_instructions(self, in_W, in_C, kernel_size=3, stride=1, pad=0): - # Current limitations - assert pad == 0 - - if kernel_size >= stride: - min_W = (math.ceil(kernel_size / stride) - 1) * stride + kernel_size - else: - min_W = (math.ceil(stride / kernel_size) - 1) * stride + kernel_size - - # if the standard min_W is smaller than the in_W, then use unscaled - if min_W > in_W: - min_W = in_W - - min_oW = int((min_W - kernel_size) // stride + 1) - - out_W = int((in_W - kernel_size) // stride + 1) - scaled_W = (out_W - 1) * stride + kernel_size - - if scaled_W < in_W: - min_W += 1 - - windows_bin = [[0 for _ in range(kernel_size)] for _ in range(min_W)] - - for i_ow in range(min_oW): - for i_fw in range(kernel_size): - index_data = i_ow * stride + i_fw - pad - windows_bin[index_data][i_fw] = 1 - - windows_int = [] - - for i in range(min_W): - windows_int.append(int(''.join(str(p) for p in reversed(windows_bin[i])), 2)) - - return (min_W, windows_int) - - def compute_conv2d_instructions(self, in_H, in_W, in_C, kernel_size=3, stride=1, pad=0): - if isinstance(kernel_size, Iterable): - kernel_height = kernel_size[0] - kernel_width = kernel_size[1] - else: - kernel_height = kernel_size - kernel_width = kernel_size - - if isinstance(stride, Iterable): - stride_height = stride[0] - stride_width = stride[1] - else: - stride_height = stride - stride_width = stride - - # Current limitations - assert kernel_height == kernel_width - assert stride_height == stride_width - assert pad == 0 - - if kernel_height >= stride_height: - min_H = (math.ceil(kernel_height / stride_height) - 1) * stride_height + kernel_height - else: - min_H = (math.ceil(stride_height / kernel_height) - 1) * stride_height + kernel_height - - if min_H > in_H: - min_H = in_H - - if kernel_width >= stride_width: - min_W = (math.ceil(kernel_width / stride_width) - 1) * stride_width + kernel_width - else: - min_W = (math.ceil(stride_width / kernel_width) - 1) * stride_width + kernel_width - - if min_W > in_W: - min_W = in_W - - min_oH = int((min_H - kernel_height) // stride_height + 1) - min_oW = int((min_W - kernel_width) // stride_width + 1) - - out_H = int((in_H - kernel_height) // stride_height + 1) - out_W = int((in_W - kernel_width) // stride_width + 1) - scaled_H = (out_H - 1) * stride_height + kernel_height - scaled_W = (out_W - 1) * stride_width + kernel_width - - if scaled_H < in_H: - min_H += 1 - if scaled_W < in_W: - min_W += 1 - - # Let's hardcode a few common cases: - if ( - min_H == 1 - and min_W == 1 - and kernel_height == 1 - and kernel_width == 1 - and stride == 1 - and scaled_H == in_H - and scaled_W == in_W - ): - return (1, 1, map(str, [1])) - if ( - min_H == 5 - and min_W == 5 - and kernel_height == 3 - and kernel_width == 3 - and stride == 1 - and scaled_H == in_H - and scaled_W == in_W - ): - return ( - 5, - 5, - map( - str, - [ - 1, - 3, - 7, - 6, - 4, - 9, - 27, - 63, - 54, - 36, - 73, - 219, - 
511, - 438, - 292, - 72, - 216, - 504, - 432, - 288, - 64, - 192, - 448, - 384, - 256, - ], - ), - ) - if ( - min_H == 9 - and min_W == 9 - and kernel_height == 5 - and kernel_width == 5 - and stride == 1 - and scaled_H == in_H - and scaled_W == in_W - ): - return ( - 9, - 9, - map( - str, - [ - 1, - 3, - 7, - 15, - 31, - 30, - 28, - 24, - 16, - 33, - 99, - 231, - 495, - 1023, - 990, - 924, - 792, - 528, - 1057, - 3171, - 7399, - 15855, - 32767, - 31710, - 29596, - 25368, - 16912, - 33825, - 101475, - 236775, - 507375, - 1048575, - 1014750, - 947100, - 811800, - 541200, - 1082401, - 3247203, - 7576807, - 16236015, - 33554431, - 32472030, - 30307228, - 25977624, - 17318416, - 1082400, - 3247200, - 7576800, - 16236000, - 33554400, - 32472000, - 30307200, - 25977600, - 17318400, - 1082368, - 3247104, - 7576576, - 16235520, - 33553408, - 32471040, - 30306304, - 25976832, - 17317888, - 1081344, - 3244032, - 7569408, - 16220160, - 33521664, - 32440320, - 30277632, - 25952256, - 17301504, - 1048576, - 3145728, - 7340032, - 15728640, - 32505856, - 31457280, - 29360128, - 25165824, - 16777216, - ], - ), - ) - - windows_bin = [[0 for _ in range(kernel_height * kernel_width)] for _ in range(min_H * min_W)] - - for i_oh in range(min_oH): - for i_ow in range(min_oW): - for i_fh in range(kernel_height): - for i_fw in range(kernel_width): - index_data = (i_oh * stride_height + i_fh - pad) * min_W + (i_ow * stride_width + i_fw - pad) - windows_bin[index_data][i_fh * kernel_width + i_fw] = 1 - - windows_int = [] - - for i in range(min_H): - for j in range(min_W): - windows_int.append(int(''.join(str(p) for p in reversed(windows_bin[i * min_W + j])), 2)) - - return (min_H, min_W, windows_int) - - def _compute_conv1d_im2col(self, input_shape, kernel=3, stride=1, pad=(0, 0), dilation=1): - W, C = input_shape - pad_l, pad_r = pad - - out_w = (W + pad_l + pad_r - (dilation * (kernel - 1) + 1)) // stride + 1 - - input_img = np.arange(1, W * C + 1) - im_matrix = np.zeros((kernel * C * out_w,)) - - index = 0 - for i_ow in range(out_w): - for i_kw in range(kernel): - for i_c in range(C): - input_col = -pad_l + i_kw * dilation + i_ow * stride - if input_col >= 0 and input_col < W: - im_matrix[index] = input_img[input_col * C + i_c] - else: - im_matrix[index] = 0 - index += 1 - - im_matrix = im_matrix.reshape(out_w, -1) - return im_matrix
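    # Worked example for _compute_conv1d_im2col (illustrative): with input_shape
    # (4, 1), kernel=3, stride=1 and no padding, the input elements are numbered
    # 1..4 and the returned matrix is
    #     [[1., 2., 3.],
    #      [2., 3., 4.]]
    # i.e. one row per output pixel listing the input elements it reads; a zero
    # entry marks a padded position.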
 - - def generate_conv1d_line_buffer_fn(self, layer_idx, n_partitions, in_W, in_C, kernel=3, stride=1, pad=0, dilation=1): - """Generate a C++ function that mimics the im2col algorithm. This function works for 1D convolution. - - The HLS compiler produces suboptimal designs for an im2col algorithm implementation, so a trick we use is - to generate the result of the im2col transformation explicitly, instead of relying on loops. Since - the result depends on the parameters of the convolution layer (the input size, the kernel size, stride etc), - we need to do this for every convolution layer. - - Args: - layer_idx (int): Index of layer ('index' attribute). - n_partitions (int): Number of partitions to divide the input into. - The pixels in each partition will be processed in parallel. - in_W (int): Width of input. - in_C (int): Number of channels. - kernel (int, optional): Size of the kernel. Defaults to 3. - stride (int, optional): Stride length. Defaults to 1. - pad (int or Iterable, optional): Padding to apply. Defaults to 0. - Specified as either a number or a list [left_pad, right_pad]. - dilation (int, optional): Dilation rate. Defaults to 1. - - Returns: - str: Generated C++ function - """ - if isinstance(pad, Iterable): - pad_left = pad[0] - pad_right = pad[1] - else: - pad_left = pad - pad_right = pad - - im2col_matrix = self._compute_conv1d_im2col((in_W, in_C), kernel, stride, (pad_left, pad_right), dilation) - - generated_code = ( - "template<class data_T, typename CONFIG_T>\n" - "class fill_buffer_{index} : public FillConv1DBuffer<data_T, CONFIG_T> {{\n" - " public:\n" - " static void fill_buffer(\n" - " data_T data[CONFIG_T::in_width * CONFIG_T::n_chan],\n" - " data_T buffer[CONFIG_T::n_pixels][CONFIG_T::filt_width * CONFIG_T::n_chan],\n" - " const unsigned partition\n" - " ) {{\n" - ).format(index=layer_idx) - indent = ' ' - - for partition_idx, partition in enumerate(np.split(im2col_matrix, n_partitions)): - generated_code += indent * 2 + f'if (partition == {partition_idx:>3}) {{\n' - for pixel_idx, arr in enumerate(partition): - buffer_stmts = [] - for j, v in enumerate(arr): - if v == 0: - val = '0' - else: - val = f'data[{int(v - 1)}]' - buffer_stmts.append(f'buffer[{pixel_idx}][{j}] = {val:>10};') - generated_code += indent * 3 + ' '.join(buffer_stmts) + '\n' - generated_code += '\n' + indent * 2 + '}\n' - - generated_code += indent + '}\n' - generated_code += '};\n' - - return generated_code - - def _compute_conv2d_im2col(self, input_shape, kernel=(3, 3), stride=(1, 1), pad=(0, 0, 0, 0), dilation=(1, 1)): - H, W, C = input_shape - kernel_h, kernel_w = kernel - stride_h, stride_w = stride - pad_t, pad_b, pad_l, pad_r = pad - dilation_h, dilation_w = dilation - - out_h = (H + pad_t + pad_b - (dilation_h * (kernel_h - 1) + 1)) // stride_h + 1 - out_w = (W + pad_l + pad_r - (dilation_w * (kernel_w - 1) + 1)) // stride_w + 1 - - input_img = np.arange(1, H * W * C + 1) - im_matrix = np.zeros((kernel_h * kernel_w * C * out_h * out_w,)) - - index = 0 - for i_oh in range(out_h): - for i_ow in range(out_w): - for i_kh in range(kernel_h): - input_row = -pad_t + i_kh * dilation_h + i_oh * stride_h - for i_kw in range(kernel_w): - for i_c in range(C): - if input_row < 0 or input_row >= H: - im_matrix[index] = 0 - else: - input_col = -pad_l + i_kw * dilation_w + i_ow * stride_w - if input_col >= 0 and input_col < W: - im_matrix[index] = input_img[input_row * W * C + input_col * C + i_c] - else: - im_matrix[index] = 0 - index += 1 - - im_matrix = im_matrix.reshape(out_h * out_w, -1) - return im_matrix - - def generate_conv2d_line_buffer_fn( - self, layer_idx, n_partitions, in_H, in_W, in_C, kernel=(3, 3), stride=(1, 1), pad=(0, 0, 0, 0), dilation=(1, 1) - ): - """Generate a C++ function that mimics the im2col algorithm. This function works for 2D convolution. - - The HLS compiler produces suboptimal designs for an im2col algorithm implementation, so a trick we use is - to generate the result of the im2col transformation explicitly, instead of relying on loops. Since - the result depends on the parameters of the convolution layer (the input size, the kernel size, stride etc), - we need to do this for every convolution layer. - - Args: - layer_idx (int): Index of layer ('index' attribute). - n_partitions (int): Number of partitions to divide the input into. - The pixels in each partition will be processed in parallel. - in_H (int): Height of input. - in_W (int): Width of input. - in_C (int): Number of channels. - kernel (int or Iterable, optional): Size of the kernel. Defaults to (3,3). - stride (int or Iterable, optional): Stride length. Defaults to (1,1). - pad (int or Iterable, optional): Padding to apply. Defaults to 0.
- Specified as either a number or a list [top_pad, bottom_pad, left_pad, right_pad]. - dilation (int or Iterable, optional): Dilation rate. Defaults to (1,1). - - Returns: - str: Generated C++ function - """ - - if isinstance(kernel, Iterable): - kernel_height = kernel[0] - kernel_width = kernel[1] - else: - kernel_height = kernel - kernel_width = kernel - - if isinstance(stride, Iterable): - stride_height = stride[0] - stride_width = stride[1] - else: - stride_height = stride - stride_width = stride - - if isinstance(pad, Iterable): - pad_top = pad[0] - pad_bottom = pad[1] - pad_left = pad[2] - pad_right = pad[3] - else: - pad_top = pad - pad_bottom = pad - pad_left = pad - pad_right = pad - - if isinstance(dilation, Iterable): - dilation_height = dilation[0] - dilation_width = dilation[1] - else: - dilation_height = dilation - dilation_width = dilation - - im2col_matrix = self._compute_conv2d_im2col( - (in_H, in_W, in_C), - (kernel_height, kernel_width), - (stride_height, stride_width), - (pad_top, pad_bottom, pad_left, pad_right), - (dilation_height, dilation_width), - ) - - generated_code = ( - "template<class data_T, typename CONFIG_T>\n" - "class fill_buffer_{index} : public FillConv2DBuffer<data_T, CONFIG_T> {{\n" - " public:\n" - " static void fill_buffer(\n" - " data_T data[CONFIG_T::in_height * CONFIG_T::in_width * CONFIG_T::n_chan],\n" - " data_T buffer[CONFIG_T::n_pixels][CONFIG_T::filt_height * CONFIG_T::filt_width * CONFIG_T::n_chan],\n" - " const unsigned partition\n" - " ) {{\n" - ).format(index=layer_idx) - indent = ' ' - - for partition_idx, partition in enumerate(np.split(im2col_matrix, n_partitions)): - generated_code += indent * 2 + f'if (partition == {partition_idx:>3}) {{\n' - for pixel_idx, arr in enumerate(partition): - buffer_stmts = [] - for j, v in enumerate(arr): - if v == 0: - val = '0' - else: - val = f'data[{int(v - 1)}]' - buffer_stmts.append(f'buffer[{pixel_idx}][{j}] = {val:>10};') - generated_code += indent * 3 + ' '.join(buffer_stmts) + '\n' - generated_code += '\n' + indent * 2 + '}\n' - - generated_code += indent + '}\n' - generated_code += '};\n' - - return generated_code - - @model_optimizer() - def write_hls(self, model): - self.writer.write_hls(model) - return True diff --git a/hls4ml/hls4ml/backends/fpga/fpga_layers.py b/hls4ml/hls4ml/backends/fpga/fpga_layers.py deleted file mode 100644 index 3569735..0000000 --- a/hls4ml/hls4ml/backends/fpga/fpga_layers.py +++ /dev/null @@ -1,84 +0,0 @@ -import numpy as np - -from hls4ml.model.attributes import Attribute, ConfigurableAttribute, TypeAttribute -from hls4ml.model.layers import Conv1D, Conv2D, Layer -from hls4ml.model.types import IntegerPrecisionType, XnorPrecisionType - - -class BatchNormalizationQuantizedTanh(Layer): - '''Merged Batch Normalization and quantized (binary or ternary) Tanh layer. - The mean, variance, beta, gamma parameters are folded into the threshold(s) at which the - sign of the input flips after the quantized (binary or ternary) Tanh activation.
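    For a binary output a single threshold, -bias/scale, is used; for a ternary output the two
    thresholds are shifted from it by +/- ternary_threshold/scale (see set_thresholds below).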
- ''' - - _expected_attributes = [ - Attribute('n_in'), - Attribute('n_filt', default=0), - TypeAttribute('accum'), - ConfigurableAttribute('reuse_factor', default=1), - ] - - def initialize(self): - inp = self.get_input_variable() - shape = inp.shape - dims = inp.dim_names - if self.get_attr('quantize') == 2: - self.add_output_variable(shape, dims, precision=XnorPrecisionType()) - elif self.get_attr('quantize') == 3: - self.add_output_variable(shape, dims, precision=IntegerPrecisionType(width=2)) - else: - raise Exception( - 'Unsupported quantize attribute for BatchNormalizationQuantizedTanh: {}'.format(self.get_attr('quantize')) - ) - - def set_thresholds(self, scale, bias, ternary_threshold=0.5): - inp = self.get_input_variable() - shape = inp.shape - dims = inp.dim_names - precision = self.model.config.backend.convert_precision_string(inp.type.precision) - F = precision.fractional - threshold = -bias / scale - if self.get_attr('quantize') == 2: - self.add_output_variable(shape, dims, precision=XnorPrecisionType()) - threshold = np.floor(threshold * 2**F) / 2**F - self.add_weights_variable( - name='threshold', - var_name='t{index}', - data=threshold, - type_name='threshold{index}_t', - precision=inp.type.precision, - ) - elif self.get_attr('quantize') == 3: - self.add_output_variable(shape, dims, precision=IntegerPrecisionType(width=2)) - threshold_hi = ternary_threshold / scale + threshold - threshold_lo = -ternary_threshold / scale + threshold - threshold_hi = np.floor(threshold_hi * 2**F) / 2**F - threshold_lo = np.floor(threshold_lo * 2**F) / 2**F - self.add_weights_variable( - name='threshold_hi', - var_name='th{index}', - data=threshold_hi, - type_name='threshold_hi_{index}_t', - precision=inp.type.precision, - ) - self.add_weights_variable( - name='threshold_lo', - var_name='tl{index}', - data=threshold_lo, - type_name='threshold_lo_{index}_t', - precision=inp.type.precision, - ) - - -class PointwiseConv1D(Conv1D): - '''Optimized Conv1D implementation for 1x1 kernels.''' - - # Nothing to do, will pick up function and config from class name - pass - - -class PointwiseConv2D(Conv2D): - '''Optimized Conv2D implementation for 1x1 kernels.''' - - # Nothing to do, will pick up function and config from class name - pass diff --git a/hls4ml/hls4ml/backends/fpga/fpga_types.py b/hls4ml/hls4ml/backends/fpga/fpga_types.py deleted file mode 100644 index ceac0b5..0000000 --- a/hls4ml/hls4ml/backends/fpga/fpga_types.py +++ /dev/null @@ -1,459 +0,0 @@ -import numpy as np - -from hls4ml.model.types import ( - CompressedType, - ExponentPrecisionType, - ExponentType, - FixedPrecisionType, - IntegerPrecisionType, - NamedType, - PackedType, - XnorPrecisionType, -) - -# region Precision types - - -class PrecisionDefinition: - def definition_cpp(self): - raise NotImplementedError - - -class APIntegerPrecisionDefinition(PrecisionDefinition): - def definition_cpp(self): - typestring = 'ap_{signed}int<{width}>'.format(signed='u' if not self.signed else '', width=self.width) - return typestring - - -class APFixedPrecisionDefinition(PrecisionDefinition): - def _rounding_mode_cpp(self, mode): - if mode is not None: - return 'AP_' + str(mode) - - def _saturation_mode_cpp(self, mode): - if mode is not None: - return 'AP_' + str(mode) - - def definition_cpp(self): - args = [ - self.width, - self.integer, - self._rounding_mode_cpp(self.rounding_mode), - self._saturation_mode_cpp(self.saturation_mode), - self.saturation_bits, - ] - args = ','.join([str(arg) for arg in args if arg is not None]) - typestring = 
'ap_{signed}fixed<{args}>'.format(signed='u' if not self.signed else '', args=args) - return typestring - - -class ACIntegerPrecisionDefinition(PrecisionDefinition): - def definition_cpp(self): - typestring = f'ac_int<{self.width}, {str(self.signed).lower()}>' - return typestring - - -class ACFixedPrecisionDefinition(PrecisionDefinition): - def _rounding_mode_cpp(self, mode): - if mode is not None: - return 'AC_' + str(mode) - - def _saturation_mode_cpp(self, mode): - if mode is not None: - return 'AC_' + str(mode) - - def definition_cpp(self): - args = [ - self.width, - self.integer, - str(self.signed).lower(), - self._rounding_mode_cpp(self.rounding_mode), - self._saturation_mode_cpp(self.saturation_mode), - self.saturation_bits, - ] - args = ','.join([str(arg) for arg in args if arg is not None]) - typestring = f'ac_fixed<{args}>' - return typestring - - -class PrecisionConverter: - def convert(self, precision_type): - raise NotImplementedError - - -class FixedPrecisionConverter(PrecisionConverter): - def __init__(self, type_map, prefix): - self.type_map = type_map - self.prefix = prefix - - def convert(self, precision_type): - type_cls = type(precision_type) - type_cls_name = type_cls.__name__ - - # If the type is already converted, do nothing - if type_cls_name.startswith(self.prefix): - return precision_type - - definition_cls = self.type_map.get(type_cls, None) - - if definition_cls is not None: - precision_type.__class__ = type(self.prefix + type_cls_name, (type_cls, definition_cls), {}) - return precision_type - else: - raise Exception(f'Cannot convert precision type to {self.prefix}: {precision_type.__class__.__name__}') - - -class APTypeConverter(FixedPrecisionConverter): - def __init__(self): - super().__init__( - type_map={ - FixedPrecisionType: APFixedPrecisionDefinition, - IntegerPrecisionType: APIntegerPrecisionDefinition, - ExponentPrecisionType: APIntegerPrecisionDefinition, - XnorPrecisionType: APIntegerPrecisionDefinition, - }, - prefix='AP', - ) - - -class ACTypeConverter(FixedPrecisionConverter): - def __init__(self): - super().__init__( - type_map={ - FixedPrecisionType: ACFixedPrecisionDefinition, - IntegerPrecisionType: ACIntegerPrecisionDefinition, - ExponentPrecisionType: ACIntegerPrecisionDefinition, - XnorPrecisionType: ACIntegerPrecisionDefinition, - }, - prefix='AC', - ) - - -# endregion - -# region Data types - - -class TypeDefinition: - def definition_cpp(self): - raise NotImplementedError - - -class TypePrecisionConverter: - def convert_precision(self, precision_converter): - self.precision = precision_converter.convert(self.precision) - - -class NamedTypeConverter(TypeDefinition, TypePrecisionConverter): - def definition_cpp(self): - return f'typedef {self.precision.definition_cpp()} {self.name};\n' - - -class CompressedTypeConverter(TypeDefinition, TypePrecisionConverter): - def definition_cpp(self): - cpp_fmt = 'typedef struct {name} {{' '{index} row_index;' '{index} col_index;' '{precision} weight; }} {name};\n' - return cpp_fmt.format(name=self.name, index=self.index_precision, precision=self.precision.definition_cpp()) - - def convert_precision(self, precision_converter): - super().convert_precision(precision_converter) - self.index_precision = precision_converter.convert(self.index_precision) - - -class ExponentTypeConverter(TypeDefinition, TypePrecisionConverter): - def definition_cpp(self): - cpp_fmt = 'typedef struct {name} {{' '{sign} sign;' '{precision} weight; }} {name};\n' - return cpp_fmt.format(name=self.name, 
precision=self.precision.definition_cpp(), sign=self.sign.definition_cpp()) - - def convert_precision(self, precision_converter): - super().convert_precision(precision_converter) - self.sign = precision_converter.convert(self.sign) - - -class PackedTypeConverter(TypeDefinition, TypePrecisionConverter): - def definition_cpp(self): - n_elem_expr = '/' if self.unpack else '*' - return 'typedef nnet::array<{precision}, {n_elem}> {name};\n'.format( - name=self.name, - precision=self.precision.definition_cpp(), - n_elem=str(self.n_elem) + n_elem_expr + str(self.n_pack), - ) - - -class HLSTypeConverter: - def __init__(self, precision_converter): - self.precision_converter = precision_converter - self.type_map = { - NamedType: NamedTypeConverter, - CompressedType: CompressedTypeConverter, - ExponentType: ExponentTypeConverter, - PackedType: PackedTypeConverter, - } - - def convert(self, atype): - type_cls = type(atype) - type_cls_name = type_cls.__name__ - - # If the type is already converted, do nothing - if type_cls_name.startswith('HLS'): - return atype - - conversion_cls = self.type_map.get(type_cls, None) - - if conversion_cls is not None: - atype.__class__ = type('HLS' + type_cls_name, (type_cls, conversion_cls), {}) - atype.convert_precision(self.precision_converter) - return atype - else: - raise Exception(f'Cannot convert type: {atype.__class__.__name__}') - - -# endregion - -# region Variables - - -class VariableDefinition: - def definition_cpp(self, name_suffix='', as_reference=False): - raise NotImplementedError - - -# region ArrayVariable - - -class VivadoArrayVariableDefinition(VariableDefinition): - def definition_cpp(self, name_suffix='', as_reference=False): - return '{type} {name}{suffix}[{shape}]'.format( - type=self.type.name, name=self.name, suffix=name_suffix, shape=self.size_cpp() - ) - - -class QuartusArrayVariableDefinition(VariableDefinition): - def definition_cpp(self, name_suffix='', as_reference=False): - return '{type} {name}{suffix}[{shape}] {pragma}'.format( - type=self.type.name, name=self.name, suffix=name_suffix, shape=self.size_cpp(), pragma=self.pragma - ) - - -class VivadoInplaceArrayVariableDefinition(VariableDefinition): - def definition_cpp(self): - return f'auto& {self.name} = {self.input_var.name}' - - -class QuartusInplaceArrayVariableDefinition(VariableDefinition): - def definition_cpp(self): - return f'auto& {self.name} = {self.input_var.name}' - - -class ArrayVariableConverter: - def __init__(self, type_converter, prefix, definition_cls): - self.type_converter = type_converter - self.prefix = prefix - self.definition_cls = definition_cls - - def convert(self, tensor_var, pragma='partition'): - if isinstance(tensor_var, self.definition_cls): # Already converted - return tensor_var - - tensor_var.pragma = pragma - tensor_var.type = self.type_converter.convert(tensor_var.type) - - tensor_var.__class__ = type(self.prefix + 'ArrayVariable', (type(tensor_var), self.definition_cls), {}) - return tensor_var - - -class VivadoArrayVariableConverter(ArrayVariableConverter): - def __init__(self, type_converter): - super().__init__(type_converter=type_converter, prefix='Vivado', definition_cls=VivadoArrayVariableDefinition) - - -class QuartusArrayVariableConverter(ArrayVariableConverter): - def __init__(self, type_converter): - super().__init__(type_converter=type_converter, prefix='Quartus', definition_cls=QuartusArrayVariableDefinition) - - -class VivadoInplaceArrayVariableConverter(ArrayVariableConverter): - def __init__(self, type_converter): - 
super().__init__(type_converter=type_converter, prefix='Vivado', definition_cls=VivadoInplaceArrayVariableDefinition) - - -class QuartusInplaceArrayVariableConverter(ArrayVariableConverter): - def __init__(self, type_converter): - super().__init__( - type_converter=type_converter, prefix='Quartus', definition_cls=QuartusInplaceArrayVariableDefinition - ) - - -# endregion - -# region StructMemberVariable - - -class QuartusStructMemberVariableDefinition(VariableDefinition): - def definition_cpp(self, name_suffix='', as_reference=False): - return '{type} {name}{suffix}[{shape}]'.format( - type=self.type.name, name=self.member_name, suffix=name_suffix, shape=self.size_cpp() - ) - - -class StructMemberVariableConverter: - def __init__(self, type_converter, prefix, definition_cls): - self.type_converter = type_converter - self.prefix = prefix - self.definition_cls = definition_cls - - def convert(self, tensor_var, pragma='partition', struct_name=None): - if isinstance(tensor_var, self.definition_cls): # Already converted - return tensor_var - - tensor_var.pragma = pragma - tensor_var.type = self.type_converter.convert(tensor_var.type) - - assert struct_name is not None, 'struct_name must be provided when creating a StructMemberVariable' - tensor_var.struct_name = str(struct_name) - tensor_var.member_name = tensor_var.name - tensor_var.name = tensor_var.struct_name + '.' + tensor_var.member_name - - tensor_var.__class__ = type(self.prefix + 'StructMemberVariable', (type(tensor_var), self.definition_cls), {}) - return tensor_var - - -class QuartusStructMemberVariableConverter(StructMemberVariableConverter): - def __init__(self, type_converter): - super().__init__( - type_converter=type_converter, prefix='Quartus', definition_cls=QuartusStructMemberVariableDefinition - ) - - -# endregion - -# region StreamVariable - - -class VivadoStreamVariableDefinition(VariableDefinition): - def definition_cpp(self, name_suffix='', as_reference=False): - if as_reference: # Function parameter - return f'hls::stream<{self.type.name}> &{self.name}{name_suffix}' - else: # Declaration - return 'hls::stream<{type}> {name}{suffix}("{name}")'.format( - type=self.type.name, name=self.name, suffix=name_suffix - ) - - -class VivadoInplaceStreamVariableDefinition(VariableDefinition): - def definition_cpp(self): - return f'auto& {self.name} = {self.input_var.name}' - - -class QuartusStreamVariableDefinition(VariableDefinition): - def definition_cpp(self, name_suffix='', as_reference=False): - if as_reference: # Function parameter - return f'stream<{self.type.name}> &{self.name}{name_suffix}' - else: # Declaration - return f'stream<{self.type.name}> {self.name}{name_suffix}' - - -class QuartusInplaceStreamVariableDefinition(VariableDefinition): - def definition_cpp(self): - return f'auto& {self.name} = {self.input_var.name}' - - -class StreamVariableConverter: - def __init__(self, type_converter, prefix, definition_cls): - self.type_converter = type_converter - self.prefix = prefix - self.definition_cls = definition_cls - - def convert(self, tensor_var, n_pack=1, depth=0): - if isinstance(tensor_var, self.definition_cls): # Already converted - return tensor_var - - if depth == 0: - depth = np.prod(tensor_var.shape) // tensor_var.shape[-1] - tensor_var.pragma = ('stream', depth) - tensor_var.type = self.type_converter.convert( - PackedType(tensor_var.type.name, tensor_var.type.precision, tensor_var.shape[-1], n_pack) - ) - - tensor_var.__class__ = type(self.prefix + 'StreamVariable', (type(tensor_var), self.definition_cls), 
{}) - return tensor_var - - -class VivadoStreamVariableConverter(StreamVariableConverter): - def __init__(self, type_converter): - super().__init__(type_converter=type_converter, prefix='Vivado', definition_cls=VivadoStreamVariableDefinition) - - -class QuartusStreamVariableConverter(StreamVariableConverter): - def __init__(self, type_converter): - super().__init__(type_converter=type_converter, prefix='Quartus', definition_cls=QuartusStreamVariableDefinition) - - -# endregion - -# region InplaceStreamVariable - - -class InplaceStreamVariableConverter(StreamVariableConverter): - def convert(self, tensor_var, n_pack=1, depth=0): - if isinstance(tensor_var, self.definition_cls): # Already converted - return tensor_var - - tensor_var.pragma = None - tensor_var.type = self.type_converter.convert( - PackedType(tensor_var.type.name, tensor_var.type.precision, tensor_var.input_var.shape[-1], n_pack) - ) - - tensor_var.__class__ = type(self.prefix + 'StreamVariable', (type(tensor_var), self.definition_cls), {}) - return tensor_var - - -class VivadoInplaceStreamVariableConverter(InplaceStreamVariableConverter): - def __init__(self, type_converter): - super().__init__( - type_converter=type_converter, prefix='Vivado', definition_cls=VivadoInplaceStreamVariableDefinition - ) - - -class QuartusInplaceStreamVariableConverter(InplaceStreamVariableConverter): - def __init__(self, type_converter): - super().__init__( - type_converter=type_converter, prefix='Quartus', definition_cls=QuartusInplaceStreamVariableDefinition - ) - - -# endregion - -# region WeightsVariable - - -class StaticWeightVariableDefinition(VariableDefinition): - def definition_cpp(self, name_suffix='', as_reference=False): - return f'{self.type.name} {self.name}[{self.data_length}]' - - -class StaticWeightVariableConverter: - def __init__(self, type_converter): - self.type_converter = type_converter - - def convert(self, weight_var): - if isinstance(weight_var, StaticWeightVariableDefinition): # Already converted - return weight_var - - weight_var.weight_class = weight_var.__class__.__name__ - weight_var.storage = 'register' - weight_var.type = self.type_converter.convert(weight_var.type) - - weight_var.__class__ = type('StaticWeightVariable', (type(weight_var), StaticWeightVariableDefinition), {}) - return weight_var - - -class BramWeightVariableConverter: - @classmethod - def convert(cls, weight_var): - weight_var.storage = 'bram' - return weight_var - - -# endregion - -# endregion diff --git a/hls4ml/hls4ml/backends/fpga/passes/__init__.py b/hls4ml/hls4ml/backends/fpga/passes/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/hls4ml/hls4ml/backends/fpga/passes/bn_quant.py b/hls4ml/hls4ml/backends/fpga/passes/bn_quant.py deleted file mode 100644 index 3224b00..0000000 --- a/hls4ml/hls4ml/backends/fpga/passes/bn_quant.py +++ /dev/null @@ -1,169 +0,0 @@ -import numpy as np - -from hls4ml.backends.fpga.fpga_layers import BatchNormalizationQuantizedTanh -from hls4ml.backends.template import FunctionCallTemplate, LayerConfigTemplate -from hls4ml.model.layers import BatchNormalization, register_layer -from hls4ml.model.optimizer import OptimizerPass -from hls4ml.model.types import IntegerPrecisionType, NamedType, XnorPrecisionType - -batchnorm_quantized_tanh_config_template = """struct config{index} : nnet::batchnorm_quantized_tanh_config {{ - static const unsigned n_in = {n_in}; - static const unsigned n_filt = {n_filt}; - static const unsigned n_scale_bias = (n_filt == -1) ? 
n_in : n_filt; - static const unsigned io_type = nnet::{iotype}; - static const unsigned reuse_factor = {reuse}; -}};\n""" - -batchnorm_quantized_tanh_function_template = ( - 'nnet::normalize_{quantize}_tanh<{input_t}, {config}>({input}, {output}, {threshold});' -) - -bn_include_list = ['nnet_utils/nnet_batchnorm.h', 'nnet_utils/nnet_batchnorm_stream.h'] - - -class BatchNormalizationQuantizedTanhConfigTemplate(LayerConfigTemplate): - def __init__(self): - super().__init__(BatchNormalizationQuantizedTanh) - self.template = batchnorm_quantized_tanh_config_template - - def format(self, node): - params = self._default_config_params(node) - params['n_in'] = node.get_input_variable().size_cpp() - - return self.template.format(**params) - - -class BatchNormalizationQuantizedTanhFunctionTemplate(FunctionCallTemplate): - def __init__(self): - super().__init__(BatchNormalizationQuantizedTanh, include_header=bn_include_list) - self.template = batchnorm_quantized_tanh_function_template - - def format(self, node): - params = self._default_function_params(node) - if node.get_attr('quantize') == 2: - params['quantize'] = 'binary' - params['threshold'] = node.get_weights('threshold').name - elif node.get_attr('quantize') == 3: - params['quantize'] = 'ternary' - params['threshold'] = node.get_weights('threshold_hi').name + ', ' + node.get_weights('threshold_lo').name - - return self.template.format(**params) - - -def register_bn_quant(backend): - # Register the layer types to the layer map - register_layer('BatchNormalizationQuantizedTanh', BatchNormalizationQuantizedTanh) - - # Register the optimization passes - backend.register_pass('merge_batch_norm_quantized_tanh', MergeBatchNormAndQuantizedTanh) - backend.register_pass('quantize_dense_output', QuantizeDenseOutput) - - # Register template passes - backend.register_template(BatchNormalizationQuantizedTanhConfigTemplate) - backend.register_template(BatchNormalizationQuantizedTanhFunctionTemplate) - - -class MergeBatchNormAndQuantizedTanh(OptimizerPass): - def match(self, node): - is_match = ( - node.class_name == 'Activation' - and node.get_attr('activation') in ['binary', 'binary_tanh', 'ternary', 'ternary_tanh'] - or node.class_name == 'TernaryTanh' - ) - is_match = is_match and isinstance(node.get_input_node(), BatchNormalization) - return is_match - - def transform(self, model, node): - bn_layer = node.get_input_node() - # Make a new layer with the new attributes - quantize = 0 - if 'binary' in node.get_attr('activation'): - quantize = 2 - if 'ternary' in node.get_attr('activation'): - quantize = 3 - attrs = { - 'name': bn_layer.get_attr('name'), - 'original_name': bn_layer.get_attr('name'), - 'class_name': 'BatchNormalizationQuantizedTanh', - 'n_in': bn_layer.get_attr('n_in'), - 'n_out': bn_layer.get_attr('n_in'), - 'n_filt': bn_layer.get_attr('n_filt'), - 'quantize': quantize, - 'trace': bn_layer.get_attr('trace'), - } - bnbt_layer = model.make_node(BatchNormalizationQuantizedTanh, 'bnbt_' + bn_layer.name, attrs, bn_layer.inputs) - bnbt_layer.set_thresholds( - bn_layer.get_weights('scale').data, bn_layer.get_weights('bias').data, node.get_attr('threshold', 0.5) - ) - # Remove the BatchNormalization layer - model.remove_node(bn_layer, rewire=True) - # Replace the old Activation layer with this one - model.replace_node(node, bnbt_layer) - - return True - - -class QuantizeDenseOutput(OptimizerPass): - def match(self, node): - is_dense = node.class_name == 'Dense' - input_node = node.get_input_node() - is_input_bnqt = input_node is not None and 
input_node.class_name == 'BatchNormalizationQuantizedTanh' - quantizer = node.get_attr('weight_quantizer') - is_binary_ternary = quantizer is not None and ( - quantizer.__class__.__name__ == 'BinaryQuantizer' or quantizer.__class__.__name__ == 'TernaryQuantizer' - ) - return is_dense and is_input_bnqt and is_binary_ternary - - def transform(self, model, node): - # Compute the required precision and update the variables - # Number of bits for output is log2 of number of input nodes - # Since this is the number of uint<1>'s which are summed - nbits = int(np.ceil(np.log2(node.attributes['n_in'])) + 2) - out_type = IntegerPrecisionType(width=nbits) - accum_t = NamedType(f'layer{node.index}_accum_t', out_type) - node.set_attr('accum_t', accum_t) - out_var = node.get_output_variable() - out_var.type.precision = out_type - - quantized_data = None - quantized_precision = None - quantizer = node.get_attr('weight_quantizer') - if quantizer.__class__.__name__ == 'BinaryQuantizer': - quantized_precision = XnorPrecisionType() - elif quantizer.__class__.__name__ == 'TernaryQuantizer': - quantized_precision = IntegerPrecisionType(width=2) - else: - print(f'WARNING: Unknown quantizer - {quantizer.__class__.__name__}. Bailing out') - return False - quantizer.bits = quantized_precision.width - quantizer.hls_type = quantized_precision - quantized_data = quantizer(node.weights['weight'].data) - - weights = node.weights['weight'] - weights.data = quantized_data - weights.type.name = f'weight{node.index}_t' - weights.update_precision(quantized_precision) - - bias = node.weights['bias'] - bias.data = np.zeros(shape=(node.get_attr('n_out'))) - bias.type.name = f'bias{node.index}_t' - bias.nzeros = 0 - bias.update_precision(quantized_precision) - - # If followed by the BatchNormalizationBinaryTanh, update its input - # Also requantise the weights - bd_out_nodes = node.get_output_nodes() - for out_node in bd_out_nodes: - if isinstance(out_node, BatchNormalizationQuantizedTanh): - var_names = [] - if quantizer.__class__.__name__ == 'BinaryQuantizer': - var_names.append('threshold') - elif quantizer.__class__.__name__ == 'TernaryQuantizer': - var_names.append('threshold_hi') - var_names.append('threshold_lo') - for var_name in var_names: - threshold_var = out_node.weights[var_name] - threshold_var.update_precision(out_type) - threshold_var.data = np.floor(threshold_var.data) - - return False diff --git a/hls4ml/hls4ml/backends/fpga/passes/bram_weights.py b/hls4ml/hls4ml/backends/fpga/passes/bram_weights.py deleted file mode 100644 index 88aba21..0000000 --- a/hls4ml/hls4ml/backends/fpga/passes/bram_weights.py +++ /dev/null @@ -1,16 +0,0 @@ -import numpy as np - -from hls4ml.backends.fpga.fpga_types import BramWeightVariableConverter -from hls4ml.model.optimizer import OptimizerPass - - -class RegisterBramWeights(OptimizerPass): - def match(self, node): - return len(node.weights) > 0 - - def transform(self, model, node): - bramport_size = model.config.get_bram_size(node) - for w_name, w_var in node.weights.items(): - if ('storage' in w_var.__dict__ and w_var.storage != 'bram') and np.prod(w_var.shape) > bramport_size: - new_weight = BramWeightVariableConverter.convert(w_var) - node.set_attr(w_name, new_weight) diff --git a/hls4ml/hls4ml/backends/fpga/passes/clone.py b/hls4ml/hls4ml/backends/fpga/passes/clone.py deleted file mode 100644 index 40462f1..0000000 --- a/hls4ml/hls4ml/backends/fpga/passes/clone.py +++ /dev/null @@ -1,92 +0,0 @@ -import numpy as np - -from hls4ml.backends.template import FunctionCallTemplate 
-from hls4ml.model.layers import Layer, register_layer -from hls4ml.model.optimizer import OptimizerPass - - -class Clone(Layer): - '''Inserted after the layer whose output is used more than once.''' - - def initialize(self): - inp = self.get_input_variable() - for i, out_name in enumerate(self.outputs): - self.add_output_variable(inp.shape, inp.dim_names, out_name=out_name, var_name='layer{index}_cpy' + str(i + 1)) - - -clone_include_list = ['nnet_utils/nnet_stream.h'] - - -class CloneFunctionTemplate(FunctionCallTemplate): - def __init__(self): - super().__init__(Clone, include_header=clone_include_list) - self.template = None # to be filled once number of clones known - - def format(self, node): - params = self._default_function_params(node) - for i, _output in enumerate(node.outputs): - params['output' + str(i + 1)] = node.variables[node.outputs[i]].name - - if self.template is None: - self.template = ( - 'nnet::clone_stream<{input_t}, {output_t}, {size}>({input}, ' - + ', '.join(['{output' + str(i + 1) + '}' for i in range(len(node.outputs))]) - + ');' - ) - - return self.template.format(**params) - - -def register_clone(backend): - # Register the layer types to the layer map - register_layer('Clone', Clone) - - # Register the optimization passes - backend.register_pass('clone_output', CloneOutput) - - # Register template passes - backend.register_template(CloneFunctionTemplate) - - -class CloneOutput(OptimizerPass): - '''Clones streams that are used multiple times''' - - def match(self, node): - # We may have already inserted the Clone layer - if isinstance(node, Clone): - return False - - return True - - def transform(self, model, node): - if model.config.get_config_value('IOType') != 'io_stream': - return False - - output_map = node.get_output_use_map() - - transformed = False - for output in node.outputs: - if len(output_map[output]) > 1: - if len(output_map[output]) > 3: - print( - 'WARNING: Cloning output {} of {} ({}) more than 3 times not currently supported'.format( - output, node.__class__.__name__, node.name - ) - ) - return False - out_var = node.get_output_variable(output) - for i, layer in enumerate(output_map[output], 1): - attrs = {'size': np.prod(out_var.shape)} - idx = layer.inputs.index(output) - layer.inputs[idx] = output + '_cpy' + str(i) - clone_layer = model.make_node( - Clone, - 'clone_' + node.name, - attrs, - [output], - [output + '_cpy' + str(i + 1) for i in range(len(output_map[output]))], - ) - model.insert_node(clone_layer) - transformed = True - - return transformed diff --git a/hls4ml/hls4ml/backends/fpga/passes/codegen.py b/hls4ml/hls4ml/backends/fpga/passes/codegen.py deleted file mode 100644 index f1f1080..0000000 --- a/hls4ml/hls4ml/backends/fpga/passes/codegen.py +++ /dev/null @@ -1,51 +0,0 @@ -from hls4ml.model.layers import Conv1D, Conv2D -from hls4ml.model.optimizer import OptimizerPass -from hls4ml.model.types import Source - - -class GenerateConvIm2col(OptimizerPass): - '''Generates code for the im2col step of 1D/2D convolution''' - - def match(self, node): - return isinstance(node, (Conv1D, Conv2D)) and node.model.config.get_config_value('IOType') == 'io_parallel' - - def transform(self, model, node): - node_class = node.__class__.__name__ - if '1D' in node_class: - self._generate_im2col_1d(node) - elif '2D' in node_class: - self._generate_im2col_2d(node) - else: - raise Exception(f'Cannot generate instructions for node {node.name} ({node_class})') - - def _generate_im2col_1d(self, node): - code_str =
node.model.config.backend.generate_conv1d_line_buffer_fn( - node.get_attr('index'), - node.get_attr('n_partitions'), - node.get_input_variable().shape[0], - node.get_input_variable().shape[1], - kernel=node.get_attr('filt_width'), - stride=node.get_attr('stride_width'), - pad=(node.get_attr('pad_left'), node.get_attr('pad_right')), - ) - - node.set_attr('line_buffer_codegen', Source(code_str)) - - def _generate_im2col_2d(self, node): - code_str = node.model.config.backend.generate_conv2d_line_buffer_fn( - node.get_attr('index'), - node.get_attr('n_partitions'), - node.get_input_variable().shape[0], - node.get_input_variable().shape[1], - node.get_input_variable().shape[2], - kernel=(node.get_attr('filt_height'), node.get_attr('filt_width')), - stride=(node.get_attr('stride_height'), node.get_attr('stride_width')), - pad=( - node.get_attr('pad_top'), - node.get_attr('pad_bottom'), - node.get_attr('pad_left'), - node.get_attr('pad_right'), - ), - ) - - node.set_attr('line_buffer_codegen', Source(code_str)) diff --git a/hls4ml/hls4ml/backends/fpga/passes/embedding.py b/hls4ml/hls4ml/backends/fpga/passes/embedding.py deleted file mode 100644 index c1b0a33..0000000 --- a/hls4ml/hls4ml/backends/fpga/passes/embedding.py +++ /dev/null @@ -1,37 +0,0 @@ -from hls4ml.backends.template import FunctionCallTemplate, LayerConfigTemplate -from hls4ml.model.layers import Embedding - -embed_config_template = """struct config{index} : nnet::embed_config {{ - static const unsigned n_in = {n_in}; - static const unsigned n_out = {n_out}; - static const unsigned vocab_size = {vocab_size}; - static const unsigned io_type = nnet::{iotype}; - static const unsigned reuse_factor = {reuse}; - typedef {embeddings_t.name} embeddings_t; -}};\n""" - -embed_function_template = 'nnet::embedding<{input_t}, {output_t}, {config}>({input}, {output}, {e});' - -embed_include_list = ['nnet_utils/nnet_embed.h', 'nnet_utils/nnet_embed_stream.h'] - - -class EmbeddingConfigTemplate(LayerConfigTemplate): - def __init__(self): - super().__init__(Embedding) - self.template = embed_config_template - - def format(self, node): - params = self._default_config_params(node) - return self.template.format(**params) - - -class EmbeddingFunctionTemplate(FunctionCallTemplate): - def __init__(self): - super().__init__(Embedding, include_header=embed_include_list) - self.template = embed_function_template - - def format(self, node): - params = self._default_function_params(node) - params['e'] = node.get_weights('embeddings').name - - return self.template.format(**params) diff --git a/hls4ml/hls4ml/backends/fpga/passes/final_reshape.py b/hls4ml/hls4ml/backends/fpga/passes/final_reshape.py deleted file mode 100644 index 7b60c1d..0000000 --- a/hls4ml/hls4ml/backends/fpga/passes/final_reshape.py +++ /dev/null @@ -1,23 +0,0 @@ -from hls4ml.model.layers import Reshape -from hls4ml.model.optimizer import OptimizerPass - - -class RemoveFinalReshape(OptimizerPass): - '''Remove reshape if final layer''' - - def match(self, node): - # match if reshape is final node - return isinstance(node, Reshape) and not node.get_output_nodes() - - def transform(self, model, node): - if model.config.get_config_value('IOType') == 'io_parallel': - print('WARNING: Final layer is a Reshape, which does not affect the output for io_parallel; removing it') - # remove, but don't rewire because it's the output layer - model.remove_node(node, rewire=False) - return True - elif model.config.get_config_value('IOType') == 'io_stream': - print( - 'WARNING: Final layer is a Reshape, which 
may incur a large resource cost for io_stream; ' - 'consider removing it' - ) - return False diff --git a/hls4ml/hls4ml/backends/fpga/passes/inplace_parallel_reshape.py b/hls4ml/hls4ml/backends/fpga/passes/inplace_parallel_reshape.py deleted file mode 100644 index 532becc..0000000 --- a/hls4ml/hls4ml/backends/fpga/passes/inplace_parallel_reshape.py +++ /dev/null @@ -1,24 +0,0 @@ -from hls4ml.model.layers import Reshape -from hls4ml.model.optimizer import OptimizerPass -from hls4ml.model.types import InplaceTensorVariable - - -class InplaceParallelReshape(OptimizerPass): - """ - Replaces the output variable of a Reshape layer with an inplace variable when using io_parallel. - - This is done because in io_parallel tensors are stored as flat arrays, requiring no reshaping. - """ - - def match(self, node): - return isinstance(node, Reshape) - - def transform(self, model, node): - if model.config.get_config_value('IOType') != 'io_parallel': - return False - - outvar = node.get_output_variable() - invar = node.get_input_variable() - newoutvar = InplaceTensorVariable(outvar, invar) - node.set_attr(node.outputs[0], newoutvar) - return False diff --git a/hls4ml/hls4ml/backends/fpga/passes/inplace_stream_flatten.py b/hls4ml/hls4ml/backends/fpga/passes/inplace_stream_flatten.py deleted file mode 100644 index a16ffef..0000000 --- a/hls4ml/hls4ml/backends/fpga/passes/inplace_stream_flatten.py +++ /dev/null @@ -1,25 +0,0 @@ -from hls4ml.model.layers import Reshape -from hls4ml.model.optimizer import OptimizerPass -from hls4ml.model.types import InplaceTensorVariable - - -class InplaceStreamFlatten(OptimizerPass): - """ - Replaces the output variable of a Reshape (flatten) layer with an inplace variable when using io_stream. - - This optimizer avoids the expensive repacking of the stream when the Reshape layer flattens the tensor to 1D.
- """ - - def match(self, node): - # Reshape acts as a Flatten layer when the result has 1 dimension - return isinstance(node, Reshape) and len(node.get_output_variable().shape) == 1 - - def transform(self, model, node): - if model.config.get_config_value('IOType') != 'io_stream': - return False - - outvar = node.get_output_variable() - invar = node.get_input_variable() - newoutvar = InplaceTensorVariable(outvar, invar) - node.set_attr(node.outputs[0], newoutvar) - return False diff --git a/hls4ml/hls4ml/backends/fpga/passes/remove_softmax.py b/hls4ml/hls4ml/backends/fpga/passes/remove_softmax.py deleted file mode 100644 index 5425c86..0000000 --- a/hls4ml/hls4ml/backends/fpga/passes/remove_softmax.py +++ /dev/null @@ -1,13 +0,0 @@ -from hls4ml.model.layers import Softmax -from hls4ml.model.optimizer.optimizer import OptimizerPass - - -class SkipSoftmax(OptimizerPass): - def match(self, node): - is_softmax = isinstance(node, Softmax) - remove_softmax = node.get_attr('skip', False) - return is_softmax and remove_softmax - - def transform(self, model, node): - model.remove_node(node, rewire=True) - return True diff --git a/hls4ml/hls4ml/backends/fpga/passes/repack_stream.py b/hls4ml/hls4ml/backends/fpga/passes/repack_stream.py deleted file mode 100644 index a502f0a..0000000 --- a/hls4ml/hls4ml/backends/fpga/passes/repack_stream.py +++ /dev/null @@ -1,64 +0,0 @@ -import numpy as np - -from hls4ml.backends.template import FunctionCallTemplate -from hls4ml.model.layers import Layer, Reshape, register_layer -from hls4ml.model.optimizer import OptimizerPass - - -class Repack(Layer): - '''Inserted between layers with different packing factors.''' - - def initialize(self): - shape = self.attributes['target_shape'] - if shape[0] is None: - shape = shape[1:] - dims = [f'N_SIZE_{i}_{self.index}' for i in range(1, len(shape) + 1)] - - self.add_output_variable(shape, dims) - - -repack_function_template = 'nnet::repack_stream<{input_t}, {output_t}, {size}>({input}, {output});' -repack_include_list = ['nnet_utils/nnet_stream.h'] - - -class RepackFunctionTemplate(FunctionCallTemplate): - def __init__(self): - super().__init__(Repack, include_header=repack_include_list) - self.template = repack_function_template - - def format(self, node): - params = self._default_function_params(node) - params['size'] = np.prod(node.get_output_variable().shape) - - return self.template.format(**params) - - -def register_repack_stream(backend): - # Register the layer types to the layer map - register_layer('Repack', Repack) - - # Register the optimization passes - backend.register_pass('reshape_stream', ReshapeStream) - - # Register template passes - backend.register_template(RepackFunctionTemplate) - - -class ReshapeStream(OptimizerPass): - '''Repacks stream for Reshape layer''' - - def match(self, node): - # do not run optimizer pass for a flatten layer (1 output dimension) - return isinstance(node, Reshape) and len(node.get_output_variable().shape) > 1 - - def transform(self, model, node): - if model.config.get_config_value('IOType') != 'io_stream': - return False - - attrs = {'target_shape': node.get_attr('target_shape')} - - # Insert new Repack node instead of Reshape - repack_layer = model.make_node(Repack, 'repack_' + node.name, attrs, node.inputs.copy()) - model.replace_node(node, repack_layer) - - return True diff --git a/hls4ml/hls4ml/backends/fpga/passes/xnor_pooling.py b/hls4ml/hls4ml/backends/fpga/passes/xnor_pooling.py deleted file mode 100644 index 73fee98..0000000 --- 
a/hls4ml/hls4ml/backends/fpga/passes/xnor_pooling.py +++ /dev/null @@ -1,22 +0,0 @@ -from hls4ml.model.layers import GlobalPooling1D, GlobalPooling2D, Pooling1D, Pooling2D -from hls4ml.model.optimizer import OptimizerPass -from hls4ml.model.types import XnorPrecisionType - - -class XnorPooling(OptimizerPass): - ''' - For correct behavior of MaxPooling and similar layers with XnorPrecisionType, the type has to be propagated - to the output. - ''' - - def match(self, node): - if isinstance(node, (Pooling1D, Pooling2D, GlobalPooling1D, GlobalPooling2D)) and node.get_attr('pool_op') == 'Max': - return isinstance(node.get_input_variable().type.precision, XnorPrecisionType) and not isinstance( - node.get_output_variable().type.precision, XnorPrecisionType - ) - return False - - def transform(self, model, node): - outvar = node.get_output_variable() - outvar.type.precision = XnorPrecisionType() - return True diff --git a/hls4ml/hls4ml/backends/quartus/__init__.py b/hls4ml/hls4ml/backends/quartus/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/hls4ml/hls4ml/backends/quartus/passes/__init__.py b/hls4ml/hls4ml/backends/quartus/passes/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/hls4ml/hls4ml/backends/quartus/passes/convolution_templates.py b/hls4ml/hls4ml/backends/quartus/passes/convolution_templates.py deleted file mode 100644 index 75f8ca6..0000000 --- a/hls4ml/hls4ml/backends/quartus/passes/convolution_templates.py +++ /dev/null @@ -1,183 +0,0 @@ -from hls4ml.backends.backend import get_backend -from hls4ml.backends.template import FunctionCallTemplate, LayerConfigTemplate -from hls4ml.model.layers import Conv1D, Conv2D, Conv2DBatchnorm - -# TODO - Dilation rate ? - -''' Shared multiplication config ''' -conv_mult_config_template = """struct config{index}_mult : nnet::dense_config {{ - static const unsigned n_in = {n_in}; - static const unsigned n_out = {n_out}; - - static const unsigned rf_pad = {rfpad}; - static const unsigned bf_pad = {bfpad}; - - static const unsigned reuse_factor = {reuse}; - static const unsigned reuse_factor_rounded = reuse_factor + rf_pad; - static const unsigned block_factor = DIV_ROUNDUP(n_in*n_out, reuse_factor); - static const unsigned block_factor_rounded = block_factor + bf_pad; - static const unsigned multiplier_factor = MIN(n_in, reuse_factor); - static const unsigned multiplier_limit = DIV_ROUNDUP(n_in*n_out, multiplier_factor); - static const unsigned multiplier_scale = multiplier_limit/n_out; - - typedef {accum_t.name} accum_t; - typedef {bias_t.name} bias_t; - typedef {weight_t.name} weight_t; - - template - using product = nnet::product::{product_type}; -}};\n""" - -''' 1D Conv ''' -conv1d_config_template = """struct config{index} : nnet::conv1d_config {{ - static const unsigned in_width = {in_width}; - static const unsigned n_chan = {n_chan}; - - static const unsigned filt_width = {filt_width}; - static const unsigned impl_filt_width = {impl_filt_width}; - static const unsigned kernel_size = filt_width; - - static const unsigned n_filt = {n_filt}; - static const unsigned out_width = {out_width}; - - static const unsigned pad_left = {pad_left}; - static const unsigned pad_right = {pad_right}; - static const unsigned stride_width = {stride_width}; - static const unsigned dilation = {dilation}; - - static const unsigned reuse_factor = {reuse}; - static const unsigned parallelisation_factor = {parallelization}; - static const bool store_weights_in_bram = false; - - static const nnet::conv1d_implementation
implementation = nnet::conv1d_implementation::{implementation}; - - typedef {accum_t.name} accum_t; - typedef {bias_t.name} bias_t; - typedef {weight_t.name} weight_t; - typedef {config_t} mult_config; -}}; -""" - -conv1d_function_template = 'nnet::conv_1d_{data_format}<{input_t}, {output_t}, {config}>({input}, {output}, {w}, {b});' -conv1d_include_list = ['nnet_utils/nnet_conv1d.h', 'nnet_utils/nnet_conv1d_stream.h'] - - -class Conv1DConfigTemplate(LayerConfigTemplate): - def __init__(self): - super().__init__(Conv1D) - self.template = conv1d_config_template - self.mult_template = conv_mult_config_template - - def format(self, node): - conv_params = self._default_config_params(node) - conv_params['dilation'] = node.get_attr('dilation', 1) - if conv_params['dilation'] != 1: - raise Exception('dilation != 1 not supported yet') - conv_params['config_t'] = f'config{node.index}_mult' - conv_config = self.template.format(**conv_params) - - mult_params = self._default_config_params(node) - mult_params['n_in'] = node.get_attr('n_chan') * node.get_attr('filt_width') - mult_params['n_out'] = node.get_attr('n_filt') - mult_params['product_type'] = get_backend('quartus').product_type( - node.get_input_variable().type.precision, node.get_weights('weight').type.precision - ) - mult_config = self.mult_template.format(**mult_params) - - return mult_config + '\n' + conv_config - - -class Conv1DFunctionTemplate(FunctionCallTemplate): - def __init__(self): - super().__init__(Conv1D, include_header=conv1d_include_list) - self.template = conv1d_function_template - - def format(self, node): - params = self._default_function_params(node) - if node.get_attr('data_format') == 'channels_first': - raise Exception('channels_first not supported on Quartus') - params['data_format'] = 'cl' - params['w'] = node.get_weights('weight').name - params['b'] = node.get_weights('bias').name - - return self.template.format(**params) - - -''' 2D Conv ''' -conv2d_config_template = """struct config{index} : nnet::conv2d_config {{ - static const unsigned in_height = {in_height}; - static const unsigned in_width = {in_width}; - static const unsigned n_chan = {n_chan}; - - static const unsigned out_height = {out_height}; - static const unsigned out_width = {out_width}; - - static const unsigned n_filt = {n_filt}; - static const unsigned filt_height = {filt_height}; - static const unsigned filt_width = {filt_width}; - static const unsigned impl_filt_height = {impl_filt_height}; - static const unsigned impl_filt_width = {impl_filt_width}; - static const unsigned kernel_size = filt_height * filt_width; - - static const unsigned pad_top = {pad_top}; - static const unsigned pad_bottom = {pad_bottom}; - static const unsigned pad_left = {pad_left}; - static const unsigned pad_right = {pad_right}; - static const unsigned stride_height = {stride_height}; - static const unsigned stride_width = {stride_width}; - - static const unsigned reuse_factor = {reuse}; - static const unsigned parallelisation_factor = {parallelization}; - static const bool store_weights_in_bram = false; - - static const nnet::conv2d_implementation implementation = nnet::conv2d_implementation::{implementation}; - - typedef {accum_t.name} accum_t; - typedef {bias_t.name} bias_t; - typedef {weight_t.name} weight_t; - typedef {config_t} mult_config; -}};\n""" - -conv2d_function_template = 'nnet::conv_2d_{data_format}<{input_t}, {output_t}, {config}>({input}, {output}, {w}, {b});' -conv2d_include_list = ['nnet_utils/nnet_conv2d.h', 'nnet_utils/nnet_conv2d_stream.h'] - - -class 
Conv2DConfigTemplate(LayerConfigTemplate): - def __init__(self): - super().__init__((Conv2D, Conv2DBatchnorm)) - self.template = conv2d_config_template - self.mult_template = conv_mult_config_template - - def format(self, node): - conv_params = self._default_config_params(node) - conv_params['dilation'] = node.get_attr('dilation', 1) - if conv_params['dilation'] != 1: - raise Exception('dilation != 1 not supported yet') - conv_params['config_t'] = f'config{node.index}_mult' - conv_config = self.template.format(**conv_params) - - mult_params = self._default_config_params(node) - mult_params['n_in'] = node.get_attr('n_chan') * node.get_attr('filt_height') * node.get_attr('filt_width') - mult_params['n_out'] = node.get_attr('n_filt') - mult_params['product_type'] = get_backend('quartus').product_type( - node.get_input_variable().type.precision, node.get_weights('weight').type.precision - ) - mult_config = self.mult_template.format(**mult_params) - - return mult_config + '\n' + conv_config - - -class Conv2DFunctionTemplate(FunctionCallTemplate): - def __init__(self): - super().__init__((Conv2D, Conv2DBatchnorm), include_header=conv2d_include_list) - self.template = conv2d_function_template - - def format(self, node): - params = self._default_function_params(node) - if node.get_attr('data_format') == 'channels_first': - raise Exception('channels_first not supported for Quartus') - params['data_format'] = 'cl' - params['w'] = node.get_weights('weight').name - params['b'] = node.get_weights('bias').name - - return self.template.format(**params) diff --git a/hls4ml/hls4ml/backends/quartus/passes/convolution_winograd.py b/hls4ml/hls4ml/backends/quartus/passes/convolution_winograd.py deleted file mode 100644 index 9a66864..0000000 --- a/hls4ml/hls4ml/backends/quartus/passes/convolution_winograd.py +++ /dev/null @@ -1,177 +0,0 @@ -import math - -import numpy as np - -from hls4ml.model.layers import Conv1D, Conv2D -from hls4ml.model.optimizer import OptimizerPass - - -class ApplyWinogradKernelTransformation(OptimizerPass): - ''' - Transforms the weights of a Conv2D kernel to a format suitable for Winograd convolution - For further information, refer to Lavin & Gray, 2015 - Fast Algorithms for Convolutional Neural Networks - ''' - - def match(self, node): - node_matches = isinstance(node, (Conv1D, Conv2D)) - - # This optimizer works only after the Resource Strategy Optimizer, since order of transposition matters - weights_transformed = node.get_attr('_weights_transposed', False) is True - - # User opted for Winograd - implementation_is_winograd = ( - node.get_attr('implementation', 'combination') == 'combination' - or node.get_attr('implementation', 'combination') == 'winograd' - ) - - parallel_io_type = node.model.config.get_config_value('IOType') == 'io_parallel' - - # Winograd algorithm-specific conditions - if isinstance(node, Conv1D): - # Winograd only applies to specific kernel sizes - # Current implementation only supports fs = 3; easily extendable to other filter sizes - filter_size_matches = node.get_attr('filt_width', 3) == 3 - - # Winograd's minimal filtering algorithm doesn't work with stride != 1 - stride_is_one = node.get_attr('stride_width', 1) == 1 - - # HLS Compiler fails to pipeline the entire component if Winograd loop only executes once - loop_itr_gt_one = node.get_attr('out_width') > 2 - - winograd_conditions = filter_size_matches and stride_is_one and loop_itr_gt_one and parallel_io_type - - elif isinstance(node, (Conv2D)): - # Winograd only applies to specific kernel sizes - #
Current implementation only supports fs = 3; easily extendable to other filter sizes - filter_size_matches = node.get_attr('filt_height', 3) == 3 and node.get_attr('filt_width', 3) == 3 - - # Winograd's minimal filtering algorithm doesn't work with stride != 1 - stride_is_one = node.get_attr('stride_height', 1) == 1 and node.get_attr('stride_width', 1) == 1 - - # HLS Compiler fails to pipeline the entire component if Winograd loop only executes once - loop_itr_gt_one = node.get_attr('out_height') > 2 and node.get_attr('out_width') > 2 - - padding_is_equal = node.get_attr('pad_top', 0) == node.get_attr('pad_bottom', 0) and node.get_attr( - 'pad_left', 0 - ) == node.get_attr('pad_right', 0) - - winograd_conditions = ( - filter_size_matches and stride_is_one and padding_is_equal and loop_itr_gt_one and parallel_io_type - ) - - else: - winograd_conditions = False - - # Check any previous transformations - already_transformed = node.get_attr('_winograd_transformation_applied', False) is True - - if not winograd_conditions and node.get_attr('implementation', 'combination') == 'winograd': - raise RuntimeError( - 'Not possible to use Winograd algorithm with current architecture. ' - 'Please set implementation to im2col or combination' - ) - - return ( - node_matches - and weights_transformed - and winograd_conditions - and not already_transformed - and implementation_is_winograd - ) - - def transform(self, model, node): - if isinstance(node, Conv1D): - if node.get_attr('filt_width', 3) == 3: - # First, transpose to a format suitable for the Winograd algorithm (F, C, W) - # Note, this assumes a format post-resource strategy optimizer, that is (F, W, C) - # Therefore, (F, W, C) => (F, C, W) - node.weights['weight'].data = np.transpose(node.weights['weight'].data, axes=[0, 2, 1]) - - # Temporary copy of data - weights = node.weights['weight'].data - - # Expand weight dimensionality (3) => (4) - node.weights['weight'].data = np.zeros((weights.shape[0], weights.shape[1], 4)) - - # Transformation matrices for 3x1 kernels - G = np.array([[1, 0, 0], [0.5, 0.5, 0.5], [0.5, -0.5, 0.5], [0, 0, 1]]) - - # Transformation GfG' - for filter in range(0, weights.data.shape[0]): - for channel in range(0, weights.data.shape[1]): - node.weights['weight'].data[filter][channel] = np.matmul(G, weights[filter][channel]) - node.weights['weight'].data_length = node.weights['weight'].data.size - - # Winograd's minimal filtering algorithm transforms the weight matrix - # This transformation consists of addition and division (by 2&4) of the weight matrix - # Therefore, increase precision (if needed), to accommodate the new weights - # This error is only noticeable for low precisions, such as those used with QKeras - - # Integer precision is only updated if it exceeds the one defined in hls4ml config - maximum_value_rounded = int(math.ceil(np.abs(node.weights['weight'].data).max())) - if maximum_value_rounded.bit_length() + 1 > node.weights['weight'].type.precision.integer: - node.weights['weight'].type.precision.integer = maximum_value_rounded.bit_length() + 1 - node.weights['weight'].type.precision.width += ( - maximum_value_rounded.bit_length() + 1 - node.weights['weight'].type.precision.integer - ) - - # Fractional precision is increased by 2 bits (division by 4), - # for low-precision (less than 8) fractional weights - if node.weights['weight'].type.precision.fractional < 8: - node.weights['weight'].type.precision.fractional += 2 - node.weights['weight'].type.precision.width += 2 - - # Modified kernel size -
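# Note: with the 4x3 matrix G defined above, a 3-tap kernel g transforms to the 4-element kernel np.matmul(G, g), and in the 2D case to the 4x4 kernel np.matmul(np.matmul(G, g), GT); this is why impl_filt_width (and, for Conv2D, impl_filt_height) is set to 4 below.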
node.set_attr('impl_filt_width', 4) - - elif isinstance(node, Conv2D): - if node.get_attr('filt_height', 3) == 3 and node.get_attr('filt_width', 3) == 3: - # First, transpose to a format suitable for the Winograd algorithm (F, C, H, W) - # Note, this assumes a format post-resource strategy optimizer, that is (F, H, W, C) - # Therefore, (F, H, W, C) => (F, C, H, W) - node.weights['weight'].data = np.transpose(node.weights['weight'].data, axes=[0, 3, 1, 2]) - - # Temporary copy of data - weights = node.weights['weight'].data - - # Expand weight dimensionality (3x3) => (4x4) - node.weights['weight'].data = np.zeros((weights.shape[0], weights.shape[1], 4, 4)) - - # Transformation matrices for 3x3 kernels - G = np.array([[1, 0, 0], [0.5, 0.5, 0.5], [0.5, -0.5, 0.5], [0, 0, 1]]) - GT = np.array([[1, 0.5, 0.5, 0], [0, 0.5, -0.5, 0], [0, 0.5, 0.5, 1]]) - - # Transformation GfG' - for filter in range(0, weights.data.shape[0]): - for channel in range(0, weights.data.shape[1]): - node.weights['weight'].data[filter][channel] = np.matmul(np.matmul(G, weights[filter][channel]), GT) - node.weights['weight'].data_length = node.weights['weight'].data.size - - # Winograd's minimal filtering algorithm transforms the weight matrix - # This transformation consists of addition and division (by 2&4) of the weight matrix - # Therefore, increase precision (if needed), to accommodate the new weights - # This error is only noticeable for low precisions, such as those used with QKeras - - # Integer precision is only updated if it exceeds the one defined in hls4ml config - maximum_value_rounded = int(math.ceil(np.abs(node.weights['weight'].data).max())) - if maximum_value_rounded.bit_length() + 1 > node.weights['weight'].type.precision.integer: - node.weights['weight'].type.precision.integer = maximum_value_rounded.bit_length() + 1 - node.weights['weight'].type.precision.width += ( - maximum_value_rounded.bit_length() + 1 - node.weights['weight'].type.precision.integer - ) - - # Fractional precision is increased by 2 bits (division by 4), - # for low-precision (less than 8) fractional weights - if node.weights['weight'].type.precision.fractional < 8: - node.weights['weight'].type.precision.fractional += 2 - node.weights['weight'].type.precision.width += 2 - - # Modified kernel size - node.set_attr('impl_filt_height', 4) - node.set_attr('impl_filt_width', 4) - else: - raise Exception(f'Unexpected layer {node.class_name} with Winograd kernel optimizer') - - node.set_attr('_winograd_transformation_applied', True) - - return False diff --git a/hls4ml/hls4ml/backends/quartus/passes/core_templates.py b/hls4ml/hls4ml/backends/quartus/passes/core_templates.py deleted file mode 100644 index aece9fc..0000000 --- a/hls4ml/hls4ml/backends/quartus/passes/core_templates.py +++ /dev/null @@ -1,221 +0,0 @@ -from hls4ml.backends.backend import get_backend -from hls4ml.backends.template import FunctionCallTemplate, LayerConfigTemplate -from hls4ml.model.layers import Activation, BatchNormalization, Dense, HardActivation, ParametrizedActivation, PReLU, Softmax - -# Dense templates - -dense_config_template = """struct config{index} : nnet::dense_config {{ - static const unsigned n_in = {n_in}; - static const unsigned n_out = {n_out}; - static const unsigned io_type = nnet::{iotype}; - static const unsigned n_zeros = {nzeros}; - static const unsigned n_nonzeros = {nonzeros}; - static const bool store_weights_in_bram = false; - - static const unsigned rf_pad = {rfpad}; - static const unsigned bf_pad = {bfpad}; - - static const unsigned
reuse_factor = {reuse}; - static const unsigned compressed_block_factor = DIV_ROUNDUP(n_nonzeros, reuse_factor); - static const unsigned reuse_factor_rounded = reuse_factor + rf_pad; - static const unsigned block_factor = DIV_ROUNDUP(n_in*n_out, reuse_factor); - static const unsigned block_factor_rounded = block_factor + bf_pad; - static const unsigned multiplier_factor = MIN(n_in, reuse_factor); - static const unsigned multiplier_limit = DIV_ROUNDUP(n_in*n_out, multiplier_factor); - static const unsigned multiplier_scale = multiplier_limit/n_out; - - typedef {accum_t.name} accum_t; - typedef {bias_t.name} bias_t; - typedef {weight_t.name} weight_t; - typedef {index_t.name} index_t; - - template - using product = nnet::product::{product_type}; -}};\n""" - -dense_function_template = 'nnet::dense_{strategy}<{input_t}, {output_t}, {config}>({input}, {output}, {w}, {b});' - -dense_include_list = ['nnet_utils/nnet_dense.h', 'nnet_utils/nnet_dense_compressed.h', 'nnet_utils/nnet_dense_stream.h'] - - -class DenseConfigTemplate(LayerConfigTemplate): - def __init__(self): - super().__init__(Dense) - self.template = dense_config_template - - def format(self, node): - params = self._default_config_params(node) - params['nzeros'] = node.get_weights('weight').nzeros - params['nonzeros'] = node.get_weights('weight').nonzeros - params['product_type'] = get_backend('quartus').product_type( - node.get_input_variable().type.precision, node.get_weights('weight').type.precision - ) - - return self.template.format(**params) - - -class DenseFunctionTemplate(FunctionCallTemplate): - def __init__(self): - super().__init__(Dense, include_header=dense_include_list) - self.template = dense_function_template - - def format(self, node): - params = self._default_function_params(node) - params['w'] = node.get_weights('weight').name - params['b'] = node.get_weights('bias').name - - return self.template.format(**params) - - -# BatchNormalization templates - -batchnorm_config_template = """struct config{index} : nnet::batchnorm_config {{ - static const unsigned n_in = {n_in}; - static const unsigned n_filt = {n_filt}; - static const unsigned io_type = nnet::{iotype}; - static const unsigned reuse_factor = {reuse}; - static const bool store_weights_in_bram = false; - typedef {bias_t.name} bias_t; - typedef {scale_t.name} scale_t; - template - using product = nnet::product::{product_type}; -}};\n""" - -batchnorm_function_template = 'nnet::normalize<{input_t}, {output_t}, {config}>({input}, {output}, {scale}, {bias});' - -batchnorm_include_list = ['nnet_utils/nnet_batchnorm.h', 'nnet_utils/nnet_batchnorm_stream.h'] - - -class BatchNormalizationConfigTemplate(LayerConfigTemplate): - def __init__(self): - super().__init__(BatchNormalization) - self.template = batchnorm_config_template - - def format(self, node): - params = self._default_config_params(node) - params['n_in'] = node.get_input_variable().size_cpp() - params['product_type'] = get_backend('quartus').product_type( - node.get_input_variable().type.precision, node.get_weights('scale').type.precision - ) - - return self.template.format(**params) - - -class BatchNormalizationFunctionTemplate(FunctionCallTemplate): - def __init__(self): - super().__init__(BatchNormalization, include_header=batchnorm_include_list) - self.template = batchnorm_function_template - - def format(self, node): - params = self._default_function_params(node) - params['scale'] = node.get_weights('scale').name - params['bias'] = node.get_weights('bias').name - - return self.template.format(**params) - 
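(A note on how the config templates above are consumed, with a minimal runnable sketch that is not part of the patch: each *_config_template is a plain Python format string in which doubled braces escape the literal C++ braces and fields such as {accum_t.name} are resolved by attribute access; LayerConfigTemplate.format() fills it from the node's attributes. The trimmed-down template and parameter values below are invented stand-ins, not the backend's real ones.)

    from types import SimpleNamespace

    # Trimmed-down stand-in for dense_config_template above.
    demo_template = """struct config{index} : nnet::dense_config {{
        static const unsigned n_in = {n_in};
        static const unsigned n_out = {n_out};
        static const unsigned reuse_factor = {reuse};
        typedef {accum_t.name} accum_t;
    }};"""

    # Hypothetical values; the real backend builds these via self._default_config_params(node).
    params = {'index': 2, 'n_in': 16, 'n_out': 8, 'reuse': 4, 'accum_t': SimpleNamespace(name='ac_fixed<18, 8, true>')}

    print(demo_template.format(**params))
    # -> struct config2 : nnet::dense_config {
    #        static const unsigned n_in = 16;
    #        static const unsigned n_out = 8;
    #        static const unsigned reuse_factor = 4;
    #        typedef ac_fixed<18, 8, true> accum_t;
    #    };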
- -# Activation templates - -activ_config_template = """struct {type}_config{index} : nnet::activ_config {{ - static const unsigned n_in = {n_in}; - static const unsigned table_size = {table_size}; - static const unsigned io_type = nnet::{iotype}; - static const unsigned reuse_factor = {reuse}; - typedef {table_t.name} table_t; -}};\n""" - -hard_activ_config_template = """struct {type}_config{index} {{ - static const unsigned n_in = {n_in}; - static const {slope_t.name} slope; - static const {shift_t.name} shift; - static const unsigned io_type = nnet::{iotype}; - static const unsigned reuse_factor = {reuse}; -}}; -const {slope_t.name} {type}_config{index}::slope = {slope}; -const {shift_t.name} {type}_config{index}::shift = {shift};\n""" - -softmax_config_template = """struct {type}_config{index} : nnet::activ_config {{ - static const unsigned n_in = {n_in}; - static const unsigned table_size = {table_size}; - static const unsigned io_type = nnet::{iotype}; - static const unsigned reuse_factor = {reuse}; - static const nnet::softmax_implementation implementation = nnet::softmax_implementation::{implementation}; - typedef {exp_table_t.name} exp_table_t; - typedef {inv_table_t.name} inv_table_t; -}};\n""" - -activ_function_template = 'nnet::{activation}<{input_t}, {output_t}, {config}>({input}, {output});' -param_activ_function_template = 'nnet::{activation}<{input_t}, {output_t}, {config}>({input}, {param}, {output});' - -activ_include_list = ['nnet_utils/nnet_activation.h', 'nnet_utils/nnet_activation_stream.h'] - - -class ActivationConfigTemplate(LayerConfigTemplate): - def __init__(self): - super().__init__((Activation, ParametrizedActivation, PReLU)) - self.template = activ_config_template - - def format(self, node): - params = self._default_config_params(node) - params['type'] = node.get_attr('activation') - - return self.template.format(**params) - - -class HardActivationConfigTemplate(LayerConfigTemplate): - def __init__(self): - super().__init__(HardActivation) - self.template = hard_activ_config_template - - def format(self, node): - params = self._default_config_params(node) - params['type'] = node.get_attr('activation') - - return self.template.format(**params) - - -class SoftmaxConfigTemplate(ActivationConfigTemplate): - def __init__(self): - super(ActivationConfigTemplate, self).__init__(Softmax) # Skip ActivationConfigTemplate's __init__ - self.template = softmax_config_template - - -class ActivationFunctionTemplate(FunctionCallTemplate): - def __init__(self): - super().__init__((Activation, HardActivation, Softmax), include_header=activ_include_list) - self.template = activ_function_template - - def format(self, node): - params = self._default_function_params(node) - params['activation'] = node.get_attr('activation').lower() - params['config'] = '{}_config{}'.format(node.get_attr('activation'), node.index) - - return self.template.format(**params) - - -class ParametrizedActivationFunctionTemplate(FunctionCallTemplate): - def __init__(self): - super().__init__(ParametrizedActivation, include_header=activ_include_list) - self.template = param_activ_function_template - - def format(self, node): - params = self._default_function_params(node) - params['activation'] = node._get_act_function_name() - params['param'] = node.get_attr('activ_param', 1.0) - params['config'] = '{}_config{}'.format(node.get_attr('activation'), node.index) - - return self.template.format(**params) - - -class PReLUFunctionTemplate(FunctionCallTemplate): - def __init__(self): - super().__init__(PReLU, 
include_header=activ_include_list) - self.template = param_activ_function_template - - def format(self, node): - params = self._default_function_params(node) - params['activation'] = node.get_attr('activation').lower() - params['param'] = node.get_weights('alpha').name - params['config'] = '{}_config{}'.format(node.get_attr('activation'), node.index) - - return self.template.format(**params) diff --git a/hls4ml/hls4ml/backends/quartus/passes/merge_templates.py b/hls4ml/hls4ml/backends/quartus/passes/merge_templates.py deleted file mode 100644 index 0cf6121..0000000 --- a/hls4ml/hls4ml/backends/quartus/passes/merge_templates.py +++ /dev/null @@ -1,108 +0,0 @@ -from hls4ml.backends.backend import get_backend -from hls4ml.backends.template import FunctionCallTemplate, LayerConfigTemplate -from hls4ml.model.layers import Concatenate, Dot, Merge - -# TODO - Very similar to vivado/merge_templates.py - only difference is on line 67: -# TODO - get_backend('vivado').product_type(inp1.type.precision, inp2.type.precision) -# TODO - Look into ways of sharing similar passes across many backends in a shared folder through inheritance and overriding. - -# Merge templates -merge_config_template = """struct config{index} : nnet::merge_config {{ - static const unsigned n_elem = {n_elem}; -}};\n""" - -merge_function_template = 'nnet::{merge}<{input1_t}, {input2_t}, {output_t}, {config}>({input1}, {input2}, {output});' -merge_include_list = ['nnet_utils/nnet_merge.h', 'nnet_utils/nnet_merge_stream.h'] - - -class MergeConfigTemplate(LayerConfigTemplate): - def __init__(self): - super().__init__(Merge) - self.template = merge_config_template - - def format(self, node): - params = self._default_config_params(node) - params['n_elem'] = node.get_input_variable(node.inputs[0]).size_cpp() - - return self.template.format(**params) - - -class MergeFunctionTemplate(FunctionCallTemplate): - def __init__(self): - super().__init__((Merge, Concatenate, Dot), include_header=merge_include_list) - self.template = merge_function_template - - def format(self, node): - params = {} - params['merge'] = node.get_attr('op').lower() - params['config'] = f'config{node.index}' - params['input1_t'] = node.get_input_variable(node.inputs[0]).type.name - params['input2_t'] = node.get_input_variable(node.inputs[1]).type.name - params['output_t'] = node.get_output_variable().type.name - params['input1'] = node.get_input_variable(node.inputs[0]).name - params['input2'] = node.get_input_variable(node.inputs[1]).name - params['output'] = node.get_output_variable().name - - return self.template.format(**params) - - -# Dot templates -dot_config_template = """struct config{index} : nnet::dot_config {{ - static const unsigned n_in = {n_in}; - static const unsigned n_out = {n_out}; - - static const unsigned reuse_factor = {reuse}; - - typedef {accum_t.name} accum_t; - - template - using product = nnet::product::{product_type}; -}};\n""" - - -class DotConfigTemplate(LayerConfigTemplate): - def __init__(self): - super().__init__(Dot) - self.template = dot_config_template - - def format(self, node): - inp1 = node.get_input_variable(node.inputs[0]) - inp2 = node.get_input_variable(node.inputs[1]) - params = self._default_config_params(node) - params['n_out'] = 1 - params['n_in'] = inp1.shape[0] - params['product_type'] = get_backend('quartus').product_type(inp1.type.precision, inp2.type.precision) - - return self.template.format(**params) - - -# Concatenate templates -concat_config_template = """struct config{index} : nnet::concat_config {{ - static
const unsigned n_elem1_0 = {n_elem1_0}; - static const unsigned n_elem1_1 = {n_elem1_1}; - static const unsigned n_elem1_2 = {n_elem1_2}; - static const unsigned n_elem2_0 = {n_elem2_0}; - static const unsigned n_elem2_1 = {n_elem2_1}; - static const unsigned n_elem2_2 = {n_elem2_2}; - - static const int axis = {axis}; -}};\n""" - - -class ConcatenateConfigTemplate(LayerConfigTemplate): - def __init__(self): - super().__init__(Concatenate) - self.template = concat_config_template - - def format(self, node): - params = self._default_config_params(node) - for i in range(3): - params.setdefault(f'n_elem1_{i}', 0) - params.setdefault(f'n_elem2_{i}', 0) - inp1 = node.get_input_variable(node.inputs[0]) - inp2 = node.get_input_variable(node.inputs[1]) - for i, (s1, s2) in enumerate(zip(inp1.shape, inp2.shape)): - params[f'n_elem1_{i}'] = s1 - params[f'n_elem2_{i}'] = s2 - - return self.template.format(**params) diff --git a/hls4ml/hls4ml/backends/quartus/passes/pointwise.py b/hls4ml/hls4ml/backends/quartus/passes/pointwise.py deleted file mode 100644 index 84ae79e..0000000 --- a/hls4ml/hls4ml/backends/quartus/passes/pointwise.py +++ /dev/null @@ -1,95 +0,0 @@ -from copy import copy - -import numpy as np - -from hls4ml.backends.fpga.fpga_layers import PointwiseConv1D, PointwiseConv2D -from hls4ml.backends.quartus.passes.convolution_templates import ( - Conv1DConfigTemplate, - Conv1DFunctionTemplate, - Conv2DConfigTemplate, - Conv2DFunctionTemplate, - conv1d_config_template, - conv2d_config_template, - conv_mult_config_template, -) -from hls4ml.model.layers import register_layer -from hls4ml.model.optimizer import OptimizerPass - -''' -Custom hls4ml layer implementation for 1x1 Conv filters using im2col -Allows lower latency and resource usage, due to fewer loop invocations -''' - -pointwise_conv1d_function_template = ( - 'nnet::pointwise_conv_1d_{data_format}<{input_t}, {output_t}, {config}>({input}, {output}, {w}, {b});' -) -pointwise_conv2d_function_template = ( - 'nnet::pointwise_conv_2d_{data_format}<{input_t}, {output_t}, {config}>({input}, {output}, {w}, {b});' -) - -sepconv1d_include_list = ['nnet_utils/nnet_conv1d.h'] -sepconv2d_include_list = ['nnet_utils/nnet_conv2d.h'] - - -class PointwiseConv1DConfigTemplate(Conv1DConfigTemplate): - def __init__(self): - super(Conv1DConfigTemplate, self).__init__(PointwiseConv1D) - self.template = conv1d_config_template - self.mult_template = conv_mult_config_template - - -class PointwiseConv1DFunctionTemplate(Conv1DFunctionTemplate): - def __init__(self): - super(Conv1DFunctionTemplate, self).__init__(PointwiseConv1D, include_header=sepconv1d_include_list) - self.template = pointwise_conv1d_function_template - - -class PointwiseConv2DConfigTemplate(Conv2DConfigTemplate): - def __init__(self): - super(Conv2DConfigTemplate, self).__init__(PointwiseConv2D) - self.template = conv2d_config_template - self.mult_template = conv_mult_config_template - - -class PointwiseConv2DFunctionTemplate(Conv2DFunctionTemplate): - def __init__(self): - super(Conv2DFunctionTemplate, self).__init__(PointwiseConv2D, include_header=sepconv2d_include_list) - self.template = pointwise_conv2d_function_template - - -def register_pointwise(backend): - # Register the layer types to the layer map - register_layer('PointwiseConv1D', PointwiseConv1D) - register_layer('PointwiseConv2D', PointwiseConv2D) - - # Register the optimization passes - backend.register_pass('optimize_pointwise_conv', OptimizePointwiseConv) - - # Register template passes -
backend.register_template(PointwiseConv1DConfigTemplate) - backend.register_template(PointwiseConv1DFunctionTemplate) - backend.register_template(PointwiseConv2DConfigTemplate) - backend.register_template(PointwiseConv2DFunctionTemplate) - - -class OptimizePointwiseConv(OptimizerPass): - def match(self, node): - return ( - node.class_name in ('Conv1D', 'Conv2D') - and node.get_attr('filt_height', 1) == 1 - and node.get_attr('filt_width') == 1 - and node.model.config.get_config_value('IOType') == 'io_parallel' - ) - - def transform(self, model, node): - dim = node.__class__.__name__[-2:] # '1D' or '2D' - pw_node = model.make_node( - 'PointwiseConv' + dim, node.name, copy(node.attributes), node.inputs.copy(), outputs=node.outputs.copy() - ) - if len(node.weights['weight'].data.shape) == 2: # This can happen if we assign weights of Dense layer to 1x1 Conv2D - expand_axis = tuple(range(int(dim[0]))) - pw_node.weights['weight'].data = np.expand_dims(node.weights['weight'].data, axis=expand_axis) - pw_node.weights['bias'].data = node.weights['bias'].data - model.replace_node(node, pw_node) - - return True diff --git a/hls4ml/hls4ml/backends/quartus/passes/pooling_templates.py b/hls4ml/hls4ml/backends/quartus/passes/pooling_templates.py deleted file mode 100644 index 9a3ee41..0000000 --- a/hls4ml/hls4ml/backends/quartus/passes/pooling_templates.py +++ /dev/null @@ -1,111 +0,0 @@ -from hls4ml.backends.template import FunctionCallTemplate, LayerConfigTemplate -from hls4ml.model.layers import GlobalPooling1D, GlobalPooling2D, Pooling1D, Pooling2D - -# TODO - Move to ../fpga/passes, once streaming is supported on Quartus (should be identical to Vivado) - -pooling1d_config_template = """struct config{index} : nnet::pooling1d_config {{ - static const unsigned stride_width = {stride_width}; - static const unsigned pool_width = {pool_width}; - - static const unsigned n_in = {n_in}; - static const unsigned n_out = {n_out}; - static const unsigned filt_width = {pool_width}; - - static const unsigned n_filt = {n_filt}; - static const unsigned n_chan = {n_filt}; - - static const unsigned in_width = {n_in}; - - static const unsigned pad_left = {pad_left}; - static const unsigned pad_right = {pad_right}; - static const bool count_pad = {count_pad}; - - static const nnet::Pool_Op pool_op = nnet::{pool_op}; - typedef {accum_t.name} accum_t; -}};\n""" - -pooling2d_config_template = """struct config{index} : nnet::pooling2d_config {{ - static const unsigned stride_height = {stride_height}; - static const unsigned stride_width = {stride_width}; - - static const unsigned pool_height = {pool_height}; - static const unsigned pool_width = {pool_width}; - static const unsigned filt_height = {pool_height}; - static const unsigned filt_width = {pool_width}; - - static const unsigned in_height = {in_height}; - static const unsigned in_width = {in_width}; - static const unsigned out_height = {out_height}; - static const unsigned out_width = {out_width}; - - static const unsigned n_filt = {n_filt}; - static const unsigned n_chan = {n_filt}; - - static const unsigned pad_top = {pad_top}; - static const unsigned pad_bottom = {pad_bottom}; - static const unsigned pad_left = {pad_left}; - static const unsigned pad_right = {pad_right}; - static const bool count_pad = {count_pad}; - - static const nnet::Pool_Op pool_op = nnet::{pool_op}; - typedef {accum_t.name} accum_t; -}};\n""" - -global_pooling1d_config_template = """struct config{index} : nnet::pooling1d_config {{ - static const unsigned n_in = {n_in}; - static const unsigned 
n_filt = {n_filt}; - static const nnet::Pool_Op pool_op = nnet::{pool_op}; - typedef {accum_t.name} accum_t; -}};\n""" - -global_pooling2d_config_template = """struct config{index} : nnet::pooling2d_config {{ - static const unsigned in_height = {in_height}; - static const unsigned in_width = {in_width}; - static const unsigned n_filt = {n_filt}; - static const nnet::Pool_Op pool_op = nnet::{pool_op}; - typedef {accum_t.name} accum_t; -}};\n""" - -pooling1d_function_template = 'nnet::pooling1d_{data_format}<{input_t}, {output_t}, {config}>({input}, {output});' -pooling2d_function_template = 'nnet::pooling2d_{data_format}<{input_t}, {output_t}, {config}>({input}, {output});' -global_pooling1d_function_template = ( - 'nnet::global_pooling1d_{data_format}<{input_t}, {output_t}, {config}>({input}, {output});' -) -global_pooling2d_function_template = ( - 'nnet::global_pooling2d_{data_format}<{input_t}, {output_t}, {config}>({input}, {output});' -) - -pooling_include_list = ['nnet_utils/nnet_pooling.h', 'nnet_utils/nnet_pooling_stream.h'] - - -class PoolingConfigTemplate(LayerConfigTemplate): - def __init__(self): - super().__init__((Pooling1D, Pooling2D, GlobalPooling1D, GlobalPooling2D)) - self.templates = { - 'Pooling1D': pooling1d_config_template, - 'Pooling2D': pooling2d_config_template, - 'GlobalPooling1D': global_pooling1d_config_template, - 'GlobalPooling2D': global_pooling2d_config_template, - } - - def format(self, node): - params = self._default_config_params(node) - return self.templates[node.class_name].format(**params) - - -class PoolingFunctionTemplate(FunctionCallTemplate): - def __init__(self): - super().__init__((Pooling1D, Pooling2D, GlobalPooling1D, GlobalPooling2D), include_header=pooling_include_list) - self.templates = { - 'Pooling1D': pooling1d_function_template, - 'Pooling2D': pooling2d_function_template, - 'GlobalPooling1D': global_pooling1d_function_template, - 'GlobalPooling2D': global_pooling2d_function_template, - } - - def format(self, node): - params = self._default_function_params(node) - if node.get_attr('data_format') == 'channels_first': - raise Exception('channels_first not supported for Quartus') - params['data_format'] = 'cl' - return self.templates[node.class_name].format(**params) diff --git a/hls4ml/hls4ml/backends/quartus/passes/quantization_templates.py b/hls4ml/hls4ml/backends/quartus/passes/quantization_templates.py deleted file mode 100644 index d6cf2d2..0000000 --- a/hls4ml/hls4ml/backends/quartus/passes/quantization_templates.py +++ /dev/null @@ -1,36 +0,0 @@ -from hls4ml.backends.backend import get_backend -from hls4ml.backends.quartus.passes.core_templates import ( - batchnorm_config_template, - batchnorm_function_template, - batchnorm_include_list, -) -from hls4ml.backends.template import FunctionCallTemplate, LayerConfigTemplate -from hls4ml.model.optimizer.passes.qkeras import ApplyAlpha - - -class ApplyAlphaConfigTemplate(LayerConfigTemplate): - def __init__(self): - super().__init__(ApplyAlpha) - self.template = batchnorm_config_template - - def format(self, node): - params = self._default_config_params(node) - params['n_in'] = node.get_input_variable().size_cpp() - params['product_type'] = get_backend('quartus').product_type( - node.get_input_variable().type.precision, node.get_weights('scale').type.precision - ) - - return self.template.format(**params) - - -class ApplyAlphaFunctionTemplate(FunctionCallTemplate): - def __init__(self): - super().__init__(ApplyAlpha, include_header=batchnorm_include_list) - self.template = 
batchnorm_function_template - - def format(self, node): - params = self._default_function_params(node) - params['scale'] = node.get_weights('scale').name - params['bias'] = node.get_weights('bias').name - - return self.template.format(**params) diff --git a/hls4ml/hls4ml/backends/quartus/passes/recurrent_templates.py b/hls4ml/hls4ml/backends/quartus/passes/recurrent_templates.py deleted file mode 100644 index 2bf4535..0000000 --- a/hls4ml/hls4ml/backends/quartus/passes/recurrent_templates.py +++ /dev/null @@ -1,305 +0,0 @@ -from hls4ml.backends.backend import get_backend -from hls4ml.backends.template import FunctionCallTemplate, LayerConfigTemplate -from hls4ml.model.layers import GRU, LSTM, SimpleRNN - -recurrent_include_list = ['nnet_utils/nnet_recurrent.h', 'nnet_utils/nnet_recurrent_stream.h'] - -################################################ -# Shared Matrix Multiplication Template (Dense) -################################################ -recr_mult_config_template = '''struct config{index}_mult : nnet::dense_config {{ - static const unsigned n_in = {n_in}; - static const unsigned n_out = {n_out}; - - static const unsigned rf_pad = {rfpad}; - static const unsigned bf_pad = {bfpad}; - static const unsigned reuse_factor = {reuse}; - static const unsigned reuse_factor_rounded = reuse_factor + rf_pad; - static const unsigned block_factor = DIV_ROUNDUP(n_in*n_out, reuse_factor); - static const unsigned block_factor_rounded = block_factor + bf_pad; - static const unsigned multiplier_factor = MIN(n_in, reuse_factor); - static const unsigned multiplier_limit = DIV_ROUNDUP(n_in*n_out, multiplier_factor); - static const unsigned multiplier_scale = multiplier_limit/n_out; - typedef {accum_t.name} accum_t; - typedef {bias_t.name} bias_t; - typedef {weight_t.name} weight_t; - - template<class x_T, class y_T> - using product = nnet::product::{product_type}<x_T, y_T>; -}};\n''' - -################################################ -# Shared Activation Template -################################################ -activ_config_template = '''struct {type}_config{index} : nnet::activ_config {{ - static const unsigned n_in = {n_in}; - static const unsigned table_size = {table_size}; - static const unsigned io_type = nnet::{iotype}; - static const unsigned reuse_factor = {reuse}; - typedef {table_t.name} table_t; -}};\n''' - -################################################ -# GRU Template -################################################ -gru_config_template = '''struct config{index} : nnet::gru_config {{ - static const unsigned n_in = {n_in}; - static const unsigned n_out = {n_out}; - static const unsigned n_units = {n_units}; - static const unsigned n_timesteps = {n_timesteps}; - static const unsigned n_outputs = {n_outputs}; - static const bool return_sequences = {return_sequences}; - - typedef {accum_t.name} accum_t; - typedef {weight_t.name} weight_t; - typedef {bias_t.name} bias_t; - - typedef {config_mult_x} mult_config_x; - typedef {config_mult_h} mult_config_h; - - typedef {act_t} ACT_CONFIG_T; - template<class x_T, class y_T, class config_T> - using activation = nnet::activation::{activation}<x_T, y_T, config_T>; - - typedef {act_recurrent_t} ACT_CONFIG_RECURRENT_T; - template<class x_T, class y_T, class config_T> - using activation_recr = nnet::activation::{recurrent_activation}<x_T, y_T, config_T>; - - static const unsigned reuse_factor = {reuse}; - static const bool store_weights_in_bram = false; -}};\n''' - -gru_function_template = 'nnet::gru<{input_t}, {output_t}, {config}>({input}, {output}, {w}, {wr}, {b}, {br});' - - -class GRUConfigTemplate(LayerConfigTemplate): - def __init__(self): - super().__init__(GRU) - self.gru_template = 
gru_config_template - self.act_template = activ_config_template - self.recr_act_template = activ_config_template - self.mult_x_template = recr_mult_config_template - self.mult_h_template = recr_mult_config_template - - def format(self, node): - # Input has shape (n_timesteps, inp_dimensionality) - # Output / hidden units has shape (1 if !return_sequences else n_timesteps , n_units) - params = self._default_config_params(node) - params['n_units'] = node.get_attr('n_out') - params['n_outputs'] = node.get_attr('n_timesteps') if node.get_attr('return_sequences', False) else '1' - params['return_sequences'] = 'true' if node.get_attr('return_sequences', False) else 'false' - params['config_mult_x'] = f'config{node.index}_x_mult' - params['config_mult_h'] = f'config{node.index}_h_mult' - params['act_t'] = '{}_config{}'.format(node.get_attr('activation'), str(node.index) + '_act') - params['act_recurrent_t'] = '{}_config{}'.format(node.get_attr('recurrent_activation'), str(node.index) + '_rec_act') - gru_config = self.gru_template.format(**params) - - # Activation is on candidate hidden state, dimensionality (1, n_units) - act_params = self._default_config_params(node) - act_params['type'] = node.get_attr('activation') - act_params['n_in'] = node.get_attr('n_out') - act_params['index'] = str(node.index) + '_act' - act_config = self.act_template.format(**act_params) - - # Recurrent activation is on reset and update gates (therefore x2), dimensionality (1, n_units) - recr_act_params = self._default_config_params(node) - recr_act_params['type'] = node.get_attr('recurrent_activation') - recr_act_params['n_in'] = str(node.get_attr('n_out')) + ' * 2' - recr_act_params['index'] = str(node.index) + '_rec_act' - recr_act_config = self.recr_act_template.format(**recr_act_params) - - # Multiplication config for matrix multiplications of type Wx (reset, update and candidate states) - mult_params_x = self._default_config_params(node) - mult_params_x['n_in'] = node.get_attr('n_in') - mult_params_x['n_out'] = str(node.get_attr('n_out')) + ' * 3' - mult_params_x['product_type'] = get_backend('quartus').product_type( - node.get_input_variable().type.precision, node.get_weights('weight').type.precision - ) - mult_params_x['index'] = str(node.index) + '_x' - mult_config_x = self.mult_x_template.format(**mult_params_x) - - # Multiplication config for matrix multiplications of type Wh (reset, update and candidate states) - mult_params_h = self._default_config_params(node) - mult_params_h['n_in'] = node.get_attr('n_out') - mult_params_h['n_out'] = str(node.get_attr('n_out')) + ' * 3' - mult_params_h['reuse_factor'] = params['recurrent_reuse_factor'] - mult_params_h['product_type'] = get_backend('quartus').product_type( - node.get_input_variable().type.precision, node.get_weights('recurrent_weight').type.precision - ) - mult_params_h['index'] = str(node.index) + '_h' - mult_config_h = self.mult_h_template.format(**mult_params_h) - - return mult_config_x + '\n' + mult_config_h + '\n' + recr_act_config + '\n' + act_config + '\n' + gru_config - - -class GRUFunctionTemplate(FunctionCallTemplate): - def __init__(self): - super().__init__(GRU, include_header=recurrent_include_list) - self.template = gru_function_template - - def format(self, node): - params = self._default_function_params(node) - params['w'] = node.get_weights('weight').name - params['b'] = node.get_weights('bias').name - params['wr'] = node.get_weights('recurrent_weight').name - params['br'] = node.get_weights('recurrent_bias').name - return 
self.template.format(**params) - - -################################################ -# LSTM Template -################################################ -lstm_config_template = """struct config{index} : nnet::lstm_config {{ - static const unsigned n_in = {n_in}; - static const unsigned n_out = {n_out}; - static const unsigned n_timesteps = {n_timesteps}; - static const unsigned return_sequences = {return_sequences}; - - typedef {accum_t.name} accum_t; - typedef {weight_t.name} weight_t; - typedef {bias_t.name} bias_t; - - typedef {act_t} ACT_CONFIG_T; - template<class x_T, class y_T, class config_T> - using activation = nnet::activation::{activation}<x_T, y_T, config_T>; - - typedef {act_recurrent_t} ACT_CONFIG_RECURRENT_T; - template<class x_T, class y_T, class config_T> - using activation_recr = nnet::activation::{recurrent_activation}<x_T, y_T, config_T>; - - static const unsigned reuse_factor = {reuse}; - static const bool store_weights_in_bram = false; -}};\n""" - -lstm_function_template = 'nnet::lstm<{input_t}, {output_t}, {config}>({input}, {output}, {weights});' - - -class LSTMConfigTemplate(LayerConfigTemplate): - def __init__(self): - super().__init__(LSTM) - self.template = lstm_config_template - self.act_template = activ_config_template - self.recr_act_template = activ_config_template - - def format(self, node): - lstm_params = self._default_config_params(node) - lstm_params['n_in'] = node.get_attr('n_in') - lstm_params['n_out'] = node.get_attr('n_out') - lstm_params['n_outputs'] = node.get_attr('n_timesteps') if node.get_attr('return_sequences', False) else '1' - - lstm_params['return_sequences'] = str(node.get_attr('return_sequences')).lower() - lstm_params['act_t'] = '{}_config{}'.format(node.get_attr('activation'), str(node.index) + '_act') - lstm_params['act_recurrent_t'] = '{}_config{}'.format( - node.get_attr('recurrent_activation'), str(node.index) + '_rec_act' - ) - lstm_config = self.template.format(**lstm_params) - - act_params = self._default_config_params(node) - act_params['type'] = node.get_attr('activation') - act_params['n_in'] = node.get_attr('n_out') - act_params['index'] = str(node.index) + '_act' - act_config = self.act_template.format(**act_params) - - recr_act_params = self._default_config_params(node) - recr_act_params['type'] = node.get_attr('recurrent_activation') - recr_act_params['n_in'] = node.get_attr('n_out') - recr_act_params['index'] = str(node.index) + '_rec_act' - recr_act_config = self.recr_act_template.format(**recr_act_params) - - return act_config + '\n' + recr_act_config + '\n' + lstm_config - - -class LSTMFunctionTemplate(FunctionCallTemplate): - def __init__(self): - super().__init__(LSTM, include_header=recurrent_include_list) - self.template = lstm_function_template - - def format(self, node): - params = self._default_function_params(node) - - types = ['i', 'f', 'c', 'o'] - params['weights'] = '' - for t in types: - params['weights'] += f'kernel_{t}_{str(node.index)},' - for t in types: - params['weights'] += f'recurrent_kernel_{t}_{str(node.index)},' - for t in types: - params['weights'] += 'bias_{}_{}{}'.format(t, str(node.index), ',' if t != 'o' else '') - - return self.template.format(**params) - - -################################################ -# SimpleRNN Template -################################################ -simple_rnn_config_template = """struct config{index} : nnet::simpleRNN_config {{ - static const unsigned n_in = {n_in}; - static const unsigned n_out = {n_out}; - static const unsigned n_outputs = {n_outputs}; - static const unsigned n_timesteps = {n_timesteps}; - static const unsigned return_sequences = {return_sequences}; - 
typedef {accum_t.name} accum_t; - typedef {weight_t.name} weight_t; - typedef {bias_t.name} bias_t; - - typedef {act_t} ACT_CONFIG_T; - template<class x_T, class y_T, class config_T> - using activation = nnet::activation::{activation}<x_T, y_T, config_T>; - - typedef {act_recurrent_t} ACT_CONFIG_RECURRENT_T; - template<class x_T, class y_T, class config_T> - using activation_recr = nnet::activation::{recurrent_activation}<x_T, y_T, config_T>; - - static const unsigned reuse_factor = {reuse}; - static const bool store_weights_in_bram = false; -}};\n""" - -simple_rnn_function_template = 'nnet::simple_rnn<{input_t}, {output_t}, {config}>({input}, {output}, {weights});' - - -class SimpleRNNConfigTemplate(LayerConfigTemplate): - def __init__(self): - super().__init__(SimpleRNN) - self.template = simple_rnn_config_template - self.act_template = activ_config_template - self.recr_act_template = activ_config_template - - def format(self, node): - simple_rnn_params = self._default_config_params(node) - simple_rnn_params['n_in'] = node.get_attr('n_in') - simple_rnn_params['n_out'] = node.get_attr('n_out') - simple_rnn_params['n_outputs'] = node.get_attr('n_timesteps') if node.get_attr('return_sequences', False) else '1' - simple_rnn_params['return_sequences'] = str(node.get_attr('return_sequences')).lower() - simple_rnn_params['act_t'] = '{}_config{}'.format(node.get_attr('activation'), str(node.index) + '_act') - simple_rnn_params['act_recurrent_t'] = '{}_config{}'.format( - node.get_attr('recurrent_activation'), str(node.index) + '_rec_act' - ) - simple_rnn_params['recurrent_activation'] = 'relu' - - simple_rnn_config = self.template.format(**simple_rnn_params) - - act_params = self._default_config_params(node) - act_params['type'] = node.get_attr('activation') - act_params['n_in'] = node.get_attr('n_out') - act_params['index'] = str(node.index) + '_act' - act_config = self.act_template.format(**act_params) - - recr_act_params = self._default_config_params(node) - recr_act_params['type'] = node.get_attr('recurrent_activation') - recr_act_params['n_in'] = node.get_attr('n_out') - recr_act_params['index'] = str(node.index) + '_rec_act' - recr_act_config = self.recr_act_template.format(**recr_act_params) - - return act_config + '\n' + recr_act_config + '\n' + simple_rnn_config - - -class SimpleRNNFunctionTemplate(FunctionCallTemplate): - def __init__(self): - super().__init__(SimpleRNN, include_header=recurrent_include_list) - self.template = simple_rnn_function_template - - def format(self, node): - params = self._default_function_params(node) - params['weights'] = 'w{0}, wr{0}, b{0}'.format(str(node.index)) - return self.template.format(**params) diff --git a/hls4ml/hls4ml/backends/quartus/passes/reshaping_templates.py b/hls4ml/hls4ml/backends/quartus/passes/reshaping_templates.py deleted file mode 100644 index 0db01e6..0000000 --- a/hls4ml/hls4ml/backends/quartus/passes/reshaping_templates.py +++ /dev/null @@ -1,138 +0,0 @@ -from hls4ml.backends.template import FunctionCallTemplate, LayerConfigTemplate -from hls4ml.model.layers import Resize, Transpose, ZeroPadding1D, ZeroPadding2D - -# ZeroPadding templates - -zeropad1d_config_template = """struct config{index} : nnet::padding1d_config {{ - static const unsigned in_width = {in_width}; - static const unsigned out_width = {out_width}; - static const unsigned n_chan = {n_chan}; - - static const unsigned pad_left = {pad_left}; - static const unsigned pad_right = {pad_right}; -}};\n""" - -zeropad2d_config_template = """struct config{index} : nnet::padding2d_config {{ - static const unsigned in_height = {in_height}; - static const unsigned in_width = {in_width}; - 
static const unsigned out_height = {out_height}; - static const unsigned out_width = {out_width}; - static const unsigned n_chan = {n_chan}; - - static const unsigned pad_top = {pad_top}; - static const unsigned pad_bottom = {pad_bottom}; - static const unsigned pad_left = {pad_left}; - static const unsigned pad_right = {pad_right}; -}};\n""" - -zeropad1d_function_template = 'nnet::zeropad1d_{data_format}<{input_t}, {output_t}, {config}>({input}, {output});' -zeropad2d_function_template = 'nnet::zeropad2d_{data_format}<{input_t}, {output_t}, {config}>({input}, {output});' - -padding_include_list = ['nnet_utils/nnet_padding.h', 'nnet_utils/nnet_padding_stream.h'] - - -class ZeroPaddingConfigTemplate(LayerConfigTemplate): - def __init__(self): - super().__init__((ZeroPadding1D, ZeroPadding2D)) - self.templates = { - 'ZeroPadding1D': zeropad1d_config_template, - 'ZeroPadding2D': zeropad2d_config_template, - } - - def format(self, node): - params = self._default_config_params(node) - return self.templates[node.class_name].format(**params) - - -class ZeroPaddingFunctionTemplate(FunctionCallTemplate): - def __init__(self): - super().__init__((ZeroPadding1D, ZeroPadding2D), include_header=padding_include_list) - self.templates = { - 'ZeroPadding1D': zeropad1d_function_template, - 'ZeroPadding2D': zeropad2d_function_template, - } - - def format(self, node): - params = self._default_function_params(node) - if node.get_attr('data_format') == 'channels_first': - raise Exception('Quartus only supports channels_last data format') - params['data_format'] = 'cl' - - return self.templates[node.class_name].format(**params) - - -# Resize templates - -resize_config_template = """struct config{index} : nnet::resize_config {{ - static const unsigned height = {in_height}; - static const unsigned width = {in_width}; - - static const unsigned new_height = {out_height}; - static const unsigned new_width = {out_width}; - - static const unsigned n_chan = {n_chan}; -}};\n""" - -resize_function_template = 'nnet::resize_{algorithm}<{input_t}, {config}>({input}, {output});' -resize_include_list = ['nnet_utils/nnet_resize.h', 'nnet_utils/nnet_resize_stream.h'] - - -class ResizeConfigTemplate(LayerConfigTemplate): - def __init__(self): - super().__init__(Resize) - self.template = resize_config_template - - def format(self, node): - params = self._default_config_params(node) - - return self.template.format(**params) - - -class ResizeFunctionTemplate(FunctionCallTemplate): - def __init__(self): - super().__init__(Resize, include_header=resize_include_list) - self.template = resize_function_template - - def format(self, node): - params = self._default_function_params(node) - if node.get_attr('algorithm') != 'nearest': - raise Exception('Currently only supporting resize_nearest') - params['algorithm'] = node.get_attr('algorithm') - - return self.template.format(**params) - - -# Transpose templates - -transpose_config_template = """struct config{index} : nnet::transpose_config {{ - static const unsigned depth = {depth}; - static const unsigned height = {height}; - static const unsigned width = {width}; - static constexpr unsigned perm[3] = {{{perm_str}}}; -}};\n""" - -transpose_function_template = 'nnet::transpose_{dim}<{input_t}, {output_t}, {config}>({input}, {output});' -transpose_include_list = ['nnet_utils/nnet_transpose.h', 'nnet_utils/nnet_transpose_stream.h'] - - -class TransposeConfigTemplate(LayerConfigTemplate): - def __init__(self): - super().__init__(Transpose) - self.template = transpose_config_template - - def 
format(self, node): - params = self._default_config_params(node) - - return self.template.format(**params) - - -class TransposeFunctionTemplate(FunctionCallTemplate): - def __init__(self): - super().__init__(Transpose, include_header=transpose_include_list) - self.template = transpose_function_template - - def format(self, node): - params = self._default_function_params(node) - params['dim'] = node.get_attr('dim') - - return self.template.format(**params) diff --git a/hls4ml/hls4ml/backends/quartus/passes/resource_strategy.py b/hls4ml/hls4ml/backends/quartus/passes/resource_strategy.py deleted file mode 100644 index 00fe890..0000000 --- a/hls4ml/hls4ml/backends/quartus/passes/resource_strategy.py +++ /dev/null @@ -1,77 +0,0 @@ -import numpy as np - -from hls4ml.model.layers import GRU, LSTM, Conv1D, Conv2D, Dense, SimpleRNN -from hls4ml.model.optimizer import OptimizerPass - - -class ApplyResourceStrategy(OptimizerPass): - '''Transposes the weights to use the dense_resource matrix multiply routine''' - - def match(self, node): - node_matches = isinstance(node, (Dense, Conv1D, Conv2D, GRU, LSTM, SimpleRNN)) - is_resource_strategy = ( - True # node.get_attr('strategy', '').lower() == 'resource' -> Quartus only supports the Resource strategy - ) - already_transformed = node.get_attr('_weights_transposed', False) is True - return node_matches and is_resource_strategy and not already_transformed - - def transform(self, model, node): - if isinstance(node, Dense) and not node.model.config.get_compression(node): - rf = node.get_attr('reuse_factor') - bf = int((node.attributes['n_in'] * node.attributes['n_out']) / rf) - bf_rounded = int(pow(2, np.ceil(np.log2(bf)))) - rf_rounded = int(pow(2, np.ceil(np.log2(rf)))) - - node.weights['weight'].data = np.transpose(node.weights['weight'].data).flatten() - - if node.attributes['n_in'] * node.attributes['n_out'] > 2048 and rf_rounded != rf: - node.set_attr('rfpad', rf_rounded - rf) - node.set_attr('bfpad', bf_rounded - bf) - - temp = np.empty([bf_rounded, rf_rounded]) - for i in range(rf_rounded): - for j in range(bf_rounded): - if i < rf and j < bf: - w_index = i + rf * j - temp[j][i] = node.weights['weight'].data[w_index] - else: - temp[j][i] = 0 - node.weights['weight'].data = temp.flatten() - node.weights['weight'].data_length = node.weights['weight'].data.size - - elif isinstance(node, Conv1D): - # (W,C,F) => (F,W,C) - # IMPORTANT - This format only works with im2col convolution - # - Future commits add new optimizers that further transpose THIS format to a format - # useful for Winograd's minimal filtering algorithm - node.weights['weight'].data = np.transpose(node.weights['weight'].data, axes=[2, 0, 1]) - - elif isinstance(node, Conv2D): - # (H,W,C,F) => (F,H,W,C) - # IMPORTANT - This format only works with im2col convolution - # - Future commits add new optimizers that further transpose THIS format to a format - # useful for Winograd's minimal filtering algorithm - node.weights['weight'].data = np.transpose(node.weights['weight'].data, axes=[3, 0, 1, 2]) - - elif isinstance(node, GRU): - node.weights['weight'].data = np.transpose(node.weights['weight'].data) - node.weights['recurrent_weight'].data = np.transpose(node.weights['recurrent_weight'].data) - - elif isinstance(node, SimpleRNN): - node.weights['weight'].data = np.transpose(node.weights['weight'].data) - node.weights['recurrent_weight'].data = np.transpose(node.weights['recurrent_weight'].data) - - elif isinstance(node, LSTM): - node.weights['weight'].data = 
np.transpose(node.weights['weight'].data) - node.weights['recurrent_weight'].data = np.transpose(node.weights['recurrent_weight'].data) - - for weight_type in ['i', 'f', 'c', 'o']: - node.weights[f'weight_{weight_type}'].data = np.transpose(node.weights[f'weight_{weight_type}'].data) - node.weights[f'recurrent_weight_{weight_type}'].data = np.transpose( - node.weights[f'recurrent_weight_{weight_type}'].data - ) - - else: - raise Exception(f'Unexpected layer {node.class_name} with resource strategy') - node.set_attr('_weights_transposed', True) - return False diff --git a/hls4ml/hls4ml/backends/quartus/passes/transform_types.py b/hls4ml/hls4ml/backends/quartus/passes/transform_types.py deleted file mode 100644 index 67de32a..0000000 --- a/hls4ml/hls4ml/backends/quartus/passes/transform_types.py +++ /dev/null @@ -1,54 +0,0 @@ -from hls4ml.backends.fpga.fpga_types import ( - ACTypeConverter, - HLSTypeConverter, - QuartusArrayVariableConverter, - QuartusInplaceArrayVariableConverter, - QuartusInplaceStreamVariableConverter, - QuartusStreamVariableConverter, - QuartusStructMemberVariableConverter, - StaticWeightVariableConverter, -) -from hls4ml.model.optimizer import GlobalOptimizerPass -from hls4ml.model.types import InplaceTensorVariable - - -class TransformTypes(GlobalOptimizerPass): - def __init__(self): - self.type_converter = HLSTypeConverter(precision_converter=ACTypeConverter()) - self.array_var_converter = QuartusArrayVariableConverter(type_converter=self.type_converter) - self.inplace_array_var_converter = QuartusInplaceArrayVariableConverter(type_converter=self.type_converter) - self.struct_var_converter = QuartusStructMemberVariableConverter(type_converter=self.type_converter) - self.stream_var_converter = QuartusStreamVariableConverter(type_converter=self.type_converter) - self.inplace_stream_var_converter = QuartusInplaceStreamVariableConverter(type_converter=self.type_converter) - self.weight_var_converter = StaticWeightVariableConverter(type_converter=self.type_converter) - - def transform(self, model, node): - io_type = node.model.config.get_config_value('IOType') - - for out_name, var in node.variables.items(): - if io_type == 'io_stream': - if isinstance(var, InplaceTensorVariable): - new_var = self.inplace_stream_var_converter.convert(var) - else: - new_var = self.stream_var_converter.convert(var) - elif io_type == 'io_parallel': - if out_name in node.model.inputs: - new_var = self.struct_var_converter.convert(var, pragma='hls_register', struct_name='inputs') - elif out_name in node.model.outputs: - new_var = self.struct_var_converter.convert(var, pragma='hls_register', struct_name='outputs') - elif isinstance(var, InplaceTensorVariable): - new_var = self.inplace_array_var_converter.convert(var, pragma='') - else: - new_var = self.array_var_converter.convert(var, pragma='hls_register') - else: - raise Exception(f'Unknown IOType {io_type} in {node.name} ({node.class_name})') - - node.set_attr(out_name, new_var) - - for w_name, weight in node.weights.items(): - new_weight = self.weight_var_converter.convert(weight) - node.set_attr(w_name, new_weight) - - for t_name, type in node.types.items(): - new_type = self.type_converter.convert(type) - node.set_attr(t_name, new_type) diff --git a/hls4ml/hls4ml/backends/quartus/quartus_backend.py b/hls4ml/hls4ml/backends/quartus/quartus_backend.py deleted file mode 100644 index 382cd40..0000000 --- a/hls4ml/hls4ml/backends/quartus/quartus_backend.py +++ /dev/null @@ -1,362 +0,0 @@ -import os -from contextlib import contextmanager - 
-import numpy as np - -from hls4ml.backends import FPGABackend -from hls4ml.model.attributes import ConfigurableAttribute, TypeAttribute -from hls4ml.model.flow import register_flow -from hls4ml.model.layers import GRU, LSTM, Activation, Conv1D, Conv2D, Dense, Embedding, Layer, SimpleRNN, Softmax -from hls4ml.model.optimizer import get_backend_passes, layer_optimizer -from hls4ml.model.types import FixedPrecisionType, IntegerPrecisionType, NamedType -from hls4ml.report import parse_quartus_report - - -@contextmanager -def chdir(newdir): - prevdir = os.getcwd() - os.chdir(os.path.expanduser(newdir)) - try: - yield - finally: - os.chdir(prevdir) - - -class QuartusBackend(FPGABackend): - def __init__(self): - super().__init__('Quartus') - self._register_layer_attributes() - self._register_flows() - - def _register_layer_attributes(self): - # Add RNN-specific recurrent_reuse_factor attribute - rnn_layers = [ - SimpleRNN, - LSTM, - GRU, - ] - - for layer in rnn_layers: - attrs = self.attribute_map.get(layer, []) - attrs.append(ConfigurableAttribute('recurrent_reuse_factor', default=1)) - attrs.append(ConfigurableAttribute('table_size', default=1024)) - attrs.append(TypeAttribute('table', default=FixedPrecisionType(18, 8))) - self.attribute_map[layer] = attrs - - def _register_flows(self): - initializers = self._get_layer_initializers() - init_flow = register_flow('init_layers', initializers, requires=['optimize'], backend=self.name) - - streaming_passes = ['quartus:reshape_stream', 'quartus:clone_output'] - streaming_flow = register_flow('streaming', streaming_passes, requires=[init_flow], backend=self.name) - - quartus_types = [ - 'quartus:transform_types', - 'quartus:register_bram_weights', - 'quartus:apply_resource_strategy', - 'quartus:apply_winograd_kernel_transformation', - ] - quartus_types_flow = register_flow('specific_types', quartus_types, requires=[init_flow], backend=self.name) - - quantization_passes = [ - 'quartus:merge_batch_norm_quantized_tanh', - 'quartus:quantize_dense_output', - 'fuse_consecutive_batch_normalization', - 'quartus:xnor_pooling', - ] - quantization_flow = register_flow('quantization', quantization_passes, requires=[init_flow], backend=self.name) - - optimization_passes = [ - 'quartus:remove_final_reshape', - 'quartus:optimize_pointwise_conv', - 'quartus:inplace_parallel_reshape', - 'quartus:inplace_stream_flatten', - 'quartus:skip_softmax', - ] - optimization_flow = register_flow('optimize', optimization_passes, requires=[init_flow], backend=self.name) - - templates = self._get_layer_templates() - template_flow = register_flow('apply_templates', self._get_layer_templates, requires=[init_flow], backend=self.name) - - writer_passes = ['make_stamp', 'quartus:write_hls'] - - self._writer_flow = register_flow('write', writer_passes, requires=['quartus:ip'], backend=self.name) - - all_passes = get_backend_passes(self.name) - - extras = [ - # Ideally this should be empty - opt_pass - for opt_pass in all_passes - if opt_pass - not in initializers - + streaming_passes - + quartus_types - + quantization_passes - + templates - + optimization_passes - + writer_passes - ] - - if len(extras) > 0: - extras_flow = register_flow('extras', extras, requires=[init_flow], backend=self.name) - else: - extras_flow = None - - ip_flow_requirements = [ - 'optimize', - init_flow, - streaming_flow, - quantization_flow, - optimization_flow, - quartus_types_flow, - extras_flow, - template_flow, - ] - ip_flow_requirements = list(filter(None, ip_flow_requirements)) - - self._default_flow = 
register_flow('ip', None, requires=ip_flow_requirements, backend=self.name) - - def get_default_flow(self): - return self._default_flow - - def get_writer_flow(self): - return self._writer_flow - - def create_initial_config(self, part='Arria10', clock_period=5, io_type='io_parallel'): - config = {} - - config['Part'] = part if part is not None else 'Arria10' - config['ClockPeriod'] = clock_period - config['IOType'] = io_type - config['HLSConfig'] = {} - - return config - - def build(self, model, synth=True, fpgasynth=False, log_level=1, cont_if_large_area=False): - """ - Builds the project using the Intel HLS compiler. - - Args: - model (ModelGraph): The model to build - synth (bool, optional): Whether to run HLS synthesis - fpgasynth (bool, optional): Whether to run FPGA synthesis (Quartus compile) - log_level (int, optional): Logging level displayed during HLS synthesis (0, 1, 2) - cont_if_large_area (bool, optional): Instruct the HLS compiler to continue synthesis if the estimated - resource usage exceeds device resources - - Raises: - Exception: If a required tool is not found on PATH, or if FPGA synthesis is requested without HLS synthesis - """ - - # Check that the required software is present - found = os.system('command -v i++ > /dev/null') - if found != 0: - raise Exception('Intel HLS installation not found. Make sure "i++" is on PATH.') - - if fpgasynth: - if not synth: - raise Exception('HLS synthesis needs to be run before FPGA synthesis') - found = os.system('command -v quartus_sh > /dev/null') - if found != 0: - raise Exception('Quartus installation not found. Make sure "quartus_sh" is on PATH.') - - with chdir(model.config.get_output_dir()): - if synth: - quartus_compile = 'QUARTUS_COMPILE=--quartus-compile' if fpgasynth else '' - cont_synth = 'CONT_IF_LARGE_AREA=--dont-error-if-large-area-est' if cont_if_large_area else '' - log_1 = 'LOGGING_1=-v ' if log_level >= 1 else '' - log_2 = 'LOGGING_2=-v ' if log_level >= 2 else '' - os.system(f'make {model.config.get_project_name()}-fpga {log_1} {log_2} {cont_synth} {quartus_compile}') - - # If running i++ through a container, such as Singularity, this command will throw an exception, because the - # host OS doesn't have access to the HLS simulation tools. To avoid the exception, shell into the container - # (e.g. singularity shell ....) 
and then execute the following command manually - # This command simply tests the IP using a simulation tool and obtains the latency and initiation interval - os.system(f'./{model.config.get_project_name()}-fpga') - - return parse_quartus_report(model.config.get_output_dir()) - - @layer_optimizer(Layer) - def init_base_layer(self, layer): - reuse_factor = layer.model.config.get_reuse_factor(layer) - layer.set_attr('reuse_factor', reuse_factor) - - target_cycles = layer.model.config.get_target_cycles(layer) - layer.set_attr('target_cycles', target_cycles) - - @layer_optimizer(Dense) - def init_dense(self, layer): - index_t = IntegerPrecisionType(width=1, signed=False) - - layer.set_attr('rfpad', 0) - layer.set_attr('bfpad', 0) - - if layer.model.config.get_compression(layer): - layer.set_attr('strategy', 'compressed') - else: - n_in, n_out = self.get_layer_mult_size(layer) - self.set_closest_reuse_factor(layer, n_in, n_out) - layer.set_attr('strategy', 'resource') - - if layer.model.config.is_resource_strategy(layer): - if layer.model.config.get_compression(layer): - index_t = layer.get_weights('weight').type.index_precision - - layer.set_attr('index_t', NamedType(f'layer{layer.index}_index', index_t)) - - @layer_optimizer(Activation) - def init_activation(self, layer): - if layer.get_attr('activation') == 'tanh': - layer.set_attr('activation', 'dense_tanh') - if layer.get_attr('recurrent_activation') == 'tanh': - layer.set_attr('recurrent_activation', 'dense_tanh') - - @layer_optimizer(Softmax) - def init_softmax(self, layer): - if layer.model.config.get_config_value('IOType') == 'io_parallel': - assert ( - len(layer.get_input_variable().shape) == 1 - ), 'Softmax with io_parallel strategy cannot be used on multidimensional tensors.' - - @layer_optimizer(Embedding) - def init_embed(self, layer): - if layer.attributes['n_in'] is None: - raise Exception('Input length of Embedding layer must be specified.') - - @layer_optimizer(GRU) - def init_gru(self, layer): - reuse_factor = layer.model.config.get_reuse_factor(layer) - layer.set_attr('recurrent_reuse_factor', reuse_factor) - - # Dense multiplication properties - layer.set_attr('rfpad', 0) - layer.set_attr('bfpad', 0) - - index_t = IntegerPrecisionType(width=1, signed=False) - layer.set_attr('index_t', index_t) - - if 'table_t' not in layer.attributes: - layer.set_attr( - 'table_t', NamedType(name=layer.name + '_table_t', precision=FixedPrecisionType(width=18, integer=8)) - ) - if 'table_size' not in layer.attributes: - layer.set_attr('table_size', 1024) - if True: # layer.model.config.is_resource_strategy(layer): ... 
Quartus only supports Dense resource multiplication - n_in, n_out, n_in_recr, n_out_recr = self.get_layer_mult_size(layer) - self.set_closest_reuse_factor(layer, n_in, n_out) - self.set_closest_reuse_factor(layer, n_in_recr, n_out_recr, attribute='recurrent_reuse_factor') - layer.set_attr('strategy', 'resource') - - layer.set_attr('index_t', index_t) - - @layer_optimizer(Conv1D) - def init_conv1d(self, layer): - # This can happen if we assign weights of Dense layer to 1x1 Conv1D - if len(layer.weights['weight'].data.shape) == 2: - layer.weights['weight'].data = np.expand_dims(layer.weights['weight'].data, axis=(0, 1)) - - # Dense matrix multiply properties - layer.set_attr('rfpad', 0) - layer.set_attr('bfpad', 0) - - # Reuse and parallelization factors - layer.set_attr('strategy', 'resource') - n_in, n_out = self.get_layer_mult_size(layer) - self.set_target_reuse_factor(layer) - self.set_closest_reuse_factor(layer, n_in, n_out) - layer.set_attr('parallelization', layer.model.config.get_layer_config_value(layer, 'ParallelizationFactor', 1)) - - # impl_filt_width determines the filter size post-Winograd transformation - layer.set_attr('impl_filt_width', layer.get_attr('filt_width')) - - # Implementation: - # - combination - at compile-time, the decision between Winograd and im2col is made - # - im2col - specifically use im2col - # - Winograd - use Winograd, if possible - layer.set_attr('implementation', layer.model.config.get_layer_config_value(layer, 'Implementation', 'combination')) - - layer.set_attr( - 'n_partitions', 1 - ) # TODO Not used yet as there is no codegen implementation of CNNs for Quartus backend - - @layer_optimizer(Conv2D) - def init_conv2d(self, layer): - # This can happen if we assign weights of Dense layer to 1x1 Conv2D - if len(layer.weights['weight'].data.shape) == 2: - layer.weights['weight'].data = np.expand_dims(layer.weights['weight'].data, axis=(0, 1)) - - # Dense matrix multiply properties - layer.set_attr('rfpad', 0) - layer.set_attr('bfpad', 0) - - # Reuse and parallelization factors - layer.set_attr('strategy', 'resource') - n_in, n_out = self.get_layer_mult_size(layer) - self.set_target_reuse_factor(layer) - self.set_closest_reuse_factor(layer, n_in, n_out) - layer.set_attr('parallelization', layer.model.config.get_layer_config_value(layer, 'ParallelizationFactor', 1)) - - # impl_filt_width & impl_filt_height determine the filter size post-Winograd transformation - layer.set_attr('impl_filt_height', layer.get_attr('filt_height')) - layer.set_attr('impl_filt_width', layer.get_attr('filt_width')) - - # Implementation: - # - combination - at compile-time, the decision between Winograd and im2col is made - # - im2col - specifically use im2col - # - Winograd - use Winograd, if possible - layer.set_attr('implementation', layer.model.config.get_layer_config_value(layer, 'Implementation', 'combination')) - - layer.set_attr( - 'n_partitions', 1 - ) # TODO Not used yet as there is no codegen implementation of CNNs for Quartus backend - - @layer_optimizer(LSTM) - def init_lstm(self, layer): - reuse_factor = layer.model.config.get_reuse_factor(layer) - layer.set_attr('recurrent_reuse_factor', reuse_factor) - - # We don't use RF yet - if True: # layer.model.config.is_resource_strategy(layer): ... 
Quartus only supports Dense resource multiplication - n_in, n_out, n_in_recr, n_out_recr = self.get_layer_mult_size(layer) - self.set_closest_reuse_factor(layer, n_in, n_out) - self.set_closest_reuse_factor(layer, n_in_recr, n_out_recr, attribute='recurrent_reuse_factor') - layer.set_attr('strategy', 'resource') - - # Split weights for easier storage in on-chip memory and implementation in HLS - weights_data = layer.weights['weight'].data - rec_weights_data = layer.weights['recurrent_weight'].data - bias_data = layer.weights['bias'].data - - weight_types = ['i', 'f', 'c', 'o'] - for i in range(0, 4): - layer.add_weights_variable( - name=f'weight_{weight_types[i]}', - var_name=f'kernel_{weight_types[i]}_{{index}}', - data=weights_data[ - 0 : layer.get_attr('n_in'), i * layer.get_attr('n_out') : (i + 1) * layer.get_attr('n_out') - ], - quantizer=layer.get_attr('weight_quantizer'), - compression=None, - ) - layer.add_weights_variable( - name=f'recurrent_weight_{weight_types[i]}', - var_name=f'recurrent_kernel_{weight_types[i]}_{{index}}', - data=rec_weights_data[ - 0 : layer.get_attr('n_out'), i * layer.get_attr('n_out') : (i + 1) * layer.get_attr('n_out') - ], - quantizer=layer.get_attr('weight_quantizer'), - compression=None, - ) - layer.add_weights_variable( - name=f'bias_{weight_types[i]}', - var_name=f'bias_{weight_types[i]}_{{index}}', - data=bias_data[i * layer.get_attr('n_out') : (i + 1) * (layer.get_attr('n_out'))], - quantizer=layer.get_attr('weight_quantizer'), - compression=None, - ) - - @layer_optimizer(SimpleRNN) - def init_simple_rnn(self, layer): - reuse_factor = layer.model.config.get_reuse_factor(layer) - layer.set_attr('recurrent_reuse_factor', reuse_factor) - - # TODO - Consider setting and using RF diff --git a/hls4ml/hls4ml/backends/symbolic/__init__.py b/hls4ml/hls4ml/backends/symbolic/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/hls4ml/hls4ml/backends/symbolic/passes/__init__.py b/hls4ml/hls4ml/backends/symbolic/passes/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/hls4ml/hls4ml/backends/symbolic/passes/expr_templates.py b/hls4ml/hls4ml/backends/symbolic/passes/expr_templates.py deleted file mode 100644 index 0e8eac4..0000000 --- a/hls4ml/hls4ml/backends/symbolic/passes/expr_templates.py +++ /dev/null @@ -1,172 +0,0 @@ -import re - -from sympy.core import S -from sympy.core.numbers import Integer -from sympy.printing.cxx import CXX11CodePrinter - -from hls4ml.backends.template import FunctionCallTemplate, LayerConfigTemplate -from hls4ml.model.layers import SymbolicExpression - -# Expression templates - -expr_function_template = 'y[{y_index}] = {expr_str};' - -expr_include_list = ['hls_math.h', 'nnet_utils/nnet_math.h'] - -built_in_luts = ['sin_lut', 'cos_lut'] - - -class HLSCodePrinter(CXX11CodePrinter): - _ns = 'hls::' - - def __init__(self, layer, lut_functions, use_built_in_luts=False, settings=None): - if lut_functions is not None: - if use_built_in_luts: - # Check if user's LUTs override built-in LUTs - for lut_name in lut_functions.keys(): - if lut_name in built_in_luts: - print(f'WARNING: User-specified LUT function {lut_name} overrides built-in LUT function.') - - if settings is None: - settings = {'user_functions': lut_functions} - else: - user_functions = settings.get('user_functions', {}) - user_functions.update(lut_functions) - settings['user_functions'] = user_functions - - super().__init__(settings) - self.layer = layer - self.use_built_in_luts = use_built_in_luts - - for k in ( - 'Abs Sqrt exp exp2 
expm1 log log10 log2 log1p Cbrt hypot fma' - ' loggamma sin cos tan asin acos atan atan2 sinh cosh tanh asinh acosh ' - 'atanh erf erfc gamma ceiling floor' - ).split(): - setattr(HLSCodePrinter, '_print_%s' % k, HLSCodePrinter._print_math) - - def _symbol_to_array(self, name): - return re.sub(r'([a-zA-Z]+)(\d+)', r'\1[\2]', name) - - def _wrap_with_type_name(self, expr_str): - type_name = self.layer.types['result_t'].name - return f'{type_name}({expr_str})' - - def _print_Integer(self, expr): - int_str = super()._print_Integer(expr) - return self._wrap_with_type_name(int_str) - - def _print_Float(self, flt): - float_str = super()._print_Float(flt) - return self._wrap_with_type_name(float_str) - - def _print_Rational(self, expr): - p, q = int(expr.p), int(expr.q) - p_q_str = f'{p}.0/{q}.0' - return self._wrap_with_type_name(p_q_str) - - def _print_Pow(self, expr): - type_name = self.layer.types['result_t'].name - type_precision = self.layer.types['result_t'].precision - if isinstance(expr.exp, Integer): - l_brac, r_brac = ('(', ')') if len(expr.base.args) > 1 else ('', '') - if expr.exp > 1: - return ( - '(' - + '*'.join([l_brac + self._symbol_to_array(self._print(expr.base)) + r_brac for _ in range(expr.exp)]) - + ')' - ) - elif expr.exp == -1: # 1/x - base = l_brac + self._symbol_to_array(self._print(expr.base)) + r_brac - return f'hls::recip<{type_precision.width}, {type_precision.integer}>(({type_name}){base})' - else: - return super()._print_Pow(expr) - else: - base = self._print(expr.base) - if expr.exp == 0.5: - return f'{self._ns}sqrt<{type_precision.width}, {type_precision.integer}>(({type_name})({base}))' - elif expr.exp == S.One / 3: - return f'{self._ns}cbrt<{type_precision.width}, {type_precision.integer}>(({type_name})({base}))' - else: - exp = self._print(expr.exp) - return f'{self._ns}pow<{type_precision.width}, {type_precision.integer}>(({type_name})({base}), {exp})' - - def _print_math(self, expr): - name = self.known_functions[expr.__class__.__name__] - if not isinstance(name, str): - for cb, fname in name: - if cb(*expr.args): - name = fname - break - else: - raise ValueError("No matching printer") - - # Setting the precision of math functions requires care: - # e.g., hls::pow(x, y) passes C simulation but fails synthesis; we need to use hls::pow<16,6>(x, y) - type_name = self.layer.types['result_t'].name - type_precision = self.layer.types['result_t'].precision - template = f'<{type_precision.width}, {type_precision.integer}>' - cast = f'({type_name})' - args = ', '.join(map(lambda arg: self._print(arg), expr.args)) - - if self.use_built_in_luts and name + '_lut' in built_in_luts: - ns = 'nnet::' - name = name + '_lut' - template = f'<{type_name}>' - else: - ns = self._ns - - return f'{ns}{name}{template}({cast}({args}))' - - def _print_Symbol(self, expr): - name = super()._print_Symbol(expr) - return self._symbol_to_array(name) - - -class ExpressionFunctionTemplate(FunctionCallTemplate): - def __init__(self): - super().__init__(SymbolicExpression, include_header=expr_include_list) - self.template = expr_function_template - - def format(self, node): - params = self._default_function_params(node) - - lut_functions = {lut_fun.name: lut_fun.name for lut_fun in params['lut_functions']} - printer = HLSCodePrinter(node, lut_functions=lut_functions, use_built_in_luts=node.attributes['use_built_in_luts']) - - fn_templates = [] - for i, expr in enumerate(node.attributes['expression']): - params['expr_str'] = printer.doprint(expr) - params['y_index'] = str(i) - 
fn_templates.append(self.template.format(**params)) - - return fn_templates - - -class ExpressionConfigTemplate(LayerConfigTemplate): - def __init__(self): - super().__init__(SymbolicExpression) - - def format(self, node): - params = self._default_config_params(node) - - lut_defs = [] - for lut_fun in params['lut_functions']: - type_name = params['result_t'].name - if lut_fun.math_func in ['sinpi', 'cospi', 'sin', 'cos', 'asin', 'acos', 'atan', 'atan2']: - # We have return type overrides for these functions - namespace = 'nnet::' - else: - namespace = 'hls::' - lut_def = ( - f'nnet::lookup_table<{type_name}, ' - f'{lut_fun.table_size}, ' - f'{namespace}' - f'{lut_fun.math_func}> ' - f'{lut_fun.name}' - f'({lut_fun.range_start}, ' - f'{lut_fun.range_end});' - ) - lut_defs.append(lut_def) - - return '\n'.join(lut_defs) diff --git a/hls4ml/hls4ml/backends/symbolic/passes/validate_lut.py b/hls4ml/hls4ml/backends/symbolic/passes/validate_lut.py deleted file mode 100644 index 0288b72..0000000 --- a/hls4ml/hls4ml/backends/symbolic/passes/validate_lut.py +++ /dev/null @@ -1,37 +0,0 @@ -from hls4ml.model.layers import SymbolicExpression -from hls4ml.model.optimizer import ConfigurableOptimizerPass - - -class ValidateUserLookupTable(ConfigurableOptimizerPass): - '''Validates the precision of user-defined LUTs is adequate''' - - def __init__(self): - self.raise_exception = False - - def match(self, node): - return isinstance(node, SymbolicExpression) and len(node.get_attr('lut_functions', [])) > 0 - - def transform(self, model, node): - precision = node.get_output_variable().type.precision - range = 2 ** (precision.integer - precision.signed) - frac_step = 1 / 2**precision.fractional - - for lut_fn in node.get_attr('lut_functions'): - lut_range = lut_fn.range_end - lut_fn.range_start - lut_step = lut_range / lut_fn.table_size - - if lut_step < frac_step: - msg = f'LUT function {lut_fn.name} requires more fractional bits.' - if self.raise_exception: - raise Exception(msg) - else: - print('WARNING:', msg) - - if lut_range > range: - msg = f'LUT function {lut_fn.name} requires more integer bits.' 
- if self.raise_exception: - raise Exception(msg) - else: - print('WARNING:', msg) - - return False diff --git a/hls4ml/hls4ml/backends/symbolic/symbolic_backend.py b/hls4ml/hls4ml/backends/symbolic/symbolic_backend.py deleted file mode 100644 index e7201d0..0000000 --- a/hls4ml/hls4ml/backends/symbolic/symbolic_backend.py +++ /dev/null @@ -1,104 +0,0 @@ -import os -import sys - -from hls4ml.backends import FPGABackend -from hls4ml.model.flow import register_flow -from hls4ml.report import parse_vivado_report - - -class SymbolicExpressionBackend(FPGABackend): - def __init__(self): - super().__init__('SymbolicExpression') - self._register_flows() - - def _register_flows(self): - vivado_types = [ - 'vivado:transform_types', - ] - vivado_types_flow = register_flow('specific_types', vivado_types, requires=None, backend=self.name) - - validation_passes = [ - 'symbolicexpression:validate_user_lookup_table', - ] - validation_flow = register_flow('validation', validation_passes, requires=None, backend=self.name) - - template_flow = register_flow('apply_templates', self._get_layer_templates, requires=None, backend=self.name) - - writer_passes = ['make_stamp', 'symbolicexpression:write_hls'] - self._writer_flow = register_flow('write', writer_passes, requires=['vivado:ip'], backend=self.name) - - ip_flow_requirements = [vivado_types_flow, validation_flow, template_flow] - ip_flow_requirements = list(filter(None, ip_flow_requirements)) - - self._default_flow = register_flow('ip', None, requires=ip_flow_requirements, backend=self.name) - - def get_default_flow(self): - return self._default_flow - - def get_writer_flow(self): - return self._writer_flow - - def create_initial_config( - self, - part='xcvu9p-flga2577-2-e', - clock_period=5, - io_type='io_parallel', - compiler='vivado_hls', - hls_include_path=None, - hls_libs_path=None, - ): - config = {} - - config['Part'] = part if part is not None else 'xcvu9p-flga2577-2-e' - config['ClockPeriod'] = clock_period - config['IOType'] = io_type - config['Compiler'] = compiler if compiler is not None else 'vivado_hls' - if not all([hls_include_path, hls_libs_path]): - # Try to infer the include path from Vivado path - bin_path = os.popen(f'command -v {compiler}').read().strip() - if hls_include_path is None: - hls_include_path = bin_path.replace(f'/bin/{compiler}', '/include') - if not os.path.exists(hls_include_path + '/hls_math.h'): - raise Exception( - 'Vivado HLS header files not found. Make sure you pass the proper path ' - 'to the "include" directory (for example "/opt/Xilinx/Vivado/2020.1/include").' - ) - elif hls_include_path == '': - print( - 'No HLS include path provided, using HLS math functions from Python (i.e., predict()) will not work. ' - 'Consider using only LUT approximations.' - ) - if hls_libs_path is None: - hls_libs_path = bin_path.replace(f'/bin/{compiler}', '/lnx64') - if not os.path.exists(hls_libs_path + '/lib/csim/libhlsmc++-GCC46.so'): - raise Exception( - 'Vivado HLS libraries not found. Make sure you pass the proper path ' - 'to the "lnx64" directory (for example "/opt/Xilinx/Vivado/2020.1/lnx64").' - ) - config['HLSIncludePath'] = hls_include_path - config['HLSLibsPath'] = hls_libs_path - config['HLSConfig'] = {} - - return config - - def build(self, model, reset=False, csim=True, synth=True, cosim=False, validation=False, export=False, vsynth=False): - if 'linux' in sys.platform: - found = os.system('command -v vivado_hls > /dev/null') - if found != 0: - raise Exception('Vivado HLS installation not found. 
Make sure "vivado_hls" is on PATH.') - - curr_dir = os.getcwd() - os.chdir(model.config.get_output_dir()) - vivado_cmd = ( - f'vivado_hls -f build_prj.tcl "reset={reset} ' - f'csim={csim} ' - f'synth={synth} ' - f'cosim={cosim} ' - f'validation={validation} ' - f'export={export} ' - f'vsynth={vsynth}"' - ) - os.system(vivado_cmd) - os.chdir(curr_dir) - - return parse_vivado_report(model.config.get_output_dir()) diff --git a/hls4ml/hls4ml/backends/template.py b/hls4ml/hls4ml/backends/template.py deleted file mode 100644 index 9638b53..0000000 --- a/hls4ml/hls4ml/backends/template.py +++ /dev/null @@ -1,80 +0,0 @@ -from hls4ml.model.optimizer.optimizer import OptimizerPass - - -class Template(OptimizerPass): - def __init__(self, name, layer_class, attribute_name): - self.name = name - self.layer_class = layer_class - if not isinstance(self.layer_class, (list, tuple, set)): - self.layer_class = [self.layer_class] - self.attribute_name = attribute_name - - def match(self, node): - for layer_cls in self.layer_class: - if node.class_name == layer_cls.__name__: - return True - return False - - def transform(self, model, node): - formatted_template = self.format(node) - node.set_attr(self.attribute_name, formatted_template) - return False - - def format(self, node): - raise NotImplementedError - - def get_name(self): - return self.name - - def _default_params(self, node): - params = {} - params.update(node.attributes) - # Convert all bool attributes to lowercase strings - params = {key: str(val).lower() if isinstance(val, bool) else val for key, val in params.items()} - - return params - - -class LayerConfigTemplate(Template): - def __init__(self, layer_class): - if isinstance(layer_class, (list, tuple, set)): - name = '_'.join([cls.__name__.lower() for cls in layer_class]) - else: - name = layer_class.__name__.lower() - name += '_config_template' - super().__init__(name, layer_class, 'config_cpp') - - def _default_config_params(self, layer): - params = self._default_params(layer) - params['iotype'] = layer.model.config.get_config_value('IOType') - params['reuse'] = layer.get_attr('reuse_factor') - - return params - - -class FunctionCallTemplate(Template): - def __init__(self, layer_class, include_header=None): - if isinstance(layer_class, (list, tuple, set)): - name = '_'.join([cls.__name__.lower() for cls in layer_class]) - else: - name = layer_class.__name__.lower() - name += '_function_template' - super().__init__(name, layer_class, 'function_cpp') - if include_header is None: - self.include_header = () - else: - self.include_header = include_header - - def _default_function_params(self, layer): - params = self._default_params(layer) - params['config'] = f'config{layer.index}' - params['input_t'] = layer.get_input_variable().type.name - params['output_t'] = layer.get_output_variable().type.name - params['input'] = layer.get_input_variable().name - params['output'] = layer.get_output_variable().name - - return params - - def transform(self, model, node): - node.set_attr('include_header', self.include_header) - return super().transform(model, node) diff --git a/hls4ml/hls4ml/backends/vitis/__init__.py b/hls4ml/hls4ml/backends/vitis/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/hls4ml/hls4ml/backends/vitis/passes/__init__.py b/hls4ml/hls4ml/backends/vitis/passes/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/hls4ml/hls4ml/backends/vitis/passes/feature_check.py b/hls4ml/hls4ml/backends/vitis/passes/feature_check.py deleted file mode 100644 index 
d7f9c2a..0000000 --- a/hls4ml/hls4ml/backends/vitis/passes/feature_check.py +++ /dev/null @@ -1,34 +0,0 @@ -from hls4ml.model.optimizer import OptimizerPass - - -class ValidateConvImplementation(OptimizerPass): - def match(self, node): - return 'Conv' in node.class_name - - def transform(self, model, node): - if node.get_attr('implementation', 'linebuffer') == 'encoded': - print( - f'WARNING: "Encoded" implementation in "{node.name}" ({node.class_name}) is not supported in Vitis backend. ' - 'Switching to "LineBuffer" implementation.' - ) - node.set_attr('implementation', 'linebuffer') - - -class ValidateStrategy(OptimizerPass): - _resource_layer_cls = ['Conv1D', 'Conv2D', 'Dense'] - - def match(self, node): - is_resource_layer = len([layer_cls for layer_cls in self._resource_layer_cls if layer_cls in node.class_name]) > 0 - is_resource_strategy = node.model.config.is_resource_strategy(node) - - return is_resource_layer and is_resource_strategy - - def transform(self, model, node): - n_in, _ = model.config.backend.get_layer_mult_size(node) - rf = node.get_attr('reuse_factor') - if rf > n_in and rf % n_in > 0: - print( - f'WARNING: "Resource" strategy in "{node.name}" ({node.class_name}) may have suboptimal QoR in Vitis ' - 'backend due to use of "urem" cores.\n' - 'Consider using a different ReuseFactor or switching to "Latency" strategy.' - ) diff --git a/hls4ml/hls4ml/backends/vitis/vitis_backend.py b/hls4ml/hls4ml/backends/vitis/vitis_backend.py deleted file mode 100644 index 20dd519..0000000 --- a/hls4ml/hls4ml/backends/vitis/vitis_backend.py +++ /dev/null @@ -1,52 +0,0 @@ -import os -import sys - -from hls4ml.backends import VivadoBackend -from hls4ml.model.flow import get_flow, register_flow -from hls4ml.report import parse_vivado_report - - -class VitisBackend(VivadoBackend): - def __init__(self): - super(VivadoBackend, self).__init__(name='Vitis') - self._register_layer_attributes() - self._register_flows() - - def _register_flows(self): - validation_passes = [ - 'vitis:validate_conv_implementation', - 'vitis:validate_strategy', - ] - validation_flow = register_flow('validation', validation_passes, requires=['vivado:init_layers'], backend=self.name) - - # Any potential templates registered specifically for Vitis backend - template_flow = register_flow( - 'apply_templates', self._get_layer_templates, requires=['vivado:init_layers'], backend=self.name - ) - - writer_passes = ['make_stamp', 'vitis:write_hls'] - self._writer_flow = register_flow('write', writer_passes, requires=['vitis:ip'], backend=self.name) - - ip_flow_requirements = get_flow('vivado:ip').requires.copy() - ip_flow_requirements.insert(ip_flow_requirements.index('vivado:init_layers'), validation_flow) - ip_flow_requirements.insert(ip_flow_requirements.index('vivado:apply_templates'), template_flow) - - self._default_flow = register_flow('ip', None, requires=ip_flow_requirements, backend=self.name) - - def build(self, model, reset=False, csim=True, synth=True, cosim=False, validation=False, export=False, vsynth=False): - if 'linux' in sys.platform: - found = os.system('command -v vitis_hls > /dev/null') - if found != 0: - raise Exception('Vitis HLS installation not found. 
Make sure "vitis_hls" is on PATH.') - - curr_dir = os.getcwd() - os.chdir(model.config.get_output_dir()) - os.system( - ( - 'vitis_hls -f build_prj.tcl "reset={reset} csim={csim} synth={synth} cosim={cosim} ' - 'validation={validation} export={export} vsynth={vsynth}"' - ).format(reset=reset, csim=csim, synth=synth, cosim=cosim, validation=validation, export=export, vsynth=vsynth) - ) - os.chdir(curr_dir) - - return parse_vivado_report(model.config.get_output_dir()) diff --git a/hls4ml/hls4ml/backends/vivado/__init__.py b/hls4ml/hls4ml/backends/vivado/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/hls4ml/hls4ml/backends/vivado/passes/__init__.py b/hls4ml/hls4ml/backends/vivado/passes/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/hls4ml/hls4ml/backends/vivado/passes/broadcast_stream.py b/hls4ml/hls4ml/backends/vivado/passes/broadcast_stream.py deleted file mode 100644 index ec6322c..0000000 --- a/hls4ml/hls4ml/backends/vivado/passes/broadcast_stream.py +++ /dev/null @@ -1,117 +0,0 @@ -import numpy as np - -from hls4ml.backends.template import FunctionCallTemplate, LayerConfigTemplate -from hls4ml.model.layers import Concatenate, Layer, Merge, register_layer -from hls4ml.model.optimizer import OptimizerPass - - -class Broadcast(Layer): - '''Inserted between layers for broadcasting.''' - - def initialize(self): - shape = self.attributes['target_shape'] - if shape[0] is None: - shape = shape[1:] - dims = [f'N_SIZE_{i}_{self.index}' for i in range(1, len(shape) + 1)] - self.add_output_variable(shape, dims) - - -broadcast_function_template = 'nnet::broadcast_stream<{input_t}, {output_t}, {config}>({input}, {output});' -broadcast_config_template = """struct config{index} : nnet::broadcast_config {{ - static const unsigned in_width = {in_width}; - static const unsigned in_height = {in_height}; - static const unsigned in_chan = {in_chan}; - static const unsigned out_width = {out_width}; - static const unsigned out_height = {out_height}; - static const unsigned out_chan = {out_chan}; -}};\n""" -broadcast_include_list = ['nnet_utils/nnet_stream.h'] - - -class BroadcastConfigTemplate(LayerConfigTemplate): - def __init__(self): - super().__init__(Broadcast) - self.template = broadcast_config_template - - def format(self, node): - params = self._default_config_params(node) - params['in_height'] = node.get_input_variable().shape[0] - params['in_width'] = node.get_input_variable().shape[1] - params['in_chan'] = node.get_input_variable().shape[2] - params['out_height'] = node.get_output_variable().shape[0] - params['out_width'] = node.get_output_variable().shape[1] - params['out_chan'] = node.get_output_variable().shape[2] - - return self.template.format(**params) - - -class BroadcastFunctionTemplate(FunctionCallTemplate): - def __init__(self): - super().__init__(Broadcast, include_header=broadcast_include_list) - self.template = broadcast_function_template - - def format(self, node): - params = self._default_function_params(node) - return self.template.format(**params) - - -def register_broadcast_stream(backend): - # Register the layer types to the layer map - register_layer('Broadcast', Broadcast) - - # Register the optimization passes - backend.register_pass('broadcast_stream', BroadcastStream) - - # Register template passes - backend.register_template(BroadcastConfigTemplate) - backend.register_template(BroadcastFunctionTemplate) - - -class BroadcastStream(OptimizerPass): - def match(self, node): - if isinstance(node, Merge) and not isinstance(node, 
diff --git a/hls4ml/hls4ml/backends/vivado/passes/broadcast_stream.py b/hls4ml/hls4ml/backends/vivado/passes/broadcast_stream.py
deleted file mode 100644
index ec6322c..0000000
--- a/hls4ml/hls4ml/backends/vivado/passes/broadcast_stream.py
+++ /dev/null
@@ -1,117 +0,0 @@
-import numpy as np
-
-from hls4ml.backends.template import FunctionCallTemplate, LayerConfigTemplate
-from hls4ml.model.layers import Concatenate, Layer, Merge, register_layer
-from hls4ml.model.optimizer import OptimizerPass
-
-
-class Broadcast(Layer):
-    '''Inserted between layers for broadcasting.'''
-
-    def initialize(self):
-        shape = self.attributes['target_shape']
-        if shape[0] is None:
-            shape = shape[1:]
-        dims = [f'N_SIZE_{i}_{self.index}' for i in range(1, len(shape) + 1)]
-        self.add_output_variable(shape, dims)
-
-
-broadcast_function_template = 'nnet::broadcast_stream<{input_t}, {output_t}, {config}>({input}, {output});'
-broadcast_config_template = """struct config{index} : nnet::broadcast_config {{
-    static const unsigned in_width = {in_width};
-    static const unsigned in_height = {in_height};
-    static const unsigned in_chan = {in_chan};
-    static const unsigned out_width = {out_width};
-    static const unsigned out_height = {out_height};
-    static const unsigned out_chan = {out_chan};
-}};\n"""
-broadcast_include_list = ['nnet_utils/nnet_stream.h']
-
-
-class BroadcastConfigTemplate(LayerConfigTemplate):
-    def __init__(self):
-        super().__init__(Broadcast)
-        self.template = broadcast_config_template
-
-    def format(self, node):
-        params = self._default_config_params(node)
-        params['in_height'] = node.get_input_variable().shape[0]
-        params['in_width'] = node.get_input_variable().shape[1]
-        params['in_chan'] = node.get_input_variable().shape[2]
-        params['out_height'] = node.get_output_variable().shape[0]
-        params['out_width'] = node.get_output_variable().shape[1]
-        params['out_chan'] = node.get_output_variable().shape[2]
-
-        return self.template.format(**params)
-
-
-class BroadcastFunctionTemplate(FunctionCallTemplate):
-    def __init__(self):
-        super().__init__(Broadcast, include_header=broadcast_include_list)
-        self.template = broadcast_function_template
-
-    def format(self, node):
-        params = self._default_function_params(node)
-        return self.template.format(**params)
-
-
-def register_broadcast_stream(backend):
-    # Register the layer types to the layer map
-    register_layer('Broadcast', Broadcast)
-
-    # Register the optimization passes
-    backend.register_pass('broadcast_stream', BroadcastStream)
-
-    # Register template passes
-    backend.register_template(BroadcastConfigTemplate)
-    backend.register_template(BroadcastFunctionTemplate)
-
-
-class BroadcastStream(OptimizerPass):
-    def match(self, node):
-        if isinstance(node, Merge) and not isinstance(node, Concatenate):
-            inp1 = node.get_input_variable(node.inputs[0])
-            inp2 = node.get_input_variable(node.inputs[1])
-            return inp1.shape != inp2.shape
-        else:
-            return False
-
-    def transform(self, model, node):
-        if model.config.backend.name not in ['Vivado'] or model.config.get_config_value('IOType') != 'io_stream':
-            return False
-
-        inp = [node.get_input_variable(inp_name) for inp_name in node.inputs]
-
-        if np.prod(inp[0].shape) > np.prod(inp[1].shape):
-            idx = 1
-            attrs = {'target_shape': inp[0].shape}
-        else:
-            idx = 0
-            attrs = {'target_shape': inp[1].shape}
-
-        def supported_broadcast(inp_shape, target_shape):
-            # Must be (H, W, C)
-            if not len(inp_shape) == 3:
-                return False
-            # Supported: (1, 1, C) -> (H, W, C)
-            if inp_shape[0] == inp_shape[1] == 1 and inp_shape[2] == target_shape[2]:
-                return True
-            # Supported: (H, W, 1) -> (H, W, C)
-            if inp_shape[2] == 1 and inp_shape[0] == target_shape[0] and inp_shape[1] == target_shape[1]:
-                return True
-            return False
-
-        brdcst_inp = node.inputs[idx]
-        inp_shape = node.get_input_variable(brdcst_inp).shape
-        target_shape = attrs['target_shape']
-        if not supported_broadcast(inp_shape, target_shape):
-            raise RuntimeError(
-                f'Unsupported broadcast type for stream: {inp_shape} -> {target_shape}; '
-                + 'Only (1, 1, C) -> (H, W, C) and (H, W, 1) -> (H, W, C) currently supported'
-            )
-        brdcst_out = 'broadcast_' + brdcst_inp
-        brdcst_layer = model.make_node('Broadcast', brdcst_out, attrs, [brdcst_inp].copy())
-        model.insert_node(brdcst_layer, before=node, input_idx=idx)
-        node.inputs[idx] = brdcst_out
-
-        return True
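# --- Illustration (editor's sketch, not part of the patch) --------------------
# supported_broadcast() above accepts exactly two stream-broadcast patterns.
# A self-contained restatement of that rule, exercised with example shapes:

def supported_broadcast(inp_shape, target_shape):
    if len(inp_shape) != 3:  # must be (H, W, C)
        return False
    # (1, 1, C) -> (H, W, C): broadcast one pixel across the full image
    if inp_shape[0] == inp_shape[1] == 1 and inp_shape[2] == target_shape[2]:
        return True
    # (H, W, 1) -> (H, W, C): broadcast one channel across all channels
    if inp_shape[2] == 1 and inp_shape[:2] == target_shape[:2]:
        return True
    return False

print(supported_broadcast((1, 1, 8), (16, 16, 8)))    # True
print(supported_broadcast((16, 16, 1), (16, 16, 8)))  # True
print(supported_broadcast((1, 16, 8), (16, 16, 8)))   # False -> RuntimeError in the pass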
diff --git a/hls4ml/hls4ml/backends/vivado/passes/conv_same_pad.py b/hls4ml/hls4ml/backends/vivado/passes/conv_same_pad.py
deleted file mode 100644
index bb8354a..0000000
--- a/hls4ml/hls4ml/backends/vivado/passes/conv_same_pad.py
+++ /dev/null
@@ -1,109 +0,0 @@
-from hls4ml.model.layers import Conv1D, Conv2D, SeparableConv1D, SeparableConv2D
-from hls4ml.model.optimizer import OptimizerPass
-
-
-class InsertZeroPaddingBeforeConv1D(OptimizerPass):
-    name = 'insert_zero_padding_before_conv1d'
-
-    def match(self, node):
-        is_match = (
-            isinstance(node, (Conv1D, SeparableConv1D))
-            and ((node.get_attr('padding') == 'same') or (node.get_attr('padding') == 'causal'))
-            and node.get_attr('filt_width') != 1
-        )
-        return is_match
-
-    def transform(self, model, node):
-        if model.config.get_config_value('IOType') != 'io_stream':
-            return False
-
-        # Get the padding parameters from Conv1D layer
-        pad_left = node.get_attr('pad_left')
-        pad_right = node.get_attr('pad_right')
-
-        # Check if no padding needs to be done
-        if pad_left == pad_right == 0:
-            return False
-
-        out_width = pad_left + node.get_attr('in_width') + pad_right
-
-        attrs = {
-            'pad_left': pad_left,
-            'pad_right': pad_right,
-            'in_width': node.get_attr('in_width'),
-            'out_width': out_width,
-            'n_chan': node.get_attr('n_chan'),
-            'data_format': node.get_attr('data_format', 'channels_last'),
-        }
-
-        # Switch Conv1D layer padding to 'valid'
-        node.set_attr('padding', 'valid')
-        node.set_attr('pad_left', 0)
-        node.set_attr('pad_right', 0)
-        node.set_attr('in_width', out_width)
-
-        # Insert new ZeroPadding1D node above Conv1D
-        padding_layer = model.make_node('ZeroPadding1D', 'zp1d_' + node.name, attrs, node.inputs.copy())
-        padding_layer.get_output_variable().type.precision = node.get_input_variable().type.precision
-        model.insert_node(padding_layer)
-
-        return True
-
-
-class InsertZeroPaddingBeforeConv2D(OptimizerPass):
-    name = 'insert_zero_padding_before_conv2d'
-
-    def match(self, node):
-        is_match = (
-            isinstance(node, (Conv2D, SeparableConv2D))
-            and node.get_attr('padding') == 'same'
-            and node.get_attr('filt_height') != 1
-            and node.get_attr('filt_width') != 1
-        )
-        return is_match
-
-    def transform(self, model, node):
-        if model.config.get_config_value('IOType') != 'io_stream':
-            return False
-
-        # Get the padding parameters from Conv2D layer
-        pad_top = node.get_attr('pad_top')
-        pad_bottom = node.get_attr('pad_bottom')
-        pad_left = node.get_attr('pad_left')
-        pad_right = node.get_attr('pad_right')
-
-        # Check if no padding needs to be done
-        if pad_top == pad_bottom == pad_left == pad_right == 0:
-            return False
-
-        out_height = pad_top + node.get_attr('in_height') + pad_bottom
-        out_width = pad_left + node.get_attr('in_width') + pad_right
-
-        attrs = {
-            'pad_top': pad_top,
-            'pad_bottom': pad_bottom,
-            'pad_left': pad_left,
-            'pad_right': pad_right,
-            'in_height': node.get_attr('in_height'),
-            'in_width': node.get_attr('in_width'),
-            'out_height': out_height,
-            'out_width': out_width,
-            'n_chan': node.get_attr('n_chan'),
-            'data_format': node.get_attr('data_format', 'channels_last'),
-        }
-
-        # Switch Conv2D layer padding to 'valid'
-        node.set_attr('padding', 'valid')
-        node.set_attr('pad_top', 0)
-        node.set_attr('pad_bottom', 0)
-        node.set_attr('pad_left', 0)
-        node.set_attr('pad_right', 0)
-        node.set_attr('in_height', out_height)
-        node.set_attr('in_width', out_width)
-
-        # Insert new ZeroPadding2D node above Conv2D
-        padding_layer = model.make_node('ZeroPadding2D', 'zp2d_' + node.name, attrs, node.inputs.copy())
-        padding_layer.get_output_variable().type.precision = node.get_input_variable().type.precision
-        model.insert_node(padding_layer, before=node)
-
-        return True
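# --- Illustration (editor's sketch, not part of the patch) --------------------
# The two passes above turn 'same'/'causal' padding into an explicit
# ZeroPadding layer followed by a 'valid' convolution. The padded width is
# simply in_width plus both pads; e.g. for an invented width-28 input:

in_width = 28
pad_left, pad_right = 1, 1  # Keras-style 'same' padding for filt_width=3, stride=1
out_width = pad_left + in_width + pad_right
print(out_width)  # 30: the conv node then runs in 'valid' mode on this width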
diff --git a/hls4ml/hls4ml/backends/vivado/passes/conv_stream.py b/hls4ml/hls4ml/backends/vivado/passes/conv_stream.py
deleted file mode 100644
index e0bb853..0000000
--- a/hls4ml/hls4ml/backends/vivado/passes/conv_stream.py
+++ /dev/null
@@ -1,52 +0,0 @@
-from hls4ml.model.layers import Conv1D, Conv2D, SeparableConv1D, SeparableConv2D
-from hls4ml.model.optimizer import OptimizerPass
-
-
-class GenerateConvStreamingInstructions(OptimizerPass):
-    '''Generates the instructions for streaming implementation of CNNs'''
-
-    def match(self, node):
-        return isinstance(node, (Conv1D, SeparableConv1D, Conv2D, SeparableConv2D))
-
-    def transform(self, model, node):
-        node_class = node.__class__.__name__
-        if '1D' in node_class:
-            self._generate_1d_instructions(node)
-        elif '2D' in node_class:
-            self._generate_2d_instructions(node)
-        else:
-            raise Exception(f'Cannot generate instructions for node {node.name} ({node_class})')
-
-    def _generate_1d_instructions(self, node):
-        if node.model.config.get_config_value('IOType') == 'io_stream':
-            min_w, instructions = node.model.config.backend.compute_conv1d_instructions(
-                node.get_input_variable().shape[0],
-                node.get_input_variable().shape[1],
-                node.get_attr('filt_width'),
-                node.get_attr('stride_width'),
-            )
-            instructions_str = ','.join(str(i) for i in instructions)
-            node.set_attr('min_width', min_w)
-            node.set_attr('instructions', instructions_str)
-        else:
-            # these are unused; just put dummy values
-            node.set_attr('min_width', node.get_attr('in_width'))
-            node.set_attr('instructions', '0')
-
-    def _generate_2d_instructions(self, node):
-        if node.model.config.get_config_value('IOType') == 'io_stream':
-            min_h, min_w, instructions = node.model.config.backend.compute_conv2d_instructions(
-                node.get_input_variable().shape[0],
-                node.get_input_variable().shape[1],
-                node.get_input_variable().shape[2],
-                node.get_attr('filt_height'),
-                node.get_attr('stride_height'),
-            )
-            instructions_str = ','.join(str(i) for i in instructions)
-            node.set_attr('min_height', min_h)
-            node.set_attr('min_width', min_w)
-            node.set_attr('instructions', instructions_str)
-        else:
-            node.set_attr('min_height', node.get_attr('in_height'))
-            node.set_attr('min_width', node.get_attr('in_width'))
-            node.set_attr('instructions', '0')
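# --- Illustration (editor's sketch, not part of the patch) --------------------
# The pass above stores the backend-computed instruction list as a single
# comma-separated string attribute, which later lands in the generated C++
# `pixels[]` initializer. The string-joining step on its own, with made-up
# instruction words:

instructions = [1, 3, 7, 15]
instructions_str = ','.join(str(i) for i in instructions)
print(instructions_str)  # '1,3,7,15'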
diff --git a/hls4ml/hls4ml/backends/vivado/passes/convolution_templates.py b/hls4ml/hls4ml/backends/vivado/passes/convolution_templates.py
deleted file mode 100644
index 874349a..0000000
--- a/hls4ml/hls4ml/backends/vivado/passes/convolution_templates.py
+++ /dev/null
@@ -1,500 +0,0 @@
-from hls4ml.backends.backend import get_backend
-from hls4ml.backends.template import FunctionCallTemplate, LayerConfigTemplate
-from hls4ml.model.layers import (
-    Conv1D,
-    Conv2D,
-    Conv2DBatchnorm,
-    DepthwiseConv1D,
-    DepthwiseConv2D,
-    SeparableConv1D,
-    SeparableConv2D,
-)
-
-# Shared multiplication template
-
-conv_mult_config_template = """struct config{index}_mult : nnet::dense_config {{
-    static const unsigned n_in = {n_in};
-    static const unsigned n_out = {n_out};
-    static const unsigned reuse_factor = {reuse};
-    static const unsigned strategy = nnet::{strategy};
-    static const unsigned n_zeros = {nzeros};
-    static const unsigned multiplier_limit = DIV_ROUNDUP(n_in * n_out, reuse_factor) - n_zeros / reuse_factor;
-    typedef {accum_t.name} accum_t;
-    typedef {bias_t.name} bias_t;
-    typedef {weight_t.name} weight_t;
-    template<class x_T, class y_T>
-    using product = nnet::product::{product_type}<x_T, y_T>;
-}};\n"""
-
-# Conv1D templates
-
-conv1d_config_template = """struct config{index} : nnet::conv1d_config {{
-    static const unsigned pad_left = {pad_left};
-    static const unsigned pad_right = {pad_right};
-    static const unsigned in_width = {in_width};
-    static const unsigned n_chan = {n_chan};
-    static const unsigned filt_width = {filt_width};
-    static const unsigned kernel_size = filt_width;
-    static const unsigned n_filt = {n_filt};
-    static const unsigned stride_width = {stride_width};
-    static const unsigned dilation = {dilation};
-    static const unsigned out_width = {out_width};
-    static const unsigned reuse_factor = {reuse};
-    static const unsigned n_zeros = {nzeros};
-    static const bool store_weights_in_bram = false;
-    static const unsigned strategy = nnet::{strategy};
-    static const nnet::conv_implementation implementation = nnet::conv_implementation::{implementation};
-    static const unsigned min_width = {min_width};
-    static const ap_uint<filt_width> pixels[min_width];
-    static const unsigned n_partitions = {n_partitions};
-    static const unsigned n_pixels = out_width / n_partitions;
-    template<class data_T, class CONFIG_T>
-    using fill_buffer = nnet::{fill_fn}<data_T, CONFIG_T>;
-    typedef {accum_t.name} accum_t;
-    typedef {bias_t.name} bias_t;
-    typedef {weight_t.name} weight_t;
-    typedef {config_t} mult_config;
-    template<unsigned K, unsigned S, unsigned W>
-    using scale_index = nnet::{scale_index_type}<K, S, W>;
-}};
-const ap_uint<config{index}::filt_width> config{index}::pixels[] = {{{instructions}}};\n"""
-
-conv1d_function_template = 'nnet::conv_1d_{data_format}<{input_t}, {output_t}, {config}>({input}, {output}, {w}, {b});'
-depthconv1d_function_template = (
-    'nnet::depthwise_conv_1d_{data_format}<{input_t}, {output_t}, {config}>({input}, {output}, {w}, {b});'
-)
-
-conv1d_include_list = ['nnet_utils/nnet_conv1d.h', 'nnet_utils/nnet_conv1d_stream.h']
-
-
-class Conv1DConfigTemplate(LayerConfigTemplate):
-    def __init__(self):
-        super().__init__((Conv1D, DepthwiseConv1D))
-        self.template = conv1d_config_template
-        self.mult_template = conv_mult_config_template
-
-    def format(self, node):
-        params = self._default_config_params(node)
-        params['dilation'] = node.get_attr('dilation', 1)
-        params['nzeros'] = node.get_weights('weight').nzeros
-
-        params['config_t'] = f'config{node.index}_mult'
-        if node.get_attr('in_width') == node.get_attr('min_width'):
-            params['scale_index_type'] = 'scale_index_unscaled'
-        else:
-            params['scale_index_type'] = 'scale_index_regular'
-
-        if node.model.config.get_config_value('IOType') == 'io_parallel':
-            params['fill_fn'] = f'fill_buffer_{node.index}'
-        else:
-            params['fill_fn'] = 'FillConv1DBuffer'
-
-        conv_config = self.template.format(**params)
-
-        mult_params = self._default_config_params(node)
-        mult_params['n_in'] = node.get_attr('n_chan') * node.get_attr('filt_width')
-        mult_params['n_out'] = node.get_attr('n_filt')
-        mult_params['nzeros'] = node.get_weights('weight').nzeros
-        mult_params['product_type'] = get_backend('vivado').product_type(
-            node.get_input_variable().type.precision, node.get_weights('weight').type.precision
-        )
-        mult_config = self.mult_template.format(**mult_params)
-
-        return mult_config + '\n' + conv_config
-
-
-class Conv1DFunctionTemplate(FunctionCallTemplate):
-    def __init__(self):
-        super().__init__(Conv1D, include_header=conv1d_include_list)
-        self.template = conv1d_function_template
-
-    def format(self, node):
-        params = self._default_function_params(node)
-        params['data_format'] = 'cf' if node.get_attr('data_format') == 'channels_first' else 'cl'
-        params['w'] = node.get_weights('weight').name
-        params['b'] = node.get_weights('bias').name
-
-        return self.template.format(**params)
-
-
-class DepthwiseConv1DFunctionTemplate(Conv1DFunctionTemplate):
-    def __init__(self):
-        super(Conv1DFunctionTemplate, self).__init__(DepthwiseConv1D, include_header=sepconv1d_include_list)
-        self.template = depthconv1d_function_template
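# --- Illustration (editor's sketch, not part of the patch) --------------------
# The shared mult config above sizes the multiplier pool as
# DIV_ROUNDUP(n_in * n_out, reuse_factor) - n_zeros / reuse_factor (integer
# division). The same arithmetic in plain Python, with invented layer sizes:

def multiplier_limit(n_in, n_out, reuse_factor, n_zeros):
    div_roundup = -(-(n_in * n_out) // reuse_factor)  # ceiling division
    return div_roundup - n_zeros // reuse_factor

# A 3x3 conv with 8 input and 16 output channels, reuse factor 4, unpruned:
print(multiplier_limit(n_in=9 * 8, n_out=16, reuse_factor=4, n_zeros=0))  # 288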
-
-
-# Conv2D Templates
-
-conv2d_config_template = """struct config{index} : nnet::conv2d_config {{
-    static const unsigned pad_top = {pad_top};
-    static const unsigned pad_bottom = {pad_bottom};
-    static const unsigned pad_left = {pad_left};
-    static const unsigned pad_right = {pad_right};
-    static const unsigned in_height = {in_height};
-    static const unsigned in_width = {in_width};
-    static const unsigned n_chan = {n_chan};
-    static const unsigned filt_height = {filt_height};
-    static const unsigned filt_width = {filt_width};
-    static const unsigned kernel_size = filt_height * filt_width;
-    static const unsigned n_filt = {n_filt};
-    static const unsigned stride_height = {stride_height};
-    static const unsigned stride_width = {stride_width};
-    static const unsigned out_height = {out_height};
-    static const unsigned out_width = {out_width};
-    static const unsigned reuse_factor = {reuse};
-    static const unsigned n_zeros = {nzeros};
-    static const unsigned multiplier_limit =
-        DIV_ROUNDUP(kernel_size * n_chan * n_filt, reuse_factor) - n_zeros / reuse_factor;
-    static const bool store_weights_in_bram = false;
-    static const unsigned strategy = nnet::{strategy};
-    static const nnet::conv_implementation implementation = nnet::conv_implementation::{implementation};
-    static const unsigned min_height = {min_height};
-    static const unsigned min_width = {min_width};
-    static const ap_uint<filt_height * filt_width> pixels[min_height * min_width];
-    static const unsigned n_partitions = {n_partitions};
-    static const unsigned n_pixels = out_height * out_width / n_partitions;
-    template<class data_T, class CONFIG_T>
-    using fill_buffer = nnet::{fill_fn}<data_T, CONFIG_T>;
-    typedef {accum_t.name} accum_t;
-    typedef {bias_t.name} bias_t;
-    typedef {weight_t.name} weight_t;
-    typedef {config_t} mult_config;
-    template<unsigned K, unsigned S, unsigned W>
-    using scale_index_height = nnet::{scale_index_height_type}<K, S, W>;
-    template<unsigned K, unsigned S, unsigned W>
-    using scale_index_width = nnet::{scale_index_width_type}<K, S, W>;
-}};
-const ap_uint<config{index}::filt_height * config{index}::filt_width> config{index}::pixels[] = {{{instructions}}};\n"""
-
-conv2d_function_template = 'nnet::conv_2d_{data_format}<{input_t}, {output_t}, {config}>({input}, {output}, {w}, {b});'
-depthconv2d_function_template = (
-    'nnet::depthwise_conv_2d_{data_format}<{input_t}, {output_t}, {config}>({input}, {output}, {w}, {b});'
-)
-
-conv2d_include_list = ['nnet_utils/nnet_conv2d.h', 'nnet_utils/nnet_conv2d_stream.h']
-
-
-class Conv2DConfigTemplate(LayerConfigTemplate):
-    def __init__(self):
-        super().__init__((Conv2D, Conv2DBatchnorm, DepthwiseConv2D))
-        self.template = conv2d_config_template
-        self.mult_template = conv_mult_config_template
-
-    def format(self, node):
-        params = self._default_config_params(node)
-        params['dilation'] = node.get_attr('dilation', 1)
-        params['nzeros'] = node.get_weights('weight').nzeros
-
-        params['config_t'] = f'config{node.index}_mult'
-
-        if node.get_attr('in_height') == node.get_attr('min_height'):
-            params['scale_index_height_type'] = 'scale_index_unscaled'
-        else:
-            params['scale_index_height_type'] = 'scale_index_regular'
-
-        if node.get_attr('in_width') == node.get_attr('min_width'):
-            params['scale_index_width_type'] = 'scale_index_unscaled'
-        else:
-            params['scale_index_width_type'] = 'scale_index_regular'
-
-        if node.model.config.get_config_value('IOType') == 'io_parallel':
-            params['fill_fn'] = f'fill_buffer_{node.index}'
-        else:
-            params['fill_fn'] = 'FillConv2DBuffer'
-
-        conv_config = self.template.format(**params)
-
-        mult_params = self._default_config_params(node)
-        mult_params['n_in'] = node.get_attr('n_chan') * node.get_attr('filt_height') * node.get_attr('filt_width')
-        mult_params['n_out'] = node.get_attr('n_filt')
-        mult_params['nzeros'] = node.get_weights('weight').nzeros
-        mult_params['product_type'] = get_backend('vivado').product_type(
-            node.get_input_variable().type.precision, node.get_weights('weight').type.precision
-        )
-        mult_config = self.mult_template.format(**mult_params)
-
-        return mult_config + '\n' + conv_config
-
-
-class Conv2DFunctionTemplate(FunctionCallTemplate):
-    def __init__(self):
-        super().__init__((Conv2D, Conv2DBatchnorm), include_header=conv2d_include_list)
-        self.template = conv2d_function_template
-
-    def format(self, node):
-        params = self._default_function_params(node)
-        params['data_format'] = 'cf' if node.get_attr('data_format') == 'channels_first' else 'cl'
-        params['w'] = node.get_weights('weight').name
-        params['b'] = node.get_weights('bias').name
-
-        return self.template.format(**params)
-
-
-class DepthwiseConv2DFunctionTemplate(Conv2DFunctionTemplate):
-    def __init__(self):
-        super(Conv2DFunctionTemplate, self).__init__(DepthwiseConv2D, include_header=sepconv2d_include_list)
-        self.template = depthconv2d_function_template
-
-
-# SeparableConv1D/2D Templates
-
-sepconv_config_template = """struct config{index} {{
-    typedef {depthwise_config} depthwise_config;
-    typedef {pointwise_config} pointwise_config;
-}};\n"""
-
-sepconv1d_function_template = (
-    'nnet::separable_conv_1d_{data_format}<{input_t}, {dw_output_t}, {output_t}, {config}>('
-    '{input}, {output}, {d}, {p}, {z}, {b});'
-)
-sepconv2d_function_template = (
-    'nnet::separable_conv_2d_{data_format}<{input_t}, {dw_output_t}, {output_t}, {config}>('
-    '{input}, {output}, {d}, {p}, {z}, {b});'
-)
-
-sepconv1d_include_list = ['nnet_utils/nnet_conv1d.h', 'nnet_utils/nnet_sepconv1d_stream.h']
-sepconv2d_include_list = ['nnet_utils/nnet_conv2d.h', 'nnet_utils/nnet_sepconv2d_stream.h']
-
-
-class SeparableConv1DConfigTemplate(LayerConfigTemplate):
-    def __init__(self):
-        super().__init__(SeparableConv1D)
-        self.template = sepconv_config_template
-        self.depthwise_template = conv1d_config_template
-        self.pointwise_template = conv1d_config_template
-        self.depthwise_mult_template = conv_mult_config_template
-        self.pointwise_mult_template = conv_mult_config_template
-
-    def format(self, node):
-        # Separable master config
-        params = {}
-        params['index'] = node.index
-        params['depthwise_config'] = f'config{node.index}_depthwise'
-        params['pointwise_config'] = f'config{node.index}_pointwise'
-        sep_config = self.template.format(**params)
-
-        # Depthwise config
-        params = self._default_config_params(node)
-        # Override bias and bias_t since these are zeros in depthwise step of SepConv1D
-        params['bias'] = params['zero_bias']
-        params['bias_t'] = params['zero_bias_t']
-        params['n_filt'] = params['n_chan']  # In depthwise step n_chan == n_filt
-        params['dilation'] = node.get_attr('dilation', 1)
-        params['nzeros'] = node.get_weights('depthwise').nzeros
-        params['index'] = str(node.index) + '_depthwise'
-        params['weight_t'] = node.get_weights('depthwise').type
-        params['fill_fn'] = 'FillConv1DBuffer'
-
-        if node.get_attr('unscaled'):
-            params['scale_index_type'] = 'scale_index_unscaled'
-        else:
-            params['scale_index_type'] = 'scale_index_regular'
-
-        params['config_t'] = f'config{node.index}_depthwise_mult'
-        depthwise_config = self.depthwise_template.format(**params)
-
-        # Depthwise mult config
-        mult_params = self._default_config_params(node)
-        mult_params['index'] = str(node.index) + '_depthwise'
-        mult_params['n_in'] = node.get_attr('n_chan') * node.get_attr('filt_width')
-        mult_params['n_out'] = node.get_attr('n_chan')
-        mult_params['nzeros'] = node.get_weights('depthwise').nzeros
-        mult_params['weight_t'] = node.get_weights('depthwise').type
-        mult_params['product_type'] = get_backend('vivado').product_type(
-            node.get_input_variable().type.precision, node.get_weights('depthwise').type.precision
-        )
-        depthwise_mult_config = self.depthwise_mult_template.format(**mult_params)
-
-        # Pointwise config
-        params = self._default_config_params(node)
-        if node.get_attr('data_format') == 'channels_last':
-            params['in_width'] = node.get_output_variable().shape[0]
-        else:
-            params['in_width'] = node.get_output_variable().shape[1]
-
-        params['filt_width'] = 1
-        params['stride_width'] = 1
-        params['dilation'] = node.get_attr('dilation', 1)
-        params['nzeros'] = node.get_weights('pointwise').nzeros
-        params['index'] = str(node.index) + '_pointwise'
-        params['weight_t'] = node.get_weights('pointwise').type
-        params['min_width'] = params['in_width']
-        params['instructions'] = '0'
-        params['fill_fn'] = 'FillConv1DBuffer'
-
-        if node.get_attr('unscaled'):
-            params['scale_index_type'] = 'scale_index_unscaled'
-        else:
-            params['scale_index_type'] = 'scale_index_regular'
-
-        params['config_t'] = f'config{node.index}_pointwise_mult'
-        pointwise_config = self.pointwise_template.format(**params)
-
-        # Pointwise mult config
-        mult_params = self._default_config_params(node)
-        mult_params['index'] = str(node.index) + '_pointwise'
-        mult_params['n_in'] = node.get_attr('n_chan')
-        mult_params['n_out'] = node.get_attr('n_filt')
-        mult_params['nzeros'] = node.get_weights('pointwise').nzeros
-        mult_params['weight_t'] = node.get_weights('pointwise').type
-        mult_params['product_type'] = get_backend('vivado').product_type(
-            node.get_input_variable().type.precision, node.get_weights('pointwise').type.precision
-        )
-        pointwise_mult_config = self.pointwise_mult_template.format(**mult_params)
-
-        return (
-            depthwise_mult_config
-            + '\n'
-            + depthwise_config
-            + '\n'
-            + pointwise_mult_config
-            + '\n'
-            + pointwise_config
-            + '\n'
-            + sep_config
-        )
-
-
-class SeparableConv1DFunctionTemplate(FunctionCallTemplate):
-    def __init__(self):
-        super().__init__(SeparableConv1D, include_header=sepconv1d_include_list)
-        self.template = sepconv1d_function_template
-
-    def format(self, node):
-        params = self._default_function_params(node)
-        params['dw_output_t'] = node.get_attr('dw_output_t').name
-        params['data_format'] = 'cf' if node.get_attr('data_format') == 'channels_first' else 'cl'
-        params['d'] = node.get_weights('depthwise').name
-        params['p'] = node.get_weights('pointwise').name
-        params['b'] = node.get_weights('bias').name
-        params['z'] = node.get_weights('zero_bias').name
-
-        return self.template.format(**params)
-
-
-class SeparableConv2DConfigTemplate(LayerConfigTemplate):
-    def __init__(self):
-        super().__init__(SeparableConv2D)
-        self.template = sepconv_config_template
-        self.depthwise_template = conv2d_config_template
-        self.pointwise_template = conv2d_config_template
-        self.depthwise_mult_template = conv_mult_config_template
-        self.pointwise_mult_template = conv_mult_config_template
-
-    def format(self, node):
-        # Separable master config
-        params = {}
-        params['index'] = node.index
-        params['depthwise_config'] = f'config{node.index}_depthwise'
-        params['pointwise_config'] = f'config{node.index}_pointwise'
-        sep_config = self.template.format(**params)
-
-        # Depthwise config
-        params = self._default_config_params(node)
-        # Override bias and bias_t since these are zeros in depthwise step of SepConv2D
-        params['bias'] = params['zero_bias']
-        params['bias_t'] = params['zero_bias_t']
-        params['n_filt'] = params['n_chan']  # In depthwise step n_chan == n_filt
-        params['dilation'] = node.get_attr('dilation', 1)
-        params['nzeros'] = node.get_weights('depthwise').nzeros
-        params['index'] = str(node.index) + '_depthwise'
-        params['weight_t'] = node.get_weights('depthwise').type
-        params['fill_fn'] = 'FillConv2DBuffer'
-
-        if node.get_attr('unscaled_h'):
-            params['scale_index_height_type'] = 'scale_index_unscaled'
-        else:
-            params['scale_index_height_type'] = 'scale_index_regular'
-
-        if node.get_attr('unscaled_w'):
-            params['scale_index_width_type'] = 'scale_index_unscaled'
-        else:
-            params['scale_index_width_type'] = 'scale_index_regular'
-
-        params['config_t'] = f'config{node.index}_depthwise_mult'
-        depthwise_config = self.depthwise_template.format(**params)
-
-        # Depthwise mult config
-        mult_params = self._default_config_params(node)
-        mult_params['index'] = str(node.index) + '_depthwise'
-        mult_params['n_in'] = node.get_attr('n_chan') * node.get_attr('filt_height') * node.get_attr('filt_width')
-        mult_params['n_out'] = node.get_attr('n_chan')
-        mult_params['nzeros'] = node.get_weights('depthwise').nzeros
-        mult_params['weight_t'] = node.get_weights('depthwise').type
-        mult_params['product_type'] = get_backend('vivado').product_type(
-            node.get_input_variable().type.precision, node.get_weights('depthwise').type.precision
-        )
-        depthwise_mult_config = self.depthwise_mult_template.format(**mult_params)
-
-        # Pointwise config
-        params = self._default_config_params(node)
-        if node.get_attr('data_format') == 'channels_last':
-            params['in_height'] = node.get_output_variable().shape[0]
-            params['in_width'] = node.get_output_variable().shape[1]
-        else:
-            params['in_height'] = node.get_output_variable().shape[1]
-            params['in_width'] = node.get_output_variable().shape[2]
-
-        params['filt_height'] = params['filt_width'] = 1
-        params['stride_height'] = params['stride_width'] = 1
-        params['dilation'] = node.get_attr('dilation', 1)
-        params['nzeros'] = node.get_weights('pointwise').nzeros
-        params['index'] = str(node.index) + '_pointwise'
-        params['weight_t'] = node.get_weights('pointwise').type
-        params['min_height'] = params['in_height']
-        params['min_width'] = params['in_width']
-        params['instructions'] = '0'
-        params['fill_fn'] = 'FillConv2DBuffer'
-
-        if node.get_attr('unscaled_h'):
-            params['scale_index_height_type'] = 'scale_index_unscaled'
-        else:
-            params['scale_index_height_type'] = 'scale_index_regular'
-
-        if node.get_attr('unscaled_w'):
-            params['scale_index_width_type'] = 'scale_index_unscaled'
-        else:
-            params['scale_index_width_type'] = 'scale_index_regular'
-        params['config_t'] = f'config{node.index}_pointwise_mult'
-        pointwise_config = self.pointwise_template.format(**params)
-
-        # Pointwise mult config
-        mult_params = self._default_config_params(node)
-        mult_params['index'] = str(node.index) + '_pointwise'
-        mult_params['n_in'] = node.get_attr('n_chan')
-        mult_params['n_out'] = node.get_attr('n_filt')
-        mult_params['nzeros'] = node.get_weights('pointwise').nzeros
-        mult_params['weight_t'] = node.get_weights('pointwise').type
-        mult_params['product_type'] = get_backend('vivado').product_type(
-            node.get_input_variable().type.precision, node.get_weights('pointwise').type.precision
-        )
-        pointwise_mult_config = self.pointwise_mult_template.format(**mult_params)
-
-        return (
-            depthwise_mult_config
-            + '\n'
-            + depthwise_config
-            + '\n'
-            + pointwise_mult_config
-            + '\n'
-            + pointwise_config
-            + '\n'
-            + sep_config
-        )
-
-
-class SeparableConv2DFunctionTemplate(FunctionCallTemplate):
-    def __init__(self):
-        super().__init__(SeparableConv2D, include_header=sepconv2d_include_list)
-        self.template = sepconv2d_function_template
-
-    def format(self, node):
-        params = self._default_function_params(node)
-        params['dw_output_t'] = node.get_attr('dw_output_t').name
-        params['data_format'] = 'cf' if node.get_attr('data_format') == 'channels_first' else 'cl'
-        params['d'] = node.get_weights('depthwise').name
-        params['p'] = node.get_weights('pointwise').name
-        params['b'] = node.get_weights('bias').name
-        params['z'] = node.get_weights('zero_bias').name
-
-        return self.template.format(**params)
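# --- Illustration (editor's sketch, not part of the patch) --------------------
# The separable templates above emit five concatenated C++ structs per layer:
# depthwise mult, depthwise conv, pointwise mult, pointwise conv, and a master
# config that typedefs the two halves. The naming scheme, sketched with a
# made-up layer index:

index = 42
print(f'config{index}_depthwise_mult / config{index}_depthwise')
print(f'config{index}_pointwise_mult / config{index}_pointwise')
print(f'config{index} typedefs config{index}_depthwise and config{index}_pointwise')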
diff --git a/hls4ml/hls4ml/backends/vivado/passes/core_templates.py b/hls4ml/hls4ml/backends/vivado/passes/core_templates.py
deleted file mode 100644
index c8119c0..0000000
--- a/hls4ml/hls4ml/backends/vivado/passes/core_templates.py
+++ /dev/null
@@ -1,213 +0,0 @@
-from hls4ml.backends.backend import get_backend
-from hls4ml.backends.template import FunctionCallTemplate, LayerConfigTemplate
-from hls4ml.model.layers import Activation, BatchNormalization, Dense, HardActivation, ParametrizedActivation, PReLU, Softmax
-
-# Dense templates
-
-dense_config_template = """struct config{index} : nnet::dense_config {{
-    static const unsigned n_in = {n_in};
-    static const unsigned n_out = {n_out};
-    static const unsigned io_type = nnet::{iotype};
-    static const unsigned strategy = nnet::{strategy};
-    static const unsigned reuse_factor = {reuse};
-    static const unsigned n_zeros = {nzeros};
-    static const unsigned n_nonzeros = {nonzeros};
-    static const unsigned multiplier_limit = DIV_ROUNDUP(n_in * n_out, reuse_factor) - n_zeros / reuse_factor;
-    static const bool store_weights_in_bram = false;
-    typedef {accum_t.name} accum_t;
-    typedef {bias_t.name} bias_t;
-    typedef {weight_t.name} weight_t;
-    typedef {index_t.name} index_t;
-    template<class x_T, class y_T>
-    using product = nnet::product::{product_type}<x_T, y_T>;
-}};\n"""
-
-dense_function_template = 'nnet::dense<{input_t}, {output_t}, {config}>({input}, {output}, {w}, {b});'
-
-dense_include_list = ['nnet_utils/nnet_dense.h', 'nnet_utils/nnet_dense_compressed.h', 'nnet_utils/nnet_dense_stream.h']
-
-
-class DenseConfigTemplate(LayerConfigTemplate):
-    def __init__(self):
-        super().__init__(Dense)
-        self.template = dense_config_template
-
-    def format(self, node):
-        params = self._default_config_params(node)
-        params['nzeros'] = node.get_weights('weight').nzeros
-        params['nonzeros'] = node.get_weights('weight').nonzeros
-        params['product_type'] = get_backend('vivado').product_type(
-            node.get_input_variable().type.precision, node.get_weights('weight').type.precision
-        )
-
-        return self.template.format(**params)
-
-
-class DenseFunctionTemplate(FunctionCallTemplate):
-    def __init__(self):
-        super().__init__(Dense, include_header=dense_include_list)
-        self.template = dense_function_template
-
-    def format(self, node):
-        params = self._default_function_params(node)
-        params['w'] = node.get_weights('weight').name
-        params['b'] = node.get_weights('bias').name
-
-        return self.template.format(**params)
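# --- Illustration (editor's sketch, not part of the patch) --------------------
# dense_config_template tracks both n_zeros and n_nonzeros of the weight
# matrix; for a pruned Dense layer they must add up to n_in * n_out.
# A quick self-consistency check with an invented 16x8 layer pruned to
# 96 zero weights:

n_in, n_out = 16, 8
n_zeros = 96
n_nonzeros = n_in * n_out - n_zeros
print(n_nonzeros)  # 32 weights actually feed multipliers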
-
-
-# BatchNormalization templates
-
-batchnorm_config_template = """struct config{index} : nnet::batchnorm_config {{
-    static const unsigned n_in = {n_in};
-    static const unsigned n_filt = {n_filt};
-    static const unsigned n_scale_bias = (n_filt == -1) ? n_in : n_filt;
-    static const unsigned io_type = nnet::{iotype};
-    static const unsigned reuse_factor = {reuse};
-    static const unsigned multiplier_limit = DIV_ROUNDUP(n_in, reuse_factor);
-    static const bool store_weights_in_bram = false;
-    typedef {bias_t.name} bias_t;
-    typedef {scale_t.name} scale_t;
-    template<class x_T, class y_T>
-    using product = nnet::product::{product_type}<x_T, y_T>;
-}};\n"""
-
-batchnorm_function_template = 'nnet::normalize<{input_t}, {output_t}, {config}>({input}, {output}, {scale}, {bias});'
-
-batchnorm_include_list = ['nnet_utils/nnet_batchnorm.h', 'nnet_utils/nnet_batchnorm_stream.h']
-
-
-class BatchNormalizationConfigTemplate(LayerConfigTemplate):
-    def __init__(self):
-        super().__init__(BatchNormalization)
-        self.template = batchnorm_config_template
-
-    def format(self, node):
-        params = self._default_config_params(node)
-        params['n_in'] = node.get_input_variable().size_cpp()
-        params['product_type'] = get_backend('vivado').product_type(
-            node.get_input_variable().type.precision, node.get_weights('scale').type.precision
-        )
-
-        return self.template.format(**params)
-
-
-class BatchNormalizationFunctionTemplate(FunctionCallTemplate):
-    def __init__(self):
-        super().__init__(BatchNormalization, include_header=batchnorm_include_list)
-        self.template = batchnorm_function_template
-
-    def format(self, node):
-        params = self._default_function_params(node)
-        params['scale'] = node.get_weights('scale').name
-        params['bias'] = node.get_weights('bias').name
-
-        return self.template.format(**params)
-
-
-# Activation templates
-
-activ_config_template = """struct {type}_config{index} : nnet::activ_config {{
-    static const unsigned n_in = {n_in};
-    static const unsigned table_size = {table_size};
-    static const unsigned io_type = nnet::{iotype};
-    static const unsigned reuse_factor = {reuse};
-    typedef {table_t.name} table_t;
-}};\n"""
-
-hard_activ_config_template = """struct {type}_config{index} {{
-    static const unsigned n_in = {n_in};
-    static const {slope_t.name} slope;
-    static const {shift_t.name} shift;
-    static const unsigned io_type = nnet::{iotype};
-    static const unsigned reuse_factor = {reuse};
-}};
-const {slope_t.name} {type}_config{index}::slope = {slope};
-const {shift_t.name} {type}_config{index}::shift = {shift};\n"""
-
-softmax_config_template = """struct {type}_config{index} : nnet::activ_config {{
-    static const unsigned n_in = {n_in};
-    static const unsigned table_size = {table_size};
-    static const unsigned io_type = nnet::{iotype};
-    static const unsigned reuse_factor = {reuse};
-    static const unsigned axis = {axis};
-    static const nnet::softmax_implementation implementation = nnet::softmax_implementation::{implementation};
-    typedef {exp_table_t.name} exp_table_t;
-    typedef {inv_table_t.name} inv_table_t;
-}};\n"""
-
-activ_function_template = 'nnet::{activation}<{input_t}, {output_t}, {config}>({input}, {output});'
-param_activ_function_template = 'nnet::{activation}<{input_t}, {output_t}, {config}>({input}, {param}, {output});'
-
-activ_include_list = ['nnet_utils/nnet_activation.h', 'nnet_utils/nnet_activation_stream.h']
-
-
-class ActivationConfigTemplate(LayerConfigTemplate):
-    def __init__(self):
-        super().__init__((Activation, ParametrizedActivation, PReLU))
-        self.template = activ_config_template
-
-    def format(self, node):
-        params = self._default_config_params(node)
-        params['type'] = node.get_attr('activation')
-
-        return self.template.format(**params)
-
-
-class HardActivationConfigTemplate(LayerConfigTemplate):
-    def __init__(self):
-        super().__init__(HardActivation)
-        self.template = hard_activ_config_template
-
-    def format(self, node):
-        params = self._default_config_params(node)
-        params['type'] = node.get_attr('activation')
-
-        return self.template.format(**params)
-
-
-class SoftmaxConfigTemplate(ActivationConfigTemplate):
-    def __init__(self):
-        super(ActivationConfigTemplate, self).__init__(Softmax)  # Skip ActivationConfigTemplate's __init__
-        self.template = softmax_config_template
-
-
-class ActivationFunctionTemplate(FunctionCallTemplate):
-    def __init__(self):
-        super().__init__((Activation, HardActivation, Softmax), include_header=activ_include_list)
-        self.template = activ_function_template
-
-    def format(self, node):
-        params = self._default_function_params(node)
-        params['activation'] = node.get_attr('activation').lower()
-        params['config'] = '{}_config{}'.format(node.get_attr('activation'), node.index)
-
-        return self.template.format(**params)
-
-
-class ParametrizedActivationFunctionTemplate(FunctionCallTemplate):
-    def __init__(self):
-        super().__init__(ParametrizedActivation, include_header=activ_include_list)
-        self.template = param_activ_function_template
-
-    def format(self, node):
-        params = self._default_function_params(node)
-        params['activation'] = node._get_act_function_name()
-        params['param'] = node.get_attr('activ_param', 1.0)
-        params['config'] = '{}_config{}'.format(node.get_attr('activation'), node.index)
-
-        return self.template.format(**params)
-
-
-class PReLUFunctionTemplate(FunctionCallTemplate):
-    def __init__(self):
-        super().__init__(PReLU, include_header=activ_include_list)
-        self.template = param_activ_function_template
-
-    def format(self, node):
-        params = self._default_function_params(node)
-        params['activation'] = node.get_attr('activation').lower()
-        params['param'] = node.get_weights('alpha').name
-        params['config'] = '{}_config{}'.format(node.get_attr('activation'), node.index)
-
-        return self.template.format(**params)
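# --- Illustration (editor's sketch, not part of the patch) --------------------
# Unlike most layers, the activation configs above are named
# '{activation}_config{index}' rather than plain 'config{index}'. The naming
# step in isolation, with made-up values:

activation, index = 'softmax', 5
print('{}_config{}'.format(activation, index))  # 'softmax_config5'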
diff --git a/hls4ml/hls4ml/backends/vivado/passes/fifo_depth_optimization.py b/hls4ml/hls4ml/backends/vivado/passes/fifo_depth_optimization.py
deleted file mode 100644
index 4d92e98..0000000
--- a/hls4ml/hls4ml/backends/vivado/passes/fifo_depth_optimization.py
+++ /dev/null
@@ -1,104 +0,0 @@
-import json
-
-from pyDigitalWaveTools.vcd.parser import VcdParser
-
-from hls4ml.model.optimizer.optimizer import ConfigurableOptimizerPass, ModelOptimizerPass
-
-
-def populate_values(values, name, data, depth):
-    def get_values(x):
-        return int(x[1][1:], 2)
-
-    values.append({'name': name, 'data': [], 'max': 0, 'depth': 0})
-    values[-1]['data'] = [get_values(x) for x in data]
-    values[-1]['max'] = max(values[-1]['data'])
-    values[-1]['depth'] = int(depth[0][1][1:], 2)
-    return values
-
-
-def set_big_fifos(vars_to_profile, profiling_fifo_depth):
-    for v in vars_to_profile.values():
-        if v.pragma:
-            v.pragma = (v.pragma[0], profiling_fifo_depth)
-
-
-def get_vcd_data(model):
-    model.write()
-    model.build(reset=False, csim=True, synth=True, cosim=True, validation=False, export=False, vsynth=False, fifo_opt=True)
-
-    with open(
-        model.config.get_output_dir()
-        + '/'
-        + model.config.get_project_name()
-        + '_prj'
-        + '/solution1/sim/verilog/fifo_opt.vcd'
-    ) as vcd_file:
-        vcd = VcdParser()
-        vcd.parse(vcd_file)
-        data = vcd.scope.toJson()
-    return data
-
-
-def generate_max_depth_file(model, maxs):
-    with open(model.config.get_output_dir() + '/max_depth.json', 'w') as f:
-        json.dump(maxs, f, indent=4)
-
-
-def set_fifo_depth(model, maxs):
-    for v in model.output_vars.values():
-        if v.pragma:
-            filtered_max = [x['max'] for x in maxs if v.name in x['name']]
-            if len(filtered_max) == 0:
-                continue
-            if len(filtered_max) > 1:
-                print('WARNING! Check names of FIFOs')
-            v.pragma = (v.pragma[0], filtered_max[0] + 1)
-
-
-class FifoDepthOptimization(ConfigurableOptimizerPass, ModelOptimizerPass):
-    def __init__(self):
-        self.values = []
-
-    def transform(self, model):
-        # use `profiling_fifo_depth = 0` to keep the default fifo depth
-        profiling_fifo_depth = getattr(self, 'profiling_fifo_depth', 100_000)
-
-        # this optimization only applies to io_stream designs; exit otherwise
-        if not (model.config.get_config_value('IOType') == 'io_stream'):
-            raise RuntimeError('To use this optimization you have to set `IOType` field to `io_stream` in the HLS config')
-
-        # initialize all the FIFOs to `profiling_fifo_depth` so that they will be automatically implemented in BRAMs
-        # and so they will be profiled
-        if profiling_fifo_depth:
-            vars_to_profile = {
-                k: v
-                for k, v in model.output_vars.items()
-                if v != model.get_output_variables()[0] and v != model.get_input_variables()[0]
-            }
-
-            set_big_fifos(vars_to_profile, profiling_fifo_depth)
-
-        data = get_vcd_data(model)
-
-        if len(data['children']) == 0:
-            print(
-                "FIFO depth optimization found no FIFOs implemented using BRAMs in the design, no optimization is possible."
-            )
-            print("Consider increasing profiling_fifo_depth.")
-            return False
-
-        n_elem = len(data['children'][0]['children'][0]['children'])
-        for i in range(n_elem):
-            name = data['children'][0]['children'][0]['children'][i]['name']
-            data_p = data['children'][0]['children'][0]['children'][i]['children'][0]['data']
-            depth = data['children'][0]['children'][0]['children'][i]['children'][1]['data']
-            populate_values(self.values, name, data_p, depth)
-
-        maxs = [{'name': i['name'], 'max': i['max'], 'depth': i['depth']} for i in self.values]
-
-        generate_max_depth_file(model, maxs)
-
-        set_fifo_depth(model, maxs)
-
-        print('[hls4ml] - FIFO optimization completed')
-        return False
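# --- Illustration (editor's sketch, not part of the patch) --------------------
# populate_values() above decodes VCD value-change records: each entry is a
# (time, value) pair whose value is a binary string such as 'b101', so
# int(x[1][1:], 2) strips the leading 'b' and parses the rest. A standalone
# sketch with fabricated VCD-style occupancy data:

data = [(0, 'b0'), (10, 'b101'), (20, 'b11')]
occupancy = [int(x[1][1:], 2) for x in data]
print(max(occupancy))  # 5 -> set_fifo_depth() would resize the FIFO to 5 + 1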
diff --git a/hls4ml/hls4ml/backends/vivado/passes/garnet_templates.py b/hls4ml/hls4ml/backends/vivado/passes/garnet_templates.py
deleted file mode 100644
index 4b968b0..0000000
--- a/hls4ml/hls4ml/backends/vivado/passes/garnet_templates.py
+++ /dev/null
@@ -1,249 +0,0 @@
-import numpy as np
-
-from hls4ml.backends.fpga.fpga_types import APTypeConverter
-from hls4ml.backends.template import FunctionCallTemplate, LayerConfigTemplate
-from hls4ml.model.layers import GarNet, GarNetStack
-from hls4ml.model.types import FixedPrecisionType
-
-# GarNet templates
-
-garnet_common_config_template = """
-    static const unsigned n_vertices = {n_vertices};
-    static const unsigned n_vertices_width = {n_vertices_width};
-    static const unsigned n_in_features = {n_in_features};
-    static const unsigned distance_width = {distance_width};
-    static const unsigned output_collapse = {collapse_type};
-    static const bool mean_by_nvert = {mean_by_nvert};
-
-    typedef {norm_t} norm_t;
-    typedef ap_fixed<{distance_width}, {distance_nint}, AP_TRN, AP_SAT> distance_t;
-    typedef {edge_weight_t} edge_weight_t;
-    typedef {edge_weight_aggr_t} edge_weight_aggr_t;
-    typedef {aggr_t} aggr_t;
-    typedef {output_t} output_t;
-
-    static const unsigned reuse_factor = {reuse};
-    static const unsigned log2_reuse_factor = {log2_reuse};
-"""
-
-garnet_config_template = """struct config{index} : nnet::garnet_config {{"""
-garnet_config_template += garnet_common_config_template
-garnet_config_template += """
-    static const unsigned n_propagate = {n_propagate};
-    static const unsigned n_aggregators = {n_aggregators};
-    static const unsigned n_out_features = {n_out_features};
-
-    typedef {input_transform_weights_t} input_transform_weights_t;
-    typedef {input_transform_biases_t} input_transform_biases_t;
-    typedef {aggregator_distance_weights_t} aggregator_distance_weights_t;
-    typedef {aggregator_distance_biases_t} aggregator_distance_biases_t;
-    typedef {output_transform_weights_t} output_transform_weights_t;
-    typedef {output_transform_biases_t} output_transform_biases_t;
-
-    static const input_transform_weights_t (&input_transform_weights)[{input_transform_weights_size}];
-    static const input_transform_biases_t (&input_transform_biases)[{input_transform_biases_size}];
-    static const aggregator_distance_weights_t (&aggregator_distance_weights)[{aggregator_distance_weights_size}];
-    static const aggregator_distance_biases_t (&aggregator_distance_biases)[{aggregator_distance_biases_size}];
-    static const output_transform_weights_t (&output_transform_weights)[{output_transform_weights_size}];
-    static const output_transform_biases_t (&output_transform_biases)[{output_transform_biases_size}];
-
-    typedef config{index} base_t;
-}};
-
-const config{index}::input_transform_weights_t (&config{index}::input_transform_weights)[{input_transform_weights_size}] = {input_transform_weights};
-const config{index}::input_transform_biases_t (&config{index}::input_transform_biases)[{input_transform_biases_size}] = {input_transform_biases};
-const config{index}::aggregator_distance_weights_t (&config{index}::aggregator_distance_weights)[{aggregator_distance_weights_size}] = {aggregator_distance_weights};
-const config{index}::aggregator_distance_biases_t (&config{index}::aggregator_distance_biases)[{aggregator_distance_biases_size}] = {aggregator_distance_biases};
-const config{index}::output_transform_weights_t (&config{index}::output_transform_weights)[{output_transform_weights_size}] = {output_transform_weights};
-const config{index}::output_transform_biases_t (&config{index}::output_transform_biases)[{output_transform_biases_size}] = {output_transform_biases};
-"""  # noqa: E501
-
-garnet_function_template = (
-    'nnet::garnet{impl}<{input_t}, {integer_input_t}, {output_t}, {config}>({input}, {nvtx}, {output});'
-)
-
-garnet_include_list = ['nnet_utils/nnet_garnet.h']
-
-
-class GarNetConfigTemplate(LayerConfigTemplate):
-    def __init__(self):
-        super().__init__(GarNet)
-        self.template = (garnet_config_template,)
-
-    def get_transforms_config(self, node, params):
-        params['n_in_features'] = node.attributes['n_in_features']
-        params['n_propagate'] = node.attributes['n_propagate']
-        params['n_aggregators'] = node.get_weights('aggregator_distance_biases').shape[0]
-        params['n_out_features'] = node.get_weights('output_transform_biases').shape[0]
-
-        for wname, weights in node.weights.items():
-            params[wname] = weights.name
-            params[f'{wname}_t'] = weights.type.name
-            params[f'{wname}_size'] = weights.data_length
-
-    def format(self, node):
-        params = self._default_config_params(node)
-
-        params['n_vertices'] = node.attributes['n_vertices']
-        params['n_vertices_width'] = int(np.log2(params['n_vertices']))
-        params['distance_width'] = 12
-        params['distance_nint'] = min(4, params['distance_width'] - 6)  # this is tuned
-        params['log2_reuse'] = int(np.log2(params['reuse']))
-
-        # Define default precisions for various internal arrays (can be overridden from the config file)
-        # We always give 10 digits for the subintegral part
-        fwidth = 10
-        # Integral precision for aggr_t depends on how large the temporary sum for weighted feature mean will be
-        aggr_intw = max(params['log2_reuse'], params['n_vertices_width'] - params['log2_reuse']) + 3  # safety factor 2**3
-        aggr_w = aggr_intw + fwidth
-        # edge_weight_aggr_t does not need the safety factor
-        ew_aggr_intw = aggr_intw - 3
-        ew_aggr_w = ew_aggr_intw + fwidth
-        # Integral precision for norm is fixed to 4
-        norm_intw = 4
-        norm_w = norm_intw + fwidth
-
-        vspecs = [
-            ('edge_weight', FixedPrecisionType(10, 0, signed=False)),
-            ('edge_weight_aggr', FixedPrecisionType(ew_aggr_w, ew_aggr_intw, signed=False)),
-            ('aggr', FixedPrecisionType(aggr_w, aggr_intw)),
-            ('norm', FixedPrecisionType(norm_w, norm_intw, signed=False)),
-        ]
-        precision_converter = APTypeConverter()
-        for vname, default_precision in vspecs:
-            params[f'{vname}_t'], type_name = node.model.config.get_precision(node, var=vname)
-            if type_name.endswith('default_t'):
-                params[f'{vname}_t'] = precision_converter.convert(default_precision).definition_cpp()
-            else:
-                params[f'{vname}_t'] = precision_converter.convert(params[f'{vname}_t']).definition_cpp()
-        params['output_t'] = node.get_output_variable().type.name
-
-        if node.attributes['collapse'] in ['mean', 'max']:
-            params['collapse_type'] = 'collapse_{}'.format(node.attributes['collapse'])
-        else:
-            params['collapse_type'] = 'no_collapse'
-
-        params['mean_by_nvert'] = str(node.attributes['mean_by_nvert']).lower()
-
-        self.get_transforms_config(node, params)
-
-        return self.template[0].format(**params)
-
-
-class GarNetFunctionTemplate(FunctionCallTemplate):
-    def __init__(self):
-        super().__init__(GarNet, include_header=garnet_include_list)
-        self.template = garnet_function_template
-
-    def format(self, node):
-        params = self._default_function_params(node)
-
-        data = node.get_input_variable(node.inputs[0])
-        integer_input = node.get_input_variable(node.inputs[1])
-        params['input_t'] = data.type.name
-        params['input'] = data.name
-
-        params['integer_input_t'] = integer_input.type.name
-        params['nvtx'] = integer_input.name
-
-        if node.ref_impl:
-            params['impl'] = '_ref'
-        else:
-            params['impl'] = ''
-
-        return self.template.format(**params)
-
-
-# GarNetStack Templates
-
-garnet_stack_base_config_template = """struct config{index}_base : nnet::garnet_config {{"""
-garnet_stack_base_config_template += garnet_common_config_template
-garnet_stack_base_config_template += """
-    static const bool is_stack = true;
-
-    typedef config{index}_base base_t;
-}};
-
-struct config{index} : config{index}_base {{
-    static const unsigned n_sublayers = {n_sublayers};
-
-    template<int L>
-    struct sublayer_t : config{index}_base {{}};
-}};
-
-{sublayer_configs}
-"""
-
-garnet_stack_sublayer_config_template = """template<>
-struct config{index}::sublayer_t<{il}> : config{index}_base {{
-    static const unsigned n_in_features = {n_in_features};
-    static const unsigned n_propagate = {n_propagate};
-    static const unsigned n_aggregators = {n_aggregators};
-    static const unsigned n_out_features = {n_out_features};
-
-    typedef {input_transform_weights_t} input_transform_weights_t;
-    typedef {input_transform_biases_t} input_transform_biases_t;
-    typedef {aggregator_distance_weights_t} aggregator_distance_weights_t;
-    typedef {aggregator_distance_biases_t} aggregator_distance_biases_t;
-    typedef {output_transform_biases_t} output_transform_biases_t;
-
-    static const input_transform_weights_t (&input_transform_weights)[{input_transform_weights_size}];
-    static const input_transform_biases_t (&input_transform_biases)[{input_transform_biases_size}];
-    static const aggregator_distance_weights_t (&aggregator_distance_weights)[{aggregator_distance_weights_size}];
-    static const aggregator_distance_biases_t (&aggregator_distance_biases)[{aggregator_distance_biases_size}];
-    static const output_transform_biases_t (&output_transform_biases)[{output_transform_biases_size}];
-
-    typedef config{index}::sublayer_t<{next}> next_layer_t;
-}};
-
-const config{index}::sublayer_t<{il}>::input_transform_weights_t (&config{index}::sublayer_t<{il}>::input_transform_weights)[{input_transform_weights_size}] = {input_transform_weights};
-const config{index}::sublayer_t<{il}>::input_transform_biases_t (&config{index}::sublayer_t<{il}>::input_transform_biases)[{input_transform_biases_size}] = {input_transform_biases};
-const config{index}::sublayer_t<{il}>::aggregator_distance_weights_t (&config{index}::sublayer_t<{il}>::aggregator_distance_weights)[{aggregator_distance_weights_size}] = {aggregator_distance_weights};
-const config{index}::sublayer_t<{il}>::aggregator_distance_biases_t (&config{index}::sublayer_t<{il}>::aggregator_distance_biases)[{aggregator_distance_biases_size}] = {aggregator_distance_biases};
-const config{index}::sublayer_t<{il}>::output_transform_biases_t (&config{index}::sublayer_t<{il}>::output_transform_biases)[{output_transform_biases_size}] = {output_transform_biases};
-"""  # noqa: E501
-
-garnet_stack_config_template = (garnet_stack_base_config_template, garnet_stack_sublayer_config_template)
-garnet_stack_function_template = (
-    'nnet::garnet_stack<{input_t}, {integer_input_t}, {output_t}, {config}>({input}, {nvtx}, {output});'
-)
-
-
-class GarNetStackConfigTemplate(GarNetConfigTemplate):
-    def __init__(self):
-        super(GarNetConfigTemplate, self).__init__(GarNetStack)
-        self.template = garnet_stack_config_template
-
-    def get_transforms_config(self, node, params):
-        _, sublayer_template = self.template
-
-        params['n_sublayers'] = node.attributes['n_sublayers']
-        params['n_in_features'] = node.attributes['n_in_features'][0]
-        params['n_out_features'] = node.attributes['n_out_features'][-1]
-
-        sublayer_configs = []
-        for il in range(node.attributes['n_sublayers'] - 1, -1, -1):
-            sub_params = {'index': node.index, 'il': il}
-
-            for p in ['n_in_features', 'n_propagate', 'n_aggregators', 'n_out_features']:
-                sub_params[p] = node.attributes[p][il]
-
-            for wname, weights in node._sublayer_weights[il].items():
-                sub_params[wname] = weights.name
-                sub_params[f'{wname}_t'] = weights.type.name
-                sub_params[f'{wname}_size'] = weights.data_length
-
-            if il != node.attributes['n_sublayers'] - 1:
-                sub_params['next'] = il + 1
-            else:
-                sub_params['next'] = 0
-
-            sublayer_configs.append(sublayer_template.format(**sub_params))
-
-        params['sublayer_configs'] = '\n'.join(sublayer_configs)
-
-
-class GarNetStackFunctionTemplate(GarNetFunctionTemplate):
-    def __init__(self):
-        super(GarNetFunctionTemplate, self).__init__(GarNetStack, include_header=garnet_include_list)
-        self.template = garnet_stack_function_template
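# --- Illustration (editor's sketch, not part of the patch) --------------------
# The GarNet precision bookkeeping above derives the aggregator bit widths
# from the vertex count and reuse factor. The same arithmetic standalone,
# for an invented layer with 128 vertices and reuse factor 4:

import math

n_vertices, reuse = 128, 4
n_vertices_width = int(math.log2(n_vertices))  # 7
log2_reuse = int(math.log2(reuse))             # 2
fwidth = 10                                    # fractional bits, fixed
aggr_intw = max(log2_reuse, n_vertices_width - log2_reuse) + 3  # 8, incl. 2**3 safety
aggr_w = aggr_intw + fwidth
print(aggr_w, aggr_intw)                       # 18 8 -> an ap_fixed<18, 8> aggr_t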
diff --git a/hls4ml/hls4ml/backends/vivado/passes/merge_templates.py b/hls4ml/hls4ml/backends/vivado/passes/merge_templates.py
deleted file mode 100644
index 078e004..0000000
--- a/hls4ml/hls4ml/backends/vivado/passes/merge_templates.py
+++ /dev/null
@@ -1,106 +0,0 @@
-from hls4ml.backends.backend import get_backend
-from hls4ml.backends.template import FunctionCallTemplate, LayerConfigTemplate
-from hls4ml.model.layers import Concatenate, Dot, Merge
-
-# Merge templates
-
-merge_config_template = """struct config{index} : nnet::merge_config {{
-    static const unsigned n_elem = {n_elem};
-}};\n"""
-
-merge_function_template = 'nnet::{merge}<{input1_t}, {input2_t}, {output_t}, {config}>({input1}, {input2}, {output});'
-
-merge_include_list = ['nnet_utils/nnet_merge.h', 'nnet_utils/nnet_merge_stream.h']
-
-
-class MergeConfigTemplate(LayerConfigTemplate):
-    def __init__(self):
-        super().__init__(Merge)
-        self.template = merge_config_template
-
-    def format(self, node):
-        params = self._default_config_params(node)
-        params['n_elem'] = node.get_input_variable(node.inputs[0]).size_cpp()
-
-        return self.template.format(**params)
-
-
-class MergeFunctionTemplate(FunctionCallTemplate):
-    def __init__(self):
-        super().__init__((Merge, Concatenate, Dot), include_header=merge_include_list)
-        self.template = merge_function_template
-
-    def format(self, node):
-        params = {}
-        params['merge'] = node.get_attr('op').lower()
-        params['config'] = f'config{node.index}'
-        params['input1_t'] = node.get_input_variable(node.inputs[0]).type.name
-        params['input2_t'] = node.get_input_variable(node.inputs[1]).type.name
-        params['output_t'] = node.get_output_variable().type.name
-        params['input1'] = node.get_input_variable(node.inputs[0]).name
-        params['input2'] = node.get_input_variable(node.inputs[1]).name
-        params['output'] = node.get_output_variable().name
-
-        return self.template.format(**params)
-
-
-# Dot templates
-
-dot_config_template = """struct config{index} : nnet::dot_config {{
-    static const unsigned n_in = {n_in};
-    static const unsigned n_out = {n_out};
-    static const unsigned reuse_factor = {reuse};
-    static const unsigned multiplier_limit = DIV_ROUNDUP(n_in, reuse_factor);
-    typedef {accum_t.name} accum_t;
-    template<class x_T, class y_T>
-    using product = nnet::product::{product_type}<x_T, y_T>;
-}};\n"""
-
-
-class DotConfigTemplate(LayerConfigTemplate):
-    def __init__(self):
-        super().__init__(Dot)
-        self.template = dot_config_template
-
-    def format(self, node):
-        inp1 = node.get_input_variable(node.inputs[0])
-        inp2 = node.get_input_variable(node.inputs[1])
-        params = self._default_config_params(node)
-        params['n_out'] = 1
-        params['n_in'] = inp1.shape[0]
-        params['product_type'] = get_backend('vivado').product_type(inp1.type.precision, inp2.type.precision)
-
-        return self.template.format(**params)
-
-
-# Concatenate templates
-
-concat_config_template = """struct config{index} : nnet::concat_config {{
-    static const unsigned n_elem1_0 = {n_elem1_0};
-    static const unsigned n_elem1_1 = {n_elem1_1};
-    static const unsigned n_elem1_2 = {n_elem1_2};
-    static const unsigned n_elem2_0 = {n_elem2_0};
-    static const unsigned n_elem2_1 = {n_elem2_1};
-    static const unsigned n_elem2_2 = {n_elem2_2};
-
-    static const int axis = {axis};
-}};\n"""
-
-
-class ConcatenateConfigTemplate(LayerConfigTemplate):
-    def __init__(self):
-        super().__init__(Concatenate)
-        self.template = concat_config_template
-
-    def format(self, node):
-        params = self._default_config_params(node)
-        for i in range(3):
-            params.setdefault(f'n_elem1_{i}', 0)
-            params.setdefault(f'n_elem2_{i}', 0)
-        inp1 = node.get_input_variable(node.inputs[0])
-        inp2 = node.get_input_variable(node.inputs[1])
-        for i, (s1, s2) in enumerate(zip(inp1.shape, inp2.shape)):
-            params[f'n_elem1_{i}'] = s1
-            params[f'n_elem2_{i}'] = s2
-
-        return self.template.format(**params)
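# --- Illustration (editor's sketch, not part of the patch) --------------------
# ConcatenateConfigTemplate.format() above zero-fills up to three dims per
# input, then overwrites the ones that exist. The dict logic in isolation,
# for a made-up concat of a (10, 4) and a (10, 6) tensor:

params = {}
for i in range(3):
    params.setdefault(f'n_elem1_{i}', 0)
    params.setdefault(f'n_elem2_{i}', 0)
for i, (s1, s2) in enumerate(zip((10, 4), (10, 6))):
    params[f'n_elem1_{i}'] = s1
    params[f'n_elem2_{i}'] = s2
print(params)  # n_elem1_2 and n_elem2_2 stay 0 for the unused third dim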
a/hls4ml/hls4ml/backends/vivado/passes/pointwise.py +++ /dev/null @@ -1,92 +0,0 @@ -from copy import copy - -import numpy as np - -from hls4ml.backends.fpga.fpga_layers import PointwiseConv1D, PointwiseConv2D -from hls4ml.backends.vivado.passes.convolution_templates import ( - Conv1DConfigTemplate, - Conv1DFunctionTemplate, - Conv2DConfigTemplate, - Conv2DFunctionTemplate, - conv1d_config_template, - conv2d_config_template, - conv_mult_config_template, -) -from hls4ml.model.layers import register_layer -from hls4ml.model.optimizer import OptimizerPass - -pointwise_conv1d_function_template = ( - 'nnet::pointwise_conv_1d_{data_format}<{input_t}, {output_t}, {config}>({input}, {output}, {w}, {b});' -) -pointwise_conv2d_function_template = ( - 'nnet::pointwise_conv_2d_{data_format}<{input_t}, {output_t}, {config}>({input}, {output}, {w}, {b});' -) - -sepconv1d_include_list = ['nnet_utils/nnet_conv1d.h', 'nnet_utils/nnet_sepconv1d_stream.h'] -sepconv2d_include_list = ['nnet_utils/nnet_conv2d.h', 'nnet_utils/nnet_sepconv2d_stream.h'] - - -class PointwiseConv1DConfigTemplate(Conv1DConfigTemplate): - def __init__(self): - super(Conv1DConfigTemplate, self).__init__(PointwiseConv1D) - self.template = conv1d_config_template - self.mult_template = conv_mult_config_template - - -class PointwiseConv1DFunctionTemplate(Conv1DFunctionTemplate): - def __init__(self): - super(Conv1DFunctionTemplate, self).__init__(PointwiseConv1D, include_header=sepconv1d_include_list) - self.template = pointwise_conv1d_function_template - - -class PointwiseConv2DConfigTemplate(Conv2DConfigTemplate): - def __init__(self): - super(Conv2DConfigTemplate, self).__init__(PointwiseConv2D) - self.template = conv2d_config_template - self.mult_template = conv_mult_config_template - - -class PointwiseConv2DFunctionTemplate(Conv2DFunctionTemplate): - def __init__(self): - super(Conv2DFunctionTemplate, self).__init__(PointwiseConv2D, include_header=sepconv2d_include_list) - self.template = pointwise_conv2d_function_template - - -def register_pointwise(backend): - # Register the layer types to the layer map - register_layer('PointwiseConv1D', PointwiseConv1D) - register_layer('PointwiseConv2D', PointwiseConv2D) - - # Register the optimization passes - backend.register_pass('optimize_pointwise_conv', OptimizePointwiseConv) - - # Register template passes - backend.register_template(PointwiseConv1DConfigTemplate) - backend.register_template(PointwiseConv1DFunctionTemplate) - backend.register_template(PointwiseConv2DConfigTemplate) - backend.register_template(PointwiseConv2DFunctionTemplate) - - -class OptimizePointwiseConv(OptimizerPass): - def match(self, node): - return ( - node.class_name in ('Conv1D', 'Conv2D') - and node.get_attr('filt_height', 1) == 1 - and node.get_attr('filt_width') == 1 - ) - - def transform(self, model, node): - dim = node.__class__.__name__[-2:] # '1D' or '2D' - pw_node = model.make_node('PointwiseConv' + dim, node.name, copy(node.attributes), node.inputs.copy()) - if len(node.weights['weight'].data.shape) == 2: # This can happen if we assign weights of Dense layer to 1x1 Conv2D - expand_axis = tuple(range(int(dim[0]))) - pw_node.weights['weight'].data = np.expand_dims(node.weights['weight'].data, axis=expand_axis) - pw_node.weights['bias'].data = node.weights['bias'].data - # Set strategy to ensure lowercase string is passed to the template - if model.config.is_resource_strategy(pw_node): - pw_node.set_attr('strategy', 'resource') - else: - pw_node.set_attr('strategy', 'latency') - model.replace_node(node, 
pw_node) - - return True diff --git a/hls4ml/hls4ml/backends/vivado/passes/pooling_templates.py b/hls4ml/hls4ml/backends/vivado/passes/pooling_templates.py deleted file mode 100644 index 77205a5..0000000 --- a/hls4ml/hls4ml/backends/vivado/passes/pooling_templates.py +++ /dev/null @@ -1,109 +0,0 @@ -from hls4ml.backends.template import FunctionCallTemplate, LayerConfigTemplate -from hls4ml.model.layers import GlobalPooling1D, GlobalPooling2D, Pooling1D, Pooling2D - -# Pooling templates - -pooling1d_config_template = """struct config{index} : nnet::pooling1d_config {{ - static const unsigned n_in = {n_in}; - static const unsigned n_out = {n_out}; - static const unsigned n_filt = {n_filt}; - static const unsigned pool_width = {pool_width}; - - static const unsigned filt_width = pool_width; - static const unsigned n_chan = n_filt; - - static const unsigned pad_left = {pad_left}; - static const unsigned pad_right = {pad_right}; - static const bool count_pad = {count_pad}; - static const unsigned stride_width = {stride_width}; - static const nnet::Pool_Op pool_op = nnet::{pool_op}; - static const nnet::conv_implementation implementation = nnet::conv_implementation::{implementation}; - static const unsigned reuse_factor = {reuse}; - typedef {accum_t.name} accum_t; -}};\n""" - -pooling2d_config_template = """struct config{index} : nnet::pooling2d_config {{ - static const unsigned in_height = {in_height}; - static const unsigned in_width = {in_width}; - static const unsigned n_filt = {n_filt}; - static const unsigned stride_height = {stride_height}; - static const unsigned stride_width = {stride_width}; - static const unsigned pool_height = {pool_height}; - static const unsigned pool_width = {pool_width}; - - static const unsigned filt_height = pool_height; - static const unsigned filt_width = pool_width; - static const unsigned n_chan = n_filt; - - static const unsigned out_height = {out_height}; - static const unsigned out_width = {out_width}; - static const unsigned pad_top = {pad_top}; - static const unsigned pad_bottom = {pad_bottom}; - static const unsigned pad_left = {pad_left}; - static const unsigned pad_right = {pad_right}; - static const bool count_pad = {count_pad}; - static const nnet::Pool_Op pool_op = nnet::{pool_op}; - static const nnet::conv_implementation implementation = nnet::conv_implementation::{implementation}; - static const unsigned reuse_factor = {reuse}; - typedef {accum_t.name} accum_t; -}};\n""" - -global_pooling1d_config_template = """struct config{index} : nnet::pooling1d_config {{ - static const unsigned n_in = {n_in}; - static const unsigned n_filt = {n_filt}; - static const nnet::Pool_Op pool_op = nnet::{pool_op}; - static const unsigned reuse_factor = {reuse}; - typedef {accum_t.name} accum_t; -}};\n""" - -global_pooling2d_config_template = """struct config{index} : nnet::pooling2d_config {{ - static const unsigned in_height = {in_height}; - static const unsigned in_width = {in_width}; - static const unsigned n_filt = {n_filt}; - static const nnet::Pool_Op pool_op = nnet::{pool_op}; - static const unsigned reuse_factor = {reuse}; - typedef {accum_t.name} accum_t; -}};\n""" - -pooling1d_function_template = 'nnet::pooling1d_{data_format}<{input_t}, {output_t}, {config}>({input}, {output});' -pooling2d_function_template = 'nnet::pooling2d_{data_format}<{input_t}, {output_t}, {config}>({input}, {output});' -global_pooling1d_function_template = ( - 'nnet::global_pooling1d_{data_format}<{input_t}, {output_t}, {config}>({input}, {output});' -) 
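# Each function template resolves to a single nnet:: call once format() fills in
# the node's parameters. A minimal sketch using pooling1d_function_template from
# above, with hypothetical type and variable names (input_t, result_t, config4, x, y):
#
#   >>> pooling1d_function_template.format(data_format='cl', input_t='input_t',
#   ...                                    output_t='result_t', config='config4',
#   ...                                    input='x', output='y')
#   'nnet::pooling1d_cl<input_t, result_t, config4>(x, y);'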
-global_pooling2d_function_template = ( - 'nnet::global_pooling2d_{data_format}<{input_t}, {output_t}, {config}>({input}, {output});' -) - -pooling_include_list = ['nnet_utils/nnet_pooling.h', 'nnet_utils/nnet_pooling_stream.h'] - - -class PoolingConfigTemplate(LayerConfigTemplate): - def __init__(self): - super().__init__((Pooling1D, Pooling2D, GlobalPooling1D, GlobalPooling2D)) - self.templates = { - 'Pooling1D': pooling1d_config_template, - 'Pooling2D': pooling2d_config_template, - 'GlobalPooling1D': global_pooling1d_config_template, - 'GlobalPooling2D': global_pooling2d_config_template, - } - - def format(self, node): - params = self._default_config_params(node) - return self.templates[node.class_name].format(**params) - - -class PoolingFunctionTemplate(FunctionCallTemplate): - def __init__(self): - super().__init__((Pooling1D, Pooling2D, GlobalPooling1D, GlobalPooling2D), include_header=pooling_include_list) - self.templates = { - 'Pooling1D': pooling1d_function_template, - 'Pooling2D': pooling2d_function_template, - 'GlobalPooling1D': global_pooling1d_function_template, - 'GlobalPooling2D': global_pooling2d_function_template, - } - - def format(self, node): - params = self._default_function_params(node) - params['data_format'] = 'cf' if node.get_attr('data_format') == 'channels_first' else 'cl' - - return self.templates[node.class_name].format(**params) diff --git a/hls4ml/hls4ml/backends/vivado/passes/quantization_templates.py b/hls4ml/hls4ml/backends/vivado/passes/quantization_templates.py deleted file mode 100644 index 2936523..0000000 --- a/hls4ml/hls4ml/backends/vivado/passes/quantization_templates.py +++ /dev/null @@ -1,36 +0,0 @@ -from hls4ml.backends.backend import get_backend -from hls4ml.backends.template import FunctionCallTemplate, LayerConfigTemplate -from hls4ml.backends.vivado.passes.core_templates import ( - batchnorm_config_template, - batchnorm_function_template, - batchnorm_include_list, -) -from hls4ml.model.optimizer.passes.qkeras import ApplyAlpha - - -class ApplyAlphaConfigTemplate(LayerConfigTemplate): - def __init__(self): - super().__init__(ApplyAlpha) - self.template = batchnorm_config_template - - def format(self, node): - params = self._default_config_params(node) - params['n_in'] = node.get_input_variable().size_cpp() - params['product_type'] = get_backend('vivado').product_type( - node.get_input_variable().type.precision, node.get_weights('scale').type.precision - ) - - return self.template.format(**params) - - -class ApplyAlphaFunctionTemplate(FunctionCallTemplate): - def __init__(self): - super().__init__(ApplyAlpha, include_header=batchnorm_include_list) - self.template = batchnorm_function_template - - def format(self, node): - params = self._default_function_params(node) - params['scale'] = node.get_weights('scale').name - params['bias'] = node.get_weights('bias').name - - return self.template.format(**params) diff --git a/hls4ml/hls4ml/backends/vivado/passes/recurrent_templates.py b/hls4ml/hls4ml/backends/vivado/passes/recurrent_templates.py deleted file mode 100644 index aae806b..0000000 --- a/hls4ml/hls4ml/backends/vivado/passes/recurrent_templates.py +++ /dev/null @@ -1,175 +0,0 @@ -from hls4ml.backends.backend import get_backend -from hls4ml.backends.template import FunctionCallTemplate, LayerConfigTemplate -from hls4ml.model.layers import GRU, LSTM - -# recurrent multiplication template - -recr_mult_config_template = """struct config{index} : nnet::dense_config {{ - static const unsigned n_in = {n_in}; - static const unsigned n_out = {n_out}; 
- static const unsigned strategy = nnet::{strategy}; - static const unsigned reuse_factor = {reuse}; - static const unsigned n_zeros = {nzeros}; - static const unsigned n_nonzeros = {nonzeros}; - static const unsigned multiplier_limit = DIV_ROUNDUP(n_in * n_out, reuse_factor) - n_zeros / reuse_factor; - static const bool store_weights_in_bram = false; - typedef {accum_t.name} accum_t; - typedef {bias_t.name} bias_t; - typedef {weight_t.name} weight_t; - typedef {index_t.name} index_t; - template - using product = nnet::product::{product_type}; -}};\n""" - -# activation templates - -activ_config_template = """struct {type}_config{index} : nnet::activ_config {{ - static const unsigned n_in = {n_in}; - static const unsigned table_size = {table_size}; - static const unsigned io_type = nnet::{iotype}; - static const unsigned reuse_factor = {reuse}; - typedef {table_t.name} table_t; -}};\n""" - -recr_activ_config_template = """struct {type}_config{index}_recr : nnet::activ_config {{ - static const unsigned n_in = {n_in}; - static const unsigned table_size = {table_size}; - static const unsigned io_type = nnet::{iotype}; - static const unsigned reuse_factor = {reuse}; - typedef {table_t.name} table_t; -}};\n""" - -# LSTM + GRU templates - -recr_config_template = """struct config{index} : nnet::{recr_type}_config {{ - typedef {accum_t.name} accum_t; - typedef {weight_t.name} weight_t; // Matrix - typedef {bias_t.name} bias_t; // Vector - typedef {config_mult_t1} mult_config1; - typedef {config_mult_t2} mult_config2; - typedef {recr_act_t} ACT_CONFIG_{RECR_TYPE}; - template - using activation_recr = nnet::activation::{recurrent_activation}; - typedef {act_t} ACT_CONFIG_T; - template - using activation = nnet::activation::{activation}; - static const unsigned n_in = {n_in}; - static const unsigned n_out = {n_out}; - static const unsigned n_state = {n_state}; - static const unsigned n_sequence = {n_sequence}; - static const unsigned n_sequence_out = {n_sequence_out}; - static const unsigned io_type = nnet::{strategy}; - static const unsigned reuse_factor = {reuse}; - static const bool store_weights_in_bram = false; - static const bool use_static = {static}; -}};\n""" - -recr_function_template = 'nnet::{recr_type}_stack<{input_t}, {output_t}, {config}>({input}, {output}, {w}, {wr}, {b}, {br});' - -recr_include_list = ['nnet_utils/nnet_recurrent.h'] - - -class RecurrentConfigTemplate(LayerConfigTemplate): - def __init__(self): - super().__init__((LSTM, GRU)) - self.template = recr_config_template - self.act_template = activ_config_template - self.recr_act_template = recr_activ_config_template - self.mult1_template = recr_mult_config_template - self.mult2_template = recr_mult_config_template - - def format(self, node): - params = self._default_config_params(node) - - params['n_in'] = node.get_input_variable().dim_names[1] - params['n_sequence'] = node.get_input_variable().dim_names[0] - if node.get_attr('return_sequences'): - params['n_sequence_out'] = node.get_output_variable().dim_names[0] - params['n_state'] = node.get_output_variable().dim_names[1] - params['n_out'] = node.get_output_variable().dim_names[1] - else: - params['n_sequence_out'] = 1 - params['n_state'] = node.get_output_variable().dim_names[0] - params['n_out'] = node.get_output_variable().dim_names[0] - params['config_mult_t1'] = f'config{node.index}_1' - params['config_mult_t2'] = f'config{node.index}_2' - params['recr_act_t'] = '{}_config{}_recr'.format(node.get_attr('recurrent_activation'), node.index) - params['act_t'] = 
'{}_config{}'.format(node.get_attr('activation'), node.index) - params['strategy'] = node.get_attr('strategy') - params['static'] = 'true' if node.attributes['static'] else 'false' - params['recr_type'] = node.class_name.lower() - params['RECR_TYPE'] = node.class_name - - if node.class_name == 'LSTM': - n_recr_mult = 4 - else: # GRU - n_recr_mult = 3 - - recr_config = self.template.format(**params) - - act_params = self._default_config_params(node) - recr_act_params = self._default_config_params(node) - - act_params['type'] = node.get_attr('activation') - recr_act_params['type'] = node.get_attr('recurrent_activation') - if node.get_attr('return_sequences'): - act_params['n_in'] = node.get_output_variable().dim_names[1] - recr_act_params['n_in'] = node.get_output_variable().dim_names[1] + ' * %i' % (n_recr_mult - 1) - else: - act_params['n_in'] = node.get_output_variable().dim_names[0] - recr_act_params['n_in'] = node.get_output_variable().dim_names[0] + ' * %i' % (n_recr_mult - 1) - - act_config = self.act_template.format(**act_params) - recr_act_config = self.recr_act_template.format(**recr_act_params) - - mult_params1 = self._default_config_params(node) - mult_params2 = self._default_config_params(node) - - mult_params1['n_in'] = node.get_input_variable().dim_names[1] - if node.get_attr('return_sequences'): - mult_params1['n_out'] = node.get_output_variable().dim_names[1] + ' * %i' % n_recr_mult - else: - mult_params1['n_out'] = node.get_output_variable().dim_names[0] + ' * %i' % n_recr_mult - mult_params1['product_type'] = get_backend('vivado').product_type( - node.get_input_variable().type.precision, node.get_weights('weight').type.precision - ) - mult_params1['reuse'] = params['reuse'] - mult_params1['index'] = str(node.index) + '_1' - mult_params1['nzeros'] = node.get_weights('weight').nzeros - mult_params1['nonzeros'] = node.get_weights('weight').nonzeros - if node.get_attr('return_sequences'): - mult_params2['n_in'] = node.get_output_variable().dim_names[1] - mult_params2['n_out'] = node.get_output_variable().dim_names[1] + ' * %i' % n_recr_mult - else: - mult_params2['n_in'] = node.get_output_variable().dim_names[0] - mult_params2['n_out'] = node.get_output_variable().dim_names[0] + ' * %i' % n_recr_mult - mult_params2['product_type'] = get_backend('vivado').product_type( - node.get_input_variable().type.precision, node.get_weights('recurrent_weight').type.precision - ) - mult_params2['reuse'] = node.attributes['recurrent_reuse_factor'] - mult_params2['index'] = str(node.index) + '_2' - mult_params2['nzeros'] = node.get_weights('recurrent_weight').nzeros - mult_params2['nonzeros'] = node.get_weights('recurrent_weight').nonzeros - - mult_config1 = self.mult1_template.format(**mult_params1) - mult_config2 = self.mult2_template.format(**mult_params2) - - return mult_config1 + '\n' + mult_config2 + '\n' + recr_act_config + '\n' + act_config + '\n' + recr_config - - -class RecurrentFunctionTemplate(FunctionCallTemplate): - def __init__(self): - super().__init__((LSTM, GRU), include_header=recr_include_list) - self.template = recr_function_template - - def format(self, node): - params = self._default_function_params(node) - params['w'] = node.get_weights('weight').name - params['b'] = node.get_weights('bias').name - params['wr'] = node.get_weights('recurrent_weight').name - params['br'] = node.get_weights('recurrent_bias').name - params['activation'] = node.get_attr('activation') - params['recurrent_activation'] = node.get_attr('recurrent_activation') - params['recr_type'] = 
node.class_name.lower() - - return self.template.format(**params) diff --git a/hls4ml/hls4ml/backends/vivado/passes/reshaping_templates.py b/hls4ml/hls4ml/backends/vivado/passes/reshaping_templates.py deleted file mode 100644 index ec6705e..0000000 --- a/hls4ml/hls4ml/backends/vivado/passes/reshaping_templates.py +++ /dev/null @@ -1,132 +0,0 @@ -from hls4ml.backends.template import FunctionCallTemplate, LayerConfigTemplate -from hls4ml.model.layers import Resize, Transpose, ZeroPadding1D, ZeroPadding2D - -# ZeroPadding templates - -zeropad1d_config_template = """struct config{index} : nnet::padding1d_config {{ - static const unsigned in_width = {in_width}; - static const unsigned n_chan = {n_chan}; - static const unsigned out_width = {out_width}; - static const unsigned pad_left = {pad_left}; - static const unsigned pad_right = {pad_right}; -}};\n""" - -zeropad2d_config_template = """struct config{index} : nnet::padding2d_config {{ - static const unsigned in_height = {in_height}; - static const unsigned in_width = {in_width}; - static const unsigned n_chan = {n_chan}; - static const unsigned out_height = {out_height}; - static const unsigned out_width = {out_width}; - static const unsigned pad_top = {pad_top}; - static const unsigned pad_bottom = {pad_bottom}; - static const unsigned pad_left = {pad_left}; - static const unsigned pad_right = {pad_right}; -}};\n""" - -zeropad1d_function_template = 'nnet::zeropad1d_{data_format}<{input_t}, {output_t}, {config}>({input}, {output});' -zeropad2d_function_template = 'nnet::zeropad2d_{data_format}<{input_t}, {output_t}, {config}>({input}, {output});' - -padding_include_list = ['nnet_utils/nnet_padding.h', 'nnet_utils/nnet_padding_stream.h'] - - -class ZeroPaddingConfigTemplate(LayerConfigTemplate): - def __init__(self): - super().__init__((ZeroPadding1D, ZeroPadding2D)) - self.templates = { - 'ZeroPadding1D': zeropad1d_config_template, - 'ZeroPadding2D': zeropad2d_config_template, - } - - def format(self, node): - params = self._default_config_params(node) - return self.templates[node.class_name].format(**params) - - -class ZeroPaddingFunctionTemplate(FunctionCallTemplate): - def __init__(self): - super().__init__((ZeroPadding1D, ZeroPadding2D), include_header=padding_include_list) - self.templates = { - 'ZeroPadding1D': zeropad1d_function_template, - 'ZeroPadding2D': zeropad2d_function_template, - } - - def format(self, node): - params = self._default_function_params(node) - params['data_format'] = 'cf' if node.get_attr('data_format') == 'channels_first' else 'cl' - - return self.templates[node.class_name].format(**params) - - -# Resize templates - -resize_config_template = """struct config{index} : nnet::resize_config {{ - static const unsigned height = {in_height}; - static const unsigned width = {in_width}; - static const unsigned n_chan = {n_chan}; - static const unsigned new_height = {out_height}; - static const unsigned new_width = {out_width}; -}};\n""" - -resize_function_template = 'nnet::resize_{algorithm}<{input_t}, {config}>({input}, {output});' - -resize_include_list = ['nnet_utils/nnet_image.h', 'nnet_utils/nnet_image_stream.h'] - - -class ResizeConfigTemplate(LayerConfigTemplate): - def __init__(self): - super().__init__(Resize) - self.template = resize_config_template - - def format(self, node): - params = self._default_config_params(node) - - return self.template.format(**params) - - -class ResizeFunctionTemplate(FunctionCallTemplate): - def __init__(self): - super().__init__(Resize, include_header=resize_include_list) - 
self.template = resize_function_template - - def format(self, node): - params = self._default_function_params(node) - params['algorithm'] = node.get_attr('algorithm') - - return self.template.format(**params) - - -# Transpose templates - -transpose_config_template = """struct config{index} : nnet::transpose_config {{ - static const unsigned depth = {depth}; - static const unsigned height = {height}; - static const unsigned width = {width}; - static constexpr unsigned perm[3] = {{{perm_str}}}; -}};\n""" - -transpose_function_template = 'nnet::transpose_{dim}<{input_t}, {output_t}, {config}>({input}, {output});' - -transpose_include_list = ['nnet_utils/nnet_array.h', 'nnet_utils/nnet_stream.h'] - - -class TransposeConfigTemplate(LayerConfigTemplate): - def __init__(self): - super().__init__(Transpose) - self.template = transpose_config_template - - def format(self, node): - params = self._default_config_params(node) - - return self.template.format(**params) - - -class TransposeFunctionTemplate(FunctionCallTemplate): - def __init__(self): - super().__init__(Transpose, include_header=transpose_include_list) - self.template = transpose_function_template - - def format(self, node): - params = self._default_function_params(node) - params['dim'] = node.get_attr('dim') - - return self.template.format(**params) diff --git a/hls4ml/hls4ml/backends/vivado/passes/resource_strategy.py b/hls4ml/hls4ml/backends/vivado/passes/resource_strategy.py deleted file mode 100644 index 63e6e0b..0000000 --- a/hls4ml/hls4ml/backends/vivado/passes/resource_strategy.py +++ /dev/null @@ -1,48 +0,0 @@ -import numpy as np - -from hls4ml.model.layers import GRU, LSTM, Conv1D, Conv2D, Dense, SeparableConv1D, SeparableConv2D -from hls4ml.model.optimizer import OptimizerPass - - -class ApplyResourceStrategy(OptimizerPass): - '''Transposes the weights to use the dense_resource matrix multiply routine''' - - def match(self, node): - node_matches = isinstance(node, (Dense, Conv1D, SeparableConv1D, Conv2D, SeparableConv2D, LSTM, GRU)) - is_resource_strategy = node.get_attr('strategy', '').lower() == 'resource' - already_transformed = node.get_attr('_weights_transposed', False) is True - - return node_matches and is_resource_strategy and not already_transformed - - def transform(self, model, node): - if isinstance(node, Dense): - node.weights['weight'].data = np.transpose(node.weights['weight'].data) - elif isinstance(node, Conv1D): - node.weights['weight'].data = np.transpose(node.weights['weight'].data, axes=[2, 0, 1]) # (W,C,F) => (F,W,C) - elif isinstance(node, SeparableConv1D): - node.weights['depthwise'].data = np.transpose( - node.weights['depthwise'].data, axes=[2, 0, 1] - ) # (W,C,F) => (F,W,C) - node.weights['pointwise'].data = np.transpose( - node.weights['pointwise'].data, axes=[2, 0, 1] - ) # (W,C,F) => (F,W,C) - elif isinstance(node, Conv2D): - node.weights['weight'].data = np.transpose( - node.weights['weight'].data, axes=[3, 0, 1, 2] - ) # (H,W,C,F) => (F,H,W,C) - elif isinstance(node, SeparableConv2D): - node.weights['depthwise'].data = np.transpose( - node.weights['depthwise'].data, axes=[3, 0, 1, 2] - ) # (H,W,C,F) => (F,H,W,C) - node.weights['pointwise'].data = np.transpose( - node.weights['pointwise'].data, axes=[3, 0, 1, 2] - ) # (H,W,C,F) => (F,H,W,C) - elif isinstance(node, (LSTM, GRU)): - node.weights['weight'].data = np.transpose(node.weights['weight'].data) - node.weights['recurrent_weight'].data = np.transpose(node.weights['recurrent_weight'].data) - else: - raise Exception(f'Unexpected layer 
{node.class_name} with resource strategy') - - node.set_attr('_weights_transposed', True) - - return False diff --git a/hls4ml/hls4ml/backends/vivado/passes/transform_types.py b/hls4ml/hls4ml/backends/vivado/passes/transform_types.py deleted file mode 100644 index 3462578..0000000 --- a/hls4ml/hls4ml/backends/vivado/passes/transform_types.py +++ /dev/null @@ -1,52 +0,0 @@ -from hls4ml.backends.fpga.fpga_types import ( - APTypeConverter, - HLSTypeConverter, - StaticWeightVariableConverter, - VivadoArrayVariableConverter, - VivadoInplaceArrayVariableConverter, - VivadoInplaceStreamVariableConverter, - VivadoStreamVariableConverter, -) -from hls4ml.model.optimizer import GlobalOptimizerPass -from hls4ml.model.types import InplaceTensorVariable - - -class TransformTypes(GlobalOptimizerPass): - def __init__(self): - self.type_converter = HLSTypeConverter(precision_converter=APTypeConverter()) - self.array_var_converter = VivadoArrayVariableConverter(type_converter=self.type_converter) - self.inplace_array_var_converter = VivadoInplaceArrayVariableConverter(type_converter=self.type_converter) - self.stream_var_converter = VivadoStreamVariableConverter(type_converter=self.type_converter) - self.inplace_stream_var_converter = VivadoInplaceStreamVariableConverter(type_converter=self.type_converter) - self.weight_var_converter = StaticWeightVariableConverter(type_converter=self.type_converter) - - def transform(self, model, node): - io_type = node.model.config.get_config_value('IOType') - - for out_name, var in node.variables.items(): - if io_type == 'io_stream': - if isinstance(var, InplaceTensorVariable): - new_var = self.inplace_stream_var_converter.convert(var) - else: - new_var = self.stream_var_converter.convert(var) - elif io_type == 'io_serial': - new_var = self.array_var_converter.convert(var, pragma='stream') - elif io_type == 'io_parallel': - if out_name in node.model.inputs: - new_var = self.array_var_converter.convert(var, pragma='reshape') - elif isinstance(var, InplaceTensorVariable): - new_var = self.inplace_array_var_converter.convert(var, pragma='') - else: - new_var = self.array_var_converter.convert(var, pragma='partition') - else: - raise Exception(f'Unknown IOType {io_type} in {node.name} ({node.__class__.__name__})') - - node.set_attr(out_name, new_var) - - for w_name, weight in node.weights.items(): - new_weight = self.weight_var_converter.convert(weight) - node.set_attr(w_name, new_weight) - - for t_name, type in node.types.items(): - new_type = self.type_converter.convert(type) - node.set_attr(t_name, new_type) diff --git a/hls4ml/hls4ml/backends/vivado/vivado_backend.py b/hls4ml/hls4ml/backends/vivado/vivado_backend.py deleted file mode 100644 index 011d576..0000000 --- a/hls4ml/hls4ml/backends/vivado/vivado_backend.py +++ /dev/null @@ -1,476 +0,0 @@ -import os -import sys - -import numpy as np - -from hls4ml.backends import FPGABackend -from hls4ml.backends.fpga.fpga_types import APTypeConverter, HLSTypeConverter, VivadoArrayVariableConverter -from hls4ml.model.attributes import ChoiceAttribute, ConfigurableAttribute, TypeAttribute -from hls4ml.model.flow import register_flow -from hls4ml.model.layers import ( - GRU, - LSTM, - Conv1D, - Conv2D, - Dense, - DepthwiseConv2D, - Embedding, - GarNet, - GarNetStack, - GlobalPooling1D, - GlobalPooling2D, - Layer, - Pooling1D, - Pooling2D, - SeparableConv1D, - SeparableConv2D, - SimpleRNN, - Softmax, -) -from hls4ml.model.optimizer import get_backend_passes, layer_optimizer -from hls4ml.model.types import FixedPrecisionType, 
IntegerPrecisionType, NamedType, PackedType -from hls4ml.report import parse_vivado_report -from hls4ml.utils.fixed_point_utils import ceil_log2 - - -class VivadoBackend(FPGABackend): - def __init__(self): - super().__init__('Vivado') - self._register_layer_attributes() - self._register_flows() - - def _register_layer_attributes(self): - # Add RNN-specific attributes, recurrent_reuse_factor and static implementation - rnn_layers = [ - SimpleRNN, - LSTM, - GRU, - ] - - for layer in rnn_layers: - attrs = self.attribute_map.get(layer, []) - attrs.append(ConfigurableAttribute('recurrent_reuse_factor', default=1)) - attrs.append(ConfigurableAttribute('static', value_type=bool, default=True)) - attrs.append(ConfigurableAttribute('table_size', default=1024)) - attrs.append(TypeAttribute('table', default=FixedPrecisionType(18, 8))) - self.attribute_map[layer] = attrs - - # Add ParallelizationFactor to Conv1D/2D - pf_layers = [ - Conv1D, - Conv2D, - ] - - for layer in pf_layers: - attrs = self.attribute_map.get(layer, []) - attrs.append(ConfigurableAttribute('parallelization_factor', default=1)) - self.attribute_map[layer] = attrs - - # Add ConvImplementation to Convolution+Pooling layers - cnn_layers = [Conv1D, Conv2D, SeparableConv1D, SeparableConv2D, DepthwiseConv2D, Pooling1D, Pooling2D] - - for layer in cnn_layers: - attrs = self.attribute_map.get(layer, []) - attrs.append( - ChoiceAttribute('conv_implementation', choices=['LineBuffer', 'Encoded', 'Pointwise'], default='LineBuffer') - ) - self.attribute_map[layer] = attrs - - sep_conv_layers = [SeparableConv1D, SeparableConv2D] - for layer in sep_conv_layers: - attrs = self.attribute_map.get(layer, []) - attrs.append(TypeAttribute('dw_output', default=FixedPrecisionType(18, 8))) - self.attribute_map[layer] = attrs - - def _register_flows(self): - initializers = self._get_layer_initializers() - init_flow = register_flow('init_layers', initializers, requires=['optimize'], backend=self.name) - - streaming_passes = [ - 'vivado:reshape_stream', - 'vivado:clone_output', - 'vivado:insert_zero_padding_before_conv1d', - 'vivado:insert_zero_padding_before_conv2d', - 'vivado:broadcast_stream', - ] - streaming_flow = register_flow('streaming', streaming_passes, requires=[init_flow], backend=self.name) - - quantization_passes = [ - 'vivado:merge_batch_norm_quantized_tanh', - 'vivado:quantize_dense_output', - 'fuse_consecutive_batch_normalization', - 'vivado:xnor_pooling', - ] - quantization_flow = register_flow('quantization', quantization_passes, requires=[init_flow], backend=self.name) - - optimization_passes = [ - 'vivado:remove_final_reshape', - 'vivado:optimize_pointwise_conv', - 'vivado:inplace_parallel_reshape', - 'vivado:inplace_stream_flatten', - 'vivado:skip_softmax', - ] - optimization_flow = register_flow('optimize', optimization_passes, requires=[init_flow], backend=self.name) - - vivado_types = [ - 'vivado:transform_types', - 'vivado:register_bram_weights', - 'vivado:generate_conv_streaming_instructions', - 'vivado:apply_resource_strategy', - 'vivado:generate_conv_im2col', - ] - vivado_types_flow = register_flow('specific_types', vivado_types, requires=[init_flow], backend=self.name) - - templates = self._get_layer_templates() - template_flow = register_flow('apply_templates', self._get_layer_templates, requires=[init_flow], backend=self.name) - - writer_passes = ['make_stamp', 'vivado:write_hls'] - self._writer_flow = register_flow('write', writer_passes, requires=['vivado:ip'], backend=self.name) - - fifo_depth_opt_passes = [ - 
'vivado:fifo_depth_optimization' - ] + writer_passes # After optimization, a new project will be written - - register_flow('fifo_depth_optimization', fifo_depth_opt_passes, requires=['vivado:ip'], backend=self.name) - - all_passes = get_backend_passes(self.name) - - extras = [ - # Ideally this should be empty - opt_pass - for opt_pass in all_passes - if opt_pass - not in initializers - + streaming_passes - + quantization_passes - + optimization_passes - + vivado_types - + templates - + writer_passes - + fifo_depth_opt_passes - ] - - if len(extras) > 0: - extras_flow = register_flow('extras', extras, requires=[init_flow], backend=self.name) - else: - extras_flow = None - - ip_flow_requirements = [ - 'optimize', - init_flow, - streaming_flow, - quantization_flow, - optimization_flow, - vivado_types_flow, - extras_flow, - template_flow, - ] - ip_flow_requirements = list(filter(None, ip_flow_requirements)) - - self._default_flow = register_flow('ip', None, requires=ip_flow_requirements, backend=self.name) - - def get_default_flow(self): - return self._default_flow - - def get_writer_flow(self): - return self._writer_flow - - def create_initial_config(self, part='xcku115-flvb2104-2-i', clock_period=5, io_type='io_parallel'): - config = {} - - config['Part'] = part if part is not None else 'xcku115-flvb2104-2-i' - config['ClockPeriod'] = clock_period - config['IOType'] = io_type - config['HLSConfig'] = {} - - return config - - def build( - self, - model, - reset=False, - csim=True, - synth=True, - cosim=False, - validation=False, - export=False, - vsynth=False, - fifo_opt=False, - ): - if 'linux' in sys.platform: - found = os.system('command -v vivado_hls > /dev/null') - if found != 0: - raise Exception('Vivado HLS installation not found. Make sure "vivado_hls" is on PATH.') - - curr_dir = os.getcwd() - os.chdir(model.config.get_output_dir()) - vivado_cmd = ( - f'vivado_hls -f build_prj.tcl "reset={reset} ' - f'csim={csim} ' - f'synth={synth} ' - f'cosim={cosim} ' - f'validation={validation} ' - f'export={export} ' - f'vsynth={vsynth} ' - f'fifo_opt={fifo_opt}"' - ) - os.system(vivado_cmd) - os.chdir(curr_dir) - - return parse_vivado_report(model.config.get_output_dir()) - - def _validate_conv_strategy(self, layer): - if layer.model.config.pipeline_style.lower() != 'dataflow': - print(f'WARNING: Layer {layer.name} requires "dataflow" pipeline style. 
Switching to "dataflow" pipeline style.') - layer.model.config.pipeline_style = 'dataflow' - - @layer_optimizer(Layer) - def init_base_layer(self, layer): - reuse_factor = layer.model.config.get_reuse_factor(layer) - layer.set_attr('reuse_factor', reuse_factor) - - target_cycles = layer.model.config.get_target_cycles(layer) - layer.set_attr('target_cycles', target_cycles) - - @layer_optimizer(Dense) - def init_dense(self, layer): - index_t = IntegerPrecisionType(width=1, signed=False) - compression = layer.model.config.get_compression(layer) - if layer.model.config.is_resource_strategy(layer): - n_in, n_out = self.get_layer_mult_size(layer) - self.set_target_reuse_factor(layer) - self.set_closest_reuse_factor(layer, n_in, n_out) - if compression: - layer.set_attr('strategy', 'compressed') - index_t = layer.get_weights('weight').type.index_precision - else: - layer.set_attr('strategy', 'resource') - else: - layer.set_attr('strategy', 'latency') - layer.set_attr('index_t', NamedType(f'layer{layer.index}_index', index_t)) - - # TODO consolidate these functions into a single `init_conv` - @layer_optimizer(Conv1D) - def init_conv1d(self, layer): - if len(layer.weights['weight'].data.shape) == 2: # This can happen if we assign weights of Dense layer to 1x1 Conv1D - layer.weights['weight'].data = np.expand_dims(layer.weights['weight'].data, axis=(0, 1)) - - if layer.model.config.is_resource_strategy(layer): - layer.set_attr('strategy', 'resource') - n_in, n_out = self.get_layer_mult_size(layer) - self.set_target_reuse_factor(layer) - self.set_closest_reuse_factor(layer, n_in, n_out) - else: - layer.set_attr('strategy', 'latency') - - out_width = layer.get_output_variable().shape[0] - chosen_pf = layer.model.config.get_layer_config_value(layer, 'ParallelizationFactor', 1) - valid_pf = self.get_valid_conv_partition_splits(1, out_width) - if chosen_pf not in valid_pf: - closest_pf = self.get_closest_reuse_factor(valid_pf, chosen_pf) - valid_pf_str = ','.join(map(str, valid_pf)) - print( - f'WARNING: Invalid ParallelizationFactor={chosen_pf} in layer "{layer.name}".' - f'Using ParallelizationFactor={closest_pf} instead. Valid ParallelizationFactor(s): {valid_pf_str}.' 
- ) - else: - closest_pf = chosen_pf - layer.set_attr('n_partitions', out_width // closest_pf) - - layer.set_attr('implementation', layer.model.config.get_conv_implementation(layer).lower()) - - self._validate_conv_strategy(layer) - - @layer_optimizer(SeparableConv1D) - def init_sepconv1d(self, layer): - if layer.model.config.is_resource_strategy(layer): - layer.set_attr('strategy', 'resource') - n_in, n_out = self.get_layer_mult_size(layer) - self.set_closest_reuse_factor(layer, n_in, n_out) - else: - layer.set_attr('strategy', 'latency') - - layer.set_attr( - 'n_partitions', 1 - ) # TODO Once we have SeparableConv implementation for io_parallel this should be set properly - layer.set_attr('implementation', layer.model.config.get_conv_implementation(layer).lower()) - - # Set the output type of the depthwise phase - dw_out_precision, _ = layer.model.config.get_precision(layer, 'dw_output') - dw_out_name = layer.name + '_dw_out_t' - if layer.model.config.get_config_value('IOType') == 'io_stream': - dw_output_t = PackedType(dw_out_name, dw_out_precision, layer.get_attr('n_chan'), n_pack=1) - else: - dw_output_t = NamedType(dw_out_name, dw_out_precision) - layer.set_attr('dw_output_t', dw_output_t) - - @layer_optimizer(Conv2D) - def init_conv2d(self, layer): - if len(layer.weights['weight'].data.shape) == 2: # This can happen if we assign weights of Dense layer to 1x1 Conv2D - layer.weights['weight'].data = np.expand_dims(layer.weights['weight'].data, axis=(0, 1)) - - if layer.model.config.is_resource_strategy(layer): - layer.set_attr('strategy', 'resource') - self.set_target_reuse_factor(layer) - n_in, n_out = self.get_layer_mult_size(layer) - self.set_closest_reuse_factor(layer, n_in, n_out) - else: - layer.set_attr('strategy', 'latency') - - out_height = layer.get_output_variable().shape[0] - out_width = layer.get_output_variable().shape[1] - chosen_pf = layer.model.config.get_layer_config_value(layer, 'ParallelizationFactor', 1) - valid_pf = self.get_valid_conv_partition_splits(out_height, out_width) - if chosen_pf not in valid_pf: - closest_pf = self.get_closest_reuse_factor(valid_pf, chosen_pf) - valid_pf_str = ','.join(map(str, valid_pf)) - print( - f'WARNING: Invalid ParallelizationFactor={chosen_pf} in layer "{layer.name}". ' - f'Using ParallelizationFactor={closest_pf} instead. Valid ParallelizationFactor(s): {valid_pf_str}.'
- ) - else: - closest_pf = chosen_pf - layer.set_attr('n_partitions', out_height * out_width // closest_pf) - - layer.set_attr('implementation', layer.model.config.get_conv_implementation(layer).lower()) - - self._validate_conv_strategy(layer) - - @layer_optimizer(SeparableConv2D) - def init_sepconv2d(self, layer): - if layer.model.config.is_resource_strategy(layer): - layer.set_attr('strategy', 'resource') - n_in, n_out = self.get_layer_mult_size(layer) - self.set_closest_reuse_factor(layer, n_in, n_out) - else: - layer.set_attr('strategy', 'latency') - - layer.set_attr( - 'n_partitions', 1 - ) # TODO Once we have SeparableConv implementation for io_parallel this should be set properly - layer.set_attr('implementation', layer.model.config.get_conv_implementation(layer).lower()) - - # Set the output type of the depthwise phase - dw_out_precision, _ = layer.model.config.get_precision(layer, 'dw_output') - dw_out_name = layer.name + '_dw_out_t' - if layer.model.config.get_config_value('IOType') == 'io_stream': - dw_output_t = PackedType(dw_out_name, dw_out_precision, layer.get_attr('n_chan'), n_pack=1) - else: - dw_output_t = NamedType(dw_out_name, dw_out_precision) - layer.set_attr('dw_output_t', dw_output_t) - - @layer_optimizer(DepthwiseConv2D) - def init_depconv2d(self, layer): - if layer.model.config.is_resource_strategy(layer): - layer.set_attr('strategy', 'resource') - n_in, n_out = self.get_layer_mult_size(layer) - self.set_closest_reuse_factor(layer, n_in, n_out) - else: - layer.set_attr('strategy', 'latency') - - layer.set_attr( - 'n_partitions', 1 - ) # TODO Once we have SeparableConv implementation for io_parallel this should be set properly - layer.set_attr('implementation', layer.model.config.get_conv_implementation(layer).lower()) - - def _set_pooling_accum_t(self, layer, pool_size): - extra_bits = ceil_log2(pool_size) - accum_t = layer.get_attr('accum_t') - accum_t.precision.fractional += extra_bits - accum_t.precision.integer += extra_bits - - @layer_optimizer(Pooling1D) - def init_pooling1d(self, layer): - pool_size = layer.get_attr('pool_width') - self._set_pooling_accum_t(layer, pool_size) - - layer.set_attr('implementation', layer.model.config.get_conv_implementation(layer).lower()) - - @layer_optimizer(Pooling2D) - def init_pooling2d(self, layer): - pool_size = layer.get_attr('pool_height') * layer.get_attr('pool_width') - self._set_pooling_accum_t(layer, pool_size) - - layer.set_attr('implementation', layer.model.config.get_conv_implementation(layer).lower()) - - @layer_optimizer(GlobalPooling1D) - def init_global_pooling1d(self, layer): - pool_size = layer.get_attr('n_in') - self._set_pooling_accum_t(layer, pool_size) - - @layer_optimizer(GlobalPooling2D) - def init_global_pooling2d(self, layer): - pool_size = layer.get_attr('in_height') * layer.get_attr('in_width') - self._set_pooling_accum_t(layer, pool_size) - - @layer_optimizer(Softmax) - def init_softmax(self, layer): - if layer.model.config.get_config_value('IOType') == 'io_parallel': - assert ( - len(layer.get_input_variable().shape) == 1 - ), 'Softmax with io_parallel strategy cannot be used on multidimensional tensors.' 
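# _set_pooling_accum_t() above widens the accumulator because summing pool_size
# values can grow the result by up to a factor of pool_size, i.e. by
# ceil(log2(pool_size)) integer bits; the fractional part is widened by the same
# amount. A worked example, assuming ceil_log2(4) == 2 and a hypothetical
# ap_fixed<16,6> accumulator (6 integer, 10 fractional bits):
#
#     2x2 pooling -> pool_size = 4 -> extra_bits = 2
#     integer bits:    6 -> 8
#     fractional bits: 10 -> 12    (accumulator becomes ap_fixed<20,8>)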
- - @layer_optimizer(Embedding) - def init_embed(self, layer): - if layer.attributes['n_in'] is None: - raise Exception('Input length of Embedding layer must be specified.') - - @layer_optimizer(LSTM) - def init_lstm(self, layer): - # TODO Allow getting recurrent reuse factor from the config - reuse_factor = layer.model.config.get_reuse_factor(layer) - layer.set_attr('recurrent_reuse_factor', reuse_factor) - - if layer.model.config.is_resource_strategy(layer): - n_in, n_out, n_in_recr, n_out_recr = self.get_layer_mult_size(layer) - self.set_closest_reuse_factor(layer, n_in, n_out) - self.set_closest_reuse_factor(layer, n_in_recr, n_out_recr, attribute='recurrent_reuse_factor') - layer.set_attr('strategy', 'resource') - else: - layer.set_attr('strategy', 'latency') - - layer.set_attr('index_t', NamedType(f'layer{layer.index}_index', IntegerPrecisionType(width=1, signed=False))) - - @layer_optimizer(GRU) - def init_gru(self, layer): - reuse_factor = layer.model.config.get_reuse_factor(layer) - layer.set_attr('recurrent_reuse_factor', reuse_factor) - - if layer.model.config.is_resource_strategy(layer): - n_in, n_out, n_in_recr, n_out_recr = self.get_layer_mult_size(layer) - self.set_closest_reuse_factor(layer, n_in, n_out) - self.set_closest_reuse_factor(layer, n_in_recr, n_out_recr, attribute='recurrent_reuse_factor') - layer.set_attr('strategy', 'resource') - else: - layer.set_attr('strategy', 'latency') - - layer.set_attr('index_t', NamedType(f'layer{layer.index}_index', IntegerPrecisionType(width=1, signed=False))) - - @layer_optimizer(GarNet) - def init_garnet(self, layer): - reuse_factor = layer.attributes['reuse_factor'] - - var_converter = VivadoArrayVariableConverter(type_converter=HLSTypeConverter(precision_converter=APTypeConverter())) - - # A bit controversial but we are going to set the partitioning of the input here - in_layer = layer.model.graph[layer.inputs[0]] - in_var = layer.get_input_variable(layer.inputs[0]) - partition_factor = in_var.shape[1] * (in_var.shape[0] // reuse_factor) - in_pragma = ('partition', 'cyclic', partition_factor) - new_in_var = var_converter.convert(in_var, pragma=in_pragma) - in_layer.set_attr(layer.inputs[0], new_in_var) - - if layer.attributes['collapse']: - out_pragma = 'partition' - else: - partition_factor = layer._output_features * (layer.attributes['n_vertices'] // reuse_factor) - out_pragma = ('partition', 'cyclic', partition_factor) - - out_name, out_var = next(iter(layer.variables.items())) - new_out_var = var_converter.convert(out_var, pragma=out_pragma) - - layer.set_attr(out_name, new_out_var) - - @layer_optimizer(GarNetStack) - def init_garnet_stack(self, layer): - self.init_garnet(layer) diff --git a/hls4ml/hls4ml/backends/vivado_accelerator/__init__.py b/hls4ml/hls4ml/backends/vivado_accelerator/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/hls4ml/hls4ml/backends/vivado_accelerator/passes/__init__.py b/hls4ml/hls4ml/backends/vivado_accelerator/passes/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/hls4ml/hls4ml/backends/vivado_accelerator/passes/fifo_depth_optimization.py b/hls4ml/hls4ml/backends/vivado_accelerator/passes/fifo_depth_optimization.py deleted file mode 100644 index 61db7eb..0000000 --- a/hls4ml/hls4ml/backends/vivado_accelerator/passes/fifo_depth_optimization.py +++ /dev/null @@ -1,69 +0,0 @@ -from hls4ml.backends.vivado.passes.fifo_depth_optimization import ( - generate_max_depth_file, - get_vcd_data, - populate_values, - set_big_fifos, - set_fifo_depth, -) -from 
hls4ml.model.optimizer.optimizer import ConfigurableOptimizerPass, ModelOptimizerPass - - -class FifoDepthOptimization(ConfigurableOptimizerPass, ModelOptimizerPass): - def __init__(self): - self.values = [] - - def transform(self, model): - # set `profiling_fifo_depth = 0` to keep the default fifo depths - profiling_fifo_depth = getattr(self, 'profiling_fifo_depth', 100_000) - - # this pass requires `io_stream` IOType or an `axi_stream`/`axi_master` accelerator interface; exit otherwise - if not ( - model.config.get_config_value('IOType') == 'io_stream' - or model.config.get_config_value('AcceleratorConfig')['Interface'] == 'axi_stream' - or model.config.get_config_value('AcceleratorConfig')['Interface'] == 'axi_master' - ): - raise Exception( - 'To use this optimization you have to set `IOType` field to `io_stream` in the HLS config ' - 'or `axi_stream` or `axi_master` in `AcceleratorConfig` interface field' - ) - - # initialize all the fifos to `profiling_fifo_depth` so that they will be automatically implemented in BRAMs - # and so they will be profiled - - if profiling_fifo_depth: - set_big_fifos(model.output_vars, profiling_fifo_depth) - - data = get_vcd_data(model) - - for i in range(1, len(data['children'][0]['children'][0]['children'])): - # wrapper fifos - populate_values( - self.values, - data['children'][0]['children'][0]['children'][i]['name'], - data['children'][0]['children'][0]['children'][i]['children'][0]['data'], - data['children'][0]['children'][0]['children'][i]['children'][1]['data'], - ) - - n_elem = len(data['children'][0]['children'][0]['children'][0]['children']) - for i in range(n_elem): - name = data['children'][0]['children'][0]['children'][0]['children'][i]['name'] - data_p = data['children'][0]['children'][0]['children'][0]['children'][i]['children'][0]['data'] - depth = data['children'][0]['children'][0]['children'][0]['children'][i]['children'][1]['data'] - populate_values(self.values, name, data_p, depth) - - maxs = [{'name': i['name'], 'max': i['max'], 'depth': i['depth']} for i in self.values] - - generate_max_depth_file(model, maxs) - - set_fifo_depth(model, maxs) - - inp = model.get_input_variables()[0] - out = model.get_output_variables()[0] - for x in maxs: - if 'in_local' in x['name']: - inp.pragma = (inp.pragma[0], x['max'] + 1) - elif 'out_local' in x['name']: - out.pragma = (out.pragma[0], x['max'] + 1) - - print('[hls4ml] - FIFO optimization completed') - return False diff --git a/hls4ml/hls4ml/backends/vivado_accelerator/supported_boards.json b/hls4ml/hls4ml/backends/vivado_accelerator/supported_boards.json deleted file mode 100644 index 1279ec2..0000000 --- a/hls4ml/hls4ml/backends/vivado_accelerator/supported_boards.json +++ /dev/null @@ -1,42 +0,0 @@ -{ - "pynq-z2": { - "part": "xc7z020clg400-1", - "tcl_scripts": {"axi_lite": "axi_lite_design.tcl", "axi_stream": "axi_stream_design.tcl"}, - "python_drivers": {"axi_stream": "axi_stream_driver.py"}, - "c_drivers": {} - }, - "zcu102": { - "part": "xczu9eg-ffvb1156-2-e", - "tcl_scripts": { "axi_stream": "axi_stream_design.tcl"}, - "python_drivers": {"axi_stream": "axi_stream_driver.py"}, - "c_drivers": {} - }, - "alveo-u50": { - "part": "xcu50-fsvh2104-2-e", - "tcl_scripts": {"axi_stream": "axi_stream_design.tcl"}, - "python_drivers": {"axi_stream": "axi_stream_driver.py"}, - "krnl_rtl_srcs": {"axi_stream": "krnl_rtl_src"}, - "c_drivers": {} - }, - "alveo-u250": { - "part": "xcu250-figd2104-2L-e", - "tcl_scripts": {"axi_stream": "axi_stream_design.tcl"}, - "python_drivers": {"axi_stream": "axi_stream_driver.py"}, - "krnl_rtl_srcs": {"axi_stream": "krnl_rtl_src"}, - 
"c_drivers": {} - }, - "alveo-u200": { - "part": "xcu200-fsgd2104-2-e", - "tcl_scripts": {"axi_stream": "axi_stream_design.tcl"}, - "python_drivers": {"axi_stream": "axi_stream_driver.py"}, - "krnl_rtl_srcs": {"axi_stream": "krnl_rtl_src"}, - "c_drivers": {} - }, - "alveo-u280": { - "part": "xcu280-fsvh2892-2L-e", - "tcl_scripts": {"axi_stream": "axi_stream_design.tcl"}, - "python_drivers": {"axi_stream": "axi_stream_driver.py"}, - "krnl_rtl_srcs": {"axi_stream": "krnl_rtl_src"}, - "c_drivers": {} - } -} diff --git a/hls4ml/hls4ml/backends/vivado_accelerator/vivado_accelerator_backend.py b/hls4ml/hls4ml/backends/vivado_accelerator/vivado_accelerator_backend.py deleted file mode 100644 index e3e93e1..0000000 --- a/hls4ml/hls4ml/backends/vivado_accelerator/vivado_accelerator_backend.py +++ /dev/null @@ -1,162 +0,0 @@ -import os - -from hls4ml.backends import VivadoBackend -from hls4ml.model.flow import register_flow -from hls4ml.report import parse_vivado_report - - -class VivadoAcceleratorBackend(VivadoBackend): - def __init__(self): - super(VivadoBackend, self).__init__(name='VivadoAccelerator') - self._register_layer_attributes() - self._register_flows() - - def build( - self, - model, - reset=False, - csim=True, - synth=True, - cosim=False, - validation=False, - export=False, - vsynth=False, - fifo_opt=False, - bitfile=False, - ): - # run the VivadoBackend build - super().build( - model, - reset=reset, - csim=csim, - synth=synth, - cosim=cosim, - validation=validation, - export=export, - vsynth=vsynth, - fifo_opt=fifo_opt, - ) - # Get Config to view Board and Platform - from hls4ml.backends import VivadoAcceleratorConfig - - vivado_accelerator_config = VivadoAcceleratorConfig( - model.config, model.get_input_variables(), model.get_output_variables() - ) - # now make a bitfile - if bitfile: - if vivado_accelerator_config.get_board().startswith('alveo'): - self.make_xclbin(model, vivado_accelerator_config.get_platform()) - else: - curr_dir = os.getcwd() - os.chdir(model.config.get_output_dir()) - try: - os.system('vivado -mode batch -source design.tcl') - except Exception: - print("Something went wrong, check the Vivado logs") - os.chdir(curr_dir) - - return parse_vivado_report(model.config.get_output_dir()) - - def make_xclbin(self, model, platform='xilinx_u250_xdma_201830_2'): - """Create the xclbin for the given model and target platform. - - Args: - model (ModelGraph): Compiled and build model. - platform (str, optional): Development/Deployment target platform, must be installed first. - The host machine only requires the deployment target platform. Refer to the Getting Started section of - the Alveo guide. Defaults to 'xilinx_u250_xdma_201830_2'. 
- """ - curr_dir = os.getcwd() - abs_path_dir = os.path.abspath(model.config.get_output_dir()) - os.chdir(abs_path_dir) - os.makedirs('xo_files', exist_ok=True) - try: - os.system('vivado -mode batch -source design.tcl') - except Exception: - print("Something went wrong, check the Vivado logs") - project_name = model.config.get_project_name() - ip_repo_path = abs_path_dir + '/' + project_name + '_prj' + '/solution1/impl/ip' - os.makedirs('xclbin_files', exist_ok=True) - os.chdir(abs_path_dir + '/xclbin_files') - # TODO Add other platforms - vitis_cmd = ( - "v++ -t hw --platform " - + platform - + " --link ../xo_files/" - + project_name - + "_kernel.xo -o'" - + project_name - + "_kernel.xclbin' --user_ip_repo_paths " - + ip_repo_path - ) - try: - os.system(vitis_cmd) - except Exception: - print("Something went wrong, check the Vitis/Vivado logs") - os.chdir(curr_dir) - - def create_initial_config( - self, - board='pynq-z2', - part=None, - clock_period=5, - io_type='io_parallel', - interface='axi_stream', - driver='python', - input_type='float', - output_type='float', - platform='xilinx_u250_xdma_201830_2', - ): - ''' - Create initial accelerator config with default parameters - - Args: - board: one of the keys defined in supported_boards.json - clock_period: clock period passed to hls project - io_type: io_parallel or io_stream - interface: `axi_stream`: generate hardware designs and drivers which exploit axi stream channels. - `axi_master`: generate hardware designs and drivers which exploit axi master channels. - `axi_lite` : generate hardware designs and drivers which exploit axi lite channels. (Don't use it - to exchange large amount of data) - driver: `python`: generates the python driver to use the accelerator in the PYNQ stack. - `c`: generates the c driver to use the accelerator bare-metal. - input_type: the wrapper input precision. Can be `float` or an `ap_type`. Note: VivadoAcceleratorBackend - will round the number of bits used to the next power-of-2 value. - output_type: the wrapper output precision. Can be `float` or an `ap_type`. Note: - VivadoAcceleratorBackend will round the number of bits used to the next power-of-2 value. 
- platform: development target platform - - Returns: - populated config - ''' - board = board if board is not None else 'pynq-z2' - config = super().create_initial_config(part, clock_period, io_type) - config['AcceleratorConfig'] = {} - config['AcceleratorConfig']['Board'] = board - config['AcceleratorConfig']['Interface'] = interface # axi_stream, axi_master, axi_lite - config['AcceleratorConfig']['Driver'] = driver - config['AcceleratorConfig']['Precision'] = {} - config['AcceleratorConfig']['Precision']['Input'] = {} - config['AcceleratorConfig']['Precision']['Output'] = {} - config['AcceleratorConfig']['Precision']['Input'] = input_type # float, double or ap_fixed - config['AcceleratorConfig']['Precision']['Output'] = output_type # float, double or ap_fixed - if board.startswith('alveo'): - config['AcceleratorConfig']['Platform'] = platform - - return config - - def get_default_flow(self): - return self._default_flow - - def get_writer_flow(self): - return self._writer_flow - - def _register_flows(self): - vivado_ip = 'vivado:ip' - writer_passes = ['make_stamp', 'vivadoaccelerator:write_hls'] - self._writer_flow = register_flow('write', writer_passes, requires=[vivado_ip], backend=self.name) - self._default_flow = vivado_ip - - fifo_depth_opt_passes = ['vivadoaccelerator:fifo_depth_optimization'] + writer_passes - - register_flow('fifo_depth_optimization', fifo_depth_opt_passes, requires=[vivado_ip], backend=self.name) diff --git a/hls4ml/hls4ml/backends/vivado_accelerator/vivado_accelerator_config.py b/hls4ml/hls4ml/backends/vivado_accelerator/vivado_accelerator_config.py deleted file mode 100644 index 7bd931e..0000000 --- a/hls4ml/hls4ml/backends/vivado_accelerator/vivado_accelerator_config.py +++ /dev/null @@ -1,162 +0,0 @@ -import json -import os - -import numpy as np - -from hls4ml.model.layers import FixedPrecisionType, IntegerPrecisionType - - -class VivadoAcceleratorConfig: - def __init__(self, config, model_inputs, model_outputs): - self.config = config.config - self.board = self.config.get('AcceleratorConfig', {}).get('Board', 'pynq-z2') - self.supported_boards = json.load(open(os.path.dirname(__file__) + '/supported_boards.json')) - if self.board in self.supported_boards.keys(): - board_info = self.supported_boards[self.board] - self.part = board_info['part'] - else: - raise Exception('The board does not appear in supported_boards.json file') - - if self.config.get('Part') is not None: - if self.config.get('Part') != self.part: - print( - 'WARNING: You set a Part that does not correspond to the Board you specified. The correct ' - 'Part is now set.' 
- ) - self.config['Part'] = self.part - accel_config = self.config.get('AcceleratorConfig', None) - if accel_config is not None: - prec = accel_config.get('Precision') - if prec is None: - raise Exception('Precision must be provided in the AcceleratorConfig') - else: - if prec.get('Input') is None or prec.get('Output') is None: - raise Exception('Input and Output fields must be provided in the AcceleratorConfig->Precision') - else: - accel_config = { - 'Precision': {'Input': 'float', 'Output': 'float'}, - 'Driver': 'python', - 'Interface': 'axi_stream', - } - config.config['AcceleratorConfig'] = accel_config - - self.interface = self.config['AcceleratorConfig'].get('Interface', 'axi_stream') # axi_stream, axi_master, axi_lite - self.driver = self.config['AcceleratorConfig'].get('Driver', 'python') # python or c - self.input_type = self.config['AcceleratorConfig']['Precision'].get( - 'Input', 'float' - ) # float, double or ap_fixed - self.output_type = self.config['AcceleratorConfig']['Precision'].get( - 'Output', 'float' - ) # float, double or ap_fixed - self.platform = self.config['AcceleratorConfig'].get( - 'Platform', 'xilinx_u250_xdma_201830_2' - ) # Get platform folder name - - assert ( - len(model_inputs) == 1 - ), "Only models with one input tensor are currently supported by VivadoAcceleratorBackend" - assert ( - len(model_outputs) == 1 - ), "Only models with one output tensor are currently supported by VivadoAcceleratorBackend" - self.inp = model_inputs[0] - self.out = model_outputs[0] - inp_axi_t = self.input_type - out_axi_t = self.output_type - - if inp_axi_t not in ['float', 'double']: - self.input_type = self._next_factor8_type(config.backend.convert_precision_string(inp_axi_t)) - if out_axi_t not in ['float', 'double']: - self.output_type = self._next_factor8_type(config.backend.convert_precision_string(out_axi_t)) - - if self.input_type == 'float': - self.input_bitwidth = 32 - elif self.input_type == 'double': - self.input_bitwidth = 64 - else: - self.input_bitwidth = config.backend.convert_precision_string(inp_axi_t).width - - if out_axi_t == 'float': - self.output_bitwidth = 32 - elif out_axi_t == 'double': - self.output_bitwidth = 64 - else: - self.output_bitwidth = config.backend.convert_precision_string(out_axi_t).width - - def _next_factor8_type(self, p): - '''Return a new type with the width rounded up to the next multiple of 8 from p's width - Args: - p : IntegerPrecisionType or FixedPrecisionType - Returns: - An IntegerPrecisionType or FixedPrecisionType with the width rounded up to the next multiple of 8 - of p's width. Other parameters (fractional bits, extra modes) stay the same. 
- ''' - W = p.width - newW = int(np.ceil(W / 8) * 8) - if isinstance(p, FixedPrecisionType): - return FixedPrecisionType(newW, p.integer, p.signed, p.rounding_mode, p.saturation_mode, p.saturation_bits) - elif isinstance(p, IntegerPrecisionType): - return IntegerPrecisionType(newW, p.signed) - - def get_io_bitwidth(self): - return self.input_bitwidth, self.output_bitwidth - - def get_corrected_types(self): - return self.input_type, self.output_type, self.inp, self.out - - def get_interface(self): - return self.interface - - def get_board_info(self, board=None): - if board is None: - board = self.board - if board in self.supported_boards.keys(): - return self.supported_boards[board] - else: - raise Exception('The board is still not supported') - - def get_part(self): - return self.part - - def get_driver(self): - return self.driver - - def get_board(self): - return self.board - - def get_platform(self): - return self.platform - - def get_clock_period(self): - return self.clock_period - - def get_driver_path(self): - if self.board.startswith('alveo'): - return '../templates/vivado_accelerator/' + 'alveo/' + self.driver + '_drivers/' + self.get_driver_file() - else: - return '../templates/vivado_accelerator/' + self.board + '/' + self.driver + '_drivers/' + self.get_driver_file() - - def get_driver_file(self): - driver_ext = '.py' if self.driver == 'python' else '.h' - return self.interface + '_driver' + driver_ext - - def get_krnl_rtl_src_dir(self): - return '../templates/vivado_accelerator/' + 'alveo/' + '/krnl_rtl_src' - - def get_input_type(self): - return self.input_type - - def get_output_type(self): - return self.output_type - - def get_tcl_file_path(self): - board_info = self.get_board_info(self.board) - tcl_scripts = board_info.get('tcl_scripts', None) - if tcl_scripts is None: - raise Exception('No tcl scripts definition available for the board in supported_board.json') - tcl_script = tcl_scripts.get(self.interface, None) - if tcl_script is None: - raise Exception('No tcl script definition available for the desired interface in supported_board.json') - if self.board.startswith('alveo'): - return '../templates/vivado_accelerator/' + 'alveo/' + '/tcl_scripts/' + tcl_script - else: - return '../templates/vivado_accelerator/' + self.board + '/tcl_scripts/' + tcl_script diff --git a/hls4ml/hls4ml/converters/__init__.py b/hls4ml/hls4ml/converters/__init__.py deleted file mode 100644 index b5d37ce..0000000 --- a/hls4ml/hls4ml/converters/__init__.py +++ /dev/null @@ -1,481 +0,0 @@ -import importlib -import os -import warnings - -import yaml - -from hls4ml.converters.keras_to_hls import KerasFileReader # noqa: F401 -from hls4ml.converters.keras_to_hls import KerasModelReader # noqa: F401 -from hls4ml.converters.keras_to_hls import KerasReader # noqa: F401 -from hls4ml.converters.keras_to_hls import get_supported_keras_layers # noqa: F401 -from hls4ml.converters.keras_to_hls import parse_keras_model # noqa: F401 -from hls4ml.converters.keras_to_hls import keras_to_hls, register_keras_layer_handler -from hls4ml.model import ModelGraph -from hls4ml.utils.config import create_config -from hls4ml.utils.symbolic_utils import LUTFunction - -# ----------Make converters available if the libraries can be imported----------# -try: - from hls4ml.converters.pytorch_to_hls import ( # noqa: F401 - get_supported_pytorch_layers, - pytorch_to_hls, - register_pytorch_layer_handler, - ) - - __pytorch_enabled__ = True -except ImportError: - warnings.warn("WARNING: Pytorch converter is not enabled!", 
stacklevel=1) - __pytorch_enabled__ = False - -try: - from hls4ml.converters.onnx_to_hls import get_supported_onnx_layers # noqa: F401 - from hls4ml.converters.onnx_to_hls import onnx_to_hls, register_onnx_layer_handler - - __onnx_enabled__ = True -except ImportError: - warnings.warn("WARNING: ONNX converter is not enabled!", stacklevel=1) - __onnx_enabled__ = False - -# ----------Layer handling register----------# -model_types = ['keras', 'pytorch', 'onnx'] - -for model_type in model_types: - for module in os.listdir(os.path.dirname(__file__) + f'/{model_type}'): - if module == '__init__.py' or module[-3:] != '.py': - continue - try: - lib = importlib.import_module(__name__ + f'.{model_type}.' + module[:-3]) - for _, func in list(lib.__dict__.items()): - # if 'func' is callable (i.e., function, class...) - # and has 'handles' attribute - # and is defined in this module (i.e., not imported) - if callable(func) and hasattr(func, 'handles') and func.__module__ == lib.__name__: - for layer in func.handles: - if model_type == 'keras': - register_keras_layer_handler(layer, func) - elif model_type == 'pytorch': - register_pytorch_layer_handler(layer, func) - elif model_type == 'onnx': - register_onnx_layer_handler(layer, func) - - except ImportError as err: - print(f'WARNING: Failed to import handlers from {module}: {err.msg}.') - continue - - -def parse_yaml_config(config_file): - """Parse conversion configuration from the provided YAML file. - - This function parses the conversion configuration contained in the YAML - file provided as an argument. It ensures proper serialization of hls4ml - objects and should be called on YAML files created by hls4ml. A minimal - valid YAML file may look like this:: - - KerasH5: my_keras_model.h5 - OutputDir: my-hls-test - ProjectName: myproject - Part: xcku115-flvb2104-2-i - ClockPeriod: 5 - IOType: io_stream - HLSConfig: - Model: - Precision: ap_fixed<16,6> - ReuseFactor: 10 - - Please refer to the docs for more examples of valid YAML configurations. - - Arguments: - config_file (str): Location of the file on the filesystem. - - Returns: - dict: Parsed configuration. - """ - - def construct_keras_model(loader, node): - from tensorflow.keras.models import load_model - - model_str = loader.construct_scalar(node) - return load_model(model_str) - - yaml.add_constructor('!keras_model', construct_keras_model, Loader=yaml.SafeLoader) - - print('Loading configuration from', config_file) - with open(config_file) as file: - parsed_config = yaml.safe_load(file) - return parsed_config - - -def convert_from_config(config): - """Convert to hls4ml model based on the provided configuration. - - Arguments: - config: A string containing the path to the YAML configuration file on - the filesystem or a dict containing the parsed configuration. - - Returns: - ModelGraph: hls4ml model. - """ - - if isinstance(config, str): - yamlConfig = parse_yaml_config(config) - else: - yamlConfig = config - - model = None - if 'OnnxModel' in yamlConfig: - if __onnx_enabled__: - model = onnx_to_hls(yamlConfig) - else: - raise Exception("ONNX not found. Please install ONNX.") - elif 'PytorchModel' in yamlConfig: - if __pytorch_enabled__: - model = pytorch_to_hls(yamlConfig) - else: - raise Exception("PyTorch not found. Please install PyTorch.") - else: - model = keras_to_hls(yamlConfig) - - return model - - -def _check_hls_config(config, hls_config): - """ - Check hls_config for to set appropriate parameters for config. 
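For reference, the conversion entry point above accepts either a path to a YAML file or an already-parsed dict; a minimal usage sketch (the file name is illustrative)::

    import hls4ml

    # Path form: parse_yaml_config() is applied first.
    hls_model = hls4ml.converters.convert_from_config('keras-config.yml')

    # Dict form: a pre-parsed configuration is used as-is.
    cfg = hls4ml.converters.parse_yaml_config('keras-config.yml')
    hls_model = hls4ml.converters.convert_from_config(cfg)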
- """ - - if 'LayerName' in hls_config: - config['HLSConfig']['LayerName'] = hls_config['LayerName'] - - if 'LayerType' in hls_config: - config['HLSConfig']['LayerType'] = hls_config['LayerType'] - - if 'Flows' in hls_config: - config['HLSConfig']['Flows'] = hls_config['Flows'] - - if 'Optimizers' in hls_config: - config['HLSConfig']['Optimizers'] = hls_config['Optimizers'] - - if 'SkipOptimizers' in hls_config: - config['HLSConfig']['SkipOptimizers'] = hls_config['SkipOptimizers'] - - return - - -def _check_model_config(model_config): - if model_config is not None: - if not all(k in model_config for k in ('Precision', 'ReuseFactor')): - raise Exception('Precision and ReuseFactor must be provided in the hls_config') - else: - model_config = {} - model_config['Precision'] = 'ap_fixed<16,6>' - model_config['ReuseFactor'] = 1 - - return model_config - - -def convert_from_keras_model( - model, - output_dir='my-hls-test', - project_name='myproject', - input_data_tb=None, - output_data_tb=None, - backend='Vivado', - hls_config=None, - **kwargs, -): - """Convert Keras model to hls4ml model based on the provided configuration. - - Args: - model: Keras model to convert - output_dir (str, optional): Output directory of the generated HLS - project. Defaults to 'my-hls-test'. - project_name (str, optional): Name of the HLS project. - Defaults to 'myproject'. - input_data_tb (str, optional): String representing the path of input data in .npy or .dat format that will be - used during csim and cosim. - output_data_tb (str, optional): String representing the path of output data in .npy or .dat format that will be - used during csim and cosim. - backend (str, optional): Name of the backend to use, e.g., 'Vivado' - or 'Quartus'. - board (str, optional): One of target boards specified in `supported_board.json` file. If set to `None` a default - device of a backend will be used. See documentation of the backend used. - part (str, optional): The FPGA part. If set to `None` a default part of a backend will be used. - See documentation of the backend used. Note that if `board` is specified, the part associated to that board - will overwrite any part passed as a parameter. - clock_period (int, optional): Clock period of the design. - Defaults to 5. - io_type (str, optional): Type of implementation used. One of - 'io_parallel' or 'io_stream'. Defaults to 'io_parallel'. - hls_config (dict, optional): The HLS config. - kwargs** (dict, optional): Additional parameters that will be used to create the config of the specified backend - - Raises: - Exception: If precision and reuse factor are not present in 'hls_config'. - - Returns: - ModelGraph: hls4ml model. - """ - - config = create_config(output_dir=output_dir, project_name=project_name, backend=backend, **kwargs) - - config['KerasModel'] = model - config['InputData'] = input_data_tb - config['OutputPredictions'] = output_data_tb - config['HLSConfig'] = {} - - if hls_config is None: - hls_config = {} - - model_config = hls_config.get('Model', None) - config['HLSConfig']['Model'] = _check_model_config(model_config) - - _check_hls_config(config, hls_config) - - return keras_to_hls(config) - - -def convert_from_pytorch_model( - model, - input_shape, - output_dir='my-hls-test', - project_name='myproject', - input_data_tb=None, - output_data_tb=None, - backend='Vivado', - hls_config=None, - **kwargs, -): - """Convert PyTorch model to hls4ml model based on the provided configuration. - - Args: - model: PyTorch model to convert. 
-        input_shape (list): The shape of the input tensor. The first element is the batch size, which needs to be None.
-        output_dir (str, optional): Output directory of the generated HLS project. Defaults to 'my-hls-test'.
-        project_name (str, optional): Name of the HLS project. Defaults to 'myproject'.
-        input_data_tb (str, optional): String representing the path of input data in .npy or .dat format that will be
-            used during csim and cosim. Defaults to None.
-        output_data_tb (str, optional): String representing the path of output data in .npy or .dat format that will be
-            used during csim and cosim. Defaults to None.
-        backend (str, optional): Name of the backend to use, e.g., 'Vivado' or 'Quartus'. Defaults to 'Vivado'.
-        board (str, optional): One of target boards specified in `supported_board.json` file. If set to `None` a default
-            device of a backend will be used. See documentation of the backend used.
-        part (str, optional): The FPGA part. If set to `None` a default part of a backend will be used.
-            See documentation of the backend used. Note that if `board` is specified, the part associated to that board
-            will overwrite any part passed as a parameter.
-        clock_period (int, optional): Clock period of the design.
-            Defaults to 5.
-        io_type (str, optional): Type of implementation used. One of
-            'io_parallel' or 'io_stream'. Defaults to 'io_parallel'.
-        hls_config (dict, optional): The HLS config.
-        kwargs** (dict, optional): Additional parameters that will be used to create the config of the specified backend.
-
-    Raises:
-        Exception: If precision and reuse factor are not present in 'hls_config'.
-
-    Notes:
-        Pytorch uses the "channels_first" data format for its tensors, while hls4ml expects the "channels_last" format
-        used by Keras. By default, hls4ml will automatically add layers to the model which transpose the inputs to the
-        "channels_last" format. Note that this is not supported for the "io_stream" io_type, for which the user will
-        have to transpose the input by hand before passing it to hls4ml. In that case the "inputs_channel_last" argument
-        of the "config_from_pytorch_model" function needs to be set to True. By default, the output of the model remains
-        in the "channels_last" data format. The "transpose_outputs" argument of the "config_from_pytorch_model" can be
-        used to add a layer to the model that transposes back to "channels_first". As before, this will not work for
-        io_stream.
-
-    Returns:
-        ModelGraph: hls4ml model.
-    """
-
-    config = create_config(output_dir=output_dir, project_name=project_name, backend=backend, **kwargs)
-
-    config['PytorchModel'] = model
-    config['InputShape'] = input_shape
-    config['InputData'] = input_data_tb
-    config['OutputPredictions'] = output_data_tb
-    config['HLSConfig'] = {}
-
-    if hls_config is None:
-        hls_config = {}
-
-    model_config = hls_config.get('Model', None)
-    config['HLSConfig']['Model'] = _check_model_config(model_config)
-
-    _check_hls_config(config, hls_config)
-
-    return pytorch_to_hls(config)
-
-
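A minimal invocation of the PyTorch path above, with an illustrative two-layer network (the model, shapes and precision are placeholders)::

    import torch.nn as nn

    import hls4ml

    model = nn.Sequential(nn.Linear(16, 64), nn.ReLU(), nn.Linear(64, 1))
    hls_model = hls4ml.converters.convert_from_pytorch_model(
        model,
        input_shape=[None, 16],  # batch dimension needs to be None, as noted above
        hls_config={'Model': {'Precision': 'ap_fixed<16,6>', 'ReuseFactor': 1}},
        backend='Vivado',
    )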
-def convert_from_onnx_model(
-    model,
-    output_dir='my-hls-test',
-    project_name='myproject',
-    input_data_tb=None,
-    output_data_tb=None,
-    backend='Vivado',
-    hls_config=None,
-    **kwargs,
-):
-    """Convert ONNX model to hls4ml model based on the provided configuration.
-
-    Args:
-        model: ONNX model to convert.
-        output_dir (str, optional): Output directory of the generated HLS
-            project. Defaults to 'my-hls-test'.
-        project_name (str, optional): Name of the HLS project.
-            Defaults to 'myproject'.
-        input_data_tb (str, optional): String representing the path of input data in .npy or .dat format that will be
-            used during csim and cosim.
-        output_data_tb (str, optional): String representing the path of output data in .npy or .dat format that will be
-            used during csim and cosim.
-        backend (str, optional): Name of the backend to use, e.g., 'Vivado'
-            or 'Quartus'.
-        board (str, optional): One of target boards specified in `supported_board.json` file. If set to `None` a default
-            device of a backend will be used. See documentation of the backend used.
-        part (str, optional): The FPGA part. If set to `None` a default part of a backend will be used.
-            See documentation of the backend used. Note that if `board` is specified, the part associated to that board
-            will overwrite any part passed as a parameter.
-        clock_period (int, optional): Clock period of the design.
-            Defaults to 5.
-        io_type (str, optional): Type of implementation used. One of
-            'io_parallel' or 'io_stream'. Defaults to 'io_parallel'.
-        hls_config (dict, optional): The HLS config.
-        kwargs** (dict, optional): Additional parameters that will be used to create the config of the specified backend
-
-    Raises:
-        Exception: If precision and reuse factor are not present in 'hls_config'.
-
-    Returns:
-        ModelGraph: hls4ml model.
-    """
-
-    config = create_config(output_dir=output_dir, project_name=project_name, backend=backend, **kwargs)
-
-    config['OnnxModel'] = model
-    config['InputData'] = input_data_tb
-    config['OutputPredictions'] = output_data_tb
-    config['HLSConfig'] = {}
-
-    if hls_config is None:
-        hls_config = {}
-
-    model_config = hls_config.get('Model', None)
-    config['HLSConfig']['Model'] = _check_model_config(model_config)
-
-    _check_hls_config(config, hls_config)
-
-    return onnx_to_hls(config)
-
-
-def convert_from_symbolic_expression(
-    expr,
-    n_symbols=None,
-    lut_functions=None,
-    use_built_in_lut_functions=False,
-    output_dir='my-hls-test',
-    project_name='myproject',
-    input_data_tb=None,
-    output_data_tb=None,
-    precision='ap_fixed<16,6>',
-    **kwargs,
-):
-    """Converts a given (SymPy or string) expression to hls4ml model.
-
-    Args:
-        expr (str or sympy.Expr): Expression to convert. The variables in the expression should be in the form of
-            ``x0, x1, x2, ...``.
-        n_symbols (int, optional): Number of symbols in the expression. If not provided, the largest index of the variable
-            will be used as the number of symbols. Useful if number of inputs differs from the number of variables used
-            in the expression. Defaults to None.
-        lut_functions (dict, optional): LUT function definitions. Defaults to None.
-            The dictionary should have the form of::
-
-                {
-                    '<name>': {
-                        'math_func': '<func>',
-                        'table_size': <table_size>,
-                        'range_start': <range_start>,
-                        'range_end': <range_end>,
-                    }
-                }
-
-            where ``<name>`` is a given name that can be used with PySR, ``<func>`` is the math function to
-            approximate (`sin`, `cos`, `log`,...), ``<table_size>`` is the size of the lookup table, and
-            ``<range_start>`` and ``<range_end>`` are the ranges in which the function will be approximated. It is
-            **strongly** recommended to use a power-of-two as a range.
-        use_built_in_lut_functions (bool, optional): Use built-in sin/cos LUT functions. Defaults to False.
-        output_dir (str, optional): Output directory of the generated HLS
-            project. Defaults to 'my-hls-test'.
-        project_name (str, optional): Name of the HLS project.
-            Defaults to 'myproject'.
-        input_data_tb (str, optional): String representing the path of input data in .npy or .dat format that will be
-            used during csim and cosim.
- output_data_tb (str, optional): String representing the path of output data in .npy or .dat format that will be - used during csim and cosim. - precision (str, optional): Precision to use. Defaults to 'ap_fixed<16,6>'. - part (str, optional): The FPGA part. If set to `None` a default part of a backend will be used. - clock_period (int, optional): Clock period of the design. - Defaults to 5. - compiler (str, optional): Compiler to use, ``vivado_hls`` or ``vitis_hls``. Defaults to ``vivado_hls``. - hls_include_path (str, optional): Path to HLS inlcude files. If `None` the location will be inferred from the - location of the `compiler` used. If an empty string is passed the HLS math libraries won't be used during - compilation, meaning Python integration won't work unless all functions are LUT-based. Doesn't affect synthesis. - Defaults to None. - hls_libs_path (str, optional): Path to HLS libs files. If `None` the location will be inferred from the - location of the `compiler` used. Defaults to None. - - Returns: - ModelGraph: hls4ml model. - """ - import sympy - - if not isinstance(expr, (list, set)): - expr = [expr] - for i, e in enumerate(expr): - if not isinstance(e, sympy.Expr): - expr[i] = sympy.parsing.sympy_parser.parse_expr(e) - - if n_symbols is None: - n_symbols = 0 - for e in expr: - symbols = max([int(d.name.replace('x', '')) for d in e.free_symbols]) + 1 - if symbols > n_symbols: - n_symbols = symbols - - if lut_functions is None: - lut_functions = [] - else: - if isinstance(lut_functions, dict): - lut_functions = [ - LUTFunction(name, params['math_func'], params['range_start'], params['range_end'], params['table_size']) - for name, params in lut_functions.items() - ] - - layer_list = [] - - input_layer = {} - input_layer['name'] = 'x' - input_layer['class_name'] = 'InputLayer' - input_layer['input_shape'] = [n_symbols] - layer_list.append(input_layer) - - expr_layer = {} - expr_layer['name'] = 'expr1' - expr_layer['class_name'] = 'SymbolicExpression' - expr_layer['expression'] = [str(e) for e in expr] - expr_layer['n_symbols'] = n_symbols - expr_layer['lut_functions'] = lut_functions - expr_layer['use_built_in_luts'] = use_built_in_lut_functions - layer_list.append(expr_layer) - - config = create_config(output_dir=output_dir, project_name=project_name, backend='SymbolicExpression', **kwargs) - - # config['Expression'] = str(expr) - config['NSymbols'] = n_symbols - config['InputData'] = input_data_tb - config['OutputPredictions'] = output_data_tb - - config['HLSConfig'] = {'Model': {'Precision': precision, 'ReuseFactor': 1}} - - hls_model = ModelGraph(config, layer_list) - - return hls_model diff --git a/hls4ml/hls4ml/converters/keras/__init__.py b/hls4ml/hls4ml/converters/keras/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/hls4ml/hls4ml/converters/keras/convolution.py b/hls4ml/hls4ml/converters/keras/convolution.py deleted file mode 100644 index 39780f6..0000000 --- a/hls4ml/hls4ml/converters/keras/convolution.py +++ /dev/null @@ -1,95 +0,0 @@ -from hls4ml.converters.keras_to_hls import get_weights_data, keras_handler, parse_default_keras_layer -from hls4ml.converters.utils import compute_padding_1d, compute_padding_2d, parse_data_format - - -@keras_handler('Conv1D', 'SeparableConv1D', 'DepthwiseConv1D') -def parse_conv1d_layer(keras_layer, input_names, input_shapes, data_reader): - assert 'Conv1D' in keras_layer['class_name'] - - layer = parse_default_keras_layer(keras_layer, input_names) - - (layer['in_width'], layer['n_chan']) = 
parse_data_format(input_shapes[0], layer['data_format']) - - if layer['class_name'] in ['Conv1D', 'QConv1D']: - layer['weight_data'] = get_weights_data(data_reader, layer['name'], 'kernel') - elif layer['class_name'] in ['SeparableConv1D', 'QSeparableConv1D']: - layer['depthwise_data'], layer['pointwise_data'] = get_weights_data( - data_reader, layer['name'], ['depthwise_kernel', 'pointwise_kernel'] - ) - else: # DepthwiseConv1D - layer['depthwise_data'] = get_weights_data(data_reader, layer['name'], 'depthwise_kernel') - - layer['bias_data'] = get_weights_data(data_reader, layer['name'], 'bias') - - if 'filters' in keras_layer['config']: - layer['n_filt'] = keras_layer['config']['filters'] - else: - layer['n_filt'] = layer['n_chan'] - layer['filt_width'] = keras_layer['config']['kernel_size'][0] - layer['stride_width'] = keras_layer['config']['strides'][0] - layer['padding'] = keras_layer['config']['padding'] - - (layer['out_width'], layer['pad_left'], layer['pad_right']) = compute_padding_1d( - layer['padding'], layer['in_width'], layer['stride_width'], layer['filt_width'] - ) - - if layer['data_format'] == 'channels_last': - output_shape = [input_shapes[0][0], layer['out_width'], layer['n_filt']] - elif layer['data_format'] == 'channels_first': - output_shape = [input_shapes[0][0], layer['n_filt'], layer['out_width']] - - return layer, output_shape - - -@keras_handler('Conv2D', 'SeparableConv2D', 'DepthwiseConv2D') -def parse_conv2d_layer(keras_layer, input_names, input_shapes, data_reader): - assert 'Conv2D' in keras_layer['class_name'] - - layer = parse_default_keras_layer(keras_layer, input_names) - - (layer['in_height'], layer['in_width'], layer['n_chan']) = parse_data_format(input_shapes[0], layer['data_format']) - - if layer['class_name'] in ['Conv2D', 'QConv2D', 'QConv2DBatchnorm']: - layer['weight_data'] = get_weights_data(data_reader, layer['name'], 'kernel') - elif layer['class_name'] in ['SeparableConv2D', 'QSeparableConv2D']: - layer['depthwise_data'], layer['pointwise_data'] = get_weights_data( - data_reader, layer['name'], ['depthwise_kernel', 'pointwise_kernel'] - ) - else: # DepthwiseConv2D - layer['depthwise_data'] = get_weights_data(data_reader, layer['name'], 'depthwise_kernel') - - layer['bias_data'] = get_weights_data(data_reader, layer['name'], 'bias') - - if 'filters' in keras_layer['config']: - layer['n_filt'] = keras_layer['config']['filters'] - else: - layer['n_filt'] = layer['n_chan'] - layer['filt_height'] = keras_layer['config']['kernel_size'][0] - layer['filt_width'] = keras_layer['config']['kernel_size'][1] - layer['stride_height'] = keras_layer['config']['strides'][0] - layer['stride_width'] = keras_layer['config']['strides'][1] - layer['padding'] = keras_layer['config']['padding'] - - ( - layer['out_height'], - layer['out_width'], - layer['pad_top'], - layer['pad_bottom'], - layer['pad_left'], - layer['pad_right'], - ) = compute_padding_2d( - layer['padding'], - layer['in_height'], - layer['in_width'], - layer['stride_height'], - layer['stride_width'], - layer['filt_height'], - layer['filt_width'], - ) - - if layer['data_format'] == 'channels_first': - output_shape = [input_shapes[0][0], layer['n_filt'], layer['out_height'], layer['out_width']] - else: - output_shape = [input_shapes[0][0], layer['out_height'], layer['out_width'], layer['n_filt']] - - return layer, output_shape diff --git a/hls4ml/hls4ml/converters/keras/core.py b/hls4ml/hls4ml/converters/keras/core.py deleted file mode 100644 index f6119c0..0000000 --- 
a/hls4ml/hls4ml/converters/keras/core.py +++ /dev/null @@ -1,136 +0,0 @@ -from hls4ml.converters.keras_to_hls import get_weights_data, keras_handler, parse_default_keras_layer -from hls4ml.model.types import BinaryQuantizer, IntegerPrecisionType, TernaryQuantizer - - -@keras_handler('InputLayer') -def parse_input_layer(keras_layer, input_names, input_shapes, data_reader): - assert keras_layer['class_name'] == 'InputLayer' - - layer = parse_default_keras_layer(keras_layer, input_names) - - layer['input_shape'] = keras_layer['config']['batch_input_shape'][1:] - - dtype = keras_layer['config']['dtype'] - if dtype.startswith('int') or dtype.startswith('uint'): - layer['type_name'] = 'integer_input_t' - width = int(dtype[dtype.index('int') + 3 :]) - signed = not dtype.startswith('u') - layer['precision'] = IntegerPrecisionType(width=width, signed=signed) - # elif bool, q[u]int, ... - - output_shape = keras_layer['config']['batch_input_shape'] - - return layer, output_shape - - -dense_layers = ['Dense', 'BinaryDense', 'TernaryDense'] - - -@keras_handler(*dense_layers) -def parse_dense_layer(keras_layer, input_names, input_shapes, data_reader): - assert 'Dense' in keras_layer['class_name'] - - layer = parse_default_keras_layer(keras_layer, input_names) - - layer['weight_data'], layer['bias_data'] = get_weights_data(data_reader, layer['name'], ['kernel', 'bias']) - layer['n_in'] = layer['weight_data'].shape[0] - layer['n_out'] = layer['weight_data'].shape[1] - if 'Binary' in layer['class_name']: - layer['weight_quantizer'] = BinaryQuantizer(bits=2) - layer['bias_quantizer'] = BinaryQuantizer(bits=2) - elif 'Ternary' in layer['class_name']: - layer['weight_quantizer'] = TernaryQuantizer() - layer['bias_quantizer'] = TernaryQuantizer() - else: - layer['weight_quantizer'] = None - layer['bias_quantizer'] = None - output_shape = input_shapes[0][:] - output_shape[-1] = layer['n_out'] - - return layer, output_shape - - -activation_layers = ['Activation', 'LeakyReLU', 'ThresholdedReLU', 'ELU', 'PReLU', 'Softmax', 'ReLU'] - - -@keras_handler(*activation_layers) -def parse_activation_layer(keras_layer, input_names, input_shapes, data_reader): - assert keras_layer['class_name'] in activation_layers - - layer = parse_default_keras_layer(keras_layer, input_names) - - if layer['class_name'] != 'Activation': - layer['activation'] = layer['class_name'] - if layer['class_name'] == 'LeakyReLU': - layer['activ_param'] = keras_layer['config'].get('alpha', 0.3) - elif layer['class_name'] == 'ThresholdedReLU': - layer['activ_param'] = keras_layer['config'].get('theta', 1.0) - elif layer['class_name'] == 'ELU': - layer['activ_param'] = keras_layer['config'].get('alpha', 1.0) - elif layer['class_name'] == 'ReLU': - layer['class_name'] = 'Activation' - elif layer['class_name'] == 'PReLU': - layer['alpha_data'] = get_weights_data(data_reader, layer['name'], 'alpha') - - if layer['class_name'] == 'Activation' and layer['activation'] == 'softmax': - layer['class_name'] = 'Softmax' - if layer['class_name'] == 'Activation' and layer['activation'] == 'hard_sigmoid': - layer['class_name'] = 'HardActivation' - if layer['class_name'] == 'Softmax': - layer['axis'] = keras_layer['config'].get('axis', -1) - - return layer, [shape for shape in input_shapes[0]] - - -@keras_handler('BatchNormalization') -def parse_batchnorm_layer(keras_layer, input_names, input_shapes, data_reader): - assert 'BatchNormalization' in keras_layer['class_name'] or 'QConv2DBatchnorm' in keras_layer['class_name'] - - layer = 
parse_default_keras_layer(keras_layer, input_names) - - in_size = 1 - for dim in input_shapes[0][1:]: - in_size *= dim - layer['n_in'] = in_size - layer['n_out'] = layer['n_in'] - if len(input_shapes[0]) == 2: - layer['n_filt'] = -1 - elif len(input_shapes[0]) == 3: - layer['n_filt'] = input_shapes[0][2] - elif len(input_shapes[0]) == 4: - layer['n_filt'] = input_shapes[0][3] - - layer['use_gamma'] = keras_layer['config']['scale'] - if layer['use_gamma']: - layer['gamma_data'] = get_weights_data(data_reader, layer['name'], 'gamma') - else: - layer['gamma_data'] = 1 - - layer['use_beta'] = keras_layer['config']['center'] - if layer['use_beta']: - layer['beta_data'] = get_weights_data(data_reader, layer['name'], 'beta') - else: - layer['beta_data'] = 0 - - layer['mean_data'], layer['variance_data'] = get_weights_data( - data_reader, layer['name'], ['moving_mean', 'moving_variance'] - ) - - return layer, [shape for shape in input_shapes[0]] - - -@keras_handler('Embedding') -def parse_embedding_layer(keras_layer, input_names, input_shapes, data_reader): - assert 'Embedding' in keras_layer['class_name'] - - layer = parse_default_keras_layer(keras_layer, input_names) - - layer['n_in'] = input_shapes[0][1] - layer['vocab_size'] = keras_layer['config']['input_dim'] - layer['n_out'] = keras_layer['config']['output_dim'] - - layer['embeddings_data'] = get_weights_data(data_reader, layer['name'], 'embeddings') - - output_shape = input_shapes[0] + [layer['n_out']] - - return layer, output_shape diff --git a/hls4ml/hls4ml/converters/keras/graph.py b/hls4ml/hls4ml/converters/keras/graph.py deleted file mode 100644 index 5c5c224..0000000 --- a/hls4ml/hls4ml/converters/keras/graph.py +++ /dev/null @@ -1,71 +0,0 @@ -from hls4ml.converters.keras.core import TernaryQuantizer -from hls4ml.converters.keras_to_hls import get_weights_data, keras_handler, parse_default_keras_layer - - -@keras_handler('GarNet', 'GarNetStack') -def parse_garnet_layer(keras_layer, input_names, input_shapes, data_reader): - assert keras_layer['class_name'] in ['GarNet', 'GarNetStack'] - - if not keras_layer['config']['simplified']: - raise Exception('HLS GarNet is compatible only with keras GarNet with simplified=True') - if keras_layer['config']['output_activation'] not in [None, 'linear']: - raise Exception('HLS GarNet cannot have nonlinear output activation') - - layer = parse_default_keras_layer(keras_layer, input_names) - - layer['input_format'] = keras_layer['config']['input_format'] - if layer['input_format'] != 'xn': - raise NotImplementedError('HLS GarNet currently only implements signed inputs (input_format="xn")') - - layer['n_vertices'] = input_shapes[0][1] - layer['collapse'] = keras_layer['config']['collapse'] - layer['mean_by_nvert'] = keras_layer['config']['mean_by_nvert'] - if keras_layer['config']['quantize_transforms']: - layer['quantizer'] = TernaryQuantizer() - - layer['n_aggregators'] = keras_layer['config']['n_aggregators'] - layer['n_out_features'] = keras_layer['config']['n_filters'] # number of output features - layer['n_propagate'] = keras_layer['config']['n_propagate'] # number of latent features - - if layer['class_name'] == 'GarNet': - layer['n_in_features'] = input_shapes[0][2] - n_out_features = layer['n_out_features'] - - weights_source = [ - 'FLR_kernel', - 'FLR_bias', - 'S_kernel', - 'S_bias', - 'Fout_kernel', - 'Fout_bias', - ] - for weight in weights_source: - layer[weight + '_data'] = get_weights_data(data_reader, layer['name'], weight) - - elif layer['class_name'] == 'GarNetStack': - 
layer['n_sublayers'] = keras_layer['config']['n_sublayers'] - layer['n_in_features'] = [input_shapes[0][2]] - - for il in range(layer['n_sublayers']): - if il > 0: - layer['n_in_features'].append(layer['n_out_features'][il - 1]) - - weights_source = [ - f'FLR{il}_kernel', - f'FLR{il}_bias', - f'S{il}_kernel', - f'S{il}_bias', - f'Fout{il}_kernel', - f'Fout{il}_bias', - ] - for weight in weights_source: - layer[weight + '_data'] = get_weights_data(data_reader, layer['name'], weight) - - n_out_features = layer['n_out_features'][-1] - - if layer['collapse'] in ['mean', 'sum', 'max']: - output_shape = [input_shapes[0][0], n_out_features] - else: - output_shape = input_shapes[0][:2] + [n_out_features] - - return layer, output_shape diff --git a/hls4ml/hls4ml/converters/keras/merge.py b/hls4ml/hls4ml/converters/keras/merge.py deleted file mode 100644 index 1423308..0000000 --- a/hls4ml/hls4ml/converters/keras/merge.py +++ /dev/null @@ -1,32 +0,0 @@ -from hls4ml.converters.keras_to_hls import keras_handler, parse_default_keras_layer - -merge_layers = ['Add', 'Subtract', 'Multiply', 'Average', 'Maximum', 'Minimum', 'Concatenate', 'Dot'] - - -@keras_handler(*merge_layers) -def parse_merge_layer(keras_layer, input_names, input_shapes, data_reader): - assert keras_layer['class_name'] in merge_layers - - layer = parse_default_keras_layer(keras_layer, input_names) - - layer['op'] = layer['class_name'].lower() - - output_shape = input_shapes[0][:] - if layer['class_name'] == 'Concatenate': - rank = len(input_shapes[0][1:]) - if rank > 3: - raise Exception('ERROR: Concatenation of tensors with rank > 3 is not yet supported.') - layer['op'] = layer['class_name'].lower() + f'{rank}d' - layer['axis'] = keras_layer['config']['axis'] - output_shape[layer['axis']] += input_shapes[1][layer['axis']] - elif layer['class_name'] == 'Dot': - rank = len(input_shapes[0][1:]) - if rank > 1: - raise Exception('ERROR: Dot of tensors with rank > 1 is not yet supported.') - layer['op'] = layer['class_name'].lower() + f'{rank}d' - else: - layer['class_name'] = 'Merge' - if len(layer['inputs']) > 2: - raise Exception('ERROR: Merging more than two tensors is not yet supported.') - - return layer, output_shape diff --git a/hls4ml/hls4ml/converters/keras/model.py b/hls4ml/hls4ml/converters/keras/model.py deleted file mode 100644 index 3f22907..0000000 --- a/hls4ml/hls4ml/converters/keras/model.py +++ /dev/null @@ -1,45 +0,0 @@ -from hls4ml.converters.keras_to_hls import ( - KerasFileReader, - KerasModelReader, - KerasNestedFileReader, - keras_handler, - parse_default_keras_layer, - parse_keras_model, -) - -model_layers = ['Sequential', 'Functional'] - - -@keras_handler(*model_layers) -def parse_model_layer(keras_layer, input_names, input_shapes, data_reader): - assert keras_layer['class_name'] in model_layers - - layer = parse_default_keras_layer(keras_layer, input_names) - layer['class_name'] = 'LayerGroup' - - if isinstance(data_reader, KerasNestedFileReader): - # In the .h5 file, the paths don't go more than one level deep - nested_path = data_reader.nested_path - else: - nested_path = layer['name'] - - if isinstance(data_reader, KerasFileReader): - nested_reader = KerasNestedFileReader(data_reader, nested_path) - else: - nested_reader = KerasModelReader(data_reader.model.get_layer(layer['name'])) - - layer_list, input_layers, output_layers, output_shapes = parse_keras_model(keras_layer, nested_reader) - - if output_layers is None: - last_layer = layer_list[-1]['name'] - else: - last_layer = output_layers[0] - output_shape = 
output_shapes[last_layer] - - layer['layer_list'] = layer_list - layer['input_layers'] = input_layers if input_layers is not None else [] - layer['output_layers'] = output_layers if output_layers is not None else [] - layer['data_reader'] = nested_reader - layer['output_shape'] = output_shape - - return layer, output_shape diff --git a/hls4ml/hls4ml/converters/keras/pooling.py b/hls4ml/hls4ml/converters/keras/pooling.py deleted file mode 100644 index f0e0024..0000000 --- a/hls4ml/hls4ml/converters/keras/pooling.py +++ /dev/null @@ -1,93 +0,0 @@ -from hls4ml.converters.keras_to_hls import keras_handler, parse_default_keras_layer -from hls4ml.converters.utils import compute_padding_1d, compute_padding_2d, parse_data_format - -pooling_layers = ['MaxPooling1D', 'MaxPooling2D', 'AveragePooling1D', 'AveragePooling2D'] - - -@keras_handler(*pooling_layers) -def parse_pooling_layer(keras_layer, input_names, input_shapes, data_reader): - assert 'Pooling' in keras_layer['class_name'] - - layer = parse_default_keras_layer(keras_layer, input_names) - - if int(layer['class_name'][-2]) == 1: - (layer['n_in'], layer['n_filt']) = parse_data_format(input_shapes[0], layer['data_format']) - - layer['pool_width'] = keras_layer['config']['pool_size'][0] - layer['stride_width'] = keras_layer['config']['strides'][0] - layer['padding'] = keras_layer['config']['padding'] - - (layer['n_out'], layer['pad_left'], layer['pad_right']) = compute_padding_1d( - layer['padding'], layer['n_in'], layer['stride_width'], layer['pool_width'] - ) - - if layer['data_format'] == 'channels_last': - output_shape = [input_shapes[0][0], layer['n_out'], layer['n_filt']] - elif layer['data_format'] == 'channels_first': - output_shape = [input_shapes[0][0], layer['n_filt'], layer['n_out']] - elif int(layer['class_name'][-2]) == 2: - (layer['in_height'], layer['in_width'], layer['n_filt']) = parse_data_format(input_shapes[0], layer['data_format']) - - layer['stride_height'] = keras_layer['config']['strides'][0] - layer['stride_width'] = keras_layer['config']['strides'][1] - layer['pool_height'] = keras_layer['config']['pool_size'][0] - layer['pool_width'] = keras_layer['config']['pool_size'][1] - layer['padding'] = keras_layer['config']['padding'] - - ( - layer['out_height'], - layer['out_width'], - layer['pad_top'], - layer['pad_bottom'], - layer['pad_left'], - layer['pad_right'], - ) = compute_padding_2d( - layer['padding'], - layer['in_height'], - layer['in_width'], - layer['stride_height'], - layer['stride_width'], - layer['pool_height'], - layer['pool_width'], - ) - - if layer['data_format'] == 'channels_last': - output_shape = [input_shapes[0][0], layer['out_height'], layer['out_width'], layer['n_filt']] - elif layer['data_format'] == 'channels_first': - output_shape = [input_shapes[0][0], layer['n_filt'], layer['out_height'], layer['out_width']] - - return layer, output_shape - - -global_pooling_layers = ['GlobalMaxPooling1D', 'GlobalMaxPooling2D', 'GlobalAveragePooling1D', 'GlobalAveragePooling2D'] - - -@keras_handler(*global_pooling_layers) -def parse_global_pooling_layer(keras_layer, input_names, input_shapes, data_reader): - assert 'Pooling' in keras_layer['class_name'] - - layer = parse_default_keras_layer(keras_layer, input_names) - layer['keepdims'] = keras_layer['config']['keepdims'] - - if int(layer['class_name'][-2]) == 1: - (layer['n_in'], layer['n_filt']) = parse_data_format(input_shapes[0], layer['data_format']) - - if layer['keepdims']: - if layer['data_format'] == 'channels_last': - output_shape = [input_shapes[0][0], 
1, layer['n_filt']] - elif layer['data_format'] == 'channels_first': - output_shape = [input_shapes[0][0], layer['n_filt'], 1] - else: - output_shape = [input_shapes[0][0], layer['n_filt']] - elif int(layer['class_name'][-2]) == 2: - (layer['in_height'], layer['in_width'], layer['n_filt']) = parse_data_format(input_shapes[0], layer['data_format']) - - if layer['keepdims']: - if layer['data_format'] == 'channels_last': - output_shape = [input_shapes[0][0], 1, 1, layer['n_filt']] - elif layer['data_format'] == 'channels_first': - output_shape = [input_shapes[0][0], layer['n_filt'], 1, 1] - else: - output_shape = [input_shapes[0][0], layer['n_filt']] - - return layer, output_shape diff --git a/hls4ml/hls4ml/converters/keras/qkeras.py b/hls4ml/hls4ml/converters/keras/qkeras.py deleted file mode 100644 index ba1401c..0000000 --- a/hls4ml/hls4ml/converters/keras/qkeras.py +++ /dev/null @@ -1,177 +0,0 @@ -from qkeras.quantizers import get_quantizer - -from hls4ml.converters.keras.convolution import parse_conv1d_layer, parse_conv2d_layer -from hls4ml.converters.keras.core import parse_batchnorm_layer, parse_dense_layer -from hls4ml.converters.keras_to_hls import keras_handler, parse_default_keras_layer -from hls4ml.model.types import FixedPrecisionType, QKerasBinaryQuantizer, QKerasPO2Quantizer, QKerasQuantizer - - -def get_quantizer_from_config(keras_layer, quantizer_var): - quantizer_config = keras_layer['config'][f'{quantizer_var}_quantizer'] - if keras_layer['class_name'] == 'QBatchNormalization': - return QKerasQuantizer(quantizer_config) - elif 'binary' in quantizer_config['class_name']: - return QKerasBinaryQuantizer(quantizer_config, xnor=(quantizer_var == 'kernel')) - elif quantizer_config['class_name'] == 'quantized_po2': - return QKerasPO2Quantizer(quantizer_config) - else: - return QKerasQuantizer(quantizer_config) - - -@keras_handler('QDense') -def parse_qdense_layer(keras_layer, input_names, input_shapes, data_reader): - layer, output_shape = parse_dense_layer(keras_layer, input_names, input_shapes, data_reader) - - layer['weight_quantizer'] = get_quantizer_from_config(keras_layer, 'kernel') - if keras_layer['config']['bias_quantizer'] is not None: - layer['bias_quantizer'] = get_quantizer_from_config(keras_layer, 'bias') - else: - layer['bias_quantizer'] = None - - return layer, output_shape - - -@keras_handler('QConv1D', 'QConv2D') -def parse_qconv_layer(keras_layer, input_names, input_shapes, data_reader): - assert 'QConv' in keras_layer['class_name'] - - if '1D' in keras_layer['class_name']: - layer, output_shape = parse_conv1d_layer(keras_layer, input_names, input_shapes, data_reader) - elif '2D' in keras_layer['class_name']: - layer, output_shape = parse_conv2d_layer(keras_layer, input_names, input_shapes, data_reader) - - layer['weight_quantizer'] = get_quantizer_from_config(keras_layer, 'kernel') - if keras_layer['config']['bias_quantizer'] is not None: - layer['bias_quantizer'] = get_quantizer_from_config(keras_layer, 'bias') - else: - layer['bias_quantizer'] = None - - return layer, output_shape - - -@keras_handler('QDepthwiseConv2D') -def parse_qdepthwiseqconv_layer(keras_layer, input_names, input_shapes, data_reader): - layer, output_shape = parse_conv2d_layer(keras_layer, input_names, input_shapes, data_reader) - - layer['depthwise_quantizer'] = get_quantizer_from_config(keras_layer, 'depthwise') - - if keras_layer['config']['bias_quantizer'] is not None: - layer['bias_quantizer'] = get_quantizer_from_config(keras_layer, 'bias') - else: - layer['bias_quantizer'] = None - 
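The quantizer lookups in these QKeras handlers key off the serialized layer config; for a QDense layer the relevant entries look roughly as follows (all values are illustrative)::

    keras_layer = {
        'class_name': 'QDense',
        'config': {
            'kernel_quantizer': {'class_name': 'quantized_bits',
                                 'config': {'bits': 8, 'integer': 0}},
            'bias_quantizer': None,  # triggers the bias_quantizer-is-None branch
        },
    }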
- return layer, output_shape - - -@keras_handler('QSeparableConv1D', 'QSeparableConv2D') -def parse_qsepconv_layer(keras_layer, input_names, input_shapes, data_reader): - assert 'QSeparableConv' in keras_layer['class_name'] - - if '1D' in keras_layer['class_name']: - layer, output_shape = parse_conv1d_layer(keras_layer, input_names, input_shapes, data_reader) - elif '2D' in keras_layer['class_name']: - layer, output_shape = parse_conv2d_layer(keras_layer, input_names, input_shapes, data_reader) - - layer['depthwise_quantizer'] = get_quantizer_from_config(keras_layer, 'depthwise') - layer['pointwise_quantizer'] = get_quantizer_from_config(keras_layer, 'pointwise') - - if keras_layer['config']['bias_quantizer'] is not None: - layer['bias_quantizer'] = get_quantizer_from_config(keras_layer, 'bias') - else: - layer['bias_quantizer'] = None - - return layer, output_shape - - -@keras_handler('QActivation') -def parse_qactivation_layer(keras_layer, input_names, input_shapes, data_reader): - assert keras_layer['class_name'] == 'QActivation' - supported_activations = [ - 'quantized_relu', - 'quantized_tanh', - 'binary_tanh', - 'ternary_tanh', - 'quantized_sigmoid', - 'quantized_bits', - 'binary', - 'ternary', - ] - - layer = parse_default_keras_layer(keras_layer, input_names) - - activation_config = keras_layer['config']['activation'] - quantizer_obj = get_quantizer(activation_config) - activation_config = {} - # some activations are classes - if hasattr(quantizer_obj, 'get_config'): - activation_config['class_name'] = quantizer_obj.__class__.__name__ - if activation_config['class_name'] == 'ternary' or activation_config['class_name'] == 'binary': - activation_config['class_name'] += '_tanh' - activation_config['config'] = quantizer_obj.get_config() - # some activation quantizers are just functions with no config - else: - activation_config['config'] = {} - if 'binary' in quantizer_obj.__name__: - activation_config['class_name'] = 'binary_tanh' - activation_config['config']['bits'] = 1 - activation_config['config']['integer'] = 1 - elif 'ternary' in quantizer_obj.__name__: - activation_config['class_name'] = 'ternary_tanh' - activation_config['config']['bits'] = 2 - activation_config['config']['integer'] = 2 - else: - activation_config['class_name'] = 'unknown' - - if activation_config['class_name'] not in supported_activations: - raise Exception('Unsupported QKeras activation: {}'.format(activation_config['class_name'])) - - if activation_config['class_name'] == 'quantized_bits': - activation_config['class_name'] = 'linear' - - if activation_config['class_name'] == 'ternary_tanh': - layer['class_name'] = 'TernaryTanh' - layer['threshold'] = activation_config.get('config', {}).get('threshold', 0.33) - if layer['threshold'] is None: - layer['threshold'] = 0.33 # the default ternary tanh threshold for QKeras - layer['activation'] = 'ternary_tanh' - elif ( - activation_config['class_name'] == 'quantized_sigmoid' - and not activation_config['config'].get('use_real_sigmoid', False) - ) or ( - activation_config['class_name'] == 'quantized_tanh' and not activation_config['config'].get('use_real_tanh', False) - ): - layer['class_name'] = 'HardActivation' - layer['slope'] = 0.5 # the default values in QKeras - layer['shift'] = 0.5 - # Quartus seems to have trouble if the width is 1. 
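The slope/shift pair set just above encodes the hard variants as y = clip(slope * x + shift, 0, 1); a quick numeric check of the 0.5/0.5 defaults for the sigmoid case::

    import numpy as np

    def hard_sigmoid(x, slope=0.5, shift=0.5):
        return np.clip(slope * x + shift, 0.0, 1.0)

    assert hard_sigmoid(0.0) == 0.5
    assert hard_sigmoid(-2.0) == 0.0 and hard_sigmoid(2.0) == 1.0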
- layer['slope_prec'] = FixedPrecisionType(width=2, integer=0, signed=False) - layer['shift_prec'] = FixedPrecisionType(width=2, integer=0, signed=False) - layer['activation'] = activation_config['class_name'].replace('quantized_', 'hard_') - else: - layer['class_name'] = 'Activation' - layer['activation'] = activation_config['class_name'].replace('quantized_', '') - - layer['activation_quantizer'] = activation_config - return layer, [shape for shape in input_shapes[0]] - - -@keras_handler('QBatchNormalization') -def parse_qbatchnorm_layer(keras_layer, input_names, input_shapes, data_reader): - layer, output_shape = parse_batchnorm_layer(keras_layer, input_names, input_shapes, data_reader) - - layer['mean_quantizer'] = get_quantizer_from_config(keras_layer, 'mean') - layer['variance_quantizer'] = get_quantizer_from_config(keras_layer, 'variance') - layer['beta_quantizer'] = get_quantizer_from_config(keras_layer, 'beta') - layer['gamma_quantizer'] = get_quantizer_from_config(keras_layer, 'gamma') - - return layer, output_shape - - -@keras_handler('QConv2DBatchnorm') -def parse_qconv2dbatchnorm_layer(keras_layer, input_names, input_shapes, data_reader): - intermediate_shape = list() - conv_layer, shape_qconv = parse_qconv_layer(keras_layer, input_names, input_shapes, data_reader) - intermediate_shape.append(shape_qconv) - temp_shape = intermediate_shape - batch_layer, out_shape = parse_batchnorm_layer(keras_layer, input_names, temp_shape, data_reader) - return {**conv_layer, **batch_layer}, out_shape diff --git a/hls4ml/hls4ml/converters/keras/recurrent.py b/hls4ml/hls4ml/converters/keras/recurrent.py deleted file mode 100644 index 36d7da1..0000000 --- a/hls4ml/hls4ml/converters/keras/recurrent.py +++ /dev/null @@ -1,50 +0,0 @@ -from hls4ml.converters.keras_to_hls import get_weights_data, keras_handler, parse_default_keras_layer - -rnn_layers = ['SimpleRNN', 'LSTM', 'GRU'] - - -@keras_handler(*rnn_layers) -def parse_rnn_layer(keras_layer, input_names, input_shapes, data_reader): - assert keras_layer['class_name'] in rnn_layers - - layer = parse_default_keras_layer(keras_layer, input_names) - - layer['return_sequences'] = keras_layer['config']['return_sequences'] - layer['return_state'] = keras_layer['config']['return_state'] - - if layer['class_name'] != 'SimpleRNN': - layer['recurrent_activation'] = keras_layer['config']['recurrent_activation'] - - layer['time_major'] = keras_layer['config']['time_major'] if 'time_major' in keras_layer['config'] else False - - # TODO Should we handle time_major? 
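For context on the check below: Keras's time-major layout swaps the first two axes, which would invalidate the shape indexing this handler relies on::

    # batch-major (the only layout supported here): (batch, timesteps, features)
    #   -> n_timesteps = shape[1], n_in = shape[2], as read below
    # time-major (rejected below):                  (timesteps, batch, features)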
-    if layer['time_major']:
-        raise Exception('Time-major format is not supported by hls4ml')
-
-    layer['n_timesteps'] = input_shapes[0][1]
-    layer['n_in'] = input_shapes[0][2]
-
-    layer['n_out'] = keras_layer['config']['units']
-
-    layer['weight_data'], layer['recurrent_weight_data'], layer['bias_data'] = get_weights_data(
-        data_reader, layer['name'], ['kernel', 'recurrent_kernel', 'bias']
-    )
-
-    if layer['class_name'] == 'GRU':
-        layer['apply_reset_gate'] = 'after' if keras_layer['config']['reset_after'] else 'before'
-
-        # biases array is actually a 2-dim array of arrays (bias + recurrent bias)
-        # both arrays have shape: n_units * 3 (z, r, h_cand)
-        biases = layer['bias_data']
-        layer['bias_data'] = biases[0]
-        layer['recurrent_bias_data'] = biases[1]
-
-    if layer['return_sequences']:
-        output_shape = [input_shapes[0][0], layer['n_timesteps'], layer['n_out']]
-    else:
-        output_shape = [input_shapes[0][0], layer['n_out']]
-
-    if layer['return_state']:
-        raise Exception('"return_state" of {} layer is not yet supported.'.format(layer['class_name']))
-
-    return layer, output_shape
diff --git a/hls4ml/hls4ml/converters/keras/reshape.py b/hls4ml/hls4ml/converters/keras/reshape.py
deleted file mode 100644
index bd9d519..0000000
--- a/hls4ml/hls4ml/converters/keras/reshape.py
+++ /dev/null
@@ -1,92 +0,0 @@
-import numpy as np
-
-from hls4ml.converters.keras_to_hls import keras_handler, parse_default_keras_layer
-from hls4ml.converters.utils import parse_data_format
-
-
-@keras_handler('Flatten')
-def parse_flatten_layer(keras_layer, input_names, input_shapes, data_reader):
-    assert keras_layer["class_name"] == 'Flatten'
-
-    layer = parse_default_keras_layer(keras_layer, input_names)
-
-    layer['class_name'] = 'Reshape'
-    layer['target_shape'] = [input_shapes[0][0], np.prod(input_shapes[0][1:])]
-    output_shape = layer['target_shape']
-
-    return layer, output_shape
-
-
-@keras_handler('Reshape')
-def parse_reshape_layer(keras_layer, input_names, input_shapes, data_reader):
-    assert keras_layer["class_name"] == 'Reshape'
-
-    layer = parse_default_keras_layer(keras_layer, input_names)
-
-    layer['target_shape'] = keras_layer['config']['target_shape']
-    output_shape = input_shapes[0][:1] + keras_layer['config']['target_shape']
-
-    return layer, output_shape
-
-
-@keras_handler('UpSampling1D')
-def parse_upsampling1d_layer(keras_layer, input_names, input_shapes, data_reader):
-    assert 'UpSampling' in keras_layer['class_name']
-
-    layer = parse_default_keras_layer(keras_layer, input_names)
-
-    layer['in_height'] = 1
-    (layer['in_width'], layer['n_chan']) = parse_data_format(input_shapes[0], layer['data_format'])
-
-    layer['algorithm'] = 'nearest'
-
-    layer['width_factor'] = keras_layer['config']['size']
-
-    layer['out_height'] = 1
-    layer['out_width'] = layer['in_width'] * layer['width_factor']
-
-    if layer['data_format'] == 'channels_first':
-        output_shape = [input_shapes[0][0], layer['n_chan'], layer['out_width']]
-    else:
-        output_shape = [input_shapes[0][0], layer['out_width'], layer['n_chan']]
-
-    return layer, output_shape
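The 1D handler above treats its input as a height-1 image and scales only the width; the 2D handler below applies the two factors independently. A worked example of the width arithmetic::

    # UpSampling1D, size=3, channels_last input of shape (None, 8, n_chan):
    in_width, width_factor = 8, 3
    out_width = in_width * width_factor  # -> 24, output shape (None, 24, n_chan)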
-
-
-@keras_handler('UpSampling2D')
-def parse_upsampling2d_layer(keras_layer, input_names, input_shapes, data_reader):
-    assert 'UpSampling2D' in keras_layer['class_name']
-
-    layer = parse_default_keras_layer(keras_layer, input_names)
-
-    (layer['in_height'], layer['in_width'], layer['n_chan']) = parse_data_format(input_shapes[0], layer['data_format'])
-
-    layer['algorithm'] = keras_layer['config']['interpolation']
-
-    layer['height_factor'] = keras_layer['config']['size'][0]
-    layer['width_factor'] = keras_layer['config']['size'][1]
-
-    layer['out_height'] = layer['in_height'] * layer['height_factor']
-    layer['out_width'] = layer['in_width'] * layer['width_factor']
-
-    if layer['data_format'] == 'channels_first':
-        output_shape = [input_shapes[0][0], layer['n_chan'], layer['out_height'], layer['out_width']]
-    else:
-        output_shape = [input_shapes[0][0], layer['out_height'], layer['out_width'], layer['n_chan']]
-
-    return layer, output_shape
-
-
-@keras_handler('Permute')
-def parse_permute_layer(keras_layer, input_names, input_shapes, data_reader):
-    assert keras_layer['class_name'] == 'Permute'
-
-    layer = parse_default_keras_layer(keras_layer, input_names)
-
-    layer['class_name'] = 'Transpose'
-    dims = keras_layer['config']['dims']
-    layer['perm'] = [dim - 1 for dim in keras_layer['config']['dims']]
-
-    output_shape = [input_shapes[0][0]] + [input_shapes[0][s] for s in dims]
-
-    return layer, output_shape
diff --git a/hls4ml/hls4ml/converters/keras/reshaping.py b/hls4ml/hls4ml/converters/keras/reshaping.py
deleted file mode 100644
index b6c0052..0000000
--- a/hls4ml/hls4ml/converters/keras/reshaping.py
+++ /dev/null
@@ -1,98 +0,0 @@
-import collections.abc
-
-from hls4ml.converters.keras_to_hls import keras_handler, parse_default_keras_layer
-
-
-@keras_handler('ZeroPadding1D')
-def parse_zeropadding1d_layer(keras_layer, input_names, input_shapes, data_reader):
-    assert keras_layer['class_name'] == 'ZeroPadding1D'
-
-    layer = parse_default_keras_layer(keras_layer, input_names)
-
-    padding = keras_layer['config']['padding']
-    if isinstance(padding, int):
-        layer['pad_left'] = padding
-        layer['pad_right'] = padding
-    elif isinstance(padding, collections.abc.Sequence):
-        layer['pad_left'] = padding[0]
-        layer['pad_right'] = padding[1]
-
-    if layer['data_format'] == 'channels_first':
-        output_shape = [
-            input_shapes[0][0],  # Batch
-            input_shapes[0][1],  # Channels
-            layer['pad_left'] + input_shapes[0][2] + layer['pad_right'],  # Width
-        ]
-        layer['out_width'] = output_shape[2]
-        layer['n_chan'] = output_shape[1]
-
-        layer['in_width'] = input_shapes[0][2]
-    else:
-        output_shape = [
-            input_shapes[0][0],  # Batch
-            layer['pad_left'] + input_shapes[0][1] + layer['pad_right'],  # Width
-            input_shapes[0][2],  # Channels
-        ]
-        layer['out_width'] = output_shape[1]
-        layer['n_chan'] = output_shape[2]
-
-        layer['in_width'] = input_shapes[0][1]
-
-    return layer, output_shape
-
-
-@keras_handler('ZeroPadding2D')
-def parse_zeropadding2d_layer(keras_layer, input_names, input_shapes, data_reader):
-    assert keras_layer['class_name'] == 'ZeroPadding2D'
-
-    layer = parse_default_keras_layer(keras_layer, input_names)
-
-    padding = keras_layer['config']['padding']
-    if isinstance(padding, int):
-        layer['pad_top'] = padding
-        layer['pad_bottom'] = padding
-        layer['pad_left'] = padding
-        layer['pad_right'] = padding
-    elif isinstance(padding, collections.abc.Sequence):
-        height_pad, width_pad = padding
-        if isinstance(height_pad, collections.abc.Sequence):
-            layer['pad_top'] = height_pad[0]
-            layer['pad_bottom'] = height_pad[1]
-        else:
-            layer['pad_top'] = height_pad
-            layer['pad_bottom'] = height_pad
-        if isinstance(width_pad, collections.abc.Sequence):
-            layer['pad_left'] = width_pad[0]
-            layer['pad_right'] = width_pad[1]
-        else:
-            layer['pad_left'] = width_pad
-            layer['pad_right'] = width_pad
-
-    if layer['data_format'] == 'channels_first':
-        output_shape = [
-            input_shapes[0][0],  # Batch
-            input_shapes[0][1],  # Channels
-            layer['pad_top'] + input_shapes[0][2] + layer['pad_bottom'],  # Height
-
layer['pad_left'] + input_shapes[0][3] + layer['pad_right'], # Width - ] - layer['out_height'] = output_shape[2] - layer['out_width'] = output_shape[3] - layer['n_chan'] = output_shape[1] - - layer['in_height'] = input_shapes[0][2] - layer['in_width'] = input_shapes[0][3] - else: - output_shape = [ - input_shapes[0][0], # Batch - layer['pad_top'] + input_shapes[0][1] + layer['pad_bottom'], # Height - layer['pad_left'] + input_shapes[0][2] + layer['pad_right'], # Width - input_shapes[0][3], # Channels - ] - layer['out_height'] = output_shape[1] - layer['out_width'] = output_shape[2] - layer['n_chan'] = output_shape[3] - - layer['in_height'] = input_shapes[0][1] - layer['in_width'] = input_shapes[0][2] - - return layer, output_shape diff --git a/hls4ml/hls4ml/converters/keras_to_hls.py b/hls4ml/hls4ml/converters/keras_to_hls.py deleted file mode 100644 index 1d2376f..0000000 --- a/hls4ml/hls4ml/converters/keras_to_hls.py +++ /dev/null @@ -1,336 +0,0 @@ -import json - -import h5py - -from hls4ml.model import ModelGraph - -MAXMULT = 4096 - - -class KerasReader: - def get_weights_data(self, layer_name, var_name): - raise NotImplementedError - - -class KerasFileReader(KerasReader): - def __init__(self, config): - self.config = config - self.h5file = h5py.File(config['KerasH5'], mode='r') - - def __del__(self): - if self.h5file: - self.h5file.close() - - def _find_data(self, layer_name, var_name): - def h5_visitor_func(name): - if var_name in name: - return name - - if 'model_weights' in list(self.h5file.keys()): # h5 file comes from model.save() - layer_path = f'model_weights/{layer_name}' - else: - layer_path = layer_name - - data_path = self.h5file[layer_path].visit(h5_visitor_func) - if data_path: - return self.h5file[f'/{layer_path}/{data_path}'] - else: - return None - - def get_weights_data(self, layer_name, var_name): - data = self._find_data(layer_name, var_name) - if data: - return data[()] - else: - return None - - -class KerasNestedFileReader(KerasFileReader): - def __init__(self, data_reader, nested_path): - super().__init__(data_reader.config) - self.nested_path = nested_path - - def _find_data(self, layer_name, var_name): - def h5_visitor_func(name): - if var_name in name: - return name - - layer_path = f'model_weights/{self.nested_path}/{layer_name}' - - data_path = self.h5file[layer_path].visit(h5_visitor_func) - if data_path: - return self.h5file[f'/{layer_path}/{data_path}'] - else: - return None - - -class KerasModelReader(KerasReader): - def __init__(self, keras_model): - self.model = keras_model - - def get_weights_data(self, layer_name, var_name): - layer = self.model.get_layer(layer_name) - for i, w in enumerate(layer.weights): - if var_name in w.name: - try: - return w.numpy() # TF 2.x - except Exception: - return layer.get_weights()[i] # TF 1.x - - return None - - -def get_weights_data(data_reader, layer_name, var_name): - if not isinstance(var_name, (list, tuple)): - var_name = [var_name] - - data = [data_reader.get_weights_data(layer_name, var) for var in var_name] - - if len(data) == 1: - return data[0] - else: - return (*data,) - - -layer_handlers = {} - - -def register_keras_layer_handler(layer_cname, handler_func): - """Register a handler function for the given layer class name. 
- - The handler function should have the following signature: - parse_func(keras_layer, input_names, input_shapes, data_reader, config): - - Args: - layer_cname (str): The name of Keras layer (the 'class_name' property in the layer's config) - handler_func (callable): The handler function - - Raises: - Exception: If the layer class has already been registered. - """ - if layer_cname in layer_handlers: - raise Exception(f'Layer {layer_cname} already registered') - else: - layer_handlers[layer_cname] = handler_func - - -def get_supported_keras_layers(): - """Returns the list of Keras layers that the converter can parse. - - The returned list contains all Keras layers that can be parsed into the hls4ml internal representation. Support for - computation of these layers may vary across hls4ml backends and conversion configuration. - - Returns: - list: The names of supported Keras layers. - """ - return list(layer_handlers.keys()) - - -def keras_handler(*args): - def decorator(function): - function.handles = [arg for arg in args] - return function - - return decorator - - -def parse_default_keras_layer(keras_layer, input_names): - layer = {} - - # Extract name for finding weights and biases - layer['name'] = keras_layer['config']['name'] - layer['class_name'] = keras_layer['class_name'] - if input_names is not None: - layer['inputs'] = input_names - - layer['data_format'] = keras_layer['config'].get('data_format', 'channels_last') - - if 'activation' in keras_layer['config']: - layer['activation'] = keras_layer['config']['activation'] - if 'epsilon' in keras_layer['config']: - layer['epsilon'] = keras_layer['config']['epsilon'] - if 'use_bias' in keras_layer['config']: - layer['use_bias'] = keras_layer['config']['use_bias'] - - return layer - - -def get_model_arch(config): - if 'KerasModel' in config: - # Model instance passed in config from API - keras_model = config['KerasModel'] - if isinstance(keras_model, str): - from tensorflow.keras.models import load_model - - keras_model = load_model(keras_model) - model_arch = json.loads(keras_model.to_json()) - reader = KerasModelReader(keras_model) - elif 'KerasJson' in config: - # Extract model architecture from json - with open(config['KerasJson']) as json_file: - model_arch = json.load(json_file) - reader = KerasFileReader(config) - elif 'KerasH5' in config: - # Model arch and weights are in H5 file (from model.save() function) - with h5py.File(config['KerasH5'], mode='r') as h5file: - # Load the configuration from h5 using json's decode - model_arch = h5file.attrs.get('model_config') - if model_arch is None: - raise ValueError('No model found in config file.') - else: - # model_arch is string by default since h5py 3.0.0, keeping this condition for compatibility. 
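# Editorial sketch of the three entry points handled above (file names illustrative):
#   get_model_arch({'KerasModel': model})                          -> in-memory tf.keras model object
#   get_model_arch({'KerasJson': 'arch.json', 'KerasH5': 'w.h5'})  -> architecture JSON plus weights file
#   get_model_arch({'KerasH5': 'model.h5'})                        -> arch and weights from model.save()
# Each branch yields the (model_arch, reader) pair consumed by parse_keras_model below.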
- if isinstance(model_arch, bytes): - model_arch = model_arch.decode('utf-8') - model_arch = json.loads(model_arch) - reader = KerasFileReader(config) - else: - raise ValueError('No model found in config file.') - - return model_arch, reader - - -def parse_keras_model(model_arch, reader): - # This is a list of dictionaries to hold all the layer info we need to generate HLS - layer_list = [] - - # Define layers to skip for conversion to HLS - skip_layers = ['Dropout'] - # Activation layers - activation_layers = [ - 'Activation', - 'LeakyReLU', - 'ThresholdedReLU', - 'ELU', - 'PReLU', - 'Softmax', - 'TernaryTanh', - 'HardActivation', - ] - # Recurrent layers - recurrent_layers = ['SimpleRNN', 'LSTM', 'GRU'] - # All supported layers - supported_layers = get_supported_keras_layers() + skip_layers - - # Map inputs of skipped and split (activation) layers - inputs_map = {} - - # Loop through layers - layer_counter = 0 - - input_layers = None - output_layers = None - - layer_config = None - if model_arch['class_name'] == 'Sequential': - print('Interpreting Sequential') - layer_config = model_arch['config'] - if 'layers' in layer_config: # Newer Keras versions have 'layers' in 'config' key - layer_config = layer_config['layers'] - # Sequential doesn't have InputLayer in TF < 2.3 (Keras 2.4.0) - if layer_config[0]['class_name'] != 'InputLayer': - input_layer = {} - input_layer['name'] = 'input1' - input_layer['class_name'] = 'InputLayer' - input_layer['input_shape'] = layer_config[0]['config']['batch_input_shape'][1:] - layer_list.append(input_layer) - print('Input shape:', input_layer['input_shape']) - elif model_arch['class_name'] in ['Model', 'Functional']: # TF >= 2.3 calls it 'Functional' API - print('Interpreting Model') - layer_config = model_arch['config']['layers'] - input_layers = [inp[0] for inp in model_arch['config']['input_layers']] - output_layers = [out[0] for out in model_arch['config']['output_layers']] - - # Get input shape and check for unsupported layer type - for keras_layer in layer_config: - if keras_layer['class_name'] not in supported_layers: - raise Exception('ERROR: Unsupported layer type: {}'.format(keras_layer['class_name'])) - - output_shapes = {} - output_shape = None - - print('Topology:') - for keras_layer in layer_config: - if 'batch_input_shape' in keras_layer['config']: - if 'inbound_nodes' in keras_layer and len(keras_layer['inbound_nodes']) > 0: - input_shapes = [output_shapes[inbound_node[0]] for inbound_node in keras_layer['inbound_nodes'][0]] - else: - input_shapes = [keras_layer['config']['batch_input_shape']] - else: - if 'inbound_nodes' in keras_layer: - input_shapes = [output_shapes[inbound_node[0]] for inbound_node in keras_layer['inbound_nodes'][0]] - else: - # Sequential model, so output_shape from the previous layer is still valid - input_shapes = [output_shape] - - keras_class = keras_layer['class_name'] - - if keras_class in skip_layers: - if 'inbound_nodes' in keras_layer: - name = keras_layer['config']['name'] - # Currently supported skipped layers have only one input - parent_input = keras_layer['inbound_nodes'][0][0][0] - # Skipped layers can follow each other (e.g., Dropout -> Flatten) - inputs_map[name] = inputs_map.get(parent_input, parent_input) - - output_shapes[keras_layer['config']['name']] = input_shapes[0] - - continue - - if keras_class in supported_layers: - layer_counter = layer_counter + 1 - - # Extract inbound nodes - if 'inbound_nodes' in keras_layer and len(keras_layer['inbound_nodes']) > 0: - input_names = 
[inputs_map.get(inp[0], inp[0]) for inp in keras_layer['inbound_nodes'][0]] - else: - input_names = None - - layer, output_shape = layer_handlers[keras_class](keras_layer, input_names, input_shapes, reader) - - print( - 'Layer name: {}, layer type: {}, input shapes: {}, output shape: {}'.format( - layer['name'], layer['class_name'], input_shapes, output_shape - ) - ) - layer_list.append(layer) - if 'activation' in layer and layer['class_name'] not in activation_layers + recurrent_layers: # + qkeras_layers: - act_layer = {} - # Workaround for QKeras activations passed as an argument - if isinstance(layer['activation'], dict): - act_details = layer['activation'] - act_layer['class_name'] = 'QActivation' - act_layer['config'] = { - 'name': layer['name'] + '_' + act_details['class_name'], - 'activation': act_details, - } - act_layer, output_shape = layer_handlers['QActivation'](act_layer, None, [output_shape], reader) - else: - act_layer['name'] = layer['name'] + '_' + layer['activation'] - act_layer['activation'] = layer['activation'] - if 'activ_param' in layer: - act_layer['activ_param'] = layer['activ_param'] - act_layer['class_name'] = layer['activation'] - elif layer['activation'] == 'softmax': - act_layer['class_name'] = 'Softmax' - act_layer['axis'] = -1 - else: - act_layer['class_name'] = 'Activation' - inputs_map[layer['name']] = act_layer['name'] - if output_layers is not None and layer['name'] in output_layers: - output_layers = [act_layer['name'] if name == layer['name'] else name for name in output_layers] - output_shapes[act_layer['name']] = output_shape - layer_list.append(act_layer) - - assert output_shape is not None - - output_shapes[layer['name']] = output_shape - - return layer_list, input_layers, output_layers, output_shapes - - -def keras_to_hls(config): - model_arch, reader = get_model_arch(config) - layer_list, input_layers, output_layers, _ = parse_keras_model(model_arch, reader) - print('Creating HLS model') - hls_model = ModelGraph(config, layer_list, input_layers, output_layers) - return hls_model diff --git a/hls4ml/hls4ml/converters/onnx/__init__.py b/hls4ml/hls4ml/converters/onnx/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/hls4ml/hls4ml/converters/onnx/convolution.py b/hls4ml/hls4ml/converters/onnx/convolution.py deleted file mode 100644 index 39b2232..0000000 --- a/hls4ml/hls4ml/converters/onnx/convolution.py +++ /dev/null @@ -1,85 +0,0 @@ -from hls4ml.converters.onnx_to_hls import ( - compute_pads_1d, - compute_pads_2d, - get_onnx_attribute, - get_onnx_input_name, - onnx_handler, -) -from hls4ml.converters.utils import compute_padding_1d, compute_padding_2d - - -@onnx_handler('Conv') -def parse_conv_layer(reader, node, inputs_map, input_shapes, graph, config): - layer = {} - layer['name'] = node.name - layer['data_format'] = 'channels_first' # ONNX's default is channel first - layer['inputs'] = get_onnx_input_name(node, graph) - reader.add_input(layer['name'], node.input) - - strides = get_onnx_attribute(node, 'strides') - kernel_shape = get_onnx_attribute(node, 'kernel_shape') - - if len(input_shapes[0]) == 3: # Conv1D - layer['class_name'] = 'Conv1D' - - layer['in_width'] = input_shapes[0][2] - layer['n_chan'] = input_shapes[0][1] - layer['filt_width'] = kernel_shape[0] - layer['n_filt'] = reader.get_weights_data(layer['name'], 'kernel').shape[2] - layer['stride_width'] = strides[0] - pads = compute_pads_1d(node, layer) - - layer['pad_left'] = pads[0] - layer['pad_right'] = pads[1] - - if all(x == 0 for x in pads): # No padding, 
i.e., 'VALID' padding - layer['padding'] = 'valid' - else: - layer['padding'] = 'same' - - (layer['out_width'], _, _) = compute_padding_1d( - layer['padding'], layer['in_width'], layer['stride_width'], layer['filt_width'] - ) - - output_shape = [input_shapes[0][0], layer['n_filt'], layer['out_width']] - - elif len(input_shapes[0]) == 4: # Conv2D - layer['class_name'] = 'Conv2D' - - layer['in_height'] = input_shapes[0][2] - layer['in_width'] = input_shapes[0][3] - layer['n_chan'] = input_shapes[0][1] - - layer['filt_height'] = kernel_shape[0] - layer['filt_width'] = kernel_shape[1] - - layer['n_filt'] = next( - (x.type.tensor_type.shape.dim[1].dim_value for x in graph.value_info if x.name == node.output[0]), None - ) - layer['stride_height'] = strides[0] - layer['stride_width'] = strides[1] - pads = compute_pads_2d(node, layer) - - layer['pad_top'] = pads[0] - layer['pad_bottom'] = pads[2] - layer['pad_left'] = pads[1] - layer['pad_right'] = pads[3] - - if all(x == 0 for x in pads): # No padding, i.e., 'VALID' padding in Keras/Tensorflow - layer['padding'] = 'valid' - else: # Only 'valid' and 'same' padding are available in Keras - layer['padding'] = 'same' - - (layer['out_height'], layer['out_width'], _, _, _, _) = compute_padding_2d( - layer['padding'], - layer['in_height'], - layer['in_width'], - layer['stride_height'], - layer['stride_width'], - layer['filt_height'], - layer['filt_width'], - ) - - output_shape = [input_shapes[0][0], layer['n_filt'], layer['out_height'], layer['out_width']] - - return layer, output_shape diff --git a/hls4ml/hls4ml/converters/onnx/core.py b/hls4ml/hls4ml/converters/onnx/core.py deleted file mode 100644 index 940b860..0000000 --- a/hls4ml/hls4ml/converters/onnx/core.py +++ /dev/null @@ -1,122 +0,0 @@ -from hls4ml.converters.onnx_to_hls import get_onnx_attribute, get_onnx_input_name, onnx_handler - - -@onnx_handler(*['Gemm', 'MatMul']) -def parse_gemm_layer(reader, node, inputs_map, input_shapes, graph, config): - layer = {} - - layer['class_name'] = 'Dense' - layer['name'] = node.name - layer['inputs'] = get_onnx_input_name(node, graph) - - tran_weight = get_onnx_attribute(node, 'transB', 0) - reader.add_input(layer['name'], node.input, tran_weight) - - weights_shape = reader.get_weights_data(layer['name'], 'kernel').shape - layer['n_in'] = weights_shape[0] - layer['n_out'] = weights_shape[1] - - output_shape = input_shapes[0][:] - output_shape[-1] = layer['n_out'] - - return layer, output_shape - - -# ------------------Global paras for activations -# TODO: repair HardSigmoid support -# https://github.com/fastmachinelearning/hls4ml/issues/409 -activation_layers = [ - 'Relu', - 'Tanh', - 'Sigmoid', - 'LeakyRelu', - 'ThresholdedRelu', - 'Elu', - 'Selu', - 'PRelu', - 'Softmax', - 'Softsign', - 'Softplus', - 'Clip', -] - -activation_map = { - 'Relu': 'ReLU', - 'Tanh': 'Activation', - 'Sigmoid': 'Activation', - 'LeakyRelu': 'LeakyReLU', - 'ThresholdedRelu': 'ThresholdedReLU', - 'HardSigmoid': 'Activation', - 'Elu': 'ELU', - 'Selu': 'Activation', - 'PRelu': 'PReLU', - 'Softmax': 'Softmax', - 'Softsign': 'Activation', - 'Softplus': 'Activation', - 'Clip': 'Clip', -} -# --------- - - -@onnx_handler(*activation_layers) -def parse_activation_layer(reader, node, inputs_map, input_shapes, graph, config): - layer = {} - - layer['name'] = node.name - layer['class_name'] = activation_map[node.op_type] - layer['activation'] = node.op_type.lower() - layer['inputs'] = get_onnx_input_name(node, graph) - - if layer['class_name'] != 'Activation': - if layer['class_name'] == 
'Softmax': - layer['activation'] = 'softmax' - - elif layer['class_name'] in ['ELU', 'LeakyReLU', 'ThresholdedReLU']: - layer['activation'] = layer['class_name'] - layer['activ_param'] = get_onnx_attribute(node, 'alpha', 0.01) - - elif layer['class_name'] == 'Clip': - clip_min_node = [x for x in graph.initializer if x.name in node.input] - clip_min = clip_min_node[0].float_data[0] - - # Check if it's relu or not - if clip_min == 0.0: - layer['class_name'] = 'Activation' - layer['activation'] = 'ReLU' - else: - raise Exception('Clip with min != 0 is not supported yet!') - - else: - layer['activation'] = layer['class_name'] - layer['class_name'] = 'Activation' - - return layer, [shape for shape in input_shapes[0]] - - -@onnx_handler('BatchNormalization') -def parse_batchnorm_layer(reader, node, inputs_map, input_shapes, graph, config): - layer = {} - - layer['class_name'] = 'BatchNormalization' - layer['data_format'] = 'channels_first' - layer['name'] = node.name - layer['inputs'] = get_onnx_input_name(node, graph) - - # Other attributes - layer['epsilon'] = get_onnx_attribute(node, 'epsilon') - layer['momentum'] = get_onnx_attribute(node, 'momentum') - - reader.add_input(layer['name'], node.input) - - in_size = 1 - for dim in input_shapes[0][1:]: - in_size *= dim - - layer['n_in'] = layer['n_out'] = in_size - - if len(input_shapes[0]) == 2: - layer['n_filt'] = -1 - elif len(input_shapes[0]) > 2: - layer['n_filt'] = input_shapes[0][1] # Always channel first for onnx - - return layer, [shape for shape in input_shapes[0]] diff --git a/hls4ml/hls4ml/converters/onnx/merge.py b/hls4ml/hls4ml/converters/onnx/merge.py deleted file mode 100644 index 9ccd432..0000000 --- a/hls4ml/hls4ml/converters/onnx/merge.py +++ /dev/null @@ -1,42 +0,0 @@ -from hls4ml.converters.onnx_to_hls import get_onnx_attribute, get_onnx_input_name, onnx_handler - -merge_layers = ['Add', 'Sub', 'Mul', 'Average', 'Max', 'Min', 'Concat', 'Sum'] - - -@onnx_handler(*merge_layers) -def parse_merge_layer(reader, node, inputs_map, input_shapes, graph, config): - layer = {} - layer['class_name'] = node.op_type - layer['name'] = node.name - layer['op'] = layer['class_name'].lower() - layer['inputs'] = get_onnx_input_name(node, graph) - output_shape = input_shapes[0] - - if layer['class_name'] == 'Concat': - rank = len(input_shapes[0][1:]) - if rank > 3: - raise Exception('ERROR: Concatenation of tensors with rank > 3 is not yet supported.') - - layer['class_name'] = 'Concatenate' - layer['op'] = layer['class_name'].lower() + f'{rank}d' - layer['axis'] = get_onnx_attribute(node, 'axis') - - # Calculate output shape - new_dim = sum( - [x.type.tensor_type.shape.dim[layer['axis']].dim_value for x in graph.value_info if x.name in node.input] - ) - output_shape[layer['axis']] = new_dim - - elif layer['class_name'] == 'Add': - # Check if the layer is an AddBias - for input in node.input: - if "bias" in input: - layer['class_name'] = 'BiasAdd' - reader.add_input(layer['name'], node.input) - else: - layer['class_name'] = 'Merge' - - if len(layer['inputs']) > 2: - raise Exception('ERROR: Merging more than two tensors is not yet supported.') - - return layer, output_shape diff --git a/hls4ml/hls4ml/converters/onnx/pooling.py b/hls4ml/hls4ml/converters/onnx/pooling.py deleted file mode 100644 index 67fa76c..0000000 --- a/hls4ml/hls4ml/converters/onnx/pooling.py +++ /dev/null @@ -1,121 +0,0 @@ -from hls4ml.converters.onnx_to_hls import ( - compute_pads_1d, - compute_pads_2d, - get_onnx_attribute, - get_onnx_input_name, - onnx_handler, -) -from 
hls4ml.converters.utils import compute_padding_1d, compute_padding_2d - -pool_operations = ['AveragePool', 'MaxPool'] - - -@onnx_handler(*pool_operations) -def parse_pool_layer(reader, node, inputs_map, input_shapes, graph, config): - layer = {} - layer['name'] = node.name - layer['inputs'] = get_onnx_input_name(node, graph) - layer['class_name'] = node.op_type - layer['data_format'] = 'channels_first' # Default ONNX - - info = layer['class_name'].replace('Pool', '') - strides = get_onnx_attribute(node, 'strides') - kernel_shape = get_onnx_attribute(node, 'kernel_shape') - - if len(input_shapes[0]) == 3: # 1D - layer['class_name'] = info + 'Pooling1D' - - layer['n_filt'] = input_shapes[0][1] - layer['n_in'] = input_shapes[0][2] - - layer['pool_width'] = kernel_shape[0] - layer['stride_width'] = strides[0] - - # Padding - pads = compute_pads_1d(node, layer) - layer['pad_left'] = pads[0] - layer['pad_right'] = pads[1] - - if all(x == 0 for x in pads): # No padding, i.e., 'VALID' padding - layer['padding'] = 'valid' - else: - layer['padding'] = 'same' - - (layer['n_out'], _, _) = compute_padding_1d( - layer['padding'], layer['n_in'], layer['stride_width'], layer['pool_width'] - ) - - output_shape = [input_shapes[0][0], layer['n_filt'], layer['n_out']] - - elif len(input_shapes[0]) == 4: # 2D - layer['class_name'] = info + 'Pooling2D' - - layer['n_filt'] = input_shapes[0][1] - layer['in_height'] = input_shapes[0][2] - layer['in_width'] = input_shapes[0][3] - - layer['stride_height'] = strides[0] - layer['stride_width'] = strides[1] - layer['pool_height'] = layer['filt_height'] = kernel_shape[0] - layer['pool_width'] = layer['filt_width'] = kernel_shape[1] - - pads = compute_pads_2d(node, layer) - layer['pad_top'] = pads[0] - layer['pad_bottom'] = pads[2] - layer['pad_left'] = pads[1] - layer['pad_right'] = pads[3] - - if all(x == 0 for x in pads): # No padding, i.e., 'VALID' padding in Keras/Tensorflow - layer['padding'] = 'valid' - else: # Only 'valid' and 'same' padding are available in Keras - layer['padding'] = 'same' - - (layer['out_height'], layer['out_width'], _, _, _, _) = compute_padding_2d( - layer['padding'], - layer['in_height'], - layer['in_width'], - layer['stride_height'], - layer['stride_width'], - layer['filt_height'], - layer['filt_width'], - ) - - output_shape = [input_shapes[0][0], layer['n_filt'], layer['out_height'], layer['out_width']] - - return layer, output_shape - - -global_pooling_layers = ['GlobalMaxPool', 'GlobalAveragePool'] - - -@onnx_handler(*global_pooling_layers) -def parse_global_pooling_layer(reader, node, inputs_map, input_shapes, graph, config): - layer = {} - layer['name'] = node.name - layer['inputs'] = get_onnx_input_name(node, graph) - layer['class_name'] = node.op_type - layer['data_format'] = 'channels_first' - - # Sonme default parameters for global pooling - layer['n_out'] = 1 - layer['pad_left'] = layer['pad_right'] = 0 - layer['stride'] = 0 - - info = layer['class_name'].replace('Pool', '') - - if len(input_shapes[0]) == 3: # 1D - layer['class_name'] = info + 'Pooling1D' - - layer['n_in'] = input_shapes[0][2] - layer['n_filt'] = input_shapes[0][1] - - elif len(input_shapes[0]) == 4: - layer['class_name'] = info + 'Pooling2D' - - layer['n_filt'] = input_shapes[0][1] - layer['in_height'] = input_shapes[0][2] - layer['in_width'] = input_shapes[0][3] - - output_shape = [input_shapes[0][0], layer['n_filt']] + [1] * (len(input_shapes[0]) - 2) - - return layer, output_shape diff --git a/hls4ml/hls4ml/converters/onnx/reshape.py 
b/hls4ml/hls4ml/converters/onnx/reshape.py deleted file mode 100644 index 5bbf58b..0000000 --- a/hls4ml/hls4ml/converters/onnx/reshape.py +++ /dev/null @@ -1,39 +0,0 @@ -import numpy as np - -from hls4ml.converters.onnx_to_hls import get_onnx_input_name, onnx_handler - - -@onnx_handler('Transpose') -def parse_transpose_layer(reader, node, inputs_map, input_shapes, graph, config): - layer = {} - layer['name'] = node.name - layer['class_name'] = 'Transpose' - layer['inputs'] = get_onnx_input_name(node, graph) - - perm = [list(i.ints) for i in node.attribute][0] # This will get something like [[a,b,c]][0] = [a,b,c] - layer['perm'] = [x - 1 for x in perm[1:]] # Ignore the batch dimension in ONNX, and adjust the perm indexing - - output_shape = [input_shapes[0][i] for i in perm] - - return layer, output_shape - - -@onnx_handler('Reshape') -def parse_reshape_layer(reader, node, inputs_map, input_shapes, graph, config): - layer = {} - layer['name'] = node.name - layer['class_name'] = 'Reshape' - layer['inputs'] = get_onnx_input_name(node, graph) - - target_shape = list([x for x in graph.initializer if x.name == node.input[1]][0].int64_data)[1:] - - if -1 in target_shape: # Need to infer shape for -1 - print("WARNING: Inferring -1 shape ... ") - dummy_x = np.ones(input_shapes[0][1:]) - dummy_y = np.reshape(dummy_x, target_shape) - target_shape = list(dummy_y.shape) - - layer['target_shape'] = target_shape - output_shape = input_shapes[0][:1] + layer['target_shape'] - - return layer, output_shape diff --git a/hls4ml/hls4ml/converters/onnx_to_hls.py b/hls4ml/hls4ml/converters/onnx_to_hls.py deleted file mode 100644 index 106daf6..0000000 --- a/hls4ml/hls4ml/converters/onnx_to_hls.py +++ /dev/null @@ -1,319 +0,0 @@ -import numpy as np -import onnx -from onnx import helper, numpy_helper, shape_inference - -from hls4ml.model import ModelGraph - -MAXMULT = 4096 - - -class ONNXDataReader: - """ - ONNX data reader to be used for extracting relevant information during conversion. - """ - - def __init__(self, model): - self.model = model - self.input_map = {} - self.index_map = { - # Dense - 'kernel': 1, - 'bias': 2, - # BatchNormalization - 'gamma': 1, - 'beta': 2, - 'moving_mean': 3, - 'moving_variance': 4, - } - - def get_weights_data(self, layer_name, var_name): - """Extract weights data from ONNX model. - - Args: - layer_name (str): Layer's name in the ONNX model. - var_name (str): Variable to be extracted. - - Returns: - ndarray: Extracted weights data. 
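        Example (layer and variable names illustrative):
            reader.get_weights_data('fc1', 'kernel')  # -> ndarray with the Gemm weight tensor, transposed as configured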
- """ - # Get the node associated with the layer name - node = next(node for node in self.model.graph.node if node.name == layer_name) - - inputs = self.input_map[layer_name] - inp_idx = self.index_map[var_name] - - if inp_idx >= len(inputs['inputs']): - # Check if the layer is an AddBias layer - if (node.op_type == 'Add') and (var_name == 'bias'): - inp_idx = 1 - else: - # Input not found, likely a bias tensor is not available - return None - - tensor = next((x for x in self.model.graph.initializer if x.name == inputs['inputs'][inp_idx]), None) - - if tensor is not None: - data = numpy_helper.to_array(tensor) - - if inputs['transpose']: - if inputs['perm'] is not None and len(data.shape) == len(inputs['perm']): - data = data.transpose(inputs['perm']) - else: - data = data.transpose() - - # Check for transB in Gemm - if node.op_type == 'Gemm': - if not get_onnx_attribute(node, 'transB'): - data = data.transpose() - - return data - - def add_input(self, layer_name, inputs, transpose=True, perm=None): - self.input_map[layer_name] = {'inputs': inputs, 'transpose': transpose, 'perm': perm} - - -# ----------------------Helpers--------------------- # -def sanitize_layer_name(layer): - new_name = layer['name'] - if new_name[0].isdigit(): - new_name = layer['class_name'].lower() + new_name - - layer['name'] = new_name - - -def replace_char_inconsitency(name): - """ - Replace some inconsistent characters that cause issues when writing into HLS. - """ - return name.replace('.', '_') - - -def get_onnx_attribute(operation, name, default=None): - attr = next((x for x in operation.attribute if x.name == name), None) - if attr is None: - value = default - else: - value = helper.get_attribute_value(attr) - if isinstance(value, bytes): - value = value.decode() - return value - - -def get_input_shape(model, operation, input_idx=0): - value_info_idx = next((i for i, x in enumerate(model.graph.value_info) if x.name == operation.input[input_idx]), 0) - return [d.dim_value for d in model.graph.value_info[value_info_idx].type.tensor_type.shape.dim] - - -def compute_pads_1d(operation, layer): - auto_pad = get_onnx_attribute(operation, 'auto_pad', 'NOTSET') - if auto_pad != 'NOTSET': - if layer['in_width'] % layer['stride_width'] == 0: - pad_along_width = max(layer['filt_width'] - layer['stride_width'], 0) - else: - pad_along_width = max(layer['filt_width'] - (layer['in_width'] % layer['stride_width']), 0) - - pads = [pad_along_width // 2, pad_along_width - (pad_along_width // 2)] - - if auto_pad == 'SAME_UPPER': - pads = sorted(pads) - elif auto_pad == 'SAME_LOWER': - pads = sorted(pads, reverse=True) - else: # 'VALID' padding - pads = [0, 0] - else: - pads = get_onnx_attribute(operation, 'pads', [0, 0]) - - return pads - - -def compute_pads_2d(operation, layer): - auto_pad = get_onnx_attribute(operation, 'auto_pad', 'NOTSET') - if auto_pad != 'NOTSET': - # Height - if layer['in_height'] % layer['stride_height'] == 0: - pad_along_height = max(layer['filt_height'] - layer['stride_height'], 0) - else: - pad_along_height = max(layer['filt_height'] - (layer['in_height'] % layer['stride_height']), 0) - pad_height = [pad_along_height // 2, pad_along_height - pad_along_height // 2] - - # Width - if layer['in_width'] % layer['stride_width'] == 0: - pad_along_width = max(layer['filt_width'] - layer['stride_width'], 0) - else: - pad_along_width = max(layer['filt_width'] - (layer['in_width'] % layer['stride_width']), 0) - pad_width = [pad_along_width // 2, pad_along_width - pad_along_width // 2] - - if auto_pad == 
'SAME_UPPER':
-            pads = [min(pad_height), min(pad_width), max(pad_height), max(pad_width)]
-        elif auto_pad == 'SAME_LOWER':
-            pads = [max(pad_height), max(pad_width), min(pad_height), min(pad_width)]
-        else:  # 'VALID' padding
-            pads = [0, 0, 0, 0]
-    else:
-        pads = get_onnx_attribute(operation, 'pads', [0, 0, 0, 0])
-
-    return pads
-
-
-# ----------------------Layer handling--------------------- #
-layer_handlers = {}
-
-
-def register_onnx_layer_handler(layer_name, handler_func):
-    if layer_name in layer_handlers:
-        raise Exception(f'Layer {layer_name} already registered')
-    else:
-        layer_handlers[layer_name] = handler_func
-
-
-def get_supported_onnx_layers():
-    return list(layer_handlers.keys())
-
-
-def onnx_handler(*args):
-    def decorator(function):
-        function.handles = [arg for arg in args]
-        return function
-
-    return decorator
-
-
-# --->> A set of functions to address the naming convention in ONNX's graph
-def get_onnx_input_name(node, graph):
-    """
-    In ONNX, calling node.input returns the node input's index in the graph instead of the input's name.
-    However, the input's name is used for indexing in ModelGraph's graph. This function returns the input node's name instead.
-    """
-
-    in_node = [in_node for in_node in graph.node if (in_node.output[0] in node.input)]
-
-    if in_node:
-        if in_node[0].op_type != 'Flatten':
-            input_node_name = [x.name for x in in_node]
-        else:  # If it's a Flatten layer, take the name of its parent instead
-            input_node_name = [x.name for x in graph.node if (x.output[0] in in_node[0].input)]
-
-        return input_node_name
-
-    else:  # If there is no input name, it's actually the first layer
-        return [replace_char_inconsitency(node.input[0])]
-
-
-def get_out_layer_name(graph):
-    """
-    Get the output layers' names for the model.
-    graph.output only returns the output node's index.
-    """
-    output_index_list = [x.name for x in graph.output]
-    return [node.name for node in graph.node if node.output[0] in output_index_list]
-
-
-def onnx_to_hls(config):
-    """Convert an ONNX model to an hls4ml model from the given configuration.
-
-    Args:
-        config (dict): ONNX configuration from a yaml file or passed through the API.
-
-    Raises:
-        Exception: Raised if an unsupported operation is found in the ONNX model.
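    Example (sketch; the file path and extra configuration keys are illustrative):
        hls_model = onnx_to_hls({'OnnxModel': 'model.onnx', **model_graph_settings})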
- - Returns: - ModelGraph: hls4ml model object - """ - # This is a list of dictionaries to hold all the layer info we need to generate HLS - layer_list = [] - - # Extract model architecture - print('Interpreting Model ...') - - model = onnx.load(config['OnnxModel']) if isinstance(config['OnnxModel'], str) else config['OnnxModel'] - - model = shape_inference.infer_shapes(model) - graph = model.graph - - reader = ONNXDataReader(model) - - # Obtain list of input/ouput layers - all_inputs = [x.name for x in model.graph.input] - all_initializers = [x.name for x in model.graph.initializer] - input_layers = [x for x in all_inputs if x not in all_initializers] - output_layers = get_out_layer_name(graph) - - print("Output layers: ", output_layers) - - for i, inp in enumerate(input_layers): - input_layer = {} - input_layer['name'] = replace_char_inconsitency(inp) - input_layer['class_name'] = 'InputLayer' - inp_shape = next((x.type.tensor_type.shape.dim for x in model.graph.input if x.name == inp), None) - input_layer['input_shape'] = [x.dim_value for x in inp_shape] - - if len(input_layer['input_shape']) > 1: - input_layer['input_shape'][0] = None # Firt dim is batch - - # Clean the layer name for specific models - sanitize_layer_name(input_layer) - input_layers[i] = input_layer['name'] - - layer_list.append(input_layer) - - # Defined supported layers and check for unsupported layer type - skip_layers = ['Dropout', 'Identity', 'Flatten'] - - # Map inputs of skipped layers - inputs_map = {} - - supported_layers = get_supported_onnx_layers() + skip_layers - - # Get input shape - current_shape = [input_layer['input_shape']] - print('Input shape:', current_shape[0]) - - # Loop through layers - layer_counter = 0 - - # Output shape tracking - output_shape = None - - print('Topology:') - for node in graph.node: - if node.op_type not in supported_layers: - raise Exception(f'ERROR: Unsupported operation type: {node.op_type}') - - # If not the first layer then input shape is taken from last layer's output - if layer_counter != 0: - current_shape = [output_shape] - - if node.op_type in skip_layers: - if node.op_type == 'Flatten': - output_shape = [current_shape[0][0], np.prod(current_shape[0][1:])] - - else: - # Currently supported skipped layers have only one input and output - # Skipped layers can follow each other (e.g., Dropout -> Flatten) - - # Mapping inputs - input_name = inputs_map.get(node.input[0], node.input[0]) - output_name = node.output[0] - inputs_map[output_name] = input_name - - output_shape = current_shape[0] - continue - - if node.op_type in supported_layers: - layer_counter = layer_counter + 1 - - # Process the layer - layer, output_shape = layer_handlers[node.op_type](reader, node, inputs_map, current_shape, graph, config) - - sanitize_layer_name(layer) - print('Layer name: {}, layer type: {}, current shape: {}'.format(layer['name'], layer['class_name'], current_shape)) - layer_list.append(layer) - - ################# - # Generate HLS - ################# - - print('Creating HLS model') - hls_model = ModelGraph(config, reader, layer_list, input_layers, output_layers) - return hls_model diff --git a/hls4ml/hls4ml/converters/pytorch/__init__.py b/hls4ml/hls4ml/converters/pytorch/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/hls4ml/hls4ml/converters/pytorch/convolution.py b/hls4ml/hls4ml/converters/pytorch/convolution.py deleted file mode 100644 index 5c0d4d2..0000000 --- a/hls4ml/hls4ml/converters/pytorch/convolution.py +++ /dev/null @@ -1,107 +0,0 @@ -from 
hls4ml.converters.pytorch_to_hls import pytorch_handler -from hls4ml.converters.utils import compute_padding_1d_pytorch, compute_padding_2d_pytorch, parse_data_format - - -@pytorch_handler('Conv1d') -def parse_conv1d_layer(operation, layer_name, input_names, input_shapes, node, class_object, data_reader, config): - assert 'Conv1d' in operation - - layer = {} - - layer['name'] = layer_name - layer['inputs'] = input_names - layer['class_name'] = 'Conv1D' - layer['data_format'] = 'channels_first' # Pytorch default (can't change) - - layer['weight_data'] = class_object.weight.data.numpy() - if class_object.bias is not None: - layer['bias_data'] = class_object.bias.data.numpy() - else: - layer['bias_data'] = None - - # Input info - (layer['in_width'], layer['n_chan']) = parse_data_format( - input_shapes[0], 'channels_first' - ) # Keras's default is channels_last - - # Additional parameters - layer['n_filt'] = class_object.out_channels - layer['filt_width'] = class_object.kernel_size[0] - layer['stride_width'] = class_object.stride[0] - layer['dilation'] = class_object.dilation[0] - - if type(class_object.padding) is tuple: - padding = class_object.padding[0] - else: - padding = class_object.padding - - if padding == 0: # No padding, i.e., 'VALID' padding in Keras/Tensorflow - layer['padding'] = 'valid' - else: # Only 'valid' and 'same' padding are available in Keras - layer['padding'] = 'same' - - # Ouput info - (layer['out_width'], pad_left, pad_right) = compute_padding_1d_pytorch( - padding, layer['in_width'], layer['stride_width'], layer['filt_width'], layer['dilation'] - ) - layer['pad_left'] = pad_left - layer['pad_right'] = pad_right - - output_shape = [input_shapes[0][0], layer['n_filt'], layer['out_width']] # Channel first as default - - return layer, output_shape - - -@pytorch_handler('Conv2d') -def parse_conv2d_layer(operation, layer_name, input_names, input_shapes, node, class_object, data_reader, config): - assert 'Conv2d' in operation - - layer = {} - - layer['name'] = layer_name - layer['inputs'] = input_names - layer['class_name'] = 'Conv2D' - layer['data_format'] = 'channels_first' # Pytorch default (can't change) - - layer['weight_data'] = class_object.weight.data.numpy() - if class_object.bias is not None: - layer['bias_data'] = class_object.bias.data.numpy() - else: - layer['bias_data'] = None - - # Input info - (layer['in_height'], layer['in_width'], layer['n_chan']) = parse_data_format( - input_shapes[0], 'channels_first' - ) # Keras's default is channels_last - - # Additional parameters - layer['n_filt'] = class_object.out_channels - layer['filt_height'] = class_object.kernel_size[0] - layer['filt_width'] = class_object.kernel_size[1] - layer['stride_height'] = class_object.stride[0] - layer['stride_width'] = class_object.stride[1] - layer['dilation'] = class_object.dilation[0] - layer['pad_top'] = layer['pad_bottom'] = class_object.padding[0] - layer['pad_left'] = layer['pad_right'] = class_object.padding[1] - - if all(x == 0 for x in class_object.padding): # No padding, i.e., 'VALID' padding in Keras/Tensorflow - layer['padding'] = 'valid' - else: # Only 'valid' and 'same' padding are available in Keras - layer['padding'] = 'same' - - # Ouput info - (layer['out_height'], layer['out_width'], _, _, _, _) = compute_padding_2d_pytorch( - class_object.padding, - layer['in_height'], - layer['in_width'], - layer['stride_height'], - layer['stride_width'], - layer['filt_height'], - layer['filt_width'], - class_object.dilation[0], - class_object.dilation[1], - ) - - output_shape = 
[input_shapes[0][0], layer['n_filt'], layer['out_height'], layer['out_width']] - - return layer, output_shape diff --git a/hls4ml/hls4ml/converters/pytorch/core.py b/hls4ml/hls4ml/converters/pytorch/core.py deleted file mode 100644 index 0262fda..0000000 --- a/hls4ml/hls4ml/converters/pytorch/core.py +++ /dev/null @@ -1,133 +0,0 @@ -from hls4ml.converters.pytorch_to_hls import pytorch_handler - - -@pytorch_handler('Linear') -def parse_linear_layer(operation, layer_name, input_names, input_shapes, node, class_object, data_reader, config): - assert 'Linear' in operation - - layer = {} - - layer['class_name'] = 'Dense' - layer['name'] = layer_name - layer['inputs'] = input_names - - layer['weight_data'] = class_object.weight.data.numpy() - if class_object.bias is not None: - layer['bias_data'] = class_object.bias.data.numpy() - else: - layer['bias_data'] = None - - if class_object is not None: - layer['n_in'] = class_object.in_features - layer['n_out'] = class_object.out_features - else: - raise Exception('parsing of torch.nn.functional.linear not supported yet, please use torch.nn.Linear class') - - # Handling whether bias is used or not - if class_object.bias is None: - layer['use_bias'] = False - else: - layer['use_bias'] = True - - output_shape = input_shapes[0][:] - output_shape[-1] = layer['n_out'] - - return layer, output_shape - - -activation_layers = ['Softmax', 'ReLU', 'LeakyReLU', 'Threshold', 'ELU', 'PReLU', 'Sigmoid', 'Tanh'] - - -@pytorch_handler(*activation_layers) -def parse_activation_layer(operation, layer_name, input_names, input_shapes, node, class_object, data_reader, config): - layer = {} - - layer['class_name'] = operation - layer['activation'] = layer['class_name'] - layer['name'] = layer_name - layer['inputs'] = input_names - - # if layer['class_name'] != 'Activation': - # layer['activation'] = layer['class_name'] - if node.op == 'call_module': - if layer['class_name'] == 'ReLU' or layer['class_name'] == 'Sigmoid': - layer['class_name'] = 'Activation' - if layer['class_name'] == 'LeakyReLU': - layer['activ_param'] = class_object.negative_slope - if layer['class_name'] == 'ELU': - layer['activ_param'] = class_object.alpha - if layer['class_name'] == 'PReLU': - layer['alpha_data'] = class_object.weight.data.numpy() - if layer['class_name'] == 'Threshold': - layer['activ_param'] = class_object.threshold - layer['class_name'] = 'ThresholdedReLU' - layer['activation'] = 'ThresholdedReLU' - if layer['activ_param'] < 0: - raise Exception('negative threshold values not supported') - - if hasattr(node, 'dim'): - layer['axis'] = class_object.dim - else: - if layer['class_name'] == 'ReLU' or layer['class_name'] == 'Sigmoid': - layer['class_name'] = 'Activation' - if layer['class_name'] == 'LeakyReLU': - layer['activ_param'] = node.kwargs['negative_slope'] - if layer['class_name'] == 'ELU': - layer['activ_param'] = node.kwargs['alpha'] - if layer['class_name'] == 'Threshold': - layer['activ_param'] = node.args[1] - if layer['activ_param'] < 0: - raise Exception('negative threshold values not supported') - layer['class_name'] = 'ThresholdedReLU' - layer['activation'] = 'ThresholdedReLU' - if 'dim' in node.kwargs: - layer['axis'] = node.kwargs['dim'] - - output_shape = input_shapes[0] - return layer, output_shape - - -batchnorm_layers = ['BatchNorm2d', 'BatchNorm1d', 'Batch_norm'] - - -@pytorch_handler(*batchnorm_layers) -def parse_batchnorm_layer(operation, layer_name, input_names, input_shapes, node, class_object, data_reader, config): - assert 'BatchNorm' in operation - - layer 
= {} - - layer['class_name'] = 'BatchNormalization' - layer['data_format'] = 'channels_first' - layer['name'] = layer_name - layer['inputs'] = input_names - - # batchnorm para - if node.op == 'call_module': - layer['epsilon'] = class_object.eps - layer['use_gamma'] = layer['use_beta'] = class_object.affine - - if layer['use_gamma']: - layer['gamma_data'] = class_object.weight.data.numpy() - else: - layer['gamma_data'] = 1 - - if layer['use_beta']: - layer['beta_data'] = class_object.bias.data.numpy() - else: - layer['beta_data'] = 0 - - layer['mean_data'] = class_object.running_mean.data.numpy() - layer['variance_data'] = class_object.running_var.data.numpy() - - in_size = 1 - for dim in input_shapes[0][1:]: - in_size *= dim - - layer['n_in'] = layer['n_out'] = in_size - - if len(input_shapes[0]) == 2: - layer['n_filt'] = -1 - elif len(input_shapes[0]) > 2: - layer['n_filt'] = input_shapes[0][1] # Always channel first for Pytorch - - return layer, [shape for shape in input_shapes[0]] diff --git a/hls4ml/hls4ml/converters/pytorch/merge.py b/hls4ml/hls4ml/converters/pytorch/merge.py deleted file mode 100644 index 1f1e11d..0000000 --- a/hls4ml/hls4ml/converters/pytorch/merge.py +++ /dev/null @@ -1,61 +0,0 @@ -from hls4ml.converters.pytorch_to_hls import pytorch_handler - -concat_layers = ['cat', 'concat', 'concatenate'] - - -@pytorch_handler(*concat_layers) -def parse_concat_layer(operation, layer_name, input_names, input_shapes, node, class_object, data_reader, config): - assert operation in concat_layers - - layer = {} - layer['class_name'] = 'Concatenate' - layer['name'] = layer_name - layer['op'] = 'concatenate' - layer['inputs'] = input_names - - if len(layer['inputs']) > 2: - raise Exception('ERROR: Merging more than two tensors is not yet supported.') - - rank = len(input_shapes[0][1:]) - if rank > 3: - raise Exception('ERROR: Concatenation of tensors with rank > 3 is not yet supported.') - layer['op'] = layer['class_name'].lower() + f'{rank}d' - layer['axis'] = node.kwargs.get('dim', 0) - - output_shape = input_shapes[0][:] - output_shape[layer['axis']] += input_shapes[1][layer['axis']] - - return layer, output_shape - - -add_layers = ['add'] -multiply_layers = ['mul', 'multiply'] -subtract_layers = ['sub', 'subtract'] -min_layers = ['fmin', 'minimum'] -max_layers = ['fmax', 'maximum'] -merge_layers = [*add_layers, *multiply_layers, *subtract_layers, *min_layers, *max_layers] - - -@pytorch_handler(*merge_layers) -def parse_merge_layer(operation, layer_name, input_names, input_shapes, node, class_object, data_reader, config): - assert operation in merge_layers - - layer = {} - layer['class_name'] = 'Merge' - layer['name'] = layer_name - if operation in add_layers: - layer['op'] = 'add' - elif operation in multiply_layers: - layer['op'] = 'multiply' - elif operation in subtract_layers: - layer['op'] = 'subtract' - elif operation in min_layers: - layer['op'] = 'minimum' - elif operation in max_layers: - layer['op'] = 'maximum' - - layer['inputs'] = input_names - - output_shape = input_shapes[0][:] - - return layer, output_shape diff --git a/hls4ml/hls4ml/converters/pytorch/pooling.py b/hls4ml/hls4ml/converters/pytorch/pooling.py deleted file mode 100644 index 8256a9f..0000000 --- a/hls4ml/hls4ml/converters/pytorch/pooling.py +++ /dev/null @@ -1,136 +0,0 @@ -from hls4ml.converters.pytorch_to_hls import pytorch_handler -from hls4ml.converters.utils import compute_padding_1d_pytorch, compute_padding_2d_pytorch, parse_data_format - -pooling_layers = ['MaxPool1d', 'MaxPool2d', 'AvgPool1d', 
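# (Editorial note) The handler below recovers the pooling dimensionality from the
# mapped hls4ml class name via int(layer['class_name'][-2]), e.g. 'MaxPooling1D'[-2] == '1'.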
'AvgPool2d'] - - -@pytorch_handler(*pooling_layers) -def parse_pooling_layer(operation, layer_name, input_names, input_shapes, node, class_object, data_reader, config): - assert 'Pool' in operation or 'pool' in operation - - layer = {} - - if operation == 'MaxPool1d': - layer['class_name'] = 'MaxPooling1D' - if operation == 'MaxPool2d': - layer['class_name'] = 'MaxPooling2D' - if operation == 'AvgPool1d': - layer['class_name'] = 'AveragePooling1D' - if operation == 'AvgPool2d': - layer['class_name'] = 'AveragePooling2D' - - layer['name'] = layer_name - layer['inputs'] = input_names - layer['data_format'] = 'channels_first' # Pytorch default (can't change) - if node.op == 'call_module' and 'Avg' in operation: - if class_object.count_include_pad: - layer['count_pad'] = True - else: - layer['count_pad'] = False - else: - layer['count_pad'] = True - - if int(layer['class_name'][-2]) == 1: - (layer['n_in'], layer['n_filt']) = parse_data_format(input_shapes[0], layer['data_format']) - if node.op == 'call_module': - layer['pool_width'] = ( - class_object.kernel_size if not type(class_object.kernel_size) is tuple else class_object.kernel_size[0] - ) - layer['stride_width'] = class_object.stride if not type(class_object.stride) is tuple else class_object.stride[0] - - if type(class_object.padding) is tuple: - padding = class_object.padding[0] - else: - padding = class_object.padding - - else: - layer['pool_width'] = int(node.args[1]) - layer['stride_width'] = node.kwargs['stride'] if node.kwargs['stride'] is not None else int(node.args[1]) - padding = node.kwargs['padding'] - - if padding == 0: # No padding, i.e., 'VALID' padding in Keras/Tensorflow - layer['padding'] = 'valid' - else: # Only 'valid' and 'same' padding are available in Keras - layer['padding'] = 'same' - - (layer['n_out'], layer['pad_left'], layer['pad_right']) = compute_padding_1d_pytorch( - padding, layer['n_in'], layer['stride_width'], layer['pool_width'], 1 - ) - - if layer['data_format'] == 'channels_last': - output_shape = [input_shapes[0][0], layer['n_out'], layer['n_filt']] - elif layer['data_format'] == 'channels_first': - output_shape = [input_shapes[0][0], layer['n_filt'], layer['n_out']] - - elif int(layer['class_name'][-2]) == 2: - (layer['in_height'], layer['in_width'], layer['n_filt']) = parse_data_format(input_shapes[0], layer['data_format']) - - if node.op == 'call_module': - if type(class_object.stride) is tuple: - layer['stride_height'] = class_object.stride[0] - layer['stride_width'] = class_object.stride[1] - else: - layer['stride_height'] = class_object.stride - layer['stride_width'] = class_object.stride - - if type(class_object.kernel_size) is tuple: - layer['pool_height'] = class_object.kernel_size[0] - layer['pool_width'] = class_object.kernel_size[1] - else: - layer['pool_height'] = class_object.kernel_size - layer['pool_width'] = class_object.kernel_size - - if type(class_object.padding) is tuple: - padding = class_object.padding - else: - padding = [class_object.padding, class_object.padding] - - else: - if type(node.kwargs['stride']) is tuple: - layer['stride_height'] = node.kwargs['stride'][0] - layer['stride_width'] = node.kwargs['stride'][1] - else: - layer['stride_height'] = node.kwargs['stride'] - layer['stride_width'] = node.kwargs['stride'] - if type(node.kwargs['kernel_size']) is tuple: - layer['pool_height'] = node.kwargs['kernel_size'][0] - layer['pool_width'] = node.kwargs['kernel_size'][1] - else: - layer['pool_height'] = node.kwargs['kernel_size'] - layer['pool_width'] = 
node.kwargs['kernel_size'] - - if type(node.kwargs['padding']) is tuple: - padding = node.kwargs['padding'] - else: - padding = [node.kwargs['padding'], node.kwargs['padding']] - - if all(x == 0 for x in padding): # No padding, i.e., 'VALID' padding in Keras/Tensorflow - layer['padding'] = 'valid' - else: # Only 'valid' and 'same' padding are available in Keras - layer['padding'] = 'same' - - ( - layer['out_height'], - layer['out_width'], - layer['pad_top'], - layer['pad_bottom'], - layer['pad_left'], - layer['pad_right'], - ) = compute_padding_2d_pytorch( - padding, - layer['in_height'], - layer['in_width'], - layer['stride_height'], - layer['stride_width'], - layer['pool_height'], - layer['pool_width'], - 1, - 1, - ) - - if layer['data_format'] == 'channels_last': - output_shape = [input_shapes[0][0], layer['out_height'], layer['out_width'], layer['n_filt']] - elif layer['data_format'] == 'channels_first': - output_shape = [input_shapes[0][0], layer['n_filt'], layer['out_height'], layer['out_width']] - - return layer, output_shape diff --git a/hls4ml/hls4ml/converters/pytorch/reshape.py b/hls4ml/hls4ml/converters/pytorch/reshape.py deleted file mode 100644 index 5e5cde5..0000000 --- a/hls4ml/hls4ml/converters/pytorch/reshape.py +++ /dev/null @@ -1,108 +0,0 @@ -import numpy as np - -from hls4ml.converters.pytorch_to_hls import pytorch_handler - -reshape_layers = ['View'] - - -@pytorch_handler(*reshape_layers) -def parse_reshape_layer(operation, layer_name, input_names, input_shapes, node, class_object, data_reader, config): - assert operation == 'View' - - layer = {} - layer['class_name'] = 'Reshape' - layer['name'] = layer_name - layer['inputs'] = input_names - - layer['target_shape'] = [int(i) for i in node.args[1:]] - # View can have -1 as one as the dimensions, - # leaving it to us to deduce it from the other dimensions and the overall size - if -1 in layer['target_shape']: - size = np.prod(input_shapes[0][1:]) - for i in range(0, len(layer['target_shape'])): - if layer['target_shape'][i] == -1: - cl = layer['target_shape'][:] - cl.remove(-1) - layer['target_shape'][i] = int(size / np.prod(cl)) - - output_shape = input_shapes[0][:1] + layer['target_shape'] - - return layer, output_shape - - -@pytorch_handler('squeeze') -def parse_squeeze_layer(operation, layer_name, input_names, input_shapes, node, class_object, data_reader, config): - assert operation == 'squeeze' - - layer = {} - layer['class_name'] = 'Reshape' - layer['name'] = layer_name - - if len(node.args) > 1 or len(node.kwargs) > 0: # 'dim' argument is specified - output_shape = [i for i in input_shapes[0]] - squeeze_dim = node.kwargs.get('dim', None) - if squeeze_dim is None: - squeeze_dim = node.args[1] - if isinstance(squeeze_dim, tuple): - for dim in squeeze_dim: - del output_shape[dim] - else: - del output_shape[squeeze_dim] - else: - output_shape = [i for i in input_shapes[0] if i != 1] - - layer['target_shape'] = output_shape.copy() - if layer['target_shape'][0] is None: - del layer['target_shape'][0] - - return layer, output_shape - - -@pytorch_handler('unsqueeze') -def parse_unsqueeze_layer(operation, layer_name, input_names, input_shapes, node, class_object, data_reader, config): - assert operation == 'unsqueeze' - - layer = {} - layer['class_name'] = 'Reshape' - layer['name'] = layer_name - layer['inputs'] = input_names - - # Unlike in 'squeeze' in 'unsqueeze', dim argument must exist - output_shape = [i for i in input_shapes[0]] - if len(node.args) > 1: # Specified as unsqueeze(x, n) - squeeze_dim = node.args[1] - 
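# (Editorial note) Under torch.fx tracing, unsqueeze(x, 1) carries the dim as a
# positional argument in node.args, while unsqueeze(x, dim=1) carries it in
# node.kwargs; both forms are handled here.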
else:  # Specified as unsqueeze(x, dim=n)
-        squeeze_dim = node.kwargs['dim']
-    if squeeze_dim < 0:
-        squeeze_dim += len(output_shape) + 1  # torch counts negative dims from the end of the unsqueezed shape
-    # insert() adds the new element before the given index, which is exactly where unsqueeze places the new axis
-    output_shape.insert(squeeze_dim, 1)
-
-    layer['target_shape'] = output_shape.copy()
-    if layer['target_shape'][0] is None:
-        del layer['target_shape'][0]
-
-    return layer, output_shape
-
-
-@pytorch_handler('Flatten')
-def parse_flatten_layer(operation, layer_name, input_names, input_shapes, node, class_object, data_reader, config):
-    assert operation == 'Flatten'
-
-    layer = {}
-    layer['class_name'] = 'Reshape'
-    layer['name'] = layer_name
-    layer['inputs'] = input_names
-
-    start_dim = class_object.start_dim
-    end_dim = class_object.end_dim
-    if end_dim + 1 == 0 or end_dim + 1 > len(input_shapes[0]):
-        end_dim = len(input_shapes[0])
-    else:
-        end_dim = end_dim + 1
-
-    layer['target_shape'] = (
-        input_shapes[0][0:start_dim] + [np.prod(input_shapes[0][start_dim:end_dim])] + input_shapes[0][end_dim:]
-    )
-    output_shape = layer['target_shape']
-
-    return layer, output_shape
diff --git a/hls4ml/hls4ml/converters/pytorch_to_hls.py b/hls4ml/hls4ml/converters/pytorch_to_hls.py
deleted file mode 100644
index afa8736..0000000
--- a/hls4ml/hls4ml/converters/pytorch_to_hls.py
+++ /dev/null
@@ -1,340 +0,0 @@
-import torch
-
-from hls4ml.model import ModelGraph
-
-
-class PyTorchModelReader:
-    """
-    PyTorch reader to extract weights data.
-    """
-
-    def __init__(self, config):
-        self.torch_model = config['PytorchModel']
-        self.state_dict = self.torch_model.state_dict()
-        self.input_shape = config['InputShape']
-
-    def get_weights_data(self, layer_name, var_name):
-        data = None
-
-        tensor_name = layer_name + '.' + var_name
-
-        if tensor_name in self.state_dict:
-            data = self.state_dict[tensor_name].numpy()
-
-        return data
-
-
-class PyTorchFileReader(PyTorchModelReader):  # Inherit get_weights_data method
-    def __init__(self, config):
-        self.config = config
-
-        if not torch.cuda.is_available():
-            self.torch_model = torch.load(config['PytorchModel'], map_location=lambda storage, loc: storage)
-        else:
-            self.torch_model = torch.load(config['PytorchModel'])
-
-        # Get the input tensor's shape
-        self.input_shape = config.get('InputShape')
-
-        if self.input_shape is None:
-            raise Exception('Must specify input shape ("InputShape") in config!')
-
-        # Convert it to a list
-        self.input_shape = self.input_shape.strip('(,)').split(',')
-        self.input_shape = [None if n == 'None' else int(n) for n in self.input_shape]
-
-        self.state_dict = self.torch_model.state_dict()
-
-
-def get_weights_data(data_reader, layer_name, var_name):
-    if not isinstance(var_name, (list, tuple)):
-        var_name = [var_name]
-
-    data = [data_reader.get_weights_data(layer_name, var) for var in var_name]
-
-    if len(data) == 1:
-        return data[0]
-    else:
-        return (*data,)
-
-
-# ----------------------Layer handling--------------------- #
-layer_handlers = {}
-
-
-def register_pytorch_layer_handler(layer_name, handler_func):
-    if layer_name in layer_handlers:
-        raise Exception(f'Layer {layer_name} already registered')
-    else:
-        layer_handlers[layer_name] = handler_func
-
-
-def get_supported_pytorch_layers():
-    return list(layer_handlers.keys())
-
-
-def pytorch_handler(*args):
-    def decorator(function):
-        function.handles = [arg for arg in args]
-        return function
-
-    return decorator
-
-
-# Map names of operations between torch.nn and torch.nn.functional
-layer_name_map = {
-    'relu': 'ReLU',
-    'leaky_relu': 'LeakyReLU',
-    'elu': 'ELU',
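# (Editorial note) Keys are the functional-op node names produced by symbolic
# tracing; values are the torch.nn class names, so functional calls are routed
# to the same registered handlers as their module counterparts.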
'prelu': 'PReLU', - 'sigmoid': 'Sigmoid', - '_threshold': 'Threshold', - 'softmax': 'Softmax', - 'max_pool1d': 'MaxPool1d', - 'max_pool2d': 'MaxPool2d', - 'avg_pool1d': 'AvgPool1d', - 'avg_pool2d': 'AvgPool2d', - 'flatten': 'Flatten', -} - - -# ---------------------------------------------------------------- - - -def pytorch_to_hls(config): - """Convert PyTorch model to hls4ml ModelGraph. - - Args: - config (dict): The conversion config - - Raises: - Exception: On unsupported features of the model. - - Returns: - ModelGraph: hls4ml model object. - """ - - # This is a list of dictionaries to hold all the layer info we need to generate HLS - layer_list = [] - - print('Interpreting Model ...') - - reader = PyTorchFileReader(config) if isinstance(config['PytorchModel'], str) else PyTorchModelReader(config) - if type(reader.input_shape) is tuple: - input_shapes = [list(reader.input_shape)] - else: - input_shapes = list(reader.input_shape) - input_shapes = [list(shape) for shape in input_shapes] - - model = reader.torch_model - - # dict of layer objects in non-traced form for access lateron - children = {c[0]: c[1] for c in model.named_children()} - # use symbolic_trace to get a full graph of the model - from torch.fx import symbolic_trace - - traced_model = symbolic_trace(model) - # Define layers to skip for conversion to HLS - skip_layers = ['Dropout', 'Sequential'] - - # All supported layers - supported_layers = get_supported_pytorch_layers() + skip_layers - - # Map inputs of skipped and split (activation) layers - inputs_map = {} - - input_layers = [] - - # Output shape tracking - output_shapes = {} - output_shape = None - - # Loop through layers - print('Topology:') - layer_counter = 0 - - n_inputs = 0 - - for node in traced_model.graph.nodes: - if node.op == 'call_module': - # modules that are part of a torch.nn.Sequential with name 'name' have target names 'name.x', - # where x is an integer numbering the elements of the Sequential - if '.' 
in node.target: - fqn_path = node.target.split('.') - sub_children = dict(children[fqn_path[0]].named_children()) - for name in fqn_path[1:-1]: - sub_children = dict(sub_children[name].named_children()) - sub_children[fqn_path[-1]] - class_object = sub_children[fqn_path[-1]] - else: - class_object = children[node.target] - - pytorch_class = class_object.__class__.__name__ - - if pytorch_class not in supported_layers: - raise Exception(f'Unsupported layer {pytorch_class}') - - if layer_counter != 0: - input_shapes = [output_shape] # In case there are multiple inputs - - layer_name = node.name - - # Handle skipped layers - if pytorch_class in skip_layers: - if pytorch_class == 'Sequential': # Ignore the mother module's class name - continue - - # Assuming only one input - parent_input = [str(i) for i in node.args][0] - inputs_map[layer_name] = inputs_map.get(parent_input, parent_input) - - output_shapes[layer_name] = input_shapes[0] - - continue - - # Increment the layer counter after initial screenings - if pytorch_class in supported_layers: - layer_counter += 1 - - # parse info from class object - input_names = [inputs_map.get(str(i), str(i)) for i in node.args] - input_shapes = [output_shapes[str(i)] for i in node.args] - - # for Conv layers - if 'Conv' in pytorch_class: - if not class_object.padding_mode == 'zeros': - raise Exception('Padding modes other than "zeros" not implemented yet') - if not class_object.groups == 1: - raise Exception('Non-default options for groups not implemented yet') - - # Process the layer - layer, output_shape = layer_handlers[pytorch_class]( - pytorch_class, layer_name, input_names, input_shapes, node, class_object, reader, config - ) - - print( - 'Layer name: {}, layer type: {}, input shape: {}'.format( - layer['name'], - layer['class_name'], - input_shapes, - ) - ) - layer_list.append(layer) - - assert output_shape is not None - output_shapes[layer['name']] = output_shape - - layer_counter += 1 - - if node.op == 'placeholder': - # 'placeholder' indicates an input layer. Multiple inputs are supported - - input_layer = {} - input_layer['name'] = node.name - input_layer['class_name'] = 'InputLayer' - input_layer['input_shape'] = list(input_shapes[n_inputs][1:]) - layer_list.insert(n_inputs, input_layer) - - output_shapes[input_layer['name']] = list(input_shapes[n_inputs]) - input_layers.append(input_layer['name']) - n_inputs += 1 - - layer_counter += 1 - - if node.op == 'call_function': - # Function calls in the graph have to be transformed to layers known to hls4ml - - # operations that appear repeatedly have '_n' appended to their name for the nth repetition - operation = node.name - if node.name.split('_')[-1].isdigit(): - operation = '_'.join(node.name.split('_')[:-1]) - - if operation in layer_name_map: - operation = layer_name_map[operation] - - # only a limited number of functions are supported - if operation not in supported_layers: - raise Exception(f'Unsupported function {operation}') - if operation == 'PReLU' or operation == 'batch_norm' or operation == 'conv1d' or operation == 'conv2d': - raise Exception( - f'Function {operation} cannot be parsed as torch.nn.functional. 
Use the torch.nn implementation instead' - ) - - layer_name = node.name - - layer_counter += 1 - - input_names = [inputs_map.get(str(i), str(i)) for i in node.all_input_nodes] - input_shapes = [list(output_shapes[str(i)]) for i in input_names] - - # Process the layer - layer, output_shape = layer_handlers[operation]( - operation, layer_name, input_names, input_shapes, node, None, reader, config - ) - - print('Layer name: {}, layer type: {}, input shape: {}'.format(layer['name'], layer['class_name'], input_shapes)) - layer_list.append(layer) - - assert output_shape is not None - output_shapes[layer['name']] = output_shape - - if node.op == 'get_attr': - # Deals with tensors that are member variables of the model class - # We insert these tensors as input layer nodes into the hls4ml model graph - if '.' not in node.target: - obj = getattr(model, node.name) - else: - obj = getattr(children[node.target.split('.')[0]], node.name) - - input_layer = {} - input_layer['name'] = node.name - input_layer['class_name'] = 'InputLayer' - input_layer['input_shape'] = [None] + list(obj.size()) - layer_list.insert(n_inputs, input_layer) - - output_shapes[input_layer['name']] = [None] + list(obj.size()) - input_layers.append(input_layer['name']) - n_inputs += 1 - - layer_counter += 1 - - if node.op == 'call_method': - # Method calls in the graph have to be transformed to layers known to hls4ml - - # operations that appear repeatedly have '_n' appended to their name for the nth repetition - operation = node.name - if node.name.split('_')[-1].isdigit(): - operation = '_'.join(node.name.split('_')[:-1]) - - if operation in layer_name_map: - operation = layer_name_map[operation] - - # only a limited number of methods are supported - if operation not in supported_layers: - raise Exception(f'Unsupported method {operation}') - - layer_name = node.name - - layer_counter += 1 - - input_names = [inputs_map.get(str(i), str(i)) for i in node.all_input_nodes] - - # Process the layer - input_shapes = [list(output_shapes[str(i)]) for i in input_names] - - layer, output_shape = layer_handlers[operation]( - operation, layer_name, input_names, input_shapes, node, None, reader, config - ) - - print('Layer name: {}, layer type: {}, input shape: {}'.format(layer['name'], layer['class_name'], input_shapes)) - layer_list.append(layer) - - assert output_shape is not None - output_shapes[layer['name']] = output_shape - - if len(input_layers) == 0: - input_layers = None - - print('Creating HLS model') - hls_model = ModelGraph(config, layer_list, inputs=input_layers) - return hls_model diff --git a/hls4ml/hls4ml/converters/utils.py b/hls4ml/hls4ml/converters/utils.py deleted file mode 100644 index d1c9e05..0000000 --- a/hls4ml/hls4ml/converters/utils.py +++ /dev/null @@ -1,245 +0,0 @@ -import math - - -def parse_data_format(input_shape, data_format='channels_last'): - """Parses the given input shape according to the specified data format. - - This function can be used to ensure the shapes of convolutional and pooling layers are correctly parsed. If the first - element of the given ``input_shape`` is ``None`` it is interpreted as a batch dimension and discarded. The returned tuple - will have the channels dimension last. - - Args: - input_shape (list or tuple): Input shape of 2D or 3D tensor with optional batch dimension of ``None``. - data_format (str, optional): Data format type, one of ``channels_first`` or ``channels_last``. (case insensitive). - Defaults to 'channels_last'.
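- - Example: - For illustration, two hypothetical calls (shapes chosen arbitrarily) both yield ``(8, 8, 3)``:: - - parse_data_format((None, 8, 8, 3), 'channels_last') - parse_data_format((None, 3, 8, 8), 'channels_first')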
- - Raises: - Exception: Raised if the data format type is unknown. - - Returns: - tuple: The input shape (without the batch dimension) in ``channels_last`` format. - """ - if input_shape[0] is None: - # Ignore batch size - input_shape = input_shape[1:] - - if data_format.lower() == 'channels_last': - if len(input_shape) == 2: # 1D, (n_in, n_filt) - return (input_shape[0], input_shape[1]) - elif len(input_shape) == 3: # 2D, (in_height, in_width, n_filt) - return (input_shape[0], input_shape[1], input_shape[2]) - - elif data_format.lower() == 'channels_first': - if len(input_shape) == 2: # 1D, (n_filt, n_in) - return (input_shape[1], input_shape[0]) - elif len(input_shape) == 3: # 2D, (n_filt, in_height, in_width) - return (input_shape[1], input_shape[2], input_shape[0]) - else: - raise Exception(f'Unknown data format: {data_format}') - - -def compute_padding_1d(pad_type, in_size, stride, filt_size): - """Computes the amount of padding required on each side of the 1D input tensor. - - In case of ``same`` padding, this routine tries to pad evenly left and right, but if the amount of columns to be added - is odd, it will add the extra column to the right. - - Args: - pad_type (str): Padding type, one of ``same``, ``valid`` or ``causal`` (case insensitive). - in_size (int): Input size. - stride (int): Stride length. - filt_size (int): Length of the kernel window. - - Raises: - Exception: Raised if the padding type is unknown. - - Returns: - tuple: Tuple containing the padded input size, left and right padding values. - """ - if pad_type.lower() == 'same': - n_out = int(math.ceil(float(in_size) / float(stride))) - if in_size % stride == 0: - pad_along_size = max(filt_size - stride, 0) - else: - pad_along_size = max(filt_size - (in_size % stride), 0) - pad_left = pad_along_size // 2 - pad_right = pad_along_size - pad_left - elif pad_type.lower() == 'valid': - n_out = int(math.ceil(float(in_size - filt_size + 1) / float(stride))) - pad_left = 0 - pad_right = 0 - elif pad_type.lower() == 'causal': - n_out = int(math.ceil(float(in_size) / float(stride))) - if in_size % stride == 0: - pad_along_size = max(filt_size - stride, 0) - else: - pad_along_size = max(filt_size - (in_size % stride), 0) - pad_left = pad_along_size - pad_right = 0 - else: - raise Exception(f'Unknown padding type: {pad_type}') - - return (n_out, pad_left, pad_right) - - -def compute_padding_2d(pad_type, in_height, in_width, stride_height, stride_width, filt_height, filt_width): - """Computes the amount of padding required on each side of the 2D input tensor. - - In case of ``same`` padding, this routine tries to pad evenly left and right (top and bottom), but if the amount of - columns to be added is odd, it will add the extra column to the right/bottom. - - Args: - pad_type (str): Padding type, one of ``same`` or ``valid`` (case insensitive). - in_height (int): The height of the input tensor. - in_width (int): The width of the input tensor. - stride_height (int): Stride height. - stride_width (int): Stride width. - filt_height (int): Height of the kernel window. - filt_width (int): Width of the kernel window. - - Raises: - Exception: Raised if the padding type is unknown. - - Returns: - tuple: Tuple containing the padded input height, width, and top, bottom, left and right padding values.
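- - Example: - As a rough illustration (values assumed, not taken from a real model), a 3x3 kernel applied to a 28x28 input - with stride 1 and ``same`` padding keeps the spatial size and pads one pixel on every side:: - - compute_padding_2d('same', 28, 28, 1, 1, 3, 3) - # -> (28, 28, 1, 1, 1, 1)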
- """ - if pad_type.lower() == 'same': - # Height - out_height = int(math.ceil(float(in_height) / float(stride_height))) - if in_height % stride_height == 0: - pad_along_height = max(filt_height - stride_height, 0) - else: - pad_along_height = max(filt_height - (in_height % stride_height), 0) - pad_top = pad_along_height // 2 - pad_bottom = pad_along_height - pad_top - # Width - out_width = int(math.ceil(float(in_width) / float(stride_width))) - if in_width % stride_width == 0: - pad_along_width = max(filt_width - stride_width, 0) - else: - pad_along_width = max(filt_width - (in_width % stride_width), 0) - pad_left = pad_along_width // 2 - pad_right = pad_along_width - pad_left - elif pad_type.lower() == 'valid': - out_height = int(math.ceil(float(in_height - filt_height + 1) / float(stride_height))) - out_width = int(math.ceil(float(in_width - filt_width + 1) / float(stride_width))) - - pad_top = 0 - pad_bottom = 0 - pad_left = 0 - pad_right = 0 - else: - raise Exception(f'Unknown padding type: {pad_type}') - - return (out_height, out_width, pad_top, pad_bottom, pad_left, pad_right) - - -def compute_padding_1d_pytorch(pad_type, in_size, stride, filt_size, dilation): - if isinstance(pad_type, str): - if pad_type.lower() == 'same': - n_out = int( - math.floor((float(in_size) + 2 - float(dilation) * (float(filt_size) - 1) - 1) / float(stride) + 1) - ) # from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html - if in_size % stride == 0: - pad_along_size = max(filt_size - stride, 0) - else: - pad_along_size = max(filt_size - (in_size % stride), 0) - pad_right = pad_along_size // 2 - pad_left = pad_along_size - pad_right - elif pad_type.lower() == 'valid': - n_out = int( - math.floor((float(in_size) - float(dilation) * (float(filt_size) - 1) - 1) / float(stride) + 1) - ) # from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html - pad_left = 0 - pad_right = 0 - else: - raise Exception(f'Unknown padding type: {pad_type}') - else: - if pad_type > 0: - n_out = int( - math.floor( - (float(in_size) + 2 * pad_type - float(dilation) * (float(filt_size) - 1) - 1) / float(stride) + 1 - ) - ) # from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html - pad_right = pad_type - pad_left = pad_type - else: - n_out = int( - math.floor((float(in_size) - float(dilation) * (float(filt_size) - 1) - 1) / float(stride) + 1) - ) # from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html - pad_left = 0 - pad_right = 0 - - return (n_out, pad_left, pad_right) - - -def compute_padding_2d_pytorch( - pad_type, in_height, in_width, stride_height, stride_width, filt_height, filt_width, dilation_height, dilation_width -): - if isinstance(pad_type, str): - if pad_type.lower() == 'same': - # Height - out_height = int( - math.floor(float(in_height + 2 - dilation_height * (filt_height - 1) - 1) / float(stride_height) + 1) - ) - if in_height % stride_height == 0: - pad_along_height = max(filt_height - stride_height, 0) - else: - pad_along_height = max(filt_height - (in_height % stride_height), 0) - pad_bottom = pad_along_height // 2 - pad_top = pad_along_height - pad_bottom - pad_top = 1 - # Width - out_width = int( - math.floor(float(in_width + 2 - dilation_width * (filt_width - 1) - 1) / float(stride_width) + 1) - ) - if in_width % stride_width == 0: - pad_along_width = max(filt_width - stride_width, 0) - else: - pad_along_width = max(filt_width - (in_width % stride_width), 0) - pad_right = pad_along_width // 2 - pad_left = pad_along_width - pad_right - elif pad_type.lower() == 
'valid': - out_height = int( - math.floor(float(in_height - dilation_height * (filt_height - 1) - 1) / float(stride_height) + 1) - ) - out_width = int(math.floor(float(in_width - dilation_width * (filt_width - 1) - 1) / float(stride_width) + 1)) - - pad_top = 0 - pad_bottom = 0 - pad_left = 0 - pad_right = 0 - else: - raise Exception(f'Unknown padding type: {pad_type}') - - else: - if pad_type[0] == 0 and pad_type[1] == 0: - out_height = int( - math.floor(float(in_height - dilation_height * (filt_height - 1) - 1) / float(stride_height) + 1) - ) - out_width = int(math.floor(float(in_width - dilation_width * (filt_width - 1) - 1) / float(stride_width) + 1)) - - pad_top = 0 - pad_bottom = 0 - pad_left = 0 - pad_right = 0 - - else: - # Height - pad_height = pad_type[0] - pad_width = pad_type[1] - out_height = int( - math.floor( - float(in_height + 2 * pad_height - dilation_height * (filt_height - 1) - 1) / float(stride_height) + 1 - ) - ) - pad_bottom = pad_height - pad_top = pad_height - # Width - out_width = int( - math.floor(float(in_width + 2 * pad_width - dilation_width * (filt_width - 1) - 1) / float(stride_width) + 1) - ) - pad_right = pad_width - pad_left = pad_width - - return (out_height, out_width, pad_top, pad_bottom, pad_left, pad_right) diff --git a/hls4ml/hls4ml/model/__init__.py b/hls4ml/hls4ml/model/__init__.py deleted file mode 100644 index fc50439..0000000 --- a/hls4ml/hls4ml/model/__init__.py +++ /dev/null @@ -1,8 +0,0 @@ -from hls4ml.model.graph import HLSConfig, ModelGraph # noqa: F401 - -try: - from hls4ml.model import profiling # noqa: F401 - - __profiling_enabled__ = True -except ImportError: - __profiling_enabled__ = False diff --git a/hls4ml/hls4ml/model/attributes.py b/hls4ml/hls4ml/model/attributes.py deleted file mode 100644 index 0e8df6e..0000000 --- a/hls4ml/hls4ml/model/attributes.py +++ /dev/null @@ -1,244 +0,0 @@ -""" -All information about a layer is stored in the attributes of a layer instance. This information can be properties of -a layer, like the number of hidden units in a Dense layer or the number of filters in a convolutional layer, but also includes -information about weight variables, output variables and all data types defined. The attribute system provides a mechanism -that ensures layers are correctly initialized, have valid information stored and have configurable endpoints exposed. - -This module contains the definitions of classes for handling attributes. The ``Attribute`` class and its subclasses provide -information about an expected attribute, but the actual value will be stored within the instance's ``attribute`` dict. This -provides a unified view (mapping) of all attributes, but for convenience there are mappings that expose only certain types -of attributes, such as types, variables, weights, etc., via the ``AttributeMapping`` class. -""" - -from collections.abc import MutableMapping -from numbers import Integral - -from hls4ml.model.types import NamedType, Source, TensorVariable, WeightVariable -from hls4ml.utils.string_utils import convert_to_pascal_case - -# region Attribute class definitions - - -class Attribute: - """ - Base attribute class. - - An attribute consists of a name, the type of value it will store, the optional default if no value is specified during - layer creation, and a flag indicating if the value can be modified by the user. This class is generally expected to - exist only as part of the ``expected_attributes`` property of the layer class.
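- - For example, a hypothetical layer class might declare (attribute names illustrative):: - - _expected_attributes = [Attribute('n_filt'), ConfigurableAttribute('reuse_factor', default=1)]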
- - Args: - name (str): Name of the attribute - value_type (optional): Type of the value expected to be stored in the attribute. - If not specified, no validation of the stored value will be performed. Defaults to ``int``. - default (optional): Default value if no value is specified during layer creation. Defaults to ``None``. - configurable (bool, optional): Specifies if the attribute can be modified after creation. Defaults to ``False``. - - """ - - def __init__(self, name, value_type=Integral, default=None, configurable=False): - self.name = name - self.value_type = value_type - self.default = default - self.configurable = configurable - - def validate_value(self, value): - if self.value_type is not None: - return issubclass(type(value), self.value_type) - else: - return True # Meaning we don't care - - @property - def config_name(self): - """Returns the name of the attribute as it will appear in the configuration dictionary. - - The format will be in pascal case, e.g., ``attribute_name`` -> ``AttributeName``. - - Returns: - str: The name of the attribute in pascal case. - """ - return convert_to_pascal_case(self.name) - - -class ConfigurableAttribute(Attribute): - """ - Represents a configurable attribute, i.e., an attribute whose value can be modified by the user. - - This is a convenience class. It is advised to use ``ConfigurableAttribute`` over ``Attribute(..., configurable=True)`` - when defining the expected attributes of layer classes. - """ - - def __init__(self, name, value_type=int, default=None): - super().__init__(name, value_type, default, configurable=True) - - -class TypeAttribute(Attribute): - """ - Represents an attribute that will store a type, i.e., an instance of ``NamedType`` or its subclasses. - - As a convention, the name of the attribute storing a type will end in ``_t``. - """ - - def __init__(self, name, default=None, configurable=True): - if not name.endswith('_t'): - name += '_t' - super().__init__(name, value_type=NamedType, default=default, configurable=configurable) - - -class ChoiceAttribute(Attribute): - """ - Represents an attribute whose value can be one of several predefined values. - """ - - def __init__(self, name, choices, default=None, configurable=True): - super().__init__(name, value_type=list, default=default, configurable=configurable) - assert len(choices) > 0 - if default is not None: - assert default in choices - self.choices = choices - self.value_type = str(self.choices) - - def validate_value(self, value): - return value in self.choices - - -class WeightAttribute(Attribute): - """ - Represents an attribute that will store a weight variable. - """ - - def __init__(self, name): - super().__init__(name, value_type=WeightVariable, default=None, configurable=False) - - -class CodeAttribute(Attribute): - """ - Represents an attribute that will store a generated source code block. - """ - - def __init__(self, name): - super().__init__(name, value_type=Source, default=None, configurable=False) - - -# endregion - -# region Attribute mapping definitions - - -class AttributeDict(MutableMapping): - """ - Class containing all attributes of a given layer. - - Instances of this class behave like a dictionary. Upon insertion, the key/value may trigger additional actions, - such as registering variables or modifying the key name to ensure it follows the convention. - - Specific "views" (mappings) of this class can be used to filter desired attributes via the ``AttributeMapping`` class.
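- - A minimal sketch of the intended use, assuming ``layer`` is an already-constructed ``Layer`` instance:: - - layer.attributes['reuse_factor'] = 4 # stored as a plain attribute - weights = WeightMapping(layer.attributes) # view exposing only WeightVariable entries - types = TypeMapping(layer.attributes) # view exposing only NamedType entries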
- """ - - def __init__(self, layer): - self.layer = layer - self.attributes = {} - self._expected_attributes = [a.name for a in self.layer.expected_attributes] - - def __getitem__(self, key): - return self.attributes[key] - - def __len__(self): - return len(self.attributes) - - def __iter__(self): - yield from self.attributes.keys() - - def __setitem__(self, key, value): - if isinstance(value, TensorVariable): - self.layer.model.register_output_variable(key, value) - self.attributes['result_t'] = value.type - if key in self._expected_attributes and key in self.layer.outputs: - key = 'out_' + key - elif isinstance(value, WeightVariable): - self.attributes[key + '_t'] = value.type - - self.attributes[key] = value - - def __delitem__(self, key): - self.attributes.remove(key) - - -class AttributeMapping(MutableMapping): - """ - Base class used to filter attributes based on their expected class. - """ - - def __init__(self, attributes, clazz): - self.attributes = attributes - self.clazz = clazz - - def __getitem__(self, key): - return self.attributes[key] - - def __len__(self): - return sum(map(lambda x: isinstance(x, self.clazz), self.attributes.values())) - - def __iter__(self): - precision_keys = [k for k, v in self.attributes.items() if isinstance(v, self.clazz)] - yield from precision_keys - - def __setitem__(self, key, value): - self.attributes[key] = value - - def __delitem__(self, key): - self.attributes.remove(key) - - -class WeightMapping(AttributeMapping): - """ - Mapping that only sees ``WeightVariable`` instances (i.e., weights). - """ - - def __init__(self, attributes): - super().__init__(attributes, WeightVariable) - - -class VariableMapping(AttributeMapping): - """ - Mapping that only sees ``TensorVariable`` instances (i.e., activation tensors). - """ - - def __init__(self, attributes): - super().__init__(attributes, TensorVariable) - - def __getitem__(self, key): - if 'out_' + key in self.attributes: - return self.attributes['out_' + key] - else: - return self.attributes[key] - - def __iter__(self): - precision_keys = [k for k, v in self.attributes.items() if isinstance(v, self.clazz)] - for key in precision_keys: - if key.startswith('out_'): - yield key[len('out_') :] - else: - yield key - super().__iter__() - - -class TypeMapping(AttributeMapping): - """ - Mapping that only sees ``NamedType`` instances (i.e., defined types). - """ - - def __init__(self, attributes): - super().__init__(attributes, NamedType) - - -class CodeMapping(AttributeMapping): - """ - Mapping that only sees ``Source`` instances (i.e., generated source code blocks). - """ - - def __init__(self, attributes): - super().__init__(attributes, Source) - - -# endregion diff --git a/hls4ml/hls4ml/model/flow/__init__.py b/hls4ml/hls4ml/model/flow/__init__.py deleted file mode 100644 index 0e2a180..0000000 --- a/hls4ml/hls4ml/model/flow/__init__.py +++ /dev/null @@ -1,8 +0,0 @@ -from hls4ml.model.flow.flow import ( # noqa: F401 - Flow, - get_available_flows, - get_backend_flows, - get_flow, - register_flow, - update_flow, -) diff --git a/hls4ml/hls4ml/model/flow/flow.py b/hls4ml/hls4ml/model/flow/flow.py deleted file mode 100644 index 43415f5..0000000 --- a/hls4ml/hls4ml/model/flow/flow.py +++ /dev/null @@ -1,142 +0,0 @@ -class Flow: - """This class represents a collection of optimizers. The flow can optionally depend on other flows.""" - - def __init__(self, name, optimizers, requires=None): - """Creates a new flow. - - Args: - name (str): Unique name of the flow. - optimizers (list, optional): List of optimizers. 
- requires (list, optional): List (str) of flows which have to be applied before this flow. Defaults to None. - """ - self.name = name - if optimizers is None: - self._optimizers = [] - else: - self._optimizers = optimizers - if requires is None: - self.requires = [] - else: - self.requires = requires - - @property - def optimizers(self): - return self._optimizers - - def _add_optimizer(self, opt_name): - self._optimizers.append(opt_name) - - def _remove_optimizer(self, opt_name): - self._optimizers.remove(opt_name) - - -class DynamicFlow(Flow): - """A dynamically updated flow. - - This flow will get the list of optimizers by calling optimizer_func. Useful to represent a view of all available - optimizers of a certain type. - """ - - def __init__(self, name, optimizer_func, requires=None): - """Creates a new dynamic flow. - - Args: - name (str): Unique name of the flow. - optimizer_func (callable): Function to call (without arguments) to get the list of optimizers. - requires (list, optional): List (str) of flows which have to be applied before this flow. Defaults to None. - """ - self.name = name - self._optimizer_func = optimizer_func - self._added_optimizers = set() - self._removed_optimizers = set() - if requires is None: - self.requires = [] - else: - self.requires = requires - - @property - def optimizers(self): - optimizers = self._optimizer_func() - optimizers.extend(self._added_optimizers) - optimizers = [o for o in optimizers if o not in self._removed_optimizers] - return optimizers - - def _add_optimizer(self, opt_name): - self._added_optimizers.add(opt_name) - - def _remove_optimizer(self, opt_name): - self._removed_optimizers.add(opt_name) - - -flow_map = {} - - -def _get_backend_name_prefix(name, backend): - if backend is not None and not name.startswith(backend.lower()): - name = backend.lower() + ':' + name - - return name - - -def register_flow(name, optimizers, requires=None, backend=None): - """Create a flow and add it to the registry. - - Args: - name (str): Name of the flow to register. - optimizers (list): List of optimizers. - requires (list, optional): List (str) of flows which have to be applied before this flow. Defaults to None. - backend (str, optional): Backend to which the flow will belong. If not None, the name of the backend will be - prepended to the name of the registered flow. Defaults to None. - - Raises: - Exception: If the flow has already been registered. - - Returns: - str: The name of the registered flow. - """ - name = _get_backend_name_prefix(name, backend) - - if name in flow_map: - raise Exception(f'Flow {name} already registered') - - if callable(optimizers): - flow = DynamicFlow(name, optimizer_func=optimizers, requires=requires) - else: - flow = Flow(name, optimizers=optimizers, requires=requires) - - flow_map[name] = flow - - return name - - -def update_flow(flow_name, add_optimizers=None, remove_optimizers=None): - """Add or remove optimizers to/from an existing flow. - - Args: - flow_name (str): The name of the flow to update. - add_optimizers (list, optional): List (str) of optimizers to add. Defaults to None. - remove_optimizers (list, optional): List (str) of optimizers to remove. Defaults to None.
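- - Example: - A sketch assuming a flow ``'vivado:optimize'`` and an optimizer pass ``'fuse_bias'`` were registered earlier - (both names are hypothetical):: - - update_flow('vivado:optimize', add_optimizers=['fuse_bias'])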
- """ - flow = get_flow(flow_name) - if add_optimizers is not None: - for opt in add_optimizers: - flow._add_optimizer(opt) - - if remove_optimizers is not None: - for opt in remove_optimizers: - flow._remove_optimizer(opt) - - -def get_flow(name): - if name in flow_map: - return flow_map[name] - else: - raise Exception(f'Unknown flow: {name}') - - -def get_backend_flows(backend): - return [flow for flow in flow_map.keys() if flow.startswith(backend.lower() + ':')] - - -def get_available_flows(): - return list(flow_map.keys()) diff --git a/hls4ml/hls4ml/model/graph.py b/hls4ml/hls4ml/model/graph.py deleted file mode 100644 index c44fd8f..0000000 --- a/hls4ml/hls4ml/model/graph.py +++ /dev/null @@ -1,854 +0,0 @@ -import ctypes -import os -import platform -from collections import OrderedDict - -import numpy as np -import numpy.ctypeslib as npc - -from hls4ml.backends import get_backend -from hls4ml.model.flow import get_flow -from hls4ml.model.layers import layer_map -from hls4ml.model.optimizer import get_available_passes, optimize_model - - -class HLSConfig: - """The configuration class as stored in the ModelGraph. - - Args: - config (dict): The configuration dictionary - """ - - def __init__(self, config): - self.config = config - self.backend = get_backend(self.config.get('Backend', 'Vivado')) - - self.model_precision = {} - self.layer_type_precision = {} - self.layer_name_precision = {} - - self.model_rf = None - self.layer_type_rf = {} - self.layer_name_rf = {} - - self.model_targ_cycles = None - self.layer_type_targ_cycles = {} - self.layer_name_targ_cycles = {} - - self.model_strategy = 'Latency' - self.layer_type_strategy = {} - self.layer_name_strategy = {} - - self.model_conv_implementation = 'LineBuffer' - self.layer_type_conv_implementation = {} - self.layer_name_conv_implementation = {} - - self.model_compression = False - self.layer_type_compression = {} - self.layer_name_compression = {} - - self.trace_output = self.get_config_value('TraceOutput', False) - - self.pipeline_style = 'pipeline' - - self._parse_hls_config() - self._validate_hls_config() - - def get_config_value(self, key, default=None): - return self.config.get(key, default) - - def get_project_name(self): - return self.get_config_value('ProjectName') - - def get_output_dir(self): - return self.get_config_value('OutputDir') - - def get_layer_config_value(self, layer, key, default=None): - hls_config = self.config['HLSConfig'] - - name_config = hls_config.get('LayerName', {}).get(layer.name, None) - if name_config is not None: - return name_config.get(key, default) - - type_config = hls_config.get('LayerType', {}).get(layer.class_name, None) - if type_config is not None: - return type_config.get(key, default) - - model_config = hls_config.get('Model', None) - if model_config is not None: - return model_config.get(key, default) - - return default - - def get_layer_config(self, layer): - hls_config = self.config['HLSConfig'] - layer_config = {} - - type_config = hls_config.get('LayerType', {}).get(layer.class_name, None) - if type_config is not None: - layer_config.update(type_config) - - name_config = hls_config.get('LayerName', {}).get(layer.name, None) - if name_config is not None: - layer_config.update(name_config) - - return layer_config - - def get_precision(self, layer, var='default'): - precision = self.layer_name_precision.get(layer.name.lower() + '_' + var) - type_name = layer.name.lower() + '_' + var + '_t' - if precision is None: - precision = self.layer_name_precision.get(layer.name.lower() + '_default') 
- type_name = layer.name.lower() + '_default_t' - - if precision is None: - precision = self.layer_type_precision.get(layer.class_name.lower() + '_' + var) - type_name = layer.class_name + '_' + var + '_t' - if precision is None: - precision = self.layer_type_precision.get(layer.class_name.lower() + '_default') - type_name = layer.class_name + '_default_t' - - if precision is None: - precision = self.model_precision.get(var) - type_name = var + '_default_t' - if precision is None: - precision = self.model_precision.get('default') - type_name = 'model_default_t' - - if precision is None: - raise Exception(f'No precision for {layer.name}->{var} found and no default specified.') - - precision = self.backend.convert_precision_string(precision) - - return (precision, type_name) - - def get_bram_size(self, layer): - bf = self.model_bf - return bf - - def get_reuse_factor(self, layer): - rf = self.layer_name_rf.get(layer.name.lower()) - if rf is None: - rf = self.layer_type_rf.get(layer.class_name.lower()) - if rf is None: - rf = self.model_rf - - if rf is None: - raise Exception(f'No reuse factor for {layer.name} found and no default specified.') - - return rf - - def get_target_cycles(self, layer): - targ_cycles = self.layer_name_targ_cycles.get(layer.name.lower()) - if targ_cycles is None: - targ_cycles = self.layer_name_targ_cycles.get(layer.__class__.__name__.lower()) - if targ_cycles is None: - targ_cycles = self.model_targ_cycles - - return targ_cycles - - def get_strategy(self, layer): - strategy = self.layer_name_strategy.get(layer.name.lower()) - if strategy is None: - strategy = self.layer_type_strategy.get(layer.class_name.lower()) - if strategy is None: - strategy = self.model_strategy - - return strategy - - def get_conv_implementation(self, layer): - conv_implementation = self.layer_name_conv_implementation.get(layer.name.lower()) - if conv_implementation is None: - conv_implementation = self.layer_type_conv_implementation.get(layer.__class__.__name__.lower()) - if conv_implementation is None: - conv_implementation = self.model_conv_implementation - - return conv_implementation - - def is_resource_strategy(self, layer): - return self.get_strategy(layer).lower() == 'resource' - - def get_compression(self, layer): - compression = self.layer_name_compression.get(layer.name.lower()) - if compression is None: - compression = self.layer_type_compression.get(layer.class_name.lower()) - if compression is None: - compression = self.model_compression - - return compression - - def _parse_hls_config(self): - hls_config = self.config['HLSConfig'] - - self.flows = hls_config.get('Flows') - if self.flows is None: - self.flows = [self.backend.get_default_flow()] - - # TODO this is now effectively broken - self.optimizers = hls_config.get('Optimizers') - if 'SkipOptimizers' in hls_config: - if self.optimizers is not None: - raise Exception('Invalid optimizer configuration, please use either "Optimizers" or "SkipOptimizers".') - skip_optimizers = hls_config.get('SkipOptimizers') - selected_optimizers = get_available_passes() - for opt in skip_optimizers: - try: - selected_optimizers.remove(opt) - except ValueError: - pass - self.optimizers = selected_optimizers - - model_cfg = hls_config.get('Model') - if model_cfg is not None: - precision_cfg = model_cfg.get('Precision') - if precision_cfg is not None: - if isinstance(precision_cfg, dict): - for var, precision in precision_cfg.items(): - self.model_precision[var] = precision - else: - self.model_precision['default'] = precision_cfg # Default 
precision for everything - - self.model_bf = model_cfg.get('BramFactor', np.inf) # Weight threshold to be external BRAM - self.model_rf = model_cfg.get('ReuseFactor') - self.model_targ_cycles = model_cfg.get('TargetCycles') - self.model_conv_implementation = model_cfg.get('ConvImplementation', 'LineBuffer') - self.model_strategy = model_cfg.get('Strategy', 'Latency') - self.model_compression = bool(model_cfg.get('Compression', 0)) - self.pipeline_style = model_cfg.get('PipelineStyle', 'pipeline') - - layer_type_cfg = hls_config.get('LayerType') - if layer_type_cfg is not None: - for layer_type, layer_cfg in layer_type_cfg.items(): - precision_cfg = layer_cfg.get('Precision') - if isinstance(precision_cfg, dict): - for var, precision in precision_cfg.items(): - self.layer_type_precision[layer_type.lower() + '_' + var] = precision - else: - self.layer_type_precision[layer_type.lower() + '_default'] = precision_cfg - - rf = layer_cfg.get('ReuseFactor') - if rf is not None: - self.layer_type_rf[layer_type.lower()] = rf - - targ_cycles = layer_cfg.get('TargetCycles') - if targ_cycles is not None: - self.layer_type_targ_cycles[layer_type.lower()] = targ_cycles - - strategy = layer_cfg.get('Strategy') - if strategy is not None: - self.layer_type_strategy[layer_type.lower()] = strategy - - conv_implementation = layer_cfg.get('ConvImplementation') - if conv_implementation is not None: - self.layer_type_conv_implementation[layer_type.lower()] = conv_implementation - - compression = layer_cfg.get('Compression') - if compression is not None: - self.layer_type_compression[layer_type.lower()] = bool(compression) - - layer_name_cfg = hls_config.get('LayerName') - if layer_name_cfg is not None: - for layer_name, layer_cfg in layer_name_cfg.items(): - precision_cfg = layer_cfg.get('Precision') - if isinstance(precision_cfg, dict): - for var, precision in precision_cfg.items(): - self.layer_name_precision[layer_name.lower() + '_' + var] = precision - else: - self.layer_name_precision[layer_name.lower() + '_default'] = precision_cfg - - rf = layer_cfg.get('ReuseFactor') - if rf is not None: - self.layer_name_rf[layer_name.lower()] = rf - - targ_cycles = layer_cfg.get('TargetCycles') - if targ_cycles is not None: - self.layer_name_targ_cycles[layer_name.lower()] = targ_cycles - - strategy = layer_cfg.get('Strategy') - if strategy is not None: - self.layer_name_strategy[layer_name.lower()] = strategy - - conv_implementation = layer_cfg.get('ConvImplementation') - if conv_implementation is not None: - self.layer_name_conv_implementation[layer_name.lower()] = conv_implementation - - compression = layer_cfg.get('Compression') - if compression is not None: - self.layer_name_compression[layer_name.lower()] = bool(compression) - - def _validate_hls_config(self): - use_dataflow = False - if self.pipeline_style.lower() == 'pipeline' and self.model_compression: - print('WARNING: Compression enabled while pipeline style set to "pipeline".') - use_dataflow = True - for layer_type, strategy in self.layer_type_strategy.items(): - if strategy.lower() == 'resource' and self.pipeline_style.lower() == 'pipeline': - print( - 'WARNING: Strategy for layer type {} set to "Resource", while pipeline style set to "pipeline".'.format( - layer_type - ) - ) - use_dataflow = True - - for layer_name, strategy in self.layer_name_strategy.items(): - if strategy.lower() == 'resource' and self.pipeline_style.lower() == 'pipeline': - print( - 'WARNING: Strategy for layer {} set to "Resource", while pipeline style set to "pipeline".'.format( - 
layer_name - ) - ) - use_dataflow = True - - for layer_type, compression in self.layer_type_compression.items(): - if compression and self.pipeline_style.lower() == 'pipeline': - print( - 'WARNING: Compression enabled for layer type {}, while pipeline style set to "pipeline".'.format( - layer_type - ) - ) - use_dataflow = True - - for layer_name, compression in self.layer_name_compression.items(): - if compression and self.pipeline_style.lower() == 'pipeline': - print(f'WARNING: Compression enabled for layer {layer_name}, while pipeline style set to "pipeline".') - use_dataflow = True - - if self.model_strategy.lower() == 'resource': - use_dataflow = True - - if use_dataflow: - print('WARNING: Changing pipeline style to "dataflow".') - self.pipeline_style = 'dataflow' - - -class ModelGraph: - """The ModelGraph represents the network that is being processed by hls4ml. - - Args: - config (dict): The configuration dictionary - layer_list (list(dict)): The list contains a dictionary for each layer of the model - inputs (list, optional): The inputs to the model. If None, determined from layer_list - outputs (list, optional): The outputs of the model. If None, determined from layer_list - """ - - def __init__(self, config, layer_list, inputs=None, outputs=None): - self.config = HLSConfig(config) - - # keep track of the applied flows - self._applied_flows = [] - - # If not provided, assumes layer_list[0] is the input layer, and layer_list[-1] is the output layer - - # Note, these are actually the variable names, which may differ from the layer name - input_layers = inputs if inputs is not None else [layer_list[0]['name']] - output_layers = outputs if outputs is not None else [layer_list[-1]['name']] - self.inputs = self._find_output_variable_names(layer_list, input_layers) - if self.inputs != input_layers: - raise RuntimeError( - "Currently only support the case when input variables and input layer names match\n" - + f"Input layers = {input_layers}, input_vars = {self.inputs}" - ) - self.outputs = self._find_output_variable_names(layer_list, output_layers) - - self.index = 0 - self.graph = OrderedDict() # where the nodes are stored - self.output_vars = {} - - self._top_function_lib = None - - self._make_graph(layer_list) - - for flow in self.config.flows: - self.apply_flow(flow) - - def _find_output_variable_names(self, layer_list, layer_names): - """Given a list of all layers and a list of input/output names, find the names of their outputs that will be used - as the names of the output variables.""" - inout_nodes = [node for node in layer_list if node['name'] in layer_names] - all_node_output_names = [node['outputs'] if 'outputs' in node else [node['name']] for node in inout_nodes] - return [output for node_output_names in all_node_output_names for output in node_output_names] # to flatten - - def _make_graph(self, layer_list): - for layer in layer_list: - kind = layer['class_name'] - name = layer['name'] - inputs = layer.get('inputs', []) - outputs = layer.get('outputs', []) - if kind in ['InputLayer', 'Input']: - inputs = ['input'] - elif len(inputs) == 0: - inputs = [next(reversed(self.graph), 'input')] - if len(outputs) == 0: - outputs = [name] - - self.graph[name] = self.make_node(kind, name, layer, inputs, outputs) - - def apply_flow(self, flow, reapply='single'): - """Applies a flow (a collection of optimizers). - - Args: - flow (str): The name of the flow to apply - reapply (str, optional): Determines the action to take if the flow and its requirements have already been - applied.
Possible values are: - - 'all': Apply the flow and all its requirements. - - 'single': Apply only the given flow, but skip the already applied requirements. - - 'none': Skip applying the flow. - Defaults to 'single'. - """ - - def all_applied_flows(): - applied_flows = {} - - for flow_group in self._applied_flows: - applied_flows.update({flow: set() for flow in flow_group.keys()}) - - return applied_flows - - assert reapply in ['all', 'single', 'none'] - - if reapply == 'all': - applied_flows = {} - elif reapply == 'single': - applied_flows = all_applied_flows() - applied_flows.pop(flow, None) - else: # reapply == 'none' - applied_flows = all_applied_flows() - if flow in applied_flows: - return - - self._applied_flows.append(applied_flows) - self._apply_sub_flow(flow, applied_flows) - - def _apply_sub_flow(self, flow_name, applied_flows): - if flow_name in applied_flows: - return - flow = get_flow(flow_name) - - for sub_flow in flow.requires: - if sub_flow not in applied_flows.keys(): - self._apply_sub_flow(sub_flow, applied_flows) - - if len(flow.optimizers) > 0: - applied_passes = optimize_model(self, flow.optimizers) - else: - applied_passes = set() - applied_flows[flow.name] = applied_passes - - def make_node(self, kind, name, attributes, inputs, outputs=None): - """Make a new node not connected to the model graph. - - The 'kind' should be a valid layer registered with `register_layer`. If no outputs - are specified, a default output named the same as the node will be created. The - returned node should be added to the graph with `insert_node` or `replace_node` - functions. - - Args: - kind (type or str): Type of node to add - name (str): Name of the node - attributes (dict): Initial set of attributes required to construct the node (Layer) - inputs (list): List of inputs to the layer - outputs (list, optional): The optional list of named outputs of the node - - Raises: - Exception: If the layer ``kind`` is not found in the registry. - - Returns: - Layer: The node created. - """ - - if isinstance(kind, str): - if kind not in layer_map: - raise Exception(f'Layer {kind} not found in registry.') - layer_cls = layer_map[kind] - else: - if kind not in layer_map.values(): - raise Exception(f'Layer {kind} not found in registry.') - layer_cls = kind - - if self.config.backend is not None: - layer_cls = self.config.backend.create_layer_class(layer_cls) - node = layer_cls(self, name, attributes, inputs, outputs) - for o in node.outputs: - out_var = node.get_output_variable(output_name=o) - if o in self.outputs: - out_var.type.name = 'result_t' - self.output_vars[o] = out_var - return node - - def insert_node(self, node, before=None, input_idx=0): - """Insert a new node into the model graph. - - The node to be inserted should be created with `make_node()` function. The optional - parameter `before` can be used to specify the node that follows in case of ambiguities. - - Args: - node (Layer): Node to insert - before (Layer, optional): The next node in sequence before which a - new node should be inserted. - input_idx (int, optional): If the next node takes multiple inputs, the input index. - - Raises: - Exception: If an attempt to insert a node with multiple inputs is made or if - `before` does not specify a correct node in sequence.
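- - Example: - A rough sketch of the intended call sequence (layer kind and attributes are illustrative, not a tested recipe):: - - new_node = model.make_node('Activation', 'extra_relu', {'activation': 'relu', 'n_in': 16}, [prev_node.outputs[0]]) - model.insert_node(new_node)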
- - """ - if len(node.inputs) > 1: - raise Exception('Cannot insert a node with more than one input (for now).') - - prev_node = node.get_input_node(node.inputs[0]) - next_nodes = [] - for x in self.graph.values(): - overlap = [value for value in x.inputs if value in prev_node.outputs] - if overlap: - next_nodes.append(x) - - if before is None: - next_node = next((x for x in self.graph.values() if x.inputs[0] in prev_node.outputs), None) - else: - if before not in next_nodes: - raise Exception( - 'Cannot insert a node {} before {} (candidates: {}).'.format( - node.name, before.name, ','.join([n.name for n in next_nodes]) - ) - ) - next_node = before - - if next_node is not None: - next_node.inputs[input_idx] = node.outputs[0] - - new_graph = OrderedDict() - for k, v in self.graph.items(): - new_graph[k] = v - if k == prev_node.name: - new_graph[node.name] = node - - self.graph = new_graph - self._update_model_outputs() - - def remove_node(self, node, rewire=True): - """Remove a node from a graph. - - By default, this function can connect the outputs of previous node to the input of next one. - Note that when removing a leaf node `rewire` should be set to `False`. - - Args: - node (Layer): The node to remove - rewire (bool, optional): If `True`, connects the outputs of the previous node - to the inputs of the next node - - Raises: - Exception: If an attempt is made to rewire a leaf node or a node with multiple - inputs/outputs. - - """ - if rewire: - inputs = [inp for inp in node.inputs if inp] - outputs = [outp for outp in node.outputs if outp] - if len(inputs) > 1 or len(outputs) > 1: - raise Exception('Cannot rewire a node with multiple inputs/outputs') - prev_node = node.get_input_node(node.inputs[0]) - next_nodes = [x for x in self.graph.values() if node.outputs[0] in x.inputs] - if prev_node is not None: - if len(next_nodes) > 0: - for next_node in next_nodes: - for i, _ in enumerate(next_node.inputs): - if node.outputs[0] == next_node.inputs[i]: - next_node.inputs[i] = prev_node.outputs[0] - break - else: - if not node.outputs[0] in self.outputs: - raise Exception('Cannot rewire a node without child') - else: - raise Exception('Cannot rewire a node without a parent') - - del self.output_vars[node.outputs[0]] - del self.graph[node.name] - self._update_model_outputs() - - def replace_node(self, old_node, new_node): - """Replace an existing node in the graph with a new one. - - Args: - old_node (Layer): The node to replace - new_node (Layer): The new node - - """ - prev_node = self.graph.get(old_node.inputs[0]) - next_node = next((x for x in self.graph.values() if x.inputs[0] == old_node.outputs[0]), None) - if next_node is not None: - next_node.inputs[0] = new_node.outputs[0] - if prev_node is not None: - if new_node.inputs is None or len(new_node.inputs) == 0: # Check if already rewired - new_node.inputs = [prev_node.outputs[0]] - - self.graph = OrderedDict((new_node.name, new_node) if k == old_node.name else (k, v) for k, v in self.graph.items()) - self._update_model_outputs() - - def _update_model_outputs(self): - '''Update the model outputs - - All node outputs and inputs are found. The model outputs are set to all node outputs - that are not also node inputs. 
- ''' - node_outputs = [out for node in self.graph.values() for out in node.outputs] - node_inputs = [inp for node in self.graph.values() for inp in node.inputs] - self.outputs = [out for out in node_outputs if out not in node_inputs] - - def next_layer(self): - self.index += 1 - return self.index - - def get_layers(self): - return self.graph.values() - - def get_input_variables(self): - variables = [] - for inp in self.inputs: - variables.append(self.graph[inp].get_output_variable()) - return variables - - def register_output_variable(self, out_name, variable): - if out_name in self.outputs: - variable.type.name = 'result_t' - self.output_vars[out_name] = variable - - def get_output_variables(self): - variables = [] - for out in self.outputs: - variables.append(self.output_vars[out]) - return variables - - def get_layer_output_variable(self, output_name): - return self.output_vars.get(output_name, None) - - def get_weight_variables(self): - variables = [] - for layer in self.get_layers(): - weights = layer.get_weights() - variables.extend(weights) - - return variables - - def write(self): - """Write the generated project to disk. - - This function converts the model to C++ and writes the generated files in the output - directory specified in the `config`. - """ - - self.config.backend.write(self) - - def compile(self): - """Compile the generated project and link the library into the current environment. - - Users should call this function if they want to use `predict` functionality for simulation. - """ - self.write() - - lib_name = self.config.backend.compile(self) - if self._top_function_lib is not None: - if platform.system() == "Linux": - libdl_libs = ['libdl.so', 'libdl.so.2'] - for libdl in libdl_libs: - try: - dlclose_func = ctypes.CDLL(libdl).dlclose - break - except Exception: - continue - elif platform.system() == "Darwin": - dlclose_func = ctypes.CDLL('libc.dylib').dlclose - - dlclose_func.argtypes = [ctypes.c_void_p] - dlclose_func.restype = ctypes.c_int - dlclose_func(self._top_function_lib._handle) - self._top_function_lib = ctypes.cdll.LoadLibrary(lib_name) - - def _get_top_function(self, x): - if self._top_function_lib is None: - raise Exception('Model not compiled') - if len(self.get_input_variables()) == 1: - xlist = [x] - else: - xlist = x - n_outputs = len(self.get_output_variables()) - - for xi in xlist: - if not isinstance(xi, np.ndarray): - raise Exception(f'Expected numpy.ndarray, but got {type(xi)}') - if not xi.flags['C_CONTIGUOUS']: - raise Exception('Array must be c_contiguous, try using numpy.ascontiguousarray(x)') - - x0 = xlist[0] - if x0.dtype in [np.single, np.float32]: - top_function = getattr(self._top_function_lib, self.config.get_project_name() + '_float') - ctype = ctypes.c_float - elif x0.dtype in [np.double, np.float64, np.float_]: - top_function = getattr(self._top_function_lib, self.config.get_project_name() + '_double') - ctype = ctypes.c_double - else: - raise Exception( - 'Invalid type ({}) of numpy array.
Supported types are: single, float32, double, float64, float_.'.format( - x0.dtype - ) - ) - - top_function.restype = None - top_function.argtypes = [npc.ndpointer(ctype, flags="C_CONTIGUOUS") for i in range(len(xlist) + n_outputs)] - - return top_function, ctype - - def _compute_n_samples(self, x): - if len(self.get_input_variables()) == 1: - xlist = [x] - else: - xlist = x - n_samples = [] - for i, xi in enumerate(xlist): - expected_size = self.get_input_variables()[i].size() - x_size = np.prod(xi.shape) - n_sample, rem = divmod(x_size, expected_size) - if rem != 0: - raise Exception(f'Input size mismatch, got size {x_size}, expected a multiple of {expected_size}') - n_samples.append(n_sample) - - if not all([n_samples[i] == n_samples[i + 1] for i in range(len(xlist) - 1)]): - raise Exception('Input size mismatch, not all inputs match') - - return int(n_sample) - - def predict(self, x): - top_function, ctype = self._get_top_function(x) - n_samples = self._compute_n_samples(x) - n_inputs = len(self.get_input_variables()) - n_outputs = len(self.get_output_variables()) - - curr_dir = os.getcwd() - os.chdir(self.config.get_output_dir() + '/firmware') - - output = [] - if n_samples == 1 and n_inputs == 1: - x = [x] - - try: - for i in range(n_samples): - predictions = [np.zeros(yj.size(), dtype=ctype) for yj in self.get_output_variables()] - if n_inputs == 1: - inp = [np.asarray(x[i])] - else: - inp = [np.asarray(xj[i]) for xj in x] - argtuple = inp - argtuple += predictions - argtuple = tuple(argtuple) - top_function(*argtuple) - output.append(predictions) - - # Convert to list of numpy arrays (one for each output) - output = [ - np.asarray([output[i_sample][i_output] for i_sample in range(n_samples)]) for i_output in range(n_outputs) - ] - finally: - os.chdir(curr_dir) - - if n_samples == 1 and n_outputs == 1: - return output[0][0] - elif n_outputs == 1: - return output[0] - elif n_samples == 1: - return [output_i[0] for output_i in output] - else: - return output - - def trace(self, x): - print(f'Recompiling {self.config.get_project_name()} with tracing') - self.config.trace_output = True - self.compile() - - top_function, ctype = self._get_top_function(x) - n_samples = self._compute_n_samples(x) - n_inputs = len(self.get_input_variables()) - n_outputs = len(self.get_output_variables()) - - class TraceData(ctypes.Structure): - _fields_ = [('name', ctypes.c_char_p), ('data', ctypes.c_void_p)] - - trace_output = {} - layer_sizes = {} - n_traced = 0 - for layer in self.get_layers(): - if layer.get_attr('function_cpp', None) and layer.get_attr('trace', False): - n_traced += len(layer.get_variables()) - trace_output[layer.name] = [] - layer_sizes[layer.name] = layer.get_output_variable().shape - - collect_func = self._top_function_lib.collect_trace_output - collect_func.argtypes = [ctypes.POINTER(TraceData)] - collect_func.restype = None - trace_data = (TraceData * n_traced)() - - alloc_func = self._top_function_lib.allocate_trace_storage - alloc_func.argtypes = [ctypes.c_size_t] - alloc_func.restype = None - - free_func = self._top_function_lib.free_trace_storage - free_func.argtypes = None - free_func.restype = None - - curr_dir = os.getcwd() - os.chdir(self.config.get_output_dir() + '/firmware') - - output = [] - if n_samples == 1 and n_inputs == 1: - x = [x] - - try: - alloc_func(ctypes.sizeof(ctype)) - - for i in range(n_samples): - predictions = [np.zeros(yj.size(), dtype=ctype) for yj in self.get_output_variables()] - if n_inputs == 1: - inp = [np.asarray(x[i])] - else: - inp =
[np.asarray(xj[i]) for xj in x] - argtuple = inp - argtuple += predictions - argtuple = tuple(argtuple) - top_function(*argtuple) - output.append(predictions) - collect_func(trace_data) - for trace in trace_data: - layer_name = str(trace.name, 'utf-8') - layer_data = ctypes.cast(trace.data, ctypes.POINTER(ctype)) - np_array = np.ctypeslib.as_array(layer_data, shape=layer_sizes[layer_name]) - trace_output[layer_name].append(np.copy(np_array)) - - for key in trace_output.keys(): - trace_output[key] = np.asarray(trace_output[key]) - - # Convert to list of numpy arrays (one for each output) - output = [ - np.asarray([output[i_sample][i_output] for i_sample in range(n_samples)]) for i_output in range(n_outputs) - ] - - free_func() - finally: - os.chdir(curr_dir) - - if n_samples == 1 and n_outputs == 1: - return output[0][0], trace_output - elif n_outputs == 1: - return output[0], trace_output - elif n_samples == 1: - return [output_i[0] for output_i in output], trace_output - else: - return output, trace_output - - def build(self, **kwargs): - """Builds the generated project using HLS compiler. - - Please see the `build()` function of backends for a list of possible arguments. - """ - if not os.path.exists(self.config.get_output_dir()): - # Assume the project wasn't written before - self.write() - - return self.config.backend.build(self, **kwargs) diff --git a/hls4ml/hls4ml/model/layers.py b/hls4ml/hls4ml/model/layers.py deleted file mode 100644 index 4c7c210..0000000 --- a/hls4ml/hls4ml/model/layers.py +++ /dev/null @@ -1,1385 +0,0 @@ -import typing - -import numpy as np - -from hls4ml.model.attributes import ( - Attribute, - AttributeDict, - ChoiceAttribute, - CodeMapping, - ConfigurableAttribute, - TypeAttribute, - TypeMapping, - VariableMapping, - WeightAttribute, - WeightMapping, -) -from hls4ml.model.types import ( - CompressedWeightVariable, - ExponentPrecisionType, - ExponentWeightVariable, - FixedPrecisionType, - IntegerPrecisionType, - NamedType, - TensorVariable, - WeightVariable, - find_minimum_width, -) -from hls4ml.utils.string_utils import convert_to_snake_case - - -# TODO move this to some utility module -class classproperty: - def __init__(self, func): - self.func = func - - def __get__(self, obj, owner): - return self.func(owner) - - -class Layer: - """The base class for all layers, which are the nodes in the model graph. - Note: they don't necessarily correspond 1:1 with the network layers. 
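- - A minimal hypothetical subclass only needs to implement ``initialize()``, e.g.:: - - class Scale(Layer): - _expected_attributes = [WeightAttribute('scale'), TypeAttribute('scale')] - - def initialize(self): - shape = self.get_input_variable().shape[:] - dims = [f'N_SCALE_{i}_{self.index}' for i in range(1, len(shape) + 1)] - self.add_output_variable(shape, dims) - self.add_weights_variable(name='scale', var_name='s{index}')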
- - The expected attributes are `index`, `trace` (configurable), and `result` (type) - - Args: - model (ModelGraph): The ModelGraph that this Layer is part of - name (str): The node name - attributes (dict): Initial set of attributes required to construct the node (Layer) - inputs (list): List of inputs to the layer - outputs (list, optional): The optional list of named outputs of the node - """ - - _expected_attributes = [ - Attribute('index'), - ConfigurableAttribute('trace', default=False), - TypeAttribute('result'), - ] - - @classproperty - def expected_attributes(cls): - """Returns the expected attributes of a class.""" - all_attributes = [] - for base_cls in reversed(cls.mro()): # Iterate over all base classes in the hierarchy - if cls == base_cls: # Skip adding attributes from self - continue - if hasattr(base_cls, '_expected_attributes'): # Only consider classes with '_expected_attributes' defined - all_attributes.extend(base_cls._expected_attributes) - if '_expected_attributes' in cls.__dict__: - # Add new attributes defined in the class - # TODO Support overriding attributes from parent class - all_attributes.extend(cls._expected_attributes) - return all_attributes - - def __init__(self, model, name, attributes, inputs, outputs=None): - if name == 'input': - raise RuntimeError( - "No model layer should be named 'input' because that is a reserved " - + "layer name in ModelGraph. Please rename the layer in your model." - ) - self.model = model - self.name = name - self.index = model.next_layer() - self.inputs = inputs - self.outputs = outputs - if self.outputs is None: - self.outputs = [self.name] - - self.attributes = AttributeDict(self) - self.attributes.update(attributes) - - self.set_attr('index', self.index) - - self.weights = WeightMapping(self.attributes) - self.variables = VariableMapping(self.attributes) - self.types = TypeMapping(self.attributes) - self.code = CodeMapping(self.attributes) - - self._set_accum_t() - - layer_config = self.model.config.get_layer_config(self) - for config_key, config_value in layer_config.items(): - config_key = convert_to_snake_case(config_key) - if config_key in self.attributes: - print( - 'WARNING: Config parameter "{}" overwrites an existing attribute in layer "{}" ({})'.format( - config_key, self.name, self.class_name - ) - ) - if config_key.endswith('_t') and isinstance( - config_value, str - ): # TODO maybe move this to __setitem__ of AttributeDict?
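- # A precision given in the config as a string (e.g. 'ap_fixed<16,6>') is parsed into a precision object - # and wrapped in a NamedType keyed to this layer and attribute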
precision = self.model.config.backend.convert_precision_string(config_value) - config_value = NamedType(self.name + '_' + config_key, precision) - self.attributes[config_key] = config_value - - self.initialize() - self._validate_attributes() - - @property - def class_name(self, include_wrapped=False): - if include_wrapped: - return self.__class__.__name__ - else: - if hasattr(self, '_wrapped'): - return self.__class__.__bases__[0].__name__ - else: - return self.__class__.__name__ - - def initialize(self): - raise NotImplementedError - - def set_attr(self, key, value): - self.attributes[key] = value - - def get_attr(self, key, default=None): - return self.attributes.get(key, default) - - def _validate_attributes(self): - all_attributes = {} - for attr in self.expected_attributes: - all_attributes[attr.name] = attr - - # Validate existing attributes - for attr_name, attr_value in self.attributes.items(): - exp_attr = all_attributes.pop(attr_name, None) - if exp_attr is not None: - if not exp_attr.validate_value(attr_value): - raise Exception( - 'Unexpected value of attribute "{}" of layer "{}" ({}). Expected {}, got {} ({})'.format( - attr_name, self.name, self.class_name, exp_attr.value_type, type(attr_value), attr_value - ) - ) - else: - pass # TODO layer contains attribute that is not expected. we can log this for debugging - - # If any expected attributes remain, try adding their default values - for attr_name, attr in all_attributes.items(): - if attr.default is not None: - if isinstance(attr, TypeAttribute): - self.set_attr(attr_name, self._wrap_precision_to_type(self.name + '_' + attr_name, attr.default)) - else: - self.set_attr(attr_name, attr.default) - else: - raise Exception( - 'Attribute "{}" of layer {} ({}) not set and no default value is specified.'.format( - attr_name, self.name, self.class_name - ) - ) - - def _wrap_precision_to_type(self, name, precision): - if isinstance(precision, str): - precision = self.model.config.backend.convert_precision_string(precision) - return NamedType(name=name, precision=precision) - - def _set_accum_t(self): - has_accum_t = any(a for a in self.expected_attributes if a.name == 'accum_t' and isinstance(a, TypeAttribute)) - if has_accum_t: - accum_t = NamedType(*reversed(self.model.config.get_precision(self, 'accum'))) - self.set_attr('accum_t', accum_t) - - def get_input_node(self, input_name=None): - if input_name is None: - if len(self.inputs) > 0: - input_name = self.inputs[0] - else: - return None - nodes = [node for node in self.model.graph.values() if input_name in node.outputs] - if len(nodes) == 0: - return None - else: - return nodes[0] - - def get_input_variable(self, input_name=None): - if input_name is not None: - return self.model.get_layer_output_variable(input_name) - else: - return self.model.get_layer_output_variable(self.inputs[0]) - - def get_output_use_map(self): - output_map = {} - for output in self.outputs: - output_map[output] = [] - for layer in self.model.get_layers(): - for inp in layer.inputs: - if output == inp: - output_map[output].append(layer) - return output_map - - def get_output_nodes(self, output_name=None): - output_nodes = [] - if output_name is not None: - outputs = [output_name] - else: - outputs = self.outputs - for output in outputs: - for layer in self.model.get_layers(): - for inp in layer.inputs: - if output == inp: - output_nodes.append(layer) - return output_nodes - - def get_output_variable(self, output_name=None): - if output_name is not None: - return self.variables[output_name] - else: - return
next(iter(self.variables.values())) - - def get_weights(self, var_name=None): - if var_name: - return self.weights[var_name] - - return self.weights.values() - - def get_variables(self): - return self.variables.values() - - def add_output_variable( - self, shape, dim_names, out_name=None, var_name='layer{index}_out', type_name='layer{index}_t', precision=None - ): - if out_name is None: - out_name = self.outputs[0] - - if precision is None: - precision, _ = self.model.config.get_precision(self, var='result') - - out = TensorVariable(shape, dim_names, var_name=var_name, type_name=type_name, precision=precision, index=self.index) - - self.set_attr(out_name, out) - - def add_weights(self, quantizer=None, compression=False): - self.add_weights_variable( - name='weight', var_name='w{index}', data='weight', quantizer=quantizer, compression=compression - ) - - def add_bias(self, quantizer=None): - data = self.get_attr('bias_data', None) - precision = None - type_name = None - if data is None: - data = np.zeros(self.get_output_variable().shape[-1]) - precision = IntegerPrecisionType(width=1, signed=False) - type_name = 'bias{index}_t' - quantizer = None # Don't quantize non-existent bias - - self.add_weights_variable( - name='bias', var_name='b{index}', type_name=type_name, precision=precision, data=data, quantizer=quantizer - ) - - def add_weights_variable( - self, name, var_name=None, type_name=None, precision=None, data=None, quantizer=None, compression=False - ): - if var_name is None: - var_name = name + '{index}' - - if precision is None: - precision, _ = self.model.config.get_precision(self, var=name) - elif type_name is None: - # If precision is specified but no type name is given, assign a dedicated - # type name made from variable name and layer index - type_name = name + '{index}_t' - - if type_name is None: - _, type_name = self.model.config.get_precision(self, var=name) - - if data is None: - data = self.get_attr(name + '_data') - elif isinstance(data, str): - data = self.get_attr(data + '_data') - - data_unquantized = data - exponent_type = False - if quantizer is not None: - precision = quantizer.hls_type - type_name = name + '{index}_t' - data = quantizer(data) - if isinstance(quantizer.hls_type, ExponentPrecisionType): - exponent_type = True - - if compression: - # TODO reuse factor may not be available here - var = CompressedWeightVariable( - var_name, - type_name=type_name, - precision=precision, - quantizer=quantizer, - data=data, - reuse_factor=self.get_attr('reuse_factor', 1), - index=self.index, - ) - elif exponent_type: - var = ExponentWeightVariable( - var_name, type_name=type_name, precision=precision, quantizer=quantizer, data=data, index=self.index - ) - else: - var = WeightVariable( - var_name, type_name=type_name, precision=precision, quantizer=quantizer, data=data, index=self.index - ) - - var.data_unquantized = data_unquantized - - self.set_attr(name, var) - - def get_layer_precision(self): - precision = {} - for data_type in self.types.values(): - precision[data_type.name] = data_type - return precision - - -class Input(Layer): - def initialize(self): - shape = self.attributes['input_shape'] - if shape[0] is None: - shape = shape[1:] - dims = [f'N_INPUT_{i}_{self.index}' for i in range(1, len(shape) + 1)] - if self.index == 1: - default_type_name = 'input_t' - else: - default_type_name = f'input{self.index}_t' - type_name = self.attributes.get('type_name', default_type_name) - precision, _ = self.model.config.get_precision(self, var='result') - 
self.add_output_variable(shape, dims, var_name=self.name, type_name=type_name, precision=precision) - - -class Reshape(Layer): - _expected_attributes = [ - Attribute('target_shape', value_type=typing.Sequence), - ] - - def initialize(self): - input_shape = self.get_input_variable(self.inputs[0]).shape - target_shape = self.get_attr('target_shape') - if target_shape is None: - # need to get it from the input - shape_node = self.get_input_node(self.inputs[1]) - # for QONNX, remove batch dimension - if shape_node: - target_shape = shape_node.value[1:] - else: - raise RuntimeError("Reshape for ONNX requires the target shape to be a second input.") - - # remove Nones -- is this ever triggered? - if target_shape[0] is None: - target_shape = target_shape[1:] - - # take care of -1 shapes - shape = self._infer_output_shape(input_shape, target_shape) - - # update the target shape with changes from above - self.set_attr('target_shape', shape) - - dims = [f'N_SIZE_{i}_{self.index}' for i in range(len(shape))] - - self.add_output_variable(shape, dims) - - def _infer_output_shape(self, input_shape, target_shape): - """Expand the shape that potentially includes -1 as one of the dimensions.""" - if -1 in target_shape: # Need to infer shape for -1 - dummy_x = np.ones(input_shape) - dummy_y = np.reshape(dummy_x, target_shape) - return list(dummy_y.shape) - return target_shape - - -class Dense(Layer): - _expected_attributes = [ - Attribute('n_in'), - Attribute('n_out'), - WeightAttribute('weight'), - WeightAttribute('bias'), - TypeAttribute('weight'), - TypeAttribute('bias'), - ] - - def initialize(self): - shape = self.get_input_variable().shape[:] - shape[-1] = self.attributes['n_out'] - if len(shape) > 1: - dims = [f'N_LAYER_{i}_{self.index}' for i in range(1, len(shape) + 1)] - else: - dims = [f'N_LAYER_{self.index}'] - self.add_output_variable(shape, dims) - self.add_weights(quantizer=self.get_attr('weight_quantizer'), compression=self.model.config.get_compression(self)) - self.add_bias(quantizer=self.get_attr('bias_quantizer')) - - -class Conv1D(Layer): - _expected_attributes = [ - Attribute('in_width'), - Attribute('out_width'), - Attribute('n_chan'), - Attribute('n_filt'), - Attribute('filt_width'), - Attribute('stride_width'), - Attribute('pad_left'), - Attribute('pad_right'), - WeightAttribute('weight'), - WeightAttribute('bias'), - TypeAttribute('weight'), - TypeAttribute('bias'), - ] - - def initialize(self): - if self.get_attr('data_format') == 'channels_last': - shape = [self.attributes['out_width'], self.attributes['n_filt']] - dims = [f'N_OUTPUTS_{self.index}', f'N_FILT_{self.index}'] - else: - shape = [self.attributes['n_filt'], self.attributes['out_width']] - dims = [f'N_FILT_{self.index}', f'N_OUTPUTS_{self.index}'] - - self.add_output_variable(shape, dims) - self.add_weights(quantizer=self.get_attr('weight_quantizer')) - self.add_bias(quantizer=self.get_attr('bias_quantizer')) - - -class SeparableConv1D(Layer): - _expected_attributes = [ - Attribute('in_width'), - Attribute('out_width'), - Attribute('n_chan'), - Attribute('n_filt'), - Attribute('filt_width'), - Attribute('stride_width'), - Attribute('pad_left'), - Attribute('pad_right'), - WeightAttribute('depthwise'), - WeightAttribute('pointwise'), - WeightAttribute('bias'), - TypeAttribute('depthwise'), - TypeAttribute('pointwise'), - TypeAttribute('bias'), - ] - - def initialize(self): - if self.get_attr('data_format') == 'channels_last': - shape = [self.attributes['out_width'], self.attributes['n_filt']] - dims = 
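A quick standalone check of the `-1` inference trick used by `_infer_output_shape` above (plain NumPy, no hls4ml objects needed):

    import numpy as np

    input_shape, target_shape = [4, 6], [3, -1]
    inferred = list(np.reshape(np.ones(input_shape), target_shape).shape)
    print(inferred)  # [3, 8], since 4 * 6 == 3 * 8
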
[f'N_OUTPUTS_{self.index}', f'N_FILT_{self.index}'] - else: - shape = [self.attributes['n_filt'], self.attributes['out_width']] - dims = [f'N_FILT_{self.index}', f'N_OUTPUTS_{self.index}'] - self.add_output_variable(shape, dims) - - self.add_weights_variable(name='depthwise', var_name='d{index}', quantizer=self.get_attr('depthwise_quantizer')) - self.add_weights_variable(name='pointwise', var_name='p{index}', quantizer=self.get_attr('pointwise_quantizer')) - - zero_bias_data = np.zeros((self.attributes['n_chan'],)) - precision = IntegerPrecisionType(width=1, signed=False) - self.add_weights_variable(name='zero_bias', var_name='z{index}', data=zero_bias_data, precision=precision) - - self.add_bias(quantizer=self.get_attr('bias_quantizer')) - - -class DepthwiseConv1D(Conv1D): - def initialize(self): - if self.get_attr('data_format') == 'channels_last': - shape = [self.attributes['out_width'], self.attributes['n_chan']] - dims = [f'OUT_HEIGHT_{self.index}', f'N_CHAN_{self.index}'] - else: - shape = [self.attributes['n_chan'], self.attributes['out_width']] - dims = [f'N_CHAN_{self.index}', f'OUT_WIDTH_{self.index}'] - self.add_output_variable(shape, dims) - - self.add_weights_variable( - name='weight', var_name='w{index}', data='depthwise', quantizer=self.get_attr('depthwise_quantizer') - ) - - self.add_bias(quantizer=self.get_attr('bias_quantizer')) - - -class Conv2D(Layer): - _expected_attributes = [ - Attribute('in_height'), - Attribute('in_width'), - Attribute('out_height'), - Attribute('out_width'), - Attribute('n_chan'), - Attribute('n_filt'), - Attribute('filt_height'), - Attribute('filt_width'), - Attribute('stride_height'), - Attribute('stride_width'), - Attribute('pad_top'), - Attribute('pad_bottom'), - Attribute('pad_left'), - Attribute('pad_right'), - WeightAttribute('weight'), - WeightAttribute('bias'), - TypeAttribute('weight'), - TypeAttribute('bias'), - ] - - def initialize(self): - if self.get_attr('data_format') == 'channels_last': - shape = [self.attributes['out_height'], self.attributes['out_width'], self.attributes['n_filt']] - dims = [f'OUT_HEIGHT_{self.index}', f'OUT_WIDTH_{self.index}', f'N_FILT_{self.index}'] - else: - shape = [self.attributes['n_filt'], self.attributes['out_height'], self.attributes['out_width']] - dims = [f'N_FILT_{self.index}', f'OUT_HEIGHT_{self.index}', f'OUT_WIDTH_{self.index}'] - self.add_output_variable(shape, dims) - self.add_weights(quantizer=self.get_attr('weight_quantizer')) - self.add_bias(quantizer=self.get_attr('bias_quantizer')) - - -class Conv2DBatchnorm(Conv2D): - def _get_folded_weights(self): - """ - Function to get the batchnorm folded weights. - This function converts the weights by folding batchnorm parameters into - the weight of QConv2D. 
The high-level equation: - W_fold = gamma * W / sqrt(variance + epsilon) - bias_fold = gamma * (bias - moving_mean) / sqrt(variance + epsilon) + beta - """ - kernel = self.get_attr('weight_data') - bias = self.get_attr('bias_data') - if bias is None: - bias = 0 - - # get batchnorm weights and moving stats - gamma = self.get_attr('gamma_data') - beta = self.get_attr('beta_data') - moving_mean = self.get_attr('mean_data') - moving_variance = self.get_attr('variance_data') - # get the inversion factor so that we replace division by multiplication - inv = np.reciprocal(np.sqrt(moving_variance + self.get_attr('epsilon'))) - if gamma is not None: - inv *= gamma - - # wrap conv kernel and bias with bn parameters - folded_kernel = inv * kernel - folded_bias = inv * (bias - moving_mean) - if beta is not None: - folded_bias += beta - - return [folded_kernel, folded_bias] - - def initialize(self): - super().initialize() - folded_weights, folded_bias = self._get_folded_weights() - if self.model.config.is_resource_strategy(self) and self.model.config.backend.name in [ - 'Vivado', - 'VivadoAccelerator', - ]: - self.weights['weight'].data_unquantized = np.transpose(folded_weights, axes=[3, 0, 1, 2]) - self.weights['weight'].data = self.get_attr('weight_quantizer')(self.weights['weight'].data_unquantized) - - else: - self.weights['weight'].data_unquantized = folded_weights - self.weights['weight'].data = self.get_attr('weight_quantizer')(folded_weights) - self.weights['bias'].data_unquantized = folded_bias - bias_q = self.get_attr('bias_quantizer') - if bias_q is not None: - self.weights['bias'].data = bias_q(folded_bias) - - -class SeparableConv2D(Layer): - _expected_attributes = [ - Attribute('in_height'), - Attribute('in_width'), - Attribute('out_height'), - Attribute('out_width'), - Attribute('n_chan'), - Attribute('n_filt'), - Attribute('filt_height'), - Attribute('filt_width'), - Attribute('stride_height'), - Attribute('stride_width'), - Attribute('pad_top'), - Attribute('pad_bottom'), - Attribute('pad_left'), - Attribute('pad_right'), - WeightAttribute('depthwise'), - WeightAttribute('pointwise'), - WeightAttribute('bias'), - TypeAttribute('depthwise'), - TypeAttribute('pointwise'), - TypeAttribute('bias'), - ] - - def initialize(self): - if self.get_attr('data_format') == 'channels_last': - shape = [self.attributes['out_height'], self.attributes['out_width'], self.attributes['n_filt']] - dims = [f'OUT_HEIGHT_{self.index}', f'OUT_WIDTH_{self.index}', f'N_FILT_{self.index}'] - else: - shape = [self.attributes['n_filt'], self.attributes['out_height'], self.attributes['out_width']] - dims = [f'N_FILT_{self.index}', f'OUT_HEIGHT_{self.index}', f'OUT_WIDTH_{self.index}'] - self.add_output_variable(shape, dims) - - self.add_weights_variable(name='depthwise', var_name='d{index}', quantizer=self.get_attr('depthwise_quantizer')) - self.add_weights_variable(name='pointwise', var_name='p{index}', quantizer=self.get_attr('pointwise_quantizer')) - - zero_bias_data = np.zeros((self.attributes['n_chan'],)) - precision = IntegerPrecisionType(width=1, signed=False) - self.add_weights_variable(name='zero_bias', var_name='z{index}', data=zero_bias_data, precision=precision) - - self.add_bias(quantizer=self.get_attr('bias_quantizer')) - - -class DepthwiseConv2D(Conv2D): - def initialize(self): - if self.get_attr('data_format') == 'channels_last': - shape = [self.attributes['out_height'], self.attributes['out_width'], self.attributes['n_chan']] - dims = [f'OUT_HEIGHT_{self.index}', f'OUT_WIDTH_{self.index}', 
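A hedged numeric sanity check of the folding equations in the docstring above, with toy scalar values (the real code applies them elementwise to the conv kernel):

    import numpy as np

    gamma, beta, mean, var, eps = 2.0, 0.5, 1.0, 4.0, 1e-3
    w, b, x = 3.0, 0.25, 1.7

    inv = gamma / np.sqrt(var + eps)
    w_fold = inv * w
    b_fold = inv * (b - mean) + beta

    # BatchNorm(conv(x)) must equal conv_folded(x) for any x
    bn_of_conv = gamma * (w * x + b - mean) / np.sqrt(var + eps) + beta
    assert np.isclose(bn_of_conv, w_fold * x + b_fold)
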
f'N_CHAN_{self.index}'] - else: - shape = [self.attributes['n_chan'], self.attributes['out_height'], self.attributes['out_width']] - dims = [f'N_CHAN_{self.index}', f'OUT_HEIGHT_{self.index}', f'OUT_WIDTH_{self.index}'] - self.add_output_variable(shape, dims) - - self.add_weights_variable( - name='weight', var_name='w{index}', data='depthwise', quantizer=self.get_attr('depthwise_quantizer') - ) - - self.add_bias(quantizer=self.get_attr('bias_quantizer')) - - -class Pooling1D(Layer): - _expected_attributes = [ - Attribute('n_in'), - Attribute('n_out'), - Attribute('n_filt'), - Attribute('pool_width'), - Attribute('stride_width'), - Attribute('pad_left'), - Attribute('pad_right'), - Attribute('count_pad', value_type=bool, default=False), - ChoiceAttribute('pool_op', ['Max', 'Average'], configurable=False), - ] - - def initialize(self): - if self.get_attr('data_format') == 'channels_last': - shape = [self.attributes['n_out'], self.attributes['n_filt']] - dims = [f'N_OUTPUTS_{self.index}', f'N_FILT_{self.index}'] - else: - shape = [self.attributes['n_filt'], self.attributes['n_out']] - dims = [f'N_FILT_{self.index}', f'N_OUTPUTS_{self.index}'] - self.add_output_variable(shape, dims) - self.set_attr('pool_op', self.get_attr('class_name').split('Pooling')[0]) - - -class Pooling2D(Layer): - _expected_attributes = [ - Attribute('in_height'), - Attribute('in_width'), - Attribute('out_height'), - Attribute('out_width'), - Attribute('n_filt'), - Attribute('pool_height'), - Attribute('pool_width'), - Attribute('stride_height'), - Attribute('stride_width'), - Attribute('pad_top'), - Attribute('pad_bottom'), - Attribute('pad_left'), - Attribute('pad_right'), - Attribute('count_pad', value_type=bool, default=False), - ChoiceAttribute('pool_op', ['Max', 'Average'], configurable=False), - ] - - def initialize(self): - if self.get_attr('data_format') == 'channels_last': - shape = [self.attributes['out_height'], self.attributes['out_width'], self.attributes['n_filt']] - dims = [f'OUT_HEIGHT_{self.index}', f'OUT_WIDTH_{self.index}', f'N_FILT_{self.index}'] - else: - shape = [self.attributes['n_filt'], self.attributes['out_height'], self.attributes['out_width']] - dims = [f'N_FILT_{self.index}', f'OUT_HEIGHT_{self.index}', f'OUT_WIDTH_{self.index}'] - self.add_output_variable(shape, dims) - self.set_attr('pool_op', self.get_attr('class_name').split('Pooling')[0]) - - -class GlobalPooling1D(Layer): - _expected_attributes = [ - Attribute('n_in'), - Attribute('n_filt'), - ChoiceAttribute('pool_op', ['Max', 'Average'], configurable=False), - ] - - def initialize(self): - shape = [self.attributes['n_filt']] - dims = [f'N_FILT_{self.index}'] - self.add_output_variable(shape, dims) - self.set_attr('pool_op', self.get_attr('class_name').split('Pooling')[0].replace('Global', '')) - - -class GlobalPooling2D(Layer): - _expected_attributes = [ - Attribute('in_height'), - Attribute('in_width'), - Attribute('n_filt'), - ChoiceAttribute('pool_op', ['Max', 'Average'], configurable=False), - ] - - def initialize(self): - shape = [self.attributes['n_filt']] - dims = [f'N_FILT_{self.index}'] - self.add_output_variable(shape, dims) - self.set_attr('pool_op', self.get_attr('class_name').split('Pooling')[0].replace('Global', '')) - - -class ZeroPadding1D(Layer): - _expected_attributes = [ - Attribute('in_width'), - Attribute('out_width'), - Attribute('n_chan'), - Attribute('pad_left'), - Attribute('pad_right'), - ] - - def initialize(self): - inp = self.get_input_variable() - if self.get_attr('data_format') == 'channels_last': - 
shape = [self.attributes['out_width'], self.attributes['n_chan']] - dims = [f'OUT_WIDTH_{self.index}', f'N_CHAN_{self.index}'] - else: - shape = [self.attributes['n_chan'], self.attributes['out_width']] - dims = [f'N_CHAN_{self.index}', f'OUT_WIDTH_{self.index}'] - self.add_output_variable(shape, dims, precision=inp.type.precision) - - -class ZeroPadding2D(Layer): - _expected_attributes = [ - Attribute('in_height'), - Attribute('in_width'), - Attribute('out_height'), - Attribute('out_width'), - Attribute('n_chan'), - Attribute('pad_top'), - Attribute('pad_bottom'), - Attribute('pad_left'), - Attribute('pad_right'), - ] - - def initialize(self): - inp = self.get_input_variable() - if self.get_attr('data_format') == 'channels_last': - shape = [self.attributes['out_height'], self.attributes['out_width'], self.attributes['n_chan']] - dims = [f'OUT_HEIGHT_{self.index}', f'OUT_WIDTH_{self.index}', f'N_CHAN_{self.index}'] - else: - shape = [self.attributes['n_chan'], self.attributes['out_height'], self.attributes['out_width']] - dims = [f'N_CHAN_{self.index}', f'OUT_HEIGHT_{self.index}', f'OUT_WIDTH_{self.index}'] - self.add_output_variable(shape, dims, precision=inp.type.precision) - - -class Activation(Layer): - _expected_attributes = [ - Attribute('n_in'), - Attribute('activation', value_type=str), - ] - - def initialize(self): - inp = self.get_input_variable() - shape = inp.shape - dims = inp.dim_names - self.add_output_variable(shape, dims) - self.set_attr('n_in', self.get_input_variable().size()) - - -class ParametrizedActivation(Activation): - def _get_act_function_name(self): - act = self.get_attr('activation').lower() - if act == 'leakyrelu': - return 'leaky_relu' - elif act == 'thresholdedrelu': - return 'thresholded_relu' - else: - return act # e.g. 'elu' passes through unchanged - - -class HardActivation(Activation): - ''' - Implements the hard sigmoid and hard tanh functions from Keras and QKeras - (default parameters in QKeras differ, so they should be configured). - The hard sigmoid function is clip(slope * x + shift, 0, 1), and the - hard tanh function is 2 * hard_sigmoid - 1 - ''' - - _expected_attributes = [ - Attribute('slope', value_type=float, default=0.2, configurable=False), - Attribute('shift', value_type=float, default=0.5, configurable=False), - TypeAttribute('slope_t'), - TypeAttribute('shift_t'), - ] - - def initialize(self): - super().initialize() - slope_prec = self.get_attr('slope_prec', FixedPrecisionType(width=16, integer=0, signed=False)) - shift_prec = self.get_attr('shift_prec', FixedPrecisionType(width=1, integer=0, signed=False)) - index = self.get_attr('index') - slope_t = NamedType(f'slope{index}_t', precision=slope_prec) - shift_t = NamedType(f'shift{index}_t', precision=shift_prec) - self.set_attr('slope_t', slope_t) - self.set_attr('shift_t', shift_t) - - -class PReLU(Activation): - def initialize(self): - super().initialize() - self.add_weights_variable(name='alpha', var_name='a{index}') - - -class Softmax(Activation): - def initialize(self): - super().initialize() - - -class TernaryTanh(Activation): - def initialize(self): - super().initialize() - - -class BatchNormalization(Layer): - _expected_attributes = [ - Attribute('n_in'), - Attribute('n_filt', default=0), - WeightAttribute('scale'), - WeightAttribute('bias'), - TypeAttribute('scale'), - TypeAttribute('bias'), - Attribute('use_gamma', value_type=bool, default=True), - Attribute('use_beta', value_type=bool, default=True), - ] - - def initialize(self): - inp = self.get_input_variable() - shape = inp.shape - dims = 
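A small standalone check of the hard-activation definitions from the HardActivation docstring above (slope and shift use the defaults listed in its expected attributes):

    import numpy as np

    def hard_sigmoid(x, slope=0.2, shift=0.5):
        return np.clip(slope * x + shift, 0.0, 1.0)

    def hard_tanh(x):
        return 2.0 * hard_sigmoid(x) - 1.0

    x = np.array([-5.0, 0.0, 5.0])
    print(hard_sigmoid(x))  # [0.  0.5 1. ]
    print(hard_tanh(x))     # [-1.  0.  1.]
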
inp.dim_names - self.add_output_variable(shape, dims) - - gamma = self.get_attr('gamma_data') - beta = self.get_attr('beta_data') - mean = self.get_attr('mean_data') - var = self.get_attr('variance_data') - - scale = gamma / np.sqrt(var + self.get_attr('epsilon')) - bias = beta - scale * mean - - self.add_weights_variable(name='scale', var_name='s{index}', data=scale) - self.add_weights_variable(name='bias', var_name='b{index}', data=bias) - - -class Merge(Layer): - def initialize(self): - assert len(self.inputs) == 2 - inp1 = self.get_input_variable(self.inputs[0]) - inp2 = self.get_input_variable(self.inputs[1]) - if np.prod(inp2.shape) > np.prod(inp1.shape): - shape = inp2.shape.copy() - dims = inp2.dim_names.copy() - else: - shape = inp1.shape.copy() - dims = inp1.dim_names.copy() - self.add_output_variable(shape, dims) - - -class Dot(Merge): - def initialize(self): - assert len(self.inputs) == 2 - inp1 = self.get_input_variable(self.inputs[0]) - inp2 = self.get_input_variable(self.inputs[1]) - assert inp1.shape == inp2.shape - if len(inp1.shape) > 1: - raise Exception('ERROR: Dot of tensors with rank > 1 is not yet supported.') - - self.add_output_variable(shape=[1], dim_names=[f'OUT_DOT_{self.index}']) - - -class Concatenate(Merge): - def initialize(self): - assert len(self.inputs) == 2 - inp1 = self.get_input_variable(self.inputs[0]) - inp2 = self.get_input_variable(self.inputs[1]) - axis = self.attributes['axis'] - if axis > 0: - axis -= 1 - shape = inp1.shape[:] - shape[axis] += inp2.shape[axis] - rank = len(shape) - if rank > 1: - dims = [f'OUT_CONCAT_{i}_{self.index}' for i in range(rank)] - else: - dims = [f'OUT_CONCAT_{self.index}'] - self.add_output_variable(shape, dims) - - -class BiasAdd(Merge): # TensorFlow's operator that gets merged into Dense/Conv - def initialize(self): - inp = self.get_input_variable(self.inputs[0]) - shape = inp.shape - dims = inp.dim_names - self.add_bias() - self.add_output_variable(shape, dims) - - -class Resize(Layer): - def initialize(self): - inp = self.get_input_variable() - if len(inp.shape) == 2: # 1D -> width + chan - shape = [self.get_attr('out_width'), self.get_attr('n_chan')] - dims = [f'OUT_WIDTH_{self.index}', f'N_CHAN_{self.index}'] - elif len(inp.shape) == 3: # 2D -> height + width + chan - shape = [self.get_attr('out_height'), self.get_attr('out_width'), self.get_attr('n_chan')] - dims = [f'OUT_HEIGHT_{self.index}', f'OUT_WIDTH_{self.index}', f'N_CHAN_{self.index}'] - self.add_output_variable(shape, dims, precision=inp.type.precision) - - -class Transpose(Layer): - def initialize(self): - inp = self.get_input_variable(self.inputs[0]) - perm = self.get_attr('perm') - self.set_attr('dim', f'{len(inp.shape)}d') - - if len(perm) > 3: - raise Exception('ERROR: Transpose of tensors with rank > 3 is not yet supported.') - - # ONNX double transpose specific, sometimes ONNX injects - # useless double transpose layers when converting - # from other frameworks - if len(perm) == 1: - shape = inp.shape # dummy shape - dims = ['DUMMY'] # dummy dims - self.set_attr('perm', [0]) - else: - shape = [inp.shape[i] for i in perm] - - self.set_attr('perm_str', ','.join([str(i) for i in perm])) - - if len(shape) == 2: - self.set_attr('perm_str', ','.join(['0'] + [str(i + 1) for i in perm])) - dims = [f'OUT_HEIGHT_{self.index}', f'OUT_WIDTH_{self.index}'] - self.set_attr('depth', 1) - self.set_attr('height', inp.shape[0]) - self.set_attr('width', inp.shape[1]) - elif len(shape) > 2: - dims = [f'OUT_DEPTH_{self.index}', f'OUT_HEIGHT_{self.index}', 
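The BatchNormalization scale/bias precomputation above admits a one-line check: with `scale = gamma / sqrt(var + eps)` and `bias = beta - scale * mean`, batch normalization reduces to an affine map. A hedged toy verification:

    import numpy as np

    gamma, beta, mean, var, eps = 1.5, -0.2, 0.3, 0.8, 1e-3
    x = np.linspace(-2, 2, 5)

    scale = gamma / np.sqrt(var + eps)
    bias = beta - scale * mean

    reference = gamma * (x - mean) / np.sqrt(var + eps) + beta
    assert np.allclose(reference, scale * x + bias)
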
f'OUT_WIDTH_{self.index}'] - self.set_attr('depth', inp.shape[0]) - self.set_attr('height', inp.shape[1]) - self.set_attr('width', inp.shape[2]) - self.add_output_variable(shape, dims, precision=inp.type.precision) - - -class Embedding(Layer): - _expected_attributes = [ - Attribute('n_in'), - Attribute('n_out'), - Attribute('vocab_size'), - WeightAttribute('embeddings'), - TypeAttribute('embeddings'), - ] - - def initialize(self): - shape = self.get_input_variable().shape[:] - shape += [self.attributes['n_out']] - if len(shape) > 1: - dims = [f'N_LAYER_{i}_{self.index}' for i in range(1, len(shape) + 1)] - else: - dims = [f'N_LAYER_{self.index}'] - self.add_output_variable(shape, dims) - - self.add_weights_variable(name='embeddings', var_name='e{index}') - - -class SimpleRNN(Layer): - _expected_attributes = [ - Attribute('n_out'), - Attribute('activation', value_type=str), - Attribute('return_sequences', value_type=bool, default=False), - Attribute('return_state', value_type=bool, default=False), - ChoiceAttribute('direction', ['forward', 'backward'], default='forward'), - WeightAttribute('weight'), - WeightAttribute('bias'), - WeightAttribute('recurrent_weight'), - TypeAttribute('weight'), - TypeAttribute('bias'), - TypeAttribute('recurrent_weight'), - ] - - def initialize(self): - if self.attributes['return_sequences']: - shape = [self.attributes['n_timesteps'], self.attributes['n_out']] - dims = [f'N_TIME_STEPS_{self.index}', f'N_OUT_{self.index}'] - else: - shape = [self.attributes['n_out']] - dims = [f'N_OUT_{self.index}'] - - self.add_output_variable(shape, dims) - - if self.attributes['return_state']: - state_shape = [self.attributes['n_out']] - state_dims = [f'N_OUT_{self.index}'] - self.add_output_variable( - state_shape, state_dims, out_name=self.outputs[1], var_name='layer{index}_h', type_name='layer{index}_h_t' - ) - self.add_output_variable( - state_shape, state_dims, out_name=self.outputs[2], var_name='layer{index}_c', type_name='layer{index}_c_t' - ) - - # weights - self.add_weights() - - # recurrent weights - self.add_weights_variable(name='recurrent_weight', var_name='wr{index}') - - # biases - self.add_weights_variable(name='bias', var_name='b{index}') - - -class LSTM(Layer): - _expected_attributes = [ - Attribute('n_out'), - Attribute('activation', value_type=str), - Attribute('recurrent_activation', value_type=str), - Attribute('return_sequences', value_type=bool, default=False), - Attribute('return_state', value_type=bool, default=False), - ChoiceAttribute('direction', ['forward', 'backward'], default='forward'), - Attribute('time_major', value_type=bool, default=False), - WeightAttribute('weight'), - WeightAttribute('bias'), - WeightAttribute('recurrent_weight'), - WeightAttribute('recurrent_bias'), - TypeAttribute('weight'), - TypeAttribute('bias'), - TypeAttribute('recurrent_weight'), - TypeAttribute('recurrent_bias'), - ] - - def initialize(self): - if self.attributes['return_sequences']: - shape = [self.attributes['n_timesteps'], self.attributes['n_out']] - dims = [f'N_TIME_STEPS_{self.index}', f'N_OUT_{self.index}'] - else: - shape = [self.attributes['n_out']] - dims = [f'N_OUT_{self.index}'] - - self.add_output_variable(shape, dims) - - if self.attributes['return_state']: - state_shape = [self.attributes['n_out']] - state_dims = [f'N_OUT_{self.index}'] - self.add_output_variable( - state_shape, state_dims, out_name=self.outputs[1], var_name='layer{index}_h', type_name='layer{index}_h_t' - ) - self.add_output_variable( - state_shape, state_dims, 
out_name=self.outputs[2], var_name='layer{index}_c', type_name='layer{index}_c_t' - ) - - # weights - self.add_weights() - - # recurrent weights - recurrent_weight = self.get_attr('recurrent_weight_data') - self.add_weights_variable(name='recurrent_weight', var_name='wr{index}', data=recurrent_weight) - - # biases - self.add_weights_variable(name='bias', var_name='b{index}') - - recurrent_bias = np.zeros(recurrent_weight.shape[1]) - self.add_weights_variable(name='recurrent_bias', var_name='br{index}', data=recurrent_bias) - - -class GRU(Layer): - _expected_attributes = [ - Attribute('n_out'), - Attribute('activation', value_type=str), - Attribute('recurrent_activation', value_type=str), - Attribute('return_sequences', value_type=bool, default=False), - Attribute('return_state', value_type=bool, default=False), - ChoiceAttribute('direction', ['forward', 'backward'], default='forward'), - Attribute('time_major', value_type=bool, default=False), - ChoiceAttribute('apply_reset_gate', ['before', 'after'], default='after'), - WeightAttribute('weight'), - WeightAttribute('bias'), - WeightAttribute('recurrent_weight'), - WeightAttribute('recurrent_bias'), - TypeAttribute('weight'), - TypeAttribute('bias'), - TypeAttribute('recurrent_weight'), - TypeAttribute('recurrent_bias'), - ] - - def initialize(self): - if self.attributes['return_sequences']: - shape = [self.attributes['n_timesteps'], self.attributes['n_out']] - dims = [f'N_TIME_STEPS_{self.index}', f'N_OUT_{self.index}'] - else: - shape = [self.attributes['n_out']] - dims = [f'N_OUT_{self.index}'] - - self.add_output_variable(shape, dims) - - if self.attributes['return_state']: - state_shape = [self.attributes['n_out']] - state_dims = [f'N_OUT_{self.index}'] - self.add_output_variable( - state_shape, state_dims, out_name=self.outputs[1], var_name='layer{index}_h', type_name='layer{index}_h_t' - ) - self.add_output_variable( - state_shape, state_dims, out_name=self.outputs[2], var_name='layer{index}_c', type_name='layer{index}_c_t' - ) - - # weights - self.add_weights() - - # recurrent weights - self.add_weights_variable(name='recurrent_weight', var_name='wr{index}') - - # biases - self.add_weights_variable(name='bias', var_name='b{index}') - self.add_weights_variable(name='recurrent_bias', var_name='br{index}') - - -class GarNet(Layer): - ref_impl = False - - def initialize(self): - reuse_factor = self.model.config.get_reuse_factor(self) - if self.attributes['n_vertices'] % reuse_factor != 0: - raise Exception( - 'GarNet vertex loop has no bound check; ' - f'number of vertices must be divisible by the reuse factor ({reuse_factor}).' 
- ) - - self._initialize_transforms() - - if self.attributes['collapse']: - shape = [self._output_features] - dims = [f'OUT_FEATURES_{self.index}'] - else: - shape = [self.attributes['n_vertices'], self._output_features] - dims = [f'VERTICES_{self.index}', f'OUT_FEATURES_{self.index}'] - - self.add_output_variable(shape, dims) - - def _initialize_transforms(self): - n_propagate = self.attributes['n_propagate'] - n_aggregators = self.attributes['n_aggregators'] - n_out_features = self.attributes['n_out_features'] - - if self.ref_impl: - weights_source = [ - ('input_transform', 'FLR', 'kernel'), - ('input_transform', 'FLR', 'bias'), - ('aggregator_distance', 'S', 'kernel'), - ('aggregator_distance', 'S', 'bias'), - ('output_transform', 'Fout', 'kernel'), - ('output_transform', 'Fout', 'bias'), - ] - - else: - quantize = self.get_attr('quantizer') is not None - kernel, bias = self._make_input_transform_weights(n_propagate, n_aggregators, n_out_features, quantize=quantize) - - self._add_variable( - 'input_transform_weights', 'input_transform_w{index}', kernel, frac_width=10, quantize=quantize - ) - self._add_variable('input_transform_biases', 'input_transform_b{index}', bias, frac_width=10, quantize=quantize) - # dummy - self.add_weights_variable(name='output_transform_weights', var_name='output_transform_w{index}', data=np.ones(1)) - - weights_source = [ - ('aggregator_distance', 'S', 'kernel'), - ('aggregator_distance', 'S', 'bias'), - ('output_transform', 'Fout', 'bias'), - ] - - for op_name, lname, wtype in weights_source: - data = self.get_attr(f'{lname}_{wtype}_data') - if wtype == 'kernel': - data = data.transpose((1, 0)) - vtype = 'weights' - else: - vtype = 'biases' - - name = f'{op_name}_{vtype}' - var_name = f'{op_name}_{vtype[0]}{{index}}' - - self._add_variable(name, var_name, data, frac_width=10, quantize=False) - - self._output_features = self.attributes['n_out_features'] - - def _make_input_transform_weights(self, n_propagate, n_aggregators, n_out_features, quantize=False, sublayer=''): - # Due to linearity of the input transform, input weights and biases can be contracted away at conversion time - output_transform_kernel = self.get_attr( - f'Fout{sublayer}_kernel_data' - ) # [(n_aggregators, n_propagate), n_out_features] - output_transform_kernel = output_transform_kernel.reshape((n_aggregators, n_propagate, n_out_features)) - if quantize: - output_transform_kernel = self.get_attr('quantizer')(output_transform_kernel) - - input_transform_kernel = self.get_attr(f'FLR{sublayer}_kernel_data') # [n_in_features, n_propagate] - if quantize: - input_transform_kernel = self.get_attr('quantizer')(input_transform_kernel) - data = np.dot(input_transform_kernel, output_transform_kernel) # [n_in_features, n_aggregators, n_out_features] - kernel = data.transpose((2, 1, 0)) - - input_transform_bias = self.get_attr(f'FLR{sublayer}_bias_data') # [n_propagate] - if quantize: - input_transform_bias = self.get_attr('quantizer')(input_transform_bias) - data = np.dot(input_transform_bias, output_transform_kernel) # [n_aggregators, n_out_features] - bias = data.transpose((1, 0)) - - return kernel, bias - - def _add_variable(self, name, var_name, data, frac_width=10, quantize=False): - # Wrapper for add_weights_variable with precision determination from data - - # automatically make the variable unsigned if data are all positive - signed = np.amin(data) < 0.0 - - int_width = find_minimum_width(data, signed=signed) - - if quantize: - precision = IntegerPrecisionType(width=int_width, signed=signed) - 
else: - width = int_width + frac_width - precision = FixedPrecisionType( - width=width, integer=int_width, signed=signed, rounding_mode='AP_RND', saturation_mode='AP_SAT' - ) - - self.add_weights_variable(name=name, var_name=var_name, data=data, precision=precision) - - -class GarNetStack(GarNet): - def _initialize_transforms(self): - self._sublayer_weights = [] - - quantize = self.get_attr('quantizer') is not None - - for il in range(self.attributes['n_sublayers']): - sublayer_weights = {} - - n_aggregators = self.attributes['n_aggregators'][il] - n_out_features = self.attributes['n_out_features'][il] - n_propagate = self.attributes['n_propagate'][il] - - kernel, bias = self._make_input_transform_weights( - n_propagate, n_aggregators, n_out_features, quantize=quantize, sublayer=il - ) - - name = f'input_transform_{il}_weights' - self._add_variable(name, f'input_transform_{il}_w{{index}}', kernel, frac_width=10, quantize=quantize) - sublayer_weights['input_transform_weights'] = self.weights[name] - - name = f'input_transform_{il}_biases' - self._add_variable(name, f'input_transform_{il}_b{{index}}', bias, frac_width=10, quantize=quantize) - sublayer_weights['input_transform_biases'] = self.weights[name] - - weights_source = [ - ('aggregator_distance', f'S{il}', 'kernel'), - ('aggregator_distance', f'S{il}', 'bias'), - ('output_transform', f'Fout{il}', 'bias'), - ] - - for op_name, lname, wtype in weights_source: - data = self.get_attr(f'{lname}_{wtype}_data') - if wtype == 'kernel': - data = data.transpose((1, 0)) - vtype = 'weights' - else: - vtype = 'biases' - - name = f'{op_name}_{il}_{vtype}' - var_name = f'{op_name}_{il}_{vtype[0]}{{index}}' - - self._add_variable(name, var_name, data, frac_width=10, quantize=False) - sublayer_weights[f'{op_name}_{vtype}'] = self.weights[name] - - self._sublayer_weights.append(sublayer_weights) - - self._output_features = self.attributes['n_out_features'][-1] - - -class LayerGroup(Layer): - _expected_attributes = [ - Attribute('layer_list', value_type=list), - Attribute('input_layers', value_type=list), - Attribute('output_layers', value_type=list), - Attribute('data_reader', value_type=object), - Attribute('output_shape', value_type=list), - ] - - def initialize(self): - shape = self.get_attr('output_shape') - if shape[0] is None: - shape.pop(0) - dims = [f'N_INPUT_{self.index}_{i+1}' for i in range(len(shape))] - - self.add_output_variable(shape, dims) - - -class SymbolicExpression(Layer): - _expected_attributes = [ - Attribute('expression', value_type=list), - Attribute('n_symbols'), - Attribute('lut_functions', value_type=list, default=[]), - ] - - def initialize(self): - self.set_attr('expr_t', NamedType(*reversed(self.model.config.get_precision(self, 'expr')))) - self.add_output_variable([len(self.get_attr('expression'))], [f'N_OUTPUTS_{self.index}'], var_name='y') - - -layer_map = { - 'Input': Input, - 'InputLayer': Input, - 'Activation': Activation, - 'QActivation': Activation, - 'LeakyReLU': ParametrizedActivation, - 'ThresholdedReLU': ParametrizedActivation, - 'ELU': ParametrizedActivation, - 'PReLU': PReLU, - 'Softmax': Softmax, - 'TernaryTanh': TernaryTanh, - 'HardActivation': HardActivation, - 'Reshape': Reshape, - 'Dense': Dense, - 'BinaryDense': Dense, - 'TernaryDense': Dense, - 'QDense': Dense, - 'Conv1D': Conv1D, - 'QConv1D': Conv1D, - 'Conv2D': Conv2D, - 'BinaryConv2D': Conv2D, - 'QConv2D': Conv2D, - 'QConv2DBatchnorm': Conv2DBatchnorm, - 'SeparableConv1D': SeparableConv1D, - 'QSeparableConv1D': SeparableConv1D, - 'DepthwiseConv1D': 
DepthwiseConv1D, - 'SeparableConv2D': SeparableConv2D, - 'QSeparableConv2D': SeparableConv2D, - 'DepthwiseConv2D': DepthwiseConv2D, - 'QDepthwiseConv2D': DepthwiseConv2D, - 'BatchNormalization': BatchNormalization, - 'QBatchNormalization': BatchNormalization, - 'MaxPooling1D': Pooling1D, - 'AveragePooling1D': Pooling1D, - 'MaxPooling2D': Pooling2D, - 'AveragePooling2D': Pooling2D, - 'GlobalMaxPooling1D': GlobalPooling1D, - 'GlobalAveragePooling1D': GlobalPooling1D, - 'GlobalMaxPooling2D': GlobalPooling2D, - 'GlobalAveragePooling2D': GlobalPooling2D, - 'ZeroPadding1D': ZeroPadding1D, - 'ZeroPadding2D': ZeroPadding2D, - 'Merge': Merge, - 'Dot': Dot, - 'Concatenate': Concatenate, - 'Resize': Resize, - 'UpSampling1D': Resize, - 'UpSampling2D': Resize, - 'Transpose': Transpose, - 'Embedding': Embedding, - 'SimpleRNN': SimpleRNN, - 'LSTM': LSTM, - 'GRU': GRU, - 'GarNet': GarNet, - 'GarNetStack': GarNetStack, - 'LayerGroup': LayerGroup, - 'SymbolicExpression': SymbolicExpression, - # TensorFlow-specific layers: - 'BiasAdd': BiasAdd, -} - - -def register_layer(name, clazz): - global layer_map - layer_map[name] = clazz diff --git a/hls4ml/hls4ml/model/optimizer/__init__.py b/hls4ml/hls4ml/model/optimizer/__init__.py deleted file mode 100644 index 2e9b197..0000000 --- a/hls4ml/hls4ml/model/optimizer/__init__.py +++ /dev/null @@ -1,57 +0,0 @@ -import os - -from hls4ml.model.flow.flow import register_flow -from hls4ml.model.optimizer.optimizer import ( # noqa: F401 - ConfigurableOptimizerPass, - GlobalOptimizerPass, - LayerOptimizerPass, - ModelOptimizerPass, - OptimizerPass, - extract_optimizers_from_object, - extract_optimizers_from_path, - get_available_passes, - get_backend_passes, - get_optimizer, - layer_optimizer, - model_optimizer, - optimize_model, - optimizer_pass, - register_pass, -) - -opt_path = os.path.dirname(__file__) + '/passes' - module_path = __name__ + '.passes' - -optimizers = extract_optimizers_from_path(opt_path, module_path) -for opt_name, opt in optimizers.items(): - register_pass(opt_name, opt) - -del opt_path -del module_path -del optimizers - -register_flow( - 'convert', - [ - 'channels_last_converter', - 'fuse_bias_add', - 'remove_useless_transpose', - 'expand_layer_group', - 'output_rounding_saturation_mode', - 'qkeras_factorize_alpha', - 'extract_ternary_threshold', - 'fuse_consecutive_batch_normalization', - ], -) # TODO Maybe not all QKeras optimizers belong here? - -register_flow( - 'optimize', - [ - 'eliminate_linear_activation', - 'fuse_consecutive_batch_normalization', - 'fuse_batch_normalization', - 'replace_multidimensional_dense_with_conv', - 'set_precision_concat', - ], - requires=['convert'], -) diff --git a/hls4ml/hls4ml/model/optimizer/optimizer.py b/hls4ml/hls4ml/model/optimizer/optimizer.py deleted file mode 100644 index 640b3f5..0000000 --- a/hls4ml/hls4ml/model/optimizer/optimizer.py +++ /dev/null @@ -1,328 +0,0 @@ -import importlib -import inspect -import os - -from hls4ml.utils.string_utils import convert_to_snake_case - - -class OptimizerPass: - """Base optimizer class from which all other optimizer types are derived.""" - - name = None - - def __init__(self): - pass - - def match(self, node): - """Predicate to match on a given node. - - Args: - node (Layer): Node in the model graph to try matching the optimizer on. - """ - raise NotImplementedError - - def transform(self, model, node): - """Transformation to apply if matching was successful. 
- - Transform should return a boolean value indicating if the model graph was altered (by adding/removing nodes). - - Args: - model (ModelGraph): Model to optimize - node (Layer): The matched node in the model graph. - """ - raise NotImplementedError - - @classmethod - def get_name(cls): - if cls.name is None: - return convert_to_snake_case(cls.__name__) # OptimizerPass -> optimizer_pass - else: - return cls.name - - -class GlobalOptimizerPass(OptimizerPass): - """Global optimizer that matches on every node in the model graph.""" - - def match(self, node): - return True # Match everything - - -class WrappedOptimizerPass(OptimizerPass): - """An optimizer class created by wrapping a function call. - - Users should generally not create any wrapped optimizer passes manually. - """ - - def __init__(self, name, condition, transform): - self.name = name - self.condition = condition - self.transform_func = transform - - def match(self, node): - return self.condition(node) - - def transform(self, model, node): - retval = self.transform_func(node) - return retval if retval is not None else False - - def get_name(self): - return self.name - - -class LayerOptimizerPass(WrappedOptimizerPass): - """A wrapper optimizer specific to a layer class. - - Commonly used by backends to add extra initialization to a layer instance. - """ - - def __init__(self, name, layer_class, transform): - super().__init__(name, lambda node: isinstance(node, layer_class), transform) - self.layer_class = layer_class - - -class ModelOptimizerPass(OptimizerPass): - """A special optimizer that works with the model itself. - - Examples include writing the model to C++/HLS. - """ - - def __init__(self, name, transform): - self.name = name - self.transform_func = transform - - def transform(self, model): - retval = self.transform_func(model) - return retval if retval is not None else False - - -class ConfigurableOptimizerPass(OptimizerPass): - """An optimizer that can be configured. - - Existing instances of this class in the registry can be configured with the configure() method. Multiple instances - with different configuration can co-exist if registered with different names. - """ - - def configure(self, **kwargs): - for key, value in kwargs.items(): - setattr(self, key, value) - - def get_config(self): - attrs = vars(self) - return attrs.copy() - - -# Decorator optimizers - - -def optimizer_pass(condition): - def decorator(function): - function._condition = condition - return function - - return decorator - - -def layer_optimizer(layer): - """Decorator to turn a function into an optimization pass. - - Example:: - - @layer_optimizer(MyLayer) - def init_mylayer(self, layer): - layer.set_attr('new_attribute', 'some_value') - - Args: - layer (class): The layer class the optimizer will match on. - """ - - def decorator(function): - return optimizer_pass(layer)(function) - - return decorator - - -def model_optimizer(): - """Decorator to turn a function into a model optimizer.""" - - def decorator(function): - return optimizer_pass(None)(function) - - return decorator - - -# Helpers for extracting optimizers from objects - - -def extract_optimizers_from_path(opt_path, module_path, initializer=None): - optimizers = {} - - if not os.path.exists(opt_path): - return optimizers - - if not module_path.endswith('.'): - module_path += '.' 
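To make the wrapped and decorated variants above concrete, here is a hedged sketch of a hand-written pass. The pass itself is hypothetical, but `match`/`transform`, `get_input_node`, `remove_node(..., rewire=True)` and `register_pass` (defined further below in this module) all appear in this patch:

    from hls4ml.model.layers import Transpose
    from hls4ml.model.optimizer import OptimizerPass, register_pass

    class EliminateDoubleTranspose(OptimizerPass):
        """Remove a Transpose that directly follows another Transpose.

        Sketch only: a real pass would also verify that the two
        permutations actually compose to the identity.
        """

        def match(self, node):
            return isinstance(node, Transpose) and isinstance(node.get_input_node(), Transpose)

        def transform(self, model, node):
            model.remove_node(node, rewire=True)
            return True  # the graph was altered

    register_pass('eliminate_double_transpose', EliminateDoubleTranspose)
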
- - for module in os.listdir(opt_path): - if module == '__init__.py' or module[-3:] != '.py': - continue - try: - lib = importlib.import_module(module_path + module[:-3]) - if 'register_' + module[:-3] in lib.__dict__: - opt_init_func = lib.__dict__['register_' + module[:-3]] - if initializer is not None: - opt_init_func(initializer) - else: - opt_init_func() - else: - for func in list(lib.__dict__.values()): - # if 'func' is a class - # and it inherits from OptimizerPass - # and is defined in this module (i.e., not imported) - if inspect.isclass(func) and issubclass(func, OptimizerPass) and func.__module__ == lib.__name__: - if inspect.ismethod(func.get_name): - optimizers[func.get_name()] = func - else: - func_instance = func() - optimizers[func_instance.get_name()] = func_instance - - except ImportError as e: - print(f'WARN: Unable to import optimizer(s) from {module}: {e}') - continue - - return optimizers - - -def extract_optimizers_from_object(clazz): - optimizers = {} - optimizer_list = [ - func for func in dir(clazz) if callable(getattr(clazz, func)) and hasattr(getattr(clazz, func), '_condition') - ] - for opt_name in optimizer_list: - func = getattr(clazz, opt_name) - if func._condition is None: - opt = ModelOptimizerPass(name=opt_name, transform=func) - elif inspect.isclass(func._condition): - opt = LayerOptimizerPass(name=opt_name, layer_class=func._condition, transform=func) - else: - opt = WrappedOptimizerPass(name=opt_name, condition=func._condition, transform=func) - optimizers[opt_name] = opt - - return optimizers - - -# Optimizer registry - -optimizer_map = {} - - -def _get_backend_name_prefix(name, backend): - if backend is not None and not name.startswith(backend.lower() + ':'): - name = backend.lower() + ':' + name - - return name - - -def register_pass(name, opt_cls, backend=None): - """Register a new optimizer pass. - - Args: - name (str): Name of the optimizer - opt_cls (class): The class of the optimizer. - backend (str, optional): Optional backend to register the optimizer to. If not None, the name of the backend - will be prepended to the name of the registered optimizer. Defaults to None. - - Raises: - Exception: If the optimization pass has already been registered with the given name. - - Returns: - str: The name of the registered optimizer. - """ - name = _get_backend_name_prefix(name, backend) - - if name in optimizer_map: - raise Exception(f'Optimization pass {name} already registered') - - if inspect.isclass(opt_cls): - opt = opt_cls() - else: - opt = opt_cls - - optimizer_map[name] = opt - - return name - - -def get_optimizer(name): - """Return the optimizer instance registered with the given name. - - Args: - name (str): Name of the optimizer in the registry. - - Raises: - Exception: If the optimizer with the given name is not found in the registry. - - Returns: - OptimizerPass: The optimizer from the registry. - """ - if name in optimizer_map: - return optimizer_map[name] - else: - raise Exception(f'Unknown optimizer: {name}') - - -def get_backend_passes(backend): - """Returns the list of optimizer passes belonging to a backend. - - Args: - backend (str): Name of the backend. - - Returns: - list: List of optimizer names registered with the given backend. - """ - return [opt for opt in optimizer_map.keys() if opt.startswith(backend.lower() + ':')] - - -def get_available_passes(): - """Return the list of all registered optimizer passes. - - Returns: - list: List of registered passes. 
- """ - return list(optimizer_map.keys()) - - -def optimize_model(model, passes): - """Optimize a given model with the given passes. - - The passes are attempted until all passes no longer match or no changes to the model graph occur. - - Args: - model (ModelGraph): The model to optimize. - passes (list): List of passes to apply. - - Returns: - set: The set of applied passes (the passes that matched the predicate). - """ - optimizers = {opt_pass: get_optimizer(opt_pass) for opt_pass in passes} - applied_passes = set() - optimization_done = False - while not optimization_done: - for opt_name, opt in optimizers.items(): - if isinstance(opt, ModelOptimizerPass) and opt_name not in applied_passes: - res = opt.transform(model) - if res: - applied_passes.add(opt_name) - continue - for node in model.graph.values(): - if opt.match(node): - res = opt.transform(model, node) - applied_passes.add(opt_name) - if res: - break - else: - continue - break - else: - optimization_done = True - - return applied_passes diff --git a/hls4ml/hls4ml/model/optimizer/passes/__init__.py b/hls4ml/hls4ml/model/optimizer/passes/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/hls4ml/hls4ml/model/optimizer/passes/bn_fuse.py b/hls4ml/hls4ml/model/optimizer/passes/bn_fuse.py deleted file mode 100644 index 02d9b84..0000000 --- a/hls4ml/hls4ml/model/optimizer/passes/bn_fuse.py +++ /dev/null @@ -1,38 +0,0 @@ -from hls4ml.model.layers import BatchNormalization, Conv1D, Conv2D, Dense -from hls4ml.model.optimizer import OptimizerPass - - -class FuseBatchNormalization(OptimizerPass): - def match(self, node): - is_match = ( - isinstance(node, BatchNormalization) - and isinstance(node.get_input_node(), (Dense, Conv1D, Conv2D)) - and node.get_input_node().get_attr('weight_quantizer') is None - and node.get_input_node().get_attr('bias_quantizer') is None - ) - return is_match - - def transform(self, model, node): - # Fuse weight and bias of Dense/Conv1D/Conv2D layer with BN values - parent_node = node.get_input_node() - parent_map = parent_node.get_output_use_map() - node_map = node.get_output_use_map() - if len(parent_map[parent_node.name]) > 1 or len(node_map[node.name]) > 1: - return False - - parent_weight = parent_node.weights['weight'] - parent_bias = parent_node.weights['bias'] - - bn_scale = node.weights['scale'] - bn_bias = node.weights['bias'] - - fused_weight = bn_scale.data * parent_weight.data - fused_bias = bn_scale.data * parent_bias.data + bn_bias.data - - model.remove_node(node, rewire=True) - parent_weight.data = fused_weight - parent_bias.data = fused_bias - if not parent_node.get_attr('use_bias', True): - parent_bias.update_precision(bn_bias.type.precision) - - return True diff --git a/hls4ml/hls4ml/model/optimizer/passes/convert_to_channels_last.py b/hls4ml/hls4ml/model/optimizer/passes/convert_to_channels_last.py deleted file mode 100644 index 9c19711..0000000 --- a/hls4ml/hls4ml/model/optimizer/passes/convert_to_channels_last.py +++ /dev/null @@ -1,135 +0,0 @@ -# Conversion of model from channels_first to channels_last data format -# Based on https://github.com/fastmachinelearning/qonnx/blob/ -# 12c96a3ded06beacab08e0f554e4ed014476c0aa/src/qonnx/transformation/channels_last.py - -from hls4ml.model.layers import Concatenate, Input, Reshape -from hls4ml.model.optimizer import OptimizerPass - - -class ChannelsLastConverter(OptimizerPass): - '''Converts a model from channels_first to channels_last data format by transposing the weights of relevant layers - and adding a transpose layer for 
the inputs and outputs, if necessary''' - - def match(self, node): - if not hasattr(node, 'channels_last_converted'): - return True - - def transform(self, model, node): - # If this parameter has not been set, this model does not need to be converted - if 'InputsChannelLast' not in model.config.config['HLSConfig']['Model']: - node.channels_last_converted = True - return False - outshape = node.get_output_variable().shape - - if isinstance(node, Input): - # if inputs are not yet transposed into channels_last, add transpose layer - if not model.config.config['HLSConfig']['Model']['InputsChannelLast'] and len(outshape) > 1: - # Add transpose for input layer - input = node.name - if len(outshape) == 2: - attributes = {'perm': [1, 0]} - else: - attributes = {'perm': [1, 2, 0]} - - transpose_node = model.make_node( - 'Transpose', f'transpose_input_for_{node.get_attr("name")}', attributes, [input] - ) - transpose_node.set_attr('name', f'transpose_input_for_{node.get_attr("name")}') - transpose_node.channels_last_converted = True - - model.insert_node(transpose_node) - else: - input_shape = node.get_output_variable().shape - input_shape.append(input_shape.pop(0)) - node.get_output_variable().shape = input_shape - dim_names = [f'N_INPUT_{i}_{node.index}' for i in range(1, len(input_shape) + 1)] - node.get_output_variable().dim_names = dim_names - else: - # Transpose weight tensors - tensors = ['weight', 'depthwise', 'pointwise', 'zero_bias', 'scale', 'recurrent_weight'] - for tensor in tensors: - try: - if len(node.get_weights(tensor).shape) == 2: - weights_channels_last = node.get_weights(tensor).data.transpose() - node.get_weights(tensor).data = weights_channels_last - elif len(node.get_weights(tensor).shape) == 3: - weights_channels_last = node.get_weights(tensor).data.transpose([2, 1, 0]) - node.get_weights(tensor).data = weights_channels_last - elif len(node.get_weights(tensor).shape) == 4: - weights_channels_last = node.get_weights(tensor).data.transpose([2, 3, 1, 0]) - node.get_weights(tensor).data = weights_channels_last - except KeyError: - pass - try: - node.set_attr('data_format', 'channels_last') - except AttributeError: - pass - - # Adjust axis of operation - if isinstance(node, Concatenate): - old_axis = node.get_attr('axis') - if len(outshape) == 2: - if old_axis == -1 or old_axis == 2: - node.set_attr('axis', 1) - else: - node.set_attr('axis', 2) - elif len(outshape) == 3: - if old_axis == 3 or old_axis == -1: - node.set_attr('axis', 1) - elif old_axis == 2 or old_axis == -2: - node.set_attr('axis', 2) # Not required, but left for clarity - else: - node.set_attr('axis', 3) - - # Adjust output shape - outdims = node.get_output_variable().dim_names - if len(outshape) == 2: - shape = [outshape[1], outshape[0]] - dims = [outdims[1], outdims[0]] - node.add_output_variable(shape, dims) - elif len(outshape) == 3: - shape = [outshape[1], outshape[2], outshape[0]] - dims = [outdims[1], outdims[2], outdims[0]] - node.add_output_variable(shape, dims) - - # Have to transpose back before flattening to get correct order of elements in the flattened tensor - if isinstance(node, Reshape) and len(node.attributes['target_shape']) == 1: - previous_node = node.get_input_node(node.inputs[0]) - input = previous_node.name - outshape = previous_node.get_output_variable().shape - - if len(outshape) == 2: - attributes = {'perm': [1, 0]} - else: - attributes = {'perm': [2, 0, 1]} - - transpose_node = model.make_node( - 'Transpose', f'transpose_input_for_{node.get_attr("name")}', attributes, [input] - ) - 
transpose_node.channels_last_converted = True - - model.insert_node(transpose_node) - - # Add transpose for output layer - elif ( - node.get_attr('name') in model.outputs - and len(outshape) > 1 - and model.config.config['HLSConfig']['Model']['TransposeOutputs'] - ): - input = node.name - outshape = node.get_output_variable().shape - - if len(outshape) == 2: - attributes = {'perm': [1, 0]} - else: - attributes = {'perm': [2, 0, 1]} - - transpose_node = model.make_node( - 'Transpose', f'transpose_output_for_{node.get_attr("name")}', attributes, [input] - ) - transpose_node.channels_last_converted = True - - model.insert_node(transpose_node) - - node.channels_last_converted = True - return True diff --git a/hls4ml/hls4ml/model/optimizer/passes/expand_layer_group.py b/hls4ml/hls4ml/model/optimizer/passes/expand_layer_group.py deleted file mode 100644 index 97bfc72..0000000 --- a/hls4ml/hls4ml/model/optimizer/passes/expand_layer_group.py +++ /dev/null @@ -1,46 +0,0 @@ -from hls4ml.model.layers import Input, LayerGroup -from hls4ml.model.optimizer import OptimizerPass - - -class ExpandLayerGroup(OptimizerPass): - '''Expands LayerGroup (a nested model) into the parent model.''' - - def match(self, node): - return isinstance(node, LayerGroup) - - def transform(self, model, node): - layer_list = node.get_attr('layer_list') - - # We'll keep track of inserted Input nodes to remove later - inserted_input_nodes = [] - - for i, layer in enumerate(layer_list): - kind = layer['class_name'] - name = layer['name'] - inputs = layer.get('inputs', []) - outputs = layer.get('outputs', []) - - if name in model.graph.keys(): - raise Exception(f'Layer names must be unique: "{name}" already found in the model graph.') - - if len(inputs) == 0: - if kind in ['InputLayer', 'Input']: - inputs = node.inputs.copy() - else: - inputs = model.graph[layer_list[i - 1]['name']].outputs.copy() - if len(outputs) == 0: - outputs = [name] - - new_node = model.make_node(kind, name, layer, inputs, outputs) - model.insert_node(new_node) - if isinstance(new_node, Input): - inserted_input_nodes.append(new_node) - - rewire = node.outputs[0] not in model.outputs - - model.remove_node(node, rewire) - - for input_node in inserted_input_nodes: - model.remove_node(input_node, rewire=True) - - return True diff --git a/hls4ml/hls4ml/model/optimizer/passes/fuse_biasadd.py b/hls4ml/hls4ml/model/optimizer/passes/fuse_biasadd.py deleted file mode 100644 index eda4d08..0000000 --- a/hls4ml/hls4ml/model/optimizer/passes/fuse_biasadd.py +++ /dev/null @@ -1,18 +0,0 @@ -from hls4ml.model.layers import BiasAdd, Conv1D, Conv2D, Dense -from hls4ml.model.optimizer import OptimizerPass - - -class FuseBiasAdd(OptimizerPass): - '''Fuses BiasAdd into Dense/Conv2D layer (common in TF models).''' - - def match(self, node): - return isinstance(node, BiasAdd) and isinstance(node.get_input_node(), (Dense, Conv1D, Conv2D)) - - def transform(self, model, node): - # Fuse BiasAdd into Dense layer - dense_layer = node.get_input_node() - dense_layer.get_weights('bias').data = node.get_weights('bias').data - - model.remove_node(node, rewire=True) - - return True diff --git a/hls4ml/hls4ml/model/optimizer/passes/multi_dense.py b/hls4ml/hls4ml/model/optimizer/passes/multi_dense.py deleted file mode 100644 index 2b303ea..0000000 --- a/hls4ml/hls4ml/model/optimizer/passes/multi_dense.py +++ /dev/null @@ -1,68 +0,0 @@ -import numpy as np - -from hls4ml.model.layers import Dense -from hls4ml.model.optimizer import OptimizerPass - - -class ReplaceMultidimensionalDenseWithConv(OptimizerPass): - def match(self, node): - return ( - isinstance(node, Dense) - and len(node.get_input_variable().shape) - sum(d == 1 for d in node.get_input_variable().shape) > 1 - ) - # The sum counts the input dimensions of size 1; subtracting it leaves only - # the non-unit dimensions, so e.g. a Dense layer over shape (1, N) is not matched - - def transform(self, model, node): - dim = len(node.get_input_variable().shape) - 1 - input_shape = node.get_input_variable().shape - - pointwise_attrs = { - 'data_format': 'channels_last', - 'padding': 'valid', - 'n_chan': input_shape[-1], - 'n_filt': node.get_attr('n_out'), - 'weight_data': node.get_attr('weight_data'), - 'bias_data': node.get_attr('bias_data'), - } - - if dim == 1: - pointwise_attrs.update( - { - 'in_width': input_shape[0], - 'out_width': input_shape[0], - 'filt_width': 1, - 'stride_width': 1, - 'pad_left': 0, - 'pad_right': 0, - } - ) - elif dim == 2: - pointwise_attrs.update( - { - 'in_height': input_shape[0], - 'in_width': input_shape[1], - 'out_height': input_shape[0], - 'out_width': input_shape[1], - 'filt_height': 1, - 'filt_width': 1, - 'stride_height': 1, - 'stride_width': 1, - 'pad_top': 0, - 'pad_bottom': 0, - 'pad_left': 0, - 'pad_right': 0, - } - ) - else: - raise Exception('Cannot replace Dense over {dim}D tensor with Conv{dim}D.'.format(dim=dim)) - - class_name = 'PointwiseConv' + str(dim) + 'D' - pw_node = model.make_node(class_name, node.name, pointwise_attrs, node.inputs.copy()) - if len(node.weights['weight'].data.shape) == 2: # This can happen if we assign weights of Dense layer to 1x1 Conv2D - pw_node.weights['weight'].data = np.expand_dims(node.weights['weight'].data, axis=tuple(range(dim))) - pw_node.weights['bias'].data = node.weights['bias'].data - model.replace_node(node, pw_node) - - return True diff --git a/hls4ml/hls4ml/model/optimizer/passes/nop.py b/hls4ml/hls4ml/model/optimizer/passes/nop.py deleted file mode 100644 index 55fcf16..0000000 --- a/hls4ml/hls4ml/model/optimizer/passes/nop.py +++ /dev/null @@ -1,14 +0,0 @@ -from hls4ml.model.layers import Activation -from hls4ml.model.optimizer import OptimizerPass - - -class EliminateLinearActivation(OptimizerPass): - def match(self, node): - cast = False - if isinstance(node, Activation): - cast = node.get_input_variable().type.precision != node.get_output_variable().type.precision - return isinstance(node, Activation) and node.get_attr('activation') == 'linear' and not cast - - def transform(self, model, node): - model.remove_node(node) - return True diff --git a/hls4ml/hls4ml/model/optimizer/passes/precision_merge.py b/hls4ml/hls4ml/model/optimizer/passes/precision_merge.py deleted file mode 100644 index 019bfd7..0000000 --- a/hls4ml/hls4ml/model/optimizer/passes/precision_merge.py +++ /dev/null @@ -1,40 +0,0 @@ -from hls4ml.model.optimizer import OptimizerPass -from hls4ml.model.types import FixedPrecisionType - - -def get_concat_type(itype1, itype2): - newwidth = max(itype1.width, itype2.width) - newint = max(itype1.integer, itype2.integer) - if itype1.signed ^ itype2.signed: # XOR - newint += 1 - newwidth += 1 - newrmode = itype1.rounding_mode if itype1.rounding_mode is not None else itype2.rounding_mode - newsmode = itype1.saturation_mode if itype1.saturation_mode is not None else itype2.saturation_mode - newsbits = itype1.saturation_bits if itype1.saturation_bits is not None else itype2.saturation_bits - - newtype = 
FixedPrecisionType(newwidth, newint, itype1.signed or itype2.signed, newrmode, newsmode, newsbits) - return newtype - - -class SetPrecisionConcat(OptimizerPass): - def match(self, node): - if node.__class__.__name__ == 'Concatenate': - otype = node.get_output_variable().type.precision - itype1 = node.get_input_variable(node.inputs[0]).type.precision - itype2 = node.get_input_variable(node.inputs[1]).type.precision - if isinstance(otype, FixedPrecisionType) and otype != get_concat_type(itype1, itype2): - return True - return False - - def transform(self, model, node): - """ - Set concat output precision - """ - otype = node.get_output_variable().type.precision - itype1 = node.get_input_variable(node.inputs[0]).type.precision - itype2 = node.get_input_variable(node.inputs[1]).type.precision - newtype = get_concat_type(itype1, itype2) - print(f"Found {node.name} in the model, optimizing {otype} to {newtype}...") - node.get_output_variable().type.precision = newtype - - return True diff --git a/hls4ml/hls4ml/model/optimizer/passes/qkeras.py b/hls4ml/hls4ml/model/optimizer/passes/qkeras.py deleted file mode 100644 index cdbb56e..0000000 --- a/hls4ml/hls4ml/model/optimizer/passes/qkeras.py +++ /dev/null @@ -1,276 +0,0 @@ -import numpy as np -import tensorflow as tf - -from hls4ml.model.layers import BatchNormalization, register_layer -from hls4ml.model.optimizer import ConfigurableOptimizerPass, OptimizerPass, register_pass -from hls4ml.model.types import FixedPrecisionType, IntegerPrecisionType, NamedType, QKerasPO2Quantizer - - -class OutputRoundingSaturationMode(ConfigurableOptimizerPass): - ''' - Set the Rounding and Saturation mode of the output (and accumulator, if applicable) - of the layers specified in the layer list. - The layer list is empty by default. 
- To specify which layer to apply this pass to, perform e.g.: - hls4ml.model.optimizer.get_optimizer('output_rounding_saturation_mode').configure(layers=['Dense', 'Activation']) - The Rounding and Saturation modes are 'None' by default (so use the compiler defaults) - To set which mode to use: - hls4ml.model.optimizer.get_optimizer('output_rounding_saturation_mode').configure(rounding_mode='AP_RND_CONV') - hls4ml.model.optimizer.get_optimizer('output_rounding_saturation_mode').configure(saturation_mode='AP_SAT') - ''' - - def __init__(self): - self.layers = [] - self.rounding_mode = None - self.saturation_mode = None - self.saturation_bits = None - - def match(self, node): - layer_match = node.class_name in self.layers or node.name in self.layers - t = str(node.get_output_variable().type.precision) - # check that the type doesn't already contain the rounding mode - rs_match = False - if self.rounding_mode is not None: - rs_match = rs_match or not (self.rounding_mode in t) - if self.saturation_mode is not None: - rs_match = rs_match or not (self.saturation_mode in t) - return layer_match and rs_match - - def transform(self, model, node): - old_precision = node.get_output_variable().type.precision - if isinstance(old_precision, IntegerPrecisionType): - new_precision = IntegerPrecisionType(old_precision.width, old_precision.signed) - elif isinstance(old_precision, FixedPrecisionType): - new_precision = FixedPrecisionType( - old_precision.width, - old_precision.integer, - old_precision.signed, - self.rounding_mode, - self.saturation_mode, - self.saturation_bits, - ) - else: # in case the precision is a string - new_precision = self.precision_string_modify(old_precision) - - out_var = node.get_output_variable() - out_t = NamedType(out_var.type.name, new_precision) - out_var.type = out_t - node.attributes['result_t'] = out_t - - if node.get_attr('accum_t') is not None: - accum_t = NamedType(f'layer{node.index}_accum_t', new_precision) - node.set_attr('accum_t', accum_t) - return False - - def precision_string_modify(self, pstr): - # For when the type is a string not an Type - mode = '' - if self.rounding_mode is not None: - mode += ',' + self.rounding_mode - if self.saturation_mode is not None: - mode += ',' + self.saturation_mode - if self.saturation_bits is not None: - mode += ',' + str(self.saturation_bits) - mode += '>' - pstr = pstr.replace('>', mode) - return pstr - - -class ApplyAlpha(BatchNormalization): - '''A custom layer to scale the output of a QDense layer which used 'alpha != 1' - Inference computation uses BatchNormalization methods''' - - def initialize(self): - inp = self.get_input_variable() - shape = inp.shape - dims = inp.dim_names - self.add_output_variable(shape, dims) - - scale = self.get_attr('scale_data') - scale_quantizer = self.get_attr('scale_quantizer') - bias = self.get_attr('bias_data') - bias_quantizer = self.get_attr('bias_quantizer') - - self.add_weights(scale, quantizer=scale_quantizer) - self.add_bias(bias, quantizer=bias_quantizer) - - def add_weights(self, scale, quantizer=None): - self.add_weights_variable(name='scale', var_name='s{index}', data=scale, quantizer=quantizer) - - def add_bias(self, bias, quantizer=None): - self.add_weights_variable(name='bias', var_name='b{index}', data=bias, quantizer=quantizer) - - -def register_qkeras(): - # Register the layer types to the layer map - register_layer('ApplyAlpha', ApplyAlpha) - - # Register the optimization passes - register_pass('output_rounding_saturation_mode', OutputRoundingSaturationMode) - 
register_pass('qkeras_factorize_alpha', QKerasFactorizeAlpha) - register_pass('extract_ternary_threshold', ExtractTernaryThreshold) - register_pass('fuse_consecutive_batch_normalization', FuseConsecutiveBatchNormalization) - - -class QKerasFactorizeAlpha(OptimizerPass): - '''OptimizerPass for extracting alpha "scale" from QKeras quantized layer. - The weights of the Q{Dense, Conv} layer are scaled to the common data type, - and an 'ApplyAlpha' layer is inserted to reapply the scale. - ''' - - def match(self, node): - q_layer = node.class_name in ['Dense', 'Conv1D', 'Conv2D', 'Conv2DBatchnorm'] - has_w_quant = node.get_attr('weight_quantizer') is not None - has_b_quant = node.get_attr('bias_quantizer') is not None - has_w_alpha, has_b_alpha = False, False - if has_w_quant: - if hasattr(node.get_attr('weight_quantizer'), 'alpha'): - w_alpha = node.get_attr('weight_quantizer').alpha - has_w_alpha = w_alpha != 1 and w_alpha is not None - else: - has_w_alpha = False - if has_b_quant: - if hasattr(node.get_attr('bias_quantizer'), 'alpha'): - b_alpha = node.get_attr('bias_quantizer').alpha - has_b_alpha = b_alpha != 1 and b_alpha is not None - else: - has_b_alpha = False - is_match = q_layer and ((has_w_quant and has_w_alpha) or (has_b_quant and has_b_alpha)) - return is_match - - def transform(self, model, node): - # The quantizer has to be applied to set the scale attribute - # This must be applied to the _unquantized_ weights to obtain the correct scale - quantizer = node.weights['weight'].quantizer.quantizer_fn # get QKeras quantizer - weights = node.weights['weight'].data_unquantized # get weights - qweights = quantizer(tf.convert_to_tensor(weights)) - if isinstance(quantizer.scale, (int, float)): - scale = np.ones(shape=node.get_output_variable().shape[-1]) * quantizer.scale - else: - scale = quantizer.scale.numpy() - unscale = 1.0 / scale - - new_weights = unscale * qweights # use the quantized weights for safety - - qcfg = quantizer.get_config() - alpha = qcfg['alpha'] - # Set the alpha to 1 to avoid hitting this pass again - qcfg['alpha'] = 1 - node.weights['weight'].quantizer.quantizer_fn = quantizer.from_config(qcfg) - - # update the weights also applying the hls4ml quantizer - # this is only needed for the binary layers which encode -1 as 0 - quantized_new_weights = node.weights['weight'].quantizer(new_weights.numpy()) - node.weights['weight'].data = quantized_new_weights - - # Move the biases from the Dense layer to the ApplyAlpha layer - bias = node.weights['bias'].data - bias_quantizer = None - if hasattr(node.weights['bias'], 'quantizer'): - bias_quantizer = node.weights['bias'].quantizer - node.weights['bias'].data = np.zeros(bias.shape) - - has_w_quant = node.get_attr('weight_quantizer') is not None - has_b_quant = node.get_attr('bias_quantizer') is not None - if has_w_quant: - node.attributes['weight_quantizer'].alpha = 1 - if has_b_quant: - node.attributes['bias_quantizer'].alpha = 1 - - # insert a Batch Normalization layer to apply the alpha scale - if alpha == 'auto_po2': - scale_bits = np.maximum(np.abs(np.log2(scale)).max().astype('int') + 1, 2) - scale_quantizer = QKerasPO2Quantizer({'class_name': 'quantized_po2', 'config': {'bits': scale_bits}}) - else: - scale_quantizer = None - - if 'Dense' in node.class_name: - n_in = node.get_attr('n_out') - elif 'Conv' in node.class_name: - n_in = node.get_attr('out_width') * node.get_attr('out_height', 1) * node.get_attr('n_filt') - else: - n_in = node.get_attr('n_out') - - attrs = { - 'name': node.get_attr('name') + '_alpha', - 
'class_name': 'Alpha', - 'inputs': node.outputs, - 'n_in': n_in, - 'n_filt': node.get_attr('n_filt', -1), - 'reuse_factor': node.get_attr('reuse_factor'), - 'scale_data': scale, - 'scale_quantizer': scale_quantizer, - 'bias_data': bias, - 'bias_quantizer': bias_quantizer, - 'trace': node.get_attr('trace', False), - } - alpha_layer = model.make_node(ApplyAlpha, node.name + '_alpha', attrs, node.outputs) - model.insert_node(alpha_layer) - return True - - -class FuseConsecutiveBatchNormalization(OptimizerPass): - '''OptimizerPass to merge consecutive BatchNormalization layers. - These may exist in a model after the QKerasFactorizeAlpha pass. - Scale and Bias of each layer are combined into scale and bias of a single layer. - ''' - - def match(self, node): - return isinstance(node, BatchNormalization) and isinstance(node.get_input_node(), BatchNormalization) - - def transform(self, model, node): - bn0 = node.get_input_node() - bn1 = node - bn0_map = bn0.get_output_use_map() - bn1_map = bn1.get_output_use_map() - if len(bn0_map[bn0.name]) > 1 or len(bn1_map[bn1.name]) > 1: - return False - - s0 = bn0.weights['scale'].data - b0 = bn0.weights['bias'].data - s1 = bn1.weights['scale'].data - b1 = bn1.weights['bias'].data - - s2 = s0 * s1 - b2 = s1 * b0 + b1 - - bn0.weights['scale'].data = s2 - bn0.weights['bias'].data = b2 - - model.remove_node(node, rewire=True) - return True - - -class ExtractTernaryThreshold(OptimizerPass): - '''The input value (threshold) at which the output of a ternary activation - changes is configurable. This pass extracts that threshold point, inserting - a BatchNormalization layer to execute the scaling. That BatchNormalization - layer is then expected to be fused into a BatchNormalizationQuantizedTanh - layer configured with the correct threshold. - ''' - - def match(self, node): - return node.class_name == 'TernaryTanh' and node.get_attr('threshold', None) != 0.5 - - def transform(self, model, node): - shape = node.get_input_variable().shape - scale = np.full(shape, 0.5 / node.get_attr('threshold', 0.5)) - bias = np.zeros_like(scale) - node.set_attr('threshold', 0.5) - - attrs = { - 'name': node.get_attr('name') + '_scale', - 'class_name': 'Alpha', - 'inputs': node.get_input_node().outputs, - 'outputs': node.inputs, - 'n_in': node.get_attr('n_in'), - 'n_filt': node.get_attr('n_filt', -1), - 'reuse_factor': node.get_attr('reuse_factor'), - 'scale_data': scale, - 'bias_data': bias, - 'trace': node.get_attr('trace', False), - } - - layer = model.make_node(ApplyAlpha, node.name + '_scale', attrs, node.inputs.copy()) - model.insert_node(layer, before=node) - return True diff --git a/hls4ml/hls4ml/model/optimizer/passes/stamp.py b/hls4ml/hls4ml/model/optimizer/passes/stamp.py deleted file mode 100644 index f29ae2a..0000000 --- a/hls4ml/hls4ml/model/optimizer/passes/stamp.py +++ /dev/null @@ -1,20 +0,0 @@ -from hls4ml.model.optimizer import ModelOptimizerPass - - -class MakeStamp(ModelOptimizerPass): - def __init__(self): - self.name = 'make_stamp' - - def transform(self, model): - def _make_stamp(): - """Create a unique identifier for the generated code. 
This identifier is used to - compile a unique library and link it with python.""" - from random import choice - from string import hexdigits - - length = 8 - return ''.join(choice(hexdigits) for _ in range(length)) - - model.config.config['Stamp'] = _make_stamp() - - return False # No model graph changes made diff --git a/hls4ml/hls4ml/model/optimizer/passes/transpose_opt.py b/hls4ml/hls4ml/model/optimizer/passes/transpose_opt.py deleted file mode 100644 index a5bff8a..0000000 --- a/hls4ml/hls4ml/model/optimizer/passes/transpose_opt.py +++ /dev/null @@ -1,21 +0,0 @@ -from hls4ml.model.layers import Transpose -from hls4ml.model.optimizer import OptimizerPass - - -class RemoveUselessTranspose(OptimizerPass): - def match(self, node): - is_match = isinstance(node, Transpose) and node.get_attr('perm') == [0] # Useless transpose - return is_match - - def transform(self, model, node): - """ - Remove a transpose layer if it doesn't do anything, i.e. a 1D input and perm = [0] - """ - print(f"Unnecessary {node.name} in the model, optimizing...") - if not node.get_output_nodes(): - print(f"WARNING: {node.name} is the output layer! No rewiring performed.") - model.remove_node(node, rewire=False) # Don't rewire if there is no output layer - else: - model.remove_node(node, rewire=True) - - return True diff --git a/hls4ml/hls4ml/model/profiling.py b/hls4ml/hls4ml/model/profiling.py deleted file mode 100644 index be76afc..0000000 --- a/hls4ml/hls4ml/model/profiling.py +++ /dev/null @@ -1,684 +0,0 @@ -import json -import os -import shutil -import uuid -from collections import defaultdict - -import matplotlib.pyplot as plt -import numpy as np -import pandas -import seaborn as sb - -from hls4ml.model.graph import ModelGraph -from hls4ml.model.layers import GRU, LSTM - -try: - import qkeras - from tensorflow import keras - - __tf_profiling_enabled__ = True -except ImportError: - __tf_profiling_enabled__ = False - -try: - import torch - - __torch_profiling_enabled__ = True -except ImportError: - __torch_profiling_enabled__ = False - - -def get_unoptimized_hlsmodel(model): - from hls4ml.converters import convert_from_config - - new_config = model.config.config.copy() - new_config['HLSConfig'] = json.loads(json.dumps(new_config['HLSConfig'])) - - new_output_dir = uuid.uuid4().hex - - while os.path.exists(new_output_dir): - new_output_dir = uuid.uuid4().hex - - if 'SkipOptimizers' in new_config['HLSConfig']: - del new_config['HLSConfig']['SkipOptimizers'] - - new_config['HLSConfig']['Optimizers'] = [] - new_config['OutputDir'] = new_output_dir - - return convert_from_config(new_config), new_output_dir - - -def array_to_summary(x, fmt='boxplot'): - if fmt == 'boxplot': - y = {'med': np.median(x), 'q1': np.percentile(x, 25), 'q3': np.percentile(x, 75), 'whislo': min(x), 'whishi': max(x)} - elif fmt == 'histogram': - # Power of 2 bins covering data range - high = np.ceil(np.log2(max(x))) + 1 - low = np.floor(np.log2(min(x))) - 1 - bits = np.arange(low, high, 1) - bins = 2**bits - h, b = np.histogram(x, bins=bins) - h = h * 1.0 / float(sum(h)) # normalize - y = {'h': h, 'b': np.log2(b)} - return y - - -def boxplot(data, fmt='longform'): - if fmt == 'longform': - f = plt.figure() # figsize=(3, 3)) - hue = 'layer' if 'layer' in data.keys() else None - vp = sb.boxplot(x='x', y='weight', hue=hue, data=data[data['x'] > 0], showfliers=False) - vp.set_yticklabels(vp.get_yticklabels(), rotation=45, ha='right') - if hue is not None: - vp.get_legend().remove() - vp.set_xscale('log', base=2) - return f - elif fmt == 'summary': - 
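- # 'summary' format: 'data' is a list of precomputed box statistics - # ('med', 'q1', 'q3', 'whislo', 'whishi' from array_to_summary), which are - # drawn directly with matplotlib's Axes.bxp below.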
from matplotlib.patches import Rectangle - - medianprops = dict(linestyle='-', color='k') - f, ax = plt.subplots(1, 1) - data.reverse() - colors = sb.color_palette("Blues", len(data)) - bp = ax.bxp(data, showfliers=False, vert=False, medianprops=medianprops) - # add colored boxes - for line, color in zip(bp['boxes'], colors): - x = line.get_xdata() - xl, xh = min(x), max(x) - y = line.get_ydata() - yl, yh = min(y), max(y) - rect = Rectangle((xl, yl), (xh - xl), (yh - yl), fill=True, color=color) - ax.add_patch(rect) - ax.set_yticklabels([d['weight'] for d in data]) - ax.set_xscale('log', base=2) - plt.xlabel('x') - return f - else: - return None - - -def histogram(data, fmt='longform'): - f = plt.figure() - from matplotlib.ticker import MaxNLocator - - n = len(data) if fmt == 'summary' else len(data['weight'].unique()) - colors = sb.color_palette("husl", n) - if fmt == 'longform': - for i, weight in enumerate(data['weight'].unique()): - y = array_to_summary(data[data['weight'] == weight]['x'], fmt='histogram') - plt.bar(y['b'][:-1], y['h'], width=1, fill=False, label=weight, edgecolor=colors[i]) - elif fmt == 'summary': - for i, weight in enumerate(data): - plt.bar(weight['b'][:-1], weight['h'], width=1, fill=False, label=weight['weight'], edgecolor=colors[i]) - - plt.gca().xaxis.set_major_locator(MaxNLocator(integer=True)) - plt.xlabel('log2(x)') - plt.ylabel('frequency') - plt.legend() - return f - - -plots = {'boxplot': boxplot, 'histogram': histogram} - - -def types_boxplot(data, fmt='longform'): - from matplotlib.patches import PathPatch, Rectangle - - ax = plt.gca() - _ = plt.gcf() - # Scale the data - data['low'] = 2.0 ** data['low'] - data['high'] = 2.0 ** data['high'] - - # Plot the custom precisions - ticks = np.array([tick.get_text() for tick in plt.yticks()[1]]) - # Get the coordinates of the boxes to place the markers - if fmt == 'longform': - # seaborn adjusts the box positions slightly in groups - boxes = [c.get_extents().inverse_transformed(ax.transData) for c in ax.get_children() if isinstance(c, PathPatch)] - ys = [(box.y0 + box.y1) / 2 for box in boxes] - ys = [(y, y) for y in ys] - elif fmt == 'summary': - ys = [(y, y) for y in plt.yticks()[0]] - for _irow, row in data[data['layer'] != 'model'].iterrows(): - if row['layer'] in ticks: - iy = np.argwhere(ticks == row['layer'])[0][0] # Determine which layer in the plot - rectangle = Rectangle( - (row['low'], ys[iy][0] - 0.4), row['high'] - row['low'], 0.8, fill=True, color='grey', alpha=0.2 - ) - ax.add_patch(rectangle) - - -def types_histogram(data, fmt='longform'): - ax = plt.gca() - layers = np.array(ax.get_legend_handles_labels()[1]) - colors = sb.color_palette("husl", len(layers)) - ylim = ax.get_ylim() - for _irow, row in data[data['layer'] != 'model'].iterrows(): - if row['layer'] in layers: - col = colors[np.argwhere(layers == row['layer'])[0][0]] - plt.plot((row['low'], row['low']), ylim, '--', color=col) - plt.plot((row['high'], row['high']), ylim, '--', color=col) - - -types_plots = {'boxplot': types_boxplot, 'histogram': types_histogram} - - -def ap_fixed_WIFS(dtype): - from hls4ml.backends import VivadoBackend - - dtype = VivadoBackend.convert_precision_string(dtype) - W, I, F, S = dtype.width, dtype.integer, dtype.fractional, dtype.signed - return W, I, F, S - - -def types_hlsmodel(model): - data = {'layer': [], 'low': [], 'high': []} - # Plot the default precision - default_precision = model.config.model_precision['default'] - W, I, F, S = ap_fixed_WIFS(default_precision) - data['layer'].append('model') - 
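- # 'low' and 'high' are log2 bounds of the representable range: the smallest - # step is 2**-F and the largest magnitude is 2**(I-1) for signed types (2**I - # for unsigned); types_boxplot scales these back via 2.0**low and 2.0**high.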
data['low'].append(-F) - data['high'].append(I - 1 if S else I) - - for layer in model.get_layers(): - if isinstance(layer, GRU) or isinstance(layer, LSTM): - suffix = ['w', 'rw', 'b', 'rb'] - else: - suffix = ['w', 'b'] - for iw, weight in enumerate(layer.get_weights()): - wname = f'{layer.name}/{suffix[iw]}' - T = weight.type - if T.name != 'model': - W, I, F, S = ap_fixed_WIFS(T.precision) - data['layer'].append(wname) - data['low'].append(-F) - data['high'].append(I - 1 if S else I) - data = pandas.DataFrame(data) - return data - - -def activation_types_hlsmodel(model): - data = {'layer': [], 'low': [], 'high': []} - # Get the default precision - default_precision = model.config.model_precision['default'] - W, I, F, S = ap_fixed_WIFS(default_precision) - data['layer'].append('model') - data['low'].append(-F) - data['high'].append(I - 1 if S else I) - for layer in model.get_layers(): - T = layer.get_output_variable().type.precision - W, I, F, S = ap_fixed_WIFS(T) - data['layer'].append(layer.name) - data['low'].append(-F) - data['high'].append(I - 1 if S else I) - data = pandas.DataFrame(data) - return data - - -def weights_hlsmodel(model, fmt='longform', plot='boxplot'): - if fmt == 'longform': - data = {'x': [], 'layer': [], 'weight': []} - elif fmt == 'summary': - data = [] - - for layer in model.get_layers(): - if isinstance(layer, GRU) or isinstance(layer, LSTM): - suffix = ['w', 'rw', 'b', 'rb'] - else: - suffix = ['w', 'b'] - name = layer.name - for iw, weight in enumerate(layer.get_weights()): - label = f'{name}/{suffix[iw]}' - w = weight.data.flatten() - w = abs(w[w != 0]) - n = len(w) - if n == 0: - print(f'Weights for {name} are only zeros, ignoring.') - break - if fmt == 'longform': - data['x'].extend(w.tolist()) - data['layer'].extend([name] * len(w)) - data['weight'].extend([label] * len(w)) - elif fmt == 'summary': - data.append(array_to_summary(w, fmt=plot)) - data[-1]['layer'] = name - data[-1]['weight'] = label - - if fmt == 'longform': - data = pandas.DataFrame(data) - return data - - -def _keras_batchnorm(layer): - weights = layer.get_weights() - epsilon = layer.epsilon - - gamma = weights[0] - beta = weights[1] - mean = weights[2] - var = weights[3] - - scale = gamma / np.sqrt(var + epsilon) - bias = beta - gamma * mean / np.sqrt(var + epsilon) - - return [scale, bias], ['s', 'b'] - - -def _keras_layer(layer): - return layer.get_weights(), ['w', 'b'] - - -keras_process_layer_map = defaultdict( - lambda: _keras_layer, {'BatchNormalization': _keras_batchnorm, 'QBatchNormalization': _keras_batchnorm} -) - - -def activations_hlsmodel(model, X, fmt='summary', plot='boxplot'): - if fmt == 'longform': - raise NotImplementedError - elif fmt == 'summary': - data = [] - - _, trace = model.trace(np.ascontiguousarray(X)) - - if len(trace) == 0: - raise RuntimeError("ModelGraph must have tracing on for at least 1 layer (this can be set in its config)") - - for layer in trace.keys(): - print(f" {layer}") - - if fmt == 'summary': - y = trace[layer].flatten() - y = abs(y[y != 0]) - - if len(y) == 0: - print(f'Activations for {layer} are only zeros, ignoring.') - continue - - data.append(array_to_summary(y, fmt=plot)) - data[-1]['weight'] = layer - - return data - - -def weights_keras(model, fmt='longform', plot='boxplot'): - if fmt == 'longform': - data = {'x': [], 'layer': [], 'weight': []} - elif fmt == 'summary': - data = [] - for layer in model.layers: - name = layer.name - weights, suffix = keras_process_layer_map[type(layer).__name__](layer) - - for i, w in 
enumerate(weights): - label = f'{name}/{suffix[i]}' - w = w.flatten() - w = abs(w[w != 0]) - n = len(w) - if n == 0: - print(f'Weights for {name} are only zeros, ignoring.') - break - if fmt == 'longform': - data['x'].extend(w.tolist()) - data['layer'].extend([name] * n) - data['weight'].extend([label] * n) - elif fmt == 'summary': - data.append(array_to_summary(w, fmt=plot)) - data[-1]['layer'] = name - data[-1]['weight'] = label - - if fmt == 'longform': - data = pandas.DataFrame(data) - return data - - -def activations_keras(model, X, fmt='longform', plot='boxplot'): - # test layer by layer on data - if fmt == 'longform': - # return long form pandas dataframe for - # seaborn boxplot - data = {'x': [], 'weight': []} - elif fmt == 'summary': - # return summary statistics for matplotlib.axes.Axes.bxp - # or histogram bin edges and heights - data = [] - outputs = _get_outputs( - [layer for layer in model.layers if not isinstance(layer, keras.layers.InputLayer)], X, model.input - ) - for layer_name, y in outputs.items(): - print(f" {layer_name}") - y = y.flatten() - y = abs(y[y != 0]) - if len(y) == 0: - print(f'Activations for {layer_name} are only zeros, ignoring.') - continue - if fmt == 'longform': - data['x'].extend(y.tolist()) - data['weight'].extend([layer_name for i in range(len(y))]) - elif fmt == 'summary': - data.append(array_to_summary(y, fmt=plot)) - data[-1]['weight'] = layer_name - - if fmt == 'longform': - data = pandas.DataFrame(data) - return data - - -def weights_torch(model, fmt='longform', plot='boxplot'): - suffix = ['w', 'b'] - if fmt == 'longform': - data = {'x': [], 'layer': [], 'weight': []} - elif fmt == 'summary': - data = [] - for layer in model.children(): - if isinstance(layer, torch.nn.Linear): - name = layer.__class__.__name__ - weights = list(layer.parameters()) - for i, w in enumerate(weights): - label = f'{name}/{suffix[i]}' - w = weights[i].detach().numpy() - w = w.flatten() - w = abs(w[w != 0]) - n = len(w) - if n == 0: - print(f'Weights for {name} are only zeros, ignoring.') - break - if fmt == 'longform': - data['x'].extend(w.tolist()) - data['layer'].extend([name] * n) - data['weight'].extend([label] * n) - elif fmt == 'summary': - data.append(array_to_summary(w, fmt=plot)) - data[-1]['layer'] = name - data[-1]['weight'] = label - - if fmt == 'longform': - data = pandas.DataFrame(data) - return data - - -def activations_torch(model, X, fmt='longform', plot='boxplot'): - X = torch.Tensor(X) - if fmt == 'longform': - data = {'x': [], 'weight': []} - elif fmt == 'summary': - data = [] - - partial_model = torch.nn.Sequential - layers = [] - for layer in model.children(): - lname = layer.__class__.__name__ - layers.append(layer) - pm = partial_model(*layers) - print(f" {lname}") - y = pm(X).flatten().detach().numpy() - y = abs(y[y != 0]) - if len(y) == 0: - print(f'Activations for {lname} are only zeros, ignoring.') - continue - if fmt == 'longform': - data['x'].extend(y.tolist()) - data['weight'].extend([lname for _ in range(len(y))]) - elif fmt == 'summary': - data.append(array_to_summary(y, fmt=plot)) - data[-1]['weight'] = lname - - if fmt == 'longform': - data = pandas.DataFrame(data) - return data - - -def numerical(model=None, hls_model=None, X=None, plot='boxplot'): - """Perform numerical profiling of a model. - - Args: - model (optional): Keras or PyTorch model. Defaults to None. - hls_model (ModelGraph, optional): The ModelGraph to profile. Defaults to None. - X (ndarray, optional): Test data on which to evaluate the model to profile activations. 
- Must be formatted suitably for the ``model.predict(X)``. Defaults to None. - plot (str, optional): The type of plot to produce. Options are: 'boxplot' (default), 'violinplot', 'histogram', - 'FacetGrid'. Defaults to 'boxplot'. - - Returns: - tuple: The quadruple of produced figures. First weights and biases - for the pre- and post-optimization models respectively, - then activations for the pre- and post-optimization models - respectively. (Optimizations are applied to an ModelGraph by hls4ml, - a post-optimization ModelGraph is a final model). - """ - wp, wph, ap, aph = None, None, None, None - - hls_model_present = hls_model is not None and isinstance(hls_model, ModelGraph) - model_present = model is not None - - if hls_model_present: - before = " (before optimization)" - after = " (final / after optimization)" - hls_model_unoptimized, tmp_output_dir = get_unoptimized_hlsmodel(hls_model) - else: - before = "" - after = "" - hls_model_unoptimized, tmp_output_dir = None, None - - print("Profiling weights" + before) - data = None - - if hls_model_present: - data = weights_hlsmodel(hls_model_unoptimized, fmt='summary', plot=plot) - elif model_present: - if __tf_profiling_enabled__ and isinstance(model, keras.Model): - data = weights_keras(model, fmt='summary', plot=plot) - elif __torch_profiling_enabled__ and isinstance(model, torch.nn.Sequential): - data = weights_torch(model, fmt='summary', plot=plot) - - if data is None: - print("Only keras, PyTorch (Sequential) and ModelGraph models " + "can currently be profiled") - - if hls_model_present and os.path.exists(tmp_output_dir): - shutil.rmtree(tmp_output_dir) - - return wp, wph, ap, aph - - wp = plots[plot](data, fmt='summary') # weight plot - - if hls_model_present and plot in types_plots: - t_data = types_hlsmodel(hls_model_unoptimized) - types_plots[plot](t_data, fmt='summary') - - plt.title("Distribution of (non-zero) weights" + before) - plt.tight_layout() - - if hls_model_present: - print("Profiling weights" + after) - - data = weights_hlsmodel(hls_model, fmt='summary', plot=plot) - wph = plots[plot](data, fmt='summary') # weight plot - - if plot in types_plots: - t_data = types_hlsmodel(hls_model) - types_plots[plot](t_data, fmt='summary') - - plt.title("Distribution of (non-zero) weights" + after) - plt.tight_layout() - - if X is not None: - print("Profiling activations" + before) - data = None - if __tf_profiling_enabled__ and isinstance(model, keras.Model): - data = activations_keras(model, X, fmt='summary', plot=plot) - elif __torch_profiling_enabled__ and isinstance(model, torch.nn.Sequential): - data = activations_torch(model, X, fmt='summary', plot=plot) - - if data is not None: - ap = plots[plot](data, fmt='summary') # activation plot - if hls_model_present and plot in types_plots: - t_data = activation_types_hlsmodel(hls_model_unoptimized) - types_plots[plot](t_data, fmt='summary') - plt.title("Distribution of (non-zero) activations" + before) - plt.tight_layout() - - if hls_model_present: - print("Profiling activations" + after) - data = activations_hlsmodel(hls_model, X, fmt='summary', plot=plot) - aph = plots[plot](data, fmt='summary') - - t_data = activation_types_hlsmodel(hls_model) - types_plots[plot](t_data, fmt='summary') - - plt.title("Distribution of (non-zero) activations (final / after optimization)") - plt.tight_layout() - - if hls_model_present and os.path.exists(tmp_output_dir): - shutil.rmtree(tmp_output_dir) - - return wp, wph, ap, aph - - -######### -# COMPARE OUTPUT IMPLEMENTATION -######### -def 
_is_ignored_layer(layer): - """Some layers need to be ignored during inference""" - if isinstance(layer, (keras.layers.InputLayer, keras.layers.Dropout)): - return True - return False - - -def _get_outputs(layers, X, model_input): - """Get outputs of intermediate layers""" - partial_models = keras.models.Model(inputs=model_input, outputs=[layer.output for layer in layers]) - y = partial_models.predict(X) - return y - - -def get_ymodel_keras(keras_model, X): - """Calculate each layer's output and put them into a dictionary. - - Args: - keras_model (keras.Model): A Keras model. - X (ndarray): Test data on which to evaluate the model to profile activations. - Must be formatted suitably for ``model.predict(X)``. - - Returns: - dict: A dictionary in the form {"layer_name": output array of the layer}. - """ - ymodel = {} - traced_layers = [] - layer_names = [] - for layer in keras_model.layers: - if _is_ignored_layer(layer): - continue - # If the layer has activation integrated then separate them - # Note that if the layer is a standalone activation layer then skip this - name = layer.name - if ( - hasattr(layer, "activation") - and layer.activation.__name__ != "linear" - and not isinstance(layer, (keras.layers.Activation, qkeras.qlayers.QActivation)) - ): - tmp_activation = layer.activation - layer.activation = None - ymodel.update({layer.name: _get_outputs([layer], X, keras_model.input)}) - layer.activation = tmp_activation - name = layer.name + f"_{tmp_activation.__name__}" - traced_layers.append(layer) - layer_names.append(name) - outputs = _get_outputs(traced_layers, X, keras_model.input) - for name, output in zip(layer_names, outputs): - ymodel[name] = output - print("Done taking outputs for Keras model.") - return ymodel - - -def _norm_diff(ymodel, ysim): - """Calculate the square root of the sum of the squares of the differences""" - diff = {} - - for key in list(ysim.keys()): - diff[key] = np.linalg.norm(ysim[key] - ymodel[key]) - - # ---Bar Plot--- - f, ax = plt.subplots() - plt.bar(list(diff.keys()), list(diff.values())) - plt.title("layer-by-layer output differences") - ax.set_ylabel('Norm of difference vector') - plt.xticks(rotation=90) - plt.tight_layout() - return f - - -def _dist_diff(ymodel, ysim): - """ - Calculate the normalized distribution of the differences of the elements - of the output vectors. - If the difference >= the original value then the normalized difference is set to 1, - meaning "very different". - If the difference < the original value then the normalized difference is difference/original. 
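- Equivalently, each element is min(|y_model - y_sim| / |y_model|, 1), with 0/0 treated as 0.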
- """ - - diff = {} - - for key in list(ysim.keys()): - flattened_ysim = ysim[key].flatten() - flattened_ymodel = np.array(ymodel[key]).flatten() - - diff[key] = np.absolute(flattened_ymodel - flattened_ysim) / np.linalg.norm(flattened_ymodel - flattened_ysim) - diff_vector = np.absolute(flattened_ymodel - flattened_ysim) - abs_ymodel = np.absolute(flattened_ymodel) - - normalized_diff = np.zeros(diff_vector.shape) - normalized_diff[(diff_vector >= abs_ymodel) & (abs_ymodel > 0) & (diff_vector > 0)] = 1 - - # Fill out the rest - index = diff_vector < abs_ymodel - normalized_diff[index] = diff_vector[index] / abs_ymodel[index] - - diff[key] = normalized_diff - - # ---Box Plot--- - f, ax = plt.subplots() - pos = np.array(range(len(list(diff.values())))) + 1 - ax.boxplot(list(diff.values()), sym='k+', positions=pos) - - # --formatting - plt.title("Layer-by-layer distribution of output differences") - ax.set_xticklabels(list(diff.keys())) - ax.set_ylabel('Normalized difference') - ax.set_ylabel('Percent difference.') - plt.xticks(rotation=90) - plt.tight_layout() - - return f - - -def compare(keras_model, hls_model, X, plot_type="dist_diff"): - """Compare each layer's output in keras and hls model. Note that the hls_model should not be compiled before using this. - - Args: - keras_model: Original keras model. - hls_model (ModelGraph): Converted ModelGraph, with "Trace:True" in the configuration file. - X (ndarray): Input tensor for the model. - plot_type (str, optional): Different methods to visualize the y_model and y_sim differences. - Possible options include: - - 'norm_diff':: square root of the sum of the squares of the differences between each output vectors. - - 'dist_diff':: The normalized distribution of the differences of the elements between two output vectors. - Defaults to "dist_diff". - - Returns: - matplotlib figure: Plot object of the histogram depicting the difference in each layer's output. - """ - - # Take in output from both models - # Note that each y is a dictionary with structure {"layer_name": flattened ouput array} - ymodel = get_ymodel_keras(keras_model, X) - _, ysim = hls_model.trace(X) - - print("Plotting difference...") - f = plt.figure() - if plot_type == "norm_diff": - f = _norm_diff(ymodel, ysim) - elif plot_type == "dist_diff": - f = _dist_diff(ymodel, ysim) - - return f diff --git a/hls4ml/hls4ml/model/types.py b/hls4ml/hls4ml/model/types.py deleted file mode 100644 index 3c1f4d5..0000000 --- a/hls4ml/hls4ml/model/types.py +++ /dev/null @@ -1,689 +0,0 @@ -""" -This module contains the definitions of classes hls4ml uses to represent data types. The data types are equivalents of -C++/HLS data types. The basic type(``PrecisionType``) is defined as having a specified width in bits (it's 'precision'). -The Precision types are given names for convenience (``NamedType``). Named types are the building blocks of -higher-dimensional tensors, which are defined as arrays or FIFO streams in the generated code. -""" - -from enum import Enum - -import numpy as np -import tensorflow as tf -from qkeras.quantizers import get_quantizer - -# region Quantizer definition - - -class Quantizer: - """ - Base class for representing quantizers in hls4ml. - - Subclasses of ``Quantizer`` are expected to wrap the quantizers of upstream tools (e.g., QKeras). - - Args: - bits (int): Total number of bits used by the quantizer. - hls_type (NamedType): The hls4ml type used by the quantizer. 
- """ - - def __init__(self, bits, hls_type): - self.bits = bits - self.hls_type = hls_type - - def __call__(self, data): - raise NotImplementedError - - -class BinaryQuantizer(Quantizer): - """Quantizer that quantizes to 0 and 1 (``bits=1``) or -1 and 1 (``bits==2``). - - Args: - bits (int, optional): Number of bits used by the quantizer. Defaults to 2. - - Raises: - Exception: Raised if ``bits>2`` - """ - - def __init__(self, bits=2): - if bits == 1: - hls_type = XnorPrecisionType() - elif bits == 2: - hls_type = IntegerPrecisionType(width=2) - else: - raise Exception(f'BinaryQuantizer suppots 1 or 2 bits, but called with bits={bits}') - super().__init__(bits, hls_type) - - def __call__(self, data): - zeros = np.zeros_like(data) - ones = np.ones_like(data) - quant_data = data - if self.bits == 1: - quant_data = np.where(data > 0, ones, zeros).astype('int') - if self.bits == 2: - quant_data = np.where(data > 0, ones, -ones) - return quant_data - - -class TernaryQuantizer(Quantizer): - """Quantizer that quantizes to -1, 0 and 1.""" - - def __init__(self): - super().__init__(2, IntegerPrecisionType(width=2)) - - def __call__(self, data): - zeros = np.zeros_like(data) - ones = np.ones_like(data) - return np.where(data > 0.5, ones, np.where(data <= -0.5, -ones, zeros)) - - -class QKerasQuantizer(Quantizer): - """Wrapper around QKeras quantizers. - - Args: - config (dict): Config of the QKeras quantizer to wrap. - """ - - def __init__(self, config): - self.quantizer_fn = get_quantizer(config) - self.alpha = config['config'].get('alpha', None) - if config['class_name'] == 'quantized_bits': - self.bits = config['config']['bits'] - self.hls_type = self._get_type(config) - # ! includes stochastic_ternary - elif 'ternary' in config['class_name']: - self.bits = 2 - self.hls_type = IntegerPrecisionType(width=2, signed=True) - # ! includes stochastic_binary - elif 'binary' in config['class_name']: - self.bits = 1 - self.hls_type = XnorPrecisionType() - else: - print("Unsupported quantizer: " + config['class_name']) - self.bits = 16 - self.hls_type = FixedPrecisionType(width=16, integer=6, signed=True) - - def __call__(self, data): - tf_data = tf.convert_to_tensor(data) - return self.quantizer_fn(tf_data).numpy() - # return self.quantizer_fn(data) - - def _get_type(self, quantizer_config): - width = quantizer_config['config']['bits'] - integer = quantizer_config['config'].get('integer', 0) - if quantizer_config['class_name'] == 'quantized_po2': - return ExponentPrecisionType(width=width, signed=True) - if width == integer: - if width == 1: - return XnorPrecisionType() - else: - return IntegerPrecisionType(width=width, signed=True) - else: - return FixedPrecisionType(width=width, integer=integer + 1, signed=True) - - -class QKerasBinaryQuantizer(Quantizer): - """Wrapper around QKeras binary quantizer. - - Args: - config (dict): Config of the QKeras quantizer to wrap. 
- """ - - def __init__(self, config, xnor=False): - self.bits = 1 if xnor else 2 - self.hls_type = XnorPrecisionType() if xnor else IntegerPrecisionType(width=2, signed=True) - self.alpha = config['config']['alpha'] - # Use the QKeras quantizer to handle any stochastic / alpha stuff - self.quantizer_fn = get_quantizer(config) - # Then we use our BinaryQuantizer to convert to '0,1' format - self.binary_quantizer = BinaryQuantizer(1) if xnor else BinaryQuantizer(2) - - def __call__(self, data): - x = tf.convert_to_tensor(data) - y = self.quantizer_fn(x).numpy() - return self.binary_quantizer(y) - - -class QKerasPO2Quantizer(Quantizer): - """Wrapper around QKeras power-of-2 quantizers. - - Args: - config (dict): Config of the QKeras quantizer to wrap. - """ - - def __init__(self, config): - self.bits = config['config']['bits'] - self.quantizer_fn = get_quantizer(config) - self.hls_type = ExponentPrecisionType(width=self.bits, signed=True) - - def __call__(self, data): - # Weights are quantized to nearest power of two - x = tf.convert_to_tensor(data) - y = self.quantizer_fn(x) - if hasattr(y, 'numpy'): - y = y.numpy() - return y - - -# endregion - -# region Precision types - - -class RoundingMode(Enum): - TRN = 1 - TRN_ZERO = 2 - RND = 3 - RND_ZERO = 4 - RND_INF = 5 - RND_MIN_INF = 6 - RND_CONV = 7 - - def __str__(self): - return self.name - - @classmethod - def from_string(cls, mode): - mode = mode.strip().replace('AP_', '').upper() - mode = mode.strip().replace('AC_', '').upper() - - return cls[mode] - - -class SaturationMode(Enum): - WRAP = 1 - SAT = 2 - SAT_ZERO = 3 - SAT_SYM = 4 - - def __str__(self): - return self.name - - @classmethod - def from_string(cls, mode): - mode = mode.strip().replace('AP_', '').upper() - mode = mode.strip().replace('AC_', '').upper() - - return cls[mode] - - -class PrecisionType: - """ - Base class representing a precision type of specified width. - - Subclasses of this provide concrete implementations of arbitrary precision integer and fixed-point types. - - Args: - width (int): Number of bits used by the precision type. - signed (bool): Signed or unsigned type. - """ - - def __init__(self, width, signed): - self.width = width - self.signed = signed - - def __eq__(self, other): - eq = self.width == other.width - eq = eq and self.signed == other.signed - - -class IntegerPrecisionType(PrecisionType): - """Arbitrary precision integer data type. - - This type is equivalent to ap_(u)int and ac_int HLS types. - - Args: - width (int, optional): Number of bits used. Defaults to 16. - signed (bool, optional): Signed or unsigned type. Defaults to ``True``. - """ - - def __init__(self, width=16, signed=True): - super().__init__(width=width, signed=signed) - self.integer = width - self.fractional = 0 - - def __str__(self): - typestring = '{signed}int<{width}>'.format(signed='u' if not self.signed else '', width=self.width) - return typestring - - def __eq__(self, other): - eq = self.width == other.width - eq = eq and self.signed == other.signed - # These are probably unnecessary - eq = eq and self.integer == other.integer - eq = eq and self.fractional == other.fractional - return eq - - -class FixedPrecisionType(PrecisionType): - """Arbitrary precision fixed-point data type. - - This type is equivalent to ap_(u)fixed and ac_fixed HLS types. - - Args: - width (int, optional): Total number of bits used. Defaults to 16. - integer (int, optional): Number of integer bits left of the decimal point. Defaults to 6. - signed (bool, optional): Signed or unsigned type. 
Defaults to ``True``. - rounding_mode (RoundingMode, optional): Quantization mode. Defaults to ``None`` (TRN). - saturation_mode (SaturationMode, optional): Overflow mode. Defaults to ``None`` (WRAP). - saturation_bits (int, optional): The number of saturation bits. Defaults to ``None``. - """ - - def __init__(self, width=16, integer=6, signed=True, rounding_mode=None, saturation_mode=None, saturation_bits=None): - super().__init__(width=width, signed=signed) - self.integer = integer - self.fractional = width - integer - self.rounding_mode = rounding_mode - self.saturation_mode = saturation_mode - self.saturation_bits = saturation_bits - - @property - def rounding_mode(self): - return self._rounding_mode - - @rounding_mode.setter - def rounding_mode(self, mode): - if isinstance(mode, str): - self._rounding_mode = RoundingMode.from_string(mode) - else: - self._rounding_mode = mode - - @property - def saturation_mode(self): - return self._saturation_mode - - @saturation_mode.setter - def saturation_mode(self, mode): - if isinstance(mode, str): - self._saturation_mode = SaturationMode.from_string(mode) - else: - self._saturation_mode = mode - - def __str__(self): - args = [self.width, self.integer, self.rounding_mode, self.saturation_mode, self.saturation_bits] - args = ','.join([str(arg) for arg in args if arg is not None]) - typestring = '{signed}fixed<{args}>'.format(signed='u' if not self.signed else '', args=args) - return typestring - - def __eq__(self, other): - eq = self.width == other.width - eq = eq and self.integer == other.integer - eq = eq and self.fractional == other.fractional - eq = eq and self.signed == other.signed - eq = eq and self.rounding_mode == other.rounding_mode - eq = eq and self.saturation_mode == other.saturation_mode - eq = eq and self.saturation_bits == other.saturation_bits - return eq - - -class XnorPrecisionType(PrecisionType): - """ - Convenience class to differentiate 'regular' integers from BNN Xnor ones - """ - - def __init__(self): - super().__init__(width=1, signed=False) - self.integer = 1 - - def __str__(self): - typestring = 'uint<1>' - return typestring - - -class ExponentPrecisionType(PrecisionType): - """ - Convenience class to differentiate 'regular' integers from those which represent exponents, - for QKeras po2 quantizers, for example. - """ - - def __init__(self, width=16, signed=True): - super().__init__(width=width, signed=signed) - - def __str__(self): - typestring = '{signed}int<{width}>'.format(signed='u' if not self.signed else '', width=self.width) - return typestring - - -def find_minimum_width(data, signed=True): - """ - Helper function to find the minimum integer width to express all entries in the data array - without saturation / overflow. - - Args: - data (ndarray): Data array. - signed (bool, optional): Signed or unsigned type. Defaults to ``True.`` - - Returns: - int: Minimum integer width required. - """ - maxdata = np.amax(np.abs(data)) - if maxdata == 0.0: - # fringe case (amax(abs(data)) == 0 -> data is uniformly zero) - return 1 - - log2max = np.log2(maxdata) - - iwidth = max(0, int(np.ceil(log2max))) - if iwidth == int(np.floor(log2max)): # is a power-of-two integer -> need one extra bit - iwidth += 1 - - if signed: - # add the sign bit - iwidth += 1 - - return iwidth - - -# endregion - -# region Data type definitions - - -class NamedType: - """Class representing a named type. - - For convenience, hls4ml gives names to data types used in the generated HLS. 
This is equivalent to defining types - in C/C++ like:: - - typedef precision name; - - Args: - name (str): Name given to the type (used in generated C++/HLS). - precision (PrecisionType): Precision data type. - """ - - def __init__(self, name, precision, **kwargs): - self.name = name.format(**kwargs) - self.precision = precision - - -class CompressedType(NamedType): - """Class representing a compressed type in COO format. - - Args: - name (str): Name given to the type (used in generated C++/HLS). - precision (PrecisionType): Precision data type. - index_precision (PrecisionType): Precision of the index of COO format. - """ - - def __init__(self, name, precision, index_precision, **kwargs): - if not name.startswith('compressed_'): - name = 'compressed_' + name - super().__init__(name, precision, **kwargs) - self.index_precision = index_precision - - -class ExponentType(NamedType): - """Special type used to mark an exponent type, used by the power-of-2 quantizers. - - Args: - name (str): Name given to the type (used in generated C++/HLS). - precision (PrecisionType): Precision data type. - """ - - def __init__(self, name, precision, **kwargs): - if not name.startswith('exponent_'): - name = 'exponent_' + name - super().__init__(name, precision, **kwargs) - self.sign = XnorPrecisionType() - - -class PackedType(NamedType): - """A type where multiple elements of the tensor are concatenated and stored as a single element, used by the streaming - implementations to store elements of the last dimension of a tensor as a single element. - - The tensor of shape ``(H, W, C)`` will be represented as a FIFO stream having ``H * W / n_pack`` elements where each - element will be a concatenation of ``n_elem * n_pack`` elements of the original tensor. - - Args: - name (str): Name given to the type (used in generated C++/HLS). - precision (PrecisionType): Precision data type. - n_elem (int): Number of packed elements. - n_pack (int): _description_ - """ - - def __init__(self, name, precision, n_elem, n_pack, **kwargs): - super().__init__(name, precision, **kwargs) - self.n_elem = n_elem - if n_pack < 0: - self.n_pack = -n_pack - self.unpack = True - else: - self.n_pack = n_pack - self.unpack = False - - -# endregion - -# region Variables - - -class Variable: - """Base class representing a named multidimensional tensor. - - Args: - var_name (str): Name of the variable in the generated C++/HLS. - atype (NamedType): Data type used by the tensor. - """ - - def __init__(self, var_name, atype, **kwargs): - self.name = var_name.format(**kwargs) - self.type = atype - - -class TensorVariable(Variable): - """Class representing the output of a layer (like an activation tensor). - - Args: - shape (list, tuple): Shape of the tensor. - dim_names (list, tuple): Names given to the dimensions of the tensor. - var_name (str, optional): Name of the variable in the generated C++/HLS. Defaults to ``layer{index}``. - type_name (str, optional): Name of the data type used (in NamedType). Defaults to ``layer{index}_t``. - precision (PrecisionType, optional): Precision data type. Defaults to ``None``. 
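- - Example (illustrative; the dim_names follow the ``N_INPUT`` convention used by the channels_last converter):: - - TensorVariable([28, 28, 3], ['N_INPUT_1_1', 'N_INPUT_2_1', 'N_INPUT_3_1'], precision=FixedPrecisionType(), index=1)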
- """ - - def __init__(self, shape, dim_names, var_name='layer{index}', type_name='layer{index}_t', precision=None, **kwargs): - super().__init__(var_name, NamedType(type_name, precision, **kwargs), **kwargs) - self.shape = shape - self.dim_names = dim_names - - def get_shape(self): - return zip(self.dim_names, self.shape) - - def size(self): - nelem = 1 - for dim in self.shape: - nelem *= dim - return nelem - - def size_cpp(self): - # TODO get rid of size_cpp() (and dim_names) - return '*'.join([str(k) for k in self.dim_names]) - - -class InplaceTensorVariable(TensorVariable): - """A ``TensorVariable`` that is just a link to another ``TensorVariable``. - - Args: - tv (TensorVariable): The tensor variable to link. - input_var (_type_): The input variable that should be should link to. - """ - - def __init__(self, tv, input_var): - self.__dict__.update(tv.__dict__) - self.type = input_var.type - self.input_var = input_var - - -class WeightVariable(Variable): - """Class representing a tensor containing the weights of a layer. - - Precision type of the instance can be modified with the ``update_precision`` method. - - Args: - var_name (str, optional): Name of the variable in the generated C++/HLS. - type_name (str, optional): Name of the data type used (in NamedType). - precision (PrecisionType, optional): Precision data type. - data (ndarray): The data array. - quantizer (_type_, optional): Quantizer to apply to the data array. Defaults to ``None``. - """ - - def __init__(self, var_name, type_name, precision, data, quantizer=None, **kwargs): - super().__init__(var_name, NamedType(type_name, precision, **kwargs), **kwargs) - self.data = data - self.nzeros = -1 - self.shape = list(self.data.shape) - self.data_length = np.prod(self.data.shape) - self.nonzeros = np.count_nonzero(self.data) - self.nzeros = self.data_length - self.nonzeros - self.min = np.min(self.data) - self.max = np.max(self.data) - self._iterator = None - self.update_precision(precision) - self.quantizer = quantizer - - def __iter__(self): - self._iterator = np.nditer(self.data, order='C') - return self - - def __next__(self): - if not self._iterator.finished: - value = self._iterator[0] - self._iterator.iternext() - return self.precision_fmt.format(value) - else: - raise StopIteration - - next = __next__ - - def update_precision(self, new_precision): - self.type.precision = new_precision - if isinstance(new_precision, (IntegerPrecisionType, XnorPrecisionType, ExponentPrecisionType)): - self.precision_fmt = '{:.0f}' - elif isinstance(new_precision, FixedPrecisionType): - if new_precision.fractional > 0: - # Use str to represent the float with digits, get the length - # to right of decimal point - lsb = 2**-new_precision.fractional - decimal_spaces = len(str(lsb).split('.')[1]) - self.precision_fmt = f'{{:.{decimal_spaces}f}}' - else: - self.precision_fmt = '{:.0f}' - else: - raise RuntimeError(f"Unexpected new precision type: {new_precision}") - - -class CompressedWeightVariable(WeightVariable): - """Class representing a tensor containing the weights of a layer represented in the COO format. - - Args: - var_name (str, optional): Name of the variable in the generated C++/HLS. - type_name (str, optional): Name of the data type used (in NamedType). - precision (PrecisionType, optional): Precision data type. - data (ndarray): The data array. - reuse_factor (_type_): The reuse factor used to pad the data array. - quantizer (_type_, optional): Quantizer to apply to the data array. Defaults to ``None``. 
- """ - - def __init__(self, var_name, type_name, precision, data, reuse_factor, quantizer=None, **kwargs): - super().__init__(var_name, type_name, precision, data, quantizer=quantizer, **kwargs) - self.extra_zeros = 0 - self.data_length = np.prod(data.shape) - self.nzeros - while self.data_length % reuse_factor != 0: - self.extra_zeros += 1 - self.data_length += 1 - self.nonzeros = np.prod(data.shape) - self.nzeros + self.extra_zeros - - # Compress the array - weights = [] - extra_nzero_cnt = self.extra_zeros - it = np.nditer(data, order='C', flags=['multi_index']) - max_idx = 0 - while not it.finished: - val = it[0] - if not (val == 0 and extra_nzero_cnt < 1): - if val == 0: - extra_nzero_cnt -= 1 - if it.multi_index[0] > max_idx: - max_idx = it.multi_index[0] - if it.multi_index[1] > max_idx: - max_idx = it.multi_index[1] - weights.append([it.multi_index[1], it.multi_index[0], val]) - it.iternext() - weights.sort() - - index_precision = 32 - if max_idx > 0: - index_precision = int(np.log2(max_idx) + 1) - self.type = CompressedType(type_name, precision, IntegerPrecisionType(width=index_precision, signed=False), **kwargs) - - self.data = weights - - def __iter__(self): - self._iterator = iter(self.data) - return self - - def __next__(self): - value = next(self._iterator) - value_fmt = self.precision_fmt.format(value[2]) - return f'{{{value[1]}, {value[0]}, {value_fmt}}}' - - next = __next__ - - -class ExponentWeightVariable(WeightVariable): - """WeightVariable for Exponent aka power-of-2 data. The data should already by quantized by the quantizer. - - Args: - var_name (str, optional): Name of the variable in the generated C++/HLS. - type_name (str, optional): Name of the data type used (in NamedType). - precision (PrecisionType, optional): Precision data type. - data (ndarray): The data array. - quantizer (_type_, optional): Quantizer to apply to the data array. Defaults to ``None``. - """ - - def __init__(self, var_name, type_name, precision, data, quantizer=None, **kwargs): - super().__init__(var_name, type_name, precision, data, quantizer, **kwargs) - self.type = ExponentType(type_name, precision, **kwargs) - self.shape = list(self.data.shape[:-1]) - - def _format(self): - y = self.data - # Use an XnorBinary-like representation for the sign - sign = np.where(y < 0, np.zeros_like(y), np.ones_like(y)) - # Take the logarithm, since this is what we will write to the header - # for the optimized product using shifts - y = (np.log2(np.abs(y)) / np.log2(2.0)).astype('int') - return np.stack((sign, y), axis=-1) - - def __iter__(self): - data = self._format() - self._iterator = iter(data.reshape((np.product(data.shape[:-1]), 2))) - return self - - def __next__(self): - value = next(self._iterator) - value_fmt = self.precision_fmt.format(value[1]) - return f'{{{value[0]}, {value_fmt}}}' - - next = __next__ - - -# endregion - -# region Custom source - - -class Source: - """Class representing generated source code blocks. - - Args: - code (str): Generated source code. 
- """ - - def __init__(self, code): - self.code = code - - def __str__(self): - return str(self.code) - - -# endregion diff --git a/hls4ml/hls4ml/report/__init__.py b/hls4ml/hls4ml/report/__init__.py deleted file mode 100644 index b73558f..0000000 --- a/hls4ml/hls4ml/report/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -from hls4ml.report.quartus_report import parse_quartus_report # noqa: F401 -from hls4ml.report.quartus_report import read_quartus_report # noqa: F401 -from hls4ml.report.vivado_report import parse_vivado_report # noqa: F401 -from hls4ml.report.vivado_report import print_vivado_report # noqa: F401 -from hls4ml.report.vivado_report import read_vivado_report # noqa: F401 diff --git a/hls4ml/hls4ml/report/quartus_report.py b/hls4ml/hls4ml/report/quartus_report.py deleted file mode 100644 index c337e5d..0000000 --- a/hls4ml/hls4ml/report/quartus_report.py +++ /dev/null @@ -1,258 +0,0 @@ -import os -import webbrowser -from ast import literal_eval - -from calmjs.parse import asttypes, es5 -from tabulate import tabulate - - -def parse_quartus_report(hls_dir, write_to_file=True): - ''' - Parse a report from a given Quartus project as a dictionary. - - Args: - hls_dir (string): The directory where the project is found - write_to_file (bool): A flag indicating whether to write the results to a separate file - - Returns: - results (dict): The report dictionary, containing latency, resource usage etc. - - ''' - if not os.path.exists(hls_dir): - print(f'Path {hls_dir} does not exist. Exiting.') - return - - prj_dir = _find_project_dir(hls_dir) - - rpt_dir = hls_dir + '/' + prj_dir + '/reports' - if not os.path.exists(rpt_dir): - print(f'Project {prj_dir} does not exist. Rerun "hls4ml build -p {hls_dir}".') - return - - results = _find_reports(rpt_dir) - print(results) - if write_to_file: - print("Here") - f = open(hls_dir + '/' 'synthesis-report.txt', 'w') - f.write('HLS Synthesis Latency & Resource Usage Report') - for key in results: - f.write(str(key) + ':' + str(results[key]) + '\n') - print("There") - print(f'Saved latency & resource usage summary to {hls_dir}/synthesis-report.txt') - return results - - -def read_quartus_report(hls_dir, open_browser=False): - ''' - Parse and print the Quartus report to print the report. Optionally open a browser. - - Args: - hls_dir (string): The directory where the project is found - open_browser, optional: whether to open a browser - - Returns: - None - ''' - report = parse_quartus_report(hls_dir) - - print('HLS Resource Summary\n') - print(tabulate(list(report.items())[0:10], tablefmt='orgtbl', headers=['Resource', 'Utilization'])) - print('\n\nHLS Validation Summary\n') - print(tabulate(list(report.items())[11:13], tablefmt='orgtbl', headers=['', '[Min, Max, Avg]'])) - if 'Clock' in report.keys(): - print('\n\nQuartus Synthesis Summary\n') - print(tabulate(list(report.items())[13:], tablefmt='orgtbl', headers=['Resource', 'Utilization'])) - else: - print('Quartus compile data not found! 
-
-
-def _find_project_dir(hls_dir):
-    '''
-    Finds the synthesis folder from the HLS project directory
-
-    Args:
-        hls_dir (string): HLS project location
-
-    Returns:
-        project_dir (string): Synthesis folder within the HLS project directory
-    '''
-    top_func_name = None
-
-    with open(hls_dir + '/build_lib.sh') as f:
-        for line in f.readlines():
-            if 'PROJECT=' in line:
-                top_func_name = line.split(sep='=')[-1].rstrip()
-
-    return top_func_name + '-fpga.prj'
-
-
-def read_js_object(js_script):
-    '''
-    Reads JavaScript source and returns a dictionary of the variables defined in the script.
-
-    Args:
-        js_script (string): Contents of the JavaScript file
-
-    Returns:
-        Dictionary of variables defined in the script
-    '''
-
-    def visit(node):
-        if isinstance(node, asttypes.Program):
-            d = {}
-            for child in node:
-                if not isinstance(child, asttypes.VarStatement):
-                    raise ValueError("All statements should be var statements")
-                key, val = visit(child)
-                d[key] = val
-            return d
-        elif isinstance(node, asttypes.VarStatement):
-            return visit(node.children()[0])
-        elif isinstance(node, asttypes.VarDecl):
-            return (visit(node.identifier), visit(node.initializer))
-        elif isinstance(node, asttypes.Object):
-            d = {}
-            for property in node:
-                key = visit(property.left)
-                value = visit(property.right)
-                d[key] = value
-            return d
-        elif isinstance(node, asttypes.BinOp):
-            # simple constant folding
-            if node.op == '+':
-                if isinstance(node.left, asttypes.String) and isinstance(node.right, asttypes.String):
-                    return visit(node.left) + visit(node.right)
-                elif isinstance(node.left, asttypes.Number) and isinstance(node.right, asttypes.Number):
-                    return visit(node.left) + visit(node.right)
-                else:
-                    raise ValueError("Cannot + on anything other than two literals")
-            else:
-                raise ValueError("Cannot do operator '%s'" % node.op)
-
-        elif isinstance(node, asttypes.String) or isinstance(node, asttypes.Number):
-            return literal_eval(node.value)
-        elif isinstance(node, asttypes.Array):
-            return [visit(x) for x in node]
-        elif isinstance(node, asttypes.Null):
-            return None
-        elif isinstance(node, asttypes.Boolean):
-            if str(node) == "false":
-                return False
-            else:
-                return True
-        elif isinstance(node, asttypes.Identifier):
-            return node.value
-        else:
-            raise Exception("Unhandled node: %r" % node)
-
-    return visit(es5(js_script))
-
-
-def _read_quartus_file(filename):
-    '''
-    Reads results (clock frequency, resource usage) obtained through FPGA synthesis (full Quartus compilation)
-
-    Args:
-        filename (string): Location of the Quartus report
-
-    Returns:
-        results (dict): Resource usage obtained through the Quartus compile
-    '''
-
-    with open(filename) as dataFile:
-        quartus_data = dataFile.read()
-        quartus_data = read_js_object(quartus_data)
-
-    results = {}
-    if quartus_data['quartusJSON']['quartusFitClockSummary']['nodes'][0]['clock'] != "TBD":
-        results['Clock'] = quartus_data['quartusJSON']['quartusFitClockSummary']['nodes'][0]['clock']
-        results['Quartus ALM'] = quartus_data['quartusJSON']['quartusFitResourceUsageSummary']['nodes'][-1]['alm']
-        results['Quartus REG'] = quartus_data['quartusJSON']['quartusFitResourceUsageSummary']['nodes'][-1]['reg']
-        results['Quartus DSP'] = quartus_data['quartusJSON']['quartusFitResourceUsageSummary']['nodes'][-1]['dsp']
-        results['Quartus RAM'] = quartus_data['quartusJSON']['quartusFitResourceUsageSummary']['nodes'][-1]['ram']
-        results['Quartus MLAB'] = quartus_data['quartusJSON']['quartusFitResourceUsageSummary']['nodes'][-1]['mlab']
-    else:
-        print(
-            'Quartus report not found. '
-            'Run Quartus Compilation using Quartus Shell or Full Compilation from Intel Quartus Prime'
-        )
-    return results
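-
-# read_js_object in action (a sketch with made-up input; the Quartus report
-# files are JavaScript 'var' assignments that the visitor above folds into
-# plain Python values):
-#
-#     read_js_object('var areaJSON = {"total": [1, 2], "name": "alm" + "s"};')
-#     # -> {'areaJSON': {'total': [1, 2], 'name': 'alms'}}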
-
-
-def _read_hls_file(filename):
-    '''
-    Reads the HLS resource estimate obtained through HLS synthesis
-
-    Args:
-        filename (string): Location of the HLS report
-
-    Returns:
-        results (dict): Resource usage obtained through HLS estimation
-    '''
-    with open(filename) as dataFile:
-        report_data = dataFile.read()
-        report_data = report_data[: report_data.rfind('var fileJSON')]
-        report_data = read_js_object(report_data)
-    results = {}
-    (
-        results['HLS Estimate ALUT'],
-        results['HLS Estimate FF'],
-        results['HLS Estimate RAM'],
-        results['HLS Estimate DSP'],
-        results['HLS Estimate MLAB'],
-    ) = report_data['areaJSON']['total']
-    (
-        results['HLS Estimate ALUT (%)'],
-        results['HLS Estimate FF (%)'],
-        results['HLS Estimate RAM (%)'],
-        results['HLS Estimate DSP (%)'],
-        results['HLS Estimate MLAB (%)'],
-    ) = report_data['areaJSON']['total_percent']
-    return results
-
-
-def _read_verification_file(filename):
-    '''
-    Reads verification data (latency, initiation interval) obtained through simulation
-
-    Args:
-        filename (string): Location of the verification file
-
-    Returns:
-        results (dict): Verification data obtained from simulation
-    '''
-    results = {}
-    if os.path.isfile(filename):
-        with open(filename) as dataFile:
-            verification_data = dataFile.read()
-            verification_data = read_js_object(verification_data)
-
-        try:
-            results['Number of Invocations'] = verification_data['verifJSON']['functions'][0]['data'][0]
-
-            latency = verification_data['verifJSON']['functions'][0]['data'][1].split(",")
-            results['Latency (MIN)'] = latency[0]
-            results['Latency (MAX)'] = latency[1]
-            results['Latency (AVG)'] = latency[2]
-
-            ii = verification_data['verifJSON']['functions'][0]['data'][2].split(",")
-            results['ii (MIN)'] = ii[0]
-            results['ii (MAX)'] = ii[1]
-            results['ii (AVG)'] = ii[2]
-        except Exception:
-            print('Verification data not found. Run ./[projectname]-fpga to generate.')
-    else:
-        print('Verification file not found. Run ./[projectname]-fpga to generate.')
-    return results
-
-
-def _find_reports(rpt_dir):
-    results = {}
-    results.update(_read_hls_file(rpt_dir + '/lib/report_data.js'))
-    results.update(_read_verification_file(rpt_dir + '/lib/verification_data.js'))
-    results.update(_read_quartus_file(rpt_dir + '/lib/quartus_data.js'))
-    return results
diff --git a/hls4ml/hls4ml/report/vivado_report.py b/hls4ml/hls4ml/report/vivado_report.py
deleted file mode 100644
index 8beb429..0000000
--- a/hls4ml/hls4ml/report/vivado_report.py
+++ /dev/null
@@ -1,674 +0,0 @@
-import os
-import re
-import sys
-import xml.etree.ElementTree as ET
-
-
-def read_vivado_report(hls_dir, full_report=False):
-    if not os.path.exists(hls_dir):
-        print(f'Path {hls_dir} does not exist. Exiting.')
-        return
-
-    prj_dir = None
-    top_func_name = None
-
-    if os.path.isfile(hls_dir + '/project.tcl'):
-        prj_dir, top_func_name = _parse_project_script(hls_dir)
-
-    if prj_dir is None or top_func_name is None:
-        print('Unable to read project data. Exiting.')
-        return
-
-    sln_dir = hls_dir + '/' + prj_dir
-    if not os.path.exists(sln_dir):
-        print(f'Project {prj_dir} does not exist.
Rerun "hls4ml build -p {hls_dir}".') - return - - solutions = _find_solutions(sln_dir) - print(f'Found {len(solutions)} solution(s) in {sln_dir}.') - - for sln in solutions: - print(f'Reports for solution "{sln}":\n') - _find_reports(sln_dir + '/' + sln, top_func_name, full_report) - - -def _parse_project_script(path): - prj_dir = None - top_func_name = None - - project_path = path + '/project.tcl' - - with open(project_path) as f: - for line in f.readlines(): - if 'set project_name' in line: - top_func_name = line.split('"')[-2] - prj_dir = top_func_name + '_prj' - if 'set backend' in line: - backend_name = line.split('"')[-2] - - if 'accelerator' in backend_name: - top_func_name += '_axi' - - return prj_dir, top_func_name - - -def _find_solutions(sln_dir): - solutions = [] - - if os.path.isfile(sln_dir + '/vivado_hls.app'): - sln_file = 'vivado_hls.app' - elif os.path.isfile(sln_dir + '/hls.app'): - sln_file = 'hls.app' - else: - return solutions - - with open(sln_dir + '/' + sln_file) as f: - # Get rid of namespaces (workaround to support two types of vivado_hls.app files) - xmlstring = re.sub(' xmlns="[^"]+"', '', f.read(), count=1) - - root = ET.fromstring(xmlstring) - for sln_tag in root.findall('solutions/solution'): - sln_name = sln_tag.get('name') - if sln_name is not None and os.path.isdir(sln_dir + '/' + sln_name): - solutions.append(sln_name) - - return solutions - - -def _find_reports(sln_dir, top_func_name, full_report=False): - csim_file = sln_dir + f'/csim/report/{top_func_name}_csim.log' - if os.path.isfile(csim_file): - _show_csim_report(csim_file) - else: - print('C simulation report not found.') - - syn_file = sln_dir + f'/syn/report/{top_func_name}_csynth.rpt' - if os.path.isfile(syn_file): - _show_synth_report(syn_file, full_report) - else: - print('Synthesis report not found.') - - cosim_file = sln_dir + f'/sim/report/{top_func_name}_cosim.rpt' - if os.path.isfile(cosim_file): - _show_cosim_report(cosim_file) - else: - print('Co-simulation report not found.') - - -def _show_csim_report(csim_file): - with open(csim_file) as f: - print('C SIMULATION RESULT:') - print(f.read()) - - -def _show_synth_report(synth_file, full_report=False): - with open(synth_file) as f: - print('SYNTHESIS REPORT:') - for line in f.readlines()[2:]: - if not full_report and '* DSP48' in line: - break - print(line, end='') - - -def _show_cosim_report(cosim_file): - with open(cosim_file) as f: - print('CO-SIMULATION RESULT:') - print(f.read()) - - -def _get_abs_and_percentage_values(unparsed_cell): - return int(unparsed_cell.split('(')[0]), float(unparsed_cell.split('(')[1].replace('%', '').replace(')', '')) - - -def parse_vivado_report(hls_dir): - if not os.path.exists(hls_dir): - print(f'Path {hls_dir} does not exist. Exiting.') - return - - prj_dir = None - top_func_name = None - - if os.path.isfile(hls_dir + '/project.tcl'): - prj_dir, top_func_name = _parse_project_script(hls_dir) - - if prj_dir is None or top_func_name is None: - print('Unable to read project data. Exiting.') - return - - sln_dir = hls_dir + '/' + prj_dir - if not os.path.exists(sln_dir): - print(f'Project {prj_dir} does not exist. Rerun "hls4ml build -p {hls_dir}".') - return - - solutions = _find_solutions(sln_dir) - if len(solutions) > 1: - print(f'WARNING: Found {len(solutions)} solution(s) in {sln_dir}. 
Using the first solution.') - - report = {} - - sim_file = hls_dir + '/tb_data/csim_results.log' - if os.path.isfile(sim_file): - csim_results = [] - with open(sim_file) as f: - for line in f.readlines(): - csim_results.append([r for r in line.split()]) - report['CSimResults'] = csim_results - - sim_file = hls_dir + '/tb_data/rtl_cosim_results.log' - if os.path.isfile(sim_file): - cosim_results = [] - with open(sim_file) as f: - for line in f.readlines(): - cosim_results.append([r for r in line.split()]) - report['CosimResults'] = cosim_results - - syn_file = sln_dir + '/' + solutions[0] + f'/syn/report/{top_func_name}_csynth.xml' - c_synth_report = {} - if os.path.isfile(syn_file): - root = ET.parse(syn_file).getroot() - - # Performance - perf_node = root.find('./PerformanceEstimates') - c_synth_report['TargetClockPeriod'] = root.find('./UserAssignments/TargetClockPeriod').text - c_synth_report['EstimatedClockPeriod'] = perf_node.find('./SummaryOfTimingAnalysis/EstimatedClockPeriod').text - c_synth_report['BestLatency'] = perf_node.find('./SummaryOfOverallLatency/Best-caseLatency').text - c_synth_report['WorstLatency'] = perf_node.find('./SummaryOfOverallLatency/Worst-caseLatency').text - c_synth_report['IntervalMin'] = perf_node.find('./SummaryOfOverallLatency/Interval-min').text - c_synth_report['IntervalMax'] = perf_node.find('./SummaryOfOverallLatency/Interval-max').text - # Area - area_node = root.find('./AreaEstimates') - for child in area_node.find('./Resources'): - # DSPs are called 'DSP48E' in Vivado and just 'DSP' in Vitis. Overriding here to have consistent keys - if child.tag == 'DSP48E': - child.tag = 'DSP' - c_synth_report[child.tag] = child.text - for child in area_node.find('./AvailableResources'): - if child.tag == 'DSP48E': - child.tag = 'DSP' - c_synth_report['Available' + child.tag] = child.text - report['CSynthesisReport'] = c_synth_report - else: - print('CSynthesis report not found.') - - vivado_syn_file = hls_dir + '/vivado_synth.rpt' - if os.path.isfile(vivado_syn_file): - vivado_synth_rpt = {} - with open(vivado_syn_file) as f: - section = 0 - for line in f.readlines(): - match = re.match(r'^(\d)\.', line) - if match: - section = int(match.group(1)) - # Sometimes, phrases such as 'CLB Registers' can show up in the non-tabular sections of the report - if '|' in line: - # CLB (2019.X) vs. 
Slice (2020.X) - if ('CLB LUTs' in line or 'Slice LUTs' in line) and section == 1: - vivado_synth_rpt['LUT'] = line.split('|')[2].strip() - elif ('CLB Registers' in line or 'Slice Registers' in line) and section == 1: - vivado_synth_rpt['FF'] = line.split('|')[2].strip() - elif 'Block RAM Tile' in line and section == 2: - vivado_synth_rpt['BRAM_18K'] = line.split('|')[2].strip() - elif 'URAM' in line and section == 2: - vivado_synth_rpt['URAM'] = line.split('|')[2].strip() - elif 'DSPs' in line and section == 3: - vivado_synth_rpt['DSP48E'] = line.split('|')[2].strip() - report['VivadoSynthReport'] = vivado_synth_rpt - else: - print('Vivado synthesis report not found.') - - cosim_file = sln_dir + '/' + solutions[0] + f'/sim/report/{top_func_name}_cosim.rpt' - if os.path.isfile(cosim_file): - cosim_report = {} - with open(cosim_file) as f: - for line in f.readlines(): - if re.search('VHDL', line) or re.search('Verilog', line): - result = line[1:].split() # [1:] skips the leading '|' - result = [res[:-1] if res[-1] == '|' else res for res in result] - # RTL, Status, Latency-min, Latency-avg, Latency-max, Interval-min, Interval-avg, Interval-max - if result[1] == 'NA': - continue - else: - cosim_report['RTL'] = result[0] - cosim_report['Status'] = result[1] - cosim_report['LatencyMin'] = result[2] - cosim_report['LatencyMax'] = result[4] - cosim_report['IntervalMin'] = result[5] - cosim_report['IntervalMax'] = result[7] - report['CosimReport'] = cosim_report - else: - print('Cosim report not found.') - - if os.path.isfile(cosim_file): - transaction_file = ( - sln_dir - + '/' - + solutions[0] - + '/sim/' - + report['CosimReport']['RTL'].lower() - + '/' - + top_func_name - + '.performance.result.transaction.xml' - ) - if os.path.isfile(transaction_file): - cosim_transactions = { - 'InitiationInterval': {'max': 0, 'min': sys.maxsize, 'avg': 0.0}, - 'Latency': {'max': 0, 'min': sys.maxsize, 'avg': 0.0}, - } - with open(transaction_file) as f: - i = 1 - for line in f.readlines(): - if re.search('transaction', line): - result = line.split() - # update min - if result[3] != 'x': - cosim_transactions['InitiationInterval']['min'] = ( - int(result[3]) - if int(result[3]) < cosim_transactions['InitiationInterval']['min'] - else cosim_transactions['InitiationInterval']['min'] - ) - cosim_transactions['Latency']['min'] = ( - int(result[2]) - if int(result[2]) < cosim_transactions['Latency']['min'] - else cosim_transactions['Latency']['min'] - ) - # update max - if result[3] != 'x': - cosim_transactions['InitiationInterval']['max'] = ( - int(result[3]) - if int(result[3]) > cosim_transactions['InitiationInterval']['max'] - else cosim_transactions['InitiationInterval']['max'] - ) - cosim_transactions['Latency']['max'] = ( - int(result[2]) - if int(result[2]) > cosim_transactions['Latency']['max'] - else cosim_transactions['Latency']['max'] - ) - # update avg - if result[3] != 'x': - cosim_transactions['InitiationInterval']['avg'] = cosim_transactions['InitiationInterval'][ - 'avg' - ] + float((int(result[3]) - cosim_transactions['InitiationInterval']['avg']) / i) - cosim_transactions['Latency']['avg'] = cosim_transactions['Latency']['avg'] + float( - (int(result[2]) - cosim_transactions['Latency']['avg']) / i - ) - i += 1 - - report['CosimReport']['LatencyMin'] = cosim_transactions['Latency']['min'] - report['CosimReport']['LatencyMax'] = cosim_transactions['Latency']['max'] - report['CosimReport']['LatencyAvg'] = cosim_transactions['Latency']['avg'] - - report['CosimReport']['IntervalMin'] = 
cosim_transactions['InitiationInterval']['min'] - report['CosimReport']['IntervalMax'] = cosim_transactions['InitiationInterval']['max'] - report['CosimReport']['IntervalAvg'] = cosim_transactions['InitiationInterval']['avg'] - - util_rpt_file = hls_dir + '/util.rpt' - if os.path.isfile(util_rpt_file): - implementation_report = {} - with open(util_rpt_file) as f: - for line in f.readlines(): - if re.search(r'\(top\)', line): - # Total LUTs | Logic LUTs | LUTRAMs | SRLs | FFs | RAMB36 | RAMB18 (| URAM )| DSP48 Blocks - # skipping the first 2 unuseful cells with [:2] - results = [_get_abs_and_percentage_values(elem) for elem in line.replace('|', '').split()[2:]] - implementation_report['TotLUTs'] = results[0][0] - implementation_report['TotLUTs%'] = results[0][1] - - implementation_report['LogicLUTs'] = results[1][0] - implementation_report['LogicLUTs%'] = results[1][1] - - implementation_report['LUTRAMs'] = results[2][0] - implementation_report['LUTRAMs%'] = results[2][1] - - implementation_report['SRLs'] = results[3][0] - implementation_report['SRLs%'] = results[3][1] - - implementation_report['FFs'] = results[4][0] - implementation_report['FFs%'] = results[4][1] - - implementation_report['RAMB36s'] = results[5][0] - implementation_report['RAMB36s%'] = results[5][1] - - implementation_report['RAMB18s'] = results[6][0] - implementation_report['RAMB18s%'] = results[6][1] - - if len(results) == 9: - implementation_report['URAMs'] = results[7][0] - implementation_report['URAMs%'] = results[7][1] - - implementation_report['DSPs'] = results[8][0] - implementation_report['DSPs%'] = results[8][1] - else: - implementation_report['DSPs'] = results[7][0] - implementation_report['DSPs%'] = results[7][1] - report['ImplementationReport'] = implementation_report - else: - print('Implementation report not found.') - - timing_report_file = ( - hls_dir - + '/' - + prj_dir.split('_')[0] - + '_vivado_accelerator/project_1.runs/impl_1/design_1_wrapper_timing_summary_routed.rpt' - ) - if os.path.isfile(timing_report_file): - timing_report = {} - with open(timing_report_file) as f: - while not re.search('WNS', next(f)): - pass - # skip the successive line - next(f) - result = next(f).split() - - timing_report['WNS'] = float(result[0]) - timing_report['TNS'] = float(result[1]) - timing_report['WHS'] = float(result[4]) - timing_report['THS'] = float(result[5]) - timing_report['WPWS'] = float(result[8]) - timing_report['TPWS'] = float(result[9]) - - report['TimingReport'] = timing_report - else: - print('Timing report not found.') - return report - - -def print_vivado_report(report_dict): - if _is_running_in_notebook(): - _print_ipython_report(report_dict) - else: - _print_str_report(report_dict) - - -def _print_ipython_report(report_dict): - from IPython.display import HTML, display - - html = '\n' + _table_css + '
    ' - body = _make_report_body(report_dict, _make_html_table_template, _make_html_header) - html += body + '\n
    \n' - display(HTML(html)) - - -def _print_str_report(report_dict): - body = _make_report_body(report_dict, _make_str_table_template, _make_str_header) - print(body) - - -def _is_running_in_notebook(): - try: - from IPython import get_ipython - - shell = get_ipython().__class__.__name__ - if shell == 'ZMQInteractiveShell': - return True # Jupyter notebook or qtconsole - elif shell == 'TerminalInteractiveShell': - return False # Terminal running IPython - else: - return False # Other type (?) - except NameError: - return False # Probably standard Python interpreter - - -_table_css = """ - -""" - -_table_base_template = """ - - - - - - - -{table_rows} - -
    {table_header}
    -""" - -_row_base_template = " {row_title}{{{row_key}}}" - - -def _make_html_table_template(table_header, row_templates): - table_rows = '\n'.join( - [_row_base_template.format(row_title=row_title, row_key=row_key) for row_title, row_key in row_templates.items()] - ) - return _table_base_template.format(table_header=table_header, table_rows=table_rows) - - -def _make_str_table_template(table_header, row_templates): - len_title = 0 - for row_title in row_templates.keys(): - if len(row_title) > len_title: - len_title = len(row_title) - head = f'\n - {table_header}:\n' - table_rows = '\n'.join( - [' ' + f'{row_title}:'.ljust(len_title + 1) + f' {{{row_key}}}' for row_title, row_key in row_templates.items()] - ) - return head + table_rows + '\n' - - -def _make_html_header(report_header): - return f'

    {report_header}:

    '
-
-
-def _make_str_header(report_header):
-    sep = '=' * 54 + '\n'
-    return '\n' + sep + '== ' + report_header + '\n' + sep
-
-
-def _convert_cycles_to_time(n_cycles, clock_period):
-    time_in_ns = n_cycles * clock_period
-    if time_in_ns < 1000:
-        return str(time_in_ns) + ' ns'
-
-    time_in_us = time_in_ns / 1000
-    if time_in_us < 1000:
-        return str(time_in_us) + ' \u00B5s'
-
-    time_in_ms = time_in_us / 1000
-    if time_in_ms < 1000:
-        return str(time_in_ms) + ' ms'
-
-    time_in_s = time_in_ms / 1000
-    return str(time_in_s) + ' s'
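-
-# For example (a sketch, values hypothetical): with a 5 ns clock period,
-#
-#     _convert_cycles_to_time(10, 5)    # -> '50 ns'
-#     _convert_cycles_to_time(1000, 5)  # 5000 ns -> '5.0 µs'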
-
-
-def _make_report_body(report_dict, make_table_template, make_header_template):
-    body = ''
-
-    if 'CSynthesisReport' in report_dict:
-        body += make_header_template('C Synthesis report')
-        perf_rows = {
-            'Best-case latency': 'best_latency',
-            'Worst-case latency': 'worst_latency',
-            'Interval Min': 'interval_min',
-            'Interval Max': 'interval_max',
-            'Estimated Clock Period': 'estimated_clock',
-        }
-        area_rows = {
-            'BRAM_18K': 'bram',
-            'DSP': 'dsp',
-            'FF': 'ff',
-            'LUT': 'lut',
-            'URAM': 'uram',
-        }
-        body += make_table_template('Performance estimates', perf_rows)
-        body += make_table_template('Resource estimates', area_rows)
-
-        csynth_report = report_dict['CSynthesisReport']
-        target_clock = float(csynth_report['TargetClockPeriod'])
-        best_latency = int(csynth_report['BestLatency'])
-        worst_latency = int(csynth_report['WorstLatency'])
-        bram = int(csynth_report['BRAM_18K'])
-        avail_bram = int(csynth_report['AvailableBRAM_18K'])
-        dsp = int(csynth_report['DSP'])
-        avail_dsp = int(csynth_report['AvailableDSP'])
-        ff = int(csynth_report['FF'])
-        avail_ff = int(csynth_report['AvailableFF'])
-        lut = int(csynth_report['LUT'])
-        avail_lut = int(csynth_report['AvailableLUT'])
-        if 'URAM' in csynth_report:
-            uram = int(csynth_report['URAM'])
-            avail_uram = int(csynth_report['AvailableURAM'])
-
-        params = {}
-
-        params['best_latency'] = str(best_latency) + ' (' + _convert_cycles_to_time(best_latency, target_clock) + ')'
-        params['worst_latency'] = str(worst_latency) + ' (' + _convert_cycles_to_time(worst_latency, target_clock) + ')'
-        params['interval_min'] = csynth_report['IntervalMin']
-        params['interval_max'] = csynth_report['IntervalMax']
-        params['estimated_clock'] = csynth_report['EstimatedClockPeriod']
-
-        params['bram'] = str(bram) + ' / ' + str(avail_bram) + ' (' + str(round(bram / avail_bram * 100, 1)) + '%)'
-        params['dsp'] = str(dsp) + ' / ' + str(avail_dsp) + ' (' + str(round(dsp / avail_dsp * 100, 1)) + '%)'
-        params['ff'] = str(ff) + ' / ' + str(avail_ff) + ' (' + str(round(ff / avail_ff * 100, 1)) + '%)'
-        params['lut'] = str(lut) + ' / ' + str(avail_lut) + ' (' + str(round(lut / avail_lut * 100, 1)) + '%)'
-        if 'URAM' in csynth_report and avail_uram > 0:
-            params['uram'] = str(uram) + ' / ' + str(avail_uram) + ' (' + str(round(uram / avail_uram * 100, 1)) + '%)'
-        else:
-            params['uram'] = 'N/A'
-
-        body = body.format(**params)
-
-    if 'VivadoSynthReport' in report_dict:
-        body += make_header_template('Vivado Synthesis report')
-        area_rows = {
-            'BRAM_18K': 'bram',
-            'DSP48E': 'dsp',
-            'FF': 'ff',
-            'LUT': 'lut',
-            'URAM': 'uram',
-        }
-        body += make_table_template('Resource utilization', area_rows)
-
-        vsynth_report = report_dict['VivadoSynthReport']
-
-        params = {}
-        params['bram'] = vsynth_report['BRAM_18K']
-        params['dsp'] = vsynth_report['DSP48E']
-        params['ff'] = vsynth_report['FF']
-        params['lut'] = vsynth_report['LUT']
-        params['uram'] = vsynth_report['URAM'] if 'URAM' in vsynth_report else 'N/A'
-
-        body = body.format(**params)
-
-    if 'CosimReport' in report_dict:
-        body += make_header_template('Co-Simulation report')
-        perf_rows = {
-            'Status': 'status',
-            'Best-case latency': 'best_latency',
-            'Worst-case latency': 'worst_latency',
-            'Interval Min': 'interval_min',
-            'Interval Max': 'interval_max',
-        }
-        body += make_table_template('Performance', perf_rows)
-
-        cosim_report = report_dict['CosimReport']
-
-        params = {}
-        params['status'] = cosim_report['Status']
-        params['best_latency'] = cosim_report['LatencyMin']
-        params['worst_latency'] = cosim_report['LatencyMax']
-        params['interval_min'] = cosim_report['IntervalMin']
-        params['interval_max'] = cosim_report['IntervalMax']
-
-        body = body.format(**params)
-
-    if 'ImplementationReport' in report_dict:
-        body += make_header_template('Implementation report')
-        area_rows = {
-            'Total LUTs': 'lut',
-            'Logic LUTs': 'logiclut',
-            'LUTRAM': 'lutram',
-            'SRLs': 'srl',
-            'FF': 'ff',
-            'RAMB18': 'bram18',
-            'RAMB36': 'bram36',
-            'DSP': 'dsp',
-            'URAM': 'uram',
-        }
-        body += make_table_template('Resource utilization', area_rows)
-
-        impl_report = report_dict['ImplementationReport']
-
-        params = {}
-        params['lut'] = str(impl_report['TotLUTs']) + ' (' + str(impl_report['TotLUTs%']) + '%)'
-        params['logiclut'] = str(impl_report['LogicLUTs']) + ' (' + str(impl_report['LogicLUTs%']) + '%)'
-        params['lutram'] = str(impl_report['LUTRAMs']) + ' (' + str(impl_report['LUTRAMs%']) + '%)'
-        params['srl'] = str(impl_report['SRLs']) + ' (' + str(impl_report['SRLs%']) + '%)'
-        params['ff'] = str(impl_report['FFs']) + ' (' + str(impl_report['FFs%']) + '%)'
-        params['bram18'] = str(impl_report['RAMB18s']) + ' (' + str(impl_report['RAMB18s%']) + '%)'
-        params['bram36'] = str(impl_report['RAMB36s']) + ' (' + str(impl_report['RAMB36s%']) + '%)'
-        params['dsp'] = str(impl_report['DSPs']) + ' (' + str(impl_report['DSPs%']) + '%)'
-        if 'URAMs' in impl_report:
-            params['uram'] = str(impl_report['URAMs']) + ' (' + str(impl_report['URAMs%']) + '%)'
-        else:
-            params['uram'] = 'N/A'
-
-        body = body.format(**params)
-
-    if 'TimingReport' in report_dict:
-        body += make_header_template('Timing report')
-        perf_rows = {
-            'Worst Negative Slack (WNS)': 'wns',
-            'Total Negative Slack (TNS)': 'tns',
-            'Worst Hold Slack (WHS)': 'whs',
-            'Total Hold Slack (THS)': 'ths',
-            'Worst Pulse Width Slack (WPWS)': 'wpws',
-            'Total Pulse Width Slack (TPWS)': 'tpws',
-        }
-        body += make_table_template('Timing', perf_rows)
-
-        timing_report = report_dict['TimingReport']
-
-        params = {}
-        params['wns'] = round(timing_report['WNS'], 2)
-        params['tns'] = round(timing_report['TNS'], 2)
-        params['whs'] = round(timing_report['WHS'], 2)
-        params['ths'] = round(timing_report['THS'], 2)
-        params['wpws'] = round(timing_report['WPWS'], 2)
-        params['tpws'] = round(timing_report['TPWS'], 2)
-
-        body = body.format(**params)
-
-    return body
diff --git a/hls4ml/hls4ml/templates/quartus/Makefile b/hls4ml/hls4ml/templates/quartus/Makefile
deleted file mode 100644
index 754ea35..0000000
--- a/hls4ml/hls4ml/templates/quartus/Makefile
+++ /dev/null
@@ -1,30 +0,0 @@
-DEVICE := Arria10
-TARGETS := myproject-fpga
-
-CXX := i++
-CXXFLAGS := -march=$(DEVICE)
-RM := rm -rf
-DEBUG_FLAGS := --time quartus-hlssynt.log
-SOURCE_FILES := myproject_test.cpp firmware/myproject.cpp
-HEADER_FILES := firmware/myproject.h
-LOGGING_1 :=
-LOGGING_2 :=
-QUARTUS_COMPILE :=
-CONT_IF_LARGE_AREA :=
-
-.PHONY: test
-test: $(TARGETS)
-	@$(foreach t,$(TARGETS),echo ./$(t); ./$(t) | tee $(t)_run.log; echo "";)
-
-.PHONY: all
-all: $(TARGETS)
-
-.PHONY: clean
-clean:
-	-$(RM) $(foreach t,$(TARGETS),$(t).prj $(t) $(t)_time.log)
-
-.PHONY: myproject-fpga -myproject-fpga: CXXFLAGS := $(CXXFLAGS) - -$(TARGETS) : $(SOURCE_FILES) $(HEADER_FILES) - $(CXX) $(LOGGING_1) $(LOGGING_2) $(CXXFLAGS) $(DEBUG_FLAGS) $(SOURCE_FILES) $(CONT_IF_LARGE_AREA) $(QUARTUS_COMPILE) -o $@ diff --git a/hls4ml/hls4ml/templates/quartus/ac_types/ac_channel.h b/hls4ml/hls4ml/templates/quartus/ac_types/ac_channel.h deleted file mode 100644 index 62e0542..0000000 --- a/hls4ml/hls4ml/templates/quartus/ac_types/ac_channel.h +++ /dev/null @@ -1,555 +0,0 @@ -/************************************************************************** - * * - * Algorithmic C (tm) Datatypes * - * * - * Software Version: 4.0 * - * * - * Release Date : Sat Jun 13 12:35:18 PDT 2020 * - * Release Type : Production Release * - * Release Build : 4.0.0 * - * * - * Copyright 2004-2020, Mentor Graphics Corporation, * - * * - * All Rights Reserved. * - * * - ************************************************************************** - * Licensed under the Apache License, Version 2.0 (the "License"); * - * you may not use this file except in compliance with the License. * - * You may obtain a copy of the License at * - * * - * http://www.apache.org/licenses/LICENSE-2.0 * - * * - * Unless required by applicable law or agreed to in writing, software * - * distributed under the License is distributed on an "AS IS" BASIS, * - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * - * implied. * - * See the License for the specific language governing permissions and * - * limitations under the License. * - ************************************************************************** - * * - * The most recent version of this package is available at github. * - * * - *************************************************************************/ - -/* -// Source: ac_channel.h -// Description: templatized channel communication class -// Author: Andres Takach, Ph.D. 
-*/ - -#ifndef __AC_CHANNEL_H -#define __AC_CHANNEL_H - -#ifndef __cplusplus -# error C++ is required to include this header file -#endif - -#include -#include - -#if !defined(AC_USER_DEFINED_ASSERT) && !defined(AC_ASSERT_THROW_EXCEPTION) -# include -#endif - -// not directly used by this include -#include -#include - -// Macro Definitions (obsolete - provided here for backward compatibility) -#define AC_CHAN_CTOR(varname) varname -#define AC_CHAN_CTOR_INIT(varname,init) varname(init) -#define AC_CHAN_CTOR_VAL(varname,init,val) varname(init,val) - -//////////////////////////////////////////////// -// Struct: ac_exception / ac_channel_exception -//////////////////////////////////////////////// - -#ifndef __INCLUDED_AC_EXCEPTION -# define __INCLUDED_AC_EXCEPTION -struct ac_exception { - const char *const file; - const unsigned int line; - const int code; - const char *const msg; - ac_exception(const char *file_, const unsigned int &line_, const int &code_, const char *msg_) - : file(file_), line(line_), code(code_), msg(msg_) { } -}; -#endif - -struct ac_channel_exception { - enum { code_begin = 1024 }; - enum code { - read_from_empty_channel = code_begin, - fifo_not_empty_when_reset, - no_operator_sb_defined_for_channel_type, - no_insert_defined_for_channel_type, - no_size_in_connections, - no_num_free_in_connections, - no_output_empty_in_connections - }; - static inline const char *msg(const code &code_) { - static const char *const s[] = { - "Read from empty channel", - "fifo not empty when reset", - "No operator[] defined for channel type", - "No insert defined for channel type", - "Connections does not support size()", - "Connections does not support num_free()", - "Connections::Out does not support empty()" - }; - return s[code_-code_begin]; - } -}; - -/////////////////////////////////////////// -// Class: ac_channel -////////////////////////////////////////// - -template -class ac_channel { -public: - typedef T element_type; - - // constructors - ac_channel(); - ac_channel(int init); - ac_channel(int init, T val); - - T read() { return chan.read(); } - void read(T& t) { t = read(); } - bool nb_read(T& t) { return chan.nb_read(t); } - - void write(const T& t) { chan.write(t); } - bool nb_write(T& t) { - chan.incr_size_call_count(); - return chan.nb_write(t); - } - - unsigned int size() { - chan.incr_size_call_count(); - return chan.size(); - } - bool empty() { return chan.empty(); } - - // Return true if channel has at least k entries - bool available(unsigned int k) const { return chan.available(k); } - - void reset() { chan.reset(); } - - unsigned int debug_size() const { return chan.size(); } - - const T &operator[](unsigned int pos) const { return chan[pos]; } - - int get_size_call_count() { return chan.get_size_call_count(); } - -#ifdef SYSTEMC_INCLUDED - void bind(sc_fifo_in &f) { chan.bind(f); } - void bind(sc_fifo_out &f) { chan.bind(f); } -#endif - -#ifdef __CONNECTIONS__CONNECTIONS_H__ - void bind(Connections::Out& c) { chan.bind(c); } - void bind(Connections::In& c) { chan.bind(c); } - void bind(Connections::SyncIn &c) { chan.bind(c); } - void bind(Connections::SyncOut &c) { chan.bind(c); } -#endif - -private: -# ifndef AC_CHANNEL_ASSERT -# define AC_CHANNEL_ASSERT(cond, code) ac_assert(cond, __FILE__, __LINE__, code) - static inline void ac_assert(bool condition, const char *file, int line, const ac_channel_exception::code &code) { -# ifndef AC_USER_DEFINED_ASSERT - if(!condition) { - const ac_exception e(file, line, code, ac_channel_exception::msg(code)); -# ifdef 
AC_ASSERT_THROW_EXCEPTION -# ifdef AC_ASSERT_THROW_EXCEPTION_AS_CONST_CHAR - throw(e.msg); -# else - throw(e); -# endif -# else - std::cerr << "Assert"; - if(e.file) - std::cerr << " in file " << e.file << ":" << e.line; - std::cerr << " " << e.msg << std::endl; - assert(0); -# endif - } -# else - AC_USER_DEFINED_ASSERT(condition, file, line, ac_channel_exception::msg(code)); -# endif - } -# else -# error "private use only - AC_CHANNEL_ASSERT macro already defined" -# endif - -public: - class fifo { - enum fifo_type { - fifo_ac_channel_type, - fifo_sc_fifo_type, - fifo_connections_type, - fifo_connections_sync_type - }; - - struct fifo_abstract { - virtual ~fifo_abstract() {} - virtual fifo_type get_fifo_type() const = 0; - virtual T read() = 0; - virtual bool nb_read(T& t) = 0; - virtual void write(const T& t) = 0; - virtual bool nb_write(T& t) = 0; - virtual bool empty() = 0; - virtual bool available(unsigned int k) const = 0; - virtual unsigned int size() const = 0; - virtual unsigned int num_free() const = 0; - virtual void reset() = 0; - virtual const T &operator_sb(const unsigned int &pos, const T &default_value) const = 0; - }; - - struct fifo_ac_channel : fifo_abstract { - std::deque ch; - - ~fifo_ac_channel() {} - - static inline fifo_type ftype() { return fifo_ac_channel_type; } - - fifo_type get_fifo_type() const { return ftype(); } - - T read() { - { - // If you hit this assert you attempted a read on an empty channel. Perhaps - // you need to guard the execution of the read with a call to the available() - // function: - // if (myInputChan.available(2)) { - // // it is safe to read two values - // cout << myInputChan.read(); - // cout << myInputChan.read(); - // } - AC_CHANNEL_ASSERT(!empty(), ac_channel_exception::read_from_empty_channel); - } - T t = ch.front(); - ch.pop_front(); - return t; - } - bool nb_read(T& t) { return empty() ? false : (t = read(), true); } - - void write(const T& t) { ch.push_back(t); } - bool nb_write(T& t) { return !num_free() ? false : (write(t), true); } - - bool empty() { return size() == 0; } - bool available(unsigned int k) const { return size() >= k; } - unsigned int size() const { return (int)ch.size(); } - unsigned int num_free() const { return ch.max_size() - ch.size(); } - - void reset() { ch.clear(); } - - const T &operator_sb(const unsigned int &pos, const T &) const { - return ch[pos]; - } - }; - -#ifdef SYSTEMC_INCLUDED - struct fifo_sc_fifo : fifo_abstract { - sc_fifo_in *fifo_in; - sc_fifo_out *fifo_out; - - ~fifo_sc_fifo() {} - - static inline fifo_type ftype() { return fifo_sc_fifo_type; } - - fifo_type get_fifo_type() const { return ftype(); } - - T read() { return fifo_in->read(); } - bool nb_read(T& t) { return empty() ? false : (t = read(), true); } - - void write(const T& t) { fifo_out->write(t); } - bool nb_write(T& t) { return !num_free() ? 
false : (write(t), true); } - - bool empty() { return size() == 0; } - bool available(unsigned int k) const { return size() >= k; } - unsigned int size() const { return fifo_in->num_available(); } - unsigned int num_free() const { return fifo_out->num_free(); } - - void reset() { - AC_CHANNEL_ASSERT(empty(), ac_channel_exception::fifo_not_empty_when_reset); - } - - const T &operator_sb(const unsigned int &, const T &default_value) const { - AC_CHANNEL_ASSERT(0, ac_channel_exception::no_operator_sb_defined_for_channel_type); - return default_value; - } - }; -public: - void bind(sc_fifo_in &f) { get_fifo().fifo_in = &f; } - void bind(sc_fifo_out &f) { get_fifo().fifo_out = &f; } -private: -#endif - -#ifdef __CONNECTIONS__CONNECTIONS_H__ - struct fifo_connections : fifo_abstract { - Connections::In *fifo_in; - Connections::Out *fifo_out; - - ~fifo_connections() {} - static inline fifo_type ftype() { return fifo_connections_type; } - fifo_type get_fifo_type() const { return ftype(); } - - T read() { return fifo_in->Pop(); } - bool nb_read(T& t) { return fifo_in->PopNB(t); } - - void write(const T& t) { fifo_out->Push(t); } - bool nb_write(T& t) { return fifo_out->PushNB(t); } - - bool empty() { - if (fifo_in) - return fifo_in->Empty(); - else - AC_CHANNEL_ASSERT(0, ac_channel_exception::no_output_empty_in_connections); - return false; - } - bool available(unsigned int k) const { return true; } - unsigned int size() const { - AC_CHANNEL_ASSERT(0, ac_channel_exception::no_size_in_connections); - return 0; - } - unsigned int num_free() const { - AC_CHANNEL_ASSERT(0, ac_channel_exception::no_num_free_in_connections); - return 0; - } - - void reset() { - AC_CHANNEL_ASSERT(empty(), ac_channel_exception::fifo_not_empty_when_reset); - } - - const T &operator_sb(const unsigned int &, const T &default_value) const { - AC_CHANNEL_ASSERT(0, ac_channel_exception::no_operator_sb_defined_for_channel_type); - return default_value; - } - }; - - struct fifo_connections_sync : fifo_abstract { - Connections::SyncIn *sync_in; - Connections::SyncOut *sync_out; - - ~fifo_connections_sync() {} - static inline fifo_type ftype() { return fifo_connections_sync_type; } - fifo_type get_fifo_type() const { return ftype(); } - - bool read() { sync_in->sync_in(); return true; } - bool nb_read(T& t) { t=true; return(sync_in->nb_sync_in()); } - - void write(const T& t) { sync_out->sync_out(); } - bool nb_write(T& t) { sync_out->sync_out(); return true; } - - bool empty() { - AC_CHANNEL_ASSERT(0, ac_channel_exception::no_output_empty_in_connections); - return(false); - } - bool available(unsigned int k) const { return true; } - unsigned int size() const { - AC_CHANNEL_ASSERT(0, ac_channel_exception::no_size_in_connections); - return 0; - } - unsigned int num_free() const { - AC_CHANNEL_ASSERT(0, ac_channel_exception::no_num_free_in_connections); - return 0; - } - void reset() { - if (sync_in) sync_in->reset_sync_in(); - if (sync_out) sync_out->reset_sync_out(); - } - const T &operator_sb(const unsigned int &, const T &default_value) const { - AC_CHANNEL_ASSERT(0, ac_channel_exception::no_operator_sb_defined_for_channel_type); - return default_value; - } - }; - - - public: - void bind(Connections::In& c) { get_fifo().fifo_in = &c; } - void bind(Connections::Out& c) { get_fifo().fifo_out = &c; } - - void bind(Connections::SyncIn &c) { get_fifo().sync_in = &c; } - void bind(Connections::SyncOut &c) { get_fifo().sync_out = &c; } - - private: -#endif - - template - fifo_T &get_fifo() { - if (!f || f->get_fifo_type() != 
fifo_T::ftype()) { - if (f) { - AC_CHANNEL_ASSERT(f->empty(), ac_channel_exception::fifo_not_empty_when_reset); - delete f; - } - f = new fifo_T; - } - return static_cast(*f); - } - - fifo_abstract *f; - unsigned int rSz; // reset size - T rVal; // resetValue - int size_call_count; - - public: - fifo() : f(0), rSz(0), size_call_count(0) { get_fifo(); } - fifo(int init) : f(0), rSz(init), size_call_count(0) { get_fifo(); } - fifo(int init, T val) : f(0), rSz(init), rVal(val), size_call_count(0) { get_fifo(); } - ~fifo() { delete f; } - - inline T read() { return f->read(); } - inline bool nb_read(T& t) { return f->nb_read(t); } - - inline void write(const T& t) { f->write(t); } - inline bool nb_write(T& t) { return f->nb_write(t); } - - inline bool empty() { return f->empty(); } - inline bool available(unsigned int k) const { return f->available(k); } - inline unsigned int size() const { return f->size(); } - inline unsigned int num_free() const { return f->num_free(); } - - inline void reset() { - f->reset(); - for (int i=0; i<(int)rSz; i++) - write(rVal); - } - - inline const T &operator[](unsigned int pos) const { return f->operator_sb(pos, rVal); } - - void incr_size_call_count() { ++size_call_count; } - int get_size_call_count() { - int tmp=size_call_count; - size_call_count=0; - return tmp; - } - - // obsolete - provided here for backward compatibility with ac_channel - struct iterator { - iterator operator+(unsigned int pos_) const { - return iterator(itr, pos_); - } - private: - friend class fifo; - iterator(const typename std::deque::iterator &itr_, unsigned int pos=0) - : itr(itr_) { if (pos) itr += pos; } - typename std::deque::iterator itr; - }; - iterator begin() { - AC_CHANNEL_ASSERT(f->get_fifo_type() == fifo_ac_channel_type, ac_channel_exception::no_insert_defined_for_channel_type); - return iterator(get_fifo().ch.begin()); - } - void insert(iterator itr, const T& t) { - AC_CHANNEL_ASSERT(f->get_fifo_type() == fifo_ac_channel_type, ac_channel_exception::no_insert_defined_for_channel_type); - get_fifo().ch.insert(itr.itr,t); - } - }; - fifo chan; - -private: - // Prevent the compiler from autogenerating these. - // (This enforces that channels are always passed by reference.) - ac_channel(const ac_channel< T >&); - ac_channel& operator=(const ac_channel< T >&); -}; - -template -ac_channel::ac_channel() : chan() {} - -template -ac_channel::ac_channel(int init) : chan(init) -{ - for (int i=init; i>0; i--) { - T dc; - write(dc); - } -} - -template -ac_channel::ac_channel(int init, T val) : chan(init, val) -{ - for (int i=init; i>0; i--) - write(val); -} - -template -inline std::ostream& operator<< (std::ostream& os, ac_channel &a) -{ - for (unsigned int i=0; i 0) os << " "; - os << a[i]; - } - return os; -} - -// This general case is meant to cover non channel (or array of them) args -// Its result will be ignored -template -bool nb_read_chan_rdy(T &x) { return true; } - -template -bool nb_read_chan_rdy(ac_channel &chan) { return !chan.empty(); } - -template -bool nb_read_chan_rdy(ac_channel (&chan)[N] ) { - bool r = true; - for(int i=0; i 199711L -template -bool nb_read_chan_rdy(Args&... args) { - const int n_args = sizeof...(args); - // only every other arg is a channel (or an array of channels) - bool rdy[n_args] = { (nb_read_chan_rdy(args))... 
}; - bool r = true; - for(int i=0; i < n_args; i+=2) - r &= rdy[i]; - return r; -} -#endif - -template -void nb_read_r(ac_channel &chan, T &var) { - chan.nb_read(var); -} - -template -void nb_read_r(ac_channel (&chan)[N], T (&var)[N]) { - for(int i=0; i 199711L -template -void nb_read_r(ac_channel &chan, T &var, Args&... args) { - chan.nb_read(var); - nb_read_r(args...); -} - -template -void nb_read_r(ac_channel (&chan)[N], T (&var)[N], Args&... args) { - for(int i=0; i -bool nb_read_join(Args&... args) { - if(nb_read_chan_rdy(args...)) { - nb_read_r(args...); - return true; - } - return false; -} -#endif - -/* undo macro adjustments */ -#ifdef AC_CHANNEL_ASSERT -# undef AC_CHANNEL_ASSERT -#endif - -#endif diff --git a/hls4ml/hls4ml/templates/quartus/ac_types/ac_complex.h b/hls4ml/hls4ml/templates/quartus/ac_types/ac_complex.h deleted file mode 100644 index 56821a0..0000000 --- a/hls4ml/hls4ml/templates/quartus/ac_types/ac_complex.h +++ /dev/null @@ -1,445 +0,0 @@ -/************************************************************************** - * * - * Algorithmic C (tm) Datatypes * - * * - * Software Version: 4.0 * - * * - * Release Date : Sat Jun 13 12:35:18 PDT 2020 * - * Release Type : Production Release * - * Release Build : 4.0.0 * - * * - * Copyright 2008-2019, Mentor Graphics Corporation, * - * * - * All Rights Reserved. * - * * - ************************************************************************** - * Licensed under the Apache License, Version 2.0 (the "License"); * - * you may not use this file except in compliance with the License. * - * You may obtain a copy of the License at * - * * - * http://www.apache.org/licenses/LICENSE-2.0 * - * * - * Unless required by applicable law or agreed to in writing, software * - * distributed under the License is distributed on an "AS IS" BASIS, * - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * - * implied. * - * See the License for the specific language governing permissions and * - * limitations under the License. * - ************************************************************************** - * * - * The most recent version of this package is available at github. * - * * - *************************************************************************/ - -/* -// Source: ac_complex.h -// Description: complex type with parameterized type that can be: -// - C integer types -// - C floating point types -// - ac_int -// - ac_fixed -// - ac_float -// ac_complex based on C integers, ac_int, ac_fixed and ac_float can -// be mixed -// Author: Andres Takach, Ph.D. 
-*/ - -#ifndef __AC_COMPLEX_H -#define __AC_COMPLEX_H - -#include - -#ifdef __AC_NAMESPACE -namespace __AC_NAMESPACE { -#endif - -template class ac_complex; - -namespace ac_private { - // specializations after definition of ac_complex - template - struct rt_ac_complex_T { - template - struct op1 { - typedef typename T::template rt_T< ac_complex >::mult mult; - typedef typename T::template rt_T< ac_complex >::plus plus; - typedef typename T::template rt_T< ac_complex >::minus2 minus; - typedef typename T::template rt_T< ac_complex >::minus minus2; - typedef typename T::template rt_T< ac_complex >::logic logic; - typedef typename T::template rt_T< ac_complex >::div2 div; - typedef typename T::template rt_T< ac_complex >::div div2; - }; - }; -} // namespace ac_private - -template -class ac_complex { -public: // temporary workaround - T _r; - T _i; - typedef typename ac_private::map::t map_T; - typedef typename map_T::rt_unary::mag_sqr T_sqr; - typedef typename ac_private::map::t map_T_sqr; - typedef typename ac_private::map::t map_T_mag; -public: - typedef T element_type; - template - struct rt_T { - typedef typename ac_private::map::t map_T2; - typedef typename ac_private::rt_ac_complex_T::template op1::mult mult; - typedef typename ac_private::rt_ac_complex_T::template op1::plus plus; - typedef typename ac_private::rt_ac_complex_T::template op1::minus minus; - typedef typename ac_private::rt_ac_complex_T::template op1::minus2 minus2; - typedef typename ac_private::rt_ac_complex_T::template op1::logic logic; - typedef typename ac_private::rt_ac_complex_T::template op1::div div; - typedef typename ac_private::rt_ac_complex_T::template op1::div2 div2; - typedef ac_complex arg1; - }; - - struct rt_unary { - typedef typename map_T_sqr::template rt_T::plus mag_sqr; - typedef typename map_T_mag::template rt_T::plus mag; // overly conservative for signed - typedef ac_complex neg; - template - struct set { - typedef ac_complex::sum> sum; - }; - }; - - ac_complex() { } - template - ac_complex(const ac_complex &c) : _r(c.r()), _i(c.i()) {} - template - ac_complex(const T2 &r) : _r(r), _i(0) {} - template - ac_complex(const T2 &r, const T3 &i) : _r(r), _i(i) {} - const T &r() const { return _r; } - const T &i() const { return _i; } - T &r() { return _r; } - T &i() { return _i; } - const T &real() const { return _r; } - const T &imag() const { return _i; } - T &real() { return _r; } - T &imag() { return _i; } - template - void set_r(const T2 &r) { _r = r;} - template - void set_i(const T2 &i) { _i = i;} - - // const binary operators are global rather than members because of compiler errors due to ambiguity - // (would appear as a compiler bug) - - template - ac_complex &operator +=(const ac_complex &op2) { - _r += op2.r(); - _i += op2.i(); - return *this; - } - - template - ac_complex &operator +=(const T2 &op2) { - _r += op2; - return *this; - } - - template - ac_complex &operator -=(const ac_complex &op2) { - _r -= op2.r(); - _i -= op2.i(); - return *this; - } - - template - ac_complex &operator -=(const T2 &op2) { - _r -= op2; - return *this; - } - - template - ac_complex &operator *=(const ac_complex &op2) { - T r0 = _r*op2.r() - _i*op2.i(); - _i = _r*op2.i() + _i*op2.r(); - _r = r0; - return *this; - } - - template - ac_complex &operator *=(const T2 &op2) { - _r = _r*op2; - _i = _i*op2; - return *this; - } - - template - ac_complex &operator /=(const ac_complex &op2) { - typename ac_complex::rt_unary::mag_sqr d = op2.mag_sqr(); - T r0 = (_r*op2.r() + _i*op2.i())/d; - _i = (_i*op2.r() - 
_r*op2.i())/d; - _r = r0; - return *this; - } - - template - ac_complex &operator /=(const T2 &op2) { - _r = _r/op2; - _i = _i/op2; - return *this; - } - - // Arithmetic Unary -------------------------------------------------------- - ac_complex operator +() { - return *this; - } - typename rt_unary::neg operator -() const { - typename rt_unary::neg res(-_r, -_i); - return res; - } - - // ! ------------------------------------------------------------------------ - bool operator ! () const { - return !_r && !_i; - } - - typename rt_unary::neg conj() const { - typename rt_unary::neg res(_r, -_i); - return res; - } - - typename rt_unary::mag_sqr mag_sqr() const { - return _r*_r + _i*_i; - } - - ac_complex< ac_int<2,true> > sign_conj() const { - return ac_complex< ac_int<2,true> >( - _r ? (_r < 0 ? -1 : 1) : 0, - _i ? (_i < 0 ? 1 : -1) : 0 - ); - } - - inline static std::string type_name() { - typedef typename ac_private::map::t map_T; - std::string r = "ac_complex<"; - r += map_T::type_name(); - r += '>'; - return r; - } - -}; - -namespace ac_private { - // with T2 == ac_complex - template - struct rt_ac_complex_T< ac_complex > { - template - struct op1 { - typedef ac_complex::plus> plus; - typedef ac_complex::minus> minus; - typedef ac_complex::minus2> minus2; - typedef ac_complex::logic> logic; - typedef ac_complex::div> div; - typedef ac_complex::div2> div2; - typedef ac_complex::mult, typename ac::rt_2T::mult>::plus, - typename ac::rt_2T::mult, typename ac::rt_2T::mult>::minus - >::logic> mult; - }; - }; - // with T2 == ac_float - template< AC_FL_T0(2) > - struct rt_ac_complex_T< AC_FL0(2) > { - typedef AC_FL0(2) T2; - template - struct op1 { - typedef ac_complex::plus> plus; - typedef ac_complex::minus> minus; - typedef ac_complex::minus2> minus2; - typedef ac_complex::logic> logic; - typedef ac_complex::div> div; - typedef ac_complex::div2> div2; - typedef ac_complex::mult> mult; - }; - }; - // with T2 == ac_fixed - template - struct rt_ac_complex_T< ac_fixed > { - typedef ac_fixed T2; - template - struct op1 { - typedef ac_complex::plus> plus; - typedef ac_complex::minus> minus; - typedef ac_complex::minus2> minus2; - typedef ac_complex::logic> logic; - typedef ac_complex::div> div; - typedef ac_complex::div2> div2; - typedef ac_complex::mult> mult; - }; - }; - // with T2 == ac_int - template - struct rt_ac_complex_T< ac_int > { - typedef ac_int T2; - template - struct op1 { - typedef ac_complex::plus> plus; - typedef ac_complex::minus> minus; - typedef ac_complex::minus2> minus2; - typedef ac_complex::logic> logic; - typedef ac_complex::div> div; - typedef ac_complex::div2> div2; - typedef ac_complex::mult> mult; - }; - }; - // with T2 == c_type - template - struct rt_ac_complex_T< c_type > { - typedef c_type T2; - template - struct op1 { - typedef ac_complex::plus> plus; - typedef ac_complex::minus> minus; - typedef ac_complex::minus2> minus2; - typedef ac_complex::logic> logic; - typedef ac_complex::div> div; - typedef ac_complex::div2> div2; - typedef ac_complex::mult> mult; - }; - }; -} - -template -inline typename ac_complex::template rt_T >::plus operator +(const ac_complex &op, const ac_complex &op2) { - typename ac_complex::template rt_T >::plus res( op.r() + op2.r(), op.i() + op2.i() ); - return res; -} - -template -inline typename ac_complex::template rt_T::plus operator +(const T &op, const ac_complex &op2) { - typename ac_complex::template rt_T::plus res( op + op2.r(), op2.i() ); - return res; -} - -template -inline typename ac_complex::template rt_T::plus operator +(const 
ac_complex &op, const T2 &op2) { - typename ac_complex::template rt_T::plus res( op.r() + op2, op.i() ); - return res; -} - -template -inline typename ac_complex::template rt_T >::minus operator -(const ac_complex &op, const ac_complex &op2) { - typename ac_complex::template rt_T >::minus res( op.r() - op2.r(), op.i() - op2.i() ); - return res; -} - -template -inline typename ac_complex::template rt_T::minus2 operator -(const T &op, const ac_complex &op2) { - typename ac_complex::template rt_T::minus2 res( op - op2.r(), -op2.i() ); - return res; -} - -template -inline typename ac_complex::template rt_T::minus operator -(const ac_complex &op, const T2 &op2) { - typename ac_complex::template rt_T::minus res( op.r() - op2, op.i() ); - return res; -} - -template -inline typename ac_complex::template rt_T >::mult operator *(const ac_complex &op, const ac_complex &op2) { - typename ac_complex::template rt_T >::mult res( op.r()*op2.r() - op.i()*op2.i(), op.i()*op2.r() + op.r()*op2.i() ); - return res; -} - -template -inline typename ac_complex::template rt_T::mult operator *(const T &op, const ac_complex &op2) { - typename ac_complex::template rt_T::mult res( op*op2.r(), op*op2.i()); - return res; -} - -template -inline typename ac_complex::template rt_T::mult operator *(const ac_complex &op, const T2 &op2) { - typename ac_complex::template rt_T::mult res( op.r()*op2, op.i()*op2 ); - return res; -} - -template -inline typename ac_complex::template rt_T >::div operator /(const ac_complex &op, const ac_complex &op2) { - typename ac_complex::rt_unary::mag_sqr d = op2.mag_sqr(); - typename ac_complex::template rt_T >::div res((op.r()*op2.r() + op.i()*op2.i())/d, (op.i()*op2.r() - op.r()*op2.i())/d); - return res; -} - -template -inline typename ac_complex::template rt_T::div operator /(const ac_complex &op, const T2 &op2) { - typename ac_complex::template rt_T::div res( op.r()/op2, op.i()/op2 ); - return res; -} - -template -inline typename ac_complex::template rt_T::div2 operator /(const T &op, const ac_complex &op2) { - typename ac_complex::rt_unary::mag_sqr d = op2.mag_sqr(); - typename ac_complex::template rt_T::div2 res(op*op2.r()/d, - op*op2.i()/d); - return res; -} - -template -inline bool operator == (const ac_complex &op, const ac_complex &op2) { - return op.r() == op2.r() && op.i() == op2.i(); -} - -template -inline bool operator == (const T &op, const ac_complex &op2) { - return op == op2.r() && op2.i() == 0; -} - -template -inline bool operator == (const ac_complex &op, const T2 &op2) { - return op.r() == op2 && op.i() == 0; -} - -template -inline bool operator != (const ac_complex &op, const ac_complex &op2) { - return op.r() != op2.r() || op.i() != op2.i(); -} - -template -inline bool operator != (const T &op, const ac_complex &op2) { - return op != op2.r() || op2.i() != 0; -} - -template -inline bool operator != (const ac_complex &op, const T2 &op2) { - return op.r() != op2 || op.i() != 0; -} - -// Stream -------------------------------------------------------------------- - -template -inline std::ostream& operator << (std::ostream &os, const ac_complex &x) { -#ifndef __SYNTHESIS__ - os << "(" << x.r() << ", " << x.i() << ")"; -#endif - return os; -} - -template -inline ac_complex value(ac_complex) { - T val = value((T) 0); - ac_complex r(val, val); - return r; -} - -namespace ac { - template - inline bool init_array(ac_complex *a, int n) { - T val = value((T) 0); - ac_complex t(val, val); - for(int i=0; i < n; i++) - a[i] = t; - return true; - } -} - -#ifdef __AC_NAMESPACE -} -#endif 
-
-#endif // __AC_COMPLEX_H
diff --git a/hls4ml/hls4ml/templates/quartus/ac_types/ac_fixed.h b/hls4ml/hls4ml/templates/quartus/ac_types/ac_fixed.h
deleted file mode 100644
index cb95db8..0000000
--- a/hls4ml/hls4ml/templates/quartus/ac_types/ac_fixed.h
+++ /dev/null
@@ -1,1546 +0,0 @@
-/**************************************************************************
- *                                                                        *
- *  Algorithmic C (tm) Datatypes                                          *
- *                                                                        *
- *  Software Version: 4.0                                                 *
- *                                                                        *
- *  Release Date    : Sat Jun 13 12:35:18 PDT 2020                        *
- *  Release Type    : Production Release                                  *
- *  Release Build   : 4.0.0                                               *
- *                                                                        *
- *  Copyright 2005-2020, Mentor Graphics Corporation,                     *
- *                                                                        *
- *  All Rights Reserved.                                                  *
- *                                                                        *
- **************************************************************************
- *  Licensed under the Apache License, Version 2.0 (the "License");       *
- *  you may not use this file except in compliance with the License.      *
- *  You may obtain a copy of the License at                               *
- *                                                                        *
- *      http://www.apache.org/licenses/LICENSE-2.0                        *
- *                                                                        *
- *  Unless required by applicable law or agreed to in writing, software   *
- *  distributed under the License is distributed on an "AS IS" BASIS,     *
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or       *
- *  implied.                                                              *
- *  See the License for the specific language governing permissions and   *
- *  limitations under the License.                                        *
- **************************************************************************
- *                                                                        *
- *  The most recent version of this package is available at github.      *
- *                                                                        *
- *************************************************************************/
-
-/*
-//  Source:         ac_fixed.h
-//  Description:    class for fixed point operation handling in C++
-//  Author:         Andres Takach, Ph.D.
-*/
-
-#ifndef __AC_FIXED_H
-#define __AC_FIXED_H
-
-#include "ac_int.h"
-
-#if (defined(__GNUC__) && __GNUC__ < 3 && !defined(__EDG__))
-#error GCC version 3 or greater is required to include this header file
-#endif
-
-#if (defined(_MSC_VER) && _MSC_VER < 1400 && !defined(__EDG__))
-#error Microsoft Visual Studio 8 or newer is required to include this header file
-#endif
-
-#if (defined(_MSC_VER) && !defined(__EDG__))
-#pragma warning( push )
-#pragma warning( disable: 4127 4308 4365 4514 4800 )
-#endif
-
-#ifndef __SYNTHESIS__
-#ifndef __AC_FIXED_UTILITY_BASE
-#define __AC_FIXED_UTILITY_BASE
-#endif
-
-#endif
-
-#ifdef __SYNTHESIS__
-#ifdef __AC_FIXED_NUMERICAL_ANALYSIS_BASE
-#undef __AC_FIXED_NUMERICAL_ANALYSIS_BASE
-#endif
-#endif
-
-#ifdef __AC_NAMESPACE
-namespace __AC_NAMESPACE {
-#endif
-
-namespace ac_private {
-  template<typename T>
-  struct rt_ac_fixed_T {
-    template<int W, int I, bool S, ac_q_mode Q, ac_o_mode O>
-    struct op1 {
-      typedef typename T::template rt_T< ac_fixed<W,I,S,Q,O> >::mult mult;
-      typedef typename T::template rt_T< ac_fixed<W,I,S,Q,O> >::plus plus;
-      typedef typename T::template rt_T< ac_fixed<W,I,S,Q,O> >::minus2 minus;
-      typedef typename T::template rt_T< ac_fixed<W,I,S,Q,O> >::minus minus2;
-      typedef typename T::template rt_T< ac_fixed<W,I,S,Q,O> >::logic logic;
-      typedef typename T::template rt_T< ac_fixed<W,I,S,Q,O> >::div2 div;
-      typedef typename T::template rt_T< ac_fixed<W,I,S,Q,O> >::div div2;
-    };
-  };
-  // specializations after definition of ac_fixed
-}
-
-namespace ac {
-  template<int W, int I, bool S, ac_q_mode Q, ac_o_mode O>
-  class basic_num_ovf_base {
-    bool d_enable;
-  public:
-    basic_num_ovf_base() : d_enable(true) {}
-    void enable_ovf(bool a) { d_enable = a; }
-    bool is_enabled() const { return d_enable; }
-    template<int W2, int I2, bool S2, ac_q_mode Q2, ac_o_mode O2>
-    void update(bool overflow, bool neg, const basic_num_ovf_base<W2,I2,S2,Q2,O2> &op2) {
-#ifndef __AC_OVERRIDE_OVF_UPDATE_BODY
-      if(d_enable) {
-        if(overflow) {
-          std::cerr << (neg ?
"-" : "+") << "OVF: "; - std::cerr << type_name() << " ( " << basic_num_ovf_base::type_name(); - std::cerr << " ( " << op2.value().to_double() << " ) )" << std::endl; - } - } -#else - __AC_OVERRIDE_OVF_UPDATE_BODY -#endif - } - void update(bool overflow, bool neg, double op2) { -#ifndef __AC_OVERRIDE_OVF_UPDATE2_BODY - if(d_enable) { - if(overflow) { - std::cerr << (neg ? "-" : "+") << "OVF: "; - std::cerr << type_name() << " ( " << "double"; - std::cerr << " ( " << op2 << " ) )" << std::endl; - } - } -#else - __AC_OVERRIDE_OVF_UPDATE2_BODY -#endif - } - const ac_fixed &value() const; - static std::string type_name(); - }; -} - -////////////////////////////////////////////////////////////////////////////// -// ac_fixed -////////////////////////////////////////////////////////////////////////////// - -//enum ac_q_mode { AC_TRN, AC_RND, AC_TRN_ZERO, AC_RND_ZERO, AC_RND_INF, AC_RND_MIN_INF, AC_RND_CONV, AC_RND_CONV_ODD }; -//enum ac_o_mode { AC_WRAP, AC_SAT, AC_SAT_ZERO, AC_SAT_SYM }; - -template -class ac_fixed : private ac_private::iv<(W+31+!S)/32> -#ifndef __SYNTHESIS__ -__AC_FIXED_UTILITY_BASE -#endif -#ifdef __AC_FIXED_NUMERICAL_ANALYSIS_BASE -, public __AC_FIXED_NUMERICAL_ANALYSIS_BASE -#endif -{ -#if defined(__SYNTHESIS__) && !defined(AC_IGNORE_BUILTINS) -#pragma builtin -#endif - - enum {N=(W+31+!S)/32}; - - template - struct rt_priv { - enum {w_shiftl = AC_MAX(W+W2,1) }; - typedef ac_fixed shiftl; - }; - - typedef ac_private::iv Base; - - inline void bit_adjust() { - const unsigned rem = (32-W)&31; - Base::v[N-1] = S ? ((signed) ((unsigned)Base::v[N-1] << rem) >> rem) : (rem ? - ((unsigned) Base::v[N-1] << rem) >> rem : 0); - } - inline Base &base() { return *this; } - inline const Base &base() const { return *this; } - - inline void overflow_adjust(bool overflow, bool neg) { - if(O==AC_WRAP) { - bit_adjust(); - return; - } - else if(O==AC_SAT_ZERO) { - if(overflow) - ac_private::iv_extend(Base::v, 0); - else - bit_adjust(); - } - else if(S) { - if(overflow) { - if(!neg) { - ac_private::iv_extend(Base::v, ~0); - Base::v[N-1] = ~((unsigned)~0 << ((W-1)&31)); - } else { - ac_private::iv_extend(Base::v, 0); - Base::v[N-1] = ((unsigned)~0 << ((W-1)&31)); - if(O==AC_SAT_SYM) - Base::v[0] |= 1; - } - } else - bit_adjust(); - } - else { - if(overflow) { - if(!neg) { - ac_private::iv_extend(Base::v, ~0); - Base::v[N-1] = ~((unsigned)~0 << (W&31)); - } else - ac_private::iv_extend(Base::v, 0); - } else - bit_adjust(); - } - } - - inline bool quantization_adjust(bool qb, bool r, bool s) { - if(Q==AC_TRN) - return false; - if(Q==AC_RND_ZERO) - qb &= s || r; - else if(Q==AC_RND_MIN_INF) - qb &= r; - else if(Q==AC_RND_INF) - qb &= !s || r; - else if(Q==AC_RND_CONV) - qb &= (Base::v[0] & 1) || r; - else if(Q==AC_RND_CONV_ODD) - qb &= (!(Base::v[0] & 1)) || r; - else if(Q==AC_TRN_ZERO) - qb = s && ( qb || r ); - return ac_private::iv_uadd_carry(Base::v, qb, Base::v); - } - - inline bool is_neg() const { return S && Base::v[N-1] < 0; } - -public: - static const int width = W; - static const int i_width = I; - static const bool sign = S; - static const ac_o_mode o_mode = O; - static const ac_q_mode q_mode = Q; - static const int e_width = 0; -#ifdef __AC_FIXED_NUMERICAL_ANALYSIS_BASE - static const bool compute_overflow_for_wrap = true; -#else - static const bool compute_overflow_for_wrap = false; -#endif - - template - struct rt { - enum { - F=W-I, - F2=W2-I2, - mult_w = W+W2, - mult_i = I+I2, - mult_s = S||S2, - plus_w = AC_MAX(I+(S2&&!S),I2+(S&&!S2))+1+AC_MAX(F,F2), - plus_i = 
AC_MAX(I+(S2&&!S),I2+(S&&!S2))+1, - plus_s = S||S2, - minus_w = AC_MAX(I+(S2&&!S),I2+(S&&!S2))+1+AC_MAX(F,F2), - minus_i = AC_MAX(I+(S2&&!S),I2+(S&&!S2))+1, - minus_s = true, - div_w = W+AC_MAX(W2-I2,0)+S2, - div_i = I+(W2-I2)+S2, - div_s = S||S2, - logic_w = AC_MAX(I+(S2&&!S),I2+(S&&!S2))+AC_MAX(F,F2), - logic_i = AC_MAX(I+(S2&&!S),I2+(S&&!S2)), - logic_s = S||S2 - }; - typedef ac_fixed mult; - typedef ac_fixed plus; - typedef ac_fixed minus; - typedef ac_fixed logic; - typedef ac_fixed div; - typedef ac_fixed arg1; - }; - - template - struct rt_T { - typedef typename ac_private::map::t map_T; - typedef typename ac_private::rt_ac_fixed_T::template op1::mult mult; - typedef typename ac_private::rt_ac_fixed_T::template op1::plus plus; - typedef typename ac_private::rt_ac_fixed_T::template op1::minus minus; - typedef typename ac_private::rt_ac_fixed_T::template op1::minus2 minus2; - typedef typename ac_private::rt_ac_fixed_T::template op1::logic logic; - typedef typename ac_private::rt_ac_fixed_T::template op1::div div; - typedef typename ac_private::rt_ac_fixed_T::template op1::div2 div2; - typedef ac_fixed arg1; - }; - - struct rt_unary { - enum { - neg_w = W+1, - neg_i = I+1, - neg_s = true, - mag_sqr_w = 2*W-S, - mag_sqr_i = 2*I-S, - mag_sqr_s = false, - mag_w = W+S, - mag_i = I+S, - mag_s = false, - leading_sign_w = ac::log2_ceil::val, - leading_sign_s = false - }; - typedef ac_int leading_sign; - typedef ac_fixed neg; - typedef ac_fixed mag_sqr; - typedef ac_fixed mag; - template - struct set { - enum { sum_w = W + ac::log2_ceil::val, sum_i = (sum_w-W) + I, sum_s = S}; - typedef ac_fixed sum; - }; - }; - - ac_fixed(const ac_fixed &op): Base(op) { } - - template friend class ac_fixed; - ac_fixed() { -#if !defined(__SYNTHESIS__) && defined(AC_DEFAULT_IN_RANGE) - bit_adjust(); - if( O==AC_SAT_SYM && S && Base::v[N-1] < 0 && (W > 1 ? ac_private::iv_equal_zeros_to(Base::v) : true) ) - Base::v[0] |= 1; -#endif - } - template - inline ac_fixed (const ac_fixed &op) { - enum {N2=(W2+31+!S2)/32, F=W-I, F2=W2-I2, QUAN_INC = F2>F && !(Q==AC_TRN || (Q==AC_TRN_ZERO && !S2)) }; - bool carry = false; - // handle quantization - if(F2 == F) - Base::operator =(op); - else if(F2 > F) { - op.template const_shift_r(*this); -// ac_private::iv_const_shift_r(op.v, Base::v); - if(Q!=AC_TRN && !(Q==AC_TRN_ZERO && !S2)) { - bool qb = (F2-F > W2) ? (op.v[N2-1] < 0) : (bool) op[F2-F-1]; - bool r = (F2 > F+1) ? !ac_private::iv_equal_zeros_to(op.v) : false; - carry = quantization_adjust(qb, r, S2 && op.v[N2-1] < 0); - } - } - else // no quantization - op.template const_shift_l(*this); -// ac_private::iv_const_shift_l(op.v, Base::v); - // handle overflow - if((O!=AC_WRAP || compute_overflow_for_wrap) - && ((!S && S2) || I-S < I2-S2+(QUAN_INC || (S2 && O==AC_SAT_SYM && (O2 != AC_SAT_SYM || F2 > F) ))) - ) { // saturation - bool deleted_bits_zero = !(W&31)&S || !(Base::v[N-1] >> (W&31)); - bool deleted_bits_one = !(W&31)&S || !~(Base::v[N-1] >> (W&31)); - bool neg_src; - if(F2-F+32*N < W2) { - bool all_ones = ac_private::iv_equal_ones_from(op.v); - deleted_bits_zero = deleted_bits_zero && (carry ? all_ones : ac_private::iv_equal_zeros_from(op.v)); - deleted_bits_one = deleted_bits_one && (carry ? 
ac_private::iv_equal_ones_from<1+F2-F+32*N,N2>(op.v) && !op[F2-F+32*N] : all_ones); - neg_src = S2 && op.v[N2-1] < 0 && !(carry & all_ones); - } - else - neg_src = S2 && op.v[N2-1] < 0 && Base::v[N-1] < 0; - bool neg_trg = S && (bool) this->operator[](W-1); - bool overflow = !neg_src && (neg_trg || !deleted_bits_zero); - overflow |= neg_src && (!neg_trg || !deleted_bits_one); - if(O==AC_SAT_SYM && S && S2) - overflow |= neg_src && (W > 1 ? ac_private::iv_equal_zeros_to(Base::v) : true); - overflow_adjust(overflow, neg_src); -#ifdef __AC_FIXED_NUMERICAL_ANALYSIS_BASE - __AC_FIXED_NUMERICAL_ANALYSIS_BASE::update(overflow,neg_src,op); -#endif - } - else - bit_adjust(); - } - - template - inline ac_fixed (const ac_int &op) { - ac_fixed f_op; - f_op.base().operator =(op); - *this = f_op; - } - - template - typename rt_priv::shiftl shiftl() const { - typedef typename rt_priv::shiftl shiftl_t; - shiftl_t r; - Base::template const_shift_l(r); - return r; - } - - inline ac_fixed( bool b ) { *this = (ac_int<1,false>) b; } - inline ac_fixed( char b ) { *this = (ac_int<8,true>) b; } - inline ac_fixed( signed char b ) { *this = (ac_int<8,true>) b; } - inline ac_fixed( unsigned char b ) { *this = (ac_int<8,false>) b; } - inline ac_fixed( signed short b ) { *this = (ac_int<16,true>) b; } - inline ac_fixed( unsigned short b ) { *this = (ac_int<16,false>) b; } - inline ac_fixed( signed int b ) { *this = (ac_int<32,true>) b; } - inline ac_fixed( unsigned int b ) { *this = (ac_int<32,false>) b; } - inline ac_fixed( signed long b ) { *this = (ac_int) b; } - inline ac_fixed( unsigned long b ) { *this = (ac_int) b; } - inline ac_fixed( Slong b ) { *this = (ac_int<64,true>) b; } - inline ac_fixed( Ulong b ) { *this = (ac_int<64,false>) b; } - - inline ac_fixed( double d ) { - double di = ac_private::ldexpr<-(I+!S+((32-W-!S)&31))>(d); - bool o, qb, r; - bool neg_src = d < 0; - Base::conv_from_fraction(di, &qb, &r, &o); - quantization_adjust(qb, r, neg_src); - // a neg number may become non neg (0) after quantization - neg_src &= o || Base::v[N-1] < 0; - - if(O!=AC_WRAP || compute_overflow_for_wrap) { // saturation - bool overflow; - bool neg_trg = S && (bool) this->operator[](W-1); - if(o) { - overflow = true; - } else { - bool deleted_bits_zero = !(W&31)&S || !(Base::v[N-1] >> (W&31)); - bool deleted_bits_one = !(W&31)&S || !~(Base::v[N-1] >> (W&31)); - overflow = !neg_src && (neg_trg || !deleted_bits_zero); - overflow |= neg_src && (!neg_trg || !deleted_bits_one); - } - if(O==AC_SAT_SYM && S) - overflow |= neg_src && (W > 1 ? 
ac_private::iv_equal_zeros_to(Base::v) : true); - overflow_adjust(overflow, neg_src); -#ifdef __AC_FIXED_NUMERICAL_ANALYSIS_BASE - __AC_FIXED_NUMERICAL_ANALYSIS_BASE::update(overflow,neg_src,d); -#endif - } else - bit_adjust(); - } - -#if (defined(_MSC_VER) && !defined(__EDG__)) -#pragma warning( push ) -#pragma warning( disable: 4700 ) -#endif -#if (defined(__GNUC__) && ( __GNUC__ == 4 && __GNUC_MINOR__ >= 6 || __GNUC__ > 4 ) && !defined(__EDG__)) -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wuninitialized" -#endif -#if defined(__clang__) -#pragma clang diagnostic push -#pragma clang diagnostic ignored "-Wuninitialized" -#endif - template - inline ac_fixed &set_val() { - if(V == AC_VAL_DC) { - ac_fixed r; - Base::operator =(r); - bit_adjust(); - } - else if(V == AC_VAL_0 || V == AC_VAL_MIN || V == AC_VAL_QUANTUM) { - Base::operator =(0); - if(S && V == AC_VAL_MIN) { - const unsigned rem = (W-1)&31; - Base::v[N-1] = ((unsigned)~0 << rem); - if(O == AC_SAT_SYM) { - if(W == 1) - Base::v[0] = 0; - else - Base::v[0] |= 1; - } - } else if(V == AC_VAL_QUANTUM) - Base::v[0] = 1; - } - else { // AC_VAL_MAX - Base::operator =(-1); - const unsigned int rem = (32-W - (unsigned) !S )&31; - Base::v[N-1] = ((unsigned) (-1) >> 1) >> rem; - } - return *this; - } -#if (defined(_MSC_VER) && !defined(__EDG__)) -#pragma warning( pop ) -#endif -#if (defined(__GNUC__) && ( __GNUC__ == 4 && __GNUC_MINOR__ >= 6 || __GNUC__ > 4 ) && !defined(__EDG__)) -#pragma GCC diagnostic pop -#endif -#if defined(__clang__) -#pragma clang diagnostic pop -#endif - - // Explicit conversion functions to ac_int that captures all integer bits (bits are truncated) - inline ac_int to_ac_int() const { return ((ac_fixed) *this).template slc(0); } - - // Explicit conversion functions to C built-in types ------------- - inline int to_int() const { return ((I-W) >= 32) ? 0 : (signed int) to_ac_int(); } - inline unsigned to_uint() const { return ((I-W) >= 32) ? 0 : (unsigned int) to_ac_int(); } - inline long to_long() const { return ((I-W) >= ac_private::long_w) ? 0 : (signed long) to_ac_int(); } - inline unsigned long to_ulong() const { return ((I-W) >= ac_private::long_w) ? 0 : (unsigned long) to_ac_int(); } - inline Slong to_int64() const { return ((I-W) >= 64) ? 0 : (Slong) to_ac_int(); } - inline Ulong to_uint64() const { return ((I-W) >= 64) ? 0 : (Ulong) to_ac_int(); } - inline double to_double() const { return ac_private::ldexpr(Base::to_double()); } - - inline int length() const { return W; } - - inline std::string to_string(ac_base_mode base_rep, bool sign_mag = false) const { - // base_rep == AC_DEC => sign_mag == don't care (always print decimal in sign magnitude) - char r[(W-AC_MIN(AC_MIN(W-I,I),0)+31)/32*32+5] = {0}; - int i = 0; - if(sign_mag) - r[i++] = is_neg() ? '-' : '+'; - else if (base_rep == AC_DEC && is_neg()) - r[i++] = '-'; - if(base_rep != AC_DEC) { - r[i++] = '0'; - r[i++] = base_rep == AC_BIN ? 'b' : (base_rep == AC_OCT ? 
'o' : 'x'); - } - ac_fixed t; - if( (base_rep == AC_DEC || sign_mag) && is_neg() ) - t = operator -(); - else - t = *this; - ac_fixed i_part = t; - ac_fixed f_part = t; - i += ac_private::to_string(i_part.v, AC_MAX(I+1,1), sign_mag, base_rep, false, r+i); - if(W-I > 0) { - r[i++] = '.'; - if(!ac_private::to_string(f_part.v, W-I, false, base_rep, true, r+i)) - r[--i] = 0; - } - if(!i) { - r[0] = '0'; - r[1] = 0; - } - return std::string(r); - } - inline static std::string type_name() { - const char *tf[] = {"false", "true" }; - const char *q[] = {"AC_TRN", "AC_RND", "AC_TRN_ZERO", "AC_RND_ZERO", "AC_RND_INF", "AC_RND_MIN_INF", "AC_RND_CONV", "AC_RND_CONV_ODD" }; - const char *o[] = {"AC_WRAP", "AC_SAT", "AC_SAT_ZERO", "AC_SAT_SYM" }; - std::string r = "ac_fixed<"; - r += ac_int<32,true>(W).to_string(AC_DEC) + ','; - r += ac_int<32,true>(I).to_string(AC_DEC) + ','; - r += tf[S]; - r += ','; - r += q[Q]; - r += ','; - r += o[O]; - r += '>'; - return r; - } - - // Arithmetic : Binary ---------------------------------------------------- - template - typename rt::mult operator *( const ac_fixed &op2) const { - typename rt::mult r; - Base::mult(op2, r); - return r; - } - template - typename rt::plus operator +( const ac_fixed &op2) const { - enum { F=W-I, F2=W2-I2 }; - typename rt::plus r; - if(F == F2) - Base::add(op2, r); - else if(F > F2) - Base::add(op2.template shiftl(), r); - else - shiftl().add(op2, r); - return r; - } - template - typename rt::minus operator -( const ac_fixed &op2) const { - enum { F=W-I, F2=W2-I2 }; - typename rt::minus r; - if(F == F2) - Base::sub(op2, r); - else if(F > F2) - Base::sub(op2.template shiftl(), r); - else - shiftl().sub(op2, r); - return r; - } -#if (defined(__GNUC__) && ( __GNUC__ == 4 && __GNUC_MINOR__ >= 6 || __GNUC__ > 4 ) && !defined(__EDG__)) -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wenum-compare" -#endif - template - typename rt::div operator /( const ac_fixed &op2) const { - typename rt::div r; - enum { Num_w = W+AC_MAX(W2-I2,0), Num_i = I, Num_w_minus = Num_w+S, Num_i_minus = Num_i+S, - N1 = ac_fixed::N, N1minus = ac_fixed::N, - N2 = ac_fixed::N, N2minus = ac_fixed::N, - num_s = S + (N1minus > N1), den_s = S2 + (N2minus > N2), Nr = rt::div::N }; - ac_fixed t = *this; - t.template div(op2, r); - return r; - } -#if (defined(__GNUC__) && ( __GNUC__ == 4 && __GNUC_MINOR__ >= 6 || __GNUC__ > 4 ) && !defined(__EDG__)) -#pragma GCC diagnostic pop -#endif - // Arithmetic assign ------------------------------------------------------ - template - ac_fixed &operator *=( const ac_fixed &op2) { - *this = this->operator *(op2); - return *this; - } - template - ac_fixed &operator +=( const ac_fixed &op2) { - *this = this->operator +(op2); - return *this; - } - template - ac_fixed &operator -=( const ac_fixed &op2) { - *this = this->operator -(op2); - return *this; - } - template - ac_fixed &operator /=( const ac_fixed &op2) { - *this = this->operator /(op2); - return *this; - } - // increment/decrement by quantum (smallest difference that can be represented) - // Arithmetic prefix increment, decrement --------------------------------- - ac_fixed &operator ++() { - ac_fixed<1,I-W+1,false> q; - q.template set_val(); - operator += (q); - return *this; - } - ac_fixed &operator --() { - ac_fixed<1,I-W+1,false> q; - q.template set_val(); - operator -= (q); - return *this; - } - // Arithmetic postfix increment, decrement --------------------------------- - const ac_fixed operator ++(int) { - ac_fixed t = *this; - ac_fixed<1,I-W+1,false> q; - 
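// q is the quantum of this ac_fixed type: a one-bit, unsigned ac_fixed whose
// single bit carries the weight of this type's LSB, 2^(I-W); loading it with
// set_val<AC_VAL_QUANTUM> makes ++/-- step the value by exactly one LSB.
// Illustrative sketch (values assumed, not part of this patch):
//   ac_fixed<8,4,false> x = 1.0;
//   ++x;  // x == 1.0625, i.e. 1 + 2^-4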
q.template set_val(); - operator += (q); - return t; - } - const ac_fixed operator --(int) { - ac_fixed t = *this; - ac_fixed<1,I-W+1,false> q; - q.template set_val(); - operator -= (q); - return t; - } - // Arithmetic Unary -------------------------------------------------------- - ac_fixed operator +() { - return *this; - } - typename rt_unary::neg operator -() const { - typename rt_unary::neg r; - Base::neg(r); - r.bit_adjust(); - return r; - } - // ! ------------------------------------------------------------------------ - bool operator ! () const { - return Base::equal_zero(); - } - - // Bitwise (arithmetic) unary: complement ----------------------------- - ac_fixed operator ~() const { - ac_fixed r; - Base::bitwise_complement(r); - return r; - } - // Bitwise (not arithmetic) bit complement ----------------------------- - ac_fixed bit_complement() const { - ac_fixed r; - Base::bitwise_complement(r); - r.bit_adjust(); - return r; - } - // Bitwise (not arithmetic): and, or, xor ---------------------------------- - template - typename rt::logic operator &( const ac_fixed &op2) const { - enum { F=W-I, F2=W2-I2 }; - typename rt::logic r; - if(F == F2) - Base::bitwise_and(op2, r); - else if(F > F2) - Base::bitwise_and(op2.template shiftl(), r); - else - shiftl().bitwise_and(op2, r); - return r; - } - template - typename rt::logic operator |( const ac_fixed &op2) const { - enum { F=W-I, F2=W2-I2 }; - typename rt::logic r; - if(F == F2) - Base::bitwise_or(op2, r); - else if(F > F2) - Base::bitwise_or(op2.template shiftl(), r); - else - shiftl().bitwise_or(op2, r); - return r; - } - template - typename rt::logic operator ^( const ac_fixed &op2) const { - enum { F=W-I, F2=W2-I2 }; - typename rt::logic r; - if(F == F2) - Base::bitwise_xor(op2, r); - else if(F > F2) - Base::bitwise_xor(op2.template shiftl(), r); - else - shiftl().bitwise_xor(op2, r); - return r; - } - // Bitwise assign (not arithmetic): and, or, xor ---------------------------- - template - ac_fixed &operator &= ( const ac_fixed &op2 ) { - *this = this->operator &(op2); - return *this; - } - template - ac_fixed &operator |= ( const ac_fixed &op2 ) { - *this = this->operator |(op2); - return *this; - } - template - ac_fixed &operator ^= ( const ac_fixed &op2 ) { - *this = this->operator ^(op2); - return *this; - } - // Shift (result constrained by left operand) ------------------------------- - template - ac_fixed operator << ( const ac_int &op2 ) const { - // currently not written to overflow or quantize (neg shift) - ac_fixed r; - Base::shift_l2(op2.to_int(), r); - r.bit_adjust(); - return r; - } - template - ac_fixed operator << ( const ac_int &op2 ) const { - // currently not written to overflow - ac_fixed r; - Base::shift_l(op2.to_uint(), r); - r.bit_adjust(); - return r; - } - template - ac_fixed operator >> ( const ac_int &op2 ) const { - // currently not written to quantize or overflow (neg shift) - ac_fixed r; - Base::shift_r2(op2.to_int(), r); - r.bit_adjust(); - return r; - } - template - ac_fixed operator >> ( const ac_int &op2 ) const { - // currently not written to quantize - ac_fixed r; - Base::shift_r(op2.to_uint(), r); - r.bit_adjust(); - return r; - } - // Shift assign ------------------------------------------------------------ - template - ac_fixed operator <<= ( const ac_int &op2 ) { - // currently not written to overflow or quantize (neg shift) - Base r; - Base::shift_l2(op2.to_int(), r); - Base::operator=(r); - bit_adjust(); - return *this; - } - template - ac_fixed operator <<= ( const ac_int &op2 ) { - // 
currently not written to overflow - Base r; - Base::shift_l(op2.to_uint(), r); - Base::operator=(r); - bit_adjust(); - return *this; - } - template - ac_fixed operator >>= ( const ac_int &op2 ) { - // currently not written to quantize or overflow (neg shift) - Base r; - Base::shift_r2(op2.to_int(), r); - Base::operator=(r); - bit_adjust(); - return *this; - } - template - ac_fixed operator >>= ( const ac_int &op2 ) { - // currently not written to quantize - Base r; - Base::shift_r(op2.to_uint(), r); - Base::operator=(r); - bit_adjust(); - return *this; - } - // Relational --------------------------------------------------------------- - template - bool operator == ( const ac_fixed &op2) const { - enum { F=W-I, F2=W2-I2 }; - if(F == F2) - return Base::equal(op2); - else if(F > F2) - return Base::equal(op2.template shiftl()); - else - return shiftl().equal(op2); - } - template - bool operator != ( const ac_fixed &op2) const { - enum { F=W-I, F2=W2-I2 }; - if(F == F2) - return ! Base::equal(op2); - else if(F > F2) - return ! Base::equal(op2.template shiftl()); - else - return ! shiftl().equal(op2); - } - template - bool operator < ( const ac_fixed &op2) const { - enum { F=W-I, F2=W2-I2 }; - if(F == F2) - return Base::less_than(op2); - else if(F > F2) - return Base::less_than(op2.template shiftl()); - else - return shiftl().less_than(op2); - } - template - bool operator >= ( const ac_fixed &op2) const { - enum { F=W-I, F2=W2-I2 }; - if(F == F2) - return ! Base::less_than(op2); - else if(F > F2) - return ! Base::less_than(op2.template shiftl()); - else - return ! shiftl().less_than(op2); - } - template - bool operator > ( const ac_fixed &op2) const { - enum { F=W-I, F2=W2-I2 }; - if(F == F2) - return Base::greater_than(op2); - else if(F > F2) - return Base::greater_than(op2.template shiftl()); - else - return shiftl().greater_than(op2); - } - template - bool operator <= ( const ac_fixed &op2) const { - enum { F=W-I, F2=W2-I2 }; - if(F == F2) - return ! Base::greater_than(op2); - else if(F > F2) - return ! Base::greater_than(op2.template shiftl()); - else - return ! 
shiftl().greater_than(op2); - } - bool operator == ( double d) const { - if(is_neg() != (d < 0.0)) - return false; - double di = ac_private::ldexpr<-(I+!S+((32-W-!S)&31))>(d); - bool overflow, qb, r; - ac_fixed t; - t.conv_from_fraction(di, &qb, &r, &overflow); - if(qb || r || overflow) - return false; - return operator == (t); - } - bool operator != ( double d) const { - return !operator == ( d ); - } - bool operator < ( double d) const { - if(is_neg() != (d < 0.0)) - return is_neg(); - double di = ac_private::ldexpr<-(I+!S+((32-W-!S)&31))>(d); - bool overflow, qb, r; - ac_fixed t; - t.conv_from_fraction(di, &qb, &r, &overflow); - if(is_neg() && overflow) - return false; - return (!is_neg() && overflow) || ((qb || r) && operator <= (t)) || operator < (t); - } - bool operator >= ( double d) const { - return !operator < ( d ); - } - bool operator > ( double d) const { - if(is_neg() != (d < 0.0)) - return !is_neg(); - double di = ac_private::ldexpr<-(I+!S+((32-W-!S)&31))>(d); - bool overflow, qb, r; - ac_fixed t; - t.conv_from_fraction(di, &qb, &r, &overflow); - if(!is_neg() && overflow ) - return false; - return (is_neg() && overflow) || operator > (t); - } - bool operator <= ( double d) const { - return !operator > ( d ); - } - - // Bit and Slice Select ----------------------------------------------------- - template - inline const ac_int slc(const ac_int &index) const { - ac_int r; - AC_ASSERT(index.to_int() >= 0, "Attempting to read slc with negative indeces"); - unsigned uindex = ac_int(index).to_uint(); - Base::shift_r(uindex, r); - r.bit_adjust(); - return r; - } - - template - inline const ac_int slc(signed index) const { - ac_int r; - AC_ASSERT(index >= 0, "Attempting to read slc with negative indeces"); - unsigned uindex = index & ((unsigned)~0 >> 1); - Base::shift_r(uindex, r); - r.bit_adjust(); - return r; - } - template - inline const ac_int slc(unsigned uindex) const { - ac_int r; - Base::shift_r(uindex, r); - r.bit_adjust(); - return r; - } - - template - inline ac_fixed &set_slc(const ac_int lsb, const ac_int &slc) { - AC_ASSERT(lsb.to_int() + W2 <= W && lsb.to_int() >= 0, "Out of bounds set_slc"); - if(W == W2) - Base::operator =(slc); - else { - unsigned ulsb = ac_int(lsb).to_uint(); - Base::set_slc(ulsb, W2, (ac_int) slc); - } - bit_adjust(); // in case sign bit was assigned - return *this; - } - template - inline ac_fixed &set_slc(signed lsb, const ac_int &slc) { - AC_ASSERT(lsb + W2 <= W && lsb >= 0, "Out of bounds set_slc"); - if(W == W2) - Base::operator =(slc); - else { - unsigned ulsb = lsb & ((unsigned)~0 >> 1); - Base::set_slc(ulsb, W2, (ac_int) slc); - } - bit_adjust(); // in case sign bit was assigned - return *this; - } - template - inline ac_fixed &set_slc(unsigned ulsb, const ac_int &slc) { - AC_ASSERT(ulsb + W2 <= W, "Out of bounds set_slc"); - if(W == W2) - Base::operator =(slc); - else - Base::set_slc(ulsb, W2, (ac_int) slc); - bit_adjust(); // in case sign bit was assigned - return *this; - } - - template - inline ac::sliceref range() { - #if __cplusplus > 199711L - static_assert(Msb-Lsb+1 > 0, "Range length not positive: MSB < LSB"); - static_assert(Lsb >= 0, "LSB is negative"); - static_assert(Msb < W, "MSB >= W"); - #endif - return ac::sliceref(Base::v); - } - - class ac_bitref { -# if defined(__SYNTHESIS__) && !defined(AC_IGNORE_BUILTINS) -# pragma builtin -# endif - ac_fixed &d_bv; - unsigned d_index; - public: - ac_bitref( ac_fixed *bv, unsigned index=0 ) : d_bv(*bv), d_index(index) {} - operator bool () const { return (d_index < W) ? 
(d_bv.v[d_index>>5]>>(d_index&31) & 1) : 0; } - - inline ac_bitref operator = ( int val ) { - // lsb of int (val&1) is written to bit - if(d_index < W) { - int *pval = &d_bv.v[d_index>>5]; - *pval ^= (*pval ^ ((unsigned) val << (d_index&31) )) & 1 << (d_index&31); - d_bv.bit_adjust(); // in case sign bit was assigned - } - return *this; - } - template - inline ac_bitref operator = ( const ac_int &val ) { - return operator =(val.to_int()); - } - inline ac_bitref operator = ( const ac_bitref &val ) { - return operator =((int) (bool) val); - } - }; - - ac_bitref operator [] ( unsigned int uindex) { - AC_ASSERT(uindex < W, "Attempting to read bit beyond MSB"); - ac_bitref bvh( this, uindex ); - return bvh; - } - ac_bitref operator [] ( int index) { - AC_ASSERT(index >= 0, "Attempting to read bit with negative index"); - unsigned uindex = index & ((unsigned)~0 >> 1); - AC_ASSERT(uindex < W, "Attempting to read bit beyond MSB"); - ac_bitref bvh( this, uindex ); - return bvh; - } - template - ac_bitref operator [] ( const ac_int &index) { - AC_ASSERT(index.to_int() >= 0, "Attempting to read bit with negative index"); - unsigned uindex = ac_int(index).to_uint(); - AC_ASSERT(uindex < W, "Attempting to read bit beyond MSB"); - ac_bitref bvh( this, uindex ); - return bvh; - } - - bool operator [] ( unsigned int uindex) const { - AC_ASSERT(uindex < W, "Attempting to read bit beyond MSB"); - return (uindex < W) ? (Base::v[uindex>>5]>>(uindex&31) & 1) : 0; - } - bool operator [] ( int index) const { - AC_ASSERT(index >= 0, "Attempting to read bit with negative index"); - unsigned uindex = index & ((unsigned)~0 >> 1); - AC_ASSERT(uindex < W, "Attempting to read bit beyond MSB"); - return (uindex < W) ? (Base::v[uindex>>5]>>(uindex&31) & 1) : 0; - } - template - bool operator [] ( const ac_int &index) const { - AC_ASSERT(index.to_int() >= 0, "Attempting to read bit with negative index"); - unsigned uindex = ac_int(index).to_uint(); - AC_ASSERT(uindex < W, "Attempting to read bit beyond MSB"); - return (uindex < W) ? 
(Base::v[uindex>>5]>>(uindex&31) & 1) : 0; - } - typename rt_unary::leading_sign leading_sign() const { - unsigned ls = Base::leading_bits(S & (Base::v[N-1] < 0)) - (32*N - W)-S; - return ls; - } - typename rt_unary::leading_sign leading_sign(bool &all_sign) const { - unsigned ls = Base::leading_bits(S & (Base::v[N-1] < 0)) - (32*N - W)-S; - all_sign = (ls == W-S); - return ls; - } - // returns false if number is denormal - template - bool normalize(ac_int &exp) { - ac_int m = this->template slc(0); - bool r = m.normalize(exp); - this->set_slc(0,m); - return r; - } - // returns false if number is denormal, minimum exponent is reserved (usually for encoding special values/errors) - template - bool normalize_RME(ac_int &exp) { - ac_int m = this->template slc(0); - bool r = m.normalize_RME(exp); - this->set_slc(0,m); - return r; - } - inline void bit_fill_hex(const char *str) { - // Zero Pads if str is too short, throws ms bits away if str is too long - // Asserts if anything other than 0-9a-fA-F is encountered - ac_int x; - x.bit_fill_hex(str); - set_slc(0, x); - } - template - inline void bit_fill(const int (&ivec)[N], bool bigendian=true) { - // bit_fill from integer vector - // if W > N*32, missing most significant bits are zeroed - // if W < N*32, additional bits in ivec are ignored (no overflow checking) - // - // Example: - // ac_fixed<80,40,false> x; int vec[] = { 0xffffa987, 0x6543210f, 0xedcba987 }; - // x.bit_fill(vec); // vec[0] fill bits 79-64 - ac_int x; - x.bit_fill(ivec, bigendian); - set_slc(0, x); - } -}; - -namespace ac { - template - struct ac_fixed_represent { - enum { t_w = ac_private::c_type_params::W, t_i = t_w, t_s = ac_private::c_type_params::S }; - typedef ac_fixed type; - }; - template<> struct ac_fixed_represent {}; - template<> struct ac_fixed_represent {}; - template - struct ac_fixed_represent< ac_int > { - typedef ac_fixed type; - }; - template - struct ac_fixed_represent< ac_fixed > { - typedef ac_fixed type; - }; -} - -namespace ac_private { - // with T == ac_fixed - template - struct rt_ac_fixed_T< ac_fixed > { - typedef ac_fixed fx2_t; - template - struct op1 { - typedef ac_fixed fx_t; - typedef typename fx_t::template rt::mult mult; - typedef typename fx_t::template rt::plus plus; - typedef typename fx_t::template rt::minus minus; - typedef typename fx2_t::template rt::minus minus2; - typedef typename fx_t::template rt::logic logic; - typedef typename fx_t::template rt::div div; - typedef typename fx2_t::template rt::div div2; - }; - }; - // with T == ac_int - template - struct rt_ac_fixed_T< ac_int > { - typedef ac_fixed fx2_t; - template - struct op1 { - typedef ac_fixed fx_t; - typedef typename fx_t::template rt::mult mult; - typedef typename fx_t::template rt::plus plus; - typedef typename fx_t::template rt::minus minus; - typedef typename fx2_t::template rt::minus minus2; - typedef typename fx_t::template rt::logic logic; - typedef typename fx_t::template rt::div div; - typedef typename fx2_t::template rt::div div2; - }; - }; - - template - struct rt_ac_fixed_T< c_type > { - typedef typename ac::ac_fixed_represent::type fx2_t; - enum { W2 = fx2_t::width, I2 = W2, S2 = fx2_t::sign }; - template - struct op1 { - typedef ac_fixed fx_t; - typedef typename fx_t::template rt::mult mult; - typedef typename fx_t::template rt::plus plus; - typedef typename fx_t::template rt::minus minus; - typedef typename fx2_t::template rt::minus minus2; - typedef typename fx_t::template rt::logic logic; - typedef typename fx_t::template rt::div div; - typedef typename 
fx2_t::template rt::div div2; - }; - }; -} - - -// Specializations for constructors on integers that bypass bit adjusting -// and are therefore more efficient -template<> inline ac_fixed<1,1,true,AC_TRN,AC_WRAP>::ac_fixed( bool b ) { v[0] = b ? -1 : 0; } - -template<> inline ac_fixed<1,1,false,AC_TRN,AC_WRAP>::ac_fixed( bool b ) { v[0] = b; } -template<> inline ac_fixed<1,1,false,AC_TRN,AC_WRAP>::ac_fixed( signed char b ) { v[0] = b&1; } -template<> inline ac_fixed<1,1,false,AC_TRN,AC_WRAP>::ac_fixed( unsigned char b ) { v[0] = b&1; } -template<> inline ac_fixed<1,1,false,AC_TRN,AC_WRAP>::ac_fixed( signed short b ) { v[0] = b&1; } -template<> inline ac_fixed<1,1,false,AC_TRN,AC_WRAP>::ac_fixed( unsigned short b ) { v[0] = b&1; } -template<> inline ac_fixed<1,1,false,AC_TRN,AC_WRAP>::ac_fixed( signed int b ) { v[0] = b&1; } -template<> inline ac_fixed<1,1,false,AC_TRN,AC_WRAP>::ac_fixed( unsigned int b ) { v[0] = b&1; } -template<> inline ac_fixed<1,1,false,AC_TRN,AC_WRAP>::ac_fixed( signed long b ) { v[0] = b&1; } -template<> inline ac_fixed<1,1,false,AC_TRN,AC_WRAP>::ac_fixed( unsigned long b ) { v[0] = b&1; } -template<> inline ac_fixed<1,1,false,AC_TRN,AC_WRAP>::ac_fixed( Ulong b ) { v[0] = (int) b&1; } -template<> inline ac_fixed<1,1,false,AC_TRN,AC_WRAP>::ac_fixed( Slong b ) { v[0] = (int) b&1; } - -template<> inline ac_fixed<8,8,true,AC_TRN,AC_WRAP>::ac_fixed( bool b ) { v[0] = b; } -template<> inline ac_fixed<8,8,false,AC_TRN,AC_WRAP>::ac_fixed( bool b ) { v[0] = b; } -template<> inline ac_fixed<8,8,true,AC_TRN,AC_WRAP>::ac_fixed( signed char b ) { v[0] = b; } -template<> inline ac_fixed<8,8,false,AC_TRN,AC_WRAP>::ac_fixed( unsigned char b ) { v[0] = b; } -template<> inline ac_fixed<8,8,true,AC_TRN,AC_WRAP>::ac_fixed( unsigned char b ) { v[0] = (signed char) b; } -template<> inline ac_fixed<8,8,false,AC_TRN,AC_WRAP>::ac_fixed( signed char b ) { v[0] = (unsigned char) b; } - -template<> inline ac_fixed<16,16,true,AC_TRN,AC_WRAP>::ac_fixed( bool b ) { v[0] = b; } -template<> inline ac_fixed<16,16,false,AC_TRN,AC_WRAP>::ac_fixed( bool b ) { v[0] = b; } -template<> inline ac_fixed<16,16,true,AC_TRN,AC_WRAP>::ac_fixed( signed char b ) { v[0] = b; } -template<> inline ac_fixed<16,16,false,AC_TRN,AC_WRAP>::ac_fixed( unsigned char b ) { v[0] = b; } -template<> inline ac_fixed<16,16,true,AC_TRN,AC_WRAP>::ac_fixed( unsigned char b ) { v[0] = b; } -template<> inline ac_fixed<16,16,false,AC_TRN,AC_WRAP>::ac_fixed( signed char b ) { v[0] = (unsigned short) b; } -template<> inline ac_fixed<16,16,true,AC_TRN,AC_WRAP>::ac_fixed( signed short b ) { v[0] = b; } -template<> inline ac_fixed<16,16,false,AC_TRN,AC_WRAP>::ac_fixed( unsigned short b ) { v[0] = b; } -template<> inline ac_fixed<16,16,true,AC_TRN,AC_WRAP>::ac_fixed( unsigned short b ) { v[0] = (signed short) b; } -template<> inline ac_fixed<16,16,false,AC_TRN,AC_WRAP>::ac_fixed( signed short b ) { v[0] = (unsigned short) b; } - -template<> inline ac_fixed<32,32,true,AC_TRN,AC_WRAP>::ac_fixed( signed int b ) { v[0] = b; } -template<> inline ac_fixed<32,32,true,AC_TRN,AC_WRAP>::ac_fixed( unsigned int b ) { v[0] = b; } -template<> inline ac_fixed<32,32,false,AC_TRN,AC_WRAP>::ac_fixed( signed int b ) { v[0] = b; v[1] = 0;} -template<> inline ac_fixed<32,32,false,AC_TRN,AC_WRAP>::ac_fixed( unsigned int b ) { v[0] = b; v[1] = 0;} - -template<> inline ac_fixed<32,32,true,AC_TRN,AC_WRAP>::ac_fixed( Slong b ) { v[0] = (int) b; } -template<> inline ac_fixed<32,32,true,AC_TRN,AC_WRAP>::ac_fixed( Ulong b ) { v[0] = (int) b; } -template<> inline 
ac_fixed<32,32,false,AC_TRN,AC_WRAP>::ac_fixed( Slong b ) { v[0] = (int) b; v[1] = 0;}
-template<> inline ac_fixed<32,32,false,AC_TRN,AC_WRAP>::ac_fixed( Ulong b ) { v[0] = (int) b; v[1] = 0;}
-
-template<> inline ac_fixed<64,64,true,AC_TRN,AC_WRAP>::ac_fixed( Slong b ) { v[0] = (int) b; v[1] = (int) (b >> 32); }
-template<> inline ac_fixed<64,64,true,AC_TRN,AC_WRAP>::ac_fixed( Ulong b ) { v[0] = (int) b; v[1] = (int) (b >> 32);}
-template<> inline ac_fixed<64,64,false,AC_TRN,AC_WRAP>::ac_fixed( Slong b ) { v[0] = (int) b; v[1] = (int) ((Ulong) b >> 32); v[2] = 0; }
-template<> inline ac_fixed<64,64,false,AC_TRN,AC_WRAP>::ac_fixed( Ulong b ) { v[0] = (int) b; v[1] = (int) (b >> 32); v[2] = 0; }
-
-
-// Stream --------------------------------------------------------------------
-
-template<int W, int I, bool S, ac_q_mode Q, ac_o_mode O>
-inline std::ostream& operator << (std::ostream &os, const ac_fixed<W,I,S,Q,O> &x) {
-#ifndef __SYNTHESIS__
-  if ((os.flags() & std::ios::hex) != 0) {
-    os << x.to_string(AC_HEX);
-  } else if ((os.flags() & std::ios::oct) != 0) {
-    os << x.to_string(AC_OCT);
-  } else {
-    os << x.to_string(AC_DEC);
-  }
-#endif
-  return os;
-}
-
-
-// Macros for Binary Operators with C Integers --------------------------------------------
-
-#define FX_BIN_OP_WITH_INT_2I(BIN_OP, C_TYPE, WI, SI) \
-  template<int W, int I, bool S, ac_q_mode Q, ac_o_mode O> \
-  inline ac_fixed<W,I,S,Q,O> operator BIN_OP ( const ac_fixed<W,I,S,Q,O> &op, C_TYPE i_op) { \
-    return op.operator BIN_OP (ac_int<WI,SI>(i_op)); \
-  }
-
-#define FX_BIN_OP_WITH_INT(BIN_OP, C_TYPE, WI, SI, RTYPE) \
-  template<int W, int I, bool S, ac_q_mode Q, ac_o_mode O> \
-  inline typename ac_fixed<WI,WI,SI>::template rt<W,I,S>::RTYPE operator BIN_OP ( C_TYPE i_op, const ac_fixed<W,I,S,Q,O> &op) { \
-    return ac_fixed<WI,WI,SI>(i_op).operator BIN_OP (op); \
-  } \
-  template<int W, int I, bool S, ac_q_mode Q, ac_o_mode O> \
-  inline typename ac_fixed<W,I,S>::template rt<WI,WI,SI>::RTYPE operator BIN_OP ( const ac_fixed<W,I,S,Q,O> &op, C_TYPE i_op) { \
-    return op.operator BIN_OP (ac_fixed<WI,WI,SI>(i_op)); \
-  }
-
-#define FX_REL_OP_WITH_INT(REL_OP, C_TYPE, W2, S2) \
-  template<int W, int I, bool S, ac_q_mode Q, ac_o_mode O> \
-  inline bool operator REL_OP ( const ac_fixed<W,I,S,Q,O> &op, C_TYPE op2) { \
-    return op.operator REL_OP (ac_fixed<W2,W2,S2>(op2)); \
-  } \
-  template<int W, int I, bool S, ac_q_mode Q, ac_o_mode O> \
-  inline bool operator REL_OP ( C_TYPE op2, const ac_fixed<W,I,S,Q,O> &op) { \
-    return ac_fixed<W2,W2,S2>(op2).operator REL_OP (op); \
-  }
-
-#define FX_ASSIGN_OP_WITH_INT_2(ASSIGN_OP, C_TYPE, W2, S2) \
-  template<int W, int I, bool S, ac_q_mode Q, ac_o_mode O> \
-  inline ac_fixed<W,I,S,Q,O> &operator ASSIGN_OP ( ac_fixed<W,I,S,Q,O> &op, C_TYPE op2) { \
-    return op.operator ASSIGN_OP (ac_fixed<W2,W2,S2>(op2)); \
-  }
-
-#define FX_ASSIGN_OP_WITH_INT_2I(ASSIGN_OP, C_TYPE, W2, S2) \
-  template<int W, int I, bool S, ac_q_mode Q, ac_o_mode O> \
-  inline ac_fixed<W,I,S,Q,O> operator ASSIGN_OP ( ac_fixed<W,I,S,Q,O> &op, C_TYPE op2) { \
-    return op.operator ASSIGN_OP (ac_int<W2,S2>(op2)); \
-  }
-
-#define FX_OPS_WITH_INT(C_TYPE, WI, SI) \
-  FX_BIN_OP_WITH_INT(*, C_TYPE, WI, SI, mult) \
-  FX_BIN_OP_WITH_INT(+, C_TYPE, WI, SI, plus) \
-  FX_BIN_OP_WITH_INT(-, C_TYPE, WI, SI, minus) \
-  FX_BIN_OP_WITH_INT(/, C_TYPE, WI, SI, div) \
-  FX_BIN_OP_WITH_INT_2I(>>, C_TYPE, WI, SI) \
-  FX_BIN_OP_WITH_INT_2I(<<, C_TYPE, WI, SI) \
-  FX_BIN_OP_WITH_INT(&, C_TYPE, WI, SI, logic) \
-  FX_BIN_OP_WITH_INT(|, C_TYPE, WI, SI, logic) \
-  FX_BIN_OP_WITH_INT(^, C_TYPE, WI, SI, logic) \
-  \
-  FX_REL_OP_WITH_INT(==, C_TYPE, WI, SI) \
-  FX_REL_OP_WITH_INT(!=, C_TYPE, WI, SI) \
-  FX_REL_OP_WITH_INT(>, C_TYPE, WI, SI) \
-  FX_REL_OP_WITH_INT(>=, C_TYPE, WI, SI) \
-  FX_REL_OP_WITH_INT(<, C_TYPE, WI, SI) \
-  FX_REL_OP_WITH_INT(<=, C_TYPE, WI, SI) \
-  \
-  FX_ASSIGN_OP_WITH_INT_2(+=, C_TYPE, WI, SI) \
-  FX_ASSIGN_OP_WITH_INT_2(-=, C_TYPE, WI, SI) \
-  FX_ASSIGN_OP_WITH_INT_2(*=, C_TYPE, WI, SI) \
-  FX_ASSIGN_OP_WITH_INT_2(/=, C_TYPE, WI, SI) \
-  FX_ASSIGN_OP_WITH_INT_2I(>>=, C_TYPE, WI, SI) \
-  FX_ASSIGN_OP_WITH_INT_2I(<<=, C_TYPE, WI,
SI) \ - FX_ASSIGN_OP_WITH_INT_2(&=, C_TYPE, WI, SI) \ - FX_ASSIGN_OP_WITH_INT_2(|=, C_TYPE, WI, SI) \ - FX_ASSIGN_OP_WITH_INT_2(^=, C_TYPE, WI, SI) - -// --------------------------------------- End of Macros for Binary Operators with C Integers - -#ifdef AC_FIXED_NS_FOR_MIXED_OPERATORS -namespace ac { - namespace ops_with_other_types { -#endif - // Binary Operators with C Integers -------------------------------------------- - FX_OPS_WITH_INT(bool, 1, false) - FX_OPS_WITH_INT(char, 8, true) - FX_OPS_WITH_INT(signed char, 8, true) - FX_OPS_WITH_INT(unsigned char, 8, false) - FX_OPS_WITH_INT(short, 16, true) - FX_OPS_WITH_INT(unsigned short, 16, false) - FX_OPS_WITH_INT(int, 32, true) - FX_OPS_WITH_INT(unsigned int, 32, false) - FX_OPS_WITH_INT(long, ac_private::long_w, true) - FX_OPS_WITH_INT(unsigned long, ac_private::long_w, false) - FX_OPS_WITH_INT(Slong, 64, true) - FX_OPS_WITH_INT(Ulong, 64, false) - // -------------------------------------- End of Binary Operators with Integers -#ifdef AC_FIXED_NS_FOR_MIXED_OPERATORS - } // ops_with_other_types namespace -} // ac namespace -#endif - - -// Macros for Binary Operators with ac_int -------------------------------------------- - -#define FX_BIN_OP_WITH_AC_INT_1(BIN_OP, RTYPE) \ - template \ - inline typename ac_fixed::template rt::RTYPE operator BIN_OP ( const ac_int &i_op, const ac_fixed &op) { \ - return ac_fixed(i_op).operator BIN_OP (op); \ - } - -#define FX_BIN_OP_WITH_AC_INT_2(BIN_OP, RTYPE) \ - template \ - inline typename ac_fixed::template rt::RTYPE operator BIN_OP ( const ac_fixed &op, const ac_int &i_op) { \ - return op.operator BIN_OP (ac_fixed(i_op)); \ - } - -#define FX_BIN_OP_WITH_AC_INT(BIN_OP, RTYPE) \ - FX_BIN_OP_WITH_AC_INT_1(BIN_OP, RTYPE) \ - FX_BIN_OP_WITH_AC_INT_2(BIN_OP, RTYPE) - -#define FX_REL_OP_WITH_AC_INT(REL_OP) \ - template \ - inline bool operator REL_OP ( const ac_fixed &op, const ac_int &op2) { \ - return op.operator REL_OP (ac_fixed(op2)); \ - } \ - template \ - inline bool operator REL_OP ( ac_int &op2, const ac_fixed &op) { \ - return ac_fixed(op2).operator REL_OP (op); \ - } - -#define FX_ASSIGN_OP_WITH_AC_INT(ASSIGN_OP) \ - template \ - inline ac_fixed &operator ASSIGN_OP ( ac_fixed &op, const ac_int &op2) { \ - return op.operator ASSIGN_OP (ac_fixed(op2)); \ - } \ - template \ - inline ac_int &operator ASSIGN_OP ( ac_int &op, const ac_fixed &op2) { \ - return op.operator ASSIGN_OP (op2.to_ac_int()); \ - } - -// -------------------------------------------- End of Macros for Binary Operators with ac_int - -#ifdef AC_FIXED_NS_FOR_MIXED_OPERATORS -namespace ac { - namespace ops_with_other_types { -#endif - // Binary Operators with ac_int -------------------------------------------- - FX_BIN_OP_WITH_AC_INT(*, mult) - FX_BIN_OP_WITH_AC_INT(+, plus) - FX_BIN_OP_WITH_AC_INT(-, minus) - FX_BIN_OP_WITH_AC_INT(/, div) - FX_BIN_OP_WITH_AC_INT(&, logic) - FX_BIN_OP_WITH_AC_INT(|, logic) - FX_BIN_OP_WITH_AC_INT(^, logic) - - FX_REL_OP_WITH_AC_INT(==) - FX_REL_OP_WITH_AC_INT(!=) - FX_REL_OP_WITH_AC_INT(>) - FX_REL_OP_WITH_AC_INT(>=) - FX_REL_OP_WITH_AC_INT(<) - FX_REL_OP_WITH_AC_INT(<=) - - FX_ASSIGN_OP_WITH_AC_INT(+=) - FX_ASSIGN_OP_WITH_AC_INT(-=) - FX_ASSIGN_OP_WITH_AC_INT(*=) - FX_ASSIGN_OP_WITH_AC_INT(/=) - FX_ASSIGN_OP_WITH_AC_INT(&=) - FX_ASSIGN_OP_WITH_AC_INT(|=) - FX_ASSIGN_OP_WITH_AC_INT(^=) - // -------------------------------------- End of Binary Operators with ac_int - - // Relational Operators with double -------------------------------------- - template - inline bool operator == ( double op, const 
ac_fixed &op2) { - return op2.operator == (op); - } - template - inline bool operator != ( double op, const ac_fixed &op2) { - return op2.operator != (op); - } - template - inline bool operator > ( double op, const ac_fixed &op2) { - return op2.operator < (op); - } - template - inline bool operator < ( double op, const ac_fixed &op2) { - return op2.operator > (op); - } - template - inline bool operator <= ( double op, const ac_fixed &op2) { - return op2.operator >= (op); - } - template - inline bool operator >= ( double op, const ac_fixed &op2) { - return op2.operator <= (op); - } - // -------------------------------------- End of Relational Operators with double -#ifdef AC_FIXED_NS_FOR_MIXED_OPERATORS - } // ops_with_other_types namespace -} // ac namespace -using namespace ac::ops_with_other_types; -#endif - - -#if (defined(_MSC_VER) && !defined(__EDG__)) -#pragma warning( disable: 4700 ) -#endif -#if (defined(__GNUC__) && ( __GNUC__ == 4 && __GNUC_MINOR__ >= 6 || __GNUC__ > 4 ) && !defined(__EDG__)) -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wuninitialized" -#endif -#if defined(__clang__) -#pragma clang diagnostic push -#pragma clang diagnostic ignored "-Wuninitialized" -#endif - -// Global templatized functions for easy initialization to special values -template -inline ac_fixed value(ac_fixed) { - ac_fixed r; - return r.template set_val(); -} - -namespace ac { -// PUBLIC FUNCTIONS -// function to initialize (or uninitialize) arrays - template - inline bool init_array(ac_fixed *a, int n) { - ac_fixed t; - t.template set_val(); - for(int i=0; i < n; i++) - a[i] = t; - return true; - } - - inline ac_fixed<54,2,true> frexp_d(double d, ac_int<11,true> &exp) { - enum {Min_Exp = -1022, Max_Exp = 1023, Mant_W = 52, Denorm_Min_Exp = Min_Exp - Mant_W}; - if(!d) { - exp = 0; - return 0; - } - int exp_i; - double f0 = frexp(d, &exp_i); - AC_ASSERT(exp_i <= Max_Exp+1, "Exponent greater than standard double-precision float exponent max (+1024). It is probably an extended double"); - AC_ASSERT(exp_i >= Denorm_Min_Exp+1, "Exponent less than standard double-precision float exponent min (-1021). It is probably an extended double"); - exp_i--; - int rshift = exp_i < Min_Exp ? Min_Exp - exp_i : (exp_i > Min_Exp && f0 < 0 && f0 >= -0.5) ? -1 : 0; - exp = exp_i + rshift; - ac_int f_i = f0 * ((Ulong) 1 << (Mant_W + 1 -rshift)); - ac_fixed r; - r.set_slc(0, f_i); - return r; - } - inline ac_fixed<25,2,true> frexp_f(float f, ac_int<8,true> &exp) { - enum {Min_Exp = -126, Max_Exp = 127, Mant_W = 23, Denorm_Min_Exp = Min_Exp - Mant_W}; - if(!f) { - exp = 0; - return 0; - } - int exp_i; - float f0 = frexpf(f, &exp_i); - AC_ASSERT(exp_i <= Max_Exp+1, "Exponent greater than standard single-precision float exponent max (+128). It is probably an extended float"); - AC_ASSERT(exp_i >= Denorm_Min_Exp+1, "Exponent less than standard single-precision float exponent min (-125). It is probably an extended float"); - exp_i--; - int rshift = exp_i < Min_Exp ? Min_Exp - exp_i : (exp_i >= Min_Exp && f0 < 0 && f0 >= -0.5) ? -1 : 0; - exp = exp_i + rshift; - ac_int f_i = f0 * (1 << (Mant_W + 1 - rshift)); - ac_fixed r; - r.set_slc(0, f_i); - return r; - } - - inline ac_fixed<53,1,false> frexp_sm_d(double d, ac_int<11,true> &exp, bool &sign) { - enum {Min_Exp = -1022, Max_Exp = 1023, Mant_W = 52, Denorm_Min_Exp = Min_Exp - Mant_W}; - if(!d) { - exp = 0; - sign = false; - return 0; - } - int exp_i; - bool s = d < 0; - double f0 = frexp(s ? 
-d : d, &exp_i);
-    AC_ASSERT(exp_i <= Max_Exp+1, "Exponent greater than standard double-precision float exponent max (+1024). It is probably an extended double");
-    AC_ASSERT(exp_i >= Denorm_Min_Exp+1, "Exponent less than standard double-precision float exponent min (-1021). It is probably an extended double");
-    exp_i--;
-    int rshift = exp_i < Min_Exp ? Min_Exp - exp_i : 0;
-    exp = exp_i + rshift;
-    ac_int<Mant_W+1,false> f_i = f0 * ((Ulong) 1 << (Mant_W + 1 -rshift));
-    ac_fixed<Mant_W+1,1,false> r;
-    r.set_slc(0, f_i);
-    sign = s;
-    return r;
-  }
-  inline ac_fixed<24,1,false> frexp_sm_f(float f, ac_int<8,true> &exp, bool &sign) {
-    enum {Min_Exp = -126, Max_Exp = 127, Mant_W = 23, Denorm_Min_Exp = Min_Exp - Mant_W};
-    if(!f) {
-      exp = 0;
-      sign = false;
-      return 0;
-    }
-    int exp_i;
-    bool s = f < 0;
-    float f0 = frexp(s ? -f : f, &exp_i);
-    AC_ASSERT(exp_i <= Max_Exp+1, "Exponent greater than standard single-precision float exponent max (+128). It is probably an extended float");
-    AC_ASSERT(exp_i >= Denorm_Min_Exp+1, "Exponent less than standard single-precision float exponent min (-125). It is probably an extended float");
-    exp_i--;
-    int rshift = exp_i < Min_Exp ? Min_Exp - exp_i : 0;
-    exp = exp_i + rshift;
-    ac_int<24,false> f_i = f0 * (1 << (Mant_W + 1 - rshift));
-    ac_fixed<24,1,false> r;
-    r.set_slc(0, f_i);
-    sign = s;
-    return r;
-  }
-
-  template<int W, int I, bool S, ac_q_mode Q, ac_o_mode O>
-  const ac_fixed<W,I,S,Q,O> &basic_num_ovf_base<W,I,S,Q,O>::value() const {
-    return (const ac_fixed<W,I,S,Q,O> &) *this;
-  }
-
-  template<int W, int I, bool S, ac_q_mode Q, ac_o_mode O> std::string basic_num_ovf_base<W,I,S,Q,O>::type_name() {
-    return ac_fixed<W,I,S,Q,O>::type_name();
-  }
-}
-
-
-///////////////////////////////////////////////////////////////////////////////
-
-#if (defined(_MSC_VER) && !defined(__EDG__))
-#pragma warning( pop )
-#endif
-#if (defined(__GNUC__) && ( __GNUC__ == 4 && __GNUC_MINOR__ >= 6 || __GNUC__ > 4 ) && !defined(__EDG__))
-#pragma GCC diagnostic pop
-#endif
-#if defined(__clang__)
-#pragma clang diagnostic pop
-#endif
-
-#ifdef __AC_NAMESPACE
-}
-#endif
-
-#endif // __AC_FIXED_H
diff --git a/hls4ml/hls4ml/templates/quartus/ac_types/ac_float.h b/hls4ml/hls4ml/templates/quartus/ac_types/ac_float.h
deleted file mode 100644
index 6174528..0000000
--- a/hls4ml/hls4ml/templates/quartus/ac_types/ac_float.h
+++ /dev/null
@@ -1,1196 +0,0 @@
-/**************************************************************************
- *                                                                        *
- *  Algorithmic C (tm) Datatypes                                          *
- *                                                                        *
- *  Software Version: 4.0                                                 *
- *                                                                        *
- *  Release Date    : Sat Jun 13 12:35:18 PDT 2020                        *
- *  Release Type    : Production Release                                  *
- *  Release Build   : 4.0.0                                               *
- *                                                                        *
- *  Copyright 2013-2019, Mentor Graphics Corporation,                     *
- *                                                                        *
- *  All Rights Reserved.                                                  *
- *                                                                        *
- **************************************************************************
- *  Licensed under the Apache License, Version 2.0 (the "License");       *
- *  you may not use this file except in compliance with the License.      *
- *  You may obtain a copy of the License at                               *
- *                                                                        *
- *      http://www.apache.org/licenses/LICENSE-2.0                        *
- *                                                                        *
- *  Unless required by applicable law or agreed to in writing, software   *
- *  distributed under the License is distributed on an "AS IS" BASIS,     *
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or       *
- *  implied.                                                              *
- *  See the License for the specific language governing permissions and   *
- *  limitations under the License.                                        *
- **************************************************************************
- *                                                                        *
- *  The most recent version of this package is available at github.
* - * * - *************************************************************************/ - -// Source: ac_float.h -// Description: class for floating point operation handling in C++ -// Author: Andres Takach, Ph.D. - -#ifndef __AC_FLOAT_H -#define __AC_FLOAT_H - -#include - -#ifndef __SYNTHESIS__ -#include -#endif - -#if (defined(__GNUC__) && __GNUC__ < 3 && !defined(__EDG__)) -#error GCC version 3 or greater is required to include this header file -#endif - -#if (defined(_MSC_VER) && _MSC_VER < 1400 && !defined(__EDG__)) -#error Microsoft Visual Studio 8 or newer is required to include this header file -#endif - -#if (defined(_MSC_VER) && !defined(__EDG__)) -#pragma warning( push ) -#pragma warning( disable: 4003 4127 4308 4365 4514 4800 ) -#endif -#if (defined(__GNUC__) && ( __GNUC__ == 4 && __GNUC_MINOR__ >= 6 || __GNUC__ > 4 ) && !defined(__EDG__)) -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wparentheses" -#endif -#if defined(__clang__) -#pragma clang diagnostic push -#pragma clang diagnostic ignored "-Wparentheses" -#pragma clang diagnostic ignored "-Wlogical-op-parentheses" -#pragma clang diagnostic ignored "-Wbitwise-op-parentheses" -#endif - -// for safety -#if (defined(E) || defined(WF) || defined(IF) || defined(SF)) -#error One or more of the following is defined: E, WF, IF, SF. Definition conflicts with their usage as template parameters. -#error DO NOT use defines before including third party header files. -#endif - -#define AC_FL(v) ac_float -#define AC_FL0(v) ac_float -#define AC_FL_T(v) int W##v, int I##v, int E##v, ac_q_mode Q##v -#define AC_FL_TV(v) W##v, I##v, E##v, Q##v -#define AC_FL_T0(v) int W##v, int I##v, int E##v -#define AC_FL_TV0(v) W##v, I##v, E##v - -#ifdef __AC_NAMESPACE -namespace __AC_NAMESPACE { -#endif - -template class ac_float; - -namespace ac_private { - - typedef ac_float<54,2,11> ac_float_cdouble_t; - typedef ac_float<25,2,8> ac_float_cfloat_t; - - template - struct rt_ac_float_T { - template< AC_FL_T0() > - struct op1 { - typedef AC_FL0() fl_t; - typedef typename T::template rt_T::mult mult; - typedef typename T::template rt_T::plus plus; - typedef typename T::template rt_T::minus2 minus; - typedef typename T::template rt_T::minus minus2; - typedef typename T::template rt_T::logic logic; - typedef typename T::template rt_T::div2 div; - typedef typename T::template rt_T::div div2; - }; - }; - // specializations after definition of ac_float - - inline ac_float_cdouble_t double_to_ac_float(double d); - inline ac_float_cfloat_t float_to_ac_float(float f); -} - -////////////////////////////////////////////////////////////////////////////// -// ac_float -////////////////////////////////////////////////////////////////////////////// - -template< AC_FL_T() > -class ac_float { - enum { NO_UN = true, S = true, S2 = true, SR = true }; -public: - typedef ac_fixed mant_t; - typedef ac_int exp_t; - mant_t m; - exp_t e; - - void set_mantissa(const ac_fixed &man) { m = man; } - void set_exp(const ac_int &exp) { if(E) e = exp; } - -private: - inline bool is_neg() const { return m < 0; } // is_neg would be more efficient - - enum {NZ_E = !!E, MIN_EXP = -(NZ_E << (E-NZ_E)), MAX_EXP = (1 << (E-NZ_E))-1}; - -public: - static const int width = W; - static const int i_width = I; - static const int e_width = E; - static const bool sign = S; - static const ac_q_mode q_mode = Q; - static const ac_o_mode o_mode = AC_SAT; - - template< AC_FL_T0(2) > - struct rt { - enum { - // need to validate - F=W-I, - F2=W2-I2, - mult_w = W+W2, - mult_i = I+I2, - mult_e = 
AC_MAX(E,E2)+1, - mult_s = S||S2, - plus_w = AC_MAX(I+(S2&&!S),I2+(S&&!S2))+1+AC_MAX(F,F2), - plus_i = AC_MAX(I+(S2&&!S),I2+(S&&!S2))+1, - plus_e = AC_MAX(E,E2), - plus_s = S||S2, - minus_w = AC_MAX(I+(S2&&!S),I2+(S&&!S2))+1+AC_MAX(F,F2), - minus_i = AC_MAX(I+(S2&&!S),I2+(S&&!S2))+1, - minus_e = AC_MAX(E,E2), - minus_s = true, - div_w = W+AC_MAX(W2-I2,0)+S2, - div_i = I+(W2-I2)+S2, - div_e = AC_MAX(E,E2)+1, - div_s = S||S2, - logic_w = AC_MAX(I+(S2&&!S),I2+(S&&!S2))+AC_MAX(F,F2), - logic_i = AC_MAX(I+(S2&&!S),I2+(S&&!S2)), - logic_s = S||S2, - logic_e = AC_MAX(E,E2) - }; - typedef ac_float mult; - typedef ac_float plus; - typedef ac_float minus; - typedef ac_float logic; - typedef ac_float div; - typedef ac_float arg1; - - }; - - template - struct rt_i { - enum { - lshift_w = W, - lshift_i = I, - lshift_s = S, - lshift_e_0 = exp_t::template rt::plus::width, - lshift_e = AC_MIN(lshift_e_0, 24), - rshift_w = W, - rshift_i = I, - rshift_s = S, - rshift_e_0 = exp_t::template rt::minus::width, - rshift_e = AC_MIN(rshift_e_0, 24) - }; - typedef ac_float lshift; - typedef ac_float rshift; - }; - - template - struct rt_T { - typedef typename ac_private::map::t map_T; - typedef typename ac_private::rt_ac_float_T::template op1< AC_FL_TV0() >::mult mult; - typedef typename ac_private::rt_ac_float_T::template op1< AC_FL_TV0() >::plus plus; - typedef typename ac_private::rt_ac_float_T::template op1< AC_FL_TV0() >::minus minus; - typedef typename ac_private::rt_ac_float_T::template op1< AC_FL_TV0() >::minus2 minus2; - typedef typename ac_private::rt_ac_float_T::template op1< AC_FL_TV0() >::logic logic; - typedef typename ac_private::rt_ac_float_T::template op1< AC_FL_TV0() >::div div; - typedef typename ac_private::rt_ac_float_T::template op1< AC_FL_TV0() >::div2 div2; - typedef ac_float arg1; - }; - - template - struct rt_T2 { - typedef typename ac_private::map::t map_T; - typedef typename ac_private::rt_ac_float_T::template op1< AC_FL_TV0() >::mult mult; - typedef typename ac_private::rt_ac_float_T::template op1< AC_FL_TV0() >::plus plus; - typedef typename ac_private::rt_ac_float_T::template op1< AC_FL_TV0() >::minus2 minus; - typedef typename ac_private::rt_ac_float_T::template op1< AC_FL_TV0() >::minus minus2; - typedef typename ac_private::rt_ac_float_T::template op1< AC_FL_TV0() >::logic logic; - typedef typename ac_private::rt_ac_float_T::template op1< AC_FL_TV0() >::div2 div; - typedef typename ac_private::rt_ac_float_T::template op1< AC_FL_TV0() >::div div2; - typedef ac_float arg1; - }; - - struct rt_unary { - enum { - neg_w = W+1, - neg_i = I+1, - neg_e = E, - neg_s = true, - mag_sqr_w = 2*W-S + NO_UN, - mag_sqr_i = 2*I-S + NO_UN, - mag_sqr_e = E, - mag_sqr_s = false | NO_UN, - mag_w = W+S + NO_UN, - mag_i = I+S + NO_UN, - mag_e = E, - mag_s = false | NO_UN, - to_fx_i = I + MAX_EXP, - to_fx_w = W + MAX_EXP - MIN_EXP, - to_fx_s = S, - to_i_w = AC_MAX(to_fx_i,1), - to_i_s = S - }; - typedef ac_float neg; - typedef ac_float mag_sqr; - typedef ac_float mag; - template - struct set { - enum { sum_w = W + ac::log2_ceil::val, sum_i = (sum_w-W) + I, sum_e = E, sum_s = S}; - typedef ac_float sum; - }; - typedef ac_fixed to_ac_fixed_t; - typedef ac_int to_ac_int_t; - }; - - template friend class ac_float; - - ac_float() { -#if defined(AC_DEFAULT_IN_RANGE) -#endif - } - ac_float(const ac_float &op) { - m = op.m; - e = op.e; - } - -private: - template - bool round(const ac_fixed &op2, bool assert_on_rounding=false) { - const bool rnd = Q!=AC_TRN && Q!=AC_TRN_ZERO && W2 > W; - bool rnd_ovfl = false; - m 
= 0; - if(rnd) { - ac_fixed m_1 = op2; - // overflow because of rounding would lead to go from 001111 to 01000 (extra bit prevents it) - // change from 01000 to 00100 and store 0100 in m - rnd_ovfl = !m_1[W] & m_1[W-1]; - m_1[W-1] = m_1[W-1] & !rnd_ovfl; - m_1[W-2] = m_1[W-2] | rnd_ovfl; - m.set_slc(0, m_1.template slc(0)); - if(assert_on_rounding) - AC_ASSERT(m == op2, "Loss of precision due to Rounding"); - return rnd_ovfl; - } else { - ac_fixed m_0 = op2; - m.set_slc(0, m_0.template slc(0)); - return false; - } - } - - template - void assign_from(const ac_fixed &m2, int e2, bool sticky_bit, bool normalize, bool assert_on_rounding=false) { - const bool rnd = Q!=AC_TRN & Q!=AC_TRN_ZERO & W2 > W; - const bool need_rnd_bit = Q != AC_TRN; - const bool need_rem_bits = need_rnd_bit && Q != AC_RND; - - const int msb_min_power = I-1 + MIN_EXP; - const int msb_min_power2 = I2-1 + min_exp2; - const int msb_min_power_dif = msb_min_power - msb_min_power2; - // if > 0: target has additional negative exponent range - // subnormal maybe be further normalized (done even if normalize==false) - // if < 0: target has less negative exponent range - // mantissa may need to be shifted right - // in either case if source is unnormalized - // normalization could take place - - const int msb_max_power = I-1 + MAX_EXP; - const int msb_max_power2 = I2-1 + max_exp2 + rnd; - const int msb_max_power_dif = msb_max_power - msb_max_power2; - - const bool may_shift_right = msb_min_power_dif > 0; - const int max_right_shift = may_shift_right ? msb_min_power_dif : 0; - const int t_width = W2 + (W >= W2 ? AC_MIN(W-W2+may_shift_right, max_right_shift) : 0); - - int e_t = e2; - e_t += I2-I; - typedef ac_fixed op2_t; - op2_t op2 = m2; - int ls = 0; - bool r_zero; - if(normalize) { - bool all_sign; - ls = m2.leading_sign(all_sign); - r_zero = all_sign & !m2[0]; - } else if(msb_min_power_dif < 0 || msb_max_power_dif < 0 || W2 > W) { - // msb_min_power_dif < 0: src exponent less negative than trg exp represents - // oportunity to further normalize value in trg representation - // msb_max_power_dif < 0: max target exp is less than max src exp - // if un-normalized exp may overflow resulting in incorrect saturation - // normalization is needed for correctness - // W2 > W - // if un-normalized, extra bits may be incorrectly quantized away - const int msb_range_dif = AC_MAX(-msb_min_power_dif, -msb_max_power_dif); - const int msb_range_dif_norm_w = AC_MIN(msb_range_dif,W2-1); - const int extra_bits = AC_MAX(W2-W,0); - const int norm_w = AC_MAX(msb_range_dif_norm_w, extra_bits) + 1; - bool all_sign; - ls = m2.template slc(W2-norm_w).leading_sign(all_sign); - r_zero = all_sign & !m2[W2-1] & !(m2 << norm_w); - } else { - r_zero = !m2; - } - int actual_max_shift_left = (1 << (E-1)) + e_t; - if(may_shift_right && actual_max_shift_left < 0) { - const int shift_r_w = ac::nbits::val; - ac_int shift_r = -actual_max_shift_left; - if((1 << (E-1)) + min_exp2 + I2-I < 0 && need_rem_bits) { - op2_t shifted_out_bits = op2; - shifted_out_bits &= ~((~op2_t(0)) << shift_r); - sticky_bit |= !!shifted_out_bits; - } - op2 >>= shift_r; - e_t += shift_r; - } else { - bool shift_exponent_limited = ls >= actual_max_shift_left; - int shift_l = shift_exponent_limited ? actual_max_shift_left : (int) ls; - op2 <<= shift_l; - e_t = shift_exponent_limited ? MIN_EXP : e_t - ls; - } - ac_fixed r_pre_rnd = 0; - r_pre_rnd.set_slc(need_rem_bits, op2.template slc(0)); - if(need_rem_bits) - r_pre_rnd[0] = sticky_bit; - - bool shift_r1 = round(r_pre_rnd); - e_t = r_zero ? 
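// Exponent saturation sketch (illustrative, assuming E=8, so the stored
// exponent spans MIN_EXP = -128 .. MAX_EXP = 127): if rounding and
// normalization leave e_t = 130, the overflow test below fires and the
// result saturates to the largest finite magnitude of the proper sign,
// i.e. m set to AC_VAL_MAX (AC_VAL_MIN when negative) with e = MAX_EXP.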
0 : e_t + shift_r1; - if(!(e_t < 0) & !!(e_t >> E-1)) { - e = MAX_EXP; - m = m < 0 ? value(m) : value(m); - } else { - e = e_t; - } - } - -public: - template - ac_float(const AC_FL(2) &op, bool assert_on_overflow=false, bool assert_on_rounding=false) { - typedef AC_FL(2) fl2_t; - const int min_exp2 = fl2_t::MIN_EXP; - const int max_exp2 = fl2_t::MAX_EXP; - assign_from(op.m, op.e, false, false); - } - - ac_float(const ac_fixed &m2, const ac_int &e2, bool normalize=true) { - m = m2; - e = e2; - if(normalize) - this->normalize(); - else - e &= ac_int<1,true>(!!m); - } - - template - ac_float(const ac_fixed &m2, const ac_int &e2, bool normalize=true) { - enum { WF2 = WFX+!SFX, IF2 = IFX+!SFX }; - ac_float f(ac_fixed(m2), e2, normalize); - *this = f; - } - - template - ac_float(const ac_fixed &op) { - assign_from<0,0>(ac_fixed(op), 0, false, true); - } - - template - ac_float(const ac_int &op) { - *this = ac_fixed(op); - } - - inline ac_float( bool b ) { *this = (ac_int<1,false>) b; } - inline ac_float( char b ) { *this = (ac_int<8,true>) b; } - inline ac_float( signed char b ) { *this = (ac_int<8,true>) b; } - inline ac_float( unsigned char b ) { *this = (ac_int<8,false>) b; } - inline ac_float( signed short b ) { *this = (ac_int<16,true>) b; } - inline ac_float( unsigned short b ) { *this = (ac_int<16,false>) b; } - inline ac_float( signed int b ) { *this = (ac_int<32,true>) b; } - inline ac_float( unsigned int b ) { *this = (ac_int<32,false>) b; } - inline ac_float( signed long b ) { *this = (ac_int) b; } - inline ac_float( unsigned long b ) { *this = (ac_int) b; } - inline ac_float( Slong b ) { *this = (ac_int<64,true>) b; } - inline ac_float( Ulong b ) { *this = (ac_int<64,false>) b; } - - // Explicit conversion functions to ac_int and ac_fixed - inline typename rt_unary::to_ac_fixed_t to_ac_fixed() const { - typename rt_unary::to_ac_fixed_t r = m; - r <<= e; - return r; - } - inline typename rt_unary::to_ac_int_t to_ac_int() const { - return to_ac_fixed().to_ac_int(); - } - - // Explicit conversion functions to C built-in types ------------- - inline int to_int() const { return to_ac_int().to_int(); } - inline unsigned to_uint() const { return to_ac_int().to_uint(); } - inline long to_long() const { return (signed long) to_ac_int().to_int64(); } - inline unsigned long to_ulong() const { return (unsigned long) to_ac_int().to_uint64(); } - inline Slong to_int64() const { return to_ac_int().to_int64(); } - inline Ulong to_uint64() const { return to_ac_int().to_uint64(); } - inline float to_float() const { return ldexpf(m.to_double(), exp()); } - inline double to_double() const { return ldexp(m.to_double(), exp()); } - - const ac_fixed mantissa() const { return m; } - const ac_int exp() const { return e; } - bool normalize() { - bool all_sign; - int ls = m.leading_sign(all_sign); - bool m_zero = all_sign & !m[0]; - const int max_shift_left = (1 << (E-1)) + e; - bool normal = ls <= max_shift_left; - int shift_l = normal ? 
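// Normalization sketch (illustrative, assuming W=8): m = 00000110 has
// ls = 4 redundant sign bits, so m <<= 4 yields 01100000 while e is
// reduced by 4, leaving the value m * 2^e unchanged. The shift is
// clamped to max_shift_left = (1 << (E-1)) + e so that e cannot drop
// below MIN_EXP, and a zero mantissa forces e to 0.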
ls : max_shift_left; - m <<= shift_l; - e = ac_int<1,true>(!m_zero) & (e - shift_l); - return normal; - } - - ac_float( double d, bool assert_on_overflow=false, bool assert_on_rounding=false ) { - enum { I_EXT = AC_MAX(I,1), W_EXT = ac_private::ac_float_cdouble_t::width + I_EXT - 1, }; - ac_private::ac_float_cdouble_t t = ac_private::double_to_ac_float(d); - ac_float r(t, assert_on_overflow, assert_on_rounding); - *this = r; - } - - ac_float( float f, bool assert_on_overflow=false, bool assert_on_rounding=false ) { - enum { I_EXT = AC_MAX(I,1), W_EXT = ac_private::ac_float_cfloat_t::width + I_EXT - 1, }; - ac_private::ac_float_cfloat_t t = ac_private::float_to_ac_float(f); - ac_float r(t, assert_on_overflow, assert_on_rounding); - *this = r; - } - - template - bool compare(const AC_FL(2) &op2, bool *gt) const { - typedef ac_fixed fx2_t; - typedef typename ac_fixed::template rt_T< fx2_t >::logic fx_t; - typedef ac_fixed fxu_t; - - fx2_t op2_m_0; - op2_m_0.set_slc(0, op2.m.template slc(0)); - - fx_t op1_m = m; - fx_t op2_m = op2_m_0; - int e_dif = exp() - op2.exp() + I - I2; - bool op2_m_neg = op2_m[fx_t::width-1]; - fx_t out_bits = op2_m ^ ((op2_m_neg & e_dif < 0) ? ~fx_t(0) : fx_t(0)); - out_bits &= ~(fxu_t(~fxu_t(0)) << e_dif); - op2_m >>= e_dif; - bool overflow = e_dif < 0 & !!out_bits | op2_m_neg ^ op2_m[fx_t::width-1]; - - *gt = overflow & op2_m_neg | !overflow & op1_m > op2_m; - bool eq = op1_m == op2_m & !overflow & !out_bits; - return eq; - } - - template - void plus_minus(const AC_FL(2) &op2, AC_FL(R) &r, bool sub=false) const { - typedef AC_FL(2) op2_t; - enum { IT = AC_MAX(I,I2) }; - typedef ac_fixed fx1_t; - typedef ac_fixed fx2_t; - // covers fx1_t and r mantissas (adds additional LSBs if WR > W) - typedef typename fx1_t::template rt_T< ac_fixed >::logic fx1r_t; - // covers fx2_t and r mantissas (adds additional LSBs if WR > W2) - typedef typename fx2_t::template rt_T< ac_fixed >::logic fx2r_t; - // mt_t adds one integer bit for the plus - // op1_m, op2_m, op_sl, sticky_bits - typedef typename fx1r_t::template rt_T::plus mt_t; - - const bool round_bit_needed = QR != AC_TRN; - const bool remaining_bits_needed = !(QR == AC_TRN || QR == AC_RND); - - const int w_r_with_round_bits = WR + round_bit_needed; - - // naming: sn = subnormal, n = normal, wc = worst case - // worst case (wc) normalize is when one operand has smallest subnormal - // and other operand is shifted right so that its MSB lines up with LSB of subnormal - const int power_smallest_sn1 = I - W - (1 << (E-1)); - const int power_smallest_sn2 = I2 - W2 - (1 << (E2-1)); - const int power_smallest_sn_dif1 = AC_MAX(0,power_smallest_sn2 - power_smallest_sn1); - const int power_smallest_sn_dif2 = AC_MAX(0,power_smallest_sn1 - power_smallest_sn2); - const int wc_norm_shift1 = W2-1 + AC_MIN(power_smallest_sn_dif1, W-1); - const int wc_norm_shift2 = W-1 + AC_MIN(power_smallest_sn_dif2, W2-1); - const int wc_sn_norm_shift = AC_MAX(wc_norm_shift1, wc_norm_shift2); - const int w_sn_overlap = wc_sn_norm_shift + 1; - - // cases when one operand is subnormal and other is shifted right and does not overlap bits - // subnormal op could be normalized by width-1 bits - const int w_sn_no_overlap1 = W + AC_MIN(w_r_with_round_bits, power_smallest_sn_dif2); - const int w_sn_no_overlap2 = W2 + AC_MIN(w_r_with_round_bits, power_smallest_sn_dif1); - const int w_sn_no_overlap = AC_MAX(w_sn_no_overlap1, w_sn_no_overlap2); - - const int w_sn = AC_MAX(w_sn_overlap, w_sn_no_overlap); - - // For example 0100 + (1000 0001 >> 1) = 0000 0000 1, 
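// (illustrative reading of the example above: a 4-bit operand and an
// 8-bit operand of opposite sign can cancel down to a single low-order
// bit after exponent alignment, so the adder must be wide enough to keep
// every cancelled position, and the result may then need a left
// normalization shift of up to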
wc_n_norm_shift = max(4,8) - const int msb0h1 = I-1 + (int) MAX_EXP; - const int msb1h1 = msb0h1-1; - const int msb0l1 = I-1 + (int) MIN_EXP; - const int msb1l1 = msb0h1-1; - const int msb0h2 = I2-1 + (int) op2_t::MAX_EXP; - const int msb1h2 = msb0h2-1; - const int msb0l2 = I2-1 + (int) op2_t::MIN_EXP; - const int msb1l2 = msb0h2-1; - // bit W-1 overlap with bit W2-2 - const bool msb_overlap1 = msb1h2 >= msb0h1 && msb0h1 <= msb1l2 - || msb1h2 >= msb0l1 && msb0l1 <= msb1l2 - || msb0h1 >= msb1h2 && msb1h2 >= msb0l1; - // bit W2-1 overlap with bit W1-2 - const bool msb_overlap2 = msb1h1 >= msb0h2 && msb0h2 <= msb1l1 - || msb1h1 >= msb0l2 && msb0l2 <= msb1l1 - || msb0h2 >= msb1h1 && msb1h1 >= msb0l2; - const bool msb_overlap = msb_overlap1 || msb_overlap2; - const int wc_n_norm_shift = AC_MAX(W,W2); - const int w_n_msb_overlap = msb_overlap ? wc_n_norm_shift + 1 : 0; - // addition of two numbers of different sign can result in a normalization by 1 (therefore + 1) - const int w_n_no_msb_overlap = w_r_with_round_bits + 1; - const int w_n = AC_MAX(w_n_msb_overlap, w_n_no_msb_overlap); - - // +1 is to prevent overflow during addition - const int tr_t_width = AC_MAX(w_n, w_sn) + 1; - typedef ac_fixed add_t; - - const int min_E = (int) MIN_EXP + I-IT; - const int min_E2 = (int) AC_FL(2)::MIN_EXP + I2-IT; - const int min_ET = AC_MIN(min_E, min_E2); - - const int max_E = (int) MAX_EXP + I-IT; - const int max_E2 = (int) AC_FL(2)::MAX_EXP + I2-IT; - const int max_ET = AC_MAX(max_E, max_E2); - - ac_fixed op1_m_0 = m; - mt_t op1_m = 0; - op1_m.set_slc(0, op1_m_0.template slc(0)); - int op1_e = exp() + I-IT; - - ac_fixed op2_m_0 = op2.m; - mt_t op2_m = 0; - op2_m.set_slc(0, op2_m_0.template slc(0)); - if(sub) - op2_m = -op2_m; - int op2_e = op2.exp() + I2-IT; - - bool op1_zero = operator !(); - bool op2_zero = !op2; - int e_dif = op1_e - op2_e; - bool e1_lt_e2 = e_dif < 0; - e_dif = (op1_zero | op2_zero) ? 0 : e1_lt_e2 ? -e_dif : e_dif; - - add_t op_lshift = e1_lt_e2 ? op1_m : op2_m; - mt_t op_no_shift = e1_lt_e2 ? op2_m : op1_m; - - bool sticky_bit = false; - if(remaining_bits_needed) { - mt_t shifted_out_bits = op_lshift; - // bits that are shifted out of a add_t (does not include potential 3 spare bits) - shifted_out_bits &= ~((~add_t(0)) << e_dif); - sticky_bit = !!shifted_out_bits; - } - op_lshift >>= e_dif; - - add_t add_r = op_lshift + op_no_shift; - int e_t = (e1_lt_e2 & !op2_zero | op1_zero ? op2_e : op1_e); - - r.template assign_from(add_r, e_t, sticky_bit, true); - } - - template - ac_float add(const AC_FL(1) &op1, const AC_FL(2) &op2) { - op1.plus_minus(op2, *this); - return *this; - } - - template - ac_float sub(const AC_FL(1) &op1, const AC_FL(2) &op2) { - op1.plus_minus(op2, *this, true); - return *this; - } - - typename rt_unary::neg abs() const { - typedef typename rt_unary::neg r_t; - r_t r; - r.m = is_neg() ? -m : r_t::mant_t(m); - r.e = e; - return r; - } - -#ifdef __AC_FLOAT_ENABLE_ALPHA - // These will be changed!!! 
For now only enable to explore integration with ac_complex - template - typename rt< AC_FL_TV0(2) >::plus operator +(const AC_FL(2) &op2) const { - typename rt< AC_FL_TV0(2) >::plus r; - plus_minus(op2, r); - return r; - } - template - typename rt< AC_FL_TV0(2) >::minus operator -(const AC_FL(2) &op2) const { - typename rt< AC_FL_TV0(2) >::minus r; - plus_minus(op2, r, true); - return r; - } -#endif - - template - typename rt< AC_FL_TV0(2) >::mult operator *(const AC_FL(2) &op2) const { - typedef typename rt< AC_FL_TV0(2) >::mult r_t; - r_t r(m*op2.m, exp()+op2.exp(), false); - return r; - } - - template - typename rt< AC_FL_TV0(2) >::div operator /(const AC_FL(2) &op2) const { - typename rt< AC_FL_TV0(2) >::div r(m/op2.m, exp()-op2.exp()); - return r; - } - template - ac_float &operator +=(const AC_FL(2) &op2) { - ac_float r; - plus_minus(op2, r); - *this = r; - return *this; - } - template - ac_float &operator -=(const AC_FL(2) &op2) { - ac_float r; - plus_minus(op2, r, true); - *this = r; - return *this; - } - template - ac_float &operator *=(const AC_FL(2) &op2) { - *this = *this * op2; - return *this; - } - template - ac_float &operator /=(const AC_FL(2) &op2) { - *this = *this / op2; - return *this; - } - ac_float operator + () const { - return *this; - } - typename rt_unary::neg operator - () const { - typename rt_unary::neg r; - r.m = -m; - r.e = e; - return r; - } - bool operator ! () const { - return !m; - } - - // Shift -------------------------------------------------------------------- - template - typename rt_i::lshift operator << ( const ac_int &op2 ) const { - typename rt_i::lshift r; - r.m = m; - r.e = e + op2; - return r; - } - template - typename rt_i::rshift operator >> ( const ac_int &op2 ) const { - typename rt_i::rshift r; - r.m = m; - r.e = e - op2; - return r; - } - // Shift assign ------------------------------------------------------------- - template - ac_float &operator <<= ( const ac_int &op2 ) { - *this = operator << (op2); - return *this; - } - template - ac_float &operator >>= ( const ac_int &op2 ) { - *this = operator >> (op2); - return *this; - } - - template - bool operator == (const AC_FL(2) &f) const { - bool gt; - return compare(f, >); - } - template - bool operator != (const AC_FL(2) &f) const { - return !operator == (f); - } - template - bool operator < (const AC_FL(2) &f) const { - bool gt; - bool eq = compare(f, >); - return !(eq | gt); - } - template - bool operator >= (const AC_FL(2) &f) const { - return !operator < (f); - } - template - bool operator > (const AC_FL(2) &f) const { - bool gt; - compare(f, >); - return gt; - } - template - bool operator <= (const AC_FL(2) &f) const { - return !operator > (f); - } - - inline std::string to_string(ac_base_mode base_rep, bool sign_mag = false, bool hw=true) const { - // TODO: printing decimal with exponent - if(!hw) { - ac_fixed mantissa; - mantissa.set_slc(0, m.template slc(0)); - std::string r = mantissa.to_string(base_rep, sign_mag); - r += "e2"; - r += (e + I).to_string(base_rep, sign_mag | base_rep == AC_DEC); - return r; - } else { - std::string r = m.to_string(base_rep, sign_mag); - if(base_rep != AC_DEC) - r += "_"; - r += "e2"; - if(base_rep != AC_DEC) - r += "_"; - if(E) - r += e.to_string(base_rep, sign_mag | base_rep == AC_DEC); - else - r += "0"; - return r; - } - } - - inline static std::string type_name() { - const char *tf[] = {"false", "true" }; - const char *q[] = {"AC_TRN", "AC_RND", "AC_TRN_ZERO", "AC_RND_ZERO", "AC_RND_INF", "AC_RND_MIN_INF", "AC_RND_CONV" }; - std::string r = 
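// Illustrative output (assuming an ac_float<25,2,8,AC_RND> instance):
// type_name() returns the string "ac_float<25,2,8,true,AC_RND>",
// assembled below from the template parameters W, I, E, S and Q.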
"ac_float<"; - r += ac_int<32,true>(W).to_string(AC_DEC) + ','; - r += ac_int<32,true>(I).to_string(AC_DEC) + ','; - r += ac_int<32,true>(E).to_string(AC_DEC) + ','; - r += tf[S]; - r += ','; - r += q[Q]; - r += '>'; - return r; - } - - template - inline ac_float &set_val() { - m.template set_val(); - if(V == AC_VAL_MIN) - e.template set_val(); - else if(V == AC_VAL_QUANTUM) - e.template set_val(); - else - e.template set_val(); - return *this; - } -}; - -namespace ac_private { - template - bool ac_fpclassify(T x, bool &inf) { - bool nan = !(x==x); - if(!nan) { - T d = x - x; - inf = !(d==d); - } - return nan; - } - - inline ac_float_cdouble_t double_to_ac_float(double d) { - typedef ac_float_cdouble_t r_t; -#ifndef __SYNTHESIS__ - bool inf; - bool nan = ac_fpclassify(d, inf); - if(nan) - AC_ASSERT(0, "In conversion from double to ac_float: double is NaN"); - else if(inf) - AC_ASSERT(0, "In conversion from double to ac_float: double is Infinite"); -#endif - r_t::exp_t exp; - r_t::mant_t mant = ac::frexp_d(d, exp); - return r_t(mant, exp, false); - } - - inline ac_float_cfloat_t float_to_ac_float(float f) { - typedef ac_float_cfloat_t r_t; -#ifndef __SYNTHESIS__ - bool inf; - bool nan = ac_fpclassify(f, inf); - if(nan) - AC_ASSERT(0, "In conversion from float to ac_float: float is NaN"); - else if(inf) - AC_ASSERT(0, "In conversion from float to ac_float: float is Infinite"); -#endif - r_t::exp_t exp; - r_t::mant_t mant = ac::frexp_f(f, exp); - return r_t(mant, exp, false); - } -}; - -namespace ac { - template - struct ac_float_represent { - typedef typename ac_fixed_represent::type fx_t; - typedef ac_float type; - }; - template<> struct ac_float_represent { - typedef ac_private::ac_float_cfloat_t type; - }; - template<> struct ac_float_represent { - typedef ac_private::ac_float_cdouble_t type; - }; -} - -namespace ac_private { - // with T == ac_float - template< AC_FL_T0(2) > - struct rt_ac_float_T< AC_FL0(2) > { - typedef AC_FL0(2) fl2_t; - template< AC_FL_T0() > - struct op1 { - typedef AC_FL0() fl_t; - typedef typename fl_t::template rt< AC_FL_TV0(2) >::mult mult; - typedef typename fl_t::template rt< AC_FL_TV0(2) >::plus plus; - typedef typename fl_t::template rt< AC_FL_TV0(2) >::minus minus; - typedef typename fl2_t::template rt< AC_FL_TV0() >::minus minus2; - typedef typename fl_t::template rt< AC_FL_TV0(2) >::logic logic; - typedef typename fl_t::template rt< AC_FL_TV0(2) >::div div; - typedef typename fl2_t::template rt< AC_FL_TV0() >::div div2; - }; - }; - // with T == ac_fixed - template - struct rt_ac_float_T< ac_fixed > { - // For now E2 > 0 - enum { E2 = 1, S2 = true, W2 = WFX + !SFX, I2 = IFX + !SFX }; - typedef AC_FL0(2) fl2_t; - template< AC_FL_T0() > - struct op1 { - typedef AC_FL0() fl_t; - typedef typename fl_t::template rt< AC_FL_TV0(2) >::mult mult; - typedef typename fl_t::template rt< AC_FL_TV0(2) >::plus plus; - typedef typename fl_t::template rt< AC_FL_TV0(2) >::minus minus; - typedef typename fl2_t::template rt< AC_FL_TV0() >::minus minus2; - typedef typename fl_t::template rt< AC_FL_TV0(2) >::logic logic; - typedef typename fl_t::template rt< AC_FL_TV0(2) >::div div; - typedef typename fl2_t::template rt< AC_FL_TV0() >::div div2; - }; - }; - // with T == ac_int - template - struct rt_ac_float_T< ac_int > { - // For now E2 > 0 - enum { E2 = 1, S2 = true, I2 = WI + !SI, W2 = I2 }; - typedef AC_FL0(2) fl2_t; - template< AC_FL_T0() > - struct op1 { - typedef AC_FL0() fl_t; - typedef typename fl_t::template rt< AC_FL_TV0(2) >::mult mult; - typedef typename 
fl_t::template rt< AC_FL_TV0(2) >::plus plus; - typedef typename fl_t::template rt< AC_FL_TV0(2) >::minus minus; - typedef typename fl2_t::template rt< AC_FL_TV0() >::minus minus2; - typedef typename fl_t::template rt< AC_FL_TV0(2) >::logic logic; - typedef typename fl_t::template rt< AC_FL_TV0(2) >::div div; - typedef typename fl2_t::template rt< AC_FL_TV0() >::div div2; - }; - }; - - // Multiplication is optimizable, general operator +/- is not yet supported - template - struct rt_ac_float_T< c_type > { - // For now E2 > 0 - enum { SCT = c_type_params::S, S2 = true, W2 = c_type_params::W + !SCT, I2 = c_type_params::I + !SCT, E2 = AC_MAX(1, c_type_params::E) }; - typedef AC_FL0(2) fl2_t; - template< AC_FL_T0() > - struct op1 { - typedef AC_FL0() fl_t; - typedef typename fl_t::template rt< AC_FL_TV0(2) >::mult mult; - typedef typename fl_t::template rt< AC_FL_TV0(2) >::plus plus; - typedef typename fl_t::template rt< AC_FL_TV0(2) >::minus minus; - typedef typename fl2_t::template rt< AC_FL_TV0() >::minus minus2; - typedef typename fl_t::template rt< AC_FL_TV0(2) >::logic logic; - typedef typename fl_t::template rt< AC_FL_TV0(2) >::div div; - typedef typename fl2_t::template rt< AC_FL_TV0() >::div div2; - }; - }; -} - -// Stream -------------------------------------------------------------------- - -#ifndef __SYNTHESIS__ -template -inline std::ostream& operator << (std::ostream &os, const AC_FL() &x) { - os << x.to_string(AC_DEC); - return os; -} -#endif - -#define FL_BIN_OP_WITH_CTYPE(BIN_OP, C_TYPE, RTYPE) \ - template< AC_FL_T() > \ - inline typename AC_FL()::template rt_T2::RTYPE operator BIN_OP ( C_TYPE c_op, const AC_FL() &op) { \ - typedef typename ac::template ac_float_represent::type fl2_t; \ - return fl2_t(c_op).operator BIN_OP (op); \ - } \ - template< AC_FL_T() > \ - inline typename AC_FL()::template rt_T::RTYPE operator BIN_OP ( const AC_FL() &op, C_TYPE c_op) { \ - typedef typename ac::template ac_float_represent::type fl2_t; \ - return op.operator BIN_OP (fl2_t(c_op)); \ - } - -#define FL_REL_OP_WITH_CTYPE(REL_OP, C_TYPE) \ - template< AC_FL_T() > \ - inline bool operator REL_OP ( const AC_FL() &op, C_TYPE op2) { \ - typedef typename ac::template ac_float_represent::type fl2_t; \ - return op.operator REL_OP (fl2_t(op2)); \ - } \ - template< AC_FL_T() > \ - inline bool operator REL_OP ( C_TYPE op2, const AC_FL() &op) { \ - typedef typename ac::template ac_float_represent::type fl2_t; \ - return fl2_t(op2).operator REL_OP (op); \ - } - -#define FL_ASSIGN_OP_WITH_CTYPE_2(ASSIGN_OP, C_TYPE) \ - template< AC_FL_T() > \ - inline AC_FL() &operator ASSIGN_OP ( AC_FL() &op, C_TYPE op2) { \ - typedef typename ac::template ac_float_represent::type fl2_t; \ - return op.operator ASSIGN_OP (fl2_t(op2)); \ - } - -#ifdef __AC_FLOAT_ENABLE_ALPHA -#define FL_BIN_OP_WITH_CTYPE_ALPHA(C_TYPE) \ - FL_BIN_OP_WITH_CTYPE(+, C_TYPE, plus) \ - FL_BIN_OP_WITH_CTYPE(-, C_TYPE, minus) -#else -#define FL_BIN_OP_WITH_CTYPE_ALPHA(C_TYPE) -#endif - -#define FL_OPS_WITH_CTYPE(C_TYPE) \ - FL_BIN_OP_WITH_CTYPE_ALPHA(C_TYPE) \ - FL_BIN_OP_WITH_CTYPE(*, C_TYPE, mult) \ - FL_BIN_OP_WITH_CTYPE(/, C_TYPE, div) \ - \ - FL_REL_OP_WITH_CTYPE(==, C_TYPE) \ - FL_REL_OP_WITH_CTYPE(!=, C_TYPE) \ - FL_REL_OP_WITH_CTYPE(>, C_TYPE) \ - FL_REL_OP_WITH_CTYPE(>=, C_TYPE) \ - FL_REL_OP_WITH_CTYPE(<, C_TYPE) \ - FL_REL_OP_WITH_CTYPE(<=, C_TYPE) \ - \ - FL_ASSIGN_OP_WITH_CTYPE_2(+=, C_TYPE) \ - FL_ASSIGN_OP_WITH_CTYPE_2(-=, C_TYPE) \ - FL_ASSIGN_OP_WITH_CTYPE_2(*=, C_TYPE) \ - FL_ASSIGN_OP_WITH_CTYPE_2(/=, C_TYPE) - -#define 
FL_SHIFT_OP_WITH_INT_CTYPE(BIN_OP, C_TYPE, RTYPE) \ - template< AC_FL_T() > \ - inline typename AC_FL()::template rt_i< ac_private::c_type_params::W, ac_private::c_type_params::S >::RTYPE operator BIN_OP ( const AC_FL() &op, C_TYPE i_op) { \ - typedef typename ac::template ac_int_represent::type i_t; \ - return op.operator BIN_OP (i_t(i_op)); \ - } - -#define FL_SHIFT_ASSIGN_OP_WITH_INT_CTYPE(ASSIGN_OP, C_TYPE) \ - template< AC_FL_T() > \ - inline AC_FL() &operator ASSIGN_OP ( AC_FL() &op, C_TYPE i_op) { \ - typedef typename ac::template ac_int_represent::type i_t; \ - return op.operator ASSIGN_OP (i_t(i_op)); \ - } - -#define FL_SHIFT_OPS_WITH_INT_CTYPE(C_TYPE) \ - FL_SHIFT_OP_WITH_INT_CTYPE(>>, C_TYPE, rshift) \ - FL_SHIFT_OP_WITH_INT_CTYPE(<<, C_TYPE, lshift) \ - FL_SHIFT_ASSIGN_OP_WITH_INT_CTYPE(>>=, C_TYPE) \ - FL_SHIFT_ASSIGN_OP_WITH_INT_CTYPE(<<=, C_TYPE) - -#define FL_OPS_WITH_INT_CTYPE(C_TYPE) \ - FL_OPS_WITH_CTYPE(C_TYPE) \ - FL_SHIFT_OPS_WITH_INT_CTYPE(C_TYPE) - -// --------------------------------------- End of Macros for Binary Operators with C Floats - - // Binary Operators with C Floats -------------------------------------------- - FL_OPS_WITH_CTYPE(float) - FL_OPS_WITH_CTYPE(double) - FL_OPS_WITH_INT_CTYPE(bool) - FL_OPS_WITH_INT_CTYPE(char) - FL_OPS_WITH_INT_CTYPE(signed char) - FL_OPS_WITH_INT_CTYPE(unsigned char) - FL_OPS_WITH_INT_CTYPE(short) - FL_OPS_WITH_INT_CTYPE(unsigned short) - FL_OPS_WITH_INT_CTYPE(int) - FL_OPS_WITH_INT_CTYPE(unsigned int) - FL_OPS_WITH_INT_CTYPE(long) - FL_OPS_WITH_INT_CTYPE(unsigned long) - FL_OPS_WITH_INT_CTYPE(Slong) - FL_OPS_WITH_INT_CTYPE(Ulong) - // -------------------------------------- End of Binary Operators with C Floats - -// Macros for Binary Operators with ac_int -------------------------------------------- - -#define FL_BIN_OP_WITH_AC_INT_1(BIN_OP, RTYPE) \ - template< AC_FL_T(), int WI, bool SI> \ - inline typename AC_FL()::template rt_T2< ac_int >::RTYPE operator BIN_OP ( const ac_int &i_op, const AC_FL() &op) { \ - typedef typename ac::template ac_float_represent< ac_int >::type fl2_t; \ - return fl2_t(i_op).operator BIN_OP (op); \ - } - -#define FL_BIN_OP_WITH_AC_INT_2(BIN_OP, RTYPE) \ - template< AC_FL_T(), int WI, bool SI> \ - inline typename AC_FL()::template rt_T2< ac_int >::RTYPE operator BIN_OP ( const AC_FL() &op, const ac_int &i_op) { \ - typedef typename ac::template ac_float_represent< ac_int >::type fl2_t; \ - return op.operator BIN_OP (fl2_t(i_op)); \ - } - -#define FL_BIN_OP_WITH_AC_INT(BIN_OP, RTYPE) \ - FL_BIN_OP_WITH_AC_INT_1(BIN_OP, RTYPE) \ - FL_BIN_OP_WITH_AC_INT_2(BIN_OP, RTYPE) - -#define FL_REL_OP_WITH_AC_INT(REL_OP) \ - template< AC_FL_T(), int WI, bool SI> \ - inline bool operator REL_OP ( const AC_FL() &op, const ac_int &op2) { \ - typedef typename ac::template ac_float_represent< ac_int >::type fl2_t; \ - return op.operator REL_OP (fl2_t(op2)); \ - } \ - template< AC_FL_T(), int WI, bool SI> \ - inline bool operator REL_OP ( ac_int &op2, const AC_FL() &op) { \ - typedef typename ac::template ac_float_represent< ac_int >::type fl2_t; \ - return fl2_t(op2).operator REL_OP (op); \ - } - -#define FL_ASSIGN_OP_WITH_AC_INT(ASSIGN_OP) \ - template< AC_FL_T(), int WI, bool SI> \ - inline AC_FL() &operator ASSIGN_OP ( AC_FL() &op, const ac_int &op2) { \ - typedef typename ac::template ac_float_represent< ac_int >::type fl2_t; \ - return op.operator ASSIGN_OP (fl2_t(op2)); \ - } - -// -------------------------------------------- End of Macros for Binary Operators with ac_int - - // Binary Operators with ac_int 
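// Mixed ac_int arithmetic sketch (illustrative):
//   ac_float<16,2,8> f(1.5);  ac_int<8,true> i = 3;  f *= i;
// the integer operand is first promoted through ac::ac_float_represent
// to an ac_float (here ac_float<8,8,1>, per the rt_ac_float_T mapping
// above) and the float-float operator defined earlier is applied.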
-------------------------------------------- -#ifdef __AC_FLOAT_ENABLE_ALPHA - FL_BIN_OP_WITH_AC_INT(+, plus) - FL_BIN_OP_WITH_AC_INT(-, minus) -#endif - FL_BIN_OP_WITH_AC_INT(*, mult) - FL_BIN_OP_WITH_AC_INT(/, div) - - FL_REL_OP_WITH_AC_INT(==) - FL_REL_OP_WITH_AC_INT(!=) - FL_REL_OP_WITH_AC_INT(>) - FL_REL_OP_WITH_AC_INT(>=) - FL_REL_OP_WITH_AC_INT(<) - FL_REL_OP_WITH_AC_INT(<=) - - FL_ASSIGN_OP_WITH_AC_INT(+=) - FL_ASSIGN_OP_WITH_AC_INT(-=) - FL_ASSIGN_OP_WITH_AC_INT(*=) - FL_ASSIGN_OP_WITH_AC_INT(/=) - FL_ASSIGN_OP_WITH_AC_INT(%=) - // -------------------------------------- End of Binary Operators with ac_int - -// Macros for Binary Operators with ac_fixed -------------------------------------------- - -#define FL_BIN_OP_WITH_AC_FIXED_1(BIN_OP, RTYPE) \ - template< AC_FL_T(), int WF, int IF, bool SF, ac_q_mode QF, ac_o_mode OF> \ - inline typename AC_FL()::template rt_T2< ac_fixed >::RTYPE operator BIN_OP ( const ac_fixed &f_op, const AC_FL() &op) { \ - typedef typename ac::template ac_float_represent< ac_fixed >::type fl2_t; \ - return fl2_t(f_op).operator BIN_OP (op); \ - } - -#define FL_BIN_OP_WITH_AC_FIXED_2(BIN_OP, RTYPE) \ - template< AC_FL_T(), int WF, int IF, bool SF, ac_q_mode QF, ac_o_mode OF> \ - inline typename AC_FL()::template rt_T2< ac_fixed >::RTYPE operator BIN_OP ( const AC_FL() &op, const ac_fixed &f_op) { \ - typedef typename ac::template ac_float_represent< ac_fixed >::type fl2_t; \ - return op.operator BIN_OP (fl2_t(f_op)); \ - } - -#define FL_BIN_OP_WITH_AC_FIXED(BIN_OP, RTYPE) \ - FL_BIN_OP_WITH_AC_FIXED_1(BIN_OP, RTYPE) \ - FL_BIN_OP_WITH_AC_FIXED_2(BIN_OP, RTYPE) - -#define FL_REL_OP_WITH_AC_FIXED(REL_OP) \ - template< AC_FL_T(), int WF, int IF, bool SF, ac_q_mode QF, ac_o_mode OF> \ - inline bool operator REL_OP ( const AC_FL() &op, const ac_fixed &op2) { \ - typedef typename ac::template ac_float_represent< ac_fixed >::type fl2_t; \ - return op.operator REL_OP (fl2_t(op2)); \ - } \ - template< AC_FL_T(), int WF, int IF, bool SF, ac_q_mode QF, ac_o_mode OF> \ - inline bool operator REL_OP ( ac_fixed &op2, const AC_FL() &op) { \ - typedef typename ac::template ac_float_represent< ac_fixed >::type fl2_t; \ - return fl2_t(op2).operator REL_OP (op); \ - } - -#define FL_ASSIGN_OP_WITH_AC_FIXED(ASSIGN_OP) \ - template< AC_FL_T(), int WF, int IF, bool SF, ac_q_mode QF, ac_o_mode OF> \ - inline AC_FL() &operator ASSIGN_OP ( AC_FL() &op, const ac_fixed &op2) { \ - typedef typename ac::template ac_float_represent< ac_fixed >::type fl2_t; \ - return op.operator ASSIGN_OP (fl2_t(op2)); \ - } - -// -------------------------------------------- End of Macros for Binary Operators with ac_fixed - - // Binary Operators with ac_fixed -------------------------------------------- -#ifdef __AC_FLOAT_ENABLE_ALPHA - FL_BIN_OP_WITH_AC_FIXED(+, plus) - FL_BIN_OP_WITH_AC_FIXED(-, minus) -#endif - FL_BIN_OP_WITH_AC_FIXED(*, mult) - FL_BIN_OP_WITH_AC_FIXED(/, div) - - FL_REL_OP_WITH_AC_FIXED(==) - FL_REL_OP_WITH_AC_FIXED(!=) - FL_REL_OP_WITH_AC_FIXED(>) - FL_REL_OP_WITH_AC_FIXED(>=) - FL_REL_OP_WITH_AC_FIXED(<) - FL_REL_OP_WITH_AC_FIXED(<=) - - FL_ASSIGN_OP_WITH_AC_FIXED(+=) - FL_ASSIGN_OP_WITH_AC_FIXED(-=) - FL_ASSIGN_OP_WITH_AC_FIXED(*=) - FL_ASSIGN_OP_WITH_AC_FIXED(/=) - // -------------------------------------- End of Binary Operators with ac_fixed - -// Global templatized functions for easy initialization to special values -template -inline AC_FL() value( AC_FL() ) { - AC_FL() r; - return r.template set_val(); -} - -namespace ac { -// function to initialize (or uninitialize) arrays 
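// Usage sketch (illustrative):
//   ac_float<16,2,8> buf[64];
//   ac::init_array<AC_VAL_0>(buf, 64);   // force all 64 entries to zero
//   ac::init_array<AC_VAL_DC>(buf, 64);  // "don't care": leaves values
//                                        // unspecified for synthesis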
- template - inline bool init_array( AC_FL() *a, int n) { - AC_FL0() t; - t.template set_val(); - for(int i=0; i < n; i++) - a[i] = t; - return true; - } -} - -/////////////////////////////////////////////////////////////////////////////// - -#if (defined(_MSC_VER) && !defined(__EDG__)) -#pragma warning( pop ) -#endif -#if (defined(__GNUC__) && ( __GNUC__ == 4 && __GNUC_MINOR__ >= 6 || __GNUC__ > 4 ) && !defined(__EDG__)) -#pragma GCC diagnostic pop -#endif -#if defined(__clang__) -#pragma clang diagnostic pop -#endif - -#ifdef __AC_NAMESPACE -} -#endif - -#endif // __AC_FLOAT_H diff --git a/hls4ml/hls4ml/templates/quartus/ac_types/ac_int.h b/hls4ml/hls4ml/templates/quartus/ac_types/ac_int.h deleted file mode 100644 index 4651339..0000000 --- a/hls4ml/hls4ml/templates/quartus/ac_types/ac_int.h +++ /dev/null @@ -1,3099 +0,0 @@ -/************************************************************************** - * * - * Algorithmic C (tm) Datatypes * - * * - * Software Version: 4.0 * - * * - * Release Date : Sat Jun 13 12:35:18 PDT 2020 * - * Release Type : Production Release * - * Release Build : 4.0.0 * - * * - * Copyright 2004-2020, Mentor Graphics Corporation, * - * * - * All Rights Reserved. * - * * - ************************************************************************** - * Licensed under the Apache License, Version 2.0 (the "License"); * - * you may not use this file except in compliance with the License. * - * You may obtain a copy of the License at * - * * - * http://www.apache.org/licenses/LICENSE-2.0 * - * * - * Unless required by applicable law or agreed to in writing, software * - * distributed under the License is distributed on an "AS IS" BASIS, * - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * - * implied. * - * See the License for the specific language governing permissions and * - * limitations under the License. * - ************************************************************************** - * * - * The most recent version of this package is available at github. * - * * - *************************************************************************/ - -/* -// Source: ac_int.h -// Description: fast arbitrary-length bit-accurate integer types: -// - unsigned integer of length W: ac_int -// - signed integer of length W: ac_int -// Author: Andres Takach, Ph.D. -// Notes: -// - C++ Runtime: important to use optimization flag (for example -O3) -// -// - Compiler support: recent GNU compilers are required for correct -// template compilation -// -// - Most frequent migration issues: -// - need to cast to common type when using question mark operator: -// (a < 0) ? -a : a; // a is ac_int -// change to: -// (a < 0) ? -a : (ac_int) a; -// or -// (a < 0) ? (ac_int) -a : (ac_int) a; -// -// - left shift is not arithmetic ("a< b = a << 1; // a is ac_int -// is not equivalent to b=2*a. 
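// (the << result keeps the operand's own width, so the bit shifted out
// of the W-bit value is already lost before the assignment widens it.)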
In order to get 2*a behavior change to: -// ac_int b = (ac_int)a << 1; -// -// - only static length read/write slices are supported: -// - read: x.slc<4>(k) => returns ac_int for 4-bit slice x(4+k-1 DOWNTO k) -// - write: x.set_slc(k,y) = writes bits of y to x starting at index k -*/ - -#ifndef __AC_INT_H -#define __AC_INT_H - -#define AC_VERSION 3 -#define AC_VERSION_MINOR 9 - -#ifndef __cplusplus -#error C++ is required to include this header file -#endif - -#if (defined(__GNUC__) && __GNUC__ < 3 && !defined(__EDG__)) -#error GCC version 3 or greater is required to include this header file -#endif - -#if (defined(_MSC_VER) && _MSC_VER < 1400 && !defined(__EDG__)) -#error Microsoft Visual Studio 8 or newer is required to include this header file -#endif - -#if (defined(_MSC_VER) && !defined(__EDG__)) -#pragma warning( push ) -#pragma warning( disable: 4127 4100 4244 4307 4310 4365 4514 4554 4706 4800 ) -#endif - -// for safety -#if (defined(N) || defined(N2)) -#error One or more of the following is defined: N, N2. Definition conflicts with their usage as template parameters. -#error DO NOT use defines before including third party header files. -#endif - -// for safety -#if (defined(W) || defined(I) || defined(S) || defined(W2) || defined(I2) || defined(S2)) -#error One or more of the following is defined: W, I, S, W2, I2, S2. Definition conflicts with their usage as template parameters. -#error DO NOT use defines before including third party header files. -#endif - -#if defined(true) -#warning The C++ keyword true is defined which may result in subtle compilation problems. Undefining it. -#undef true -#endif -#if defined(false) -#warning The C++ keyword false is defined which may result in subtle compilation problems. Undefining it. -#undef false -#endif - -#ifndef __ASSERT_H__ -#define __ASSERT_H__ -#include -#endif -#include -#ifndef AC_USER_DEFINED_ASSERT -#include -#else -#include -#endif -#include -#include - -#ifndef __SYNTHESIS__ -#ifndef __AC_INT_UTILITY_BASE -#define __AC_INT_UTILITY_BASE -#endif - -#endif - -#ifdef __AC_NAMESPACE -namespace __AC_NAMESPACE { -#endif - -#define AC_MAX(a,b) ((a) > (b) ? (a) : (b)) -#define AC_MIN(a,b) ((a) < (b) ? (a) : (b)) -#define AC_ABS(a) ((a) < 0 ? 
-(a) : (a)) - -#if defined(_MSC_VER) -typedef unsigned __int64 Ulong; -typedef signed __int64 Slong; -#else -typedef unsigned long long Ulong; -typedef signed long long Slong; -#endif - -enum ac_base_mode { AC_BIN=2, AC_OCT=8, AC_DEC=10, AC_HEX=16 }; -enum ac_special_val {AC_VAL_DC, AC_VAL_0, AC_VAL_MIN, AC_VAL_MAX, AC_VAL_QUANTUM}; - -template class ac_int; - -namespace ac_private { -#if defined(__SYNTHESIS__) && !defined(AC_IGNORE_BUILTINS) -#pragma builtin -#endif - - enum {long_w = std::numeric_limits::digits}; - const unsigned int all_ones = (unsigned) ~0; - - // PRIVATE FUNCTIONS in namespace: for implementing ac_int/ac_fixed - -#ifndef __SYNTHESIS__ - inline double mgc_floor(double d) { return floor(d); } -#else - inline double mgc_floor(double d) { return 0.0; } -#endif - - #define AC_ASSERT(cond, msg) ac_private::ac_assert(cond, __FILE__, __LINE__, msg) - inline void ac_assert(bool condition, const char *file=0, int line=0, const char *msg=0) { - #ifndef __SYNTHESIS__ - #ifndef AC_USER_DEFINED_ASSERT - if(!condition) { - std::cerr << "Assert"; - if(file) - std::cerr << " in file " << file << ":" << line; - if(msg) - std::cerr << " " << msg; - std::cerr << std::endl; - assert(0); - } - #else - AC_USER_DEFINED_ASSERT(condition, file, line, msg); - #endif - #endif - } - - // helper structs for statically computing log2 like functions (nbits, log2_floor, log2_ceil) - // using recursive templates - template - struct s_N { - template - struct s_X { - enum { - X2 = X >> N, - N_div_2 = N >> 1, - nbits = X ? (X2 ? N + (int) s_N::template s_X::nbits : (int) s_N::template s_X::nbits) : 0 - }; - }; - }; - template<> struct s_N<0> { - template - struct s_X { - enum {nbits = !!X }; - }; - }; - - template - inline double ldexpr32(double d) { - double d2 = d; - if(N < 0) - for(int i=0; i < -N; i++) - d2 /= (Ulong) 1 << 32; - else - for(int i=0; i < N; i++) - d2 *= (Ulong) 1 << 32; - return d2; - } - template<> inline double ldexpr32<0>(double d) { return d; } - template<> inline double ldexpr32<1>(double d) { return d * ((Ulong) 1 << 32); } - template<> inline double ldexpr32<-1>(double d) { return d / ((Ulong) 1 << 32); } - template<> inline double ldexpr32<2>(double d) { return (d * ((Ulong) 1 << 32)) * ((Ulong) 1 << 32); } - template<> inline double ldexpr32<-2>(double d) { return (d / ((Ulong) 1 << 32)) / ((Ulong) 1 << 32); } - - template - inline double ldexpr(double d) { - return ldexpr32( N < 0 ? 
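// ldexpr<N>(d) computes d * 2^N in two steps: the residue of N below 32
// is applied here with one integer scaling, and the remaining multiple
// of 32 is delegated to ldexpr32<N/32>. Illustrative: ldexpr<-35>(d)
// first divides d by 8 (since -(-35) & 31 == 3) and then by 2^32.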
d/( (unsigned) 1 << (-N & 31)) : d * ( (unsigned) 1 << (N & 31))); - } - - template - inline void iv_copy(const int *op, int *r) { - for(int i=0; i < N; i++) - r[i] = op[i]; - } - template<> inline void iv_copy<1>(const int *op, int *r) { - r[0] = op[0]; - } - template<> inline void iv_copy<2>(const int *op, int *r) { - r[0] = op[0]; - r[1] = op[1]; - } - - template - inline bool iv_equal_zero(const int *op){ - for(int i=0; i < N; i++) - if(op[i]) - return false; - return true; - } - template<> inline bool iv_equal_zero<0>(const int * /*op*/) { return true; } - template<> inline bool iv_equal_zero<1>(const int *op) { - return !op[0]; - } - template<> inline bool iv_equal_zero<2>(const int *op) { - return !(op[0] || op[1]); - } - - template - inline bool iv_equal_ones(const int *op){ - for(int i=0; i < N; i++) - if(~op[i]) - return false; - return true; - } - template<> inline bool iv_equal_ones<0>(const int * /*op*/) { return true; } - template<> inline bool iv_equal_ones<1>(const int *op) { - return !~op[0]; - } - template<> inline bool iv_equal_ones<2>(const int *op) { - return !(~op[0] || ~op[1]); - } - - template - inline bool iv_equal(const int *op1, const int *op2){ - const int M1 = AC_MAX(N1,N2); - const int M2 = AC_MIN(N1,N2); - const int *OP1 = N1 >= N2 ? op1 : op2; - const int *OP2 = N1 >= N2 ? op2 : op1; - for(int i=0; i < M2; i++) - if(OP1[i] != OP2[i]) - return false; - int ext = OP2[M2-1] < 0 ? ~0 : 0; - for(int i=M2; i < M1; i++) - if(OP1[i] != ext) - return false; - return true; - } - template<> inline bool iv_equal<1,1>(const int *op1, const int *op2) { - return op1[0] == op2[0]; - } - - template - inline bool iv_equal_ones_from(const int *op){ - if((B >= 32*N && op[N-1] >= 0) || (B&31 && ~(op[B/32] >> (B&31)))) - return false; - return iv_equal_ones(&op[(B+31)/32]); - } - template<> inline bool iv_equal_ones_from<0,1>(const int *op){ - return iv_equal_ones<1>(op); - } - template<> inline bool iv_equal_ones_from<0,2>(const int *op){ - return iv_equal_ones<2>(op); - } - - template - inline bool iv_equal_zeros_from(const int *op){ - if((B >= 32*N && op[N-1] < 0) || (B&31 && (op[B/32] >> (B&31)))) - return false; - return iv_equal_zero(&op[(B+31)/32]); - } - template<> inline bool iv_equal_zeros_from<0,1>(const int *op){ - return iv_equal_zero<1>(op); - } - template<> inline bool iv_equal_zeros_from<0,2>(const int *op){ - return iv_equal_zero<2>(op); - } - - template - inline bool iv_equal_ones_to(const int *op){ - if((B >= 32*N && op[N-1] >= 0) || (B&31 && ~(op[B/32] | (all_ones << (B&31))))) - return false; - return iv_equal_ones(op); - } - template<> inline bool iv_equal_ones_to<0,1>(const int *op){ - return iv_equal_ones<1>(op); - } - template<> inline bool iv_equal_ones_to<0,2>(const int *op){ - return iv_equal_ones<2>(op); - } - - template - inline bool iv_equal_zeros_to(const int *op){ - if((B >= 32*N && op[N-1] < 0) || (B&31 && (op[B/32] & ~(all_ones << (B&31))))) - return false; - return iv_equal_zero(op); - } - template<> inline bool iv_equal_zeros_to<0,1>(const int *op){ - return iv_equal_zero<1>(op); - } - template<> inline bool iv_equal_zeros_to<0,2>(const int *op){ - return iv_equal_zero<2>(op); - } - - template - inline bool iv_compare(const int *op1, const int *op2){ - const int M1 = AC_MAX(N1,N2); - const int M2 = AC_MIN(N1,N2); - const int *OP1 = N1 >= N2 ? op1 : op2; - const int *OP2 = N1 >= N2 ? op2 : op1; - const bool b = (N1 >= N2) == greater; - int ext = OP2[M2-1] < 0 ? ~0 : 0; - int i2 = M1 > M2 ? 
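// The iv_* kernels model integers as little-endian arrays of 32-bit
// limbs with the sign carried by the top limb. Illustrative: -2^32 held
// in two limbs is { 0x00000000, 0xFFFFFFFF }. When operand lengths
// differ, the shorter operand is compared against its sign extension
// 'ext', as below.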
ext : OP2[M1-1]; - if(OP1[M1-1] != i2) - return b ^ (OP1[M1-1] < i2); - for(int i=M1-2; i >= M2; i--) { - if((unsigned) OP1[i] != (unsigned) ext) - return b ^ ((unsigned) OP1[i] < (unsigned) ext); - } - for(int i=M2-1; i >= 0; i--) { - if((unsigned) OP1[i] != (unsigned) OP2[i]) - return b ^ ((unsigned) OP1[i] < (unsigned) OP2[i]); - } - return false; - } - template<> inline bool iv_compare<1,1,true>(const int *op1, const int *op2) { - return op1[0] > op2[0]; - } - template<> inline bool iv_compare<1,1,false>(const int *op1, const int *op2) { - return op1[0] < op2[0]; - } - - template - inline void iv_extend(int *r, int ext) { - for(int i=0; i < N; i++) - r[i] = ext; - } - template<> inline void iv_extend<-2>(int * /*r*/, int /*ext*/) { } - template<> inline void iv_extend<-1>(int * /*r*/, int /*ext*/) { } - template<> inline void iv_extend<0>(int * /*r*/, int /*ext*/) { } - template<> inline void iv_extend<1>(int *r, int ext) { - r[0] = ext; - } - template<> inline void iv_extend<2>(int *r, int ext) { - r[0] = ext; - r[1] = ext; - } - - template - inline void iv_assign_int64(int *r, Slong l) { - r[0] = (int) l; - if(Nr > 1) { - r[1] = (int) (l >> 32); - iv_extend(r+2, (r[1] < 0) ? ~0 : 0); - } - } - template<> inline void iv_assign_int64<1>(int *r, Slong l) { - r[0] = (int) l; - } - template<> inline void iv_assign_int64<2>(int *r, Slong l) { - r[0] = (int) l; - r[1] = (int) (l >> 32); - } - - template - inline void iv_assign_uint64(int *r, Ulong l) { - r[0] = (int) l; - if(Nr > 1) { - r[1] = (int) (l >> 32); - iv_extend(r+2, 0); - } - } - template<> inline void iv_assign_uint64<1>(int *r, Ulong l) { - r[0] = (int) l; - } - template<> inline void iv_assign_uint64<2>(int *r, Ulong l) { - r[0] = (int) l; - r[1] = (int) (l >> 32); - } - - inline Ulong mult_u_u(int a, int b) { - return (Ulong) (unsigned) a * (Ulong) (unsigned) b; - } - inline Slong mult_u_s(int a, int b) { - return (Ulong) (unsigned) a * (Slong) (signed) b; - } - inline Slong mult_s_u(int a, int b) { - return (Slong) (signed) a * (Ulong) (unsigned) b; - } - inline Slong mult_s_s(int a, int b) { - return (Slong) (signed) a * (Slong) (signed) b; - } - inline void accumulate(Ulong a, Ulong &l1, Slong &l2) { - l1 += (Ulong) (unsigned) a; - l2 += a >> 32; - } - inline void accumulate(Slong a, Ulong &l1, Slong &l2) { - l1 += (Ulong) (unsigned) a; - l2 += a >> 32; - } - - template - inline void iv_mult(const int *op1, const int *op2, int *r) { - if(Nr==1) - r[0] = op1[0] * op2[0]; - else if(N1==1 && N2==1) - iv_assign_int64(r, ((Slong) op1[0]) * ((Slong) op2[0])); - else { - const int M1 = AC_MAX(N1,N2); - const int M2 = AC_MIN(N1,N2); - const int *OP1 = N1 >= N2 ? op1 : op2; - const int *OP2 = N1 >= N2 ? 
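// iv_mult below is a schoolbook multiply over 32-bit limbs: result limb
// k accumulates the 64-bit partial products OP1[k-i] * OP2[i] into the
// (l1, l2) carry pair, using the signed variants (mult_u_s / mult_s_u /
// mult_s_s) whenever a top limb participates, so two's-complement
// operands come out correct without pre-negation.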
op2 : op1; - const int T1 = AC_MIN(M2-1,Nr); - const int T2 = AC_MIN(M1-1,Nr); - const int T3 = AC_MIN(M1+M2-2,Nr); - - Ulong l1 = 0; - Slong l2 = 0; - for(int k=0; k < T1; k++) { - for(int i=0; i < k+1; i++) - accumulate(mult_u_u(OP1[k-i], OP2[i]), l1, l2); - l2 += (Ulong) (unsigned) (l1 >> 32); - r[k] = (int) l1; - l1 = (unsigned) l2; - l2 >>= 32; - } - for(int k=T1; k < T2; k++) { - accumulate(mult_u_s(OP1[k-M2+1], OP2[M2-1]), l1, l2); - for(int i=0; i < M2-1; i++) - accumulate(mult_u_u(OP1[k-i], OP2[i]), l1, l2); - l2 += (Ulong) (unsigned) (l1 >> 32); - r[k] = (int) l1; - l1 = (unsigned) l2; - l2 >>= 32; - } - for(int k=T2; k < T3; k++) { - accumulate(mult_u_s(OP1[k-M2+1], OP2[M2-1]), l1, l2); - for(int i=k-T2+1; i < M2-1; i++) - accumulate(mult_u_u(OP1[k-i], OP2[i]), l1, l2); - accumulate(mult_s_u(OP1[M1-1], OP2[k-M1+1]), l1, l2); - l2 += (Ulong) (unsigned) (l1 >> 32); - r[k] = (int) l1; - l1 = (unsigned) l2; - l2 >>= 32; - } - if(Nr >= M1+M2-1) { - accumulate(mult_s_s(OP1[M1-1], OP2[M2-1]), l1, l2); - r[M1+M2-2] = (int) l1; - if(Nr >= M1+M2) { - l2 += (Ulong) (unsigned) (l1 >> 32); - r[M1+M2-1] = (int) l2; - iv_extend(r+M1+M2, (r[M1+M2-1] < 0) ? ~0 : 0); - } - } - } - } - template<> inline void iv_mult<1,1,1>(const int *op1, const int *op2, int *r) { - r[0] = op1[0] * op2[0]; - } - template<> inline void iv_mult<1,1,2>(const int *op1, const int *op2, int *r) { - iv_assign_int64<2>(r, ((Slong) op1[0]) * ((Slong) op2[0])); - } - - template - inline bool iv_uadd_carry(const int *op1, bool carry, int *r) { - Slong l = carry; - for(int i=0; i < N; i++) { - l += (Ulong) (unsigned) op1[i]; - r[i] = (int) l; - l >>= 32; - } - return l != 0; - } - template<> inline bool iv_uadd_carry<0>(const int * /*op1*/, bool carry, int * /*r*/) { return carry; } - template<> inline bool iv_uadd_carry<1>(const int *op1, bool carry, int *r) { - Ulong l = carry + (Ulong) (unsigned) op1[0]; - r[0] = (int) l; - return (l >> 32) & 1; - } - - template - inline bool iv_add_int_carry(const int *op1, int op2, bool carry, int *r) { - if(N==0) - return carry; - if(N==1) { - Ulong l = carry + (Slong) op1[0] + (Slong) op2; - r[0] = (int) l; - return (l >> 32) & 1; - } - Slong l = carry + (Ulong) (unsigned) op1[0] + (Slong) op2; - r[0] = (int) l; - l >>= 32; - for(int i=1; i < N-1; i++) { - l += (Ulong) (unsigned) op1[i]; - r[i] = (int) l; - l >>= 32; - } - l += (Slong) op1[N-1]; - r[N-1] = (int) l; - return (l >> 32) & 1; - } - template<> inline bool iv_add_int_carry<0>(const int * /*op1*/, int /*op2*/, bool carry, int * /*r*/) { return carry; } - template<> inline bool iv_add_int_carry<1>(const int *op1, int op2, bool carry, int *r) { - Ulong l = carry + (Slong) op1[0] + (Slong) op2; - r[0] = (int) l; - return (l >> 32) & 1; - } - - template - inline bool iv_uadd_n(const int *op1, const int *op2, int *r) { - Ulong l = 0; - for(int i=0; i < N; i++) { - l += (Ulong)(unsigned) op1[i] + (Ulong)(unsigned) op2[i]; - r[i] = (int) l; - l >>= 32; - } - return l & 1; - } - template<> inline bool iv_uadd_n<0>(const int * /*op1*/, const int * /*op2*/, int * /*r*/) { return false; } - template<> inline bool iv_uadd_n<1>(const int *op1, const int *op2, int *r) { - Ulong l = (Ulong) (unsigned) op1[0] + (Ulong) (unsigned) op2[0]; - r[0] = (int) l; - return (l >> 32) & 1; - } - template<> inline bool iv_uadd_n<2>(const int *op1, const int *op2, int *r) { - Ulong l = (Ulong) (unsigned) op1[0] + (Ulong) (unsigned) op2[0]; - r[0] = (int) l; - l >>= 32; - l += (Ulong) (unsigned) op1[1] + (Ulong) (unsigned) op2[1]; - r[1] = (int) l; - 
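// Carry chain sketch (illustrative): adding the 2-limb operands
// {0xFFFFFFFF, 0} and {0x00000001, 0} gives l = 0x1'00000000 on the
// first limb, so r[0] = 0 and the high half of l ripples the carry
// into the second limb's sum.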
return (l >> 32) & 1; - } - - template - inline void iv_add(const int *op1, const int *op2, int *r) { - if(Nr==1) - r[0] = op1[0] + op2[0]; - else { - const int M1 = AC_MAX(N1,N2); - const int M2 = AC_MIN(N1,N2); - const int *OP1 = N1 >= N2 ? op1 : op2; - const int *OP2 = N1 >= N2 ? op2 : op1; - const int T1 = AC_MIN(M2-1,Nr); - const int T2 = AC_MIN(M1,Nr); - - bool carry = iv_uadd_n(OP1, OP2, r); - carry = iv_add_int_carry(OP1+T1, OP2[T1], carry, r+T1); - iv_extend(r+T2, carry ? ~0 : 0); - } - } - template<> inline void iv_add<1,1,1>(const int *op1, const int *op2, int *r) { - r[0] = op1[0] + op2[0]; - } - template<> inline void iv_add<1,1,2>(const int *op1, const int *op2, int *r) { - iv_assign_int64<2>(r, (Slong) op1[0] + (Slong) op2[0]); - } - - template - inline bool iv_sub_int_borrow(const int *op1, int op2, bool borrow, int *r) { - if(N==1) { - Ulong l = (Slong) op1[0] - (Slong) op2 - borrow; - r[0] = (int) l; - return (l >> 32) & 1; - } - Slong l = (Ulong) (unsigned) op1[0] - (Slong) op2 - borrow; - r[0] = (int) l; - l >>= 32; - for(int i=1; i < N-1; i++) { - l += (Ulong) (unsigned) op1[i]; - r[i] = (int) l; - l >>= 32; - } - l += (Slong) op1[N-1]; - r[N-1] = (int) l; - return (l >> 32) & 1; - } - template<> inline bool iv_sub_int_borrow<0>(const int * /*op1*/, int /*op2*/, bool borrow, int * /*r*/) { return borrow; } - template<> inline bool iv_sub_int_borrow<1>(const int *op1, int op2, bool borrow, int *r) { - Ulong l = (Slong) op1[0] - (Slong) op2 - borrow; - r[0] = (int) l; - return (l >> 32) & 1; - } - - template - inline bool iv_sub_int_borrow(int op1, const int *op2, bool borrow, int *r) { - if(N==1) { - Ulong l = (Slong) op1 - (Slong) op2[0] - borrow; - r[0] = (int) l; - return (l >> 32) & 1; - } - Slong l = (Slong) op1 - (Ulong) (unsigned) op2[0] - borrow; - r[0] = (int) l; - l >>= 32; - for(int i=1; i < N-1; i++) { - l -= (Ulong) (unsigned) op2[i]; - r[i] = (int) l; - l >>= 32; - } - l -= (Slong) op2[N-1]; - r[N-1] = (int) l; - return (l >> 32) & 1; - } - template<> inline bool iv_sub_int_borrow<0>(int /*op1*/, const int * /*op2*/, bool borrow, int * /*r*/) { return borrow; } - template<> inline bool iv_sub_int_borrow<1>(int op1, const int *op2, bool borrow, int *r) { - Ulong l = (Slong) op1 - (Slong) op2[0] - borrow; - r[0] = (int) l; - return (l >> 32) & 1; - } - - template - inline bool iv_usub_n(const int *op1, const int *op2, int *r) { - Slong l = 0; - for(int i=0; i < N; i++) { - l += (Ulong)(unsigned) op1[i] - (Ulong)(unsigned) op2[i]; - r[i] = (int) l; - l >>= 32; - } - return l & 1; - } - template<> inline bool iv_usub_n<1>(const int *op1, const int *op2, int *r) { - Ulong l = (Ulong) (unsigned) op1[0] - (Ulong) (unsigned) op2[0]; - r[0] = (int) l; - return (l >> 32) & 1; - } - template<> inline bool iv_usub_n<2>(const int *op1, const int *op2, int *r) { - Slong l = (Ulong) (unsigned) op1[0] - (Ulong) (unsigned) op2[0]; - r[0] = (int) l; - l >>= 32; - l += (Ulong) (unsigned) op1[1] - (Ulong) (unsigned) op2[1]; - r[1] = (int) l; - return (l >> 32) & 1; - } - - template - inline void iv_sub(const int *op1, const int *op2, int *r) { - if(Nr==1) - r[0] = op1[0] - op2[0]; - else { - const int M1 = AC_MAX(N1,N2); - const int M2 = AC_MIN(N1,N2); - const int T1 = AC_MIN(M2-1,Nr); - const int T2 = AC_MIN(M1,Nr); - bool borrow = iv_usub_n(op1, op2, r); - if(N1 > N2) - borrow = iv_sub_int_borrow(op1+T1, op2[T1], borrow, r+T1); - else - borrow = iv_sub_int_borrow(op1[T1], op2+T1, borrow, r+T1); - iv_extend(r+T2, borrow ? 
~0 : 0); - } - } - template<> inline void iv_sub<1,1,1>(const int *op1, const int *op2, int *r) { - r[0] = op1[0] - op2[0]; - } - template<> inline void iv_sub<1,1,2>(const int *op1, const int *op2, int *r) { - iv_assign_int64<2>(r, (Slong) op1[0] - (Slong) op2[0]); - } - - template - inline bool iv_all_bits_same(const int *op, bool bit) { - int t = bit ? ~0 : 0; - for(int i=0; i < N; i++) - if(op[i] != t) - return false; - return true; - } - template<> inline bool iv_all_bits_same<0>(const int * /*op*/, bool /*bit*/) { return true; } - template<> inline bool iv_all_bits_same<1>(const int *op, bool bit) { - return op[0] == (bit ? ~0 : 0); - } - - template - void iv_neg(const int *op1, int *r) { - Slong l = 0; - for(int k = 0; k < AC_MIN(N,Nr); k++) { - l -= (Ulong) (unsigned) op1[k]; - r[k] = (unsigned) l; - l >>= 32; - } - if(Nr > N) { - r[N] = (unsigned) (l - (op1[N-1] < 0 ? ~0 : 0)); - iv_extend(r+N+1, r[N] < 0 ? ~0 : 0); - } - } - - template - void iv_abs(const int *op1, int *r) { - if( S && op1[N-1] < 0) { - iv_neg(op1, r); - } else { - iv_copy(op1, r); - iv_extend(r+N, 0); - } - } - - template - void iv_udiv(const sw2 *n, const sw2 *d, sw2 *q, sw2 *r) { - const int w2_length = 2*w1_length; - int d_msi; // most significant int for d - for(d_msi = D-1; d_msi > 0 && !d[d_msi]; d_msi--) {} - uw4 d1 = 0; - if(!d_msi && !d[0]) { - d1 = n[0]/d[0]; // d is zero => divide by zero - return; - } - int n_msi; // most significant int for n - for(n_msi = N-1; n_msi > 0 && !n[n_msi]; n_msi--) {} - for(int i=0; i < Q; i++) - q[i] = 0; - for(int i=0; i < R; i++) - r[i] = n[i]; - // write most significant "words" into d1 - bool d_mss_odd = (bool) (d[d_msi] >> w1_length); - int d_mss= 2*d_msi + d_mss_odd; // index to most significant short (16-bit) - d1 = (uw4) (uw2) d[d_msi] << (w1_length << (int) !d_mss_odd); - if(d_msi) - d1 |= (uw2) d[d_msi-1] >> (d_mss_odd ? w1_length : 0); - bool n_mss_odd = (bool) (n[n_msi] >> w1_length); - int n_mss = 2*n_msi + n_mss_odd; - if(n_mss < d_mss) { - // q already initialized to 0 - if(R) { - int r_msi = AC_MIN(R-1, n_msi); - for(int j = 0; j <= r_msi; j++) - r[j] = n[j]; - for(int j = r_msi+1; j < R; j++) - r[j] = 0; - } - } else { - uw2 r1[N+1]; - r1[n_msi+1] = 0; - for(int k = n_msi; k >= 0; k--) - r1[k] = n[k]; - for(int k = n_mss; k >=d_mss; k--) { - int k_msi = k >> 1; - bool odd = k & 1; - uw2 r1m1 = k_msi > 0 ? r1[k_msi-1] : (uw2) 0; - uw4 n1 = odd ? - (uw4) ((r1[k_msi+1] << w1_length) | (r1[k_msi] >> w1_length)) << w2_length | ((r1[k_msi] << w1_length) | (r1m1 >> w1_length)) : - (uw4) r1[k_msi] << w2_length | r1m1; - uw2 q1 = n1/d1; - if(q1 >> w1_length) - q1--; - AC_ASSERT(!(q1 >> w1_length), "Problem detected in long division algorithm, Please report"); - unsigned k2 = k - d_mss; - unsigned k2_i = k2 >> 1; - bool odd_2 = k2 & 1; - uw2 q2 = q1 << (odd_2 ? w1_length : 0); - sw4 l = 0; - for(int j = 0; j <= d_msi; j++) { - l += r1[k2_i + j]; - bool l_sign = l < 0; - sw4 prod = (uw4) (uw2) d[j] * (uw4) q2; - l -= prod; - bool ov1 = (l >= 0) & ((prod < 0) | l_sign); - bool ov2 = (l < 0) & (prod < 0) & l_sign; - r1[k2_i + j] = (uw2) l; - l >>= w2_length; - if(ov1) - l |= ((uw4) -1 << w2_length); - if(ov2) - l ^= ((sw4) 1 << w2_length); - } - if(odd_2 | d_mss_odd) { - l += r1[k2_i + d_msi + 1]; - r1[k2_i + d_msi + 1] = (uw2) l; - } - if(l < 0) { - l = 0; - for(int j = 0; j <= d_msi; j++) { - l += (sw4) (uw2) d[j] << (odd_2 ? 
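// iv_udiv is Knuth-style long division over half-word digits: each
// quotient digit q1 is estimated from the top of the running remainder
// against the normalized divisor head d1, a multiply-subtract updates
// the remainder in place, and this branch performs the classic
// restoring step: when the remainder went negative, add the (shifted)
// divisor back once and decrement q1.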
w1_length : 0); - l += r1[k2_i + j]; - r1[k2_i + j] = (uw2) l; - l >>= w2_length; - } - if(odd_2 | d_mss_odd) - r1[k2_i + d_msi + 1] += (uw2) l; - q1--; - } - if(Q && k2_i < Q) { - if(odd_2) - q[k2_i] = q1 << w1_length; - else - q[k2_i] |= q1; - } - } - if(R) { - int r_msi = AC_MIN(R-1, n_msi); - for(int j = 0; j <= r_msi; j++) - r[j] = r1[j]; - for(int j = r_msi+1; j < R; j++) - r[j] = 0; - } - } - } - - template - inline void iv_div(const int *op1, const int *op2, int *r) { - enum { N1_over = N1+(Den_s && (Num_s==2)) }; - if(N1_over==1 && N2==1) { - r[0] = op1[0] / op2[0]; - iv_extend(r+1, ((Num_s || Den_s) && (r[0] < 0)) ? ~0 : 0); - } - else if(N1_over==1 && N2==2) - iv_assign_int64(r, ( (Slong) op1[0]) / (((Slong) op2[1] << 32) | (unsigned) op2[0]) ); - else if(N1_over==2 && N2==1) - if(N1 == 1) - iv_assign_int64(r, ( (Slong) op1[0]) / ( (Slong) op2[0]) ); - else - iv_assign_int64(r, (((Slong) op1[1] << 32) | (unsigned) op1[0]) / ( (Slong) op2[0]) ); - else if(N1_over==2 && N2==2) - if(N1 == 1) - iv_assign_int64(r, ( (Slong) op1[0]) / (((Slong) op2[1] << 32) | (unsigned) op2[0]) ); - else - iv_assign_int64(r, (((Slong) op1[1] << 32) | (unsigned) op1[0]) / (((Slong) op2[1] << 32) | (unsigned) op2[0]) ); - else if(!Num_s && !Den_s) { - iv_udiv(op1, op2, r, 0); - } - else { - enum { N1_neg = N1+(Num_s==2), N2_neg = N2+(Den_s==2)}; - int numerator[N1_neg]; - int denominator[N2_neg]; - int quotient[N1_neg]; - iv_abs(op1, numerator); - iv_abs(op2, denominator); - iv_udiv(numerator, denominator, quotient, 0); - if( (Num_s && op1[N1-1] < 0) ^ (Den_s && op2[N2-1] < 0) ) - iv_neg(quotient, r); - else { - iv_copy(quotient, r); - iv_extend(r+N1_neg, (Num_s || Den_s) && r[N1_neg-1] < 0 ? ~0 : 0); - } - } - } - - template - inline void iv_rem(const int *op1, const int *op2, int *r) { - enum { N1_over = N1+(Den_s && (Num_s==2)) }; // N1_over corresponds to the division - if(N1_over==1 && N2==1) { - r[0] = op1[0] % op2[0]; - iv_extend(r+1, Num_s && r[0] < 0 ? ~0 : 0); - } - else if(N1_over==1 && N2==2) - iv_assign_int64(r, ( (Slong) op1[0]) % (((Slong) op2[1] << 32) | (unsigned) op2[0]) ); - else if(N1_over==2 && N2==1) - if(N1 == 1) - iv_assign_int64(r, ( (Slong) op1[0]) % ( (Slong) op2[0]) ); - else - iv_assign_int64(r, (((Slong) op1[1] << 32) | (unsigned) op1[0]) % ( (Slong) op2[0]) ); - else if(N1_over==2 && N2==2) - if(N1 == 1) - iv_assign_int64(r, ( (Slong) op1[0]) % (((Slong) op2[1] << 32) | (unsigned) op2[0]) ); - else - iv_assign_int64(r, (((Slong) op1[1] << 32) | (unsigned) op1[0]) % (((Slong) op2[1] << 32) | (unsigned) op2[0]) ); - else if(!Num_s && !Den_s) { - iv_udiv(op1, op2, 0, r); - } - else { - enum { N1_neg = N1+(Num_s==2), N2_neg = N2+(Den_s==2)}; - int numerator[N1_neg]; - int denominator[N2_neg]; - int remainder[N2]; - iv_abs(op1, numerator); - iv_abs(op2, denominator); - iv_udiv(numerator, denominator, 0, remainder); - if( (Num_s && op1[N1-1] < 0) ) - iv_neg(remainder, r); - else { - iv_copy(remainder, r); - iv_extend(r+N2, Num_s && r[N2-1] < 0 ? ~0 : 0); - } - } - } - - template - inline void iv_bitwise_complement_n(const int *op, int *r) { - for(int i=0; i < N; i++) - r[i] = ~op[i]; - } - template<> inline void iv_bitwise_complement_n<1>(const int *op, int *r) { - r[0] = ~op[0]; - } - template<> inline void iv_bitwise_complement_n<2>(const int *op, int *r) { - r[0] = ~op[0]; - r[1] = ~op[1]; - } - - template - inline void iv_bitwise_complement(const int *op, int *r) { - const int M = AC_MIN(N,Nr); - iv_bitwise_complement_n(op, r); - iv_extend(r+M, (r[M-1] < 0) ? 
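// Mixed-width bitwise ops (illustrative): the shorter operand behaves
// as if sign-extended, so in iv_bitwise_and below a negative short
// operand passes the longer operand's upper limbs through (iv_copy)
// while a non-negative one clears them (iv_extend with 0); iv_bitwise_or
// and iv_bitwise_xor apply the corresponding dual rules.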
~0 : 0); - } - - template - inline void iv_bitwise_and_n(const int *op1, const int *op2, int *r) { - for(int i=0; i < N; i++) - r[i] = op1[i] & op2[i]; - } - template<> inline void iv_bitwise_and_n<1>(const int *op1, const int *op2, int *r) { - r[0] = op1[0] & op2[0]; - } - template<> inline void iv_bitwise_and_n<2>(const int *op1, const int *op2, int *r) { - r[0] = op1[0] & op2[0]; - r[1] = op1[1] & op2[1]; - } - - template - inline void iv_bitwise_and(const int *op1, const int *op2, int *r) { - const int M1 = AC_MIN(AC_MAX(N1,N2), Nr); - const int M2 = AC_MIN(AC_MIN(N1,N2), Nr); - const int *OP1 = N1 > N2 ? op1 : op2; - const int *OP2 = N1 > N2 ? op2 : op1; - - iv_bitwise_and_n(op1, op2, r); - if(OP2[M2-1] < 0) - iv_copy(OP1+M2, r+M2); - else - iv_extend(r+M2, 0); - iv_extend(r+M1, (r[M1-1] < 0) ? ~0 : 0); - } - - template - inline void iv_bitwise_or_n(const int *op1, const int *op2, int *r) { - for(int i=0; i < N; i++) - r[i] = op1[i] | op2[i]; - } - template<> inline void iv_bitwise_or_n<1>(const int *op1, const int *op2, int *r) { - r[0] = op1[0] | op2[0]; - } - template<> inline void iv_bitwise_or_n<2>(const int *op1, const int *op2, int *r) { - r[0] = op1[0] | op2[0]; - r[1] = op1[1] | op2[1]; - } - - template - inline void iv_bitwise_or(const int *op1, const int *op2, int *r) { - const int M1 = AC_MIN(AC_MAX(N1,N2), Nr); - const int M2 = AC_MIN(AC_MIN(N1,N2), Nr); - const int *OP1 = N1 >= N2 ? op1 : op2; - const int *OP2 = N1 >= N2 ? op2 : op1; - - iv_bitwise_or_n(op1, op2, r); - if(OP2[M2-1] < 0) - iv_extend(r+M2, ~0); - else - iv_copy(OP1+M2, r+M2); - iv_extend(r+M1, (r[M1-1] < 0) ? ~0 : 0); - } - - template - inline void iv_bitwise_xor_n(const int *op1, const int *op2, int *r) { - for(int i=0; i < N; i++) - r[i] = op1[i] ^ op2[i]; - } - template<> inline void iv_bitwise_xor_n<1>(const int *op1, const int *op2, int *r) { - r[0] = op1[0] ^ op2[0]; - } - template<> inline void iv_bitwise_xor_n<2>(const int *op1, const int *op2, int *r) { - r[0] = op1[0] ^ op2[0]; - r[1] = op1[1] ^ op2[1]; - } - - template - inline void iv_bitwise_xor(const int *op1, const int *op2, int *r) { - const int M1 = AC_MIN(AC_MAX(N1,N2), Nr); - const int M2 = AC_MIN(AC_MIN(N1,N2), Nr); - const int *OP1 = N1 >= N2 ? op1 : op2; - const int *OP2 = N1 >= N2 ? op2 : op1; - - iv_bitwise_xor_n(op1, op2, r); - if(OP2[M2-1] < 0) - iv_bitwise_complement_n(OP1+M2, r+M2); - else - iv_copy(OP1+M2, r+M2); - iv_extend(r+M1, (r[M1-1] < 0) ? ~0 : 0); - } - - template - inline void iv_shift_l(const int *op1, unsigned op2, int *r) { - AC_ASSERT(Nr <= N, "iv_shift_l, incorrect usage Nr > N"); - unsigned s31 = op2 & 31; - unsigned ishift = (op2 >> 5) > Nr ? Nr : (op2 >> 5); - if(s31 && ishift!=Nr) { - unsigned lw = 0; - for(unsigned i=0; i < Nr; i++) { - unsigned hw = (i >= ishift) ? op1[i-ishift] : 0; - r[i] = (hw << s31) | (lw >> (32-s31)); - lw = hw; - } - } else { - for(unsigned i=0; i < Nr ; i++) - r[i] = (i >= ishift) ? op1[i-ishift] : 0; - } - } - - template - inline void iv_shift_r(const int *op1, unsigned op2, int *r) { - unsigned s31 = op2 & 31; - unsigned ishift = (op2 >> 5) > N ? N : (op2 >> 5); - int ext = op1[N-1] < 0 ? ~0 : 0; - if(s31 && ishift!=N) { - unsigned lw = (ishift < N) ? op1[ishift] : ext; - for(unsigned i=0; i < Nr; i++) { - unsigned hw = (i+ishift+1 < N) ? op1[i+ishift+1] : ext; - r[i] = (lw >> s31) | (hw << (32-s31)); - lw = hw; - } - } else { - for(unsigned i=0; i < Nr ; i++) - r[i] = (i+ishift < N) ? 
op1[i+ishift] : ext; - } - } - - template - inline void iv_shift_l2(const int *op1, signed op2, int *r) { - if(S && op2 < 0) - iv_shift_r(op1, -op2, r); - else - iv_shift_l(op1, op2, r); - } - - template<> inline void iv_shift_l2<1,1,false>(const int *op1, signed op2, int *r) { - r[0] = (op2 < 32) ? ( (unsigned) op1[0] << op2) : 0; - } - template<> inline void iv_shift_l2<1,1,true>(const int *op1, signed op2, int *r) { - r[0] = (op2 >= 0) ? - (op2 < 32) ? ( (unsigned) op1[0] << op2) : 0 : - (op2 > -32) ? (op1[0] >> -op2) : (op1[0] >> 31); - } - - template - inline void iv_shift_r2(const int *op1, signed op2, int *r) { - if(S && op2 < 0) - iv_shift_l(op1, -op2, r); - else - iv_shift_r(op1, op2, r); - } - - template<> inline void iv_shift_r2<1,1,false>(const int *op1, signed op2, int *r) { - r[0] = (op2 < 32) ? (op1[0] >> op2) : (op1[0] >> 31); - } - template<> inline void iv_shift_r2<1,1,true>(const int *op1, signed op2, int *r) { - r[0] = (op2 >= 0) ? - (op2 < 32) ? (op1[0] >> op2) : (op1[0] >> 31) : - (op2 > -32) ? ( (unsigned) op1[0] << -op2) : 0; - } - - template - inline void iv_const_shift_l(const int *op1, int *r) { - // B >= 0 - if(!B) { - const int M1 = AC_MIN(N,Nr); - iv_copy(op1, r); - iv_extend(r+M1, r[M1-1] < 0 ? -1 : 0); - } - else { - const unsigned s31 = B & 31; - const int ishift = (((B >> 5) > Nr) ? Nr : (B >> 5)); - iv_extend(r, 0); - const int M1 = AC_MIN(N+ishift,Nr); - if(s31) { - unsigned lw = 0; - for(int i=ishift; i < M1; i++) { - unsigned hw = op1[i-ishift]; - r[i] = (hw << s31) | (lw >> ((32-s31)&31)); // &31 is to quiet compilers - lw = hw; - } - if(Nr > M1) { - r[M1] = (signed) lw >> ((32-s31)&31); // &31 is to quiet compilers - iv_extend(r+M1+1, r[M1] < 0 ? ~0 : 0); - } - } else { - for(int i=ishift; i < M1 ; i++) - r[i] = op1[i-ishift]; - iv_extend(r+M1, r[M1-1] < 0 ? -1 : 0); - } - } - } - template<> inline void iv_const_shift_l<1,1,0>(const int *op1, int *r) { - r[0] = op1[0]; - } - template<> inline void iv_const_shift_l<2,1,0>(const int *op1, int *r) { - r[0] = op1[0]; - } - - template - inline void iv_const_shift_r(const int *op1, int *r) { - if(!B) { - const int M1 = AC_MIN(N,Nr); - iv_copy(op1, r); - iv_extend(r+M1, r[M1-1] < 0 ? ~0 : 0); - } - else { - const unsigned s31 = B & 31; - const int ishift = (((B >> 5) > N) ? N : (B >> 5)); - int ext = op1[N-1] < 0 ? ~0 : 0; - if(s31 && ishift!=N) { - unsigned lw = (ishift < N) ? op1[ishift] : ext; - for(int i=0; i < Nr; i++) { - unsigned hw = (i+ishift+1 < N) ? op1[i+ishift+1] : ext; - r[i] = (lw >> s31) | (hw << ((32-s31)&31)); // &31 is to quiet compilers - lw = hw; - } - } else { - for(int i=0; i < Nr ; i++) - r[i] = (i+ishift < N) ? op1[i+ishift] : ext; - } - } - } - template<> inline void iv_const_shift_r<1,1,0>(const int *op1, int *r) { - r[0] = op1[0]; - } - template<> inline void iv_const_shift_r<2,1,0>(const int *op1, int *r) { - r[0] = op1[0]; - } - - template - inline void iv_conv_from_fraction(double d, int *r, bool *qb, bool *rbits, bool *o) { - bool b = d < 0; - double d2 = b ? -d : d; - double dfloor = mgc_floor(d2); - *o = dfloor != 0.0; - d2 = d2 - dfloor; - for(int i=N-1; i >=0; i--) { - d2 *= (Ulong) 1 << 32; - unsigned k = (unsigned int) d2; - r[i] = b ? 
~k : k; - d2 -= k; - } - d2 *= 2; - bool k = ((int) d2) != 0; // is 0 or 1 - d2 -= k; - *rbits = d2 != 0.0; - *qb = (b && *rbits) ^ k; - if(b && !*rbits && !*qb) - iv_uadd_carry(r, true, r); - *o |= b ^ (r[N-1] < 0); - } - - template - inline int to_str(int *v, int w, bool left_just, char *r) { - const char digits[] = {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F'}; - const unsigned char B = b==AC_BIN ? 1 : (b==AC_OCT ? 3 : (b==AC_HEX ? 4 : 0)); - int k = (w+B-1)/B; - int n = (w+31) >> 5; - int bits = 0; - if(b != AC_BIN && left_just) { - if( (bits = -(w % B)) ) - r[--k] = 0; - } - for(int i = 0; i < n; i++) { - if (b != AC_BIN && bits < 0) - r[k] += (unsigned char) (( (unsigned) v[i] << (B+bits)) & (b-1)); - unsigned int m = (unsigned) v[i] >> -bits; - for(bits += 32; bits > 0 && k; bits -= B) { - r[--k] = (char) (m & (b-1)); - m >>= B; - } - } - for(int i=0; i < (w+B-1)/B; i++) - r[i] = digits[(int)r[i]]; - return (w+B-1)/B; - } - template<> inline int to_str(int *v, int w, bool left_just, char *r) { - int k = 0; - int msw = (w-1) >> 5; - if(left_just) { - unsigned bits_msw = w & 31; - if(bits_msw) { - unsigned left_shift = 32 - bits_msw; - for(int i=msw; i > 0; i--) - v[i] = (unsigned) v[i] << left_shift | (unsigned) v[i-1] >> bits_msw; - v[0] = (unsigned) v[0] << left_shift; - } - int lsw = 0; - while(lsw < msw || v[msw] ) { - Ulong l = 0; - for(int i=lsw; i <= msw; i++) { - l += (Ulong) (unsigned) v[i] * 10; - v[i] = l; - l >>= 32; - if(i==lsw && !v[i]) - lsw++; - } - r[k++] = (char) ('0' + (int) l); - } - } else { - const unsigned d = 1000000000; // 10E9 - for(; msw > 0 && !v[msw]; msw--) {} - while(msw >= 0) { - Ulong nl = 0; - for(int i = msw; i >= 0; i--) { - nl <<= 32; - nl |= (unsigned) v[i]; - unsigned q = nl/d; - nl -= (Ulong) q * d; - v[i] = q; - } - if(!v[msw]) - msw--; - bool last = msw == -1; - unsigned rem = (unsigned) nl; - for(int i=0; (i < 9 && !last) || rem; i++) { - r[k++] = (char) ('0' + (int) (rem % 10)); - rem /= 10; - } - } - for(int i=0; i < k/2; i++) { - char c = r[i]; - r[i] = r[k-1-i]; - r[k-1-i] = c; - } - } - r[k] = 0; - return k; - } - - inline int to_string(int *v, int w, bool sign_mag, ac_base_mode base, bool left_just, char *r) { - int n = (w+31) >> 5; - bool neg = !sign_mag && v[n-1] < 0; - if(!left_just) { - while(n-- && v[n] == (neg ? ~0 : 0)) {} - int w2 = 32*(n+1); - if(w2) { - int m = v[n]; - for(int i = 16; i > 0; i >>= 1) { - if((m >> i) == (neg ? ~0 : 0)) - w2 -= i; - else - m >>= i; - } - } - if(w2 < w) - w = w2; - w += !sign_mag; - } - if(base == AC_DEC) - return to_str(v, w, left_just, r); - else if (base == AC_HEX) - return to_str(v, w, left_just, r); - else if (base == AC_OCT) - return to_str(v, w, left_just, r); - else if (base == AC_BIN) - return to_str(v, w, left_just, r); - return 0; - } - - template - inline unsigned iv_leading_bits(const int *op, bool bit); - - template<> inline unsigned iv_leading_bits<1>(const int *op, bool bit) { - const unsigned char tab[] = {4, 3, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0}; - unsigned t = bit ? ~*op : *op; - unsigned cnt = 0; - if(t >> 16) - t >>= 16; - else - cnt += 16; - if(t >> 8) - t >>= 8; - else - cnt += 8; - if(t >> 4) - t >>= 4; - else - cnt += 4; - cnt += tab[t]; - return cnt; - } - - template - inline unsigned iv_leading_bits(const int *op, bool bit) { - int ext_sign = bit ? -1 : 0; - int k; - for(k = N-1; k >= 0 && op[k] == ext_sign; k--) {} - return 32*(N-1-k) + (k < 0 ? 
0 : iv_leading_bits<1>(op+k, bit)); - } - - ////////////////////////////////////////////////////////////////////////////// - // Integer Vector class: iv - ////////////////////////////////////////////////////////////////////////////// - template - class iv { - protected: - int v[N]; - public: - template friend class iv; - iv() {} - template - iv ( const iv &b ) { - const int M = AC_MIN(N,N2); - iv_copy(b.v, v); - iv_extend(v+M, (v[M-1] < 0) ? ~0 : 0); - } - iv ( Slong t) { - iv_assign_int64(v, t); - } - iv ( Ulong t) { - iv_assign_uint64(v, t); - } - iv ( int t) { - v[0] = t; - iv_extend(v+1, (t < 0) ? ~0 : 0); - } - iv ( unsigned int t) { - v[0] = t; - iv_extend(v+1, 0); - } - iv ( long t) { - if(long_w == 32) { - v[0] = t; - iv_extend(v+1, (t < 0) ? ~0 : 0); - } else - iv_assign_int64(v, t); - } - iv ( unsigned long t) { - if(long_w == 32) { - v[0] = t; - iv_extend(v+1, 0); - } else - iv_assign_uint64(v, t); - } - iv ( double d ) { - double d2 = ldexpr32<-N>(d); - bool qb, rbits, o; - iv_conv_from_fraction(d2, v, &qb, &rbits, &o); - } - - // Explicit conversion functions to C built-in types ------------- - inline Slong to_int64() const { return N==1 ? v[0] : ((Ulong)v[1] << 32) | (Ulong) (unsigned) v[0]; } - inline Ulong to_uint64() const { return N==1 ? (Ulong) v[0] : ((Ulong)v[1] << 32) | (Ulong) (unsigned) v[0]; } - inline double to_double() const { - double a = v[N-1]; - for(int i=N-2; i >= 0; i--) { - a *= (Ulong) 1 << 32; - a += (unsigned) v[i]; - } - return a; - } - inline void conv_from_fraction(double d, bool *qb, bool *rbits, bool *o) { - iv_conv_from_fraction(d, v, qb, rbits, o); - } - - template - inline void mult(const iv &op2, iv &r) const { - iv_mult(v, op2.v, r.v); - } - template - void add(const iv &op2, iv &r) const { - iv_add(v, op2.v, r.v); - } - template - void sub(const iv &op2, iv &r) const { - iv_sub(v, op2.v, r.v); - } - template - void div(const iv &op2, iv &r) const { - iv_div(v, op2.v, r.v); - } - template - void rem(const iv &op2, iv &r) const { - iv_rem(v, op2.v, r.v); - } - void increment() { - iv_uadd_carry(v, true, v); - } - void decrement() { - iv_sub_int_borrow(v, 0, true, v); - } - template - void neg(iv &r) const { - iv_neg(v, r.v); - } - template - void shift_l(unsigned op2, iv &r) const { - iv_shift_l(v, op2, r.v); - } - template - void shift_l2(signed op2, iv &r) const { - iv_shift_l2(v, op2, r.v); - } - template - void shift_r(unsigned op2, iv &r) const { - iv_shift_r(v, op2, r.v); - } - template - void shift_r2(signed op2, iv &r) const { - iv_shift_r2(v, op2, r.v); - } - template - void const_shift_l(iv &r) const { - iv_const_shift_l(v, r.v); - } - template - void const_shift_r(iv &r) const { - iv_const_shift_r(v, r.v); - } - template - void bitwise_complement(iv &r) const { - iv_bitwise_complement(v, r.v); - } - template - void bitwise_and(const iv &op2, iv &r) const { - iv_bitwise_and(v, op2.v, r.v); - } - template - void bitwise_or(const iv &op2, iv &r) const { - iv_bitwise_or(v, op2.v, r.v); - } - template - void bitwise_xor(const iv &op2, iv &r) const { - iv_bitwise_xor(v, op2.v, r.v); - } - template - bool equal(const iv &op2) const { - return iv_equal(v, op2.v); - } - template - bool greater_than(const iv &op2) const { - return iv_compare(v, op2.v); - } - template - bool less_than(const iv &op2) const { - return iv_compare(v, op2.v); - } - bool equal_zero() const { - return iv_equal_zero(v); - } - template - void set_slc(unsigned lsb, int WS, const iv &op2) { - AC_ASSERT((31+WS)/32 == N2, "Bad usage: WS greater than length of slice"); - 
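// --------------------------------------------------------------------------
// A short illustrative aside, not part of the original header: every iv<N>
// helper above shares one storage convention. A value lives little-endian in
// N 32-bit limbs, and limbs beyond the most significant word hold the
// replicated sign (all ones when negative, zero otherwise), which is exactly
// what iv_extend() writes. A self-contained sketch of the mapping for a
// 64-bit value, using hypothetical names:
//
//   #include <cstdint>
//   // Store t in n little-endian 32-bit limbs, sign-extending the rest.
//   void demo_limbs(int64_t t, int32_t *v, int n) {
//     v[0] = (int32_t) t;                    // bits 31..0
//     if (n > 1) v[1] = (int32_t) (t >> 32); // bits 63..32
//     for (int i = 2; i < n; i++)
//       v[i] = (t < 0) ? ~0 : 0;             // same fill as iv_extend()
//   }
//
// Under this convention iv<2>::to_int64() simply reassembles
// ((Ulong) v[1] << 32) | (unsigned) v[0], as the specializations below show.
// --------------------------------------------------------------------------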
unsigned msb = lsb+WS-1; - unsigned lsb_v = lsb >> 5; - unsigned lsb_b = lsb & 31; - unsigned msb_v = msb >> 5; - unsigned msb_b = msb & 31; - if(N2==1) { - if(msb_v == lsb_v) - v[lsb_v] ^= (v[lsb_v] ^ ((unsigned) op2.v[0] << lsb_b)) & (~(WS==32 ? 0 : all_ones<> 1) >> (31-lsb_b)); - v[msb_v] ^= (v[msb_v] ^ m) & ~((all_ones<<1)<> 1) >> (31-lsb_b)); - unsigned t = ((unsigned) op2.v[N2-1] << lsb_b) | (((unsigned) op2.v[N2-2] >> 1) >> (31-lsb_b)); - unsigned m; - if(msb_v-lsb_v == N2) { - v[msb_v-1] = t; - m = (((unsigned) op2.v[N2-1] >> 1) >> (31-lsb_b)); - } - else - m = t; - v[msb_v] ^= (v[msb_v] ^ m) & ~((all_ones<<1)<(v, bit); - } - }; - - template<> inline Slong iv<1>::to_int64() const { return v[0]; } - template<> inline Ulong iv<1>::to_uint64() const { return v[0]; } - - template<> inline Slong iv<2>::to_int64() const { - return ((Ulong)v[1] << 32) | (Ulong) (unsigned) v[0]; - } - template<> inline Ulong iv<2>::to_uint64() const { - return ((Ulong)v[1] << 32) | (Ulong) (unsigned) v[0]; - } - - template<> template<> inline void iv<1>::set_slc(unsigned lsb, int WS, const iv<1> &op2) { - v[0] ^= (v[0] ^ ((unsigned) op2.v[0] << lsb)) & (~(WS==32 ? 0 : all_ones< template<> inline void iv<2>::set_slc(unsigned lsb, int WS, const iv<1> &op2) { - Ulong l = to_uint64(); - Ulong l2 = op2.to_uint64(); - l ^= (l ^ (l2 << lsb)) & (~((~(Ulong)0)< template<> inline void iv<2>::set_slc(unsigned lsb, int WS, const iv<2> &op2) { - Ulong l = to_uint64(); - Ulong l2 = op2.to_uint64(); - l ^= (l ^ (l2 << lsb)) & (~(WS==64 ? (Ulong) 0 : ~(Ulong)0< - class iv_conv : public iv { - protected: - iv_conv() {} - template iv_conv(const T& t) : iv(t) {} - }; - - template - class iv_conv : public iv { - public: - operator Ulong () const { return iv::to_uint64(); } - protected: - iv_conv() {} - template iv_conv(const T& t) : iv(t) {} - }; - - template - class iv_conv : public iv { - public: - operator Slong () const { return iv::to_int64(); } - protected: - iv_conv() {} - template iv_conv(const T& t) : iv(t) {} - }; - - // Set default to promote to int as this is the case for almost all types - // create exceptions using specializations - template - struct c_prom { - typedef int promoted_type; - }; - template<> struct c_prom { - typedef unsigned promoted_type; - }; - template<> struct c_prom { - typedef long promoted_type; - }; - template<> struct c_prom { - typedef unsigned long promoted_type; - }; - template<> struct c_prom { - typedef Slong promoted_type; - }; - template<> struct c_prom { - typedef Ulong promoted_type; - }; - template<> struct c_prom { - typedef float promoted_type; - }; - template<> struct c_prom { - typedef double promoted_type; - }; - - template - struct c_arith { - // will error out for pairs of T and T2 that are not defined through specialization - }; - template struct c_arith { - typedef T arith_conv; - }; - - #define C_ARITH(C_TYPE1, C_TYPE2) \ - template<> struct c_arith { \ - typedef C_TYPE1 arith_conv; \ - }; \ - template<> struct c_arith { \ - typedef C_TYPE1 arith_conv; \ - }; - - C_ARITH(double, float) - C_ARITH(double, int) - C_ARITH(double, unsigned) - C_ARITH(double, long) - C_ARITH(double, unsigned long) - C_ARITH(double, Slong) - C_ARITH(double, Ulong) - C_ARITH(float, int) - C_ARITH(float, unsigned) - C_ARITH(float, long) - C_ARITH(float, unsigned long) - C_ARITH(float, Slong) - C_ARITH(float, Ulong) - - C_ARITH(Slong, int) - C_ARITH(Slong, unsigned) - C_ARITH(Ulong, int) - C_ARITH(Ulong, unsigned) - - template - struct map { - typedef T t; - }; - template - struct c_type_params 
{ - // will error out for T for which this template struct is not specialized - }; - - template inline const char *c_type_name() { return "unknown"; } - template<> inline const char *c_type_name() { return "bool";} - template<> inline const char *c_type_name() { return "char";} - template<> inline const char *c_type_name() { return "signed char";} - template<> inline const char *c_type_name() { return "unsigned char";} - template<> inline const char *c_type_name() { return "signed short";} - template<> inline const char *c_type_name() { return "unsigned short";} - template<> inline const char *c_type_name() { return "int";} - template<> inline const char *c_type_name() { return "unsigned";} - template<> inline const char *c_type_name() { return "signed long";} - template<> inline const char *c_type_name() { return "unsigned long";} - template<> inline const char *c_type_name() { return "signed long long";} - template<> inline const char *c_type_name() { return "unsigned long long";} - template<> inline const char *c_type_name() { return "float";} - template<> inline const char *c_type_name() { return "double";} - - template struct c_type; - - template - struct rt_c_type_T { - template - struct op1 { - typedef typename T::template rt_T< c_type >::mult mult; - typedef typename T::template rt_T< c_type >::plus plus; - typedef typename T::template rt_T< c_type >::minus2 minus; - typedef typename T::template rt_T< c_type >::minus minus2; - typedef typename T::template rt_T< c_type >::logic logic; - typedef typename T::template rt_T< c_type >::div2 div; - typedef typename T::template rt_T< c_type >::div div2; - }; - }; - template - struct c_type { - typedef typename c_prom::promoted_type c_prom_T; - struct rt_unary { - typedef c_prom_T neg; - typedef c_prom_T mag_sqr; - typedef c_prom_T mag; - template - struct set { - typedef c_prom_T sum; - }; - }; - template - struct rt_T { - typedef typename rt_c_type_T::template op1::mult mult; - typedef typename rt_c_type_T::template op1::plus plus; - typedef typename rt_c_type_T::template op1::minus minus; - typedef typename rt_c_type_T::template op1::minus2 minus2; - typedef typename rt_c_type_T::template op1::logic logic; - typedef typename rt_c_type_T::template op1::div div; - typedef typename rt_c_type_T::template op1::div2 div2; - }; - inline static std::string type_name() { - std::string r = c_type_name(); - return r; - } - - }; - // with T == c_type - template - struct rt_c_type_T< c_type > { - typedef typename c_prom::promoted_type c_prom_T; - template - struct op1 { - typedef typename c_prom::promoted_type c_prom_T2; - typedef typename c_arith< c_prom_T, c_prom_T2 >::arith_conv mult; - typedef typename c_arith< c_prom_T, c_prom_T2 >::arith_conv plus; - typedef typename c_arith< c_prom_T, c_prom_T2 >::arith_conv minus; - typedef typename c_arith< c_prom_T, c_prom_T2 >::arith_conv minus2; - typedef typename c_arith< c_prom_T, c_prom_T2 >::arith_conv logic; - typedef typename c_arith< c_prom_T, c_prom_T2 >::arith_conv div; - typedef typename c_arith< c_prom_T, c_prom_T2 >::arith_conv div2; - }; - }; - - #define C_TYPE_MAP(C_TYPE) \ - template<> struct map { \ - typedef c_type t; \ - }; - - #define C_TYPE_PARAMS(C_TYPE, WI, SI) \ - template<> struct c_type_params { \ - enum { W = WI, I = WI, E = 0, S = SI, floating_point = 0 }; \ - }; - - #define C_TYPE_MAP_INT(C_TYPE, WI, SI) \ - C_TYPE_MAP(C_TYPE) \ - C_TYPE_PARAMS(C_TYPE, WI, SI) - - #define C_TYPE_MAP_FLOAT(C_TYPE, FP, WFP, IFP, EFP) \ - C_TYPE_MAP(C_TYPE) \ - template<> struct c_type_params { \ 
- enum { W = WFP, I = IFP, E = EFP, S = true, floating_point = FP }; \ - }; - - C_TYPE_MAP_INT(bool, 1, false) - C_TYPE_MAP_INT(char, 8, true) - C_TYPE_MAP_INT(signed char, 8, true) - C_TYPE_MAP_INT(unsigned char, 8, false) - C_TYPE_MAP_INT(signed short, 16, true) - C_TYPE_MAP_INT(unsigned short, 16, false) - C_TYPE_MAP_INT(signed int, 32, true) - C_TYPE_MAP_INT(unsigned int, 32, false) - C_TYPE_MAP_INT(signed long, ac_private::long_w, true) - C_TYPE_MAP_INT(unsigned long, ac_private::long_w, false) - C_TYPE_MAP_INT(signed long long, 64, true) - C_TYPE_MAP_INT(unsigned long long, 64, false) - C_TYPE_MAP_FLOAT(float, 1, 25, 1, 8) - C_TYPE_MAP_FLOAT(double, 2, 54, 1, 11) - - #undef C_TYPE_INT - #undef C_TYPE_PARAMS - #undef C_TYPE_FLOAT - #undef C_TYPE_MAP - - // specializations for following struct declared/defined after definition of ac_int - template - struct rt_ac_int_T { - template - struct op1 { - typedef typename T::template rt_T< ac_int >::mult mult; - typedef typename T::template rt_T< ac_int >::plus plus; - typedef typename T::template rt_T< ac_int >::minus2 minus; - typedef typename T::template rt_T< ac_int >::minus minus2; - typedef typename T::template rt_T< ac_int >::logic logic; - typedef typename T::template rt_T< ac_int >::div2 div; - typedef typename T::template rt_T< ac_int >::div div2; - }; - }; -} - -namespace ac { - // compiler time constant for log2 like functions - template - struct nbits { - enum { val = X ? ac_private::s_N<16>::s_X::nbits : 1 }; - }; - - template - struct log2_floor { - enum { val = nbits::val - 1 }; - }; - - // log2 of 0 is not defined: generate compiler error - template<> struct log2_floor<0> {}; - - template - struct log2_ceil { - enum { lf = log2_floor::val, val = (X == (1 << lf) ? lf : lf+1) }; - }; - - // log2 of 0 is not defined: generate compiler error - template<> struct log2_ceil<0> {}; - - template - struct int_range { - enum { l_s = (LowerBound < 0), u_s = (UpperBound < 0), - signedness = l_s || u_s, - l_nbits = nbits::val, - u_nbits = nbits::val, - nbits = AC_MAX(l_nbits, u_nbits + (!u_s && signedness)) - }; - typedef ac_int type; - }; - - template - class sliceref { -# if defined(__SYNTHESIS__) && !defined(AC_IGNORE_BUILTINS) -# pragma builtin -# endif - int *d_iv; - template friend class sliceref; - public: - sliceref( int *iv ) : d_iv(iv) {} - - inline const sliceref operator = ( const sliceref &val ) { - return operator=(val); - } - - template - inline const sliceref operator = ( const sliceref &val ) { - const int src_lsi = P2/32; - const int src_msi = (P2+W-1)/32; - const int trg_lsi = P/32; - const int trg_msi = (P+W-1)/32; - const int trg_lsb = P&31; - const int trg_msb = (P+W-1)&31; - const int N = src_msi-src_lsi+1; - const int Nr = trg_msi-trg_lsi+1; - const int rshift = (P2&31) - (P&31); - int shifted_src[Nr]; - int *aligned_src = val.d_iv+src_lsi; - if(rshift) { - if(rshift < 0) - ac_private::iv_shift_l(aligned_src, -rshift, shifted_src); - else - ac_private::iv_shift_r(aligned_src, rshift, shifted_src); - aligned_src = shifted_src; - } - unsigned mask_lsi = ac_private::all_ones << trg_lsb; - unsigned mask_msi = ac_private::all_ones >> (31-trg_msb); - if(Nr==1) - mask_lsi &= mask_msi; - int *v = d_iv+trg_lsi; - v[0] ^= (v[0] ^ ((unsigned) aligned_src[0])) & mask_lsi; - for(int k=1; k < Nr-1; k++) - v[k] = aligned_src[k]; - if(Nr > 1) - v[Nr-1] ^= (v[Nr-1] ^ ((unsigned) aligned_src[Nr-1])) & mask_msi; - if(Is_MSB) { - const unsigned rem = 31-trg_msb; - if(rem) { - v[Nr-1] = S ? 
((signed) ((unsigned) v[Nr-1] << rem) >> rem) - : ((unsigned) v[Nr-1] << rem) >> rem; - } else if(!S) { - v[Nr] = 0; - } - } - return *this; - } - }; -} - -enum ac_q_mode { AC_TRN, AC_RND, AC_TRN_ZERO, AC_RND_ZERO, AC_RND_INF, AC_RND_MIN_INF, AC_RND_CONV, AC_RND_CONV_ODD }; -enum ac_o_mode { AC_WRAP, AC_SAT, AC_SAT_ZERO, AC_SAT_SYM }; -template class ac_fixed; - -////////////////////////////////////////////////////////////////////////////// -// Arbitrary-Length Integer: ac_int -////////////////////////////////////////////////////////////////////////////// - -template -class ac_int : public ac_private::iv_conv<(W+31+!S)/32, S, W<=64> -#ifndef __SYNTHESIS__ -__AC_INT_UTILITY_BASE -#endif -{ -#if defined(__SYNTHESIS__) && !defined(AC_IGNORE_BUILTINS) -#pragma builtin -#endif - - enum {N=(W+31+!S)/32}; - typedef ac_private::iv_conv ConvBase; - typedef ac_private::iv Base; - - inline void bit_adjust() { - const unsigned rem = (32-W)&31; - Base::v[N-1] = S ? ((signed) ((unsigned) Base::v[N-1] << rem) >> rem) : (rem ? - ((unsigned) Base::v[N-1] << rem) >> rem : 0); - } - - inline bool is_neg() const { return S && Base::v[N-1] < 0; } - - // returns false if number is denormal - template - bool normalize_private(ac_int &exp, bool reserved_min_exp=false) { - int expt = exp; - int lshift = leading_sign(); - bool fully_normalized = true; - ac_int min_exp; - min_exp.template set_val(); - int max_shift = exp - min_exp - reserved_min_exp; - if(lshift > max_shift) { - lshift = ac_int(max_shift); - expt = min_exp + reserved_min_exp; - fully_normalized = false; - } else { - expt -= lshift; - } - if(Base::equal_zero()) { - expt = 0; - fully_normalized = true; - } - exp = expt; - Base r; - Base::shift_l(lshift, r); - Base::operator=(r); - bit_adjust(); - return fully_normalized; - } - -public: - static const int width = W; - static const int i_width = W; - static const bool sign = S; - static const ac_q_mode q_mode = AC_TRN; - static const ac_o_mode o_mode = AC_WRAP; - static const int e_width = 0; - - template - struct rt { - enum { - mult_w = W+W2, - mult_s = S||S2, - plus_w = AC_MAX(W+(S2&&!S),W2+(S&&!S2))+1, - plus_s = S||S2, - minus_w = AC_MAX(W+(S2&&!S),W2+(S&&!S2))+1, - minus_s = true, - div_w = W+S2, - div_s = S||S2, - mod_w = AC_MIN(W,W2+(!S2&&S)), - mod_s = S, - logic_w = AC_MAX(W+(S2&&!S),W2+(S&&!S2)), - logic_s = S||S2 - }; - typedef ac_int mult; - typedef ac_int plus; - typedef ac_int minus; - typedef ac_int logic; - typedef ac_int div; - typedef ac_int mod; - typedef ac_int arg1; - }; - - template - struct rt_T { - typedef typename ac_private::map::t map_T; - typedef typename ac_private::rt_ac_int_T::template op1::mult mult; - typedef typename ac_private::rt_ac_int_T::template op1::plus plus; - typedef typename ac_private::rt_ac_int_T::template op1::minus minus; - typedef typename ac_private::rt_ac_int_T::template op1::minus2 minus2; - typedef typename ac_private::rt_ac_int_T::template op1::logic logic; - typedef typename ac_private::rt_ac_int_T::template op1::div div; - typedef typename ac_private::rt_ac_int_T::template op1::div2 div2; - typedef ac_int arg1; - }; - - struct rt_unary { - enum { - neg_w = W+1, - neg_s = true, - mag_sqr_w = 2*W-S, - mag_sqr_s = false, - mag_w = W+S, - mag_s = false, - leading_sign_w = ac::log2_ceil::val, - leading_sign_s = false - }; - typedef ac_int neg; - typedef ac_int mag_sqr; - typedef ac_int mag; - typedef ac_int leading_sign; - template - struct set { - enum { sum_w = W + ac::log2_ceil::val, sum_s = S}; - typedef ac_int sum; - }; - }; - - template friend 
class ac_int; - template friend class ac_fixed; - ac_int() { -#if !defined(__SYNTHESIS__) && defined(AC_DEFAULT_IN_RANGE) - bit_adjust(); -#endif - } - template - inline ac_int (const ac_int &op) { - Base::operator =(op); - bit_adjust(); - } - - inline ac_int( bool b ) : ConvBase(b) { bit_adjust(); } - inline ac_int( char b ) : ConvBase(b) { bit_adjust(); } - inline ac_int( signed char b ) : ConvBase(b) { bit_adjust(); } - inline ac_int( unsigned char b ) : ConvBase(b) { bit_adjust(); } - inline ac_int( signed short b ) : ConvBase(b) { bit_adjust(); } - inline ac_int( unsigned short b ) : ConvBase(b) { bit_adjust(); } - inline ac_int( signed int b ) : ConvBase(b) { bit_adjust(); } - inline ac_int( unsigned int b ) : ConvBase(b) { bit_adjust(); } - inline ac_int( signed long b ) : ConvBase(b) { bit_adjust(); } - inline ac_int( unsigned long b ) : ConvBase(b) { bit_adjust(); } - inline ac_int( Slong b ) : ConvBase(b) { bit_adjust(); } - inline ac_int( Ulong b ) : ConvBase(b) { bit_adjust(); } - inline ac_int( double d ) : ConvBase(d) { bit_adjust(); } - - -#if (defined(_MSC_VER) && !defined(__EDG__)) -#pragma warning( push ) -#pragma warning( disable: 4700 ) -#endif -#if (defined(__GNUC__) && ( __GNUC__ == 4 && __GNUC_MINOR__ >= 6 || __GNUC__ > 4 ) && !defined(__EDG__)) -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wuninitialized" -#endif -#if defined(__clang__) -#pragma clang diagnostic push -#pragma clang diagnostic ignored "-Wuninitialized" -#endif - template - inline ac_int &set_val() { - const unsigned int all_ones = (unsigned) ~0; - if(V == AC_VAL_DC) { - ac_int r; - Base::operator =(r); - bit_adjust(); - } - else if(V == AC_VAL_0 || V == AC_VAL_MIN || V == AC_VAL_QUANTUM) { - Base::operator =(0); - if(S && V == AC_VAL_MIN) { - const unsigned int rem = (W-1)&31; - Base::v[N-1] = (all_ones << rem); - } else if(V == AC_VAL_QUANTUM) - Base::v[0] = 1; - } - else { // AC_VAL_MAX - Base::operator =(-1); - const unsigned int rem = (32-W - !S )&31; - Base::v[N-1] = (all_ones >> 1) >> rem; - } - return *this; - } -#if (defined(_MSC_VER) && !defined(__EDG__)) -#pragma warning( pop ) -#endif -#if (defined(__GNUC__) && ( __GNUC__ == 4 && __GNUC_MINOR__ >= 6 || __GNUC__ > 4 ) && !defined(__EDG__)) -#pragma GCC diagnostic pop -#endif -#if defined(__clang__) -#pragma clang diagnostic pop -#endif - - // Explicit conversion functions to C built-in types ------------- - inline int to_int() const { return Base::v[0]; } - inline unsigned to_uint() const { return Base::v[0]; } - inline long to_long() const { - return ac_private::long_w == 32 ? (long) Base::v[0] : (long) Base::to_int64(); - } - inline unsigned long to_ulong() const { - return ac_private::long_w == 32 ? (unsigned long) Base::v[0] : (unsigned long) Base::to_uint64(); - } - inline Slong to_int64() const { return Base::to_int64(); } - inline Ulong to_uint64() const { return Base::to_uint64(); } - inline double to_double() const { return Base::to_double(); } - - inline int length() const { return W; } - - inline std::string to_string(ac_base_mode base_rep, bool sign_mag = false) const { - // base_rep == AC_DEC => sign_mag == don't care (always print decimal in sign magnitude) - char r[N*32+4] = {0}; - int i = 0; - if(sign_mag) - r[i++] = is_neg() ? '-' : '+'; - else if (base_rep == AC_DEC && is_neg()) - r[i++] = '-'; - if(base_rep != AC_DEC) { - r[i++] = '0'; - r[i++] = base_rep == AC_BIN ? 'b' : (base_rep == AC_OCT ? 
'o' : 'x'); - } - int str_w; - if( (base_rep == AC_DEC || sign_mag) && is_neg() ) { - ac_int mag = operator -(); - str_w = ac_private::to_string(mag.v, W+1, sign_mag, base_rep, false, r+i); - } else { - ac_int tmp = *this; - str_w = ac_private::to_string(tmp.v, W+!S, sign_mag, base_rep, false, r+i); - } - if(!str_w) { - r[i] = '0'; - r[i+1] = 0; - } - return std::string(r); - } - inline static std::string type_name() { - const char *tf[] = {",false>", ",true>"}; - std::string r = "ac_int<"; - r += ac_int<32,true>(W).to_string(AC_DEC); - r += tf[S]; - return r; - } - - // Arithmetic : Binary ---------------------------------------------------- - template - typename rt::mult operator *( const ac_int &op2) const { - typename rt::mult r; - Base::mult(op2, r); - return r; - } - template - typename rt::plus operator +( const ac_int &op2) const { - typename rt::plus r; - Base::add(op2, r); - return r; - } - template - typename rt::minus operator -( const ac_int &op2) const { - typename rt::minus r; - Base::sub(op2, r); - return r; - } -#if (defined(__GNUC__) && ( __GNUC__ == 4 && __GNUC_MINOR__ >= 6 || __GNUC__ > 4 ) && !defined(__EDG__)) -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wenum-compare" -#endif - template - typename rt::div operator /( const ac_int &op2) const { - typename rt::div r; - enum {Nminus = ac_int::N, N2 = ac_int::N, N2minus = ac_int::N, - num_s = S + (Nminus > N), den_s = S2 + (N2minus > N2), Nr = rt::div::N }; - Base::template div(op2, r); - return r; - } - template - typename rt::mod operator %( const ac_int &op2) const { - typename rt::mod r; - enum {Nminus = ac_int::N, N2 = ac_int::N, N2minus = ac_int::N, - num_s = S + (Nminus > N), den_s = S2 + (N2minus > N2), Nr = rt::mod::N }; - Base::template rem(op2, r); - return r; - } -#if (defined(__GNUC__) && ( __GNUC__ == 4 && __GNUC_MINOR__ >= 6 || __GNUC__ > 4 ) && !defined(__EDG__)) -#pragma GCC diagnostic pop -#endif - // Arithmetic assign ------------------------------------------------------ - template - ac_int &operator *=( const ac_int &op2) { - Base r; - Base::mult(op2, r); - Base::operator=(r); - bit_adjust(); - return *this; - } - template - ac_int &operator +=( const ac_int &op2) { - Base r; - Base::add(op2, r); - Base::operator=(r); - bit_adjust(); - return *this; - } - template - ac_int &operator -=( const ac_int &op2) { - Base r; - Base::sub(op2, r); - Base::operator=(r); - bit_adjust(); - return *this; - } -#if (defined(__GNUC__) && ( __GNUC__ == 4 && __GNUC_MINOR__ >= 6 || __GNUC__ > 4 ) && !defined(__EDG__)) -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wenum-compare" -#endif - template - ac_int &operator /=( const ac_int &op2) { - enum {Nminus = ac_int::N, N2 = ac_int::N, N2minus = ac_int::N, - num_s = S + (Nminus > N), den_s = S2 + (N2minus > N2), Nr = N }; - Base r; - Base::template div(op2, r); - Base::operator=(r); - bit_adjust(); - return *this; - } - template - ac_int &operator %=( const ac_int &op2) { - enum {Nminus = ac_int::N, N2 = ac_int::N, N2minus = ac_int::N, - num_s = S + (Nminus > N), den_s = S2 + (N2minus > N2), Nr = N }; - Base r; - Base::template rem(op2, r); - Base::operator=(r); - bit_adjust(); - return *this; - } -#if (defined(__GNUC__) && ( __GNUC__ == 4 && __GNUC_MINOR__ >= 6 || __GNUC__ > 4 ) && !defined(__EDG__)) -#pragma GCC diagnostic pop -#endif - // Arithmetic prefix increment, decrement ---------------------------------- - ac_int &operator ++() { - Base::increment(); - bit_adjust(); - return *this; - } - ac_int &operator --() { - 
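// --------------------------------------------------------------------------
// A short illustrative aside, not part of the original header: the rt<>
// typedefs above size every binary result so it cannot overflow.
// Multiplication returns W+W2 bits; addition and subtraction return one
// carry bit more than the wider operand. The compound-assignment operators
// above instead truncate back to W bits through bit_adjust(). A commented
// usage sketch:
//
//   ac_int<8,true>  a = 100, b = 100;
//   ac_int<16,true> p = a * b;  // rt<8,true>::mult is ac_int<16,true>: 10000
//   ac_int<9,true>  s = a + b;  // rt<8,true>::plus is ac_int<9,true>: 200
//   a += b;                     // wraps modulo 2^8: a becomes -56
// --------------------------------------------------------------------------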
Base::decrement(); - bit_adjust(); - return *this; - } - // Arithmetic postfix increment, decrement --------------------------------- - const ac_int operator ++(int) { - ac_int t = *this; - Base::increment(); - bit_adjust(); - return t; - } - const ac_int operator --(int) { - ac_int t = *this; - Base::decrement(); - bit_adjust(); - return t; - } - // Arithmetic Unary -------------------------------------------------------- - ac_int operator +() { - return *this; - } - typename rt_unary::neg operator -() const { - typename rt_unary::neg r; - Base::neg(r); - r.bit_adjust(); - return r; - } - // ! ------------------------------------------------------------------------ - bool operator ! () const { - return Base::equal_zero(); - } - - // Bitwise (arithmetic) unary: complement ----------------------------- - ac_int operator ~() const { - ac_int r; - Base::bitwise_complement(r); - return r; - } - // Bitwise (non-arithmetic) bit_complement ----------------------------- - ac_int bit_complement() const { - ac_int r; - Base::bitwise_complement(r); - r.bit_adjust(); - return r; - } - // Bitwise (arithmetic): and, or, xor ---------------------------------- - template - typename rt::logic operator & ( const ac_int &op2) const { - typename rt::logic r; - Base::bitwise_and(op2, r); - return r; - } - template - typename rt::logic operator | ( const ac_int &op2) const { - typename rt::logic r; - Base::bitwise_or(op2, r); - return r; - } - template - typename rt::logic operator ^ ( const ac_int &op2) const { - typename rt::logic r; - Base::bitwise_xor(op2, r); - return r; - } - // Bitwise assign (not arithmetic): and, or, xor ---------------------------- - template - ac_int &operator &= ( const ac_int &op2 ) { - Base r; - Base::bitwise_and(op2, r); - Base::operator=(r); - bit_adjust(); - return *this; - } - template - ac_int &operator |= ( const ac_int &op2 ) { - Base r; - Base::bitwise_or(op2, r); - Base::operator=(r); - bit_adjust(); - return *this; - } - template - ac_int &operator ^= ( const ac_int &op2 ) { - Base r; - Base::bitwise_xor(op2, r); - Base::operator=(r); - bit_adjust(); - return *this; - } - // Shift (result constrained by left operand) ------------------------------- - template - ac_int operator << ( const ac_int &op2 ) const { - ac_int r; - Base::shift_l2(op2.to_int(), r); - r.bit_adjust(); - return r; - } - template - ac_int operator << ( const ac_int &op2 ) const { - ac_int r; - Base::shift_l(op2.to_uint(), r); - r.bit_adjust(); - return r; - } - template - ac_int operator >> ( const ac_int &op2 ) const { - ac_int r; - Base::shift_r2(op2.to_int(), r); - r.bit_adjust(); - return r; - } - template - ac_int operator >> ( const ac_int &op2 ) const { - ac_int r; - Base::shift_r(op2.to_uint(), r); - r.bit_adjust(); - return r; - } - // Shift assign ------------------------------------------------------------ - template - ac_int &operator <<= ( const ac_int &op2 ) { - Base r; - Base::shift_l2(op2.to_int(), r); - Base::operator=(r); - bit_adjust(); - return *this; - } - template - ac_int &operator <<= ( const ac_int &op2 ) { - Base r; - Base::shift_l(op2.to_uint(), r); - Base::operator=(r); - bit_adjust(); - return *this; - } - template - ac_int &operator >>= ( const ac_int &op2 ) { - Base r; - Base::shift_r2(op2.to_int(), r); - Base::operator=(r); - bit_adjust(); - return *this; - } - template - ac_int &operator >>= ( const ac_int &op2 ) { - Base r; - Base::shift_r(op2.to_uint(), r); - Base::operator=(r); - bit_adjust(); - return *this; - } - // Relational 
--------------------------------------------------------------- - template - bool operator == ( const ac_int &op2) const { - return Base::equal(op2); - } - template - bool operator != ( const ac_int &op2) const { - return !Base::equal(op2); - } - template - bool operator < ( const ac_int &op2) const { - return Base::less_than(op2); - } - template - bool operator >= ( const ac_int &op2) const { - return !Base::less_than(op2); - } - template - bool operator > ( const ac_int &op2) const { - return Base::greater_than(op2); - } - template - bool operator <= ( const ac_int &op2) const { - return !Base::greater_than(op2); - } - - // Bit and Slice Select ----------------------------------------------------- - template - inline const ac_int slc(const ac_int &index) const { - ac_int r; - AC_ASSERT(index.to_int() >= 0, "Attempting to read slc with negative indeces"); - unsigned uindex = ac_int(index).to_uint(); - Base::shift_r(uindex, r); - r.bit_adjust(); - return r; - } - - template - inline const ac_int slc(signed index) const { - ac_int r; - AC_ASSERT(index >= 0, "Attempting to read slc with negative indeces"); - unsigned uindex = index & ((unsigned)~0 >> 1); - Base::shift_r(uindex, r); - r.bit_adjust(); - return r; - } - template - inline const ac_int slc(unsigned uindex) const { - ac_int r; - Base::shift_r(uindex, r); - r.bit_adjust(); - return r; - } - - template - inline ac_int &set_slc(const ac_int lsb, const ac_int &slc) { - AC_ASSERT(lsb.to_int() + W2 <= W && lsb.to_int() >= 0, "Out of bounds set_slc"); - if(W == W2) - Base::operator =(slc); - else { - unsigned ulsb = ac_int(lsb).to_uint(); - Base::set_slc(ulsb, W2, (ac_int) slc); - } - bit_adjust(); // in case sign bit was assigned - return *this; - } - template - inline ac_int &set_slc(signed lsb, const ac_int &slc) { - AC_ASSERT(lsb + W2 <= W && lsb >= 0, "Out of bounds set_slc"); - if(W == W2) - Base::operator =(slc); - else { - unsigned ulsb = lsb & ((unsigned)~0 >> 1); - Base::set_slc(ulsb, W2, (ac_int) slc); - } - bit_adjust(); // in case sign bit was assigned - return *this; - } - template - inline ac_int &set_slc(unsigned ulsb, const ac_int &slc) { - AC_ASSERT(ulsb + W2 <= W, "Out of bounds set_slc"); - if(W == W2) - Base::operator =(slc); - else - Base::set_slc(ulsb, W2, (ac_int) slc); - bit_adjust(); // in case sign bit was assigned - return *this; - } - - template - inline ac::sliceref range() { - #if __cplusplus > 199711L - static_assert(Msb-Lsb+1 > 0, "Range length not positive: MSB < LSB"); - static_assert(Lsb >= 0, "LSB is negative"); - static_assert(Msb < W, "MSB >= W"); - #endif - return ac::sliceref(Base::v); - } - - class ac_bitref { -# if defined(__SYNTHESIS__) && !defined(AC_IGNORE_BUILTINS) -# pragma builtin -# endif - ac_int &d_bv; - unsigned d_index; - public: - ac_bitref( ac_int *bv, unsigned index=0 ) : d_bv(*bv), d_index(index) {} - operator bool () const { return (d_index < W) ? 
(d_bv.v[d_index>>5]>>(d_index&31) & 1) : 0; } - - template - operator ac_int () const { return operator bool (); } - - inline ac_bitref operator = ( int val ) { - // lsb of int (val&1) is written to bit - if(d_index < W) { - int *pval = &d_bv.v[d_index>>5]; - *pval ^= (*pval ^ ( (unsigned) val << (d_index&31) )) & 1 << (d_index&31); - d_bv.bit_adjust(); // in case sign bit was assigned - } - return *this; - } - template - inline ac_bitref operator = ( const ac_int &val ) { - return operator =(val.to_int()); - } - inline ac_bitref operator = ( const ac_bitref &val ) { - return operator =((int) (bool) val); - } - }; - - ac_bitref operator [] ( unsigned int uindex) { - AC_ASSERT(uindex < W, "Attempting to read bit beyond MSB"); - ac_bitref bvh( this, uindex ); - return bvh; - } - ac_bitref operator [] ( int index) { - AC_ASSERT(index >= 0, "Attempting to read bit with negative index"); - unsigned uindex = index & ((unsigned)~0 >> 1); - AC_ASSERT(uindex < W, "Attempting to read bit beyond MSB"); - ac_bitref bvh( this, uindex ); - return bvh; - } - template - ac_bitref operator [] ( const ac_int &index) { - AC_ASSERT(index.to_int() >= 0, "Attempting to read bit with negative index"); - unsigned uindex = ac_int(index).to_uint(); - AC_ASSERT(uindex < W, "Attempting to read bit beyond MSB"); - ac_bitref bvh( this, uindex ); - return bvh; - } - bool operator [] ( unsigned int uindex) const { - AC_ASSERT(uindex < W, "Attempting to read bit beyond MSB"); - return (uindex < W) ? (Base::v[uindex>>5]>>(uindex&31) & 1) : 0; - } - bool operator [] ( int index) const { - AC_ASSERT(index >= 0, "Attempting to read bit with negative index"); - unsigned uindex = index & ((unsigned)~0 >> 1); - AC_ASSERT(uindex < W, "Attempting to read bit beyond MSB"); - return (uindex < W) ? (Base::v[uindex>>5]>>(uindex&31) & 1) : 0; - } - template - bool operator [] ( const ac_int &index) const { - AC_ASSERT(index.to_int() >= 0, "Attempting to read bit with negative index"); - unsigned uindex = ac_int(index).to_uint(); - AC_ASSERT(uindex < W, "Attempting to read bit beyond MSB"); - return (uindex < W) ? 
(Base::v[uindex>>5]>>(uindex&31) & 1) : 0; - } - - typename rt_unary::leading_sign leading_sign() const { - unsigned ls = Base::leading_bits(S & (Base::v[N-1] < 0)) - (32*N - W)-S; - return ls; - } - typename rt_unary::leading_sign leading_sign(bool &all_sign) const { - unsigned ls = Base::leading_bits(S & (Base::v[N-1] < 0)) - (32*N - W)-S; - all_sign = (ls == W-S); - return ls; - } - // returns false if number is denormal - template - bool normalize(ac_int &exp) { - return normalize_private(exp); - } - // returns false if number is denormal, minimum exponent is reserved (usually for encoding special values/errors) - template - bool normalize_RME(ac_int &exp) { - return normalize_private(exp, true); - } - bool and_reduce() const { - return ac_private::iv_equal_ones_to(Base::v); - } - bool or_reduce() const { - return !Base::equal_zero(); - } - bool xor_reduce() const { - unsigned r = Base::v[N-1]; - if(S) { - const unsigned rem = (32-W)&31; - r = (r << rem) >> rem; - } - if(N > 1) - r ^= Base::v[N-2]; - if(N > 2) { - for(int i=0; i 16) - r ^= r >> 16; - if(W > 8) - r ^= r >> 8; - if(W > 4) - r ^= r >> 4; - if(W > 2) - r ^= r >> 2; - if(W > 1) - r ^= r >> 1; - return r&1; - } - - inline void bit_fill_hex(const char *str) { - // Zero Pads if str is too short, throws ms bits away if str is too long - // Asserts if anything other than 0-9a-fA-F is encountered - ac_int res = 0; - while(*str) { - char c = *str; - int h = 0; - if(c >= '0' && c <= '9') - h = c - '0'; - else if(c >= 'A' && c <= 'F') - h = c - 'A' + 10; - else if(c >= 'a' && c <= 'f') - h = c - 'a' + 10; - else { - AC_ASSERT(!c, "Invalid hex digit"); - break; - } - res <<= ac_int<3,false>(4); - res |= ac_int<4,false>(h); - str++; - } - *this = res; - } - - template - inline void bit_fill(const int (&ivec)[Na], bool bigendian=true) { - // bit_fill from integer vector - // if W > N*32, missing most significant bits are zeroed - // if W < N*32, additional bits in ivec are ignored (no overflow checking) - // Example: - // ac_int<80,false> x; int vec[] = { 0xffffa987, 0x6543210f, 0xedcba987 }; - // x.bit_fill(vec); // vec[0] fill bits 79-64 - enum { N0 = (W+31)/32, M = AC_MIN(N0,Na) }; - ac_int res = 0; - for(int i=0; i < M; i++) - res.set_slc(i*32, ac_int<32>(ivec[bigendian ? 
M-1-i : i])); - *this = res; - } -}; - -namespace ac { - template - struct rt_2T { - typedef typename ac_private::map::t map_T; - typedef typename ac_private::map::t map_T2; - typedef typename map_T::template rt_T< map_T2 >::mult mult; - typedef typename map_T::template rt_T< map_T2 >::plus plus; - typedef typename map_T::template rt_T< map_T2 >::minus minus; - typedef typename map_T::template rt_T< map_T2 >::minus2 minus2; - typedef typename map_T::template rt_T< map_T2 >::logic logic; - typedef typename map_T::template rt_T< map_T2 >::div div; - typedef typename map_T::template rt_T< map_T2 >::div2 div2; - }; -} - -namespace ac { - template - struct ac_int_represent { - enum { t_w = ac_private::c_type_params::W, t_s = ac_private::c_type_params::S }; - typedef ac_int type; - }; - template<> struct ac_int_represent {}; - template<> struct ac_int_represent {}; - template - struct ac_int_represent< ac_int > { - typedef ac_int type; - }; -} - -namespace ac_private { - template - struct rt_ac_int_T< ac_int > { - typedef ac_int i2_t; - template - struct op1 { - typedef ac_int i_t; - typedef typename i_t::template rt::mult mult; - typedef typename i_t::template rt::plus plus; - typedef typename i_t::template rt::minus minus; - typedef typename i2_t::template rt::minus minus2; - typedef typename i_t::template rt::logic logic; - typedef typename i_t::template rt::div div; - typedef typename i2_t::template rt::div div2; - typedef typename i_t::template rt::mod mod; - typedef typename i2_t::template rt::mod mod2; - }; - }; - - template - struct rt_ac_int_T< c_type > { - typedef typename ac::ac_int_represent::type i2_t; - enum { W2 = i2_t::width, S2 = i2_t::sign }; - template - struct op1 { - typedef ac_int i_t; - typedef typename i_t::template rt::mult mult; - typedef typename i_t::template rt::plus plus; - typedef typename i_t::template rt::minus minus; - typedef typename i2_t::template rt::minus minus2; - typedef typename i_t::template rt::logic logic; - typedef typename i_t::template rt::div div; - typedef typename i2_t::template rt::div div2; - typedef typename i_t::template rt::mod mod; - typedef typename i2_t::template rt::mod mod2; - }; - }; -} - - -// Specializations for constructors on integers that bypass bit adjusting -// and are therefore more efficient -template<> inline ac_int<1,true>::ac_int( bool b ) { v[0] = b ? 
-1 : 0; } - -template<> inline ac_int<1,false>::ac_int( bool b ) { v[0] = b; } -template<> inline ac_int<1,false>::ac_int( signed char b ) { v[0] = b&1; } -template<> inline ac_int<1,false>::ac_int( unsigned char b ) { v[0] = b&1; } -template<> inline ac_int<1,false>::ac_int( signed short b ) { v[0] = b&1; } -template<> inline ac_int<1,false>::ac_int( unsigned short b ) { v[0] = b&1; } -template<> inline ac_int<1,false>::ac_int( signed int b ) { v[0] = b&1; } -template<> inline ac_int<1,false>::ac_int( unsigned int b ) { v[0] = b&1; } -template<> inline ac_int<1,false>::ac_int( signed long b ) { v[0] = b&1; } -template<> inline ac_int<1,false>::ac_int( unsigned long b ) { v[0] = b&1; } -template<> inline ac_int<1,false>::ac_int( Ulong b ) { v[0] = (int) b&1; } -template<> inline ac_int<1,false>::ac_int( Slong b ) { v[0] = (int) b&1; } - -template<> inline ac_int<8,true>::ac_int( bool b ) { v[0] = b; } -template<> inline ac_int<8,false>::ac_int( bool b ) { v[0] = b; } -template<> inline ac_int<8,true>::ac_int( signed char b ) { v[0] = b; } -template<> inline ac_int<8,false>::ac_int( unsigned char b ) { v[0] = b; } -template<> inline ac_int<8,true>::ac_int( unsigned char b ) { v[0] = (signed char) b; } -template<> inline ac_int<8,false>::ac_int( signed char b ) { v[0] = (unsigned char) b; } - -template<> inline ac_int<16,true>::ac_int( bool b ) { v[0] = b; } -template<> inline ac_int<16,false>::ac_int( bool b ) { v[0] = b; } -template<> inline ac_int<16,true>::ac_int( signed char b ) { v[0] = b; } -template<> inline ac_int<16,false>::ac_int( unsigned char b ) { v[0] = b; } -template<> inline ac_int<16,true>::ac_int( unsigned char b ) { v[0] = b; } -template<> inline ac_int<16,false>::ac_int( signed char b ) { v[0] = (unsigned short) b; } -template<> inline ac_int<16,true>::ac_int( signed short b ) { v[0] = b; } -template<> inline ac_int<16,false>::ac_int( unsigned short b ) { v[0] = b; } -template<> inline ac_int<16,true>::ac_int( unsigned short b ) { v[0] = (signed short) b; } -template<> inline ac_int<16,false>::ac_int( signed short b ) { v[0] = (unsigned short) b; } - -template<> inline ac_int<32,true>::ac_int( signed int b ) { v[0] = b; } -template<> inline ac_int<32,true>::ac_int( unsigned int b ) { v[0] = b; } -template<> inline ac_int<32,false>::ac_int( signed int b ) { v[0] = b; v[1] = 0;} -template<> inline ac_int<32,false>::ac_int( unsigned int b ) { v[0] = b; v[1] = 0;} - -template<> inline ac_int<32,true>::ac_int( Slong b ) { v[0] = (int) b; } -template<> inline ac_int<32,true>::ac_int( Ulong b ) { v[0] = (int) b; } -template<> inline ac_int<32,false>::ac_int( Slong b ) { v[0] = (int) b; v[1] = 0;} -template<> inline ac_int<32,false>::ac_int( Ulong b ) { v[0] = (int) b; v[1] = 0;} - -template<> inline ac_int<64,true>::ac_int( Slong b ) { v[0] = (int) b; v[1] = (int) (b >> 32); } -template<> inline ac_int<64,true>::ac_int( Ulong b ) { v[0] = (int) b; v[1] = (int) (b >> 32);} -template<> inline ac_int<64,false>::ac_int( Slong b ) { v[0] = (int) b; v[1] = (int) ((Ulong) b >> 32); v[2] = 0; } -template<> inline ac_int<64,false>::ac_int( Ulong b ) { v[0] = (int) b; v[1] = (int) (b >> 32); v[2] = 0; } - -// Stream -------------------------------------------------------------------- - -template -inline std::ostream& operator << (std::ostream &os, const ac_int &x) { -#ifndef __SYNTHESIS__ - if ((os.flags() & std::ios::hex) != 0) { - os << x.to_string(AC_HEX); - } else if ((os.flags() & std::ios::oct) != 0) { - os << x.to_string(AC_OCT); - } else { - os << x.to_string(AC_DEC); - } -#endif 
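// ----------------------------------------------------------------------------
// A short illustrative aside, not part of the original header: this inserter
// honors the stream's basefield flags by delegating to to_string(), which
// prefixes non-decimal output with 0b, 0o, or 0x as implemented above. A
// commented usage sketch:
//
//   ac_int<12,false> x = 0xABC;
//   std::cout << x;                    // decimal: 2748
//   std::cout << std::hex << x;        // hexadecimal: 0xABC
//   std::cout << x.to_string(AC_OCT);  // octal: 0o5274
// ----------------------------------------------------------------------------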
- return os; -} - -// Macros for Binary Operators with Integers -------------------------------------------- - -#define BIN_OP_WITH_INT(BIN_OP, C_TYPE, WI, SI, RTYPE) \ - template \ - inline typename ac_int::template rt::RTYPE operator BIN_OP ( C_TYPE i_op, const ac_int &op) { \ - return ac_int(i_op).operator BIN_OP (op); \ - } \ - template \ - inline typename ac_int::template rt::RTYPE operator BIN_OP ( const ac_int &op, C_TYPE i_op) { \ - return op.operator BIN_OP (ac_int(i_op)); \ - } - -#define REL_OP_WITH_INT(REL_OP, C_TYPE, W2, S2) \ - template \ - inline bool operator REL_OP ( const ac_int &op, C_TYPE op2) { \ - return op.operator REL_OP (ac_int(op2)); \ - } \ - template \ - inline bool operator REL_OP ( C_TYPE op2, const ac_int &op) { \ - return ac_int(op2).operator REL_OP (op); \ - } - -#define ASSIGN_OP_WITH_INT(ASSIGN_OP, C_TYPE, W2, S2) \ - template \ - inline ac_int &operator ASSIGN_OP ( ac_int &op, C_TYPE op2) { \ - return op.operator ASSIGN_OP (ac_int(op2)); \ - } - -#define OPS_WITH_INT(C_TYPE, WI, SI) \ - BIN_OP_WITH_INT(*, C_TYPE, WI, SI, mult) \ - BIN_OP_WITH_INT(+, C_TYPE, WI, SI, plus) \ - BIN_OP_WITH_INT(-, C_TYPE, WI, SI, minus) \ - BIN_OP_WITH_INT(/, C_TYPE, WI, SI, div) \ - BIN_OP_WITH_INT(%, C_TYPE, WI, SI, mod) \ - BIN_OP_WITH_INT(>>, C_TYPE, WI, SI, arg1) \ - BIN_OP_WITH_INT(<<, C_TYPE, WI, SI, arg1) \ - BIN_OP_WITH_INT(&, C_TYPE, WI, SI, logic) \ - BIN_OP_WITH_INT(|, C_TYPE, WI, SI, logic) \ - BIN_OP_WITH_INT(^, C_TYPE, WI, SI, logic) \ - \ - REL_OP_WITH_INT(==, C_TYPE, WI, SI) \ - REL_OP_WITH_INT(!=, C_TYPE, WI, SI) \ - REL_OP_WITH_INT(>, C_TYPE, WI, SI) \ - REL_OP_WITH_INT(>=, C_TYPE, WI, SI) \ - REL_OP_WITH_INT(<, C_TYPE, WI, SI) \ - REL_OP_WITH_INT(<=, C_TYPE, WI, SI) \ - \ - ASSIGN_OP_WITH_INT(+=, C_TYPE, WI, SI) \ - ASSIGN_OP_WITH_INT(-=, C_TYPE, WI, SI) \ - ASSIGN_OP_WITH_INT(*=, C_TYPE, WI, SI) \ - ASSIGN_OP_WITH_INT(/=, C_TYPE, WI, SI) \ - ASSIGN_OP_WITH_INT(%=, C_TYPE, WI, SI) \ - ASSIGN_OP_WITH_INT(>>=, C_TYPE, WI, SI) \ - ASSIGN_OP_WITH_INT(<<=, C_TYPE, WI, SI) \ - ASSIGN_OP_WITH_INT(&=, C_TYPE, WI, SI) \ - ASSIGN_OP_WITH_INT(|=, C_TYPE, WI, SI) \ - ASSIGN_OP_WITH_INT(^=, C_TYPE, WI, SI) - -// ------------------------------------- End of Macros for Binary Operators with Integers - -// for backward compatability with v3.9.0 and earlier define following macro -#ifdef AC_INT_NS_FOR_MIXED_OPERATORS -namespace ac { - namespace ops_with_other_types { -#endif -// Mixed Operators with Integers ----------------------------------------------- -OPS_WITH_INT(bool, 1, false) -OPS_WITH_INT(char, 8, true) -OPS_WITH_INT(signed char, 8, true) -OPS_WITH_INT(unsigned char, 8, false) -OPS_WITH_INT(short, 16, true) -OPS_WITH_INT(unsigned short, 16, false) -OPS_WITH_INT(int, 32, true) -OPS_WITH_INT(unsigned int, 32, false) -OPS_WITH_INT(long, ac_private::long_w, true) -OPS_WITH_INT(unsigned long, ac_private::long_w, false) -OPS_WITH_INT(Slong, 64, true) -OPS_WITH_INT(Ulong, 64, false) -// ----------------------------------------- End of Mixed Operators with Integers -#ifdef AC_INT_NS_FOR_MIXED_OPERATORS - } // ops_with_other_types namespace -} -using namespace ac::ops_with_other_types; -#endif - -namespace ac { - // Functions to fill bits - - template - inline T bit_fill_hex(const char *str) { - T res; - res.bit_fill_hex(str); - return res; - } - - // returns bit_fill for type - // example: - // ac_int<80,false> x = ac::bit_fill< ac_int<80,false> > ((int [3]) {0xffffa987, 0x6543210f, 0xedcba987 }); - template - inline T bit_fill(const int (&ivec)[N], bool bigendian=true) 
{ - T res; - res.bit_fill(ivec, bigendian); - return res; - } - -} // ac namespace - -// Mixed Operators with Pointers ----------------------------------------------- - -// Addition of ac_int and pointer -template -T *operator +(T *ptr, const ac_int &op2) { - return ptr + op2.to_int64(); -} -template -T *operator +(const ac_int &op2, T *ptr) { - return ptr + op2.to_int64(); -} -// Subtraction of ac_int from pointer -template -T *operator -(T *ptr, const ac_int &op2) { - return ptr - op2.to_int64(); -} -// ----------------------------------------- End of Mixed Operators with Pointers - -namespace ac_intN { - /////////////////////////////////////////////////////////////////////////////// - // Predefined for ease of use - /////////////////////////////////////////////////////////////////////////////// - typedef ac_int<1, true> int1; - typedef ac_int<1, false> uint1; - typedef ac_int<2, true> int2; - typedef ac_int<2, false> uint2; - typedef ac_int<3, true> int3; - typedef ac_int<3, false> uint3; - typedef ac_int<4, true> int4; - typedef ac_int<4, false> uint4; - typedef ac_int<5, true> int5; - typedef ac_int<5, false> uint5; - typedef ac_int<6, true> int6; - typedef ac_int<6, false> uint6; - typedef ac_int<7, true> int7; - typedef ac_int<7, false> uint7; - typedef ac_int<8, true> int8; - typedef ac_int<8, false> uint8; - typedef ac_int<9, true> int9; - typedef ac_int<9, false> uint9; - typedef ac_int<10, true> int10; - typedef ac_int<10, false> uint10; - typedef ac_int<11, true> int11; - typedef ac_int<11, false> uint11; - typedef ac_int<12, true> int12; - typedef ac_int<12, false> uint12; - typedef ac_int<13, true> int13; - typedef ac_int<13, false> uint13; - typedef ac_int<14, true> int14; - typedef ac_int<14, false> uint14; - typedef ac_int<15, true> int15; - typedef ac_int<15, false> uint15; - typedef ac_int<16, true> int16; - typedef ac_int<16, false> uint16; - typedef ac_int<17, true> int17; - typedef ac_int<17, false> uint17; - typedef ac_int<18, true> int18; - typedef ac_int<18, false> uint18; - typedef ac_int<19, true> int19; - typedef ac_int<19, false> uint19; - typedef ac_int<20, true> int20; - typedef ac_int<20, false> uint20; - typedef ac_int<21, true> int21; - typedef ac_int<21, false> uint21; - typedef ac_int<22, true> int22; - typedef ac_int<22, false> uint22; - typedef ac_int<23, true> int23; - typedef ac_int<23, false> uint23; - typedef ac_int<24, true> int24; - typedef ac_int<24, false> uint24; - typedef ac_int<25, true> int25; - typedef ac_int<25, false> uint25; - typedef ac_int<26, true> int26; - typedef ac_int<26, false> uint26; - typedef ac_int<27, true> int27; - typedef ac_int<27, false> uint27; - typedef ac_int<28, true> int28; - typedef ac_int<28, false> uint28; - typedef ac_int<29, true> int29; - typedef ac_int<29, false> uint29; - typedef ac_int<30, true> int30; - typedef ac_int<30, false> uint30; - typedef ac_int<31, true> int31; - typedef ac_int<31, false> uint31; - typedef ac_int<32, true> int32; - typedef ac_int<32, false> uint32; - typedef ac_int<33, true> int33; - typedef ac_int<33, false> uint33; - typedef ac_int<34, true> int34; - typedef ac_int<34, false> uint34; - typedef ac_int<35, true> int35; - typedef ac_int<35, false> uint35; - typedef ac_int<36, true> int36; - typedef ac_int<36, false> uint36; - typedef ac_int<37, true> int37; - typedef ac_int<37, false> uint37; - typedef ac_int<38, true> int38; - typedef ac_int<38, false> uint38; - typedef ac_int<39, true> int39; - typedef ac_int<39, false> uint39; - typedef ac_int<40, true> int40; - typedef 
ac_int<40, false> uint40; - typedef ac_int<41, true> int41; - typedef ac_int<41, false> uint41; - typedef ac_int<42, true> int42; - typedef ac_int<42, false> uint42; - typedef ac_int<43, true> int43; - typedef ac_int<43, false> uint43; - typedef ac_int<44, true> int44; - typedef ac_int<44, false> uint44; - typedef ac_int<45, true> int45; - typedef ac_int<45, false> uint45; - typedef ac_int<46, true> int46; - typedef ac_int<46, false> uint46; - typedef ac_int<47, true> int47; - typedef ac_int<47, false> uint47; - typedef ac_int<48, true> int48; - typedef ac_int<48, false> uint48; - typedef ac_int<49, true> int49; - typedef ac_int<49, false> uint49; - typedef ac_int<50, true> int50; - typedef ac_int<50, false> uint50; - typedef ac_int<51, true> int51; - typedef ac_int<51, false> uint51; - typedef ac_int<52, true> int52; - typedef ac_int<52, false> uint52; - typedef ac_int<53, true> int53; - typedef ac_int<53, false> uint53; - typedef ac_int<54, true> int54; - typedef ac_int<54, false> uint54; - typedef ac_int<55, true> int55; - typedef ac_int<55, false> uint55; - typedef ac_int<56, true> int56; - typedef ac_int<56, false> uint56; - typedef ac_int<57, true> int57; - typedef ac_int<57, false> uint57; - typedef ac_int<58, true> int58; - typedef ac_int<58, false> uint58; - typedef ac_int<59, true> int59; - typedef ac_int<59, false> uint59; - typedef ac_int<60, true> int60; - typedef ac_int<60, false> uint60; - typedef ac_int<61, true> int61; - typedef ac_int<61, false> uint61; - typedef ac_int<62, true> int62; - typedef ac_int<62, false> uint62; - typedef ac_int<63, true> int63; - typedef ac_int<63, false> uint63; -} // namespace ac_intN - -#ifndef AC_NOT_USING_INTN -using namespace ac_intN; -#endif - -/////////////////////////////////////////////////////////////////////////////// - -#if (defined(_MSC_VER) && !defined(__EDG__)) -#pragma warning( disable: 4700 ) -#endif -#if (defined(__GNUC__) && ( __GNUC__ == 4 && __GNUC_MINOR__ >= 6 || __GNUC__ > 4 ) && !defined(__EDG__)) -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wuninitialized" -#endif -#if defined(__clang__) -#pragma clang diagnostic push -#pragma clang diagnostic ignored "-Wuninitialized" -#endif - -// Global templatized functions for easy initialization to special values -template -inline ac_int value(ac_int) { - ac_int r; - return r.template set_val(); -} -// forward declaration, otherwise GCC errors when calling init_array -template -inline ac_fixed value(ac_fixed); - -#define SPECIAL_VAL_FOR_INTS_DC(C_TYPE, WI, SI) \ -template<> inline C_TYPE value(C_TYPE) { C_TYPE x; return x; } - -// -- C int types ----------------------------------------------------------------- -#define SPECIAL_VAL_FOR_INTS(C_TYPE, WI, SI) \ -template inline C_TYPE value(C_TYPE); \ -template<> inline C_TYPE value(C_TYPE) { return (C_TYPE)0; } \ -SPECIAL_VAL_FOR_INTS_DC(C_TYPE, WI, SI) \ -template<> inline C_TYPE value(C_TYPE) { return (C_TYPE)1; } \ -template<> inline C_TYPE value(C_TYPE) { return (C_TYPE)(SI ? ~(((C_TYPE) 1) << (WI-1)) : (C_TYPE) -1); } \ -template<> inline C_TYPE value(C_TYPE) { return (C_TYPE)(SI ? 
((C_TYPE) 1) << (WI-1) : (C_TYPE) 0); } - -SPECIAL_VAL_FOR_INTS(bool, 1, false) -SPECIAL_VAL_FOR_INTS(char, 8, true) -SPECIAL_VAL_FOR_INTS(signed char, 8, true) -SPECIAL_VAL_FOR_INTS(unsigned char, 8, false) -SPECIAL_VAL_FOR_INTS(short, 16, true) -SPECIAL_VAL_FOR_INTS(unsigned short, 16, false) -SPECIAL_VAL_FOR_INTS(int, 32, true) -SPECIAL_VAL_FOR_INTS(unsigned int, 32, false) -SPECIAL_VAL_FOR_INTS(long, ac_private::long_w, true) -SPECIAL_VAL_FOR_INTS(unsigned long, ac_private::long_w, false) -SPECIAL_VAL_FOR_INTS(Slong, 64, true) -SPECIAL_VAL_FOR_INTS(Ulong, 64, false) - -#define INIT_ARRAY_SPECIAL_VAL_FOR_INTS(C_TYPE) \ - template \ - inline bool init_array(C_TYPE *a, int n) { \ - C_TYPE t = value((C_TYPE) 0); \ - for(int i=0; i < n; i++) \ - a[i] = t; \ - return true; \ - } - -namespace ac { -// PUBLIC FUNCTIONS -// function to initialize (or uninitialize) arrays - template - inline bool init_array(ac_int *a, int n) { - ac_int t; - t.template set_val(); - for(int i=0; i < n; i++) - a[i] = t; - return true; - } - - INIT_ARRAY_SPECIAL_VAL_FOR_INTS(bool) - INIT_ARRAY_SPECIAL_VAL_FOR_INTS(char) - INIT_ARRAY_SPECIAL_VAL_FOR_INTS(signed char) - INIT_ARRAY_SPECIAL_VAL_FOR_INTS(unsigned char) - INIT_ARRAY_SPECIAL_VAL_FOR_INTS(signed short) - INIT_ARRAY_SPECIAL_VAL_FOR_INTS(unsigned short) - INIT_ARRAY_SPECIAL_VAL_FOR_INTS(signed int) - INIT_ARRAY_SPECIAL_VAL_FOR_INTS(unsigned int) - INIT_ARRAY_SPECIAL_VAL_FOR_INTS(signed long) - INIT_ARRAY_SPECIAL_VAL_FOR_INTS(unsigned long) - INIT_ARRAY_SPECIAL_VAL_FOR_INTS(signed long long) - INIT_ARRAY_SPECIAL_VAL_FOR_INTS(unsigned long long) -} - -#if (defined(_MSC_VER) && !defined(__EDG__)) -#pragma warning( pop ) -#endif -#if (defined(__GNUC__) && ( __GNUC__ == 4 && __GNUC_MINOR__ >= 6 || __GNUC__ > 4 ) && !defined(__EDG__)) -#pragma GCC diagnostic pop -#endif -#if defined(__clang__) -#pragma clang diagnostic pop -#endif - -#ifdef __AC_NAMESPACE -} -#endif - -#endif // __AC_INT_H diff --git a/hls4ml/hls4ml/templates/quartus/ac_types/ac_sc.h b/hls4ml/hls4ml/templates/quartus/ac_types/ac_sc.h deleted file mode 100644 index 01601a5..0000000 --- a/hls4ml/hls4ml/templates/quartus/ac_types/ac_sc.h +++ /dev/null @@ -1,552 +0,0 @@ -/************************************************************************** - * * - * Algorithmic C (tm) Datatypes * - * * - * Software Version: 4.0 * - * * - * Release Date : Sat Jun 13 12:35:18 PDT 2020 * - * Release Type : Production Release * - * Release Build : 4.0.0 * - * * - * Copyright 2004-2019, Mentor Graphics Corporation, * - * * - * All Rights Reserved. * - * * - ************************************************************************** - * Licensed under the Apache License, Version 2.0 (the "License"); * - * you may not use this file except in compliance with the License. * - * You may obtain a copy of the License at * - * * - * http://www.apache.org/licenses/LICENSE-2.0 * - * * - * Unless required by applicable law or agreed to in writing, software * - * distributed under the License is distributed on an "AS IS" BASIS, * - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or * - * implied. * - * See the License for the specific language governing permissions and * - * limitations under the License. * - ************************************************************************** - * * - * The most recent version of this package is available at github. 
* - * * - *************************************************************************/ - -#ifndef __AC_SC_H -#define __AC_SC_H - -#ifndef __cplusplus -#error C++ is required to include this header file -#endif - -#if !defined(IEEE_1666_SYSTEMC) && !defined(SYSTEMC_VERSION) && !defined(SC_API_VERSION_STRING) -#error SystemC header file needs to be included before the ac_sc is included -#endif - -#include - -#ifdef __AC_NAMESPACE -namespace __AC_NAMESPACE { -#endif - -// Explicit conversion functions from ac to sc and viceversa -template -ac_int to_ac(const sc_dt::sc_bigint &val){ - enum {N = (W+31)/32 }; - sc_dt::sc_bigint v = val; - ac_int r = 0; -#ifdef __SYNTHESIS__ -#pragma UNROLL y -#endif - for(int i = 0; i < N; i++) { - r.set_slc(i*32, ac_int<32,true>(v.to_int())); - v >>= 32; - } - return ac_int(r); -} - -template -ac_int to_ac(const sc_dt::sc_biguint &val){ - enum {N = (W+31)/32 }; - sc_dt::sc_biguint v = val; - ac_int r = 0; -#ifdef __SYNTHESIS__ -#pragma UNROLL y -#endif - for(int i = 0; i < N; i++) { - r.set_slc(i*32, ac_int<32,true>(v.to_int())); - v >>= 32; - } - return ac_int(r); -} - -template -sc_dt::sc_bigint to_sc(const ac_int &val) { - enum {N = (W+31)/32 }; - ac_int v = val; - sc_dt::sc_bigint r; -#ifdef __SYNTHESIS__ -#pragma UNROLL y -#endif - for(int i = N-1; i >= 0; i--) { - r <<= 32; - r.range(31, 0) = (v.template slc<32>(i*32)).to_int(); - } - return sc_dt::sc_bigint(r); -} - -template -sc_dt::sc_biguint to_sc(const ac_int &val) { - enum {N = (W+31)/32 }; - ac_int v = val; - sc_dt::sc_biguint r; -#ifdef __SYNTHESIS__ -#pragma UNROLL y -#endif - for(int i = N-1; i >= 0; i--) { - r <<= 32; - r.range(31, 0) = (v.template slc<32>(i*32)).to_int(); - } - return sc_dt::sc_biguint(r); -} - -#ifdef SC_INCLUDE_FX -template -ac_fixed to_ac(const sc_dt::sc_fixed &val){ - ac_fixed r = 0; - sc_dt::sc_fixed fv; - fv.range(W-1,0) = val.range(W-1,0); - sc_dt::sc_bigint v(fv); - r.set_slc(0, to_ac(v)); - return r; -} - -template -ac_fixed to_ac(const sc_dt::sc_ufixed &val){ - ac_fixed r = 0; - sc_dt::sc_ufixed fv; - fv.range(W-1,0) = val.range(W-1,0); - sc_dt::sc_biguint v(fv); - r.set_slc(0, to_ac(v)); - return r; -} - -template -sc_dt::sc_fixed to_sc(const ac_fixed &val) { - ac_int v = val.template slc(0); - sc_dt::sc_bigint i = to_sc(v); - sc_dt::sc_fixed f(i); - sc_dt::sc_fixed r; - r.range(W-1,0) = f.range(W-1,0); - return r; -} - -template -sc_dt::sc_ufixed to_sc(const ac_fixed &val) { - ac_int v = val.template slc(0); - sc_dt::sc_biguint i = to_sc(v); - sc_dt::sc_ufixed f(i); - sc_dt::sc_ufixed r; - r.range(W-1,0) = f.range(W-1,0); - return r; -} -#endif - -// Utility global functions for initialization - -template -inline sc_dt::sc_int value(sc_dt::sc_int) { - sc_dt::sc_int r; - if(V == AC_VAL_DC) { - int t; - r = t; - } else if(V == AC_VAL_0 || V == AC_VAL_MIN || V == AC_VAL_QUANTUM) { - r = 0; - if(V == AC_VAL_MIN) - r[W-1] = 1; - else if(V == AC_VAL_QUANTUM) - r[0] = 1; - } else if(AC_VAL_MAX) { - r = -1; - r[W-1] = 0; - } - return r; -} - -template -inline sc_dt::sc_uint value(sc_dt::sc_uint) { - sc_dt::sc_uint r; - if(V == AC_VAL_DC) { - int t; - r = t; - } else if(V == AC_VAL_0 || V == AC_VAL_MIN || V == AC_VAL_QUANTUM) { - r = 0; - if(V == AC_VAL_QUANTUM) - r[0] = 1; - } else if(AC_VAL_MAX) - r = -1; - return r; -} - -template -inline sc_dt::sc_bigint value(sc_dt::sc_bigint) { - sc_dt::sc_bigint r; - if(V == AC_VAL_DC) { - int t; - r = t; - } else if(V == AC_VAL_0 || V == AC_VAL_MIN || V == AC_VAL_QUANTUM) { - r = 0; - if(V == AC_VAL_MIN) - r[W-1] = 1; - else if(V == 
AC_VAL_QUANTUM) - r[0] = 1; - } else if(AC_VAL_MAX) { - r = -1; - r[W-1] = 0; - } - return r; -} - -template -inline sc_dt::sc_biguint value(sc_dt::sc_biguint) { - sc_dt::sc_biguint r; - if(V == AC_VAL_DC) { - int t; - r = t; - } else if(V == AC_VAL_0 || V == AC_VAL_MIN || V == AC_VAL_QUANTUM) { - r = 0; - if(V == AC_VAL_QUANTUM) - r[0] = 1; - } else if(AC_VAL_MAX) - r = -1; - return r; -} - -#ifdef SC_INCLUDE_FX -template -inline sc_dt::sc_fixed value(sc_dt::sc_fixed) { - sc_dt::sc_fixed r; - if(V == AC_VAL_DC) { - int t; - r = t; - } else if(V == AC_VAL_0 || V == AC_VAL_MIN || V == AC_VAL_QUANTUM) { - r = 0; - if(V == AC_VAL_MIN) - r[W-1] = 1; - else if(V == AC_VAL_QUANTUM) - r[0] = 1; - } else if(AC_VAL_MAX) { - r = ~ (sc_dt::sc_fixed) 0; - r[W-1] = 0; - } - return r; -} - -template -inline sc_dt::sc_ufixed value(sc_dt::sc_ufixed) { - sc_dt::sc_ufixed r; - if(V == AC_VAL_DC) { - int t; - r = t; - } else if(V == AC_VAL_0 || V == AC_VAL_MIN || V == AC_VAL_QUANTUM) { - r = 0; - if(V == AC_VAL_QUANTUM) - r[0] = 1; - } else if(AC_VAL_MAX) - r = ~ (sc_dt::sc_ufixed) 0; - return r; -} -#endif - - -namespace ac { -// PUBLIC FUNCTIONS -// function to initialize (or uninitialize) arrays - template - inline bool init_array(sc_dt::sc_int *a, int n) { - sc_dt::sc_int t = value(*a); - for(int i=0; i < n; i++) - a[i] = t; - return true; - } - template - inline bool init_array(sc_dt::sc_uint *a, int n) { - sc_dt::sc_uint t = value(*a); - for(int i=0; i < n; i++) - a[i] = t; - return true; - } - template - inline bool init_array(sc_dt::sc_bigint *a, int n) { - sc_dt::sc_bigint t = value(*a); - for(int i=0; i < n; i++) - a[i] = t; - return true; - } - template - inline bool init_array(sc_dt::sc_biguint *a, int n) { - sc_dt::sc_biguint t = value(*a); - for(int i=0; i < n; i++) - a[i] = t; - return true; - } -#ifdef SC_INCLUDE_FX - template - inline bool init_array(sc_dt::sc_fixed *a, int n) { - sc_dt::sc_fixed t = value(*a); - for(int i=0; i < n; i++) - a[i] = t; - return true; - } - template - inline bool init_array(sc_dt::sc_ufixed *a, int n) { - sc_dt::sc_ufixed t = value(*a); - for(int i=0; i < n; i++) - a[i] = t; - return true; - } -#endif -} - -#ifdef __AC_NAMESPACE -} -#endif - - -// TRACE FUNCTIONS - -// SystemC Versions - 2.2.0 20070314 -// 2.3.0 20120701 -// 2.3.1 20140417 -// 2.3.2 20171012 - -#if !defined(NCSC) -#if (SYSTEMC_VERSION >= 20140417) && !defined(SC_TRACE_FILE_BASE_H_INCLUDED_) -namespace sc_core { -class vcd_trace; -class sc_trace_file_base - : public sc_trace_file -{ -public: - enum vcd_enum {VCD_WIRE=0, VCD_REAL, VCD_EVENT, VCD_TIME, VCD_LAST}; - virtual void do_initialize() = 0; - FILE* fp; -#if (SYSTEMC_VERSION >= 20171012) - sc_time::value_type trace_unit_fs, kernel_unit_fs; -#else - double timescale_unit; -#endif - bool timescale_set_by_user; - std::string filename_; - bool initialized_; - bool trace_delta_cycles_; - virtual ~sc_trace_file_base(); -}; -class vcd_trace_file - : public sc_trace_file_base -{ -public: - ~vcd_trace_file(); - std::string obtain_name(); - virtual void do_initialize(); - unsigned vcd_name_index; -#if (SYSTEMC_VERSION >= 20171012) - sc_time::value_type previous_time_units_low, previous_time_units_high; -#else - unsigned previous_time_units_low, previous_time_units_high; -#endif - std::vector traces; -}; -} -#endif - -namespace sc_core { -//============================================================================== -// The following block of code is copied from the file sc_vcd_trace.cpp in the -// SystemC distribution. 
This code should have been placed in the file -// sc_vcd_trace.h to allow proper C++ derivation. -class vcd_trace -{ -public: - vcd_trace(const std::string& name_, const std::string& vcd_name_); - virtual void write(FILE* f) = 0; - virtual void set_width(); - virtual bool changed() = 0; -#if (SYSTEMC_VERSION >= 20171012) - virtual void print_variable_declaration_line(FILE* f, const char* scoped_name); -#else - virtual void print_variable_declaration_line(FILE* f); -#endif - void compose_data_line(char* rawdata, char* compdata); - -#if (SYSTEMC_VERSION >= 20140417) - std::string compose_line(const std::string& data); -#else - std::string compose_line(const std::string data); -#endif - virtual ~vcd_trace(); - const std::string name; - const std::string vcd_name; -#if (SYSTEMC_VERSION >= 20171012) - vcd_trace_file::vcd_enum vcd_var_type; -#else - const char* vcd_var_typ_name; -#endif - int bit_width; -}; -} -#endif - -#ifdef __AC_NAMESPACE -namespace __AC_NAMESPACE { -#endif - -namespace ac_tracing { - -//============================================================================== -// TRACING SUPPORT FOR AC_INT -template -class vcd_ac_int_trace : public sc_core::vcd_trace -{ -public: - vcd_ac_int_trace(const ac_int &object_, const std::string& name_, const std::string& vcd_name_) : - vcd_trace(name_, vcd_name_), object(object_) - { -#if (SYSTEMC_VERSION >= 20171012) - vcd_var_type = sc_core::vcd_trace_file::VCD_WIRE; -#else - vcd_var_typ_name = "wire"; // SystemC does not expose vcd_types[] in sc_vcd_trace.h -#endif - bit_width = W; // bit_width defined in base class 'vcd_trace' - } - - virtual void write(FILE* f) { - // The function to_string(AC_BIN) returns a string with the zero-radix prefix (i.e. "0b"). - // Strip that prefix off because compose_line will add its own. - std::fprintf(f, "%s", compose_line(((ac_int)object).to_string(AC_BIN,true).substr(3)).c_str()); - old_value = object; - } - - virtual void set_width() { bit_width = W; } - - // Comparison function needs to be pure virtual too - virtual bool changed() { return !(object == old_value); } - - virtual ~vcd_ac_int_trace() {} -protected: - const ac_int &object; - ac_int old_value; -}; - -template -inline void sc_trace(sc_core::sc_trace_file *tf, const ac_int &a, const std::string &name) -{ - using namespace sc_core; - if (tf) { - vcd_trace *t = (vcd_trace*) new vcd_ac_int_trace(a,name,((vcd_trace_file*)tf)->obtain_name()); - ((vcd_trace_file*)tf)->traces.push_back(t); - } -} -//============================================================================== - -#if !defined(__AC_FIXED_MTI_H) -// The ac_fixed.h shipped with ModelSim/QuestaSim has a stub for sc_trace() for ac_fixed so -// this code is not used. The stub should be removed in a future release of the simulator. 
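-// A minimal usage sketch for the ac_int tracing support above (illustrative
-// only; "tf" and "sig" are hypothetical testbench names):
-//
-//   sc_core::sc_trace_file *tf = sc_core::sc_create_vcd_trace_file("waves");
-//   ac_int<10,false> sig = 0;
-//   sc_trace(tf, sig, "sig");   // registers a vcd_ac_int_trace<10,false>
-//   sc_core::sc_start(100, sc_core::SC_NS);
-//   sc_core::sc_close_vcd_trace_file(tf);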
-#if defined(__AC_FIXED_H) && !defined(SC_TRACE_AC_FIXED) -#define SC_TRACE_AC_FIXED -//============================================================================== -// TRACING SUPPORT FOR AC_FIXED -template -inline void sc_trace(sc_core::sc_trace_file *tf, const ac_fixed &a, const std::string &name) -{ - const int iv_N = (W+31+!S)/32; - typedef typename ac_private::template iv CommonBase_t; - sc_trace(tf, *(const ac_int*)(const CommonBase_t*) &a, name); -} -//============================================================================== -#endif -#endif - -#if defined(__AC_FLOAT_H) && !defined(SC_TRACE_AC_FLOAT) -#define SC_TRACE_AC_FLOAT -//============================================================================== -// TRACING SUPPORT FOR AC_FLOAT -template -inline void sc_trace(sc_core::sc_trace_file *tf, const ac_float &a, const std::string &name) -{ - sc_trace(tf, a.m, name + ".m"); - sc_trace(tf, a.e, name + ".e"); -} -//============================================================================== -#endif - -#if defined(__AC_STD_FLOAT_H) && !defined(SC_TRACE_AC_STD_FLOAT) -#define SC_TRACE_AC_STD_FLOAT -//============================================================================== -// TRACING SUPPORT FOR AC_STD_FLOAT -template -inline void sc_trace(sc_core::sc_trace_file *tf, const ac_std_float &a, const std::string &name) -{ - sc_trace(tf, a.data(), name + ".d"); -} -//============================================================================== -//============================================================================== -// TRACING SUPPORT FOR AC_IEEE_FLOAT -inline void sc_trace(sc_core::sc_trace_file *tf, const ac_ieee_float &a, const std::string &name) -{ - sc_trace(tf, a.data(), name + ".d"); -} -inline void sc_trace(sc_core::sc_trace_file *tf, const ac_ieee_float &a, const std::string &name) -{ - sc_trace(tf, *(const int*) &a.data(), name + ".d"); -} -inline void sc_trace(sc_core::sc_trace_file *tf, const ac_ieee_float &a, const std::string &name) -{ - sc_trace(tf, *(const long long*) &a.data(), name + ".d"); -} -inline void sc_trace(sc_core::sc_trace_file *tf, const ac_ieee_float &a, const std::string &name) -{ - sc_trace(tf, ((const long long*) &a.data())[0], name + ".d0"); - sc_trace(tf, ((const long long*) &a.data())[1], name + ".d1"); -} -inline void sc_trace(sc_core::sc_trace_file *tf, const ac_ieee_float &a, const std::string &name) -{ - sc_trace(tf, ((const long long*) &a.data())[0], name + ".d0"); - sc_trace(tf, ((const long long*) &a.data())[1], name + ".d1"); - sc_trace(tf, ((const long long*) &a.data())[2], name + ".d2"); - sc_trace(tf, ((const long long*) &a.data())[3], name + ".d3"); -} -// TRACING SUPPORT FOR AC::BFLOAT16 -inline void sc_trace(sc_core::sc_trace_file *tf, const ac::bfloat16 &a, const std::string &name) -{ - sc_trace(tf, a.data(), name + ".d"); -} -//============================================================================== -#endif - -#if defined(__AC_COMPLEX_H) && !defined(SC_TRACE_AC_COMPLEX) -#define SC_TRACE_AC_COMPLEX -//============================================================================== -// TRACING SUPPORT FOR AC_COMPLEX -template -inline void sc_trace(sc_core::sc_trace_file *tf, const ac_complex &a, const std::string &name) -{ - sc_trace(tf, a.real(), name + ".r"); - sc_trace(tf, a.imag(), name + ".i"); -} -#endif - -} // namespace ac_tracing - -#ifdef __AC_NAMESPACE -} -#endif - -namespace sc_core { -#ifdef __AC_NAMESPACE - using __AC_NAMESPACE::ac_tracing::sc_trace; -#else - using ac_tracing::sc_trace; -#endif 
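-// Note: the composite-type overloads pulled in here trace member-wise; e.g.
-// tracing an ac_complex named "z" registers the VCD signals "z.r" and "z.i",
-// and an ac_float named "f" registers "f.m" (mantissa) and "f.e" (exponent).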
-}
-
-#endif // __AC_SC_H
diff --git a/hls4ml/hls4ml/templates/quartus/ac_types/ac_std_float.h b/hls4ml/hls4ml/templates/quartus/ac_types/ac_std_float.h
deleted file mode 100644
index 3b335b9..0000000
--- a/hls4ml/hls4ml/templates/quartus/ac_types/ac_std_float.h
+++ /dev/null
@@ -1,2318 +0,0 @@
-/**************************************************************************
- *                                                                        *
- *  Algorithmic C (tm) Datatypes                                          *
- *                                                                        *
- *  Software Version: 4.0                                                 *
- *                                                                        *
- *  Release Date    : Sat Jun 13 12:35:18 PDT 2020                        *
- *  Release Type    : Production Release                                  *
- *  Release Build   : 4.0.0                                               *
- *                                                                        *
- *  Copyright 2018-2020, Mentor Graphics Corporation,                     *
- *                                                                        *
- *  All Rights Reserved.                                                  *
- *                                                                        *
- **************************************************************************
- *  Licensed under the Apache License, Version 2.0 (the "License");       *
- *  you may not use this file except in compliance with the License.      *
- *  You may obtain a copy of the License at                               *
- *                                                                        *
- *      http://www.apache.org/licenses/LICENSE-2.0                        *
- *                                                                        *
- *  Unless required by applicable law or agreed to in writing, software   *
- *  distributed under the License is distributed on an "AS IS" BASIS,     *
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or       *
- *  implied.                                                              *
- *  See the License for the specific language governing permissions and   *
- *  limitations under the License.                                        *
- **************************************************************************
- *                                                                        *
- *  The most recent version of this package is available at github.      *
- *                                                                        *
- *************************************************************************/
-
-/* Source:         ac_std_float.h
- * Description:    class for floating point operation handling in C++
- * Author:         Andres Takach, Ph.D.
-
-Overview: this header defines three classes
-
-  ac_ieee_float<Format>
-    Meant to store floats in IEEE standard binary format
-    Format indicates width:
-      binary16: (half float) uses short
-      binary32: (float) uses int
-      binary64: (double) uses array of long long with one element
-      binary128: (long double in some platforms) uses array of long long with two elements
-      binary256: uses array of long long with four elements
-
-  ac::bfloat16
-    Implements Google's tensorflow::bfloat16
-    Stores data as "short"
-
-  ac_std_float<W,E>
-    Superset of ac_ieee_float in that any bit width and exponent width is
-      allowed
-    This is used by ac_ieee_float and ac::bfloat16
-
-    Uses an ac_int<W,true> that holds the bit pattern for a standard (IEEE) style binary
-    float:
-         1) sign-magnitude representation, sign is MSB
-         2) mantissa (significand) with implied bit for normal numbers
-         3) E is not restricted to IEEE widths, another class ac_ieee_float does that
-
-    Provides an easy way to convert to/from the closest covering ac_float:
-      Constructor from ac_float
-        The two most negative exponents of ac_float are not representable: shift
-          significand further to the right (for now no attempt to round)
-        The most negative mantissa of ac_float (in two's complement) when converted
-          to sign-magnitude requires a right shift (add +1 to exponent)
-          If exponent is already max, two alternatives:
-            - "saturate" (store most negative number)
-            - Store as -Inf (currently this option not available)
-      Exponent is offset
-      Mantissa implied bit is removed from normal numbers
-
-      Explicit conversion to_ac_float
-        Ignores exceptions (Inf, NaN)
-        Does inverse as above to obtain ac_float
-*/
-
-#ifndef __AC_STD_FLOAT_H
-#define __AC_STD_FLOAT_H
-#include <ac_float.h>
-#include <cstring>
-// Inclusion of cmath undefs all macros such as signbit etc that some parsers may define for C
-#include <cmath>
-
-#ifdef __SYNTHESIS__
-#ifdef
AC_IEEE_FLOAT_USE_BUILTIN -#undef AC_IEEE_FLOAT_USE_BUILTIN -#endif -#endif - -#ifdef __AC_NAMESPACE -namespace __AC_NAMESPACE { -#endif - -// For now make data members public since SCVerify needs it -//#ifdef __AC_MAKE_PRIVATE_DATA_PUBLIC -#if 1 -#define __AC_DATA_PRIVATE public: -#else -#define __AC_DATA_PRIVATE private: -#endif - -namespace ac_private { - template - struct check_rounding { enum {Only_symmetrical_roundings_or_truncations_supported}; }; - template<> struct check_rounding {}; - - template - void check_supported() { - // only symmetrical roundings supported - const bool supported = Q==AC_RND_CONV || Q==AC_TRN_ZERO || Q==AC_RND_INF || Q == AC_RND_CONV_ODD; -#if __cplusplus > 199711L - static_assert(supported, "Only symmetrical roundings/truncations supported"); -#else - (void) check_rounding::Only_symmetrical_roundings_or_truncations_supported; -#endif - } - - template - struct check_rounding2 { enum {Only_round_to_even_supported_when_using_BUILTIN}; }; - template<> struct check_rounding2 {}; - - template - void check_supported2() { -#ifdef AC_IEEE_FLOAT_USE_BUILTIN - const bool supported = Q==AC_RND_CONV; -#if __cplusplus > 199711L - static_assert(supported, "Only round to even supported"); -#else - (void) check_rounding2::Only_round_to_even_supported_when_using_BUILTIN; -#endif -#endif - } - - template - struct rt_closed_T { - }; - template - struct rt_closed_T { - typedef T type; - }; -} - -namespace ac { - #pragma hls_design ccore - #pragma hls_ccore_type sequential - template - void fx_div(ac_int op1, ac_int op2, ac_int "ient, bool &exact) { - ac_int R = op1; - bool R_neg = false; - ac_int D = op2; - ac_int neg_D = -D; - ac_int Q = 0; - for(int i=0; i < W+2; i++) { - // take MSB of N, shift it in from right to R - R += ( R_neg ? 
(ac_int) D : neg_D ); - Q = (Q << 1) | ((R >= 0) & 1); - R_neg = R[W]; - R <<= 1; - } - quotient = Q; - exact = !R | R_neg & (R >> 1) == neg_D; - } - - template - void fx_div_sim(ac_int op1, ac_int op2, ac_int "ient, bool &exact) { - // need to compute extra rnd bit, - // +2 because we may need to shift left by 1 (mant divisor > mant dividend) - ac_int<2*W+1,false> op1_mi = op1; - op1_mi <<= W+1; - // +1 bit to compute rnd bit - quotient = (op1_mi / op2); - exact = !(op1_mi % op2); - } - - #pragma hls_design ccore - #pragma hls_ccore_type sequential - template - bool fx_sqrt( ac_int x, ac_int &sqrt) { - // x is ac_fixed, sqrt is ac_fixed - const bool W_odd = W&1; - const int ZW = W + W_odd; // make it even - ac_int z = x; - z <<= W_odd; - // masks used only to hint synthesis on precision - ac_int mask_d = 0; - ac_int d = 0; - ac_int r = 0; - unsigned int z_shift = ZW-2; - for(int i = WR-1; i >= 0; i--) { - r <<= 1; - mask_d = (mask_d << 2) | 0x3; - d = (mask_d & (d << 2)) | ((z >> z_shift) & 0x3 ); - ac_int t = d - (( ((ac_int)r) << 1) | 0x1); - if( !t[WR+1] ) { // since t is unsigned, look at MSB - r |= 0x1; - d = mask_d & t; - } - z <<= 2; - } - - bool rem = (d != 0) || ((z >> 2*W) != 0); - sqrt = r; - return rem; - } -} - -#ifndef AC_STD_FLOAT_FX_DIV_OVERRIDE -#ifdef __SYNTHESIS__ -#define AC_STD_FLOAT_FX_DIV_OVERRIDE ac::fx_div -#else -#define AC_STD_FLOAT_FX_DIV_OVERRIDE ac::fx_div_sim -#endif -#endif - -template class ac_std_float; - -#ifdef __AC_NAMESPACE -} -#endif - -#ifdef AC_STD_FLOAT_OVERRIDE_NAMESPACE -#define AC_STD_FLOAT_OVERRIDE_NS ::AC_STD_FLOAT_OVERRIDE_NAMESPACE:: -namespace AC_STD_FLOAT_OVERRIDE_NAMESPACE { -#ifdef __AC_NAMESPACE - using __AC_NAMESPACE::ac_q_mode; - using __AC_NAMESPACE::ac_std_float; -#endif -#else -#define AC_STD_FLOAT_OVERRIDE_NS -#endif - -#ifdef AC_STD_FLOAT_ADD_OVERRIDE -template -ac_std_float AC_STD_FLOAT_ADD_OVERRIDE(const ac_std_float &op, const ac_std_float &op2); -#endif - -#ifdef AC_STD_FLOAT_MULT_OVERRIDE -template -ac_std_float AC_STD_FLOAT_MULT_OVERRIDE(const ac_std_float &op, const ac_std_float &op2); -#endif - -#ifdef AC_STD_FLOAT_DIV_OVERRIDE -template -ac_std_float AC_STD_FLOAT_DIV_OVERRIDE(const ac_std_float &op, const ac_std_float &op2); -#endif - -#ifdef AC_STD_FLOAT_FMA_OVERRIDE -template -ac_std_float AC_STD_FLOAT_FMA_OVERRIDE(const ac_std_float &op, const ac_std_float &op2, const ac_std_float &op3); -#endif - -#ifdef AC_STD_FLOAT_SQRT_OVERRIDE -template -ac_std_float AC_STD_FLOAT_SQRT_OVERRIDE(const ac_std_float &op); -#endif - -#ifdef AC_STD_FLOAT_OVERRIDE_NAMESPACE -} -#endif - -#ifdef __AC_NAMESPACE -namespace __AC_NAMESPACE { -#endif - -namespace ac { - inline void copy_bits(float a, float *b) { *b = a; } - inline void copy_bits(double a, double *b) { *b = a; } - - inline void copy_bits(short a, short *b) { *b = a; } - inline void copy_bits(const ac_int<16,true> &a, short *b) { *b = (short) a.to_int(); } - inline void copy_bits(short a, ac_int<16,true> *b) { *b = ac_int<16,true>(a); } - inline void copy_bits(int a, int *b) { *b = a; } - inline void copy_bits(const ac_int<32,true> &a, int *b) { *b = a.to_int(); } - inline void copy_bits(int a, ac_int<32,true> *b) { *b = ac_int<32,true>(a); } - inline void copy_bits(long long a, long long *b) { *b = a; } - inline void copy_bits(const ac_int<64,true> &a, long long *b) { *b = a.to_int64(); } - inline void copy_bits(long long a, ac_int<64,true> *b) { *b = ac_int<64,true>(a); } - inline void copy_bits(const long long a[2], long long (*b)[2]) { - (*b)[0] = a[0]; - (*b)[1] = a[1]; - 
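-    // both 64-bit halves of the 128-bit payload are copied verbatim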
} - inline void copy_bits(const ac_int<128,true> &a, long long (*b)[2]) { - (*b)[0] = a.to_int64(); - (*b)[1] = a.slc<64>(64).to_int64(); - } - inline void copy_bits(const long long a[2], ac_int<128,true> *b) { - *b = 0; - b->set_slc(0,ac_int<64,true>(a[0])); - b->set_slc(64,ac_int<64,true>(a[1])); - } - inline void copy_bits(const long long a[4], long long (*b)[4]) { - (*b)[0] = a[0]; - (*b)[1] = a[1]; - (*b)[2] = a[2]; - (*b)[3] = a[3]; - } - inline void copy_bits(const ac_int<256,true> &a, long long (*b)[4]) { - (*b)[0] = a.to_int64(); - (*b)[1] = a.slc<64>(64).to_int64(); - (*b)[2] = a.slc<64>(128).to_int64(); - (*b)[3] = a.slc<64>(192).to_int64(); - } - inline void copy_bits(const long long a[4], ac_int<256,true> *b) { - *b = 0; - b->set_slc(0,ac_int<64,true>(a[0])); - b->set_slc(64,ac_int<64,true>(a[1])); - b->set_slc(128,ac_int<64,true>(a[2])); - b->set_slc(192,ac_int<64,true>(a[3])); - } - inline void copy_bits(float f, int *x); - inline void copy_bits(double f, long long *x); - inline void copy_bits(int x, float *f); - inline void copy_bits(long long x, double *f); - - inline void copy_bits(float f, ac_int<32,true> *x) { - int x_i; - copy_bits(f, &x_i); - *x = x_i; - } - inline void copy_bits(double f, ac_int<64,true> *x) { - long long x_i; - copy_bits(f, &x_i); - *x = x_i; - } - inline void copy_bits(const ac_int<32,true> &x, float *f) { copy_bits(x.to_int(), f); } - inline void copy_bits(const ac_int<64,true> &x, double *f) { copy_bits(x.to_int64(), f); } -} - -enum ac_ieee_float_format { binary16, binary32, binary64, binary128, binary256}; - -// Forward declarations for ac_ieee_float and bfloat16 -template -class ac_ieee_float; -namespace ac { - class bfloat16; -} - -template -class ac_std_float { -__AC_DATA_PRIVATE - ac_int d; -public: - static const int width = W; - static const int e_width = E; - static const int mant_bits = W - E - 1; - static const int exp_bias = (1 << (E-1)) - 1; - static const int min_exp = -exp_bias + 1; - static const int max_exp = exp_bias; - static const int mu_bits = mant_bits + 1; -private: - typedef ac_int mu_t; - typedef ac_int mu1_t; - typedef ac_int mu2_t; - typedef ac_int m_t; // mantissa in two's complement representation -public: - typedef ac_int e_t; - typedef ac_float ac_float_t; - static ac_std_float nan() { - ac_std_float r; - r.d = 0; - r.d.set_slc(mant_bits-1, ac_int(-1)); - return r; - } - static ac_std_float inf() { - ac_std_float r; - r.d = 0; - r.d.set_slc(mant_bits, ac_int(-1)); - return r; - } - static ac_std_float denorm_min() { // smallest positive non zero value (subnorm if supported) - ac_std_float r; - r.d = 1; - return r; - } - static ac_std_float min() { // smallest NORMAL positive non zero value - ac_std_float r; - r.d = 0; - r.d[width-1-e_width] = true; - return r; - } - static ac_std_float max() { // largest pos finite value - ac_std_float r; - r.d = -1; - r.d[width-1] = false; - r.d[width-1-e_width] = false; - return r; - } - static ac_std_float epsilon() { - ac_int exp = -mant_bits + exp_bias; - ac_std_float r; - r.d = 0; - r.d.set_slc(mant_bits, exp); - return r; - } - ac_std_float() {} - ac_std_float(const ac_std_float &f) : d(f.d) {} - template - ac_std_float convert() const { - ac_private::check_supported(); - ac_std_float r; - if(W <= WR) { - r.d = 0; - r.d.set_slc(WR-W, d); - } else { - typedef ac_std_float r_t; - const int r_mant_bits = r_t::mant_bits; - const int r_mu_bits = r_t::mu_bits; - e_t f_e = d.template slc(mant_bits); - bool f_normal = !!f_e; - mu_t mu = d; - mu[r_mant_bits] = f_normal; - ac_fixed 
r_rnd = mu; - bool rnd_ovf = r_rnd[r_mu_bits]; - ac_int m_r = r_rnd.template slc(0); - e_t e_r = f_e + rnd_ovf; - r.d = m_r; - r.d.set_slc(r_mant_bits, e_r); - r.d[WR-1] = d[W-1]; - } - return r; - } - - template - ac_fixed convert_to_ac_fixed(bool map_inf=false) const { - static const bool rnd = QFX!=AC_TRN && QFX!=AC_TRN_ZERO; - static const bool need_rnd_bit = QFX != AC_TRN; - static const bool need_rem_bits = need_rnd_bit && QFX != AC_RND; - static const bool need_ovf = OFX != AC_WRAP; - static const int t_width = AC_MAX(mu_bits+1, WFX+!SFX) + need_rnd_bit + need_ovf; - - bool f_sign, f_normal, f_zero, f_inf, f_nan; - mu_t f_mu; - e_t f_e; - extract(f_mu, f_e, f_sign, f_normal, f_zero, f_inf, f_nan); - if(map_inf) { - ac_fixed rv; - if(f_sign) - rv.template set_val(); - else - rv.template set_val(); - return rv; - } - AC_ASSERT(!f_inf && !f_nan, "Expects finite float (not Nan or Inf)"); - m_t f_m = f_sign ? m_t(-f_mu) : m_t(f_mu); - typedef ac_int t_t; - typedef ac_int t2_t; - t_t t = f_m; - t <<= need_rnd_bit; - static const int lsb_src = -mant_bits; - static const int lsb_trg = IFX-WFX; - int rshift = lsb_trg - lsb_src - (int)f_e; - - bool sticky_bit_rnd = false; - bool rshift_neg = rshift < 0; - if(need_rem_bits) { - t_t shifted_out_bits = t; - typedef ac_int< ac::template nbits< AC_MAX(lsb_trg - lsb_src - min_exp,1) >::val, false> shift_ut; - shifted_out_bits &= ~(t_t(0).bit_complement() << (shift_ut) rshift); - sticky_bit_rnd = !!shifted_out_bits & !rshift_neg; - } - bool ovf = false; - if(need_ovf) { - t_t shifted_out_bits = t < 0 ? t_t(~t) : t; - // shift right by -rshift + 1 - // +1 is OK since added extra MSB - typedef ac_int< ac::template nbits< AC_MAX(-(lsb_trg - lsb_src - max_exp + 1),1) >::val, false> shift_ut; - shifted_out_bits &= ~((t_t(0).bit_complement() >> 2) >> (shift_ut) ~rshift); - ovf = !!shifted_out_bits & rshift_neg; - } - - t >>= rshift; - - t[t_width-1] = t[t_width-1] ^ (ovf & (t[t_width-1] ^ f_sign)); - t[t_width-2] = t[t_width-2] ^ (ovf & (t[t_width-2] ^ !f_sign)); - t2_t t2 = t; - if(need_rem_bits) { - t2 <<= 1; - t2[0] = t2[0] | sticky_bit_rnd; - } - - ac_fixed ri = t2; - ac_fixed r = 0; - r.set_slc(0,ri.template slc(0)); - return r; - } - - template - explicit ac_std_float(const ac_std_float &f) { - *this = f.template convert(); - } - template - ac_std_float convert() const { - ac_private::check_supported(); - typedef ac_std_float r_t; - typedef typename r_t::e_t r_e_t; - int const r_mu_bits = r_t::mu_bits; - int const r_mant_bits = r_t::mant_bits; - int const r_min_exp = r_t::min_exp; - int const r_max_exp = r_t::max_exp; - int const r_exp_bias = r_t::exp_bias; - bool f_sign, f_normal, f_zero, f_inf, f_nan; - mu_t f_mu; - e_t f_e; - r_t r; - extract(f_mu, f_e, f_sign, f_normal, f_zero, f_inf, f_nan); - int exp = f_e; - ac_fixed r_rnd; - if(ER >= E) { - if(ER > E && !f_normal) { - int ls = f_mu.leading_sign(); - int max_shift_left = f_e - r_min_exp + 1; - bool shift_exponent_limited = ls >= max_shift_left; - int shift_l = shift_exponent_limited ? 
max_shift_left : ls; - f_mu <<= shift_l; - exp -= shift_l; - } - r_rnd = f_mu; - } else { - int shift_r = r_min_exp - f_e; - typedef ac_fixed t_t; - t_t r_t = f_mu; - bool sticky_bit = !!(f_mu & ~((~mu_t(0)) << mant_bits-r_mant_bits-1)); - if(shift_r > 0) { - t_t shifted_out_bits = r_t; - shifted_out_bits &= ~((~t_t(0)) << shift_r); - sticky_bit |= !!shifted_out_bits; - r_t >>= shift_r; - exp += shift_r; - } - ac_fixed r_t2 = r_t; - r_t2[0] = sticky_bit; - r_rnd = r_t2; - } - bool rnd_ovf = r_rnd[r_mu_bits]; - ac_int r_m = r_rnd.template slc(0); - bool r_normal = r_rnd[r_mant_bits] | rnd_ovf; - exp += rnd_ovf; - bool exception = f_inf | f_nan | (exp > r_max_exp); - r_e_t r_e = exception ? -1 : (f_zero | !r_normal) ? 0 : exp + r_exp_bias; - if(exception) { - r_m = 0; - r_m[r_mant_bits-1] = f_nan; - } - r.d = r_m; - r.d.set_slc(r_mant_bits, r_e); - r.d[WR-1] = d[W-1]; - return r; - } - template - explicit ac_std_float(const ac_std_float &f) { - *this = f.template convert(); - } - template - explicit ac_std_float(const ac_ieee_float &f); - - explicit ac_std_float(const ac::bfloat16 &f); - - template - explicit ac_std_float(const ac_float &f) { - bool sign = f.mantissa() < 0; - m_t m_s = f.m.template slc(0); - mu1_t m_u = sign ? (mu1_t) -m_s : (mu1_t) m_s; - bool most_neg_m = m_u[mu_bits]; - bool is_max_exp = f.exp() == (1 << (E-1)) - 1; - ac_int e = f.exp() + exp_bias + (most_neg_m & !is_max_exp); - mu_t m = m_u | ac_int<1,true>(most_neg_m & is_max_exp); - m[mant_bits] = m[mant_bits] | most_neg_m; - bool exp_dont_map = !e | e==-1; - m >>= !e; - m >>= 2*(e==-1); - // exp_dont_map guarantees subnornal => e = 0 - e &= ac_int<1,true>(!exp_dont_map & !!m); - d = m.template slc(0); - d.set_slc(mant_bits, e); - d[W-1] = sign; - } - template - void assign_from(const ac_fixed &fx) { - ac_private::check_supported(); - bool sign = fx < 0.0; - ac_fixed x = 0; - x.set_slc(0,fx.template slc(0)); - bool all_sign; - int ls = x.leading_sign(all_sign); - int max_shift_left = IFX-1 - min_exp + 1; - bool shift_exponent_limited = ls >= max_shift_left; - int shift_l = shift_exponent_limited ? max_shift_left : ls; - ac_fixed x_u = sign ? (ac_fixed) -x : (ac_fixed) x; - x_u <<= shift_l; - int exp = IFX-1; - exp -= shift_l; - ac_fixed m_rnd = x_u; - mu1_t m_u = 0; m_u.set_slc(0, m_rnd.template slc(0)); - bool shiftr1 = m_u[mu_bits]; // msb - bool r_normal = m_u[mu_bits] | m_u[mu_bits-1]; - m_u >>= shiftr1; - exp += shiftr1; - bool fx_zero = all_sign & !sign; - bool r_inf = (exp > max_exp) & !fx_zero; - if(Q==AC_TRN_ZERO) { - exp = r_inf ? max_exp + exp_bias : exp; - m_u |= ac_int<1,true>(-r_inf); // saturate (set all bits to 1) if r_inf - r_inf = false; - } - e_t e = r_inf ? -1 : (!r_normal) ? 
0 : exp + exp_bias; - m_u &= ac_int<1,true>(!r_inf); - e &= ac_int<1,true>(r_normal); - d = m_u.template slc(0); - d.set_slc(mant_bits, e); - d[W-1] = sign; - } - template - void assign_from(const ac_int &x) { - this->template assign_from(ac_fixed(x)); - } - template - explicit ac_std_float(const ac_fixed &fx) { - assign_from(fx); - } - explicit ac_std_float(float f) { - const int w_bits = sizeof(f)*8; - const int m_bits = std::numeric_limits::digits; - const int e_bits = w_bits - m_bits; - ac_int t_i; - ac::copy_bits(f, &t_i); - ac_std_float t; - t.set_data(t_i); - *this = ac_std_float(t); - } - explicit ac_std_float(double f) { - const int w_bits = sizeof(f)*8; - const int m_bits = std::numeric_limits::digits; - const int e_bits = w_bits - m_bits; - ac_int t_i; - ac::copy_bits(f, &t_i); - ac_std_float t; - t.set_data(t_i); - *this = ac_std_float(t); - } - explicit ac_std_float(int x) { - *this = ac_std_float(ac_fixed<32,32,true>(x)); - } - explicit ac_std_float(long long x) { - *this = ac_std_float(ac_fixed<64,64,true>(x)); - } - const ac_int &data() const { return d; } - void set_data(const ac_int &data, bool assert_on_nan=false, bool assert_on_inf=false) { - d = data; - if(assert_on_nan) - AC_ASSERT(!isnan(), "Float is NaN"); - if(assert_on_inf) - AC_ASSERT(!isinf(), "Float is Inf"); - } - int fpclassify() const { - ac_int e = d.template slc(mant_bits); - if(e) { - if(e == -1) - return !(ac_int)d ? FP_INFINITE : FP_NAN; - else - return FP_NORMAL; - } - else - return !(ac_int)d ? FP_ZERO : FP_SUBNORMAL; - } - bool isfinite() const { - ac_int e = d.template slc(mant_bits); - return e != -1; - } - bool isnormal() const { - ac_int e = d.template slc(mant_bits); - return (e || !(ac_int)d)&& e != -1; - } - bool isnan() const { - if(isfinite()) - return false; - ac_int m = d; - return !!m; - } - bool isinf() const { - if(isfinite()) - return false; - ac_int m = d; - return !m; - } - const ac_float to_ac_float() const { - ac_int e = d.template slc(mant_bits); - bool normal = !!e; - bool sign = d[W-1]; - bool inf = e == -1; - ac_int m = d; - ac_int m1 = m; - m1[mant_bits] = normal; - ac_int m_s = sign ? -m1 : (ac_int) m1; - ac_fixed fx = 0; - fx.set_slc(0, m_s); - e -= exp_bias; - // if number is subnormal, e will be MIN_EXP + 1 (10...01), but it needs to be - // MIN_EXP + 2 (10...010) - e[0] = e[0] & normal; - e[1] = e[1] | !normal; - // normalization by at most 2 places - bool shiftl1 = !(fx[mant_bits+1] ^ fx[mant_bits]); - bool shiftl2 = shiftl1 & !(fx[mant_bits+1] ^ fx[mant_bits-1]); - fx <<= shiftl1; - fx <<= shiftl2; - e -= shiftl1 + shiftl2; - e = inf ? value(e) : e; - fx = inf ? (sign ? 
value(fx) : value(fx)) : fx; - return ac_float(fx, e, false); - } - float to_float() const { - ac_std_float<32,8> t(*this); - float f; - ac::copy_bits(t.d, &f); - return f; - } - double to_double() const { - ac_std_float<64,11> t(*this); - double f; - ac::copy_bits(t.d, &f); - return f; - } -private: - void extract(mu_t &m, e_t &e, bool &sign, bool &normal, bool &zero, bool &inf, bool &nan, bool biased_exp=false, bool no_subnormals=false) const { - e = d.template slc(mant_bits); - bool exception = e == -1; - normal = !!e | no_subnormals; - m = d; - bool m_zero = !m.template slc(0); - zero = (!e) & (no_subnormals | m_zero); - m[mant_bits] = !!e; - if(!biased_exp) { - e -= exp_bias; - e += !normal; - } - sign = d[W-1]; - inf = exception & m_zero; - nan = exception & !m_zero; - } -public: - static ac_std_float zero() { - ac_std_float r; - r.d = 0; - return r; - } - static ac_std_float one() { - ac_std_float r; - r.d = 0; - r.d.set_slc(mant_bits, ac_int(exp_bias)); - return r; - } - template - ac_std_float add_generic(const ac_std_float &op2) const { - ac_private::check_supported(); - // +1 for possible negation, +1 for bit growth due to addition - const int tr_t_iwidth = mu_bits + 1 + 1; - // extra bit for rounding, extra bit for left shift - const int tr_t_width = tr_t_iwidth + 1 + 1; - typedef ac_fixed add_t; - typedef ac_fixed r_un_t; - e_t op1_e, op2_e; - bool op1_normal, op1_sign, op1_zero, op2_normal, op2_sign, op2_zero; - bool op1_inf, op1_nan, op2_inf, op2_nan; - mu_t op1_mu, op2_mu; - extract(op1_mu, op1_e, op1_sign, op1_normal, op1_zero, op1_inf, op1_nan, true, No_SubNormals); - m_t op1_m = op1_sign ? m_t(-op1_mu) : m_t(op1_mu); - op1_m &= m_t(No_SubNormals & op1_zero ? 0 : -1); - op2.extract(op2_mu, op2_e, op2_sign, op2_normal, op2_zero, op2_inf, op2_nan, true, No_SubNormals); - m_t op2_m = op2_sign ? m_t(-op2_mu) : m_t(op2_mu); - op2_m &= m_t(No_SubNormals & op2_zero ? 0 : -1); - - unsigned op1_e_b = ac_int(op1_e) + !op1_normal; - unsigned op2_e_b = ac_int(op2_e) + !op2_normal; - int e_dif = op1_e_b - op2_e_b; - bool e1_lt_e2 = e_dif < 0; - e_dif = (op1_zero | op2_zero) ? 0 : e1_lt_e2 ? -e_dif : e_dif; - - add_t op_lshift = e1_lt_e2 ? op1_m : op2_m; - m_t op_no_shift = e1_lt_e2 ? op2_m : op1_m; - add_t shifted_out_bits = op_lshift; - shifted_out_bits &= ~((~add_t(0)) << (unsigned) e_dif); - bool sticky_bit = !!shifted_out_bits; - - op_lshift >>= (unsigned) e_dif; - add_t add_r = op_lshift + op_no_shift; - int exp = ( (e1_lt_e2 & !op2_zero) | op1_zero ? op2_e_b : op1_e_b); - bool all_sign; - int ls = add_r.leading_sign(all_sign); - bool r_zero = !add_r[0] & all_sign; - // +1 to account for bit growth of add_r - int max_shift_left = exp + (- min_exp - exp_bias + 1); - bool shift_exponent_limited = ls >= max_shift_left; - int shift_l = shift_exponent_limited ? max_shift_left : ls; - add_r <<= shift_l; - add_r[0] = add_r[0] | sticky_bit; - ac_fixed r_rnd = add_r; - typedef ac_int t_h; - t_h t = add_r.to_ac_int(); - bool rnd_ovf = QR == AC_RND_CONV && t == t_h(-1); - bool r_sign = r_rnd[mu_bits] ^ rnd_ovf; - bool shift_r = rnd_ovf | (r_sign & !r_rnd.template slc(0)); - r_un_t r_un = r_sign ? (r_un_t) -r_rnd : (r_un_t) r_rnd; - // get rid of implied bit, assign to ac_int - bool r_normal = r_un[mant_bits] | shift_r; - r_zero |= No_SubNormals & !r_normal; - ac_int m_r = r_un.template slc(0); - exp = (shift_exponent_limited ? min_exp + exp_bias : exp - ls + 1) + shift_r; - bool r_inf = exp > max_exp + exp_bias; - if(QR==AC_TRN_ZERO) { - exp = r_inf ? 
max_exp + exp_bias : exp; - m_r |= ac_int<1,true>(-r_inf); // saturate (set all bits to 1) if r_inf - r_inf = false; - } - bool r_nan = op1_nan | op2_nan | ((op1_inf & op2_inf) & (op1_sign ^ op2_sign)); - bool exception = op1_inf | op2_inf | op1_nan | op2_nan | r_inf; - ac_int e_r = exception ? -1 : (r_zero | !r_normal) ? 0 : exp; - if(exception | r_zero) { - m_r = 0; - m_r[mant_bits-1] = r_nan; - } - ac_int d_r = m_r; - d_r.set_slc(mant_bits, e_r); - d_r[W-1] = r_sign; - ac_std_float r; - r.set_data(d_r); - return r; - } - template - ac_std_float add(const ac_std_float &op2) const { -#ifndef AC_STD_FLOAT_ADD_OVERRIDE - return add_generic(op2); -#else - return AC_STD_FLOAT_OVERRIDE_NS AC_STD_FLOAT_ADD_OVERRIDE(*this, op2); -#endif - } - template - ac_std_float sub(const ac_std_float &op2) const { - return add(-op2); - } - template - ac_std_float mult_generic(const ac_std_float &op2) const { - ac_private::check_supported(); - e_t op1_e, op2_e; - bool op1_normal, op1_sign, op1_zero, op2_normal, op2_sign, op2_zero; - bool op1_inf, op1_nan, op2_inf, op2_nan; - mu_t op1_mu, op2_mu; - extract(op1_mu, op1_e, op1_sign, op1_normal, op1_zero, op1_inf, op1_nan, true, No_SubNormals); - op2.extract(op2_mu, op2_e, op2_sign, op2_normal, op2_zero, op2_inf, op2_nan, true, No_SubNormals); - bool r_sign = op1_sign ^ op2_sign; - bool r_nan = op1_nan | op2_nan | (op1_inf & op2_zero) | (op1_zero & op2_inf); - bool r_zero = op1_zero | op2_zero; // r_nan takes precedence later on - int exp = ac_int(op1_e) + ac_int(op2_e) + !op1_normal + !op2_normal - exp_bias; - ac_int<2*mu_bits,false> p = op1_mu * op2_mu; - int max_shift_left = exp + (- min_exp - exp_bias + 1); - int shift_l = 0; - bool shift_l_1 = false; - typedef ac_int t_h; - typedef ac_int t_l; - t_h p_h; - t_l p_l = p; - bool r_normal; - bool r_inf; - ac_fixed r_rnd; - ac_int m_r; - if(max_shift_left >= 0) { - r_inf = exp > max_exp + exp_bias; - bool exp_is_max = exp == max_exp + exp_bias; - bool exp_is_max_m1 = exp == max_exp + exp_bias - 1; - unsigned ls = No_SubNormals ? 0 : (unsigned) (op1_normal ? op2_mu : op1_mu).leading_sign(); - bool shift_exponent_limited = ls >= (unsigned) max_shift_left; - shift_l = shift_exponent_limited ? (unsigned) max_shift_left : ls; - p <<= (unsigned) shift_l; - exp -= shift_l; - shift_l_1 = !(shift_exponent_limited | p[2*mu_bits-1]); - p = shift_l_1 ? p << 1 : p; - exp += !shift_l_1; - p_h = p >> (mu_bits-1); - p_l &= (t_l(-1) >> shift_l) >> shift_l_1; - ac_int p_bef_rnd = p_h; - p_bef_rnd <<= 1; - p_bef_rnd[0] = !!p_l; - r_rnd = p_bef_rnd; - m_r = r_rnd.template slc(0); - bool rnd_ovf = QR == AC_RND_CONV && p_h == t_h(-1); - exp += rnd_ovf; - r_inf |= (exp_is_max & (!shift_l_1 | rnd_ovf)) | (exp_is_max_m1 & !shift_l_1 & rnd_ovf); - r_normal = r_rnd[mant_bits] | rnd_ovf; - r_zero |= !r_normal & No_SubNormals; - if(QR==AC_TRN_ZERO) { - exp = r_inf ? max_exp + exp_bias : exp; - m_r |= ac_int<1,true>(-r_inf); // saturate (set all bits to 1) if r_inf - r_inf = false; - } - } else { - shift_l = max_shift_left; - exp -= shift_l; - unsigned shift_r_m1 = ~shift_l; - p_h = p >> (mu_bits-1); - t_h shifted_out_bits = p_h; - shifted_out_bits &= ~((~t_h(1)) << shift_r_m1); - p_h >>= shift_r_m1; - p_h >>= 1; - ac_int p_bef_rnd = p_h; - p_bef_rnd <<= 1; - p_bef_rnd[0] = !!p_l | !!shifted_out_bits; - r_rnd = p_bef_rnd; - m_r = r_rnd.template slc(0); - r_normal = false; - r_inf = false; - r_zero |= No_SubNormals; - } - bool exception = op1_inf | op2_inf | op1_nan | op2_nan | r_inf; - ac_int e_r = exception ? -1 : (r_zero | !r_normal) ? 
0 : exp; - if(exception | r_zero) { - m_r = 0; - m_r[mant_bits-1] = r_nan; - } - ac_int d_r = m_r; - d_r.set_slc(mant_bits, e_r); - d_r[W-1] = r_sign; - ac_std_float r; - r.set_data(d_r); - return r; - } - template - ac_std_float mult(const ac_std_float &op2) const { -#ifndef AC_STD_FLOAT_MULT_OVERRIDE - return mult_generic(op2); -#else - return AC_STD_FLOAT_OVERRIDE_NS AC_STD_FLOAT_MULT_OVERRIDE(*this, op2); -#endif - } - template - ac_std_float div_generic(const ac_std_float &op2) const { - ac_private::check_supported(); - e_t op1_e, op2_e; - bool op1_normal, op1_sign, op1_zero, op2_normal, op2_sign, op2_zero; - bool op1_inf, op1_nan, op2_inf, op2_nan; - mu_t op1_mu, op2_mu; - extract(op1_mu, op1_e, op1_sign, op1_normal, op1_zero, op1_inf, op1_nan, true, No_SubNormals); - op2.extract(op2_mu, op2_e, op2_sign, op2_normal, op2_zero, op2_inf, op2_nan, true, No_SubNormals); - bool r_sign = op1_sign ^ op2_sign; - int ls_op1 = No_SubNormals ? 0 : (unsigned) op1_mu.leading_sign(); - op1_mu <<= ls_op1; - int ls_op2 = No_SubNormals ? 0 : (unsigned) op2_mu.leading_sign(); - op2_mu <<= ls_op2; - int exp = ac_int(op1_e) - ac_int(op2_e) + !op1_normal - !op2_normal - ls_op1 + ls_op2 + exp_bias; - ac_int q0 = 0; - bool exact = true; - bool div_by_zero = op2_zero; -#ifdef __SYNTHESIS__ - div_by_zero = false; -#endif - if(!div_by_zero) { - AC_STD_FLOAT_FX_DIV_OVERRIDE(op1_mu, op2_mu, q0, exact); - } - ac_int q = q0; - q <<= 1; - int shift_r = min_exp + exp_bias - exp; - bool sticky_bit = !exact; - if(shift_r >= 0) { - typedef ac_int t_t; - t_t shifted_out_bits = q; - shifted_out_bits &= ~((~t_t(0)) << shift_r); - sticky_bit |= !!shifted_out_bits; - q >>= shift_r; - exp += shift_r; - } else { - bool shift_l = !q[mu_bits+2]; - q <<= shift_l; - exp -= shift_l; - } - q[0] = q[0] | sticky_bit; - ac_fixed r_rnd = q; - bool rnd_ovf = r_rnd[mu_bits]; - ac_int m_r = r_rnd.template slc(0); - bool r_normal = r_rnd[mant_bits] | rnd_ovf; - bool r_nan = op1_nan | op2_nan | (op1_zero & op2_zero) | (op1_inf & op2_inf); - bool r_zero = op1_zero | op2_inf; - r_zero |= !r_normal & No_SubNormals; - exp += rnd_ovf; - bool r_inf0 = op1_inf | op2_zero; // this is not affected by rounding - bool r_inf = (!r_zero & (exp > max_exp + exp_bias)) | r_inf0; - if(QR==AC_TRN_ZERO && !r_inf0) { - exp = r_inf ? max_exp + exp_bias : exp; - m_r |= ac_int<1,true>(-r_inf); // saturate (set all bits to 1) if r_inf - r_inf = false; - } - bool exception = r_nan | r_inf; - ac_int e_r = exception ? -1 : (r_zero | !r_normal) ? 
0 : exp; - if(exception | r_zero) { - m_r = 0; - m_r[mant_bits-1] = r_nan; - } - ac_int d_r = m_r; - d_r.set_slc(mant_bits, e_r); - d_r[W-1] = r_sign; - ac_std_float r; - r.set_data(d_r); - return r; - } - template - ac_std_float div(const ac_std_float &op2) const { -#ifndef AC_STD_FLOAT_DIV_OVERRIDE - return div_generic(op2); -#else - return AC_STD_FLOAT_OVERRIDE_NS AC_STD_FLOAT_DIV_OVERRIDE(*this, op2); -#endif - } - template - ac_std_float fma_generic(const ac_std_float &op2, const ac_std_float &op3) const { - ac_private::check_supported(); - e_t op1_e, op2_e, op3_e; - bool op1_normal, op1_sign, op1_zero, op2_normal, op2_sign, op2_zero, op3_normal, op3_sign, op3_zero; - bool op1_inf, op1_nan, op2_inf, op2_nan, op3_inf, op3_nan; - mu_t op1_mu, op2_mu, op3_mu; - extract(op1_mu, op1_e, op1_sign, op1_normal, op1_zero, op1_inf, op1_nan, true, No_SubNormals); - op2.extract(op2_mu, op2_e, op2_sign, op2_normal, op2_zero, op2_inf, op2_nan, true, No_SubNormals); - op3.extract(op3_mu, op3_e, op3_sign, op3_normal, op3_zero, op3_inf, op3_nan, true, No_SubNormals); - if(No_SubNormals) - op3_mu &= mu_t(op3_zero ? 0 : -1); - bool mult_sign = (op1_sign ^ op2_sign) | (op1_zero & op2_inf) | (op1_inf & op1_zero); - bool mult_nan = op1_nan | op2_nan | (op1_zero & op2_inf) | (op1_inf & op2_zero); - bool mult_zero = op1_zero | op2_zero; // mult_nan has precedence later on - int mult_exp_b = ac_int(op1_e) + ac_int(op2_e) + !op1_normal + !op2_normal - exp_bias; - mult_exp_b |= ac_int( op1_inf | op2_inf ? -1 : 0 ); - ac_int<2*mu_bits,false> p = op1_mu * op2_mu; - if(No_SubNormals) - p &= ac_int<2*mu_bits,false>(mult_zero ? 0 : -1); - bool mult_inf = op1_inf | op2_inf; - - bool diff_signs = mult_sign ^ op3_sign; - bool toggle_r_sign = mult_sign; - m_t op3_m = diff_signs ? m_t(-op3_mu) : m_t(op3_mu); - unsigned op3_e_b = ac_int(op3_e) + !op3_normal; - - int e_dif = mult_exp_b - op3_e_b; - bool emult_lt_e3 = e_dif < 0; - e_dif = (mult_zero | op3_zero) ? 0 : emult_lt_e3 ? -e_dif : e_dif; - - typedef ac_int<2*mu_bits+4,true> add_t; - add_t op3_m_s = op3_m; - op3_m_s <<= mu_bits+1; // mult: ii.ffff, op3: i.ff - add_t p_s = p; - p_s <<= 2; - add_t op_lshift = emult_lt_e3 ? p_s : op3_m_s; - add_t op_no_shift = emult_lt_e3 ? op3_m_s : p_s; - - add_t shifted_out_bits = op_lshift; - shifted_out_bits &= ~((~add_t(0)) << (unsigned) e_dif); - bool sticky_bit = !!shifted_out_bits; - - op_lshift >>= (unsigned) e_dif; - add_t add_r = op_lshift + op_no_shift; - int exp = ( (emult_lt_e3 & !op3_zero) | mult_zero ? op3_e_b : mult_exp_b); - - bool all_sign; - int ls = add_r.leading_sign(all_sign); - // no bit growth of add_r - int max_shift_left = exp + (- min_exp - exp_bias + 2); - bool shift_exponent_limited = ls >= max_shift_left; - int shift_l = shift_exponent_limited ? max_shift_left : ls; - add_r <<= shift_l; - add_r[0] = add_r[0] | sticky_bit; - - ac_fixed r_rnd = add_r; - - typedef ac_int t_h; - t_h t = add_r.template slc(mu_bits+2); - bool rnd_ovf = QR == AC_RND_CONV && !add_r[2*mu_bits+3] && t == t_h(-1); - bool r_neg = r_rnd[mu_bits] ^ rnd_ovf; - bool r_sign = op3_inf ? op3_sign : mult_inf ? mult_sign : r_neg ^ toggle_r_sign; - ac_int r_rnd_i = r_rnd.template slc(0); - bool r_zero = !rnd_ovf & !r_rnd_i; - bool shift_r = rnd_ovf | (r_neg & !r_rnd_i.template slc(0)); - typedef ac_int r_un_t; - r_un_t r_un = r_neg ? 
(r_un_t) -r_rnd_i : (r_un_t) r_rnd_i; - // get rid of implied bit, assign to ac_int - bool r_normal = r_un[mant_bits] | shift_r; - r_zero |= No_SubNormals & !r_normal; - ac_int m_r = r_un.template slc(0); - exp = (shift_exponent_limited ? min_exp + exp_bias : exp - ls + 2) + shift_r; - bool r_inf = mult_inf | op3_inf | (exp > max_exp + exp_bias); - if(QR==AC_TRN_ZERO) { - exp = r_inf ? max_exp + exp_bias : exp; - m_r |= ac_int<1,true>(-r_inf); // saturate (set all bits to 1) if r_inf - r_inf = false; - } - bool r_nan = op3_nan | mult_nan | ((op3_inf & (op1_inf | op2_inf)) & (op3_sign ^ mult_sign)); - bool exception = op3_inf | mult_inf | op3_nan | mult_nan | r_inf; - ac_int e_r = exception ? -1 : (r_zero | !r_normal) ? 0 : exp; - if(exception | r_zero) { - m_r = 0; - m_r[mant_bits-1] = r_nan; - } - ac_int d_r = m_r; - d_r.set_slc(mant_bits, e_r); - d_r[W-1] = r_sign; - ac_std_float r; - r.set_data(d_r); - return r; - } - template - ac_std_float fma(const ac_std_float &op2, const ac_std_float &op3) const { -#ifndef AC_STD_FLOAT_FMA_OVERRIDE - return fma_generic(op2,op3); -#else - return AC_STD_FLOAT_OVERRIDE_NS AC_STD_FLOAT_FMA_OVERRIDE(*this,op2,op3); -#endif - } - template - ac_std_float sqrt_generic() const { - ac_private::check_supported(); - const bool rnd = QR != AC_TRN_ZERO; // need msb(rounded bits) - const bool rbits = QR != AC_TRN_ZERO; // need bits after msb(rounded bits) - e_t op1_e; - bool op1_normal, op1_sign, op1_zero; - bool op1_inf, op1_nan; - mu_t op1_mu; - extract(op1_mu, op1_e, op1_sign, op1_normal, op1_zero, op1_inf, op1_nan, true, No_SubNormals); - int ls_op1 = No_SubNormals ? 0 : (unsigned) op1_mu.leading_sign(); - op1_mu <<= ls_op1; - op1_mu[mu_bits-1] = true; // Since it is normalized, zero is captured by op1_zero - - bool exp_odd = (op1_e ^ !op1_normal ^ ls_op1 ^ exp_bias) & 1; - - int exp = ac_int(op1_e) + !op1_normal - ls_op1 - exp_bias; - exp >>= 1; // divide by 2, truncate towards -inf - - ac_int op1_mi = op1_mu; - op1_mi <<= exp_odd; - ac_int sq_rt; - bool sticky_bit = ac::fx_sqrt(op1_mi, sq_rt); - bool r_normal = true; // true for most practical cases on W,E - if(mant_bits > -min_exp) { - int exp_over = min_exp - exp; - if(exp_over > 0) { - if(rbits) { - typedef ac_int t_t; - t_t shifted_out_bits = sq_rt; - shifted_out_bits &= ~((~t_t(0)) << exp_over); - sticky_bit |= !!shifted_out_bits; - } - sq_rt >>= exp_over; - exp = min_exp; - r_normal = false; - } - } - // rounding should not trigger overflow (unless truncate towards +inf which is currently not supported) - ac_fixed sq_rt_rnd = 0; - if(rbits) - sq_rt_rnd[0] = sq_rt_rnd[0] | sticky_bit; - sq_rt_rnd.set_slc(rbits, sq_rt); - ac_fixed sq_rt_fx = sq_rt_rnd; - - ac_int m_r = sq_rt_fx.template slc(0); - bool r_nan = op1_nan | (op1_sign & !op1_zero); - bool r_zero = op1_zero; - r_zero |= !r_normal & No_SubNormals; - bool r_inf = op1_inf; - bool exception = r_nan | r_inf; - exp += exp_bias; - ac_int e_r = exception ? -1 : (r_zero | !r_normal) ? 
0 : exp;
-    if(exception | r_zero) {
-      m_r = 0;
-      m_r[mant_bits-1] = r_nan;
-    }
-    ac_int<W,true> d_r = m_r;
-    d_r.set_slc(mant_bits, e_r);
-    ac_std_float r;
-    r.set_data(d_r);
-    return r;
-  }
-  template<ac_q_mode QR = AC_RND_CONV, bool No_SubNormals = false>
-  ac_std_float sqrt() const {
-#ifndef AC_STD_FLOAT_SQRT_OVERRIDE
-    return sqrt_generic<QR,No_SubNormals>();
-#else
-    return AC_STD_FLOAT_OVERRIDE_NS AC_STD_FLOAT_SQRT_OVERRIDE(*this);
-#endif
-  }
-  ac_std_float operator +(const ac_std_float &op2) const {
-    return add(op2);
-  }
-  ac_std_float operator -(const ac_std_float &op2) const {
-    return sub(op2);
-  }
-  ac_std_float operator *(const ac_std_float &op2) const {
-    return mult(op2);
-  }
-  ac_std_float operator /(const ac_std_float &op2) const {
-    return div(op2);
-  }
-  ac_std_float &operator +=(const ac_std_float &op2) {
-    *this = operator +(op2);
-    return *this;
-  }
-  ac_std_float &operator -=(const ac_std_float &op2) {
-    *this = operator -(op2);
-    return *this;
-  }
-  ac_std_float &operator *=(const ac_std_float &op2) {
-    *this = operator *(op2);
-    return *this;
-  }
-  ac_std_float &operator /=(const ac_std_float &op2) {
-    *this = operator /(op2);
-    return *this;
-  }
-  bool operator ==(const ac_std_float &op2) const {
-    return ((d == op2.d) && !isnan()) || (operator !() && op2.operator !());
-  }
-  bool operator !=(const ac_std_float &op2) const {
-    return !operator ==(op2);
-  }
-  bool magnitude_lt(const ac_std_float &op2) const {
-    return ac_int<W-1,false>(d) < ac_int<W-1,false>(op2.d);
-  }
-  bool neg() const { return d[W-1]; }
-  bool operator <(const ac_std_float &op2) const {
-    return
-      operator !=(op2) && ( (neg() && !op2.neg()) || (!(neg() ^ op2.neg()) && neg() ^ magnitude_lt(op2)) )
-      && !isnan() && !op2.isnan();
-  }
-  bool operator >=(const ac_std_float &op2) const {
-    return
-      (operator ==(op2) || (!neg() && op2.neg()) || (!(neg() ^ op2.neg()) && !neg() ^ magnitude_lt(op2)) )
-      && !isnan() && !op2.isnan();
-  }
-  bool operator >(const ac_std_float &op2) const {
-    return
-      operator !=(op2)
-      && ( (!neg() && op2.neg()) || (!(neg() ^ op2.neg()) && !neg() ^ magnitude_lt(op2)) )
-      && !isnan() && !op2.isnan();
-  }
-  bool operator <=(const ac_std_float &op2) const {
-    return
-      (operator == (op2) || (neg() && !op2.neg()) || (!neg() ^ op2.neg() && neg() ^ magnitude_lt(op2)) )
-      && !isnan() && !op2.isnan();
-  }
-  bool operator !() const { return !ac_int<W-1,false>(d); }
-  ac_std_float operator -() const {
-    ac_std_float r(*this);
-    r.d[W-1] = !d[W-1];
-    return r;
-  }
-  ac_std_float operator +() const {
-    return ac_std_float(*this);
-  }
-  ac_std_float abs() const {
-    ac_std_float r(*this);
-    r.d[W-1] = false;
-    return r;
-  }
-  ac_std_float copysign(const ac_std_float &op2) const {
-    ac_std_float r(*this);
-    r.d[W-1] = op2.d[W-1];
-    return r;
-  }
-  bool signbit() const {
-    return d[W-1];
-  }
-  void set_signbit(bool s) {
-    d[W-1] = s;
-  }
-  ac_std_float ceil() const {
-    ac_int<E,false> e = d.template slc<E>(mant_bits);
-    bool sign = d[W-1];
-    if(!d.template slc<W-1>(0))
-      return *this;
-    if(e < exp_bias) {
-      return sign ?
zero() : one(); - } else { - ac_std_float r(*this); - int e_dif = mant_bits + exp_bias - e; - if((e_dif < 0) | (e == ac_int(-1))) - return r; - else { - typedef ac_int mant_t; - mant_t m = d; - mant_t mask = (~mant_t(0)) << e_dif; - bool non_zero_fractional = !!(m & ~mask); - if(!sign) { - m |= ~mask; - mu_t mu = m + mant_t(non_zero_fractional); - e += mu[mant_bits]; - r.d.set_slc(mant_bits, e); - m = mu; - } - m &= mask; // truncate fractional bits - r.d.set_slc(0, m); - return r; - } - } - } - ac_std_float floor() const { - ac_int e = d.template slc(mant_bits); - bool sign = d[W-1]; - if(!d.template slc(0)) - return *this; - if(e < exp_bias) { - return sign ? -one() : zero(); - } else { - ac_std_float r(*this); - int e_dif = mant_bits + exp_bias - e; - if((e_dif < 0) | (e == ac_int(-1))) - return r; - else { - typedef ac_int mant_t; - mant_t m = d; - mant_t mask = (~mant_t(0)) << e_dif; - bool non_zero_fractional = !!(m & ~mask); - if(sign) { - m |= ~mask; - mu_t mu = m + mant_t(non_zero_fractional); - e += mu[mant_bits]; - r.d.set_slc(mant_bits, e); - m = mu; - } - m &= mask; // truncate fractional bits - r.d.set_slc(0, m); - return r; - } - } - } - ac_std_float trunc() const { - ac_int e = d.template slc(mant_bits); - if(e < exp_bias) { - return zero(); - } else { - ac_std_float r(*this); - int e_dif = mant_bits + exp_bias - e; - if((e_dif < 0) | (e == ac_int(-1))) - return r; - else { - typedef ac_int mant_t; - mant_t m = d; - mant_t mask = (~mant_t(0)) << e_dif; - m &= mask; // truncate fractional bits - r.d.set_slc(0, m); - return r; - } - } - } - ac_std_float round() const { - ac_int e = d.template slc(mant_bits); - if(e < exp_bias-1) { - return zero(); - } else { - ac_std_float r(*this); - int e_dif = mant_bits + exp_bias -1 - e; - if((e_dif < 0) | (e == ac_int(-1))) - return r; - else { - typedef ac_int mant_t; - mant_t m = d; - mant_t mask = (~mant_t(0)) << e_dif; - m |= ~mask; - mu_t mu = m + mant_t(1); - e += mu[mant_bits]; - r.d.set_slc(mant_bits, e); - m = mu; - m &= mask << 1; // truncate fractional bits - r.d.set_slc(0, m); - return r; - } - } - } -}; - -template -inline std::ostream& operator << (std::ostream &os, const ac_std_float &x) { - // for now just print the raw ac_int for it - os << x.data().to_string(AC_HEX); - return os; -} - -namespace ac { - // Type punning: using memcpy to avoid strict aliasing - inline void copy_bits(float f, int *x) { - std::memcpy(x, &f, sizeof(int)); - } - inline void copy_bits(double f, long long *x) { - std::memcpy(x, &f, sizeof(long long)); - } - inline void copy_bits(int x, float *f) { - std::memcpy(f, &x, sizeof(float)); - } - inline void copy_bits(long long x, double *f) { - std::memcpy(f, &x, sizeof(double)); - } - - inline void copy_bits(const ac_std_float<32,8> &x, float *f) { - copy_bits(x.data().to_int(), f); - } - inline void copy_bits(const ac_std_float<64,11> &x, double *f) { - copy_bits(x.data().to_int64(), f); - } -} - -template -class ac_ieee_float_base { -public: - static const int width = 1 << ((int)Format + 4); - // exponents are {5,8,11,15,19}, but the first three are specialized elsewhere - static const int e_width = 11 + ((int)Format - binary64)*4; // 11, 15, 19 - static const int lls = width >> 6; - typedef long long (data_t)[lls]; - typedef ac_std_float ac_std_float_t; - typedef ac_std_float helper_t; - typedef ac_float ac_float_t; - data_t d; - ac_ieee_float_base() {} - ac_ieee_float_base(const ac_ieee_float_base &f) { - ac::copy_bits(f.d, &d); - } - explicit ac_ieee_float_base(const helper_t &op) { - 
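// Note on ac::copy_bits (defined above): it deliberately uses std::memcpy for
// the float/int type punning. Unlike a reinterpret_cast, copying the bytes of
// an equally sized object is well defined under the strict-aliasing rules, and
// compilers routinely lower a fixed-size memcpy to a single register move, so
// the bit copy costs nothing at run time.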
ac::copy_bits(op.data(), &d); - } - explicit ac_ieee_float_base(double f); -protected: - helper_t to_helper_t() const { - ac_int dat; - ac::copy_bits(d, &dat); - helper_t x; - x.set_data(dat); - return x; - } -public: - void set_data(const data_t &op) { ac::copy_bits(op, &d); } - void set_data(const ac_int &op) { ac::copy_bits(op, &d); } - const data_t &data() const { return d; } - ac_int data_ac_int() const { - ac_int x; - ac::copy_bits(d, &x); - return x; - } - bool signbit() const { return d[lls-1] < 0; } - void set_signbit(bool s) { - ac_int<64,true> t(d[lls-1]); - t[63] = s; - d[lls-1] = t.to_int64(); - } -}; - -template -inline std::ostream& operator << (std::ostream &os, const ac_ieee_float_base &x) { - // for now print the 128 and 256 as raw ac_int - os << x.data_ac_int().to_string(AC_HEX); - return os; -} - -template<> class ac_ieee_float_base { -public: - static const int width = 16; - static const int e_width = 5; - typedef ac_std_float ac_std_float_t; - typedef short data_t; - typedef ac_std_float helper_t; - typedef ac_float ac_float_t; - data_t d; - ac_ieee_float_base() {} - ac_ieee_float_base(const ac_ieee_float_base &f) : d(f.d) {} - explicit ac_ieee_float_base(const helper_t &op) : d(op.data()) {} - explicit ac_ieee_float_base(float f) : d((short)ac_std_float(f).data().to_int()) {} -protected: - helper_t to_helper_t() const { - helper_t x; - x.set_data(d); - return x; - } -public: - float to_float() const { - ac_std_float_t t; - t.set_data(this->data_ac_int()); - return t.to_float(); - } -#if __cplusplus > 199711L - explicit operator float() const { return to_float(); } -#endif - void set_data(short op) { ac::copy_bits(op, &d); } - void set_data(const ac_int &op) { ac::copy_bits(op, &d); } - const data_t &data() const { return d; } - ac_int data_ac_int() const { - ac_int x; - ac::copy_bits(d, &x); - return x; - } - bool signbit() const { return d < 0; } - void set_signbit(bool s) { - ac_int t(d); - t[width-1] = s; - d = t; - } -}; - -inline std::ostream& operator << (std::ostream &os, const ac_ieee_float_base &x) { - os << x.to_float(); - return os; -} - -struct float_helper { - float d; - float_helper() {} - float_helper(float f) { d = f; } - float_helper(const float_helper &f) { d = f.d; } - float_helper(const float_helper &f, bool no_subnormals) { - d = no_subnormals && f.fpclassify() == FP_SUBNORMAL ? std::signbit(f.d) ? 
-0.0 : 0.0 : f.d; - } - float_helper(const ac_std_float<32,8> &f) { set_data(f.data().to_int()); } - template - float_helper(const ac_float<25,2,8,Q> &f) : d(f.to_float()) {} - const float &data() const { return d; } - void set_data(int data) { ac::copy_bits(data, &d); } - void set_data(float data) { d = data; } - operator float() const { return d; } - float to_float() const { return d; } - int fpclassify() const { return std::fpclassify(d); } - bool isfinite() const { return std::isfinite(d); } - bool isnormal() const { return std::isnormal(d); } - bool isinf() const { return std::isinf(d); } - bool isnan() const { return std::isnan(d); } - static float nan() { return ac_std_float<32,8>::nan().to_float(); } - static float inf() { return ac_std_float<32,8>::inf().to_float(); } - static float denorm_min() { return ac_std_float<32,8>::denorm_min().to_float(); } - static float min() { return ac_std_float<32,8>::min().to_float(); } - static float max() { return ac_std_float<32,8>::max().to_float(); } - static float epsilon() { return ac_std_float<32,8>::epsilon().to_float(); } - template - float_helper add(const float_helper &op2) const { - ac_private::check_supported2(); - return float_helper( float_helper(*this, No_SubNormals) + float_helper(op2, No_SubNormals), No_SubNormals); - } - template - float_helper sub(const float_helper &op2) const { - ac_private::check_supported2(); - return float_helper( float_helper(*this, No_SubNormals) - float_helper(op2, No_SubNormals), No_SubNormals); - } - template - float_helper mult(const float_helper &op2) const { - ac_private::check_supported2(); - return float_helper( float_helper(*this, No_SubNormals) * float_helper(op2, No_SubNormals), No_SubNormals); - } - template - float_helper div(const float_helper &op2) const { - ac_private::check_supported2(); - return float_helper( float_helper(*this, No_SubNormals) / float_helper(op2, No_SubNormals), No_SubNormals); - } - template - float_helper fma(const float_helper &op2, const float_helper &op3) const { - ac_private::check_supported2(); - return float_helper( ::fmaf(float_helper(*this, No_SubNormals), float_helper(op2, No_SubNormals), float_helper(op3, No_SubNormals)), No_SubNormals); - } - template - float_helper sqrt() const { - ac_private::check_supported2(); - return float_helper( ::sqrtf(float_helper(*this, No_SubNormals)), No_SubNormals); - } - float_helper ceil() const { return float_helper(std::ceil(d)); } - float_helper floor() const { return float_helper(std::floor(d)); } - float_helper trunc() const { return float_helper(::truncf(d)); } - float_helper round() const { return float_helper(::roundf(d)); } -}; - -template<> class ac_ieee_float_base { -public: - static const int width = 32; - static const int e_width = 8; - typedef ac_std_float ac_std_float_t; -#ifdef AC_IEEE_FLOAT_USE_BUILTIN - typedef float data_t; - typedef float_helper helper_t; -#else - typedef int data_t; - typedef ac_std_float helper_t; -#endif - typedef ac_float ac_float_t; - data_t d; - ac_ieee_float_base() {} - ac_ieee_float_base(const ac_ieee_float_base &f) : d(f.d) {} - explicit ac_ieee_float_base(const helper_t &op) : d(op.data()) {} - explicit ac_ieee_float_base(float f) { ac::copy_bits(f, &d); } -protected: - helper_t to_helper_t() const { - helper_t x; - x.set_data(d); - return x; - } -public: -#if __cplusplus > 199711L - explicit operator float() const { - float f; - ac::copy_bits(d, &f); - return f; - } -#endif - float to_float() const { - float f; - ac::copy_bits(d, &f); - return f; - } - void set_data(int op) { 
ac::copy_bits(op, &d); } - void set_data(float op) { ac::copy_bits(op, &d); } - void set_data(const ac_int &op) { ac::copy_bits(op, &d); } - const data_t &data() const { return d; } - ac_int data_ac_int() const { - ac_int x; - ac::copy_bits(d, &x); - return x; - } - bool signbit() const { - int x; ac::copy_bits(d, &x); - return x < 0; - } - void set_signbit(bool s) { - ac_int t; - ac::copy_bits(d, &t); - t[width-1] = s; - ac::copy_bits(t, &d); - } -}; - -inline std::ostream& operator << (std::ostream &os, const ac_ieee_float_base &x) { - os << x.to_float(); - return os; -} - -struct double_helper { - double d; - double_helper() {} - double_helper(double f) { d = f; } - double_helper(const float_helper &f) { d = f.d; } - double_helper(const double_helper &f, bool no_subnormals) { - d = no_subnormals && f.fpclassify() == FP_SUBNORMAL ? std::signbit(f.d) ? -0.0 : 0.0 : f.d; - } - double_helper(const ac_std_float<64,11> &f) { set_data(f.data().to_int64()); } - template - double_helper(const ac_float<54,2,11,Q> &f) : d(f.to_double()) {} - const double &data() const { return d; } - void set_data(long long data) { - ac::copy_bits(data, &d); - } - void set_data(double data) { d = data; } - operator double() const { return d; } - double to_double() const { return d; } - int fpclassify() const { return std::fpclassify(d); } - bool isfinite() const { return std::isfinite(d); } - bool isnormal() const { return std::isnormal(d); } - bool isinf() const { return std::isinf(d); } - bool isnan() const { return std::isnan(d); } - static double nan() { return ac_std_float<64,11>::nan().to_double(); } - static double inf() { return ac_std_float<64,11>::inf().to_double(); } - static double denorm_min() { return ac_std_float<64,11>::denorm_min().to_double(); } - static double min() { return ac_std_float<64,11>::min().to_double(); } - static double max() { return ac_std_float<64,11>::max().to_double(); } - static double epsilon() { return ac_std_float<64,11>::epsilon().to_double(); } - template - double_helper add(const double_helper &op2) const { - ac_private::check_supported2(); - return double_helper( double_helper(*this, No_SubNormals) + double_helper(op2, No_SubNormals), No_SubNormals); - } - template - double_helper sub(const double_helper &op2) const { - ac_private::check_supported2(); - return double_helper( double_helper(*this, No_SubNormals) - double_helper(op2, No_SubNormals), No_SubNormals); - } - template - double_helper mult(const double_helper &op2) const { - ac_private::check_supported2(); - return double_helper( double_helper(*this, No_SubNormals) * double_helper(op2, No_SubNormals), No_SubNormals); - } - template - double_helper div(const double_helper &op2) const { - ac_private::check_supported2(); - return double_helper( double_helper(*this, No_SubNormals) / double_helper(op2, No_SubNormals), No_SubNormals); - } - template - double_helper fma(const double_helper &op2, const double_helper &op3) const { - ac_private::check_supported2(); - return double_helper( ::fma((double) double_helper(*this, No_SubNormals), (double) double_helper(op2, No_SubNormals), (double) double_helper(op3, No_SubNormals)), No_SubNormals); - } - template - double_helper sqrt() const { - ac_private::check_supported2(); - return double_helper( ::sqrt((double) double_helper(*this, No_SubNormals)), No_SubNormals); - } - double_helper ceil() const { return double_helper(std::ceil(d)); } - double_helper floor() const { return double_helper(std::floor(d)); } - double_helper trunc() const { return double_helper(::trunc(d)); } - 
double_helper round() const { return double_helper(::round(d)); } -}; - -template<> class ac_ieee_float_base { -public: - static const int width = 64; - static const int e_width = 11; - typedef ac_std_float ac_std_float_t; -#ifdef AC_IEEE_FLOAT_USE_BUILTIN - typedef double data_t; - typedef double_helper helper_t; -#else - typedef long long data_t; - typedef ac_std_float helper_t; -#endif - typedef ac_float ac_float_t; - data_t d; - ac_ieee_float_base() {} - ac_ieee_float_base(const ac_ieee_float_base &f) : d(f.d) {} - explicit ac_ieee_float_base(const helper_t &op) : d(op.data()) {} - explicit ac_ieee_float_base(double f) { ac::copy_bits(f, &d); } -protected: - helper_t to_helper_t() const { - helper_t x; - x.set_data(d); - return x; - } -public: -#if __cplusplus > 199711L - explicit operator double() const { - double f; - ac::copy_bits(d, &f); - return f; - } -#endif - double to_double() const { - double f; - ac::copy_bits(d, &f); - return f; - } - void set_data(long long op) { ac::copy_bits(op, &d); } - void set_data(double op) { ac::copy_bits(op, &d); } - void set_data(const ac_int &op) { ac::copy_bits(op, &d); } - const data_t &data() const { return d; } - ac_int data_ac_int() const { - ac_int x; - ac::copy_bits(d, &x); - return x; - } - bool signbit() const { - long long x; ac::copy_bits(d, &x); - return x < 0; - } - void set_signbit(bool s) { - ac_int t; - ac::copy_bits(d, &t); - t[width-1] = s; - ac::copy_bits(t, &d); - } -}; - -inline std::ostream& operator << (std::ostream &os, const ac_ieee_float_base &x) { - os << x.to_double(); - return os; -} - -namespace ac_private { - template - struct ac_ieee_float_constructor {}; - template<> struct ac_ieee_float_constructor { - typedef int type_explicit; - }; - template<> struct ac_ieee_float_constructor { - typedef int type_explicit; - }; - template<> struct ac_ieee_float_constructor { - typedef int type; - }; - template<> struct ac_ieee_float_constructor { - typedef int type_explicit; - }; - template<> struct ac_ieee_float_constructor { - typedef int type_explicit; - }; - template<> struct ac_ieee_float_constructor { - typedef int type; - }; - template<> struct ac_ieee_float_constructor { - typedef int type_explicit; - }; - template<> struct ac_ieee_float_constructor { - typedef int type_explicit; - }; - template<> struct ac_ieee_float_constructor { - typedef int type_explicit; - }; - template<> struct ac_ieee_float_constructor { - typedef int type_explicit; - }; -} - -template -class ac_ieee_float : public ac_ieee_float_base { -public: - typedef ac_ieee_float_base Base; - template - struct rt_T { - typedef typename ac_private::rt_closed_T::type mult; - typedef typename ac_private::rt_closed_T::type plus; - typedef typename ac_private::rt_closed_T::type minus; - typedef typename ac_private::rt_closed_T::type minus2; - typedef typename ac_private::rt_closed_T::type logic; - typedef typename ac_private::rt_closed_T::type div; - typedef typename ac_private::rt_closed_T::type div2; - }; - struct rt_unary { - typedef ac_ieee_float neg; - typedef ac_ieee_float mag_sqr; - typedef ac_ieee_float mag; - }; - static const int width = Base::width; - static const int e_width = Base::e_width; - static const int lls = width >> 6; - typedef typename Base::data_t data_t; - typedef typename Base::helper_t helper_t; - typedef typename Base::ac_float_t ac_float_t; - typedef ac_std_float ac_std_float_t; -public: - static ac_ieee_float nan() { return ac_ieee_float(helper_t::nan()); } - static ac_ieee_float inf() { return ac_ieee_float(helper_t::inf()); } - 
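// Note: the factory functions below return whatever helper_t produces. helper_t
// defaults to the bit-accurate ac_std_float implementation; for the binary32 and
// binary64 specializations above, defining AC_IEEE_FLOAT_USE_BUILTIN swaps in
// float_helper / double_helper instead, so simulation runs on the host FPU
// behind the same interface.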
static ac_ieee_float denorm_min() { return ac_ieee_float(helper_t::denorm_min()); } - static ac_ieee_float min() { return ac_ieee_float(helper_t::min()); } - static ac_ieee_float max() { return ac_ieee_float(helper_t::max()); } - static ac_ieee_float epsilon() { return ac_ieee_float(helper_t::epsilon()); } - static ac_ieee_float zero() { return ac_ieee_float(ac_std_float_t::zero()); } - static ac_ieee_float one() { return ac_ieee_float(ac_std_float_t::one()); } - ac_ieee_float() {} -private: - ac_ieee_float(const Base &f) : Base(f) {} -public: - ac_ieee_float(const ac_std_float &f) : Base(f) {} - ac_ieee_float(const ac_ieee_float &f) : Base(f) {} - template - explicit ac_ieee_float(const ac_ieee_float &f) : Base(ac_std_float_t(f.to_ac_std_float())) {} - template - explicit ac_ieee_float(const ac_std_float &f) : Base(ac_std_float_t(f)) {} - explicit ac_ieee_float(const ac::bfloat16 &f); - explicit ac_ieee_float(const ac_float_t &f) : Base(ac_std_float_t(f)) {} - template - explicit ac_ieee_float(const ac_fixed &fx) : Base(ac_std_float_t(fx)) {} - template - explicit ac_ieee_float(const ac_float &f) : Base(ac_std_float_t(f)) {} - template - ac_ieee_float to_ac_ieee_float() const { return ac_ieee_float(*this); } - const ac_float_t to_ac_float() const { - return to_ac_std_float().to_ac_float(); - } - const ac_std_float to_ac_std_float() const { - ac_std_float r; - r.set_data(data_ac_int()); - return r; - } - template - ac_fixed convert_to_ac_fixed(bool map_inf=false) const { - return to_ac_std_float().template convert_to_ac_fixed(map_inf); - } - void set_data(const data_t &data) { - Base::set_data(data); - } - const ac_int data_ac_int() const { return Base::data_ac_int(); } - const data_t &data() const { return Base::d; } - template - ac_ieee_float(const T &f, typename ac_private::template ac_ieee_float_constructor::type d = 0) : Base(ac_std_float_t(f)) {} - template - explicit ac_ieee_float(const T &f, typename ac_private::template ac_ieee_float_constructor::type_explicit d = 0) : Base(ac_std_float_t(f)) {} - explicit ac_ieee_float(int x) { - *this = ac_ieee_float(ac_fixed<32,32,true>(x)); - } - explicit ac_ieee_float(long long x) { - *this = ac_ieee_float(ac_fixed<64,64,true>(x)); - } - int fpclassify() const { return Base::to_helper_t().fpclassify(); } - bool isfinite() const { return Base::to_helper_t().isfinite(); } - bool isnormal() const { return Base::to_helper_t().isnormal(); } - bool isinf() const { return Base::to_helper_t().isinf(); } - bool isnan() const { return Base::to_helper_t().isnan(); } - - template - ac_ieee_float add(const ac_ieee_float &op2) const { - return ac_ieee_float(Base(Base::to_helper_t().template add(op2.Base::to_helper_t()))); - } - template - ac_ieee_float sub(const ac_ieee_float &op2) const { - return ac_ieee_float(Base(Base::to_helper_t().template sub(op2.Base::to_helper_t()))); - } - template - ac_ieee_float mult(const ac_ieee_float &op2) const { - return ac_ieee_float(Base(Base::to_helper_t().template mult(op2.Base::to_helper_t()))); - } - template - ac_ieee_float div(const ac_ieee_float &op2) const { - return ac_ieee_float(Base(Base::to_helper_t().template div(op2.Base::to_helper_t()))); - } - template - ac_ieee_float fma(const ac_ieee_float &op2, const ac_ieee_float &op3) const { - return ac_ieee_float(Base(Base::to_helper_t().template fma(op2.Base::to_helper_t(), op3.Base::to_helper_t()))); - } - template - ac_ieee_float sqrt() const { - return ac_ieee_float(Base(Base::to_helper_t().template sqrt())); - } - - ac_ieee_float operator +(const ac_ieee_float 
&op2) const { - return ac_ieee_float(Base(Base::to_helper_t() + op2.Base::to_helper_t())); - } - ac_ieee_float operator -(const ac_ieee_float &op2) const { - return ac_ieee_float(Base(Base::to_helper_t() - op2.Base::to_helper_t())); - } - ac_ieee_float operator *(const ac_ieee_float &op2) const { - return ac_ieee_float(Base(Base::to_helper_t() * op2.Base::to_helper_t())); - } - ac_ieee_float operator /(const ac_ieee_float &op2) const { - return ac_ieee_float(Base(Base::to_helper_t() / op2.Base::to_helper_t())); - } - - ac_ieee_float &operator +=(const ac_ieee_float &op2) { - return *this = operator +(op2); - } - ac_ieee_float &operator -=(const ac_ieee_float &op2) { - return *this = operator -(op2); - } - ac_ieee_float &operator *=(const ac_ieee_float &op2) { - return *this = operator *(op2); - } - ac_ieee_float &operator /=(const ac_ieee_float &op2) { - return *this = operator /(op2); - } - - bool operator ==(const ac_ieee_float &op2) const { - return Base::to_helper_t() == op2.Base::to_helper_t(); - } - bool operator !=(const ac_ieee_float &op2) const { - return Base::to_helper_t() != op2.Base::to_helper_t(); - } - bool operator <(const ac_ieee_float &op2) const { - return Base::to_helper_t() < op2.Base::to_helper_t(); - } - bool operator >=(const ac_ieee_float &op2) const { - return Base::to_helper_t() >= op2.Base::to_helper_t(); - } - bool operator >(const ac_ieee_float &op2) const { - return Base::to_helper_t() > op2.Base::to_helper_t(); - } - bool operator <=(const ac_ieee_float &op2) const { - return Base::to_helper_t() <= op2.Base::to_helper_t(); - } - - ac_ieee_float operator -() const { - ac_ieee_float r(*this); - r.set_signbit(!this->signbit()); - return r; - } - ac_ieee_float operator +() const { - return ac_ieee_float(*this); - } - ac_ieee_float abs() const { - ac_ieee_float r(*this); - r.set_signbit(false); - return r; - } - ac_ieee_float copysign(const ac_ieee_float &op2) const { - ac_ieee_float r(*this); - r.set_signbit(op2.signbit()); // take the sign of op2, matching copysign semantics and ac_std_float::copysign above - return r; - } - bool signbit() const { return Base::signbit(); } - ac_ieee_float add(const ac_ieee_float &op1, const ac_ieee_float &op2) { - return *this = op1 + op2; - } - ac_ieee_float ceil() const { - return ac_ieee_float(Base(Base::to_helper_t().ceil())); - } - ac_ieee_float floor() const { - return ac_ieee_float(Base(Base::to_helper_t().floor())); - } - ac_ieee_float trunc() const { - return ac_ieee_float(Base(Base::to_helper_t().trunc())); - } - ac_ieee_float round() const { - return ac_ieee_float(Base(Base::to_helper_t().round())); - } - ac_ieee_float sub(const ac_ieee_float &op1, const ac_ieee_float &op2) { - return *this = op1 - op2; - } - ac_ieee_float mult(const ac_ieee_float &op1, const ac_ieee_float &op2) { - return *this = op1 * op2; - } - ac_ieee_float div(const ac_ieee_float &op1, const ac_ieee_float &op2) { - return *this = op1 / op2; - } -}; - -template -inline std::ostream& operator << (std::ostream &os, const ac_ieee_float &x) { - os << (const ac_ieee_float_base&) x; - return os; -} - -namespace ac { -class bfloat16 { -public: - template - struct rt_T { - typedef typename ac_private::rt_closed_T::type mult; - typedef typename ac_private::rt_closed_T::type plus; - typedef typename ac_private::rt_closed_T::type minus; - typedef typename ac_private::rt_closed_T::type minus2; - typedef typename ac_private::rt_closed_T::type logic; - typedef typename ac_private::rt_closed_T::type div; - typedef typename ac_private::rt_closed_T::type div2; - }; - struct rt_unary { - typedef bfloat16 neg; - typedef bfloat16 mag_sqr; - typedef
bfloat16 mag; - }; - static const int width = 16; - static const int e_width = 8; - static bfloat16 nan() { return bfloat16(helper_t::nan()); } - static bfloat16 inf() { return bfloat16(helper_t::inf()); } - static bfloat16 denorm_min() { return bfloat16(helper_t::denorm_min()); } - static bfloat16 min() { return bfloat16(helper_t::min()); } - static bfloat16 max() { return bfloat16(helper_t::max()); } - static bfloat16 epsilon() { return bfloat16(helper_t::epsilon()); } - static bfloat16 zero() { return bfloat16(ac_std_float_t::zero()); } - static bfloat16 one() { return bfloat16(ac_std_float_t::one()); } - typedef ac_std_float helper_t; - typedef short data_t; - typedef ac_float ac_float_t; - typedef ac_std_float ac_std_float_t; - data_t d; - bfloat16() {} - bfloat16(const bfloat16 &f) : d(f.d) {} - bfloat16(const ac_std_float_t &op) : d(op.data()) {} - bfloat16(float f) { int x; ac::copy_bits(f, &x); d = (short) (x >> 16); } - template - explicit bfloat16(const ac_std_float &f) { - *this = f.template convert(); - } - template - explicit bfloat16(const ac_std_float &f) { - *this = f.template convert(); - } - template - explicit bfloat16(const ac_ieee_float &f) { - *this = f.to_ac_std_float().template convert(); - } - template - explicit bfloat16(const ac_fixed &fx) { - ac_std_float_t x; - x.assign_from(fx); - *this = x; - } -private: - const helper_t to_helper_t() const { - helper_t x; - x.set_data(d); - return x; - } -public: - const ac_std_float_t to_ac_std_float() const { - ac_std_float_t x; - x.set_data(d); - return x; - } - const ac_float_t to_ac_float() const { - return to_ac_std_float().to_ac_float(); - } - template - ac_fixed convert_to_ac_fixed(bool map_inf=false) const { - return to_ac_std_float().template convert_to_ac_fixed(map_inf); - } - float to_float() const { - return to_ac_std_float().to_float(); - } - double to_double() const { - return to_ac_std_float().to_double(); - } - // operator is efficient since E is identical and mantissa is longer -#if __cplusplus > 199711L - explicit operator float() const { return to_float(); } -#endif - int fpclassify() const { return to_helper_t().fpclassify(); } - bool isfinite() const { return to_helper_t().isfinite(); } - bool isnormal() const { return to_helper_t().isnormal(); } - bool isinf() const { return to_helper_t().isinf(); } - bool isnan() const { return to_helper_t().isnan(); } - void set_data(short op) { ac::copy_bits(op, &d); } - void set_data(const ac_int &op) { ac::copy_bits(op, &d); } - const data_t &data() const { return d; } - ac_int<16,true> data_ac_int() const { return ac_int<16,true>(d); } - - // mirrored most constructors in the tensorflow implementation (except template version) - // tensorflow uses static_cast - // this implementation goes through ac_std_float so there is no dependency on rounding mode -// template -// explicit bfloat16(const T& val) { *this = bfloat16(static_cast(val)); } - explicit bfloat16(unsigned short val) { - ac_std_float_t t; - t.assign_from( ac_int<16,false>(val) ); - *this = t; - } - explicit bfloat16(int val) { - ac_std_float_t t; - t.assign_from( ac_int<32,true>(val) ); - *this = t; - } - explicit bfloat16(unsigned int val) { - ac_std_float_t t; - t.assign_from( ac_int<32,false>(val) ); - *this = t; - } - explicit bfloat16(long val) { - const int long_w = ac_private::long_w; - ac_std_float_t t; - t.assign_from( ac_int(val) ); - *this = t; - } - explicit bfloat16(long long val) { - ac_std_float_t t; - t.assign_from( ac_int<64,false>(val) ); - *this = t; - } - explicit bfloat16(double val) {
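// As with the scalar constructors above, the double is routed through
// ac_ieee_float / ac_std_float rather than a plain static_cast, so the result
// does not depend on the host's current rounding mode.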
*this = bfloat16(ac_ieee_float(val)); } - - template - bfloat16 add(const bfloat16 &op2) const { - return bfloat16(to_helper_t().add(op2.to_helper_t())); - } - template - bfloat16 sub(const bfloat16 &op2) const { - return bfloat16(to_helper_t().sub(op2.to_helper_t())); - } - template - bfloat16 mult(const bfloat16 &op2) const { - return bfloat16(to_helper_t().mult(op2.to_helper_t())); - } - template - bfloat16 div(const bfloat16 &op2) const { - return bfloat16(to_helper_t().div(op2.to_helper_t())); - } - template - bfloat16 fma(const bfloat16 &op2, const bfloat16 &op3) const { - return bfloat16(to_helper_t().fma(op2.to_helper_t(), op3.to_helper_t())); - } - template - bfloat16 sqrt() const { - return bfloat16(to_helper_t().sqrt()); - } - - bfloat16 operator +(const bfloat16 &op2) const { - return bfloat16(to_helper_t().add(op2.to_helper_t())); - } - bfloat16 operator -(const bfloat16 &op2) const { - return bfloat16(to_helper_t().sub(op2.to_helper_t())); - } - bfloat16 operator *(const bfloat16 &op2) const { - return bfloat16(to_helper_t().mult(op2.to_helper_t())); - } - bfloat16 operator /(const bfloat16 &op2) const { - return bfloat16(to_helper_t().div(op2.to_helper_t())); - } - bfloat16 &operator +=(const bfloat16 &op2) { - return *this = operator +(op2); - } - bfloat16 &operator -=(const bfloat16 &op2) { - return *this = operator -(op2); - } - bfloat16 &operator *=(const bfloat16 &op2) { - return *this = operator *(op2); - } - bfloat16 &operator /=(const bfloat16 &op2) { - return *this = operator /(op2); - } - - bool operator ==(const bfloat16 &op2) const { - return to_helper_t() == op2.to_helper_t(); - } - bool operator !=(const bfloat16 &op2) const { - return to_helper_t() != op2.to_helper_t(); - } - bool operator <(const bfloat16 &op2) const { - return to_helper_t() < op2.to_helper_t(); - } - bool operator >=(const bfloat16 &op2) const { - return to_helper_t() >= op2.to_helper_t(); - } - bool operator >(const bfloat16 &op2) const { - return to_helper_t() > op2.to_helper_t(); - } - bool operator <=(const bfloat16 &op2) const { - return to_helper_t() <= op2.to_helper_t(); - } - - bfloat16 operator -() const { - bfloat16 r(*this); - r.set_signbit(!this->signbit()); - return r; - } - bfloat16 operator +() const { - return bfloat16(*this); - } - bfloat16 abs() const { - bfloat16 r(*this); - r.set_signbit(false); - return r; - } - bfloat16 copysign(const bfloat16 &op2) const { - bfloat16 r(*this); - r.set_signbit(op2.signbit()); // take the sign of op2 (copysign semantics) - return r; - } - bool signbit() const { return d < 0; } - void set_signbit(bool s) { - ac_int t(d); - t[width-1] = s; - d = t; - } - bfloat16 ceil() const { return to_helper_t().ceil(); } - bfloat16 floor() const { return to_helper_t().floor(); } - bfloat16 trunc() const { return to_helper_t().trunc(); } - bfloat16 round() const { return to_helper_t().round(); } -}; - -inline std::ostream& operator << (std::ostream &os, const ac::bfloat16 &x) { - os << x.to_float(); - return os; -} - -} - -template -template -inline ac_std_float::ac_std_float(const ac_ieee_float &f) { - *this = ac_std_float(f.to_ac_std_float()); -} - -template -inline ac_std_float::ac_std_float(const ac::bfloat16 &f) { - *this = ac_std_float(f.to_ac_std_float()); -} - -template -inline ac_ieee_float::ac_ieee_float(const ac::bfloat16 &f) { - *this = ac_ieee_float(f.to_ac_std_float()); -} - -typedef ac_ieee_float ac_ieee_float16; -typedef ac_ieee_float ac_ieee_float32; -typedef ac_ieee_float ac_ieee_float64; -typedef ac_ieee_float ac_ieee_float128; -typedef ac_ieee_float ac_ieee_float256; - -
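// A minimal usage sketch of the typedefs above and ac::bfloat16 (illustration
// only; the function name and the AC_STD_FLOAT_EXAMPLE opt-in guard are
// hypothetical, everything else is the interface defined in this header):
#ifdef AC_STD_FLOAT_EXAMPLE
inline bool ac_ieee_float_usage_sketch() {
  ac_ieee_float32 x(1.5f), y(0.25f);     // bit-accurate IEEE-754 binary32 values
  ac_ieee_float32 z = x * y + x;         // operators evaluate via helper_t
  bool exact = (z.to_float() == 1.875f); // every intermediate is exactly representable
  ac::bfloat16 b(3.14f);                 // keeps the top 16 bits of the binary32
                                         // encoding (see the float constructor)
  return exact && !b.isnan();            // b.to_float() widens back exactly
}
#endif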
-#ifdef __AC_NAMESPACE -} -#endif - -// Global functions for ac_ieee_float -namespace std { -#ifdef __AC_NAMESPACE -using namespace __AC_NAMESPACE; -#endif -template -inline ac_ieee_float abs(const ac_ieee_float &x) { return x.abs(); } -template -inline ac_ieee_float fabs(const ac_ieee_float &x) { return x.abs(); } - -template -inline ac_ieee_float copysign(const ac_ieee_float &x, const ac_ieee_float &y) { return x.copysign(y); } - -template -inline int fpclassify(const ac_ieee_float &x) { return x.fpclassify(); } -template -inline bool isfinite(const ac_ieee_float &x) { return x.isfinite(); } -template -inline bool isnormal(const ac_ieee_float &x) { return x.isnormal(); } -template -inline bool isinf(const ac_ieee_float &x) { return x.isinf(); } -template -inline bool isnan(const ac_ieee_float &x) { return x.isnan(); } - -// Don't do "long double" versions since they are 80 bits, an extended-precision format -// TODO: fmod, fmodf, fmodl -// TODO: fmod, remainder, remquo, fma, fmax, fmin, fdim -// remainder(x,y), x - n*y, where n = x/y rounded to the nearest integer (RND_CONV) -// remquo(x,y, int *quo), returns same as remainder, unclear what quo is, also NaN, inf etc -// fmax, fmin: if one number is NaN, the other is returned -// fdim(x,y) returns max(x-y,0), if x or y is NaN, a NaN is returned, if result overflows, HUGE_VAL is returned -// TODO: ceil, floor, trunc, round, lround, nearbyint, rint, lrint, llround, llrint -// if x is +0, -0, NaN or Inf, x is returned -// ceil(x), floor(x), trunc(x) -// round(x) : RND_INF -// nearbyint: depends on rounding mode -// rint, same as nearbyint, but may raise inexact exception (FE_INEXACT) -// TODO: frexp, ldexp, modf, nextafter, nexttoward, copysign -// modf(x, *iptr), modff break into integral (*iptr) and fractional (returned) values, -// Don't cause exception: isgreater, isgreaterequal, isless, islessequal, islessgreater, isunordered -// isunordered: x or y is NaN -template -inline bool signbit(const ac_ieee_float &x) { return x.signbit(); } - -// Global functions for bfloat16 -inline bool signbit(const ac::bfloat16 &x) { return x.signbit(); } - -inline int fpclassify(const ac::bfloat16 &x) { return x.fpclassify(); } -inline bool isfinite(const ac::bfloat16 &x) { return x.isfinite(); } -inline bool isnormal(const ac::bfloat16 &x) { return x.isnormal(); } -inline bool isinf(const ac::bfloat16 &x) { return x.isinf(); } -inline bool isnan(const ac::bfloat16 &x) { return x.isnan(); } -} - -#undef __AC_DATA_PRIVATE -#undef AC_STD_FLOAT_FX_DIV_OVERRIDE - -#endif diff --git a/hls4ml/hls4ml/templates/quartus/ac_types/stream.h b/hls4ml/hls4ml/templates/quartus/ac_types/stream.h deleted file mode 100644 index b19ad74..0000000 --- a/hls4ml/hls4ml/templates/quartus/ac_types/stream.h +++ /dev/null @@ -1,36 +0,0 @@ -#ifndef NNET_STREAM_H -#define NNET_STREAM_H - -#include <deque> - -namespace nnet { - -/* -* A struct with the same high-level functionality as Intel's HLS ihc::stream -* This struct is used during GCC compilation / hls4ml model.predict(...)
-* This is because GCC does not have access to HLS source files (ihc::stream) -* Software-wise, this struct behaves like a first-in, first-out (FIFO) buffer -* However, it cannot be used for HLS synthesis, since it uses dynamic memory allocation (deque) -*/ -template <typename T> -struct stream { - private: - std::deque<T> _data; - - public: - stream() {} - - T read() { - T element = _data.front(); - _data.pop_front(); - return element; - } - - void write(const T& element) { - _data.push_back(element); - } -}; - -} - -#endif \ No newline at end of file diff --git a/hls4ml/hls4ml/templates/quartus/build_lib.sh b/hls4ml/hls4ml/templates/quartus/build_lib.sh deleted file mode 100755 index 02e92a1..0000000 --- a/hls4ml/hls4ml/templates/quartus/build_lib.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash - -CC=g++ -if [[ "$OSTYPE" == "linux-gnu" ]]; then - CFLAGS="-O3 -fPIC -std=c++11 -fno-gnu-unique" -elif [[ "$OSTYPE" == "darwin"* ]]; then - CFLAGS="-O3 -fPIC -std=c++11" -fi -LDFLAGS= -INCFLAGS="-Ifirmware/ac_types/ -Ifirmware/ap_types/" -PROJECT=myproject -LIB_STAMP=mystamp - -${CC} ${CFLAGS} ${INCFLAGS} -c firmware/${PROJECT}.cpp -o ${PROJECT}.o -${CC} ${CFLAGS} ${INCFLAGS} -c ${PROJECT}_bridge.cpp -o ${PROJECT}_bridge.o -${CC} ${CFLAGS} ${INCFLAGS} -shared ${PROJECT}.o ${PROJECT}_bridge.o -o firmware/${PROJECT}-${LIB_STAMP}.so -rm -f *.o diff --git a/hls4ml/hls4ml/templates/quartus/firmware/defines.h b/hls4ml/hls4ml/templates/quartus/firmware/defines.h deleted file mode 100644 index c3fe4ec..0000000 --- a/hls4ml/hls4ml/templates/quartus/firmware/defines.h +++ /dev/null @@ -1,47 +0,0 @@ -#ifndef DEFINES_H_ -#define DEFINES_H_ - -/* - * Intel HLS makes use of three streaming interfaces: - * (1) stream_in - used as the main input to a component - * (2) stream_out - used as the main output of a component - * (3) stream - allows both reading and writing; used for inter-component connections - * ihc::stream has an implicitly deleted constructor and therefore cannot be used as the output of a function/component - * Therefore, variables of type 'stream' are always passed by reference - */ - -#ifndef __INTELFPGA_COMPILER__ - -#include "ac_fixed.h" -#include "ac_int.h" -#define hls_register - -#include "stream.h" -template <typename T> using stream = nnet::stream<T>; -template <typename T> using stream_in = nnet::stream<T>; -template <typename T> using stream_out = nnet::stream<T>; - -#else - -#include "HLS/ac_fixed.h" -#include "HLS/ac_int.h" -#include "HLS/hls.h" - -template <typename T> using stream = ihc::stream<T>; -template <typename T> using stream_in = ihc::stream_in<T>; -template <typename T> using stream_out = ihc::stream_out<T>; - -#endif - -// Include nnet::array - a custom array-like struct, mainly used with io_stream -#include "nnet_utils/nnet_types.h" - -// hls-fpga-machine-learning insert numbers - -// hls-fpga-machine-learning insert layer-precision - -#define DIV_ROUNDUP(n, d) ((n + d - 1) / d) -#define MIN(n, d) (n > d ? d : n) -#define MAX(n, d) (n < d ?
d : n) - -#endif diff --git a/hls4ml/hls4ml/templates/quartus/firmware/myproject.cpp b/hls4ml/hls4ml/templates/quartus/firmware/myproject.cpp deleted file mode 100644 index acdde09..0000000 --- a/hls4ml/hls4ml/templates/quartus/firmware/myproject.cpp +++ /dev/null @@ -1,48 +0,0 @@ -#include "myproject.h" -#include "parameters.h" - -// hls-fpga-machine-learning insert weights - -/* - * Intel HLS requires that all 'stream' types are: - * (1) Passed by reference to the top-level entity or - * (2) Declared as global variables, outside of the main function - * Therefore, layer inputs/outputs (connections between individual layers) are declared here - */ -// hls-fpga-machine-learning insert inter-task streams - -#ifndef __INTELFPGA_COMPILER__ -/* -* The top-level function used during GCC compilation / hls4ml.predict(...) goes here -* An important distinction is made between io_stream and io_parallel: -* (1) io_parallel: - - Top-level function takes a struct containing an array as function argument - - Returns a struct containing an array - the prediction - (2) io_stream: - - Top-level function is 'void' - no return value - - Instead, both the input and output are passed by reference - - This is due to the HLS Streaming Interfaces; stream cannot be copied (implicitly deleted copy constructor) -* This distinction is handled in quartus_writer.py -*/ -// hls-fpga-machine-learning instantiate GCC top-level -#else -// Maximum initiation interval, concurrency and frequency for HLS synthesis are defined here -// hls-fpga-machine-learning insert cpragmas - -/* - * The top-level function used during HLS Synthesis goes here - * In a similar manner to GCC, there is a distinction between io_stream & io_parallel - */ -// hls-fpga-machine-learning instantiate HLS top-level -#endif -// If using io_parallel, the output needs to be initialised and returned at the end of this function -// If using io_stream, no output is initialised, as it is passed by reference to the top-level function -// hls-fpga-machine-learning initialize input/output - -// **************************************** -// NETWORK INSTANTIATION -// **************************************** - -// hls-fpga-machine-learning insert layers - -// hls-fpga-machine-learning return diff --git a/hls4ml/hls4ml/templates/quartus/firmware/myproject.h b/hls4ml/hls4ml/templates/quartus/firmware/myproject.h deleted file mode 100644 index d0f577d..0000000 --- a/hls4ml/hls4ml/templates/quartus/firmware/myproject.h +++ /dev/null @@ -1,48 +0,0 @@ -#ifndef MYPROJECT_H_ -#define MYPROJECT_H_ - -#ifndef __INTELFPGA_COMPILER__ -#include "ac_fixed.h" -#include "ac_int.h" -#define hls_register -#else -#include "HLS/ac_fixed.h" -#include "HLS/ac_int.h" -#include "HLS/hls.h" -#endif - -// Streams are explicitly defined in defines.h, which is included for parameters.h -// Defining them again in this file will cause compile-time errors -#include "defines.h" - -// If using io_parallel, inputs and outputs need to be initialised before calling the top-level function -// If using io_stream, no inputs/outputs are initialised, as they are passed by reference to the top-level function -// hls-fpga-machine-learning insert inputs -// hls-fpga-machine-learning insert outputs - -#ifndef __INTELFPGA_COMPILER__ -/* -* The top-level function used during GCC compilation / hls4ml.predict(...)
goes here -* An important distinction is made between io_stream and io_parallel: -* (1) io_parallel: - - Top-level function takes a struct containing an array as function argument - - Returns a struct containing an array - the prediction - (2) io_stream: - - Top-level function is 'void' - no return value - - Instead, both the input and output are passed by reference - - This is due to the HLS Streaming Interfaces; stream cannot be copied (implicitly deleted copy constructor) -* This distinction is handled in quartus_writer.py -*/ -// hls-fpga-machine-learning instantiate GCC top-level -#else -// Maximum initiation interval, concurrency and frequency for HLS synthesis are defined here -// hls-fpga-machine-learning insert cpragmas - -/* - * The top-level function used during HLS Synthesis goes here - * In a similar manner to GCC, there is a distinction between io_stream & io_parallel - */ -// hls-fpga-machine-learning instantiate HLS top-level -#endif - -#endif diff --git a/hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_activation.h b/hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_activation.h deleted file mode 100644 index a70096e..0000000 --- a/hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_activation.h +++ /dev/null @@ -1,516 +0,0 @@ -#ifndef NNET_ACTIVATION_H_ -#define NNET_ACTIVATION_H_ - -#include "nnet_common.h" - -namespace nnet { - -struct activ_config { - // IO size - static const unsigned n_in = 10; - - // Internal info - static const unsigned table_size = 512; - - // Resource reuse info - static const unsigned io_type = io_parallel; - static const unsigned reuse_factor = 1; - - // Internal data type definitions - typedef ac_fixed<16, 8> table_t; -}; - -// ************************************************* -// LINEAR Activation -- See Issue 53 -// ************************************************* -template void linear(data_T data[CONFIG_T::n_in], res_T res[CONFIG_T::n_in]) { - #pragma unroll - for (int ii = 0; ii < CONFIG_T::n_in; ii++) { - data_T datareg = data[ii]; - res[ii] = datareg; - } -} - -// ************************************************* -// RELU Activation -// ************************************************* -template void relu(data_T data[CONFIG_T::n_in], res_T res[CONFIG_T::n_in]) { - #pragma unroll - for (int ii = 0; ii < CONFIG_T::n_in; ii++) { - data_T datareg = data[ii]; - if (datareg > 0) - res[ii] = datareg; - else - res[ii] = 0; - } -} - -template -void relu_max(data_T data[CONFIG_T::n_in], res_T res[CONFIG_T::n_in]) { - #pragma unroll - for (int ii = 0; ii < CONFIG_T::n_in; ii++) { - data_T datareg = data[ii]; - if (datareg < 0) - res[ii] = 0; - else if (datareg > MAX_INT) - res[ii] = MAX_INT; - else - res[ii] = datareg; - } -} - -template void relu6(data_T data[CONFIG_T::n_in], res_T res[CONFIG_T::n_in]) { - relu_max(data, res); -} - -template void relu1(data_T data[CONFIG_T::n_in], res_T res[CONFIG_T::n_in]) { - relu_max(data, res); -} - -// ************************************************* -// Sigmoid Activation -// ************************************************* -template -void sigmoid(data_T data[CONFIG_T::n_in], res_T res[CONFIG_T::n_in]) { - static const int MAX_VALUE = 8; -#include "activation_tables/sigmoid_table.tb" - #pragma unroll - for (int ii = 0; ii < CONFIG_T::n_in; ii++) { - data_T absoluteValue hls_register; - res_T temp2 hls_register; - if (data[ii] < 0) { - absoluteValue = -data[ii]; - } else { - absoluteValue = data[ii]; - } - int index = (absoluteValue * (CONFIG_T::table_size / MAX_VALUE)).to_int(); - if
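// The sigmoid table spans [0, MAX_VALUE): index = |x| * table_size / MAX_VALUE.
// Inputs at or beyond MAX_VALUE saturate to the last entry (sigmoid(8) is
// already ~0.9997), and negative inputs reuse the same table through the
// identity sigmoid(-x) = 1 - sigmoid(x), as the branch below shows.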
(absoluteValue > MAX_VALUE) - index = CONFIG_T::table_size - 1; - temp2 = (res_T)sigmoid_table[index]; - if (data[ii] < 0) { - res[ii] = 1 - temp2; - } else { - res[ii] = temp2; - } - } -} - -// ************************************************* -// Softmax Activation -// ************************************************* - -enum class softmax_implementation { latency = 0, legacy = 1, stable = 2, argmax = 3 }; - -template inline unsigned softmax_stable_idx_from_real_val(const data_T x) { - // Number of address bits for table - static constexpr int N = ceillog2(CONFIG_T::table_size); - - // Slice the top N bits of the input - hls_register ac_int y = x.template slc(x.width - N - 1); - // If x is the most negative value, the slice will be 0, so we need to set the 0-th bit to ensure correctness - if (x != 0 && y == 0) - y[0] = 1; - return y.to_uint(); -} - -template inline unsigned softmax_latency_idx_from_real_val(const data_T x) { - // Number of address bits for table - static constexpr int N = ceillog2(CONFIG_T::table_size); - - // Slice the top N bits of the input - hls_register ac_int y = x.template slc(x.width - N); - return y.to_uint(); -} - -template -void softmax_stable(data_T data[CONFIG_T::n_in], res_T res[CONFIG_T::n_in]) { -// Look-up tables -#include "activation_tables/exp_table.tb" -#include "activation_tables/invert_table.tb" - - // Find maximum - Op_max op_max; - hls_register data_T x_max = reduce>(data, op_max); - - // For the diffs, use the same type as the input but force rounding and saturation - hls_register ac_fixed d_xi_xmax[CONFIG_T::n_in]; - #pragma unroll - for (unsigned i = 0; i < CONFIG_T::n_in; i++) { - d_xi_xmax[i] = data[i] - x_max; - } - - // Calculate all the e^x's - hls_register typename CONFIG_T::exp_table_t exp_res[CONFIG_T::n_in]; - #pragma unroll - for (unsigned i = 0; i < CONFIG_T::n_in; i++) { - exp_res[i] = exp_table[softmax_stable_idx_from_real_val(d_xi_xmax[i])]; - } - - // Explicitly sum previously calculated exponentials with an adder tree - Op_add op_add; - hls_register typename CONFIG_T::exp_table_t exp_sum = - reduce>(exp_res, op_add); - - // Multiply previously calculated exponentials with the reciprocal of the sum - hls_register typename CONFIG_T::inv_table_t inv_exp_sum = - invert_table[softmax_stable_idx_from_real_val(exp_sum)]; - #pragma unroll - for (unsigned i = 0; i < CONFIG_T::n_in; i++) { - res[i] = exp_res[i] * inv_exp_sum; - } -} - -// TODO - Improve accuracy -template -void softmax_latency(data_T data[CONFIG_T::n_in], res_T res[CONFIG_T::n_in]) { -#include "activation_tables/exp_table_latency.tb" -#include "activation_tables/invert_table_latency.tb" - - // Calculate all the e^x's - hls_register typename CONFIG_T::exp_table_t exp_res[CONFIG_T::n_in]; - #pragma unroll - for (unsigned i = 0; i < CONFIG_T::n_in; i++) { - exp_res[i] = exp_table_latency[softmax_latency_idx_from_real_val(data[i])]; - } - - // Explicitly sum the results with an adder tree.
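// reduce<> (defined alongside Op_add in nnet_common.h, included at the top of
// this file) evaluates the sum as a balanced binary tree, so the adder depth
// grows as O(log2 n_in) rather than the (n_in - 1)-deep chain a sequential
// accumulation would produce.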
- Op_add op_add; - hls_register typename CONFIG_T::exp_table_t exp_sum = - reduce>(exp_res, op_add); - - // Multiply previously calculated exponentials with the reciprocal of the sum - hls_register typename CONFIG_T::inv_table_t inv_exp_sum = - invert_table_latency[softmax_latency_idx_from_real_val(exp_sum)]; - #pragma unroll - for (unsigned i = 0; i < CONFIG_T::n_in; i++) { - res[i] = exp_res[i] * inv_exp_sum; - } -} - -template -void softmax_legacy(data_T data[CONFIG_T::n_in], res_T res[CONFIG_T::n_in]) { -#include "activation_tables/exp_table_legacy.tb" -#include "activation_tables/invert_table_legacy.tb" - - hls_register int data_round[CONFIG_T::n_in]; -New_loop: - #pragma unroll - for (int ii = 0; ii < CONFIG_T::n_in; ii++) { - data_round[ii] = (data[ii] * CONFIG_T::table_size / 16).to_int(); - } -NN_Outer: - #pragma unroll - for (int ii = 0; ii < CONFIG_T::n_in; ii++) { - typename CONFIG_T::exp_table_t exp_res_temp = 0; - NN_Inner: - #pragma unroll - for (int jj = 0; jj < CONFIG_T::n_in; jj++) { - if (ii == jj) { - exp_res_temp += 1; - } else { - int _data_cache = (data_round[jj] - data_round[ii]); - int index = _data_cache + 8 * CONFIG_T::table_size / 16; - - if (index < 0) - index = 0; - if (index > CONFIG_T::table_size - 1) - index = CONFIG_T::table_size - 1; - - typename CONFIG_T::exp_table_t temp_exp = exp_table_legacy[index]; - exp_res_temp += temp_exp; - } - } - int exp_res_index = (exp_res_temp * CONFIG_T::table_size / 64).to_int(); - if (exp_res_index < 0) - exp_res_index = 0; - if (exp_res_index > CONFIG_T::table_size - 1) - exp_res_index = CONFIG_T::table_size - 1; - res[ii] = invert_table_legacy[exp_res_index]; - } -} - -template -void softmax_argmax(data_T data[CONFIG_T::n_in], res_T res[CONFIG_T::n_in]) { - #pragma unroll - for (int i = 0; i < CONFIG_T::n_in; i++) { - res[i] = (res_T)0; - } - - hls_register data_T maximum = data[0]; - hls_register int idx = 0; - - #pragma ii 1 - for (int i = 1; i < CONFIG_T::n_in; i++) { - if (data[i] > maximum) { - maximum = data[i]; - idx = i; - } - } - - res[idx] = (res_T)1; -} - -template -inline void softmax(data_T data[CONFIG_T::n_in], res_T res[CONFIG_T::n_in]) { - switch (CONFIG_T::implementation) { - case softmax_implementation::stable: - softmax_stable(data, res); - break; - case softmax_implementation::latency: - softmax_latency(data, res); - break; - case softmax_implementation::legacy: - softmax_legacy(data, res); - break; - default: - softmax_stable(data, res); - break; - case softmax_implementation::argmax: - softmax_argmax(data, res); - break; - } -} - -// ************************************************* -// TanH Activation -// ************************************************* -template -void dense_tanh(data_T data[CONFIG_T::n_in], res_T res[CONFIG_T::n_in]) { - static const int MAX_VALUE = 4; -// Initialize the lookup table -#include "activation_tables/tanh_table.tb" - // Index into the lookup table based on data - #pragma unroll - for (int ii = 0; ii < CONFIG_T::n_in; ii++) { - data_T temp hls_register; - res_T temp2 hls_register; - if (data[ii] < 0) { - temp = -data[ii]; - } else { - temp = data[ii]; - } - ac_int<16> index = (temp * (CONFIG_T::table_size / MAX_VALUE)).to_int(); - if (temp > MAX_VALUE) - index = CONFIG_T::table_size - 1; - temp2 = (res_T)tanh_table[index]; - if (data[ii] < 0) { - res[ii] = -temp2; - } else { - res[ii] = temp2; - } - } -} - -// ************************************************* -// Hard sigmoid Activation -// ************************************************* -template -void
hard_sigmoid(data_T data[CONFIG_T::n_in], res_T res[CONFIG_T::n_in]) { - #pragma unroll - for (int ii = 0; ii < CONFIG_T::n_in; ii++) { - auto datareg = CONFIG_T::slope * data[ii] + CONFIG_T::shift; - if (datareg > 1) - datareg = 1; - else if (datareg < 0) - datareg = 0; - res[ii] = datareg; - } -} - -template -void hard_tanh(data_T data[CONFIG_T::n_in], res_T res[CONFIG_T::n_in]) { - #pragma unroll - for (int ii = 0; ii < CONFIG_T::n_in; ii++) { - auto sigmoid = CONFIG_T::slope * data[ii] + CONFIG_T::shift; - if (sigmoid > 1) - sigmoid = 1; - else if (sigmoid < 0) - sigmoid = 0; - res[ii] = 2 * sigmoid - 1; - } -} - -// ************************************************* -// Leaky RELU Activation -// ************************************************* -template -void leaky_relu(data_T data[CONFIG_T::n_in], data_T alpha, res_T res[CONFIG_T::n_in]) { - #pragma unroll - for (int ii = 0; ii < CONFIG_T::n_in; ii++) { - data_T datareg = data[ii]; - if (datareg > 0) - res[ii] = datareg; - else - res[ii] = alpha * datareg; - } -} - -// ************************************************* -// Thresholded RELU Activation -// ************************************************* -template -void thresholded_relu(data_T data[CONFIG_T::n_in], data_T theta, res_T res[CONFIG_T::n_in]) { - #pragma unroll - for (int ii = 0; ii < CONFIG_T::n_in; ii++) { - data_T datareg = data[ii]; - if (datareg > theta) - res[ii] = datareg; - else - res[ii] = 0; - } -} - -// ************************************************* -// Softplus Activation -// ************************************************* -template -void softplus(data_T data[CONFIG_T::n_in], res_T res[CONFIG_T::n_in]) { -// Initialize the lookup table -#include "activation_tables/softplus_table.tb" - // Index into the lookup table based on data - #pragma unroll - for (int ii = 0; ii < CONFIG_T::n_in; ii++) { - ac_int<16> data_round = (data[ii] * CONFIG_T::table_size / 16).to_int(); - ac_int<16> index = data_round + 8 * CONFIG_T::table_size / 16; - if (index < 0) - index = 0; - if (index > CONFIG_T::table_size - 1) - index = CONFIG_T::table_size - 1; - res[ii] = (res_T)softplus_table[index]; - } -} - -// ************************************************* -// Softsign Activation -// ************************************************* -template -void softsign(data_T data[CONFIG_T::n_in], res_T res[CONFIG_T::n_in]) { - static const int MAX_VALUE = 8; -// Initialize the lookup table -#include "activation_tables/softsign_table.tb" - - // Index into the lookup table based on data - #pragma unroll - for (int ii = 0; ii < CONFIG_T::n_in; ii++) { - data_T temp hls_register; - res_T temp2 hls_register; - if (data[ii] < 0) { - temp = -data[ii]; - } else { - temp = data[ii]; - } - ac_int<16> index = (temp * CONFIG_T::table_size / MAX_VALUE).to_int(); - if (temp > MAX_VALUE) - index = CONFIG_T::table_size - 1; - temp2 = (res_T)softsign_table[index]; - if (data[ii] < 0) { - res[ii] = -temp2; - } else { - res[ii] = temp2; - } - } -} - -// ************************************************* -// ELU Activation -// ************************************************* -template -void elu(data_T data[CONFIG_T::n_in], const res_T alpha, res_T res[CONFIG_T::n_in]) { -// Initialize the lookup table -#include "activation_tables/elu_table.tb" - // Index into the lookup table based on data - #pragma unroll - for (int ii = 0; ii < CONFIG_T::n_in; ii++) { - data_T datareg = data[ii]; - if (datareg >= 0) { - res[ii] = datareg; - } else { - ac_int<16> index = (datareg * CONFIG_T::table_size / -8).to_int(); - 
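// datareg is negative on this branch, so dividing by -8 yields a positive
// index: the table covers ELU inputs in (-8, 0], and anything below -8 clamps
// to the last entry, where exp(x) - 1 has effectively converged to -1.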
if (index > CONFIG_T::table_size - 1) - index = CONFIG_T::table_size - 1; - res[ii] = alpha * elu_table[index]; - } - } -} - -template void elu(data_T data[CONFIG_T::n_in], res_T res[CONFIG_T::n_in]) { - elu(data, 1.0, res); -} - -// ************************************************* -// SELU Activation -// ************************************************* -template void selu(data_T data[CONFIG_T::n_in], res_T res[CONFIG_T::n_in]) { -// Initialize the lookup table -#include "activation_tables/selu_table.tb" - // Index into the lookup table based on data - #pragma unroll - for (int ii = 0; ii < CONFIG_T::n_in; ii++) { - data_T datareg = data[ii]; - if (datareg >= 0) { - res[ii] = res_T(1.0507009873554804934193349852946) * datareg; - } else { - ac_int<16> index = (datareg * CONFIG_T::table_size / -8).to_int(); - if (index > CONFIG_T::table_size - 1) - index = CONFIG_T::table_size - 1; - res[ii] = selu_table[index]; - } - } -} - -// ************************************************* -// PReLU Activation -// ************************************************* -template -void prelu(data_T data[CONFIG_T::n_in], const data_T alpha[CONFIG_T::n_in], res_T res[CONFIG_T::n_in]) { - #pragma unroll - for (int ii = 0; ii < CONFIG_T::n_in; ii++) { - data_T datareg = data[ii]; - if (datareg > 0) - res[ii] = datareg; - else - res[ii] = alpha[ii] * datareg; - } -} - -// ************************************************* -// Binary TanH Activation -// ************************************************* -template -void binary_tanh(data_T data[CONFIG_T::n_in], res_T res[CONFIG_T::n_in]) { - #pragma unroll - for (int ii = 0; ii < CONFIG_T::n_in; ii++) { - data_T datareg = data[ii]; - res_T cache; - if (datareg > 0) - cache = 1; - else - cache = -1; - - res[ii] = (res_T)cache; - } -} - -// ************************************************* -// Ternary TanH Activation -// ************************************************* -template -void ternary_tanh(data_T data[CONFIG_T::n_in], res_T res[CONFIG_T::n_in]) { - #pragma unroll - for (int ii = 0; ii < CONFIG_T::n_in; ii++) { - data_T datareg = 2 * data[ii]; - res_T cache; - if (datareg > 1) - cache = 1; - else if (datareg > -1 && datareg <= 1) - cache = 0; - else - cache = -1; - - res[ii] = (res_T)cache; - } -} - -} // namespace nnet - -#endif diff --git a/hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_activation_stream.h b/hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_activation_stream.h deleted file mode 100644 index f0562a9..0000000 --- a/hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_activation_stream.h +++ /dev/null @@ -1,676 +0,0 @@ -#ifndef NNET_ACTIVATION_STREAM_H_ -#define NNET_ACTIVATION_STREAM_H_ - -#include "nnet_common.h" -#include "nnet_types.h" - -namespace nnet { - -// ************************************************* -// Linear Activation -// ************************************************* -template void linear(stream &data, stream &res) { -LinearActLoop: - #pragma ii 1 - for (int i = 0; i < CONFIG_T::n_in / res_T::size; i++) { - data_T in_data = data.read(); - res_T out_data; - - LinearPackLoop: - #pragma unroll - for (int j = 0; j < res_T::size; j++) { - out_data[j] = in_data[j]; - } - - res.write(out_data); - } -} - -// ************************************************* -// ReLU Activation -// ************************************************* -template void relu(stream &data, stream &res) { -ReLUActLoop: - #pragma ii 1 - for (int i = 0; i < CONFIG_T::n_in / res_T::size; i++) { - data_T in_data = data.read(); - res_T 
out_data; - - ReLUPackLoop: - #pragma unroll - for (int j = 0; j < res_T::size; j++) { - if (in_data[j] > 0) - out_data[j] = in_data[j]; - else - out_data[j] = 0; - } - - res.write(out_data); - } -} - -// ************************************************* -// Leaky RELU Activation -// ************************************************* -template -void leaky_relu(stream &data, const typename data_T::value_type alpha, stream &res) { - constexpr unsigned multiplier_limit = DIV_ROUNDUP(data_T::size, CONFIG_T::reuse_factor); - constexpr unsigned pipeline = data_T::size / multiplier_limit; - -LeakyReLUActLoop: - #pragma ii pipeline - for (int i = 0; i < CONFIG_T::n_in / res_T::size; i++) { - data_T in_data = data.read(); - res_T out_data; - - LeakyReLUPackLoop: - #pragma unroll - for (int j = 0; j < res_T::size; j++) { - if (in_data[j] > 0) - out_data[j] = in_data[j]; - else - out_data[j] = alpha * in_data[j]; - } - - res.write(out_data); - } -} - -// ************************************************* -// Thresholded RELU Activation -// ************************************************* -template -void thresholded_relu(stream &data, const typename data_T::value_type theta, stream &res) { -ThresholdedReLUActLoop: - #pragma ii 1 - for (int i = 0; i < CONFIG_T::n_in / res_T::size; i++) { - data_T in_data = data.read(); - res_T out_data; - - ThresholdedReLUPackLoop: - #pragma unroll - for (int j = 0; j < res_T::size; j++) { - if (in_data[j] > theta) - out_data[j] = in_data[j]; - else - out_data[j] = 0; - } - - res.write(out_data); - } -} - -// ************************************************* -// ELU Activation -// ************************************************* -template -void elu(stream &data, const typename data_T::value_type alpha, stream &res) { -#include "activation_tables/elu_table.tb" - - constexpr unsigned multiplier_limit = DIV_ROUNDUP(data_T::size, CONFIG_T::reuse_factor); - constexpr unsigned pipeline = data_T::size / multiplier_limit; - -EluActLoop: - #pragma ii pipeline - for (int i = 0; i < CONFIG_T::n_in / res_T::size; i++) { - data_T in_data = data.read(); - res_T out_data; - - EluPackLoop: - #pragma unroll - for (int j = 0; j < res_T::size; j++) { - hls_register typename data_T::value_type datareg = in_data[j]; - if (datareg >= 0) { - out_data[j] = datareg; - } else { - int index = (datareg * CONFIG_T::table_size / -8).to_int(); - if (index > CONFIG_T::table_size - 1) - index = CONFIG_T::table_size - 1; - out_data[j] = alpha * elu_table[index]; - } - } - - res.write(out_data); - } -} - -template void elu(stream &data, stream &res) { - elu(data, 1.0, res); -} - -// ************************************************* -// SeLU Activation -// ************************************************* -template void selu(stream &data, stream &res) { -#include "activation_tables/selu_table.tb" - -SeluActLoop: - #pragma ii 1 - for (int i = 0; i < CONFIG_T::n_in / res_T::size; i++) { - data_T in_data = data.read(); - res_T out_data; - - SeluPackLoop: - #pragma unroll - for (int j = 0; j < res_T::size; j++) { - hls_register typename data_T::value_type datareg = in_data[j]; - if (datareg >= 0) { - out_data[j] = typename data_T::value_type(1.0507009873554804934193349852946) * datareg; - } else { - int index = (datareg * CONFIG_T::table_size / -8).to_int(); - if (index > CONFIG_T::table_size - 1) - index = CONFIG_T::table_size - 1; - out_data[j] = selu_table[index]; - } - } - - res.write(out_data); - } -} - -// ************************************************* -// PReLU Activation -// 
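// [Editor's note - illustrative arithmetic, not part of the patch] For the
// streaming activations above, reuse_factor trades multipliers for throughput.
// With an assumed pack size data_T::size = 8 and reuse_factor = 4:
//     multiplier_limit = DIV_ROUNDUP(8, 4) = 2   // 2 shared multipliers
//     pipeline         = 8 / 2            = 4    // loop II: one pack every 4 cycles
// With reuse_factor = 1 the same pack needs 8 multipliers, but a new pack can
// enter every cycle (#pragma ii 1).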
************************************************* -template -void prelu(stream &data, const typename data_T::value_type alpha[CONFIG_T::n_in], stream &res) { - constexpr unsigned multiplier_limit = DIV_ROUNDUP(data_T::size, CONFIG_T::reuse_factor); - constexpr unsigned pipeline = data_T::size / multiplier_limit; - -PReLUActLoop: - #pragma ii pipeline - for (int i = 0; i < CONFIG_T::n_in / res_T::size; i++) { - data_T in_data = data.read(); - res_T out_data; - - PReLUPackLoop: - #pragma unroll - for (int j = 0; j < res_T::size; j++) { - if (in_data[j] > 0) - out_data[j] = in_data[j]; - else - out_data[j] = alpha[i * res_T::size + j] * in_data[j]; - } - - res.write(out_data); - } -} - -// ************************************************* -// Softplus Activation -// ************************************************* -template void softplus(stream &data, stream &res) { -#include "activation_tables/softplus_table.tb" - -SoftplusActLoop: - #pragma ii 1 - for (int i = 0; i < CONFIG_T::n_in / res_T::size; i++) { - data_T in_data = data.read(); - res_T out_data; - - SoftplusPackLoop: - #pragma unroll - for (int j = 0; j < res_T::size; j++) { - hls_register int data_round = (in_data[j] * CONFIG_T::table_size / 16).to_int(); - hls_register int index = data_round + 8 * CONFIG_T::table_size / 16; - if (index < 0) - index = 0; - else if (index > CONFIG_T::table_size - 1) - index = CONFIG_T::table_size - 1; - out_data[j] = softplus_table[index]; - } - - res.write(out_data); - } -} - -// ************************************************* -// Softsign Activation -// ************************************************* -template void softsign(stream &data, stream &res) { -#include "activation_tables/softsign_table.tb" - - static const int MAX_VALUE = 8; - -SoftsignActLoop: - #pragma ii 1 - for (int i = 0; i < CONFIG_T::n_in / res_T::size; i++) { - data_T in_data = data.read(); - res_T out_data; - - SoftsignPackLoop: - #pragma unroll - for (int j = 0; j < res_T::size; j++) { - hls_register typename data_T::value_type absValue; - ; - if (in_data[j] < 0) { - absValue = -in_data[j]; - } else { - absValue = in_data[j]; - } - ac_int<16> index = (absValue * CONFIG_T::table_size / MAX_VALUE).to_int(); - if (absValue > MAX_VALUE) - index = CONFIG_T::table_size - 1; - if (in_data[j] < 0) { - out_data[j] = -(typename res_T::value_type)softsign_table[index]; - } else { - out_data[j] = (typename res_T::value_type)softsign_table[index]; - } - } - - res.write(out_data); - } -} - -// ************************************************* -// Softmax Activation -// ************************************************* - -template void softmax_stable(stream &data, stream &res) { -#include "activation_tables/exp_table.tb" -#include "activation_tables/invert_table.tb" - - constexpr unsigned multiplier_limit = DIV_ROUNDUP(data_T::size, CONFIG_T::reuse_factor); - constexpr unsigned pipeline = data_T::size / multiplier_limit; - - hls_register typename data_T::value_type data_array[data_T::size]; - -SoftmaxArrayLoop: - #pragma ii pipeline - for (unsigned i = 0; i < CONFIG_T::n_in / data_T::size; i++) { - data_T in_pack = data.read(); - - SoftmaxArrayPackLoop: - #pragma unroll - for (unsigned j = 0; j < data_T::size; j++) { - data_array[j] = in_pack[j]; - } - - // Find the max and compute all delta(x_i, x_max) - Op_max op_max; - hls_register typename data_T::value_type x_max = - reduce>(data_array, op_max); - - // For the diffs, use the same type as the input but force rounding and saturation - hls_register ac_fixed - d_xi_xmax[data_T::size]; - 
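// [Editor's note - float reference, not part of the patch] softmax_stable
// subtracts the running maximum before exponentiation, so every LUT argument
// d = x_i - x_max is <= 0 and the exp table only needs a bounded negative
// range. The same math in plain C++ (editor's sketch):
//
//     #include <algorithm>
//     #include <cmath>
//     void softmax_ref(const double *x, double *y, int n) {
//         double x_max = *std::max_element(x, x + n), sum = 0.0;
//         for (int i = 0; i < n; i++)
//             sum += (y[i] = std::exp(x[i] - x_max)); // d_xi_xmax <= 0
//         for (int i = 0; i < n; i++)
//             y[i] /= sum; // the hardware multiplies by invert_table[sum] instead
//     }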
#pragma unroll - for (unsigned j = 0; j < data_T::size; j++) { - d_xi_xmax[j] = data_array[j] - x_max; - } - - // Calculate all the e^x's - hls_register typename CONFIG_T::exp_table_t exp_res[data_T::size]; - #pragma unroll - for (unsigned j = 0; j < data_T::size; j++) { - exp_res[j] = exp_table[softmax_stable_idx_from_real_val(d_xi_xmax[j])]; - } - - // Explicitly sum the results with an adder tree. - // Rounding & Saturation mode, which improve accuracy, prevent Vivado from expression balancing - Op_add op_add; - hls_register typename CONFIG_T::exp_table_t exp_sum = - reduce>(exp_res, op_add); - - hls_register typename CONFIG_T::inv_table_t inv_exp_sum = - invert_table[softmax_stable_idx_from_real_val(exp_sum)]; - res_T out_pack; - - SoftmaxInvPackLoop: - #pragma unroll - for (unsigned j = 0; j < res_T::size; j++) { - - // TODO - Find Quartus-equivalent pragma - // #pragma HLS ALLOCATION instances=mul limit=multiplier_limit operation - - out_pack[j] = exp_res[j] * inv_exp_sum; - } - - res.write(out_pack); - } -} - -template void softmax_latency(stream &data, stream &res) { -#include "activation_tables/exp_table_latency.tb" -#include "activation_tables/invert_table_latency.tb" - - constexpr unsigned multiplier_limit = DIV_ROUNDUP(data_T::size, CONFIG_T::reuse_factor); - constexpr unsigned pipeline = data_T::size / multiplier_limit; - - // Calculate all the e^x's - hls_register typename CONFIG_T::exp_table_t exp_res[data_T::size]; - -SoftmaxExpLoop: - #pragma ii pipeline - for (unsigned i = 0; i < CONFIG_T::n_in / data_T::size; i++) { - data_T in_pack = data.read(); - - SoftmaxExpPackLoop: - #pragma unroll - for (unsigned j = 0; j < data_T::size; j++) { - exp_res[j] = - exp_table_latency[softmax_latency_idx_from_real_val(in_pack[j])]; - } - - // Explicitly sum the results with an adder tree. 
-        // Rounding & saturation modes, which improve accuracy, prevent Vivado from expression balancing
-        Op_add<typename CONFIG_T::exp_table_t> op_add;
-        hls_register typename CONFIG_T::exp_table_t exp_sum =
-            reduce<typename CONFIG_T::exp_table_t, data_T::size, Op_add<typename CONFIG_T::exp_table_t>>(exp_res, op_add);
-
-        // Multiply previously calculated exponentials with the reciprocal of the sum
-        hls_register typename CONFIG_T::inv_table_t inv_exp_sum =
-            invert_table_latency[softmax_latency_idx_from_real_val<typename CONFIG_T::exp_table_t, CONFIG_T>(exp_sum)];
-
-        res_T out_pack;
-    SoftmaxInvPackLoop:
-        #pragma unroll
-        for (unsigned j = 0; j < res_T::size; j++) {
-            // #pragma HLS ALLOCATION instances=mul limit=multiplier_limit operation
-            out_pack[j] = exp_res[j] * inv_exp_sum;
-        }
-
-        res.write(out_pack);
-    }
-}
-
-template <class data_T, class res_T, typename CONFIG_T> void softmax_legacy(stream<data_T> &data, stream<res_T> &res) {
-#include "activation_tables/exp_table_legacy.tb"
-#include "activation_tables/invert_table_legacy.tb"
-
-    // Index into the lookup table based on data for exponentials
-    hls_register typename CONFIG_T::table_t exp_res[data_T::size];
-    hls_register typename CONFIG_T::table_t exp_diff_res;
-    hls_register typename data_T::value_type data_cache[data_T::size];
-
-SoftmaxInitLoop:
-    #pragma ii 1
-    for (unsigned s = 0; s < CONFIG_T::n_in / data_T::size; s++) {
-        data_T in_pack = data.read();
-
-    SoftmaxInitPackLoop:
-        #pragma unroll
-        for (unsigned j = 0; j < data_T::size; j++) {
-            data_cache[j] = in_pack[j];
-            exp_res[j] = 0;
-        }
-
-    SoftmaxExpLoop:
-        #pragma unroll
-        for (int i = 0; i < data_T::size; i++) {
-        SoftmaxExpInner:
-            #pragma unroll
-            for (int j = 0; j < data_T::size; j++) {
-                if (i == j) {
-                    exp_diff_res = 1;
-                } else {
-                    int data_round = ((data_cache[j] - data_cache[i]) * CONFIG_T::table_size / 16).to_int();
-                    int index = data_round + 8 * CONFIG_T::table_size / 16;
-                    if (index < 0)
-                        index = 0;
-                    if (index > CONFIG_T::table_size - 1)
-                        index = CONFIG_T::table_size - 1;
-                    exp_diff_res = exp_table_legacy[index];
-                }
-                exp_res[i] += exp_diff_res;
-            }
-        }
-
-        res_T out_pack;
-    SoftmaxInvPackLoop:
-        #pragma unroll
-        for (unsigned j = 0; j < res_T::size; j++) {
-            int exp_res_index = (exp_res[j] * CONFIG_T::table_size / 64).to_int();
-            if (exp_res_index < 0)
-                exp_res_index = 0;
-            if (exp_res_index > CONFIG_T::table_size - 1)
-                exp_res_index = CONFIG_T::table_size - 1;
-            out_pack[j] = (typename res_T::value_type)invert_table_legacy[exp_res_index];
-        }
-
-        res.write(out_pack);
-    }
-}
-
-template <class data_T, class res_T, typename CONFIG_T> void softmax_argmax(stream<data_T> &data, stream<res_T> &res) {
-    #pragma ii 1
-    for (int i = 0; i < CONFIG_T::n_in / res_T::size; i++) {
-        data_T in_data = data.read();
-        res_T out_data;
-
-        // Zero-initialise the one-hot output, then set the index of the maximum to 1
-        #pragma unroll
-        for (int j = 0; j < res_T::size; j++) {
-            out_data[j] = (typename res_T::value_type)0;
-        }
-
-        hls_register typename data_T::value_type maximum = in_data[0];
-        hls_register int idx = 0;
-
-        #pragma ii 1
-        for (int j = 1; j < res_T::size; j++) {
-            if (in_data[j] > maximum) {
-                maximum = in_data[j];
-                idx = j;
-            }
-        }
-
-        out_data[idx] = (typename res_T::value_type)1;
-        res.write(out_data);
-    }
-}
-
-template <class data_T, class res_T, typename CONFIG_T> void softmax(stream<data_T> &data, stream<res_T> &res) {
-    switch (CONFIG_T::implementation) {
-    case softmax_implementation::latency:
-        softmax_latency<data_T, res_T, CONFIG_T>(data, res);
-        break;
-    case softmax_implementation::stable:
-        softmax_stable<data_T, res_T, CONFIG_T>(data, res);
-        break;
-    case softmax_implementation::legacy:
-        softmax_legacy<data_T, res_T, CONFIG_T>(data, res);
-        break;
-    case softmax_implementation::argmax:
-        softmax_argmax<data_T, res_T, CONFIG_T>(data, res);
-        break;
-    default:
-        softmax_stable<data_T, res_T, CONFIG_T>(data, res);
-        break;
-    }
-}
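// [Editor's note - hypothetical example, not part of the patch] The dispatcher
// above reads CONFIG_T::implementation at compile time. A config selecting the
// stable variant might look like this; the struct name and exact field types
// are editor assumptions modelled on the fields the functions above read.
struct softmax_config_example {
    static const unsigned n_in = 10;
    static const unsigned table_size = 1024;
    static const unsigned reuse_factor = 1;
    static const softmax_implementation implementation = softmax_implementation::stable;
    typedef ac_fixed<18, 8, true> exp_table_t; // types of the e^x and 1/x tables
    typedef ac_fixed<18, 8, true> inv_table_t;
};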
-
-// *************************************************
-// TanH Activation
-// *************************************************
-template <class data_T, class res_T, typename CONFIG_T> void dense_tanh(stream<data_T> &data, stream<res_T> &res) {
-#include "activation_tables/tanh_table.tb"
-    static const int MAX_VALUE = 4;
-
-    constexpr unsigned multiplier_limit = DIV_ROUNDUP(data_T::size, CONFIG_T::reuse_factor);
-    constexpr unsigned pipeline = data_T::size / multiplier_limit;
-
-TanHActLoop:
-    #pragma ii pipeline
-    for (int i = 0; i < CONFIG_T::n_in / res_T::size; i++) {
-
-        data_T in_data = data.read();
-        res_T out_data;
-
-    TanHPackLoop:
-        #pragma unroll
-        for (int j = 0; j < res_T::size; j++) {
-            hls_register typename data_T::value_type absoluteValue;
-
-            if (in_data[j] < 0)
-                absoluteValue = (-1) * in_data[j];
-            else
-                absoluteValue = in_data[j];
-
-            hls_register int index;
-            if (absoluteValue <= MAX_VALUE)
-                index = (absoluteValue * (CONFIG_T::table_size / MAX_VALUE)).to_int();
-            else
-                index = CONFIG_T::table_size - 1;
-
-            if (in_data[j] > 0)
-                out_data[j] = tanh_table[index];
-            else
-                out_data[j] = -tanh_table[index];
-        }
-
-        res.write(out_data);
-    }
-}
-
-// *************************************************
-// Sigmoid Activation
-// *************************************************
-template <class data_T, class res_T, typename CONFIG_T> void sigmoid(stream<data_T> &data, stream<res_T> &res) {
-#include "activation_tables/sigmoid_table.tb"
-    static const int MAX_VALUE = 8;
-
-    constexpr unsigned multiplier_limit = DIV_ROUNDUP(data_T::size, CONFIG_T::reuse_factor);
-    constexpr unsigned pipeline = data_T::size / multiplier_limit;
-
-SigmoidActLoop:
-    #pragma ii pipeline
-    for (int i = 0; i < CONFIG_T::n_in / res_T::size; i++) {
-        data_T in_data = data.read();
-        res_T out_data;
-
-    SigmoidPackLoop:
-        #pragma unroll
-        for (int j = 0; j < res_T::size; j++) {
-            hls_register typename data_T::value_type absoluteValue;
-
-            if (in_data[j] < 0)
-                absoluteValue = (-1) * in_data[j];
-            else
-                absoluteValue = in_data[j];
-
-            hls_register int index;
-            if (absoluteValue <= MAX_VALUE)
-                index = (absoluteValue * (CONFIG_T::table_size / MAX_VALUE)).to_int();
-            else
-                index = CONFIG_T::table_size - 1;
-
-            if (in_data[j] > 0)
-                out_data[j] = sigmoid_table[index];
-            else
-                out_data[j] = 1 - sigmoid_table[index];
-        }
-
-        res.write(out_data);
-    }
-}
-
-// *************************************************
-// Hard sigmoid Activation
-// *************************************************
-// Note - Theano and TensorFlow may define hard sigmoid differently; two implementations could be provided
-template <class data_T, class res_T, typename CONFIG_T> void hard_sigmoid(stream<data_T> &data, stream<res_T> &res) {
-
-    constexpr unsigned multiplier_limit = DIV_ROUNDUP(data_T::size, CONFIG_T::reuse_factor);
-    constexpr unsigned pipeline = data_T::size / multiplier_limit;
-
-HardSigmoidActLoop:
-    #pragma ii pipeline
-    for (int i = 0; i < CONFIG_T::n_in / res_T::size; i++) {
-
-        data_T in_data = data.read();
-        res_T out_data;
-
-    HardSigmoidPackLoop:
-        #pragma unroll
-        for (int j = 0; j < res_T::size; j++) {
-            hls_register auto datareg = CONFIG_T::slope * in_data[j] + CONFIG_T::shift;
-            if (datareg > 1)
-                datareg = 1;
-            else if (datareg < 0)
-                datareg = 0;
-            out_data[j] = datareg;
-        }
-
-        res.write(out_data);
-    }
-}
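// [Editor's note - illustrative sketch, not part of the patch] The note above
// mentions that hard-sigmoid conventions differ. The common Keras/TensorFlow
// constants (an assumption here, not taken from this file) are slope = 0.2 and
// shift = 0.5, and hard_tanh is then 2 * hard_sigmoid(x) - 1:
inline double hard_sigmoid_ref(double x) {
    double y = 0.2 * x + 0.5;
    return y < 0.0 ? 0.0 : (y > 1.0 ? 1.0 : y); // clip to [0, 1]
}
inline double hard_tanh_ref(double x) { return 2.0 * hard_sigmoid_ref(x) - 1.0; }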
-
-template <class data_T, class res_T, typename CONFIG_T> void hard_tanh(stream<data_T> &data, stream<res_T> &res) {
-
-    constexpr unsigned multiplier_limit = DIV_ROUNDUP(data_T::size, CONFIG_T::reuse_factor);
-    constexpr unsigned pipeline = data_T::size / multiplier_limit;
-
-HardTanhActLoop:
-    #pragma ii pipeline
-    for (int i = 0; i < CONFIG_T::n_in / res_T::size; i++) {
-
-        data_T in_data = data.read();
-        res_T out_data;
-
-    HardTanhPackLoop:
-        #pragma unroll
-        for (int j = 0; j < res_T::size; j++) {
-            auto sigmoid = CONFIG_T::slope * in_data[j] + CONFIG_T::shift;
-            if (sigmoid > 1)
-                sigmoid = 1;
-            else if (sigmoid < 0)
-                sigmoid = 0;
-            out_data[j] = 2 * sigmoid - 1;
-        }
-
-        res.write(out_data);
-    }
-}
-
-// *************************************************
-// Binary TanH Activation
-// *************************************************
-template <class data_T, class res_T, typename CONFIG_T> void binary_tanh(stream<data_T> &data, stream<res_T> &res) {
-BinaryTanHActLoop:
-    #pragma ii 1
-    for (int i = 0; i < CONFIG_T::n_in / res_T::size; i++) {
-
-        hls_register data_T in_data = data.read();
-        hls_register res_T out_data;
-
-    BinaryTanHPackLoop:
-        #pragma unroll
-        for (int j = 0; j < res_T::size; j++) {
-            if (in_data[j] > 0)
-                out_data[j] = (typename res_T::value_type)1;
-            else
-                out_data[j] = (typename res_T::value_type)(-1);
-        }
-
-        res.write(out_data);
-    }
-}
-
-// *************************************************
-// Ternary TanH Activation
-// *************************************************
-template <class data_T, class res_T, typename CONFIG_T> void ternary_tanh(stream<data_T> &data, stream<res_T> &res) {
-TernaryTanHActLoop:
-    #pragma ii 1
-    for (int i = 0; i < CONFIG_T::n_in / res_T::size; i++) {
-
-        hls_register data_T in_data = data.read();
-        hls_register res_T out_data;
-
-    TernaryTanHPackLoop:
-        #pragma unroll
-        for (int j = 0; j < res_T::size; j++) {
-            if (in_data[j] > 1)
-                out_data[j] = (typename res_T::value_type)1;
-            else if (in_data[j] <= -1)
-                out_data[j] = (typename res_T::value_type)(-1);
-            else
-                out_data[j] = (typename res_T::value_type)0;
-        }
-
-        res.write(out_data);
-    }
-}
-
-} // namespace nnet
-
-#endif
diff --git a/hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_batchnorm.h b/hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_batchnorm.h
deleted file mode 100644
index 7b84a9c..0000000
--- a/hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_batchnorm.h
+++ /dev/null
@@ -1,104 +0,0 @@
-#ifndef NNET_BATCHNORM_H_
-#define NNET_BATCHNORM_H_
-
-#include "nnet_common.h"
-#include "nnet_helpers.h"
-#include "nnet_mult.h"
-
-namespace nnet {
-
-struct batchnorm_config {
-    // Internal data type definitions
-    typedef float bias_t;
-    typedef float scale_t;
-
-    // Layer Sizes
-    static const unsigned n_in = 10;
-    static const unsigned n_filt = -1;
-    static const unsigned n_scale_bias = 10;
-
-    // Resource reuse info
-    static const unsigned io_type = io_parallel;
-    static const unsigned reuse_factor = 1;
-    static const bool store_weights_in_bram = false;
-    static const unsigned n_zeros = 0;
-    // partitioning arrays cyclically to go with roll factors?
-
-    // Default multiplication
-    template <class x_T, class y_T> using product = nnet::product::mult<x_T, y_T>;
-};
-
-template <class data_T, class res_T, typename CONFIG_T>
-void normalize(data_T data[CONFIG_T::n_in], res_T res[CONFIG_T::n_in],
-               const typename CONFIG_T::scale_t scale[CONFIG_T::n_scale_bias],
-               const typename CONFIG_T::bias_t bias[CONFIG_T::n_scale_bias]) {
-// Calculate result
-Result:
    #pragma unroll
-    for (int ires = 0; ires < CONFIG_T::n_in; ires++) {
-        if (CONFIG_T::n_filt == -1) {
-            res[ires] = CONFIG_T::template product<data_T, typename CONFIG_T::scale_t>::product(data[ires], scale[ires]) +
-                        bias[ires];
-        } else {
-            int norm_index = ires % CONFIG_T::n_filt;
-            res[ires] =
-                CONFIG_T::template product<data_T, typename CONFIG_T::scale_t>::product(data[ires], scale[norm_index]) +
-                bias[norm_index];
-        }
-    }
-}
-
-// ****************************************************
-// Merged Batch Normalization and Quantized Tanh
-// ****************************************************
-struct batchnorm_quantized_tanh_config {
-    // Layer Sizes
-    static const unsigned n_in = 10;
-    static const unsigned n_filt = -1;
-    static const unsigned n_scale_bias = 10;
-
-    // Resource reuse info
-    static const unsigned io_type = io_parallel;
-    static const unsigned reuse_factor = 1;
-    static const unsigned n_zeros = 0;
-};
-
-template <class data_T, typename CONFIG_T>
-void normalize_binary_tanh(data_T data[CONFIG_T::n_in], ac_int<1, false> res[CONFIG_T::n_in],
-                           const data_T threshold[CONFIG_T::n_scale_bias]) {
-    #pragma unroll
-    for (int ii = 0; ii < CONFIG_T::n_in; ii++) {
-        ac_int<1, false> cache;
-        data_T datareg = data[ii];
-        int norm_index = CONFIG_T::n_filt == -1 ? ii : ii % CONFIG_T::n_filt;
-        if (datareg >= threshold[norm_index])
-            cache = 1;
-        else
-            cache = 0;
-
-        res[ii] = cache;
-    }
-}
-
-template <class data_T, typename CONFIG_T>
-void normalize_ternary_tanh(data_T data[CONFIG_T::n_in], ac_int<2, true> res[CONFIG_T::n_in],
-                            const data_T threshold_hi[CONFIG_T::n_scale_bias],
-                            const data_T threshold_lo[CONFIG_T::n_scale_bias]) {
-    #pragma unroll
-    for (int ii = 0; ii < CONFIG_T::n_in; ii++) {
-        ac_int<2, true> cache;
-        data_T datareg = data[ii];
-        int norm_index = CONFIG_T::n_filt == -1 ? ii : ii % CONFIG_T::n_filt;
-        if (datareg > threshold_hi[norm_index])
-            cache = 1;
-        else if (datareg <= threshold_lo[norm_index])
-            cache = -1;
-        else
-            cache = 0;
-        res[ii] = cache;
-    }
-}
-
-} // namespace nnet
-
-#endif
diff --git a/hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_batchnorm_stream.h b/hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_batchnorm_stream.h
deleted file mode 100644
index 7a1f48d..0000000
--- a/hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_batchnorm_stream.h
+++ /dev/null
@@ -1,108 +0,0 @@
-#ifndef NNET_BATCHNORM_STREAM_H_
-#define NNET_BATCHNORM_STREAM_H_
-
-#include "nnet_common.h"
-#include "nnet_helpers.h"
-#include "nnet_mult.h"
-#include "nnet_types.h"
-
-namespace nnet {
-
-// ****************************************************
-// Streaming Batch Normalization
-// ****************************************************
-template <class data_T, class res_T, typename CONFIG_T>
-void normalize(stream<data_T> &data, stream<res_T> &res, const typename CONFIG_T::scale_t scale[CONFIG_T::n_scale_bias],
-               const typename CONFIG_T::bias_t bias[CONFIG_T::n_scale_bias]) {
-
-    constexpr unsigned multiplier_limit = DIV_ROUNDUP(CONFIG_T::n_in, CONFIG_T::reuse_factor);
-    constexpr unsigned pipeline = CONFIG_T::n_in / multiplier_limit;
-    CONFIG_T::template product<typename data_T::value_type, typename CONFIG_T::scale_t>::limit(multiplier_limit);
-
-BatchNormLoop:
-    #pragma ii pipeline
-    for (int i = 0; i < CONFIG_T::n_in / data_T::size; i++) {
-        data_T in_data = data.read();
-        res_T out_data;
-
-    BatchNormpack:
-        #pragma unroll
-        for (int j = 0; j < data_T::size; j++) {
-            int norm_index;
-            if (CONFIG_T::n_filt == -1)
-                norm_index = i * data_T::size + j;
-            else
-                norm_index = j % CONFIG_T::n_filt;
-            out_data[j] = CONFIG_T::template product<typename data_T::value_type, typename CONFIG_T::scale_t>::product(
-                              in_data[j], scale[norm_index]) +
-                          bias[norm_index];
-        }
-
-        res.write(out_data);
-    }
-}
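// [Editor's note - illustrative sketch, not part of the patch] normalize()
// above computes res = data * scale + bias per channel. For inference,
// batch-norm parameters (gamma, beta, mean, var) are conventionally folded
// into those two arrays - a standard identity, shown here as an editor's
// assumption about how the arrays are produced upstream (requires <cmath>):
inline void fold_batchnorm_ref(int n, const float *gamma, const float *beta, const float *mean,
                               const float *var, float eps, float *scale, float *bias) {
    for (int i = 0; i < n; i++) {
        scale[i] = gamma[i] / std::sqrt(var[i] + eps); // multiplies data[i]
        bias[i] = beta[i] - mean[i] * scale[i];        // added afterwards
    }
}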
-
-// ****************************************************
-// Merged Batch Normalization and Quantized Tanh
-// ****************************************************
-template <class data_T, typename CONFIG_T>
-void normalize_binary_tanh(stream<data_T> &data, stream<nnet::array<ac_int<1, false>, CONFIG_T::n_scale_bias>> &res,
-                           const typename data_T::value_type threshold[CONFIG_T::n_scale_bias]) {
-
-BinaryNormLoop:
-    #pragma ii 1
-    for (int i = 0; i < CONFIG_T::n_in / data_T::size; i++) {
-        data_T in_data = data.read();
-        nnet::array<ac_int<1, false>, CONFIG_T::n_scale_bias> out_data;
-
-    BatchNormPack:
-        #pragma unroll
-        for (int j = 0; j < data_T::size; j++) {
-            int norm_index;
-            if (CONFIG_T::n_filt == -1)
-                norm_index = i * data_T::size + j;
-            else
-                norm_index = j % CONFIG_T::n_filt;
-
-            out_data[j] = (in_data[j] >= threshold[norm_index]) ? 1 : 0;
-        }
-
-        res.write(out_data);
-    }
-}
-
-template <class data_T, typename CONFIG_T>
-void normalize_ternary_tanh(stream<data_T> &data, stream<nnet::array<ac_int<2, true>, CONFIG_T::n_scale_bias>> &res,
-                            const typename data_T::value_type threshold_hi[CONFIG_T::n_scale_bias],
-                            const typename data_T::value_type threshold_lo[CONFIG_T::n_scale_bias]) {
-
-TernaryNormLoop:
-    #pragma ii 1
-    for (int i = 0; i < CONFIG_T::n_in / data_T::size; i++) {
-        data_T in_data = data.read();
-        nnet::array<ac_int<2, true>, CONFIG_T::n_scale_bias> out_data;
-
-    BatchNormPack:
-        #pragma unroll
-        for (int j = 0; j < data_T::size; j++) {
-            int norm_index;
-            if (CONFIG_T::n_filt == -1)
-                norm_index = i * data_T::size + j;
-            else
-                norm_index = j % CONFIG_T::n_filt;
-
-            if (in_data[j] > threshold_hi[norm_index])
-                out_data[j] = 1;
-            else if (in_data[j] <= threshold_lo[norm_index])
-                out_data[j] = -1;
-            else
-                out_data[j] = 0;
-        }
-
-        res.write(out_data);
-    }
-}
-
-} // namespace nnet
-
-#endif
diff --git a/hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_common.h b/hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_common.h
deleted file mode 100644
index 1af60ab..0000000
--- a/hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_common.h
+++ /dev/null
@@ -1,71 +0,0 @@
-#ifndef NNET_COMMON_H_
-#define NNET_COMMON_H_
-
-#ifndef __INTELFPGA_COMPILER__
-#include "ac_fixed.h"
-#include "ac_int.h"
-#include "math.h"
-#else
-#include "HLS/ac_fixed.h"
-#include "HLS/ac_int.h"
-#include "HLS/math.h"
-#endif
-
-#include "nnet_helpers.h"
-
-typedef ac_fixed<16, 6> table_default_t;
-
-namespace nnet {
-
-// Common type definitions
-enum io_type { io_parallel = 0, io_stream };
-
-// Default data types (??) TODO: Deprecate
-typedef ac_fixed<16, 4> weight_t_def;
-typedef ac_fixed<16, 4> bias_t_def;
-typedef ac_fixed<32, 10> accum_t_def;
-
-template <class data_T, int NIN1, int NIN2> void merge(data_T data1[NIN1], data_T data2[NIN2], data_T res[NIN1 + NIN2]) {
-    #pragma unroll
-    for (int ii = 0; ii < NIN1; ii++) {
-        res[ii] = data1[ii];
-    }
-    #pragma unroll
-    for (int ii = 0; ii < NIN2; ii++) {
-        res[NIN1 + ii] = data2[ii];
-    }
-}
-
-/* ---
- * Balanced tree reduce implementation.
- * For use in scenarios where Quartus cannot expression balance.
- * Reduces an array of inputs to a single value using the template binary operator 'Op',
- * for example summing all elements with Op_add, or finding the maximum with Op_max.
- * Use only when the input array is fully unrolled. Or, slice out a fully unrolled section
- * before applying and accumulate the result over the rolled dimension.
- * --- */
-template <class T, int N, class Op> T reduce(const T *x, Op op) {
-    static constexpr int leftN = pow2(floorlog2(N - 1)) > 0 ? pow2(floorlog2(N - 1)) : 0;
-    static constexpr int rightN = N - leftN > 0 ? N - leftN : 0;
-    if (N == 1) {
-        return x[0];
-    }
-    if (N == 2) {
-        return op(x[0], x[1]);
-    }
-    return op(reduce<T, leftN, Op>(x, op), reduce<T, rightN, Op>(x + leftN, op));
-}
-
-template <class T> class Op_add {
-  public:
-    T operator()(T a, T b) { return a + b; }
-};
-
-template <class T> class Op_max {
-  public:
-    T operator()(T a, T b) { return a >= b ? a : b; }
-};
-
-} // namespace nnet
-
-#endif
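// [Editor's note - illustrative sketch, not part of the patch] reduce() above
// builds a balanced binary tree of depth ~log2(N) instead of a chain of N - 1
// sequential operations. A plain-C++ rendering of the same idea (splitting at
// N / 2 rather than at the largest power of two, purely for brevity):
#include <cstdio>
template <class T, int N, class Op> T reduce_ref(const T *x, Op op) {
    if constexpr (N == 1) {
        return x[0];
    } else {
        constexpr int left = N / 2; // split, then reduce both halves in parallel
        return op(reduce_ref<T, left, Op>(x, op), reduce_ref<T, N - left, Op>(x + left, op));
    }
}
int main() {
    float x[8] = {1, 2, 3, 4, 5, 6, 7, 8};
    // 36, computed via 3 adder levels instead of 7 chained additions
    std::printf("%g\n", reduce_ref<float, 8>(x, [](float a, float b) { return a + b; }));
}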
diff --git a/hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_conv1d.h b/hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_conv1d.h
deleted file mode 100644
index 8897e13..0000000
--- a/hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_conv1d.h
+++ /dev/null
@@ -1,64 +0,0 @@
-#ifndef NNET_CONV1D_H_
-#define NNET_CONV1D_H_
-
-#include "nnet_common.h"
-#include "nnet_conv1d_resource.h"
-
-namespace nnet {
-
-struct conv1d_config {
-    // I/O sizes
-    static const unsigned in_width = 10;
-    static const unsigned out_width = 10;
-
-    // Number of channels, filters
-    static const unsigned n_chan = 1;
-    static const unsigned n_filt = 1;
-
-    // Original filter size
-    static const unsigned filt_width = 1;
-    static const unsigned kernel_size = filt_width;
-
-    // Modified filter size (post-Winograd transformation, if applied)
-    static const unsigned impl_filt_height = 1;
-    static const unsigned impl_filt_width = 1;
-
-    // Padding, stride, dilation
-    static const unsigned pad_left = 0;
-    static const unsigned pad_right = 0;
-    static const unsigned stride_width = 1;
-    static const unsigned dilation = 1;
-
-    // Run-time Configuration
-    static const unsigned n_zeros = 0;
-    static const unsigned reuse_factor = 1;
-    static const unsigned parallelisation_factor = 1;
-
-    // TODO: BRAM Storage on Quartus
-    static const bool store_weights_in_bram = false;
-
-    // Internal data type definitions
-    typedef float bias_t;
-    typedef float weight_t;
-    typedef float accum_t;
-};
-
-template <class data_T, class res_T, typename CONFIG_T>
-void conv_1d_cl(data_T data[CONFIG_T::in_width * CONFIG_T::n_chan], res_T res[CONFIG_T::out_width * CONFIG_T::n_filt],
-                const typename CONFIG_T::weight_t weights[CONFIG_T::filt_width * CONFIG_T::n_chan * CONFIG_T::n_filt],
-                const typename CONFIG_T::bias_t biases[CONFIG_T::n_filt]) {
-    conv_1d_resource_cl<data_T, res_T, CONFIG_T>(data, res, weights, biases);
-}
-
-template <class data_T, class res_T, typename CONFIG_T>
-void pointwise_conv_1d_cl(data_T data[CONFIG_T::in_width * CONFIG_T::n_chan],
-                          res_T res[CONFIG_T::out_width * CONFIG_T::n_filt],
-                          const typename CONFIG_T::weight_t weights[CONFIG_T::n_chan * CONFIG_T::n_filt],
-                          const typename CONFIG_T::bias_t biases[CONFIG_T::n_filt]) {
-    assert(CONFIG_T::filt_width == 1);
-    pointwise_conv_1d_resource_cl<data_T, res_T, CONFIG_T>(data, res, weights, biases);
-}
-
-} // namespace nnet
-
-#endif
diff --git a/hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_conv1d_resource.h b/hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_conv1d_resource.h
deleted file mode 100644
index a110d6d..0000000
--- a/hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_conv1d_resource.h
+++ /dev/null
@@ -1,241 +0,0 @@
-#ifndef NNET_CONV1D_RESOURCE_H_
-#define NNET_CONV1D_RESOURCE_H_
-
-#include "nnet_common.h"
-#include "nnet_dense.h"
-
-namespace nnet {
-
-enum class conv1d_implementation { combination, im2col, winograd };
-
-// ****************************************************************
-// im2col - General-purpose 1D Convolution algorithm
-// ****************************************************************
-
-template <class data_T, typename CONFIG_T>
-void im2col_1d_cl(data_T data[CONFIG_T::in_width * CONFIG_T::n_chan],
-                  data_T data_col[CONFIG_T::impl_filt_width * CONFIG_T::n_chan], const int col) {
-    // im2col can be unrolled fully, since number of parallel executions = filt_w x n_chan ~ O(100) and very little DSP
-    // usage
-
-    hls_register int index = 0;
-
-KernelLoop:
-    #pragma unroll
-    for (int kernel_col = 0; kernel_col < CONFIG_T::impl_filt_width; kernel_col++) {
-    ChannelLoop:
-        #pragma unroll
-        for (int channel = 0; channel < CONFIG_T::n_chan; channel++) {
-            hls_register int index_data =
-                (col * CONFIG_T::stride_width + kernel_col - CONFIG_T::pad_left) * CONFIG_T::n_chan + channel;
-            if (index_data >= 0 && index_data < CONFIG_T::in_width * CONFIG_T::n_chan) {
-                data_col[index++] = data[index_data];
-            } else {
-                data_col[index++] = 0;
-            }
-        }
-    }
-}
-
-template <class data_T, class res_T, typename CONFIG_T>
-void conv_1d_im2col_cl(
-    data_T data[CONFIG_T::in_width * CONFIG_T::n_chan], res_T res[CONFIG_T::out_width * CONFIG_T::n_filt],
-    const typename CONFIG_T::weight_t weights[CONFIG_T::impl_filt_width * CONFIG_T::n_chan * CONFIG_T::n_filt],
-    const typename CONFIG_T::bias_t biases[CONFIG_T::n_filt]) {
-    // im2col performs no filter transformations; therefore, filter size remains constant
-    assert(CONFIG_T::filt_width == CONFIG_T::impl_filt_width);
-
-    // Unroll factor for loop traversing input image, derived from parallelisation_factor
-    static constexpr int pf = MIN(CONFIG_T::parallelisation_factor, CONFIG_T::out_width);
-
-ColLoop:
-    #pragma unroll pf
-    #pragma ii CONFIG_T::reuse_factor
-    for (int i = 0; i < CONFIG_T::out_width; i++) {
-        // Loop variables should always be declared in the deepest scope available
-        // See Intel's HLS - Loop Best Practices
-        // https://www.intel.com/content/www/us/en/docs/programmable/683152/22-2/declare-variables-in-the-deepest-scope.html
-
-        hls_register data_T data_col[CONFIG_T::impl_filt_width * CONFIG_T::n_chan];
-        im2col_1d_cl<data_T, CONFIG_T>(data, data_col, i);
-
-        hls_register res_T res_col[CONFIG_T::n_filt];
-        dense_resource<data_T, res_T, typename CONFIG_T::mult_config>(data_col, res_col, weights, biases);
-
-        // Unroll fully, since
-        // (1) n_filt is usually low in io_parallel (< 32)
-        // (2) no complex operations handled in loop, this loop performs a simple register writing operation
-    FiltLoop:
-        #pragma unroll
-        for (int j = 0; j < CONFIG_T::n_filt; j++) {
-            res[i * CONFIG_T::n_filt + j] = res_col[j];
-        }
-    }
-}
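// [Editor's note - worked example, not part of the patch] For the im2col
// layout above, assume in_width = 4, n_chan = 1, filt_width = 3, stride 1 and
// pad_left = 1, with input data = {d0, d1, d2, d3}. Output column col = 0
// gathers {0, d0, d1} (the leading 0 is padding), col = 1 gathers
// {d0, d1, d2}, and so on; each column then becomes one dense_resource()
// input of length filt_width * n_chan = 3.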
-
-// ****************************************************************
-// 1D Convolution for 3x1 kernels from Winograd's algorithm
-// ****************************************************************
-
-// Explicitly transformed input (B'dB) needed for Winograd convolution, as explained by Lavin & Gray (2015)
-template <typename data_T, typename res_T>
-inline void winograd_transform_input_tile_3x1_kernel(const data_T I[4], res_T D[4]) {
-    D[0] = I[0] - I[2];
-    D[1] = I[1] + I[2];
-    D[2] = -I[1] + I[2];
-    D[3] = I[1] - I[3];
-}
-
-template <class data_T, class res_T, typename CONFIG_T>
-void winograd_conv1d_3x1_kernel_cl(
-    data_T data[CONFIG_T::in_width * CONFIG_T::n_chan], res_T res[CONFIG_T::out_width * CONFIG_T::n_filt],
-    const typename CONFIG_T::weight_t weights[CONFIG_T::impl_filt_width * CONFIG_T::n_chan * CONFIG_T::n_filt],
-    const typename CONFIG_T::bias_t biases[CONFIG_T::n_filt]) {
-    // Ensure Winograd conditions are met
-    assert(CONFIG_T::filt_width == 3);
-    assert(CONFIG_T::stride_width == 1);
-    assert(CONFIG_T::out_width > 2);
-
-    // Unroll factor for loop traversing input image, derived from parallelisation_factor
-    static constexpr int pf = MIN(CONFIG_T::parallelisation_factor, CONFIG_T::out_width);
-
-    // Initialise result to bias
-    // Unroll fully, as loop performs a simple operation - assigning the outputs to a constant value
-    #pragma unroll
-    for (int i = 0; i < CONFIG_T::out_width; i++) {
-        int offset = CONFIG_T::n_filt * i;
-        #pragma unroll
-        for (int f = 0; f < CONFIG_T::n_filt; f++) {
-            res[offset + f] = static_cast<res_T>(biases[f]);
-        }
-    }
-
-WidthLoop:
-    #pragma unroll pf
-    for (int col = 0; col < CONFIG_T::out_width; col += 2) {
-    ChannelLoop:
-        #pragma unroll
-        for (int channel = 0; channel < CONFIG_T::n_chan; channel++) {
-            // Get current 4x1 tile (only the first 4 entries of T are used)
-            hls_register data_T T[16];
-            hls_register uint8_t p = 0;
-
-            #pragma unroll
-            for (int c = col - (int)CONFIG_T::pad_left; c < col + 4 - (int)CONFIG_T::pad_left; c++) {
-                if (c < CONFIG_T::in_width && c >= 0) {
-                    T[p++] = data[c * CONFIG_T::n_chan + channel];
-                } else {
-                    T[p++] = 0;
-                }
-            }
-
-            // Transform input tile
-            hls_register typename CONFIG_T::accum_t D[4];
-            winograd_transform_input_tile_3x1_kernel<data_T, typename CONFIG_T::accum_t>(T, D);
-
-            #pragma unroll
-            for (int filter = 0; filter < CONFIG_T::n_filt; filter++) {
-                hls_register int filter_offset = 4 * (CONFIG_T::n_chan * filter + channel);
-
-                // Hadamard product between transformed input tile and kernel
-                hls_register typename CONFIG_T::accum_t Y[4];
-                #pragma unroll
-                for (int i = 0; i < 4; i++) {
-                    Y[i] = static_cast<typename CONFIG_T::accum_t>(D[i] * weights[filter_offset + i]);
-                }
-
-                // Explicitly transform intermediate result Z = A'YA and save to output
-                res[CONFIG_T::n_filt * col + filter] += static_cast<res_T>(Y[0] + Y[1] + Y[2]);
-                if ((col + 1) < CONFIG_T::out_width)
-                    res[CONFIG_T::n_filt * (col + 1) + filter] += static_cast<res_T>(Y[1] - Y[2] - Y[3]);
-            }
-        }
-    }
-}
-
-// ****************************************************************
-// 1D Convolution for 1x1 kernels using optimized im2col
-// ****************************************************************
-
-template <class data_T, typename CONFIG_T>
-void im2col_1d_pointwise_cl(data_T data[CONFIG_T::in_width * CONFIG_T::n_chan], data_T data_col[CONFIG_T::n_chan],
-                            const int col) {
-    // pointwise_im2col can be unrolled fully, only one loop with n_chan iterations
-
-    hls_register int index = 0;
-
-ChannelLoop:
-    #pragma unroll
-    for (int channel = 0; channel < CONFIG_T::n_chan; channel++) {
-        hls_register int index_data = (col * CONFIG_T::stride_width - CONFIG_T::pad_left) * CONFIG_T::n_chan + channel;
-        if (index_data >= 0 && index_data < CONFIG_T::in_width * CONFIG_T::n_chan) {
-            data_col[index++] = data[index_data];
-        } else {
-            data_col[index++] = 0;
-        }
-    }
-}
-
-template <class data_T, class res_T, typename CONFIG_T>
-void pointwise_conv_1d_resource_cl(data_T data[CONFIG_T::in_width * CONFIG_T::n_chan],
-                                   res_T res[CONFIG_T::out_width * CONFIG_T::n_filt],
-                                   const typename CONFIG_T::weight_t weights[CONFIG_T::n_chan * CONFIG_T::n_filt],
-                                   const typename CONFIG_T::bias_t biases[CONFIG_T::n_filt]) {
-    assert(CONFIG_T::filt_width == 1);
-
-    // Unroll factor for loop traversing input image, derived from parallelisation_factor
-    static constexpr int pf = MIN(CONFIG_T::parallelisation_factor, CONFIG_T::out_width);
-
-ColLoop:
-    #pragma unroll pf
-    #pragma ii CONFIG_T::reuse_factor
-    for (int col = 0; col < CONFIG_T::out_width; col++) {
-        // Loop variables should always be declared in the deepest scope available
-        // See Intel's HLS - Loop Best Practices
-        // https://www.intel.com/content/www/us/en/docs/programmable/683152/22-2/declare-variables-in-the-deepest-scope.html
-
-        hls_register data_T data_col[CONFIG_T::n_chan];
-        im2col_1d_pointwise_cl<data_T, CONFIG_T>(data, data_col, col);
-
-        hls_register res_T res_col[CONFIG_T::n_filt];
-        dense_resource<data_T, res_T, typename CONFIG_T::mult_config>(data_col, res_col, weights, biases);
-
-        // Unroll fully, since
-        // (1) n_filt is usually low in io_parallel (< 32)
-        // (2) no complex operations handled in loop, this loop performs a simple register writing operation
    FiltLoop:
-        #pragma unroll
-        for (int k = 0; k < CONFIG_T::n_filt; k++) {
-            res[col * CONFIG_T::n_filt + k] = res_col[k];
-        }
-    }
-}
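// [Editor's note - numeric check, not part of the patch] The 3x1 Winograd
// kernel above computes two outputs per 4-sample tile with 4 multiplications,
// where direct convolution needs 6. A float sketch of F(2,3) for one tile,
// using the same B'd transform as winograd_transform_input_tile_3x1_kernel
// (g is the Winograd-transformed 4-tap weight vector, an input here by
// assumption):
inline void winograd_f23_ref(const float d[4], const float g[4], float out[2]) {
    float D[4] = {d[0] - d[2], d[1] + d[2], -d[1] + d[2], d[1] - d[3]};
    float Y[4];
    for (int i = 0; i < 4; i++)
        Y[i] = D[i] * g[i];      // Hadamard product
    out[0] = Y[0] + Y[1] + Y[2]; // A'Y: first output
    out[1] = Y[1] - Y[2] - Y[3]; // A'Y: second output
}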
-
-// ****************************************************************
-// Top-level function - handles different implementations
-// ****************************************************************
-template <class data_T, class res_T, typename CONFIG_T>
-void conv_1d_resource_cl(
-    data_T data[CONFIG_T::in_width * CONFIG_T::n_chan], res_T res[CONFIG_T::out_width * CONFIG_T::n_filt],
-    const typename CONFIG_T::weight_t weights[CONFIG_T::impl_filt_width * CONFIG_T::n_chan * CONFIG_T::n_filt],
-    const typename CONFIG_T::bias_t biases[CONFIG_T::n_filt]) {
-    static constexpr bool winograd_conditions =
-        // Winograd's minimal filtering algorithm not applicable to stride != 1
-        CONFIG_T::stride_width == 1 &&
-
-        // Intel HLS will fail to pipeline the entire component if the Winograd loop only runs once
-        CONFIG_T::out_width > 2 &&
-
-        // Verify user opted for Winograd
-        (CONFIG_T::implementation == nnet::conv1d_implementation::combination ||
-         CONFIG_T::implementation == nnet::conv1d_implementation::winograd);
-
-    if (CONFIG_T::filt_width == 3 && winograd_conditions) {
-        winograd_conv1d_3x1_kernel_cl<data_T, res_T, CONFIG_T>(data, res, weights, biases);
-    } else {
-        conv_1d_im2col_cl<data_T, res_T, CONFIG_T>(data, res, weights, biases);
-    }
-}
-
-} // namespace nnet
-#endif
diff --git a/hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_conv1d_stream.h b/hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_conv1d_stream.h
deleted file mode 100644
index 4fbfafd..0000000
--- a/hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_conv1d_stream.h
+++ /dev/null
@@ -1,172 +0,0 @@
-#ifndef NNET_CONV1D_STREAM_H_
-#define NNET_CONV1D_STREAM_H_
-
-#include "nnet_dense.h"
-#include "nnet_types.h"
-
-namespace nnet {
-
-/*
- * void kernel_shift(shift_buffer, kernel_window)
- *
- * Args:
- *   shift_buffer - array elements popped from the line buffer during the shift-line-buffer operation
- *   kernel_window - array of values from the input currently being convolved with the kernel
- *
- * Values from shift_buffer are inserted into kernel_window, updating the values to be convolved
- */
-template <class data_T, typename CONFIG_T>
-void kernel_shift_1d(typename data_T::value_type shift_buffer[CONFIG_T::n_chan],
-                     typename data_T::value_type kernel_window[CONFIG_T::filt_width * CONFIG_T::n_chan]) {
-/*
- * Manually shift kernel_window by one step to the left.
- * It is not possible to use nnet::shift_reg here, as the kernel window is convolved with the kernel weights using
- * dense matrix multiplication, which is only implemented for arrays. However, provided certain timing constraints
- * are met, Intel HLS automatically infers a shift operation and implements kernel_window as a shift register.
- * To verify, see the synthesis report in report.html > Area Analysis of System.
- */
-KernelShiftWidth:
-    #pragma unroll
-    for (int col = 0; col < CONFIG_T::filt_width - 1; col++) {
-    KernelShiftChannel:
-        #pragma unroll
-        for (int channel = 0; channel < CONFIG_T::n_chan; channel++) {
-            kernel_window[col * CONFIG_T::n_chan + channel] = kernel_window[(col + 1) * CONFIG_T::n_chan + channel];
-        }
-    }
-
-// Insert shift_buffer values into the last column of the kernel window
-KernelPushChannel:
-    #pragma unroll
-    for (int channel = 0; channel < CONFIG_T::n_chan; channel++) {
-        kernel_window[(CONFIG_T::filt_width - 1) * CONFIG_T::n_chan + channel] = shift_buffer[channel];
-    }
-}
-
-/*
- * void shift_line_buffer(in_element, line_buffer, shift_buffer)
- *
- * Args:
- *   in_element - current elements from input image; data_T is usually nnet::array, with size equal to the number of channels
- *   line_buffer - chained array of shift registers, one for each row of the kernel and channel
- *   shift_buffer - array elements popped from the line buffer during the shift operation
- *
- * Values from in_element are inserted into the line buffer, causing all other elements to be shifted by one.
- * Popped elements are later used to update the kernel window, during the kernel_shift operation.
- */
-template <class data_T, typename CONFIG_T>
-void shift_line_buffer_1d(
-    const data_T &in_elem,
-    nnet::shift_reg<typename data_T::value_type, CONFIG_T::in_width> line_buffer[CONFIG_T::n_chan],
-    typename data_T::value_type shift_buffer[CONFIG_T::n_chan]) {
-// For every channel, insert the incoming pixel at end of the shift buffer
-UpdateBuffer:
-    #pragma unroll
-    for (int channel = 0; channel < CONFIG_T::n_chan; channel++) {
-        shift_buffer[channel] = in_elem[channel];
-    }
-}
-
-/*
- * void compute_output_buffer(in_element, res_stream, line_buffer, kernel_window, weights, biases)
- *
- * Args:
- *   in_element - current elements from input image; data_T is usually nnet::array, with size equal to the number of channels
- *   res_stream - output stream, passed by reference to allow direct writing
- *   line_buffer - chained array of shift registers, one for each row of the kernel and channel
- *   kernel_window - array of values from the input currently convolved with the kernel
- *   weights - Conv1D layer weights
- *   biases - Conv1D layer biases
- *
- * Function executes 4 steps:
- *   (1) Shift line buffer - updates the contents of the chained shift registers, inserting new inputs and removing the last elements
- *   (2) Kernel shift - updates the elements of the kernel window, storing the new inputs and the elements popped from the line buffer
- *   (3) Matrix multiplication - performs dense matrix multiplication between the current input window and the kernel weights
- *   (4) Counter housekeeping - keeps track of the current pixel and stride
- */
-template <class data_T, class res_T, typename CONFIG_T>
-void compute_output_buffer_1d(
-    const data_T &in_elem, stream<res_T> &res_stream,
-    nnet::shift_reg<typename data_T::value_type, CONFIG_T::in_width> line_buffer[CONFIG_T::n_chan],
-    typename data_T::value_type kernel_window[CONFIG_T::filt_width * CONFIG_T::n_chan],
-    const typename CONFIG_T::weight_t weights[CONFIG_T::kernel_size * CONFIG_T::n_chan * CONFIG_T::n_filt],
-    const typename CONFIG_T::bias_t biases[CONFIG_T::n_filt]) {
-    // Thresholds
-    static constexpr int lShiftX = CONFIG_T::filt_width - 1;
-
-    // X position pixel
-    static int pX = 0;
-
-    // X strides
-    static int sX = 0;
-
-    // Step 1 - Shift line buffer
-    hls_register typename data_T::value_type shift_buffer[CONFIG_T::n_chan];
-    nnet::shift_line_buffer_1d<data_T, CONFIG_T>(in_elem, line_buffer, shift_buffer);
-
-    // Step 2 - Kernel shift
-    nnet::kernel_shift_1d<data_T, CONFIG_T>(shift_buffer, kernel_window);
-
-    // Check to see if we have a full kernel
-    if ((sX - lShiftX) == 0 && pX > (lShiftX - 1)) {
-        // Step 3 - Dense matrix multiplication
-        hls_register typename res_T::value_type res_out[CONFIG_T::n_filt];
-        dense_resource<typename data_T::value_type, typename res_T::value_type, typename CONFIG_T::mult_config>(
-            kernel_window, res_out, weights, biases);
-
-        // Write result to output stream
-        hls_register res_T res_pack;
-    CastLoop:
-        #pragma unroll
-        for (int channel = 0; channel < CONFIG_T::n_filt; channel++) {
-            res_pack[channel] = res_out[channel];
-        }
-        res_stream.write(res_pack);
-    }
-
-    // Reached end of image
-    if ((pX + 1) == (CONFIG_T::in_width + CONFIG_T::pad_left + CONFIG_T::pad_right)) {
-        pX = 0;
-        sX = 0;
-        // Move to the right
-    } else {
-        pX++;
-        sX = ((sX - lShiftX) == 0) ? (sX - CONFIG_T::stride_width + 1) : (sX + 1);
-    }
-}
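// [Editor's note - worked trace, not part of the patch] For the counters
// above, take filt_width = 3 (so lShiftX = 2), stride_width = 1, in_width = 5
// and no padding. pX counts pixels 0..4 while sX climbs to 2 and then
// saturates there. The guard (sX - lShiftX) == 0 && pX > lShiftX - 1 first
// fires at pX = 2, i.e. once the first full 3-pixel window has been shifted
// in, and then on every subsequent pixel, producing out_width = 3 outputs
// before both counters reset at the end of the row.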
-
-template <class data_T, class res_T, typename CONFIG_T>
-void conv_1d_cl(stream<data_T> &data, stream<res_T> &res,
-                const typename CONFIG_T::weight_t weights[CONFIG_T::filt_width * CONFIG_T::n_chan * CONFIG_T::n_filt],
-                const typename CONFIG_T::bias_t biases[CONFIG_T::n_filt]) {
-    // Line buffer and kernel window
-    hls_register static nnet::shift_reg<typename data_T::value_type, CONFIG_T::in_width> line_buffer[CONFIG_T::n_chan];
-    hls_register static typename data_T::value_type kernel_window[CONFIG_T::filt_width * CONFIG_T::n_chan];
-
-    // An array of length CONFIG_T::n_chan, with elements set to zero (padding for each channel)
-    static const data_T padds(0);
-
-// Input image left-side padding
-PaddingLeftWidth:
-    for (int col = 0; col < CONFIG_T::pad_left; col++) {
-        compute_output_buffer_1d<data_T, res_T, CONFIG_T>(padds, res, line_buffer, kernel_window, weights, biases);
-    }
-
-// Read input image
-ReadInputWidth:
-    for (int col = 0; col < CONFIG_T::in_width; col++) {
-        compute_output_buffer_1d<data_T, res_T, CONFIG_T>(data.read(), res, line_buffer, kernel_window, weights, biases);
-    }
-
-// Input image right-side padding
-PaddingRightWidth:
-    for (int col = 0; col < CONFIG_T::pad_right; col++) {
-        compute_output_buffer_1d<data_T, res_T, CONFIG_T>(padds, res, line_buffer, kernel_window, weights, biases);
-    }
-}
-
-} // namespace nnet
-
-#endif
diff --git a/hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_conv2d.h b/hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_conv2d.h
deleted file mode 100644
index 3aa71a7..0000000
--- a/hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_conv2d.h
+++ /dev/null
@@ -1,72 +0,0 @@
-#ifndef NNET_CONV2D_H_
-#define NNET_CONV2D_H_
-
-#include "nnet_conv2d_resource.h"
-
-namespace nnet {
-
-struct conv2d_config {
-    // I/O sizes
-    static const unsigned in_height = 10;
-    static const unsigned in_width = 10;
-    static const unsigned out_height = 10;
-    static const unsigned out_width = 10;
-
-    // Number of channels, filters
-    static const unsigned n_chan = 1;
-    static const unsigned n_filt = 1;
-
-    // Original filter size
-    static const unsigned filt_height = 1;
-    static const unsigned filt_width = 1;
-    static const unsigned kernel_size = filt_height * filt_width;
-
-    // Modified filter size (post-Winograd transformation, if applied)
-    static const unsigned impl_filt_height = 1;
-    static const unsigned impl_filt_width = 1;
-
-    // Padding, stride, dilation
-    static const unsigned pad_top = 0;
-    static const unsigned pad_bottom = 0;
-    static const unsigned pad_left = 0;
-    static const unsigned pad_right = 0;
-    static const unsigned stride_height = 1;
-    static const unsigned stride_width = 1;
-    static const unsigned dilation_height = 1;
-    static const unsigned dilation_width = 1;
-
-    // Run-time configuration
-    static const unsigned n_zeros = 0;
-    static const unsigned reuse_factor = 1;
-    static const unsigned parallelisation_factor = 1;
-
-    // TODO: BRAM Storage on Quartus
-    static const bool store_weights_in_bram = false;
-
-    // Internal data type definitions
-    typedef float bias_t;
-    typedef float weight_t;
-    typedef float accum_t;
-};
-
-template <class data_T, class res_T, typename CONFIG_T>
-void conv_2d_cl(data_T data[CONFIG_T::in_height * CONFIG_T::in_width * CONFIG_T::n_chan],
-                res_T res[CONFIG_T::out_height * CONFIG_T::out_width * CONFIG_T::n_filt],
-                const typename CONFIG_T::weight_t
-                    weights[CONFIG_T::impl_filt_height * CONFIG_T::impl_filt_width * CONFIG_T::n_chan * CONFIG_T::n_filt],
-                const typename CONFIG_T::bias_t biases[CONFIG_T::n_filt]) {
-    conv_2d_resource_cl<data_T, res_T, CONFIG_T>(data, res, weights, biases);
-}
-
-template <class data_T, class res_T, typename CONFIG_T>
-void pointwise_conv_2d_cl(data_T data[CONFIG_T::in_height * CONFIG_T::in_width * CONFIG_T::n_chan],
-                          res_T res[CONFIG_T::out_height * CONFIG_T::out_width * CONFIG_T::n_filt],
-                          const typename CONFIG_T::weight_t weights[CONFIG_T::n_chan * CONFIG_T::n_filt],
-                          const typename CONFIG_T::bias_t biases[CONFIG_T::n_filt]) {
-    assert(CONFIG_T::filt_height == 1 && CONFIG_T::filt_width == 1);
-    pointwise_conv_2d_resource_cl<data_T, res_T, CONFIG_T>(data, res, weights, biases);
-}
-
-} // namespace nnet
-
-#endif
diff --git a/hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_conv2d_resource.h b/hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_conv2d_resource.h
deleted file mode 100644
index 73ad455..0000000
--- a/hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_conv2d_resource.h
+++ /dev/null
@@ -1,303 +0,0 @@
-#ifndef NNET_CONV2D_RESOURCE_H_
-#define NNET_CONV2D_RESOURCE_H_
-
-#include "nnet_common.h"
-#include "nnet_dense.h"
-#include "nnet_helpers.h"
-
-namespace nnet {
-
-enum class conv2d_implementation { combination, im2col, winograd };
-
-// ****************************************************************
-// im2col - General-purpose 2D Convolution algorithm
-// ****************************************************************
-
-template <class data_T, typename CONFIG_T>
-void im2col_2d_cl(data_T data[CONFIG_T::in_height * CONFIG_T::in_width * CONFIG_T::n_chan],
-                  data_T data_col[CONFIG_T::impl_filt_height * CONFIG_T::impl_filt_width * CONFIG_T::n_chan],
-                  const int row, const int col) {
-    // im2col can be unrolled fully, since number of parallel executions = filt_h x filt_w x n_chan ~ O(100) and very
-    // little DSP usage
-
-    hls_register int index = 0;
-
-FiltHeightLoop:
-    #pragma unroll
-    for (int kernel_row = 0; kernel_row < CONFIG_T::impl_filt_height; kernel_row++) {
-        hls_register int input_row =
-            -CONFIG_T::pad_top + kernel_row * CONFIG_T::dilation_height + row * CONFIG_T::stride_height;
-
-    FiltWidthLoop:
-        #pragma unroll
-        for (int kernel_col = 0; kernel_col < CONFIG_T::impl_filt_width; kernel_col++) {
-            hls_register int input_col =
-                -CONFIG_T::pad_left + kernel_col * CONFIG_T::dilation_width + col * CONFIG_T::stride_width;
-
-        ChannelLoop:
-            #pragma unroll
-            for (int channel = 0; channel < CONFIG_T::n_chan; channel++) {
-                if (input_row >= 0 && input_row < CONFIG_T::in_height && input_col >= 0 && input_col < CONFIG_T::in_width) {
-                    data_col[index++] =
-                        data[input_row * CONFIG_T::in_width * CONFIG_T::n_chan + input_col * CONFIG_T::n_chan + channel];
-                } else {
-                    data_col[index++] = 0;
-                }
-            }
-        }
-    }
-}
-
-template <class data_T, class res_T, typename CONFIG_T>
-void conv_2d_im2col_cl(data_T data[CONFIG_T::in_height * CONFIG_T::in_width * CONFIG_T::n_chan],
-                       res_T res[CONFIG_T::out_height * CONFIG_T::out_width * CONFIG_T::n_filt],
-                       const typename CONFIG_T::weight_t weights[CONFIG_T::impl_filt_height * CONFIG_T::impl_filt_width *
-                                                                 CONFIG_T::n_chan * CONFIG_T::n_filt],
-                       const typename CONFIG_T::bias_t biases[CONFIG_T::n_filt]) {
-    // im2col performs no filter transformations; therefore, filter size remains constant
-    assert(CONFIG_T::filt_height == CONFIG_T::impl_filt_height && CONFIG_T::filt_width == CONFIG_T::impl_filt_width);
-
-    // Unroll factors for loop traversing input image, derived from parallelisation_factor
-    // Outer loop only gets unrolled after inner loop is fully unrolled
-    static constexpr int pfc = MIN(CONFIG_T::parallelisation_factor, CONFIG_T::out_width);
-    static constexpr int pfr = MIN((CONFIG_T::parallelisation_factor / pfc), CONFIG_T::out_height);
-
-HeightLoop:
-    #pragma unroll pfr
-    for (int i = 0; i < CONFIG_T::out_height; i++) {
-    WidthLoop:
-        #pragma unroll pfc
-        #pragma ii CONFIG_T::reuse_factor
-        for (int j = 0; j < CONFIG_T::out_width; j++) {
-            // Loop variables should always be declared in the deepest scope available
-            // See Intel's HLS - Loop Best Practices
-            // https://www.intel.com/content/www/us/en/docs/programmable/683152/22-2/declare-variables-in-the-deepest-scope.html
-
-            hls_register data_T data_col[CONFIG_T::impl_filt_height * CONFIG_T::impl_filt_width * CONFIG_T::n_chan];
-            im2col_2d_cl<data_T, CONFIG_T>(data, data_col, i, j);
-
-            hls_register res_T res_col[CONFIG_T::n_filt];
-            dense_resource<data_T, res_T, typename CONFIG_T::mult_config>(data_col, res_col, weights, biases);
-
-            // Unroll fully, since
-            // (1) n_filt is usually low in io_parallel (< 32)
-            // (2) no complex operations handled in loop, this loop performs a simple register writing operation
-        FiltLoop:
-            #pragma unroll
-            for (int k = 0; k < CONFIG_T::n_filt; k++) {
-                res[i * CONFIG_T::out_width * CONFIG_T::n_filt + j * CONFIG_T::n_filt + k] = res_col[k];
-            }
-        }
-    }
-}
-
-// ****************************************************************
-// 2D Convolution for 3x3 kernels from Winograd's algorithm
-// ****************************************************************
-
-// Explicitly transformed input (B'dB) needed for Winograd calculation, as explained by Lavin & Gray (2015)
-template <typename data_T, typename res_T>
-inline void winograd_transform_input_tile_3x3_kernel(const data_T I[16], res_T D[16]) {
-    D[0] = I[0] - I[2] - I[8] + I[10];
-    D[1] = I[1] + I[2] - I[9] - I[10];
-    D[2] = -I[1] + I[2] + I[9] - I[10];
-    D[3] = I[1] - I[3] - I[9] + I[11];
-
-    D[4] = I[4] - I[6] + I[8] - I[10];
-    D[5] = I[5] + I[6] + I[9] + I[10];
-    D[6] = -I[5] + I[6] - I[9] + I[10];
-    D[7] = I[5] - I[7] + I[9] - I[11];
-
-    D[8] = -I[4] + I[6] + I[8] - I[10];
-    D[9] = -I[5] - I[6] + I[9] + I[10];
-    D[10] = I[5] - I[6] - I[9] + I[10];
-    D[11] = -I[5] + I[7] + I[9] - I[11];
-
-    D[12] = I[4] - I[6] - I[12] + I[14];
-    D[13] = I[5] + I[6] - I[13] - I[14];
-    D[14] = -I[5] + I[6] + I[13] - I[14];
-    D[15] = I[5] - I[7] - I[13] + I[15];
-}
-
-template <class data_T, class res_T, typename CONFIG_T>
-void winograd_conv2d_3x3_kernel_cl(
-    data_T data[CONFIG_T::in_height * CONFIG_T::in_width * CONFIG_T::n_chan],
-    res_T res[CONFIG_T::out_height * CONFIG_T::out_width * CONFIG_T::n_filt],
-    const typename CONFIG_T::weight_t
-        weights[CONFIG_T::n_filt * CONFIG_T::n_chan * CONFIG_T::impl_filt_height * CONFIG_T::impl_filt_width],
-    const typename CONFIG_T::bias_t biases[CONFIG_T::n_filt]) {
-    // Ensure Winograd conditions are met
-    assert(CONFIG_T::filt_height == 3 && CONFIG_T::filt_width == 3);
-    assert(CONFIG_T::stride_height == 1 && CONFIG_T::stride_width == 1);
-    assert(CONFIG_T::pad_left == CONFIG_T::pad_right && CONFIG_T::pad_top == CONFIG_T::pad_bottom);
-    assert(CONFIG_T::out_height > 2 && CONFIG_T::out_width > 2);
-
-    // Unroll factors for loop traversing input image, derived from parallelisation_factor
-    // Outer loop only gets unrolled after inner loop is fully unrolled
-    static constexpr int pfc = MIN(CONFIG_T::parallelisation_factor, DIV_ROUNDUP(CONFIG_T::out_width, 2));
-    static constexpr int pfr = MIN((CONFIG_T::parallelisation_factor / pfc), DIV_ROUNDUP(CONFIG_T::out_height, 2));
-
-    // Initialise result to bias
-    // Unroll fully, as loop performs a simple operation - assigning the outputs to a constant value
-    #pragma unroll
-    for (int i = 0; i < CONFIG_T::out_height * CONFIG_T::out_width; i++) {
-        int offset = CONFIG_T::n_filt * i;
-        #pragma unroll
-        for (int f = 0; f < CONFIG_T::n_filt; f++) {
-            res[offset + f] = static_cast<res_T>(biases[f]);
-        }
-    }
-
-HeightLoop:
-    #pragma unroll pfr
-    for (int row = 0; row < CONFIG_T::out_height; row += 2) {
-    WidthLoop:
-        #pragma unroll pfc
-        for (int col = 0; col < CONFIG_T::out_width; col += 2) {
-        ChannelLoop:
-            #pragma unroll
-            for (int channel = 0; channel < CONFIG_T::n_chan; channel++) {
-                // Get current 4x4 tile
-                hls_register data_T T[16];
-                hls_register typename CONFIG_T::accum_t D[16];
-                hls_register uint8_t p = 0;
-
-                #pragma unroll
-                for (int r = row - (int)CONFIG_T::pad_top; r < row + 4 - (int)CONFIG_T::pad_top; r++) {
-                    #pragma unroll
-                    for (int c = col - (int)CONFIG_T::pad_left; c < col + 4 - (int)CONFIG_T::pad_left; c++) {
-                        if (r < CONFIG_T::in_height && r >= 0 && c < CONFIG_T::in_width && c >= 0) {
-                            T[p++] = data[r * CONFIG_T::in_width * CONFIG_T::n_chan + c * CONFIG_T::n_chan + channel];
-                        } else {
-                            T[p++] = 0;
-                        }
-                    }
-                }
-
-                // Transform input tile
-                winograd_transform_input_tile_3x3_kernel<data_T, typename CONFIG_T::accum_t>(T, D);
-
-                #pragma unroll
-                for (int filter = 0; filter < CONFIG_T::n_filt; filter++) {
-                    hls_register int filter_offset = 16 * (CONFIG_T::n_chan * filter + channel);
-
-                    // Hadamard product between transformed input tile and kernel
-                    hls_register typename CONFIG_T::accum_t Y[16];
-                    #pragma unroll
-                    for (int i = 0; i < 16; i++) {
-                        Y[i] = static_cast<typename CONFIG_T::accum_t>(D[i] * weights[filter_offset + i]);
-                    }
-
-                    // Explicitly transform intermediate result Z = A'YA and save to output
-                    res[CONFIG_T::n_filt * (row * CONFIG_T::out_width + col) + filter] +=
-                        static_cast<res_T>(Y[0] + Y[1] + Y[2] + Y[4] + Y[5] + Y[6] + Y[8] + Y[9] + Y[10]);
-                    if ((col + 1) < CONFIG_T::out_width)
-                        res[CONFIG_T::n_filt * (row * CONFIG_T::out_width + (col + 1)) + filter] +=
-                            static_cast<res_T>(Y[1] - Y[2] - Y[3] + Y[5] - Y[6] - Y[7] + Y[9] - Y[10] - Y[11]);
-                    if ((row + 1) < CONFIG_T::out_height)
-                        res[CONFIG_T::n_filt * ((row + 1) * CONFIG_T::out_width + col) + filter] +=
-                            static_cast<res_T>(Y[4] + Y[5] + Y[6] - Y[8] - Y[9] - Y[10] - Y[12] - Y[13] - Y[14]);
-                    if ((row + 1) < CONFIG_T::out_height && (col + 1) < CONFIG_T::out_width)
-                        res[CONFIG_T::n_filt * ((row + 1) * CONFIG_T::out_width + (col + 1)) + filter] +=
-                            static_cast<res_T>(Y[5] - Y[6] - Y[7] - Y[9] + Y[10] + Y[11] - Y[13] + Y[14] + Y[15]);
-                }
-            }
-        }
-    }
-}
-
-// ****************************************************************
-// 2D Convolution for 1x1 kernels using optimized im2col
-// ****************************************************************
-
-template <class data_T, typename CONFIG_T>
-void im2col_2d_pointwise_cl(data_T data[CONFIG_T::in_height * CONFIG_T::in_width * CONFIG_T::n_chan],
-                            data_T data_col[CONFIG_T::n_chan], const int row, const int col) {
-    // pointwise_im2col can be unrolled fully, only one loop with n_chan iterations
-
-    hls_register int index = 0;
-
-ChannelLoop:
-    #pragma unroll
-    for (int channel = 0; channel < CONFIG_T::n_chan; channel++) {
-
-        hls_register int input_row = -CONFIG_T::pad_top + row * CONFIG_T::stride_height;
-        hls_register int input_col = -CONFIG_T::pad_left + col * CONFIG_T::stride_width;
-
-        if (input_row >= 0 && input_row < CONFIG_T::in_height && input_col >= 0 && input_col < CONFIG_T::in_width) {
-            data_col[index++] =
-                data[input_row * CONFIG_T::in_width * CONFIG_T::n_chan + input_col * CONFIG_T::n_chan + channel];
-        } else {
-            data_col[index++] = 0;
-        }
-    }
-}
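// [Editor's note - illustrative arithmetic, not part of the patch] The 3x3
// Winograd kernel above is F(2x2, 3x3): each 4x4 input tile yields a 2x2
// output block from 16 multiplications per channel/filter pair, where direct
// convolution would need 4 outputs x 9 taps = 36 - a 2.25x reduction in
// multiplies, paid for with the extra additions in the B'dB and A'YA
// transforms.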
image, derived from parallelisation_factor - // Outer loop only gets unrolled after inner loop is fully unrolled - static constexpr int pfc = MIN(CONFIG_T::parallelisation_factor, CONFIG_T::out_width); - static constexpr int pfr = MIN((CONFIG_T::parallelisation_factor / pfc), CONFIG_T::out_height); - -HeightLoop: - #pragma unroll pfr - for (int row = 0; row < CONFIG_T::out_height; row++) { - WidthLoop: - #pragma unroll pfc - #pragma ii CONFIG_T::reuse_factor - for (int col = 0; col < CONFIG_T::out_width; col++) { - // Loop variables should always be declared in the deepest scope available - // See Intel's HLS - Loop Best Practices - // https://www.intel.com/content/www/us/en/docs/programmable/683152/22-2/declare-variables-in-the-deepest-scope.html - - hls_register data_T data_col[CONFIG_T::n_chan]; - im2col_2d_pointwise_cl(data, data_col, row, col); - - hls_register res_T res_col[CONFIG_T::n_filt]; - dense_resource(data_col, res_col, weights, biases); - - FiltLoop: - #pragma unroll - for (int k = 0; k < CONFIG_T::n_filt; k++) { - res[row * CONFIG_T::out_width * CONFIG_T::n_filt + col * CONFIG_T::n_filt + k] = res_col[k]; - } - } - } -} - -// **************************************************************** -// Top-level function - handles different implementations -// **************************************************************** -template -void conv_2d_resource_cl(data_T data[CONFIG_T::in_height * CONFIG_T::in_width * CONFIG_T::n_chan], - res_T res[CONFIG_T::out_height * CONFIG_T::out_width * CONFIG_T::n_filt], - const typename CONFIG_T::weight_t weights[CONFIG_T::impl_filt_height * CONFIG_T::impl_filt_width * - CONFIG_T::n_chan * CONFIG_T::n_filt], - const typename CONFIG_T::bias_t biases[CONFIG_T::n_filt]) { - static constexpr bool winograd_conditions = - // Winograd's minimal filtering algorithm not applicable to stride != 1 - CONFIG_T::stride_height == 1 && CONFIG_T::stride_width == 1 && - - // Intel HLS will fail to pipeline the entire component if the Winograd loop only runs once - CONFIG_T::out_height > 2 && CONFIG_T::out_width > 2 && - - // Verify user opted for Winograd - CONFIG_T::implementation == nnet::conv2d_implementation::combination || - CONFIG_T::implementation == nnet::conv2d_implementation::winograd; - - if (CONFIG_T::filt_height == 3 && CONFIG_T::filt_width == 3 && winograd_conditions) { - winograd_conv2d_3x3_kernel_cl(data, res, weights, biases); - } else { - conv_2d_im2col_cl(data, res, weights, biases); - } -} - -} // namespace nnet - -#endif diff --git a/hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_conv2d_stream.h b/hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_conv2d_stream.h deleted file mode 100644 index 1b3fb31..0000000 --- a/hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_conv2d_stream.h +++ /dev/null @@ -1,238 +0,0 @@ -#ifndef NNET_CONV2D_STREAM_H_ -#define NNET_CONV2D_STREAM_H_ - -#include "nnet_dense.h" -#include "nnet_types.h" - -namespace nnet { - -/* - * void kernel_shift(shift_buffer, kernel_window) - * - * Args: - * shift_buffer - array elements popped from the line the buffer during the shift line buffer operation - * kernel_window - array of values from the input curently being convolved with the kernel - * - * Values from shift_buffer are inserted into kernel_window, updating the values to be convolved - */ -template -void kernel_shift_2d( - typename data_T::value_type shift_buffer[CONFIG_T::filt_height][CONFIG_T::n_chan], - typename data_T::value_type kernel_window[CONFIG_T::filt_width * CONFIG_T::filt_height * 
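// Note (sketch, not from the original sources): the winograd_conditions
// expression above mixes && and || without parentheses, so
// `implementation == winograd` on its own satisfies the whole predicate
// regardless of the stride and output-size checks (&& binds tighter than ||).
// The output-bounds guards in the 3x3 kernel also compare col against
// out_height and row against out_width, which only coincide for square
// outputs. A sketch of the grouping the comments appear to intend, assuming
// the conv2d_implementation enum declared alongside the original header:
template <class CONFIG_T> constexpr bool winograd_applicable() {
    return CONFIG_T::stride_height == 1 && CONFIG_T::stride_width == 1 &&
           CONFIG_T::out_height > 2 && CONFIG_T::out_width > 2 &&
           (CONFIG_T::implementation == nnet::conv2d_implementation::combination ||
            CONFIG_T::implementation == nnet::conv2d_implementation::winograd);
}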
CONFIG_T::n_chan]) { -/* - * Manually shift kernel_window by one step to the left - * Not possible to use nnet::shift_reg as the kernel window is convolved with the kernel weights using dense matrix - * multiplication Dense matrix multiplication is only implemented for arrays However, provided certain timing constrains are - * met, Intel HLS automatically infers a shift operation and implements kernel_window as a shift register To verify, see - * synthesis report in report.html > Area Analysis of System - */ -KernelShiftWidth: - #pragma unroll - for (int col = 0; col < CONFIG_T::filt_width - 1; col++) { - KernelShiftHeight: - #pragma unroll - for (int row = 0; row < CONFIG_T::filt_height; row++) { - KernelShiftChannel: - #pragma unroll - for (int channel = 0; channel < CONFIG_T::n_chan; channel++) { - kernel_window[row * CONFIG_T::filt_width * CONFIG_T::n_chan + col * CONFIG_T::n_chan + channel] = - kernel_window[row * CONFIG_T::filt_width * CONFIG_T::n_chan + (col + 1) * CONFIG_T::n_chan + channel]; - } - } - } - -// Insert shift_buffer values into the last column of the kernel window -KernelPushHeight: - #pragma unroll - for (int col = 0; col < CONFIG_T::filt_height; col++) { - KernelPushChannel: - #pragma unroll - for (int channel = 0; channel < CONFIG_T::n_chan; channel++) { - kernel_window[(CONFIG_T::filt_width - 1) * CONFIG_T::n_chan + col * CONFIG_T::filt_width * CONFIG_T::n_chan + - channel] = shift_buffer[col][channel]; - } - } -} - -/* - * void shift_line_buffer(in_element, line_buffer, shift_buffer) - * - * Args: - * in_element - current elements from input image, data_T type is usually nnet::array, size of array corresponds to number - * of channels line_buffer - chained array of shift registers, one for each row of the kernel and channel shift_buffer - - * array elements popped from the line the buffer during the shift operation - * - * Values from in_element are inserted into the line buffer, causing all other elements to be shifted by one - * Popped elements are later used to update the kernel window, during the kernel_shift operation - */ -template -void shift_line_buffer_2d( - const data_T &in_elem, - nnet::shift_reg - line_buffer[CONFIG_T::filt_height - 1][CONFIG_T::n_chan], - typename data_T::value_type shift_buffer[CONFIG_T::filt_height][CONFIG_T::n_chan]) { -// For every channel, insert the incoming pixel at end of the shift buffer -UpdateBuffer: - #pragma unroll - for (int channel = 0; channel < CONFIG_T::n_chan; channel++) { - shift_buffer[CONFIG_T::filt_height - 1][channel] = in_elem[channel]; - } - -// Shift line buffer and save popped values to shift buffer -LineBufferDataIn: - #pragma unroll - for (int channel = 0; channel < CONFIG_T::n_chan; channel++) { - LineBufferShift: - #pragma unroll - for (unsigned col = 1; col < CONFIG_T::filt_height; col++) { - // Shift the line buffer, return the popped pixel - typename data_T::value_type pop = - line_buffer[col - 1][channel].shift(shift_buffer[CONFIG_T::filt_height - col][channel]); - - // Place popped pixed into the shift buffer, one row above - shift_buffer[CONFIG_T::filt_height - col - 1][channel] = pop; - } - } -} - -/* - * void compute_output_buffer(in_element, res_stream, line_buffer, kernel_window, weights, biases) - * - * Args: - * in_element - current elements from input image, data_T type is usually nnet::array, size of array corresponds to number - * of channels res_stream - output stream, passed by reference to allow direct writing line_buffer - chained array of shift - * registers, one for each row of the 
kernel and channel kernel_window - array of values from the input curently convolved - * with the kernel weights - Conv1D/Conv2D layer weights biases - Conv1D/Conv2D layer biases - * - * Function executes 4 steps: - * (1) Shift line buffer - updates the contents of the chained shift registers, inserting the new inputs and removing last - * elements (2) Kernel shift - updates the elements of the kernel window, by storing the new inputs and popped elements from - * the line buffer (3) Matrix mulitplication - performs dense matrix multiplication between the current input window and - * kernel weights (4) Counter housekeeping - keeps track of current pixel and stride - */ -template -void compute_output_buffer_2d( - const data_T &in_elem, stream &res_stream, - nnet::shift_reg - line_buffer[MAX(CONFIG_T::filt_height - 1, 1)][CONFIG_T::n_chan], - typename data_T::value_type kernel_window[CONFIG_T::filt_height * CONFIG_T::filt_width * CONFIG_T::n_chan], - const typename CONFIG_T::weight_t weights[CONFIG_T::kernel_size * CONFIG_T::n_chan * CONFIG_T::n_filt], - const typename CONFIG_T::bias_t biases[CONFIG_T::n_filt]) { - // Thresholds - static constexpr int lShiftX = CONFIG_T::filt_width - 1; - static constexpr int lShiftY = CONFIG_T::filt_height - 1; - - // X, Y position pixels - static int pX = 0; - static int pY = 0; - - // X, Y strides - static int sX = 0; - static int sY = 0; - - // Step 1 - Shift line buffer - hls_register typename data_T::value_type shift_buffer[CONFIG_T::filt_height][CONFIG_T::n_chan]; - nnet::shift_line_buffer_2d(in_elem, line_buffer, shift_buffer); - - // Step 2 - Kernel shift - nnet::kernel_shift_2d(shift_buffer, kernel_window); - - // Check to see if we have a full kernel - if ((sX - lShiftX) == 0 && (sY - lShiftY) == 0 && pY > (lShiftY - 1) && pX > (lShiftX - 1)) { - // Step 3 - Dense matrix multiplication - hls_register typename res_T::value_type res_out[CONFIG_T::n_filt]; - dense_resource( - kernel_window, res_out, weights, biases); - - // Write result to output stream - hls_register res_T res_pack; - CastLoop: - #pragma unroll - for (int channel = 0; channel < CONFIG_T::n_filt; channel++) { - res_pack[channel] = res_out[channel]; - } - res_stream.write(res_pack); - } - - // Reached end of image - if ((pX + 1) == (CONFIG_T::in_width + CONFIG_T::pad_left + CONFIG_T::pad_right) && - (pY + 1) == (CONFIG_T::in_height + CONFIG_T::pad_top + CONFIG_T::pad_bottom)) { - pX = 0; - sX = 0; - pY = 0; - sY = 0; - // Reached end of row - } else if ((pX + 1) == (CONFIG_T::in_width + CONFIG_T::pad_left + CONFIG_T::pad_right)) { - pX = 0; - sX = 0; - pY++; - sY = ((sY - lShiftY) == 0) ? (sY - CONFIG_T::stride_height + 1) : (sY + 1); - // Same row, same colum, therefore, move to the right - } else { - pX++; - sX = ((sX - lShiftX) == 0) ? 
(sX - CONFIG_T::stride_width + 1) : (sX + 1); - } -} - -template -void conv_2d_cl(stream &data, stream &res, - const typename CONFIG_T::weight_t - weights[CONFIG_T::filt_height * CONFIG_T::filt_width * CONFIG_T::n_chan * CONFIG_T::n_filt], - const typename CONFIG_T::bias_t biases[CONFIG_T::n_filt]) { - - // Line buffer and kernel window - hls_register static nnet::shift_reg - line_buffer[MAX(CONFIG_T::filt_height - 1, 1)][CONFIG_T::n_chan]; - hls_register static - typename data_T::value_type kernel_window[CONFIG_T::filt_height * CONFIG_T::filt_width * CONFIG_T::n_chan]; - - // An array of length CONFIG_T::n_chan, with elements set to zero (padding for each channel) - static const data_T padds(0); - -// Padding above input image -PaddingTopHeight: - #pragma loop_coalesce 2 - for (int row = 0; row < CONFIG_T::pad_top; row++) { - PaddingTopWidth: - for (int col = 0; col < CONFIG_T::pad_left + CONFIG_T::in_width + CONFIG_T::pad_right; col++) { - compute_output_buffer_2d(padds, res, line_buffer, kernel_window, weights, biases); - } - } - -ReadInputHeight: - #pragma loop_coalesce 2 - for (int row = 0; row < CONFIG_T::in_height; row++) { - // Input image left-side padding - PaddingLeftWidth: - for (int col = 0; col < CONFIG_T::pad_left; col++) { - compute_output_buffer_2d(padds, res, line_buffer, kernel_window, weights, biases); - } - - // Read input image - ReadInputWidth: - for (int col = 0; col < CONFIG_T::in_width; col++) { - compute_output_buffer_2d(data.read(), res, line_buffer, kernel_window, weights, biases); - } - - // Input image right-side padding - PaddingRightWidth: - for (int col = 0; col < CONFIG_T::pad_right; col++) { - compute_output_buffer_2d(padds, res, line_buffer, kernel_window, weights, biases); - } - } - -// Padding below input image -PaddingBottomHeight: - #pragma loop_coalesce 2 - for (int row = 0; row < CONFIG_T::pad_bottom; row++) { - PaddingBottomWidth: - for (int col = 0; col < CONFIG_T::pad_left + CONFIG_T::in_width + CONFIG_T::pad_right; col++) { - compute_output_buffer_2d(padds, res, line_buffer, kernel_window, weights, biases); - } - } -} - -} // namespace nnet - -#endif diff --git a/hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_dense.h b/hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_dense.h deleted file mode 100644 index aba0803..0000000 --- a/hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_dense.h +++ /dev/null @@ -1,169 +0,0 @@ -#ifndef NNET_DENSE_LARGE_H_ -#define NNET_DENSE_LARGE_H_ - -#include "nnet_common.h" -#include "nnet_helpers.h" -#include "nnet_mult.h" - -namespace nnet { - -struct dense_config { - // Internal data type definitions - typedef float bias_t; - typedef float weight_t; - typedef float accum_t; - - // Layer Sizes - static const unsigned n_in = 10; - static const unsigned n_out = 10; - - static const unsigned reuse_factor = 1; - static const unsigned block_factor = 1; // DIV_ROUNDUP(CONFIG_T::n_in*CONFIG_T::n_out, CONFIG_T::reuse_factor); - static const unsigned multiplier_limit = 1; // DIV_ROUNDUP(CONFIG_T::n_in*CONFIG_T::n_out, multfactor) - static const unsigned multiplier_factor = 1; // min n_in, rf - static const unsigned multiplier_scale = 1; // M_LIMIT/CONFIG_T::n_out; - static const unsigned reciprocal = 1; // 2^35 / 25 - static const unsigned rf_pad = 0; - static const unsigned bf_pad = 0; - // Resource reuse info - static const unsigned io_type = io_parallel; - static const bool store_weights_in_bram = false; - static const unsigned n_zeros = 0; - // partitioning arrays cyclically to go with roll 
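// Note (sketch, not from the original sources): conv_2d_cl above walks the
// padded image exactly once, invoking compute_output_buffer_2d for every
// pixel, with padding supplied as zero beats rather than stored in memory.
// A host-side check of the resulting call count, assuming a 4x4 input
// padded by 1 on every side:
constexpr int calls_per_image(int h, int w, int pt, int pb, int pl, int pr) {
    return (h + pt + pb) * (w + pl + pr);
}
static_assert(calls_per_image(4, 4, 1, 1, 1, 1) == 36,
              "one invocation per pixel of the padded image");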
factors? - - // Default multiplication - template using product = nnet::product::mult; -}; - -template -void dense_rf_gt(data_T data[CONFIG_T::n_in], res_T res[CONFIG_T::n_out], - const typename CONFIG_T::weight_t weights[CONFIG_T::reuse_factor_rounded * CONFIG_T::block_factor_rounded], - const typename CONFIG_T::bias_t biases[CONFIG_T::n_out]) { - assert((CONFIG_T::multiplier_limit % CONFIG_T::n_out == 0 || CONFIG_T::reuse_factor >= CONFIG_T::n_in) && - "The current Reuse Factor is not allowed"); - assert((CONFIG_T::reuse_factor > CONFIG_T::n_in) && "This function is correct only for RF > N_IN"); - //#pragma ii CONFIG_T::reuse_factor - hls_register typename CONFIG_T::accum_t acc[CONFIG_T::n_out]; -Load: - #pragma unroll - for (int iacc = 0; iacc < CONFIG_T::n_out; iacc++) { - acc[iacc] = (typename CONFIG_T::accum_t)biases[iacc]; - } - hls_register int out_index[CONFIG_T::reuse_factor][CONFIG_T::block_factor]; - hls_register int d_index[CONFIG_T::reuse_factor][CONFIG_T::block_factor]; - - #pragma unroll - for (int ir = 0; ir < CONFIG_T::reuse_factor; ir++) { - #pragma unroll - for (int im = 0; im < CONFIG_T::block_factor; im++) { - uint32 w_index = ir + CONFIG_T::reuse_factor * im; - out_index[ir][im] = (w_index / CONFIG_T::multiplier_factor).to_int(); - d_index[ir][im] = w_index % CONFIG_T::n_in; - } - } -Product1: - #pragma nofusion - #pragma speculated_iterations 0 - for (int ir = 0; ir < CONFIG_T::reuse_factor; ir++) { - hls_register typename CONFIG_T::accum_t tmp_acc[CONFIG_T::block_factor]; - Product2: - #pragma unroll - for (int im = 0; im < CONFIG_T::block_factor; im++) { - uint32 w_index = ir + (CONFIG_T::reuse_factor_rounded)*im; - if (w_index >= CONFIG_T::reuse_factor_rounded * CONFIG_T::block_factor_rounded) - continue; - int data_index = d_index[ir][im]; - // Modified this - tmp_acc[im] = - CONFIG_T::template product::product(data[data_index], weights[w_index]); - } - hls_register typename CONFIG_T::accum_t mult[CONFIG_T::multiplier_limit]; - ResetMult: - #pragma unroll - for (int imult = 0; imult < CONFIG_T::multiplier_limit; imult++) { - mult[imult] = 0; - } - AccumLoop1: - #pragma unroll - for (int im = 0; im < CONFIG_T::block_factor; im++) { - int o_index = out_index[ir][im]; - if (o_index >= CONFIG_T::n_out) - continue; // check out of bounds - mult[o_index] += tmp_acc[im]; - } - AccumLoop2: - #pragma unroll - for (int im = 0; im < CONFIG_T::multiplier_limit; im++) { - acc[im] += mult[im]; - } - } -Store: - #pragma unroll - for (int ires = 0; ires < CONFIG_T::n_out; ires++) { - res[ires] = cast(acc[ires]); // acc[jj]; - } -} -template -void dense_rf_lt(data_T data[CONFIG_T::n_in], res_T res[CONFIG_T::n_out], - const typename CONFIG_T::weight_t weights[CONFIG_T::reuse_factor_rounded * CONFIG_T::block_factor_rounded], - const typename CONFIG_T::bias_t biases[CONFIG_T::n_out]) { - assert((CONFIG_T::multiplier_limit % CONFIG_T::n_out == 0 || CONFIG_T::reuse_factor >= CONFIG_T::n_in) && - "The current Reuse Factor is not allowed"); - assert((CONFIG_T::multiplier_limit == CONFIG_T::block_factor) && "This function is correct only for RF <= N_IN"); - - hls_register typename CONFIG_T::accum_t acc[CONFIG_T::n_out]; -InitAccum: - #pragma unroll - for (int iacc = 0; iacc < CONFIG_T::n_out; iacc++) { - acc[iacc] = (typename CONFIG_T::accum_t)biases[iacc]; - } -ReuseLoop: - #pragma nofusion - #pragma speculated_iterations 0 - for (int ir = 0; ir < CONFIG_T::reuse_factor; ir++) { - hls_register typename CONFIG_T::accum_t mult[CONFIG_T::block_factor]; - MultLoop: - #pragma unroll - for 
(int im = 0, in_index = ir; im < CONFIG_T::block_factor; im++) { - uint32 w_index = ir + (CONFIG_T::reuse_factor_rounded)*im; - if (ir + CONFIG_T::reuse_factor * im >= CONFIG_T::n_in * CONFIG_T::n_out) - continue; - // Modified this - mult[im] = - CONFIG_T::template product::product(data[in_index], weights[w_index]); - in_index += CONFIG_T::reuse_factor; - if (in_index >= CONFIG_T::n_in) - in_index = ir; - } - AccumLoop: - #pragma unroll - for (int im = 0, out_index = 0, acc_step = 0; im < CONFIG_T::block_factor; im++) { - acc[out_index] += mult[im]; - if (acc_step + 1 >= CONFIG_T::multiplier_scale) { - acc_step = 0; - out_index++; - } else { - acc_step++; - } - } - } -// Cast to "res_t" type -Result: - #pragma unroll - for (int ires = 0; ires < CONFIG_T::n_out; ires++) { - res[ires] = cast(acc[ires]); - } -} -template -void dense_resource( - data_T data[CONFIG_T::n_in], res_T res[CONFIG_T::n_out], - const typename CONFIG_T::weight_t weights[CONFIG_T::reuse_factor_rounded * CONFIG_T::block_factor_rounded], - const typename CONFIG_T::bias_t biases[CONFIG_T::n_out]) { - if (CONFIG_T::reuse_factor <= CONFIG_T::n_in) { - dense_rf_lt(data, res, weights, biases); - } else { - dense_rf_gt(data, res, weights, biases); - } -} -} // namespace nnet -#endif diff --git a/hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_dense_compressed.h b/hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_dense_compressed.h deleted file mode 100644 index 5619e29..0000000 --- a/hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_dense_compressed.h +++ /dev/null @@ -1,80 +0,0 @@ -#ifndef NNET_COMPRESSED_LAYER_H_ -#define NNET_COMPRESSED_LAYER_H_ - -#include "nnet_common.h" -#include "nnet_dense.h" - -namespace nnet { - -template -void dense_compressed(data_T data[CONFIG_T::n_in], res_T res[CONFIG_T::n_out], - const typename CONFIG_T::weight_t weights[CONFIG_T::n_nonzeros], - const typename CONFIG_T::bias_t biases[CONFIG_T::n_out]) { - - hls_register typename CONFIG_T::accum_t acc[CONFIG_T::n_out]; - -InitAccum: - #pragma unroll - for (int i = 0; i < CONFIG_T::n_out; i++) { - acc[i] = (typename CONFIG_T::accum_t)(biases[i]); - } - - hls_register int out_index[CONFIG_T::reuse_factor][CONFIG_T::compressed_block_factor]; - hls_register data_T inputs[CONFIG_T::reuse_factor][CONFIG_T::compressed_block_factor]; - - #pragma unroll - for (int ir = 0; ir < CONFIG_T::reuse_factor; ir++) { - #pragma unroll - for (int im = 0; im < CONFIG_T::compressed_block_factor; im++) { - uint32 w = ir + CONFIG_T::reuse_factor * im; - inputs[ir][im] = data[weights[w].row_index]; - out_index[ir][im] = weights[w].col_index; - } - } -ReuseLoop: - #pragma nofusion - #pragma speculated_iterations 0 - for (int ir = 0; ir < CONFIG_T::reuse_factor; ir++) { - hls_register typename CONFIG_T::accum_t mult[CONFIG_T::compressed_block_factor]; - CompressedMultLoop: - #pragma unroll - for (int im = 0; im < CONFIG_T::compressed_block_factor; im++) { - uint32 w = ir + CONFIG_T::reuse_factor * im; - // if (w >= CONFIG_T::reuse_factor*CONFIG_T::compressed_block_factor) continue; - typename CONFIG_T::accum_t prod = mult[im] = - CONFIG_T::template product::product(inputs[0][im], weights[w].weight); - #pragma unroll - for (int is = 0; is < CONFIG_T::reuse_factor - 1; is++) { - inputs[is][im] = inputs[is + 1][im]; - } - } - hls_register typename CONFIG_T::accum_t tmp_acc[CONFIG_T::n_out]; - ResetMult: - #pragma unroll - for (int tacc = 0; tacc < CONFIG_T::n_out; tacc++) { - tmp_acc[tacc] = 0; - } - AccumLoop1: - #pragma unroll - for (int im = 0; im < 
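// Note (sketch under stated assumptions): with reuse_factor == 1,
// multiplier_factor == 1 and multiplier_scale == n_in (per the dense_config
// comments above), so the blocked index arithmetic in dense_rf_lt reduces to
// a plain row-major traversal. A float reference of that special case; the
// RF > 1 cases interleave the weight array by block and are not captured here:
template <int N_IN, int N_OUT>
void dense_rf1_reference(const float data[N_IN], float res[N_OUT],
                         const float weights[N_IN * N_OUT], const float biases[N_OUT]) {
    for (int o = 0; o < N_OUT; o++) {
        float acc = biases[o];
        for (int i = 0; i < N_IN; i++)
            acc += data[i] * weights[o * N_IN + i]; // out_index advances once per n_in weights
        res[o] = acc;
    }
}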
CONFIG_T::compressed_block_factor; im++) { - int col = out_index[ir][im]; - tmp_acc[col] += mult[im]; - } - AccumLoop2: - #pragma unroll - for (int im = 0; im < CONFIG_T::n_out; im++) { - acc[im] += tmp_acc[im]; - } - } - -// Cast to "res_t" type -ResultLoop: - #pragma unroll - for (unsigned i = 0; i < CONFIG_T::n_out; i++) { - res[i] = cast(acc[i]); - } -} - -} // namespace nnet - -#endif diff --git a/hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_dense_stream.h b/hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_dense_stream.h deleted file mode 100644 index 5a9b385..0000000 --- a/hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_dense_stream.h +++ /dev/null @@ -1,46 +0,0 @@ -#ifndef NNET_DENSE_STREAM_H_ -#define NNET_DENSE_STREAM_H_ - -#include "nnet_common.h" -#include "nnet_dense.h" -#include "nnet_types.h" - -namespace nnet { - -template -void dense_resource(stream &data_stream, stream &res_stream, - const typename CONFIG_T::weight_t weights[CONFIG_T::n_in * CONFIG_T::n_out], - const typename CONFIG_T::bias_t biases[CONFIG_T::n_out]) { - hls_register typename data_T::value_type data[CONFIG_T::n_in]; - hls_register typename res_T::value_type res[CONFIG_T::n_out]; - -DataPrepare: - #pragma ii 1 - for (int i_in = 0; i_in < CONFIG_T::n_in / data_T::size; i_in++) { - data_T data_pack = data_stream.read(); - DataPack: - #pragma unroll - for (int i_pack = 0; i_pack < data_T::size; i_pack++) { - data[i_in * data_T::size + i_pack] = data_pack[i_pack]; - } - } - - dense_resource(data, res, weights, biases); - -ResWrite: - #pragma ii 1 - for (unsigned i_out = 0; i_out < CONFIG_T::n_out / res_T::size; i_out++) { - res_T res_pack; - ResPack: - #pragma unroll - for (int i_pack = 0; i_pack < res_T::size; i_pack++) { - res_pack[i_pack] = res[i_out * res_T::size + i_pack]; - } - - res_stream.write(res_pack); - } -} - -} // namespace nnet - -#endif diff --git a/hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_embed.h b/hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_embed.h deleted file mode 100644 index 5191239..0000000 --- a/hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_embed.h +++ /dev/null @@ -1,45 +0,0 @@ -#ifndef NNET_EMBED_H_ -#define NNET_EMBED_H_ - -#include "nnet_common.h" -#include "nnet_helpers.h" - -namespace nnet { - -struct embed_config { - // Internal data type definitions - typedef float embeddings_t; - - // (Default layer sizes, overwritten form the backend - static const unsigned n_in = 10; - static const unsigned n_out = 16; - static const unsigned vocab_size = 50; - - // Resource reuse info - static const unsigned io_type = io_parallel; - static const unsigned reuse_factor = 1; -}; - -template -void embedding(data_T data[CONFIG_T::n_in], res_T res[CONFIG_T::n_in * CONFIG_T::n_out], - const typename CONFIG_T::embeddings_t embeddings[CONFIG_T::vocab_size * CONFIG_T::n_out]) { - - /* - * Can store embeddings[] in a register, but a large multiiplexer - * is created due to a non-constant access pattern - */ - -InputSequence: - #pragma ii CONFIG_T::reuse_factor - #pragma unroll - for (int j = 0; j < CONFIG_T::n_in; j++) { - DenseEmbedding: - #pragma unroll - for (int i = 0; i < CONFIG_T::n_out; i++) { - res[j * CONFIG_T::n_out + i] = embeddings[data[j].to_uint() * CONFIG_T::n_out + i]; - } - } -} - -} // namespace nnet -#endif diff --git a/hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_embed_stream.h b/hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_embed_stream.h deleted file mode 100644 index 51e54e9..0000000 --- 
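// Note (software model of dense_compressed above, assuming a weight record
// with row_index, col_index and weight fields; the struct below is a
// stand-in for illustration, not the generated hls4ml type):
#include <cstdint>
struct sparse_weight_ref { uint16_t row_index, col_index; float weight; };

template <int N_IN, int N_OUT, int NNZ>
void dense_compressed_reference(const float data[N_IN], float res[N_OUT],
                                const sparse_weight_ref weights[NNZ],
                                const float biases[N_OUT]) {
    for (int o = 0; o < N_OUT; o++)
        res[o] = biases[o];
    for (int w = 0; w < NNZ; w++) // only stored (non-zero) weights contribute
        res[weights[w].col_index] += data[weights[w].row_index] * weights[w].weight;
}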
a/hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_embed_stream.h +++ /dev/null @@ -1,29 +0,0 @@ -#ifndef NNET_EMBED_STREAM_H_ -#define NNET_EMBED_STREAM_H_ - -namespace nnet { - -template -void embedding(stream &data, stream &res, - const typename CONFIG_T::embeddings_t embeddings[CONFIG_T::vocab_size * CONFIG_T::n_out]) { - data_T in_data = data.read(); - -InputSequence: - #pragma ii CONFIG_T::reuse_factor - for (int j = 0; j < data_T::size; j++) { - - res_T res_pack; - - DenseEmbedding: - #pragma unroll - for (int i = 0; i < CONFIG_T::n_out; i++) { - res_pack[i] = embeddings[in_data[j] * CONFIG_T::n_out + i]; - } - - res.write(res_pack); - } -} - -} // namespace nnet - -#endif diff --git a/hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_helpers.h b/hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_helpers.h deleted file mode 100644 index 3bd78c7..0000000 --- a/hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_helpers.h +++ /dev/null @@ -1,140 +0,0 @@ -#ifndef NNET_HELPERS_H -#define NNET_HELPERS_H - -#include -#include -#include -#include -#include -#include -#include -#include - -namespace nnet { - -template void convert_data(srcType *src, dstType *dst) { - for (size_t i = 0; i < SIZE; i++) { - dst[i] = dstType(src[i]); - } -} - -template void convert_data_back(srcType *src, dstType *dst) { - for (size_t i = 0; i < SIZE; i++) { - dst[i] = static_cast(src[i].to_double()); - } -} - -template void convert_data(srcType *src, stream_in &dst) { - for (size_t i = 0; i < SIZE / dstType::size; i++) { - dstType ctype; - for (size_t j = 0; j < dstType::size; j++) { - ctype[j] = typename dstType::value_type(src[i * dstType::size + j]); - } - dst.write(ctype); - } -} - -template void convert_data_back(stream_out &src, dstType *dst) { - for (size_t i = 0; i < SIZE / srcType::size; i++) { - srcType ctype = src.read(); - for (size_t j = 0; j < srcType::size; j++) { - dst[i * srcType::size + j] = dstType(ctype[j].to_double()); - } - } -} - -extern bool trace_enabled; -extern std::map *trace_outputs; -extern size_t trace_type_size; - -constexpr int ceillog2(int x) { return (x <= 2) ? 1 : 1 + ceillog2((x + 1) / 2); } - -constexpr int floorlog2(int x) { return (x < 2) ? 0 : 1 + floorlog2(x / 2); } - -constexpr int pow2(int x) { return x == 0 ? 1 : 2 * pow2(x - 1); } - -template void save_output_array(data_T *data, save_T *ptr, size_t layer_size) { - for (int i = 0; i < layer_size; i++) { - ptr[i] = static_cast(data[i].to_double()); - } -} - -template void save_output_array(stream &data, save_T *ptr, size_t layer_size) { - for (size_t i = 0; i < layer_size / data_T::size; i++) { - data_T ctype = data.read(); - for (size_t j = 0; j < data_T::size; j++) { - ptr[i * data_T::size + j] = static_cast(ctype[j].to_double()); - } - data.write(ctype); - } -} - -// We don't want to include save_T in this function because it will be inserted into myproject.cpp -// so a workaround with element size is used -template void save_layer_output(data_T *data, const char *layer_name, size_t layer_size) { - if (!trace_enabled) - return; - - if (trace_outputs) { - if (trace_outputs->count(layer_name) > 0) { - if (trace_type_size == 4) { - save_output_array(data, (float *)(*trace_outputs)[layer_name], layer_size); - } else if (trace_type_size == 8) { - save_output_array(data, (double *)(*trace_outputs)[layer_name], layer_size); - } else { - std::cout << "Unknown trace type!" << std::endl; - } - } else { - std::cout << "Layer name: " << layer_name << " not found in debug storage!" 
<< std::endl; - } - } else { - std::ostringstream filename; - filename << "./tb_data/" << layer_name << "_output.log"; // TODO if run as a shared lib, path should be ../tb_data - std::fstream out; - out.open(filename.str(), std::ios::app); - assert(out.is_open()); - for (int i = 0; i < layer_size; i++) { - out << data[i] << " "; // We don't care about precision in text files - } - out << std::endl; - out.close(); - } -} - -template void save_layer_output(stream &data, const char *layer_name, size_t layer_size) { - if (!trace_enabled) - return; - - if (trace_outputs) { - if (trace_outputs->count(layer_name) > 0) { - if (trace_type_size == 4) { - save_output_array(data, (float *)(*trace_outputs)[layer_name], layer_size); - } else if (trace_type_size == 8) { - save_output_array(data, (double *)(*trace_outputs)[layer_name], layer_size); - } else { - std::cout << "Unknown trace type!" << std::endl; - } - } else { - std::cout << "Layer name: " << layer_name << " not found in debug storage!" << std::endl; - } - } else { - std::ostringstream filename; - filename << "./tb_data/" << layer_name << "_output.log"; // TODO if run as a shared lib, path should be ../tb_data - std::fstream out; - out.open(filename.str(), std::ios::app); - assert(out.is_open()); - for (size_t i = 0; i < layer_size / data_T::size; i++) { - data_T ctype = data.read(); - for (size_t j = 0; j < data_T::size; j++) { - out << ctype[j] << " "; - } - data.write(ctype); - } - out << std::endl; - out.close(); - } -} - -} // namespace nnet - -#endif diff --git a/hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_merge.h b/hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_merge.h deleted file mode 100644 index 766ef2e..0000000 --- a/hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_merge.h +++ /dev/null @@ -1,249 +0,0 @@ -#ifndef NNET_MERGE_H_ -#define NNET_MERGE_H_ - -#include "nnet_mult.h" - -namespace nnet { - -struct merge_config { - static const unsigned n_elem = 10; -}; - -struct dot_config { - static const unsigned n_in = 10; - static const unsigned n_out = 1; - - static const unsigned reuse_factor = 1; - - typedef float accum_t; - - template using product = nnet::product::mult; -}; - -struct concat_config { - static const unsigned n_elem1_0 = 10; - static const unsigned n_elem1_1 = 10; - static const unsigned n_elem1_2 = 10; - static const unsigned n_elem2_0 = 10; - static const unsigned n_elem2_1 = 10; - static const unsigned n_elem2_2 = 10; - - static const unsigned axis = -1; -}; - -template -void add(input1_T data1[CONFIG_T::n_elem], input2_T data2[CONFIG_T::n_elem], res_T res[CONFIG_T::n_elem]) { - #pragma unroll - for (int i = 0; i < CONFIG_T::n_elem; i++) { - res[i] = static_cast(data1[i] + data2[i]); - } -} - -template -void subtract(input1_T data1[CONFIG_T::n_elem], input2_T data2[CONFIG_T::n_elem], res_T res[CONFIG_T::n_elem]) { - #pragma unroll - for (int i = 0; i < CONFIG_T::n_elem; i++) { - res[i] = static_cast(data1[i] - data2[i]); - } -} - -template -void multiply(input1_T data1[CONFIG_T::n_elem], input2_T data2[CONFIG_T::n_elem], res_T res[CONFIG_T::n_elem]) { - #pragma unroll - for (int i = 0; i < CONFIG_T::n_elem; i++) { - res[i] = static_cast(data1[i] * data2[i]); - } -} - -template -void average(input1_T data1[CONFIG_T::n_elem], input2_T data2[CONFIG_T::n_elem], res_T res[CONFIG_T::n_elem]) { - #pragma unroll - for (int i = 0; i < CONFIG_T::n_elem; i++) { - res[i] = static_cast((data1[i] + data2[i]) / (res_T)2); - } -} - -template -void maximum(input1_T data1[CONFIG_T::n_elem], input2_T 
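// Note (illustrative model): the stream overloads of save_output_array and
// save_layer_output above read every beat and immediately write it back, so
// tracing observes the stream without consuming it. The same
// read-then-restore pattern on a plain FIFO, assuming n equals the number of
// queued elements so the original order is preserved:
#include <cstddef>
#include <queue>
template <typename T> void peek_all(std::queue<T> &q, T *out, std::size_t n) {
    for (std::size_t i = 0; i < n; i++) {
        T v = q.front();
        q.pop();    // consume from the front
        out[i] = v;
        q.push(v);  // restore at the back, preserving order after n steps
    }
}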
data2[CONFIG_T::n_elem], res_T res[CONFIG_T::n_elem]) { - #pragma unroll - for (int i = 0; i < CONFIG_T::n_elem; i++) { - res[i] = (data1[i] > data2[i]) ? static_cast(data1[i]) : static_cast(data2[i]); - } -} - -template -void minimum(input1_T data1[CONFIG_T::n_elem], input2_T data2[CONFIG_T::n_elem], res_T res[CONFIG_T::n_elem]) { - #pragma unroll - for (int i = 0; i < CONFIG_T::n_elem; i++) { - res[i] = (data1[i] < data2[i]) ? static_cast(data1[i]) : static_cast(data2[i]); - } -} - -template -void dot1d(input1_T data1[CONFIG_T::n_in], input2_T data2[CONFIG_T::n_in], res_T res[CONFIG_T::n_out]) { - constexpr unsigned multiplier_limit = DIV_ROUNDUP(CONFIG_T::n_in, CONFIG_T::reuse_factor); - - hls_register typename CONFIG_T::accum_t mult[CONFIG_T::n_in]; -Product: - #pragma unroll multiplier_limit - for (int i = 0; i < CONFIG_T::n_in; i++) { - mult[i] = CONFIG_T::template product::product(data1[i], data2[i]); - } - - hls_register typename CONFIG_T::accum_t acc = 0; -Accum: - #pragma unroll - for (int i = 0; i < CONFIG_T::n_in; i++) { - acc += mult[i]; - } - - res[0] = static_cast(acc); -} - -template -void concatenate1d(input1_T data1[CONFIG_T::n_elem1_0], input2_T data2[CONFIG_T::n_elem2_0], - res_T res[CONFIG_T::n_elem1_0 + CONFIG_T::n_elem2_0]) { - #pragma unroll - for (int i = 0; i < CONFIG_T::n_elem1_0; i++) { - res[i] = static_cast(data1[i]); - } - - #pragma unroll - for (int i = 0; i < CONFIG_T::n_elem2_0; i++) { - res[CONFIG_T::n_elem1_0 + i] = static_cast(data2[i]); - } -} - -template -void concatenate2d_0(input1_T data1[CONFIG_T::n_elem1_0 * CONFIG_T::n_elem1_1], - input2_T data2[CONFIG_T::n_elem2_0 * CONFIG_T::n_elem2_1], - res_T res[CONFIG_T::n_elem1_0 * CONFIG_T::n_elem1_1 + CONFIG_T::n_elem2_0 * CONFIG_T::n_elem2_1]) { - #pragma unroll - for (int i = 0; i < CONFIG_T::n_elem1_0 * CONFIG_T::n_elem1_1; i++) { - res[i] = static_cast(data1[i]); - } - - #pragma unroll - for (int i = 0; i < CONFIG_T::n_elem2_0 * CONFIG_T::n_elem2_1; i++) { - res[CONFIG_T::n_elem1_0 * CONFIG_T::n_elem1_1 + i] = static_cast(data2[i]); - } -} - -template -void concatenate2d_1(input1_T data1[CONFIG_T::n_elem1_0 * CONFIG_T::n_elem1_1], - input2_T data2[CONFIG_T::n_elem2_0 * CONFIG_T::n_elem2_1], - res_T res[CONFIG_T::n_elem1_0 * CONFIG_T::n_elem1_1 + CONFIG_T::n_elem2_0 * CONFIG_T::n_elem2_1]) { - for (int i = 0; i < CONFIG_T::n_elem1_0; i++) { - #pragma unroll - for (int j = 0; j < CONFIG_T::n_elem1_1; j++) { - res[i * (CONFIG_T::n_elem1_1 + CONFIG_T::n_elem2_1) + j] = - static_cast(data1[i * CONFIG_T::n_elem1_1 + j]); - } - - #pragma unroll - for (int j = 0; j < CONFIG_T::n_elem2_1; j++) { - res[i * (CONFIG_T::n_elem1_1 + CONFIG_T::n_elem2_1) + CONFIG_T::n_elem1_1 + j] = - static_cast(data2[i * CONFIG_T::n_elem2_1 + j]); - } - } -} - -template -void concatenate2d(input1_T data1[CONFIG_T::n_elem1_0 * CONFIG_T::n_elem1_1], - input2_T data2[CONFIG_T::n_elem2_0 * CONFIG_T::n_elem2_1], - res_T res[CONFIG_T::n_elem1_0 * CONFIG_T::n_elem1_1 + CONFIG_T::n_elem2_0 * CONFIG_T::n_elem2_1]) { - if (CONFIG_T::axis == 2 || CONFIG_T::axis == -1) { - concatenate2d_1(data1, data2, res); - } else { - concatenate2d_0(data1, data2, res); - } -} - -template -void concatenate3d_0(input1_T data1[CONFIG_T::n_elem1_0 * CONFIG_T::n_elem1_1 * CONFIG_T::n_elem1_2], - input2_T data2[CONFIG_T::n_elem2_0 * CONFIG_T::n_elem2_1 * CONFIG_T::n_elem2_2], - res_T res[CONFIG_T::n_elem1_0 * CONFIG_T::n_elem1_1 * CONFIG_T::n_elem1_2 + - CONFIG_T::n_elem2_0 * CONFIG_T::n_elem2_1 * CONFIG_T::n_elem2_2]) { - #pragma unroll - for (int i = 0; i < 
CONFIG_T::n_elem1_0 * CONFIG_T::n_elem1_1 * CONFIG_T::n_elem1_2; i++) { - res[i] = static_cast(data1[i]); - } - - #pragma unroll - for (int i = 0; i < CONFIG_T::n_elem2_0 * CONFIG_T::n_elem2_1 * CONFIG_T::n_elem2_2; i++) { - res[CONFIG_T::n_elem1_0 * CONFIG_T::n_elem1_1 * CONFIG_T::n_elem1_2 + i] = static_cast(data2[i]); - } -} - -template -void concatenate3d_1(input1_T data1[CONFIG_T::n_elem1_0 * CONFIG_T::n_elem1_1 * CONFIG_T::n_elem1_2], - input2_T data2[CONFIG_T::n_elem2_0 * CONFIG_T::n_elem2_1 * CONFIG_T::n_elem2_2], - res_T res[CONFIG_T::n_elem1_0 * CONFIG_T::n_elem1_1 * CONFIG_T::n_elem1_2 + - CONFIG_T::n_elem2_0 * CONFIG_T::n_elem2_1 * CONFIG_T::n_elem2_2]) { - for (int i = 0; i < CONFIG_T::n_elem1_0; i++) { - for (int j = 0; j < CONFIG_T::n_elem1_1; j++) { - #pragma unroll - for (int k = 0; k < CONFIG_T::n_elem1_2; k++) { - int res_idx = - i * (CONFIG_T::n_elem1_1 + CONFIG_T::n_elem2_1) * CONFIG_T::n_elem1_2 + j * CONFIG_T::n_elem1_2 + k; - int data_idx = i * CONFIG_T::n_elem1_1 * CONFIG_T::n_elem1_2 + j * CONFIG_T::n_elem1_2 + k; - res[res_idx] = static_cast(data1[data_idx]); - } - } - - for (int j = 0; j < CONFIG_T::n_elem2_1; j++) { - #pragma unroll - for (int k = 0; k < CONFIG_T::n_elem2_2; k++) { - int res_idx = i * (CONFIG_T::n_elem1_1 + CONFIG_T::n_elem2_1) * CONFIG_T::n_elem1_2 + - (j + CONFIG_T::n_elem1_1) * CONFIG_T::n_elem1_2 + k; - int data_idx = i * CONFIG_T::n_elem2_1 * CONFIG_T::n_elem2_2 + j * CONFIG_T::n_elem2_2 + k; - res[res_idx] = static_cast(data2[data_idx]); - } - } - } -} - -template -void concatenate3d_2(input1_T data1[CONFIG_T::n_elem1_0 * CONFIG_T::n_elem1_1 * CONFIG_T::n_elem1_2], - input2_T data2[CONFIG_T::n_elem2_0 * CONFIG_T::n_elem2_1 * CONFIG_T::n_elem2_2], - res_T res[CONFIG_T::n_elem1_0 * CONFIG_T::n_elem1_1 * CONFIG_T::n_elem1_2 + - CONFIG_T::n_elem2_0 * CONFIG_T::n_elem2_1 * CONFIG_T::n_elem2_2]) { - for (int i = 0; i < CONFIG_T::n_elem1_0; i++) { - for (int j = 0; j < CONFIG_T::n_elem1_1; j++) { - - #pragma unroll - for (int k = 0; k < CONFIG_T::n_elem1_2; k++) { - int res_idx = i * CONFIG_T::n_elem1_1 * (CONFIG_T::n_elem1_2 + CONFIG_T::n_elem2_2) + - j * (CONFIG_T::n_elem1_2 + CONFIG_T::n_elem2_2) + k; - int data_idx = i * CONFIG_T::n_elem1_1 * CONFIG_T::n_elem1_2 + j * CONFIG_T::n_elem1_2 + k; - res[res_idx] = static_cast(data1[data_idx]); - } - - #pragma unroll - for (int k = 0; k < CONFIG_T::n_elem1_2; k++) { - int res_idx = i * CONFIG_T::n_elem1_1 * (CONFIG_T::n_elem1_2 + CONFIG_T::n_elem2_2) + - j * (CONFIG_T::n_elem1_2 + CONFIG_T::n_elem2_2) + k + CONFIG_T::n_elem1_2; - int data_idx = i * CONFIG_T::n_elem2_1 * CONFIG_T::n_elem2_2 + j * CONFIG_T::n_elem2_2 + k; - res[res_idx] = static_cast(data2[data_idx]); - } - } - } -} - -template -void concatenate3d(input1_T data1[CONFIG_T::n_elem1_0 * CONFIG_T::n_elem1_1 * CONFIG_T::n_elem1_2], - input2_T data2[CONFIG_T::n_elem2_0 * CONFIG_T::n_elem2_1 * CONFIG_T::n_elem2_2], - res_T res[CONFIG_T::n_elem1_0 * CONFIG_T::n_elem1_1 * CONFIG_T::n_elem1_2 + - CONFIG_T::n_elem2_0 * CONFIG_T::n_elem2_1 * CONFIG_T::n_elem2_2]) { - if (CONFIG_T::axis == 3 || CONFIG_T::axis == -1) { - concatenate3d_2(data1, data2, res); - } else if (CONFIG_T::axis == 2 || CONFIG_T::axis == -2) { - concatenate3d_1(data1, data2, res); - } else { - concatenate3d_0(data1, data2, res); - } -} - -} // namespace nnet - -#endif diff --git a/hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_merge_stream.h b/hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_merge_stream.h deleted file mode 100644 index 428d323..0000000 --- 
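// Note (host-side sketch): concatenate3d_2 above interleaves the two inputs
// along the channel axis; for each (i, j) the first input fills channels
// [0, n_elem1_2) and the second fills [n_elem1_2, n_elem1_2 + n_elem2_2).
// Its second inner loop iterates up to n_elem1_2, which appears to assume
// n_elem1_2 == n_elem2_2. A check of the seam with unequal channel counts:
#include <cassert>
int concat3d_seam_check() {
    constexpr int H = 2, W = 2, C1 = 3, C2 = 2; // assumed sizes
    float a[H * W * C1], b[H * W * C2], out[H * W * (C1 + C2)];
    for (int i = 0; i < H * W * C1; i++) a[i] = 100 + i;
    for (int i = 0; i < H * W * C2; i++) b[i] = 200 + i;
    for (int i = 0; i < H; i++)
        for (int j = 0; j < W; j++) {
            for (int k = 0; k < C1; k++)
                out[(i * W + j) * (C1 + C2) + k] = a[(i * W + j) * C1 + k];
            for (int k = 0; k < C2; k++) // bound is C2, the second input's depth
                out[(i * W + j) * (C1 + C2) + C1 + k] = b[(i * W + j) * C2 + k];
        }
    assert(out[C1] == 200 && "second input starts at channel n_elem1_2");
    return 0;
}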
a/hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_merge_stream.h +++ /dev/null @@ -1,357 +0,0 @@ -#ifndef NNET_MERGE_STREAM_H_ -#define NNET_MERGE_STREAM_H_ - -namespace nnet { - -template -void add(stream &data1, stream &data2, stream &res) { - assert(input1_T::size == input2_T::size && input1_T::size == res_T::size); - -AddLoop: - #pragma ii 1 - for (int i = 0; i < CONFIG_T::n_elem / input1_T::size; i++) { - hls_register input1_T in_data1 = data1.read(); - hls_register input2_T in_data2 = data2.read(); - - hls_register res_T out_data; - - AddPack: - #pragma unroll - for (int j = 0; j < res_T::size; j++) { - out_data[j] = static_cast(in_data1[j] + in_data2[j]); - } - - res.write(out_data); - } -} - -template -void subtract(stream &data1, stream &data2, stream &res) { - assert(input1_T::size == input2_T::size && input1_T::size == res_T::size); - -SubtractLoop: - #pragma ii 1 - for (int i = 0; i < CONFIG_T::n_elem / input1_T::size; i++) { - hls_register input1_T in_data1 = data1.read(); - hls_register input2_T in_data2 = data2.read(); - - hls_register res_T out_data; - - SubtractPack: - #pragma unroll - for (int j = 0; j < res_T::size; j++) { - out_data[j] = static_cast(in_data1[j] - in_data2[j]); - } - - res.write(out_data); - } -} - -template -void multiply(stream &data1, stream &data2, stream &res) { - assert(input1_T::size == input2_T::size && input1_T::size == res_T::size); - -MultLoop: - #pragma ii 1 - for (int i = 0; i < CONFIG_T::n_elem / input1_T::size; i++) { - hls_register input1_T in_data1 = data1.read(); - hls_register input2_T in_data2 = data2.read(); - - hls_register res_T out_data; - - MultPack: - #pragma unroll - for (int j = 0; j < res_T::size; j++) { - out_data[j] = static_cast(in_data1[j] * in_data2[j]); - } - - res.write(out_data); - } -} - -template -void average(stream &data1, stream &data2, stream &res) { - assert(input1_T::size == input2_T::size && input1_T::size == res_T::size); - -AvgLoop: - #pragma ii 1 - for (int i = 0; i < CONFIG_T::n_elem / input1_T::size; i++) { - hls_register input1_T in_data1 = data1.read(); - hls_register input2_T in_data2 = data2.read(); - - hls_register res_T out_data; - - AvgPack: - #pragma unroll - for (int j = 0; j < res_T::size; j++) { - out_data[j] = - static_cast((in_data1[j] + in_data2[j]) / (typename res_T::value_type)2); - } - - res.write(out_data); - } -} - -template -void maximum(stream &data1, stream &data2, stream &res) { - assert(input1_T::size == input2_T::size && input1_T::size == res_T::size); - -MaxLoop: - #pragma ii 1 - for (int i = 0; i < CONFIG_T::n_elem / input1_T::size; i++) { - hls_register input1_T in_data1 = data1.read(); - hls_register input2_T in_data2 = data2.read(); - - hls_register res_T out_data; - - MaxPack: - #pragma unroll - for (int j = 0; j < res_T::size; j++) { - out_data[j] = static_cast(out_data[j] = (in_data1[j] > in_data2[j]) ? in_data1[j] - : in_data2[j]); - } - - res.write(out_data); - } -} - -template -void minimum(stream &data1, stream &data2, stream &res) { - assert(input1_T::size == input2_T::size && input1_T::size == res_T::size); - -MinLoop: - #pragma ii 1 - for (int i = 0; i < CONFIG_T::n_elem / input1_T::size; i++) { - hls_register input1_T in_data1 = data1.read(); - hls_register input2_T in_data2 = data2.read(); - - hls_register res_T out_data; - - MinPack: - #pragma unroll - for (int j = 0; j < res_T::size; j++) { - out_data[j] = static_cast(out_data[j] = (in_data1[j] < in_data2[j]) ? 
in_data1[j] - : in_data2[j]); - } - - res.write(out_data); - } -} - -template -void concatenate1d(stream &data1, stream &data2, stream &res) { - hls_register res_T out_data; - -ConcatLoop1: - #pragma ii 1 - for (int i = 0; i < CONFIG_T::n_elem1_0 / input1_T::size; i++) { - hls_register input1_T in_data1 = data1.read(); - ConcatPack1: - #pragma unroll - for (int j = 0; j < input1_T::size; j++) { - out_data[j + (i * input1_T::size)] = static_cast(in_data1[j]); - } - } - -ConcatLoop2: - #pragma ii 1 - for (int i = 0; i < CONFIG_T::n_elem2_0 / input2_T::size; i++) { - hls_register input2_T in_data2 = data2.read(); - ConcatPack2: - #pragma unroll - for (int j = 0; j < input2_T::size; j++) { - out_data[j + (i * input2_T::size) + (CONFIG_T::n_elem1_0)] = - static_cast(in_data2[j]); - } - } - res.write(out_data); -} - -template -void concatenate2d_0(stream &data1, stream &data2, stream &res) { -ConcatLoopHeight1: - #pragma ii 1 - for (int i = 0; i < CONFIG_T::n_elem1_0; i++) { - - hls_register input1_T in_data1 = data1.read(); - hls_register res_T out_data; - - ConcatPackInput1: - #pragma unroll - for (int k = 0; k < input1_T::size; k++) { - out_data[k] = static_cast(in_data1[k]); - } - - res.write(out_data); - } - -ConcatLoopHeight2: - #pragma ii 1 - for (int i = 0; i < CONFIG_T::n_elem2_0; i++) { - hls_register input2_T in_data2 = data2.read(); - hls_register res_T out_data; - - ConcatPackInput2: - #pragma unroll - for (int k = 0; k < input2_T::size; k++) { - out_data[k] = static_cast(in_data2[k]); - } - - res.write(out_data); - } -} - -template -void concatenate2d_1(stream &data1, stream &data2, stream &res) { -ConcatLoopHeight: - #pragma ii 1 - for (int i = 0; i < CONFIG_T::n_elem1_0; i++) { - hls_register input1_T in_data1 = data1.read(); - hls_register input2_T in_data2 = data2.read(); - hls_register res_T out_data; - - ConcatPackInput1: - #pragma unroll - for (int k = 0; k < input1_T::size; k++) { - out_data[k] = static_cast(in_data1[k]); - } - - ConcatPackInput2: - #pragma unroll - for (int k = 0; k < input2_T::size; k++) { - out_data[input1_T::size + k] = static_cast(in_data2[k]); - } - - res.write(out_data); - } -} - -template -void concatenate2d(stream &data1, stream &data2, stream &res) { - if (CONFIG_T::axis == 2 || CONFIG_T::axis == -1) { - concatenate2d_1(data1, data2, res); - } else { - concatenate2d_0(data1, data2, res); - } -} - -template -void concatenate3d_0(stream &data1, stream &data2, stream &res) { -ConcatLoopHeight1: - for (int i = 0; i < CONFIG_T::n_elem1_0; i++) { - ConcatLoopWidth1: - #pragma ii 1 - for (int j = 0; j < CONFIG_T::n_elem1_1; j++) { - - hls_register input1_T in_data1 = data1.read(); - hls_register res_T out_data; - ConcatPackInput1: - #pragma unroll - for (int k = 0; k < input1_T::size; k++) { - out_data[k] = static_cast(in_data1[k]); - } - - res.write(out_data); - } - } - -ConcatLoopHeight2: - for (int i = 0; i < CONFIG_T::n_elem2_0; i++) { - ConcatLoopWidth2: - #pragma ii 1 - for (int j = 0; j < CONFIG_T::n_elem2_1; j++) { - - hls_register input2_T in_data2 = data2.read(); - hls_register res_T out_data; - - ConcatPackInput2: - #pragma unroll - for (int k = 0; k < input2_T::size; k++) { - out_data[k] = static_cast(in_data2[k]); - } - - res.write(out_data); - } - } -} - -template -void concatenate3d_1(stream &data1, stream &data2, stream &res) { -ConcatLoopHeight: - for (int i = 0; i < CONFIG_T::n_elem1_0; i++) { - ConcatLoopWidth1: - #pragma ii 1 - for (int j = 0; j < CONFIG_T::n_elem1_1; j++) { - - hls_register input1_T in_data1 = data1.read(); - 
hls_register res_T out_data; - - ConcatPackInput1: - #pragma unroll - for (int k = 0; k < input1_T::size; k++) { - out_data[k] = static_cast(in_data1[k]); - } - - res.write(out_data); - } - ConcatLoopWidth2: - #pragma ii 1 - for (int j = 0; j < CONFIG_T::n_elem2_1; j++) { - - hls_register input2_T in_data2 = data2.read(); - hls_register res_T out_data; - - ConcatPackInput2: - #pragma unroll - for (int k = 0; k < input2_T::size; k++) { - out_data[k] = static_cast(in_data2[k]); - } - - res.write(out_data); - } - } -} - -template -void concatenate3d_2(stream &data1, stream &data2, stream &res) { -ConcatLoopHeight: - for (int i = 0; i < CONFIG_T::n_elem1_0; i++) { - ConcatLoopWidth: - #pragma ii 1 - for (int j = 0; j < CONFIG_T::n_elem1_1; j++) { - - hls_register input1_T in_data1 = data1.read(); - hls_register input2_T in_data2 = data2.read(); - hls_register res_T out_data; - - ConcatPackInput1: - #pragma unroll - for (int k = 0; k < input1_T::size; k++) { - out_data[k] = static_cast(in_data1[k]); - } - - ConcatPackInput2: - #pragma unroll - for (int k = 0; k < input2_T::size; k++) { - out_data[input1_T::size + k] = static_cast(in_data2[k]); - } - - res.write(out_data); - } - } -} - -template -void concatenate3d(stream &data1, stream &data2, stream &res) { - if (CONFIG_T::axis == 3 || CONFIG_T::axis == -1) { - concatenate3d_2(data1, data2, res); - } else if (CONFIG_T::axis == 2 || CONFIG_T::axis == -2) { - concatenate3d_1(data1, data2, res); - } else { - concatenate3d_0(data1, data2, res); - } -} - -} // namespace nnet - -#endif diff --git a/hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_mult.h b/hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_mult.h deleted file mode 100644 index 5be7728..0000000 --- a/hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_mult.h +++ /dev/null @@ -1,113 +0,0 @@ -#ifndef NNET_MULT_H_ -#define NNET_MULT_H_ - -#include "nnet_common.h" -#include "nnet_helpers.h" -#include - -namespace nnet { - -// Different methods to perform the product of input and weight, depending on their types. 
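// Note (stand-in sketch of the specialisation idea described just above; the
// real classes key off the ac_int/ac_fixed widths the backend selects): each
// product class exposes a static product(a, w), and a layer config picks one
// via its `template using product = ...` alias.
struct mult_ref { // 'normal' product
    static constexpr float product(float a, float w) { return a * w; }
};
struct both_binary_ref { // 1-bit data and 1-bit weights reduce to XNOR
    static constexpr bool product(bool a, bool w) { return a == w; }
};
static_assert(both_binary_ref::product(true, true) &&
              !both_binary_ref::product(true, false),
              "agreement encodes +1, disagreement encodes -1 downstream");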
-namespace product { - -class Product { - public: - static void limit(unsigned multiplier_limit) {} -}; - -template class both_binary : public Product { - public: - inline static x_T product(x_T a, w_T w) { - // specialisation for 1-bit weights and incoming data - return a == w; - } -}; - -template class weight_binary : public Product { - public: - inline static auto product(x_T a, w_T w) -> decltype(-a) { - // Specialisation for 1-bit weights, arbitrary data - if (w == 0) - return -a; - else - return a; - } -}; - -template class data_binary : public Product { - public: - inline static auto product(x_T a, w_T w) -> decltype(-w) { - // Specialisation for 1-bit data, arbitrary weight - if (a == 0) - return -w; - else - return w; - } -}; - -template class weight_ternary : public Product { - public: - inline static auto product(x_T a, w_T w) -> decltype(-a) { - // Specialisation for 2-bit weights, arbitrary data - if (w == 0) - return 0; - else if (w == -1) - return -a; - else - return a; // if(w == 1) - } -}; - -template class mult : public Product { - public: - inline static auto product(x_T a, w_T w) -> decltype(a * w) { - // 'Normal' product - return a * w; - } - static void limit(unsigned multiplier_limit) { - // TODO: Implement for Quartus - // #pragma HLS ALLOCATION instances=mul limit=multiplier_limit operation > Vivado-only, replace with Intel HLS - // pragma - } -}; - -template class weight_exponential : public Product { - public: - using r_T = ac_fixed<2 * (decltype(w_T::weight)::width + x_T::width), (decltype(w_T::weight)::width + x_T::width), true>; - inline static r_T product(x_T a, w_T w) { - // Shift product for exponential weights - // Shift by the exponent. Negative weights shift right - r_T y = static_cast(a) << w.weight; - - // Negate or not depending on weight sign - return w.sign == 1 ? 
y : static_cast(-y); - } -}; -} // namespace product - -// TO-DO: These may need extra variants if ac_int types are used in more places -template -inline typename std::enable_if>::value && - std::is_same>::value, - ac_int>::type -cast(typename CONFIG_T::accum_t x) { - return static_cast>(((x - CONFIG_T::n_in / 2) * 2).to_ac_int()); -} - -template -inline typename std::enable_if>::value && - !std::is_same>::value, - res_T>::type -cast(typename CONFIG_T::accum_t x) { - return static_cast(x); -} - -template -inline typename std::enable_if<(!std::is_same>::value), res_T>::type -cast(typename CONFIG_T::accum_t x) { - return static_cast(x); -} - -} // namespace nnet - -#endif diff --git a/hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_padding.h b/hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_padding.h deleted file mode 100644 index a95f9ab..0000000 --- a/hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_padding.h +++ /dev/null @@ -1,99 +0,0 @@ -#ifndef NNET_PADDING_H_ -#define NNET_PADDING_H_ - -namespace nnet { - -struct padding1d_config { - static const unsigned in_width = 10; - static const unsigned out_width = 10; - static const unsigned n_chan = 10; - - static const unsigned pad_left = 0; - static const unsigned pad_right = 0; -}; - -template -void zeropad1d_cl(data_T data[CONFIG_T::n_chan * CONFIG_T::in_width], res_T res[CONFIG_T::n_chan * CONFIG_T::out_width]) { - for (int i = 0; i < CONFIG_T::pad_left; i++) { - #pragma unroll - for (int j = 0; j < CONFIG_T::n_chan; j++) { - *(res++) = 0; - } - } - - for (int i = 0; i < CONFIG_T::in_width; i++) { - #pragma unroll - for (int j = 0; j < CONFIG_T::n_chan; j++) { - *(res++) = (res_T) * (data++); - } - } - - for (int i = 0; i < CONFIG_T::pad_right; i++) { - #pragma unroll - for (int j = 0; j < CONFIG_T::n_chan; j++) { - *(res++) = 0; - } - } -} - -struct padding2d_config { - static const unsigned in_height = 10; - static const unsigned in_width = 10; - - static const unsigned out_height = 10; - static const unsigned out_width = 10; - - static const unsigned n_chan = 10; - - static const unsigned pad_top = 0; - static const unsigned pad_bottom = 0; - static const unsigned pad_left = 0; - static const unsigned pad_right = 0; -}; - -template -void zeropad2d_cl(data_T data[CONFIG_T::n_chan * CONFIG_T::in_height * CONFIG_T::in_width], - res_T res[CONFIG_T::n_chan * CONFIG_T::out_height * CONFIG_T::out_width]) { - for (int i = 0; i < CONFIG_T::pad_top; i++) { - for (int j = 0; j < CONFIG_T::out_width; j++) { - #pragma unroll - for (int k = 0; k < CONFIG_T::n_chan; k++) { - *(res++) = 0; - } - } - } - - for (int i = 0; i < CONFIG_T::in_height; i++) { - for (int j = 0; j < CONFIG_T::pad_left; j++) { - #pragma unroll - for (int k = 0; k < CONFIG_T::n_chan; k++) { - *(res++) = 0; - } - } - for (int j = 0; j < CONFIG_T::in_width; j++) { - #pragma unroll - for (int k = 0; k < CONFIG_T::n_chan; k++) { - *(res++) = (res_T) * (data++); - } - } - for (int j = 0; j < CONFIG_T::pad_right; j++) { - #pragma unroll - for (int k = 0; k < CONFIG_T::n_chan; k++) { - *(res++) = 0; - } - } - } - - for (int i = 0; i < CONFIG_T::pad_bottom; i++) { - for (int j = 0; j < CONFIG_T::out_width; j++) { - #pragma unroll - for (int k = 0; k < CONFIG_T::n_chan; k++) { - *(res++) = 0; - } - } - } -} - -} // namespace nnet - -#endif diff --git a/hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_padding_stream.h b/hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_padding_stream.h deleted file mode 100644 index 6d40bd2..0000000 --- 
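// Note (reference sketch of zeropad1d_cl above; channels-last layout, float
// stand-ins for the template types):
template <int W, int C, int PL, int PR>
void zeropad1d_reference(const float data[W * C], float res[(PL + W + PR) * C]) {
    for (int i = 0; i < PL * C; i++) res[i] = 0;                // left padding
    for (int i = 0; i < W * C; i++)  res[PL * C + i] = data[i]; // copy image
    for (int i = 0; i < PR * C; i++) res[(PL + W) * C + i] = 0; // right padding
}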
a/hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_padding_stream.h +++ /dev/null @@ -1,83 +0,0 @@ -#ifndef NNET_PADDING_STREAM_H_ -#define NNET_PADDING_STREAM_H_ - -namespace nnet { - -template inline void fill_zero(stream &res) { - hls_register res_T res_part; - #pragma unroll - for (int i = 0; i < CONFIG_T::n_chan; i++) { - res_part[i] = 0; - } - res.write(res_part); -} - -template inline void fill_data(stream &data, stream &res) { - hls_register data_T data_part = data.read(); - hls_register res_T res_part; - #pragma unroll - for (int i = 0; i < CONFIG_T::n_chan; i++) { - res_part[i] = data_part[i]; - } - res.write(res_part); -} - -template void zeropad1d_cl(stream &data, stream &res) { -PadLeft: - for (int i = 0; i < CONFIG_T::pad_left; i++) { - fill_zero(res); - } - -CopyMain: - for (int i = 0; i < CONFIG_T::in_width; i++) { - fill_data(data, res); - } - -PadRight: - for (int i = 0; i < CONFIG_T::pad_right; i++) { - fill_zero(res); - } -} - -template void zeropad2d_cl(stream &data, stream &res) { -PadTop: - #pragma loop_coalesce 2 - for (int i = 0; i < CONFIG_T::pad_top; i++) { - PadTopWidth: - for (int j = 0; j < CONFIG_T::out_width; j++) { - fill_zero(res); - } - } - -PadMain: - #pragma loop_coalesce 2 - for (int i = 0; i < CONFIG_T::in_height; i++) { - - PadLeft: - for (int j = 0; j < CONFIG_T::pad_left; j++) { - fill_zero(res); - } - - CopyMain: - for (int j = 0; j < CONFIG_T::in_width; j++) { - fill_data(data, res); - } - - PadRight: - for (int j = 0; j < CONFIG_T::pad_right; j++) { - fill_zero(res); - } - } - -PadBottom: - for (int i = 0; i < CONFIG_T::pad_bottom; i++) { - PadBottomWidth: - for (int j = 0; j < CONFIG_T::out_width; j++) { - fill_zero(res); - } - } -} - -} // namespace nnet - -#endif diff --git a/hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_pooling.h b/hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_pooling.h deleted file mode 100644 index bbfc090..0000000 --- a/hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_pooling.h +++ /dev/null @@ -1,319 +0,0 @@ -#ifndef NNET_POOLING_H_ -#define NNET_POOLING_H_ - -#include "nnet_common.h" - -namespace nnet { - -// Returns the maximum value from an array of size N -template T max(T x[N]) { - hls_register T y = x[0]; - - // Due to loop dependencies, pipelining & unrolling is not possible - // Explictily disabling pipeline significantly reduces resource usage - #pragma disable_loop_pipelining - for (int i = 1; i < N; i++) { - if (x[i] > y) - y = x[i]; - } - - return y; -} - -// Returns the mean value of an array of size N -template T avg(T (&x)[N]) { - hls_register T y = 0; - - // Due to loop dependencies, pipelining & unrolling is not possible - // Explictily disabling pipeline significantly reduces resource usage - #pragma disable_loop_pipelining - for (int i = 0; i < N; i++) { - y += x[i]; - } - - y /= N; - return y; -} - -// Returns the mean value of an array of size N -// Overload of the above function; using a wider accumulator than the input to avoid overflow -template ac_int avg(ac_int (&x)[N]) { - hls_register ac_int tmp = 0; - - // Due to loop dependencies, pipelining & unrolling is not possible - // Explictily disabling pipeline significantly reduces resource usage - #pragma disable_loop_pipelining - for (int i = 0; i < N; i++) { - tmp += x[i]; - } - - tmp /= N; - - // Cast back to original type - ac_int y = static_cast>(tmp); - return tmp; -} - -// Returns the mean value of an array of size N -// Overload of the above function; using a wider accumulator than the input to avoid 
overflow -template ac_fixed avg(ac_fixed (&x)[N]) { - hls_register ac_fixed tmp = 0; - - // Due to loop dependencies, pipelining & unrolling is not possible - // Explictily disabling pipeline significantly reduces resource usage - #pragma disable_loop_pipelining - for (int i = 0; i < N; i++) { - tmp += x[i]; - } - - tmp /= N; - - // Cast back to original type - ac_fixed y = tmp; - return y; -} - -// Enumeration for pooling functions -enum Pool_Op { Max, Average }; -template T pool_op(T (&x)[N]) { - switch (op) { - case Max: - return max(x); - case Average: - return avg(x); - } -} - -/* - * In Tensorflow, pooling ignores the value in the padded cells - * For Avg pooling, return 0 (the divisior is modified to the area overlapping the unpadded image.) - * For ax pooling, return the most negative value for the type. - */ -template inline T pad_val() { - switch (op) { - case Max: { - T x = 0; - x[x.width - 1] = 1; - return x; - } - case Average: - return 0; - } -} - -struct pooling1d_config { - // Pooling paramaters - static const unsigned pool_width = 2; - static const unsigned stride_width = 2; - - // I/O sizes - static const unsigned n_in = 10; - static const unsigned n_out = (n_in - pool_width) / stride_width + 1; - static const unsigned n_filt = 4; - - // Padding - static const unsigned pad_left = 0; - static const unsigned pad_right = 0; - static const bool count_pad = false; - - // Pooling function - static const Pool_Op pool_op = Max; -}; - -template -void pooling1d_cl(data_T data[CONFIG_T::n_in * CONFIG_T::n_filt], res_T res[CONFIG_T::n_out * CONFIG_T::n_filt]) { - // For 'same' padding, increase input width by left- and right-side padding - // For 'valid' padding, reduce input width to area covered by pooling function - static constexpr int padded_width = (CONFIG_T::pad_left == 0 && CONFIG_T::pad_right == 0) - ? 
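// Note (illustration): pad_val above returns the identity element of each
// pooling op: 0 for Average and, for Max, the most negative representable
// value, built by setting only the sign bit. The same trick shown with a
// plain two's-complement type:
#include <cstdint>
#include <limits>
static_assert(static_cast<uint8_t>(std::numeric_limits<int8_t>::min()) == 0x80,
              "the minimum value has only the sign bit set");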
(CONFIG_T::n_in / CONFIG_T::stride_width * CONFIG_T::stride_width) - : (CONFIG_T::n_in + CONFIG_T::pad_left + CONFIG_T::pad_right); - -FiltLoop: - #pragma unroll - #pragma disable_loop_pipelining - for (int filt = 0; filt < CONFIG_T::n_filt; filt++) { - InputWidthLoop: - #pragma unroll - #pragma disable_loop_pipelining - for (int inp_col = 0; inp_col < padded_width; inp_col += CONFIG_T::stride_width) { - hls_register data_T pool[CONFIG_T::pool_width]; - - // Keep track of number of pixels in image vs padding region; needed for rescaling Average Pooling - hls_register unsigned img_overlap = 0; - - PoolWidthLoop: - #pragma unroll - #pragma disable_loop_pipelining - for (int pool_col = 0; pool_col < CONFIG_T::stride_width; pool_col++) { - if (inp_col + pool_col < CONFIG_T::pad_left || inp_col + pool_col >= (padded_width - CONFIG_T::pad_right)) { - // Add padding - pool[pool_col] = pad_val(); - if (CONFIG_T::count_pad) - img_overlap++; - } else { - // Current element is from input image - pool[pool_col] = data[(inp_col + pool_col - CONFIG_T::pad_left) * CONFIG_T::n_filt + filt]; - img_overlap++; - } - } - - // Pooling operation - res[(inp_col / CONFIG_T::stride_width) * CONFIG_T::n_filt + filt] = - static_cast(pool_op(pool)); - - // If the pool op is Average, the zero-padding needs to be removed from the results - if (CONFIG_T::pool_op == Average) - res[(inp_col / CONFIG_T::stride_width) * CONFIG_T::n_filt + filt] *= - (static_cast(CONFIG_T::pool_width) / img_overlap); - } - } -} - -template -void global_pooling1d_cl(data_T data[CONFIG_T::n_in * CONFIG_T::n_filt], res_T res[CONFIG_T::n_filt]) { - assert(CONFIG_T::pad_left == 0 && CONFIG_T::pad_right == 0); - assert(CONFIG_T::pool_width == CONFIG_T::stride_width); - -FiltLoop: - #pragma unroll - #pragma disable_loop_pipelining - for (int filt = 0; filt < CONFIG_T::n_filt; filt++) { - hls_register data_T pool[CONFIG_T::n_in]; - - InputWidthLoop: - #pragma unroll - #pragma disable_loop_pipelining - for (int col = 0; col < CONFIG_T::n_in; col++) { - pool[col] = data[col * CONFIG_T::n_filt + filt]; - } - - res[filt] = static_cast(pool_op(pool)); - } -} - -struct pooling2d_config { - // Pooling parameters - static const unsigned stride_height = 2; - static const unsigned stride_width = 2; - static const unsigned pool_height = 2; - static const unsigned pool_width = 2; - - // I/O sizes - static const unsigned in_height = 10; - static const unsigned in_width = 10; - static const unsigned n_filt = 4; - - static const unsigned out_height = (in_height - pool_height) / stride_height + 1; - static const unsigned out_width = (in_width - pool_width) / stride_width + 1; - - // Padding - static const unsigned pad_top = 0; - static const unsigned pad_bottom = 0; - static const unsigned pad_left = 0; - static const unsigned pad_right = 0; - static const bool count_pad = false; - - // Pooling function - static const Pool_Op pool_op = Max; -}; - -template -void pooling2d_cl(data_T data[CONFIG_T::in_height * CONFIG_T::in_width * CONFIG_T::n_filt], - res_T res[CONFIG_T::out_height * CONFIG_T::out_width * CONFIG_T::n_filt]) { - // For 'same' padding, increase input width by left- and right-side padding - // For 'valid' padding, reduce input width to area covered by pooling function - static constexpr int padded_width = (CONFIG_T::pad_left == 0 && CONFIG_T::pad_right == 0) - ? 
(CONFIG_T::in_width / CONFIG_T::stride_width * CONFIG_T::stride_width) - : (CONFIG_T::in_width + CONFIG_T::pad_left + CONFIG_T::pad_right); - static constexpr int padded_height = (CONFIG_T::pad_top == 0 && CONFIG_T::pad_bottom == 0) - ? (CONFIG_T::in_height / CONFIG_T::stride_height * CONFIG_T::stride_height) - : (CONFIG_T::in_height + CONFIG_T::pad_top + CONFIG_T::pad_bottom); - -FiltLoop: - #pragma unroll - #pragma disable_loop_pipelining - for (int filt = 0; filt < CONFIG_T::n_filt; filt++) { - InputHeightLoop: - #pragma unroll - #pragma disable_loop_pipelining - for (int inp_col = 0; inp_col < padded_height; inp_col += CONFIG_T::stride_height) { - InputWidthLoop: - #pragma unroll - #pragma disable_loop_pipelining - for (int inp_width = 0; inp_width < padded_width; inp_width += CONFIG_T::stride_width) { - hls_register data_T pool[CONFIG_T::pool_height * CONFIG_T::pool_width]; - - // Keep track of number of pixels in image vs padding region; needed for rescaling Average Pooling - hls_register unsigned img_overlap = 0; - - PoolHeightLoop: - #pragma unroll - #pragma disable_loop_pipelining - for (int pool_col = 0; pool_col < CONFIG_T::stride_height; pool_col++) { - PoolWidthLoop: - #pragma unroll - #pragma disable_loop_pipelining - for (int pool_row = 0; pool_row < CONFIG_T::stride_width; pool_row++) { - if (inp_col + pool_col < CONFIG_T::pad_top || - inp_col + pool_col >= (padded_height - CONFIG_T::pad_bottom) || - inp_width + pool_row < CONFIG_T::pad_left || - inp_width + pool_row >= (padded_width - CONFIG_T::pad_right)) { - // Add padding - pool[pool_col * CONFIG_T::stride_width + pool_row] = pad_val(); - if (CONFIG_T::count_pad) - img_overlap++; - } else { - // Current element is from input image - pool[pool_col * CONFIG_T::stride_width + pool_row] = - data[(inp_col + pool_col - CONFIG_T::pad_top) * CONFIG_T::in_width * CONFIG_T::n_filt + - (inp_width + pool_row - CONFIG_T::pad_left) * CONFIG_T::n_filt + filt]; - img_overlap++; - } - } - } - - // Pooling operation - res[(inp_col / CONFIG_T::stride_height) * CONFIG_T::out_width * CONFIG_T::n_filt + - (inp_width / CONFIG_T::stride_width) * CONFIG_T::n_filt + filt] = - static_cast( - pool_op(pool)); - - // If the pool op is Average, the zero-padding needs to be removed from the results - if (CONFIG_T::pool_op == Average) - res[(inp_col / CONFIG_T::stride_height) * CONFIG_T::out_width * CONFIG_T::n_filt + - (inp_width / CONFIG_T::stride_width) * CONFIG_T::n_filt + filt] *= - (static_cast(CONFIG_T::pool_height) * static_cast(CONFIG_T::pool_width) / - img_overlap); - } - } - } -} - -template -void global_pooling2d_cl(data_T data[CONFIG_T::in_height * CONFIG_T::in_width * CONFIG_T::n_filt], - res_T res[CONFIG_T::n_filt]) { - assert(CONFIG_T::pad_left == 0 && CONFIG_T::pad_right == 0); - assert(CONFIG_T::pad_top == 0 && CONFIG_T::pad_bottom == 0); - assert(CONFIG_T::pool_width == CONFIG_T::stride_width); - assert(CONFIG_T::pool_height == CONFIG_T::stride_height); - -FiltLoop: - #pragma unroll - #pragma disable_loop_pipelining - for (int filt = 0; filt < CONFIG_T::n_filt; filt++) { - hls_register data_T pool[CONFIG_T::in_height * CONFIG_T::in_width]; - - InputLoop: - #pragma unroll - #pragma disable_loop_pipelining - for (int i = 0; i < CONFIG_T::in_height * CONFIG_T::in_width; i++) { - pool[i] = data[i * CONFIG_T::n_filt + filt]; - } - - res[filt] = static_cast(pool_op(pool)); - } -} - -} // namespace nnet - -#endif diff --git a/hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_pooling_stream.h 
b/hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_pooling_stream.h deleted file mode 100644 index 2560072..0000000 --- a/hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_pooling_stream.h +++ /dev/null @@ -1,316 +0,0 @@ -#ifndef NNET_POOLING_STREAM_H_ -#define NNET_POOLING_STREAM_H_ - -#include "nnet_conv1d_stream.h" -#include "nnet_conv2d_stream.h" -#include "nnet_pooling.h" -#include "nnet_types.h" - -namespace nnet { - -/* - * void compute_pool_buffer_1d(in_element, res_stream, line_buffer, kernel_window) - * - * Args: - * in_element - current elements from input image, data_T type is usually nnet::array, size of array corresponds to number - * of channels res_stream - output stream, passed by reference to allow direct writing line_buffer - chained array of shift - * registers, one for each row of the pool and channel kernel_window - array of values from the input curently being pooled - * - * Function executes 4 steps: - * (1) Shift line buffer - updates the contents of the chained shift registers, inserting the new inputs and removing last - * elements (2) Kernel shift - updates the elements of the kernel window, by storing the new inputs and popped elements from - * the line buffer (3) Pooling - performs dense matrix multiplication between the current input window and kernel weights (4) - * Counter housekeeping - performs the required pooling operation - * - */ -template -void compute_pool_buffer_1d(const data_T &in_elem, stream &res_stream, - nnet::shift_reg line_buffer[CONFIG_T::n_filt], - typename data_T::value_type kernel_window[CONFIG_T::pool_width * CONFIG_T::n_filt]) { - // Thresholds - static constexpr int lShiftX = CONFIG_T::pool_width - 1; - - // X position pixels - static int pX = 0; - - // X strides - static int sX = 0; - - // Step 1 - Shift line buffer - hls_register typename data_T::value_type shift_buffer[CONFIG_T::n_filt]; - nnet::shift_line_buffer_1d(in_elem, line_buffer, shift_buffer); - - // Step 2 - Kernel shift - nnet::kernel_shift_1d(shift_buffer, kernel_window); - - // Check to see if we have a full pool window - if ((sX - lShiftX) == 0 && pX > (lShiftX - 1)) { - hls_register res_T res_pack; - - FiltLoop: - #pragma unroll - for (int filter = 0; filter < CONFIG_T::n_filt; filter++) { - hls_register typename data_T::value_type pool_window[CONFIG_T::pool_width]; - - // Retrieve data for current channel - PoolLoop: - #pragma unroll - for (int i = 0; i < CONFIG_T::pool_width; i++) { - pool_window[i] = kernel_window[i * CONFIG_T::n_filt + filter]; - } - - // Step 3 - Pooling - res_pack[filter] = static_cast( - pool_op(pool_window)); - } - - // Write result to output stream - res_stream.write(res_pack); - } - - // Reached end of image - if ((pX + 1) == (CONFIG_T::in_width + CONFIG_T::pad_left + CONFIG_T::pad_right)) { - pX = 0; - sX = 0; - // Move to the right - } else { - pX++; - sX = ((sX - lShiftX) == 0) ? 
(sX - CONFIG_T::stride_width + 1) : (sX + 1); - } -} - -template void pooling1d_cl(stream &data, stream &res) { - assert(CONFIG_T::pool_width == CONFIG_T::stride_width); - assert(CONFIG_T::pad_left == 0 && CONFIG_T::pad_right == 0); - - // Line buffer and kernel window - hls_register static nnet::shift_reg line_buffer[CONFIG_T::n_filt]; - hls_register static typename data_T::value_type kernel_window[CONFIG_T::pool_width * CONFIG_T::n_filt]; - -// Read input image -ReadInputWidth: - for (int col = 0; col < CONFIG_T::in_width; col++) { - compute_pool_buffer_1d(data.read(), res, line_buffer, kernel_window); - } -} - -/* - * void compute_pool_buffer_2d(in_element, res_stream, line_buffer, kernel_window) - * - * Args: - * in_element - current elements from input image, data_T type is usually nnet::array, size of array corresponds to number - * of channels res_stream - output stream, passed by reference to allow direct writing line_buffer - chained array of shift - * registers, one for each row of the pool and channel kernel_window - array of values from the input curently being pooled - * - * Function executes 4 steps: - * (1) Shift line buffer - updates the contents of the chained shift registers, inserting the new inputs and removing last - * elements (2) Kernel shift - updates the elements of the kernel window, by storing the new inputs and popped elements from - * the line buffer (3) Pooling - performs dense matrix multiplication between the current input window and kernel weights (4) - * Counter housekeeping - performs the required pooling operation - * - */ -template -void compute_pool_buffer_2d( - const data_T &in_elem, stream &res_stream, - nnet::shift_reg line_buffer[CONFIG_T::pool_height - 1] - [CONFIG_T::n_filt], - typename data_T::value_type kernel_window[CONFIG_T::pool_height * CONFIG_T::pool_width * CONFIG_T::n_filt]) { - // Thresholds - static constexpr int lShiftX = CONFIG_T::pool_width - 1; - static constexpr int lShiftY = CONFIG_T::pool_height - 1; - - // X, Y position pixels - static int pX = 0; - static int pY = 0; - - // X, Y strides - static int sX = 0; - static int sY = 0; - - // Step 1 - Shift line buffer - hls_register typename data_T::value_type shift_buffer[CONFIG_T::pool_height][CONFIG_T::n_filt]; - nnet::shift_line_buffer_2d(in_elem, line_buffer, shift_buffer); - - // Step 2 - Kernel shift - nnet::kernel_shift_2d(shift_buffer, kernel_window); - - // Check to see if we have a full pool window - if ((sX - lShiftX) == 0 && (sY - lShiftY) == 0 && pY > (lShiftY - 1) && pX > (lShiftX - 1)) { - hls_register res_T res_pack; - - FiltLoop: - #pragma unroll - for (int filter = 0; filter < CONFIG_T::n_filt; filter++) { - hls_register typename data_T::value_type pool_window[CONFIG_T::pool_height * CONFIG_T::pool_width]; - - // Retrieve data for current channel - PoolLoop: - #pragma unroll - for (int i = 0; i < CONFIG_T::pool_height * CONFIG_T::pool_width; i++) { - pool_window[i] = kernel_window[i * CONFIG_T::n_filt + filter]; - } - - // Step 3 - Pooling - res_pack[filter] = static_cast( - pool_op( - pool_window)); - } - - // Write result to output stream - res_stream.write(res_pack); - } - - // Reached end of image - if ((pX + 1) == (CONFIG_T::in_width + CONFIG_T::pad_left + CONFIG_T::pad_right) && - (pY + 1) == (CONFIG_T::in_height + CONFIG_T::pad_top + CONFIG_T::pad_bottom)) { - pX = 0; - sX = 0; - pY = 0; - sY = 0; - // Reached end of row - } else if ((pX + 1) == (CONFIG_T::in_width + CONFIG_T::pad_left + CONFIG_T::pad_right)) { - pX = 0; - sX = 0; - pY++; - sY = ((sY - 
lShiftY) == 0) ? (sY - CONFIG_T::stride_height + 1) : (sY + 1); - // Same row, same colum, therefore, move to the right - } else { - pX++; - sX = ((sX - lShiftX) == 0) ? (sX - CONFIG_T::stride_width + 1) : (sX + 1); - } -} - -template void pooling2d_cl(stream &data, stream &res) { - assert(CONFIG_T::pool_height == CONFIG_T::stride_height && CONFIG_T::pool_width == CONFIG_T::stride_width); - assert(CONFIG_T::pad_left == 0 && CONFIG_T::pad_right == 0); - assert(CONFIG_T::pad_top == 0 && CONFIG_T::pad_bottom == 0); - - // Line buffer and kernel window - hls_register static nnet::shift_reg - line_buffer[MAX(CONFIG_T::pool_height - 1, 1)][CONFIG_T::n_filt]; - hls_register static - typename data_T::value_type kernel_window[CONFIG_T::pool_height * CONFIG_T::pool_width * CONFIG_T::n_filt]; - -ReadInputHeight: - #pragma loop_coalesce 2 - for (int row = 0; row < CONFIG_T::in_height; row++) { - // Read input image - ReadInputWidth: - for (int col = 0; col < CONFIG_T::in_width; col++) { - compute_pool_buffer_2d(data.read(), res, line_buffer, kernel_window); - } - } -} - -/* - * A function used with Global Pooling - * Returns the value before pooling - * Max : Return the minimal possible value - * Avg : Return 0 - */ -template inline T init_pool_value() { - switch (op) { - case Max: { - T x = 0; - x[x.width - 1] = 1; - return x; - } - case Average: - return 0; - } -} - -/* - * A function used with Global Pooling - * Updates the output pooling value - * Max : Return the maximum between the previous maximum and current input - * Avg : Returns the cumulative sum - */ -template inline T_y reduce_global_pool(T_y y, T_x x) { - if (op == Max) { - return (x > y) ? (T_y)x : y; - } else { - return (T_y)(x + y); - } -} - -/* - * A function used with Global Pooling - * For every filter, it updates the value by summing the current input (Average) or updating the maximum value (Max) - */ -template -void compute_global_pool(const data_T &in_elem, typename CONFIG_T::accum_t data_input[CONFIG_T::n_filt]) { - #pragma unroll - for (unsigned i = 0; i < CONFIG_T::n_filt; i++) { - data_input[i] = reduce_global_pool( - data_input[i], in_elem[i]); - } -} - -template void global_pooling1d_cl(stream &data, stream &res) { - assert(CONFIG_T::pad_left == 0 && CONFIG_T::pad_right == 0); - - hls_register typename CONFIG_T::accum_t data_input[CONFIG_T::n_filt]; - - #pragma unroll - for (int i = 0; i < CONFIG_T::n_filt; i++) { - data_input[i] = init_pool_value(); - } - - for (int i = 0; i < CONFIG_T::n_in; i++) { - compute_global_pool(data.read(), data_input); - } - - hls_register res_T res_pack; - if (CONFIG_T::pool_op == Average) { - #pragma unroll - for (int i = 0; i < CONFIG_T::n_filt; i++) { - res_pack[i] = static_cast(data_input[i] / CONFIG_T::n_in); - } - } else { - #pragma unroll - for (int i = 0; i < CONFIG_T::n_filt; i++) { - res_pack[i] = static_cast(data_input[i]); - } - } - - res.write(res_pack); -} - -template void global_pooling2d_cl(stream &data, stream &res) { - assert(CONFIG_T::pad_left == 0 && CONFIG_T::pad_right == 0); - assert(CONFIG_T::pad_top == 0 && CONFIG_T::pad_bottom == 0); - - hls_register typename CONFIG_T::accum_t data_input[CONFIG_T::n_filt]; - - #pragma unroll - for (int i = 0; i < CONFIG_T::n_filt; i++) { - data_input[i] = init_pool_value(); - } - - for (int i = 0; i < CONFIG_T::in_height; i++) { - for (int j = 0; j < CONFIG_T::in_width; j++) { - compute_global_pool(data.read(), data_input); - } - } - - hls_register res_T res_pack; - if (CONFIG_T::pool_op == Average) { - #pragma unroll - for (int i = 
0; i < CONFIG_T::n_filt; i++) { - res_pack[i] = - static_cast(data_input[i] / (CONFIG_T::in_width * CONFIG_T::in_height)); - } - } else { - #pragma unroll - for (int i = 0; i < CONFIG_T::n_filt; i++) { - res_pack[i] = static_cast(data_input[i]); - } - } - - res.write(res_pack); -} - -} // namespace nnet - -#endif diff --git a/hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_recurrent.h b/hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_recurrent.h deleted file mode 100644 index 464c6d4..0000000 --- a/hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_recurrent.h +++ /dev/null @@ -1,583 +0,0 @@ -#ifndef NNET_RECURRENT_H_ -#define NNET_RECURRENT_H_ - -#include "nnet_common.h" -#include "nnet_dense.h" -#include "nnet_recurrent_activation.h" - -namespace nnet { - -//---------------------- -// Utils -//---------------------- - -template -void multiply_W(data_T input[N_IN], res_T out[N_OUT], const weight_t weight[N_IN * N_OUT]) { -MULTIPLY_W_LOOP_I: - #pragma unroll - for (int i = 0; i < N_OUT; i++) { - out[i] = 0; - - MULTIPLY_W_LOOP_J: - #pragma unroll - for (int j = 0; j < N_IN; j++) { - out[i] += input[j] * weight[i * N_IN + j]; - } - } -} - -template -void multiply_U(data_T input[N_OUT], res_T out[N_OUT], const weight_t weight[N_OUT * N_OUT]) { -MULTIPLY_U_LOOP_I: - #pragma unroll - for (int i = 0; i < N_OUT; i++) { - out[i] = 0; - - MULTIPLY_U_LOOP_J: - #pragma unroll - for (int j = 0; j < N_OUT; j++) { - out[i] += input[j] * weight[i * N_OUT + j]; - } - } -} - -template -void add_bias(data_T inputs[N], res_T out[N], const bias_t bias[N]) { -ADD_BIAS_LOOP: - #pragma unroll - for (int i = 0; i < N; i++) { - out[i] = inputs[i] + bias[i]; - } -} - -template void multiply_vectors(data_T in1[N], data_T in2[N], res_T out[N]) { -MULTIPLY_VECT_LOOP: - #pragma unroll - for (int i = 0; i < N; i++) { - out[i] = in1[i] * in2[i]; - } -} - -template void add_vectors(data_T in1[N], data_T in2[N], res_T out[N]) { -ADD_VECTOR_LOOP: - #pragma unroll - for (int i = 0; i < N; i++) { - out[i] = in1[i] + in2[i]; - } -} - -//---------------------- -// GRU -//---------------------- - -struct gru_config { - // Internal data type definitions - typedef float weight_t; - typedef float bias_t; - typedef float accum_t; - - // Layer Sizes - static const unsigned n_in = 1; - static const unsigned n_out = 1; - static const unsigned n_units = 1; - static const unsigned n_timesteps = 1; - static const unsigned n_outputs = 1; - static const bool return_sequences = false; - - // Resource reuse info - static const unsigned io_type = io_parallel; - static const unsigned reuse_factor = 1; - static const bool store_weights_in_bram = false; - - // Activation - template using activation_recr = nnet::activation::relu; - - template using activation = nnet::activation::relu; -}; - -template -void gru_cell(data_T x[CONFIG_T::n_in], res_T h[CONFIG_T::n_units], - const typename CONFIG_T::weight_t weights[3 * CONFIG_T::n_units * CONFIG_T::n_in], - const typename CONFIG_T::weight_t recurrent_weights[3 * CONFIG_T::n_units * CONFIG_T::n_units], - const typename CONFIG_T::bias_t bias[3 * CONFIG_T::n_units], - const typename CONFIG_T::bias_t recurrent_bias[3 * CONFIG_T::n_units]) { - static constexpr int recurrent_unroll_factor = CONFIG_T::n_units / CONFIG_T::reuse_factor; - // A matrix containing the values of matrix product between input (x) and weights (weights), for update, reset and - // candidate state gates, for each of the units - hls_register typename CONFIG_T::accum_t mat_mul_x_w[3 * CONFIG_T::n_units]; - 
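
For reference, the arithmetic this cell builds up over the next steps (the two fused dense products, the z/r gates, the reset-gated candidate and the final blend) is sketched below as plain host-side C++, assuming the usual sigmoid/tanh gate activations; the fused [update | reset | candidate] gate order follows the comments in this file, and all names and float types are illustrative.

#include <cmath>
#include <vector>

// Host-side reference of one GRU step (reset_after variant):
//   z = sigmoid(W_z x + b_z + U_z h + br_z)
//   r = sigmoid(W_r x + b_r + U_r h + br_r)
//   hc = tanh(W_h x + b_h + r .* (U_h h + br_h))
//   h' = (1 - z) .* hc + z .* h
void gru_step_ref(const std::vector<float> &x, std::vector<float> &h,
                  const std::vector<float> &W,  // (3*units x in_dim), row-major, [z|r|hc]
                  const std::vector<float> &U,  // (3*units x units), row-major, [z|r|hc]
                  const std::vector<float> &b,  // 3*units input biases
                  const std::vector<float> &br, // 3*units recurrent biases
                  int in_dim, int units) {
    auto sig = [](float v) { return 1.0f / (1.0f + std::exp(-v)); };
    // The two fused matrix products, mirroring mat_mul_x_w and mat_mul_h_wr
    std::vector<float> xw(3 * units), hw(3 * units);
    for (int i = 0; i < 3 * units; i++) {
        xw[i] = b[i];
        for (int j = 0; j < in_dim; j++)
            xw[i] += W[i * in_dim + j] * x[j];
        hw[i] = br[i];
        for (int j = 0; j < units; j++)
            hw[i] += U[i * units + j] * h[j];
    }
    for (int i = 0; i < units; i++) {
        float z = sig(xw[i] + hw[i]);                                    // update gate
        float r = sig(xw[units + i] + hw[units + i]);                    // reset gate
        float hc = std::tanh(xw[2 * units + i] + r * hw[2 * units + i]); // candidate state
        h[i] = (1.0f - z) * hc + z * h[i];                               // state blend
    }
}
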
nnet::dense_resource(x, mat_mul_x_w, weights, - bias); - - // A matrix containing the values of matrix product between previou state (h) and recurrent weights (recurrent_weights), - // for update, reset and candidate state gates, for each of the units - hls_register typename CONFIG_T::accum_t mat_mul_h_wr[3 * CONFIG_T::n_units]; - nnet::dense_resource( - h, mat_mul_h_wr, recurrent_weights, recurrent_bias); - - // A vector containing both the values of z(t) and r(t) for every state - hls_register typename CONFIG_T::accum_t z_r[2 * CONFIG_T::n_units]; - - // Add the individual vectors from the multiplication of mat_mul_x_w = Wx*x(t) and mat_mul_h_wr = Wh*h(t-1) - // Unrolled fully, no DSPs used - #pragma unroll - for (int i = 0; i < (2 * CONFIG_T::n_units); i++) { - z_r[i] = mat_mul_x_w[i] + mat_mul_h_wr[i]; - } - - // Activation on z(t) and r(t) - hls_register typename CONFIG_T::accum_t z_r_act[2 * CONFIG_T::n_units]; - CONFIG_T::template activation_recr::activation(z_r, z_r_act); - - // A matrix containing the values of Hadamard product between r(t) = z_r_act[n_units:2*n_units] and h(t-1) = h - hls_register typename CONFIG_T::accum_t hadamard_r_h[CONFIG_T::n_units]; - #pragma unroll recurrent_unroll_factor - for (int i = 0; i < (CONFIG_T::n_units); i++) { - hadamard_r_h[i] = z_r_act[i + CONFIG_T::n_units] * mat_mul_h_wr[i + 2 * CONFIG_T::n_units]; - } - - // The candidate state; X * W_{hx} + hadmard(r(t), h_(t-1)) * W_{hh} + b_{h} - typename CONFIG_T::accum_t h_cand[CONFIG_T::n_units]; - // Addition - can unroll fully; no DSPs used here - #pragma unroll - for (int i = 0; i < (CONFIG_T::n_units); i++) { - h_cand[i] = mat_mul_x_w[i + 2 * CONFIG_T::n_units] + hadamard_r_h[i]; - } - - // Activation on candidate state - hls_register typename CONFIG_T::accum_t h_cand_act[CONFIG_T::n_units]; - CONFIG_T::template activation::activation(h_cand, h_cand_act); - - // Update state - #pragma unroll recurrent_unroll_factor - for (int i = 0; i < (CONFIG_T::n_units); i++) { - h[i] = static_cast(h_cand_act[i] * (1 - z_r_act[i]) + h[i] * z_r_act[i]); - } -} - -template -void gru(data_T data[CONFIG_T::n_in], res_T res[CONFIG_T::n_outputs * CONFIG_T::n_units], - const typename CONFIG_T::weight_t weights[3 * CONFIG_T::n_units * CONFIG_T::n_in], - const typename CONFIG_T::weight_t recurrent_weights[3 * CONFIG_T::n_units * CONFIG_T::n_units], - const typename CONFIG_T::bias_t bias[3 * CONFIG_T::n_units], - const typename CONFIG_T::bias_t recurrent_bias[3 * CONFIG_T::n_units]) { - - hls_register data_T x[CONFIG_T::n_in]; - hls_register res_T h[CONFIG_T::n_units]; - - #pragma unroll - for (int i = 0; i < CONFIG_T::n_units; i++) { - h[i] = 0; - } - - // Loop depedency - cannot pipeline - #pragma disable_loop_pipelining - for (int t = 0; t < CONFIG_T::n_timesteps; t++) { - // Get data at current time step - #pragma unroll - for (int j = 0; j < CONFIG_T::n_in; j++) { - x[j] = data[j + t * CONFIG_T::n_in]; - } - - nnet::gru_cell(x, h, weights, recurrent_weights, bias, recurrent_bias); - - if (CONFIG_T::return_sequences) { - #pragma unroll - for (int i = 0; i < CONFIG_T::n_units; i++) { - res[CONFIG_T::n_units * t + i] = h[i]; - } - } - } - - if (!CONFIG_T::return_sequences) { - #pragma unroll - for (int i = 0; i < (CONFIG_T::n_units); i++) { - res[i] = h[i]; - } - } -} - -//---------------------- -// SimpleRNN -//---------------------- - -struct simpleRNN_config { - // Internal data type definitions - typedef float weight_t; - typedef float bias_t; - typedef float accum_t; - - // Layer Sizes - static const unsigned 
n_in = 1; - static const unsigned n_out = 1; - static const unsigned n_outputs = 1; - static const unsigned n_timesteps = 1; - static const bool return_sequences = false; - - // Resource reuse info - static const unsigned io_type = io_parallel; - static const unsigned reuse_factor = 1; - static const bool store_weights_in_bram = false; - - // Activation - template using activation_recr = nnet::activation::relu; - - template using activation = nnet::activation::relu; -}; - -template -void simple_rnn_cell(data_T inputs[CONFIG_T::n_in], res_T hidden_state[CONFIG_T::n_out], - res_T hidden_state_o[CONFIG_T::n_out], - const typename CONFIG_T::weight_t kernel[CONFIG_T::n_in * CONFIG_T::n_out], - const typename CONFIG_T::weight_t rec_kernel[CONFIG_T::n_out * CONFIG_T::n_out], - const typename CONFIG_T::bias_t bias[CONFIG_T::n_out]) { - // Weight multiplication - typename CONFIG_T::accum_t afterW[CONFIG_T::n_out] hls_register; - multiply_W( - inputs, afterW, kernel); - - // Bias addition - typename CONFIG_T::accum_t afterBias[CONFIG_T::n_out] hls_register; - add_bias( - afterW, afterBias, bias); - - // Hidden state - typename CONFIG_T::accum_t hiddenCand[CONFIG_T::n_out] hls_register; - multiply_U(hidden_state, hiddenCand, - rec_kernel); - - // Vector addition - typename CONFIG_T::accum_t afterAdd[CONFIG_T::n_out]; - add_vectors(afterBias, hiddenCand, afterAdd); - - // Activation - CONFIG_T::template activation::activation( - afterAdd, hidden_state_o); -} - -template -void simple_rnn(data_T data[CONFIG_T::n_timesteps * CONFIG_T::n_in], res_T res[CONFIG_T::n_outputs * CONFIG_T::n_out], - const typename CONFIG_T::weight_t kernel[CONFIG_T::n_in * CONFIG_T::n_out], - const typename CONFIG_T::weight_t rec_kernel[CONFIG_T::n_out * CONFIG_T::n_out], - const typename CONFIG_T::bias_t bias[CONFIG_T::n_out]) { - res_T hidden_state[CONFIG_T::n_out][CONFIG_T::n_timesteps + 1] hls_register; - res_T hidden_state_temp[CONFIG_T::n_out] hls_register; - res_T h[CONFIG_T::n_out] hls_register; - data_T in[CONFIG_T::n_in] hls_register; - -// Set initially hidden state (output) to zero -INIT_LOOP: - #pragma unroll - for (int x = 0; x < CONFIG_T::n_out; x++) { - hidden_state[x][0] = 0; - } - - #pragma disable_loop_pipelining - for (int i = 0; i < CONFIG_T::n_timesteps; i++) { - - // Data at current time step - #pragma unroll - for (int x = 0; x < CONFIG_T::n_in; x++) { - in[x] = data[x + i * CONFIG_T::n_in]; - } - - // Hidden state at current time step - #pragma unroll - for (int x = 0; x < CONFIG_T::n_out; x++) { - hidden_state_temp[x] = hidden_state[x][i]; - } - - // Do SimpleRNN - simple_rnn_cell(in, hidden_state_temp, h, kernel, rec_kernel, bias); - - // Write result - #pragma unroll - for (int x = 0; x < CONFIG_T::n_out; x++) { - hidden_state[x][i + 1] = h[x]; - } - } - - if (CONFIG_T::return_sequences == 0) { - // Output when return_sequences is false - #pragma unroll - for (int x = 0; x < CONFIG_T::n_out; x++) { - res[x] = hidden_state[x][CONFIG_T::n_timesteps]; - } - } else { - // Output when return_sequences is true - #pragma unroll - for (int x = 0; x < CONFIG_T::n_timesteps; x++) { - #pragma unroll - for (int h = 0; h < CONFIG_T::n_out; h++) { - res[x * CONFIG_T::n_out + h] = hidden_state[h][x + 1]; - } - } - } -} - -//---------------------- -// LSTM -//---------------------- - -struct lstm_config { - // Internal data type definitions - typedef float weight_t; - typedef float bias_t; - typedef float accum_t; - - // Layer Sizes - static const unsigned n_in = 1; - static const unsigned n_out = 1; - static const 
unsigned n_outputs = 1; - - static const unsigned n_timesteps = 1; - static const bool return_sequences = false; - - // Resource reuse info - static const unsigned io_type = io_parallel; - static const unsigned reuse_factor = 1; - static const bool store_weights_in_bram = false; - - // Activation - template using activation_recr = nnet::activation::relu; - - template using activation = nnet::activation::relu; -}; - -template -void lstm_cell(data_T inputs[CONFIG_T::n_in], res_T hidden_state[CONFIG_T::n_out], res_T hidden_state_o[CONFIG_T::n_out], - res_T cell_state[CONFIG_T::n_out], res_T cell_state_o[CONFIG_T::n_out], - const typename CONFIG_T::weight_t WI[CONFIG_T::n_in * CONFIG_T::n_out], - const typename CONFIG_T::weight_t WF[CONFIG_T::n_in * CONFIG_T::n_out], - const typename CONFIG_T::weight_t WC[CONFIG_T::n_in * CONFIG_T::n_out], - const typename CONFIG_T::weight_t WO[CONFIG_T::n_in * CONFIG_T::n_out], - const typename CONFIG_T::weight_t RWI[CONFIG_T::n_out * CONFIG_T::n_out], - const typename CONFIG_T::weight_t RWF[CONFIG_T::n_out * CONFIG_T::n_out], - const typename CONFIG_T::weight_t RWC[CONFIG_T::n_out * CONFIG_T::n_out], - const typename CONFIG_T::weight_t RWO[CONFIG_T::n_out * CONFIG_T::n_out], - const typename CONFIG_T::bias_t BI[CONFIG_T::n_out], const typename CONFIG_T::bias_t BF[CONFIG_T::n_out], - const typename CONFIG_T::bias_t BC[CONFIG_T::n_out], const typename CONFIG_T::bias_t BO[CONFIG_T::n_out]) { - - // Internals definitions - typename CONFIG_T::accum_t i_afterW[CONFIG_T::n_out] hls_register; - typename CONFIG_T::accum_t i_afterBias[CONFIG_T::n_out] hls_register; - typename CONFIG_T::accum_t c_afterW[CONFIG_T::n_out] hls_register; - typename CONFIG_T::accum_t c_afterBias[CONFIG_T::n_out] hls_register; - typename CONFIG_T::accum_t o_afterW[CONFIG_T::n_out] hls_register; - typename CONFIG_T::accum_t o_afterBias[CONFIG_T::n_out] hls_register; - typename CONFIG_T::accum_t f_afterW[CONFIG_T::n_out] hls_register; - typename CONFIG_T::accum_t f_afterBias[CONFIG_T::n_out] hls_register; - - // Hidden state Gate candidates, intermediate variables - typename CONFIG_T::accum_t i_hiddenCand[CONFIG_T::n_out] hls_register; - typename CONFIG_T::accum_t f_hiddenCand[CONFIG_T::n_out] hls_register; - typename CONFIG_T::accum_t c_hiddenCand[CONFIG_T::n_out] hls_register; - typename CONFIG_T::accum_t o_hiddenCand[CONFIG_T::n_out] hls_register; - - // After addition, intermediate variables - typename CONFIG_T::accum_t i_afterAdd[CONFIG_T::n_out] hls_register; - typename CONFIG_T::accum_t f_afterAdd[CONFIG_T::n_out] hls_register; - typename CONFIG_T::accum_t c_afterAdd[CONFIG_T::n_out] hls_register; - typename CONFIG_T::accum_t o_afterAdd[CONFIG_T::n_out] hls_register; - - // Gate outputs - typename CONFIG_T::accum_t gate_i[CONFIG_T::n_out] hls_register; - typename CONFIG_T::accum_t gate_f[CONFIG_T::n_out] hls_register; - typename CONFIG_T::accum_t gate_c[CONFIG_T::n_out] hls_register; - typename CONFIG_T::accum_t gate_o[CONFIG_T::n_out] hls_register; - typename CONFIG_T::accum_t gate_ic[CONFIG_T::n_out] hls_register; - typename CONFIG_T::accum_t gate_forget[CONFIG_T::n_out] hls_register; - typename CONFIG_T::accum_t h[CONFIG_T::n_out] hls_register; - - // Intermediate variable cell calculation - typename CONFIG_T::accum_t cell_act_multp[CONFIG_T::n_out] hls_register; - typename CONFIG_T::accum_t cell_act_add[CONFIG_T::n_out] hls_register; - - //-----------Gate I Calculations - // Weight multiplication - multiply_W( - inputs, i_afterW, WI); - - // Bias addition - add_bias( - i_afterW, 
i_afterBias, BI); - - // Hidden Candidate - multiply_U(hidden_state, i_hiddenCand, - RWI); - - // Vector addition - add_vectors(i_afterBias, i_hiddenCand, - i_afterAdd); - - // Activation - CONFIG_T::template activation_recr::activation(i_afterAdd, gate_i); - - //-----------Gate F Calculations - // Weight multiplication - multiply_W( - inputs, f_afterW, WF); - - // Bias addition - add_bias( - f_afterW, f_afterBias, BF); - - // Hidden Candidate - multiply_U(hidden_state, f_hiddenCand, - RWF); - - // Vector addition - add_vectors(f_afterBias, f_hiddenCand, - f_afterAdd); - - // Activation - CONFIG_T::template activation_recr::activation(f_afterAdd, gate_f); - - //-----------Gate C Calculations - // Weight multiplication - multiply_W( - inputs, c_afterW, WC); - - // Bias addition - add_bias( - c_afterW, c_afterBias, BC); - - // Hidden Candidate - multiply_U(hidden_state, c_hiddenCand, - RWC); - - // Vector addition - add_vectors(c_afterBias, c_hiddenCand, - c_afterAdd); - - // Activation - CONFIG_T::template activation::activation(c_afterAdd, gate_c); - - //-----------gate I and C multiply - // Vector multiplication - multiply_vectors(gate_i, gate_c, gate_ic); - - //-----------Gate O Calculations - // Weight multiplication - multiply_W( - inputs, o_afterW, WO); - - // Bias addition - add_bias( - o_afterW, o_afterBias, BO); - - // Hidden Candidate - multiply_U(hidden_state, o_hiddenCand, - RWO); - - // Vector addition - add_vectors(o_afterBias, o_hiddenCand, - o_afterAdd); - - // Activation - CONFIG_T::template activation_recr::activation(o_afterAdd, gate_o); - - //-----------Cell State Calculation - // Vector multiplication - multiply_vectors(gate_f, cell_state, cell_act_multp); - - // Vector addition - add_vectors(gate_ic, cell_act_multp, - cell_act_add); - - //-----------Forget gate Calculation - // Activation - CONFIG_T::template activation::activation(cell_act_add, gate_forget); - - // Vector multiplication - multiply_vectors(gate_o, gate_forget, h); - -OUTPUT_WRITE_LOOP: - #pragma unroll - for (int x = (CONFIG_T::n_out - 1); x >= 0; x--) { - hidden_state_o[x] = h[x]; - cell_state_o[x] = cell_act_add[x]; - } -} - -template -void lstm(data_T data[CONFIG_T::n_timesteps * CONFIG_T::n_in], res_T res[CONFIG_T::n_outputs * CONFIG_T::n_out], - const typename CONFIG_T::weight_t WI[CONFIG_T::n_in * CONFIG_T::n_out], - const typename CONFIG_T::weight_t WF[CONFIG_T::n_in * CONFIG_T::n_out], - const typename CONFIG_T::weight_t WC[CONFIG_T::n_in * CONFIG_T::n_out], - const typename CONFIG_T::weight_t WO[CONFIG_T::n_in * CONFIG_T::n_out], - const typename CONFIG_T::weight_t RWI[CONFIG_T::n_out * CONFIG_T::n_out], - const typename CONFIG_T::weight_t RWF[CONFIG_T::n_out * CONFIG_T::n_out], - const typename CONFIG_T::weight_t RWC[CONFIG_T::n_out * CONFIG_T::n_out], - const typename CONFIG_T::weight_t RWO[CONFIG_T::n_out * CONFIG_T::n_out], - const typename CONFIG_T::bias_t BI[CONFIG_T::n_out], const typename CONFIG_T::bias_t BF[CONFIG_T::n_out], - const typename CONFIG_T::bias_t BC[CONFIG_T::n_out], const typename CONFIG_T::bias_t BO[CONFIG_T::n_out]) { - res_T hidden_state[CONFIG_T::n_out][CONFIG_T::n_timesteps + 1] hls_register; - res_T hidden_state_temp[CONFIG_T::n_out] hls_register; - res_T cell_state[CONFIG_T::n_out][CONFIG_T::n_timesteps + 1] hls_register; - res_T cell_state_temp[CONFIG_T::n_out] hls_register; - res_T h[CONFIG_T::n_out] hls_register; - res_T c[CONFIG_T::n_out] hls_register; - data_T in[CONFIG_T::n_in] hls_register; - -// Set initially hidden state (output) to zero -INIT_LOOP: - 
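
Before the timestep loop below, it may help to see the whole cell in one place. This is a hedged host-side reference of one LSTM step with the same gate algebra as lstm_cell above (sigmoid on the i/f/o gates, tanh on the candidate and on the cell output, which the template stores in the somewhat misleadingly named gate_forget); the gate packing and all names are illustrative.

#include <cmath>
#include <vector>

// Host-side reference of one LSTM step:
//   i = sig(a_i)  f = sig(a_f)  g = tanh(a_g)  o = sig(a_o), with
//   a_* = W_* x + R_* h + b_*,  c' = f .* c + i .* g,  h' = o .* tanh(c')
void lstm_step_ref(const std::vector<float> &x, std::vector<float> &h, std::vector<float> &c,
                   const std::vector<std::vector<float>> &W, // 4 gates, each units*in_dim
                   const std::vector<std::vector<float>> &R, // 4 gates, each units*units
                   const std::vector<std::vector<float>> &b, // 4 gates, each units
                   int in_dim, int units) {
    auto sig = [](float v) { return 1.0f / (1.0f + std::exp(-v)); };
    // Compute pre-activations for all units first, so h is read before it is overwritten
    std::vector<std::vector<float>> a(4, std::vector<float>(units));
    for (int g = 0; g < 4; g++)            // gate order: i, f, candidate, o
        for (int u = 0; u < units; u++) {
            float s = b[g][u];
            for (int j = 0; j < in_dim; j++)
                s += W[g][u * in_dim + j] * x[j];
            for (int j = 0; j < units; j++)
                s += R[g][u * units + j] * h[j];
            a[g][u] = s;
        }
    for (int u = 0; u < units; u++) {
        float i = sig(a[0][u]), f = sig(a[1][u]);
        float g = std::tanh(a[2][u]), o = sig(a[3][u]);
        c[u] = f * c[u] + i * g;           // cell_act_add in the template above
        h[u] = o * std::tanh(c[u]);        // gate_o .* tanh(c'), the gate_forget product
    }
}
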
#pragma unroll - for (int x = 0; x < CONFIG_T::n_out; x++) { - hidden_state[x][0] = 0; - cell_state[x][0] = 0; - } - - // Input dimension - #pragma disable_loop_pipelining - for (int i = 0; i < CONFIG_T::n_timesteps; i++) { - // Data at current time step - for (int x = 0; x < CONFIG_T::n_in; x++) { - in[x] = data[x + i * CONFIG_T::n_in]; - } - - // Hidden state at current time step - #pragma unroll - for (int x = 0; x < CONFIG_T::n_out; x++) { - hidden_state_temp[x] = hidden_state[x][i]; - cell_state_temp[x] = cell_state[x][i]; - } - - // Do LSTM - lstm_cell(in, hidden_state_temp, h, cell_state_temp, c, WI, WF, WC, WO, RWI, RWF, RWC, RWO, - BI, BF, BC, BO); - - // Write result - #pragma unroll - for (int x = 0; x < CONFIG_T::n_out; x++) { - hidden_state[x][i + 1] = h[x]; - cell_state[x][i + 1] = c[x]; - } - } - - if (CONFIG_T::return_sequences == 0) { - // Output when return_sequences is false - #pragma unroll - for (int x = 0; x < CONFIG_T::n_out; x++) { - res[x] = hidden_state[x][CONFIG_T::n_timesteps]; - } - } else { - // Output when return_sequences is true - #pragma unroll - for (int x = 0; x < CONFIG_T::n_timesteps; x++) { - for (int h = 0; h < CONFIG_T::n_out; h++) { - res[x * CONFIG_T::n_out + h] = hidden_state[h][x + 1]; - } - } - } -} - -} // namespace nnet - -#endif diff --git a/hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_recurrent_activation.h b/hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_recurrent_activation.h deleted file mode 100644 index e5896e6..0000000 --- a/hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_recurrent_activation.h +++ /dev/null @@ -1,53 +0,0 @@ -#ifndef NNET_RECR_ACTIVATION_H_ -#define NNET_RECR_ACTIVATION_H_ - -#include "nnet_activation.h" -#include "nnet_common.h" - -namespace nnet { - -namespace activation { - -template class Activation { - public: - // ************************************************* - // Blank Activation - // ************************************************* - static void activation(data_T data[CONFIG_T::n_in], res_T res[CONFIG_T::n_in]) {} -}; - -template class relu : public Activation { - public: - // ************************************************* - // Relu Activation - // ************************************************* - static void activation(data_T data[CONFIG_T::n_in], res_T res[CONFIG_T::n_in]) { - nnet::relu(data, res); - } -}; - -template class sigmoid : public Activation { - public: - // ************************************************* - // Sigmoid Activation - // ************************************************* - static void activation(data_T data[CONFIG_T::n_in], res_T res[CONFIG_T::n_in]) { - nnet::sigmoid(data, res); - } -}; - -template class tanh : public Activation { - public: - // ************************************************* - // TanH Activation - // ************************************************* - static void activation(data_T data[CONFIG_T::n_in], res_T res[CONFIG_T::n_in]) { - nnet::dense_tanh(data, res); - } -}; - -} // namespace activation - -} // namespace nnet - -#endif diff --git a/hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_recurrent_stream.h b/hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_recurrent_stream.h deleted file mode 100644 index d439428..0000000 --- a/hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_recurrent_stream.h +++ /dev/null @@ -1,65 +0,0 @@ -#ifndef NNET_RECURRENT_STREAM_H_ -#define NNET_RECURRENT_STREAM_H_ - -#include "nnet_common.h" -#include "nnet_dense.h" -#include "nnet_recurrent_activation.h" - -namespace 
nnet { -template -void gru(stream &data_stream, stream &res_stream, - const typename CONFIG_T::weight_t weights[3 * CONFIG_T::n_units * CONFIG_T::n_in], - const typename CONFIG_T::weight_t recurrent_weights[3 * CONFIG_T::n_units * CONFIG_T::n_units], - const typename CONFIG_T::bias_t bias[3 * CONFIG_T::n_units], - const typename CONFIG_T::bias_t recurrent_bias[3 * CONFIG_T::n_units]) { - - hls_register typename res_T::value_type h[CONFIG_T::n_units]; - #pragma unroll - for (int i = 0; i < CONFIG_T::n_units; i++) { - h[i] = 0; - } - - hls_register typename data_T::value_type x[CONFIG_T::n_in]; - -DataPropagation: - for (int i_in = 0; i_in < CONFIG_T::n_timesteps * CONFIG_T::n_in / data_T::size; i_in++) { - data_T data_pack = data_stream.read(); - - DataPack: - #pragma unroll - for (int i_pack = 0; i_pack < data_T::size; i_pack++) { - x[i_pack] = data_pack[i_pack]; - } - - nnet::gru_cell(x, h, weights, recurrent_weights, - bias, recurrent_bias); - - if (CONFIG_T::return_sequences) { - res_T res_pack; - - ResPackRetSeq: - #pragma unroll - for (int i_pack = 0; i_pack < res_T::size; i_pack++) { - res_pack[i_pack] = h[i_pack]; - } - - res_stream.write(res_pack); - } - } - - if (!CONFIG_T::return_sequences) { - res_T res_pack; - - ResPackNoRetSeq: - #pragma unroll - for (int i_pack = 0; i_pack < res_T::size; i_pack++) { - res_pack[i_pack] = h[i_pack]; - } - - res_stream.write(res_pack); - } -} - -} // namespace nnet - -#endif diff --git a/hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_resize.h b/hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_resize.h deleted file mode 100644 index a8e3ffe..0000000 --- a/hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_resize.h +++ /dev/null @@ -1,38 +0,0 @@ -#ifndef NNET_IMAGE_H_ -#define NNET_IMAGE_H_ - -namespace nnet { - -struct resize_config { - static const unsigned in_height = 10; - static const unsigned in_width = 10; - - static const unsigned out_height = 10; - static const unsigned out_width = 10; - - static const unsigned n_chan = 10; -}; - -template -void resize_nearest(data_T image[CONFIG_T::height * CONFIG_T::width * CONFIG_T::n_chan], - data_T resized[CONFIG_T::new_height * CONFIG_T::new_width * CONFIG_T::n_chan]) { - int y_ratio = (int)((CONFIG_T::height << 16) / CONFIG_T::new_height) + 1; - int x_ratio = (int)((CONFIG_T::width << 16) / CONFIG_T::new_width) + 1; - - for (int i = 0; i < CONFIG_T::new_height; i++) { - for (int j = 0; j < CONFIG_T::new_width; j++) { - int x = ((j * x_ratio) >> 16); - int y = ((i * y_ratio) >> 16); - - #pragma unroll - for (int k = 0; k < CONFIG_T::n_chan; k++) { - resized[(i * CONFIG_T::new_width * CONFIG_T::n_chan) + j * CONFIG_T::n_chan + k] = - image[(y * CONFIG_T::width * CONFIG_T::n_chan) + x * CONFIG_T::n_chan + k]; - } - } - } -} - -} // namespace nnet - -#endif diff --git a/hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_resize_stream.h b/hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_resize_stream.h deleted file mode 100644 index 1b8864d..0000000 --- a/hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_resize_stream.h +++ /dev/null @@ -1,56 +0,0 @@ -#ifndef NNET_IMAGE_STREAM_H_ -#define NNET_IMAGE_STREAM_H_ - -#include "nnet_common.h" - -namespace nnet { - -template void resize_nearest(stream &image, stream &resized) { - assert(CONFIG_T::new_height % CONFIG_T::height == 0); - assert(CONFIG_T::new_width % CONFIG_T::width == 0); - - constexpr unsigned ratio_height = CONFIG_T::new_height / CONFIG_T::height; - constexpr unsigned ratio_width = 
CONFIG_T::new_width / CONFIG_T::width; - -ImageHeight: - for (unsigned h = 0; h < CONFIG_T::height; h++) { - hls_register data_T data_in_row[CONFIG_T::width]; - - ImageWidth: - for (unsigned i = 0; i < CONFIG_T::width; i++) { - hls_register data_T in_data = image.read(); - - ImageChan: - #pragma unroll - for (unsigned j = 0; j < CONFIG_T::n_chan; j++) { - data_in_row[i][j] = in_data[j]; - } - } - - ResizeHeight: - for (unsigned i = 0; i < ratio_height; i++) { - - ImageWidth2: - for (unsigned l = 0; l < CONFIG_T::width; l++) { - - ResizeWidth: - for (unsigned j = 0; j < ratio_width; j++) { - - hls_register data_T out_data; - - ResizeChan: - #pragma unroll - for (unsigned k = 0; k < CONFIG_T::n_chan; k++) { - out_data[k] = data_in_row[l][k]; - } - - resized.write(out_data); - } - } - } - } -} - -} // namespace nnet - -#endif diff --git a/hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_stream.h b/hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_stream.h deleted file mode 100644 index b5b55e2..0000000 --- a/hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_stream.h +++ /dev/null @@ -1,121 +0,0 @@ -#ifndef NNET_CLONE_H -#define NNET_CLONE_H - -#include "nnet_common.h" - -namespace nnet { - -struct broadcast_config { - static const unsigned in_height = 10; - static const unsigned in_width = 10; - static const unsigned n_chan = 1; - static const unsigned n_dupl = 2; -}; - -template -void clone_stream(stream &data, stream &res1, stream &res2) { -CloneLoop: - #pragma ii 1 - for (int i = 0; i < N / data_T::size; i++) { - data_T in_data = data.read(); - res_T out_data1; - res_T out_data2; - - ClonePack: - #pragma unroll - for (int j = 0; j < data_T::size; j++) { - out_data1[j] = in_data[j]; - out_data2[j] = in_data[j]; - } - - res1.write(out_data1); - res2.write(out_data2); - } -} - -template -void clone_stream(stream &data, stream &res1, stream &res2, stream &res3) { -CloneLoop: - #pragma ii 1 - for (int i = 0; i < N / data_T::size; i++) { - data_T in_data = data.read(); - res_T out_data1; - res_T out_data2; - res_T out_data3; - - ClonePack: - #pragma unroll - for (int j = 0; j < data_T::size; j++) { - out_data1[j] = in_data[j]; - out_data2[j] = in_data[j]; - out_data3[j] = in_data[j]; - } - - res1.write(out_data1); - res2.write(out_data2); - res3.write(out_data3); - } -} - -template void repack_stream(stream &data, stream &res) { - if (data_T::size == res_T::size) { - #pragma ii 1 - for (int i = 0; i < N / data_T::size; i++) { - - data_T in_data = data.read(); - res_T out_data; - - #pragma unroll - for (int j = 0; j < data_T::size; j++) { - out_data[j] = in_data[j]; - } - - res.write(out_data); - } - } else if (data_T::size > res_T::size) { - constexpr unsigned pack_diff = data_T::size / res_T::size; - - for (int i = 0; i < N / data_T::size; i++) { - - data_T in_data = data.read(); - res_T out_data; - - #pragma ii 1 - for (int j = 0; j < pack_diff; j++) { - - res_T out_data; - - #pragma unroll - for (int k = 0; k < res_T::size; k++) { - out_data[k] = in_data[j * res_T::size + k]; - } - res.write(out_data); - } - } - } else { // data_T::size < res_T::size - res_T out_data; - constexpr unsigned pack_diff = res_T::size / data_T::size; - unsigned pack_cnt = 0; - #pragma ii 1 - for (int i = 0; i < N / data_T::size; i++) { - - data_T in_data = data.read(); - - #pragma unroll - for (int j = 0; j < data_T::size; j++) { - out_data[pack_cnt * data_T::size + j] = in_data[j]; - } - - if (pack_cnt == pack_diff - 1) { - res.write(out_data); - pack_cnt = 0; - } else { - pack_cnt++; - } - } 
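
The narrow-to-wide branch above, with its pack_cnt counter, is the least obvious of the three repack cases. Below is a small host-side sketch of the same bookkeeping on flat buffers, assuming out_sz is a multiple of in_sz; the names are illustrative, not from the template.

#include <cassert>
#include <vector>

// Narrow-to-wide repacking: gather pack_diff = out_sz / in_sz input packs
// into one output pack before emitting, exactly as the pack_cnt counter does.
std::vector<std::vector<int>> repack_up(const std::vector<std::vector<int>> &in, int out_sz) {
    const int in_sz = static_cast<int>(in[0].size());
    assert(out_sz % in_sz == 0); // only the divisible case is handled, as above
    const int pack_diff = out_sz / in_sz;
    std::vector<std::vector<int>> out;
    std::vector<int> buf(out_sz);
    int pack_cnt = 0;
    for (const auto &p : in) {
        for (int j = 0; j < in_sz; j++)
            buf[pack_cnt * in_sz + j] = p[j]; // place this pack at its offset
        if (++pack_cnt == pack_diff) {        // output pack full: emit and reset
            out.push_back(buf);
            pack_cnt = 0;
        }
    }
    return out;
}
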
- } -} - -} // namespace nnet - -#endif diff --git a/hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_transpose.h b/hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_transpose.h deleted file mode 100644 index 05fd5fe..0000000 --- a/hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_transpose.h +++ /dev/null @@ -1,50 +0,0 @@ -#ifndef NNET_TRANSPOSE_H_ -#define NNET_TRANSPOSE_H_ - -namespace nnet { - -struct transpose_config { - static const unsigned height = 10; - static const unsigned width = 10; - static const unsigned depth = 10; - static constexpr unsigned perm[3] = {2, 0, 1}; -}; - -template -void transpose_2d(data_T data[CONFIG_T::height * CONFIG_T::width], res_T res[CONFIG_T::height * CONFIG_T::width]) { - for (int i = 0; i < CONFIG_T::height; i++) { - #pragma unroll - for (int j = 0; j < CONFIG_T::width; j++) { - res[j * CONFIG_T::height + i] = static_cast(data[i * CONFIG_T::width + j]); - } - } -} - -template -void transpose_3d(data_T data[CONFIG_T::depth * CONFIG_T::height * CONFIG_T::width], - res_T res[CONFIG_T::depth * CONFIG_T::height * CONFIG_T::width]) { - static constexpr unsigned dim_data[3] = {CONFIG_T::depth, CONFIG_T::height, CONFIG_T::width}; - static constexpr unsigned dim_res[3] = {dim_data[CONFIG_T::perm[0]], dim_data[CONFIG_T::perm[1]], - dim_data[CONFIG_T::perm[2]]}; - - int index_data[3] = {0}, index_res[3] = {0}; - - for (index_data[0] = 0; index_data[0] < dim_data[0]; index_data[0]++) { - #pragma unroll - for (index_data[1] = 0; index_data[1] < dim_data[1]; index_data[1]++) { - #pragma unroll - for (index_data[2] = 0; index_data[2] < dim_data[2]; index_data[2]++) { - index_res[0] = index_data[CONFIG_T::perm[0]]; - index_res[1] = index_data[CONFIG_T::perm[1]]; - index_res[2] = index_data[CONFIG_T::perm[2]]; - - res[index_res[0] * dim_res[1] * dim_res[2] + index_res[1] * dim_res[2] + index_res[2]] = static_cast( - data[index_data[0] * dim_data[1] * dim_data[2] + index_data[1] * dim_data[2] + index_data[2]]); - } - } - } -} - -} // namespace nnet - -#endif diff --git a/hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_transpose_stream.h b/hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_transpose_stream.h deleted file mode 100644 index cc6cd81..0000000 --- a/hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_transpose_stream.h +++ /dev/null @@ -1,32 +0,0 @@ -#ifndef NNET_TRANSPOSE_STREAM_H_ -#define NNET_TRANSPOSE_STREAM_H_ - -namespace nnet { - -template void transpose_2d(stream &data, stream &res) { - hls_register typename data_T::value_type data_array[CONFIG_T::height * CONFIG_T::width]; - - for (int i = 0; i < CONFIG_T::height * CONFIG_T::width / data_T::size; i++) { - hls_register data_T in_data = data.read(); - - #pragma unroll - for (int j = 0; j < data_T::size; j++) { - data_array[i * data_T::size + j] = typename data_T::value_type(in_data[j]); - } - } - - for (int i = 0; i < CONFIG_T::height * CONFIG_T::width / res_T::size; i++) { - hls_register res_T out_data; - - #pragma unroll - for (int j = 0; j < res_T::size; j++) { - out_data[j] = typename res_T::value_type(data_array[j * data_T::size + i]); - } - - res.write(out_data); - } -} - -} // namespace nnet - -#endif diff --git a/hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_types.h b/hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_types.h deleted file mode 100644 index 3209b9a..0000000 --- a/hls4ml/hls4ml/templates/quartus/firmware/nnet_utils/nnet_types.h +++ /dev/null @@ -1,78 +0,0 @@ -#ifndef NNET_TYPES_H_ -#define NNET_TYPES_H_ - -#include 
-#include -#include - -namespace nnet { - -// Fixed-size array -template struct array { - typedef T value_type; - static const unsigned size = N; - - T data[N]; - - array() {} - - array(T x) { - #pragma unroll - for (int i = 0; i < N; i++) { - data[i] = x; - } - } - - T &operator[](size_t pos) { return data[pos]; } - - const T &operator[](size_t pos) const { return data[pos]; } - - array &operator=(const array &other) { - if (&other == this) - return *this; - - assert(N == other.size && "Array sizes must match."); - - #pragma unroll - for (unsigned i = 0; i < N; i++) { - data[i] = other[i]; - } - return *this; - } -}; - -/* - * HLS Shift Register Implementation - * To verify a shift register is used in hardware, go to report.html > Area Analysis of System - * Unrolling the shift loop minimizes resource usage and latency at the same time - * The shift loop should be either fully unrolled or not unrolled at all - * Unrolling with a specific unroll factor or pipelining with certain ii's, can cause an irregular access pattern, which - * wouldn't allow shift register usage in RTL - */ -template struct shift_reg { - private: - T data[N]; - - public: - // Default constructor - shift_reg() {} - - // Shift queue, insert new element and return element from the front - T shift(T inp) { - T out = data[N - 1]; - - #pragma unroll - for (int i = N - 1; i > 0; i--) { - data[i] = data[i - 1]; - } - data[0] = inp; - - return out; - } - - T read(int pos) { return data[pos]; } -}; - -} // namespace nnet - -#endif diff --git a/hls4ml/hls4ml/templates/quartus/firmware/parameters.h b/hls4ml/hls4ml/templates/quartus/firmware/parameters.h deleted file mode 100644 index e23ca97..0000000 --- a/hls4ml/hls4ml/templates/quartus/firmware/parameters.h +++ /dev/null @@ -1,11 +0,0 @@ -#ifndef PARAMETERS_H_ -#define PARAMETERS_H_ - -#include "defines.h" - -#include "nnet_utils/nnet_helpers.h" -// hls-fpga-machine-learning insert includes - -// hls-fpga-machine-learning insert layer-config - -#endif diff --git a/hls4ml/hls4ml/templates/quartus/myproject_bridge.cpp b/hls4ml/hls4ml/templates/quartus/myproject_bridge.cpp deleted file mode 100644 index 35c1997..0000000 --- a/hls4ml/hls4ml/templates/quartus/myproject_bridge.cpp +++ /dev/null @@ -1,66 +0,0 @@ -#ifndef MYPROJECT_BRIDGE_H_ -#define MYPROJECT_BRIDGE_H_ - -#include "firmware/myproject.h" -#include "firmware/nnet_utils/nnet_helpers.h" -#include -#include - -// hls-fpga-machine-learning insert bram - -namespace nnet { -bool trace_enabled = false; -std::map *trace_outputs = NULL; -size_t trace_type_size = sizeof(double); -} // namespace nnet - -extern "C" { - -struct trace_data { - const char *name; - void *data; -}; - -void allocate_trace_storage(size_t element_size) { - nnet::trace_enabled = true; - nnet::trace_outputs = new std::map; - nnet::trace_type_size = element_size; - // hls-fpga-machine-learning insert trace_outputs -} - -void free_trace_storage() { - for (std::map::iterator i = nnet::trace_outputs->begin(); i != nnet::trace_outputs->end(); i++) { - void *ptr = i->second; - free(ptr); - } - nnet::trace_outputs->clear(); - delete nnet::trace_outputs; - nnet::trace_outputs = NULL; - nnet::trace_enabled = false; -} - -void collect_trace_output(struct trace_data *c_trace_outputs) { - int ii = 0; - for (std::map::iterator i = nnet::trace_outputs->begin(); i != nnet::trace_outputs->end(); i++) { - c_trace_outputs[ii].name = i->first.c_str(); - c_trace_outputs[ii].data = i->second; - ii++; - } -} - -// Wrapper of top level function for Python bridge -void 
myproject_float( - // hls-fpga-machine-learning insert header #float -) { - - // hls-fpga-machine-learning insert wrapper #float -} - -void myproject_double( - // hls-fpga-machine-learning insert header #double -) { - // hls-fpga-machine-learning insert wrapper #double -} -} - -#endif diff --git a/hls4ml/hls4ml/templates/quartus/myproject_test_parallel.cpp b/hls4ml/hls4ml/templates/quartus/myproject_test_parallel.cpp deleted file mode 100644 index 5e3dd96..0000000 --- a/hls4ml/hls4ml/templates/quartus/myproject_test_parallel.cpp +++ /dev/null @@ -1,112 +0,0 @@ -#include -#include -#include -#include -#include -#include - -#include "firmware/myproject.h" -#include "firmware/parameters.h" - -// hls-fpga-machine-learning insert bram - -#define CHECKPOINT 5000 - -// This function is written to avoid stringstream, which is -// not supported in cosim 20.1, and because strtok -// requires a const_cast or allocation to use with std::strings. -// This function returns the next float (by argument) at position pos, -// updating pos. True is returned if conversion done, false if the string -// has ended, and std::invalid_argument exception if the sting was bad. -bool nextToken(const std::string &str, std::size_t &pos, float &val) { - while (pos < str.size() && std::isspace(static_cast(str[pos]))) { - pos++; - } - if (pos >= str.size()) { - return false; - } - std::size_t offset = 0; - val = std::stof(str.substr(pos), &offset); - pos += offset; - return true; -} - -int main(int argc, char **argv) { - // load input data from text file - std::ifstream fin("tb_data/tb_input_features.dat"); - // load predictions from text file - std::ifstream fpr("tb_data/tb_output_predictions.dat"); - - std::string RESULTS_LOG = "tb_data/results.log"; - std::ofstream fout(RESULTS_LOG); - - std::string iline; - std::string pline; - - std::vector inputs; - std::vector outputs; - - if (fin.is_open() && fpr.is_open()) { - std::vector> predictions; - unsigned int num_iterations = 0; - for (; std::getline(fin, iline) && std::getline(fpr, pline); num_iterations++) { - if (num_iterations % CHECKPOINT == 0) { - std::cout << "Processing input " << num_iterations << std::endl; - } - - std::vector in; - std::vector pr; - float current; - - std::size_t pos = 0; - while (nextToken(iline, pos, current)) { - in.push_back(current); - } - - pos = 0; - while (nextToken(pline, pos, current)) { - pr.push_back(current); - } - - // hls-fpga-machine-learning insert data - predictions.push_back(std::move(pr)); - } - - // Do this separately to avoid vector reallocation - // hls-fpga-machine-learning insert top-level-function - - // hls-fpga-machine-learning insert run - - for (int j = 0; j < num_iterations; j++) { - // hls-fpga-machine-learning insert tb-output - if (j % CHECKPOINT == 0) { - std::cout << "Predictions" << std::endl; - // hls-fpga-machine-learning insert predictions - std::cout << "Quantized predictions" << std::endl; - // hls-fpga-machine-learning insert quantized - } - } - fin.close(); - fpr.close(); - } else { - const unsigned int num_iterations = 10; - std::cout << "INFO: Unable to open input/predictions file, using default input with " << num_iterations - << " invocations." 
<< std::endl; - // hls-fpga-machine-learning insert zero - - // hls-fpga-machine-learning insert top-level-function - - // hls-fpga-machine-learning insert run - - for (int j = 0; j < num_iterations; j++) { - // hls-fpga-machine-learning insert output - - // hls-fpga-machine-learning insert tb-output - } - } - - fout.close(); - std::cout << "INFO: Saved inference results to file: " << RESULTS_LOG << std::endl; - - return 0; -} diff --git a/hls4ml/hls4ml/templates/quartus/myproject_test_stream.cpp b/hls4ml/hls4ml/templates/quartus/myproject_test_stream.cpp deleted file mode 100644 index 8761114..0000000 --- a/hls4ml/hls4ml/templates/quartus/myproject_test_stream.cpp +++ /dev/null @@ -1,129 +0,0 @@ -#include -#include -#include -#include -#include -#include - -#include "firmware/myproject.h" -#include "firmware/parameters.h" - -#include "firmware/nnet_utils/nnet_helpers.h" - -// hls-fpga-machine-learning insert bram - -#define CHECKPOINT 5000 - -// This function is written to avoid stringstream, which is -// not supported in cosim 20.1, and because strtok -// requires a const_cast or allocation to use with std::strings. -// This function returns the next float (by argument) at position pos, -// updating pos. True is returned if conversion done, false if the string -// has ended, and std::invalid_argument exception if the sting was bad. -bool nextToken(const std::string &str, std::size_t &pos, float &val) { - while (pos < str.size() && std::isspace(static_cast(str[pos]))) { - pos++; - } - if (pos >= str.size()) { - return false; - } - std::size_t offset = 0; - val = std::stof(str.substr(pos), &offset); - pos += offset; - return true; -} - -int main(int argc, char **argv) { - // Load input data from text file - std::ifstream fin("tb_data/tb_input_features.dat"); - std::string iline; - - // Load predictions from text file - std::ifstream fpr("tb_data/tb_output_predictions.dat"); - std::string pline; - - // Output log - std::string RESULTS_LOG = "tb_data/results.log"; - std::ofstream fout(RESULTS_LOG); - - if (fin.is_open() && fpr.is_open()) { - std::vector> predictions; - - unsigned int iteration = 0; - while (std::getline(fin, iline) && std::getline(fpr, pline)) { - if (iteration % CHECKPOINT == 0) { - std::cout << "Processing input " << iteration << std::endl; - } - - // hls-fpga-machine learning instantiate inputs and outputs - - std::vector in; - std::vector pr; - float current; - - std::size_t pos = 0; - while (nextToken(iline, pos, current)) { - in.push_back(current); - } - - pos = 0; - while (nextToken(pline, pos, current)) { - pr.push_back(current); - } - - // hls-fpga-machine-learning insert data - - predictions.push_back(std::move(pr)); - - // hls-fpga-machine-learning insert top-level-function - - // hls-fpga-machine-learning insert run - - // hls-fpga-machine-learning convert output - - // hls-fpga-machine-learning insert tb-output - - if (iteration % CHECKPOINT == 0) { - std::cout << "Python Predictions" << std::endl; - // hls-fpga-machine-learning print predictions - - std::cout << "HLS predictions" << std::endl; - // hls-fpga-machine-learning print output - } - - iteration++; - } - - fin.close(); - fpr.close(); - - } else { - const unsigned int num_iterations = 10; - std::cout << "INFO: Unable to open input/predictions file, using default input with " << num_iterations - << " invocations." 
<< std::endl; - - for (unsigned int iteration = 0; iteration < num_iterations; iteration++) { - // hls-fpga-machine learning instantiate inputs and outputs - - // hls-fpga-machine-learning insert zero - - // hls-fpga-machine-learning insert top-level-function - - // hls-fpga-machine-learning insert run - - // hls-fpga-machine-learning convert output - - // hls-fpga-machine-learning insert tb-output - - if (iteration % CHECKPOINT == 0) { - std::cout << "HLS predictions" << std::endl; - // hls-fpga-machine-learning print output - } - } - } - - fout.close(); - std::cout << "INFO: Saved inference results to file: " << RESULTS_LOG << std::endl; - - return 0; -} diff --git a/hls4ml/hls4ml/templates/symbolic/build_lib.sh b/hls4ml/hls4ml/templates/symbolic/build_lib.sh deleted file mode 100755 index 3c07926..0000000 --- a/hls4ml/hls4ml/templates/symbolic/build_lib.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/bash - -CC=g++ -if [[ "$OSTYPE" == "linux-gnu" ]]; then - CFLAGS="-O3 -fPIC -std=c++11 -fno-gnu-unique" -elif [[ "$OSTYPE" == "darwin"* ]]; then - CFLAGS="-O3 -fPIC -std=c++11" -fi -HLS_LIBS_PATH=mylibspath -LDFLAGS="-Wl,--no-undefined -Wl,--no-allow-shlib-undefined -Wl,--no-as-needed -Wl,-rpath,${HLS_LIBS_PATH}/lib/csim -L ${HLS_LIBS_PATH}/lib/csim -lhlsmc++-GCC46 -lhlsm-GCC46 -fno-builtin -fno-inline -Wl,-rpath,${HLS_LIBS_PATH}/tools/fpo_v7_0 -L ${HLS_LIBS_PATH}/tools/fpo_v7_0 -lgmp -lmpfr -lIp_floating_point_v7_0_bitacc_cmodel" -INCFLAGS="-Ifirmware/ap_types/" -PROJECT=myproject -LIB_STAMP=mystamp - -${CC} ${CFLAGS} ${INCFLAGS} -c firmware/${PROJECT}.cpp -o ${PROJECT}.o -${CC} ${CFLAGS} ${INCFLAGS} -c ${PROJECT}_bridge.cpp -o ${PROJECT}_bridge.o -${CC} ${CFLAGS} ${INCFLAGS} -shared ${PROJECT}.o ${PROJECT}_bridge.o -o firmware/${PROJECT}-${LIB_STAMP}.so ${LDFLAGS} -rm -f *.o diff --git a/hls4ml/hls4ml/templates/vitis/nnet_utils/nnet_conv1d.h b/hls4ml/hls4ml/templates/vitis/nnet_utils/nnet_conv1d.h deleted file mode 100644 index 52a4046..0000000 --- a/hls4ml/hls4ml/templates/vitis/nnet_utils/nnet_conv1d.h +++ /dev/null @@ -1,68 +0,0 @@ -#ifndef NNET_CONV1D_H_ -#define NNET_CONV1D_H_ - -#include "nnet_common.h" -#include "nnet_conv1d_latency.h" -#include "nnet_conv1d_resource.h" -#include <cstdlib> - -namespace nnet { - -struct conv1d_config { - // Internal data type definitions - typedef float bias_t; - typedef float weight_t; - typedef float accum_t; - - // Convolutional parameters - static const unsigned pad_left = 0; - static const unsigned pad_right = 0; - static const unsigned in_width = 10; - static const unsigned n_chan = 0; - static const unsigned filt_width = 1; - static const unsigned kernel_size = filt_width; - static const unsigned n_filt = 1; - static const unsigned stride_width = 1; - static const unsigned dilation = 1; - static const unsigned out_width = 10; //(N_IN + PAD_LEFT + PAD_RIGHT - (DILATION * (FILT_WIDTH - 1) + 1)) / STRIDE + 1 - - static const unsigned reuse_factor = 1; - static const bool store_weights_in_bram = false; - static const unsigned n_zeros = 0; // not used yet -}; - -template <class data_T, class res_T, typename CONFIG_T> -void conv_1d_cl(data_T data[CONFIG_T::in_width * CONFIG_T::n_chan], res_T res[CONFIG_T::out_width * CONFIG_T::n_filt], - typename CONFIG_T::weight_t weights[CONFIG_T::filt_width * CONFIG_T::n_chan * CONFIG_T::n_filt], - typename CONFIG_T::bias_t biases[CONFIG_T::n_filt]) { - // Inlining helps reduce latency, but may also cause timing issues in some cases, use carefully.
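// As an illustration only (hls4ml normally generates these structs in
// parameters.h; the exact member set is assumed here), a concrete CONFIG_T
// specializes conv1d_config and pins the compile-time choices that select the
// latency/resource branch below:
//
//   struct my_conv1d_config : nnet::conv1d_config {
//       static const unsigned n_chan = 3;
//       static const unsigned n_filt = 8;
//       static const unsigned strategy = nnet::latency; // picks conv_1d_latency_cl
//   };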
- //#pragma HLS INLINE recursive - - if (CONFIG_T::strategy == nnet::latency) { - conv_1d_latency_cl(data, res, weights, biases); - } else { - conv_1d_resource_cl(data, res, weights, biases); - } -} - -template -void pointwise_conv_1d_cl(data_T data[CONFIG_T::in_width * CONFIG_T::n_chan], - res_T res[CONFIG_T::out_width * CONFIG_T::n_filt], - typename CONFIG_T::weight_t weights[CONFIG_T::n_chan * CONFIG_T::n_filt], - typename CONFIG_T::bias_t biases[CONFIG_T::n_filt]) { - assert(CONFIG_T::filt_width == 1); - - // Inlining helps reduce latency, but may also cause timing issues in some cases, use carefully. - //#pragma HLS INLINE recursive - - // Nothing special to be done for io_parallel implementation - if (CONFIG_T::strategy == nnet::latency) { - conv_1d_latency_cl(data, res, weights, biases); - } else { - conv_1d_resource_cl(data, res, weights, biases); - } -} - -} // namespace nnet - -#endif diff --git a/hls4ml/hls4ml/templates/vitis/nnet_utils/nnet_conv1d_latency.h b/hls4ml/hls4ml/templates/vitis/nnet_utils/nnet_conv1d_latency.h deleted file mode 100644 index 1bf25cc..0000000 --- a/hls4ml/hls4ml/templates/vitis/nnet_utils/nnet_conv1d_latency.h +++ /dev/null @@ -1,89 +0,0 @@ -#ifndef NNET_CONV1D_LATENCY_H_ -#define NNET_CONV1D_LATENCY_H_ - -#include "nnet_common.h" -#include "nnet_mult.h" -#include - -namespace nnet { - -template -void conv_1d_latency_cl(data_T data[CONFIG_T::in_width * CONFIG_T::n_chan], - res_T res[CONFIG_T::out_width * CONFIG_T::n_filt], - typename CONFIG_T::weight_t weights[CONFIG_T::filt_width * CONFIG_T::n_chan * CONFIG_T::n_filt], - typename CONFIG_T::bias_t biases[CONFIG_T::n_filt]) { - constexpr unsigned mult_n_in = CONFIG_T::filt_width * CONFIG_T::n_chan; - constexpr unsigned mult_n_out = CONFIG_T::n_filt; - - data_T data_buf[CONFIG_T::n_pixels][mult_n_in]; - #pragma HLS ARRAY_PARTITION variable=data_buf complete dim=0 - - typename CONFIG_T::accum_t mult[mult_n_in * mult_n_out]; - #pragma HLS ARRAY_PARTITION variable=mult complete - - typename CONFIG_T::accum_t acc[mult_n_out]; - #pragma HLS ARRAY_PARTITION variable=acc complete - - #pragma HLS ARRAY_PARTITION variable=weights complete - #pragma HLS ARRAY_PARTITION variable=biases complete - - // Limit multipliers to control parallelization - #pragma HLS ALLOCATION operation instances=mul limit=CONFIG_T::mult_config::multiplier_limit - -PartitionLoop: - for (int i_part = 0; i_part < CONFIG_T::n_partitions; i_part++) { - #pragma HLS PIPELINE II=CONFIG_T::reuse_factor rewind - - CONFIG_T::template fill_buffer::fill_buffer(data, data_buf, i_part); - - PixelLoop: - for (unsigned i_pxl = 0; i_pxl < CONFIG_T::n_pixels; i_pxl++) { - #pragma HLS UNROLL - - data_T cache; - - // Do the matrix-multiply - Product1: - for (int i_in = 0; i_in < mult_n_in; i_in++) { - #pragma HLS UNROLL - cache = data_buf[i_pxl][i_in]; - Product2: - for (int i_out = 0; i_out < mult_n_out; i_out++) { - #pragma HLS UNROLL - mult[i_in * mult_n_out + i_out] = - CONFIG_T::mult_config::template product::product( - cache, weights[i_in * mult_n_out + i_out]); - } - } - - // Initialize accumulator with input biases - ResetAccum: - for (int i_acc = 0; i_acc < mult_n_out; i_acc++) { - #pragma HLS UNROLL - acc[i_acc] = (typename CONFIG_T::accum_t)biases[i_acc]; - } - - // Accumulate multiplication result - Accum1: - for (int i_in = 0; i_in < mult_n_in; i_in++) { - #pragma HLS UNROLL - Accum2: - for (int i_out = 0; i_out < mult_n_out; i_out++) { - #pragma HLS UNROLL - acc[i_out] += mult[i_in * mult_n_out + i_out]; - } - } - - // Cast to "res_t" type - 
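// (each accumulator value is narrowed to the output precision here; for
// ap_fixed result types this applies the rounding/saturation behavior of res_T)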
Result: - for (int i_res = 0; i_res < mult_n_out; i_res++) { - #pragma HLS UNROLL - res[i_part * CONFIG_T::n_pixels * mult_n_out + i_pxl * mult_n_out + i_res] = - cast(acc[i_res]); - } - } - } -} - -} // namespace nnet -#endif diff --git a/hls4ml/hls4ml/templates/vitis/nnet_utils/nnet_conv1d_resource.h b/hls4ml/hls4ml/templates/vitis/nnet_utils/nnet_conv1d_resource.h deleted file mode 100644 index dd488ac..0000000 --- a/hls4ml/hls4ml/templates/vitis/nnet_utils/nnet_conv1d_resource.h +++ /dev/null @@ -1,104 +0,0 @@ -#ifndef NNET_CONV1D_RESOURCE_H_ -#define NNET_CONV1D_RESOURCE_H_ - -#include "nnet_common.h" -#include "nnet_dense.h" - -namespace nnet { - -template -void conv_1d_resource_cl(data_T data[CONFIG_T::in_width * CONFIG_T::n_chan], - res_T res[CONFIG_T::out_width * CONFIG_T::n_filt], - typename CONFIG_T::weight_t weights[CONFIG_T::filt_width * CONFIG_T::n_chan * CONFIG_T::n_filt], - typename CONFIG_T::bias_t biases[CONFIG_T::n_filt]) { - constexpr unsigned mult_n_in = CONFIG_T::filt_width * CONFIG_T::n_chan; - constexpr unsigned mult_n_out = CONFIG_T::n_filt; - constexpr unsigned block_factor = DIV_ROUNDUP(mult_n_in * mult_n_out, CONFIG_T::reuse_factor); - constexpr unsigned multscale = block_factor / mult_n_out; - - assert((block_factor % mult_n_out == 0 || CONFIG_T::reuse_factor >= mult_n_in) && - "The current Reuse Factor is not allowed"); - assert((CONFIG_T::reuse_factor <= CONFIG_T::filt_width * CONFIG_T::n_chan) && - "This function is correct only for RF <= FILT_WIDTH * N_CHAN"); - - // Treating weights as 2d is required to make sure Vitis doesn't use urem cores to calculate indices. - // Also, we don't apply ARRAY_RESHAPE pragma as Vitis figures this out on its own. - typename CONFIG_T::weight_t(*weights_2d)[CONFIG_T::reuse_factor] = - (typename CONFIG_T::weight_t(*)[CONFIG_T::reuse_factor])weights; - - data_T data_buf[CONFIG_T::n_pixels][mult_n_in]; - #pragma HLS ARRAY_PARTITION variable=data_buf complete dim=0 - - #pragma HLS ARRAY_PARTITION variable=biases complete - - typename CONFIG_T::accum_t acc[CONFIG_T::n_pixels][mult_n_out]; - #pragma HLS ARRAY_PARTITION variable=acc complete dim=0 - -PartitionLoop: - for (unsigned i_part = 0; i_part < CONFIG_T::n_partitions; i_part++) { - //#pragma HLS UNROLL // We don't want this loop unrolled - - CONFIG_T::template fill_buffer::fill_buffer(data, data_buf, i_part); - - PixelInitAccumLoop: - for (unsigned i_pxl = 0; i_pxl < CONFIG_T::n_pixels; i_pxl++) { - #pragma HLS UNROLL - - InitAccumLoop: - for (unsigned i_acc = 0; i_acc < mult_n_out; i_acc++) { - #pragma HLS UNROLL - acc[i_pxl][i_acc] = (typename CONFIG_T::accum_t)biases[i_acc]; - } - } - - ReuseLoop: - for (unsigned i_rf = 0; i_rf < CONFIG_T::reuse_factor; i_rf++) { - #pragma HLS PIPELINE II=1 rewind - - unsigned i_in = i_rf; - unsigned i_out = 0; - unsigned i_acc = 0; - - MultLoop: - for (unsigned i_blk = 0; i_blk < block_factor; i_blk++) { - #pragma HLS UNROLL - - PixelMultLoop: - for (unsigned i_pxl = 0; i_pxl < CONFIG_T::n_pixels; i_pxl++) { - #pragma HLS UNROLL - - acc[i_pxl][i_out] += static_cast( - CONFIG_T::mult_config::template product::product( - data_buf[i_pxl][i_in], weights_2d[i_blk][i_rf])); - } - - // Increment i_in - i_in += CONFIG_T::reuse_factor; - if (i_in >= mult_n_in) { - i_in = i_rf; - } - // Increment i_out - if (i_acc + 1 >= multscale) { - i_acc = 0; - i_out++; - } else { - i_acc++; - } - } - } - - PixelResultLoop: - for (unsigned i_pxl = 0; i_pxl < CONFIG_T::n_pixels; i_pxl++) { - #pragma HLS UNROLL - // Cast to "res_t" type - ResultLoop: - for 
(unsigned i_res = 0; i_res < mult_n_out; i_res++) { - #pragma HLS UNROLL - *(res++) = cast(acc[i_pxl][i_res]); - } - } - } -} - -} // namespace nnet -#endif diff --git a/hls4ml/hls4ml/templates/vitis/nnet_utils/nnet_conv1d_stream.h b/hls4ml/hls4ml/templates/vitis/nnet_utils/nnet_conv1d_stream.h deleted file mode 100644 index 49b2849..0000000 --- a/hls4ml/hls4ml/templates/vitis/nnet_utils/nnet_conv1d_stream.h +++ /dev/null @@ -1,34 +0,0 @@ -#ifndef NNET_CONV1D_STREAM_H_ -#define NNET_CONV1D_STREAM_H_ - -#include "hls_stream.h" -#include "nnet_common.h" -#include "nnet_conv_stream.h" - -namespace nnet { - -template -void conv_1d_cl(hls::stream &data, hls::stream &res, - typename CONFIG_T::weight_t weights[CONFIG_T::filt_width * CONFIG_T::n_chan * CONFIG_T::n_filt], - typename CONFIG_T::bias_t biases[CONFIG_T::n_filt]) { - assert(CONFIG_T::implementation == conv_implementation::linebuffer && - "Only \"linebuffer\" implementation is supported in Vitis HLS."); - - assert(CONFIG_T::pad_left == 0 && CONFIG_T::pad_right == 0); - - if (CONFIG_T::strategy == nnet::latency) { - ReadInputWidth: - for (unsigned i_iw = 0; i_iw < CONFIG_T::in_width; i_iw++) { - #pragma HLS PIPELINE II=CONFIG_T::reuse_factor - compute_output_buffer_1d(data.read(), res, weights, biases); - } - } else { - ReadInputWidthSerial: - for (unsigned i_iw = 0; i_iw < CONFIG_T::in_width; i_iw++) { - compute_output_buffer_1d(data.read(), res, weights, biases); - } - } -} - -} // namespace nnet -#endif diff --git a/hls4ml/hls4ml/templates/vitis/nnet_utils/nnet_conv2d.h b/hls4ml/hls4ml/templates/vitis/nnet_utils/nnet_conv2d.h deleted file mode 100644 index 7095428..0000000 --- a/hls4ml/hls4ml/templates/vitis/nnet_utils/nnet_conv2d.h +++ /dev/null @@ -1,77 +0,0 @@ -#ifndef NNET_CONV2D_H_ -#define NNET_CONV2D_H_ - -#include "nnet_common.h" -#include "nnet_conv2d_latency.h" -#include "nnet_conv2d_resource.h" -#include - -namespace nnet { - -struct conv2d_config { - // Internal data type definitions - typedef float bias_t; - typedef float weight_t; - typedef float accum_t; - - // Convolutional parameters - static const unsigned pad_top = 0; - static const unsigned pad_bottom = 0; - static const unsigned pad_left = 0; - static const unsigned pad_right = 0; - static const unsigned in_height = 10; - static const unsigned in_width = 10; - static const unsigned n_chan = 1; - static const unsigned filt_height = 1; - static const unsigned filt_width = 1; - static const unsigned kernel_size = filt_height * filt_width; - static const unsigned n_filt = 1; - static const unsigned stride_height = 1; - static const unsigned stride_width = 1; - static const unsigned out_height = 10; - static const unsigned out_width = 10; - static const unsigned dilation_height = 1; - static const unsigned dilation_width = 1; - - static const unsigned reuse_factor = 1; - static const bool store_weights_in_bram = false; - static const unsigned n_zeros = 0; // not used yet -}; - -template -void conv_2d_cl( - data_T data[CONFIG_T::in_height * CONFIG_T::in_width * CONFIG_T::n_chan], - res_T res[CONFIG_T::out_height * CONFIG_T::out_width * CONFIG_T::n_filt], - typename CONFIG_T::weight_t weights[CONFIG_T::filt_height * CONFIG_T::filt_width * CONFIG_T::n_chan * CONFIG_T::n_filt], - typename CONFIG_T::bias_t biases[CONFIG_T::n_filt]) { - // Inlining helps reduce latency, but may also cause timing issues in some cases, use carefully. 
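// For reference, with the padding and dilation fields above, the output size
// follows the usual convolution arithmetic, e.g. for the width dimension:
//   out_width = (in_width + pad_left + pad_right - dilation_width * (filt_width - 1) - 1) / stride_width + 1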
- //#pragma HLS INLINE recursive - - if (CONFIG_T::strategy == nnet::latency) { - conv_2d_latency_cl(data, res, weights, biases); - } else { - conv_2d_resource_cl(data, res, weights, biases); - } -} - -template -void pointwise_conv_2d_cl(data_T data[CONFIG_T::in_height * CONFIG_T::in_width * CONFIG_T::n_chan], - res_T res[CONFIG_T::out_height * CONFIG_T::out_width * CONFIG_T::n_filt], - typename CONFIG_T::weight_t weights[CONFIG_T::n_chan * CONFIG_T::n_filt], - typename CONFIG_T::bias_t biases[CONFIG_T::n_filt]) { - assert(CONFIG_T::filt_width == 1); - - // Inlining helps reduce latency, but may also cause timing issues in some cases, use carefully. - //#pragma HLS INLINE recursive - - // Nothing special to be done for io_parallel implementation - if (CONFIG_T::strategy == nnet::latency) { - conv_2d_latency_cl(data, res, weights, biases); - } else { - conv_2d_resource_cl(data, res, weights, biases); - } -} - -} // namespace nnet - -#endif diff --git a/hls4ml/hls4ml/templates/vitis/nnet_utils/nnet_conv2d_latency.h b/hls4ml/hls4ml/templates/vitis/nnet_utils/nnet_conv2d_latency.h deleted file mode 100644 index c286e86..0000000 --- a/hls4ml/hls4ml/templates/vitis/nnet_utils/nnet_conv2d_latency.h +++ /dev/null @@ -1,90 +0,0 @@ -#ifndef NNET_CONV2D_LATENCY_H_ -#define NNET_CONV2D_LATENCY_H_ - -#include "nnet_common.h" -#include "nnet_mult.h" -#include - -namespace nnet { - -template -void conv_2d_latency_cl( - data_T data[CONFIG_T::in_height * CONFIG_T::in_width * CONFIG_T::n_chan], - res_T res[CONFIG_T::out_height * CONFIG_T::out_width * CONFIG_T::n_filt], - typename CONFIG_T::weight_t weights[CONFIG_T::filt_height * CONFIG_T::filt_width * CONFIG_T::n_chan * CONFIG_T::n_filt], - typename CONFIG_T::bias_t biases[CONFIG_T::n_filt]) { - constexpr unsigned mult_n_in = CONFIG_T::filt_height * CONFIG_T::filt_width * CONFIG_T::n_chan; - constexpr unsigned mult_n_out = CONFIG_T::n_filt; - - data_T data_buf[CONFIG_T::n_pixels][mult_n_in]; - #pragma HLS ARRAY_PARTITION variable=data_buf complete dim=0 - - typename CONFIG_T::accum_t mult[mult_n_in * mult_n_out]; - #pragma HLS ARRAY_PARTITION variable=mult complete - - typename CONFIG_T::accum_t acc[mult_n_out]; - #pragma HLS ARRAY_PARTITION variable=acc complete - - #pragma HLS ARRAY_PARTITION variable=weights complete - #pragma HLS ARRAY_PARTITION variable=biases complete - - // Limit multipliers to control parallelization - #pragma HLS ALLOCATION operation instances=mul limit=CONFIG_T::mult_config::multiplier_limit - -PartitionLoop: - for (int i_part = 0; i_part < CONFIG_T::n_partitions; i_part++) { - #pragma HLS PIPELINE II=CONFIG_T::reuse_factor rewind - - CONFIG_T::template fill_buffer::fill_buffer(data, data_buf, i_part); - - PixelLoop: - for (unsigned i_pxl = 0; i_pxl < CONFIG_T::n_pixels; i_pxl++) { - #pragma HLS UNROLL - - data_T cache; - - // Do the matrix-multiply - Product1: - for (int i_in = 0; i_in < mult_n_in; i_in++) { - #pragma HLS UNROLL - cache = data_buf[i_pxl][i_in]; - Product2: - for (int i_out = 0; i_out < mult_n_out; i_out++) { - #pragma HLS UNROLL - mult[i_in * mult_n_out + i_out] = - CONFIG_T::mult_config::template product::product( - cache, weights[i_in * mult_n_out + i_out]); - } - } - - // Initialize accumulator with input biases - ResetAccum: - for (int i_acc = 0; i_acc < mult_n_out; i_acc++) { - #pragma HLS UNROLL - acc[i_acc] = (typename CONFIG_T::accum_t)biases[i_acc]; - } - - // Accumulate multiplication result - Accum1: - for (int i_in = 0; i_in < mult_n_in; i_in++) { - #pragma HLS UNROLL - Accum2: - for (int i_out = 0; 
i_out < mult_n_out; i_out++) { - #pragma HLS UNROLL - acc[i_out] += mult[i_in * mult_n_out + i_out]; - } - } - - // Cast to "res_t" type - Result: - for (int i_res = 0; i_res < mult_n_out; i_res++) { - #pragma HLS UNROLL - res[i_part * CONFIG_T::n_pixels * mult_n_out + i_pxl * mult_n_out + i_res] = - cast(acc[i_res]); - } - } - } -} - -} // namespace nnet -#endif diff --git a/hls4ml/hls4ml/templates/vitis/nnet_utils/nnet_conv2d_resource.h b/hls4ml/hls4ml/templates/vitis/nnet_utils/nnet_conv2d_resource.h deleted file mode 100644 index e0d30da..0000000 --- a/hls4ml/hls4ml/templates/vitis/nnet_utils/nnet_conv2d_resource.h +++ /dev/null @@ -1,107 +0,0 @@ -#ifndef NNET_CONV2D_RESOURCE_H_ -#define NNET_CONV2D_RESOURCE_H_ - -#include "nnet_common.h" -#include "nnet_dense.h" - -namespace nnet { - -template -void conv_2d_resource_cl( - data_T data[CONFIG_T::in_height * CONFIG_T::in_width * CONFIG_T::n_chan], - res_T res[CONFIG_T::out_height * CONFIG_T::out_width * CONFIG_T::n_filt], - typename CONFIG_T::weight_t weights[CONFIG_T::filt_height * CONFIG_T::filt_width * CONFIG_T::n_chan * CONFIG_T::n_filt], - typename CONFIG_T::bias_t biases[CONFIG_T::n_filt]) { - constexpr unsigned mult_n_in = CONFIG_T::filt_height * CONFIG_T::filt_width * CONFIG_T::n_chan; - constexpr unsigned mult_n_out = CONFIG_T::n_filt; - constexpr unsigned block_factor = DIV_ROUNDUP(mult_n_in * mult_n_out, CONFIG_T::reuse_factor); - - constexpr unsigned multiplier_limit = DIV_ROUNDUP(mult_n_in * mult_n_out, CONFIG_T::reuse_factor); - constexpr unsigned multscale = multiplier_limit / mult_n_out; - - assert((multiplier_limit % mult_n_out == 0 || CONFIG_T::reuse_factor >= mult_n_in) && - "The current Reuse Factor is not allowed"); - assert((multiplier_limit == block_factor) && - "This function is correct only for RF <= FILT_HEIGHT * FILT_WIDTH * N_CHAN"); - - // Treating weights as 2d is required to make sure Vitis doesn't use urem cores to calculate indices. - // Also, we don't apply ARRAY_RESHAPE pragma as Vitis figures this out on its own. 
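// The reinterpretation below is index-preserving: weights_2d[i_blk][i_rf]
// aliases weights[i_blk * CONFIG_T::reuse_factor + i_rf], so each pipelined
// ReuseLoop iteration reads one column of the reshaped weight array.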
- typename CONFIG_T::weight_t(*weights_2d)[CONFIG_T::reuse_factor] = - (typename CONFIG_T::weight_t(*)[CONFIG_T::reuse_factor])weights; - - data_T data_buf[CONFIG_T::n_pixels][mult_n_in]; - #pragma HLS ARRAY_PARTITION variable=data_buf complete dim=0 - - #pragma HLS ARRAY_PARTITION variable=biases complete - - typename CONFIG_T::accum_t acc[CONFIG_T::n_pixels][mult_n_out]; - #pragma HLS ARRAY_PARTITION variable=acc complete dim=0 - -PartitionLoop: - for (unsigned i_part = 0; i_part < CONFIG_T::n_partitions; i_part++) { - //#pragma HLS UNROLL // We don't want this loop unrolled - - CONFIG_T::template fill_buffer::fill_buffer(data, data_buf, i_part); - - PixelInitAccumLoop: - for (unsigned i_pxl = 0; i_pxl < CONFIG_T::n_pixels; i_pxl++) { - #pragma HLS UNROLL - - InitAccumLoop: - for (unsigned i_acc = 0; i_acc < mult_n_out; i_acc++) { - #pragma HLS UNROLL - acc[i_pxl][i_acc] = (typename CONFIG_T::accum_t)biases[i_acc]; - } - } - - ReuseLoop: - for (unsigned i_rf = 0; i_rf < CONFIG_T::reuse_factor; i_rf++) { - #pragma HLS PIPELINE II=1 rewind - - unsigned i_in = i_rf; - unsigned i_out = 0; - unsigned i_acc = 0; - - MultLoop: - for (unsigned i_blk = 0; i_blk < block_factor; i_blk++) { - #pragma HLS UNROLL - - PixelMultLoop: - for (unsigned i_pxl = 0; i_pxl < CONFIG_T::n_pixels; i_pxl++) { - #pragma HLS UNROLL - - acc[i_pxl][i_out] += static_cast( - CONFIG_T::mult_config::template product::product( - data_buf[i_pxl][i_in], weights_2d[i_blk][i_rf])); - } - - // Increment i_in - i_in += CONFIG_T::reuse_factor; - if (i_in >= mult_n_in) { - i_in = i_rf; - } - // Increment i_out - if (i_acc + 1 >= multscale) { - i_acc = 0; - i_out++; - } else { - i_acc++; - } - } - } - - PixelResultLoop: - for (unsigned i_pxl = 0; i_pxl < CONFIG_T::n_pixels; i_pxl++) { - #pragma HLS UNROLL - // Cast to "res_t" type - ResultLoop: - for (unsigned i_res = 0; i_res < mult_n_out; i_res++) { - #pragma HLS UNROLL - *(res++) = cast(acc[i_pxl][i_res]); - } - } - } -} - -} // namespace nnet -#endif diff --git a/hls4ml/hls4ml/templates/vitis/nnet_utils/nnet_conv2d_stream.h b/hls4ml/hls4ml/templates/vitis/nnet_utils/nnet_conv2d_stream.h deleted file mode 100644 index b1648e7..0000000 --- a/hls4ml/hls4ml/templates/vitis/nnet_utils/nnet_conv2d_stream.h +++ /dev/null @@ -1,82 +0,0 @@ -#ifndef NNET_CONV2D_STREAM_H_ -#define NNET_CONV2D_STREAM_H_ - -#include "ap_shift_reg.h" -#include "hls_stream.h" -#include "nnet_common.h" -#include "nnet_conv_stream.h" - -namespace nnet { - -// Line Buffer -template -void conv_2d_buffer_latency_cl( - hls::stream &data, hls::stream &res, - typename CONFIG_T::weight_t weights[CONFIG_T::filt_height * CONFIG_T::filt_width * CONFIG_T::n_chan * CONFIG_T::n_filt], - typename CONFIG_T::bias_t biases[CONFIG_T::n_filt]) { - assert(CONFIG_T::pad_top == 0 && CONFIG_T::pad_bottom == 0 && CONFIG_T::pad_left == 0 && CONFIG_T::pad_right == 0); - - static ap_shift_reg line_buffer[MAX(CONFIG_T::filt_height - 1, 1)] - [CONFIG_T::n_chan]; - #pragma HLS ARRAY_PARTITION variable = line_buffer complete dim = 2 - -ReadInputHeight: - for (unsigned i_ih = 0; i_ih < CONFIG_T::in_height; i_ih++) { - ReadInputWidth: - for (unsigned i_iw = 0; i_iw < CONFIG_T::in_width; i_iw++) { - #pragma HLS LOOP_FLATTEN - #pragma HLS PIPELINE II=CONFIG_T::reuse_factor - - if (CONFIG_T::filt_height > 1) { - compute_output_buffer_2d(data.read(), line_buffer, res, weights, biases); - } else { - compute_output_buffer_1d(data.read(), res, weights, biases); - } - } - } -} - -template -void conv_2d_buffer_resource_cl( - hls::stream &data, 
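// input activation stream (the output stream and weight/bias arrays follow)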
hls::stream &res, - typename CONFIG_T::weight_t weights[CONFIG_T::filt_height * CONFIG_T::filt_width * CONFIG_T::n_chan * CONFIG_T::n_filt], - typename CONFIG_T::bias_t biases[CONFIG_T::n_filt]) { - assert(CONFIG_T::pad_top == 0 && CONFIG_T::pad_bottom == 0 && CONFIG_T::pad_left == 0 && CONFIG_T::pad_right == 0); - - static ap_shift_reg line_buffer[MAX(CONFIG_T::filt_height - 1, 1)] - [CONFIG_T::n_chan]; - #pragma HLS ARRAY_PARTITION variable = line_buffer complete dim = 2 - -ReadInputHeight: - for (unsigned i_ih = 0; i_ih < CONFIG_T::in_height; i_ih++) { - ReadInputWidth: - for (unsigned i_iw = 0; i_iw < CONFIG_T::in_width; i_iw++) { - #pragma HLS LOOP_FLATTEN - - if (CONFIG_T::filt_height > 1) { - compute_output_buffer_2d<data_T, res_T, CONFIG_T>(data.read(), line_buffer, res, weights, biases); - } else { - compute_output_buffer_1d<data_T, res_T, CONFIG_T>(data.read(), res, weights, biases); - } - } - } -} - -template <class data_T, class res_T, typename CONFIG_T> -void conv_2d_cl( - hls::stream<data_T> &data, hls::stream<res_T> &res, - typename CONFIG_T::weight_t weights[CONFIG_T::filt_height * CONFIG_T::filt_width * CONFIG_T::n_chan * CONFIG_T::n_filt], - typename CONFIG_T::bias_t biases[CONFIG_T::n_filt]) { - assert(CONFIG_T::implementation == conv_implementation::linebuffer && - "Only \"linebuffer\" implementation is supported in Vitis HLS."); - - #pragma HLS INLINE recursive - if (CONFIG_T::strategy == nnet::latency) { - conv_2d_buffer_latency_cl<data_T, res_T, CONFIG_T>(data, res, weights, biases); - } else { - conv_2d_buffer_resource_cl<data_T, res_T, CONFIG_T>(data, res, weights, biases); - } -} - -} // namespace nnet -#endif diff --git a/hls4ml/hls4ml/templates/vitis/nnet_utils/nnet_dense_resource.h b/hls4ml/hls4ml/templates/vitis/nnet_utils/nnet_dense_resource.h deleted file mode 100644 index 71776cf..0000000 --- a/hls4ml/hls4ml/templates/vitis/nnet_utils/nnet_dense_resource.h +++ /dev/null @@ -1,250 +0,0 @@ -#ifndef NNET_DENSE_RESOURCE_H_ -#define NNET_DENSE_RESOURCE_H_ - -#include "hls_stream.h" -#include "nnet_common.h" -#include "nnet_mult.h" -#include <assert.h> -#include <math.h> - -namespace nnet { - -template <class data_T, class res_T, typename CONFIG_T> -void dense_resource_rf_leq_nin(data_T data[CONFIG_T::n_in], res_T res[CONFIG_T::n_out], - typename CONFIG_T::weight_t weights[CONFIG_T::n_in * CONFIG_T::n_out], - typename CONFIG_T::bias_t biases[CONFIG_T::n_out]) { - - const int multiplier_limit = DIV_ROUNDUP(CONFIG_T::n_in * CONFIG_T::n_out, CONFIG_T::reuse_factor); - const int block_factor = DIV_ROUNDUP(CONFIG_T::n_in * CONFIG_T::n_out, CONFIG_T::reuse_factor); - const int multscale = multiplier_limit / CONFIG_T::n_out; - - assert((multiplier_limit % CONFIG_T::n_out == 0 || CONFIG_T::reuse_factor >= CONFIG_T::n_in) && - "The current Reuse Factor is not allowed"); - assert((multiplier_limit == block_factor) && "This function is correct only for RF <= N_IN"); - - // Treating weights as 2d is required to make sure Vitis doesn't use urem cores to calculate indices. - // Also, we don't apply ARRAY_RESHAPE pragma as Vitis figures this out on its own. - typename CONFIG_T::weight_t(*weights_2d)[CONFIG_T::reuse_factor] = - (typename CONFIG_T::weight_t(*)[CONFIG_T::reuse_factor])weights;
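// A small worked example of the loops below, under assumed sizes n_in = 4,
// n_out = 2, reuse_factor = 2: block_factor = 4 and multscale = 2, so each of
// the 2 pipelined ReuseLoop iterations performs 4 MACs, with in_index cycling
// through {ir, ir + 2} and out_index advancing after every multscale = 2 products.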
- - #pragma HLS ARRAY_PARTITION variable=biases complete - - typename CONFIG_T::accum_t acc[CONFIG_T::n_out]; - #pragma HLS ARRAY_PARTITION variable=acc complete - -InitAccum: - for (int iacc = 0; iacc < CONFIG_T::n_out; iacc++) { - #pragma HLS UNROLL - acc[iacc] = (typename CONFIG_T::accum_t)biases[iacc]; - } - -ReuseLoop: - for (int ir = 0; ir < CONFIG_T::reuse_factor; ir++) { - #pragma HLS PIPELINE II=1 rewind - - int in_index = ir; - int out_index = 0; - int acc_step = 0; - - MultLoop: - for (int im = 0; im < block_factor; im++) { - #pragma HLS UNROLL - - acc[out_index] += static_cast( - CONFIG_T::template product::product(data[in_index], - weights_2d[im][ir])); - - // Increment in_index - in_index += CONFIG_T::reuse_factor; - if (in_index >= CONFIG_T::n_in) { - in_index = ir; - } - // Increment out_index - if (acc_step + 1 >= multscale) { - acc_step = 0; - out_index++; - } else { - acc_step++; - } - } - } - -// Cast to "res_t" type -Result: - for (int ires = 0; ires < CONFIG_T::n_out; ires++) { - #pragma HLS UNROLL - res[ires] = cast(acc[ires]); - } -} - -template -void dense_resource_rf_gt_nin_rem0(data_T data[CONFIG_T::n_in], res_T res[CONFIG_T::n_out], - typename CONFIG_T::weight_t weights[CONFIG_T::n_in * CONFIG_T::n_out], - typename CONFIG_T::bias_t biases[CONFIG_T::n_out]) { - - const int multiplier_limit = DIV_ROUNDUP(CONFIG_T::n_in * CONFIG_T::n_out, CONFIG_T::n_in); - const int block_factor = DIV_ROUNDUP(CONFIG_T::n_in * CONFIG_T::n_out, CONFIG_T::reuse_factor); - - assert((multiplier_limit % CONFIG_T::n_out == 0 || CONFIG_T::reuse_factor >= CONFIG_T::n_in) && - "The current Reuse Factor is not allowed"); - assert((CONFIG_T::reuse_factor > CONFIG_T::n_in && CONFIG_T::reuse_factor % CONFIG_T::n_in == 0) && - "This function is correct only for RF > N_IN && RF % N_IN == 0"); - - // Treating weights as 2d is required to make sure Vitis doesn't use urem cores to calculate indices. - // Also, we don't apply ARRAY_RESHAPE pragma as Vitis figures this out on its own. - typename CONFIG_T::weight_t(*weights_2d)[CONFIG_T::reuse_factor] = - (typename CONFIG_T::weight_t(*)[CONFIG_T::reuse_factor])weights; - - #pragma HLS ARRAY_PARTITION variable=biases complete - - typename CONFIG_T::accum_t acc[CONFIG_T::n_out]; - #pragma HLS ARRAY_PARTITION variable=acc complete - -InitAccum: - for (int iacc = 0; iacc < CONFIG_T::n_out; iacc++) { - #pragma HLS UNROLL - acc[iacc] = (typename CONFIG_T::accum_t)biases[iacc]; - } - - int in_index = 0; - int out_index; - int outstep = 0; - const int outscale = CONFIG_T::reuse_factor / CONFIG_T::n_in; - - int outidx[CONFIG_T::reuse_factor]; -IndexLoop: - for (int ir = 0; ir < CONFIG_T::reuse_factor; ir++) { - outidx[ir] = outstep; - if ((ir + 1) % CONFIG_T::n_in == 0) { - outstep++; - } - } - -ReuseLoop: - for (int ir = 0; ir < CONFIG_T::reuse_factor; ir++) { - #pragma HLS PIPELINE II=1 rewind - - out_index = outidx[ir] /*outstep*/; - - MultLoop: - for (int im = 0; im < block_factor; im++) { - #pragma HLS UNROLL - - acc[out_index] += static_cast( - CONFIG_T::template product::product(data[in_index], - weights_2d[im][ir])); - - out_index += outscale; - } - - in_index++; - if (in_index >= CONFIG_T::n_in) { - in_index = 0; - // outstep++; // This causes a huge increase in scheduling and RTL generation times, hence the above workaround. 
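// For example (sizes assumed), with n_in = 2 and reuse_factor = 4 the
// IndexLoop above precomputes outidx = {0, 0, 1, 1} and outscale = 2, so the
// pipelined body pays only a table lookup instead of a conditional increment.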
- } - } - -// Cast to "res_t" type -Result: - for (int ires = 0; ires < CONFIG_T::n_out; ires++) { - #pragma HLS UNROLL - res[ires] = cast(acc[ires]); - } -} - -template -void dense_resource_rf_gt_nin(data_T data[CONFIG_T::n_in], res_T res[CONFIG_T::n_out], - typename CONFIG_T::weight_t weights[CONFIG_T::n_in * CONFIG_T::n_out], - typename CONFIG_T::bias_t biases[CONFIG_T::n_out]) { - - const int multiplier_limit = CONFIG_T::n_out; - const int block_factor = DIV_ROUNDUP(CONFIG_T::n_in * CONFIG_T::n_out, CONFIG_T::reuse_factor); - - assert((multiplier_limit % CONFIG_T::n_out == 0 || CONFIG_T::reuse_factor >= CONFIG_T::n_in) && - "The current Reuse Factor is not allowed"); - assert((CONFIG_T::reuse_factor > CONFIG_T::n_in) && "This function is correct only for RF > N_IN"); - - // Treating weights as 2d is required to make sure Vitis doesn't use urem cores to calculate indices. - // Also, we don't apply ARRAY_RESHAPE pragma as Vitis figures this out on its own. - typename CONFIG_T::weight_t(*weights_2d)[CONFIG_T::reuse_factor] = - (typename CONFIG_T::weight_t(*)[CONFIG_T::reuse_factor])weights; - - #pragma HLS ARRAY_PARTITION variable=biases complete - - typename CONFIG_T::accum_t acc[CONFIG_T::n_out]; - #pragma HLS ARRAY_PARTITION variable=acc complete - -InitAccum: - for (int iacc = 0; iacc < CONFIG_T::n_out; iacc++) { - #pragma HLS UNROLL - acc[iacc] = (typename CONFIG_T::accum_t)biases[iacc]; - } - -ReuseLoop: - for (int ir = 0; ir < CONFIG_T::reuse_factor; ir++) { - #pragma HLS PIPELINE II=1 rewind - typename CONFIG_T::accum_t tmpmult[block_factor]; - #pragma HLS ARRAY_PARTITION variable=tmpmult complete - - MultLoop: - for (int im = 0; im < block_factor; im++) { - #pragma HLS UNROLL - int w_index = ir + CONFIG_T::reuse_factor * im; - int in_index = w_index % CONFIG_T::n_in; // As of Vitis HLS 2022.1, this still results in urem core being used. 
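// (the rem0 specialization above avoids this modulo entirely when
// reuse_factor % n_in == 0; in this general case the weight index does not
// step through the inputs in a fixed pattern, so the remainder is unavoidable)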
- tmpmult[im] = - CONFIG_T::template product::product(data[in_index], weights_2d[im][ir]); - } - - typename CONFIG_T::accum_t mult[multiplier_limit]; - #pragma HLS ARRAY_PARTITION variable=mult complete - - ResetMult: - for (int imult = 0; imult < multiplier_limit; imult++) { - #pragma HLS UNROLL - mult[imult] = 0; - } - - AccumLoop1: - for (int im = 0; im < block_factor; im++) { - #pragma HLS UNROLL - int w_index = ir + CONFIG_T::reuse_factor * im; - int out_index = w_index / CONFIG_T::n_in; - if (out_index >= multiplier_limit) - continue; // check out of bounds - mult[out_index] += tmpmult[im]; - } - - AccumLoop2: - for (int im = 0; im < multiplier_limit; im++) { - #pragma HLS UNROLL - acc[im] += mult[im]; // If RF > N_IN then multiplier_limit == n_out - } - } - -// Cast to "res_t" type -Result: - for (int ires = 0; ires < CONFIG_T::n_out; ires++) { - #pragma HLS UNROLL - res[ires] = cast(acc[ires]); - } -} - -template -void dense_resource(data_T data[CONFIG_T::n_in], res_T res[CONFIG_T::n_out], - typename CONFIG_T::weight_t weights[CONFIG_T::n_in * CONFIG_T::n_out], - typename CONFIG_T::bias_t biases[CONFIG_T::n_out]) { - - #pragma HLS INLINE recursive - - if (CONFIG_T::reuse_factor <= CONFIG_T::n_in) { - dense_resource_rf_leq_nin(data, res, weights, biases); - } else if (CONFIG_T::reuse_factor % CONFIG_T::n_in == 0) { - dense_resource_rf_gt_nin_rem0(data, res, weights, biases); - } else { - dense_resource_rf_gt_nin(data, res, weights, biases); - } -} - -} // namespace nnet - -#endif diff --git a/hls4ml/hls4ml/templates/vitis/nnet_utils/nnet_dense_stream.h b/hls4ml/hls4ml/templates/vitis/nnet_utils/nnet_dense_stream.h deleted file mode 100644 index a2bc1eb..0000000 --- a/hls4ml/hls4ml/templates/vitis/nnet_utils/nnet_dense_stream.h +++ /dev/null @@ -1,105 +0,0 @@ -#ifndef NNET_DENSE_STREAM_H_ -#define NNET_DENSE_STREAM_H_ - -#include "hls_stream.h" -#include "nnet_common.h" -#include "nnet_types.h" -#include -#include - -namespace nnet { - -template -void dense_latency_wrapper(data_T data[CONFIG_T::n_in], res_T res[CONFIG_T::n_out], - typename CONFIG_T::weight_t weights[CONFIG_T::n_in * CONFIG_T::n_out], - typename CONFIG_T::bias_t biases[CONFIG_T::n_out]) { - #pragma HLS PIPELINE II=CONFIG_T::reuse_factor - dense_latency(data, res, weights, biases); -} - -template -void dense_resource_wrapper(data_T data[CONFIG_T::n_in], res_T res[CONFIG_T::n_out], - typename CONFIG_T::weight_t weights[CONFIG_T::n_in * CONFIG_T::n_out], - typename CONFIG_T::bias_t biases[CONFIG_T::n_out]) { - dense_resource(data, res, weights, biases); -} - -template -void data_prepare(hls::stream &data_stream, typename data_T::value_type data[CONFIG_T::n_in]) { - #pragma HLS INLINE - - if (CONFIG_T::n_in / data_T::size > 1) { - DataPrepare: - for (int i_in = 0; i_in < CONFIG_T::n_in / data_T::size; i_in++) { - #pragma HLS PIPELINE - data_T data_pack = data_stream.read(); - DataPackPipeline: - for (int i_pack = 0; i_pack < data_T::size; i_pack++) { - #pragma HLS UNROLL - data[i_in * data_T::size + i_pack] = data_pack[i_pack]; - } - } - } else { - data_T data_pack = data_stream.read(); - DataPackSingle: - for (int i_pack = 0; i_pack < data_T::size; i_pack++) { - #pragma HLS UNROLL - data[i_pack] = data_pack[i_pack]; - } - } -} - -template -void res_write(typename res_T::value_type res[CONFIG_T::n_out], hls::stream &res_stream) { - #pragma HLS INLINE - - if (CONFIG_T::n_out / res_T::size > 1) { - ResWrite: - for (unsigned i_out = 0; i_out < CONFIG_T::n_out / res_T::size; i_out++) { - #pragma HLS PIPELINE - res_T res_pack; 
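// PRAGMA_DATA_PACK expands to the packing pragma appropriate for the HLS
// version (aggregate in Vitis HLS, data_pack in older Vivado HLS), so the
// packed struct travels as a single wide stream word.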
- PRAGMA_DATA_PACK(res_pack) - ResPackPipeline: - for (int i_pack = 0; i_pack < res_T::size; i_pack++) { - #pragma HLS UNROLL - res_pack[i_pack] = res[i_out * res_T::size + i_pack]; - } - res_stream.write(res_pack); - } - } else { - res_T res_pack; - PRAGMA_DATA_PACK(res_pack) - ResPackSingle: - for (int i_pack = 0; i_pack < res_T::size; i_pack++) { - #pragma HLS UNROLL - res_pack[i_pack] = res[i_pack]; - } - res_stream.write(res_pack); - } -} - -template <class data_T, class res_T, typename CONFIG_T> -void dense(hls::stream<data_T> &data_stream, hls::stream<res_T> &res_stream, - typename CONFIG_T::weight_t weights[CONFIG_T::n_in * CONFIG_T::n_out], - typename CONFIG_T::bias_t biases[CONFIG_T::n_out]) { - #pragma HLS INLINE recursive - - typename data_T::value_type data[CONFIG_T::n_in]; - #pragma HLS ARRAY_PARTITION variable=data complete - - typename res_T::value_type res[CONFIG_T::n_out]; - #pragma HLS ARRAY_PARTITION variable=res complete - - data_prepare<data_T, CONFIG_T>(data_stream, data); - if (CONFIG_T::strategy == nnet::latency) { - dense_latency_wrapper<typename data_T::value_type, typename res_T::value_type, CONFIG_T>(data, res, weights, biases); - } else { - dense_resource_wrapper<typename data_T::value_type, typename res_T::value_type, CONFIG_T>(data, res, weights, biases); - } - res_write<res_T, CONFIG_T>(res, res_stream); -} - -} // namespace nnet - -#endif diff --git a/hls4ml/hls4ml/templates/vitis/nnet_utils/nnet_pooling.h b/hls4ml/hls4ml/templates/vitis/nnet_utils/nnet_pooling.h deleted file mode 100644 index 17bb6de..0000000 --- a/hls4ml/hls4ml/templates/vitis/nnet_utils/nnet_pooling.h +++ /dev/null @@ -1,341 +0,0 @@ -#ifndef NNET_POOLING_H_ -#define NNET_POOLING_H_ - -#include "nnet_common.h" -#include "nnet_helpers.h" -#include - -namespace nnet { - -// Return the maximum value from an array -template <typename T, int N> T max(T x[N]) { - T y = x[0]; - for (int i = 1; i < N; i++) { - y = x[i] > y ? x[i] : y; - } - return y; -} - -template <int W, int N> ap_int<W> avg(ap_int<W> (&x)[N]) { - // Use a wider accumulator than the input to avoid overflow - ap_int<W + ceillog2(N)> tmp = 0; - for (int i = 0; i < N; i++) { - tmp += x[i]; - } - tmp /= N; - // Now cast back to original type - ap_int<W> y = tmp; - return y; -} - -template <int W, int I, int N> ap_fixed<W, I> avg(ap_fixed<W, I> (&x)[N]) { - // Use a wider accumulator than the input to avoid overflow - ap_fixed<W + ceillog2(N), I> tmp = 0; - for (int i = 0; i < N; i++) { - tmp += x[i]; - } - tmp /= N; - // Now cast back to original type - ap_fixed<W, I> y = tmp; - return y; -} - -// Return the mean value of an array -template <typename T, int N> T avg(T (&x)[N]) { - T y = 0; - for (int i = 0; i < N; i++) { - y += x[i]; - } - y /= N; - return y; -} - -// Enumeration for pooling operation (max, avg, l2norm pooling) -enum Pool_Op { Max, Average }; // L2Norm }; -template <typename T, int N, Pool_Op op> T pool_op(T (&x)[N]) { - switch (op) { - case Max: - return max<T, N>(x); - case Average: - return avg(x); - // case L2Norm: return l2norm(x); - } -} - -template <typename T, Pool_Op op> T pad_val() { - /*--- - *- In Tensorflow, pooling ignores the value in the padded cells - *- For Avg pooling, return 0 (the divisor is modified to the - *- area overlapping the unpadded image). - *- For max pooling, return the most negative value for the type.
- *- TODO this is not really generic, it assumes fixed point or integer T - ---*/ - switch (op) { - case Max: { - T x = 0; - x[x.width - 1] = 1; - return x; - break; - } - case Average: - return 0; - } -} - -struct pooling1d_config { - // IO size - static const unsigned n_in = 10; - static const unsigned pool_width = 2; - static const unsigned stride_width = 2; - static const unsigned n_out = (n_in - pool_width) / stride_width + 1; - static const unsigned pad_left = 0; - static const unsigned pad_right = 0; - // Pooling function - static const Pool_Op pool_op = Max; -}; - -template constexpr int pool_op_limit_1d() { - return CONFIG_T::n_in * CONFIG_T::n_filt / CONFIG_T::reuse_factor; -} - -template -void pooling1d_cl(data_T data[CONFIG_T::n_in * CONFIG_T::n_filt], res_T res[CONFIG_T::n_out * CONFIG_T::n_filt]) { - #pragma HLS PIPELINE II=CONFIG_T::reuse_factor - - // TODO partition the arrays according to the reuse factor - const int limit = pool_op_limit_1d(); - #pragma HLS ALLOCATION function instances=pool_op limit=limit - // Add any necessary padding - unsigned padded_width = CONFIG_T::n_in + CONFIG_T::pad_left + CONFIG_T::pad_right; - if (CONFIG_T::pad_left == 0 && CONFIG_T::pad_right == 0) { - padded_width -= padded_width - (padded_width / CONFIG_T::stride_width * CONFIG_T::stride_width); - } - - for (int ff = 0; ff < CONFIG_T::n_filt; ff++) { - // Loop over input image x in steps of stride - for (int ii = 0; ii < padded_width; ii += CONFIG_T::stride_width) { - data_T pool[CONFIG_T::pool_width]; - // Keep track of number of pixels in image vs padding region - unsigned img_overlap = 0; - // Loop over pool window x - for (int jj = 0; jj < CONFIG_T::stride_width; jj++) { - if (ii + jj < CONFIG_T::pad_left || ii + jj >= (padded_width - CONFIG_T::pad_right)) { - // Add padding - pool[jj] = pad_val(); - } else { - pool[jj] = data[(ii + jj - CONFIG_T::pad_left) * CONFIG_T::n_filt + ff]; - img_overlap++; - } - } - // do the pooling - // TODO in the case of average pooling, need to reduce width to area of pool window - // not overlapping padding region - res[(ii / CONFIG_T::stride_width) * CONFIG_T::n_filt + ff] = - pool_op(pool); - // If the pool op is Average, the zero-padding needs to be removed from the results - if (CONFIG_T::pool_op == Average) { - data_T rescale = static_cast(CONFIG_T::pool_width) / img_overlap; - res[(ii / CONFIG_T::stride_width) * CONFIG_T::n_filt + ff] *= rescale; - } - } - } -} - -template -void global_pooling1d_cl(data_T data[CONFIG_T::n_in * CONFIG_T::n_filt], res_T res[CONFIG_T::n_filt]) { - #pragma HLS PIPELINE II=CONFIG_T::reuse_factor - - assert(CONFIG_T::pad_left == 0 && CONFIG_T::pad_right == 0); - assert(CONFIG_T::pool_width == CONFIG_T::stride_width); - - // TODO partition the arrays according to the reuse factor - const int limit = pool_op_limit_1d(); - #pragma HLS ALLOCATION function instances=pool_op limit=limit - - for (int ff = 0; ff < CONFIG_T::n_filt; ff++) { - data_T pool[CONFIG_T::n_in]; - for (int jj = 0; jj < CONFIG_T::n_in; jj++) { - pool[jj] = data[jj * CONFIG_T::n_filt + ff]; - } - // do the pooling - res[ff] = pool_op(pool); - } -} - -struct pooling2d_config { - // IO size - static const unsigned in_height = 10; - static const unsigned in_width = 10; - static const unsigned n_filt = 4; - static const unsigned stride_height = 2; - static const unsigned stride_width = 2; - static const unsigned pool_height = 2; - static const unsigned pool_width = 2; - static const unsigned out_height = (in_height - pool_height) / stride_height + 1; - static 
const unsigned out_width = (in_width - pool_width) / stride_width + 1; - // Padding - static const unsigned pad_top = 0; - static const unsigned pad_bottom = 0; - static const unsigned pad_left = 0; - static const unsigned pad_right = 0; - // Pooling function - static const Pool_Op pool_op = Max; - // Reuse factor - static const unsigned reuse_factor = 1; - - // Internal data type definitions - typedef float accum_t; -}; - -template constexpr int pool_op_limit() { - return DIV_ROUNDUP((CONFIG_T::out_height * CONFIG_T::out_width) * CONFIG_T::n_filt, CONFIG_T::reuse_factor); -} - -template -void pooling2d_cl(data_T data[CONFIG_T::in_height * CONFIG_T::in_width * CONFIG_T::n_filt], - res_T res[CONFIG_T::out_height * CONFIG_T::out_width * CONFIG_T::n_filt]) { - #pragma HLS PIPELINE II=CONFIG_T::reuse_factor - - // TODO partition the arrays according to the reuse factor - const int limit = pool_op_limit(); - #pragma HLS ALLOCATION function instances=pool_op limit=limit - // Add any necessary padding - unsigned padded_height = CONFIG_T::in_height + CONFIG_T::pad_top + CONFIG_T::pad_bottom; - unsigned padded_width = CONFIG_T::in_width + CONFIG_T::pad_left + CONFIG_T::pad_right; - if (CONFIG_T::pad_top == 0 && CONFIG_T::pad_bottom == 0 && CONFIG_T::pad_left == 0 && CONFIG_T::pad_right == 0) { - padded_height -= padded_height - (padded_height / CONFIG_T::stride_height * CONFIG_T::stride_height); - padded_width -= padded_width - (padded_width / CONFIG_T::stride_width * CONFIG_T::stride_width); - } - - for (int ff = 0; ff < CONFIG_T::n_filt; ff++) { - // Loop over input image y in steps of stride - for (int ii = 0; ii < padded_height; ii += CONFIG_T::stride_height) { - // Loop over input image x in steps of stride - for (int jj = 0; jj < padded_width; jj += CONFIG_T::stride_width) { - data_T pool[CONFIG_T::pool_height * CONFIG_T::pool_width]; - // Keep track of number of pixels in image vs padding region - unsigned img_overlap = 0; - // Loop over pool window y - for (int kk = 0; kk < CONFIG_T::stride_height; kk++) { - // Loop over pool window x - for (int ll = 0; ll < CONFIG_T::stride_width; ll++) { - if (ii + kk < CONFIG_T::pad_top || ii + kk >= (padded_height - CONFIG_T::pad_bottom) || - jj + ll < CONFIG_T::pad_left || jj + ll >= (padded_width - CONFIG_T::pad_right)) { - // Add padding - pool[kk * CONFIG_T::stride_width + ll] = pad_val(); - } else { - pool[kk * CONFIG_T::stride_width + ll] = - data[(ii + kk - CONFIG_T::pad_top) * CONFIG_T::in_width * CONFIG_T::n_filt + - (jj + ll - CONFIG_T::pad_left) * CONFIG_T::n_filt + ff]; - img_overlap++; - } - } - } - // do the pooling - // TODO in the case of average pooling, need to reduce height * width to area of pool window - // not overlapping padding region - res[(ii / CONFIG_T::stride_height) * CONFIG_T::out_width * CONFIG_T::n_filt + - (jj / CONFIG_T::stride_width) * CONFIG_T::n_filt + ff] = - pool_op(pool); - // If the pool op is Average, the zero-padding needs to be removed from the results - if (CONFIG_T::pool_op == Average) { - data_T rescale = - static_cast(CONFIG_T::pool_height) * static_cast(CONFIG_T::pool_width) / img_overlap; - res[(ii / CONFIG_T::stride_height) * CONFIG_T::out_width * CONFIG_T::n_filt + - (jj / CONFIG_T::stride_width) * CONFIG_T::n_filt + ff] *= rescale; - } - } - } - } -} - -template -void pooling2d_cf(data_T data[CONFIG_T::in_height * CONFIG_T::in_width * CONFIG_T::n_filt], - res_T res[CONFIG_T::out_height * CONFIG_T::out_width * CONFIG_T::n_filt]) { - #pragma HLS PIPELINE II=CONFIG_T::reuse_factor - - // TODO partition 
the arrays according to the reuse factor - const int limit = pool_op_limit(); - #pragma HLS ALLOCATION function instances=pool_op limit=limit - // Add any necessary padding - unsigned padded_height = CONFIG_T::in_height + CONFIG_T::pad_top + CONFIG_T::pad_bottom; - unsigned padded_width = CONFIG_T::in_width + CONFIG_T::pad_left + CONFIG_T::pad_right; - if (CONFIG_T::pad_top == 0 && CONFIG_T::pad_bottom == 0 && CONFIG_T::pad_left == 0 && CONFIG_T::pad_right == 0) { - padded_height -= padded_height - (padded_height / CONFIG_T::stride_height * CONFIG_T::stride_height); - padded_width -= padded_width - (padded_width / CONFIG_T::stride_width * CONFIG_T::stride_width); - } - - for (int ff = 0; ff < CONFIG_T::n_filt; ff++) { - // Loop over input image y in steps of stride - for (int ii = 0; ii < padded_height; ii += CONFIG_T::stride_height) { - // Loop over input image x in steps of stride - for (int jj = 0; jj < padded_width; jj += CONFIG_T::stride_width) { - data_T pool[CONFIG_T::pool_height * CONFIG_T::pool_width]; - // Keep track of number of pixels in image vs padding region - unsigned img_overlap = 0; - // Loop over pool window y - for (int kk = 0; kk < CONFIG_T::stride_height; kk++) { - // Loop over pool window x - for (int ll = 0; ll < CONFIG_T::stride_width; ll++) { - if (ii + kk < CONFIG_T::pad_top || ii + kk >= (padded_height - CONFIG_T::pad_bottom) || - jj + ll < CONFIG_T::pad_left || jj + ll >= (padded_width - CONFIG_T::pad_right)) { - // Add padding - pool[kk * CONFIG_T::stride_width + ll] = pad_val(); - } else { - pool[kk * CONFIG_T::stride_width + ll] = - data[(ii + kk) * CONFIG_T::in_width + ff * CONFIG_T::in_width * CONFIG_T::in_height + ll + - jj]; - img_overlap++; - } - } - } - // do the pooling - // TODO in the case of average pooling, need to reduce height * width to area of pool window - // not overlapping padding region - res[(ii / CONFIG_T::stride_height) * CONFIG_T::out_width + (jj / CONFIG_T::stride_width) + - ff * CONFIG_T::out_height * CONFIG_T::out_width] = - pool_op(pool); - // If the pool op is Average, the zero-padding needs to be removed from the results - if (CONFIG_T::pool_op == Average) { - data_T rescale = - static_cast(CONFIG_T::pool_height) * static_cast(CONFIG_T::pool_width) / img_overlap; - res[(ii / CONFIG_T::stride_height) * CONFIG_T::out_width + (jj / CONFIG_T::stride_width) + - ff * CONFIG_T::out_height * CONFIG_T::out_width] *= rescale; - } - } - } - } -} - -template -void global_pooling2d_cl(data_T data[CONFIG_T::in_height * CONFIG_T::in_width * CONFIG_T::n_filt], - res_T res[CONFIG_T::n_filt]) { - assert(CONFIG_T::pad_left == 0 && CONFIG_T::pad_right == 0); - assert(CONFIG_T::pad_top == 0 && CONFIG_T::pad_bottom == 0); - assert(CONFIG_T::pool_width == CONFIG_T::stride_width); - assert(CONFIG_T::pool_height == CONFIG_T::stride_height); - - #pragma HLS PIPELINE II=CONFIG_T::reuse_factor - - const int limit = pool_op_limit(); - #pragma HLS ALLOCATION function instances=pool_op limit=limit - -FiltLoop: - for (int filt = 0; filt < CONFIG_T::n_filt; filt++) { - data_T pool[CONFIG_T::in_height * CONFIG_T::in_width]; - - InputLoop: - for (int i = 0; i < CONFIG_T::in_height * CONFIG_T::in_width; i++) { - pool[i] = data[i * CONFIG_T::n_filt + filt]; - } - - res[filt] = static_cast(pool_op(pool)); - } -} - -} // namespace nnet - -#endif diff --git a/hls4ml/hls4ml/templates/vitis/nnet_utils/nnet_pooling_stream.h b/hls4ml/hls4ml/templates/vitis/nnet_utils/nnet_pooling_stream.h deleted file mode 100644 index 37ff3c6..0000000 --- 
a/hls4ml/hls4ml/templates/vitis/nnet_utils/nnet_pooling_stream.h +++ /dev/null @@ -1,344 +0,0 @@ -#ifndef NNET_POOLING_STREAM_H_ -#define NNET_POOLING_STREAM_H_ - -#include "ap_shift_reg.h" -#include "hls_stream.h" -#include "nnet_common.h" -#include "nnet_conv_stream.h" -#include "nnet_pooling.h" -#include "utils/x_hls_utils.h" - -namespace nnet { - -// ************************************************* -// Max/average pooling -// ************************************************* - -template T reduce_pool(T x[N]) { - #pragma HLS INLINE - if (CONFIG_T::pool_op == Max) { - Op_max op_max; - return reduce>(x, op_max); - } else { - Op_add op_add; - T sum = reduce>(x, op_add); - return sum / N; - } -} - -template -void compute_pool_buffer_2d(const data_T &in_elem, - ap_shift_reg - line_buffer[MAX(CONFIG_T::pool_height - 1, 1)][CONFIG_T::n_filt], - hls::stream &res) { - #pragma HLS INLINE - const static int lShiftX = CONFIG_T::pool_width - 1; - const static int lShiftY = CONFIG_T::pool_height - 1; - static int pX = 0; // pixel X - static int pY = 0; // pixel Y - static int sX = 0; // stride X - static int sY = 0; // stride Y - - typename CONFIG_T::accum_t pool_window[CONFIG_T::pool_height * CONFIG_T::pool_width]; - #pragma HLS ARRAY_PARTITION variable=pool_window complete - - static typename data_T::value_type kernel_data[CONFIG_T::pool_height * CONFIG_T::pool_width * CONFIG_T::n_filt]; - #pragma HLS ARRAY_PARTITION variable = kernel_data complete dim = 0 - - res_T res_pack; - PRAGMA_DATA_PACK(res_pack) - - // Add pixel into line buffer, return pooling kernels - nnet::shift_line_buffer(in_elem, line_buffer, kernel_data); - - // Can compute pooling output - if ((sX - lShiftX) == 0 && (sY - lShiftY) == 0 && pY > lShiftY - 1 && pX > lShiftX - 1) { - FiltLoop: - for (unsigned i_ic = 0; i_ic < CONFIG_T::n_filt; i_ic++) { - #pragma HLS PIPELINE - - // Retrieve data for current channel - PoolLoop: - for (unsigned i_ihw = 0; i_ihw < CONFIG_T::pool_height * CONFIG_T::pool_width; i_ihw++) { - pool_window[i_ihw] = kernel_data[i_ihw * CONFIG_T::n_filt + i_ic]; - } - - // Compute Pooling - res_pack[i_ic] = - reduce_pool(pool_window); - } - - // Write to output - res.write(res_pack); - } - - // Counter Housekeeping - if (pX + 1 == CONFIG_T::in_width) // Includes padding, end of line (padded) - { - pX = 0; - sX = 0; - if (pY + 1 == CONFIG_T::in_height) { // Reached bottom of image - pY = 0; - sY = 0; - } else { // Next line - pY = pY + 1; - // Update stride (threshold) ? subtract stride : increment stride - sY = ((sY - lShiftY) == 0) ? sY - CONFIG_T::stride_height + 1 : sY + 1; - } - } else { - pX = pX + 1; - // Update stride (threshold) ? subtract stride : increment stride - sX = ((sX - lShiftX) == 0) ? 
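// at the stride threshold: rewind the stride counter by (stride - 1), else count up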
sX - CONFIG_T::stride_width + 1 : sX + 1; - } -} - -template -void pooling2d_cl(hls::stream &data, hls::stream &res) { - assert(CONFIG_T::implementation == conv_implementation::linebuffer && - "Only \"linebuffer\" implementation is supported in Vitis HLS."); - - #pragma HLS INLINE recursive - assert(CONFIG_T::pad_top == 0 && CONFIG_T::pad_bottom == 0 && CONFIG_T::pad_left == 0 && CONFIG_T::pad_right == 0); - assert(CONFIG_T::pool_height == CONFIG_T::stride_height && CONFIG_T::pool_width == CONFIG_T::stride_width); - - static ap_shift_reg line_buffer[MAX(CONFIG_T::pool_height - 1, 1)] - [CONFIG_T::n_filt]; - #pragma HLS ARRAY_PARTITION variable = line_buffer complete dim = 2 - -ReadInputHeight: - for (unsigned i_ih = 0; i_ih < CONFIG_T::in_height; i_ih++) { - ReadInputWidth: - for (unsigned i_iw = 0; i_iw < CONFIG_T::in_width; i_iw++) { - #pragma HLS LOOP_FLATTEN - #pragma HLS PIPELINE - - compute_pool_buffer_2d(data.read(), line_buffer, res); - } - } -} - -// ************************************************* -// Pooling 1D -// ************************************************* -template -void compute_pool_buffer_1d(const data_T &in_elem, hls::stream &res) { - #pragma HLS INLINE - const static int lShiftX = CONFIG_T::pool_width - 1; - // Counters - static int pX = 0; - static int sX = 0; - - typename CONFIG_T::accum_t pool_window[CONFIG_T::pool_width]; - #pragma HLS ARRAY_PARTITION variable=pool_window complete - - static typename data_T::value_type kernel_data[CONFIG_T::pool_width * CONFIG_T::n_filt]; - #pragma HLS ARRAY_PARTITION variable = kernel_data complete dim = 0 - - res_T res_pack; - PRAGMA_DATA_PACK(res_pack) - - // Add pixel into line buffer, return pooling kernels - // 1D case line buffer not necessary. Put directly into the kernel_data buffer - nnet::kernel_shift_1d(in_elem, kernel_data); - - // Can compute pooling output - if ((sX - lShiftX) == 0 && pX > lShiftX - 1) { - FiltLoop: - for (unsigned i_ic = 0; i_ic < CONFIG_T::n_filt; i_ic++) { - #pragma HLS PIPELINE - - // Retrieve data for current channel - PoolLoop: - for (unsigned i_iw = 0; i_iw < CONFIG_T::pool_width; i_iw++) { - pool_window[i_iw] = kernel_data[i_iw * CONFIG_T::n_filt + i_ic]; - } - - // Compute Pooling - res_pack[i_ic] = reduce_pool(pool_window); - } - - // Write to output - res.write(res_pack); - } - - // Counter Housekeeping - if (pX + 1 == CONFIG_T::n_in) // Includes padding, end of line (padded) - { - pX = 0; - sX = 0; - } else { - pX = pX + 1; - // Update stride (threshold) ? subtract stride : increment stride - sX = ((sX - lShiftX) == 0) ? sX - CONFIG_T::stride_width + 1 : sX + 1; - } -} - -template -void pooling1d_cl(hls::stream &data, hls::stream &res) { - assert(CONFIG_T::implementation == conv_implementation::linebuffer && - "Only \"linebuffer\" implementation is supported in Vitis HLS."); - assert(CONFIG_T::pad_left == 0 && CONFIG_T::pad_right == 0); - - #pragma HLS inline recursive - -ReadInputWidth: - for (unsigned i_iw = 0; i_iw < CONFIG_T::n_in; i_iw++) { - #pragma HLS PIPELINE - compute_pool_buffer_1d(data.read(), res); - } -} - -// ************************************************* -// Global max/average pooling -// ************************************************* - -template T reduce_global_pool(T x, T y[N]) { - #pragma HLS INLINE - if (CONFIG_T::pool_op == Max) { - Op_max op_max; - T y_max = reduce>(y, op_max); - return (x > y_max) ? 
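// carry the running maximum of x across successive packed input words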
x : y_max; - } else { - Op_add op_add; - T y_sum = reduce>(y, op_add); - return x + y_sum; - } -} - -template -void compute_global_pool(const data_T &in_elem, typename CONFIG_T::accum_t data_window[CONFIG_T::n_filt]) { -PoolFilt: - for (unsigned c = 0; c < CONFIG_T::n_filt; c++) { - #pragma HLS UNROLL - - typename CONFIG_T::accum_t data_pack[data_T::size / CONFIG_T::n_filt]; - #pragma HLS ARRAY_PARTITION variable=data_pack complete dim=0 - - PixelLoop: - for (unsigned p = 0; p < data_T::size / CONFIG_T::n_filt; p++) { - #pragma HLS UNROLL - data_pack[p] = in_elem[p * CONFIG_T::n_filt + c]; - } - data_window[c] = reduce_global_pool( - data_window[c], data_pack); - } -} - -template -void global_pooling2d_cl(hls::stream &data, hls::stream &res) { - assert(CONFIG_T::pad_top == 0 && CONFIG_T::pad_bottom == 0 && CONFIG_T::pad_left == 0 && CONFIG_T::pad_right == 0); - assert(CONFIG_T::pool_height == CONFIG_T::stride_height && CONFIG_T::pool_width == CONFIG_T::stride_width); - - typename CONFIG_T::accum_t data_window[CONFIG_T::n_filt]; - #pragma HLS ARRAY_PARTITION variable=data_window complete - - typename CONFIG_T::accum_t init = 0; - if (CONFIG_T::pool_op == Max) { - init = hls::numeric_limits::min(); - } - -PoolInitLoop: - for (unsigned i_init = 0; i_init < CONFIG_T::n_filt; i_init++) { - #pragma HLS UNROLL - data_window[i_init] = init; - } - -ReadInputHeight: - for (unsigned i_ih = 0; i_ih < CONFIG_T::in_height; i_ih++) { - ReadInputWidth: - for (unsigned i_iw = 0; i_iw < CONFIG_T::in_width / (data_T::size / CONFIG_T::n_filt); i_iw++) { - #pragma HLS LOOP_FLATTEN - compute_global_pool(data.read(), data_window); - } - } - - if (CONFIG_T::pool_op == Max) { - MaxPoolRes: - for (unsigned i_res = 0; i_res < CONFIG_T::n_filt / res_T::size; i_res++) { - #pragma HLS PIPELINE - - res_T res_pack; - PRAGMA_DATA_PACK(res_pack) - MaxPoolPack: - for (unsigned i_pack = 0; i_pack < res_T::size; i_pack++) { - #pragma HLS UNROLL - res_pack[i_pack] = data_window[i_pack]; - } - res.write(res_pack); - } - } else { - AvgPoolRes: - for (unsigned i_res = 0; i_res < CONFIG_T::n_filt / res_T::size; i_res++) { - #pragma HLS PIPELINE - - res_T res_pack; - PRAGMA_DATA_PACK(res_pack) - AvgPoolPack: - for (unsigned i_pack = 0; i_pack < res_T::size; i_pack++) { - #pragma HLS UNROLL - res_pack[i_pack] = data_window[i_pack] / (CONFIG_T::in_height * CONFIG_T::in_width); - } - res.write(res_pack); - } - } -} - -template -void global_pooling1d_cl(hls::stream &data, hls::stream &res) { - assert(CONFIG_T::pad_left == 0 && CONFIG_T::pad_right == 0); - assert(CONFIG_T::pool_width == CONFIG_T::stride_width); - - typename CONFIG_T::accum_t data_window[CONFIG_T::n_filt]; - #pragma HLS ARRAY_PARTITION variable=data_window complete - - typename CONFIG_T::accum_t init = 0; - if (CONFIG_T::pool_op == Max) { - init = hls::numeric_limits::min(); - } - -PoolInitLoop: - for (unsigned i_init = 0; i_init < CONFIG_T::n_filt; i_init++) { - #pragma HLS UNROLL - data_window[i_init] = init; - } - -ReadInput: - for (unsigned i_iw = 0; i_iw < CONFIG_T::n_in / (data_T::size / CONFIG_T::n_filt); i_iw++) { - #pragma HLS LOOP_FLATTEN - compute_global_pool(data.read(), data_window); - } - - if (CONFIG_T::pool_op == Max) { - MaxPoolRes: - for (unsigned i_res = 0; i_res < CONFIG_T::n_filt / res_T::size; i_res++) { - #pragma HLS PIPELINE - - res_T res_pack; - PRAGMA_DATA_PACK(res_pack) - MaxPoolPack: - for (unsigned i_pack = 0; i_pack < res_T::size; i_pack++) { - #pragma HLS UNROLL - res_pack[i_pack] = data_window[i_pack]; - } - res.write(res_pack); - } - } 
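// Average pooling: the sums accumulated in data_window are normalized by the
// pooled input length (CONFIG_T::n_in) as the result words are packed below.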
else { - AvgPoolRes: - for (unsigned i_res = 0; i_res < CONFIG_T::n_filt / res_T::size; i_res++) { - #pragma HLS PIPELINE - - res_T res_pack; - PRAGMA_DATA_PACK(res_pack) - AvgPoolPack: - for (unsigned i_pack = 0; i_pack < res_T::size; i_pack++) { - #pragma HLS UNROLL - res_pack[i_pack] = data_window[i_pack] / CONFIG_T::n_in; - } - res.write(res_pack); - } - } -} - -} // namespace nnet - -#endif diff --git a/hls4ml/hls4ml/templates/vitis/nnet_utils/nnet_sepconv1d_stream.h b/hls4ml/hls4ml/templates/vitis/nnet_utils/nnet_sepconv1d_stream.h deleted file mode 100644 index 20b6fec..0000000 --- a/hls4ml/hls4ml/templates/vitis/nnet_utils/nnet_sepconv1d_stream.h +++ /dev/null @@ -1,99 +0,0 @@ -#ifndef NNET_SEPARABLE_CONV1D_STREAM_H_ -#define NNET_SEPARABLE_CONV1D_STREAM_H_ - -#include "hls_stream.h" -#include "nnet_common.h" -#include "nnet_conv1d_stream.h" -#include "nnet_sepconv_stream.h" - -namespace nnet { - -template -void depthwise_conv_1d_buffer_cl(hls::stream &data, hls::stream &res, - typename CONFIG_T::weight_t weights[CONFIG_T::filt_width * CONFIG_T::n_chan], - typename CONFIG_T::bias_t biases[CONFIG_T::n_chan]) { - assert(CONFIG_T::pad_left == 0 && CONFIG_T::pad_right == 0); - - if (CONFIG_T::strategy == nnet::latency) { - ReadInputWidth: - for (unsigned i_iw = 0; i_iw < CONFIG_T::in_width; i_iw++) { - #pragma HLS PIPELINE II=CONFIG_T::reuse_factor - compute_depthwise_output_buffer_1d(data.read(), res, weights, biases); - } - } else { - ReadInputWidthSerial: - for (unsigned i_iw = 0; i_iw < CONFIG_T::in_width; i_iw++) { - compute_depthwise_output_buffer_1d(data.read(), res, weights, biases); - } - } -} - -template -void depthwise_conv_1d_cl(hls::stream &data, hls::stream &res, - typename CONFIG_T::weight_t weights[CONFIG_T::filt_width * CONFIG_T::n_chan], - typename CONFIG_T::bias_t biases[CONFIG_T::n_chan]) { - assert(CONFIG_T::implementation == conv_implementation::linebuffer && - "Only \"linebuffer\" implementation is supported in Vitis HLS."); - #pragma HLS inline recursive - depthwise_conv_1d_buffer_cl(data, res, weights, biases); -} - -template -void pointwise_conv_1d_cl(hls::stream &data, hls::stream &res, - typename CONFIG_T::weight_t weights[CONFIG_T::n_chan * CONFIG_T::n_filt], - typename CONFIG_T::bias_t biases[CONFIG_T::n_filt]) { - assert(CONFIG_T::pad_left == 0 && CONFIG_T::pad_right == 0); - assert(CONFIG_T::filt_width == 1); - - #pragma HLS ARRAY_PARTITION variable=weights complete - #pragma HLS ARRAY_PARTITION variable=biases complete - - if (CONFIG_T::strategy == nnet::latency) { - ReadInputWidth: - for (unsigned i_iw = 0; i_iw < CONFIG_T::in_width; i_iw++) { - #pragma HLS PIPELINE II=CONFIG_T::reuse_factor - if (i_iw % CONFIG_T::stride_width == 0) { - pointwise_mult_buffer(data.read(), res, weights, biases); - } else { - data.read(); - } - } - } else { - ReadInputWidthSerial: - for (unsigned i_iw = 0; i_iw < CONFIG_T::in_width; i_iw++) { - if (i_iw % CONFIG_T::stride_width == 0) { - pointwise_mult_buffer(data.read(), res, weights, biases); - } else { - data.read(); - } - } - } -} - -template -void separable_conv_1d_cl(hls::stream &data, hls::stream &res, - typename CONFIG_T::depthwise_config::weight_t - depthwise_weights[CONFIG_T::depthwise_config::filt_width * CONFIG_T::depthwise_config::n_chan], - typename CONFIG_T::pointwise_config::weight_t - pointwise_weights[CONFIG_T::pointwise_config::n_chan * CONFIG_T::pointwise_config::n_filt], - typename CONFIG_T::depthwise_config::bias_t depthwise_biases[CONFIG_T::depthwise_config::n_chan], - typename 
CONFIG_T::pointwise_config::bias_t pointwise_biases[CONFIG_T::pointwise_config::n_filt]) { - assert(CONFIG_T::depthwise_config::implementation == conv_implementation::linebuffer && - "Only \"linebuffer\" implementation is supported in Vitis HLS."); - assert(CONFIG_T::pointwise_config::implementation == conv_implementation::linebuffer && - "Only \"linebuffer\" implementation is supported in Vitis HLS."); - - #pragma HLS DATAFLOW - - hls::stream depthwise_res; - unsigned res_depth = CONFIG_T::depthwise_config::out_width; - #pragma HLS STREAM variable=depthwise_res depth=res_depth - - depthwise_conv_1d_buffer_cl(data, depthwise_res, - depthwise_weights, depthwise_biases); - pointwise_conv_1d_cl(depthwise_res, res, pointwise_weights, - pointwise_biases); -} - -} // namespace nnet -#endif diff --git a/hls4ml/hls4ml/templates/vitis/nnet_utils/nnet_sepconv2d_stream.h b/hls4ml/hls4ml/templates/vitis/nnet_utils/nnet_sepconv2d_stream.h deleted file mode 100644 index a374799..0000000 --- a/hls4ml/hls4ml/templates/vitis/nnet_utils/nnet_sepconv2d_stream.h +++ /dev/null @@ -1,133 +0,0 @@ -#ifndef NNET_SEPARABLE_CONV2D_STREAM_H_ -#define NNET_SEPARABLE_CONV2D_STREAM_H_ - -#include "hls_stream.h" -#include "nnet_common.h" -#include "nnet_conv2d_stream.h" -#include "nnet_sepconv_stream.h" - -namespace nnet { - -// Line Buffer Implementation (Phil's) -template -void depthwise_conv_2d_buffer_cl( - hls::stream &data, hls::stream &res, - typename CONFIG_T::weight_t weights[CONFIG_T::filt_height * CONFIG_T::filt_width * CONFIG_T::n_chan], - typename CONFIG_T::bias_t biases[CONFIG_T::n_chan]) { - assert(CONFIG_T::pad_top == 0 && CONFIG_T::pad_bottom == 0 && CONFIG_T::pad_left == 0 && CONFIG_T::pad_right == 0); - - static ap_shift_reg line_buffer[CONFIG_T::filt_height - 1] - [CONFIG_T::n_chan]; - #pragma HLS ARRAY_PARTITION variable = line_buffer complete dim = 2 - - if (CONFIG_T::strategy == nnet::latency) { - ReadInputHeight: - for (unsigned i_ih = 0; i_ih < CONFIG_T::in_height; i_ih++) { - ReadInputWidth: - for (unsigned i_iw = 0; i_iw < CONFIG_T::in_width; i_iw++) { - #pragma HLS LOOP_FLATTEN - #pragma HLS PIPELINE II=CONFIG_T::reuse_factor - if (CONFIG_T::filt_height > 1) { - compute_depthwise_output_buffer_2d(data.read(), line_buffer, res, weights, - biases); - } else { - compute_depthwise_output_buffer_1d(data.read(), res, weights, biases); - } - } - } - } else { - ReadInputHeightSerial: - for (unsigned i_ih = 0; i_ih < CONFIG_T::in_height; i_ih++) { - ReadInputWidthSerial: - for (unsigned i_iw = 0; i_iw < CONFIG_T::in_width; i_iw++) { - #pragma HLS LOOP_FLATTEN - if (CONFIG_T::filt_height > 1) { - compute_depthwise_output_buffer_2d(data.read(), line_buffer, res, weights, - biases); - } else { - compute_depthwise_output_buffer_1d(data.read(), res, weights, biases); - } - } - } - } -} - -template -void pointwise_conv_2d_cl(hls::stream &data, hls::stream &res, - typename CONFIG_T::weight_t weights[CONFIG_T::n_chan * CONFIG_T::n_filt], - typename CONFIG_T::bias_t biases[CONFIG_T::n_filt]) { - assert(CONFIG_T::pad_top == 0 && CONFIG_T::pad_bottom == 0 && CONFIG_T::pad_left == 0 && CONFIG_T::pad_right == 0); - assert(CONFIG_T::filt_height == 1 && CONFIG_T::filt_width == 1); - - #pragma HLS ARRAY_PARTITION variable=weights complete - #pragma HLS ARRAY_PARTITION variable=biases complete - - if (CONFIG_T::strategy == nnet::latency) { - ReadInputHeight: - for (unsigned i_ih = 0; i_ih < CONFIG_T::in_height; i_ih++) { - ReadInputWidth: - for (unsigned i_iw = 0; i_iw < CONFIG_T::in_width; i_iw++) { - #pragma HLS 
LOOP_FLATTEN - #pragma HLS PIPELINE II=CONFIG_T::reuse_factor - if (i_ih % CONFIG_T::stride_height == 0 && i_iw % CONFIG_T::stride_width == 0) { - pointwise_mult_buffer(data.read(), res, weights, biases); - } else { - data.read(); - } - } - } - } else { - ReadInputHeightSerial: - for (unsigned i_ih = 0; i_ih < CONFIG_T::in_height; i_ih++) { - ReadInputWidthSerial: - for (unsigned i_iw = 0; i_iw < CONFIG_T::in_width; i_iw++) { - #pragma HLS LOOP_FLATTEN - if (i_ih % CONFIG_T::stride_height == 0 && i_iw % CONFIG_T::stride_width == 0) { - pointwise_mult_buffer(data.read(), res, weights, biases); - } else { - data.read(); - } - } - } - } -} - -template -void depthwise_conv_2d_cl( - hls::stream &data, hls::stream &res, - typename CONFIG_T::weight_t weights[CONFIG_T::filt_height * CONFIG_T::filt_width * CONFIG_T::n_chan], - typename CONFIG_T::bias_t biases[CONFIG_T::n_chan]) { - assert(CONFIG_T::implementation == conv_implementation::linebuffer && - "Only \"linebuffer\" implementation is supported in Vitis HLS."); - #pragma HLS inline recursive - depthwise_conv_2d_buffer_cl(data, res, weights, biases); -} - -template -void separable_conv_2d_cl(hls::stream &data, hls::stream &res, - typename CONFIG_T::depthwise_config::weight_t - depthwise_weights[CONFIG_T::depthwise_config::filt_height * - CONFIG_T::depthwise_config::filt_width * CONFIG_T::depthwise_config::n_chan], - typename CONFIG_T::pointwise_config::weight_t - pointwise_weights[CONFIG_T::pointwise_config::n_chan * CONFIG_T::pointwise_config::n_filt], - typename CONFIG_T::depthwise_config::bias_t depthwise_biases[CONFIG_T::depthwise_config::n_chan], - typename CONFIG_T::pointwise_config::bias_t pointwise_biases[CONFIG_T::pointwise_config::n_filt]) { - assert(CONFIG_T::depthwise_config::implementation == conv_implementation::linebuffer && - "Only \"linebuffer\" implementation is supported in Vitis HLS."); - assert(CONFIG_T::pointwise_config::implementation == conv_implementation::linebuffer && - "Only \"linebuffer\" implementation is supported in Vitis HLS."); - - #pragma HLS DATAFLOW - - hls::stream depthwise_res; - unsigned res_depth = CONFIG_T::depthwise_config::out_height * CONFIG_T::depthwise_config::out_width; - #pragma HLS STREAM variable=depthwise_res depth=res_depth - - depthwise_conv_2d_buffer_cl(data, depthwise_res, - depthwise_weights, depthwise_biases); - pointwise_conv_2d_cl(depthwise_res, res, pointwise_weights, - pointwise_biases); -} - -} // namespace nnet -#endif diff --git a/hls4ml/hls4ml/templates/vivado/ap_types/ap_common.h b/hls4ml/hls4ml/templates/vivado/ap_types/ap_common.h deleted file mode 100644 index 4d2886c..0000000 --- a/hls4ml/hls4ml/templates/vivado/ap_types/ap_common.h +++ /dev/null @@ -1,376 +0,0 @@ -/* - * Copyright 2011-2019 Xilinx, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef __AP_COMMON_H__ -#define __AP_COMMON_H__ - -// ---------------------------------------------------------------------- - -// Forward declaration of all AP types. 
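// The deleted separable_conv_*_cl templates above split a separable
// convolution into a depthwise stage (one kernel per input channel) and a
// pointwise 1x1 stage that mixes channels, joined by an intermediate
// stream under #pragma HLS DATAFLOW. Below is a minimal plain-C++ sketch
// of that same two-stage structure on a 1D, valid-padding, stride-1
// signal; the sizes and weight values are illustrative, not from hls4ml.
#include <cstdio>

constexpr int IN_W = 8, FILT_W = 3, N_CHAN = 2, N_FILT = 2;
constexpr int OUT_W = IN_W - FILT_W + 1;

// Depthwise: each channel is filtered independently with its own taps.
void depthwise_1d(const float in[IN_W][N_CHAN], const float w[FILT_W][N_CHAN],
                  float out[OUT_W][N_CHAN]) {
    for (int x = 0; x < OUT_W; x++)
        for (int c = 0; c < N_CHAN; c++) {
            float acc = 0.0f;
            for (int k = 0; k < FILT_W; k++)
                acc += in[x + k][c] * w[k][c];
            out[x][c] = acc;
        }
}

// Pointwise: a 1x1 convolution, i.e. a per-pixel matrix multiply mixing
// the N_CHAN depthwise outputs into N_FILT output filters.
void pointwise_1d(const float in[OUT_W][N_CHAN], const float w[N_CHAN][N_FILT],
                  float out[OUT_W][N_FILT]) {
    for (int x = 0; x < OUT_W; x++)
        for (int f = 0; f < N_FILT; f++) {
            float acc = 0.0f;
            for (int c = 0; c < N_CHAN; c++)
                acc += in[x][c] * w[c][f];
            out[x][f] = acc;
        }
}

int main() {
    float in[IN_W][N_CHAN] = {}, dw[FILT_W][N_CHAN] = {}, pw[N_CHAN][N_FILT] = {};
    for (int x = 0; x < IN_W; x++) in[x][0] = in[x][1] = float(x);
    for (int k = 0; k < FILT_W; k++) dw[k][0] = dw[k][1] = 1.0f / FILT_W; // moving average
    pw[0][0] = pw[1][1] = 1.0f;                                          // identity mix
    float mid[OUT_W][N_CHAN], out[OUT_W][N_FILT]; // `mid` plays the role of depthwise_res
    depthwise_1d(in, dw, mid);
    pointwise_1d(mid, pw, out);
    printf("out[0][0] = %f\n", out[0][0]); // mean of 0,1,2 -> 1.000000
    return 0;
}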
-#include - - -#ifdef __SYNTHESIS__ -#error "The open-source version of AP types does not support synthesis." -#endif // ifdef __SYNTHESIS__ -#define _AP_ENABLE_HALF_ 0 - - -#if _AP_ENABLE_HALF_ == 1 -// Before ap_private definition. -#ifdef __SYNTHESIS__ -#define _HLS_HALF_DEFINED_ -typedef __fp16 half; -#else -class half; -#endif // __SYNTHESIS__ -#endif // _AP_ENABLE_HALF_ - -// ---------------------------------------------------------------------- - -// Macro functions -#define AP_MAX(a, b) ((a) > (b) ? (a) : (b)) -#define AP_MIN(a, b) ((a) < (b) ? (a) : (b)) -#define AP_ABS(a) ((a) >= 0 ? (a) : -(a)) - -#ifndef AP_ASSERT -#ifndef __SYNTHESIS__ -#include -#define AP_ASSERT(cond, msg) assert((cond) && (msg)) -#else -#define AP_ASSERT(cond, msg) -#endif // ifndef __SYNTHESIS__ -#endif // ifndef AP_ASSERT - -#ifndef __SYNTHESIS__ -// for fprintf messages. -#include -// for exit on error. -#include -#endif - -// same disable condition as assert. -#if !defined(__SYNTHESIS__) && !defined(NDEBUG) - -#define _AP_DEBUG(cond, ...) \ - do { \ - if ((cond)) { \ - fprintf(stderr, "DEBUG: " __VA_ARGS__); \ - fprintf(stderr, "\n"); \ - } \ - } while (0) -#define _AP_WARNING(cond, ...) \ - do { \ - if ((cond)) { \ - fprintf(stderr, "WARNING: " __VA_ARGS__); \ - fprintf(stderr, "\n"); \ - } \ - } while (0) -#define _AP_ERROR(cond, ...) \ - do { \ - if ((cond)) { \ - fprintf(stderr, "ERROR: " __VA_ARGS__); \ - fprintf(stderr, "\n"); \ - abort(); \ - } \ - } while (0) - -#else // if !defined(__SYNTHESIS__) && !defined(NDEBUG) - -#define __AP_VOID_CAST static_cast -#define _AP_DEBUG(cond, ...) (__AP_VOID_CAST(0)) -#define _AP_WARNING(cond, ...) (__AP_VOID_CAST(0)) -#define _AP_ERROR(cond, ...) (__AP_VOID_CAST(0)) - -#endif // if !defined(__SYNTHESIS__) && !defined(NDEBUG) else - -// ---------------------------------------------------------------------- - -// Attribute only for synthesis -#ifdef __SYNTHESIS__ -#define INLINE inline __attribute__((always_inline)) -//#define INLINE inline __attribute__((noinline)) -#else -#define INLINE inline -#endif - -#define AP_WEAK -// __attribute__((weak)) - -#ifndef AP_INT_MAX_W -#define AP_INT_MAX_W 1024 -#endif - -#define BIT_WIDTH_UPPER_LIMIT (1 << 15) -#if AP_INT_MAX_W > BIT_WIDTH_UPPER_LIMIT -#error "Bitwidth exceeds 32768 (1 << 15), the maximum allowed value" -#endif - -#define MAX_MODE(BITS) ((BITS + 1023) / 1024) - -// ---------------------------------------------------------------------- - -// XXX apcc cannot handle global std::ios_base::Init() brought in by -#ifndef AP_AUTOCC -#ifndef __SYNTHESIS__ -// for overload operator<< -#include -#endif -#endif // ifndef AP_AUTOCC - -#ifndef __SYNTHESIS__ -// for string format. -#include -// for string. -#include -#endif - -// for detecting if char is signed. -enum { CHAR_IS_SIGNED = (char)-1 < 0 }; - -// TODO we have similar traits in x_hls_utils.h, should consider unify. 
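// The enum above — enum { CHAR_IS_SIGNED = (char)-1 < 0 }; — detects the
// signedness of plain char at compile time, and the _ap_type::is_signed
// trait in the hunk that follows generalizes the same comparison to any
// integral type. A standalone sketch, checked against the standard
// library (the static_asserts are an assumed cross-check, not part of
// the original header):
#include <type_traits>

template <typename T>
struct is_signed_like_ap {
    // Same idea as _ap_type::is_signed: -1 converted to T is below +1
    // only when T is signed; for unsigned T it wraps to the maximum.
    static const bool value = T(-1) < T(1);
};

static_assert(is_signed_like_ap<int>::value == std::is_signed<int>::value, "int");
static_assert(is_signed_like_ap<unsigned>::value == std::is_signed<unsigned>::value, "unsigned");
static_assert(is_signed_like_ap<char>::value == std::is_signed<char>::value, "char");

int main() { return 0; }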
-namespace _ap_type { -template -struct is_signed { - static const bool value = _Tp(-1) < _Tp(1); -}; - -template -struct is_integral { - static const bool value = false; -}; -#define DEF_IS_INTEGRAL(CTYPE) \ - template <> \ - struct is_integral { \ - static const bool value = true; \ - }; -DEF_IS_INTEGRAL(bool) -DEF_IS_INTEGRAL(char) -DEF_IS_INTEGRAL(signed char) -DEF_IS_INTEGRAL(unsigned char) -DEF_IS_INTEGRAL(short) -DEF_IS_INTEGRAL(unsigned short) -DEF_IS_INTEGRAL(int) -DEF_IS_INTEGRAL(unsigned int) -DEF_IS_INTEGRAL(long) -DEF_IS_INTEGRAL(unsigned long) -DEF_IS_INTEGRAL(ap_slong) -DEF_IS_INTEGRAL(ap_ulong) -#undef DEF_IS_INTEGRAL - -template -struct enable_if {}; -// partial specialization for true -template -struct enable_if { - typedef _Tp type; -}; - -template -struct remove_const { - typedef _Tp type; -}; - -template -struct remove_const<_Tp const> { - typedef _Tp type; -}; -} // namespace _ap_type - -// ---------------------------------------------------------------------- - -// Define ssdm_int and _ssdm_op. -// XXX deleted in open-source version - -#ifndef NON_C99STRING -#define _AP_C99 true -#else -#define _AP_C99 false -#endif - -static inline unsigned char guess_radix(const char* s) { - unsigned char rd = 10; ///< default radix - const char* p = s; - // skip neg sign if it exists - if (p[0] == '-' || p[0] == '+') ++p; - // guess based on following two bits. - if (p[0] == '0') { - if (p[1] == 'b' || p[1] == 'B') { - rd = 2; - } else if (p[1] == 'o' || p[1] == 'O') { - rd = 8; - } else if (p[1] == 'x' || p[1] == 'X') { - rd = 16; - } else if (p[1] == 'd' || p[1] == 'D') { - rd = 10; - } - } - return rd; -} - -// ---------------------------------------------------------------------- - -// Basic integral struct upon which ap_int and ap_fixed are defined. -#ifdef __SYNTHESIS__ -// Use ssdm_int, a compiler dependent, attribute constrained integeral type as -// basic data type. -#define _AP_ROOT_TYPE ssdm_int -// Basic ops. -#define _AP_ROOT_op_concat(Ret, X, Y) _ssdm_op_concat(Ret, X, Y) -#define _AP_ROOT_op_get_bit(Val, Bit) _ssdm_op_get_bit(Val, Bit) -#define _AP_ROOT_op_set_bit(Val, Bit, Repl) _ssdm_op_set_bit(Val, Bit, Repl) -#define _AP_ROOT_op_get_range(Val, Lo, Hi) _ssdm_op_get_range(Val, Lo, Hi) -#define _AP_ROOT_op_set_range(Val, Lo, Hi, Repl) \ - _ssdm_op_set_range(Val, Lo, Hi, Repl) -#define _AP_ROOT_op_reduce(Op, Val) _ssdm_op_reduce(Op, Val) -#else // ifdef __SYNTHESIS__ -// Use ap_private for compiler-independent basic data type -template -class ap_private; -/// model ssdm_int in standard C++ for simulation. -template -struct ssdm_int_sim { - /// integral type with template-specified width and signedness. - ap_private<_AP_W, _AP_S> V; - ssdm_int_sim() {} -}; -#define _AP_ROOT_TYPE ssdm_int_sim -// private's ref uses _AP_ROOT_TYPE. -#include -// XXX The C-sim model cannot use GCC-extension -// Basic ops. Ret and Val are ap_private. -template -inline _Tp1 _AP_ROOT_op_concat(const _Tp1& Ret, const _Tp2& X, const _Tp3& Y) { - _Tp1 r = (X).operator,(Y); - return r; -} -#define _AP_ROOT_op_get_bit(Val, Bit) (Val).get_bit((Bit)) -template -inline _Tp1& _AP_ROOT_op_set_bit(_Tp1& Val, const _Tp2& Bit, const _Tp3& Repl) { - (Val).set_bit((Bit), (Repl)); - return Val; -} -// notice the order of high and low index is different in ssdm call and -// ap_private.range()... 
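// guess_radix above maps an optional sign followed by a 0b/0B, 0o/0O,
// 0x/0X or 0d/0D prefix to radix 2, 8, 16 or 10, defaulting to 10. A
// standalone restatement with a few checks; the test strings are
// illustrative:
#include <cassert>

static unsigned char guess_radix_sketch(const char* s) {
    unsigned char rd = 10;                 // default radix
    const char* p = s;
    if (p[0] == '-' || p[0] == '+') ++p;   // skip the sign if present
    if (p[0] == '0') {                     // then look at the prefix
        if (p[1] == 'b' || p[1] == 'B') rd = 2;
        else if (p[1] == 'o' || p[1] == 'O') rd = 8;
        else if (p[1] == 'x' || p[1] == 'X') rd = 16;
        else if (p[1] == 'd' || p[1] == 'D') rd = 10;
    }
    return rd;
}

int main() {
    assert(guess_radix_sketch("-0x1A") == 16);
    assert(guess_radix_sketch("0b101") == 2);
    assert(guess_radix_sketch("+0o17") == 8);
    assert(guess_radix_sketch("42") == 10);
    return 0;
}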
-#define _AP_ROOT_op_get_range(Val, Lo, Hi) (Val).range((Hi), (Lo)) -template -inline _Tp1& _AP_ROOT_op_set_range(_Tp1& Val, const _Tp2& Lo, const _Tp3& Hi, - const _Tp4& Repl) { - (Val).range((Hi), (Lo)) = Repl; - return (Val); -} -#define _AP_ROOT_op_and_reduce(Val) (Val).and_reduce() -#define _AP_ROOT_op_nand_reduce(Val) (Val).nand_reduce() -#define _AP_ROOT_op_or_reduce(Val) (Val).or_reduce() -#define _AP_ROOT_op_xor_reduce(Val) (Val).xor_reduce() -// ## is the concatenation in preprocessor: -#define _AP_ROOT_op_reduce(Op, Val) _AP_ROOT_op_##Op##_reduce(Val) -#endif // ifdef __SYNTHESIS__ else - -// ---------------------------------------------------------------------- - -// Constants for half, single, double pricision floating points -#define HALF_MAN 10 -#define FLOAT_MAN 23 -#define DOUBLE_MAN 52 - -#define HALF_EXP 5 -#define FLOAT_EXP 8 -#define DOUBLE_EXP 11 - -#define BIAS(e) ((1L << (e - 1L)) - 1L) -#define HALF_BIAS BIAS(HALF_EXP) -#define FLOAT_BIAS BIAS(FLOAT_EXP) -#define DOUBLE_BIAS BIAS(DOUBLE_EXP) - -#define APFX_IEEE_DOUBLE_E_MAX DOUBLE_BIAS -#define APFX_IEEE_DOUBLE_E_MIN (-DOUBLE_BIAS + 1) - -INLINE ap_ulong doubleToRawBits(double pf) { - union { - ap_ulong __L; - double __D; - } LD; - LD.__D = pf; - return LD.__L; -} - -INLINE unsigned int floatToRawBits(float pf) { - union { - unsigned int __L; - float __D; - } LD; - LD.__D = pf; - return LD.__L; -} - -#if _AP_ENABLE_HALF_ == 1 -INLINE unsigned short halfToRawBits(half pf) { -#ifdef __SYNTHESIS__ - union { - unsigned short __L; - half __D; - } LD; - LD.__D = pf; - return LD.__L; -#else - return pf.get_bits(); -#endif -} -#endif - -// usigned long long is at least 64-bit -INLINE double rawBitsToDouble(ap_ulong pi) { - union { - ap_ulong __L; - double __D; - } LD; - LD.__L = pi; - return LD.__D; -} - -// long is at least 32-bit -INLINE float rawBitsToFloat(unsigned long pi) { - union { - unsigned int __L; - float __D; - } LD; - LD.__L = pi; - return LD.__D; -} - -#if _AP_ENABLE_HALF_ == 1 -// short is at least 16-bit -INLINE half rawBitsToHalf(unsigned short pi) { -#ifdef __SYNTHESIS__ - union { - unsigned short __L; - half __D; - } LD; - LD.__L = pi; - return LD.__D; -#else - // sim model of half has a non-trivial constructor - half __D; - __D.set_bits(pi); - return __D; -#endif -} -#endif - -#endif // ifndef __AP_COMMON_H__ - -// -*- cpp -*- diff --git a/hls4ml/hls4ml/templates/vivado/ap_types/ap_decl.h b/hls4ml/hls4ml/templates/vivado/ap_types/ap_decl.h deleted file mode 100644 index ddd00f1..0000000 --- a/hls4ml/hls4ml/templates/vivado/ap_types/ap_decl.h +++ /dev/null @@ -1,212 +0,0 @@ -/* - * Copyright 2011-2019 Xilinx, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef __AP_DECL_H__ -#define __AP_DECL_H__ - -// ---------------------------------------------------------------------- - -#if !defined(__AP_FIXED_H__) && !defined(__AP_INT_H__) && !defined(__AUTOPILOT_CBE_H__) && !defined(__HLS_HALF_H__) -#error "Only ap_fixed.h and ap_int.h can be included directly in user code." 
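// The doubleToRawBits/rawBitsToDouble helpers in the ap_common.h hunk
// above type-pun through a union, and DOUBLE_MAN/DOUBLE_EXP/BIAS give the
// IEEE-754 field layout (52-bit mantissa, 11-bit exponent, bias 1023). A
// minimal sketch pulling a double apart with the same union pattern,
// assuming 64-bit unsigned long long and IEEE-754 doubles as the original
// helpers do; the field names are illustrative:
#include <cstdio>

static unsigned long long double_to_bits(double d) {
    union { unsigned long long u; double f; } v;  // same pattern as doubleToRawBits
    v.f = d;
    return v.u;
}

int main() {
    const int MAN = 52, EXP = 11;                    // DOUBLE_MAN / DOUBLE_EXP
    const long long BIAS = (1LL << (EXP - 1)) - 1;   // BIAS(DOUBLE_EXP) == 1023
    unsigned long long b = double_to_bits(-6.5);
    unsigned sign = unsigned(b >> (MAN + EXP));
    long long exp = (long long)((b >> MAN) & ((1ULL << EXP) - 1)) - BIAS;
    unsigned long long man = b & ((1ULL << MAN) - 1);
    // -6.5 = -1.101b * 2^2, so expect sign=1, exp=2.
    printf("sign=%u exp=%lld man=0x%llx\n", sign, exp, man);
    return 0;
}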
-#endif - -// Test __SYNTHESIS__ only for mode -#if !defined(__SYNTHESIS__) && (defined(AESL_SYN) || defined(__HLS_SYN__)) -//#pragma message "AESL_SYN and __HLS_SYN__ should be replaced by __SYNTHESIS__" -#define __SYNTHESIS__ -#endif - -/* for safety*/ -#if (defined(_AP_N) || defined(_AP_C)) -#error One or more of the following is defined: _AP_N, _AP_C. Definition conflicts with their usage as template parameters. -#endif - -/* for safety*/ -#if (defined(_AP_W) || defined(_AP_I) || defined(_AP_S) || defined(_AP_Q) || \ - defined(_AP_O) || defined(_AP_W2) || defined(_AP_I2) || \ - defined(_AP_S2) || defined(_AP_Q2) || defined(_AP_O2) || \ - defined(_AP_N) || defined(_AP_N2)) -#error \ - "One or more of the following is defined: _AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N, _AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2. Definition conflicts with their usage as template parameters." -#endif - -/*for safety*/ -#if (defined(_AP_W3) || defined(_AP_S3) || defined(_AP_W4) || defined(_AP_S4)) -#error \ - "One or more of the following is defined: _AP_W3, _AP_S3, _AP_W4,_AP_S4. Definition conflicts with their usage as template parameters." -#endif - -#if (defined(_AP_W1) || defined(_AP_S1) || defined(_AP_T) || \ - defined(_AP_T1) || defined(_AP_T2) || defined(_AP_T3) || defined(_AP_T4)) -#error \ - "One or more of the following is defined: _AP_W1, _AP_S1, _AP_T, _AP_T1, _AP_T2, _AP_T3, _AP_T4. Definition conflicts with their usage as template parameters." -#endif - -#ifndef __cplusplus -#error "AP data type can only be used in C++" -#endif - -// ---------------------------------------------------------------------- - -#ifndef __SC_COMPATIBLE__ -/// ap_fixed quantification mode -enum ap_q_mode { - AP_RND, //< rounding to plus infinity - AP_RND_ZERO, //< rounding to zero - AP_RND_MIN_INF, //< rounding to minus infinity - AP_RND_INF, //< rounding to infinity - AP_RND_CONV, //< convergent rounding - AP_TRN, //< truncation - AP_TRN_ZERO, //< truncation to zero -}; - -// FIXME for legacy code -#ifndef SYSTEMC_INCLUDED -#define SC_RND AP_RND -#define SC_RND_ZERO AP_RND_ZERO -#define SC_RND_MIN_INF AP_RND_MIN_INF -#define SC_RND_INF AP_RND_INF -#define SC_RND_CONV AP_RND_CONV -#define SC_TRN AP_TRN -#define SC_TRN_ZERO AP_TRN_ZERO -#endif // !defined(SYSTEMC_INCLUDED) - -/// ap_fixed saturation mode -enum ap_o_mode { - AP_SAT, //< saturation - AP_SAT_ZERO, //< saturation to zero - AP_SAT_SYM, //< symmetrical saturation - AP_WRAP, //< wrap-around (*) - AP_WRAP_SM, //< sign magnitude wrap-around (*) -}; - -// FIXME for legacy code -#ifndef SYSTEMC_INCLUDED -#define SC_SAT AP_SAT -#define SC_SAT_ZERO AP_SAT_ZERO -#define SC_SAT_SYM AP_SAT_SYM -#define SC_WRAP AP_WRAP -#define SC_WRAP_SM AP_WRAP_SM -#endif // !defined(SYSTEMC_INCLUDED) - -#else // defined(__SC_COMPATIBLE__) - -// There will not be sc_fxdefs.h, and the emu should be defined by ap_fixed. 
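// The ap_q_mode and ap_o_mode enums above name the quantization and
// overflow policies ap_fixed applies on every narrowing assignment. A
// plain-integer sketch of two from each family — AP_TRN (truncate) vs
// AP_RND (round toward plus infinity) when dropping fraction bits, and
// AP_WRAP (keep low bits) vs AP_SAT (clip) when forcing a value into 8
// signed bits. The widths below are illustrative, not hls4ml defaults:
#include <cstdio>
#include <cstdint>

// Drop `shift` LSBs: AP_TRN just shifts; AP_RND adds half an LSB first.
int32_t quant_trn(int32_t v, int shift) { return v >> shift; }
int32_t quant_rnd(int32_t v, int shift) { return (v + (1 << (shift - 1))) >> shift; }

// Force into 8 signed bits: AP_WRAP keeps the low byte (two's complement
// wrap-around); AP_SAT clips to the representable range.
int32_t ovf_wrap(int32_t v) { return int8_t(v); }
int32_t ovf_sat(int32_t v)  { return v > 127 ? 127 : (v < -128 ? -128 : v); }

int main() {
    // 1.75 in a 4-fraction-bit format is 28; dropping the fraction bits:
    printf("AP_TRN: %d  AP_RND: %d\n", quant_trn(28, 4), quant_rnd(28, 4)); // 1 vs 2
    // 200 does not fit in 8 signed bits:
    printf("AP_WRAP: %d  AP_SAT: %d\n", ovf_wrap(200), ovf_sat(200));       // -56 vs 127
    return 0;
}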
- -/// ap_fixed quantification mode -enum ap_q_mode { - SC_RND, //< rounding to plus infinity - SC_RND_ZERO, //< rounding to zero - SC_RND_MIN_INF, //< rounding to minus infinity - SC_RND_INF, //< rounding to infinity - SC_RND_CONV, //< convergent rounding - SC_TRN, //< truncation - SC_TRN_ZERO, //< truncation to zero -}; - -#define AP_RND SC_RND -#define AP_RND_ZERO SC_RND_ZERO -#define AP_RND_MIN_INF SC_RND_MIN_INF -#define AP_RND_INF SC_RND_INF -#define AP_RND_CONV SC_RND_CONV -#define AP_TRN SC_TRN -#define AP_TRN_ZERO SC_TRN_ZERO - -/// ap_fixed saturation mode -enum ap_o_mode { - SC_SAT, //< saturation - SC_SAT_ZERO, //< saturation to zero - SC_SAT_SYM, //< symmetrical saturation - SC_WRAP, //< wrap-around (*) - SC_WRAP_SM, //< sign magnitude wrap-around (*) -}; - -#define AP_SAT SC_SAT -#define AP_SAT_ZERO SC_SAT_ZERO -#define AP_SAT_SYM SC_SAT_SYM -#define AP_WRAP SC_WRAP -#define AP_WRAP_SM SC_WRAP_SM - -#endif // defined(__SC_COMPATIBLE__) - -template -struct ap_int_base; - -template -struct ap_int; - -template -struct ap_uint; - -template -struct ap_range_ref; - -template -struct ap_bit_ref; - -template -struct ap_concat_ref; - -template -struct ap_fixed_base; - -template -struct ap_fixed; - -template -struct ap_ufixed; - -template -struct af_range_ref; - -template -struct af_bit_ref; - -/// string base mode -enum BaseMode { AP_BIN = 2, AP_OCT = 8, AP_DEC = 10, AP_HEX = 16 }; - -#ifndef SYSTEMC_INCLUDED -#define SC_BIN 2 -#define SC_OCT 8 -#define SC_DEC 10 -#define SC_HEX 16 -#endif // !defined(SYSTEMC_INCLUDED) - -// Alias C data types -#ifdef _MSC_VER -typedef signed __int64 ap_slong; -typedef unsigned __int64 ap_ulong; -#else // !defined(_MSC_VER) -typedef signed long long ap_slong; -typedef unsigned long long ap_ulong; -#endif // !defined(_MSC_VER) - -enum { - _AP_SIZE_char = 8, - _AP_SIZE_short = sizeof(short) * 8, - _AP_SIZE_int = sizeof(int) * 8, - _AP_SIZE_long = sizeof(long) * 8, - _AP_SIZE_ap_slong = sizeof(ap_slong) * 8 -}; - -#endif // !defined(__AP_DECL_H__) - -// -*- cpp -*- diff --git a/hls4ml/hls4ml/templates/vivado/ap_types/ap_fixed.h b/hls4ml/hls4ml/templates/vivado/ap_types/ap_fixed.h deleted file mode 100644 index cd0192b..0000000 --- a/hls4ml/hls4ml/templates/vivado/ap_types/ap_fixed.h +++ /dev/null @@ -1,360 +0,0 @@ -/* - * Copyright 2011-2019 Xilinx, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef __AP_FIXED_H__ -#define __AP_FIXED_H__ - -#include -#include -#include - -//--------------------------------------------------------------- - -/// Signed Arbitrary Precision Fixed-Point Type. -// default for _AP_Q, _AP_O and _AP_N set in ap_decl.h -template -struct ap_fixed : ap_fixed_base<_AP_W, _AP_I, true, _AP_Q, _AP_O, _AP_N> { - typedef ap_fixed_base<_AP_W, _AP_I, true, _AP_Q, _AP_O, _AP_N> Base; - // Constructor - /// default ctor - INLINE ap_fixed() : Base() {} - - /// default copy ctor - INLINE ap_fixed(const ap_fixed& op) { Base::V = op.V; } - - /// copy ctor from ap_fixed_base. 
- template - INLINE ap_fixed(const ap_fixed_base<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, - _AP_O2, _AP_N2>& op) - : Base(op) {} - - template - INLINE ap_fixed(const volatile ap_fixed_base<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, - _AP_O2, _AP_N2>& op) - : Base(op) {} - - //// from ap_fixed - //template - //INLINE ap_fixed( - // const ap_fixed<_AP_W2, _AP_I2, _AP_Q2, _AP_O2, _AP_N2>& op) - // : Base(ap_fixed_base<_AP_W2, _AP_I2, true, _AP_Q2, _AP_O2, _AP_N2>(op)) {} - - //template - //INLINE ap_fixed( - // const volatile ap_fixed<_AP_W2, _AP_I2, _AP_Q2, _AP_O2, _AP_N2>& op) - // : Base(ap_fixed_base<_AP_W2, _AP_I2, true, _AP_Q2, _AP_O2, _AP_N2>(op)) {} - - //// from ap_ufixed. - //template - //INLINE ap_fixed( - // const ap_ufixed<_AP_W2, _AP_I2, _AP_Q2, _AP_O2, _AP_N2>& op) - // : Base(ap_fixed_base<_AP_W2, _AP_I2, false, _AP_Q2, _AP_O2, _AP_N2>(op)) { - //} - - //template - //INLINE ap_fixed( - // const volatile ap_ufixed<_AP_W2, _AP_I2, _AP_Q2, _AP_O2, _AP_N2>& op) - // : Base(ap_fixed_base<_AP_W2, _AP_I2, false, _AP_Q2, _AP_O2, _AP_N2>(op)) { - //} - - /// copy ctor from ap_int_base. - template - INLINE ap_fixed(const ap_int_base<_AP_W2, _AP_S2>& op) : Base(op) {} - - template - INLINE ap_fixed(const volatile ap_int_base<_AP_W2, _AP_S2>& op) : Base(op) {} - - //// from ap_int. - //template - //INLINE ap_fixed(const ap_int<_AP_W2>& op) - // : Base(ap_int_base<_AP_W2, true>(op)) {} - - //template - //INLINE ap_fixed(const volatile ap_int<_AP_W2>& op) - // : Base(ap_int_base<_AP_W2, true>(op)) {} - - //// from ap_uint. - //template - //INLINE ap_fixed(const ap_uint<_AP_W2>& op) - // : Base(ap_int_base<_AP_W2, false>(op)) {} - - //template - //INLINE ap_fixed(const volatile ap_uint<_AP_W2>& op) - // : Base(ap_int_base<_AP_W2, false>(op)) {} - - // from ap_bit_ref. - template - INLINE ap_fixed(const ap_bit_ref<_AP_W2, _AP_S2>& op) : Base(op) {} - - // from ap_range_ref. - template - INLINE ap_fixed(const ap_range_ref<_AP_W2, _AP_S2>& op) : Base(op) {} - - // from ap_concat_ref. - template - INLINE ap_fixed(const ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3>& op) - : Base(op) {} - - // from af_bit_ref. - template - INLINE ap_fixed( - const af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& op) - : Base(op) {} - - // from af_range_ref. - template - INLINE ap_fixed( - const af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& op) - : Base(op) {} - -// from c types. -#define CTOR(TYPE) \ - INLINE ap_fixed(TYPE v) : Base(v) {} - - CTOR(bool) - CTOR(char) - CTOR(signed char) - CTOR(unsigned char) - CTOR(short) - CTOR(unsigned short) - CTOR(int) - CTOR(unsigned int) - CTOR(long) - CTOR(unsigned long) - CTOR(ap_slong) - CTOR(ap_ulong) -#if _AP_ENABLE_HALF_ == 1 - CTOR(half) -#endif - CTOR(float) - CTOR(double) -#undef CTOR - - INLINE ap_fixed(const char* s) : Base(s) {} - - INLINE ap_fixed(const char* s, signed char rd) : Base(s, rd) {} - - // Assignment - // The assignment operator is technically inherited; however, it is always - // hidden by an explicitly or implicitly defined assignment operator for the - // derived class. - /* XXX ctor will be used when right is not of proper type. 
*/ - INLINE ap_fixed& operator=( - const ap_fixed<_AP_W, _AP_I, _AP_Q, _AP_O, _AP_N>& op) { - Base::V = op.V; - return *this; - } - - INLINE void operator=( - const ap_fixed<_AP_W, _AP_I, _AP_Q, _AP_O, _AP_N>& op) volatile { - Base::V = op.V; - } - - INLINE ap_fixed& operator=( - const volatile ap_fixed<_AP_W, _AP_I, _AP_Q, _AP_O, _AP_N>& op) { - Base::V = op.V; - return *this; - } - - INLINE void operator=( - const volatile ap_fixed<_AP_W, _AP_I, _AP_Q, _AP_O, _AP_N>& op) volatile { - Base::V = op.V; - } -}; // struct ap_fixed. - -//------------------------------------------------------------------- - -// Unsigned Arbitrary Precision Fixed-Point Type. -// default for _AP_Q, _AP_O and _AP_N set in ap_decl.h -template -struct ap_ufixed : ap_fixed_base<_AP_W, _AP_I, false, _AP_Q, _AP_O, _AP_N> { - typedef ap_fixed_base<_AP_W, _AP_I, false, _AP_Q, _AP_O, _AP_N> Base; - // Constructor - /// default ctor - INLINE ap_ufixed() : Base() {} - - /// default copy ctor - INLINE ap_ufixed(const ap_ufixed& op) { Base::V = op.V; } - - /// copy ctor from ap_fixed_base - template - INLINE ap_ufixed(const ap_fixed_base<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, - _AP_O2, _AP_N2>& op) - : Base(op) {} - - /// copy ctor from ap_fixed_base - template - INLINE ap_ufixed(const volatile ap_fixed_base<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, - _AP_O2, _AP_N2>& op) - : Base(op) {} - - //template - //INLINE ap_ufixed( - // const ap_fixed<_AP_W2, _AP_I2, _AP_Q2, _AP_O2, _AP_N2>& op) - // : Base(ap_fixed_base<_AP_W2, _AP_I2, true, _AP_Q2, _AP_O2, _AP_N2>(op)) {} - - //template - //INLINE ap_ufixed( - // const volatile ap_fixed<_AP_W2, _AP_I2, _AP_Q2, _AP_O2, _AP_N2>& op) - // : Base(ap_fixed_base<_AP_W2, _AP_I2, true, _AP_Q2, _AP_O2, _AP_N2>(op)) {} - - //template - //INLINE ap_ufixed( - // const ap_ufixed<_AP_W2, _AP_I2, _AP_Q2, _AP_O2, _AP_N2>& op) - // : Base(ap_fixed_base<_AP_W2, _AP_I2, false, _AP_Q2, _AP_O2, _AP_N2>(op)) { - //} - - //template - //INLINE ap_ufixed( - // const volatile ap_ufixed<_AP_W2, _AP_I2, _AP_Q2, _AP_O2, _AP_N2>& op) - // : Base(ap_fixed_base<_AP_W2, _AP_I2, false, _AP_Q2, _AP_O2, _AP_N2>(op)) { - //} - - /// copy ctor from ap_int_base. 
- template - INLINE ap_ufixed(const ap_int_base<_AP_W2, _AP_S2>& op) : Base(op) {} - - template - INLINE ap_ufixed(const volatile ap_int_base<_AP_W2, _AP_S2>& op) : Base(op) {} - - //template - //INLINE ap_ufixed(const ap_int<_AP_W2>& op) - // : Base(ap_int_base<_AP_W2, true>(op)) {} - - //template - //INLINE ap_ufixed(const volatile ap_int<_AP_W2>& op) - // : Base(ap_int_base<_AP_W2, true>(op)) {} - - //template - //INLINE ap_ufixed(const ap_uint<_AP_W2>& op) - // : Base(ap_int_base<_AP_W2, false>(op)) {} - - //template - //INLINE ap_ufixed(const volatile ap_uint<_AP_W2>& op) - // : Base(ap_int_base<_AP_W2, false>(op)) {} - - template - INLINE ap_ufixed(const ap_bit_ref<_AP_W2, _AP_S2>& op) : Base(op) {} - - template - INLINE ap_ufixed(const ap_range_ref<_AP_W2, _AP_S2>& op) : Base(op) {} - - template - INLINE ap_ufixed(const ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3>& op) - : Base(op) {} - - template - INLINE ap_ufixed( - const af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& op) - : Base(op) {} - - template - INLINE ap_ufixed( - const af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& op) - : Base(op) {} - -#define CTOR(TYPE) \ - INLINE ap_ufixed(TYPE v) : Base(v) {} - - CTOR(bool) - CTOR(char) - CTOR(signed char) - CTOR(unsigned char) - CTOR(short) - CTOR(unsigned short) - CTOR(int) - CTOR(unsigned int) - CTOR(long) - CTOR(unsigned long) - CTOR(ap_slong) - CTOR(ap_ulong) -#if _AP_ENABLE_HALF_ == 1 - CTOR(half) -#endif - CTOR(float) - CTOR(double) -#undef CTOR - - INLINE ap_ufixed(const char* s) : Base(s) {} - - INLINE ap_ufixed(const char* s, signed char rd) : Base(s, rd) {} - - // Assignment - INLINE ap_ufixed& operator=( - const ap_ufixed<_AP_W, _AP_I, _AP_Q, _AP_O, _AP_N>& op) { - Base::V = op.V; - return *this; - } - - INLINE void operator=( - const ap_ufixed<_AP_W, _AP_I, _AP_Q, _AP_O, _AP_N>& op) volatile { - Base::V = op.V; - } - - INLINE ap_ufixed& operator=( - const volatile ap_ufixed<_AP_W, _AP_I, _AP_Q, _AP_O, _AP_N>& op) { - Base::V = op.V; - return *this; - } - - INLINE void operator=(const volatile ap_ufixed<_AP_W, _AP_I, _AP_Q, _AP_O, - _AP_N>& op) volatile { - Base::V = op.V; - } -}; // struct ap_ufixed - - -#if !defined(__SYNTHESIS__) && (defined(SYSTEMC_H) || defined(SYSTEMC_INCLUDED)) -// XXX sc_trace overload for ap_fixed is already included in -// "ap_sysc/ap_sc_extras.h", so do not define in synthesis. -template -INLINE void sc_trace(sc_core::sc_trace_file* tf, - const ap_fixed<_AP_W, _AP_I, _AP_Q, _AP_O, _AP_N>& op, - const std::string& name) { - tf->trace(sc_dt::sc_lv<_AP_W>(op.to_string(2).c_str()), name); -} - -template -INLINE void sc_trace(sc_core::sc_trace_file* tf, - const ap_ufixed<_AP_W, _AP_I, _AP_Q, _AP_O, _AP_N>& op, - const std::string& name) { - tf->trace(sc_dt::sc_lv<_AP_W>(op.to_string(2).c_str()), name); -} -#endif // System C sim - -// Specialization of std containers, so that std::complex can have its -// image part automatically zero-initialized when only real part is provided. -#include - -#endif // ifndef __AP_FIXED_H__ - -// -*- cpp -*- diff --git a/hls4ml/hls4ml/templates/vivado/ap_types/ap_fixed_base.h b/hls4ml/hls4ml/templates/vivado/ap_types/ap_fixed_base.h deleted file mode 100644 index 1d94b93..0000000 --- a/hls4ml/hls4ml/templates/vivado/ap_types/ap_fixed_base.h +++ /dev/null @@ -1,2354 +0,0 @@ -/* - * Copyright 2011-2019 Xilinx, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef __AP_FIXED_BASE_H__ -#define __AP_FIXED_BASE_H__ - -#ifndef __AP_FIXED_H__ -#error "Only ap_fixed.h and ap_int.h can be included directly in user code." -#endif - -// for ap_int_base and its reference types. -#include -#ifndef __SYNTHESIS__ -#if _AP_ENABLE_HALF_ == 1 -// for half type -#include -#endif -// for std io -#include -#endif - -#ifndef __cplusplus -#error "C++ is required to include this header file" -#else // __cplusplus - -// for warning on unsupported rounding mode in conversion to float/double. -#if !defined(__SYNTHESIS__) && __cplusplus >= 201103L && \ - (defined(__gnu_linux__) || defined(_WIN32)) -#define AP_FIXED_ENABLE_CPP_FENV 1 -#include -#endif - -// ---------------------------------------------------------------------- - -/* Major TODO - long double support: constructor, assign and other operators. - binary operators with ap_fixed_base and const char*. - return ap_fixed/ap_ufixed when result signedness is known. -*/ - -// Helper function in conversion to floating point types. - -#ifdef __SYNTHESIS__ -#define _AP_ctype_op_get_bit(var, index) _AP_ROOT_op_get_bit(var, index) -#define _AP_ctype_op_set_bit(var, index, x) _AP_ROOT_op_set_bit(var, index, x) -#define _AP_ctype_op_get_range(var, low, high) \ - _AP_ROOT_op_get_range(var, low, high) -#define _AP_ctype_op_set_range(var, low, high, x) \ - _AP_ROOT_op_set_range(var, low, high, x) -#else // ifdef __SYNTHESIS__ -template -inline bool _AP_ctype_op_get_bit(_Tp1& var, const _Tp2& index) { - return !!(var & (1ull << (index))); -} -template -inline _Tp1 _AP_ctype_op_set_bit(_Tp1& var, const _Tp2& index, const _Tp3& x) { - var |= (((x) ? 1ull : 0ull) << (index)); - return var; -} -template -inline _Tp1 _AP_ctype_op_get_range(_Tp1& var, const _Tp2& low, - const _Tp3& high) { - _Tp1 r = var; - ap_ulong mask = -1ll; - mask >>= (sizeof(_Tp1) * 8 - ((high) - (low) + 1)); - r >>= (low); - r &= mask; - return r; -} -template -inline _Tp1 _AP_ctype_op_set_range(_Tp1& var, const _Tp2& low, const _Tp3& high, - const _Tp4& x) { - ap_ulong mask = -1ll; - mask >>= (_AP_SIZE_ap_slong - ((high) - (low) + 1)); - var &= ~(mask << (low)); - var |= ((mask & x) << (low)); - return var; -} -#endif // ifdef __SYNTHESIS__ - - -// trait for letting base class to return derived class. -// Notice that derived class template is incomplete, and we cannot use -// the member of the derived class. -template -struct _ap_fixed_factory; -template -struct _ap_fixed_factory<_AP_W2, _AP_I2, true> { - typedef ap_fixed<_AP_W2, _AP_I2> type; -}; -template -struct _ap_fixed_factory<_AP_W2, _AP_I2, false> { - typedef ap_ufixed<_AP_W2, _AP_I2> type; -}; - -/// ap_fixed_base: AutoPilot fixed point. -/** partial specialization of signed. - @tparam _AP_W width. - @tparam _AP_I integral part width. - @tparam _AP_S signed. - @tparam _AP_Q quantization mode. Default is AP_TRN. - @tparam _AP_O saturation mode. Default is AP_WRAP. - @tparam _AP_N saturation wrap value. Default is 0. 
- */ -// default for _AP_Q, _AP_O and _AP_N set in ap_decl.h -template -struct ap_fixed_base : _AP_ROOT_TYPE<_AP_W, _AP_S> { - public: - typedef _AP_ROOT_TYPE<_AP_W, _AP_S> Base; - static const int width = _AP_W; - static const int iwidth = _AP_I; - static const ap_q_mode qmode = _AP_Q; - static const ap_o_mode omode = _AP_O; - - /// Return type trait. - template - struct RType { - enum { - _AP_F = _AP_W - _AP_I, - F2 = _AP_W2 - _AP_I2, - mult_w = _AP_W + _AP_W2, - mult_i = _AP_I + _AP_I2, - mult_s = _AP_S || _AP_S2, - plus_w = AP_MAX(_AP_I + (_AP_S2 && !_AP_S), _AP_I2 + (_AP_S && !_AP_S2)) + - 1 + AP_MAX(_AP_F, F2), - plus_i = - AP_MAX(_AP_I + (_AP_S2 && !_AP_S), _AP_I2 + (_AP_S && !_AP_S2)) + 1, - plus_s = _AP_S || _AP_S2, - minus_w = - AP_MAX(_AP_I + (_AP_S2 && !_AP_S), _AP_I2 + (_AP_S && !_AP_S2)) + 1 + - AP_MAX(_AP_F, F2), - minus_i = - AP_MAX(_AP_I + (_AP_S2 && !_AP_S), _AP_I2 + (_AP_S && !_AP_S2)) + 1, - minus_s = true, -#ifndef __SC_COMPATIBLE__ - div_w = _AP_S2 + _AP_W + AP_MAX(F2, 0), -#else - div_w = _AP_S2 + _AP_W + AP_MAX(F2, 0) + AP_MAX(_AP_I2, 0), -#endif - div_i = _AP_S2 + _AP_I + F2, - div_s = _AP_S || _AP_S2, - logic_w = - AP_MAX(_AP_I + (_AP_S2 && !_AP_S), _AP_I2 + (_AP_S && !_AP_S2)) + - AP_MAX(_AP_F, F2), - logic_i = AP_MAX(_AP_I + (_AP_S2 && !_AP_S), _AP_I2 + (_AP_S && !_AP_S2)), - logic_s = _AP_S || _AP_S2 - }; - - typedef ap_fixed_base<_AP_W, _AP_I, _AP_S> lhs; - typedef ap_fixed_base<_AP_W2, _AP_I2, _AP_S2> rhs; - - typedef ap_fixed_base mult_base; - typedef ap_fixed_base plus_base; - typedef ap_fixed_base minus_base; - typedef ap_fixed_base logic_base; - typedef ap_fixed_base div_base; - typedef ap_fixed_base<_AP_W, _AP_I, _AP_S> arg1_base; - - typedef typename _ap_fixed_factory::type mult; - typedef typename _ap_fixed_factory::type plus; - typedef typename _ap_fixed_factory::type minus; - typedef typename _ap_fixed_factory::type logic; - typedef typename _ap_fixed_factory::type div; - typedef typename _ap_fixed_factory<_AP_W, _AP_I, _AP_S>::type arg1; - }; - - private: -#ifndef __SYNTHESIS__ - // This cannot handle hex float format string. - void fromString(const std::string& val, unsigned char radix) { - _AP_ERROR(!(radix == 2 || radix == 8 || radix == 10 || radix == 16), - "ap_fixed_base::fromString(%s, %d)", val.c_str(), radix); - - Base::V = 0; - int startPos = 0; - int endPos = val.length(); - int decPos = val.find("."); - if (decPos == -1) decPos = endPos; - - // handle sign - bool isNegative = false; - if (val[0] == '-') { - isNegative = true; - ++startPos; - } else if (val[0] == '+') - ++startPos; - - // If there are no integer bits, e.g.: - // .0000XXXX, then keep at least one bit. - // If the width is greater than the number of integer bits, e.g.: - // XXXX.XXXX, then we keep the integer bits - // if the number of integer bits is greater than the width, e.g.: - // XXX000 then we keep the integer bits. - // Always keep one bit. - ap_fixed_base - integer_bits = 0; - - // Figure out if we can shift instead of multiply - unsigned shift = (radix == 16 ? 4 : radix == 8 ? 3 : radix == 2 ? 1 : 0); - - //std::cout << "\n\n" << val << "\n"; - //std::cout << startPos << " " << decPos << " " << endPos << "\n"; - - bool sticky_int = false; - - // Traverse the integer digits from the MSD, multiplying by radix as we go. 
- for (int i = startPos; i < decPos; i++) { - // Get a digit - char cdigit = val[i]; - if (cdigit == '\0') continue; - unsigned digit = ap_private_ops::decode_digit(cdigit, radix); - - sticky_int |= integer_bits[AP_MAX(_AP_I, 4) + 4 - 1] | - integer_bits[AP_MAX(_AP_I, 4) + 4 - 2] | - integer_bits[AP_MAX(_AP_I, 4) + 4 - 3] | - integer_bits[AP_MAX(_AP_I, 4) + 4 - 4]; - // Shift or multiply the value by the radix - if (shift) - integer_bits <<= shift; - else - integer_bits *= radix; - - // Add in the digit we just interpreted - integer_bits += digit; - //std::cout << "idigit = " << digit << " " << integer_bits.to_string() - // << " " << sticky_int << "\n"; - } - integer_bits[AP_MAX(_AP_I, 4) + 4 - 3] = - integer_bits[AP_MAX(_AP_I, 4) + 4 - 3] | sticky_int; - - ap_fixed_base fractional_bits = 0; - bool sticky = false; - - // Traverse the fractional digits from the LSD, dividing by radix as we go. - for (int i = endPos - 1; i >= decPos + 1; i--) { - // Get a digit - char cdigit = val[i]; - if (cdigit == '\0') continue; - unsigned digit = ap_private_ops::decode_digit(cdigit, radix); - // Add in the digit we just interpreted - fractional_bits += digit; - - sticky |= fractional_bits[0] | fractional_bits[1] | fractional_bits[2] | - fractional_bits[3]; - // Shift or divide the value by the radix - if (shift) - fractional_bits >>= shift; - else - fractional_bits /= radix; - - //std::cout << "fdigit = " << digit << " " << fractional_bits.to_string() - // << " " << sticky << "\n"; - } - - //std::cout << "Int =" << integer_bits.to_string() << " " << - // fractional_bits.to_string() << "\n"; - - fractional_bits[0] = fractional_bits[0] | sticky; - - if (isNegative) - *this = -(integer_bits + fractional_bits); - else - *this = integer_bits + fractional_bits; - - //std::cout << "end = " << this->to_string(16) << "\n"; - } - - /// report invalid constrction of ap_fixed_base - INLINE void report() { - if (!_AP_S && _AP_O == AP_WRAP_SM) { - fprintf(stderr, "ap_ufxied<...> cannot support AP_WRAP_SM.\n"); - exit(1); - } - if (_AP_W > MAX_MODE(AP_INT_MAX_W) * 1024) { - fprintf(stderr, - "[E] ap_%sfixed<%d, ...>: Bitwidth exceeds the " - "default max value %d. Please use macro " - "AP_INT_MAX_W to set a larger max value.\n", - _AP_S ? "" : "u", _AP_W, MAX_MODE(AP_INT_MAX_W) * 1024); - exit(1); - } - } -#else - INLINE void report() {} -#endif // ifdef __SYNTHESIS__ - - /// @name helper functions. 
- // @{ - INLINE void overflow_adjust(bool underflow, bool overflow, bool lD, - bool sign) { - if (!underflow && !overflow) return; - if (_AP_O == AP_WRAP) { - if (_AP_N == 0) return; - if (_AP_S) { - // signed AP_WRAP - // n_bits == 1 - Base::V = _AP_ROOT_op_set_bit(Base::V, _AP_W - 1, sign); - if (_AP_N > 1) { - // n_bits > 1 - ap_int_base<_AP_W, false> mask(-1); - if (sign) mask.V = 0; - Base::V = - _AP_ROOT_op_set_range(Base::V, _AP_W - _AP_N, _AP_W - 2, mask.V); - } - } else { - // unsigned AP_WRAP - ap_int_base<_AP_W, false> mask(-1); - Base::V = - _AP_ROOT_op_set_range(Base::V, _AP_W - _AP_N, _AP_W - 1, mask.V); - } - } else if (_AP_O == AP_SAT_ZERO) { - Base::V = 0; - } else if (_AP_O == AP_WRAP_SM && _AP_S) { - bool Ro = _AP_ROOT_op_get_bit(Base::V, _AP_W - 1); - if (_AP_N == 0) { - if (lD != Ro) { - Base::V = ~Base::V; - Base::V = _AP_ROOT_op_set_bit(Base::V, _AP_W - 1, lD); - } - } else { - if (_AP_N == 1 && sign != Ro) { - Base::V = ~Base::V; - } else if (_AP_N > 1) { - bool lNo = _AP_ROOT_op_get_bit(Base::V, _AP_W - _AP_N); - if (lNo == sign) Base::V = ~Base::V; - ap_int_base<_AP_W, false> mask(-1); - if (sign) mask.V = 0; - Base::V = - _AP_ROOT_op_set_range(Base::V, _AP_W - _AP_N, _AP_W - 2, mask.V); - } - Base::V = _AP_ROOT_op_set_bit(Base::V, _AP_W - 1, sign); - } - } else { - if (_AP_S) { - if (overflow) { - Base::V = 1; - Base::V <<= _AP_W - 1; - Base::V = ~Base::V; - } else if (underflow) { - Base::V = 1; - Base::V <<= _AP_W - 1; - if (_AP_O == AP_SAT_SYM) Base::V |= 1; - } - } else { - if (overflow) - Base::V = ~(ap_int_base<_AP_W, false>(0).V); - else if (underflow) - Base::V = 0; - } - } - } - - INLINE bool quantization_adjust(bool qb, bool r, bool s) { - bool carry = (bool)_AP_ROOT_op_get_bit(Base::V, _AP_W - 1); - if (_AP_Q == AP_TRN) return false; - if (_AP_Q == AP_RND_ZERO) - qb &= s || r; - else if (_AP_Q == AP_RND_MIN_INF) - qb &= r; - else if (_AP_Q == AP_RND_INF) - qb &= !s || r; - else if (_AP_Q == AP_RND_CONV) - qb &= _AP_ROOT_op_get_bit(Base::V, 0) || r; - else if (_AP_Q == AP_TRN_ZERO) - qb = s && (qb || r); - Base::V += qb; - return carry && (!(bool)_AP_ROOT_op_get_bit(Base::V, _AP_W - 1)); - } - // @} - - public: - /// @name constructors. - // @{ - /// default ctor. - INLINE ap_fixed_base() {} - - /// copy ctor. 
- template - INLINE ap_fixed_base( - const ap_fixed_base<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& op) { - operator=(op); - report(); - } - - template - INLINE ap_fixed_base( - const volatile ap_fixed_base<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& op) { - operator=(op); - report(); - } - - template - INLINE ap_fixed_base(const ap_int_base<_AP_W2, _AP_S2>& op) { - ap_fixed_base<_AP_W2, _AP_W2, _AP_S2> tmp; - tmp.V = op.V; - operator=(tmp); - report(); - } - - template - INLINE ap_fixed_base(const volatile ap_int_base<_AP_W2, _AP_S2>& op) { - ap_fixed_base<_AP_W2, _AP_W2, _AP_S2> tmp; - tmp.V = op.V; - operator=(tmp); - report(); - } - -#ifndef __SYNTHESIS__ -#ifndef NON_C99STRING - INLINE ap_fixed_base(const char* s, signed char rd = 0) { - unsigned char radix = rd; - std::string str = ap_private_ops::parseString(s, radix); // will guess rd, default 10 - _AP_ERROR(radix == 0, "ap_fixed_base(const char* \"%s\", %d), str=%s, radix = %d", - s, rd, str.c_str(), radix); // TODO remove this check - fromString(str, radix); - } -#else - INLINE ap_fixed_base(const char* s, signed char rd = 10) { - ap_int_base<_AP_W, _AP_S> t(s, rd); - Base::V = t.V; - } -#endif // ifndef NON_C99STRING -#else // ifndef __SYNTHESIS__ - // XXX _ssdm_string2bits only takes const string and const radix. - // It seems XFORM will do compile time processing of the string. - INLINE ap_fixed_base(const char* s) { - typeof(Base::V) t; - _ssdm_string2bits((void*)(&t), (const char*)(s), 10, _AP_I, _AP_S, _AP_Q, - _AP_O, _AP_N, _AP_C99); - Base::V = t; - } - INLINE ap_fixed_base(const char* s, signed char rd) { - typeof(Base::V) t; - _ssdm_string2bits((void*)(&t), (const char*)(s), rd, _AP_I, _AP_S, _AP_Q, - _AP_O, _AP_N, _AP_C99); - Base::V = t; - } -#endif // ifndef __SYNTHESIS__ else - - template - INLINE ap_fixed_base(const ap_bit_ref<_AP_W2, _AP_S2>& op) { - *this = ((bool)op); - report(); - } - - template - INLINE ap_fixed_base(const ap_range_ref<_AP_W2, _AP_S2>& op) { - *this = (ap_int_base<_AP_W2, false>(op)); - report(); - } - - template - INLINE ap_fixed_base( - const ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3>& op) { - *this = (ap_int_base<_AP_W2 + _AP_W3, false>(op)); - report(); - } - - template - INLINE ap_fixed_base( - const af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& op) { - *this = (bool(op)); - report(); - } - - template - INLINE ap_fixed_base( - const af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& op) { - *this = (ap_int_base<_AP_W2, false>(op)); - report(); - } - - // ctors from c types. - // make a temp ap_fixed_base first, and use ap_fixed_base.operator= -#define CTOR_FROM_INT(C_TYPE, _AP_W2, _AP_S2) \ - INLINE ap_fixed_base(const C_TYPE x) { \ - ap_fixed_base<(_AP_W2), (_AP_W2), (_AP_S2)> tmp; \ - tmp.V = x; \ - *this = tmp; \ - } - - CTOR_FROM_INT(bool, 1, false) - CTOR_FROM_INT(char, 8, CHAR_IS_SIGNED) - CTOR_FROM_INT(signed char, 8, true) - CTOR_FROM_INT(unsigned char, 8, false) - CTOR_FROM_INT(short, _AP_SIZE_short, true) - CTOR_FROM_INT(unsigned short, _AP_SIZE_short, false) - CTOR_FROM_INT(int, _AP_SIZE_int, true) - CTOR_FROM_INT(unsigned int, _AP_SIZE_int, false) - CTOR_FROM_INT(long, _AP_SIZE_long, true) - CTOR_FROM_INT(unsigned long, _AP_SIZE_long, false) - CTOR_FROM_INT(ap_slong, _AP_SIZE_ap_slong, true) - CTOR_FROM_INT(ap_ulong, _AP_SIZE_ap_slong, false) -#undef CTOR_FROM_INT -/* - * TODO: - *Theere used to be several funtions which were AP_WEAK. 
- *Now they're all INLINE expect ap_fixed_base(double d) - *Maybe we can use '#pragma HLS inline' instead of INLINE. - */ - AP_WEAK ap_fixed_base(double d) { - ap_int_base<64, false> ireg; - ireg.V = doubleToRawBits(d); - bool isneg = _AP_ROOT_op_get_bit(ireg.V, 63); - - ap_int_base exp; - ap_int_base exp_tmp; - exp_tmp.V = - _AP_ROOT_op_get_range(ireg.V, DOUBLE_MAN, DOUBLE_MAN + DOUBLE_EXP - 1); - exp = exp_tmp - DOUBLE_BIAS; - ap_int_base man; - man.V = _AP_ROOT_op_get_range(ireg.V, 0, DOUBLE_MAN - 1); - // do not support NaN - _AP_WARNING(exp == APFX_IEEE_DOUBLE_E_MAX + 1 && man.V != 0, - "assign NaN to fixed point value"); - man.V = _AP_ROOT_op_set_bit(man.V, DOUBLE_MAN, 1); - if (isneg) man = -man; - if ((ireg.V & 0x7fffffffffffffffLL) == 0) { - Base::V = 0; - } else { - int _AP_W2 = DOUBLE_MAN + 2, _AP_I2 = exp.V + 2, _AP_F = _AP_W - _AP_I, - F2 = _AP_W2 - _AP_I2; - bool _AP_S2 = true, - QUAN_INC = F2 > _AP_F && - !(_AP_Q == AP_TRN || (_AP_Q == AP_TRN_ZERO && !_AP_S2)); - bool carry = false; - // handle quantization - unsigned sh_amt = (F2 > _AP_F) ? F2 - _AP_F : _AP_F - F2; - if (F2 == _AP_F) - Base::V = man.V; - else if (F2 > _AP_F) { - if (sh_amt < DOUBLE_MAN + 2) - Base::V = man.V >> sh_amt; - else { - Base::V = isneg ? -1 : 0; - } - if ((_AP_Q != AP_TRN) && !((_AP_Q == AP_TRN_ZERO) && !_AP_S2)) { - bool qb = (F2 - _AP_F > _AP_W2) ? isneg : (bool)_AP_ROOT_op_get_bit( - man.V, F2 - _AP_F - 1); - bool r = - (F2 > _AP_F + 1) - ? _AP_ROOT_op_get_range(man.V, 0, (F2 - _AP_F - 2 < _AP_W2) - ? (F2 - _AP_F - 2) - : (_AP_W2 - 1)) != 0 - : false; - carry = quantization_adjust(qb, r, isneg); - } - } else { // no quantization - Base::V = man.V; - if (sh_amt < _AP_W) - Base::V = Base::V << sh_amt; - else - Base::V = 0; - } - // handle overflow/underflow - if ((_AP_O != AP_WRAP || _AP_N != 0) && - ((!_AP_S && _AP_S2) || - _AP_I - _AP_S < - _AP_I2 - _AP_S2 + - (QUAN_INC || - (_AP_S2 && (_AP_O == AP_SAT_SYM))))) { // saturation - bool deleted_zeros = _AP_S2 ? true : !carry, deleted_ones = true; - bool neg_src = isneg; - bool lD = false; - int pos1 = F2 - _AP_F + _AP_W; - int pos2 = F2 - _AP_F + _AP_W + 1; - bool newsignbit = _AP_ROOT_op_get_bit(Base::V, _AP_W - 1); - if (pos1 < _AP_W2 && pos1 >= 0) - // lD = _AP_ROOT_op_get_bit(man.V, pos1); - lD = (man.V >> pos1) & 1; - if (pos1 < _AP_W2) { - bool Range1_all_ones = true; - bool Range1_all_zeros = true; - bool Range2_all_ones = true; - ap_int_base Range2; - ap_int_base all_ones(-1); - - if (pos2 >= 0 && pos2 < _AP_W2) { - // Range2.V = _AP_ROOT_op_get_range(man.V, - // pos2, _AP_W2 - 1); - Range2.V = man.V; - Range2.V >>= pos2; - Range2_all_ones = Range2 == (all_ones >> pos2); - } else if (pos2 < 0) - Range2_all_ones = false; - if (pos1 >= 0 && pos2 < _AP_W2) { - Range1_all_ones = Range2_all_ones && lD; - Range1_all_zeros = !Range2.V && !lD; - } else if (pos2 == _AP_W2) { - Range1_all_ones = lD; - Range1_all_zeros = !lD; - } else if (pos1 < 0) { - Range1_all_zeros = !man.V; - Range1_all_ones = false; - } - - deleted_zeros = - deleted_zeros && (carry ? Range1_all_ones : Range1_all_zeros); - deleted_ones = - carry ? Range2_all_ones && (pos1 < 0 || !lD) : Range1_all_ones; - neg_src = isneg && !(carry && Range1_all_ones); - } else - neg_src = isneg && newsignbit; - bool neg_trg = _AP_S && newsignbit; - bool overflow = (neg_trg || !deleted_zeros) && !isneg; - bool underflow = (!neg_trg || !deleted_ones) && neg_src; - if ((_AP_O == AP_SAT_SYM) && _AP_S2 && _AP_S) - underflow |= - neg_src && - (_AP_W > 1 ? 
_AP_ROOT_op_get_range(Base::V, 0, _AP_W - 2) == 0 - : true); - overflow_adjust(underflow, overflow, lD, neg_src); - } - } - report(); - } - - // TODO more optimized implementation. - INLINE ap_fixed_base(float d) { *this = ap_fixed_base(double(d)); } - -#if _AP_ENABLE_HALF_ == 1 - // TODO more optimized implementation. - INLINE ap_fixed_base(half d) { *this = ap_fixed_base(double(d)); } -#endif - // @} - - /// @name assign operator - /// assign, using another ap_fixed_base of same template parameters. - /* - INLINE ap_fixed_base& operator=( - const ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& op) { - Base::V = op.V; - return *this; - } - */ - - template - INLINE ap_fixed_base& operator=( - const ap_fixed_base<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& op) { - - const int _AP_F = _AP_W - _AP_I; - const int F2 = _AP_W2 - _AP_I2; - const int QUAN_INC = - F2 > _AP_F && !(_AP_Q == AP_TRN || (_AP_Q == AP_TRN_ZERO && !_AP_S2)); - - if (!op) Base::V = 0; - bool carry = false; - bool signbit = _AP_ROOT_op_get_bit(op.V, _AP_W2 - 1); - bool isneg = signbit && _AP_S2; - if (F2 == _AP_F) - Base::V = op.V; - else if (F2 > _AP_F) { - unsigned int sh_amt = F2 - _AP_F; - // moves bits right, handle quantization. - if (sh_amt < _AP_W2) { - Base::V = op.V >> sh_amt; - } else { - Base::V = isneg ? -1 : 0; - } - if (_AP_Q != AP_TRN && !(_AP_Q == AP_TRN_ZERO && !_AP_S2)) { - bool qbit = _AP_ROOT_op_get_bit(op.V, F2 - _AP_F - 1); - // bit after LSB. - bool qb = (F2 - _AP_F > _AP_W2) ? _AP_S2 && signbit : qbit; - enum { hi = ((F2 - _AP_F - 2) < _AP_W2) ? (F2 - _AP_F - 2) : (_AP_W2 - 1) }; - // bits after qb. - bool r = (F2 > _AP_F + 1) ? (_AP_ROOT_op_get_range(op.V, 0, hi) != 0) : false; - carry = quantization_adjust(qb, r, isneg); - } - } else { - unsigned sh_amt = _AP_F - F2; - // moves bits left, no quantization - if (sh_amt < _AP_W) { - if (_AP_W > _AP_W2) { - // extend and then shift, avoid losing bits. - Base::V = op.V; - Base::V <<= sh_amt; - } else { - // shift and truncate. - Base::V = op.V << sh_amt; - } - } else { - Base::V = 0; - } - } - // handle overflow/underflow - if ((_AP_O != AP_WRAP || _AP_N != 0) && - ((!_AP_S && _AP_S2) || - _AP_I - _AP_S < - _AP_I2 - _AP_S2 + - (QUAN_INC || (_AP_S2 && _AP_O == AP_SAT_SYM)))) { // saturation - bool deleted_zeros = _AP_S2 ? true : !carry; - bool deleted_ones = true; - bool neg_src = isneg; - bool newsignbit = _AP_ROOT_op_get_bit(Base::V, _AP_W - 1); - enum { pos1 = F2 - _AP_F + _AP_W, pos2 = F2 - _AP_F + _AP_W + 1 }; - bool lD = (pos1 < _AP_W2 && pos1 >= 0) ? _AP_ROOT_op_get_bit(op.V, pos1) - : false; - if (pos1 < _AP_W2) { - bool Range1_all_ones = true; - bool Range1_all_zeros = true; - bool Range2_all_ones = true; - ap_int_base<_AP_W2, false> all_ones(-1); - - if (pos2 < _AP_W2 && pos2 >= 0) { - ap_int_base<_AP_W2, false> Range2; - Range2.V = _AP_ROOT_op_get_range(op.V, pos2, _AP_W2 - 1); - Range2_all_ones = Range2 == (all_ones >> pos2); - } else if (pos2 < 0) { - Range2_all_ones = false; - } - - if (pos1 >= 0 && pos2 < _AP_W2) { - ap_int_base<_AP_W2, false> Range1; - Range1.V = _AP_ROOT_op_get_range(op.V, pos1, _AP_W2 - 1); - Range1_all_ones = Range1 == (all_ones >> pos1); - Range1_all_zeros = !Range1.V; - } else if (pos2 == _AP_W2) { - Range1_all_ones = lD; - Range1_all_zeros = !lD; - } else if (pos1 < 0) { - Range1_all_zeros = !op.V; - Range1_all_ones = false; - } - - deleted_zeros = - deleted_zeros && (carry ? Range1_all_ones : Range1_all_zeros); - deleted_ones = - carry ? 
Range2_all_ones && (pos1 < 0 || !lD) : Range1_all_ones; - neg_src = isneg && !(carry && Range1_all_ones); - } else - neg_src = isneg && newsignbit; - bool neg_trg = _AP_S && newsignbit; - bool overflow = (neg_trg || !deleted_zeros) && !isneg; - bool underflow = (!neg_trg || !deleted_ones) && neg_src; - if ((_AP_O == AP_SAT_SYM) && _AP_S2 && _AP_S) - underflow |= - neg_src && - (_AP_W > 1 ? _AP_ROOT_op_get_range(Base::V, 0, _AP_W - 2) == 0 - : true); - - overflow_adjust(underflow, overflow, lD, neg_src); - } - return *this; - } // operator= - - template - INLINE ap_fixed_base& operator=( - const volatile ap_fixed_base<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& op) { - operator=(const_cast&>(op)); - return *this; - } - - /// Set this ap_fixed_base with ULL. - INLINE ap_fixed_base& setBits(ap_ulong bv) { - // TODO when ull is not be long enough... - Base::V = bv; - return *this; - } - - /// Return a ap_fixed_base object whose this->V is assigned by bv. - static INLINE ap_fixed_base bitsToFixed(ap_ulong bv) { - // TODO fix when ull is not be long enough... - ap_fixed_base t; -#ifdef __SYNTHESIS__ - t.V = bv; -#else - t.V.set_bits(bv); -#endif - return t; - } - - // Explicit conversion functions to ap_int_base. - /** Captures all integer bits, in truncate mode. - * @param[in] Cnative follow conversion from double to int. - */ - INLINE ap_int_base to_ap_int_base( - bool Cnative = true) const { - ap_int_base ret; - if (_AP_I == 0) { - ret.V = 0; - } else if (_AP_I > 0 && _AP_I <= _AP_W) { - ret.V = _AP_ROOT_op_get_range(Base::V, _AP_W - _AP_I, _AP_W - 1); - } else if (_AP_I > _AP_W) { - ret.V = _AP_ROOT_op_get_range(Base::V, 0, _AP_W - 1); - ret.V <<= (_AP_I - _AP_W); - } - /* Consider the following case - * float f = -7.5f; - * ap_fixed<8,4> t = f; // -8 0 0 0 . 0.5 - * int i = t.to_int(); - * the result should be -7 instead of -8. - * Therefore, after truncation, the value should be increated by 1. - * For (-1, 0), carry to MSB will happen, but result 0 is still correct. - */ - if (Cnative && _AP_I < _AP_W) { - // Follow C native data type, conversion from double to int - if (_AP_S && _AP_ROOT_op_get_bit(Base::V, _AP_W - 1) && (_AP_I < _AP_W) && - (_AP_ROOT_op_get_range( - Base::V, 0, _AP_I < 0 ? _AP_W - 1 : _AP_W - _AP_I - 1) != 0)) - ++ret; - } else { - // Follow OSCI library, conversion from sc_fixed to sc_int - } - return ret; - }; - - public: - template - INLINE operator ap_int_base<_AP_W2, _AP_S2>() const { - return ap_int_base<_AP_W2, _AP_S2>(to_ap_int_base()); - } - - // Explicit conversion function to C built-in integral type. - INLINE char to_char() const { return to_ap_int_base().to_char(); } - - INLINE int to_int() const { return to_ap_int_base().to_int(); } - - INLINE unsigned to_uint() const { return to_ap_int_base().to_uint(); } - - INLINE ap_slong to_int64() const { return to_ap_int_base().to_int64(); } - - INLINE ap_ulong to_uint64() const { return to_ap_int_base().to_uint64(); } - - /// covert function to double. - /** only round-half-to-even mode supported, does not obey FE env. */ - INLINE double to_double() const { -#if defined(AP_FIXED_ENABLE_CPP_FENV) - _AP_WARNING(std::fegetround() != FE_TONEAREST, - "Only FE_TONEAREST is supported"); -#endif - enum { BITS = DOUBLE_MAN + DOUBLE_EXP + 1 }; - if (!Base::V) return 0.0f; - bool s = _AP_S && _AP_ROOT_op_get_bit(Base::V, _AP_W - 1); ///< sign. - ap_int_base<_AP_W, false> tmp; - if (s) - tmp.V = -Base::V; // may truncate one bit extra from neg in sim. 
- else - tmp.V = Base::V; - int l = tmp.countLeadingZeros(); ///< number of leading zeros. - int e = _AP_I - l - 1 + DOUBLE_BIAS; ///< exponent - int lsb_index = _AP_W - l - 1 - DOUBLE_MAN; - // more than 0.5? - bool a = (lsb_index >=2) ? - (_AP_ROOT_op_get_range(tmp.V, 0, lsb_index - 2) != 0) : 0; - // round to even - a |= (lsb_index >=0) ? _AP_ROOT_op_get_bit(tmp.V, lsb_index) : 0; - // ull is at least 64-bit - ap_ulong m; - // may actually left shift, ensure buffer is wide enough. - if (_AP_W > BITS) { - m = (lsb_index >= 1) ? (ap_ulong)(tmp.V >> (lsb_index - 1)) - : (ap_ulong)(tmp.V << (1 - lsb_index)); - } else { - m = (ap_ulong)tmp.V; - m = (lsb_index >= 1) ? (m >> (lsb_index - 1)) - : (m << (1 - lsb_index)); - } - m += a; - m >>= 1; - //std::cout << '\n' << std::hex << m << '\n'; // TODO delete this - // carry to MSB, increase exponent - if (_AP_ctype_op_get_bit(m, DOUBLE_MAN + 1)) { - e += 1; - } - // set sign and exponent - m = _AP_ctype_op_set_bit(m, BITS - 1, s); - //std::cout << m << '\n'; // TODO delete this - m = _AP_ctype_op_set_range(m, DOUBLE_MAN, DOUBLE_MAN + DOUBLE_EXP - 1, e); - //std::cout << std::hex << m << std::dec << std::endl; // TODO delete this - // cast to fp - return rawBitsToDouble(m); - } - - /// convert function to float. - /** only round-half-to-even mode supported, does not obey FE env. */ - INLINE float to_float() const { -#if defined(AP_FIXED_ENABLE_CPP_FENV) - _AP_WARNING(std::fegetround() != FE_TONEAREST, - "Only FE_TONEAREST is supported"); -#endif - enum { BITS = FLOAT_MAN + FLOAT_EXP + 1 }; - if (!Base::V) return 0.0f; - bool s = _AP_S && _AP_ROOT_op_get_bit(Base::V, _AP_W - 1); ///< sign. - ap_int_base<_AP_W, false> tmp; - if (s) - tmp.V = -Base::V; // may truncate one bit extra from neg in sim. - else - tmp.V = Base::V; - int l = tmp.countLeadingZeros(); ///< number of leading zeros. - int e = _AP_I - l - 1 + FLOAT_BIAS; ///< exponent - int lsb_index = _AP_W - l - 1 - FLOAT_MAN; - // more than 0.5? - bool a = (lsb_index >=2) ? - (_AP_ROOT_op_get_range(tmp.V, 0, lsb_index - 2) != 0) : 0; - // round to even - a |= (lsb_index >=0) ? _AP_ROOT_op_get_bit(tmp.V, lsb_index) : 0; - // ul is at least 32-bit - unsigned long m; - // may actually left shift, ensure buffer is wide enough. - if (_AP_W > BITS) { - m = (lsb_index >= 1) ? (unsigned long)(tmp.V >> (lsb_index - 1)) - : (unsigned long)(tmp.V << (1 - lsb_index)); - } else { - m = (unsigned long)tmp.V; - m = (lsb_index >= 1) ? (m >> (lsb_index - 1)) - : (m << (1 - lsb_index)); - } - m += a; - m >>= 1; - // carry to MSB, increase exponent - if (_AP_ctype_op_get_bit(m, FLOAT_MAN + 1)) { - e += 1; - } - // set sign and exponent - m = _AP_ctype_op_set_bit(m, BITS - 1, s); - m = _AP_ctype_op_set_range(m, FLOAT_MAN, FLOAT_MAN + FLOAT_EXP - 1, e); - // cast to fp - return rawBitsToFloat(m); - } - -#if _AP_ENABLE_HALF_ == 1 - /// convert function to half. - /** only round-half-to-even mode supported, does not obey FE env. */ - INLINE half to_half() const { -#if defined(AP_FIXED_ENABLE_CPP_FENV) - _AP_WARNING(std::fegetround() != FE_TONEAREST, - "Only FE_TONEAREST is supported"); -#endif - enum { BITS = HALF_MAN + HALF_EXP + 1 }; - if (!Base::V) return 0.0f; - bool s = _AP_S && _AP_ROOT_op_get_bit(Base::V, _AP_W - 1); ///< sign. - ap_int_base<_AP_W, false> tmp; - if (s) - tmp.V = -Base::V; // may truncate one bit extra from neg in sim. - else - tmp.V = Base::V; - int l = tmp.countLeadingZeros(); ///< number of leading zeros. 
- int e = _AP_I - l - 1 + HALF_BIAS; ///< exponent - int lsb_index = _AP_W - l - 1 - HALF_MAN; - // more than 0.5? - bool a = (lsb_index >=2) ? - (_AP_ROOT_op_get_range(tmp.V, 0, lsb_index - 2) != 0) : 0; - // round to even - a |= (lsb_index >=0) ? _AP_ROOT_op_get_bit(tmp.V, lsb_index) : 0; - // short is at least 16-bit - unsigned short m; - // may actually left shift, ensure buffer is wide enough. - if (_AP_W > BITS) { - m = (lsb_index >= 1) ? (unsigned short)(tmp.V >> (lsb_index - 1)) - : (unsigned short)(tmp.V << (1 - lsb_index)); - } else { - m = (unsigned short)tmp.V; - m = (lsb_index >= 1) ? (m >> (lsb_index - 1)) - : (m << (1 - lsb_index)); - } - m += a; - m >>= 1; - // carry to MSB, increase exponent - if (_AP_ctype_op_get_bit(m, HALF_MAN + 1)) { - e += 1; - } - // set sign and exponent - m = _AP_ctype_op_set_bit(m, BITS - 1, s); - m = _AP_ctype_op_set_range(m, HALF_MAN, HALF_MAN + HALF_EXP - 1, e); - // cast to fp - return rawBitsToHalf(m); - } -#endif - - // FIXME inherited from old code, this may loose precision! - INLINE operator long double() const { return (long double)to_double(); } - - INLINE operator double() const { return to_double(); } - - INLINE operator float() const { return to_float(); } - -#if _AP_ENABLE_HALF_ == 1 - INLINE operator half() const { return to_half(); } -#endif - - INLINE operator bool() const { return (bool)Base::V != 0; } - - INLINE operator char() const { return (char)to_int(); } - - INLINE operator signed char() const { return (signed char)to_int(); } - - INLINE operator unsigned char() const { return (unsigned char)to_uint(); } - - INLINE operator short() const { return (short)to_int(); } - - INLINE operator unsigned short() const { return (unsigned short)to_uint(); } - - INLINE operator int() const { return to_int(); } - - INLINE operator unsigned int() const { return to_uint(); } - -// FIXME don't assume data width... -#ifdef __x86_64__ - INLINE operator long() const { return (long)to_int64(); } - - INLINE operator unsigned long() const { return (unsigned long)to_uint64(); } -#else - INLINE operator long() const { return (long)to_int(); } - - INLINE operator unsigned long() const { return (unsigned long)to_uint(); } -#endif // ifdef __x86_64__ else - - INLINE operator ap_ulong() const { return to_uint64(); } - - INLINE operator ap_slong() const { return to_int64(); } - - INLINE int length() const { return _AP_W; }; - - // bits_to_int64 deleted. -#ifndef __SYNTHESIS__ - // Used in autowrap, when _AP_W < 64. - INLINE ap_ulong bits_to_uint64() const { - return (Base::V).to_uint64(); - } -#endif - - // Count the number of zeros from the most significant bit - // to the first one bit. Note this is only for ap_fixed_base whose - // _AP_W <= 64, otherwise will incur assertion. - INLINE int countLeadingZeros() { -#ifdef __SYNTHESIS__ - // TODO: used llvm.ctlz intrinsic ? - if (_AP_W <= 32) { - ap_int_base<32, false> t(-1ULL); - t.range(_AP_W - 1, 0) = this->range(0, _AP_W - 1); - return __builtin_ctz(t.V); - } else if (_AP_W <= 64) { - ap_int_base<64, false> t(-1ULL); - t.range(_AP_W - 1, 0) = this->range(0, _AP_W - 1); - return __builtin_ctzll(t.V); - } else { - enum {__N = (_AP_W + 63) / 64}; - int NZeros = 0; - int i = 0; - bool hitNonZero = false; - for (i = 0; i < __N - 1; ++i) { - ap_int_base<64, false> t; - t.range(0, 63) = this->range(_AP_W - i * 64 - 64, _AP_W - i * 64 - 1); - NZeros += hitNonZero ? 
0 : __builtin_clzll(t.V); - hitNonZero |= (t != 0); - } - if (!hitNonZero) { - ap_int_base<64, false> t(-1ULL); - t.range(63 - (_AP_W - 1) % 64, 63) = this->range(0, (_AP_W - 1) % 64); - NZeros += __builtin_clzll(t.V); - } - return NZeros; - } -#else - return Base::V.countLeadingZeros(); -#endif - } - - // Arithmetic : Binary - // ------------------------------------------------------------------------- - template - INLINE typename RType<_AP_W2, _AP_I2, _AP_S2>::mult operator*( - const ap_fixed_base<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& op2) - const { - typename RType<_AP_W2, _AP_I2, _AP_S2>::mult_base r, t; - r.V = Base::V; - t.V = op2.V; - r.V *= op2.V; - return r; - } - - // multiply function deleted. - - template - INLINE typename RType<_AP_W2, _AP_I2, _AP_S2>::div operator/( - const ap_fixed_base<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& op2) - const { - typename RType<_AP_W2, _AP_I2, _AP_S2>::div_base r; -#ifndef __SYNTHESIS__ - enum {F2 = _AP_W2-_AP_I2, - _W1=AP_MAX(_AP_W + AP_MAX(F2, 0) + ((_AP_S2 && !_AP_S) ? 1 : 0), _AP_W2 + ((_AP_S && !_AP_S2) ? 1 : 0))}; - ap_int_base<_W1,_AP_S||_AP_S2> dividend,divisior; - ap_int_base<_W1,_AP_S> tmp1; - ap_int_base<_W1,_AP_S2> tmp2; - tmp1.V = Base::V; - tmp1.V <<= AP_MAX(F2,0); - tmp2.V = op2.V; - dividend = tmp1; - divisior = tmp2; - r.V = ((_AP_S||_AP_S2) ? dividend.V.sdiv(divisior.V): dividend.V.udiv(divisior.V)); -#else - #ifndef __SC_COMPATIBLE__ - ap_fixed_base<_AP_W + AP_MAX(_AP_W2 - _AP_I2, 0),_AP_I, _AP_S> t(*this); - #else - ap_fixed_base<_AP_W + AP_MAX(_AP_W2 - _AP_I2, 0) + AP_MAX(_AP_I2, 0),_AP_I, _AP_S> t(*this); - #endif - r.V = t.V / op2.V; -#endif -/* - enum { - F2 = _AP_W2 - _AP_I2, - shl = AP_MAX(F2, 0) + AP_MAX(_AP_I2, 0), -#ifndef __SC_COMPATIBLE__ - shr = AP_MAX(_AP_I2, 0), -#else - shr = 0, -#endif - W3 = _AP_S2 + _AP_W + shl, - S3 = _AP_S || _AP_S2, - }; - ap_int_base dividend, t; - dividend.V = Base::V; - // multiply both by (1 << F2), and than do integer division. - dividend.V <<= (int) shl; -#ifdef __SYNTHESIS__ - // .V's have right signedness, and will have right extending. - t.V = dividend.V / op2.V; -#else - // XXX op2 may be wider than dividend, and sdiv and udiv takes the same with - // as left hand operand, so data might be truncated by mistake if not - // handled here. - t.V = S3 ? dividend.V.sdiv(op2.V) : dividend.V.udiv(op2.V); -#endif - r.V = t.V >> (int) shr; -*/ - return r; - } - -#define OP_BIN_AF(Sym, Rty) \ - template \ - INLINE typename RType<_AP_W2, _AP_I2, _AP_S2>::Rty operator Sym( \ - const ap_fixed_base<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& \ - op2) const { \ - typename RType<_AP_W2, _AP_I2, _AP_S2>::Rty##_base ret, lhs(*this), \ - rhs(op2); \ - ret.V = lhs.V Sym rhs.V; \ - return ret; \ - } - - OP_BIN_AF(+, plus) - OP_BIN_AF(-, minus) - OP_BIN_AF(&, logic) - OP_BIN_AF(|, logic) - OP_BIN_AF(^, logic) - -// Arithmetic : assign -// ------------------------------------------------------------------------- -#define OP_ASSIGN_AF(Sym) \ - template \ - INLINE ap_fixed_base& operator Sym##=( \ - const ap_fixed_base<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& \ - op2) { \ - *this = operator Sym(op2); \ - return *this; \ - } - - OP_ASSIGN_AF(*) - OP_ASSIGN_AF(/) - OP_ASSIGN_AF(+) - OP_ASSIGN_AF(-) - OP_ASSIGN_AF(&) - OP_ASSIGN_AF(|) - OP_ASSIGN_AF(^) - - // Prefix and postfix increment and decrement. 
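As a usage sketch of the arithmetic operators above and the increment operators that follow (an editorial example assuming the public ap_fixed wrapper from ap_fixed.h, not part of the original header):

#include <ap_fixed.h>
#include <cassert>

void arith_sketch() {
  ap_fixed<8, 4> a = 2.25;  // 4 integer bits, 4 fractional bits
  ap_fixed<6, 2> b = 1.5;
  // operator* returns the full-precision product type (RType::mult):
  // W grows to 8 + 6 = 14 and I to 4 + 2 = 6, so no result bits are lost.
  ap_fixed<14, 6> p = a * b;
  assert(p == 3.375);
  // operator*= multiplies at full precision, then quantizes/saturates back
  // into a's own <8,4> format on assignment, per its Q/O template modes.
  a *= b;
  assert(a == 3.375);  // exactly representable: the LSB of <8,4> is 1/16
  // operator++ (below) adds an exactly representable 1.
  ++a;
  assert(a == 4.375);
}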
- // ------------------------------------------------------------------------- - - /// Prefix increment - INLINE ap_fixed_base& operator++() { - operator+=(ap_fixed_base<_AP_W - _AP_I + 1, 1, false>(1)); - return *this; - } - - /// Prefix decrement. - INLINE ap_fixed_base& operator--() { - operator-=(ap_fixed_base<_AP_W - _AP_I + 1, 1, false>(1)); - return *this; - } - - /// Postfix increment - INLINE const ap_fixed_base operator++(int) { - ap_fixed_base r(*this); - operator++(); - return r; - } - - /// Postfix decrement - INLINE const ap_fixed_base operator--(int) { - ap_fixed_base r(*this); - operator--(); - return r; - } - - // Unary arithmetic. - // ------------------------------------------------------------------------- - INLINE ap_fixed_base operator+() { return *this; } - - INLINE ap_fixed_base<_AP_W + 1, _AP_I + 1, true> operator-() const { - ap_fixed_base<_AP_W + 1, _AP_I + 1, true> r(*this); - r.V = -r.V; - return r; - } - - INLINE ap_fixed_base<_AP_W, _AP_I, true, _AP_Q, _AP_O, _AP_N> getNeg() { - ap_fixed_base<_AP_W, _AP_I, true, _AP_Q, _AP_O, _AP_N> r(*this); - r.V = -r.V; - return r; - } - - // Not (!) - // ------------------------------------------------------------------------- - INLINE bool operator!() const { return Base::V == 0; } - - // Bitwise complement - // ------------------------------------------------------------------------- - // XXX different from Mentor's ac_fixed. - INLINE ap_fixed_base<_AP_W, _AP_I, _AP_S> operator~() const { - ap_fixed_base<_AP_W, _AP_I, _AP_S> r; - r.V = ~Base::V; - return r; - } - - // Shift - // ------------------------------------------------------------------------- - // left shift is the same as moving point right, i.e. increate I. - template - INLINE ap_fixed_base<_AP_W, _AP_I + _AP_SHIFT, _AP_S> lshift() const { - ap_fixed_base<_AP_W, _AP_I + _AP_SHIFT, _AP_S> r; - r.V = Base::V; - return r; - } - - template - INLINE ap_fixed_base<_AP_W, _AP_I - _AP_SHIFT, _AP_S> rshift() const { - ap_fixed_base<_AP_W, _AP_I - _AP_SHIFT, _AP_S> r; - r.V = Base::V; - return r; - } - - // Because the return type is the type of the the first operand, shift assign - // operators do not carry out any quantization or overflow - // While systemc, shift assigns for sc_fixed/sc_ufixed will result in - // quantization or overflow (depending on the mode of the first operand) - INLINE ap_fixed_base operator<<(unsigned int sh) const { - ap_fixed_base r; - r.V = Base::V << sh; -// TODO check shift overflow? -#ifdef __SC_COMPATIBLE__ - if (sh == 0) return r; - if (_AP_O != AP_WRAP || _AP_N != 0) { - bool neg_src = _AP_S && _AP_ROOT_op_get_bit(Base::V, _AP_W - 1); - bool allones, allzeros; - ap_int_base<_AP_W, false> ones(-1); - if (sh <= _AP_W) { - ap_int_base<_AP_W, false> range1; - range1.V = _AP_ROOT_op_get_range( - const_cast(this)->Base::V, _AP_W - sh, _AP_W - 1); - allones = range1 == (ones >> (_AP_W - sh)); - allzeros = range1 == 0; - } else { - allones = false; - allzeros = Base::V == 0; - } - bool overflow = !allzeros && !neg_src; - bool underflow = !allones && neg_src; - if ((_AP_O == AP_SAT_SYM) && _AP_S) - underflow |= - neg_src && - (_AP_W > 1 ? _AP_ROOT_op_get_range(r.V, 0, _AP_W - 2) == 0 : true); - bool lD = false; - if (sh < _AP_W) lD = _AP_ROOT_op_get_bit(Base::V, _AP_W - sh - 1); - r.overflow_adjust(underflow, overflow, lD, neg_src); - } -#endif - return r; - } - - INLINE ap_fixed_base operator>>(unsigned int sh) const { - ap_fixed_base r; - r.V = Base::V >> sh; -// TODO check shift overflow? 
-#ifdef __SC_COMPATIBLE__ - if (sh == 0) return r; - if (_AP_Q != AP_TRN) { - bool qb = false; - if (sh <= _AP_W) qb = _AP_ROOT_op_get_bit(Base::V, sh - 1); - bool rb = false; - if (sh > 1 && sh <= _AP_W) - rb = _AP_ROOT_op_get_range(const_cast(this)->Base::V, 0, - sh - 2) != 0; - else if (sh > _AP_W) - rb = Base::V != 0; - r.quantization_adjust(qb, rb, - _AP_S && _AP_ROOT_op_get_bit(Base::V, _AP_W - 1)); - } -#endif - return r; - } - - // left and right shift for int - INLINE ap_fixed_base operator<<(int sh) const { - ap_fixed_base r; - bool isNeg = sh < 0; - unsigned int ush = isNeg ? -sh : sh; - if (isNeg) { - return operator>>(ush); - } else { - return operator<<(ush); - } - } - - INLINE ap_fixed_base operator>>(int sh) const { - bool isNeg = sh < 0; - unsigned int ush = isNeg ? -sh : sh; - if (isNeg) { - return operator<<(ush); - } else { - return operator>>(ush); - } - } - - // left and right shift for ap_int. - template - INLINE ap_fixed_base operator<<(const ap_int_base<_AP_W2, true>& op2) const { - // TODO the code seems not optimal. ap_fixed<8,8> << ap_int<2> needs only a - // small mux, but integer need a big one! - int sh = op2.to_int(); - return operator<<(sh); - } - - template - INLINE ap_fixed_base operator>>(const ap_int_base<_AP_W2, true>& op2) const { - int sh = op2.to_int(); - return operator>>(sh); - } - - // left and right shift for ap_uint. - template - INLINE ap_fixed_base operator<<(const ap_int_base<_AP_W2, false>& op2) const { - unsigned int sh = op2.to_uint(); - return operator<<(sh); - } - - template - INLINE ap_fixed_base operator>>(const ap_int_base<_AP_W2, false>& op2) const { - unsigned int sh = op2.to_uint(); - return operator>>(sh); - } - - // left and right shift for ap_fixed - template - INLINE ap_fixed_base operator<<( - const ap_fixed_base<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& - op2) { - return operator<<(op2.to_ap_int_base()); - } - - template - INLINE ap_fixed_base operator>>( - const ap_fixed_base<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& - op2) { - return operator>>(op2.to_ap_int_base()); - } - - // Shift assign. - // ------------------------------------------------------------------------- - - // left shift assign. - INLINE ap_fixed_base& operator<<=(const int sh) { - *this = operator<<(sh); - return *this; - } - - INLINE ap_fixed_base& operator<<=(const unsigned int sh) { - *this = operator<<(sh); - return *this; - } - - template - INLINE ap_fixed_base& operator<<=(const ap_int_base<_AP_W2, _AP_S2>& sh) { - *this = operator<<(sh.to_int()); - return *this; - } - - template - INLINE ap_fixed_base& operator<<=( - const ap_fixed_base<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& - sh) { - *this = operator<<(sh.to_int()); - return *this; - } - - // right shift assign. - INLINE ap_fixed_base& operator>>=(const int sh) { - *this = operator>>(sh); - return *this; - } - - INLINE ap_fixed_base& operator>>=(const unsigned int sh) { - *this = operator>>(sh); - return *this; - } - - template - INLINE ap_fixed_base& operator>>=(const ap_int_base<_AP_W2, _AP_S2>& sh) { - *this = operator>>(sh.to_int()); - return *this; - } - - template - INLINE ap_fixed_base& operator>>=( - const ap_fixed_base<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& - sh) { - *this = operator>>(sh.to_int()); - return *this; - } - -// Comparisons. 
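A short editorial sketch of the comparison semantics defined just below: the operand with fewer fractional bits is converted first so the binary points line up, and only then are the raw bits compared (assumes the public ap_fixed wrapper):

#include <ap_fixed.h>
#include <cassert>

void compare_sketch() {
  ap_fixed<8, 4> a = 1.5;   // 4 fractional bits
  ap_fixed<16, 8> b = 1.5;  // 8 fractional bits
  // Same real value, different formats: alignment makes these equal.
  assert(a == b);
  assert(a <= b && !(a < b));
  // Comparison against double takes the DOUBLE_CMP_AF path via to_double().
  assert(a == 1.5);
}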
-// ------------------------------------------------------------------------- -#define OP_CMP_AF(Sym) \ - template \ - INLINE bool operator Sym(const ap_fixed_base<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, \ - _AP_O2, _AP_N2>& op2) const { \ - enum { _AP_F = _AP_W - _AP_I, F2 = _AP_W2 - _AP_I2 }; \ - if (_AP_F == F2) \ - return Base::V Sym op2.V; \ - else if (_AP_F > F2) \ - return Base::V Sym ap_fixed_base(op2).V; \ - else \ - return ap_fixed_base(*this).V Sym op2.V; \ - return false; \ - } - - OP_CMP_AF(>) - OP_CMP_AF(<) - OP_CMP_AF(>=) - OP_CMP_AF(<=) - OP_CMP_AF(==) - OP_CMP_AF(!=) -// FIXME: Move compare with double out of struct ap_fixed_base defination -// and combine it with compare operator(double, ap_fixed_base) -#define DOUBLE_CMP_AF(Sym) \ - INLINE bool operator Sym(double d) const { return to_double() Sym d; } - - DOUBLE_CMP_AF(>) - DOUBLE_CMP_AF(<) - DOUBLE_CMP_AF(>=) - DOUBLE_CMP_AF(<=) - DOUBLE_CMP_AF(==) - DOUBLE_CMP_AF(!=) - - // Bit and Slice Select - INLINE af_bit_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N> operator[]( - unsigned index) { - _AP_WARNING(index >= _AP_W, "Attempting to read bit beyond MSB"); - return af_bit_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>(this, index); - } - - template - INLINE af_bit_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N> operator[]( - const ap_int_base<_AP_W2, _AP_S2>& index) { - _AP_WARNING(index < 0, "Attempting to read bit with negative index"); - _AP_WARNING(index >= _AP_W, "Attempting to read bit beyond MSB"); - return af_bit_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>(this, - index.to_int()); - } - - INLINE bool operator[](unsigned index) const { - _AP_WARNING(index >= _AP_W, "Attempting to read bit beyond MSB"); - return _AP_ROOT_op_get_bit(const_cast(this)->V, index); - } - - INLINE af_bit_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N> bit( - unsigned index) { - _AP_WARNING(index >= _AP_W, "Attempting to read bit beyond MSB"); - return af_bit_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>(this, index); - } - - template - INLINE af_bit_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N> bit( - const ap_int_base<_AP_W2, _AP_S2>& index) { - _AP_WARNING(index < 0, "Attempting to read bit with negative index"); - _AP_WARNING(index >= _AP_W, "Attempting to read bit beyond MSB"); - return af_bit_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>(this, - index.to_int()); - } - - INLINE bool bit(unsigned index) const { - _AP_WARNING(index >= _AP_W, "Attempting to read bit beyond MSB"); - return _AP_ROOT_op_get_bit(const_cast(this)->V, index); - } - - template - INLINE af_bit_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N> get_bit( - const ap_int_base<_AP_W2, true>& index) { - _AP_WARNING(index < _AP_I - _AP_W, - "Attempting to read bit with negative index"); - _AP_WARNING(index >= _AP_I, "Attempting to read bit beyond MSB"); - return af_bit_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>( - this, index.to_int() + _AP_W - _AP_I); - } - - INLINE bool get_bit(int index) const { - _AP_WARNING(index >= _AP_I, "Attempting to read bit beyond MSB"); - _AP_WARNING(index < _AP_I - _AP_W, "Attempting to read bit beyond MSB"); - return _AP_ROOT_op_get_bit(const_cast(this)->V, - index + _AP_W - _AP_I); - } -#if 0 - INLINE af_bit_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N> get_bit( - int index) { - _AP_WARNING(index < _AP_I - _AP_W, - "Attempting to read bit with negative index"); - _AP_WARNING(index >= _AP_I, "Attempting to read bit beyond MSB"); - return af_bit_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>( - this, index + _AP_W - _AP_I); - } -#endif - - template - 
INLINE bool get_bit(const ap_int_base<_AP_W2, true>& index) const { - _AP_WARNING(index >= _AP_I, "Attempting to read bit beyond MSB"); - _AP_WARNING(index < _AP_I - _AP_W, "Attempting to read bit beyond MSB"); - return _AP_ROOT_op_get_bit(const_cast(this)->V, - index.to_int() + _AP_W - _AP_I); - } - - INLINE af_range_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N> range(int Hi, - int Lo) { - _AP_WARNING((Hi >= _AP_W) || (Lo >= _AP_W), "Out of bounds in range()"); - return af_range_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>(this, Hi, Lo); - } - - // This is a must to strip constness to produce reference type. - INLINE af_range_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N> range( - int Hi, int Lo) const { - _AP_WARNING((Hi >= _AP_W) || (Lo >= _AP_W), "Out of bounds in range()"); - return af_range_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>( - const_cast(this), Hi, Lo); - } - - template - INLINE af_range_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N> range( - const ap_int_base<_AP_W2, _AP_S2>& HiIdx, - const ap_int_base<_AP_W3, _AP_S3>& LoIdx) { - int Hi = HiIdx.to_int(); - int Lo = LoIdx.to_int(); - return this->range(Hi, Lo); - } - - template - INLINE af_range_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N> range( - const ap_int_base<_AP_W2, _AP_S2>& HiIdx, - const ap_int_base<_AP_W3, _AP_S3>& LoIdx) const { - int Hi = HiIdx.to_int(); - int Lo = LoIdx.to_int(); - return this->range(Hi, Lo); - } - - INLINE af_range_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N> range() { - return this->range(_AP_W - 1, 0); - } - - INLINE af_range_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N> range() const { - return this->range(_AP_W - 1, 0); - } - - INLINE af_range_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N> operator()( - int Hi, int Lo) { - return this->range(Hi, Lo); - } - - INLINE af_range_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N> operator()( - int Hi, int Lo) const { - return this->range(Hi, Lo); - } - - template - INLINE af_range_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N> operator()( - const ap_int_base<_AP_W2, _AP_S2>& HiIdx, - const ap_int_base<_AP_W3, _AP_S3>& LoIdx) { - int Hi = HiIdx.to_int(); - int Lo = LoIdx.to_int(); - return this->range(Hi, Lo); - } - - template - INLINE af_range_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N> operator()( - const ap_int_base<_AP_W2, _AP_S2>& HiIdx, - const ap_int_base<_AP_W3, _AP_S3>& LoIdx) const { - int Hi = HiIdx.to_int(); - int Lo = LoIdx.to_int(); - return this->range(Hi, Lo); - } - - INLINE bool is_zero() const { return Base::V == 0; } - - INLINE bool is_neg() const { - if (_AP_S && _AP_ROOT_op_get_bit(Base::V, _AP_W - 1)) return true; - return false; - } - - INLINE int wl() const { return _AP_W; } - - INLINE int iwl() const { return _AP_I; } - - INLINE ap_q_mode q_mode() const { return _AP_Q; } - - INLINE ap_o_mode o_mode() const { return _AP_O; } - - INLINE int n_bits() const { return _AP_N; } - - // print a string representation of this number in the given radix. - // Radix support is 2, 8, 10, or 16. - // The result will include a prefix indicating the radix, except for decimal, - // where no prefix is needed. The default is to output a signed representation - // of signed numbers, or an unsigned representation of unsigned numbers. For - // non-decimal formats, this can be changed by the 'sign' argument. 
-#ifndef __SYNTHESIS__ - std::string to_string(unsigned char radix = 2, bool sign = _AP_S) const { - // XXX in autosim/autowrap.tcl "(${name}).to_string(2).c_str()" is used to - // initialize sc_lv, which seems incapable of handling format "-0b". - if (radix == 2) sign = false; - - std::string str; - str.clear(); - char step = 0; - bool isNeg = sign && (Base::V < 0); - - // Extend to take care of the -MAX case. - ap_fixed_base<_AP_W + 1, _AP_I + 1> tmp(*this); - if (isNeg) { - tmp = -tmp; - str += '-'; - } - std::string prefix; - switch (radix) { - case 2: - prefix = "0b"; - step = 1; - break; - case 8: - prefix = "0o"; - step = 3; - break; - case 16: - prefix = "0x"; - step = 4; - break; - default: - break; - } - - if (_AP_I > 0) { - // Note we drop the quantization and rounding flags here. The - // integer part is always in range, and the fractional part we - // want to drop. Also, the number is always positive, because - // of the absolute value above. - ap_int_base int_part; - // [1] [ I ] d [ W - I ] - // | | | - // | W-I 0 - // W - int_part.V = _AP_ROOT_op_get_range( - tmp.V, _AP_W - _AP_I, _AP_W); - str += int_part.to_string(radix, false); - } else { - str += prefix; - str += '0'; - } - - ap_fixed_base frac_part = tmp; - - if (radix == 10) { - if (frac_part != 0) { - str += "."; - while (frac_part != 0) { - char digit = (frac_part * radix).to_char(); - str += static_cast(digit + '0'); - frac_part *= radix; - } - } - } else { - if (frac_part != 0) { - str += "."; - for (signed i = _AP_W - _AP_I - 1; i >= 0; i -= step) { - char digit = frac_part.range(i, AP_MAX(0, i - step + 1)).to_char(); - // If we have a partial bit pattern at the end, then we need - // to put it in the high-order bits of 'digit'. - int offset = AP_MIN(0, i - step + 1); - digit <<= -offset; - str += digit < 10 ? static_cast(digit + '0') - : static_cast(digit - 10 + 'a'); - } - if (radix == 16) - str += "p0"; // C99 Hex constants are required to have an exponent. - } - } - return str; - } -#else - // XXX HLS will delete this in synthesis - INLINE char* to_string(unsigned char radix = 2, bool sign = _AP_S) const { - return 0; - } -#endif -}; // struct ap_fixed_base. 
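With the struct complete, a brief editorial sketch of the conversion and printing behavior defined above (assuming the public ap_fixed wrapper; values chosen to match the Cnative comment in to_ap_int_base()):

#include <ap_fixed.h>
#include <iostream>

void conversion_sketch() {
  ap_fixed<8, 4> v = -7.5;
  // to_int() follows C semantics (truncation toward zero), so -7.5
  // becomes -7, not -8; this is the Cnative correction described above.
  int i = v.to_int();        // -7
  // to_double() is exact here, since <8,4> has far fewer bits than double.
  double d = v.to_double();  // -7.5
  // to_string() adds 0b/0o/0x prefixes for non-decimal radices only.
  std::cout << v.to_string(10) << " " << i << " " << d << "\n";  // -7.5 -7 -7.5
}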
-
-template <int _AP_W, int _AP_I, bool _AP_S, ap_q_mode _AP_Q, ap_o_mode _AP_O,
-          int _AP_N>
-INLINE void b_not(
-    ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& ret,
-    const ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& op) {
-  ret.V = ~op.V;
-}
-
-template <int _AP_W, int _AP_I, bool _AP_S, ap_q_mode _AP_Q, ap_o_mode _AP_O,
-          int _AP_N>
-INLINE void b_and(
-    ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& ret,
-    const ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& op1,
-    const ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& op2) {
-  ret.V = op1.V & op2.V;
-}
-
-template <int _AP_W, int _AP_I, bool _AP_S, ap_q_mode _AP_Q, ap_o_mode _AP_O,
-          int _AP_N>
-INLINE void b_or(
-    ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& ret,
-    const ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& op1,
-    const ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& op2) {
-  ret.V = op1.V | op2.V;
-}
-
-template <int _AP_W, int _AP_I, bool _AP_S, ap_q_mode _AP_Q, ap_o_mode _AP_O,
-          int _AP_N>
-INLINE void b_xor(
-    ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& ret,
-    const ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& op1,
-    const ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& op2) {
-  ret.V = op1.V ^ op2.V;
-}
-
-template <int _AP_W, int _AP_I, bool _AP_S, ap_q_mode _AP_Q, ap_o_mode _AP_O,
-          int _AP_N, int _AP_W2, int _AP_I2, bool _AP_S2, ap_q_mode _AP_Q2,
-          ap_o_mode _AP_O2, int _AP_N2>
-INLINE void neg(
-    ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& ret,
-    const ap_fixed_base<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& op) {
-  ap_fixed_base<_AP_W2 + !_AP_S2, _AP_I2 + !_AP_S2, true, _AP_Q2, _AP_O2,
-                _AP_N2>
-      t;
-  t.V = -op.V;
-  ret = t;
-}
-
-template <int _AP_W, int _AP_I, bool _AP_S, ap_q_mode _AP_Q, ap_o_mode _AP_O,
-          int _AP_N, int _AP_W2, int _AP_I2, bool _AP_S2, ap_q_mode _AP_Q2,
-          ap_o_mode _AP_O2, int _AP_N2>
-INLINE void lshift(
-    ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& ret,
-    const ap_fixed_base<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& op,
-    int i) {
-  enum {
-    F2 = _AP_W2 - _AP_I2,
-    _AP_I3 = AP_MAX(_AP_I, _AP_I2),
-    _AP_W3 = _AP_I3 + F2,
-  };
-  // wide buffer
-  ap_fixed_base<_AP_W3, _AP_I3, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> t;
-  t.V = op.V;
-  t.V <<= i; // FIXME overflow?
-  // handle quantization and overflow
-  ret = t;
-}
-
-template <int _AP_W, int _AP_I, bool _AP_S, ap_q_mode _AP_Q, ap_o_mode _AP_O,
-          int _AP_N, int _AP_W2, int _AP_I2, bool _AP_S2, ap_q_mode _AP_Q2,
-          ap_o_mode _AP_O2, int _AP_N2>
-INLINE void rshift(
-    ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& ret,
-    const ap_fixed_base<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& op,
-    int i) {
-  enum {
-    F = _AP_W - _AP_I,
-    F2 = _AP_W2 - _AP_I2,
-    F3 = AP_MAX(F, F2),
-    _AP_W3 = _AP_I2 + F3,
-    sh = F - F2,
-  };
-  // wide buffer
-  ap_fixed_base<_AP_W3, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> t;
-  t.V = op.V;
-  if (sh >= 0)
-    t.V <<= (int) sh;
-  t.V >>= i;
-  // handle quantization and overflow
-  ret = t;
-}
-
-//// FIXME
-//// These partial specialization ctors allow code like
-////   char c = 'a';
-////   ap_fixed_base<8, 8, true> x(c);
-//// but what about ap_fixed_base<9, 9, true> y(c) ?
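A hedged sketch of these free helpers in use (editorial example; the bitwise helpers require all operands to share one instantiation, while neg may write to a wider destination):

#include <ap_fixed.h>
#include <cassert>

void helper_sketch() {
  ap_fixed<8, 4> a = 2.5, b = 1.25, r;
  // Raw bitwise xor of the two bit patterns, written through the out-param:
  // 0b0010.1000 ^ 0b0001.0100 == 0b0011.1100, i.e. 3.75.
  b_xor(r, a, b);
  assert(r == 3.75);
  // Widening negate: the temporary is signed, and <9,5> holds -2.5 exactly.
  ap_fixed<9, 5> n;
  neg(n, a);
  assert(n == -2.5);
}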
-// - -#ifndef __SYNTHESIS__ -INLINE std::string scientificFormat(std::string& input) { - if (input.length() == 0) return input; - - size_t decPosition = input.find('.'); - if (decPosition == std::string::npos) decPosition = input.length(); - - size_t firstNonZeroPos = 0; - for (; input[firstNonZeroPos] > '9' || input[firstNonZeroPos] < '1'; - firstNonZeroPos++) - ; - - int exp; - if (firstNonZeroPos > decPosition) - exp = decPosition - firstNonZeroPos; - else - exp = decPosition - firstNonZeroPos - 1; - std::string expString = ""; - if (exp == 0) - ; - else if (exp < 0) { - expString += "e-"; - exp = -exp; - } else - expString += "e+"; - - if (exp < 10 && exp > 0) { - expString += '0'; - expString += (char)('0' + exp); - } else if (exp != 0) { - std::string tmp; - - std::ostringstream oss; - oss << exp; - - tmp = oss.str(); - expString += tmp; - } - - int lastNonZeroPos = (int)(input.length() - 1); - for (; lastNonZeroPos >= 0; --lastNonZeroPos) - if (input[lastNonZeroPos] <= '9' && input[lastNonZeroPos] > '0') break; - - std::string ans = ""; - ans += input[firstNonZeroPos]; - if (firstNonZeroPos != (size_t)lastNonZeroPos) { - ans += '.'; - for (int i = firstNonZeroPos + 1; i <= lastNonZeroPos; i++) - if (input[i] != '.') ans += input[i]; - } - - ans += expString; - return ans; -} - -INLINE std::string reduceToPrecision(std::string& input, int precision) { - bool isZero = true; - size_t inputLen = input.length(); - for (size_t i = 0; i < inputLen && isZero; i++) - if (input[i] != '.' && input[i] != '0') isZero = false; - if (isZero) return "0"; - - // Find the first valid number, skip '-' - int FirstNonZeroPos = 0; - int LastNonZeroPos = (int)inputLen - 1; - int truncBitPosition = 0; - size_t decPosition = input.find('.'); - for (; input[FirstNonZeroPos] < '1' || input[FirstNonZeroPos] > '9'; - FirstNonZeroPos++) - ; - - for (; input[LastNonZeroPos] < '1' || input[LastNonZeroPos] > '9'; - LastNonZeroPos--) - ; - - if (decPosition == std::string::npos) decPosition = inputLen; - // Count the valid number, to decide whether we need to truncate - if ((int)decPosition > LastNonZeroPos) { - if (LastNonZeroPos - FirstNonZeroPos + 1 <= precision) return input; - truncBitPosition = FirstNonZeroPos + precision; - } else if ((int)decPosition < FirstNonZeroPos) { // This is pure decimal - if (LastNonZeroPos - FirstNonZeroPos + 1 <= precision) { - if (FirstNonZeroPos - decPosition - 1 < 4) { - return input; - } else { - if (input[0] == '-') { - std::string tmp = input.substr(1, inputLen - 1); - return std::string("-") + scientificFormat(tmp); - } else - return scientificFormat(input); - } - } - truncBitPosition = FirstNonZeroPos + precision; - } else { - if (LastNonZeroPos - FirstNonZeroPos <= precision) return input; - truncBitPosition = FirstNonZeroPos + precision + 1; - } - - // duplicate the input string, we want to add "0" before the valid numbers - // This is easy for quantization, since we may change 9999 to 10000 - std::string ans = ""; - std::string dupInput = "0"; - if (input[0] == '-') { - ans += '-'; - dupInput += input.substr(1, inputLen - 1); - } else { - dupInput += input.substr(0, inputLen); - ++truncBitPosition; - } - - // Add 'carry' after truncation, if necessary - bool carry = dupInput[truncBitPosition] > '4'; - for (int i = truncBitPosition - 1; i >= 0 && carry; i--) { - if (dupInput[i] == '.') continue; - if (dupInput[i] == '9') - dupInput[i] = '0'; - else { - ++dupInput[i]; - carry = false; - } - } - - // bits outside precision range should be set to 0 - if (dupInput[0] == '1') 
-    FirstNonZeroPos = 0;
-  else {
-    FirstNonZeroPos = 0;
-    while (dupInput[FirstNonZeroPos] < '1' || dupInput[FirstNonZeroPos] > '9')
-      ++FirstNonZeroPos;
-  }
-
-  unsigned it = FirstNonZeroPos;
-  int NValidNumber = 0;
-  while (it < dupInput.length()) {
-    if (dupInput[it] == '.') {
-      ++it;
-      continue;
-    }
-    ++NValidNumber;
-    if (NValidNumber > precision) dupInput[it] = '0';
-    ++it;
-  }
-
-  // Here we want to adjust the truncation position and the value
-  decPosition = dupInput.find('.');
-  if (decPosition == std::string::npos) // When this is integer
-    truncBitPosition = (int)dupInput.length();
-  else
-    for (truncBitPosition = (int)(dupInput.length() - 1); truncBitPosition >= 0;
-         --truncBitPosition) {
-      if (dupInput[truncBitPosition] == '.') break;
-      if (dupInput[truncBitPosition] != '0') {
-        truncBitPosition++;
-        break;
-      }
-    }
-
-  if (dupInput[0] == '1')
-    dupInput = dupInput.substr(0, truncBitPosition);
-  else
-    dupInput = dupInput.substr(1, truncBitPosition - 1);
-
-  decPosition = dupInput.find('.');
-  if (decPosition != std::string::npos) {
-    size_t it = 0;
-    for (it = decPosition + 1; dupInput[it] == '0'; it++)
-      ;
-    if (it - decPosition - 1 < 4) {
-      ans += dupInput;
-      return ans;
-    } else {
-      ans += scientificFormat(dupInput);
-      return ans;
-    }
-  } else if ((int)(dupInput.length()) <= precision) {
-    ans += dupInput;
-    return ans;
-  }
-
-  ans += scientificFormat(dupInput);
-  return ans;
-}
-
-template <int _AP_W, int _AP_I, bool _AP_S, ap_q_mode _AP_Q, ap_o_mode _AP_O,
-          int _AP_N>
-INLINE void print(
-    const ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& x) {
-  if (_AP_I > 0) {
-    ap_int_base<_AP_I, _AP_S> p1;
-    p1.V = x.V >> (_AP_W - _AP_I);
-    print(p1.V); // print overload for .V should exist
-  } else {
-    printf("0");
-  }
-  printf(".");
-  if (_AP_I < _AP_W) {
-    ap_int_base<_AP_W - _AP_I, false> p2;
-    p2.V = _AP_ROOT_op_get_range(x.V, 0, _AP_W - _AP_I);
-    print(p2.V, false); // print overload for .V should exist
-  }
-}
-#endif // ifndef __SYNTHESIS__
-
-// XXX the following two functions have to exist in synthesis,
-// as some old HLS Video Library code uses the ostream overload,
-// although HLS will later delete the I/O function calls.
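As a usage sketch of the stream overloads defined just below (editorial example; simulation-only, since synthesis strips I/O):

#include <ap_fixed.h>
#include <iomanip>
#include <iostream>

void stream_sketch() {
  ap_fixed<16, 8> v = 3.14159;  // quantized to 3.140625 (8 fractional bits)
  // operator<< converts through to_string(10), honors the stream's
  // precision via reduceToPrecision(), and pads to the requested width.
  std::cout << std::setw(10) << std::setprecision(3) << v << "\n";  // "      3.14"
  // operator>> reads a double and converts it, applying the type's
  // quantization and overflow modes:  std::cin >> v;
}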
- -/// Output streaming -//----------------------------------------------------------------------------- -// XXX apcc cannot handle global std::ios_base::Init() brought in by -#ifndef AP_AUTOCC -#ifndef __SYNTHESIS__ -template -INLINE std::ostream& operator<<( - std::ostream& out, - const ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& x) { - // TODO support std::ios_base::fmtflags - unsigned width = out.width(); - unsigned precision = out.precision(); - char fill = out.fill(); - std::string str = x.to_string(10, _AP_S); - str = reduceToPrecision(str, precision); - if (width > str.length()) { - for (unsigned i = 0; i < width - str.length(); ++i) - out << fill; - } - out << str; - return out; -} -#endif // ifndef __SYNTHESIS__ - -/// Input streaming -// ----------------------------------------------------------------------------- -#ifndef __SYNTHESIS__ -template -INLINE std::istream& operator>>( - std::istream& in, - ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& x) { - double d; - in >> d; - x = ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>(d); - return in; -} -#endif -#endif // ifndef AP_AUTOCC - -/// Operators mixing Integers with ap_fixed_base -// ----------------------------------------------------------------------------- -#define AF_BIN_OP_WITH_INT_SF(BIN_OP, C_TYPE, _AP_W2, _AP_S2, RTYPE) \ - template \ - INLINE typename ap_fixed_base<_AP_W, _AP_I, _AP_S>::template RType< \ - _AP_W2, _AP_W2, _AP_S2>::RTYPE \ - operator BIN_OP( \ - const ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& op, \ - C_TYPE i_op) { \ - return op.operator BIN_OP(ap_int_base<_AP_W2, _AP_S2>(i_op)); \ - } - -#define AF_BIN_OP_WITH_INT(BIN_OP, C_TYPE, _AP_W2, _AP_S2, RTYPE) \ - template \ - INLINE typename ap_fixed_base<_AP_W, _AP_I, _AP_S>::template RType< \ - _AP_W2, _AP_W2, _AP_S2>::RTYPE \ - operator BIN_OP( \ - const ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& op, \ - C_TYPE i_op) { \ - return op.operator BIN_OP(ap_fixed_base<_AP_W2, _AP_W2, _AP_S2>(i_op)); \ - } \ - template \ - INLINE typename ap_fixed_base<_AP_W, _AP_I, _AP_S>::template RType< \ - _AP_W2, _AP_W2, _AP_S2>::RTYPE \ - operator BIN_OP( \ - C_TYPE i_op, \ - const ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& op) { \ - return ap_fixed_base<_AP_W2, _AP_W2, _AP_S2>(i_op).operator BIN_OP(op); \ - } - -#define AF_REL_OP_WITH_INT(REL_OP, C_TYPE, _AP_W2, _AP_S2) \ - template \ - INLINE bool operator REL_OP( \ - const ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& op, \ - C_TYPE i_op) { \ - return op.operator REL_OP(ap_fixed_base<_AP_W2, _AP_W2, _AP_S2>(i_op)); \ - } \ - template \ - INLINE bool operator REL_OP( \ - C_TYPE i_op, \ - const ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& op) { \ - return ap_fixed_base<_AP_W2, _AP_W2, _AP_S2>(i_op).operator REL_OP(op); \ - } - -#define AF_ASSIGN_OP_WITH_INT(ASSIGN_OP, C_TYPE, _AP_W2, _AP_S2) \ - template \ - INLINE ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& \ - operator ASSIGN_OP( \ - ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& op, \ - C_TYPE i_op) { \ - return op.operator ASSIGN_OP(ap_fixed_base<_AP_W2, _AP_W2, _AP_S2>(i_op)); \ - } - -#define AF_ASSIGN_OP_WITH_INT_SF(ASSIGN_OP, C_TYPE, _AP_W2, _AP_S2) \ - template \ - INLINE ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& \ - operator ASSIGN_OP( \ - ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& op, \ - C_TYPE i_op) { \ - return op.operator ASSIGN_OP(ap_int_base<_AP_W2, _AP_S2>(i_op)); \ - } - -#define 
ALL_AF_OP_WITH_INT(C_TYPE, BITS, SIGN)                  \
-  AF_BIN_OP_WITH_INT(+, C_TYPE, (BITS), (SIGN), plus)   \
-  AF_BIN_OP_WITH_INT(-, C_TYPE, (BITS), (SIGN), minus)  \
-  AF_BIN_OP_WITH_INT(*, C_TYPE, (BITS), (SIGN), mult)   \
-  AF_BIN_OP_WITH_INT(/, C_TYPE, (BITS), (SIGN), div)    \
-  AF_BIN_OP_WITH_INT(&, C_TYPE, (BITS), (SIGN), logic)  \
-  AF_BIN_OP_WITH_INT(|, C_TYPE, (BITS), (SIGN), logic)  \
-  AF_BIN_OP_WITH_INT(^, C_TYPE, (BITS), (SIGN), logic)  \
-  AF_BIN_OP_WITH_INT_SF(>>, C_TYPE, (BITS), (SIGN), lhs) \
-  AF_BIN_OP_WITH_INT_SF(<<, C_TYPE, (BITS), (SIGN), lhs) \
-  \
-  AF_ASSIGN_OP_WITH_INT(+=, C_TYPE, (BITS), (SIGN))     \
-  AF_ASSIGN_OP_WITH_INT(-=, C_TYPE, (BITS), (SIGN))     \
-  AF_ASSIGN_OP_WITH_INT(*=, C_TYPE, (BITS), (SIGN))     \
-  AF_ASSIGN_OP_WITH_INT(/=, C_TYPE, (BITS), (SIGN))     \
-  AF_ASSIGN_OP_WITH_INT(&=, C_TYPE, (BITS), (SIGN))     \
-  AF_ASSIGN_OP_WITH_INT(|=, C_TYPE, (BITS), (SIGN))     \
-  AF_ASSIGN_OP_WITH_INT(^=, C_TYPE, (BITS), (SIGN))     \
-  AF_ASSIGN_OP_WITH_INT_SF(>>=, C_TYPE, (BITS), (SIGN)) \
-  AF_ASSIGN_OP_WITH_INT_SF(<<=, C_TYPE, (BITS), (SIGN)) \
-  \
-  AF_REL_OP_WITH_INT(>, C_TYPE, (BITS), (SIGN))         \
-  AF_REL_OP_WITH_INT(<, C_TYPE, (BITS), (SIGN))         \
-  AF_REL_OP_WITH_INT(>=, C_TYPE, (BITS), (SIGN))        \
-  AF_REL_OP_WITH_INT(<=, C_TYPE, (BITS), (SIGN))        \
-  AF_REL_OP_WITH_INT(==, C_TYPE, (BITS), (SIGN))        \
-  AF_REL_OP_WITH_INT(!=, C_TYPE, (BITS), (SIGN))
-
-ALL_AF_OP_WITH_INT(bool, 1, false)
-ALL_AF_OP_WITH_INT(char, 8, CHAR_IS_SIGNED)
-ALL_AF_OP_WITH_INT(signed char, 8, true)
-ALL_AF_OP_WITH_INT(unsigned char, 8, false)
-ALL_AF_OP_WITH_INT(short, _AP_SIZE_short, true)
-ALL_AF_OP_WITH_INT(unsigned short, _AP_SIZE_short, false)
-ALL_AF_OP_WITH_INT(int, _AP_SIZE_int, true)
-ALL_AF_OP_WITH_INT(unsigned int, _AP_SIZE_int, false)
-ALL_AF_OP_WITH_INT(long, _AP_SIZE_long, true)
-ALL_AF_OP_WITH_INT(unsigned long, _AP_SIZE_long, false)
-ALL_AF_OP_WITH_INT(ap_slong, _AP_SIZE_ap_slong, true)
-ALL_AF_OP_WITH_INT(ap_ulong, _AP_SIZE_ap_slong, false)
-
-#undef ALL_AF_OP_WITH_INT
-#undef AF_BIN_OP_WITH_INT
-#undef AF_BIN_OP_WITH_INT_SF
-#undef AF_ASSIGN_OP_WITH_INT
-#undef AF_ASSIGN_OP_WITH_INT_SF
-#undef AF_REL_OP_WITH_INT
-
-/*
- * **********************************************************************
- * TODO
- * There is no operator defined with float/double/long double, so that
- * code like
- *   ap_fixed<8,4> a = 1.5f;
- *   a += 0.5f;
- * will fail to compile.
- * Operators with a warning about the conversion might be wanted.
- * ********************************************************************** - */ - -#define AF_BIN_OP_WITH_AP_INT(BIN_OP, RTYPE) \ - template \ - INLINE typename ap_fixed_base<_AP_W2, _AP_W2, _AP_S2>::template RType< \ - _AP_W, _AP_I, _AP_S>::RTYPE \ - operator BIN_OP( \ - const ap_int_base<_AP_W2, _AP_S2>& i_op, \ - const ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& op) { \ - return ap_fixed_base<_AP_W2, _AP_W2, _AP_S2>(i_op).operator BIN_OP(op); \ - } \ - \ - template \ - INLINE typename ap_fixed_base<_AP_W, _AP_I, _AP_S>::template RType< \ - _AP_W2, _AP_W2, _AP_S2>::RTYPE \ - operator BIN_OP( \ - const ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& op, \ - const ap_int_base<_AP_W2, _AP_S2>& i_op) { \ - return op.operator BIN_OP(ap_fixed_base<_AP_W2, _AP_W2, _AP_S2>(i_op)); \ - } - -#define AF_REL_OP_WITH_AP_INT(REL_OP) \ - template \ - INLINE bool operator REL_OP( \ - const ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& op, \ - const ap_int_base<_AP_W2, _AP_S2>& i_op) { \ - return op.operator REL_OP(ap_fixed_base<_AP_W2, _AP_W2, _AP_S2>(i_op)); \ - } \ - \ - template \ - INLINE bool operator REL_OP( \ - const ap_int_base<_AP_W2, _AP_S2>& i_op, \ - const ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& op) { \ - return ap_fixed_base<_AP_W2, _AP_W2, _AP_S2>(i_op).operator REL_OP(op); \ - } - -#define AF_ASSIGN_OP_WITH_AP_INT(ASSIGN_OP) \ - template \ - INLINE ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& \ - operator ASSIGN_OP( \ - ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& op, \ - const ap_int_base<_AP_W2, _AP_S2>& i_op) { \ - return op.operator ASSIGN_OP(ap_fixed_base<_AP_W2, _AP_W2, _AP_S2>(i_op)); \ - } \ - \ - template \ - INLINE ap_int_base<_AP_W2, _AP_S2>& operator ASSIGN_OP( \ - ap_int_base<_AP_W2, _AP_S2>& i_op, \ - const ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& op) { \ - return i_op.operator ASSIGN_OP(op.to_ap_int_base()); \ - } - -AF_BIN_OP_WITH_AP_INT(+, plus) -AF_BIN_OP_WITH_AP_INT(-, minus) -AF_BIN_OP_WITH_AP_INT(*, mult) -AF_BIN_OP_WITH_AP_INT(/, div) -AF_BIN_OP_WITH_AP_INT(&, logic) -AF_BIN_OP_WITH_AP_INT(|, logic) -AF_BIN_OP_WITH_AP_INT(^, logic) - -#undef AF_BIN_OP_WITH_AP_INT - -AF_ASSIGN_OP_WITH_AP_INT(+=) -AF_ASSIGN_OP_WITH_AP_INT(-=) -AF_ASSIGN_OP_WITH_AP_INT(*=) -AF_ASSIGN_OP_WITH_AP_INT(/=) -AF_ASSIGN_OP_WITH_AP_INT(&=) -AF_ASSIGN_OP_WITH_AP_INT(|=) -AF_ASSIGN_OP_WITH_AP_INT(^=) - -#undef AF_ASSIGN_OP_WITH_AP_INT - -AF_REL_OP_WITH_AP_INT(==) -AF_REL_OP_WITH_AP_INT(!=) -AF_REL_OP_WITH_AP_INT(>) -AF_REL_OP_WITH_AP_INT(>=) -AF_REL_OP_WITH_AP_INT(<) -AF_REL_OP_WITH_AP_INT(<=) - -#undef AF_REL_OP_WITH_AP_INT - -// Relational Operators with double -template -INLINE bool operator==( - double op1, - const ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& op2) { - return op2.operator==(op1); -} - -template -INLINE bool operator!=( - double op1, - const ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& op2) { - return op2.operator!=(op1); -} - -template -INLINE bool operator>( - double op1, - const ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& op2) { - return op2.operator<(op1); -} - -template -INLINE bool operator>=( - double op1, - const ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& op2) { - return op2.operator<=(op1); -} - -template -INLINE bool operator<( - double op1, - const ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& op2) { - return op2.operator>(op1); -} - -template -INLINE bool operator<=( - double op1, - const ap_fixed_base<_AP_W, _AP_I, 
_AP_S, _AP_Q, _AP_O, _AP_N>& op2) { - return op2.operator>=(op1); -} - -#endif // ifndef __cplusplus else - -#endif // ifndef __AP_FIXED_BASE_H__ else - -// -*- cpp -*- diff --git a/hls4ml/hls4ml/templates/vivado/ap_types/ap_fixed_ref.h b/hls4ml/hls4ml/templates/vivado/ap_types/ap_fixed_ref.h deleted file mode 100644 index aefda0a..0000000 --- a/hls4ml/hls4ml/templates/vivado/ap_types/ap_fixed_ref.h +++ /dev/null @@ -1,718 +0,0 @@ -/* - * Copyright 2011-2019 Xilinx, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef __AP_FIXED_REF_H__ -#define __AP_FIXED_REF_H__ - -#ifndef __AP_FIXED_H__ -#error "Only ap_fixed.h and ap_int.h can be included directly in user code." -#endif - -#ifndef __cplusplus -#error "C++ is required to include this header file" - -#else -#ifndef __SYNTHESIS__ -#include -#endif -/// Proxy class, which allows bit selection to be used as both rvalue (for -/// reading) and lvalue (for writing) -template -struct af_bit_ref { -#ifdef _MSC_VER -#pragma warning(disable : 4521 4522) -#endif - typedef ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N> ref_type; - ref_type& d_bv; - int d_index; - - public: - INLINE af_bit_ref( - const af_bit_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& ref) - : d_bv(ref.d_bv), d_index(ref.d_index) { -#ifndef __SYNTHESIS__ - _AP_WARNING(d_index < 0, "Index of bit vector (%d) cannot be negative.", - d_index); - _AP_WARNING(d_index >= _AP_W, "Index of bit vector (%d) out of range (%d).", - d_index, _AP_W); -#endif - } - - INLINE af_bit_ref(ref_type* bv, int index = 0) : d_bv(*bv), d_index(index) {} - - INLINE af_bit_ref(const ref_type* bv, int index = 0) - : d_bv(*const_cast(bv)), d_index(index) {} - - /// convert operators. - INLINE operator bool() const { return _AP_ROOT_op_get_bit(d_bv.V, d_index); } - - /// @name assign operators - // @{ - INLINE af_bit_ref& operator=(bool val) { - d_bv.V = _AP_ROOT_op_set_bit(d_bv.V, d_index, val); - return *this; - } - - // Be explicit to prevent it from being deleted, as field d_bv - // is of reference type. 
- INLINE af_bit_ref& operator=(const af_bit_ref& val) { - return operator=(bool(val)); - } - - template - INLINE af_bit_ref& operator=( - const af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& val) { - return operator=(bool(val)); - } - - template - INLINE af_bit_ref& operator=(const ap_bit_ref<_AP_W2, _AP_S2>& val) { - return operator=(bool(val)); - } - - template - INLINE af_bit_ref& operator=(const ap_int_base<_AP_W2, _AP_S2>& val) { - return operator=(val != 0); - } - - template - INLINE af_bit_ref& operator=(const ap_range_ref<_AP_W2, _AP_S2>& val) { - return operator=(ap_int_base<_AP_W2, false>(val)); - } - - template - INLINE af_bit_ref& operator=( - const af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& val) { - return operator=(ap_int_base<_AP_W2, false>(val)); - } - - template - INLINE af_bit_ref& operator=( - const ap_concat_ref<_AP_W2, _AP_T3, _AP_W3, _AP_T3>& val) { - return operator=(ap_int_base<_AP_W2 + _AP_W3, false>(val)); - } - // @} - - /// @name concatenate operators - // @{ - template - INLINE ap_concat_ref<1, af_bit_ref, _AP_W2, ap_int_base<_AP_W2, _AP_S2> > - operator,(ap_int_base<_AP_W2, _AP_S2> &op) { - return ap_concat_ref<1, af_bit_ref, _AP_W2, ap_int_base<_AP_W2, _AP_S2> >( - *this, op); - } - - template - INLINE ap_concat_ref<1, af_bit_ref, 1, ap_bit_ref<_AP_W2, _AP_S2> > operator,( - const ap_bit_ref<_AP_W2, _AP_S2> &op) { - return ap_concat_ref<1, af_bit_ref, 1, ap_bit_ref<_AP_W2, _AP_S2> >(*this, - op); - } - - template - INLINE ap_concat_ref<1, af_bit_ref, _AP_W2, ap_range_ref<_AP_W2, _AP_S2> > - operator,(const ap_range_ref<_AP_W2, _AP_S2> &op) { - return ap_concat_ref<1, af_bit_ref, _AP_W2, ap_range_ref<_AP_W2, _AP_S2> >( - *this, op); - } - - template - INLINE ap_concat_ref<1, af_bit_ref, _AP_W2 + _AP_W3, - ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3> > - operator,(const ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3> &op) { - return ap_concat_ref<1, af_bit_ref, _AP_W2 + _AP_W3, - ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3> >(*this, - op); - } - - template - INLINE ap_concat_ref< - 1, af_bit_ref, _AP_W2, - af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> > - operator,( - const af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> &op) { - return ap_concat_ref< - 1, af_bit_ref, _AP_W2, - af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> >(*this, - op); - } - - template - INLINE ap_concat_ref<1, af_bit_ref, 1, af_bit_ref<_AP_W2, _AP_I2, _AP_S2, - _AP_Q2, _AP_O2, _AP_N2> > - operator,( - const af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> &op) { - return ap_concat_ref<1, af_bit_ref, 1, af_bit_ref<_AP_W2, _AP_I2, _AP_S2, - _AP_Q2, _AP_O2, _AP_N2> >( - *this, - const_cast&>( - op)); - } - // @} - - /// @name comparison - // @{ - template - INLINE bool operator==( - const af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& op) { - return get() == op.get(); - } - - template - INLINE bool operator!=( - const af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& op) { - return get() != op.get(); - } - // @} - - INLINE bool operator~() const { - bool bit = _AP_ROOT_op_get_bit(d_bv.V, d_index); - return bit ? false : true; - } - - INLINE bool get() const { return _AP_ROOT_op_get_bit(d_bv.V, d_index); } - - INLINE int length() const { return 1; } - -#ifndef __SYNTHESIS__ - std::string to_string() const { return get() ? 
"1" : "0"; } -#else - // XXX HLS will delete this in synthesis - INLINE char* to_string() const { return 0; } -#endif -}; // struct af_bit_ref - -// XXX apcc cannot handle global std::ios_base::Init() brought in by -#ifndef AP_AUTOCC -#ifndef __SYNTHESIS__ -template -INLINE std::ostream& operator<<( - std::ostream& os, - const af_bit_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& x) { - os << x.to_string(); - return os; -} -#endif // ifndef __SYNTHESIS__ -#endif // ifndef AP_AUTOCC - -/// Range (slice) reference. -template -struct af_range_ref { -#ifdef _MSC_VER -#pragma warning(disable : 4521 4522) -#endif - typedef ap_fixed_base<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N> ref_type; - ref_type& d_bv; - int l_index; - int h_index; - - public: - /// copy ctor - INLINE af_range_ref( - const af_range_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& ref) - : d_bv(ref.d_bv), l_index(ref.l_index), h_index(ref.h_index) {} - - /// ctor from ap_fixed_base, higher and lower bound. - /** if h is less than l, the bits selected will be returned in reverse order. - */ - INLINE af_range_ref(ref_type* bv, int h, int l) - : d_bv(*bv), l_index(l), h_index(h) { -#ifndef __SYNTHESIS__ - _AP_WARNING(h < 0 || l < 0, - "Higher bound(%d) and lower(%d) bound cannot be negative.", h, - l); - _AP_WARNING(h >= _AP_W || l >= _AP_W, - "Higher bound(%d) or lower(%d) bound out of range.", h, l); - _AP_WARNING(h < l, "The bits selected will be returned in reverse order."); -#endif - } - - INLINE af_range_ref(const ref_type* bv, int h, int l) - : d_bv(*const_cast(bv)), l_index(l), h_index(h) { -#ifndef __SYNTHESIS__ - _AP_WARNING(h < 0 || l < 0, - "Higher bound(%d) and lower(%d) bound cannot be negative.", h, - l); - _AP_WARNING(h >= _AP_W || l >= _AP_W, - "Higher bound(%d) or lower(%d) bound out of range.", h, l); - _AP_WARNING(h < l, "The bits selected will be returned in reverse order."); -#endif - } - - /// @name assign operators - // @{ - -#define ASSIGN_CTYPE_TO_AF_RANGE(DATA_TYPE) \ - INLINE af_range_ref& operator=(const DATA_TYPE val) { \ - ap_int_base<_AP_W, false> loc(val); \ - d_bv.V = _AP_ROOT_op_set_range(d_bv.V, l_index, h_index, loc.V); \ - return *this; \ - } - - ASSIGN_CTYPE_TO_AF_RANGE(bool) - ASSIGN_CTYPE_TO_AF_RANGE(char) - ASSIGN_CTYPE_TO_AF_RANGE(signed char) - ASSIGN_CTYPE_TO_AF_RANGE(unsigned char) - ASSIGN_CTYPE_TO_AF_RANGE(short) - ASSIGN_CTYPE_TO_AF_RANGE(unsigned short) - ASSIGN_CTYPE_TO_AF_RANGE(int) - ASSIGN_CTYPE_TO_AF_RANGE(unsigned int) - ASSIGN_CTYPE_TO_AF_RANGE(long) - ASSIGN_CTYPE_TO_AF_RANGE(unsigned long) - ASSIGN_CTYPE_TO_AF_RANGE(ap_slong) - ASSIGN_CTYPE_TO_AF_RANGE(ap_ulong) -#if _AP_ENABLE_HALF_ == 1 - ASSIGN_CTYPE_TO_AF_RANGE(half) -#endif - ASSIGN_CTYPE_TO_AF_RANGE(float) - ASSIGN_CTYPE_TO_AF_RANGE(double) -#undef ASSIGN_CTYPE_TO_AF_RANGE - - /// assgin using a string. XXX crucial for cosim. - INLINE af_range_ref& operator=(const char* val) { - const ap_int_base<_AP_W, false> tmp(val); // XXX figure out radix - d_bv.V = _AP_ROOT_op_set_range(d_bv.V, l_index, h_index, tmp.V); - return *this; - } - - /// assign from ap_int_base. - // NOTE Base of other assgin operators. - template - INLINE af_range_ref& operator=(const ap_int_base<_AP_W3, _AP_S3>& val) { - d_bv.V = _AP_ROOT_op_set_range(d_bv.V, l_index, h_index, val.V); - return *this; - } - - /// assign from range reference to ap_int_base. 
-  template <int _AP_W2, bool _AP_S2>
-  INLINE af_range_ref& operator=(const ap_range_ref<_AP_W2, _AP_S2>& val) {
-    const ap_int_base<_AP_W2, false> tmp(val);
-    return operator=(tmp);
-  }
-
-  /// assign from bit reference to ap_int_base.
-  template <int _AP_W2, bool _AP_S2>
-  INLINE af_range_ref& operator=(const ap_bit_ref<_AP_W2, _AP_S2>& val) {
-    const ap_int_base<1, false> tmp((bool)val);
-    return operator=(tmp);
-  }
-
-  /// assign from ap_fixed_base.
-  template <int _AP_W2, int _AP_I2, bool _AP_S2, ap_q_mode _AP_Q2,
-            ap_o_mode _AP_O2, int _AP_N2>
-  INLINE af_range_ref& operator=(
-      const ap_fixed_base<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>&
-          val) {
-    d_bv.V = _AP_ROOT_op_set_range(d_bv.V, l_index, h_index, val.V);
-    return *this;
-  }
-
-  /// copy assign.
-  // XXX This has to be explicit, otherwise it will be deleted, as d_bv is
-  // of reference type.
-  INLINE af_range_ref& operator=(const af_range_ref& val) {
-    ap_int_base<_AP_W, false> tmp(val);
-    return operator=(tmp);
-  }
-
-  /// assign from range reference to ap_fixed_base.
-  template <int _AP_W2, int _AP_I2, bool _AP_S2, ap_q_mode _AP_Q2,
-            ap_o_mode _AP_O2, int _AP_N2>
-  INLINE af_range_ref& operator=(
-      const af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& val) {
-    ap_int_base<_AP_W2, false> tmp(val);
-    return operator=(tmp);
-  }
-
-  /// assign from bit reference to ap_fixed_base.
-  template <int _AP_W2, int _AP_I2, bool _AP_S2, ap_q_mode _AP_Q2,
-            ap_o_mode _AP_O2, int _AP_N2>
-  INLINE af_range_ref& operator=(
-      const af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& val) {
-    ap_int_base<1, false> tmp((bool)val);
-    return operator=(tmp);
-  }
-
-  /// assign from compound reference.
-  template <int _AP_W2, typename _AP_T3, int _AP_W3>
-  INLINE af_range_ref& operator=(
-      const ap_concat_ref<_AP_W2, _AP_T3, _AP_W3, _AP_T3>& val) {
-    const ap_int_base<_AP_W2 + _AP_W3, false> tmp(val);
-    return operator=(tmp);
-  }
-  // @}
-
-  /// @name comparison operators with ap_range_ref.
-  // @{
-  template <int _AP_W2, bool _AP_S2>
-  INLINE bool operator==(const ap_range_ref<_AP_W2, _AP_S2>& op2) {
-    ap_int_base<_AP_W, false> lop(*this);
-    ap_int_base<_AP_W2, false> rop(op2);
-    return lop == rop;
-  }
-
-  template <int _AP_W2, bool _AP_S2>
-  INLINE bool operator!=(const ap_range_ref<_AP_W2, _AP_S2>& op2) {
-    return !(operator==(op2));
-  }
-
-  template <int _AP_W2, bool _AP_S2>
-  INLINE bool operator<(const ap_range_ref<_AP_W2, _AP_S2>& op2) {
-    ap_int_base<_AP_W, false> lop(*this);
-    ap_int_base<_AP_W2, false> rop(op2);
-    return lop < rop;
-  }
-
-  template <int _AP_W2, bool _AP_S2>
-  INLINE bool operator>(const ap_range_ref<_AP_W2, _AP_S2>& op2) {
-    ap_int_base<_AP_W, false> lop(*this);
-    ap_int_base<_AP_W2, false> rop(op2);
-    return lop > rop;
-  }
-
-  template <int _AP_W2, bool _AP_S2>
-  INLINE bool operator<=(const ap_range_ref<_AP_W2, _AP_S2>& op2) {
-    return !(operator>(op2));
-  }
-
-  template <int _AP_W2, bool _AP_S2>
-  INLINE bool operator>=(const ap_range_ref<_AP_W2, _AP_S2>& op2) {
-    return !(operator<(op2));
-  }
-  // @}
-
-  /// @name comparison operators with af_range_ref.
- // @{ - template - INLINE bool operator==( - const af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& op2) { - ap_int_base<_AP_W, false> lop(*this); - ap_int_base<_AP_W2, false> rop(op2); - return lop == rop; - } - - template - INLINE bool operator!=( - const af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& op2) { - return !(operator==(op2)); - } - - template - INLINE bool operator<( - const af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& op2) { - ap_int_base<_AP_W, false> lop(*this); - ap_int_base<_AP_W2, false> rop(op2); - return lop < rop; - } - - template - INLINE bool operator>( - const af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& op2) { - ap_int_base<_AP_W, false> lop(*this); - ap_int_base<_AP_W2, false> rop(op2); - return lop > rop; - } - - template - INLINE bool operator<=( - const af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& op2) { - return !(operator>(op2)); - } - - template - INLINE bool operator>=( - const af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& op2) { - return !(operator<(op2)); - } - // @} - - /// @name concatenate operators. - /// @{ - /// concatenate with ap_int_base. - template - INLINE - ap_concat_ref<_AP_W, af_range_ref, _AP_W2, ap_int_base<_AP_W2, _AP_S2> > - operator,(ap_int_base<_AP_W2, _AP_S2> &op) { - return ap_concat_ref<_AP_W, af_range_ref, _AP_W2, - ap_int_base<_AP_W2, _AP_S2> >(*this, op); - } - - /// concatenate with ap_bit_ref. - template - INLINE ap_concat_ref<_AP_W, af_range_ref, 1, ap_bit_ref<_AP_W2, _AP_S2> > - operator,(const ap_bit_ref<_AP_W2, _AP_S2> &op) { - return ap_concat_ref<_AP_W, af_range_ref, 1, ap_bit_ref<_AP_W2, _AP_S2> >( - *this, const_cast&>(op)); - } - - /// concatenate with ap_bit_ref. - template - INLINE ap_concat_ref<_AP_W, af_range_ref, _AP_W2, ap_range_ref<_AP_W2, _AP_S2> > - operator,(const ap_range_ref<_AP_W2, _AP_S2> &op) { - return ap_concat_ref<_AP_W, af_range_ref, _AP_W2, - ap_range_ref<_AP_W2, _AP_S2> >( - *this, const_cast&>(op)); - } - - /// concatenate with ap_concat_ref. - template - INLINE ap_concat_ref<_AP_W, af_range_ref, _AP_W2 + _AP_W3, - ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3> > - operator,(const ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3> &op) { - return ap_concat_ref<_AP_W, af_range_ref, _AP_W2 + _AP_W3, - ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3> >( - *this, const_cast&>(op)); - } - - /// concatenate with another af_range_ref. - template - INLINE - ap_concat_ref<_AP_W, af_range_ref, _AP_W2, - af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> > - operator,(const af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> - &op) { - return ap_concat_ref< - _AP_W, af_range_ref, _AP_W2, - af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> >( - *this, - const_cast&>( - op)); - } - - /// concatenate with another af_bit_ref. 
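[A short sketch of the comparison and concatenation operators above, under the same include-path assumption. Note that slice comparisons go through ap_int_base<W, false>, so they compare raw bit patterns and ignore sign and binary point.]

    #include <ap_fixed.h>
    #include <ap_int.h>
    #include <cassert>

    int main() {
        ap_fixed<8, 4> a = 1.5;     // raw bits 0001_1000
        ap_fixed<8, 4> b = 2.5;     // raw bits 0010_1000
        assert(a.range(7, 0) < b.range(7, 0));  // unsigned bit-pattern compare
        // operator, builds an ap_concat_ref: {a[7:4], b[3:0]} = 0001_1000
        ap_uint<8> packed = (a.range(7, 4), b.range(3, 0));
        assert(packed.to_uint() == 0x18);
        return 0;
    }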
- template - INLINE - ap_concat_ref<_AP_W, af_range_ref, 1, - af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> > - operator,( - const af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> &op) { - return ap_concat_ref< - _AP_W, af_range_ref, 1, - af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> >( - *this, - const_cast&>( - op)); - } - // @} - - INLINE operator ap_ulong() const { - ap_int_base<_AP_W, false> ret; - ret.V = _AP_ROOT_op_get_range(d_bv.V, l_index, h_index); - return ret.to_uint64(); - } - - INLINE operator ap_int_base<_AP_W, false>() const { - ap_int_base<_AP_W, false> ret; - ret.V = _AP_ROOT_op_get_range(d_bv.V, l_index, h_index); - return ret; - } - - INLINE ap_int_base<_AP_W, false> to_ap_int_base() const { - ap_int_base<_AP_W, false> ret; - ret.V = _AP_ROOT_op_get_range(d_bv.V, l_index, h_index); - return ret; - } - - // used in ap_fixed_base::to_string() - INLINE char to_char() const { - return (char)(_AP_ROOT_op_get_range(d_bv.V, l_index, h_index)); - } - - INLINE int to_int() const { - return (int)(_AP_ROOT_op_get_range(d_bv.V, l_index, h_index)); - } - - INLINE unsigned to_uint() const { - return (unsigned)(_AP_ROOT_op_get_range(d_bv.V, l_index, h_index)); - } - - INLINE long to_long() const { - return (long)(_AP_ROOT_op_get_range(d_bv.V, l_index, h_index)); - } - - INLINE unsigned long to_ulong() const { - return (unsigned long)(_AP_ROOT_op_get_range(d_bv.V, l_index, h_index)); - } - - INLINE ap_slong to_int64() const { - return (ap_slong)(_AP_ROOT_op_get_range(d_bv.V, l_index, h_index)); - } - - INLINE ap_ulong to_uint64() const { - return (ap_ulong)(_AP_ROOT_op_get_range(d_bv.V, l_index, h_index)); - } - - INLINE ap_int_base<_AP_W, false> get() const { - ap_int_base<_AP_W, false> ret; - ret.V = _AP_ROOT_op_get_range(d_bv.V, l_index, h_index); - return ret; - } - - template - INLINE void set(const ap_int_base<_AP_W2, false>& val) { - d_bv.V = _AP_ROOT_op_set_range(d_bv.V, l_index, h_index, val.V); - } - - INLINE int length() const { - return h_index >= l_index ? 
h_index - l_index + 1 : l_index - h_index + 1; - } - -#ifndef __SYNTHESIS__ - std::string to_string(signed char rd = 2) const { - ap_int_base<_AP_W, false> ret; - ret.V = _AP_ROOT_op_get_range(d_bv.V, l_index, h_index); - return ret.to_string(rd); - } -#else - // XXX HLS will delete this in synthesis - INLINE char* to_string(signed char rd = 2) const { - return 0; - } -#endif -}; // struct af_range_ref - -// XXX apcc cannot handle global std::ios_base::Init() brought in by -#ifndef AP_AUTOCC -#ifndef __SYNTHESIS__ -template -INLINE std::ostream& operator<<( - std::ostream& os, - const af_range_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& x) { - os << x.to_string(); - return os; -} -#endif -#endif // ifndef AP_AUTOCC - -#define AF_REF_REL_OP_WITH_INT(REL_OP, C_TYPE, _AP_W2, _AP_S2) \ - template \ - INLINE bool operator REL_OP( \ - const af_range_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& op, \ - C_TYPE op2) { \ - return ap_int_base<_AP_W, false>(op) \ - REL_OP ap_int_base<_AP_W2, _AP_S2>(op2); \ - } \ - \ - template \ - INLINE bool operator REL_OP( \ - C_TYPE op2, \ - const af_range_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& op) { \ - return ap_int_base<_AP_W2, _AP_S2>(op2) \ - REL_OP ap_int_base<_AP_W, false>(op); \ - } \ - \ - template \ - INLINE bool operator REL_OP( \ - const af_bit_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& op, \ - C_TYPE op2) { \ - return bool(op) REL_OP op2; \ - } \ - \ - template \ - INLINE bool operator REL_OP( \ - C_TYPE op2, \ - const af_bit_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& op) { \ - return op2 REL_OP bool(op); \ - } - -#define AF_REF_REL_OPS_WITH_INT(C_TYPE, _AP_W2, _AP_S2) \ - AF_REF_REL_OP_WITH_INT(>, C_TYPE, (_AP_W2), (_AP_S2)) \ - AF_REF_REL_OP_WITH_INT(<, C_TYPE, (_AP_W2), (_AP_S2)) \ - AF_REF_REL_OP_WITH_INT(>=, C_TYPE, (_AP_W2), (_AP_S2)) \ - AF_REF_REL_OP_WITH_INT(<=, C_TYPE, (_AP_W2), (_AP_S2)) \ - AF_REF_REL_OP_WITH_INT(==, C_TYPE, (_AP_W2), (_AP_S2)) \ - AF_REF_REL_OP_WITH_INT(!=, C_TYPE, (_AP_W2), (_AP_S2)) - -AF_REF_REL_OPS_WITH_INT(bool, 1, false) -AF_REF_REL_OPS_WITH_INT(char, 8, CHAR_IS_SIGNED) -AF_REF_REL_OPS_WITH_INT(signed char, 8, true) -AF_REF_REL_OPS_WITH_INT(unsigned char, 8, false) -AF_REF_REL_OPS_WITH_INT(short, _AP_SIZE_short, true) -AF_REF_REL_OPS_WITH_INT(unsigned short, _AP_SIZE_short, false) -AF_REF_REL_OPS_WITH_INT(int, _AP_SIZE_int, true) -AF_REF_REL_OPS_WITH_INT(unsigned int, _AP_SIZE_int, false) -AF_REF_REL_OPS_WITH_INT(long, _AP_SIZE_long, true) -AF_REF_REL_OPS_WITH_INT(unsigned long, _AP_SIZE_long, false) -AF_REF_REL_OPS_WITH_INT(ap_slong, _AP_SIZE_ap_slong, true) -AF_REF_REL_OPS_WITH_INT(ap_ulong, _AP_SIZE_ap_slong, false) - -#undef AF_REF_REL_OP_INT -#undef AF_REF_REL_OPS_WITH_INT - -#define AF_REF_REL_OP_WITH_AP_INT(REL_OP) \ - template \ - INLINE bool operator REL_OP( \ - const af_range_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& op, \ - const ap_int_base<_AP_W2, _AP_S>& op2) { \ - return ap_int_base<_AP_W, false>(op) REL_OP op2; \ - } \ - template \ - INLINE bool operator REL_OP( \ - const ap_int_base<_AP_W2, _AP_S2>& op2, \ - const af_range_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& op) { \ - return op2 REL_OP ap_int_base<_AP_W, false>(op); \ - } \ - template \ - INLINE bool operator REL_OP( \ - const af_bit_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N>& op, \ - const ap_int_base<_AP_W2, _AP_S2>& op2) { \ - return ap_int_base<1, false>(op) REL_OP op2; \ - } \ - template \ - INLINE bool operator REL_OP( \ - const ap_int_base<_AP_W2, _AP_S2>& op2, \ - const af_bit_ref<_AP_W, _AP_I, _AP_S, 
_AP_Q, _AP_O, _AP_N>& op) { \ - return op2 REL_OP ap_int_base<1, false>(op); \ - } - -AF_REF_REL_OP_WITH_AP_INT(>) -AF_REF_REL_OP_WITH_AP_INT(<) -AF_REF_REL_OP_WITH_AP_INT(>=) -AF_REF_REL_OP_WITH_AP_INT(<=) -AF_REF_REL_OP_WITH_AP_INT(==) -AF_REF_REL_OP_WITH_AP_INT(!=) - -#endif // ifndef __cplusplus - -#endif // ifndef __AP_FIXED_REF_H__ - -// -*- cpp -*- diff --git a/hls4ml/hls4ml/templates/vivado/ap_types/ap_fixed_special.h b/hls4ml/hls4ml/templates/vivado/ap_types/ap_fixed_special.h deleted file mode 100644 index 0f7a9f7..0000000 --- a/hls4ml/hls4ml/templates/vivado/ap_types/ap_fixed_special.h +++ /dev/null @@ -1,230 +0,0 @@ -/* - * Copyright 2011-2019 Xilinx, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef __AP_FIXED_SPECIAL_H__ -#define __AP_FIXED_SPECIAL_H__ - -#ifndef __AP_FIXED_H__ -#error "Only ap_fixed.h and ap_int.h can be included directly in user code." -#endif - -#ifndef __SYNTHESIS__ -#include -#include -#endif -// FIXME AP_AUTOCC cannot handle many standard headers, so declare instead of -// include. -// #include -namespace std { -template class complex; -} - -/* - TODO: Modernize the code using C++11/C++14 - 1. constexpr http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2016/p0415r0.html - 2. move constructor -*/ - -namespace std { -/* - Specialize std::complex to zero initialization ap_fixed. - - To reduce the area cost, ap_fixed is not zero initialized, just like basic - types float or double. However, libstdc++ provides specialization for float, - double and long double, initializing image part to 0 when not specified. - - This has become a difficulty in switching legacy code from these C types to - ap_fixed. To ease the tranform of legacy code, we have to implement - specialization of std::complex<> for our type. - - As ap_fixed is a template, it is impossible to specialize only the methods - that causes default initialization of value type in std::complex<>. An - explicit full specialization of the template class has to be done, covering - all the member functions and operators of std::complex<> as specified - in standard 26.2.4 and 26.2.5. -*/ -template -class complex > { - public: - typedef ap_fixed<_AP_W, _AP_I, _AP_Q, _AP_O, _AP_N> _Tp; - typedef _Tp value_type; - - // 26.2.4/1 - // Constructor without argument - // Default initialize, so that in dataflow, the variable is only written once. - complex() : _M_real(_Tp()), _M_imag(_Tp()) {} - // Constructor with ap_fixed. 
- // Zero initialize image part when not specified, so that `C(1) == C(1,0)` - complex(const _Tp &__r, const _Tp &__i = _Tp(0)) - : _M_real(__r), _M_imag(__i) {} - - // Constructor with another complex number - template - complex(const complex<_Up> &__z) : _M_real(__z.real()), _M_imag(__z.imag()) {} - -#if __cplusplus >= 201103L - const _Tp& real() const { return _M_real; } - const _Tp& imag() const { return _M_imag; } -#else - _Tp& real() { return _M_real; } - const _Tp& real() const { return _M_real; } - _Tp& imag() { return _M_imag; } - const _Tp& imag() const { return _M_imag; } -#endif - - void real(_Tp __val) { _M_real = __val; } - - void imag(_Tp __val) { _M_imag = __val; } - - // Assign this complex number with ap_fixed. - // Zero initialize image poarrt, so that `C c; c = 1; c == C(1,0);` - complex<_Tp> &operator=(const _Tp __t) { - _M_real = __t; - _M_imag = _Tp(0); - return *this; - } - - // 26.2.5/1 - // Add ap_fixed to this complex number. - complex<_Tp> &operator+=(const _Tp &__t) { - _M_real += __t; - return *this; - } - - // 26.2.5/3 - // Subtract ap_fixed from this complex number. - complex<_Tp> &operator-=(const _Tp &__t) { - _M_real -= __t; - return *this; - } - - // 26.2.5/5 - // Multiply this complex number by ap_fixed. - complex<_Tp> &operator*=(const _Tp &__t) { - _M_real *= __t; - _M_imag *= __t; - return *this; - } - - // 26.2.5/7 - // Divide this complex number by ap_fixed. - complex<_Tp> &operator/=(const _Tp &__t) { - _M_real /= __t; - _M_imag /= __t; - return *this; - } - - // Assign complex number to this complex number. - template - complex<_Tp> &operator=(const complex<_Up> &__z) { - _M_real = __z.real(); - _M_imag = __z.imag(); - return *this; - } - - // 26.2.5/9 - // Add complex number to this. - template - complex<_Tp> &operator+=(const complex<_Up> &__z) { - _M_real += __z.real(); - _M_imag += __z.imag(); - return *this; - } - - // 26.2.5/11 - // Subtract complex number from this. - template - complex<_Tp> &operator-=(const complex<_Up> &__z) { - _M_real -= __z.real(); - _M_imag -= __z.imag(); - return *this; - } - - // 26.2.5/13 - // Multiply this by complex number. - template - complex<_Tp> &operator*=(const complex<_Up> &__z) { - const _Tp __r = _M_real * __z.real() - _M_imag * __z.imag(); - _M_imag = _M_real * __z.imag() + _M_imag * __z.real(); - _M_real = __r; - return *this; - } - - // 26.2.5/15 - // Divide this by complex number. - template - complex<_Tp> &operator/=(const complex<_Up> &__z) { - complex<_Tp> cj (__z.real(), -__z.imag()); - complex<_Tp> a = (*this) * cj; - complex<_Tp> b = cj * __z; - _M_real = a.real() / b.real(); - _M_imag = a.imag() / b.real(); - return *this; - } - - private: - _Tp _M_real; - _Tp _M_imag; - -}; // class complex > - -/* - Non-member operations - These operations are not required by standard in 26.2.6, but libstdc++ - defines them for - float, double or long double's specialization. -*/ -// Compare complex number with ap_fixed. -template -inline bool operator==( - const complex > &__x, - const ap_fixed<_AP_W, _AP_I, _AP_Q, _AP_O, _AP_N> &__y) { - return __x.real() == __y && - __x.imag() == 0; -} - -// Compare ap_fixed with complex number. -template -inline bool operator==( - const ap_fixed<_AP_W, _AP_I, _AP_Q, _AP_O, _AP_N> &__x, - const complex > &__y) { - return __x == __y.real() && - 0 == __y.imag(); -} - -// Compare complex number with ap_fixed. 
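[A minimal check of the zero-initialization guarantee that the std::complex specialization above is written to preserve; same include-path assumption as the earlier sketches.]

    #include <ap_fixed.h>
    #include <complex>
    #include <cassert>

    int main() {
        typedef ap_fixed<16, 8> T;
        std::complex<T> c(T(1));   // imaginary part zero-initialized: c == (1, 0)
        assert(c.imag() == T(0));
        c = T(2);                  // scalar assignment also clears the imaginary part
        c *= T(3);                 // scales both parts
        assert(c.real() == T(6) && c.imag() == T(0));
        assert(c == T(6));         // non-member operator== with ap_fixed
        return 0;
    }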
-template -inline bool operator!=( - const complex > &__x, - const ap_fixed<_AP_W, _AP_I, _AP_Q, _AP_O, _AP_N> &__y) { - return __x.real() != __y || - __x.imag() != 0; -} - -// Compare ap_fixed with complex number. -template -inline bool operator!=( - const ap_fixed<_AP_W, _AP_I, _AP_Q, _AP_O, _AP_N> &__x, - const complex > &__y) { - return __x != __y.real() || - 0 != __y.imag(); -} - -} // namespace std - -#endif // ifndef __AP_FIXED_SPECIAL_H__ - -// -*- cpp -*- diff --git a/hls4ml/hls4ml/templates/vivado/ap_types/ap_int.h b/hls4ml/hls4ml/templates/vivado/ap_types/ap_int.h deleted file mode 100644 index db3044d..0000000 --- a/hls4ml/hls4ml/templates/vivado/ap_types/ap_int.h +++ /dev/null @@ -1,330 +0,0 @@ -/* - * Copyright 2011-2019 Xilinx, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef __AP_INT_H__ -#define __AP_INT_H__ - -#include -#include -#include - -//--------------------------------------------------------------- - -/// Sign Arbitrary Precision Type. -template -struct ap_int : ap_int_base<_AP_W, true> { - typedef ap_int_base<_AP_W, true> Base; - // Constructor - INLINE ap_int() : Base() {} - - // Copy ctor - INLINE ap_int(const ap_int& op) { Base::V = op.V; } - - template - INLINE ap_int(const ap_int<_AP_W2>& op) { - Base::V = op.V; - } - - template - INLINE ap_int(const volatile ap_int<_AP_W2>& op) { - Base::V = op.V; - } - - template - INLINE ap_int(const ap_uint<_AP_W2>& op) { - Base::V = op.V; - } - - template - INLINE ap_int(const volatile ap_uint<_AP_W2>& op) { - Base::V = op.V; - } - - template - INLINE ap_int(const ap_range_ref<_AP_W2, _AP_S2>& ref) : Base(ref) {} - - template - INLINE ap_int(const ap_bit_ref<_AP_W2, _AP_S2>& ref) : Base(ref) {} - - template - INLINE ap_int(const ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3>& ref) - : Base(ref) {} - - template - INLINE ap_int(const ap_fixed<_AP_W2, _AP_I2, _AP_Q2, _AP_O2, _AP_N2>& op) - : Base((ap_fixed_base<_AP_W2, _AP_I2, true, _AP_Q2, _AP_O2, _AP_N2>)op) {} - - template - INLINE ap_int(const ap_ufixed<_AP_W2, _AP_I2, _AP_Q2, _AP_O2, _AP_N2>& op) - : Base((ap_fixed_base<_AP_W2, _AP_I2, false, _AP_Q2, _AP_O2, _AP_N2>)op) { - } - - template - INLINE ap_int( - const volatile ap_fixed<_AP_W2, _AP_I2, _AP_Q2, _AP_O2, _AP_N2>& op) - : Base((ap_fixed_base<_AP_W2, _AP_I2, true, _AP_Q2, _AP_O2, _AP_N2>)op) {} - - template - INLINE ap_int( - const volatile ap_ufixed<_AP_W2, _AP_I2, _AP_Q2, _AP_O2, _AP_N2>& op) - : Base((ap_fixed_base<_AP_W2, _AP_I2, false, _AP_Q2, _AP_O2, _AP_N2>)op) { - } - - template - INLINE ap_int(const ap_int_base<_AP_W2, _AP_S2>& op) { - Base::V = op.V; - } - - template - INLINE ap_int( - const af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& op) - : Base(op) {} - - template - INLINE ap_int( - const af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& op) - : Base(op) {} - - template - INLINE ap_int( - const ap_fixed_base<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& op) - : Base(op) {} - -#define CTOR(TYPE) \ - INLINE ap_int(TYPE val) { Base::V = val; } - 
CTOR(bool) - CTOR(char) - CTOR(signed char) - CTOR(unsigned char) - CTOR(short) - CTOR(unsigned short) - CTOR(int) - CTOR(unsigned int) - CTOR(long) - CTOR(unsigned long) - CTOR(ap_slong) - CTOR(ap_ulong) -#undef CTOR - ap_int(double val) : Base(val) {} - ap_int(float val) : Base(val) {} -#if _AP_ENABLE_HALF_ == 1 - ap_int(half val) : Base(val) {} -#endif - - // ap_int_base will guess radix if radix is not provided. - INLINE ap_int(const char* s) : Base(s) {} - - INLINE ap_int(const char* s, signed char rd) : Base(s, rd) {} - - // Assignment - /* ctor will be used when right is not of proper type. */ - - INLINE ap_int& operator=(const ap_int<_AP_W>& op2) { - Base::V = op2.V; - return *this; - } - - /* cannot bind volatile reference to non-volatile type. */ - INLINE ap_int& operator=(const volatile ap_int<_AP_W>& op2) { - Base::V = op2.V; - return *this; - } - - /* cannot return volatile *this. */ - INLINE void operator=(const ap_int<_AP_W>& op2) volatile { Base::V = op2.V; } - - INLINE void operator=(const volatile ap_int<_AP_W>& op2) volatile { - Base::V = op2.V; - } - -}; // struct ap_int. - -//--------------------------------------------------------------- - -/// Unsigned Arbitrary Precision Type. -template -struct ap_uint : ap_int_base<_AP_W, false> { - typedef ap_int_base<_AP_W, false> Base; - // Constructor - INLINE ap_uint() : Base() {} - - // Copy ctor - INLINE ap_uint(const ap_uint& op) { Base::V = op.V; } - - template - INLINE ap_uint(const ap_uint<_AP_W2>& op) { - Base::V = op.V; - } - - template - INLINE ap_uint(const ap_int<_AP_W2>& op) { - Base::V = op.V; - } - - template - INLINE ap_uint(const volatile ap_uint<_AP_W2>& op) { - Base::V = op.V; - } - - template - INLINE ap_uint(const volatile ap_int<_AP_W2>& op) { - Base::V = op.V; - } - - template - INLINE ap_uint(const ap_range_ref<_AP_W2, _AP_S2>& ref) : Base(ref) {} - - template - INLINE ap_uint(const ap_bit_ref<_AP_W2, _AP_S2>& ref) : Base(ref) {} - - template - INLINE ap_uint(const ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3>& ref) - : Base(ref) {} - - template - INLINE ap_uint(const ap_fixed<_AP_W2, _AP_I2, _AP_Q2, _AP_O2, _AP_N2>& op) - : Base((ap_fixed_base<_AP_W2, _AP_I2, true, _AP_Q2, _AP_O2, _AP_N2>)op) {} - - template - INLINE ap_uint(const ap_ufixed<_AP_W2, _AP_I2, _AP_Q2, _AP_O2, _AP_N2>& op) - : Base((ap_fixed_base<_AP_W2, _AP_I2, false, _AP_Q2, _AP_O2, _AP_N2>)op) { - } - - template - INLINE ap_uint( - const volatile ap_fixed<_AP_W2, _AP_I2, _AP_Q2, _AP_O2, _AP_N2>& op) - : Base((ap_fixed_base<_AP_W2, _AP_I2, true, _AP_Q2, _AP_O2, _AP_N2>)op) {} - - template - INLINE ap_uint( - const volatile ap_ufixed<_AP_W2, _AP_I2, _AP_Q2, _AP_O2, _AP_N2>& op) - : Base((ap_fixed_base<_AP_W2, _AP_I2, false, _AP_Q2, _AP_O2, _AP_N2>)op) { - } - - template - INLINE ap_uint(const ap_int_base<_AP_W2, _AP_S2>& op) { - Base::V = op.V; - } - - template - INLINE ap_uint( - const af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& op) - : Base(op) {} - - template - INLINE ap_uint( - const af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& op) - : Base(op) {} - - template - INLINE ap_uint( - const ap_fixed_base<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& op) - : Base(op) {} - -#define CTOR(TYPE) \ - INLINE ap_uint(TYPE val) { Base::V = val; } - CTOR(bool) - CTOR(char) - CTOR(signed char) - CTOR(unsigned char) - CTOR(short) - CTOR(unsigned short) - CTOR(int) - CTOR(unsigned int) - CTOR(long) - CTOR(unsigned long) - CTOR(ap_slong) - CTOR(ap_ulong) -#undef CTOR - ap_uint(double val) : Base(val) {} - ap_uint(float 
val) : Base(val) {} -#if _AP_ENABLE_HALF_ == 1 - ap_uint(half val) : Base(val) {} -#endif - - // ap_int_base will guess radix if radix is not provided. - INLINE ap_uint(const char* s) : Base(s) {} - - INLINE ap_uint(const char* s, signed char rd) : Base(s, rd) {} - - // Assignment - /* XXX ctor will be used when right is not of proper type. */ - - INLINE ap_uint& operator=(const ap_uint<_AP_W>& op2) { - Base::V = op2.V; - return *this; - } - - /* cannot bind volatile reference to non-volatile type. */ - INLINE ap_uint& operator=(const volatile ap_uint<_AP_W>& op2) { - Base::V = op2.V; - return *this; - } - - /* cannot return volatile *this. */ - INLINE void operator=(const ap_uint<_AP_W>& op2) volatile { Base::V = op2.V; } - - INLINE void operator=(const volatile ap_uint<_AP_W>& op2) volatile { - Base::V = op2.V; - } - -}; // struct ap_uint. - -#define ap_bigint ap_int -#define ap_biguint ap_uint - -#if !defined(__SYNTHESIS__) && (defined(SYSTEMC_H) || defined(SYSTEMC_INCLUDED)) -// XXX sc_trace overload for ap_fixed is already included in -// "ap_sysc/ap_sc_extras.h", so do not define in synthesis. -template -INLINE void sc_trace(sc_core::sc_trace_file* tf, const ap_int<_AP_W>& op, - const std::string& name) { - if (tf) tf->trace(sc_dt::sc_lv<_AP_W>(op.to_string(2).c_str()), name); -} - -template -INLINE void sc_trace(sc_core::sc_trace_file* tf, const ap_uint<_AP_W>& op, - const std::string& name) { - if (tf) tf->trace(sc_dt::sc_lv<_AP_W>(op.to_string(2).c_str()), name); -} -#endif // System C sim - -#include - -#endif // ifndef __AP_INT_H__ else - -// FIXME user should include ap_fixed.h when using ap_fixed. -// to avoid circular inclusion, must check whether this is required by -// ap_fixed.h -#ifndef __AP_FIXED_H__ -#include -#endif - -// -*- cpp -*- diff --git a/hls4ml/hls4ml/templates/vivado/ap_types/ap_int_base.h b/hls4ml/hls4ml/templates/vivado/ap_types/ap_int_base.h deleted file mode 100644 index 091552a..0000000 --- a/hls4ml/hls4ml/templates/vivado/ap_types/ap_int_base.h +++ /dev/null @@ -1,1885 +0,0 @@ -/* - * Copyright 2011-2019 Xilinx, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef __AP_INT_BASE_H__ -#define __AP_INT_BASE_H__ - -#ifndef __AP_INT_H__ -#error "Only ap_fixed.h and ap_int.h can be included directly in user code." -#endif - -#ifndef __cplusplus -#error "C++ is required to include this header file" -#else - -#include -#ifndef __SYNTHESIS__ -#if _AP_ENABLE_HALF_ == 1 -#include -#endif -#include -#include -#endif - -/* ---------------------------------------------------------------- - * ap_int_base: AutoPilot integer/Arbitrary precision integer. - * ---------------------------------------------------------------- - */ - -/* helper trait. Selecting the smallest C type that can hold the value, - * return 64 bit C type if not possible. 
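[The string constructors deleted above guess the radix from the prefix when none is passed; a small sketch, same assumptions as before.]

    #include <ap_int.h>
    #include <cassert>

    int main() {
        ap_int<12>  a("0x7FF");    // radix guessed from the 0x prefix
        ap_uint<12> b("2047");     // plain digits parse as decimal
        ap_int<12>  c("7FF", 16);  // or pass the radix explicitly
        assert(a == b && b == c);
        assert(a.to_int() == 2047);
        return 0;
    }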
- */ -template -struct retval; - -// at least 64 bit -template -struct retval<_AP_N, true> { - typedef ap_slong Type; -}; - -template -struct retval<_AP_N, false> { - typedef ap_ulong Type; -}; - -// at least 8 bit -template <> -struct retval<1, true> { - typedef signed char Type; -}; - -template <> -struct retval<1, false> { - typedef unsigned char Type; -}; - -// at least 16 bit -template <> -struct retval<2, true> { - typedef short Type; -}; - -template <> -struct retval<2, false> { - typedef unsigned short Type; -}; - -// at least 32 bit -template <> -struct retval<3, true> { - typedef long Type; -}; - -template <> -struct retval<3, false> { - typedef unsigned long Type; -}; - -template <> -struct retval<4, true> { - typedef long Type; -}; - -template <> -struct retval<4, false> { - typedef unsigned long Type; -}; - -// trait for letting base class to return derived class. -// Notice that derived class template is incomplete, and we cannot use -// the member of the derived class. -template -struct _ap_int_factory; -template -struct _ap_int_factory<_AP_W2,true> { typedef ap_int<_AP_W2> type; }; -template -struct _ap_int_factory<_AP_W2,false> { typedef ap_uint<_AP_W2> type; }; - -template -struct ap_int_base : public _AP_ROOT_TYPE<_AP_W, _AP_S> { - public: - typedef _AP_ROOT_TYPE<_AP_W, _AP_S> Base; - - /* ap_int_base<_AP_W, _AP_S, true> - * typedef typename retval<(_AP_W + 7) / 8, _AP_S>::Type RetType; - * - * ap_int_base<_AP_W, _AP_S, false> - * typedef typename retval<8, _AP_S>::Type RetType; - */ - typedef typename retval::Type RetType; - - static const int width = _AP_W; - - template - struct RType { - enum { - mult_w = _AP_W + _AP_W2, - mult_s = _AP_S || _AP_S2, - plus_w = - AP_MAX(_AP_W + (_AP_S2 && !_AP_S), _AP_W2 + (_AP_S && !_AP_S2)) + 1, - plus_s = _AP_S || _AP_S2, - minus_w = - AP_MAX(_AP_W + (_AP_S2 && !_AP_S), _AP_W2 + (_AP_S && !_AP_S2)) + 1, - minus_s = true, - div_w = _AP_W + _AP_S2, - div_s = _AP_S || _AP_S2, - mod_w = AP_MIN(_AP_W, _AP_W2 + (!_AP_S2 && _AP_S)), - mod_s = _AP_S, - logic_w = AP_MAX(_AP_W + (_AP_S2 && !_AP_S), _AP_W2 + (_AP_S && !_AP_S2)), - logic_s = _AP_S || _AP_S2 - }; - - - typedef ap_int_base mult_base; - typedef ap_int_base plus_base; - typedef ap_int_base minus_base; - typedef ap_int_base logic_base; - typedef ap_int_base div_base; - typedef ap_int_base mod_base; - typedef ap_int_base<_AP_W, _AP_S> arg1_base; - - typedef typename _ap_int_factory::type mult; - typedef typename _ap_int_factory::type plus; - typedef typename _ap_int_factory::type minus; - typedef typename _ap_int_factory::type logic; - typedef typename _ap_int_factory::type div; - typedef typename _ap_int_factory::type mod; - typedef typename _ap_int_factory<_AP_W, _AP_S>::type arg1; - typedef bool reduce; - }; - - /* Constructors. - * ---------------------------------------------------------------- - */ - /// default ctor - INLINE ap_int_base() { - /* - #ifdef __SC_COMPATIBLE__ - Base::V = 0; - #endif - */ - } - - /// copy ctor - template - INLINE ap_int_base(const ap_int_base<_AP_W2, _AP_S2>& op) { - Base::V = op.V; - } - - /// volatile copy ctor - template - INLINE ap_int_base(const volatile ap_int_base<_AP_W2, _AP_S2>& op) { - Base::V = op.V; - } - -// XXX C++11 feature. -// The explicit specifier specifies that a constructor or conversion function -// (since C++11) doesn't allow implicit conversions or copy-initialization. -// ap_int_base x = 1; -// ap_int_base foo() { return 1; } -// but allows -// ap_int_base x(1); -// ap_int_base y {1}; - -/// from all c types. 
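[The RType trait above widens each result type so the operation cannot overflow; the widths are observable through length(). A sketch under the same assumptions.]

    #include <ap_int.h>
    #include <cassert>

    int main() {
        ap_int<8> a = 100, b = 100;
        assert((a + b).length() == 9);   // plus_w = max(W1, W2) + 1
        assert((a * b).length() == 16);  // mult_w = W1 + W2
        assert((a * b).to_int() == 10000);  // no overflow at 8x8 -> 16 bits
        return 0;
    }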
-#define CTOR_FROM_INT(Type, Size, Signed) \ - INLINE ap_int_base(const Type op) { Base::V = op; } - - CTOR_FROM_INT(bool, 1, false) - CTOR_FROM_INT(char, 8, CHAR_IS_SIGNED) - CTOR_FROM_INT(signed char, 8, true) - CTOR_FROM_INT(unsigned char, 8, false) - CTOR_FROM_INT(short, _AP_SIZE_short, true) - CTOR_FROM_INT(unsigned short, _AP_SIZE_short, false) - CTOR_FROM_INT(int, _AP_SIZE_int, true) - CTOR_FROM_INT(unsigned int, _AP_SIZE_int, false) - CTOR_FROM_INT(long, _AP_SIZE_long, true) - CTOR_FROM_INT(unsigned long, _AP_SIZE_long, false) - CTOR_FROM_INT(ap_slong, _AP_SIZE_ap_slong, true) - CTOR_FROM_INT(ap_ulong, _AP_SIZE_ap_slong, false) -#undef CTOR_FROM_INT - -#if _AP_ENABLE_HALF_ == 1 - /// ctor from half. - // TODO optimize - INLINE ap_int_base(half op) { - ap_int_base<_AP_W, _AP_S> t((float)op); - Base::V = t.V; - } -#endif - - /// ctor from float. - INLINE ap_int_base(float op) { - const int BITS = FLOAT_MAN + FLOAT_EXP + 1; - ap_int_base reg; - reg.V = floatToRawBits(op); - bool is_neg = _AP_ROOT_op_get_bit(reg.V, BITS - 1); - - ap_int_base exp = 0; - exp.V = _AP_ROOT_op_get_range(reg.V, FLOAT_MAN, BITS - 2); - exp = exp - FLOAT_BIAS; - - ap_int_base man; - man.V = _AP_ROOT_op_get_range(reg.V, 0, FLOAT_MAN - 1); - // check for NaN - _AP_WARNING(exp == ((unsigned char)(FLOAT_BIAS + 1)) && man.V != 0, - "assign NaN to ap integer value"); - // set leading 1. - man.V = _AP_ROOT_op_set_bit(man.V, FLOAT_MAN, 1); - //if (is_neg) man = -man; - - if ((reg.V & 0x7ffffffful) == 0) { - Base::V = 0; - } else { - int sh_amt = FLOAT_MAN - exp.V; - if (sh_amt == 0) { - Base::V = man.V; - } else if (sh_amt > 0) { - if (sh_amt < FLOAT_MAN + 2) { - Base::V = man.V >> sh_amt; - } else { - if (is_neg) - Base::V = -1; - else - Base::V = 0; - } - } else { - sh_amt = -sh_amt; - if (sh_amt < _AP_W) { - Base::V = man.V; - Base::V <<= sh_amt; - } else { - Base::V = 0; - } - } - } - if (is_neg) *this = -(*this); - } - - /// ctor from double. - INLINE ap_int_base(double op) { - const int BITS = DOUBLE_MAN + DOUBLE_EXP + 1; - ap_int_base reg; - reg.V = doubleToRawBits(op); - bool is_neg = _AP_ROOT_op_get_bit(reg.V, BITS - 1); - - ap_int_base exp = 0; - exp.V = _AP_ROOT_op_get_range(reg.V, DOUBLE_MAN, BITS - 2); - exp = exp - DOUBLE_BIAS; - - ap_int_base man; - man.V = _AP_ROOT_op_get_range(reg.V, 0, DOUBLE_MAN - 1); - // check for NaN - _AP_WARNING(exp == ((unsigned char)(DOUBLE_BIAS + 1)) && man.V != 0, - "assign NaN to ap integer value"); - // set leading 1. - man.V = _AP_ROOT_op_set_bit(man.V, DOUBLE_MAN, 1); - //if (is_neg) man = -man; - - if ((reg.V & 0x7fffffffffffffffull) == 0) { - Base::V = 0; - } else { - int sh_amt = DOUBLE_MAN - exp.V; - if (sh_amt == 0) { - Base::V = man.V; - } else if (sh_amt > 0) { - if (sh_amt < DOUBLE_MAN + 2) { - Base::V = man.V >> sh_amt; - } else { - if (is_neg) - Base::V = -1; - else - Base::V = 0; - } - } else { - sh_amt = -sh_amt; - if (sh_amt < _AP_W) { - Base::V = man.V; - Base::V <<= sh_amt; - } else { - Base::V = 0; - } - } - } - if (is_neg) *this = -(*this); - } - - /// from higer rank type. 
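[The float/double constructors above extract the exponent and mantissa and keep the integer part; for in-range values this behaves like a C cast, truncating toward zero. A sketch, same assumptions.]

    #include <ap_int.h>
    #include <cassert>

    int main() {
        ap_int<16> p(3.75);
        ap_int<16> n(-3.75);
        assert(p.to_int() == 3);   // fraction dropped
        assert(n.to_int() == -3);  // magnitude truncated, then negated
        return 0;
    }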
- template - INLINE ap_int_base( - const ap_fixed_base<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& op) { - Base::V = op.to_ap_int_base().V; - } - - template - INLINE ap_int_base(const ap_range_ref<_AP_W2, _AP_S2>& ref) { - Base::V = (ref.get()).V; - } - - template - INLINE ap_int_base(const ap_bit_ref<_AP_W2, _AP_S2>& ref) { - Base::V = ref.operator bool(); - } - - template - INLINE ap_int_base(const ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3>& ref) { - const ap_int_base::_AP_WR, - false> - tmp = ref.get(); - Base::V = tmp.V; - } - - /* radix has default value in set */ - -#ifndef __SYNTHESIS__ - INLINE ap_int_base(const char* s, signed char rd = 0) { - if (rd == 0) - rd = guess_radix(s); - unsigned int length = strlen(s); - Base::V.fromString(s, length, rd); - } -#else - // XXX __builtin_bit_from_string(...) requires const C string and radix. - INLINE ap_int_base(const char* s) { - typeof(Base::V) t; - _ssdm_string2bits((void*)(&t), (const char*)(s), 10, _AP_W, _AP_S, - AP_TRN, AP_WRAP, 0, _AP_C99); - Base::V = t; - } - INLINE ap_int_base(const char* s, signed char rd) { - typeof(Base::V) t; - _ssdm_string2bits((void*)(&t), (const char*)(s), rd, _AP_W, _AP_S, - AP_TRN, AP_WRAP, 0, _AP_C99); - Base::V = t; - } -#endif - - template - INLINE ap_int_base( - const af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& val) { - Base::V = (val.get()).V; - } - - template - INLINE ap_int_base( - const af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& val) { - Base::V = val.operator bool(); - } - - INLINE ap_int_base read() volatile { - /*AP_DEBUG(printf("call read %d\n", Base::V););*/ - ap_int_base ret; - ret.V = Base::V; - return ret; - } - - INLINE void write(const ap_int_base<_AP_W, _AP_S>& op2) volatile { - /*AP_DEBUG(printf("call write %d\n", op2.V););*/ - Base::V = op2.V; - } - - /* Another form of "write".*/ - template - INLINE void operator=( - const volatile ap_int_base<_AP_W2, _AP_S2>& op2) volatile { - Base::V = op2.V; - } - - INLINE void operator=( - const volatile ap_int_base<_AP_W, _AP_S>& op2) volatile { - Base::V = op2.V; - } - - template - INLINE void operator=(const ap_int_base<_AP_W2, _AP_S2>& op2) volatile { - Base::V = op2.V; - } - - INLINE void operator=(const ap_int_base<_AP_W, _AP_S>& op2) volatile { - Base::V = op2.V; - } - - template - INLINE ap_int_base& operator=( - const volatile ap_int_base<_AP_W2, _AP_S2>& op2) { - Base::V = op2.V; - return *this; - } - - template - INLINE ap_int_base& operator=(const ap_int_base<_AP_W2, _AP_S2>& op2) { - Base::V = op2.V; - return *this; - } - - INLINE ap_int_base& operator=(const volatile ap_int_base<_AP_W, _AP_S>& op2) { - Base::V = op2.V; - return *this; - } - - INLINE ap_int_base& operator=(const ap_int_base<_AP_W, _AP_S>& op2) { - Base::V = op2.V; - return *this; - } - - -#define ASSIGN_OP_FROM_INT(Type, Size, Signed) \ - INLINE ap_int_base& operator=(Type op) { \ - Base::V = op; \ - return *this; \ - } - - ASSIGN_OP_FROM_INT(bool, 1, false) - ASSIGN_OP_FROM_INT(char, 8, CHAR_IS_SIGNED) - ASSIGN_OP_FROM_INT(signed char, 8, true) - ASSIGN_OP_FROM_INT(unsigned char, 8, false) - ASSIGN_OP_FROM_INT(short, _AP_SIZE_short, true) - ASSIGN_OP_FROM_INT(unsigned short, _AP_SIZE_short, false) - ASSIGN_OP_FROM_INT(int, _AP_SIZE_int, true) - ASSIGN_OP_FROM_INT(unsigned int, _AP_SIZE_int, false) - ASSIGN_OP_FROM_INT(long, _AP_SIZE_long, true) - ASSIGN_OP_FROM_INT(unsigned long, _AP_SIZE_long, false) - ASSIGN_OP_FROM_INT(ap_slong, _AP_SIZE_ap_slong, true) - ASSIGN_OP_FROM_INT(ap_ulong, _AP_SIZE_ap_slong, false) - 
-#undef ASSIGN_OP_FROM_INT - - template - INLINE ap_int_base& operator=(const ap_bit_ref<_AP_W2, _AP_S2>& op2) { - Base::V = (bool)op2; - return *this; - } - - template - INLINE ap_int_base& operator=(const ap_range_ref<_AP_W2, _AP_S2>& op2) { - Base::V = (ap_int_base<_AP_W2, false>(op2)).V; - return *this; - } - - template - INLINE ap_int_base& operator=( - const ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3>& op2) { - Base::V = op2.get().V; - return *this; - } - - template - INLINE ap_int_base& operator=( - const ap_fixed_base<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& op) { - Base::V = op.to_ap_int_base().V; - return *this; - } - - template - INLINE ap_int_base& operator=( - const af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& op) { - Base::V = (bool)op; - return *this; - } - - template - INLINE ap_int_base& operator=( - const af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& op) { - Base::V = ((const ap_int_base<_AP_W2, false>)(op)).V; - return *this; - } - - // FIXME: UG902 has clearly required user to use to_int() to convert to built-in - // types, but this implicit conversion is relied on in hls_cordic.h and hls_rsr.h. - // For example: - // int d_exp = fps_x.exp - fps_y.exp; - INLINE operator RetType() const { return (RetType)(Base::V); } - - /* Explicit conversions to C types. - * ---------------------------------------------------------------- - */ - INLINE bool to_bool() const { return (bool)(Base::V); } - INLINE char to_char() const { return (char)(Base::V); } - INLINE signed char to_schar() const { return (signed char)(Base::V); } - INLINE unsigned char to_uchar() const { return (unsigned char)(Base::V); } - INLINE short to_short() const { return (short)(Base::V); } - INLINE unsigned short to_ushort() const { return (unsigned short)(Base::V); } - INLINE int to_int() const { return (int)(Base::V); } - INLINE unsigned to_uint() const { return (unsigned)(Base::V); } - INLINE long to_long() const { return (long)(Base::V); } - INLINE unsigned long to_ulong() const { return (unsigned long)(Base::V); } - INLINE ap_slong to_int64() const { return (ap_slong)(Base::V); } - INLINE ap_ulong to_uint64() const { return (ap_ulong)(Base::V); } - INLINE float to_float() const { return (float)(Base::V); } - INLINE double to_double() const { return (double)(Base::V); } - - // TODO decide if user-defined conversion should be provided. -#if 0 - INLINE operator char() const { return (char)(Base::V); } - INLINE operator signed char() const { return (signed char)(Base::V); } - INLINE operator unsigned char() const { return (unsigned char)(Base::V); } - INLINE operator short() const { return (short)(Base::V); } - INLINE operator unsigned short() const { return (unsigned short)(Base::V); } - INLINE operator int() const { return (int)(Base::V); } - INLINE operator unsigned int () const { return (unsigned)(Base::V); } - INLINE operator long () const { return (long)(Base::V); } - INLINE operator unsigned long () const { return (unsigned long)(Base::V); } - INLINE operator ap_slong () { return (ap_slong)(Base::V); } - INLINE operator ap_ulong () { return (ap_ulong)(Base::V); } -#endif - - /* Helper methods. - ---------------------------------------------------------------- - */ - /* we cannot call a non-volatile function on a volatile instance. - * but calling a volatile function is ok. - * XXX deleted non-volatile version. 
- */ - INLINE int length() const volatile { return _AP_W; } - - /*Return true if the value of ap_int_base instance is zero*/ - INLINE bool iszero() const { return Base::V == 0; } - - /*Return true if the value of ap_int_base instance is zero*/ - INLINE bool is_zero() const { return Base::V == 0; } - - /* x < 0 */ - INLINE bool sign() const { - if (_AP_S && - _AP_ROOT_op_get_bit(Base::V, _AP_W - 1)) - return true; - else - return false; - } - - /* x[i] = 0 */ - INLINE void clear(int i) { - AP_ASSERT(i >= 0 && i < _AP_W, "position out of range"); - Base::V = _AP_ROOT_op_set_bit(Base::V, i, 0); - } - - /* x[i] = !x[i]*/ - INLINE void invert(int i) { - AP_ASSERT(i >= 0 && i < _AP_W, "position out of range"); - bool val = _AP_ROOT_op_get_bit(Base::V, i); - if (val) - Base::V = _AP_ROOT_op_set_bit(Base::V, i, 0); - else - Base::V = _AP_ROOT_op_set_bit(Base::V, i, 1); - } - - INLINE bool test(int i) const { - AP_ASSERT(i >= 0 && i < _AP_W, "position out of range"); - return _AP_ROOT_op_get_bit(Base::V, i); - } - - // Get self. For ap_concat_ref expansion. - INLINE ap_int_base& get() { return *this; } - - // Set the ith bit into 1 - INLINE void set(int i) { - AP_ASSERT(i >= 0 && i < _AP_W, "position out of range"); - Base::V = _AP_ROOT_op_set_bit(Base::V, i, 1); - } - - // Set the ith bit into v - INLINE void set(int i, bool v) { - AP_ASSERT(i >= 0 && i < _AP_W, "position out of range"); - Base::V = _AP_ROOT_op_set_bit(Base::V, i, v); - } - - // This is used for sc_lv and sc_bv, which is implemented by sc_uint - // Rotate an ap_int_base object n places to the left - INLINE ap_int_base& lrotate(int n) { - AP_ASSERT(n >= 0 && n < _AP_W, "shift value out of range"); - // TODO unify this. -#ifdef __SYNTHESIS__ - typeof(Base::V) l_p = Base::V << n; - typeof(Base::V) r_p = Base::V >> (_AP_W - n); - Base::V = l_p | r_p; -#else - Base::V.lrotate(n); -#endif - return *this; - } - - // This is used for sc_lv and sc_bv, which is implemented by sc_uint - // Rotate an ap_int_base object n places to the right - INLINE ap_int_base& rrotate(int n) { - AP_ASSERT(n >= 0 && n < _AP_W, "shift value out of range"); - // TODO unify this. -#ifdef __SYNTHESIS__ - typeof(Base::V) l_p = Base::V << (_AP_W - n); - typeof(Base::V) r_p = Base::V >> n; - Base::V = l_p | r_p; -#else - Base::V.rrotate(n); -#endif - return *this; - } - - // Reverse the contents of ap_int_base instance. - // I.e. LSB becomes MSB and vise versa. - INLINE ap_int_base& reverse() { - Base::V = _AP_ROOT_op_get_range(Base::V, _AP_W - 1, 0); - return *this; - } - - // Set the ith bit into v - INLINE void set_bit(int i, bool v) { - Base::V = _AP_ROOT_op_set_bit(Base::V, i, v); - } - - // Get the value of ith bit - INLINE bool get_bit(int i) const { - return (bool)_AP_ROOT_op_get_bit(Base::V, i); - } - - // complements every bit - INLINE void b_not() { Base::V = ~Base::V; } - -#define OP_ASSIGN_AP(Sym) \ - template \ - INLINE ap_int_base& operator Sym(const ap_int_base<_AP_W2, _AP_S2>& op2) { \ - Base::V Sym op2.V; \ - return *this; \ - } - - /* Arithmetic assign. - * ---------------------------------------------------------------- - */ - OP_ASSIGN_AP(*=) - OP_ASSIGN_AP(+=) - OP_ASSIGN_AP(-=) - OP_ASSIGN_AP(/=) - OP_ASSIGN_AP(%=) -#undef OP_ASSIGN_AP - - /* Bitwise assign: and, or, xor. 
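[A quick tour of the single-bit helpers above (test, invert, clear, set, lrotate, reverse), same assumptions as the earlier sketches.]

    #include <ap_int.h>
    #include <cassert>

    int main() {
        ap_uint<8> x = 0x81;       // 1000_0001
        assert(x.test(7) && x.test(0));
        x.invert(0);               // 1000_0000
        x.clear(7);                // 0000_0000
        x.set(4);                  // 0001_0000
        x.lrotate(1);              // 0010_0000
        assert(x.to_uint() == 0x20);
        x.reverse();               // 0000_0100, LSB becomes MSB
        assert(x.to_uint() == 0x04);
        return 0;
    }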
- * ---------------------------------------------------------------- - */ -#define OP_ASSIGN_AP_CHK(Sym) \ - template \ - INLINE ap_int_base& operator Sym(const ap_int_base<_AP_W2, _AP_S2>& op2) { \ - _AP_WARNING((_AP_W != _AP_W2), \ - "Bitsize mismatch for ap_[u]int" #Sym "ap_[u]int."); \ - Base::V Sym op2.V; \ - return *this; \ - } - OP_ASSIGN_AP_CHK(&=) - OP_ASSIGN_AP_CHK(|=) - OP_ASSIGN_AP_CHK(^=) -#undef OP_ASSIGN_AP_CHK - - /* Prefix increment, decrement. - * ---------------------------------------------------------------- - */ - INLINE ap_int_base& operator++() { - operator+=((ap_int_base<1, false>)1); - return *this; - } - INLINE ap_int_base& operator--() { - operator-=((ap_int_base<1, false>)1); - return *this; - } - - /* Postfix increment, decrement - * ---------------------------------------------------------------- - */ - INLINE const typename RType<_AP_W,_AP_S>::arg1 operator++(int) { - ap_int_base t = *this; - operator+=((ap_int_base<1, false>)1); - return t; - } - INLINE const typename RType<_AP_W,_AP_S>::arg1 operator--(int) { - ap_int_base t = *this; - operator-=((ap_int_base<1, false>)1); - return t; - } - - /* Unary arithmetic. - * ---------------------------------------------------------------- - */ - INLINE typename RType<_AP_W,_AP_S>::arg1 operator+() const { return *this; } - - // TODO used to be W>64 only... need check. - INLINE typename RType<1, false>::minus operator-() const { - return ap_int_base<1, false>(0) - *this; - } - - /* Not (!) - * ---------------------------------------------------------------- - */ - INLINE bool operator!() const { return Base::V == 0; } - - /* Bitwise (arithmetic) unary: complement - ---------------------------------------------------------------- - */ - // XXX different from Mentor's ac_int! - INLINE typename RType<_AP_W,_AP_S>::arg1 operator~() const { - ap_int_base<_AP_W, _AP_S> r; - r.V = ~Base::V; - return r; - } - - /* Shift (result constrained by left operand). - * ---------------------------------------------------------------- - */ - template - INLINE typename RType<_AP_W,_AP_S>::arg1 operator<<(const ap_int_base<_AP_W2, true>& op2) const { - bool isNeg = _AP_ROOT_op_get_bit(op2.V, _AP_W2 - 1); - ap_int_base<_AP_W2, false> sh = op2; - if (isNeg) { - sh = -op2; - return operator>>(sh); - } else - return operator<<(sh); - } - - template - INLINE typename RType<_AP_W,_AP_S>::arg1 operator<<(const ap_int_base<_AP_W2, false>& op2) const { - ap_int_base r; - r.V = Base::V << op2.to_uint(); - return r; - } - - template - INLINE typename RType<_AP_W,_AP_S>::arg1 operator>>(const ap_int_base<_AP_W2, true>& op2) const { - bool isNeg = _AP_ROOT_op_get_bit(op2.V, _AP_W2 - 1); - ap_int_base<_AP_W2, false> sh = op2; - if (isNeg) { - sh = -op2; - return operator<<(sh); - } - return operator>>(sh); - } - - template - INLINE typename RType<_AP_W,_AP_S>::arg1 operator>>(const ap_int_base<_AP_W2, false>& op2) const { - ap_int_base r; - r.V = Base::V >> op2.to_uint(); - return r; - } - - // FIXME we standalone operator>> for ap_int_base and ap_range_ref. 
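[The shift operators above flip direction when the signed shift amount is negative, and the result keeps the left operand's width. A sketch, same assumptions.]

    #include <ap_int.h>
    #include <cassert>

    int main() {
        ap_uint<8> x = 0x0F;
        ap_int<4> sh = -2;
        assert((x << sh).to_uint() == 0x03);  // << -2 acts as >> 2
        assert((x >> sh).to_uint() == 0x3C);  // >> -2 acts as << 2
        ap_uint<3> two = 2;
        assert((x << two).to_uint() == 0x3C); // result stays 8 bits wide
        return 0;
    }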
-#if 0 - template - INLINE ap_int_base operator<<(const ap_range_ref<_AP_W2, _AP_S2>& op2) const { - return *this << (op2.operator ap_int_base<_AP_W2, false>()); - } - - template - INLINE ap_int_base operator>>(const ap_range_ref<_AP_W2, _AP_S2>& op2) const { - return *this >> (op2.operator ap_int_base<_AP_W2, false>()); - } -#endif - - /* Shift assign - * ---------------------------------------------------------------- - */ - template - INLINE ap_int_base& operator<<=(const ap_int_base<_AP_W2, true>& op2) { - bool isNeg = _AP_ROOT_op_get_bit(op2.V, _AP_W2 - 1); - ap_int_base<_AP_W2, false> sh = op2; - if (isNeg) { - sh = -op2; - return operator>>=(sh); - } else - return operator<<=(sh); - } - - template - INLINE ap_int_base& operator<<=(const ap_int_base<_AP_W2, false>& op2) { - Base::V <<= op2.to_uint(); - return *this; - } - - template - INLINE ap_int_base& operator>>=(const ap_int_base<_AP_W2, true>& op2) { - bool isNeg = _AP_ROOT_op_get_bit(op2.V, _AP_W2 - 1); - ap_int_base<_AP_W2, false> sh = op2; - if (isNeg) { - sh = -op2; - return operator<<=(sh); - } - return operator>>=(sh); - } - - template - INLINE ap_int_base& operator>>=(const ap_int_base<_AP_W2, false>& op2) { - Base::V >>= op2.to_uint(); - return *this; - } - - // FIXME we standalone operator>> for ap_int_base and ap_range_ref. -#if 0 - template - INLINE ap_int_base& operator<<=(const ap_range_ref<_AP_W2, _AP_S2>& op2) { - return *this <<= (op2.operator ap_int_base<_AP_W2, false>()); - } - template - INLINE ap_int_base& operator>>=(const ap_range_ref<_AP_W2, _AP_S2>& op2) { - return *this >>= (op2.operator ap_int_base<_AP_W2, false>()); - } -#endif - - /* Equality and Relational. - * ---------------------------------------------------------------- - */ - template - INLINE bool operator==(const ap_int_base<_AP_W2, _AP_S2>& op2) const { - return Base::V == op2.V; - } - template - INLINE bool operator!=(const ap_int_base<_AP_W2, _AP_S2>& op2) const { - return !(Base::V == op2.V); - } - template - INLINE bool operator<(const ap_int_base<_AP_W2, _AP_S2>& op2) const { - return Base::V < op2.V; - } - template - INLINE bool operator>=(const ap_int_base<_AP_W2, _AP_S2>& op2) const { - return Base::V >= op2.V; - } - template - INLINE bool operator>(const ap_int_base<_AP_W2, _AP_S2>& op2) const { - return Base::V > op2.V; - } - template - INLINE bool operator<=(const ap_int_base<_AP_W2, _AP_S2>& op2) const { - return Base::V <= op2.V; - } - - /* Bit and Part Select - * ---------------------------------------------------------------- - */ - INLINE ap_range_ref<_AP_W, _AP_S> range(int Hi, int Lo) { - _AP_ERROR(Hi >= _AP_W, "Hi(%d)out of bound(%d) in range()", Hi, _AP_W); - _AP_ERROR(Lo >= _AP_W, "Lo(%d)out of bound(%d) in range()", Lo, _AP_W); - return ap_range_ref<_AP_W, _AP_S>(this, Hi, Lo); - } - - // This is a must to strip constness to produce reference type. 
- INLINE ap_range_ref<_AP_W, _AP_S> range(int Hi, int Lo) const { - _AP_ERROR(Hi >= _AP_W, "Hi(%d)out of bound(%d) in range()", Hi, _AP_W); - _AP_ERROR(Lo >= _AP_W, "Lo(%d)out of bound(%d) in range()", Lo, _AP_W); - return ap_range_ref<_AP_W, _AP_S>(const_cast(this), Hi, Lo); - } - - template - INLINE ap_range_ref<_AP_W, _AP_S> range( - const ap_int_base<_AP_W2, _AP_S2>& HiIdx, - const ap_int_base<_AP_W3, _AP_S3>& LoIdx) { - int Hi = HiIdx.to_int(); - int Lo = LoIdx.to_int(); - return this->range(Hi, Lo); - } - - template - INLINE ap_range_ref<_AP_W, _AP_S> range( - const ap_int_base<_AP_W2, _AP_S2>& HiIdx, - const ap_int_base<_AP_W3, _AP_S3>& LoIdx) const { - int Hi = HiIdx.to_int(); - int Lo = LoIdx.to_int(); - return this->range(Hi, Lo); - } - - INLINE ap_range_ref<_AP_W, _AP_S> range() { - return this->range(_AP_W - 1, 0); - } - - INLINE ap_range_ref<_AP_W, _AP_S> range() const { - return this->range(_AP_W - 1, 0); - } - - INLINE ap_range_ref<_AP_W, _AP_S> operator()(int Hi, int Lo) { - return this->range(Hi, Lo); - } - - INLINE ap_range_ref<_AP_W, _AP_S> operator()(int Hi, int Lo) const { - return this->range(Hi, Lo); - } - - template - INLINE ap_range_ref<_AP_W, _AP_S> operator()( - const ap_int_base<_AP_W2, _AP_S2>& HiIdx, - const ap_int_base<_AP_W3, _AP_S3>& LoIdx) { - int Hi = HiIdx.to_int(); - int Lo = LoIdx.to_int(); - return this->range(Hi, Lo); - } - - template - INLINE ap_range_ref<_AP_W, _AP_S> operator()( - const ap_int_base<_AP_W2, _AP_S2>& HiIdx, - const ap_int_base<_AP_W3, _AP_S3>& LoIdx) const { - int Hi = HiIdx.to_int(); - int Lo = LoIdx.to_int(); - return this->range(Hi, Lo); - } - -#if 0 - template - INLINE ap_int_base slice() const { - AP_ASSERT(Hi >= Lo && Hi < _AP_W && Lo < _AP_W, "Out of bounds in slice()"); - ap_int_base tmp ; - tmp.V = _AP_ROOT_op_get_range(Base::V, Lo, Hi); - return tmp; - } - - INLINE ap_bit_ref<_AP_W,_AP_S> operator [] ( unsigned int uindex) { - AP_ASSERT(uindex < _AP_W, "Attempting to read bit beyond MSB"); - ap_bit_ref<_AP_W,_AP_S> bvh( this, uindex ); - return bvh; - } -#endif - - INLINE ap_bit_ref<_AP_W, _AP_S> operator[](int index) { - AP_ASSERT(index >= 0, "Attempting to read bit with negative index"); - AP_ASSERT(index < _AP_W, "Attempting to read bit beyond MSB"); - ap_bit_ref<_AP_W, _AP_S> bvh(this, index); - return bvh; - } - - template - INLINE ap_bit_ref<_AP_W, _AP_S> operator[]( - const ap_int_base<_AP_W2, _AP_S2>& index) { - AP_ASSERT(index >= 0, "Attempting to read bit with negative index"); - AP_ASSERT(index < _AP_W, "Attempting to read bit beyond MSB"); - ap_bit_ref<_AP_W, _AP_S> bvh(this, index.to_int()); - return bvh; - } - - INLINE bool operator[](int index) const { - AP_ASSERT(index >= 0, "Attempting to read bit with negative index"); - AP_ASSERT(index < _AP_W, "Attempting to read bit beyond MSB"); - ap_bit_ref<_AP_W, _AP_S> br(this, index); - return br.to_bool(); - } - template - INLINE bool operator[](const ap_int_base<_AP_W2, _AP_S2>& index) const { - AP_ASSERT(index < _AP_W, "Attempting to read bit beyond MSB"); - ap_bit_ref<_AP_W, _AP_S> br(this, index.to_int()); - return br.to_bool(); - } - - INLINE ap_bit_ref<_AP_W, _AP_S> bit(int index) { - AP_ASSERT(index >= 0, "Attempting to read bit with negative index"); - AP_ASSERT(index < _AP_W, "Attempting to read bit beyond MSB"); - ap_bit_ref<_AP_W, _AP_S> bvh(this, index); - return bvh; - } - template - INLINE ap_bit_ref<_AP_W, _AP_S> bit( - const ap_int_base<_AP_W2, _AP_S2>& index) { - AP_ASSERT(index >= 0, "Attempting to read bit with negative index"); - 
AP_ASSERT(index < _AP_W, "Attempting to read bit beyond MSB"); - ap_bit_ref<_AP_W, _AP_S> bvh(this, index.to_int()); - return bvh; - } - - INLINE bool bit(int index) const { - AP_ASSERT(index >= 0, "Attempting to read bit with negative index"); - AP_ASSERT(index < _AP_W, "Attempting to read bit beyond MSB"); - ap_bit_ref<_AP_W, _AP_S> br(this, index); - return br.to_bool(); - } - - template - INLINE bool bit(const ap_int_base<_AP_W2, _AP_S2>& index) const { - return bit(index.to_int()); - } - -#if 0 - template - INLINE bool operator[](_AP_T index) const { - AP_ASSERT(index < _AP_W, "Attempting to read bit beyond MSB"); - ap_bit_ref<_AP_W,_AP_S> br = operator[](index); - return br.to_bool(); - } -#endif - - // Count the number of zeros from the most significant bit - // to the first one bit. - INLINE int countLeadingZeros() { -#ifdef __SYNTHESIS__ - if (_AP_W <= 32) { - ap_int_base<32, false> t(-1UL), x; - x.V = _AP_ROOT_op_get_range(this->V, _AP_W - 1, 0); // reverse - t.V = _AP_ROOT_op_set_range(t.V, 0, _AP_W - 1, x.V); - return __builtin_ctz(t.V); // count trailing zeros. - } else if (_AP_W <= 64) { - ap_int_base<64, false> t(-1ULL); - ap_int_base<64, false> x; - x.V = _AP_ROOT_op_get_range(this->V, _AP_W - 1, 0); // reverse - t.V = _AP_ROOT_op_set_range(t.V, 0, _AP_W - 1, x.V); - return __builtin_ctzll(t.V); // count trailing zeros. - } else { - enum { __N = (_AP_W + 63) / 64 }; - int NZeros = 0; - int i = 0; - bool hitNonZero = false; - for (i = 0; i < __N - 1; ++i) { - ap_int_base<64, false> t; - t.V = _AP_ROOT_op_get_range(this->V, _AP_W - i * 64 - 64, _AP_W - i * 64 - 1); - NZeros += hitNonZero ? 0 : __builtin_clzll(t.V); // count leading zeros. - hitNonZero |= (t.V != 0); - } - if (!hitNonZero) { - ap_int_base<64, false> t(-1ULL); - enum { REST = (_AP_W - 1) % 64 }; - ap_int_base<64, false> x; - x.V = _AP_ROOT_op_get_range(this->V, 0, REST); - t.V = _AP_ROOT_op_set_range(t.V, 63 - REST, 63, x.V); - NZeros += __builtin_clzll(t.V); - } - return NZeros; - } -#else - return (Base::V).countLeadingZeros(); -#endif - } // countLeadingZeros - - template - INLINE ap_concat_ref<_AP_W, ap_int_base, _AP_W2, ap_int_base<_AP_W2, _AP_S2> > - concat(const ap_int_base<_AP_W2, _AP_S2>& a2) const { - return ap_concat_ref<_AP_W, ap_int_base, _AP_W2, - ap_int_base<_AP_W2, _AP_S2> >( - const_cast&>(*this), - const_cast&>(a2)); - } - - template - INLINE ap_concat_ref<_AP_W, ap_int_base, _AP_W2, ap_int_base<_AP_W2, _AP_S2> > - concat(ap_int_base<_AP_W2, _AP_S2>& a2) { - return ap_concat_ref<_AP_W, ap_int_base, _AP_W2, - ap_int_base<_AP_W2, _AP_S2> >(*this, a2); - } - - template - INLINE - ap_concat_ref<_AP_W, ap_int_base, _AP_W2, ap_range_ref<_AP_W2, _AP_S2> > - operator,(const ap_range_ref<_AP_W2, _AP_S2> &a2) const { - return ap_concat_ref<_AP_W, ap_int_base, _AP_W2, - ap_range_ref<_AP_W2, _AP_S2> >( - const_cast&>(*this), - const_cast&>(a2)); - } - - template - INLINE - ap_concat_ref<_AP_W, ap_int_base, _AP_W2, ap_range_ref<_AP_W2, _AP_S2> > - operator,(ap_range_ref<_AP_W2, _AP_S2> &a2) { - return ap_concat_ref<_AP_W, ap_int_base, _AP_W2, - ap_range_ref<_AP_W2, _AP_S2> >(*this, a2); - } - - template - INLINE ap_concat_ref<_AP_W, ap_int_base, _AP_W2, ap_int_base<_AP_W2, _AP_S2> > - operator,(const ap_int_base<_AP_W2, _AP_S2> &a2) { - return ap_concat_ref<_AP_W, ap_int_base, _AP_W2, - ap_int_base<_AP_W2, _AP_S2> >( - *this, const_cast&>(a2)); - } - - template - INLINE ap_concat_ref<_AP_W, ap_int_base, _AP_W2, ap_int_base<_AP_W2, _AP_S2> > - operator,(ap_int_base<_AP_W2, _AP_S2> &a2) const { - return 
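[A sketch of countLeadingZeros() and concat() above. The non-synthesis branch defers to the underlying simulation type, which is assumed here to agree with the synthesis-side arithmetic; same include-path assumption.]

    #include <ap_int.h>
    #include <cassert>

    int main() {
        ap_uint<8> x = 0x10;                 // 0001_0000
        assert(x.countLeadingZeros() == 3);
        ap_uint<4> hi = 0xA, lo = 0x5;
        ap_uint<8> y = hi.concat(lo);        // hi lands in the MSBs, as with (hi, lo)
        assert(y.to_uint() == 0xA5);
        return 0;
    }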
ap_concat_ref<_AP_W, ap_int_base, _AP_W2, - ap_int_base<_AP_W2, _AP_S2> >( - const_cast&>(*this), a2); - } - - template - INLINE ap_concat_ref<_AP_W, ap_int_base, _AP_W2, ap_int_base<_AP_W2, _AP_S2> > - operator,(const ap_int_base<_AP_W2, _AP_S2> &a2) const { - return ap_concat_ref<_AP_W, ap_int_base, _AP_W2, - ap_int_base<_AP_W2, _AP_S2> >( - const_cast&>(*this), - const_cast&>(a2)); - } - - template - INLINE ap_concat_ref<_AP_W, ap_int_base, _AP_W2, ap_int_base<_AP_W2, _AP_S2> > - operator,(ap_int_base<_AP_W2, _AP_S2> &a2) { - return ap_concat_ref<_AP_W, ap_int_base, _AP_W2, - ap_int_base<_AP_W2, _AP_S2> >(*this, a2); - } - - template - INLINE ap_concat_ref<_AP_W, ap_int_base, 1, ap_bit_ref<_AP_W2, _AP_S2> > - operator,(const ap_bit_ref<_AP_W2, _AP_S2> &a2) const { - return ap_concat_ref<_AP_W, ap_int_base, 1, ap_bit_ref<_AP_W2, _AP_S2> >( - const_cast&>(*this), - const_cast&>(a2)); - } - - template - INLINE ap_concat_ref<_AP_W, ap_int_base, 1, ap_bit_ref<_AP_W2, _AP_S2> > - operator,(ap_bit_ref<_AP_W2, _AP_S2> &a2) { - return ap_concat_ref<_AP_W, ap_int_base, 1, ap_bit_ref<_AP_W2, _AP_S2> >( - *this, a2); - } - - template - INLINE ap_concat_ref<_AP_W, ap_int_base, _AP_W2 + _AP_W3, - ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3> > - operator,(const ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3> &a2) { - return ap_concat_ref<_AP_W, ap_int_base, _AP_W2 + _AP_W3, - ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3> >( - const_cast&>(*this), - const_cast&>(a2)); - } - - template - INLINE ap_concat_ref<_AP_W, ap_int_base, _AP_W2 + _AP_W3, - ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3> > - operator,(ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3> &a2) { - return ap_concat_ref<_AP_W, ap_int_base, _AP_W2 + _AP_W3, - ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3> >(*this, - a2); - } - - template - INLINE ap_concat_ref< - _AP_W, ap_int_base, _AP_W2, - af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> > - operator,(const af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> - &a2) const { - return ap_concat_ref< - _AP_W, ap_int_base, _AP_W2, - af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> >( - const_cast&>(*this), - const_cast< - af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>&>(a2)); - } - - template - INLINE ap_concat_ref< - _AP_W, ap_int_base, _AP_W2, - af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> > - operator,(af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> &a2) { - return ap_concat_ref< - _AP_W, ap_int_base, _AP_W2, - af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> >(*this, - a2); - } - - template - INLINE - ap_concat_ref<_AP_W, ap_int_base, 1, - af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> > - operator,(const af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> - &a2) const { - return ap_concat_ref< - _AP_W, ap_int_base, 1, - af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> >( - const_cast&>(*this), - const_cast&>( - a2)); - } - - template - INLINE - ap_concat_ref<_AP_W, ap_int_base, 1, - af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> > - operator,( - af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> &a2) { - return ap_concat_ref< - _AP_W, ap_int_base, 1, - af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> >(*this, a2); - } - - template - INLINE ap_int_base operator&( - const ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3>& a2) { - return *this & a2.get(); - } - - template - INLINE ap_int_base operator|( - const ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, 
_AP_T3>& a2) { - return *this | a2.get(); - } - - template - INLINE ap_int_base operator^( - const ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3>& a2) { - return *this ^ a2.get(); - } - - template - INLINE void set(const ap_int_base<_AP_W3, false>& val) { - Base::V = val.V; - } - - /* Reduce operations. - * ---------------------------------------------------------------- - */ - // XXX non-const version deleted. - INLINE bool and_reduce() const { return _AP_ROOT_op_reduce(and, Base::V); } - INLINE bool nand_reduce() const { return _AP_ROOT_op_reduce(nand, Base::V); } - INLINE bool or_reduce() const { return _AP_ROOT_op_reduce(or, Base::V); } - INLINE bool nor_reduce() const { return !(_AP_ROOT_op_reduce(or, Base::V)); } - INLINE bool xor_reduce() const { return _AP_ROOT_op_reduce (xor, Base::V); } - INLINE bool xnor_reduce() const { - return !(_AP_ROOT_op_reduce (xor, Base::V)); - } - - /* Output as a string. - * ---------------------------------------------------------------- - */ -#ifndef __SYNTHESIS__ - std::string to_string(signed char rd = 2, bool sign = _AP_S) const { - // XXX in autosim/autowrap.tcl "(${name}).to_string(2).c_str()" is used to - // initialize sc_lv, which seems incapable of handling format "-0b". - if (rd == 2) sign = false; - return (Base::V).to_string(rd, sign); - } -#else - INLINE char* to_string(signed char rd = 2, bool sign = _AP_S) const { - return 0; - } -#endif -}; // struct ap_int_base - -// XXX apcc cannot handle global std::ios_base::Init() brought in by -#ifndef AP_AUTOCC -#ifndef __SYNTHESIS__ -template -INLINE std::ostream& operator<<(std::ostream& os, - const ap_int_base<_AP_W, _AP_S>& x) { - std::ios_base::fmtflags ff = std::cout.flags(); - if (ff & std::cout.hex) { - os << x.to_string(16); // don't print sign - } else if (ff & std::cout.oct) { - os << x.to_string(8); // don't print sign - } else { - os << x.to_string(10); - } - return os; -} -#endif // ifndef __SYNTHESIS__ - -#ifndef __SYNTHESIS__ -template -INLINE std::istream& operator>>(std::istream& in, - ap_int_base<_AP_W, _AP_S>& op) { - std::string str; - in >> str; - const std::ios_base::fmtflags basefield = in.flags() & std::ios_base::basefield; - unsigned radix = (basefield == std::ios_base::dec) ? 0 : ( - (basefield == std::ios_base::oct) ? 8 : ( - (basefield == std::ios_base::hex) ? 16 : 0)); - op = ap_int_base<_AP_W, _AP_S>(str.c_str(), radix); - return in; -} -#endif // ifndef __SYNTHESIS__ -#endif // ifndef AP_AUTOCC - -/* Operators with another ap_int_base. 
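The reduce operations shown above collapse a whole ap_int_base into a single bit (AND/OR/XOR across every bit position, plus their negations); xor_reduce() is the usual parity check. A small sketch, again assuming ap_int.h is available:

    #include "ap_int.h"
    #include <cassert>

    int main() {
        ap_uint<4> v = 0xB;        // binary 1011
        assert(v.or_reduce());     // at least one bit is set
        assert(!v.and_reduce());   // but not all four
        assert(v.xor_reduce());    // three set bits -> odd parity
        assert(!v.nor_reduce());   // complement of or_reduce
        return 0;
    }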
- * ---------------------------------------------------------------- - */ -#define OP_BIN_AP(Sym, Rty) \ - template \ - INLINE \ - typename ap_int_base<_AP_W, _AP_S>::template RType<_AP_W2, _AP_S2>::Rty \ - operator Sym(const ap_int_base<_AP_W, _AP_S>& op, \ - const ap_int_base<_AP_W2, _AP_S2>& op2) { \ - typename ap_int_base<_AP_W, _AP_S>::template RType< \ - _AP_W2, _AP_S2>::Rty##_base lhs(op); \ - typename ap_int_base<_AP_W, _AP_S>::template RType< \ - _AP_W2, _AP_S2>::Rty##_base rhs(op2); \ - typename ap_int_base<_AP_W, _AP_S>::template RType< \ - _AP_W2, _AP_S2>::Rty##_base ret; \ - ret.V = lhs.V Sym rhs.V; \ - return ret; \ - } - -OP_BIN_AP(*, mult) -OP_BIN_AP(+, plus) -OP_BIN_AP(-, minus) -OP_BIN_AP(&, logic) -OP_BIN_AP(|, logic) -OP_BIN_AP(^, logic) - -#define OP_BIN_AP2(Sym, Rty) \ - template \ - INLINE \ - typename ap_int_base<_AP_W, _AP_S>::template RType<_AP_W2, _AP_S2>::Rty \ - operator Sym(const ap_int_base<_AP_W, _AP_S>& op, \ - const ap_int_base<_AP_W2, _AP_S2>& op2) { \ - typename ap_int_base<_AP_W, _AP_S>::template RType< \ - _AP_W2, _AP_S2>::Rty##_base ret; \ - ret.V = op.V Sym op2.V; \ - return ret; \ - } - -OP_BIN_AP2(/, div) -OP_BIN_AP2(%, mod) - -// shift operators are defined inside class. -// compound assignment operators are defined inside class. - -/* Operators with a pointer type. - * ---------------------------------------------------------------- - * char a[100]; - * char* ptr = a; - * ap_int<2> n = 3; - * char* ptr2 = ptr + n*2; - * avoid ambiguous errors. - */ -#define OP_BIN_WITH_PTR(BIN_OP) \ - template \ - INLINE PTR_TYPE* operator BIN_OP(PTR_TYPE* i_op, \ - const ap_int_base<_AP_W, _AP_S>& op) { \ - ap_slong op2 = op.to_int64(); /* Not all implementation */ \ - return i_op BIN_OP op2; \ - } \ - template \ - INLINE PTR_TYPE* operator BIN_OP(const ap_int_base<_AP_W, _AP_S>& op, \ - PTR_TYPE* i_op) { \ - ap_slong op2 = op.to_int64(); /* Not all implementation */ \ - return op2 BIN_OP i_op; \ - } - -OP_BIN_WITH_PTR(+) -OP_BIN_WITH_PTR(-) - -/* Operators with a native floating point types. - * ---------------------------------------------------------------- - */ -// float OP ap_int -// when ap_int's width > 64, then trunc ap_int to ap_int<64> -#define OP_BIN_WITH_FLOAT(BIN_OP, C_TYPE) \ - template \ - INLINE C_TYPE operator BIN_OP(C_TYPE i_op, \ - const ap_int_base<_AP_W, _AP_S>& op) { \ - typename ap_int_base<_AP_W, _AP_S>::RetType op2 = op; \ - return i_op BIN_OP op2; \ - } \ - template \ - INLINE C_TYPE operator BIN_OP(const ap_int_base<_AP_W, _AP_S>& op, \ - C_TYPE i_op) { \ - typename ap_int_base<_AP_W, _AP_S>::RetType op2 = op; \ - return op2 BIN_OP i_op; \ - } - -#define ALL_OP_WITH_FLOAT(C_TYPE) \ - OP_BIN_WITH_FLOAT(*, C_TYPE) \ - OP_BIN_WITH_FLOAT(/, C_TYPE) \ - OP_BIN_WITH_FLOAT(+, C_TYPE) \ - OP_BIN_WITH_FLOAT(-, C_TYPE) - -#if _AP_ENABLE_HALF_ == 1 -ALL_OP_WITH_FLOAT(half) -#endif -ALL_OP_WITH_FLOAT(float) -ALL_OP_WITH_FLOAT(double) - -// TODO no shift? - -/* Operators with a native integral types. - * ---------------------------------------------------------------- - */ -// arithmetic and bitwise operators. 
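The OP_BIN_AP/OP_BIN_AP2 macros above route each binary operator through the RType trait, so the result is computed at a widened width instead of being truncated to either operand. An illustration of the effect, assuming ap_int.h is available (the widths below follow the usual ap_int promotion rules: plus grows by one bit, mult adds the operand widths):

    #include "ap_int.h"

    int main() {
        ap_int<8> a = 100, b = 100;
        ap_int<9>  sum  = a + b;  // 200 needs 9 signed bits, no overflow
        ap_int<16> prod = a * b;  // 10000 needs 16 signed bits
        return (sum == 200 && prod == 10000) ? 0 : 1;
    }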
-#define OP_BIN_WITH_INT(BIN_OP, C_TYPE, _AP_W2, _AP_S2, RTYPE) \ - template \ - INLINE typename ap_int_base<_AP_W, _AP_S>::template RType<_AP_W2, \ - _AP_S2>::RTYPE \ - operator BIN_OP(C_TYPE i_op, const ap_int_base<_AP_W, _AP_S>& op) { \ - return ap_int_base<_AP_W2, _AP_S2>(i_op) BIN_OP(op); \ - } \ - template \ - INLINE typename ap_int_base<_AP_W, _AP_S>::template RType<_AP_W2, \ - _AP_S2>::RTYPE \ - operator BIN_OP(const ap_int_base<_AP_W, _AP_S>& op, C_TYPE i_op) { \ - return op BIN_OP ap_int_base<_AP_W2, _AP_S2>(i_op); \ - } - -#define ALL_OP_BIN_WITH_INT(C_TYPE, _AP_W2, _AP_S2) \ - OP_BIN_WITH_INT(*, C_TYPE, _AP_W2, _AP_S2, mult) \ - OP_BIN_WITH_INT(+, C_TYPE, _AP_W2, _AP_S2, plus) \ - OP_BIN_WITH_INT(-, C_TYPE, _AP_W2, _AP_S2, minus) \ - OP_BIN_WITH_INT(/, C_TYPE, _AP_W2, _AP_S2, div) \ - OP_BIN_WITH_INT(%, C_TYPE, _AP_W2, _AP_S2, mod) \ - OP_BIN_WITH_INT(&, C_TYPE, _AP_W2, _AP_S2, logic) \ - OP_BIN_WITH_INT(|, C_TYPE, _AP_W2, _AP_S2, logic) \ - OP_BIN_WITH_INT(^, C_TYPE, _AP_W2, _AP_S2, logic) - -ALL_OP_BIN_WITH_INT(bool, 1, false) -ALL_OP_BIN_WITH_INT(char, 8, CHAR_IS_SIGNED) -ALL_OP_BIN_WITH_INT(signed char, 8, true) -ALL_OP_BIN_WITH_INT(unsigned char, 8, false) -ALL_OP_BIN_WITH_INT(short, _AP_SIZE_short, true) -ALL_OP_BIN_WITH_INT(unsigned short, _AP_SIZE_short, false) -ALL_OP_BIN_WITH_INT(int, _AP_SIZE_int, true) -ALL_OP_BIN_WITH_INT(unsigned int, _AP_SIZE_int, false) -ALL_OP_BIN_WITH_INT(long, _AP_SIZE_long, true) -ALL_OP_BIN_WITH_INT(unsigned long, _AP_SIZE_long, false) -ALL_OP_BIN_WITH_INT(ap_slong, _AP_SIZE_ap_slong, true) -ALL_OP_BIN_WITH_INT(ap_ulong, _AP_SIZE_ap_slong, false) - -#undef OP_BIN_WITH_INT -#undef ALL_OP_BIN_WITH_INT - -// shift operators. -#define ALL_OP_SHIFT_WITH_INT(C_TYPE, _AP_W2, _AP_S2) \ - template \ - INLINE typename ap_int_base<_AP_W, _AP_S>::template RType<_AP_W,_AP_S>::arg1 operator<<( \ - const ap_int_base<_AP_W, _AP_S>& op, C_TYPE op2) { \ - ap_int_base<_AP_W, _AP_S> r; \ - if (_AP_S2) \ - r.V = op2 >= 0 ? (op.V << op2) : (op.V >> (-op2)); \ - else \ - r.V = op.V << op2; \ - return r; \ - } \ - template \ - INLINE typename ap_int_base<_AP_W, _AP_S>::template RType<_AP_W,_AP_S>::arg1 operator>>( \ - const ap_int_base<_AP_W, _AP_S>& op, C_TYPE op2) { \ - ap_int_base<_AP_W, _AP_S> r; \ - if (_AP_S2) \ - r.V = op2 >= 0 ? 
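ALL_OP_BIN_WITH_INT above wraps every native integral operand in an ap_int_base of the matching width and signedness before applying the ap-typed operator, so mixed expressions behave like two ap_int operands. A sketch assuming ap_int.h:

    #include "ap_int.h"

    int main() {
        ap_uint<12> x = 1000;
        // The int literal is first converted to ap_int_base<32, true>,
        // then the widened ap-typed operator+ runs.
        auto y = x + 24;
        return (y == 1024) ? 0 : 1;
    }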
(op.V >> op2) : (op.V << (-op2)); \ - else \ - r.V = op.V >> op2; \ - return r; \ - } - -ALL_OP_SHIFT_WITH_INT(char, 8, CHAR_IS_SIGNED) -ALL_OP_SHIFT_WITH_INT(signed char, 8, true) -ALL_OP_SHIFT_WITH_INT(short, _AP_SIZE_short, true) -ALL_OP_SHIFT_WITH_INT(int, _AP_SIZE_int, true) -ALL_OP_SHIFT_WITH_INT(long, _AP_SIZE_long, true) -ALL_OP_SHIFT_WITH_INT(ap_slong, _AP_SIZE_ap_slong, true) - -#undef ALL_OP_SHIFT_WITH_INT - -#define ALL_OP_SHIFT_WITH_INT(C_TYPE, _AP_W2, _AP_S2) \ - template \ - INLINE typename ap_int_base<_AP_W, _AP_S>::template RType<_AP_W,_AP_S>::arg1 operator<<( \ - const ap_int_base<_AP_W, _AP_S>& op, C_TYPE op2) { \ - ap_int_base<_AP_W, _AP_S> r; \ - r.V = op.V << op2; \ - return r; \ - } \ - template \ - INLINE typename ap_int_base<_AP_W, _AP_S>::template RType<_AP_W,_AP_S>::arg1 operator>>( \ - const ap_int_base<_AP_W, _AP_S>& op, C_TYPE op2) { \ - ap_int_base<_AP_W, _AP_S> r; \ - r.V = op.V >> op2; \ - return r; \ - } -ALL_OP_SHIFT_WITH_INT(bool, 1, false) -ALL_OP_SHIFT_WITH_INT(unsigned char, 8, false) -ALL_OP_SHIFT_WITH_INT(unsigned short, _AP_SIZE_short, false) -ALL_OP_SHIFT_WITH_INT(unsigned int, _AP_SIZE_int, false) -ALL_OP_SHIFT_WITH_INT(unsigned long, _AP_SIZE_long, false) -ALL_OP_SHIFT_WITH_INT(ap_ulong, _AP_SIZE_ap_slong, false) - -#undef ALL_OP_SHIFT_WITH_INT - -// compound assign operators. -#define OP_ASSIGN_WITH_INT(ASSIGN_OP, C_TYPE, _AP_W2, _AP_S2) \ - template \ - INLINE ap_int_base<_AP_W, _AP_S>& operator ASSIGN_OP( \ - ap_int_base<_AP_W, _AP_S>& op, C_TYPE op2) { \ - return op ASSIGN_OP ap_int_base<_AP_W2, _AP_S2>(op2); \ - } - -// TODO int a; ap_int<16> b; a += b; - -#define ALL_OP_ASSIGN_WITH_INT(C_TYPE, _AP_W2, _AP_S2) \ - OP_ASSIGN_WITH_INT(+=, C_TYPE, _AP_W2, _AP_S2) \ - OP_ASSIGN_WITH_INT(-=, C_TYPE, _AP_W2, _AP_S2) \ - OP_ASSIGN_WITH_INT(*=, C_TYPE, _AP_W2, _AP_S2) \ - OP_ASSIGN_WITH_INT(/=, C_TYPE, _AP_W2, _AP_S2) \ - OP_ASSIGN_WITH_INT(%=, C_TYPE, _AP_W2, _AP_S2) \ - OP_ASSIGN_WITH_INT(&=, C_TYPE, _AP_W2, _AP_S2) \ - OP_ASSIGN_WITH_INT(|=, C_TYPE, _AP_W2, _AP_S2) \ - OP_ASSIGN_WITH_INT(^=, C_TYPE, _AP_W2, _AP_S2) \ - OP_ASSIGN_WITH_INT(>>=, C_TYPE, _AP_W2, _AP_S2) \ - OP_ASSIGN_WITH_INT(<<=, C_TYPE, _AP_W2, _AP_S2) - -ALL_OP_ASSIGN_WITH_INT(bool, 1, false) -ALL_OP_ASSIGN_WITH_INT(char, 8, CHAR_IS_SIGNED) -ALL_OP_ASSIGN_WITH_INT(signed char, 8, true) -ALL_OP_ASSIGN_WITH_INT(unsigned char, 8, false) -ALL_OP_ASSIGN_WITH_INT(short, _AP_SIZE_short, true) -ALL_OP_ASSIGN_WITH_INT(unsigned short, _AP_SIZE_short, false) -ALL_OP_ASSIGN_WITH_INT(int, _AP_SIZE_int, true) -ALL_OP_ASSIGN_WITH_INT(unsigned int, _AP_SIZE_int, false) -ALL_OP_ASSIGN_WITH_INT(long, _AP_SIZE_long, true) -ALL_OP_ASSIGN_WITH_INT(unsigned long, _AP_SIZE_long, false) -ALL_OP_ASSIGN_WITH_INT(ap_slong, _AP_SIZE_ap_slong, true) -ALL_OP_ASSIGN_WITH_INT(ap_ulong, _AP_SIZE_ap_slong, false) - -#undef OP_ASSIGN_WITH_INT -#undef ALL_OP_ASSIGN_WITH_INT - -// equality and relational operators. 
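Note the asymmetry in the shift macros above: for signed shift-amount types the generated operator tests the sign at run time and reverses direction, while the unsigned variants shift unconditionally. A sketch of the signed case, assuming ap_int.h:

    #include "ap_int.h"

    int main() {
        ap_uint<8> x = 0x10;
        int n = -2;
        ap_uint<8> y = x << n;  // negative left-shift amount acts as x >> 2
        return (y == 0x04) ? 0 : 1;
    }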
-#define OP_REL_WITH_INT(REL_OP, C_TYPE, _AP_W2, _AP_S2) \ - template \ - INLINE bool operator REL_OP(C_TYPE i_op, \ - const ap_int_base<_AP_W, _AP_S>& op) { \ - return ap_int_base<_AP_W2, _AP_S2>(i_op) REL_OP op; \ - } \ - template \ - INLINE bool operator REL_OP(const ap_int_base<_AP_W, _AP_S>& op, \ - C_TYPE op2) { \ - return op REL_OP ap_int_base<_AP_W2, _AP_S2>(op2); \ - } - -#define ALL_OP_REL_WITH_INT(C_TYPE, _AP_W2, _AP_S2) \ - OP_REL_WITH_INT(>, C_TYPE, _AP_W2, _AP_S2) \ - OP_REL_WITH_INT(<, C_TYPE, _AP_W2, _AP_S2) \ - OP_REL_WITH_INT(>=, C_TYPE, _AP_W2, _AP_S2) \ - OP_REL_WITH_INT(<=, C_TYPE, _AP_W2, _AP_S2) \ - OP_REL_WITH_INT(==, C_TYPE, _AP_W2, _AP_S2) \ - OP_REL_WITH_INT(!=, C_TYPE, _AP_W2, _AP_S2) - -ALL_OP_REL_WITH_INT(bool, 1, false) -ALL_OP_REL_WITH_INT(char, 8, CHAR_IS_SIGNED) -ALL_OP_REL_WITH_INT(signed char, 8, true) -ALL_OP_REL_WITH_INT(unsigned char, 8, false) -ALL_OP_REL_WITH_INT(short, _AP_SIZE_short, true) -ALL_OP_REL_WITH_INT(unsigned short, _AP_SIZE_short, false) -ALL_OP_REL_WITH_INT(int, _AP_SIZE_int, true) -ALL_OP_REL_WITH_INT(unsigned int, _AP_SIZE_int, false) -ALL_OP_REL_WITH_INT(long, _AP_SIZE_long, true) -ALL_OP_REL_WITH_INT(unsigned long, _AP_SIZE_long, false) -ALL_OP_REL_WITH_INT(ap_slong, _AP_SIZE_ap_slong, true) -ALL_OP_REL_WITH_INT(ap_ulong, _AP_SIZE_ap_slong, false) - -#undef OP_REL_WITH_INT -#undef ALL_OP_BIN_WITH_INT - -#define OP_REL_WITH_DOUBLE_OR_FLOAT(Sym) \ - template \ - INLINE bool operator Sym(const ap_int_base<_AP_W, _AP_S>& op1, \ - double op2) { \ - return op1.to_double() Sym op2 ; \ - } \ - template \ - INLINE bool operator Sym(double op1, \ - const ap_int_base<_AP_W, _AP_S>& op2) { \ - return op1 Sym op2.to_double() ; \ - } \ - template \ - INLINE bool operator Sym(const ap_int_base<_AP_W, _AP_S>& op1, \ - float op2) { \ - return op1.to_double() Sym op2 ; \ - } \ - template \ - INLINE bool operator Sym(float op1, \ - const ap_int_base<_AP_W, _AP_S>& op2) { \ - return op1 Sym op2.to_double() ; \ - } - OP_REL_WITH_DOUBLE_OR_FLOAT(>) - OP_REL_WITH_DOUBLE_OR_FLOAT(<) - OP_REL_WITH_DOUBLE_OR_FLOAT(>=) - OP_REL_WITH_DOUBLE_OR_FLOAT(<=) - OP_REL_WITH_DOUBLE_OR_FLOAT(==) - OP_REL_WITH_DOUBLE_OR_FLOAT(!=) - -#undef OP_REL_WITH_DOUBLE_OR_FLOAT - - -/* Operators with ap_bit_ref. - * ------------------------------------------------------------ - */ -// arithmetic, bitwise and shift operators. -#define OP_BIN_WITH_RANGE(BIN_OP, RTYPE) \ - template \ - INLINE typename ap_int_base<_AP_W1, _AP_S1>::template RType<_AP_W2, \ - _AP_S2>::RTYPE \ - operator BIN_OP(const ap_range_ref<_AP_W1, _AP_S1>& op1, \ - const ap_int_base<_AP_W2, _AP_S2>& op2) { \ - return ap_int_base<_AP_W1, false>(op1) BIN_OP op2; \ - } \ - template \ - INLINE typename ap_int_base<_AP_W1, _AP_S1>::template RType<_AP_W2, \ - _AP_S2>::RTYPE \ - operator BIN_OP(const ap_int_base<_AP_W1, _AP_S1>& op1, \ - const ap_range_ref<_AP_W2, _AP_S2>& op2) { \ - return op1 BIN_OP ap_int_base<_AP_W2, false>(op2); \ - } - -OP_BIN_WITH_RANGE(+, plus) -OP_BIN_WITH_RANGE(-, minus) -OP_BIN_WITH_RANGE(*, mult) -OP_BIN_WITH_RANGE(/, div) -OP_BIN_WITH_RANGE(%, mod) -OP_BIN_WITH_RANGE(&, logic) -OP_BIN_WITH_RANGE(|, logic) -OP_BIN_WITH_RANGE(^, logic) -OP_BIN_WITH_RANGE(>>, arg1) -OP_BIN_WITH_RANGE(<<, arg1) - -#undef OP_BIN_WITH_RANGE - -// compound assignment operators. 
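OP_REL_WITH_DOUBLE_OR_FLOAT above implements float/double comparisons by converting the ap_int side with to_double(), so comparisons stay exact only while the integer fits in a double's 53-bit mantissa. A sketch assuming ap_int.h:

    #include "ap_int.h"

    int main() {
        ap_int<40> big = 1;
        big <<= 30;  // big = 2^30, exactly representable in a double
        return (big == 1073741824.0) ? 0 : 1;
    }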
-#define OP_ASSIGN_WITH_RANGE(ASSIGN_OP) \ - template \ - INLINE ap_int_base<_AP_W1, _AP_S1>& operator ASSIGN_OP( \ - ap_int_base<_AP_W1, _AP_S1>& op1, ap_range_ref<_AP_W2, _AP_S2>& op2) { \ - return op1 ASSIGN_OP ap_int_base<_AP_W2, false>(op2); \ - } \ - template \ - INLINE ap_range_ref<_AP_W1, _AP_S1>& operator ASSIGN_OP( \ - ap_range_ref<_AP_W1, _AP_S1>& op1, ap_int_base<_AP_W2, _AP_S2>& op2) { \ - ap_int_base<_AP_W1, false> tmp(op1); \ - tmp ASSIGN_OP op2; \ - op1 = tmp; \ - return op1; \ - } - -OP_ASSIGN_WITH_RANGE(+=) -OP_ASSIGN_WITH_RANGE(-=) -OP_ASSIGN_WITH_RANGE(*=) -OP_ASSIGN_WITH_RANGE(/=) -OP_ASSIGN_WITH_RANGE(%=) -OP_ASSIGN_WITH_RANGE(&=) -OP_ASSIGN_WITH_RANGE(|=) -OP_ASSIGN_WITH_RANGE(^=) -OP_ASSIGN_WITH_RANGE(>>=) -OP_ASSIGN_WITH_RANGE(<<=) - -#undef OP_ASSIGN_WITH_RANGE - -// equality and relational operators -#define OP_REL_WITH_RANGE(REL_OP) \ - template \ - INLINE bool operator REL_OP(const ap_range_ref<_AP_W1, _AP_S1>& op1, \ - const ap_int_base<_AP_W2, _AP_S2>& op2) { \ - return ap_int_base<_AP_W1, false>(op1).operator REL_OP(op2); \ - } \ - template \ - INLINE bool operator REL_OP(const ap_int_base<_AP_W1, _AP_S1>& op1, \ - const ap_range_ref<_AP_W2, _AP_S2>& op2) { \ - return op1.operator REL_OP(op2.operator ap_int_base<_AP_W2, false>()); \ - } - -OP_REL_WITH_RANGE(==) -OP_REL_WITH_RANGE(!=) -OP_REL_WITH_RANGE(>) -OP_REL_WITH_RANGE(>=) -OP_REL_WITH_RANGE(<) -OP_REL_WITH_RANGE(<=) - -#undef OP_REL_WITH_RANGE - -/* Operators with ap_bit_ref. - * ------------------------------------------------------------ - */ -// arithmetic, bitwise and shift operators. -#define OP_BIN_WITH_BIT(BIN_OP, RTYPE) \ - template \ - INLINE typename ap_int_base<_AP_W1, _AP_S1>::template RType<1, false>::RTYPE \ - operator BIN_OP(const ap_int_base<_AP_W1, _AP_S1>& op1, \ - const ap_bit_ref<_AP_W2, _AP_S2>& op2) { \ - return op1 BIN_OP ap_int_base<1, false>(op2); \ - } \ - template \ - INLINE typename ap_int_base<1, false>::template RType<_AP_W2, _AP_S2>::RTYPE \ - operator BIN_OP(const ap_bit_ref<_AP_W1, _AP_S1>& op1, \ - const ap_int_base<_AP_W2, _AP_S2>& op2) { \ - return ap_int_base<1, false>(op1) BIN_OP op2; \ - } - -OP_BIN_WITH_BIT(+, plus) -OP_BIN_WITH_BIT(-, minus) -OP_BIN_WITH_BIT(*, mult) -OP_BIN_WITH_BIT(/, div) -OP_BIN_WITH_BIT(%, mod) -OP_BIN_WITH_BIT(&, logic) -OP_BIN_WITH_BIT(|, logic) -OP_BIN_WITH_BIT(^, logic) -OP_BIN_WITH_BIT(>>, arg1) -OP_BIN_WITH_BIT(<<, arg1) - -#undef OP_BIN_WITH_BIT - -// compound assignment operators. -#define OP_ASSIGN_WITH_BIT(ASSIGN_OP) \ - template \ - INLINE ap_int_base<_AP_W1, _AP_S1>& operator ASSIGN_OP( \ - ap_int_base<_AP_W1, _AP_S1>& op1, ap_bit_ref<_AP_W2, _AP_S2>& op2) { \ - return op1 ASSIGN_OP ap_int_base<1, false>(op2); \ - } \ - template \ - INLINE ap_bit_ref<_AP_W1, _AP_S1>& operator ASSIGN_OP( \ - ap_bit_ref<_AP_W1, _AP_S1>& op1, ap_int_base<_AP_W2, _AP_S2>& op2) { \ - ap_int_base<1, false> tmp(op1); \ - tmp ASSIGN_OP op2; \ - op1 = tmp; \ - return op1; \ - } - -OP_ASSIGN_WITH_BIT(+=) -OP_ASSIGN_WITH_BIT(-=) -OP_ASSIGN_WITH_BIT(*=) -OP_ASSIGN_WITH_BIT(/=) -OP_ASSIGN_WITH_BIT(%=) -OP_ASSIGN_WITH_BIT(&=) -OP_ASSIGN_WITH_BIT(|=) -OP_ASSIGN_WITH_BIT(^=) -OP_ASSIGN_WITH_BIT(>>=) -OP_ASSIGN_WITH_BIT(<<=) - -#undef OP_ASSIGN_WITH_BIT - -// equality and relational operators. 
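The range-reference operators above let a slice of an ap_int participate on either side of assignments and relational expressions as an unsigned value. A usage sketch, assuming ap_int.h:

    #include "ap_int.h"

    int main() {
        ap_uint<16> word = 0x1234;
        ap_uint<8> hi = word(15, 8);  // the slice reads as unsigned 0x12
        word(7, 0) = hi;              // and writes back through set_range
        return (word == 0x1212) ? 0 : 1;
    }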
-#define OP_REL_WITH_BIT(REL_OP) \ - template \ - INLINE bool operator REL_OP(const ap_int_base<_AP_W1, _AP_S1>& op1, \ - const ap_bit_ref<_AP_W2, _AP_S2>& op2) { \ - return op1 REL_OP ap_int_base<1, false>(op2); \ - } \ - template \ - INLINE bool operator REL_OP(const ap_bit_ref<_AP_W1, _AP_S1>& op1, \ - const ap_int_base<_AP_W2, _AP_S2>& op2) { \ - return ap_int_base<1, false>(op1) REL_OP op2; \ - } - -OP_REL_WITH_BIT(==) -OP_REL_WITH_BIT(!=) -OP_REL_WITH_BIT(>) -OP_REL_WITH_BIT(>=) -OP_REL_WITH_BIT(<) -OP_REL_WITH_BIT(<=) - -#undef OP_REL_WITH_BIT - - -/* Operators with ap_concat_ref. - * ------------------------------------------------------------ - */ -// arithmetic, bitwise and shift operators. -// bitwise operators are defined in struct. -// TODO specify whether to define arithmetic and bitwise operators. -#if 0 -#define OP_BIN_WITH_CONCAT(BIN_OP, RTYPE) \ - template \ - INLINE typename ap_int_base<_AP_W3, _AP_S3>::template RType<_AP_W1 + _AP_W2, \ - false>::RTYPE \ - operator BIN_OP(const ap_int_base<_AP_W3, _AP_S3>& op1, \ - const ap_concat_ref<_AP_W1, _AP_T1, _AP_W2, _AP_T2>& op2) { \ - /* convert ap_concat_ref to ap_int_base */ \ - return op1 BIN_OP op2.get(); \ - } \ - template \ - INLINE typename ap_int_base<_AP_W1 + _AP_W2, \ - false>::template RType<_AP_W3, _AP_S3>::RTYPE \ - operator BIN_OP(const ap_concat_ref<_AP_W1, _AP_T1, _AP_W2, _AP_T2>& op1, \ - const ap_int_base<_AP_W3, _AP_S3>& op2) { \ - /* convert ap_concat_ref to ap_int_base */ \ - return op1.get() BIN_OP op2; \ - } - -OP_BIN_WITH_CONCAT(+, plus) -OP_BIN_WITH_CONCAT(-, minus) -OP_BIN_WITH_CONCAT(*, mult) -OP_BIN_WITH_CONCAT(/, div) -OP_BIN_WITH_CONCAT(%, mod) -OP_BIN_WITH_CONCAT(&, logic) -OP_BIN_WITH_CONCAT(|, logic) -OP_BIN_WITH_CONCAT(^, logic) -OP_BIN_WITH_CONCAT(>>, arg1) -OP_BIN_WITH_CONCAT(<<, arg1) - -#undef OP_BIN_WITH_CONCAT - -// compound assignment operators. -#define OP_ASSIGN_WITH_CONCAT(ASSIGN_OP) \ - template \ - INLINE typename ap_int_base<_AP_W3, _AP_S3>::template RType<_AP_W1 + _AP_W2, \ - false>::RTYPE \ - operator ASSIGN_OP( \ - const ap_int_base<_AP_W3, _AP_S3>& op1, \ - const ap_concat_ref<_AP_W1, _AP_T1, _AP_W2, _AP_T2>& op2) { \ - /* convert ap_concat_ref to ap_int_base */ \ - return op1 ASSIGN_OP op2.get(); \ - } \ - template \ - INLINE typename ap_int_base<_AP_W1 + _AP_W2, \ - false>::template RType<_AP_W3, _AP_S3>::RTYPE \ - operator ASSIGN_OP(const ap_concat_ref<_AP_W1, _AP_T1, _AP_W2, _AP_T2>& op1, \ - const ap_int_base<_AP_W3, _AP_S3>& op2) { \ - /* convert ap_concat_ref to ap_int_base */ \ - ap_int_base<_AP_W1 + _AP_W2, false> tmp = op1.get(); \ - tmp ASSIGN_OP op2; \ - op1 = tmp; \ - return op1; \ - } - -OP_ASSIGN_WITH_CONCAT(+=) -OP_ASSIGN_WITH_CONCAT(-=) -OP_ASSIGN_WITH_CONCAT(*=) -OP_ASSIGN_WITH_CONCAT(/=) -OP_ASSIGN_WITH_CONCAT(%=) -OP_ASSIGN_WITH_CONCAT(&=) -OP_ASSIGN_WITH_CONCAT(|=) -OP_ASSIGN_WITH_CONCAT(^=) -OP_ASSIGN_WITH_CONCAT(>>=) -OP_ASSIGN_WITH_CONCAT(<<=) - -#undef OP_ASSIGN_WITH_CONCAT -#endif - -// equality and relational operators. 
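Likewise, the *_WITH_BIT macros above promote an ap_bit_ref to a one-bit unsigned ap_int_base so single bits mix with full-width values. In everyday use the bit reference comes from operator[] (sketch assuming ap_int.h):

    #include "ap_int.h"

    int main() {
        ap_uint<8> v = 0x81;
        bool msb = v[7];   // ap_bit_ref converts to bool on read
        v[0] = 0;          // and writes a single bit in place
        return (msb && v == 0x80) ? 0 : 1;
    }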
-#define OP_REL_WITH_CONCAT(REL_OP) \ - template \ - INLINE bool operator REL_OP( \ - const ap_int_base<_AP_W3, _AP_S3>& op1, \ - const ap_concat_ref<_AP_W1, _AP_T1, _AP_W2, _AP_T2>& op2) { \ - /* convert ap_concat_ref to ap_int_base */ \ - return op1 REL_OP op2.get(); \ - } \ - template \ - INLINE bool operator REL_OP( \ - const ap_concat_ref<_AP_W1, _AP_T1, _AP_W2, _AP_T2>& op1, \ - const ap_int_base<_AP_W3, _AP_S3>& op2) { \ - /* convert ap_concat_ref to ap_int_base */ \ - return op1.get() REL_OP op2; \ - } - -OP_REL_WITH_CONCAT(==) -OP_REL_WITH_CONCAT(!=) -OP_REL_WITH_CONCAT(>) -OP_REL_WITH_CONCAT(>=) -OP_REL_WITH_CONCAT(<) -OP_REL_WITH_CONCAT(<=) - -#undef OP_REL_WITH_CONCAT - -#endif // ifndef __cplusplus -#endif // ifndef __AP_INT_BASE_H__ - -// -*- cpp -*- diff --git a/hls4ml/hls4ml/templates/vivado/ap_types/ap_int_ref.h b/hls4ml/hls4ml/templates/vivado/ap_types/ap_int_ref.h deleted file mode 100644 index 421f09f..0000000 --- a/hls4ml/hls4ml/templates/vivado/ap_types/ap_int_ref.h +++ /dev/null @@ -1,1346 +0,0 @@ -/* - * Copyright 2011-2019 Xilinx, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef __AP_INT_REF_H__ -#define __AP_INT_REF_H__ - -#ifndef __AP_INT_H__ -#error "Only ap_fixed.h and ap_int.h can be included directly in user code." -#endif - -#ifndef __cplusplus -#error "C++ is required to include this header file" - -#else - -#ifndef __SYNTHESIS__ -#include -#endif - -/* Concatination reference. - ---------------------------------------------------------------- -*/ -template -struct ap_concat_ref { - enum { - _AP_WR = _AP_W1 + _AP_W2, - }; - - _AP_T1& mbv1; - _AP_T2& mbv2; - - INLINE ap_concat_ref(const ap_concat_ref<_AP_W1, _AP_T1, _AP_W2, _AP_T2>& ref) - : mbv1(ref.mbv1), mbv2(ref.mbv2) {} - - INLINE ap_concat_ref(_AP_T1& bv1, _AP_T2& bv2) : mbv1(bv1), mbv2(bv2) {} - - template - INLINE ap_concat_ref& operator=(const ap_int_base<_AP_W3, _AP_S3>& val) { - ap_int_base<_AP_W1 + _AP_W2, false> vval(val); - int W_ref1 = mbv1.length(); - int W_ref2 = mbv2.length(); - ap_int_base<_AP_W1, false> Part1; - Part1.V = _AP_ROOT_op_get_range(vval.V, W_ref2, W_ref1 + W_ref2 - 1); - mbv1.set(Part1); - ap_int_base<_AP_W2, false> Part2; - Part2.V = _AP_ROOT_op_get_range(vval.V, 0, W_ref2 - 1); - mbv2.set(Part2); - return *this; - } - - // assign op from hls supported C integral types. 
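The ap_concat_ref::operator= defined above splits the right-hand value so the upper W1 bits land in the left operand and the lower W2 bits in the right. A plain-C++ model of that split (concat_assign is a hypothetical helper, not part of the header):

    #include <cstdint>
    #include <cassert>

    // Model of ap_concat_ref assignment for parts of up to 31 bits each.
    void concat_assign(uint32_t& left, int w1, uint32_t& right, int w2,
                       uint32_t val) {
        right = val & ((1u << w2) - 1);          // low w2 bits -> right operand
        left  = (val >> w2) & ((1u << w1) - 1);  // next w1 bits -> left operand
    }

    int main() {
        uint32_t hi = 0, lo = 0;
        concat_assign(hi, 4, lo, 4, 0xA5);
        assert(hi == 0xA && lo == 0x5);
        return 0;
    }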
- // FIXME disabled to support legacy code directly assign from sc_signal - //template - //INLINE typename _ap_type::enable_if<_ap_type::is_integral::value, - // ap_concat_ref&>::type - //operator=(T val) { - // ap_int_base<_AP_W1 + _AP_W2, false> tmpVal(val); - // return operator=(tmpVal); - //} -#define ASSIGN_WITH_CTYPE(_Tp) \ - INLINE ap_concat_ref& operator=(_Tp val) { \ - ap_int_base<_AP_W1 + _AP_W2, false> tmpVal(val); \ - return operator=(tmpVal); \ - } - - ASSIGN_WITH_CTYPE(bool) - ASSIGN_WITH_CTYPE(char) - ASSIGN_WITH_CTYPE(signed char) - ASSIGN_WITH_CTYPE(unsigned char) - ASSIGN_WITH_CTYPE(short) - ASSIGN_WITH_CTYPE(unsigned short) - ASSIGN_WITH_CTYPE(int) - ASSIGN_WITH_CTYPE(unsigned int) - ASSIGN_WITH_CTYPE(long) - ASSIGN_WITH_CTYPE(unsigned long) - ASSIGN_WITH_CTYPE(ap_slong) - ASSIGN_WITH_CTYPE(ap_ulong) -#if _AP_ENABLE_HALF_ == 1 - ASSIGN_WITH_CTYPE(half) -#endif - ASSIGN_WITH_CTYPE(float) - ASSIGN_WITH_CTYPE(double) - -#undef ASSIGN_WITH_CTYPE - - // Be explicit to prevent it from being deleted, as field d_bv - // is of reference type. - INLINE ap_concat_ref& operator=( - const ap_concat_ref<_AP_W1, _AP_T1, _AP_W2, _AP_T2>& val) { - ap_int_base<_AP_W1 + _AP_W2, false> tmpVal(val); - return operator=(tmpVal); - } - - template - INLINE ap_concat_ref& operator=( - const ap_concat_ref<_AP_W3, _AP_T3, _AP_W4, _AP_T4>& val) { - ap_int_base<_AP_W1 + _AP_W2, false> tmpVal(val); - return operator=(tmpVal); - } - - template - INLINE ap_concat_ref& operator=(const ap_bit_ref<_AP_W3, _AP_S3>& val) { - ap_int_base<_AP_W1 + _AP_W2, false> tmpVal(val); - return operator=(tmpVal); - } - template - INLINE ap_concat_ref& operator=(const ap_range_ref<_AP_W3, _AP_S3>& val) { - ap_int_base<_AP_W1 + _AP_W2, false> tmpVal(val); - return operator=(tmpVal); - } - - template - INLINE ap_concat_ref& operator=( - const af_range_ref<_AP_W3, _AP_I3, _AP_S3, _AP_Q3, _AP_O3, _AP_N3>& val) { - return operator=((const ap_int_base<_AP_W3, false>)(val)); - } - - template - INLINE ap_concat_ref& operator=( - const ap_fixed_base<_AP_W3, _AP_I3, _AP_S3, _AP_Q3, _AP_O3, _AP_N3>& - val) { - return operator=(val.to_ap_int_base()); - } - - template - INLINE ap_concat_ref& operator=( - const af_bit_ref<_AP_W3, _AP_I3, _AP_S3, _AP_Q3, _AP_O3, _AP_N3>& val) { - return operator=((ap_ulong)(bool)(val)); - } - - INLINE operator ap_int_base<_AP_WR, false>() const { return get(); } - - INLINE operator ap_ulong() const { return get().to_uint64(); } - - template - INLINE ap_concat_ref<_AP_WR, ap_concat_ref, _AP_W3, - ap_range_ref<_AP_W3, _AP_S3> > - operator,(const ap_range_ref<_AP_W3, _AP_S3> &a2) { - return ap_concat_ref<_AP_WR, ap_concat_ref, _AP_W3, - ap_range_ref<_AP_W3, _AP_S3> >( - *this, const_cast&>(a2)); - } - - template - INLINE - ap_concat_ref<_AP_WR, ap_concat_ref, _AP_W3, ap_int_base<_AP_W3, _AP_S3> > - operator,(ap_int_base<_AP_W3, _AP_S3> &a2) { - return ap_concat_ref<_AP_WR, ap_concat_ref, _AP_W3, - ap_int_base<_AP_W3, _AP_S3> >(*this, a2); - } - - template - INLINE - ap_concat_ref<_AP_WR, ap_concat_ref, _AP_W3, ap_int_base<_AP_W3, _AP_S3> > - operator,(volatile ap_int_base<_AP_W3, _AP_S3> &a2) { - return ap_concat_ref<_AP_WR, ap_concat_ref, _AP_W3, - ap_int_base<_AP_W3, _AP_S3> >( - *this, const_cast&>(a2)); - } - - template - INLINE - ap_concat_ref<_AP_WR, ap_concat_ref, _AP_W3, ap_int_base<_AP_W3, _AP_S3> > - operator,(const ap_int_base<_AP_W3, _AP_S3> &a2) { - return ap_concat_ref<_AP_WR, ap_concat_ref, _AP_W3, - ap_int_base<_AP_W3, _AP_S3> >( - *this, const_cast&>(a2)); - } - - template - INLINE - 
ap_concat_ref<_AP_WR, ap_concat_ref, _AP_W3, ap_int_base<_AP_W3, _AP_S3> > - operator,(const volatile ap_int_base<_AP_W3, _AP_S3> &a2) { - // FIXME op's life does not seem long enough - ap_int_base<_AP_W3, _AP_S3> op(a2); - return ap_concat_ref<_AP_WR, ap_concat_ref, _AP_W3, - ap_int_base<_AP_W3, _AP_S3> >( - *this, const_cast&>(op)); - } - - template - INLINE ap_concat_ref<_AP_WR, ap_concat_ref, 1, ap_bit_ref<_AP_W3, _AP_S3> > - operator,(const ap_bit_ref<_AP_W3, _AP_S3> &a2) { - return ap_concat_ref<_AP_WR, ap_concat_ref, 1, ap_bit_ref<_AP_W3, _AP_S3> >( - *this, const_cast&>(a2)); - } - - template - INLINE ap_concat_ref<_AP_WR, ap_concat_ref, _AP_W3 + _AP_W4, - ap_concat_ref<_AP_W3, _AP_T3, _AP_W4, _AP_T4> > - operator,(const ap_concat_ref<_AP_W3, _AP_T3, _AP_W4, _AP_T4> &a2) { - return ap_concat_ref<_AP_WR, ap_concat_ref, _AP_W3 + _AP_W4, - ap_concat_ref<_AP_W3, _AP_T3, _AP_W4, _AP_T4> >( - *this, const_cast&>(a2)); - } - - template - INLINE ap_concat_ref< - _AP_WR, ap_concat_ref, _AP_W3, - af_range_ref<_AP_W3, _AP_I3, _AP_S3, _AP_Q3, _AP_O3, _AP_N3> > - operator,( - const af_range_ref<_AP_W3, _AP_I3, _AP_S3, _AP_Q3, _AP_O3, _AP_N3> &a2) { - return ap_concat_ref< - _AP_WR, ap_concat_ref, _AP_W3, - af_range_ref<_AP_W3, _AP_I3, _AP_S3, _AP_Q3, _AP_O3, _AP_N3> >( - *this, - const_cast< - af_range_ref<_AP_W3, _AP_I3, _AP_S3, _AP_Q3, _AP_O3, _AP_N3>&>(a2)); - } - - template - INLINE - ap_concat_ref<_AP_WR, ap_concat_ref, 1, - af_bit_ref<_AP_W3, _AP_I3, _AP_S3, _AP_Q3, _AP_O3, _AP_N3> > - operator,(const af_bit_ref<_AP_W3, _AP_I3, _AP_S3, _AP_Q3, _AP_O3, _AP_N3> - &a2) { - return ap_concat_ref< - _AP_WR, ap_concat_ref, 1, - af_bit_ref<_AP_W3, _AP_I3, _AP_S3, _AP_Q3, _AP_O3, _AP_N3> >( - *this, - const_cast&>( - a2)); - } - - template - INLINE ap_int_base operator&( - const ap_int_base<_AP_W3, _AP_S3>& a2) { - return get() & a2; - } - - template - INLINE ap_int_base operator|( - const ap_int_base<_AP_W3, _AP_S3>& a2) { - return get() | a2; - } - - template - INLINE ap_int_base operator^( - const ap_int_base<_AP_W3, _AP_S3>& a2) { - return get() ^ a2; - } - -#if 0 - template - INLINE ap_int_base slice() { - ap_int_base<_AP_WR, false> bv = get(); - return bv.slice(); - } -#endif - - INLINE ap_int_base<_AP_WR, false> get() const { - ap_int_base<_AP_WR, false> tmpVal(0); - int W_ref1 = mbv1.length(); - int W_ref2 = mbv2.length(); - ap_int_base<_AP_W2, false> v2(mbv2); - ap_int_base<_AP_W1, false> v1(mbv1); - tmpVal.V = _AP_ROOT_op_set_range(tmpVal.V, 0, W_ref2 - 1, v2.V); - tmpVal.V = - _AP_ROOT_op_set_range(tmpVal.V, W_ref2, W_ref1 + W_ref2 - 1, v1.V); - return tmpVal; - } - - template - INLINE void set(const ap_int_base<_AP_W3, false>& val) { - ap_int_base<_AP_W1 + _AP_W2, false> vval(val); - int W_ref1 = mbv1.length(); - int W_ref2 = mbv2.length(); - ap_int_base<_AP_W1, false> tmpVal1; - tmpVal1.V = _AP_ROOT_op_get_range(vval.V, W_ref2, W_ref1 + W_ref2 - 1); - mbv1.set(tmpVal1); - ap_int_base<_AP_W2, false> tmpVal2; - tmpVal2.V = _AP_ROOT_op_get_range(vval.V, 0, W_ref2 - 1); - mbv2.set(tmpVal2); - } - - INLINE int length() const { return mbv1.length() + mbv2.length(); } -}; // struct ap_concat_ref - -/* Range (slice) reference. - ---------------------------------------------------------------- -*/ -template -struct ap_range_ref { - // struct ssdm_int or its sim model. - // TODO make it possible to reference to ap_fixed_base/ap_fixed/ap_ufixed - // and then we can retire af_range_ref. 
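Because each operator, above returns another ap_concat_ref, concatenations nest and the widths accumulate, which is how chains like (a, b, c) acquire their total width. A sketch assuming ap_int.h:

    #include "ap_int.h"

    int main() {
        ap_uint<4> a = 0x1, b = 0x2, c = 0x3;
        ap_uint<12> packed = (a, b, c);  // ((a, b), c): 4 + 4 + 4 = 12 bits
        return (packed == 0x123) ? 0 : 1;
    }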
- typedef ap_int_base<_AP_W, _AP_S> ref_type; - ref_type& d_bv; - int l_index; - int h_index; - - public: - INLINE ap_range_ref(const ap_range_ref<_AP_W, _AP_S>& ref) - : d_bv(ref.d_bv), l_index(ref.l_index), h_index(ref.h_index) {} - - INLINE ap_range_ref(ref_type* bv, int h, int l) - : d_bv(*bv), l_index(l), h_index(h) {} - - INLINE ap_range_ref(const ref_type* bv, int h, int l) - : d_bv(*const_cast(bv)), l_index(l), h_index(h) {} - - INLINE operator ap_int_base<_AP_W, false>() const { - ap_int_base<_AP_W, false> ret; - ret.V = _AP_ROOT_op_get_range(d_bv.V, l_index, h_index); - return ret; - } - - INLINE operator ap_ulong() const { return to_uint64(); } - - /// @name assign operators - // @{ - - // FIXME disabled to work-around lagacy code assigning from sc_signal, - // which dependes on implicit type conversion. - // - // /// assign from hls supported C integral types. - // template - // INLINE typename _ap_type::enable_if<_ap_type::is_integral::value, - // ap_range_ref&>::type - // operator=(T val) { - // ap_int_base<_AP_W, false> tmp(val); - // d_bv.V = _AP_ROOT_op_set_range(d_bv.V, l_index, h_index, tmp.V); - // return *this; - // } -#define ASSIGN_WITH_CTYPE(_Tp) \ - INLINE ap_range_ref& operator=(_Tp val) { \ - ap_int_base<_AP_W, false> tmp(val); \ - d_bv.V = _AP_ROOT_op_set_range(d_bv.V, l_index, h_index, tmp.V); \ - return *this; \ - } - - ASSIGN_WITH_CTYPE(bool) - ASSIGN_WITH_CTYPE(char) - ASSIGN_WITH_CTYPE(signed char) - ASSIGN_WITH_CTYPE(unsigned char) - ASSIGN_WITH_CTYPE(short) - ASSIGN_WITH_CTYPE(unsigned short) - ASSIGN_WITH_CTYPE(int) - ASSIGN_WITH_CTYPE(unsigned int) - ASSIGN_WITH_CTYPE(long) - ASSIGN_WITH_CTYPE(unsigned long) - ASSIGN_WITH_CTYPE(ap_slong) - ASSIGN_WITH_CTYPE(ap_ulong) -#if _AP_ENABLE_HALF_ == 1 - ASSIGN_WITH_CTYPE(half) -#endif - ASSIGN_WITH_CTYPE(float) - ASSIGN_WITH_CTYPE(double) - -#undef ASSIGN_WITH_CTYPE - - /// assign using string. XXX crucial for cosim. - INLINE ap_range_ref& operator=(const char* val) { - const ap_int_base<_AP_W, false> tmp(val); // XXX figure out radix - d_bv.V = _AP_ROOT_op_set_range(d_bv.V, l_index, h_index, tmp.V); - return *this; - } - - /// assign from ap_int_base. - template - INLINE ap_range_ref& operator=(const ap_int_base<_AP_W2, _AP_S2>& val) { - ap_int_base<_AP_W, false> tmp(val); - d_bv.V = _AP_ROOT_op_set_range(d_bv.V, l_index, h_index, tmp.V); - return *this; - } - - /// copy assign operator - // XXX Be explicit to prevent it from being deleted, as field d_bv - // is of reference type. - INLINE ap_range_ref& operator=(const ap_range_ref& val) { - return operator=((const ap_int_base<_AP_W, false>)val); - } - - /// assign from range reference to ap_int_base. - template - INLINE ap_range_ref& operator=(const ap_range_ref<_AP_W2, _AP_S2>& val) { - return operator=((const ap_int_base<_AP_W2, false>)val); - } - - /// assign from bit reference to ap_int_base. - template - INLINE ap_range_ref& operator=(const ap_bit_ref<_AP_W2, _AP_S2>& val) { - return operator=((ap_ulong)(bool)(val)); - } - - /// assign from ap_fixed_base. - template - INLINE ap_range_ref& operator=( - const ap_fixed_base<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& - val) { - return operator=(val.to_ap_int_base()); - } - - /// assign from range reference to ap_fixed_base. - template - INLINE ap_range_ref& operator=( - const af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& val) { - return operator=((const ap_int_base<_AP_W2, false>)val); - } - - /// assign from bit reference to ap_fixed_base. 
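ap_range_ref stores h_index and l_index separately and, as length() further down suggests, tolerates either ordering. My understanding is that giving the indices low-to-high yields the slice with its bit order reversed, the idiomatic ap_int bit-reversal trick; treat this sketch as an assumption to verify against the toolchain:

    #include "ap_int.h"

    int main() {
        ap_uint<8> x = 0x01;
        ap_uint<8> r = x.range(0, 7);  // reversed indices -> reversed bits (assumed)
        return (r == 0x80) ? 0 : 1;
    }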
- template - INLINE ap_range_ref& operator=( - const af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& val) { - return operator=((ap_ulong)(bool)(val)); - } - - /// assign from compound reference. - template - INLINE ap_range_ref& operator=( - const ap_concat_ref<_AP_W2, _AP_T3, _AP_W3, _AP_T3>& val) { - return operator=((const ap_int_base<_AP_W2 + _AP_W3, false>)(val)); - } - // @} - - template - INLINE - ap_concat_ref<_AP_W, ap_range_ref, _AP_W2, ap_range_ref<_AP_W2, _AP_S2> > - operator,(const ap_range_ref<_AP_W2, _AP_S2> &a2) { - return ap_concat_ref<_AP_W, ap_range_ref, _AP_W2, - ap_range_ref<_AP_W2, _AP_S2> >( - *this, const_cast&>(a2)); - } - - template - INLINE - ap_concat_ref<_AP_W, ap_range_ref, _AP_W2, ap_int_base<_AP_W2, _AP_S2> > - operator,(ap_int_base<_AP_W2, _AP_S2> &a2) { - return ap_concat_ref<_AP_W, ap_range_ref, _AP_W2, - ap_int_base<_AP_W2, _AP_S2> >(*this, a2); - } - - INLINE - ap_concat_ref<_AP_W, ap_range_ref, _AP_W, ap_int_base<_AP_W, _AP_S> > - operator,(ap_int_base<_AP_W, _AP_S>& a2) { - return ap_concat_ref<_AP_W, ap_range_ref, _AP_W, - ap_int_base<_AP_W, _AP_S> >(*this, a2); - } - - template - INLINE - ap_concat_ref<_AP_W, ap_range_ref, _AP_W2, ap_int_base<_AP_W2, _AP_S2> > - operator,(volatile ap_int_base<_AP_W2, _AP_S2> &a2) { - return ap_concat_ref<_AP_W, ap_range_ref, _AP_W2, - ap_int_base<_AP_W2, _AP_S2> >( - *this, const_cast&>(a2)); - } - - template - INLINE - ap_concat_ref<_AP_W, ap_range_ref, _AP_W2, ap_int_base<_AP_W2, _AP_S2> > - operator,(const ap_int_base<_AP_W2, _AP_S2> &a2) { - return ap_concat_ref<_AP_W, ap_range_ref, _AP_W2, - ap_int_base<_AP_W2, _AP_S2> >( - *this, const_cast&>(a2)); - } - - template - INLINE - ap_concat_ref<_AP_W, ap_range_ref, _AP_W2, ap_int_base<_AP_W2, _AP_S2> > - operator,(const volatile ap_int_base<_AP_W2, _AP_S2> &a2) { - return ap_concat_ref<_AP_W, ap_range_ref, _AP_W2, - ap_int_base<_AP_W2, _AP_S2> >( - *this, const_cast&>(a2)); - } - - template - INLINE ap_concat_ref<_AP_W, ap_range_ref, 1, ap_bit_ref<_AP_W2, _AP_S2> > - operator,(const ap_bit_ref<_AP_W2, _AP_S2> &a2) { - return ap_concat_ref<_AP_W, ap_range_ref, 1, ap_bit_ref<_AP_W2, _AP_S2> >( - *this, const_cast&>(a2)); - } - - template - INLINE ap_concat_ref<_AP_W, ap_range_ref, _AP_W2 + _AP_W3, - ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3> > - operator,(const ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3> &a2) { - return ap_concat_ref<_AP_W, ap_range_ref, _AP_W2 + _AP_W3, - ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3> >( - *this, const_cast&>(a2)); - } - - template - INLINE ap_concat_ref< - _AP_W, ap_range_ref, _AP_W2, - af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> > - operator,( - const af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> a2) { - return ap_concat_ref< - _AP_W, ap_range_ref, _AP_W2, - af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> >( - *this, - const_cast< - af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>&>(a2)); - } - - template - INLINE - ap_concat_ref<_AP_W, ap_range_ref, 1, - af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> > - operator,(const af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> - &a2) { - return ap_concat_ref< - _AP_W, ap_range_ref, 1, - af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> >( - *this, - const_cast&>( - a2)); - } - - template - INLINE bool operator==(const ap_range_ref<_AP_W2, _AP_S2>& op2) { - ap_int_base<_AP_W, false> lop(*this); - ap_int_base<_AP_W2, false> hop(op2); - return lop == hop; - } - - template - INLINE bool 
operator!=(const ap_range_ref<_AP_W2, _AP_S2>& op2) { - return !(operator==(op2)); - } - - template - INLINE bool operator<(const ap_range_ref<_AP_W2, _AP_S2>& op2) { - ap_int_base<_AP_W, false> lop(*this); - ap_int_base<_AP_W2, false> hop(op2); - return lop < hop; - } - - template - INLINE bool operator<=(const ap_range_ref<_AP_W2, _AP_S2>& op2) { - ap_int_base<_AP_W, false> lop(*this); - ap_int_base<_AP_W2, false> hop(op2); - return lop <= hop; - } - - template - INLINE bool operator>(const ap_range_ref<_AP_W2, _AP_S2>& op2) { - return !(operator<=(op2)); - } - - template - INLINE bool operator>=(const ap_range_ref<_AP_W2, _AP_S2>& op2) { - return !(operator<(op2)); - } - - template - INLINE ap_range_ref<_AP_W, _AP_S>& operator|=( - const ap_range_ref<_AP_W2, _AP_S2>& op2) { - (this->d_bv).V |= (op2.d_bv).V; - return *this; - }; - - template - INLINE ap_range_ref<_AP_W, _AP_S>& operator|=( - const ap_int_base<_AP_W2, _AP_S2>& op2) { - (this->d_bv).V |= op2.V; - return *this; - }; - - template - INLINE ap_range_ref<_AP_W, _AP_S>& operator&=( - const ap_range_ref<_AP_W2, _AP_S2>& op2) { - (this->d_bv).V &= (op2.d_bv).V; - return *this; - }; - - template - INLINE ap_range_ref<_AP_W, _AP_S>& operator&=( - const ap_int_base<_AP_W2, _AP_S2>& op2) { - (this->d_bv).V &= op2.V; - return *this; - }; - - template - INLINE ap_range_ref<_AP_W, _AP_S>& operator^=( - const ap_range_ref<_AP_W2, _AP_S2>& op2) { - (this->d_bv).V ^= (op2.d_bv).V; - return *this; - }; - - template - INLINE ap_range_ref<_AP_W, _AP_S>& operator^=( - const ap_int_base<_AP_W2, _AP_S2>& op2) { - (this->d_bv).V ^= op2.V; - return *this; - }; - - INLINE ap_int_base<_AP_W, false> get() const { - ap_int_base<_AP_W, false> ret; - ret.V = _AP_ROOT_op_get_range(d_bv.V, l_index, h_index); - return ret; - } - - template - INLINE void set(const ap_int_base<_AP_W2, false>& val) { - d_bv.V = _AP_ROOT_op_set_range(d_bv.V, l_index, h_index, val.V); - } - - INLINE int length() const { - return h_index >= l_index ? h_index - l_index + 1 : l_index - h_index + 1; - } - - INLINE int to_int() const { - return (int)(_AP_ROOT_op_get_range(d_bv.V, l_index, h_index)); - } - - INLINE unsigned to_uint() const { - return (unsigned)(_AP_ROOT_op_get_range(d_bv.V, l_index, h_index)); - } - - INLINE long to_long() const { - return (long)(_AP_ROOT_op_get_range(d_bv.V, l_index, h_index)); - } - - INLINE unsigned long to_ulong() const { - return (unsigned long)(_AP_ROOT_op_get_range(d_bv.V, l_index, h_index)); - } - - INLINE ap_slong to_int64() const { - return (ap_slong)(_AP_ROOT_op_get_range(d_bv.V, l_index, h_index)); - } - - INLINE ap_ulong to_uint64() const { - return (ap_ulong)(_AP_ROOT_op_get_range(d_bv.V, l_index, h_index)); - } - - INLINE bool and_reduce() const { - bool ret = true; - bool reverse = l_index > h_index; - unsigned low = reverse ? h_index : l_index; - unsigned high = reverse ? l_index : h_index; - for (unsigned i = low; i != high; ++i) { -#ifdef __SYNTHESIS__ -#pragma HLS unroll -#endif - ret &= _AP_ROOT_op_get_bit(d_bv.V, i); - } - return ret; - } - - INLINE bool or_reduce() const { - bool ret = false; - bool reverse = l_index > h_index; - unsigned low = reverse ? h_index : l_index; - unsigned high = reverse ? l_index : h_index; - for (unsigned i = low; i != high; ++i) { -#ifdef __SYNTHESIS__ -#pragma HLS unroll -#endif - ret |= _AP_ROOT_op_get_bit(d_bv.V, i); - } - return ret; - } - - INLINE bool xor_reduce() const { - bool ret = false; - bool reverse = l_index > h_index; - unsigned low = reverse ? 
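The to_int()/to_uint() family above extracts the selected bits directly via _AP_ROOT_op_get_range, which makes slices convenient for pulling packed fields out of a register image. A sketch assuming ap_int.h:

    #include "ap_int.h"

    int main() {
        ap_uint<32> reg = 0xDEADBEEF;
        unsigned field = reg(15, 8).to_uint();  // second byte: 0xBE
        return (field == 0xBE) ? 0 : 1;
    }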
h_index : l_index; - unsigned high = reverse ? l_index : h_index; - for (unsigned i = low; i != high; ++i) { -#ifdef __SYNTHESIS__ -#pragma HLS unroll -#endif - ret ^= _AP_ROOT_op_get_bit(d_bv.V, i); - } - return ret; - } -#ifndef __SYNTHESIS__ - std::string to_string(signed char radix = 2) const { - ap_int_base<_AP_W, false> ret; - ret.V = _AP_ROOT_op_get_range(d_bv.V, l_index, h_index); - return ret.to_string(radix); - } -#else - // XXX HLS will delete this in synthesis - INLINE char* to_string(signed char radix = 2) const { - return 0; - } -#endif -}; // struct ap_range_ref - -// XXX apcc cannot handle global std::ios_base::Init() brought in by -#ifndef AP_AUTOCC -#ifndef __SYNTHESIS__ -template -INLINE std::ostream& operator<<(std::ostream& os, - const ap_range_ref<_AP_W, _AP_S>& x) { - std::ios_base::fmtflags ff = std::cout.flags(); - if (ff & std::cout.hex) { - os << x.to_string(16); // don't print sign - } else if (ff & std::cout.oct) { - os << x.to_string(8); // don't print sign - } else { - os << x.to_string(10); - } - return os; -} -#endif // ifndef __SYNTHESIS__ - -#ifndef __SYNTHESIS__ -template -INLINE std::istream& operator>>(std::istream& in, - ap_range_ref<_AP_W, _AP_S>& op) { - std::string str; - in >> str; - op = ap_int_base<_AP_W, _AP_S>(str.c_str()); - return in; -} -#endif // ifndef __SYNTHESIS__ -#endif // ifndef AP_AUTOCC - -/* Bit reference. - ---------------------------------------------------------------- -*/ -template -struct ap_bit_ref { - // struct ssdm_int or its sim model. - // TODO make it possible to reference to ap_fixed_base/ap_fixed/ap_ufixed - // and then we can retire af_bit_ref. - typedef ap_int_base<_AP_W, _AP_S> ref_type; - ref_type& d_bv; - int d_index; - - public: - // copy ctor - INLINE ap_bit_ref(const ap_bit_ref<_AP_W, _AP_S>& ref) - : d_bv(ref.d_bv), d_index(ref.d_index) {} - - INLINE ap_bit_ref(ref_type* bv, int index = 0) : d_bv(*bv), d_index(index) {} - - INLINE ap_bit_ref(const ref_type* bv, int index = 0) - : d_bv(*const_cast(bv)), d_index(index) {} - - INLINE operator bool() const { return _AP_ROOT_op_get_bit(d_bv.V, d_index); } - INLINE bool to_bool() const { return _AP_ROOT_op_get_bit(d_bv.V, d_index); } - - // assign op from hls supported C integral types. - // FIXME disabled to support sc_signal. - // NOTE this used to be unsigned long long. 
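The stream operators above pick the printing radix from the stream's basefield flags, mirroring how built-in integers react to std::hex/std::oct/std::dec. A sketch assuming ap_int.h (the exact prefix in the output depends on to_string's formatting):

    #include "ap_int.h"
    #include <iostream>

    int main() {
        ap_uint<12> v = 0xABC;
        std::cout << std::hex << v << "\n";  // printed via to_string(16)
        std::cout << std::dec << v << "\n";  // printed via to_string(10): 2748
        return 0;
    }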
- //template - //INLINE typename _ap_type::enable_if<_ap_type::is_integral::value, - // ap_bit_ref&>::type - //operator=(T val) { - // d_bv.V = _AP_ROOT_op_set_bit(d_bv.V, d_index, val); - // return *this; - //} -#define ASSIGN_WITH_CTYPE(_Tp) \ - INLINE ap_bit_ref& operator=(_Tp val) { \ - d_bv.V = _AP_ROOT_op_set_bit(d_bv.V, d_index, val); \ - return *this; \ - } - - ASSIGN_WITH_CTYPE(bool) - ASSIGN_WITH_CTYPE(char) - ASSIGN_WITH_CTYPE(signed char) - ASSIGN_WITH_CTYPE(unsigned char) - ASSIGN_WITH_CTYPE(short) - ASSIGN_WITH_CTYPE(unsigned short) - ASSIGN_WITH_CTYPE(int) - ASSIGN_WITH_CTYPE(unsigned int) - ASSIGN_WITH_CTYPE(long) - ASSIGN_WITH_CTYPE(unsigned long) - ASSIGN_WITH_CTYPE(ap_slong) - ASSIGN_WITH_CTYPE(ap_ulong) - -#undef ASSIGN_WITH_CTYPE - -#define ASSIGN_WITH_CTYPE_FP(_Tp) \ - INLINE ap_bit_ref& operator=(_Tp val) { \ - bool tmp_val = val; \ - d_bv.V = _AP_ROOT_op_set_bit(d_bv.V, d_index,tmp_val); \ - return *this; \ - } - -#if _AP_ENABLE_HALF_ == 1 - ASSIGN_WITH_CTYPE_FP(half) -#endif - ASSIGN_WITH_CTYPE_FP(float) - ASSIGN_WITH_CTYPE_FP(double) - -#undef ASSIGN_WITH_CTYPE_FP - - - template - INLINE ap_bit_ref& operator=(const ap_int_base<_AP_W2, _AP_S2>& val) { - return operator=((ap_ulong)(val.V != 0)); - } - - template - INLINE ap_bit_ref& operator=(const ap_range_ref<_AP_W2, _AP_S2>& val) { - return operator=((ap_int_base<_AP_W2, false>)val); - } - - // Be explicit to prevent it from being deleted, as field d_bv - // is of reference type. - INLINE ap_bit_ref& operator=(const ap_bit_ref& val) { - return operator=((ap_ulong)(bool)val); - } - - template - INLINE ap_bit_ref& operator=(const ap_bit_ref<_AP_W2, _AP_S2>& val) { - return operator=((ap_ulong)(bool)val); - } - - template - INLINE ap_bit_ref& operator=( - const af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& val) { - return operator=((const ap_int_base<_AP_W2, false>)val); - } - - template - INLINE ap_bit_ref& operator=( - const af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& val) { - return operator=((ap_ulong)(bool)val); - } - - template - INLINE ap_bit_ref& operator=( - const ap_concat_ref<_AP_W2, _AP_T3, _AP_W3, _AP_T3>& val) { - return operator=((const ap_int_base<_AP_W2 + _AP_W3, false>)val); - } - - template - INLINE ap_concat_ref<1, ap_bit_ref, _AP_W2, ap_int_base<_AP_W2, _AP_S2> > - operator,(ap_int_base<_AP_W2, _AP_S2> &a2) { - return ap_concat_ref<1, ap_bit_ref, _AP_W2, ap_int_base<_AP_W2, _AP_S2> >( - *this, a2); - } - - template - INLINE ap_concat_ref<1, ap_bit_ref, _AP_W2, ap_int_base<_AP_W2, _AP_S2> > - operator,(volatile ap_int_base<_AP_W2, _AP_S2> &a2) { - return ap_concat_ref<1, ap_bit_ref, _AP_W2, ap_int_base<_AP_W2, _AP_S2> >( - *this, const_cast&>(a2)); - } - - template - INLINE ap_concat_ref<1, ap_bit_ref, _AP_W2, ap_int_base<_AP_W2, _AP_S2> > - operator,(const ap_int_base<_AP_W2, _AP_S2> &a2) { - ap_int_base<_AP_W2, _AP_S2> op(a2); - return ap_concat_ref<1, ap_bit_ref, _AP_W2, ap_int_base<_AP_W2, _AP_S2> >( - *this, const_cast&>(op)); - } - - template - INLINE ap_concat_ref<1, ap_bit_ref, _AP_W2, ap_int_base<_AP_W2, _AP_S2> > - operator,(const volatile ap_int_base<_AP_W2, _AP_S2> &a2) { - ap_int_base<_AP_W2, _AP_S2> op(a2); - return ap_concat_ref<1, ap_bit_ref, _AP_W2, ap_int_base<_AP_W2, _AP_S2> >( - *this, const_cast&>(op)); - } - - template - INLINE ap_concat_ref<1, ap_bit_ref, _AP_W2, ap_range_ref<_AP_W2, _AP_S2> > - operator,(const ap_range_ref<_AP_W2, _AP_S2> &a2) { - return ap_concat_ref<1, ap_bit_ref, _AP_W2, ap_range_ref<_AP_W2, _AP_S2> >( - *this, 
const_cast&>(a2)); - } - - template - INLINE ap_concat_ref<1, ap_bit_ref, 1, ap_bit_ref<_AP_W2, _AP_S2> > operator,( - const ap_bit_ref<_AP_W2, _AP_S2> &a2) { - return ap_concat_ref<1, ap_bit_ref, 1, ap_bit_ref<_AP_W2, _AP_S2> >( - *this, const_cast&>(a2)); - } - - template - INLINE ap_concat_ref<1, ap_bit_ref, _AP_W2 + _AP_W3, - ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3> > - operator,(const ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3> &a2) { - return ap_concat_ref<1, ap_bit_ref, _AP_W2 + _AP_W3, - ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3> >( - *this, const_cast&>(a2)); - } - - template - INLINE ap_concat_ref< - 1, ap_bit_ref, _AP_W2, - af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> > - operator,( - const af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> &a2) { - return ap_concat_ref< - 1, ap_bit_ref, _AP_W2, - af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> >( - *this, - const_cast< - af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>&>(a2)); - } - - template - INLINE ap_concat_ref<1, ap_bit_ref, 1, af_bit_ref<_AP_W2, _AP_I2, _AP_S2, - _AP_Q2, _AP_O2, _AP_N2> > - operator,( - const af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> &a2) { - return ap_concat_ref<1, ap_bit_ref, 1, af_bit_ref<_AP_W2, _AP_I2, _AP_S2, - _AP_Q2, _AP_O2, _AP_N2> >( - *this, - const_cast&>( - a2)); - } - - template - INLINE bool operator==(const ap_bit_ref<_AP_W2, _AP_S2>& op) { - return get() == op.get(); - } - - template - INLINE bool operator!=(const ap_bit_ref<_AP_W2, _AP_S2>& op) { - return get() != op.get(); - } - - INLINE bool get() const { return _AP_ROOT_op_get_bit(d_bv.V, d_index); } - - INLINE bool get() { return _AP_ROOT_op_get_bit(d_bv.V, d_index); } - - template - INLINE void set(const ap_int_base<_AP_W3, false>& val) { - operator=(val); - } - - INLINE bool operator~() const { - bool bit = _AP_ROOT_op_get_bit(d_bv.V, d_index); - return bit ? false : true; - } - - INLINE int length() const { return 1; } - -#ifndef __SYNTHESIS__ - std::string to_string() const { return get() ? "1" : "0"; } -#else - // XXX HLS will delete this in synthesis - INLINE char* to_string() const { return 0; } -#endif -}; // struct ap_bit_ref - -/* ap_range_ref with int. - * ------------------------------------------------------------ - */ -// equality and relational operators. 
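ap_bit_ref::operator~ above returns the inverted bit as a bool rather than mutating it in place, so a toggle is written as a read-invert-write. Sketch assuming ap_int.h:

    #include "ap_int.h"

    int main() {
        ap_uint<8> v = 0x0F;
        v[0] = ~v[0];  // operator~ yields the inverted bool; assignment writes it
        return (v == 0x0E) ? 0 : 1;
    }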
-#define REF_REL_OP_WITH_INT(REL_OP, C_TYPE, _AP_W2, _AP_S2) \ - template \ - INLINE bool operator REL_OP(const ap_range_ref<_AP_W, _AP_S>& op, \ - C_TYPE op2) { \ - return ap_int_base<_AP_W, false>(op) \ - REL_OP ap_int_base<_AP_W2, _AP_S2>(op2); \ - } \ - template \ - INLINE bool operator REL_OP(const ap_bit_ref<_AP_W, _AP_S>& op, \ - C_TYPE op2) { \ - return bool(op) REL_OP op2; \ - } \ - template \ - INLINE bool operator REL_OP(C_TYPE op2, \ - const ap_bit_ref<_AP_W, _AP_S>& op) { \ - return op2 REL_OP bool(op); \ - } \ - template \ - INLINE bool operator REL_OP( \ - const ap_concat_ref<_AP_W, _AP_T, _AP_W1, _AP_T1>& op, C_TYPE op2) { \ - return ap_int_base<_AP_W + _AP_W1, false>(op) \ - REL_OP ap_int_base<_AP_W2, _AP_S2>(op2); \ - } - -// Make the line shorter than 5000 chars -#define REF_REL_WITH_INT_1(C_TYPE, _AP_WI, _AP_SI) \ - REF_REL_OP_WITH_INT(>, C_TYPE, _AP_WI, _AP_SI) \ - REF_REL_OP_WITH_INT(<, C_TYPE, _AP_WI, _AP_SI) \ - REF_REL_OP_WITH_INT(>=, C_TYPE, _AP_WI, _AP_SI) \ - REF_REL_OP_WITH_INT(<=, C_TYPE, _AP_WI, _AP_SI) - -REF_REL_WITH_INT_1(bool, 1, false) -REF_REL_WITH_INT_1(char, 8, CHAR_IS_SIGNED) -REF_REL_WITH_INT_1(signed char, 8, true) -REF_REL_WITH_INT_1(unsigned char, 8, false) -REF_REL_WITH_INT_1(short, _AP_SIZE_short, true) -REF_REL_WITH_INT_1(unsigned short, _AP_SIZE_short, false) -REF_REL_WITH_INT_1(int, _AP_SIZE_int, true) -REF_REL_WITH_INT_1(unsigned int, _AP_SIZE_int, false) -REF_REL_WITH_INT_1(long, _AP_SIZE_long, true) -REF_REL_WITH_INT_1(unsigned long, _AP_SIZE_long, false) -REF_REL_WITH_INT_1(ap_slong, _AP_SIZE_ap_slong, true) -REF_REL_WITH_INT_1(ap_ulong, _AP_SIZE_ap_slong, false) - -// Make the line shorter than 5000 chars -#define REF_REL_WITH_INT_2(C_TYPE, _AP_WI, _AP_SI) \ - REF_REL_OP_WITH_INT(==, C_TYPE, _AP_WI, _AP_SI) \ - REF_REL_OP_WITH_INT(!=, C_TYPE, _AP_WI, _AP_SI) - -REF_REL_WITH_INT_2(bool, 1, false) -REF_REL_WITH_INT_2(char, 8, CHAR_IS_SIGNED) -REF_REL_WITH_INT_2(signed char, 8, true) -REF_REL_WITH_INT_2(unsigned char, 8, false) -REF_REL_WITH_INT_2(short, _AP_SIZE_short, true) -REF_REL_WITH_INT_2(unsigned short, _AP_SIZE_short, false) -REF_REL_WITH_INT_2(int, _AP_SIZE_int, true) -REF_REL_WITH_INT_2(unsigned int, _AP_SIZE_int, false) -REF_REL_WITH_INT_2(long, _AP_SIZE_long, true) -REF_REL_WITH_INT_2(unsigned long, _AP_SIZE_long, false) -REF_REL_WITH_INT_2(ap_slong, _AP_SIZE_ap_slong, true) -REF_REL_WITH_INT_2(ap_ulong, _AP_SIZE_ap_slong, false) - -#undef REF_REL_OP_WITH_INT -#undef REF_REL_WITH_INT_1 -#undef REF_REL_WITH_INT_2 - -#define REF_BIN_OP_WITH_INT(BIN_OP, RTYPE, C_TYPE, _AP_W2, _AP_S2) \ - template \ - INLINE typename ap_int_base<_AP_W, false>::template RType<_AP_W2, \ - _AP_S2>::RTYPE \ - operator BIN_OP(const ap_range_ref<_AP_W, _AP_S>& op, C_TYPE op2) { \ - return ap_int_base<_AP_W, false>(op) \ - BIN_OP ap_int_base<_AP_W2, _AP_S2>(op2); \ - } \ - template \ - INLINE typename ap_int_base<_AP_W2, _AP_S2>::template RType<_AP_W, \ - false>::RTYPE \ - operator BIN_OP(C_TYPE op2, const ap_range_ref<_AP_W, _AP_S>& op) { \ - return ap_int_base<_AP_W2, _AP_S2>(op2) \ - BIN_OP ap_int_base<_AP_W, false>(op); \ - } - -// arithmetic operators. 
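REF_REL_OP_WITH_INT above means slices and bit references compare directly against native integers, with the slice treated as unsigned. Sketch assuming ap_int.h:

    #include "ap_int.h"

    int main() {
        ap_uint<16> w = 0x00FF;
        bool eq = (w(7, 0) == 255);  // low byte, compared as unsigned
        bool lt = (w(15, 8) < 1);    // high byte is zero
        return (eq && lt) ? 0 : 1;
    }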
-#define REF_BIN_OP_WITH_INT_ARITH(C_TYPE, _AP_W2, _AP_S2) \ - REF_BIN_OP_WITH_INT(+, plus, C_TYPE, (_AP_W2), (_AP_S2)) \ - REF_BIN_OP_WITH_INT(-, minus, C_TYPE, (_AP_W2), (_AP_S2)) \ - REF_BIN_OP_WITH_INT(*, mult, C_TYPE, (_AP_W2), (_AP_S2)) \ - REF_BIN_OP_WITH_INT(/, div, C_TYPE, (_AP_W2), (_AP_S2)) \ - REF_BIN_OP_WITH_INT(%, mod, C_TYPE, (_AP_W2), (_AP_S2)) - -REF_BIN_OP_WITH_INT_ARITH(bool, 1, false) -REF_BIN_OP_WITH_INT_ARITH(char, 8, CHAR_IS_SIGNED) -REF_BIN_OP_WITH_INT_ARITH(signed char, 8, true) -REF_BIN_OP_WITH_INT_ARITH(unsigned char, 8, false) -REF_BIN_OP_WITH_INT_ARITH(short, _AP_SIZE_short, true) -REF_BIN_OP_WITH_INT_ARITH(unsigned short, _AP_SIZE_short, false) -REF_BIN_OP_WITH_INT_ARITH(int, _AP_SIZE_int, true) -REF_BIN_OP_WITH_INT_ARITH(unsigned int, _AP_SIZE_int, false) -REF_BIN_OP_WITH_INT_ARITH(long, _AP_SIZE_long, true) -REF_BIN_OP_WITH_INT_ARITH(unsigned long, _AP_SIZE_long, false) -REF_BIN_OP_WITH_INT_ARITH(ap_slong, _AP_SIZE_ap_slong, true) -REF_BIN_OP_WITH_INT_ARITH(ap_ulong, _AP_SIZE_ap_slong, false) - -#undef REF_BIN_OP_WITH_INT_ARITH - -// bitwise and shift operators -#define REF_BIN_OP_WITH_INT_BITS(C_TYPE, _AP_W2, _AP_S2) \ - REF_BIN_OP_WITH_INT(&, logic, C_TYPE, (_AP_W2), (_AP_S2)) \ - REF_BIN_OP_WITH_INT(|, logic, C_TYPE, (_AP_W2), (_AP_S2)) \ - REF_BIN_OP_WITH_INT(^, logic, C_TYPE, (_AP_W2), (_AP_S2)) \ - REF_BIN_OP_WITH_INT(>>, arg1, C_TYPE, (_AP_W2), (_AP_S2)) \ - REF_BIN_OP_WITH_INT(<<, arg1, C_TYPE, (_AP_W2), (_AP_S2)) - -REF_BIN_OP_WITH_INT_BITS(bool, 1, false) -REF_BIN_OP_WITH_INT_BITS(char, 8, CHAR_IS_SIGNED) -REF_BIN_OP_WITH_INT_BITS(signed char, 8, true) -REF_BIN_OP_WITH_INT_BITS(unsigned char, 8, false) -REF_BIN_OP_WITH_INT_BITS(short, _AP_SIZE_short, true) -REF_BIN_OP_WITH_INT_BITS(unsigned short, _AP_SIZE_short, false) -REF_BIN_OP_WITH_INT_BITS(int, _AP_SIZE_int, true) -REF_BIN_OP_WITH_INT_BITS(unsigned int, _AP_SIZE_int, false) -REF_BIN_OP_WITH_INT_BITS(long, _AP_SIZE_long, true) -REF_BIN_OP_WITH_INT_BITS(unsigned long, _AP_SIZE_long, false) -REF_BIN_OP_WITH_INT_BITS(ap_slong, _AP_SIZE_ap_slong, true) -REF_BIN_OP_WITH_INT_BITS(ap_ulong, _AP_SIZE_ap_slong, false) - -#undef REF_BIN_OP_WITH_INT_BITS - -/* ap_range_ref with ap_range_ref - * ------------------------------------------------------------ - */ -#define REF_BIN_OP(BIN_OP, RTYPE) \ - template \ - INLINE \ - typename ap_int_base<_AP_W, false>::template RType<_AP_W2, false>::RTYPE \ - operator BIN_OP(const ap_range_ref<_AP_W, _AP_S>& lhs, \ - const ap_range_ref<_AP_W2, _AP_S2>& rhs) { \ - return (lhs.operator ap_int_base<_AP_W, false>())BIN_OP( \ - rhs.operator ap_int_base<_AP_W2, false>()); \ - } - -REF_BIN_OP(+, plus) -REF_BIN_OP(-, minus) -REF_BIN_OP(*, mult) -REF_BIN_OP(/, div) -REF_BIN_OP(%, mod) -REF_BIN_OP(&, logic) -REF_BIN_OP(|, logic) -REF_BIN_OP(^, logic) -REF_BIN_OP(>>, arg1) -REF_BIN_OP(<<, arg1) - -/* ap_concat_ref with ap_concat_ref. - * ------------------------------------------------------------ - */ - -//************************************************************************ -// Implement -// ap_int_base = ap_concat_ref OP ap_concat_ref -// for operators +, -, *, /, %, >>, <<, &, |, ^ -// Without these operators the operands are converted to int64 and -// larger results lose informations (higher order bits). 
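And REF_BIN_OP above lets two slices combine arithmetically through their ap_int_base conversions, with the usual RType widening so, for example, a sum keeps its carry bit. Sketch assuming ap_int.h:

    #include "ap_int.h"

    int main() {
        ap_uint<16> a = 0x1234, b = 0x00FF;
        ap_uint<9> sum = a(7, 0) + b(7, 0);  // 0x34 + 0xFF = 0x133, 9 bits
        return (sum == 0x133) ? 0 : 1;
    }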
-// -// operand OP -// / | -// left-concat right-concat -// / | / | -// -// -// _AP_LW1, _AP_LT1 (width and type of left-concat's left side) -// _AP_LW2, _AP_LT2 (width and type of left-concat's right side) -// Similarly for RHS of operand OP: _AP_RW1, AP_RW2, _AP_RT1, _AP_RT2 -// -// In Verilog 2001 result of concatenation is always unsigned even -// when both sides are signed. -//************************************************************************ - -#undef SYN_CONCAT_REF_BIN_OP - -#define SYN_CONCAT_REF_BIN_OP(BIN_OP, RTYPE) \ - template \ - INLINE typename ap_int_base<_AP_LW1 + _AP_LW2, false>::template RType< \ - _AP_RW1 + _AP_RW2, false>::RTYPE \ - operator BIN_OP( \ - const ap_concat_ref<_AP_LW1, _AP_LT1, _AP_LW2, _AP_LT2>& lhs, \ - const ap_concat_ref<_AP_RW1, _AP_RT1, _AP_RW2, _AP_RT2>& rhs) { \ - return lhs.get() BIN_OP rhs.get(); \ - } - -SYN_CONCAT_REF_BIN_OP(+, plus) -SYN_CONCAT_REF_BIN_OP(-, minus) -SYN_CONCAT_REF_BIN_OP(*, mult) -SYN_CONCAT_REF_BIN_OP(/, div) -SYN_CONCAT_REF_BIN_OP(%, mod) -SYN_CONCAT_REF_BIN_OP(&, logic) -SYN_CONCAT_REF_BIN_OP(|, logic) -SYN_CONCAT_REF_BIN_OP(^, logic) -SYN_CONCAT_REF_BIN_OP(>>, arg1) -SYN_CONCAT_REF_BIN_OP(<<, arg1) - -#undef SYN_CONCAT_REF_BIN_OP - -#define CONCAT_OP_WITH_INT(C_TYPE, _AP_WI, _AP_SI) \ - template \ - INLINE ap_int_base<_AP_W + _AP_WI, false> operator,( \ - const ap_int_base<_AP_W, _AP_S> &op1, C_TYPE op2) { \ - ap_int_base<_AP_WI + _AP_W, false> val(op2); \ - ap_int_base<_AP_WI + _AP_W, false> ret(op1); \ - ret <<= _AP_WI; \ - if (_AP_SI) { \ - val <<= _AP_W; \ - val >>= _AP_W; \ - } \ - ret |= val; \ - return ret; \ - } \ - template \ - INLINE ap_int_base<_AP_W + _AP_WI, false> operator,( \ - C_TYPE op1, const ap_int_base<_AP_W, _AP_S> &op2) { \ - ap_int_base<_AP_WI + _AP_W, false> val(op1); \ - ap_int_base<_AP_WI + _AP_W, false> ret(op2); \ - if (_AP_S) { \ - ret <<= _AP_WI; \ - ret >>= _AP_WI; \ - } \ - ret |= val << _AP_W; \ - return ret; \ - } \ - template \ - INLINE ap_int_base<_AP_W + _AP_WI, false> operator,( \ - const ap_range_ref<_AP_W, _AP_S> &op1, C_TYPE op2) { \ - ap_int_base<_AP_WI + _AP_W, false> val(op2); \ - ap_int_base<_AP_WI + _AP_W, false> ret(op1); \ - ret <<= _AP_WI; \ - if (_AP_SI) { \ - val <<= _AP_W; \ - val >>= _AP_W; \ - } \ - ret |= val; \ - return ret; \ - } \ - template \ - INLINE ap_int_base<_AP_W + _AP_WI, false> operator,( \ - C_TYPE op1, const ap_range_ref<_AP_W, _AP_S> &op2) { \ - ap_int_base<_AP_WI + _AP_W, false> val(op1); \ - ap_int_base<_AP_WI + _AP_W, false> ret(op2); \ - int len = op2.length(); \ - val <<= len; \ - ret |= val; \ - return ret; \ - } \ - template \ - INLINE ap_int_base<_AP_WI + 1, false> operator,( \ - const ap_bit_ref<_AP_W, _AP_S> &op1, C_TYPE op2) { \ - ap_int_base<_AP_WI + 1, false> val(op2); \ - val[_AP_WI] = op1; \ - return val; \ - } \ - template \ - INLINE ap_int_base<_AP_WI + 1, false> operator,( \ - C_TYPE op1, const ap_bit_ref<_AP_W, _AP_S> &op2) { \ - ap_int_base<_AP_WI + 1, false> val(op1); \ - val <<= 1; \ - val[0] = op2; \ - return val; \ - } \ - template \ - INLINE ap_int_base<_AP_W + _AP_W2 + _AP_WI, false> operator,( \ - const ap_concat_ref<_AP_W, _AP_T, _AP_W2, _AP_T2> &op1, C_TYPE op2) { \ - ap_int_base<_AP_WI + _AP_W + _AP_W2, _AP_SI> val(op2); \ - ap_int_base<_AP_WI + _AP_W + _AP_W2, _AP_SI> ret(op1); \ - if (_AP_SI) { \ - val <<= _AP_W + _AP_W2; \ - val >>= _AP_W + _AP_W2; \ - } \ - ret <<= _AP_WI; \ - ret |= val; \ - return ret; \ - } \ - template \ - INLINE ap_int_base<_AP_W + _AP_W2 + _AP_WI, false> operator,( \ - C_TYPE op1, const 
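The comment block above explains why SYN_CONCAT_REF_BIN_OP exists: without dedicated overloads, two ap_concat_ref operands would be converted to 64-bit integers first and wide results would silently lose their high-order bits. With them, the full concatenated widths flow into RType. Sketch assuming ap_int.h:

    #include "ap_int.h"

    int main() {
        ap_uint<4> a = 0xF, b = 0xF;
        ap_uint<16> prod = (a, b) * (a, b);  // 0xFF * 0xFF = 0xFE01, kept intact
        return (prod == 0xFE01) ? 0 : 1;
    }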
ap_concat_ref<_AP_W, _AP_T, _AP_W2, _AP_T2> &op2) { \ - ap_int_base<_AP_WI + _AP_W + _AP_W2, _AP_SI> val(op1); \ - ap_int_base<_AP_WI + _AP_W + _AP_W2, _AP_SI> ret(op2); \ - int len = op2.length(); \ - val <<= len; \ - ret |= val; \ - return ret; \ - } \ - template \ - INLINE ap_int_base<_AP_W + _AP_WI, false> operator,( \ - const af_range_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N> &op1, \ - C_TYPE op2) { \ - ap_int_base<_AP_WI + _AP_W, false> val(op2); \ - ap_int_base<_AP_WI + _AP_W, false> ret(op1); \ - if (_AP_SI) { \ - val <<= _AP_W; \ - val >>= _AP_W; \ - } \ - ret <<= _AP_WI; \ - ret |= val; \ - return ret; \ - } \ - template \ - INLINE ap_int_base<_AP_W + _AP_WI, false> operator,( \ - C_TYPE op1, \ - const af_range_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N> &op2) { \ - ap_int_base<_AP_WI + _AP_W, false> val(op1); \ - ap_int_base<_AP_WI + _AP_W, false> ret(op2); \ - int len = op2.length(); \ - val <<= len; \ - ret |= val; \ - return ret; \ - } \ - template \ - INLINE ap_int_base<1 + _AP_WI, false> operator,( \ - const af_bit_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N> &op1, \ - C_TYPE op2) { \ - ap_int_base<_AP_WI + 1, _AP_SI> val(op2); \ - val[_AP_WI] = op1; \ - return val; \ - } \ - template \ - INLINE ap_int_base<1 + _AP_WI, false> operator,( \ - C_TYPE op1, \ - const af_bit_ref<_AP_W, _AP_I, _AP_S, _AP_Q, _AP_O, _AP_N> &op2) { \ - ap_int_base<_AP_WI + 1, _AP_SI> val(op1); \ - val <<= 1; \ - val[0] = op2; \ - return val; \ - } - -CONCAT_OP_WITH_INT(bool, 1, false) -CONCAT_OP_WITH_INT(char, 8, CHAR_IS_SIGNED) -CONCAT_OP_WITH_INT(signed char, 8, true) -CONCAT_OP_WITH_INT(unsigned char, 8, false) -CONCAT_OP_WITH_INT(short, _AP_SIZE_short, true) -CONCAT_OP_WITH_INT(unsigned short, _AP_SIZE_short, false) -CONCAT_OP_WITH_INT(int, _AP_SIZE_int, true) -CONCAT_OP_WITH_INT(unsigned int, _AP_SIZE_int, false) -CONCAT_OP_WITH_INT(long, _AP_SIZE_long, true) -CONCAT_OP_WITH_INT(unsigned long, _AP_SIZE_long, false) -CONCAT_OP_WITH_INT(ap_slong, _AP_SIZE_ap_slong, true) -CONCAT_OP_WITH_INT(ap_ulong, _AP_SIZE_ap_slong, false) - -#undef CONCAT_OP_WITH_INT - -#define CONCAT_SHIFT_WITH_INT(C_TYPE, OP) \ - template \ - INLINE ap_uint<_AP_W + _AP_W1> operator OP( \ - const ap_concat_ref<_AP_W, _AP_T, _AP_W1, _AP_T1> lhs, C_TYPE rhs) { \ - return ap_uint<_AP_W + _AP_W1>(lhs).get() OP int(rhs); \ - } - -// FIXME int(rhs) may loose precision. - -CONCAT_SHIFT_WITH_INT(int, <<) -CONCAT_SHIFT_WITH_INT(unsigned int, <<) -CONCAT_SHIFT_WITH_INT(long, <<) -CONCAT_SHIFT_WITH_INT(unsigned long, <<) -CONCAT_SHIFT_WITH_INT(ap_slong, <<) -CONCAT_SHIFT_WITH_INT(ap_ulong, <<) - -CONCAT_SHIFT_WITH_INT(int, >>) -CONCAT_SHIFT_WITH_INT(unsigned int, >>) -CONCAT_SHIFT_WITH_INT(long, >>) -CONCAT_SHIFT_WITH_INT(unsigned long, >>) -CONCAT_SHIFT_WITH_INT(ap_slong, >>) -CONCAT_SHIFT_WITH_INT(ap_ulong, >>) - -#endif // ifndef __cplusplus -#endif // ifndef __AP_INT_REF_H__ - -// -*- cpp -*- diff --git a/hls4ml/hls4ml/templates/vivado/ap_types/ap_int_special.h b/hls4ml/hls4ml/templates/vivado/ap_types/ap_int_special.h deleted file mode 100644 index 3afc619..0000000 --- a/hls4ml/hls4ml/templates/vivado/ap_types/ap_int_special.h +++ /dev/null @@ -1,223 +0,0 @@ -/* - * Copyright 2011-2019 Xilinx, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __AP_INT_SPECIAL_H__
-#define __AP_INT_SPECIAL_H__
-
-#ifndef __AP_INT_H__
-#error "Only ap_fixed.h and ap_int.h can be included directly in user code."
-#endif
-
-#ifndef __SYNTHESIS__
-#include
-#include
-#endif
-// FIXME AP_AUTOCC cannot handle many standard headers, so declare instead of
-// include.
-// #include <complex>
-namespace std {
-template <typename _Tp> class complex;
-}
-
-/*
- TODO: Modernize the code using C++11/C++14
- 1. constexpr http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2016/p0415r0.html
- 2. move constructor
-*/
-
-namespace std {
-/*
- Specialize std::complex to zero-initialize ap_int.
-
- To reduce the area cost, ap_int is not zero initialized, just like basic
- types float or double. However, libstdc++ provides specialization for float,
- double and long double, initializing the imaginary part to 0 when not
- specified.
-
- This has become a difficulty in switching legacy code from these C types to
- ap_int. To ease the transform of legacy code, we have to implement
- specialization of std::complex<> for our type.
-
- As ap_int is a template, it is impossible to specialize only the methods
- that cause default initialization of value type in std::complex<>. An
- explicit full specialization of the template class has to be done, covering
- all the member functions and operators of std::complex<> as specified
- in standard 26.2.4 and 26.2.5.
-*/
-template <int _AP_W>
-class complex<ap_int<_AP_W> > {
- public:
- typedef ap_int<_AP_W> _Tp;
- typedef _Tp value_type;
-
- // 26.2.4/1
- // Constructor without argument
- // Default initialize, so that in dataflow, the variable is only written once.
- complex() : _M_real(_Tp()), _M_imag(_Tp()) {}
- // Constructor with ap_int.
- // Zero-initialize the imaginary part when not specified, so that `C(1) == C(1,0)`
- complex(const _Tp &__r, const _Tp &__i = _Tp(0))
- : _M_real(__r), _M_imag(__i) {}
-
- // Constructor with another complex number
- template <typename _Up>
- complex(const complex<_Up> &__z) : _M_real(__z.real()), _M_imag(__z.imag()) {}
-
-#if __cplusplus >= 201103L
- const _Tp& real() const { return _M_real; }
- const _Tp& imag() const { return _M_imag; }
-#else
- _Tp& real() { return _M_real; }
- const _Tp& real() const { return _M_real; }
- _Tp& imag() { return _M_imag; }
- const _Tp& imag() const { return _M_imag; }
-#endif
-
- void real(_Tp __val) { _M_real = __val; }
-
- void imag(_Tp __val) { _M_imag = __val; }
-
- // Assign this complex number with ap_int.
- // Zero-initialize the imaginary part, so that `C c; c = 1; c == C(1,0);`
- complex<_Tp> &operator=(const _Tp __t) {
- _M_real = __t;
- _M_imag = _Tp(0);
- return *this;
- }
-
- // 26.2.5/1
- // Add ap_int to this complex number.
- complex<_Tp> &operator+=(const _Tp &__t) {
- _M_real += __t;
- return *this;
- }
-
- // 26.2.5/3
- // Subtract ap_int from this complex number.
- complex<_Tp> &operator-=(const _Tp &__t) {
- _M_real -= __t;
- return *this;
- }
-
- // 26.2.5/5
- // Multiply this complex number by ap_int.
- complex<_Tp> &operator*=(const _Tp &__t) {
- _M_real *= __t;
- _M_imag *= __t;
- return *this;
- }
-
- // 26.2.5/7
- // Divide this complex number by ap_int.
- complex<_Tp> &operator/=(const _Tp &__t) { - _M_real /= __t; - _M_imag /= __t; - return *this; - } - - // Assign complex number to this complex number. - template - complex<_Tp> &operator=(const complex<_Up> &__z) { - _M_real = __z.real(); - _M_imag = __z.imag(); - return *this; - } - - // 26.2.5/9 - // Add complex number to this. - template - complex<_Tp> &operator+=(const complex<_Up> &__z) { - _M_real += __z.real(); - _M_imag += __z.imag(); - return *this; - } - - // 26.2.5/11 - // Subtract complex number from this. - template - complex<_Tp> &operator-=(const complex<_Up> &__z) { - _M_real -= __z.real(); - _M_imag -= __z.imag(); - return *this; - } - - // 26.2.5/13 - // Multiply this by complex number. - template - complex<_Tp> &operator*=(const complex<_Up> &__z) { - const _Tp __r = _M_real * __z.real() - _M_imag * __z.imag(); - _M_imag = _M_real * __z.imag() + _M_imag * __z.real(); - _M_real = __r; - return *this; - } - - // 26.2.5/15 - // Divide this by complex number. - template - complex<_Tp> &operator/=(const complex<_Up> &__z) { - complex<_Tp> cj (__z.real(), -__z.imag()); - complex<_Tp> a = (*this) * cj; - complex<_Tp> b = cj * __z; - _M_real = a.real() / b.real(); - _M_imag = a.imag() / b.real(); - return *this; - } - - private: - _Tp _M_real; - _Tp _M_imag; - -}; // class complex > - - -/* - Non-member operations - These operations are not required by standard in 26.2.6, but libstdc++ - defines them for - float, double or long double's specialization. -*/ -// Compare complex number with ap_int. -template -inline bool operator==(const complex > &__x, const ap_int<_AP_W> &__y) { - return __x.real() == __y && - __x.imag() == 0; -} - -// Compare ap_int with complex number. -template -inline bool operator==(const ap_int<_AP_W> &__x, const complex > &__y) { - return __x == __y.real() && - 0 == __y.imag(); -} - -// Compare complex number with ap_int. -template -inline bool operator!=(const complex > &__x, const ap_int<_AP_W> &__y) { - return __x.real() != __y || - __x.imag() != 0; -} - -// Compare ap_int with complex number. -template -inline bool operator!=(const ap_int<_AP_W> &__x, const complex > &__y) { - return __x != __y.real() || - 0 != __y.imag(); -} - -} // namespace std - -#endif // ifndef __AP_INT_SPECIAL_H__ - -// -*- cpp -*- diff --git a/hls4ml/hls4ml/templates/vivado/ap_types/ap_shift_reg.h b/hls4ml/hls4ml/templates/vivado/ap_types/ap_shift_reg.h deleted file mode 100644 index 94dba51..0000000 --- a/hls4ml/hls4ml/templates/vivado/ap_types/ap_shift_reg.h +++ /dev/null @@ -1,138 +0,0 @@ -/* -#- (c) Copyright 2011-2019 Xilinx, Inc. All rights reserved. -#- -#- This file contains confidential and proprietary information -#- of Xilinx, Inc. and is protected under U.S. and -#- international copyright and other intellectual property -#- laws. -#- -#- DISCLAIMER -#- This disclaimer is not a license and does not grant any -#- rights to the materials distributed herewith. 
Except as -#- otherwise provided in a valid license issued to you by -#- Xilinx, and to the maximum extent permitted by applicable -#- law: (1) THESE MATERIALS ARE MADE AVAILABLE "AS IS" AND -#- WITH ALL FAULTS, AND XILINX HEREBY DISCLAIMS ALL WARRANTIES -#- AND CONDITIONS, EXPRESS, IMPLIED, OR STATUTORY, INCLUDING -#- BUT NOT LIMITED TO WARRANTIES OF MERCHANTABILITY, NON- -#- INFRINGEMENT, OR FITNESS FOR ANY PARTICULAR PURPOSE; and -#- (2) Xilinx shall not be liable (whether in contract or tort, -#- including negligence, or under any other theory of -#- liability) for any loss or damage of any kind or nature -#- related to, arising under or in connection with these -#- materials, including for any direct, or any indirect, -#- special, incidental, or consequential loss or damage -#- (including loss of data, profits, goodwill, or any type of -#- loss or damage suffered as a result of any action brought -#- by a third party) even if such damage or loss was -#- reasonably foreseeable or Xilinx had been advised of the -#- possibility of the same. -#- -#- CRITICAL APPLICATIONS -#- Xilinx products are not designed or intended to be fail- -#- safe, or for use in any application requiring fail-safe -#- performance, such as life-support or safety devices or -#- systems, Class III medical devices, nuclear facilities, -#- applications related to the deployment of airbags, or any -#- other applications that could lead to death, personal -#- injury, or severe property or environmental damage -#- (individually and collectively, "Critical -#- Applications"). Customer assumes the sole risk and -#- liability of any use of Xilinx products in Critical -#- Applications, subject only to applicable laws and -#- regulations governing limitations on product liability. -#- -#- THIS COPYRIGHT NOTICE AND DISCLAIMER MUST BE RETAINED AS -#- PART OF THIS FILE AT ALL TIMES. -#- ************************************************************************ - - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -#ifndef __SIM_AP_SHIFT_REG_H__ -#define __SIM_AP_SHIFT_REG_H__ - - -/* - * This file contains a C++ model of shift register. - * It defines C level simulation model. 
- */ -#ifndef __cplusplus -#error C++ is required to include this header file -#else - -#include - -////////////////////////////////////////////// -// C level simulation model for ap_shift_reg -////////////////////////////////////////////// -template -class ap_shift_reg -{ - public: - /// Constructors - ap_shift_reg() { } - ap_shift_reg(const char* name) { } - /// Destructor - virtual ~ap_shift_reg() { } - - private: - /// Make copy constructor and assignment operator private - ap_shift_reg(const ap_shift_reg< __SHIFT_T__, __SHIFT_DEPTH__ >& shreg) - { - for (unsigned i = 0; i < __SHIFT_DEPTH__; ++i) - Array[i] = shreg.Array[i]; - } - - ap_shift_reg& operator = (const ap_shift_reg< __SHIFT_T__, - __SHIFT_DEPTH__ >& shreg) - { - for (unsigned i = 0; i < __SHIFT_DEPTH__; ++i) - Array[i] = shreg.Array[i]; - return *this; - } - - public: - // Shift the queue, push to back and read from a given address. - __SHIFT_T__ shift(__SHIFT_T__ DataIn, - unsigned int Addr = __SHIFT_DEPTH__ - 1, bool Enable = true) - { - assert(Addr < __SHIFT_DEPTH__ && - "Out-of-bound shift is found in ap_shift_reg."); - __SHIFT_T__ ret = Array[Addr]; - if (Enable) { - for (unsigned int i = __SHIFT_DEPTH__ - 1; i > 0; --i) - Array[i] = Array[i-1]; - Array[0] = DataIn; - } - return ret; - } - - // Read from a given address. - __SHIFT_T__ read(unsigned int Addr = __SHIFT_DEPTH__ - 1) const - { - assert(Addr < __SHIFT_DEPTH__ && - "Out-of-bound read is found in ap_shift_reg."); - return Array[Addr]; - } - - protected: - __SHIFT_T__ Array[__SHIFT_DEPTH__]; -}; - -#endif //__cplusplus - -#endif //__SIM_AP_SHIFT_REG_H__ - - diff --git a/hls4ml/hls4ml/templates/vivado/ap_types/etc/ap_private.h b/hls4ml/hls4ml/templates/vivado/ap_types/etc/ap_private.h deleted file mode 100644 index 0c29a0a..0000000 --- a/hls4ml/hls4ml/templates/vivado/ap_types/etc/ap_private.h +++ /dev/null @@ -1,7199 +0,0 @@ -/* - * Copyright 2011-2019 Xilinx, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef __AP_PRIVATE_H__ -#define __AP_PRIVATE_H__ - -// common macros and type declarations are now defined in ap_common.h, and -// ap_private becomes part of it. -#ifndef __AP_COMMON_H__ -#error "etc/ap_private.h cannot be included directly." -#endif - -// forward declarations -//template -//class ap_private; // moved to ap_common.h -template -struct _private_range_ref; -template -struct _private_bit_ref; - -// TODO clean up this part. 
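The ap_shift_reg model deleted above behaves like an addressable shift register (in hardware it typically maps to SRL-style primitives): shift() returns the element at Addr as it was before the shift, and read() is a side-effect-free tap. A minimal C-simulation sketch of that contract (editorial illustration; it assumes the deleted ap_shift_reg.h is on the include path):

    #include <cassert>
    #include "ap_shift_reg.h"

    int main() {
        ap_shift_reg<int, 4> sr;          // 4-deep shift register of int
        for (int i = 1; i <= 4; ++i)
            sr.shift(i);                  // push to front, shift the rest back
        assert(sr.read(0) == 4);          // newest element
        assert(sr.read(3) == 1);          // oldest element
        // With Enable == false, shift() only reads the addressed tap.
        int tap = sr.shift(0, 2, /*Enable=*/false);
        assert(tap == 2);
        return 0;
    }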
-#ifndef LLVM_SUPPORT_MATHEXTRAS_H
-#define LLVM_SUPPORT_MATHEXTRAS_H
-
-#ifdef _MSC_VER
-#if _MSC_VER <= 1500
-typedef __int8 int8_t;
-typedef unsigned __int8 uint8_t;
-typedef __int16 int16_t;
-typedef unsigned __int16 uint16_t;
-typedef __int32 int32_t;
-typedef unsigned __int32 uint32_t;
-typedef __int64 int64_t;
-typedef unsigned __int64 uint64_t;
-#else
-#include <stdint.h>
-#endif
-#else
-#include <stdint.h>
-#endif
-
-#ifndef INLINE
-#define INLINE inline
-// Enable to debug ap_int/ap_fixed
-// #define INLINE __attribute__((weak))
-#endif
-
-// NOTE: The following support functions use the _32/_64 extensions instead of
-// type overloading so that signed and unsigned integers can be used without
-// ambiguity.
-namespace AESL_std {
-template <typename DataType>
-DataType INLINE min(DataType a, DataType b) {
- return (a >= b) ? b : a;
-}
-
-template <typename DataType>
-DataType INLINE max(DataType a, DataType b) {
- return (a >= b) ? a : b;
-}
-} // namespace AESL_std
-
-// TODO clean up included headers.
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-
-namespace ap_private_ops {
-/// Hi_32 - This function returns the high 32 bits of a 64 bit value.
-static INLINE uint32_t Hi_32(uint64_t Value) {
- return static_cast<uint32_t>(Value >> 32);
-}
-
-/// Lo_32 - This function returns the low 32 bits of a 64 bit value.
-static INLINE uint32_t Lo_32(uint64_t Value) {
- return static_cast<uint32_t>(Value);
-}
-
-template <int _AP_W>
-INLINE bool isNegative(const ap_private<_AP_W, false>& a) {
- return false;
-}
-
-template <int _AP_W>
-INLINE bool isNegative(const ap_private<_AP_W, true>& a) {
- enum {
- APINT_BITS_PER_WORD = 64,
- _AP_N = (_AP_W + APINT_BITS_PER_WORD - 1) / APINT_BITS_PER_WORD
- };
- static const uint64_t sign_mask = 1ULL << ((_AP_W - 1) % APINT_BITS_PER_WORD);
- return (sign_mask & a.get_pVal(_AP_N - 1)) != 0;
-}
-
-/// CountLeadingZeros_32 - this function performs the platform optimal form of
-/// counting the number of zeros from the most significant bit to the first one
-/// bit. Ex. CountLeadingZeros_32(0x00F000FF) == 8.
-/// Returns 32 if the word is zero.
-static INLINE unsigned CountLeadingZeros_32(uint32_t Value) {
- unsigned Count; // result
-#if __GNUC__ >= 4
-// PowerPC is defined for __builtin_clz(0)
-#if !defined(__ppc__) && !defined(__ppc64__)
- if (Value == 0) return 32;
-#endif
- Count = __builtin_clz(Value);
-#else
- if (Value == 0) return 32;
- Count = 0;
- // bisection method for counting leading zeros
- for (unsigned Shift = 32 >> 1; Shift; Shift >>= 1) {
- uint32_t Tmp = (Value) >> (Shift);
- if (Tmp) {
- Value = Tmp;
- } else {
- Count |= Shift;
- }
- }
-#endif
- return Count;
-}
-
-/// CountLeadingZeros_64 - This function performs the platform optimal form
-/// of counting the number of zeros from the most significant bit to the first
-/// one bit (64 bit edition.)
-/// Returns 64 if the word is zero.
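/// (Illustration: CountLeadingZeros_64(0x00000000F0000000ULL) == 32 on the
/// portable path below, since the high word is all zeros and the low word
/// contributes CountLeadingZeros_32(0xF0000000) == 0 plus 32.)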
-static INLINE unsigned CountLeadingZeros_64(uint64_t Value) {
- unsigned Count; // result
-#if __GNUC__ >= 4
-// PowerPC is defined for __builtin_clzll(0)
-#if !defined(__ppc__) && !defined(__ppc64__)
- if (!Value) return 64;
-#endif
- Count = __builtin_clzll(Value);
-#else
- if (sizeof(long) == sizeof(int64_t)) {
- if (!Value) return 64;
- Count = 0;
- // bisection method for counting leading zeros
- for (unsigned Shift = 64 >> 1; Shift; Shift >>= 1) {
- uint64_t Tmp = (Value) >> (Shift);
- if (Tmp) {
- Value = Tmp;
- } else {
- Count |= Shift;
- }
- }
- } else {
- // get hi portion
- uint32_t Hi = Hi_32(Value);
-
- // if some bits in hi portion
- if (Hi) {
- // leading zeros are confined to the hi portion
- Count = CountLeadingZeros_32(Hi);
- } else {
- // get lo portion
- uint32_t Lo = Lo_32(Value);
- // same as 32 bit value, plus the 32 zero bits of the hi portion
- Count = CountLeadingZeros_32(Lo) + 32;
- }
- }
-#endif
- return Count;
-}
-
-/// CountTrailingZeros_64 - This function performs the platform optimal form
-/// of counting the number of zeros from the least significant bit to the first
-/// one bit (64 bit edition.)
-/// Returns 64 if the word is zero.
-static INLINE unsigned CountTrailingZeros_64(uint64_t Value) {
-#if __GNUC__ >= 4
- return (Value != 0) ? __builtin_ctzll(Value) : 64;
-#else
- static const unsigned Mod67Position[] = {
- 64, 0, 1, 39, 2, 15, 40, 23, 3, 12, 16, 59, 41, 19, 24, 54, 4,
- 64, 13, 10, 17, 62, 60, 28, 42, 30, 20, 51, 25, 44, 55, 47, 5, 32,
- 65, 38, 14, 22, 11, 58, 18, 53, 63, 9, 61, 27, 29, 50, 43, 46, 31,
- 37, 21, 57, 52, 8, 26, 49, 45, 36, 56, 7, 48, 35, 6, 34, 33, 0};
- return Mod67Position[(uint64_t)(-(int64_t)Value & (int64_t)Value) % 67];
-#endif
-}
-
-/// CountPopulation_64 - this function counts the number of set bits in a value,
-/// (64 bit edition.)
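/// (Illustration: the SWAR fallback below folds adjacent bit groups, so
/// CountPopulation_64(0x00FF00FF00FF00FFULL) == 32; likewise, the mod-67
/// table in CountTrailingZeros_64 above maps the isolated lowest set bit
/// Value & -Value to its index, e.g. CountTrailingZeros_64(8) == 3.)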
-static INLINE unsigned CountPopulation_64(uint64_t Value) { -#if __GNUC__ >= 4 - return __builtin_popcountll(Value); -#else - uint64_t v = Value - (((Value) >> 1) & 0x5555555555555555ULL); - v = (v & 0x3333333333333333ULL) + (((v) >> 2) & 0x3333333333333333ULL); - v = (v + ((v) >> 4)) & 0x0F0F0F0F0F0F0F0FULL; - return unsigned((uint64_t)(v * 0x0101010101010101ULL) >> 56); -#endif -} - -static INLINE uint32_t countLeadingOnes_64(uint64_t __V, uint32_t skip) { - uint32_t Count = 0; - if (skip) (__V) <<= (skip); - while (__V && (__V & (1ULL << 63))) { - Count++; - (__V) <<= 1; - } - return Count; -} - -static INLINE std::string oct2Bin(char oct) { - switch (oct) { - case '\0': { - return ""; - } - case '.': { - return "."; - } - case '0': { - return "000"; - } - case '1': { - return "001"; - } - case '2': { - return "010"; - } - case '3': { - return "011"; - } - case '4': { - return "100"; - } - case '5': { - return "101"; - } - case '6': { - return "110"; - } - case '7': { - return "111"; - } - } - assert(0 && "Invalid character in digit string"); - return ""; -} - -static INLINE std::string hex2Bin(char hex) { - switch (hex) { - case '\0': { - return ""; - } - case '.': { - return "."; - } - case '0': { - return "0000"; - } - case '1': { - return "0001"; - } - case '2': { - return "0010"; - } - case '3': { - return "0011"; - } - case '4': { - return "0100"; - } - case '5': { - return "0101"; - } - case '6': { - return "0110"; - } - case '7': { - return "0111"; - } - case '8': { - return "1000"; - } - case '9': { - return "1001"; - } - case 'A': - case 'a': { - return "1010"; - } - case 'B': - case 'b': { - return "1011"; - } - case 'C': - case 'c': { - return "1100"; - } - case 'D': - case 'd': { - return "1101"; - } - case 'E': - case 'e': { - return "1110"; - } - case 'F': - case 'f': { - return "1111"; - } - } - assert(0 && "Invalid character in digit string"); - return ""; -} - -static INLINE uint32_t decode_digit(char cdigit, int radix) { - uint32_t digit = 0; - if (radix == 16) { -#define isxdigit(c) \ - (((c) >= '0' && (c) <= '9') || ((c) >= 'a' && (c) <= 'f') || \ - ((c) >= 'A' && (c) <= 'F')) -#define isdigit(c) ((c) >= '0' && (c) <= '9') - if (!isxdigit(cdigit)) assert(0 && "Invalid hex digit in string"); - if (isdigit(cdigit)) - digit = cdigit - '0'; - else if (cdigit >= 'a') - digit = cdigit - 'a' + 10; - else if (cdigit >= 'A') - digit = cdigit - 'A' + 10; - else - assert(0 && "huh? we shouldn't get here"); - } else if (isdigit(cdigit)) { - digit = cdigit - '0'; - } else { - assert(0 && "Invalid character in digit string"); - } -#undef isxdigit -#undef isdigit - return digit; -} - -// Determine the radix of "val". -static INLINE std::string parseString(const std::string& input, unsigned char& radix) { - size_t len = input.length(); - if (len == 0) { - if (radix == 0) radix = 10; - return input; - } - - size_t startPos = 0; - // Trim whitespace - while (input[startPos] == ' ' && startPos < len) startPos++; - while (input[len - 1] == ' ' && startPos < len) len--; - - std::string val = input.substr(startPos, len - startPos); - // std::cout << "val = " << val << "\n"; - len = val.length(); - startPos = 0; - - // If the length of the string is less than 2, then radix - // is decimal and there is no exponent. 
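// (Illustration, not from the original source: parseString("1.5e3", r) with
// r == 10 returns "1500" and leaves r at 10, while parseString("0x1.8p1", r)
// rewrites the hex mantissa in binary, sets r to 2, and returns "00011.000",
// i.e. 3.0.)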
- if (len < 2) { - if (radix == 0) radix = 10; - return val; - } - - bool isNegative = false; - std::string ans; - - // First check to see if we start with a sign indicator - if (val[0] == '-') { - ans = "-"; - ++startPos; - isNegative = true; - } else if (val[0] == '+') - ++startPos; - - if (len - startPos < 2) { - if (radix == 0) radix = 10; - return val; - } - - if (val.substr(startPos, 2) == "0x" || val.substr(startPos, 2) == "0X") { - // If we start with "0x", then the radix is hex. - radix = 16; - startPos += 2; - } else if (val.substr(startPos, 2) == "0b" || - val.substr(startPos, 2) == "0B") { - // If we start with "0b", then the radix is binary. - radix = 2; - startPos += 2; - } else if (val.substr(startPos, 2) == "0o" || - val.substr(startPos, 2) == "0O") { - // If we start with "0o", then the radix is octal. - radix = 8; - startPos += 2; - } else if (radix == 0) { - radix = 10; - } - - int exp = 0; - if (radix == 10) { - // If radix is decimal, then see if there is an - // exponent indicator. - size_t expPos = val.find('e'); - bool has_exponent = true; - if (expPos == std::string::npos) expPos = val.find('E'); - if (expPos == std::string::npos) { - // No exponent indicator, so the mantissa goes to the end. - expPos = len; - has_exponent = false; - } - // std::cout << "startPos = " << startPos << " " << expPos << "\n"; - - ans += val.substr(startPos, expPos - startPos); - if (has_exponent) { - // Parse the exponent. - std::istringstream iss(val.substr(expPos + 1, len - expPos - 1)); - iss >> exp; - } - } else { - // Check for a binary exponent indicator. - size_t expPos = val.find('p'); - bool has_exponent = true; - if (expPos == std::string::npos) expPos = val.find('P'); - if (expPos == std::string::npos) { - // No exponent indicator, so the mantissa goes to the end. - expPos = len; - has_exponent = false; - } - - // std::cout << "startPos = " << startPos << " " << expPos << "\n"; - - assert(startPos <= expPos); - // Convert to binary as we go. - for (size_t i = startPos; i < expPos; ++i) { - if (radix == 16) { - ans += hex2Bin(val[i]); - } else if (radix == 8) { - ans += oct2Bin(val[i]); - } else { // radix == 2 - ans += val[i]; - } - } - // End in binary - radix = 2; - if (has_exponent) { - // Parse the exponent. - std::istringstream iss(val.substr(expPos + 1, len - expPos - 1)); - iss >> exp; - } - } - if (exp == 0) return ans; - - size_t decPos = ans.find('.'); - if (decPos == std::string::npos) decPos = ans.length(); - if ((int)decPos + exp >= (int)ans.length()) { - int i = decPos; - for (; i < (int)ans.length() - 1; ++i) ans[i] = ans[i + 1]; - for (; i < (int)ans.length(); ++i) ans[i] = '0'; - for (; i < (int)decPos + exp; ++i) ans += '0'; - return ans; - } else if ((int)decPos + exp < (int)isNegative) { - std::string dupAns = "0."; - if (ans[0] == '-') dupAns = "-0."; - for (int i = 0; i < isNegative - (int)decPos - exp; ++i) dupAns += '0'; - for (size_t i = isNegative; i < ans.length(); ++i) - if (ans[i] != '.') dupAns += ans[i]; - return dupAns; - } - - if (exp > 0) - for (size_t i = decPos; i < decPos + exp; ++i) ans[i] = ans[i + 1]; - else { - if (decPos == ans.length()) ans += ' '; - for (int i = decPos; i > (int)decPos + exp; --i) ans[i] = ans[i - 1]; - } - ans[decPos + exp] = '.'; - return ans; -} - -/// sub_1 - This function subtracts a single "digit" (64-bit word), y, from -/// the multi-digit integer array, x[], propagating the borrowed 1 value until -/// no further borrowing is neeeded or it runs out of "digits" in x. 
The result -/// is 1 if "borrowing" exhausted the digits in x, or 0 if x was not exhausted. -/// In other words, if y > x then this function returns 1, otherwise 0. -/// @returns the borrow out of the subtraction -static INLINE bool sub_1(uint64_t x[], uint32_t len, uint64_t y) { - for (uint32_t i = 0; i < len; ++i) { - uint64_t __X = x[i]; - x[i] -= y; - if (y > __X) - y = 1; // We have to "borrow 1" from next "digit" - else { - y = 0; // No need to borrow - break; // Remaining digits are unchanged so exit early - } - } - return (y != 0); -} - -/// add_1 - This function adds a single "digit" integer, y, to the multiple -/// "digit" integer array, x[]. x[] is modified to reflect the addition and -/// 1 is returned if there is a carry out, otherwise 0 is returned. -/// @returns the carry of the addition. -static INLINE bool add_1(uint64_t dest[], uint64_t x[], uint32_t len, - uint64_t y) { - for (uint32_t i = 0; i < len; ++i) { - dest[i] = y + x[i]; - if (dest[i] < y) - y = 1; // Carry one to next digit. - else { - y = 0; // No need to carry so exit early - break; - } - } - return (y != 0); -} - -/// add - This function adds the integer array x to the integer array Y and -/// places the result in dest. -/// @returns the carry out from the addition -/// @brief General addition of 64-bit integer arrays -static INLINE bool add(uint64_t* dest, const uint64_t* x, const uint64_t* y, - uint32_t destlen, uint32_t xlen, uint32_t ylen, - bool xsigned, bool ysigned) { - bool carry = false; - uint32_t len = AESL_std::min(xlen, ylen); - uint32_t i; - for (i = 0; i < len && i < destlen; ++i) { - uint64_t limit = - AESL_std::min(x[i], y[i]); // must come first in case dest == x - dest[i] = x[i] + y[i] + carry; - carry = dest[i] < limit || (carry && dest[i] == limit); - } - if (xlen > ylen) { - const uint64_t yext = ysigned && int64_t(y[ylen - 1]) < 0 ? -1 : 0; - for (i = ylen; i < xlen && i < destlen; i++) { - uint64_t limit = AESL_std::min(x[i], yext); - dest[i] = x[i] + yext + carry; - carry = (dest[i] < limit) || (carry && dest[i] == limit); - } - } else if (ylen > xlen) { - const uint64_t xext = xsigned && int64_t(x[xlen - 1]) < 0 ? -1 : 0; - for (i = xlen; i < ylen && i < destlen; i++) { - uint64_t limit = AESL_std::min(xext, y[i]); - dest[i] = xext + y[i] + carry; - carry = (dest[i] < limit) || (carry && dest[i] == limit); - } - } - return carry; -} - -/// @returns returns the borrow out. -/// @brief Generalized subtraction of 64-bit integer arrays. -static INLINE bool sub(uint64_t* dest, const uint64_t* x, const uint64_t* y, - uint32_t destlen, uint32_t xlen, uint32_t ylen, - bool xsigned, bool ysigned) { - bool borrow = false; - uint32_t i; - uint32_t len = AESL_std::min(xlen, ylen); - for (i = 0; i < len && i < destlen; ++i) { - uint64_t x_tmp = borrow ? x[i] - 1 : x[i]; - borrow = y[i] > x_tmp || (borrow && x[i] == 0); - dest[i] = x_tmp - y[i]; - } - if (xlen > ylen) { - const uint64_t yext = ysigned && int64_t(y[ylen - 1]) < 0 ? -1 : 0; - for (i = ylen; i < xlen && i < destlen; i++) { - uint64_t x_tmp = borrow ? x[i] - 1 : x[i]; - borrow = yext > x_tmp || (borrow && x[i] == 0); - dest[i] = x_tmp - yext; - } - } else if (ylen > xlen) { - const uint64_t xext = xsigned && int64_t(x[xlen - 1]) < 0 ? -1 : 0; - for (i = xlen; i < ylen && i < destlen; i++) { - uint64_t x_tmp = borrow ? 
xext - 1 : xext;
- borrow = y[i] > x_tmp || (borrow && xext == 0);
- dest[i] = x_tmp - y[i];
- }
- }
- return borrow;
-}
-
-/// Subtracts the RHS ap_private from this ap_private
-/// @returns this, after subtraction
-/// @brief Subtraction assignment operator.
-
-/// Multiplies an integer array, x, by a uint64_t integer and places the result
-/// into dest.
-/// @returns the carry out of the multiplication.
-/// @brief Multiply a multi-digit ap_private by a single digit (64-bit) integer.
-static INLINE uint64_t mul_1(uint64_t dest[], const uint64_t x[], uint32_t len,
- uint64_t y) {
- // Split y into high 32-bit part (hy) and low 32-bit part (ly)
- uint64_t ly = y & 0xffffffffULL, hy = (y) >> 32;
- uint64_t carry = 0;
- static const uint64_t two_power_32 = 1ULL << 32;
- // For each digit of x.
- for (uint32_t i = 0; i < len; ++i) {
- // Split x into high and low words
- uint64_t lx = x[i] & 0xffffffffULL;
- uint64_t hx = (x[i]) >> 32;
- // hasCarry - A flag to indicate if there is a carry to the next digit.
- // hasCarry == 0, no carry
- // hasCarry == 1, has carry
- // hasCarry == 2, no carry and the calculation result == 0.
- uint8_t hasCarry = 0;
- dest[i] = carry + lx * ly;
- // Determine if the add above introduces carry.
- hasCarry = (dest[i] < carry) ? 1 : 0;
- carry = hx * ly + ((dest[i]) >> 32) + (hasCarry ? two_power_32 : 0);
- // The upper limit of carry can be (2^32 - 1)(2^32 - 1) +
- // (2^32 - 1) + 2^32 = 2^64.
- hasCarry = (!carry && hasCarry) ? 1 : (!carry ? 2 : 0);
-
- carry += (lx * hy) & 0xffffffffULL;
- dest[i] = ((carry) << 32) | (dest[i] & 0xffffffffULL);
- carry = (((!carry && hasCarry != 2) || hasCarry == 1) ? two_power_32 : 0) +
- ((carry) >> 32) + ((lx * hy) >> 32) + hx * hy;
- }
- return carry;
-}
-
-/// Multiplies integer array x by integer array y and stores the result into
-/// the integer array dest. Note that dest's size must be >= xlen + ylen in
-/// order to do a full precision computation. If it is not, then only the
-/// low-order words are returned.
-/// @brief Generalized multiplication of integer arrays.
-static INLINE void mul(uint64_t dest[], const uint64_t x[], uint32_t xlen,
- const uint64_t y[], uint32_t ylen, uint32_t destlen) {
- assert(xlen > 0);
- assert(ylen > 0);
- assert(destlen >= xlen + ylen);
- if (xlen < destlen) dest[xlen] = mul_1(dest, x, xlen, y[0]);
- for (uint32_t i = 1; i < ylen; ++i) {
- uint64_t ly = y[i] & 0xffffffffULL, hy = (y[i]) >> 32;
- uint64_t carry = 0, lx = 0, hx = 0;
- for (uint32_t j = 0; j < xlen; ++j) {
- lx = x[j] & 0xffffffffULL;
- hx = (x[j]) >> 32;
- // hasCarry - A flag to indicate if there is a carry.
- // hasCarry == 0, no carry
- // hasCarry == 1, has carry
- // hasCarry == 2, no carry and the calculation result == 0.
- uint8_t hasCarry = 0;
- uint64_t resul = carry + lx * ly;
- hasCarry = (resul < carry) ? 1 : 0;
- carry = (hasCarry ? (1ULL << 32) : 0) + hx * ly + ((resul) >> 32);
- hasCarry = (!carry && hasCarry) ? 1 : (!carry ? 2 : 0);
- carry += (lx * hy) & 0xffffffffULL;
- resul = ((carry) << 32) | (resul & 0xffffffffULL);
- if (i + j < destlen) dest[i + j] += resul;
- carry =
- (((!carry && hasCarry != 2) || hasCarry == 1) ? (1ULL << 32) : 0) +
- ((carry) >> 32) + (dest[i + j] < resul ? 1 : 0) + ((lx * hy) >> 32) +
- hx * hy;
- }
- if (i + xlen < destlen) dest[i + xlen] = carry;
- }
-}
-
-/// Implementation of Knuth's Algorithm D (Division of nonnegative integers)
-/// from "Art of Computer Programming, Volume 2", section 4.3.1, p. 272. The
-/// variables here have the same names as in the algorithm. Comments explain
-/// the algorithm and any deviation from it.
-static INLINE void KnuthDiv(uint32_t* u, uint32_t* v, uint32_t* q, uint32_t* r,
- uint32_t m, uint32_t n) {
- assert(u && "Must provide dividend");
- assert(v && "Must provide divisor");
- assert(q && "Must provide quotient");
- assert(u != v && u != q && v != q && "Must use different memory");
- assert(n > 1 && "n must be > 1");
-
- // Knuth uses the value b as the base of the number system. In our case b
- // is 2^32 so we just set it to -1u.
- uint64_t b = uint64_t(1) << 32;
-
- // DEBUG(cerr << "KnuthDiv: m=" << m << " n=" << n << '\n');
- // DEBUG(cerr << "KnuthDiv: original:");
- // DEBUG(for (int i = m+n; i >=0; i--) cerr << " " << std::setbase(16) <<
- // u[i]);
- // DEBUG(cerr << " by");
- // DEBUG(for (int i = n; i >0; i--) cerr << " " << std::setbase(16) <<
- // v[i-1]);
- // DEBUG(cerr << '\n');
- // D1. [Normalize.] Set d = b / (v[n-1] + 1) and multiply all the digits of
- // u and v by d. Note that we have taken Knuth's advice here to use a power
- // of 2 value for d such that d * v[n-1] >= b/2 (b is the base). A power of
- // 2 allows us to shift instead of multiply and it is easy to determine the
- // shift amount from the leading zeros. We are basically normalizing the u
- // and v so that their high bits are shifted to the top of v's range without
- // overflow. Note that this can require an extra word in u so that u must
- // be of length m+n+1.
- uint32_t shift = CountLeadingZeros_32(v[n - 1]);
- uint32_t v_carry = 0;
- uint32_t u_carry = 0;
- if (shift) {
- for (uint32_t i = 0; i < m + n; ++i) {
- uint32_t u_tmp = (u[i]) >> (32 - shift);
- u[i] = ((u[i]) << (shift)) | u_carry;
- u_carry = u_tmp;
- }
- for (uint32_t i = 0; i < n; ++i) {
- uint32_t v_tmp = (v[i]) >> (32 - shift);
- v[i] = ((v[i]) << (shift)) | v_carry;
- v_carry = v_tmp;
- }
- }
- u[m + n] = u_carry;
- // DEBUG(cerr << "KnuthDiv: normal:");
- // DEBUG(for (int i = m+n; i >=0; i--) cerr << " " << std::setbase(16) <<
- // u[i]);
- // DEBUG(cerr << " by");
- // DEBUG(for (int i = n; i >0; i--) cerr << " " << std::setbase(16) <<
- // v[i-1]);
- // DEBUG(cerr << '\n');
-
- // D2. [Initialize j.] Set j to m. This is the loop counter over the places.
- int j = m;
- do {
- // DEBUG(cerr << "KnuthDiv: quotient digit #" << j << '\n');
- // D3. [Calculate q'.].
- // Set qp = (u[j+n]*b + u[j+n-1]) / v[n-1]. (qp=qprime=q')
- // Set rp = (u[j+n]*b + u[j+n-1]) % v[n-1]. (rp=rprime=r')
- // Now test if qp == b or qp*v[n-2] > b*rp + u[j+n-2]; if so, decrease
- // qp by 1, increase rp by v[n-1], and repeat this test if rp < b. The test
- // on v[n-2] determines at high speed most of the cases in which the trial
- // value qp is one too large, and it eliminates all cases where qp is two
- // too large.
- uint64_t dividend = ((uint64_t(u[j + n]) << 32) + u[j + n - 1]);
- // DEBUG(cerr << "KnuthDiv: dividend == " << dividend << '\n');
- uint64_t qp = dividend / v[n - 1];
- uint64_t rp = dividend % v[n - 1];
- if (qp == b || qp * v[n - 2] > b * rp + u[j + n - 2]) {
- qp--;
- rp += v[n - 1];
- if (rp < b && (qp == b || qp * v[n - 2] > b * rp + u[j + n - 2])) qp--;
- }
- // DEBUG(cerr << "KnuthDiv: qp == " << qp << ", rp == " << rp << '\n');
-
- // D4. [Multiply and subtract.] Replace (u[j+n]u[j+n-1]...u[j]) with
- // (u[j+n]u[j+n-1]..u[j]) - qp * (v[n-1]...v[1]v[0]). This computation
- // consists of a simple multiplication by a one-place number, combined with
- // a subtraction.
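// (Base-10 illustration of D3, not from the original source: dividing
// u = 4537 by v = 96, at the step whose window is 453 the trial digit
// qp = 45/9 == 5 overshoots; the v[n-2] test catches it, since 5*6 > 10*0 + 3,
// and decrements qp to 4, so the multiply-and-subtract below computes
// 453 - 4*96 == 69 and stays non-negative for this digit.)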
- bool isNeg = false; - for (uint32_t i = 0; i < n; ++i) { - uint64_t u_tmp = uint64_t(u[j + i]) | ((uint64_t(u[j + i + 1])) << 32); - uint64_t subtrahend = uint64_t(qp) * uint64_t(v[i]); - bool borrow = subtrahend > u_tmp; - /*DEBUG(cerr << "KnuthDiv: u_tmp == " << u_tmp - << ", subtrahend == " << subtrahend - << ", borrow = " << borrow << '\n');*/ - - uint64_t result = u_tmp - subtrahend; - uint32_t k = j + i; - u[k++] = (uint32_t)(result & (b - 1)); // subtract low word - u[k++] = (uint32_t)((result) >> 32); // subtract high word - while (borrow && k <= m + n) { // deal with borrow to the left - borrow = u[k] == 0; - u[k]--; - k++; - } - isNeg |= borrow; - /*DEBUG(cerr << "KnuthDiv: u[j+i] == " << u[j+i] << ", u[j+i+1] == " << - u[j+i+1] << '\n');*/ - } - /*DEBUG(cerr << "KnuthDiv: after subtraction:"); - DEBUG(for (int i = m+n; i >=0; i--) cerr << " " << u[i]); - DEBUG(cerr << '\n');*/ - // The digits (u[j+n]...u[j]) should be kept positive; if the result of - // this step is actually negative, (u[j+n]...u[j]) should be left as the - // true value plus b**(n+1), namely as the b's complement of - // the true value, and a "borrow" to the left should be remembered. - // - if (isNeg) { - bool carry = true; // true because b's complement is "complement + 1" - for (uint32_t i = 0; i <= m + n; ++i) { - u[i] = ~u[i] + carry; // b's complement - carry = carry && u[i] == 0; - } - } - /*DEBUG(cerr << "KnuthDiv: after complement:"); - DEBUG(for (int i = m+n; i >=0; i--) cerr << " " << u[i]); - DEBUG(cerr << '\n');*/ - - // D5. [Test remainder.] Set q[j] = qp. If the result of step D4 was - // negative, go to step D6; otherwise go on to step D7. - q[j] = (uint32_t)qp; - if (isNeg) { - // D6. [Add back]. The probability that this step is necessary is very - // small, on the order of only 2/b. Make sure that test data accounts for - // this possibility. Decrease q[j] by 1 - q[j]--; - // and add (0v[n-1]...v[1]v[0]) to (u[j+n]u[j+n-1]...u[j+1]u[j]). - // A carry will occur to the left of u[j+n], and it should be ignored - // since it cancels with the borrow that occurred in D4. - bool carry = false; - for (uint32_t i = 0; i < n; i++) { - uint32_t limit = AESL_std::min(u[j + i], v[i]); - u[j + i] += v[i] + carry; - carry = u[j + i] < limit || (carry && u[j + i] == limit); - } - u[j + n] += carry; - } - /*DEBUG(cerr << "KnuthDiv: after correction:"); - DEBUG(for (int i = m+n; i >=0; i--) cerr <<" " << u[i]); - DEBUG(cerr << "\nKnuthDiv: digit result = " << q[j] << '\n');*/ - - // D7. [Loop on j.] Decrease j by one. Now if j >= 0, go back to D3. - } while (--j >= 0); - - /*DEBUG(cerr << "KnuthDiv: quotient:"); - DEBUG(for (int i = m; i >=0; i--) cerr <<" " << q[i]); - DEBUG(cerr << '\n');*/ - - // D8. [Unnormalize]. Now q[...] is the desired quotient, and the desired - // remainder may be obtained by dividing u[...] by d. If r is non-null we - // compute the remainder (urem uses this). - if (r) { - // The value d is expressed by the "shift" value above since we avoided - // multiplication by d by using a shift left. So, all we have to do is - // shift right here. 
In order to mak - if (shift) { - uint32_t carry = 0; - // DEBUG(cerr << "KnuthDiv: remainder:"); - for (int i = n - 1; i >= 0; i--) { - r[i] = ((u[i]) >> (shift)) | carry; - carry = (u[i]) << (32 - shift); - // DEBUG(cerr << " " << r[i]); - } - } else { - for (int i = n - 1; i >= 0; i--) { - r[i] = u[i]; - // DEBUG(cerr << " " << r[i]); - } - } - // DEBUG(cerr << '\n'); - } - // DEBUG(cerr << std::setbase(10) << '\n'); -} - -template -void divide(const ap_private<_AP_W, _AP_S>& LHS, uint32_t lhsWords, - const ap_private<_AP_W, _AP_S>& RHS, uint32_t rhsWords, - ap_private<_AP_W, _AP_S>* Quotient, - ap_private<_AP_W, _AP_S>* Remainder) { - assert(lhsWords >= rhsWords && "Fractional result"); - enum { APINT_BITS_PER_WORD = 64 }; - // First, compose the values into an array of 32-bit words instead of - // 64-bit words. This is a necessity of both the "short division" algorithm - // and the the Knuth "classical algorithm" which requires there to be native - // operations for +, -, and * on an m bit value with an m*2 bit result. We - // can't use 64-bit operands here because we don't have native results of - // 128-bits. Furthremore, casting the 64-bit values to 32-bit values won't - // work on large-endian machines. - uint64_t mask = ~0ull >> (sizeof(uint32_t) * 8); - uint32_t n = rhsWords * 2; - uint32_t m = (lhsWords * 2) - n; - - // Allocate space for the temporary values we need either on the stack, if - // it will fit, or on the heap if it won't. - uint32_t SPACE[128]; - uint32_t* __U = 0; - uint32_t* __V = 0; - uint32_t* __Q = 0; - uint32_t* __R = 0; - if ((Remainder ? 4 : 3) * n + 2 * m + 1 <= 128) { - __U = &SPACE[0]; - __V = &SPACE[m + n + 1]; - __Q = &SPACE[(m + n + 1) + n]; - if (Remainder) __R = &SPACE[(m + n + 1) + n + (m + n)]; - } else { - __U = new uint32_t[m + n + 1]; - __V = new uint32_t[n]; - __Q = new uint32_t[m + n]; - if (Remainder) __R = new uint32_t[n]; - } - - // Initialize the dividend - memset(__U, 0, (m + n + 1) * sizeof(uint32_t)); - for (unsigned i = 0; i < lhsWords; ++i) { - uint64_t tmp = LHS.get_pVal(i); - __U[i * 2] = (uint32_t)(tmp & mask); - __U[i * 2 + 1] = (tmp) >> (sizeof(uint32_t) * 8); - } - __U[m + n] = 0; // this extra word is for "spill" in the Knuth algorithm. - - // Initialize the divisor - memset(__V, 0, (n) * sizeof(uint32_t)); - for (unsigned i = 0; i < rhsWords; ++i) { - uint64_t tmp = RHS.get_pVal(i); - __V[i * 2] = (uint32_t)(tmp & mask); - __V[i * 2 + 1] = (tmp) >> (sizeof(uint32_t) * 8); - } - - // initialize the quotient and remainder - memset(__Q, 0, (m + n) * sizeof(uint32_t)); - if (Remainder) memset(__R, 0, n * sizeof(uint32_t)); - - // Now, adjust m and n for the Knuth division. n is the number of words in - // the divisor. m is the number of words by which the dividend exceeds the - // divisor (i.e. m+n is the length of the dividend). These sizes must not - // contain any zero words or the Knuth algorithm fails. - for (unsigned i = n; i > 0 && __V[i - 1] == 0; i--) { - n--; - m++; - } - for (unsigned i = m + n; i > 0 && __U[i - 1] == 0; i--) m--; - - // If we're left with only a single word for the divisor, Knuth doesn't work - // so we implement the short division algorithm here. This is much simpler - // and faster because we are certain that we can divide a 64-bit quantity - // by a 32-bit quantity at hardware speed and short division is simply a - // series of such operations. This is just like doing short division but we - // are using base 2^32 instead of base 10. 
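// (Illustration: with n == 1, __U = {5, 1} -- the value 2^32 + 5 -- divided
// by __V[0] = 2 gives __Q = {0x80000002, 0} -- the value 2^31 + 2 -- with
// remainder 1: ordinary schoolbook short division on base-2^32 digits.)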
- assert(n != 0 && "Divide by zero?"); - if (n == 1) { - uint32_t divisor = __V[0]; - uint32_t remainder = 0; - for (int i = m + n - 1; i >= 0; i--) { - uint64_t partial_dividend = (uint64_t(remainder)) << 32 | __U[i]; - if (partial_dividend == 0) { - __Q[i] = 0; - remainder = 0; - } else if (partial_dividend < divisor) { - __Q[i] = 0; - remainder = (uint32_t)partial_dividend; - } else if (partial_dividend == divisor) { - __Q[i] = 1; - remainder = 0; - } else { - __Q[i] = (uint32_t)(partial_dividend / divisor); - remainder = (uint32_t)(partial_dividend - (__Q[i] * divisor)); - } - } - if (__R) __R[0] = remainder; - } else { - // Now we're ready to invoke the Knuth classical divide algorithm. In this - // case n > 1. - KnuthDiv(__U, __V, __Q, __R, m, n); - } - - // If the caller wants the quotient - if (Quotient) { - // Set up the Quotient value's memory. - if (Quotient->BitWidth != LHS.BitWidth) { - if (Quotient->isSingleWord()) Quotient->set_VAL(0); - } else - Quotient->clear(); - - // The quotient is in Q. Reconstitute the quotient into Quotient's low - // order words. - if (lhsWords == 1) { - uint64_t tmp = - uint64_t(__Q[0]) | ((uint64_t(__Q[1])) << (APINT_BITS_PER_WORD / 2)); - Quotient->set_VAL(tmp); - } else { - assert(!Quotient->isSingleWord() && - "Quotient ap_private not large enough"); - for (unsigned i = 0; i < lhsWords; ++i) - Quotient->set_pVal( - i, uint64_t(__Q[i * 2]) | - ((uint64_t(__Q[i * 2 + 1])) << (APINT_BITS_PER_WORD / 2))); - } - Quotient->clearUnusedBits(); - } - - // If the caller wants the remainder - if (Remainder) { - // Set up the Remainder value's memory. - if (Remainder->BitWidth != RHS.BitWidth) { - if (Remainder->isSingleWord()) Remainder->set_VAL(0); - } else - Remainder->clear(); - - // The remainder is in R. Reconstitute the remainder into Remainder's low - // order words. - if (rhsWords == 1) { - uint64_t tmp = - uint64_t(__R[0]) | ((uint64_t(__R[1])) << (APINT_BITS_PER_WORD / 2)); - Remainder->set_VAL(tmp); - } else { - assert(!Remainder->isSingleWord() && - "Remainder ap_private not large enough"); - for (unsigned i = 0; i < rhsWords; ++i) - Remainder->set_pVal( - i, uint64_t(__R[i * 2]) | - ((uint64_t(__R[i * 2 + 1])) << (APINT_BITS_PER_WORD / 2))); - } - Remainder->clearUnusedBits(); - } - - // Clean up the memory we allocated. - if (__U != &SPACE[0]) { - delete[] __U; - delete[] __V; - delete[] __Q; - delete[] __R; - } -} - -template -void divide(const ap_private<_AP_W, _AP_S>& LHS, uint32_t lhsWords, - uint64_t RHS, ap_private<_AP_W, _AP_S>* Quotient, - ap_private<_AP_W, _AP_S>* Remainder) { - uint32_t rhsWords = 1; - assert(lhsWords >= rhsWords && "Fractional result"); - enum { APINT_BITS_PER_WORD = 64 }; - // First, compose the values into an array of 32-bit words instead of - // 64-bit words. This is a necessity of both the "short division" algorithm - // and the the Knuth "classical algorithm" which requires there to be native - // operations for +, -, and * on an m bit value with an m*2 bit result. We - // can't use 64-bit operands here because we don't have native results of - // 128-bits. Furthremore, casting the 64-bit values to 32-bit values won't - // work on large-endian machines. - uint64_t mask = ~0ull >> (sizeof(uint32_t) * 8); - uint32_t n = 2; - uint32_t m = (lhsWords * 2) - n; - - // Allocate space for the temporary values we need either on the stack, if - // it will fit, or on the heap if it won't. - uint32_t SPACE[128]; - uint32_t* __U = 0; - uint32_t* __V = 0; - uint32_t* __Q = 0; - uint32_t* __R = 0; - if ((Remainder ? 
4 : 3) * n + 2 * m + 1 <= 128) { - __U = &SPACE[0]; - __V = &SPACE[m + n + 1]; - __Q = &SPACE[(m + n + 1) + n]; - if (Remainder) __R = &SPACE[(m + n + 1) + n + (m + n)]; - } else { - __U = new uint32_t[m + n + 1]; - __V = new uint32_t[n]; - __Q = new uint32_t[m + n]; - if (Remainder) __R = new uint32_t[n]; - } - - // Initialize the dividend - memset(__U, 0, (m + n + 1) * sizeof(uint32_t)); - for (unsigned i = 0; i < lhsWords; ++i) { - uint64_t tmp = LHS.get_pVal(i); - __U[i * 2] = tmp & mask; - __U[i * 2 + 1] = (tmp) >> (sizeof(uint32_t) * 8); - } - __U[m + n] = 0; // this extra word is for "spill" in the Knuth algorithm. - - // Initialize the divisor - memset(__V, 0, (n) * sizeof(uint32_t)); - __V[0] = RHS & mask; - __V[1] = (RHS) >> (sizeof(uint32_t) * 8); - - // initialize the quotient and remainder - memset(__Q, 0, (m + n) * sizeof(uint32_t)); - if (Remainder) memset(__R, 0, n * sizeof(uint32_t)); - - // Now, adjust m and n for the Knuth division. n is the number of words in - // the divisor. m is the number of words by which the dividend exceeds the - // divisor (i.e. m+n is the length of the dividend). These sizes must not - // contain any zero words or the Knuth algorithm fails. - for (unsigned i = n; i > 0 && __V[i - 1] == 0; i--) { - n--; - m++; - } - for (unsigned i = m + n; i > 0 && __U[i - 1] == 0; i--) m--; - - // If we're left with only a single word for the divisor, Knuth doesn't work - // so we implement the short division algorithm here. This is much simpler - // and faster because we are certain that we can divide a 64-bit quantity - // by a 32-bit quantity at hardware speed and short division is simply a - // series of such operations. This is just like doing short division but we - // are using base 2^32 instead of base 10. - assert(n != 0 && "Divide by zero?"); - if (n == 1) { - uint32_t divisor = __V[0]; - uint32_t remainder = 0; - for (int i = m + n - 1; i >= 0; i--) { - uint64_t partial_dividend = (uint64_t(remainder)) << 32 | __U[i]; - if (partial_dividend == 0) { - __Q[i] = 0; - remainder = 0; - } else if (partial_dividend < divisor) { - __Q[i] = 0; - remainder = partial_dividend; - } else if (partial_dividend == divisor) { - __Q[i] = 1; - remainder = 0; - } else { - __Q[i] = partial_dividend / divisor; - remainder = partial_dividend - (__Q[i] * divisor); - } - } - if (__R) __R[0] = remainder; - } else { - // Now we're ready to invoke the Knuth classical divide algorithm. In this - // case n > 1. - KnuthDiv(__U, __V, __Q, __R, m, n); - } - - // If the caller wants the quotient - if (Quotient) { - // Set up the Quotient value's memory. - if (Quotient->BitWidth != LHS.BitWidth) { - if (Quotient->isSingleWord()) Quotient->set_VAL(0); - } else - Quotient->clear(); - - // The quotient is in Q. Reconstitute the quotient into Quotient's low - // order words. - if (lhsWords == 1) { - uint64_t tmp = - uint64_t(__Q[0]) | ((uint64_t(__Q[1])) << (APINT_BITS_PER_WORD / 2)); - Quotient->set_VAL(tmp); - } else { - assert(!Quotient->isSingleWord() && - "Quotient ap_private not large enough"); - for (unsigned i = 0; i < lhsWords; ++i) - Quotient->set_pVal( - i, uint64_t(__Q[i * 2]) | - ((uint64_t(__Q[i * 2 + 1])) << (APINT_BITS_PER_WORD / 2))); - } - Quotient->clearUnusedBits(); - } - - // If the caller wants the remainder - if (Remainder) { - // Set up the Remainder value's memory. - if (Remainder->BitWidth != 64 /* RHS.BitWidth */) { - if (Remainder->isSingleWord()) Remainder->set_VAL(0); - } else - Remainder->clear(); - - // The remainder is in __R. 
Reconstitute the remainder into Remainder's low - // order words. - if (rhsWords == 1) { - uint64_t tmp = - uint64_t(__R[0]) | ((uint64_t(__R[1])) << (APINT_BITS_PER_WORD / 2)); - Remainder->set_VAL(tmp); - } else { - assert(!Remainder->isSingleWord() && - "Remainder ap_private not large enough"); - for (unsigned i = 0; i < rhsWords; ++i) - Remainder->set_pVal( - i, uint64_t(__R[i * 2]) | - ((uint64_t(__R[i * 2 + 1])) << (APINT_BITS_PER_WORD / 2))); - } - Remainder->clearUnusedBits(); - } - - // Clean up the memory we allocated. - if (__U != &SPACE[0]) { - delete[] __U; - delete[] __V; - delete[] __Q; - delete[] __R; - } -} - -/// @brief Logical right-shift function. -template -INLINE ap_private<_AP_W, _AP_S, _AP_C> lshr( - const ap_private<_AP_W, _AP_S, _AP_C>& LHS, uint32_t shiftAmt) { - return LHS.lshr(shiftAmt); -} - -/// Left-shift the ap_private by shiftAmt. -/// @brief Left-shift function. -template -INLINE ap_private<_AP_W, _AP_S, _AP_C> shl( - const ap_private<_AP_W, _AP_S, _AP_C>& LHS, uint32_t shiftAmt) { - return LHS.shl(shiftAmt); -} - -} // namespace ap_private_ops - -#endif // LLVM_SUPPORT_MATHEXTRAS_H - -/// This enumeration just provides for internal constants used in this -/// translation unit. -enum { - MIN_INT_BITS = 1, ///< Minimum number of bits that can be specified - ///< Note that this must remain synchronized with IntegerType::MIN_INT_BITS - MAX_INT_BITS = (1 << 23) - 1 ///< Maximum number of bits that can be specified - ///< Note that this must remain synchronized with IntegerType::MAX_INT_BITS -}; - -//===----------------------------------------------------------------------===// -// ap_private Class -//===----------------------------------------------------------------------===// - -/// ap_private - This class represents arbitrary precision constant integral -/// values. -/// It is a functional replacement for common case unsigned integer type like -/// "unsigned", "unsigned long" or "uint64_t", but also allows non-byte-width -/// integer sizes and large integer value types such as 3-bits, 15-bits, or more -/// than 64-bits of precision. ap_private provides a variety of arithmetic -/// operators -/// and methods to manipulate integer values of any bit-width. It supports both -/// the typical integer arithmetic and comparison operations as well as bitwise -/// manipulation. -/// -/// The class has several invariants worth noting: -/// * All bit, byte, and word positions are zero-based. -/// * Once the bit width is set, it doesn't change except by the Truncate, -/// SignExtend, or ZeroExtend operations. -/// * All binary operators must be on ap_private instances of the same bit -/// width. -/// Attempting to use these operators on instances with different bit -/// widths will yield an assertion. -/// * The value is stored canonically as an unsigned value. For operations -/// where it makes a difference, there are both signed and unsigned variants -/// of the operation. For example, sdiv and udiv. However, because the bit -/// widths must be the same, operations such as Mul and Add produce the same -/// results regardless of whether the values are interpreted as signed or -/// not. -/// * In general, the class tries to follow the style of computation that LLVM -/// uses in its IR. This simplifies its use for LLVM. -/// -/// @brief Class for arbitrary precision integers. 
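The canonical-unsigned storage and the sdiv/udiv split described above are easiest to observe through the public ap_int/ap_uint types that sit on top of this implementation in the C-simulation flow. A minimal sketch (editorial illustration, assuming the user-facing ap_int.h):

    #include <cassert>
    #include "ap_int.h"

    int main() {
        ap_int<4> a = -8;   // canonically stored as the bit pattern 1000b
        ap_uint<4> b = 8;   // the same 4-bit pattern, interpreted unsigned
        assert(a.to_int() == -8 && b.to_int() == 8);
        // The operand types select the signed or unsigned division variant.
        assert((a / ap_int<4>(2)).to_int() == -4);   // signed divide (sdiv)
        assert((b / ap_uint<4>(2)).to_int() == 4);   // unsigned divide (udiv)
        return 0;
    }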
- -#if defined(_MSC_VER) -#if _MSC_VER < 1400 && !defined(for) -#define for if (0); else for -#endif -typedef unsigned __int64 ap_ulong; -typedef signed __int64 ap_slong; -#else -typedef unsigned long long ap_ulong; -typedef signed long long ap_slong; -#endif -template -struct valtype; - -template -struct valtype<_AP_N8, false> { - typedef uint64_t Type; -}; - -template -struct valtype<_AP_N8, true> { - typedef int64_t Type; -}; - -template <> -struct valtype<1, false> { - typedef unsigned char Type; -}; -template <> -struct valtype<2, false> { - typedef unsigned short Type; -}; -template <> -struct valtype<3, false> { - typedef unsigned int Type; -}; -template <> -struct valtype<4, false> { - typedef unsigned int Type; -}; -template <> -struct valtype<1, true> { - typedef signed char Type; -}; -template <> -struct valtype<2, true> { - typedef short Type; -}; -template <> -struct valtype<3, true> { - typedef int Type; -}; -template <> -struct valtype<4, true> { - typedef int Type; -}; - -template -struct ap_private_enable_if {}; -template <> -struct ap_private_enable_if { - static const bool isValid = true; -}; - -// When bitwidth < 64 -template -class ap_private<_AP_W, _AP_S, true> { - // SFINAE pattern. Only consider this class when _AP_W <= 64 - const static bool valid = ap_private_enable_if<_AP_W <= 64>::isValid; - -#ifdef _MSC_VER -#pragma warning(disable : 4521 4522) -#endif - public: - typedef typename valtype<(_AP_W + 7) / 8, _AP_S>::Type ValType; - typedef ap_private<_AP_W, _AP_S> Type; - template - struct RType { - enum { - mult_w = _AP_W + _AP_W2, - mult_s = _AP_S || _AP_S2, - plus_w = - AP_MAX(_AP_W + (_AP_S2 && !_AP_S), _AP_W2 + (_AP_S && !_AP_S2)) + 1, - plus_s = _AP_S || _AP_S2, - minus_w = - AP_MAX(_AP_W + (_AP_S2 && !_AP_S), _AP_W2 + (_AP_S && !_AP_S2)) + 1, - minus_s = true, - div_w = _AP_W + _AP_S2, - div_s = _AP_S || _AP_S2, - mod_w = AP_MIN(_AP_W, _AP_W2 + (!_AP_S2 && _AP_S)), - mod_s = _AP_S, - logic_w = AP_MAX(_AP_W + (_AP_S2 && !_AP_S), _AP_W2 + (_AP_S && !_AP_S2)), - logic_s = _AP_S || _AP_S2 - }; - typedef ap_private mult; - typedef ap_private plus; - typedef ap_private minus; - typedef ap_private logic; - typedef ap_private div; - typedef ap_private mod; - typedef ap_private<_AP_W, _AP_S> arg1; - typedef bool reduce; - }; - enum { APINT_BITS_PER_WORD = sizeof(uint64_t) * 8 }; - enum { - excess_bits = (_AP_W % APINT_BITS_PER_WORD) - ? APINT_BITS_PER_WORD - (_AP_W % APINT_BITS_PER_WORD) - : 0 - }; - static const uint64_t mask = ((uint64_t)~0ULL >> (excess_bits)); - static const uint64_t not_mask = ~mask; - static const uint64_t sign_bit_mask = 1ULL << (APINT_BITS_PER_WORD - 1); - template - struct sign_ext_mask { - static const uint64_t mask = ~0ULL << _AP_W1; - }; - static const int width = _AP_W; - - enum { - BitWidth = _AP_W, - _AP_N = 1, - }; - ValType VAL; ///< Used to store the <= 64 bits integer value. 
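// (Worked example of the RType rules above: for ap_private<8, true> +
// ap_private<10, false>, plus_w == AP_MAX(8 + 0, 10 + 1) + 1 == 12 and
// plus_s == true, so the sum is returned as a 12-bit signed ap_private and
// cannot overflow.)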
-#ifdef AP_CANARY - ValType CANARY; - void check_canary() { assert(CANARY == (ValType)0xDEADBEEFDEADBEEF); } - void set_canary() { CANARY = (ValType)0xDEADBEEFDEADBEEF; } -#else - void check_canary() {} - void set_canary() {} -#endif - - INLINE ValType& get_VAL(void) { return VAL; } - INLINE ValType get_VAL(void) const { return VAL; } - INLINE ValType get_VAL(void) const volatile { return VAL; } - INLINE void set_VAL(uint64_t value) { VAL = (ValType)value; } - INLINE ValType& get_pVal(int i) { return VAL; } - INLINE ValType get_pVal(int i) const { return VAL; } - INLINE const uint64_t* get_pVal() const { - assert(0 && "invalid usage"); - return 0; - } - INLINE ValType get_pVal(int i) const volatile { return VAL; } - INLINE uint64_t* get_pVal() const volatile { - assert(0 && "invalid usage"); - return 0; - } - INLINE void set_pVal(int i, uint64_t value) { VAL = (ValType)value; } - - INLINE uint32_t getBitWidth() const { return BitWidth; } - - template - ap_private<_AP_W, _AP_S>& operator=(const ap_private<_AP_W1, _AP_S1>& RHS) { - VAL = (ValType)(RHS.get_VAL()); - clearUnusedBits(); - return *this; - } - - template - ap_private<_AP_W, _AP_S>& operator=( - const volatile ap_private<_AP_W1, _AP_S1>& RHS) { - VAL = (ValType)(RHS.get_VAL()); // TODO check here about ap_private - clearUnusedBits(); - return *this; - } - - void operator=(const ap_private& RHS) volatile { - // Don't do anything for X = X - VAL = RHS.get_VAL(); // No need to check because no harm done by copying. - clearUnusedBits(); - } - - ap_private& operator=(const ap_private& RHS) { - // Don't do anything for X = X - VAL = RHS.get_VAL(); // No need to check because no harm done by copying. - clearUnusedBits(); - return *this; - } - - void operator=(const volatile ap_private& RHS) volatile { - // Don't do anything for X = X - VAL = RHS.get_VAL(); // No need to check because no harm done by copying. - clearUnusedBits(); - } - - ap_private& operator=(const volatile ap_private& RHS) { - // Don't do anything for X = X - VAL = RHS.get_VAL(); // No need to check because no harm done by copying. - clearUnusedBits(); - return *this; - } - - template - INLINE ap_private& operator=(const _private_range_ref<_AP_W2, _AP_S2>& op2) { - *this = ap_private<_AP_W2, false>(op2); - return *this; - } - -#define ASSIGN_OP_FROM_INT(C_TYPE) \ - INLINE ap_private& operator=(const C_TYPE v) { \ - set_canary(); \ - this->VAL = (ValType)v; \ - clearUnusedBits(); \ - check_canary(); \ - return *this; \ - } - -ASSIGN_OP_FROM_INT(bool) -ASSIGN_OP_FROM_INT(char) -ASSIGN_OP_FROM_INT(signed char) -ASSIGN_OP_FROM_INT(unsigned char) -ASSIGN_OP_FROM_INT(short) -ASSIGN_OP_FROM_INT(unsigned short) -ASSIGN_OP_FROM_INT(int) -ASSIGN_OP_FROM_INT(unsigned int) -ASSIGN_OP_FROM_INT(long) -ASSIGN_OP_FROM_INT(unsigned long) -ASSIGN_OP_FROM_INT(ap_slong) -ASSIGN_OP_FROM_INT(ap_ulong) -#if 0 -ASSIGN_OP_FROM_INT(half) -ASSIGN_OP_FROM_INT(float) -ASSIGN_OP_FROM_INT(double) -#endif -#undef ASSIGN_OP_FROM_INT - - // XXX This is a must to prevent pointer being converted to bool. - INLINE ap_private& operator=(const char* s) { - ap_private tmp(s); // XXX direct-initialization, as ctor is explicit. 
- operator=(tmp); - return *this; - } - - private: - explicit INLINE ap_private(uint64_t* val) : VAL(val[0]) { - set_canary(); - clearUnusedBits(); - check_canary(); - } - - INLINE bool isSingleWord() const { return true; } - - public: - INLINE void fromString(const char* strStart, uint32_t slen, uint8_t radix) { - bool isNeg = strStart[0] == '-'; - if (isNeg) { - strStart++; - slen--; - } - - if (strStart[0] == '0' && (strStart[1] == 'b' || strStart[1] == 'B')) { - //if(radix == 0) radix = 2; - _AP_WARNING(radix != 2, "%s seems to have base %d, but %d given.", strStart, 2, radix); - strStart += 2; - slen -=2; - } else if (strStart[0] == '0' && (strStart[1] == 'o' || strStart[1] == 'O')) { - //if (radix == 0) radix = 8; - _AP_WARNING(radix != 8, "%s seems to have base %d, but %d given.", strStart, 8, radix); - strStart += 2; - slen -=2; - } else if (strStart[0] == '0' && (strStart[1] == 'x' || strStart[1] == 'X')) { - //if (radix == 0) radix = 16; - _AP_WARNING(radix != 16, "%s seems to have base %d, but %d given.", strStart, 16, radix); - strStart += 2; - slen -=2; - } else if (strStart[0] == '0' && (strStart[1] == 'd' || strStart[1] == 'D')) { - //if (radix == 0) radix = 10; - _AP_WARNING(radix != 10, "%s seems to have base %d, but %d given.", strStart, 10, radix); - strStart += 2; - slen -=2; - } else if (radix == 0) { - //radix = 2; // XXX default value - } - - // Check our assumptions here - assert((radix == 10 || radix == 8 || radix == 16 || radix == 2) && - "Radix should be 2, 8, 10, or 16!"); - assert(strStart && "String is null?"); - - // Clear bits. - uint64_t tmpVAL = VAL = 0; - - switch (radix) { - case 2: - // sscanf(strStart,"%b",&VAL); - // tmpVAL = *strStart =='1' ? ~0ULL : 0; - for (; *strStart; ++strStart) { - assert((*strStart == '0' || *strStart == '1') && - ("Wrong binary number")); - tmpVAL <<= 1; - tmpVAL |= (*strStart - '0'); - } - break; - case 8: -#ifdef _MSC_VER - sscanf_s(strStart, "%llo", &tmpVAL, slen + 1); -#else -#if defined(__x86_64__) && !defined(__MINGW32__) && !defined(__WIN32__) - sscanf(strStart, "%lo", &tmpVAL); -#else - sscanf(strStart, "%llo", &tmpVAL); -#endif //__x86_64__ -#endif //_MSC_VER - break; - case 10: -#ifdef _MSC_VER - sscanf_s(strStart, "%llu", &tmpVAL, slen + 1); -#else -#if defined(__x86_64__) && !defined(__MINGW32__) && !defined(__WIN32__) - sscanf(strStart, "%lu", &tmpVAL); -#else - sscanf(strStart, "%llu", &tmpVAL); -#endif //__x86_64__ -#endif //_MSC_VER - break; - case 16: -#ifdef _MSC_VER - sscanf_s(strStart, "%llx", &tmpVAL, slen + 1); -#else -#if defined(__x86_64__) && !defined(__MINGW32__) && !defined(__WIN32__) - sscanf(strStart, "%lx", &tmpVAL); -#else - sscanf(strStart, "%llx", &tmpVAL); -#endif //__x86_64__ -#endif //_MSC_VER - break; - default: - assert(0 && "Unknown radix"); // unreachable: radix was validated above - } - VAL = isNeg ?
(ValType)(-tmpVAL) : (ValType)(tmpVAL); - - clearUnusedBits(); - } - - private: - INLINE ap_private(const std::string& val, uint8_t radix = 2) : VAL(0) { - assert(!val.empty() && "String empty?"); - set_canary(); - fromString(val.c_str(), val.size(), radix); - check_canary(); - } - - INLINE ap_private(const char strStart[], uint32_t slen, uint8_t radix) - : VAL(0) { - set_canary(); - fromString(strStart, slen, radix); - check_canary(); - } - - INLINE ap_private(uint32_t numWords, const uint64_t bigVal[]) - : VAL(bigVal[0]) { - set_canary(); - clearUnusedBits(); - check_canary(); - } - - public: - INLINE ap_private() { - set_canary(); - clearUnusedBits(); - check_canary(); - } - -#define CTOR(TYPE) \ - INLINE ap_private(TYPE v) : VAL((ValType)v) { \ - set_canary(); \ - clearUnusedBits(); \ - check_canary(); \ - } - CTOR(bool) - CTOR(char) - CTOR(signed char) - CTOR(unsigned char) - CTOR(short) - CTOR(unsigned short) - CTOR(int) - CTOR(unsigned int) - CTOR(long) - CTOR(unsigned long) - CTOR(ap_slong) - CTOR(ap_ulong) -#if 0 - CTOR(half) - CTOR(float) - CTOR(double) -#endif -#undef CTOR - - template - INLINE ap_private(const ap_private<_AP_W1, _AP_S1, _AP_OPT>& that) - : VAL((ValType)that.get_VAL()) { - set_canary(); - clearUnusedBits(); - check_canary(); - } - - template - INLINE ap_private(const volatile ap_private<_AP_W1, _AP_S1, _AP_OPT>& that) - : VAL((ValType)that.get_VAL()) { - set_canary(); - clearUnusedBits(); - check_canary(); - } - - explicit INLINE ap_private(const char* val) { - set_canary(); - unsigned char radix = 10; - std::string str = ap_private_ops::parseString(val, radix); // will set radix. - std::string::size_type pos = str.find('.'); - // trunc all fraction part - if (pos != std::string::npos) str = str.substr(pos); - - ap_private<_AP_W, _AP_S> ap_private_val(str, radix); - operator=(ap_private_val); - check_canary(); - } - - INLINE ap_private(const char* val, signed char rd) { - set_canary(); - unsigned char radix = rd; - std::string str = ap_private_ops::parseString(val, radix); // will set radix. - std::string::size_type pos = str.find('.'); - // trunc all fraction part - if (pos != std::string::npos) str = str.substr(pos); - - ap_private<_AP_W, _AP_S> ap_private_val(str, radix); - operator=(ap_private_val); - check_canary(); - } - - INLINE ~ap_private() { check_canary(); } - - INLINE bool isNegative() const { - static const uint64_t sign_mask = 1ULL << (_AP_W - 1); - return _AP_S && (sign_mask & VAL); - } - - INLINE bool isPositive() const { return !isNegative(); } - - INLINE bool isStrictlyPositive() const { return !isNegative() && VAL != 0; } - - INLINE bool isAllOnesValue() const { return (mask & VAL) == mask; } - - INLINE bool operator==(const ap_private<_AP_W, _AP_S>& RHS) const { - return VAL == RHS.get_VAL(); - } - INLINE bool operator==(const ap_private<_AP_W, !_AP_S>& RHS) const { - return (uint64_t)VAL == (uint64_t)RHS.get_VAL(); - } - - INLINE bool operator==(uint64_t Val) const { return ((uint64_t)VAL == Val); } - INLINE bool operator!=(uint64_t Val) const { return ((uint64_t)VAL != Val); } - INLINE bool operator!=(const ap_private<_AP_W, _AP_S>& RHS) const { - return VAL != RHS.get_VAL(); - } - INLINE bool operator!=(const ap_private<_AP_W, !_AP_S>& RHS) const { - return (uint64_t)VAL != (uint64_t)RHS.get_VAL(); - } - - /// postfix increment. - const ap_private operator++(int) { - ap_private orig(*this); - VAL++; - clearUnusedBits(); - return orig; - } - - /// prefix increment. 
- const ap_private operator++() { - ++VAL; - clearUnusedBits(); - return *this; - } - - /// postfix decrement. - const ap_private operator--(int) { - ap_private orig(*this); - --VAL; - clearUnusedBits(); - return orig; - } - - /// prefix decrement. - const ap_private operator--() { - --VAL; - clearUnusedBits(); - return *this; - } - - /// one's complement. - INLINE ap_private<_AP_W + !_AP_S, true> operator~() const { - ap_private<_AP_W + !_AP_S, true> Result(*this); - Result.flip(); - return Result; - } - - /// two's complement. - INLINE typename RType<1, false>::minus operator-() const { - return ap_private<1, false>(0) - (*this); - } - - /// logic negation. - INLINE bool operator!() const { return !VAL; } - - INLINE std::string toString(uint8_t radix, bool wantSigned) const; - INLINE std::string toStringUnsigned(uint8_t radix = 10) const { - return toString(radix, false); - } - INLINE std::string toStringSigned(uint8_t radix = 10) const { - return toString(radix, true); - } - INLINE void clear() { VAL = 0; } - INLINE ap_private& clear(uint32_t bitPosition) { - VAL &= ~(1ULL << (bitPosition)); - clearUnusedBits(); - return *this; - } - - INLINE ap_private ashr(uint32_t shiftAmt) const { - if (_AP_S) - return ap_private((shiftAmt == BitWidth) ? 0 - : ((int64_t)VAL) >> (shiftAmt)); - else - return ap_private((shiftAmt == BitWidth) ? 0 - : ((uint64_t)VAL) >> (shiftAmt)); - } - - INLINE ap_private lshr(uint32_t shiftAmt) const { - return ap_private((shiftAmt == BitWidth) - ? ap_private(0) - : ap_private((VAL & mask) >> (shiftAmt))); - } - - INLINE ap_private shl(uint32_t shiftAmt) const -// just for clang compiler -#if defined(__clang__) && !defined(__CLANG_3_1__) - __attribute__((no_sanitize("undefined"))) -#endif - { - if (shiftAmt > BitWidth) { - if (!isNegative()) - return ap_private(0); - else - return ap_private(-1); - } - if (shiftAmt == BitWidth) - return ap_private(0); - else - return ap_private((VAL) << (shiftAmt)); - // return ap_private((shiftAmt == BitWidth) ? 
ap_private(0ULL) : - // ap_private(VAL << shiftAmt)); - } - - INLINE int64_t getSExtValue() const { return VAL; } - - // XXX XXX this function is used in CBE - INLINE uint64_t getZExtValue() const { return VAL & mask; } - - template - INLINE ap_private(const _private_range_ref<_AP_W2, _AP_S2>& ref) { - set_canary(); - *this = ref.get(); - check_canary(); - } - - template - INLINE ap_private(const _private_bit_ref<_AP_W2, _AP_S2>& ref) { - set_canary(); - *this = ((uint64_t)(bool)ref); - check_canary(); - } - -// template -// INLINE ap_private(const ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3>& ref) { -// set_canary(); -// *this = ref.get(); -// check_canary(); -// } -// -// template -// INLINE ap_private( -// const af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& val) { -// set_canary(); -// *this = ((val.operator ap_private<_AP_W2, false>())); -// check_canary(); -// } -// -// template -// INLINE ap_private( -// const af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& val) { -// set_canary(); -// *this = (uint64_t)(bool)val; -// check_canary(); -// } - - INLINE void write(const ap_private<_AP_W, _AP_S>& op2) volatile { - *this = (op2); - } - - // Explicit conversions to C interger types - //----------------------------------------------------------- - INLINE operator ValType() const { return get_VAL(); } - - INLINE int to_uchar() const { return (unsigned char)get_VAL(); } - - INLINE int to_char() const { return (signed char)get_VAL(); } - - INLINE int to_ushort() const { return (unsigned short)get_VAL(); } - - INLINE int to_short() const { return (short)get_VAL(); } - - INLINE int to_int() const { - // ap_private<64 /* _AP_W */, _AP_S> res(V); - return (int)get_VAL(); - } - - INLINE unsigned to_uint() const { return (unsigned)get_VAL(); } - - INLINE long to_long() const { return (long)get_VAL(); } - - INLINE unsigned long to_ulong() const { return (unsigned long)get_VAL(); } - - INLINE ap_slong to_int64() const { return (ap_slong)get_VAL(); } - - INLINE ap_ulong to_uint64() const { return (ap_ulong)get_VAL(); } - - INLINE double to_double() const { - if (isNegative()) - return roundToDouble(true); - else - return roundToDouble(false); - } - - INLINE unsigned length() const { return _AP_W; } - - INLINE bool isMinValue() const { return VAL == 0; } - template - INLINE ap_private& operator&=(const ap_private<_AP_W1, _AP_S1>& RHS) { - VAL = (ValType)(((uint64_t)VAL) & RHS.get_VAL()); - clearUnusedBits(); - return *this; - } - - template - INLINE ap_private& operator|=(const ap_private<_AP_W1, _AP_S1>& RHS) { - VAL = (ValType)(((uint64_t)VAL) | RHS.get_VAL()); - clearUnusedBits(); - return *this; - } - - template - INLINE ap_private& operator^=(const ap_private<_AP_W1, _AP_S1>& RHS) { - VAL = (ValType)(((uint64_t)VAL) ^ RHS.get_VAL()); - clearUnusedBits(); - return *this; - } - - template - INLINE ap_private& operator*=(const ap_private<_AP_W1, _AP_S1>& RHS) { - VAL = (ValType)(((uint64_t)VAL) * RHS.get_VAL()); - clearUnusedBits(); - return *this; - } - - template - INLINE ap_private& operator+=(const ap_private<_AP_W1, _AP_S1>& RHS) { - VAL = (ValType)(((uint64_t)VAL) + RHS.get_VAL()); - clearUnusedBits(); - return *this; - } - - template - INLINE ap_private& operator-=(const ap_private<_AP_W1, _AP_S1>& RHS) { - VAL = (ValType)(((uint64_t)VAL) - RHS.get_VAL()); - clearUnusedBits(); - return *this; - } - - template - INLINE typename RType<_AP_W1, _AP_S1>::logic operator&( - const ap_private<_AP_W1, _AP_S1>& RHS) const { - if (RType<_AP_W1, _AP_S1>::logic_w <= 64) { - 
typename RType<_AP_W1, _AP_S1>::logic Ret(((uint64_t)VAL) & - RHS.get_VAL()); - return Ret; - } else { - typename RType<_AP_W1, _AP_S1>::logic Ret = *this; - return Ret & RHS; - } - } - - template - INLINE typename RType<_AP_W1, _AP_S1>::logic operator^( - const ap_private<_AP_W1, _AP_S1>& RHS) const { - if (RType<_AP_W1, _AP_S1>::logic_w <= 64) { - typename RType<_AP_W1, _AP_S1>::logic Ret(((uint64_t)VAL) ^ - RHS.get_VAL()); - return Ret; - } else { - typename RType<_AP_W1, _AP_S1>::logic Ret = *this; - return Ret ^ RHS; - } - } - - template - INLINE typename RType<_AP_W1, _AP_S1>::logic operator|( - const ap_private<_AP_W1, _AP_S1>& RHS) const { - if (RType<_AP_W1, _AP_S1>::logic_w <= 64) { - typename RType<_AP_W1, _AP_S1>::logic Ret(((uint64_t)VAL) | - RHS.get_VAL()); - return Ret; - } else { - typename RType<_AP_W1, _AP_S1>::logic Ret = *this; - return Ret | RHS; - } - } - - INLINE ap_private And(const ap_private& RHS) const { - return ap_private(VAL & RHS.get_VAL()); - } - - INLINE ap_private Or(const ap_private& RHS) const { - return ap_private(VAL | RHS.get_VAL()); - } - - INLINE ap_private Xor(const ap_private& RHS) const { - return ap_private(VAL ^ RHS.get_VAL()); - } -#if 1 - template - INLINE typename RType<_AP_W1, _AP_S1>::mult operator*( - const ap_private<_AP_W1, _AP_S1>& RHS) const { - if (RType<_AP_W1, _AP_S1>::mult_w <= 64) { - typename RType<_AP_W1, _AP_S1>::mult Result(((uint64_t)VAL) * - RHS.get_VAL()); - return Result; - } else { - typename RType<_AP_W1, _AP_S1>::mult Result(*this); - Result *= RHS; - return Result; - } - } -#endif - INLINE ap_private Mul(const ap_private& RHS) const { - return ap_private(VAL * RHS.get_VAL()); - } - - INLINE ap_private Add(const ap_private& RHS) const { - return ap_private(VAL + RHS.get_VAL()); - } - - INLINE ap_private Sub(const ap_private& RHS) const { - return ap_private(VAL - RHS.get_VAL()); - } - - INLINE ap_private& operator&=(uint64_t RHS) { - VAL &= (ValType)RHS; - clearUnusedBits(); - return *this; - } - INLINE ap_private& operator|=(uint64_t RHS) { - VAL |= (ValType)RHS; - clearUnusedBits(); - return *this; - } - INLINE ap_private& operator^=(uint64_t RHS) { - VAL ^= (ValType)RHS; - clearUnusedBits(); - return *this; - } - INLINE ap_private& operator*=(uint64_t RHS) { - VAL *= (ValType)RHS; - clearUnusedBits(); - return *this; - } - INLINE ap_private& operator+=(uint64_t RHS) { - VAL += (ValType)RHS; - clearUnusedBits(); - return *this; - } - INLINE ap_private& operator-=(uint64_t RHS) { - VAL -= (ValType)RHS; - clearUnusedBits(); - return *this; - } - - INLINE bool isMinSignedValue() const { - static const uint64_t min_mask = ~(~0ULL << (_AP_W - 1)); - return BitWidth == 1 ? VAL == 1 - : (ap_private_ops::isNegative<_AP_W>(*this) && - ((min_mask & VAL) == 0)); - } - - template - INLINE typename RType<_AP_W1, _AP_S1>::plus operator+( - const ap_private<_AP_W1, _AP_S1>& RHS) const { - if (RType<_AP_W1, _AP_S1>::plus_w <= 64) - return typename RType<_AP_W1, _AP_S1>::plus( - RType<_AP_W1, _AP_S1>::plus_s - ? 
int64_t(((uint64_t)VAL) + RHS.get_VAL()) - : uint64_t(((uint64_t)VAL) + RHS.get_VAL())); - typename RType<_AP_W1, _AP_S1>::plus Result = RHS; - Result += VAL; - return Result; - } - - template - INLINE typename RType<_AP_W1, _AP_S1>::minus operator-( - const ap_private<_AP_W1, _AP_S1>& RHS) const { - if (RType<_AP_W1, _AP_S1>::minus_w <= 64) - return typename RType<_AP_W1, _AP_S1>::minus( - int64_t(((uint64_t)VAL) - RHS.get_VAL())); - typename RType<_AP_W1, _AP_S1>::minus Result = *this; - Result -= RHS; - return Result; - } - - INLINE uint32_t countPopulation() const { - return ap_private_ops::CountPopulation_64(VAL); - } - INLINE uint32_t countLeadingZeros() const { - int remainder = BitWidth % 64; - int excessBits = (64 - remainder) % 64; - uint32_t Count = ap_private_ops::CountLeadingZeros_64(VAL); - if (Count) Count -= excessBits; - return AESL_std::min(Count, (uint32_t)_AP_W); - } - - /// HiBits - This function returns the high "numBits" bits of this ap_private. - INLINE ap_private<_AP_W, _AP_S> getHiBits(uint32_t numBits) const { - ap_private<_AP_W, _AP_S> ret(*this); - ret = (ret) >> (BitWidth - numBits); - return ret; - } - - /// LoBits - This function returns the low "numBits" bits of this ap_private. - INLINE ap_private<_AP_W, _AP_S> getLoBits(uint32_t numBits) const { - ap_private<_AP_W, _AP_S> ret(((uint64_t)VAL) << (BitWidth - numBits)); - ret = (ret) >> (BitWidth - numBits); - return ret; - // return ap_private(numBits, (VAL << (BitWidth - numBits))>> (BitWidth - - // numBits)); - } - - INLINE ap_private<_AP_W, _AP_S>& set(uint32_t bitPosition) { - VAL |= (1ULL << (bitPosition)); - clearUnusedBits(); - return *this; // clearUnusedBits(); - } - - INLINE void set() { - VAL = (ValType)~0ULL; - clearUnusedBits(); - } - - template - INLINE void set(const ap_private<_AP_W3, false>& val) { - operator=(ap_private<_AP_W3, _AP_S>(val)); - } - - INLINE void set(const ap_private& val) { operator=(val); } - - INLINE void clearUnusedBits(void) volatile -// just for clang compiler -#if defined(__clang__) && !defined(__CLANG_3_1__) - __attribute__((no_sanitize("undefined"))) -#endif - { - enum { excess_bits = (_AP_W % 64) ? 64 - _AP_W % 64 : 0 }; - VAL = (ValType)( - _AP_S - ? ((((int64_t)VAL) << (excess_bits)) >> (excess_bits)) - : (excess_bits ? (((uint64_t)VAL) << (excess_bits)) >> (excess_bits) - : (uint64_t)VAL)); - } - - INLINE void clearUnusedBitsToZero(void) { - enum { excess_bits = (_AP_W % 64) ? 64 - _AP_W % 64 : 0 }; - static uint64_t mask = ~0ULL >> (excess_bits); - VAL &= mask; - } - - INLINE ap_private udiv(const ap_private& RHS) const { - return ap_private((uint64_t)VAL / RHS.get_VAL()); - } - - /// Signed divide this ap_private by ap_private RHS. - /// @brief Signed division function for ap_private. - INLINE ap_private sdiv(const ap_private& RHS) const { - if (isNegative()) - if (RHS.isNegative()) - return ((uint64_t)(0 - (*this))) / (uint64_t)(0 - RHS); - else - return 0 - ((uint64_t)(0 - (*this)) / (uint64_t)(RHS)); - else if (RHS.isNegative()) - return 0 - (this->udiv((ap_private)(0 - RHS))); - return this->udiv(RHS); - } - - template - INLINE ap_private urem(const ap_private<_AP_W, _AP_S2>& RHS) const { - assert(RHS.get_VAL() != 0 && "Divide by 0"); - return ap_private(((uint64_t)VAL) % ((uint64_t)RHS.get_VAL())); - } - - /// Signed remainder operation on ap_private. - /// @brief Function for signed remainder operation. 
- template - INLINE ap_private srem(const ap_private<_AP_W, _AP_S2>& RHS) const { - if (isNegative()) { - ap_private lhs = 0 - (*this); - if (RHS.isNegative()) { - ap_private rhs = 0 - RHS; - return 0 - (lhs.urem(rhs)); - } else - return 0 - (lhs.urem(RHS)); - } else if (RHS.isNegative()) { - ap_private rhs = 0 - RHS; - return this->urem(rhs); - } - return this->urem(RHS); - } - - template - INLINE bool eq(const ap_private<_AP_W1, _AP_S1>& RHS) const { - return (*this) == RHS; - } - - template - INLINE bool ne(const ap_private<_AP_W1, _AP_S1>& RHS) const { - return !((*this) == RHS); - } - - /// Regards both *this and RHS as unsigned quantities and compares them for - /// the validity of the less-than relationship. - /// @returns true if *this < RHS when both are considered unsigned. - /// @brief Unsigned less than comparison - template - INLINE bool ult(const ap_private<_AP_W1, _AP_S1>& RHS) const { - if (_AP_W1 <= 64) { - uint64_t lhsZext = ((uint64_t(VAL)) << (64 - _AP_W)) >> (64 - _AP_W); - uint64_t rhsZext = - ((uint64_t(RHS.get_VAL())) << (64 - _AP_W1)) >> (64 - _AP_W1); - return lhsZext < rhsZext; - } else - return RHS.uge(*this); - } - - /// Regards both *this and RHS as signed quantities and compares them for - /// validity of the less-than relationship. - /// @returns true if *this < RHS when both are considered signed. - /// @brief Signed less than comparison - template - INLINE bool slt(const ap_private<_AP_W1, _AP_S1>& RHS) const -// just for clang compiler -#if defined(__clang__) && !defined(__CLANG_3_1__) - __attribute__((no_sanitize("undefined"))) -#endif - { - if (_AP_W1 <= 64) { - int64_t lhsSext = ((int64_t(VAL)) << (64 - _AP_W)) >> (64 - _AP_W); - int64_t rhsSext = - ((int64_t(RHS.get_VAL())) << (64 - _AP_W1)) >> (64 - _AP_W1); - return lhsSext < rhsSext; - } else - return RHS.sge(*this); - } - - /// Regards both *this and RHS as unsigned quantities and compares them for - /// validity of the less-or-equal relationship. - /// @returns true if *this <= RHS when both are considered unsigned. - /// @brief Unsigned less or equal comparison - template - INLINE bool ule(const ap_private<_AP_W1, _AP_S1>& RHS) const { - return ult(RHS) || eq(RHS); - } - - /// Regards both *this and RHS as signed quantities and compares them for - /// validity of the less-or-equal relationship. - /// @returns true if *this <= RHS when both are considered signed. - /// @brief Signed less or equal comparison - template - INLINE bool sle(const ap_private<_AP_W1, _AP_S1>& RHS) const { - return slt(RHS) || eq(RHS); - } - - /// Regards both *this and RHS as unsigned quantities and compares them for - /// the validity of the greater-than relationship. - /// @returns true if *this > RHS when both are considered unsigned. - /// @brief Unsigned greather than comparison - template - INLINE bool ugt(const ap_private<_AP_W1, _AP_S1>& RHS) const { - return !ult(RHS) && !eq(RHS); - } - - /// Regards both *this and RHS as signed quantities and compares them for - /// the validity of the greater-than relationship. - /// @returns true if *this > RHS when both are considered signed. - /// @brief Signed greather than comparison - template - INLINE bool sgt(const ap_private<_AP_W1, _AP_S1>& RHS) const { - return !slt(RHS) && !eq(RHS); - } - - /// Regards both *this and RHS as unsigned quantities and compares them for - /// validity of the greater-or-equal relationship. - /// @returns true if *this >= RHS when both are considered unsigned. 
- /// @brief Unsigned greater or equal comparison - template - INLINE bool uge(const ap_private<_AP_W1, _AP_S1>& RHS) const { - return !ult(RHS); - } - - /// Regards both *this and RHS as signed quantities and compares them for - /// validity of the greater-or-equal relationship. - /// @returns true if *this >= RHS when both are considered signed. - /// @brief Signed greather or equal comparison - template - INLINE bool sge(const ap_private<_AP_W1, _AP_S1>& RHS) const { - return !slt(RHS); - } - - INLINE ap_private abs() const { - if (isNegative()) return -(*this); - return *this; - } - - INLINE ap_private<_AP_W, false> get() const { - ap_private<_AP_W, false> ret(*this); - return ret; - } - - INLINE static uint32_t getBitsNeeded(const char* str, uint32_t slen, - uint8_t radix) { - return _AP_W; - } - - INLINE uint32_t getActiveBits() const { - uint32_t bits = _AP_W - countLeadingZeros(); - return bits ? bits : 1; - } - - INLINE double roundToDouble(bool isSigned = false) const { - return isSigned ? double((int64_t)VAL) : double((uint64_t)VAL); - } - - /*Reverse the contents of ap_private instance. I.e. LSB becomes MSB and vise - * versa*/ - INLINE ap_private& reverse() { - for (int i = 0; i < _AP_W / 2; ++i) { - bool tmp = operator[](i); - if (operator[](_AP_W - 1 - i)) - set(i); - else - clear(i); - if (tmp) - set(_AP_W - 1 - i); - else - clear(_AP_W - 1 - i); - } - clearUnusedBits(); - return *this; - } - - /*Return true if the value of ap_private instance is zero*/ - INLINE bool iszero() const { return isMinValue(); } - - INLINE bool to_bool() const { return !iszero(); } - - /* x < 0 */ - INLINE bool sign() const { - if (isNegative()) return true; - return false; - } - - /* x[i] = !x[i] */ - INLINE void invert(int i) { - assert(i >= 0 && "Attempting to read bit with negative index"); - assert(i < _AP_W && "Attempting to read bit beyond MSB"); - flip(i); - } - - /* x[i] */ - INLINE bool test(int i) const { - assert(i >= 0 && "Attempting to read bit with negative index"); - assert(i < _AP_W && "Attempting to read bit beyond MSB"); - return operator[](i); - } - - // This is used for sc_lv and sc_bv, which is implemented by sc_uint - // Rotate an ap_private object n places to the left - INLINE void lrotate(int n) { - assert(n >= 0 && "Attempting to shift negative index"); - assert(n < _AP_W && "Shift value larger than bit width"); - operator=(shl(n) | lshr(_AP_W - n)); - } - - // This is used for sc_lv and sc_bv, which is implemented by sc_uint - // Rotate an ap_private object n places to the right - INLINE void rrotate(int n) { - assert(n >= 0 && "Attempting to shift negative index"); - assert(n < _AP_W && "Shift value larger than bit width"); - operator=(lshr(n) | shl(_AP_W - n)); - } - - // Set the ith bit into v - INLINE void set(int i, bool v) { - assert(i >= 0 && "Attempting to write bit with negative index"); - assert(i < _AP_W && "Attempting to write bit beyond MSB"); - v ? set(i) : clear(i); - } - - // Set the ith bit into v - INLINE void set_bit(int i, bool v) { - assert(i >= 0 && "Attempting to write bit with negative index"); - assert(i < _AP_W && "Attempting to write bit beyond MSB"); - v ? set(i) : clear(i); - } - - // Get the value of ith bit - INLINE bool get_bit(int i) const { - assert(i >= 0 && "Attempting to read bit with negative index"); - assert(i < _AP_W && "Attempting to read bit beyond MSB"); - return (((1ULL << i) & VAL) != 0); - } - - /// Toggle all bits. 
- INLINE ap_private& flip() { - VAL = (ValType)((~0ULL ^ VAL) & mask); - clearUnusedBits(); - return *this; - } - - /// Toggles a given bit to its opposite value. - INLINE ap_private& flip(uint32_t bitPosition) { - assert(bitPosition < BitWidth && "Out of the bit-width range!"); - set_bit(bitPosition, !get_bit(bitPosition)); - return *this; - } - - // complements every bit - INLINE void b_not() { flip(); } - -// Binary Arithmetic -//----------------------------------------------------------- -#define OP_BIN_AP(Sym, Rty, Fun) \ - template \ - INLINE typename RType<_AP_W2, _AP_S2>::Rty operator Sym( \ - const ap_private<_AP_W2, _AP_S2>& op) const { \ - typename RType<_AP_W2, _AP_S2>::Rty lhs(*this); \ - typename RType<_AP_W2, _AP_S2>::Rty rhs(op); \ - return lhs.Fun(rhs); \ - } - -/// Bitwise and, or, xor -// OP_BIN_AP(&,logic, And) -// OP_BIN_AP(|,logic, Or) -// OP_BIN_AP(^,logic, Xor) -#undef OP_BIN_AP - - template - INLINE typename RType<_AP_W2, _AP_S2>::div operator/( - const ap_private<_AP_W2, _AP_S2>& op) const { - ap_private _AP_W2 ? _AP_S - : (_AP_W2 > _AP_W ? _AP_S2 : _AP_S || _AP_S2))> - lhs = *this; - ap_private _AP_W2 ? _AP_S - : (_AP_W2 > _AP_W ? _AP_S2 : _AP_S || _AP_S2))> - rhs = op; - return typename RType<_AP_W2, _AP_S2>::div( - (_AP_S || _AP_S2) ? lhs.sdiv(rhs) : lhs.udiv(rhs)); - } - - template - INLINE typename RType<_AP_W2, _AP_S2>::mod operator%( - const ap_private<_AP_W2, _AP_S2>& op) const { - ap_private _AP_W2 ? _AP_S - : (_AP_W2 > _AP_W ? _AP_S2 : _AP_S || _AP_S2))> - lhs = *this; - ap_private _AP_W2 ? _AP_S - : (_AP_W2 > _AP_W ? _AP_S2 : _AP_S || _AP_S2))> - rhs = op; - typename RType<_AP_W2, _AP_S2>::mod res = - typename RType<_AP_W2, _AP_S2>::mod(_AP_S ? lhs.srem(rhs) - : lhs.urem(rhs)); - return res; - } - -#define OP_ASSIGN_AP_2(Sym) \ - template \ - INLINE ap_private<_AP_W, _AP_S>& operator Sym##=( \ - const ap_private<_AP_W2, _AP_S2>& op) { \ - *this = operator Sym(op); \ - return *this; \ - } - - OP_ASSIGN_AP_2(/) - OP_ASSIGN_AP_2(%) -#undef OP_ASSIGN_AP_2 - -/// Bitwise assign: and, or, xor -//------------------------------------------------------------- -// OP_ASSIGN_AP(&) -// OP_ASSIGN_AP(^) -// OP_ASSIGN_AP(|) - -#define OP_LEFT_SHIFT_CTYPE(TYPE, SIGNED) \ - INLINE ap_private operator<<(const TYPE op) const { \ - if (op >= _AP_W) return ap_private(0); \ - if (SIGNED && op < 0) return *this >> (0 - op); \ - return shl(op); \ - } - - // OP_LEFT_SHIFT_CTYPE(bool, false) - OP_LEFT_SHIFT_CTYPE(char, CHAR_IS_SIGNED) - OP_LEFT_SHIFT_CTYPE(signed char, true) - OP_LEFT_SHIFT_CTYPE(unsigned char, false) - OP_LEFT_SHIFT_CTYPE(short, true) - OP_LEFT_SHIFT_CTYPE(unsigned short, false) - OP_LEFT_SHIFT_CTYPE(int, true) - OP_LEFT_SHIFT_CTYPE(unsigned int, false) - OP_LEFT_SHIFT_CTYPE(long, true) - OP_LEFT_SHIFT_CTYPE(unsigned long, false) - OP_LEFT_SHIFT_CTYPE(long long, true) - OP_LEFT_SHIFT_CTYPE(unsigned long long, false) -#if 0 - OP_LEFT_SHIFT_CTYPE(half, false) - OP_LEFT_SHIFT_CTYPE(float, false) - OP_LEFT_SHIFT_CTYPE(double, false) -#endif - -#undef OP_LEFT_SHIFT_CTYPE - - template - INLINE ap_private operator<<(const ap_private<_AP_W2, _AP_S2>& op2) const { - if (_AP_S2 == false) { - uint32_t sh = op2.to_uint(); - return *this << sh; - } else { - int sh = op2.to_int(); - return *this << sh; - } - } - -#define OP_RIGHT_SHIFT_CTYPE(TYPE, SIGNED) \ - INLINE ap_private operator>>(const TYPE op) const { \ - if (op >= _AP_W) { \ - if (isNegative()) \ - return ap_private(-1); \ - else \ - return ap_private(0); \ - } \ - if ((SIGNED) && op < 0) return *this << (0 - op); 
\ - if (_AP_S) \ - return ashr(op); \ - else \ - return lshr(op); \ - } - - // OP_RIGHT_SHIFT_CTYPE(bool, false) - OP_RIGHT_SHIFT_CTYPE(char, CHAR_IS_SIGNED) - OP_RIGHT_SHIFT_CTYPE(signed char, true) - OP_RIGHT_SHIFT_CTYPE(unsigned char, false) - OP_RIGHT_SHIFT_CTYPE(short, true) - OP_RIGHT_SHIFT_CTYPE(unsigned short, false) - OP_RIGHT_SHIFT_CTYPE(int, true) - OP_RIGHT_SHIFT_CTYPE(unsigned int, false) - OP_RIGHT_SHIFT_CTYPE(long, true) - OP_RIGHT_SHIFT_CTYPE(unsigned long, false) - OP_RIGHT_SHIFT_CTYPE(unsigned long long, false) - OP_RIGHT_SHIFT_CTYPE(long long, true) -#if 0 - OP_RIGHT_SHIFT_CTYPE(half, false) - OP_RIGHT_SHIFT_CTYPE(float, false) - OP_RIGHT_SHIFT_CTYPE(double, false) -#endif - -#undef OP_RIGHT_SHIFT_CTYPE - - template - INLINE ap_private operator>>(const ap_private<_AP_W2, _AP_S2>& op2) const { - if (_AP_S2 == false) { - uint32_t sh = op2.to_uint(); - return *this >> sh; - } else { - int sh = op2.to_int(); - return *this >> sh; - } - } - - /// Shift assign - //----------------------------------------------------------------- - - //INLINE const ap_private& operator<<=(uint32_t shiftAmt) { - // VAL <<= shiftAmt; - // clearUnusedBits(); - // return *this; - //} - -#define OP_ASSIGN_AP(Sym) \ - template \ - INLINE ap_private& operator Sym##=(int op) { \ - *this = operator Sym(op); \ - clearUnusedBits(); \ - return *this; \ - } \ - INLINE ap_private& operator Sym##=(unsigned int op) { \ - *this = operator Sym(op); \ - clearUnusedBits(); \ - return *this; \ - } \ - template \ - INLINE ap_private& operator Sym##=(const ap_private<_AP_W2, _AP_S2>& op) { \ - *this = operator Sym(op); \ - clearUnusedBits(); \ - return *this; \ - } - - OP_ASSIGN_AP(>>) - OP_ASSIGN_AP(<<) -#undef OP_ASSIGN_AP - - /// Comparisons - //----------------------------------------------------------------- - template - INLINE bool operator==(const ap_private<_AP_W1, _AP_S1>& op) const { - enum { _AP_MAX_W = AP_MAX(AP_MAX(_AP_W, _AP_W1), 32) }; - ap_private<_AP_MAX_W, false> lhs(*this); - ap_private<_AP_MAX_W, false> rhs(op); - if (_AP_MAX_W <= 64) { - return (uint64_t)lhs.get_VAL() == (uint64_t)rhs.get_VAL(); - } else - return lhs == rhs; - } - - template - INLINE bool operator!=(const ap_private<_AP_W2, _AP_S2>& op) const { - return !(*this == op); - } - - template - INLINE bool operator>(const ap_private<_AP_W2, _AP_S2>& op) const { - enum { - _AP_MAX_W = AP_MAX(_AP_W + (_AP_S || _AP_S2), _AP_W2 + (_AP_S || _AP_S2)) - }; - ap_private<_AP_MAX_W, _AP_S> lhs(*this); - ap_private<_AP_MAX_W, _AP_S2> rhs(op); - // this will follow gcc rule for comparison - // between different bitwidth and signness - if (_AP_S == _AP_S2) - return _AP_S ? lhs.sgt(rhs) : lhs.ugt(rhs); - else if (_AP_W < 32 && _AP_W2 < 32) - // different signness but both bitwidth is less than 32 - return lhs.sgt(rhs); - else - // different signness but bigger bitwidth - // is greater or equal to 32 - if (_AP_S) - if (_AP_W2 >= _AP_W) - return lhs.ugt(rhs); - else - return lhs.sgt(rhs); - else if (_AP_W >= _AP_W2) - return lhs.ugt(rhs); - else - return lhs.sgt(rhs); - } - - template - INLINE bool operator<=(const ap_private<_AP_W2, _AP_S2>& op) const { - return !(*this > op); - } - - template - INLINE bool operator<(const ap_private<_AP_W2, _AP_S2>& op) const { - enum { - _AP_MAX_W = AP_MAX(_AP_W + (_AP_S || _AP_S2), _AP_W2 + (_AP_S || _AP_S2)) - }; - ap_private<_AP_MAX_W, _AP_S> lhs(*this); - ap_private<_AP_MAX_W, _AP_S2> rhs(op); - if (_AP_S == _AP_S2) - return _AP_S ? 
lhs.slt(rhs) : lhs.ult(rhs); - else if (_AP_W < 32 && _AP_W2 < 32) - return lhs.slt(rhs); - else if (_AP_S) - if (_AP_W2 >= _AP_W) - return lhs.ult(rhs); - else - return lhs.slt(rhs); - else if (_AP_W >= _AP_W2) - return lhs.ult(rhs); - else - return lhs.slt(rhs); - } - - template - INLINE bool operator>=(const ap_private<_AP_W2, _AP_S2>& op) const { - return !(*this < op); - } - - /// Bit and Part Select - //-------------------------------------------------------------- - // FIXME now _private_range_ref refs to _AP_ROOT_TYPE(struct ssdm_int). - INLINE _private_range_ref<_AP_W, _AP_S> operator()(int Hi, int Lo) { - return _private_range_ref<_AP_W, _AP_S>(this, Hi, Lo); - } - - INLINE _private_range_ref<_AP_W, _AP_S> operator()(int Hi, int Lo) const { - return _private_range_ref<_AP_W, _AP_S>( - const_cast*>(this), Hi, Lo); - } - - INLINE _private_range_ref<_AP_W, _AP_S> range(int Hi, int Lo) const { - return _private_range_ref<_AP_W, _AP_S>( - (const_cast*>(this)), Hi, Lo); - } - - INLINE _private_range_ref<_AP_W, _AP_S> range(int Hi, int Lo) { - return _private_range_ref<_AP_W, _AP_S>(this, Hi, Lo); - } - - INLINE _private_bit_ref<_AP_W, _AP_S> operator[](int index) { - return _private_bit_ref<_AP_W, _AP_S>(*this, index); - } - - template - INLINE _private_bit_ref<_AP_W, _AP_S> operator[]( - const ap_private<_AP_W2, _AP_S2>& index) { - return _private_bit_ref<_AP_W, _AP_S>(*this, index.to_int()); - } - - INLINE const _private_bit_ref<_AP_W, _AP_S> operator[](int index) const { - return _private_bit_ref<_AP_W, _AP_S>( - const_cast&>(*this), index); - } - - template - INLINE const _private_bit_ref<_AP_W, _AP_S> operator[]( - const ap_private<_AP_W2, _AP_S2>& index) const { - return _private_bit_ref<_AP_W, _AP_S>( - const_cast&>(*this), index.to_int()); - } - - INLINE _private_bit_ref<_AP_W, _AP_S> bit(int index) { - return _private_bit_ref<_AP_W, _AP_S>(*this, index); - } - - template - INLINE _private_bit_ref<_AP_W, _AP_S> bit(const ap_private<_AP_W2, _AP_S2>& index) { - return _private_bit_ref<_AP_W, _AP_S>(*this, index.to_int()); - } - - INLINE const _private_bit_ref<_AP_W, _AP_S> bit(int index) const { - return _private_bit_ref<_AP_W, _AP_S>( - const_cast&>(*this), index); - } - - template - INLINE const _private_bit_ref<_AP_W, _AP_S> bit( - const ap_private<_AP_W2, _AP_S2>& index) const { - return _private_bit_ref<_AP_W, _AP_S>( - const_cast&>(*this), index.to_int()); - } - -// template -// INLINE ap_concat_ref<_AP_W, ap_private<_AP_W, _AP_S>, _AP_W2, -// ap_private<_AP_W2, _AP_S2> > -// concat(const ap_private<_AP_W2, _AP_S2>& a2) const { -// return ap_concat_ref<_AP_W, ap_private<_AP_W, _AP_S>, _AP_W2, -// ap_private<_AP_W2, _AP_S2> >( -// const_cast&>(*this), -// const_cast&>(a2)); -// } -// -// template -// INLINE ap_concat_ref<_AP_W, ap_private<_AP_W, _AP_S>, _AP_W2, -// ap_private<_AP_W2, _AP_S2> > -// concat(ap_private<_AP_W2, _AP_S2>& a2) { -// return ap_concat_ref<_AP_W, ap_private<_AP_W, _AP_S>, _AP_W2, -// ap_private<_AP_W2, _AP_S2> >(*this, a2); -// } -// -// template -// INLINE ap_concat_ref<_AP_W, ap_private, _AP_W2, ap_private<_AP_W2, _AP_S2> > -// operator,(const ap_private<_AP_W2, _AP_S2> &a2) const { -// return ap_concat_ref<_AP_W, ap_private, _AP_W2, -// ap_private<_AP_W2, _AP_S2> >( -// const_cast&>(*this), -// const_cast&>(a2)); -// } -// -// template -// INLINE ap_concat_ref<_AP_W, ap_private, _AP_W2, ap_private<_AP_W2, _AP_S2> > -// operator,(const ap_private<_AP_W2, _AP_S2> &a2) { -// return ap_concat_ref<_AP_W, ap_private, _AP_W2, -// ap_private<_AP_W2, _AP_S2> 
>( -// *this, const_cast&>(a2)); -// } -// -// template -// INLINE ap_concat_ref<_AP_W, ap_private, _AP_W2, ap_private<_AP_W2, _AP_S2> > -// operator,(ap_private<_AP_W2, _AP_S2> &a2) const { -// return ap_concat_ref<_AP_W, ap_private, _AP_W2, -// ap_private<_AP_W2, _AP_S2> >( -// const_cast&>(*this), a2); -// } -// -// template -// INLINE ap_concat_ref<_AP_W, ap_private, _AP_W2, ap_private<_AP_W2, _AP_S2> > -// operator,(ap_private<_AP_W2, _AP_S2> &a2) { -// return ap_concat_ref<_AP_W, ap_private, _AP_W2, -// ap_private<_AP_W2, _AP_S2> >(*this, a2); -// } -// -// template -// INLINE ap_concat_ref<_AP_W, ap_private<_AP_W, _AP_S>, _AP_W2, -// _private_range_ref<_AP_W2, _AP_S2> > -// operator,(const _private_range_ref<_AP_W2, _AP_S2> &a2) const { -// return ap_concat_ref<_AP_W, ap_private<_AP_W, _AP_S>, _AP_W2, -// _private_range_ref<_AP_W2, _AP_S2> >( -// const_cast&>(*this), -// const_cast<_private_range_ref<_AP_W2, _AP_S2>&>(a2)); -// } -// -// template -// INLINE ap_concat_ref<_AP_W, ap_private<_AP_W, _AP_S>, _AP_W2, -// _private_range_ref<_AP_W2, _AP_S2> > -// operator,(_private_range_ref<_AP_W2, _AP_S2> &a2) { -// return ap_concat_ref<_AP_W, ap_private<_AP_W, _AP_S>, _AP_W2, -// _private_range_ref<_AP_W2, _AP_S2> >(*this, a2); -// } -// -// template -// INLINE ap_concat_ref<_AP_W, ap_private<_AP_W, _AP_S>, 1, -// _private_bit_ref<_AP_W2, _AP_S2> > -// operator,(const _private_bit_ref<_AP_W2, _AP_S2> &a2) const { -// return ap_concat_ref<_AP_W, ap_private<_AP_W, _AP_S>, 1, -// _private_bit_ref<_AP_W2, _AP_S2> >( -// const_cast&>(*this), -// const_cast<_private_bit_ref<_AP_W2, _AP_S2>&>(a2)); -// } -// -// template -// INLINE ap_concat_ref<_AP_W, ap_private<_AP_W, _AP_S>, 1, -// _private_bit_ref<_AP_W2, _AP_S2> > -// operator,(_private_bit_ref<_AP_W2, _AP_S2> &a2) { -// return ap_concat_ref<_AP_W, ap_private<_AP_W, _AP_S>, 1, -// _private_bit_ref<_AP_W2, _AP_S2> >(*this, a2); -// } -// -// template -// INLINE ap_concat_ref<_AP_W, ap_private<_AP_W, _AP_S>, _AP_W2 + _AP_W3, -// ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3> > -// operator,(const ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3> &a2) const { -// return ap_concat_ref<_AP_W, ap_private<_AP_W, _AP_S>, _AP_W2 + _AP_W3, -// ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3> >( -// const_cast&>(*this), -// const_cast&>(a2)); -// } -// -// template -// INLINE ap_concat_ref<_AP_W, ap_private<_AP_W, _AP_S>, _AP_W2 + _AP_W3, -// ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3> > -// operator,(ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3> &a2) { -// return ap_concat_ref<_AP_W, ap_private<_AP_W, _AP_S>, _AP_W2 + _AP_W3, -// ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3> >(*this, -// a2); -// } -// -// template -// INLINE ap_concat_ref< -// _AP_W, ap_private, _AP_W2, -// af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> > -// operator,(const af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> -// &a2) const { -// return ap_concat_ref< -// _AP_W, ap_private, _AP_W2, -// af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> >( -// const_cast&>(*this), -// const_cast< -// af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>&>(a2)); -// } -// -// template -// INLINE ap_concat_ref< -// _AP_W, ap_private, _AP_W2, -// af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> > -// operator,(af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> &a2) { -// return ap_concat_ref< -// _AP_W, ap_private, _AP_W2, -// af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> >(*this, -// a2); -// } -// -// template -// 
INLINE -// ap_concat_ref<_AP_W, ap_private, 1, -// af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> > -// operator,(const af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> -// &a2) const { -// return ap_concat_ref< -// _AP_W, ap_private, 1, -// af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> >( -// const_cast&>(*this), -// const_cast&>( -// a2)); -// } -// -// template -// INLINE -// ap_concat_ref<_AP_W, ap_private, 1, -// af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> > -// operator,( -// af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> &a2) { -// return ap_concat_ref< -// _AP_W, ap_private, 1, -// af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> >(*this, a2); -// } -// -// template -// INLINE ap_private operator&( -// const ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3>& a2) { -// return *this & a2.get(); -// } -// -// template -// INLINE ap_private operator|( -// const ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3>& a2) { -// return *this | a2.get(); -// } -// -// template -// INLINE ap_private operator^( -// const ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3>& a2) { -// return *this ^ a2.get(); -// } - - // Reduce operation - //----------------------------------------------------------- - INLINE bool and_reduce() const { return (VAL & mask) == mask; } - - INLINE bool nand_reduce() const { return (VAL & mask) != mask; } - - INLINE bool or_reduce() const { return (bool)VAL; } - - INLINE bool nor_reduce() const { return VAL == 0; } - - INLINE bool xor_reduce() const { - unsigned int i = countPopulation(); - return (i % 2) ? true : false; - } - - INLINE bool xnor_reduce() const { - unsigned int i = countPopulation(); - return (i % 2) ? false : true; - } - - INLINE std::string to_string(uint8_t radix = 2, bool sign = false) const { - return toString(radix, radix == 10 ? _AP_S : sign); - } -}; // End of class ap_private <_AP_W, _AP_S, true> - -template -std::string ap_private<_AP_W, _AP_S, true>::toString(uint8_t radix, - bool wantSigned) const { - assert((radix == 10 || radix == 8 || radix == 16 || radix == 2) && - "Radix should be 2, 8, 10, or 16!"); - static const char* digits[] = {"0", "1", "2", "3", "4", "5", "6", "7", - "8", "9", "a", "b", "c", "d", "e", "f"}; - std::string result; - if (radix != 10) { - // For the 2, 8 and 16 bit cases, we can just shift instead of divide - // because the number of bits per digit (1,3 and 4 respectively) divides - // equaly. We just shift until there value is zero. - - // First, check for a zero value and just short circuit the logic below. - if (*this == (uint64_t)(0)) { - // Always generate a radix indicator because fixed-point - // formats require it. - switch (radix) { - case 2: - result = "0b0"; - break; - case 8: - result = "0o0"; - break; - case 16: - result = "0x0"; - break; - default: - assert("invalid radix" && 0); - } - } else { - ap_private<_AP_W, false, true> tmp(*this); - size_t insert_at = 0; - bool leading_zero = true; - if (wantSigned && isNegative()) { - // They want to print the signed version and it is a negative value - // Flip the bits and add one to turn it into the equivalent positive - // value and put a '-' in the result. 
- tmp.flip(); - tmp++; - result = "-"; - insert_at = 1; - leading_zero = false; - } - switch (radix) { - case 2: - result += "0b"; - break; - case 8: - result += "0o"; - break; - case 16: - result += "0x"; - break; - default: - assert("invalid radix" && 0); - } - insert_at += 2; - - // Just shift tmp right for each digit width until it becomes zero - uint32_t shift = (radix == 16 ? 4 : (radix == 8 ? 3 : 1)); - uint64_t mask = radix - 1; - ap_private<_AP_W, false, true> zero(0); - unsigned bits = 0; - bool msb = false; - while (tmp.ne(zero)) { - unsigned digit = (unsigned)(tmp.get_VAL() & mask); - result.insert(insert_at, digits[digit]); - tmp = tmp.lshr(shift); - bits++; - msb = (digit >> (shift - 1)) == 1; - } - bits *= shift; - if (bits < _AP_W && leading_zero && msb) - result.insert(insert_at, digits[0]); - } - return result; - } - - ap_private<_AP_W, false, true> tmp(*this); - ap_private<6, false, true> divisor(radix); - ap_private<_AP_W, _AP_S, true> zero(0); - size_t insert_at = 0; - if (wantSigned && isNegative()) { - // They want to print the signed version and it is a negative value - // Flip the bits and add one to turn it into the equivalent positive - // value and put a '-' in the result. - tmp.flip(); - tmp++; - result = "-"; - insert_at = 1; - } - if (tmp == ap_private<_AP_W, false, true>(0ULL)) - result = "0"; - else - while (tmp.ne(zero)) { - ap_private<_AP_W, false, true> APdigit = tmp % divisor; - ap_private<_AP_W, false, true> tmp2 = tmp / divisor; - uint32_t digit = (uint32_t)(APdigit.getZExtValue()); - assert(digit < radix && "divide failed"); - result.insert(insert_at, digits[digit]); - tmp = tmp2; - } - return result; - -} // End of ap_private<_AP_W, _AP_S, true>::toString() - -// bitwidth > 64 -template -class ap_private<_AP_W, _AP_S, false> { - // SFINAE pattern. Only consider this class when _AP_W > 64 - const static bool valid = ap_private_enable_if<(_AP_W > 64)>::isValid; - -#ifdef _MSC_VER -#pragma warning(disable : 4521 4522) -#endif - public: - enum { BitWidth = _AP_W, _AP_N = (_AP_W + 63) / 64 }; - static const int width = _AP_W; - - private: - /// This constructor is used only internally for speed of construction of - /// temporaries. It is unsafe for general use so it is not public. - - /* Constructors */ - /// Note that numWords can be smaller or larger than the corresponding bit - /// width but any extraneous bits will be dropped. - /// @param numWords the number of words in bigVal - /// @param bigVal a sequence of words to form the initial value of the - /// ap_private - /// @brief Construct an ap_private, initialized as bigVal[]. - INLINE ap_private(uint32_t numWords, const uint64_t bigVal[]) { - set_canary(); - assert(bigVal && "Null pointer detected!"); - { - // Get memory, cleared to 0 - memset(pVal, 0, _AP_N * sizeof(uint64_t)); - - // Calculate the number of words to copy - uint32_t words = AESL_std::min(numWords, _AP_N); - // Copy the words from bigVal to pVal - memcpy(pVal, bigVal, words * APINT_WORD_SIZE); - if (words >= _AP_W) clearUnusedBits(); - // Make sure unused high bits are cleared - } - check_canary(); - } - - /// This constructor interprets Val as a string in the given radix. The - /// interpretation stops when the first charater that is not suitable for the - /// radix is encountered. Acceptable radix values are 2, 8, 10 and 16. It is - /// an error for the value implied by the string to require more bits than - /// numBits. 
- /// @param val the string to be interpreted - /// @param radix the radix of Val to use for the intepretation - /// @brief Construct an ap_private from a string representation. - INLINE ap_private(const std::string& val, uint8_t radix = 2) { - set_canary(); - assert(!val.empty() && "The input string is empty."); - const char* c_str = val.c_str(); - fromString(c_str, val.size(), radix); - check_canary(); - } - - /// This constructor interprets the slen characters starting at StrStart as - /// a string in the given radix. The interpretation stops when the first - /// character that is not suitable for the radix is encountered. Acceptable - /// radix values are 2, 8, 10 and 16. It is an error for the value implied by - /// the string to require more bits than numBits. - /// @param strStart the start of the string to be interpreted - /// @param slen the maximum number of characters to interpret - /// @param radix the radix to use for the conversion - /// @brief Construct an ap_private from a string representation. - /// This method does not consider whether it is negative or not. - INLINE ap_private(const char strStart[], uint32_t slen, uint8_t radix) { - set_canary(); - fromString(strStart, slen, radix); - check_canary(); - } - - INLINE void report() { - _AP_ERROR(_AP_W > MAX_MODE(AP_INT_MAX_W) * 1024, - "ap_%sint<%d>: Bitwidth exceeds the " - "default max value %d. Please use macro " - "AP_INT_MAX_W to set a larger max value.", - _AP_S ? "" : "u", _AP_W, MAX_MODE(AP_INT_MAX_W) * 1024); - } - /// This union is used to store the integer value. When the - /// integer bit-width <= 64, it uses VAL, otherwise it uses pVal. - - /// This enum is used to hold the constants we needed for ap_private. - // uint64_t VAL; ///< Used to store the <= 64 bits integer value. - uint64_t pVal[_AP_N]; ///< Used to store the >64 bits integer value. -#ifdef AP_CANARY - uint64_t CANARY; - INLINE void check_canary() { assert(CANARY == (uint64_t)0xDEADBEEFDEADBEEF); } - INLINE void set_canary() { CANARY = (uint64_t)0xDEADBEEFDEADBEEF; } -#else - INLINE void check_canary() {} - INLINE void set_canary() {} -#endif - - public: - typedef typename valtype<8, _AP_S>::Type ValType; - typedef ap_private<_AP_W, _AP_S> Type; - // FIXME remove friend type? 
- template - friend struct ap_fixed_base; - /// return type of variety of operations - //---------------------------------------------------------- - template - struct RType { - enum { - mult_w = _AP_W + _AP_W2, - mult_s = _AP_S || _AP_S2, - plus_w = - AP_MAX(_AP_W + (_AP_S2 && !_AP_S), _AP_W2 + (_AP_S && !_AP_S2)) + 1, - plus_s = _AP_S || _AP_S2, - minus_w = - AP_MAX(_AP_W + (_AP_S2 && !_AP_S), _AP_W2 + (_AP_S && !_AP_S2)) + 1, - minus_s = true, - div_w = _AP_W + _AP_S2, - div_s = _AP_S || _AP_S2, - mod_w = AP_MIN(_AP_W, _AP_W2 + (!_AP_S2 && _AP_S)), - mod_s = _AP_S, - logic_w = AP_MAX(_AP_W + (_AP_S2 && !_AP_S), _AP_W2 + (_AP_S && !_AP_S2)), - logic_s = _AP_S || _AP_S2 - }; - typedef ap_private mult; - typedef ap_private plus; - typedef ap_private minus; - typedef ap_private logic; - typedef ap_private div; - typedef ap_private mod; - typedef ap_private<_AP_W, _AP_S> arg1; - typedef bool reduce; - }; - - INLINE uint64_t& get_VAL(void) { return pVal[0]; } - INLINE uint64_t get_VAL(void) const { return pVal[0]; } - INLINE uint64_t get_VAL(void) const volatile { return pVal[0]; } - INLINE void set_VAL(uint64_t value) { pVal[0] = value; } - INLINE uint64_t& get_pVal(int index) { return pVal[index]; } - INLINE uint64_t* get_pVal() { return pVal; } - INLINE const uint64_t* get_pVal() const { return pVal; } - INLINE uint64_t get_pVal(int index) const { return pVal[index]; } - INLINE uint64_t* get_pVal() const volatile { return pVal; } - INLINE uint64_t get_pVal(int index) const volatile { return pVal[index]; } - INLINE void set_pVal(int i, uint64_t value) { pVal[i] = value; } - - /// This enum is used to hold the constants we needed for ap_private. - enum { - APINT_BITS_PER_WORD = sizeof(uint64_t) * 8, ///< Bits in a word - APINT_WORD_SIZE = sizeof(uint64_t) ///< Byte size of a word - }; - - enum { - excess_bits = (_AP_W % APINT_BITS_PER_WORD) - ? APINT_BITS_PER_WORD - (_AP_W % APINT_BITS_PER_WORD) - : 0 - }; - static const uint64_t mask = ((uint64_t)~0ULL >> (excess_bits)); - - public: - // NOTE changed to explicit to be consistent with ap_private - explicit INLINE ap_private(const char* val) { - set_canary(); - unsigned char radix = 10; - std::string str = ap_private_ops::parseString(val, radix); // determine radix. - std::string::size_type pos = str.find('.'); - if (pos != std::string::npos) str = str.substr(pos); - ap_private ap_private_val(str, radix); - operator=(ap_private_val); - report(); - check_canary(); - } - - INLINE ap_private(const char* val, unsigned char rd) { - set_canary(); - unsigned char radix = rd; - std::string str = ap_private_ops::parseString(val, radix); // determine radix. 
- std::string::size_type pos = str.find('.'); - if (pos != std::string::npos) str = str.substr(pos); - ap_private ap_private_val(str, radix); - operator=(ap_private_val); - report(); - - report(); - check_canary(); - } - - template - INLINE ap_private(const _private_range_ref<_AP_W2, _AP_S2>& ref) { - set_canary(); - *this = ref.get(); - report(); - check_canary(); - } - - template - INLINE ap_private(const _private_bit_ref<_AP_W2, _AP_S2>& ref) { - set_canary(); - *this = ((uint64_t)(bool)ref); - report(); - check_canary(); - } - -// template -// INLINE ap_private(const ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3>& ref) { -// set_canary(); -// *this = ref.get(); -// report(); -// check_canary(); -// } -// -// template -// INLINE ap_private( -// const af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& val) { -// set_canary(); -// *this = ((val.operator ap_private<_AP_W2, false>())); -// report(); -// check_canary(); -// } -// -// template -// INLINE ap_private( -// const af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& val) { -// set_canary(); -// *this = (uint64_t)(bool)val; -// report(); -// check_canary(); -// } - - /// Simply makes *this a copy of that. - /// @brief Copy Constructor. - INLINE ap_private(const ap_private& that) { - set_canary(); - memcpy(pVal, that.get_pVal(), _AP_N * APINT_WORD_SIZE); - clearUnusedBits(); - check_canary(); - } - - template - INLINE ap_private(const ap_private<_AP_W1, _AP_S1, false>& that) { - set_canary(); - operator=(that); - check_canary(); - } - - template - INLINE ap_private(const volatile ap_private<_AP_W1, _AP_S1, false>& that) { - set_canary(); - operator=(const_cast&>(that)); - check_canary(); - } - - template - INLINE ap_private(const ap_private<_AP_W1, _AP_S1, true>& that) { - set_canary(); - static const uint64_t that_sign_ext_mask = - (_AP_W1 == APINT_BITS_PER_WORD) - ? 0 - : ~0ULL >> (_AP_W1 % APINT_BITS_PER_WORD) - << (_AP_W1 % APINT_BITS_PER_WORD); - if (that.isNegative()) { - pVal[0] = that.get_VAL() | that_sign_ext_mask; - memset(pVal + 1, ~0, sizeof(uint64_t) * (_AP_N - 1)); - } else { - pVal[0] = that.get_VAL(); - memset(pVal + 1, 0, sizeof(uint64_t) * (_AP_N - 1)); - } - clearUnusedBits(); - check_canary(); - } - - template - INLINE ap_private(const volatile ap_private<_AP_W1, _AP_S1, true>& that) { - set_canary(); - operator=(const_cast&>(that)); - check_canary(); - } - - /// @brief Destructor. - // virtual ~ap_private() {} - INLINE ~ap_private() { check_canary(); } - - /// @name Constructors - /// @{ - - /// Default constructor that creates an uninitialized ap_private. This is - /// useful - /// for object deserialization (pair this with the static method Read). - INLINE ap_private() { - set_canary(); - clearUnusedBits(); - check_canary(); - } - - INLINE ap_private(uint64_t* val, uint32_t bits = _AP_W) { assert(0); } - INLINE ap_private(const uint64_t* const val, uint32_t bits) { assert(0); } - -/// If isSigned is true then val is treated as if it were a signed value -/// (i.e. as an int64_t) and the appropriate sign extension to the bit width -/// will be done. Otherwise, no sign extension occurs (high order bits beyond -/// the range of val are zero filled). -/// @param numBits the bit width of the constructed ap_private -/// @param val the initial value of the ap_private -/// @param isSigned how to treat signedness of val -/// @brief Create a new ap_private of numBits width, initialized as val. 
-#define CTOR(TYPE, SIGNED) \ - INLINE ap_private(TYPE val, bool isSigned = SIGNED) { \ - set_canary(); \ - pVal[0] = (ValType)val; \ - if (isSigned && int64_t(pVal[0]) < 0) { \ - memset(pVal + 1, ~0, sizeof(uint64_t) * (_AP_N - 1)); \ - } else { \ - memset(pVal + 1, 0, sizeof(uint64_t) * (_AP_N - 1)); \ - } \ - clearUnusedBits(); \ - check_canary(); \ - } - - CTOR(bool, false) - CTOR(char, CHAR_IS_SIGNED) - CTOR(signed char, true) - CTOR(unsigned char, false) - CTOR(short, true) - CTOR(unsigned short, false) - CTOR(int, true) - CTOR(unsigned int, false) - CTOR(long, true) - CTOR(unsigned long, false) - CTOR(ap_slong, true) - CTOR(ap_ulong, false) -#if 0 - CTOR(half, false) - CTOR(float, false) - CTOR(double, false) -#endif -#undef CTOR - - /// @returns true if the number of bits <= 64, false otherwise. - /// @brief Determine if this ap_private just has one word to store value. - INLINE bool isSingleWord() const { return false; } - - /// @returns the word position for the specified bit position. - /// @brief Determine which word a bit is in. - static INLINE uint32_t whichWord(uint32_t bitPosition) { - // return bitPosition / APINT_BITS_PER_WORD; - return (bitPosition) >> 6; - } - - /// @returns the bit position in a word for the specified bit position - /// in the ap_private. - /// @brief Determine which bit in a word a bit is in. - static INLINE uint32_t whichBit(uint32_t bitPosition) { - // return bitPosition % APINT_BITS_PER_WORD; - return bitPosition & 0x3f; - } - - /// bit at a specific bit position. This is used to mask the bit in the - /// corresponding word. - /// @returns a uint64_t with only bit at "whichBit(bitPosition)" set - /// @brief Get a single bit mask. - static INLINE uint64_t maskBit(uint32_t bitPosition) { - return 1ULL << (whichBit(bitPosition)); - } - - /// @returns the corresponding word for the specified bit position. - /// @brief Get the word corresponding to a bit position - INLINE uint64_t getWord(uint32_t bitPosition) const { - return pVal[whichWord(bitPosition)]; - } - - /// This method is used internally to clear the to "N" bits in the high order - /// word that are not used by the ap_private. This is needed after the most - /// significant word is assigned a value to ensure that those bits are - /// zero'd out. - /// @brief Clear unused high order bits - INLINE void clearUnusedBits(void) volatile -// just for clang compiler -#if defined(__clang__) && !defined(__CLANG_3_1__) - __attribute__((no_sanitize("undefined"))) -#endif - { - pVal[_AP_N - 1] = - _AP_S ? ((((int64_t)pVal[_AP_N - 1]) << (excess_bits)) >> excess_bits) - : (excess_bits - ? ((pVal[_AP_N - 1]) << (excess_bits)) >> (excess_bits) - : pVal[_AP_N - 1]); - } - - INLINE void clearUnusedBitsToZero(void) { pVal[_AP_N - 1] &= mask; } - - INLINE void clearUnusedBitsToOne(void) { pVal[_AP_N - 1] |= mask; } - - /// This is used by the constructors that take string arguments. 
- /// @brief Convert a char array into an ap_private - INLINE void fromString(const char* str, uint32_t slen, uint8_t radix) { - enum { numbits = _AP_W }; - bool isNeg = str[0] == '-'; - if (isNeg) { - str++; - slen--; - } - - if (str[0] == '0' && (str[1] == 'b' || str[1] == 'B')) { - //if(radix == 0) radix = 2; - _AP_WARNING(radix != 2, "%s seems to have base %d, but %d given.", str, 2, radix); - str += 2; - slen -=2; - } else if (str[0] == '0' && (str[1] == 'o' || str[1] == 'O')) { - //if (radix == 0) radix = 8; - _AP_WARNING(radix != 8, "%s seems to have base %d, but %d given.", str, 8, radix); - str += 2; - slen -=2; - } else if (str[0] == '0' && (str[1] == 'x' || str[1] == 'X')) { - //if (radix == 0) radix = 16; - _AP_WARNING(radix != 16, "%s seems to have base %d, but %d given.", str, 16, radix); - str += 2; - slen -=2; - } else if (str[0] == '0' && (str[1] == 'd' || str[1] == 'D')) { - //if (radix == 0) radix = 10; - _AP_WARNING(radix != 10, "%s seems to have base %d, but %d given.", str, 10, radix); - str += 2; - slen -=2; - } else if (radix == 0) { - //radix = 2; // XXX default value - } - - // Check our assumptions here - assert((radix == 10 || radix == 8 || radix == 16 || radix == 2) && - "Radix should be 2, 8, 10, or 16!"); - assert(str && "String is null?"); - - // skip any leading zero - while (*str == '0' && *(str + 1) != '\0') { - str++; - slen--; - } - assert((slen <= numbits || radix != 2) && "Insufficient bit width"); - assert(((slen - 1) * 3 <= numbits || radix != 8) && - "Insufficient bit width"); - assert(((slen - 1) * 4 <= numbits || radix != 16) && - "Insufficient bit width"); - assert((((slen - 1) * 64) / 22 <= numbits || radix != 10) && - "Insufficient bit width"); - - // clear bits - memset(pVal, 0, _AP_N * sizeof(uint64_t)); - - // Figure out if we can shift instead of multiply - uint32_t shift = (radix == 16 ? 4 : radix == 8 ? 3 : radix == 2 ? 1 : 0); - - // Set up an ap_private for the digit to add outside the loop so we don't - // constantly construct/destruct it. - uint64_t bigVal[_AP_N]; - memset(bigVal, 0, _AP_N * sizeof(uint64_t)); - ap_private<_AP_W, _AP_S> apdigit(getBitWidth(), bigVal); - ap_private<_AP_W, _AP_S> apradix(radix); - - // Enter digit traversal loop - for (unsigned i = 0; i < slen; i++) { - // Get a digit - uint32_t digit = 0; - char cdigit = str[i]; - if (radix == 16) { -#define isxdigit(c) \ - (((c) >= '0' && (c) <= '9') || ((c) >= 'a' && (c) <= 'f') || \ - ((c) >= 'A' && (c) <= 'F')) -#define isdigit(c) ((c) >= '0' && (c) <= '9') - if (!isxdigit(cdigit)) assert(0 && "Invalid hex digit in string"); - if (isdigit(cdigit)) - digit = cdigit - '0'; - else if (cdigit >= 'a') - digit = cdigit - 'a' + 10; - else if (cdigit >= 'A') - digit = cdigit - 'A' + 10; - else - assert(0 && "huh? 
we shouldn't get here"); - } else if (isdigit(cdigit)) { - digit = cdigit - '0'; - } else if (cdigit != '\0') { - assert(0 && "Invalid character in digit string"); - } -#undef isxdigit -#undef isdigit - // Shift or multiply the value by the radix - if (shift) - *this <<= shift; - else - *this *= apradix; - - // Add in the digit we just interpreted - apdigit.set_VAL(digit); - *this += apdigit; - } - // If its negative, put it in two's complement form - if (isNeg) { - (*this)--; - this->flip(); - } - clearUnusedBits(); - } - - INLINE ap_private read() volatile { return *this; } - - INLINE void write(const ap_private& op2) volatile { *this = (op2); } - - INLINE operator ValType() const { return get_VAL(); } - - INLINE int to_uchar() const { return (unsigned char)get_VAL(); } - - INLINE int to_char() const { return (signed char)get_VAL(); } - - INLINE int to_ushort() const { return (unsigned short)get_VAL(); } - - INLINE int to_short() const { return (short)get_VAL(); } - - INLINE int to_int() const { return (int)get_VAL(); } - - INLINE unsigned to_uint() const { return (unsigned)get_VAL(); } - - INLINE long to_long() const { return (long)get_VAL(); } - - INLINE unsigned long to_ulong() const { return (unsigned long)get_VAL(); } - - INLINE ap_slong to_int64() const { return (ap_slong)get_VAL(); } - - INLINE ap_ulong to_uint64() const { return (ap_ulong)get_VAL(); } - - INLINE double to_double() const { - if (isNegative()) - return roundToDouble(true); - else - return roundToDouble(false); - } - - INLINE unsigned length() const { return _AP_W; } - - /*Reverse the contents of ap_private instance. I.e. LSB becomes MSB and vise - * versa*/ - INLINE ap_private& reverse() { - for (int i = 0; i < _AP_W / 2; ++i) { - bool tmp = operator[](i); - if (operator[](_AP_W - 1 - i)) - set(i); - else - clear(i); - if (tmp) - set(_AP_W - 1 - i); - else - clear(_AP_W - 1 - i); - } - clearUnusedBits(); - return *this; - } - - /*Return true if the value of ap_private instance is zero*/ - INLINE bool iszero() const { return isMinValue(); } - - INLINE bool to_bool() const { return !iszero(); } - - /* x < 0 */ - INLINE bool sign() const { - if (isNegative()) return true; - return false; - } - - /* x[i] = !x[i] */ - INLINE void invert(int i) { - assert(i >= 0 && "Attempting to read bit with negative index"); - assert(i < _AP_W && "Attempting to read bit beyond MSB"); - flip(i); - } - - /* x[i] */ - INLINE bool test(int i) const { - assert(i >= 0 && "Attempting to read bit with negative index"); - assert(i < _AP_W && "Attempting to read bit beyond MSB"); - return operator[](i); - } - - // Set the ith bit into v - INLINE void set(int i, bool v) { - assert(i >= 0 && "Attempting to write bit with negative index"); - assert(i < _AP_W && "Attempting to write bit beyond MSB"); - v ? set(i) : clear(i); - } - - // Set the ith bit into v - INLINE void set_bit(int i, bool v) { - assert(i >= 0 && "Attempting to write bit with negative index"); - assert(i < _AP_W && "Attempting to write bit beyond MSB"); - v ? set(i) : clear(i); - } - - // FIXME different argument for different action? 
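A standalone sketch, with hypothetical names, of the word/bit addressing the single-bit accessors here rely on: whichWord() is position / 64, whichBit() is position % 64, and maskBit() isolates the bit inside its word.

#include <cstdint>

// Hypothetical illustration only: set bit `pos` in a multi-word value the
// same way set()/get() do via whichWord()/maskBit().
static void set_bit_in_words(uint64_t* words, uint32_t pos) {
  uint32_t word = pos >> 6;             // pos / 64: word holding the bit
  uint64_t mask = 1ULL << (pos & 0x3f); // 1 << (pos % 64): bit in that word
  words[word] |= mask;                  // e.g. pos = 70 sets bit 6 of words[1]
}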
- INLINE ap_private& set(uint32_t bitPosition) { - pVal[whichWord(bitPosition)] |= maskBit(bitPosition); - clearUnusedBits(); - return *this; - } - - INLINE void set() { - for (int i = 0; i < _AP_N; ++i) pVal[i] = ~0ULL; - clearUnusedBits(); - } - - // Get the value of ith bit - INLINE bool get(int i) const { - assert(i >= 0 && "Attempting to read bit with negative index"); - assert(i < _AP_W && "Attempting to read bit beyond MSB"); - return ((maskBit(i) & (pVal[whichWord(i)])) != 0); - } - - // Get the value of ith bit - INLINE bool get_bit(int i) const { - assert(i >= 0 && "Attempting to read bit with negative index"); - assert(i < _AP_W && "Attempting to read bit beyond MSB"); - return ((maskBit(i) & (pVal[whichWord(i)])) != 0); - } - - // This is used for sc_lv and sc_bv, which is implemented by sc_uint - // Rotate an ap_private object n places to the left - INLINE void lrotate(int n) { - assert(n >= 0 && "Attempting to shift negative index"); - assert(n < _AP_W && "Shift value larger than bit width"); - operator=(shl(n) | lshr(_AP_W - n)); - } - - // This is used for sc_lv and sc_bv, which is implemented by sc_uint - // Rotate an ap_private object n places to the right - INLINE void rrotate(int n) { - assert(n >= 0 && "Attempting to shift negative index"); - assert(n < _AP_W && "Shift value larger than bit width"); - operator=(lshr(n) | shl(_AP_W - n)); - } - - /// Set the given bit to 0 whose position is given as "bitPosition". - /// @brief Set a given bit to 0. - INLINE ap_private& clear(uint32_t bitPosition) { - pVal[whichWord(bitPosition)] &= ~maskBit(bitPosition); - clearUnusedBits(); - return *this; - } - - /// @brief Set every bit to 0. - INLINE void clear() { memset(pVal, 0, _AP_N * APINT_WORD_SIZE); } - - /// @brief Toggle every bit to its opposite value. - ap_private& flip() { - for (int i = 0; i < _AP_N; ++i) pVal[i] ^= ~0ULL; - clearUnusedBits(); - return *this; - } - - /// @brief Toggles a given bit to its opposite value. - INLINE ap_private& flip(uint32_t bitPosition) { - assert(bitPosition < BitWidth && "Out of the bit-width range!"); - set_bit(bitPosition, !get_bit(bitPosition)); - return *this; - } - - // complements every bit - INLINE void b_not() { flip(); } - - INLINE ap_private getLoBits(uint32_t numBits) const { - return ap_private_ops::lshr(ap_private_ops::shl(*this, _AP_W - numBits), - _AP_W - numBits); - } - - INLINE ap_private getHiBits(uint32_t numBits) const { - return ap_private_ops::lshr(*this, _AP_W - numBits); - } - - // Binary Arithmetic - //----------------------------------------------------------- - -// template -// INLINE ap_private operator&( -// const ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3>& a2) { -// return *this & a2.get(); -// } -// -// template -// INLINE ap_private operator|( -// const ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3>& a2) { -// return *this | a2.get(); -// } -// -// template -// INLINE ap_private operator^( -// const ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3>& a2) { -// return *this ^ a2.get(); -// } - -/// Arithmetic assign -//------------------------------------------------------------- - -#define OP_BIN_LOGIC_ASSIGN_AP(Sym) \ - template \ - INLINE ap_private& operator Sym(const ap_private<_AP_W1, _AP_S1>& RHS) { \ - const int _AP_N1 = ap_private<_AP_W1, _AP_S1>::_AP_N; \ - uint32_t numWords = AESL_std::min((int)_AP_N, _AP_N1); \ - uint32_t i; \ - if (_AP_W != _AP_W1) \ - fprintf(stderr, \ - "Warning! 
Bitsize mismach for ap_[u]int " #Sym " ap_[u]int.\n"); \ - for (i = 0; i < numWords; ++i) pVal[i] Sym RHS.get_pVal(i); \ - if (_AP_N1 < _AP_N) { \ - uint64_t ext = RHS.isNegative() ? ~0ULL : 0; \ - for (; i < _AP_N; i++) pVal[i] Sym ext; \ - } \ - clearUnusedBits(); \ - return *this; \ - } - - OP_BIN_LOGIC_ASSIGN_AP(&=); - OP_BIN_LOGIC_ASSIGN_AP(|=); - OP_BIN_LOGIC_ASSIGN_AP(^=); -#undef OP_BIN_LOGIC_ASSIGN_AP - - /// Adds the RHS APint to this ap_private. - /// @returns this, after addition of RHS. - /// @brief Addition assignment operator. - template - INLINE ap_private& operator+=(const ap_private<_AP_W1, _AP_S1>& RHS) { - const int _AP_N1 = ap_private<_AP_W1, _AP_S1>::_AP_N; - uint64_t RHSpVal[_AP_N1]; - for (int i = 0; i < _AP_N1; ++i) RHSpVal[i] = RHS.get_pVal(i); - ap_private_ops::add(pVal, pVal, RHSpVal, _AP_N, _AP_N, _AP_N1, _AP_S, - _AP_S1); - clearUnusedBits(); - return *this; - } - - template - INLINE ap_private& operator-=(const ap_private<_AP_W1, _AP_S1>& RHS) { - const int _AP_N1 = ap_private<_AP_W1, _AP_S1>::_AP_N; - uint64_t RHSpVal[_AP_N1]; - for (int i = 0; i < _AP_N1; ++i) RHSpVal[i] = RHS.get_pVal(i); - ap_private_ops::sub(pVal, pVal, RHSpVal, _AP_N, _AP_N, _AP_N1, _AP_S, - _AP_S1); - clearUnusedBits(); - return *this; - } - - template - INLINE ap_private& operator*=(const ap_private<_AP_W1, _AP_S1>& RHS) { - // Get some bit facts about LHS and check for zero - uint32_t lhsBits = getActiveBits(); - uint32_t lhsWords = !lhsBits ? 0 : whichWord(lhsBits - 1) + 1; - if (!lhsWords) { - // 0 * X ===> 0 - return *this; - } - - ap_private dupRHS = RHS; - // Get some bit facts about RHS and check for zero - uint32_t rhsBits = dupRHS.getActiveBits(); - uint32_t rhsWords = !rhsBits ? 0 : whichWord(rhsBits - 1) + 1; - if (!rhsWords) { - // X * 0 ===> 0 - clear(); - return *this; - } - - // Allocate space for the result - uint32_t destWords = rhsWords + lhsWords; - uint64_t* dest = (uint64_t*)malloc(destWords * sizeof(uint64_t)); - - // Perform the long multiply - ap_private_ops::mul(dest, pVal, lhsWords, dupRHS.get_pVal(), rhsWords, - destWords); - - // Copy result back into *this - clear(); - uint32_t wordsToCopy = destWords >= _AP_N ? _AP_N : destWords; - - memcpy(pVal, dest, wordsToCopy * APINT_WORD_SIZE); - - uint64_t ext = (isNegative() ^ RHS.isNegative()) ? ~0ULL : 0ULL; - for (int i = wordsToCopy; i < _AP_N; i++) pVal[i] = ext; - clearUnusedBits(); - // delete dest array and return - free(dest); - return *this; - } - -#define OP_ASSIGN_AP(Sym) \ - template \ - INLINE ap_private& operator Sym##=(const ap_private<_AP_W2, _AP_S2>& op) { \ - *this = operator Sym(op); \ - return *this; \ - } - - OP_ASSIGN_AP(/) - OP_ASSIGN_AP(%) -#undef OP_ASSIGN_AP - -#define OP_BIN_LOGIC_AP(Sym) \ - template \ - INLINE typename RType<_AP_W1, _AP_S1>::logic operator Sym( \ - const ap_private<_AP_W1, _AP_S1>& RHS) const { \ - enum { \ - numWords = (RType<_AP_W1, _AP_S1>::logic_w + APINT_BITS_PER_WORD - 1) / \ - APINT_BITS_PER_WORD \ - }; \ - typename RType<_AP_W1, _AP_S1>::logic Result; \ - uint32_t i; \ - const int _AP_N1 = ap_private<_AP_W1, _AP_S1>::_AP_N; \ - uint32_t min_N = std::min((int)_AP_N, _AP_N1); \ - uint32_t max_N = std::max((int)_AP_N, _AP_N1); \ - for (i = 0; i < min_N; ++i) \ - Result.set_pVal(i, pVal[i] Sym RHS.get_pVal(i)); \ - if (numWords > i) { \ - uint64_t ext = ((_AP_N < _AP_N1 && isNegative()) || \ - (_AP_N1 < _AP_N && RHS.isNegative())) \ - ? 
~0ULL \ - : 0; \ - if (_AP_N > _AP_N1) \ - for (; i < max_N; i++) Result.set_pVal(i, pVal[i] Sym ext); \ - else \ - for (; i < max_N; i++) Result.set_pVal(i, RHS.get_pVal(i) Sym ext); \ - if (numWords > i) { \ - uint64_t ext2 = ((_AP_N > _AP_N1 && isNegative()) || \ - (_AP_N1 > _AP_N && RHS.isNegative())) \ - ? ~0ULL \ - : 0; \ - Result.set_pVal(i, ext Sym ext2); \ - } \ - } \ - Result.clearUnusedBits(); \ - return Result; \ - } - - OP_BIN_LOGIC_AP(|); - OP_BIN_LOGIC_AP(&); - OP_BIN_LOGIC_AP(^); - -#undef OP_BIN_LOGIC_AP - - template - INLINE typename RType<_AP_W1, _AP_S1>::plus operator+( - const ap_private<_AP_W1, _AP_S1>& RHS) const { - typename RType<_AP_W1, _AP_S1>::plus Result, lhs(*this), rhs(RHS); - const int Result_AP_N = (RType<_AP_W1, _AP_S1>::plus_w + 63) / 64; - ap_private_ops::add(Result.get_pVal(), lhs.get_pVal(), rhs.get_pVal(), - Result_AP_N, Result_AP_N, Result_AP_N, _AP_S, _AP_S1); - Result.clearUnusedBits(); - return Result; - } - - template - INLINE typename RType<_AP_W1, _AP_S1>::minus operator-( - const ap_private<_AP_W1, _AP_S1>& RHS) const { - typename RType<_AP_W1, _AP_S1>::minus Result, lhs(*this), rhs(RHS); - const int Result_AP_N = (RType<_AP_W1, _AP_S1>::minus_w + 63) / 64; - ap_private_ops::sub(Result.get_pVal(), lhs.get_pVal(), rhs.get_pVal(), - Result_AP_N, Result_AP_N, Result_AP_N, _AP_S, _AP_S1); - Result.clearUnusedBits(); - return Result; - } - - template - INLINE typename RType<_AP_W1, _AP_S1>::mult operator*( - const ap_private<_AP_W1, _AP_S1>& RHS) const { - typename RType<_AP_W1, _AP_S1>::mult temp = *this; - temp *= RHS; - return temp; - } - - template - INLINE typename RType<_AP_W2, _AP_S2>::div operator/( - const ap_private<_AP_W2, _AP_S2>& op) const { - ap_private _AP_W2 ? _AP_S - : (_AP_W2 > _AP_W ? _AP_S2 : _AP_S || _AP_S2))> - lhs = *this; - ap_private _AP_W2 ? _AP_S - : (_AP_W2 > _AP_W ? _AP_S2 : _AP_S || _AP_S2))> - rhs = op; - return typename RType<_AP_W2, _AP_S2>::div( - (_AP_S || _AP_S2) ? lhs.sdiv(rhs) : lhs.udiv(rhs)); - } - - template - INLINE typename RType<_AP_W2, _AP_S2>::mod operator%( - const ap_private<_AP_W2, _AP_S2>& op) const { - ap_private _AP_W2 ? _AP_S - : (_AP_W2 > _AP_W ? _AP_S2 : _AP_S || _AP_S2))> - lhs = *this; - ap_private _AP_W2 ? _AP_S - : (_AP_W2 > _AP_W ? _AP_S2 : _AP_S || _AP_S2))> - rhs = op; - typename RType<_AP_W2, _AP_S2>::mod res = - typename RType<_AP_W2, _AP_S2>::mod(_AP_S ? 
lhs.srem(rhs) - : lhs.urem(rhs)); - return res; - } - -#define OP_LEFT_SHIFT_CTYPE(TYPE, SIGNED) \ - INLINE ap_private operator<<(const TYPE op) const { \ - if (op >= _AP_W) return ap_private(0); \ - if (SIGNED && op < 0) return *this >> (0 - op); \ - return shl(op); \ - } - - OP_LEFT_SHIFT_CTYPE(int, true) - // OP_LEFT_SHIFT_CTYPE(bool, false) - OP_LEFT_SHIFT_CTYPE(signed char, true) - OP_LEFT_SHIFT_CTYPE(unsigned char, false) - OP_LEFT_SHIFT_CTYPE(short, true) - OP_LEFT_SHIFT_CTYPE(unsigned short, false) - OP_LEFT_SHIFT_CTYPE(unsigned int, false) - OP_LEFT_SHIFT_CTYPE(long, true) - OP_LEFT_SHIFT_CTYPE(unsigned long, false) - OP_LEFT_SHIFT_CTYPE(unsigned long long, false) - OP_LEFT_SHIFT_CTYPE(long long, true) -#if 0 - OP_LEFT_SHIFT_CTYPE(half, false) - OP_LEFT_SHIFT_CTYPE(float, false) - OP_LEFT_SHIFT_CTYPE(double, false) -#endif -#undef OP_LEFT_SHIFT_CTYPE - - template - INLINE ap_private operator<<(const ap_private<_AP_W2, _AP_S2>& op2) const { - if (_AP_S2 == false) { - uint32_t sh = op2.to_uint(); - return *this << sh; - } else { - int sh = op2.to_int(); - return *this << sh; - } - } - -#define OP_RIGHT_SHIFT_CTYPE(TYPE, SIGNED) \ - INLINE ap_private operator>>(const TYPE op) const { \ - if (op >= _AP_W) { \ - if (isNegative()) \ - return ap_private(-1); \ - else \ - return ap_private(0); \ - } \ - if ((SIGNED) && op < 0) return *this << (0 - op); \ - if (_AP_S) \ - return ashr(op); \ - else \ - return lshr(op); \ - } - - // OP_RIGHT_SHIFT_CTYPE(bool, false) - OP_RIGHT_SHIFT_CTYPE(char, CHAR_IS_SIGNED) - OP_RIGHT_SHIFT_CTYPE(signed char, true) - OP_RIGHT_SHIFT_CTYPE(unsigned char, false) - OP_RIGHT_SHIFT_CTYPE(short, true) - OP_RIGHT_SHIFT_CTYPE(unsigned short, false) - OP_RIGHT_SHIFT_CTYPE(int, true) - OP_RIGHT_SHIFT_CTYPE(unsigned int, false) - OP_RIGHT_SHIFT_CTYPE(long, true) - OP_RIGHT_SHIFT_CTYPE(unsigned long, false) - OP_RIGHT_SHIFT_CTYPE(unsigned long long, false) - OP_RIGHT_SHIFT_CTYPE(long long, true) -#if 0 - OP_RIGHT_SHIFT_CTYPE(half, false) - OP_RIGHT_SHIFT_CTYPE(float, false) - OP_RIGHT_SHIFT_CTYPE(double, false) -#endif -#undef OP_RIGHT_SHIFT_CTYPE - - template - INLINE ap_private operator>>(const ap_private<_AP_W2, _AP_S2>& op2) const { - if (_AP_S2 == false) { - uint32_t sh = op2.to_uint(); - return *this >> sh; - } else { - int sh = op2.to_int(); - return *this >> sh; - } - } - - /// Shift assign - //------------------------------------------------------------------ - // TODO call clearUnusedBits ? -#define OP_ASSIGN_AP(Sym) \ - template \ - INLINE ap_private& operator Sym##=(int op) { \ - *this = operator Sym(op); \ - return *this; \ - } \ - INLINE ap_private& operator Sym##=(unsigned int op) { \ - *this = operator Sym(op); \ - return *this; \ - } \ - template \ - INLINE ap_private& operator Sym##=(const ap_private<_AP_W2, _AP_S2>& op) { \ - *this = operator Sym(op); \ - return *this; \ - } - OP_ASSIGN_AP(>>) - OP_ASSIGN_AP(<<) -#undef OP_ASSIGN_AP - - /// Comparisons - //----------------------------------------------------------------- - INLINE bool operator==(const ap_private& RHS) const { - // Get some facts about the number of bits used in the two operands. - uint32_t n1 = getActiveBits(); - uint32_t n2 = RHS.getActiveBits(); - - // If the number of bits isn't the same, they aren't equal - if (n1 != n2) return false; - - // If the number of bits fits in a word, we only need to compare the low - // word. 
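// (The n1 != n2 fast reject above is sound because clearUnusedBits() keeps
// the padding bits canonical: equal values of the same type always hold
// identical words, hence identical active-bit counts.)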
- if (n1 <= APINT_BITS_PER_WORD) return pVal[0] == RHS.get_pVal(0); - - // Otherwise, compare everything - for (int i = whichWord(n1 - 1); i >= 0; --i) - if (pVal[i] != RHS.get_pVal(i)) return false; - return true; - } - - template - INLINE bool operator==(const ap_private<_AP_W2, _AP_S2>& op) const { - enum { - _AP_MAX_W = AP_MAX(_AP_W, _AP_W2), - }; - ap_private<_AP_MAX_W, false> lhs(*this); - ap_private<_AP_MAX_W, false> rhs(op); - return lhs == rhs; - } - - INLINE bool operator==(uint64_t Val) const { - uint32_t n = getActiveBits(); - if (n <= APINT_BITS_PER_WORD) - return pVal[0] == Val; - else - return false; - } - - template - INLINE bool operator!=(const ap_private<_AP_W2, _AP_S2>& op) const { - return !(*this == op); - } - - template - INLINE bool operator!=(const ap_private<_AP_W, _AP_S1>& RHS) const { - return !((*this) == RHS); - } - - INLINE bool operator!=(uint64_t Val) const { return !((*this) == Val); } - - template - INLINE bool operator<=(const ap_private<_AP_W2, _AP_S2>& op) const { - return !(*this > op); - } - - INLINE bool operator<(const ap_private& op) const { - return _AP_S ? slt(op) : ult(op); - } - - template - INLINE bool operator<(const ap_private<_AP_W2, _AP_S2>& op) const { - enum { - _AP_MAX_W = AP_MAX(_AP_W + (_AP_S || _AP_S2), _AP_W2 + (_AP_S || _AP_S2)) - }; - ap_private<_AP_MAX_W, _AP_S> lhs(*this); - ap_private<_AP_MAX_W, _AP_S2> rhs(op); - if (_AP_S == _AP_S2) - return _AP_S ? lhs.slt(rhs) : lhs.ult(rhs); - else if (_AP_S) - if (_AP_W2 >= _AP_W) - return lhs.ult(rhs); - else - return lhs.slt(rhs); - else if (_AP_W >= _AP_W2) - return lhs.ult(rhs); - else - return lhs.slt(rhs); - } - - template - INLINE bool operator>=(const ap_private<_AP_W2, _AP_S2>& op) const { - return !(*this < op); - } - - INLINE bool operator>(const ap_private& op) const { - return _AP_S ? sgt(op) : ugt(op); - } - - template - INLINE bool operator>(const ap_private<_AP_W2, _AP_S2>& op) const { - enum { - _AP_MAX_W = AP_MAX(_AP_W + (_AP_S || _AP_S2), _AP_W2 + (_AP_S || _AP_S2)) - }; - ap_private<_AP_MAX_W, _AP_S> lhs(*this); - ap_private<_AP_MAX_W, _AP_S2> rhs(op); - if (_AP_S == _AP_S2) - return _AP_S ? 
lhs.sgt(rhs) : lhs.ugt(rhs); - else if (_AP_S) - if (_AP_W2 >= _AP_W) - return lhs.ugt(rhs); - else - return lhs.sgt(rhs); - else if (_AP_W >= _AP_W2) - return lhs.ugt(rhs); - else - return lhs.sgt(rhs); - } - - /// Bit and Part Select - //-------------------------------------------------------------- - INLINE _private_range_ref<_AP_W, _AP_S> operator()(int Hi, int Lo) { - return _private_range_ref<_AP_W, _AP_S>(this, Hi, Lo); - } - - INLINE _private_range_ref<_AP_W, _AP_S> operator()(int Hi, int Lo) const { - return _private_range_ref<_AP_W, _AP_S>( - const_cast*>(this), Hi, Lo); - } - - INLINE _private_range_ref<_AP_W, _AP_S> range(int Hi, int Lo) const { - return _private_range_ref<_AP_W, _AP_S>( - (const_cast*>(this)), Hi, Lo); - } - - INLINE _private_range_ref<_AP_W, _AP_S> range(int Hi, int Lo) { - return _private_range_ref<_AP_W, _AP_S>(this, Hi, Lo); - } - - template - INLINE _private_range_ref<_AP_W, _AP_S> range( - const ap_private<_AP_W2, _AP_S2>& HiIdx, - const ap_private<_AP_W3, _AP_S3>& LoIdx) { - int Hi = HiIdx.to_int(); - int Lo = LoIdx.to_int(); - return _private_range_ref<_AP_W, _AP_S>(this, Hi, Lo); - } - - template - INLINE _private_range_ref<_AP_W, _AP_S> operator()( - const ap_private<_AP_W2, _AP_S2>& HiIdx, - const ap_private<_AP_W3, _AP_S3>& LoIdx) { - int Hi = HiIdx.to_int(); - int Lo = LoIdx.to_int(); - return _private_range_ref<_AP_W, _AP_S>(this, Hi, Lo); - } - - template - INLINE _private_range_ref<_AP_W, _AP_S> range( - const ap_private<_AP_W2, _AP_S2>& HiIdx, - const ap_private<_AP_W3, _AP_S3>& LoIdx) const { - int Hi = HiIdx.to_int(); - int Lo = LoIdx.to_int(); - return _private_range_ref<_AP_W, _AP_S>(const_cast(this), Hi, Lo); - } - - template - INLINE _private_range_ref<_AP_W, _AP_S> operator()( - const ap_private<_AP_W2, _AP_S2>& HiIdx, - const ap_private<_AP_W3, _AP_S3>& LoIdx) const { - int Hi = HiIdx.to_int(); - int Lo = LoIdx.to_int(); - return this->range(Hi, Lo); - } - - INLINE _private_bit_ref<_AP_W, _AP_S> operator[](int index) { - return _private_bit_ref<_AP_W, _AP_S>(*this, index); - } - - template - INLINE _private_bit_ref<_AP_W, _AP_S> operator[]( - const ap_private<_AP_W2, _AP_S2>& index) { - return _private_bit_ref<_AP_W, _AP_S>(*this, index.to_int()); - } - - template - INLINE const _private_bit_ref<_AP_W, _AP_S> operator[]( - const ap_private<_AP_W2, _AP_S2>& index) const { - return _private_bit_ref<_AP_W, _AP_S>( - const_cast&>(*this), index.to_int()); - } - - INLINE const _private_bit_ref<_AP_W, _AP_S> operator[](int index) const { - return _private_bit_ref<_AP_W, _AP_S>( - const_cast&>(*this), index); - } - - INLINE _private_bit_ref<_AP_W, _AP_S> bit(int index) { - return _private_bit_ref<_AP_W, _AP_S>(*this, index); - } - - template - INLINE _private_bit_ref<_AP_W, _AP_S> bit(const ap_private<_AP_W2, _AP_S2>& index) { - return _private_bit_ref<_AP_W, _AP_S>(*this, index.to_int()); - } - - INLINE const _private_bit_ref<_AP_W, _AP_S> bit(int index) const { - return _private_bit_ref<_AP_W, _AP_S>( - const_cast&>(*this), index); - } - - template - INLINE const _private_bit_ref<_AP_W, _AP_S> bit( - const ap_private<_AP_W2, _AP_S2>& index) const { - return _private_bit_ref<_AP_W, _AP_S>( - const_cast&>(*this), index.to_int()); - } - -// template -// INLINE ap_concat_ref<_AP_W, ap_private<_AP_W, _AP_S>, _AP_W2, -// ap_private<_AP_W2, _AP_S2> > -// concat(ap_private<_AP_W2, _AP_S2>& a2) { -// return ap_concat_ref<_AP_W, ap_private<_AP_W, _AP_S>, _AP_W2, -// ap_private<_AP_W2, _AP_S2> >(*this, a2); -// } -// -// template -// INLINE 
ap_concat_ref<_AP_W, ap_private<_AP_W, _AP_S>, _AP_W2, -// ap_private<_AP_W2, _AP_S2> > -// concat(const ap_private<_AP_W2, _AP_S2>& a2) const { -// return ap_concat_ref<_AP_W, ap_private<_AP_W, _AP_S>, _AP_W2, -// ap_private<_AP_W2, _AP_S2> >( -// const_cast&>(*this), -// const_cast&>(a2)); -// } -// -// template -// INLINE ap_concat_ref<_AP_W, ap_private, _AP_W2, ap_private<_AP_W2, _AP_S2> > -// operator,(ap_private<_AP_W2, _AP_S2> &a2) { -// return ap_concat_ref<_AP_W, ap_private, _AP_W2, -// ap_private<_AP_W2, _AP_S2> >(*this, a2); -// } -// -// template -// INLINE ap_concat_ref<_AP_W, ap_private, _AP_W2, ap_private<_AP_W2, _AP_S2> > -// operator,(ap_private<_AP_W2, _AP_S2> &a2) const { -// return ap_concat_ref<_AP_W, ap_private, _AP_W2, -// ap_private<_AP_W2, _AP_S2> >( -// const_cast&>(*this), a2); -// } -// -// template -// INLINE ap_concat_ref<_AP_W, ap_private, _AP_W2, ap_private<_AP_W2, _AP_S2> > -// operator,(const ap_private<_AP_W2, _AP_S2> &a2) { -// return ap_concat_ref<_AP_W, ap_private, _AP_W2, -// ap_private<_AP_W2, _AP_S2> >( -// *this, const_cast&>(a2)); -// } -// -// template -// INLINE ap_concat_ref<_AP_W, ap_private, _AP_W2, ap_private<_AP_W2, _AP_S2> > -// operator,(const ap_private<_AP_W2, _AP_S2> &a2) const { -// return ap_concat_ref<_AP_W, ap_private, _AP_W2, -// ap_private<_AP_W2, _AP_S2> >( -// const_cast&>(*this), -// const_cast&>(a2)); -// } -// -// template -// INLINE ap_concat_ref<_AP_W, ap_private<_AP_W, _AP_S>, _AP_W2, -// _private_range_ref<_AP_W2, _AP_S2> > -// operator,(const _private_range_ref<_AP_W2, _AP_S2> &a2) const { -// return ap_concat_ref<_AP_W, ap_private<_AP_W, _AP_S>, _AP_W2, -// _private_range_ref<_AP_W2, _AP_S2> >( -// const_cast&>(*this), -// const_cast<_private_range_ref<_AP_W2, _AP_S2>&>(a2)); -// } -// -// template -// INLINE ap_concat_ref<_AP_W, ap_private<_AP_W, _AP_S>, _AP_W2, -// _private_range_ref<_AP_W2, _AP_S2> > -// operator,(_private_range_ref<_AP_W2, _AP_S2> &a2) { -// return ap_concat_ref<_AP_W, ap_private<_AP_W, _AP_S>, _AP_W2, -// _private_range_ref<_AP_W2, _AP_S2> >(*this, a2); -// } -// -// template -// INLINE ap_concat_ref<_AP_W, ap_private<_AP_W, _AP_S>, 1, -// _private_bit_ref<_AP_W2, _AP_S2> > -// operator,(const _private_bit_ref<_AP_W2, _AP_S2> &a2) const { -// return ap_concat_ref<_AP_W, ap_private<_AP_W, _AP_S>, 1, -// _private_bit_ref<_AP_W2, _AP_S2> >( -// const_cast&>(*this), -// const_cast<_private_bit_ref<_AP_W2, _AP_S2>&>(a2)); -// } -// -// template -// INLINE ap_concat_ref<_AP_W, ap_private<_AP_W, _AP_S>, 1, -// _private_bit_ref<_AP_W2, _AP_S2> > -// operator,(_private_bit_ref<_AP_W2, _AP_S2> &a2) { -// return ap_concat_ref<_AP_W, ap_private<_AP_W, _AP_S>, 1, -// _private_bit_ref<_AP_W2, _AP_S2> >(*this, a2); -// } -// -// template -// INLINE ap_concat_ref<_AP_W, ap_private<_AP_W, _AP_S>, _AP_W2 + _AP_W3, -// ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3> > -// operator,(const ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3> &a2) const { -// return ap_concat_ref<_AP_W, ap_private<_AP_W, _AP_S>, _AP_W2 + _AP_W3, -// ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3> >( -// const_cast&>(*this), -// const_cast&>(a2)); -// } -// -// template -// INLINE ap_concat_ref<_AP_W, ap_private<_AP_W, _AP_S>, _AP_W2 + _AP_W3, -// ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3> > -// operator,(ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3> &a2) { -// return ap_concat_ref<_AP_W, ap_private<_AP_W, _AP_S>, _AP_W2 + _AP_W3, -// ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3> >(*this, -// a2); -// } -// -// template -// INLINE 
ap_concat_ref< -// _AP_W, ap_private, _AP_W2, -// af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> > -// operator,(const af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> -// &a2) const { -// return ap_concat_ref< -// _AP_W, ap_private, _AP_W2, -// af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> >( -// const_cast&>(*this), -// const_cast< -// af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>&>(a2)); -// } -// -// template -// INLINE ap_concat_ref< -// _AP_W, ap_private, _AP_W2, -// af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> > -// operator,(af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> &a2) { -// return ap_concat_ref< -// _AP_W, ap_private, _AP_W2, -// af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> >(*this, -// a2); -// } -// -// template -// INLINE -// ap_concat_ref<_AP_W, ap_private, 1, -// af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> > -// operator,(const af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> -// &a2) const { -// return ap_concat_ref< -// _AP_W, ap_private, 1, -// af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> >( -// const_cast&>(*this), -// const_cast&>( -// a2)); -// } -// -// template -// INLINE -// ap_concat_ref<_AP_W, ap_private, 1, -// af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> > -// operator,( -// af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> &a2) { -// return ap_concat_ref< -// _AP_W, ap_private, 1, -// af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> >(*this, a2); -// } - - INLINE ap_private<_AP_W, false> get() const { - ap_private<_AP_W, false> ret(*this); - return ret; - } - - template - INLINE void set(const ap_private<_AP_W3, false>& val) { - operator=(ap_private<_AP_W3, _AP_S>(val)); - } - - /// - /// @name Value Tests - /// - /// This tests the high bit of this ap_private to determine if it is set. - /// @returns true if this ap_private is negative, false otherwise - /// @brief Determine sign of this ap_private. - INLINE bool isNegative() const { - // just for get rid of warnings - enum { shift = (_AP_W - APINT_BITS_PER_WORD * (_AP_N - 1) - 1) }; - static const uint64_t mask = 1ULL << (shift); - return _AP_S && (pVal[_AP_N - 1] & mask); - } - - /// This tests the high bit of the ap_private to determine if it is unset. - /// @brief Determine if this ap_private Value is positive (not negative). - INLINE bool isPositive() const { return !isNegative(); } - - /// This tests if the value of this ap_private is strictly positive (> 0). - /// @returns true if this ap_private is Positive and not zero. - /// @brief Determine if this ap_private Value is strictly positive. - INLINE bool isStrictlyPositive() const { - return isPositive() && (*this) != 0; - } - - /// This checks to see if the value has all bits of the ap_private are set or - /// not. - /// @brief Determine if all bits are set - INLINE bool isAllOnesValue() const { return countPopulation() == _AP_W; } - - /// This checks to see if the value of this ap_private is the maximum unsigned - /// value for the ap_private's bit width. - /// @brief Determine if this is the largest unsigned value. - INLINE bool isMaxValue() const { return countPopulation() == _AP_W; } - - /// This checks to see if the value of this ap_private is the maximum signed - /// value for the ap_private's bit width. - /// @brief Determine if this is the largest signed value. 
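A standalone sketch, using std::bitset with hypothetical names, of the popcount-style extreme-value tests defined just below: the largest signed value is 0111...1 and the smallest is 1000...0.

#include <bitset>

template <unsigned W>
static bool is_max_signed(const std::bitset<W>& v) {
  return !v[W - 1] && v.count() == W - 1; // sign bit clear, all others set
}

template <unsigned W>
static bool is_min_signed(const std::bitset<W>& v) {
  return v[W - 1] && v.count() == 1; // only the sign bit set
}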
- INLINE bool isMaxSignedValue() const { - return !isNegative() && countPopulation() == _AP_W - 1; - } - - /// This checks to see if the value of this ap_private is the minimum unsigned - /// value for the ap_private's bit width. - /// @brief Determine if this is the smallest unsigned value. - INLINE bool isMinValue() const { return countPopulation() == 0; } - - /// This checks to see if the value of this ap_private is the minimum signed - /// value for the ap_private's bit width. - /// @brief Determine if this is the smallest signed value. - INLINE bool isMinSignedValue() const { - return isNegative() && countPopulation() == 1; - } - - /// This function returns a pointer to the internal storage of the ap_private. - /// This is useful for writing out the ap_private in binary form without any - /// conversions. - INLINE const uint64_t* getRawData() const { return &pVal[0]; } - - // Square Root - this method computes and returns the square root of "this". - // Three mechanisms are used for computation. For small values (<= 5 bits), - // a table lookup is done. This gets some performance for common cases. For - // values using less than 52 bits, the value is converted to double and then - // the libc sqrt function is called. The result is rounded and then converted - // back to a uint64_t which is then used to construct the result. Finally, - // the Babylonian method for computing square roots is used. - INLINE ap_private sqrt() const { - // Determine the magnitude of the value. - uint32_t magnitude = getActiveBits(); - - // Use a fast table for some small values. This also gets rid of some - // rounding errors in libc sqrt for small values. - if (magnitude <= 5) { - static const uint8_t results[32] = { - /* 0 */ 0, - /* 1- 2 */ 1, 1, - /* 3- 6 */ 2, 2, 2, 2, - /* 7-12 */ 3, 3, 3, 3, 3, 3, - /* 13-20 */ 4, 4, 4, 4, 4, 4, 4, 4, - /* 21-30 */ 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - /* 31 */ 6}; - return ap_private<_AP_W, _AP_S>(/*BitWidth,*/ results[get_VAL()]); - } - - // If the magnitude of the value fits in less than 52 bits (the precision of - // an IEEE double precision floating point value), then we can use the - // libc sqrt function which will probably use a hardware sqrt computation. - // This should be faster than the algorithm below. - if (magnitude < 52) { -#ifdef _MSC_VER - // Amazingly, VC++ doesn't have round(). - return ap_private<_AP_W, _AP_S>(/*BitWidth,*/ - uint64_t(::sqrt(double(get_VAL()))) + - 0.5); -#else - return ap_private<_AP_W, _AP_S>(/*BitWidth,*/ - uint64_t( - ::round(::sqrt(double(get_VAL()))))); -#endif - } - - // Okay, all the short cuts are exhausted. We must compute it. The following - // is a classical Babylonian method for computing the square root. This code - // was adapted to APINt from a wikipedia article on such computations. - // See http://www.wikipedia.org/ and go to the page named - // Calculate_an_integer_square_root. - uint32_t nbits = BitWidth, i = 4; - ap_private<_AP_W, _AP_S> testy(16); - ap_private<_AP_W, _AP_S> x_old(/*BitWidth,*/ 1); - ap_private<_AP_W, _AP_S> x_new(0); - ap_private<_AP_W, _AP_S> two(/*BitWidth,*/ 2); - - // Select a good starting value using binary logarithms. 
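// The loop below scans i = 4, 6, 8, ... with testy == 2^i until the value
// fits under 2^i (or i reaches the bit width), then seeds
// x_old = 2^(i/2), a power of two at or above the true root; the
// Babylonian update x_new = (value / x_old + x_old) / 2 then descends to
// the integer square root, with a closest-approximation fix-up at the end.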
- for (;; i += 2, testy = testy.shl(2)) - if (i >= nbits || this->ule(testy)) { - x_old = x_old.shl(i / 2); - break; - } - - // Use the Babylonian method to arrive at the integer square root: - for (;;) { - x_new = (this->udiv(x_old) + x_old).udiv(two); - if (x_old.ule(x_new)) break; - x_old = x_new; - } - - // Make sure we return the closest approximation - // NOTE: The rounding calculation below is correct. It will produce an - // off-by-one discrepancy with results from pari/gp. That discrepancy has - // been - // determined to be a rounding issue with pari/gp as it begins to use a - // floating point representation after 192 bits. There are no discrepancies - // between this algorithm and pari/gp for bit widths < 192 bits. - ap_private<_AP_W, _AP_S> square(x_old * x_old); - ap_private<_AP_W, _AP_S> nextSquare((x_old + 1) * (x_old + 1)); - if (this->ult(square)) - return x_old; - else if (this->ule(nextSquare)) { - ap_private<_AP_W, _AP_S> midpoint((nextSquare - square).udiv(two)); - ap_private<_AP_W, _AP_S> offset(*this - square); - if (offset.ult(midpoint)) - return x_old; - else - return x_old + 1; - } else - assert(0 && "Error in ap_private<_AP_W, _AP_S>::sqrt computation"); - return x_old + 1; - } - - /// - /// @Assignment Operators - /// - /// @returns *this after assignment of RHS. - /// @brief Copy assignment operator. - INLINE ap_private& operator=(const ap_private& RHS) { - if (this != &RHS) memcpy(pVal, RHS.get_pVal(), _AP_N * APINT_WORD_SIZE); - clearUnusedBits(); - return *this; - } - INLINE ap_private& operator=(const volatile ap_private& RHS) { - if (this != &RHS) - for (int i = 0; i < _AP_N; ++i) pVal[i] = RHS.get_pVal(i); - clearUnusedBits(); - return *this; - } - INLINE void operator=(const ap_private& RHS) volatile { - if (this != &RHS) - for (int i = 0; i < _AP_N; ++i) pVal[i] = RHS.get_pVal(i); - clearUnusedBits(); - } - INLINE void operator=(const volatile ap_private& RHS) volatile { - if (this != &RHS) - for (int i = 0; i < _AP_N; ++i) pVal[i] = RHS.get_pVal(i); - clearUnusedBits(); - } - - template - INLINE ap_private& operator=(const ap_private<_AP_W1, _AP_S1>& RHS) { - if (_AP_S1) - cpSextOrTrunc(RHS); - else - cpZextOrTrunc(RHS); - clearUnusedBits(); - return *this; - } - - template - INLINE ap_private& operator=(const volatile ap_private<_AP_W1, _AP_S1>& RHS) { - if (_AP_S1) - cpSextOrTrunc(RHS); - else - cpZextOrTrunc(RHS); - clearUnusedBits(); - return *this; - } - - template - INLINE ap_private& operator=(const _private_range_ref<_AP_W2, _AP_S2>& op2) { - *this = ap_private<_AP_W2, false>(op2); - return *this; - } - -#if 0 - template - INLINE ap_private& operator=(const ap_private<_AP_W1, _AP_S1, true>& RHS) { - static const uint64_t that_sign_ext_mask = (_AP_W1==APINT_BITS_PER_WORD)?0:~0ULL>>(_AP_W1%APINT_BITS_PER_WORD)<<(_AP_W1%APINT_BITS_PER_WORD); - if (RHS.isNegative()) { - pVal[0] = RHS.get_VAL() | that_sign_ext_mask; - memset(pVal+1,~0, APINT_WORD_SIZE*(_AP_N-1)); - } else { - pVal[0] = RHS.get_VAL(); - memset(pVal+1, 0, APINT_WORD_SIZE*(_AP_N-1)); - } - clearUnusedBits(); - return *this; - } - - template - INLINE ap_private& operator=(const volatile ap_private<_AP_W1, _AP_S1, true>& RHS) { - static const uint64_t that_sign_ext_mask = (_AP_W1==APINT_BITS_PER_WORD)?0:~0ULL>>(_AP_W1%APINT_BITS_PER_WORD)<<(_AP_W1%APINT_BITS_PER_WORD); - if (RHS.isNegative()) { - pVal[0] = RHS.get_VAL() | that_sign_ext_mask; - memset(pVal+1,~0, APINT_WORD_SIZE*(_AP_N-1)); - } else { - pVal[0] = RHS.get_VAL(); - memset(pVal+1, 0, APINT_WORD_SIZE*(_AP_N-1)); - } - 
clearUnusedBits(); - return *this; - } -#endif - -/// from all c types. -#define ASSIGN_OP_FROM_INT(C_TYPE, _AP_W2, _AP_S2) \ - INLINE ap_private& operator=(const C_TYPE rhs) { \ - ap_private<(_AP_W2), (_AP_S2)> tmp = rhs; \ - operator=(tmp); \ - return *this; \ - } - - ASSIGN_OP_FROM_INT(bool, 1, false) - ASSIGN_OP_FROM_INT(char, 8, CHAR_IS_SIGNED) - ASSIGN_OP_FROM_INT(signed char, 8, true) - ASSIGN_OP_FROM_INT(unsigned char, 8, false) - ASSIGN_OP_FROM_INT(short, sizeof(short) * 8, true) - ASSIGN_OP_FROM_INT(unsigned short, sizeof(unsigned short) * 8, false) - ASSIGN_OP_FROM_INT(int, sizeof(int) * 8, true) - ASSIGN_OP_FROM_INT(unsigned int, sizeof(unsigned int) * 8, false) - ASSIGN_OP_FROM_INT(long, sizeof(long) * 8, true) - ASSIGN_OP_FROM_INT(unsigned long, sizeof(unsigned long) * 8, false) - ASSIGN_OP_FROM_INT(ap_slong, sizeof(ap_slong) * 8, true) - ASSIGN_OP_FROM_INT(ap_ulong, sizeof(ap_ulong) * 8, false) -#undef ASSIGN_OP_FROM_INT - - /// from c string. - // XXX this is a must, to prevent pointer being converted to bool. - INLINE ap_private& operator=(const char* s) { - ap_private tmp(s); // XXX direct initialization, as ctor is explicit. - operator=(tmp); - return *this; - } - - /// - /// @name Unary Operators - /// - /// @returns a new ap_private value representing *this incremented by one - /// @brief Postfix increment operator. - INLINE const ap_private operator++(int) { - ap_private API(*this); - ++(*this); - return API; - } - - /// @returns *this incremented by one - /// @brief Prefix increment operator. - INLINE ap_private& operator++() { - ap_private_ops::add_1(pVal, pVal, _AP_N, 1); - clearUnusedBits(); - return *this; - } - - /// @returns a new ap_private representing *this decremented by one. - /// @brief Postfix decrement operator. - INLINE const ap_private operator--(int) { - ap_private API(*this); - --(*this); - return API; - } - - /// @returns *this decremented by one. - /// @brief Prefix decrement operator. - INLINE ap_private& operator--() { - ap_private_ops::sub_1(pVal, _AP_N, 1); - clearUnusedBits(); - return *this; - } - - /// Performs a bitwise complement operation on this ap_private. - /// @returns an ap_private that is the bitwise complement of *this - /// @brief Unary bitwise complement operator. - INLINE ap_private<_AP_W + !_AP_S, true> operator~() const { - ap_private<_AP_W + !_AP_S, true> Result(*this); - Result.flip(); - return Result; - } - - /// Negates *this using two's complement logic. - /// @returns An ap_private value representing the negation of *this. - /// @brief Unary negation operator - INLINE typename RType<1, false>::minus operator-() const { - return ap_private<1, false>(0) - (*this); - } - - /// Performs logical negation operation on this ap_private. - /// @returns true if *this is zero, false otherwise. - /// @brief Logical negation operator. 
- INLINE bool operator!() const { - for (int i = 0; i < _AP_N; ++i) - if (pVal[i]) return false; - return true; - } - - template - INLINE ap_private<_AP_W, _AP_S || _AP_S1> And( - const ap_private<_AP_W, _AP_S1>& RHS) const { - return this->operator&(RHS); - } - template - INLINE ap_private Or(const ap_private<_AP_W, _AP_S1>& RHS) const { - return this->operator|(RHS); - } - template - INLINE ap_private Xor(const ap_private<_AP_W, _AP_S1>& RHS) const { - return this->operator^(RHS); - } - - INLINE ap_private Mul(const ap_private& RHS) const { - ap_private Result(*this); - Result *= RHS; - return Result; - } - - INLINE ap_private Add(const ap_private& RHS) const { - ap_private Result(0); - ap_private_ops::add(Result.get_pVal(), pVal, RHS.get_pVal(), _AP_N, _AP_N, - _AP_N, _AP_S, _AP_S); - Result.clearUnusedBits(); - return Result; - } - - INLINE ap_private Sub(const ap_private& RHS) const { - ap_private Result(0); - ap_private_ops::sub(Result.get_pVal(), pVal, RHS.get_pVal(), _AP_N, _AP_N, - _AP_N, _AP_S, _AP_S); - Result.clearUnusedBits(); - return Result; - } - - /// Arithmetic right-shift this ap_private by shiftAmt. - /// @brief Arithmetic right-shift function. - INLINE ap_private ashr(uint32_t shiftAmt) const { - assert(shiftAmt <= BitWidth && "Invalid shift amount, too big"); - // Handle a degenerate case - if (shiftAmt == 0) return ap_private(*this); - - // If all the bits were shifted out, the result is, technically, undefined. - // We return -1 if it was negative, 0 otherwise. We check this early to - // avoid - // issues in the algorithm below. - if (shiftAmt == BitWidth) { - if (isNegative()) - return ap_private(-1); - else - return ap_private(0); - } - - // Create some space for the result. - ap_private Retval(0); - uint64_t* val = Retval.get_pVal(); - - // Compute some values needed by the following shift algorithms - uint32_t wordShift = - shiftAmt % APINT_BITS_PER_WORD; // bits to shift per word - uint32_t offset = shiftAmt / APINT_BITS_PER_WORD; // word offset for shift - uint32_t breakWord = _AP_N - 1 - offset; // last word affected - uint32_t bitsInWord = whichBit(BitWidth); // how many bits in last word? - if (bitsInWord == 0) bitsInWord = APINT_BITS_PER_WORD; - - // If we are shifting whole words, just move whole words - if (wordShift == 0) { - // Move the words containing significant bits - for (uint32_t i = 0; i <= breakWord; ++i) - val[i] = pVal[i + offset]; // move whole word - - // Adjust the top significant word for sign bit fill, if negative - if (isNegative()) - if (bitsInWord < APINT_BITS_PER_WORD) - val[breakWord] |= ~0ULL << (bitsInWord); // set high bits - } else { - // Shift the low order words - for (uint32_t i = 0; i < breakWord; ++i) { - // This combines the shifted corresponding word with the low bits from - // the next word (shifted into this word's high bits). - val[i] = ((pVal[i + offset]) >> (wordShift)); - val[i] |= ((pVal[i + offset + 1]) << (APINT_BITS_PER_WORD - wordShift)); - } - - // Shift the break word. In this case there are no bits from the next word - // to include in this word. - val[breakWord] = (pVal[breakWord + offset]) >> (wordShift); - - // Deal with sign extenstion in the break word, and possibly the word - // before - // it. 
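// When wordShift exceeds the number of bits actually used in the top
// word (bitsInWord), the sign fill spills past the break word into
// val[breakWord - 1]; otherwise only the break word's high bits are set.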
- if (isNegative()) { - if (wordShift > bitsInWord) { - if (breakWord > 0) - val[breakWord - 1] |= - ~0ULL << (APINT_BITS_PER_WORD - (wordShift - bitsInWord)); - val[breakWord] |= ~0ULL; - } else - val[breakWord] |= (~0ULL << (bitsInWord - wordShift)); - } - } - - // Remaining words are 0 or -1, just assign them. - uint64_t fillValue = (isNegative() ? ~0ULL : 0); - for (int i = breakWord + 1; i < _AP_N; ++i) val[i] = fillValue; - Retval.clearUnusedBits(); - return Retval; - } - - /// Logical right-shift this ap_private by shiftAmt. - /// @brief Logical right-shift function. - INLINE ap_private lshr(uint32_t shiftAmt) const { - // If all the bits were shifted out, the result is 0. This avoids issues - // with shifting by the size of the integer type, which produces undefined - // results. We define these "undefined results" to always be 0. - if (shiftAmt == BitWidth) return ap_private(0); - - // If none of the bits are shifted out, the result is *this. This avoids - // issues with shifting byt he size of the integer type, which produces - // undefined results in the code below. This is also an optimization. - if (shiftAmt == 0) return ap_private(*this); - - // Create some space for the result. - ap_private Retval(0); - uint64_t* val = Retval.get_pVal(); - - // If we are shifting less than a word, compute the shift with a simple - // carry - if (shiftAmt < APINT_BITS_PER_WORD) { - uint64_t carry = 0; - for (int i = _AP_N - 1; i >= 0; --i) { - val[i] = ((pVal[i]) >> (shiftAmt)) | carry; - carry = (pVal[i]) << (APINT_BITS_PER_WORD - shiftAmt); - } - Retval.clearUnusedBits(); - return Retval; - } - - // Compute some values needed by the remaining shift algorithms - uint32_t wordShift = shiftAmt % APINT_BITS_PER_WORD; - uint32_t offset = shiftAmt / APINT_BITS_PER_WORD; - - // If we are shifting whole words, just move whole words - if (wordShift == 0) { - for (uint32_t i = 0; i < _AP_N - offset; ++i) val[i] = pVal[i + offset]; - for (uint32_t i = _AP_N - offset; i < _AP_N; i++) val[i] = 0; - Retval.clearUnusedBits(); - return Retval; - } - - // Shift the low order words - uint32_t breakWord = _AP_N - offset - 1; - for (uint32_t i = 0; i < breakWord; ++i) - val[i] = ((pVal[i + offset]) >> (wordShift)) | - ((pVal[i + offset + 1]) << (APINT_BITS_PER_WORD - wordShift)); - // Shift the break word. - val[breakWord] = (pVal[breakWord + offset]) >> (wordShift); - - // Remaining words are 0 - for (int i = breakWord + 1; i < _AP_N; ++i) val[i] = 0; - Retval.clearUnusedBits(); - return Retval; - } - - /// Left-shift this ap_private by shiftAmt. - /// @brief Left-shift function. - INLINE ap_private shl(uint32_t shiftAmt) const { - assert(shiftAmt <= BitWidth && "Invalid shift amount, too big"); - // If all the bits were shifted out, the result is 0. This avoids issues - // with shifting by the size of the integer type, which produces undefined - // results. We define these "undefined results" to always be 0. - if (shiftAmt == BitWidth) return ap_private(0); - - // If none of the bits are shifted out, the result is *this. This avoids a - // lshr by the words size in the loop below which can produce incorrect - // results. It also avoids the expensive computation below for a common - // case. - if (shiftAmt == 0) return ap_private(*this); - - // Create some space for the result. 
- ap_private Retval(0); - uint64_t* val = Retval.get_pVal(); - // If we are shifting less than a word, do it the easy way - if (shiftAmt < APINT_BITS_PER_WORD) { - uint64_t carry = 0; - for (int i = 0; i < _AP_N; i++) { - val[i] = ((pVal[i]) << (shiftAmt)) | carry; - carry = (pVal[i]) >> (APINT_BITS_PER_WORD - shiftAmt); - } - Retval.clearUnusedBits(); - return Retval; - } - - // Compute some values needed by the remaining shift algorithms - uint32_t wordShift = shiftAmt % APINT_BITS_PER_WORD; - uint32_t offset = shiftAmt / APINT_BITS_PER_WORD; - - // If we are shifting whole words, just move whole words - if (wordShift == 0) { - for (uint32_t i = 0; i < offset; i++) val[i] = 0; - for (int i = offset; i < _AP_N; i++) val[i] = pVal[i - offset]; - Retval.clearUnusedBits(); - return Retval; - } - - // Copy whole words from this to Result. - uint32_t i = _AP_N - 1; - for (; i > offset; --i) - val[i] = (pVal[i - offset]) << (wordShift) | - (pVal[i - offset - 1]) >> (APINT_BITS_PER_WORD - wordShift); - val[offset] = (pVal[0]) << (wordShift); - for (i = 0; i < offset; ++i) val[i] = 0; - Retval.clearUnusedBits(); - return Retval; - } - - INLINE ap_private rotl(uint32_t rotateAmt) const { - if (rotateAmt == 0) return ap_private(*this); - // Don't get too fancy, just use existing shift/or facilities - ap_private hi(*this); - ap_private lo(*this); - hi.shl(rotateAmt); - lo.lshr(BitWidth - rotateAmt); - return hi | lo; - } - - INLINE ap_private rotr(uint32_t rotateAmt) const { - if (rotateAmt == 0) return ap_private(*this); - // Don't get too fancy, just use existing shift/or facilities - ap_private hi(*this); - ap_private lo(*this); - lo.lshr(rotateAmt); - hi.shl(BitWidth - rotateAmt); - return hi | lo; - } - - /// Perform an unsigned divide operation on this ap_private by RHS. Both this - /// and - /// RHS are treated as unsigned quantities for purposes of this division. - /// @returns a new ap_private value containing the division result - /// @brief Unsigned division operation. - INLINE ap_private udiv(const ap_private& RHS) const { - // Get some facts about the LHS and RHS number of bits and words - uint32_t rhsBits = RHS.getActiveBits(); - uint32_t rhsWords = !rhsBits ? 0 : (whichWord(rhsBits - 1) + 1); - assert(rhsWords && "Divided by zero???"); - uint32_t lhsBits = this->getActiveBits(); - uint32_t lhsWords = !lhsBits ? 0 : (whichWord(lhsBits - 1) + 1); - - // Deal with some degenerate cases - if (!lhsWords) - // 0 / X ===> 0 - return ap_private(0); - else if (lhsWords < rhsWords || this->ult(RHS)) { - // X / Y ===> 0, iff X < Y - return ap_private(0); - } else if (*this == RHS) { - // X / X ===> 1 - return ap_private(1); - } else if (lhsWords == 1 && rhsWords == 1) { - // All high words are zero, just use native divide - return ap_private(this->pVal[0] / RHS.get_pVal(0)); - } - - // We have to compute it the hard way. Invoke the Knuth divide algorithm. - ap_private Quotient(0); // to hold result. - ap_private_ops::divide(*this, lhsWords, RHS, rhsWords, &Quotient, - (ap_private*)0); - return Quotient; - } - - /// Signed divide this ap_private by ap_private RHS. - /// @brief Signed division function for ap_private. - INLINE ap_private sdiv(const ap_private& RHS) const { - if (isNegative()) - if (RHS.isNegative()) - return (-(*this)).udiv(-RHS); - else - return -((-(*this)).udiv(RHS)); - else if (RHS.isNegative()) - return -(this->udiv((ap_private)(-RHS))); - return this->udiv(RHS); - } - - /// Perform an unsigned remainder operation on this ap_private with RHS being - /// the - /// divisor. 
Both this and RHS are treated as unsigned quantities for purposes - /// of this operation. Note that this is a true remainder operation and not - /// a modulo operation because the sign follows the sign of the dividend - /// which is *this. - /// @returns a new ap_private value containing the remainder result - /// @brief Unsigned remainder operation. - INLINE ap_private urem(const ap_private& RHS) const { - // Get some facts about the LHS - uint32_t lhsBits = getActiveBits(); - uint32_t lhsWords = !lhsBits ? 0 : (whichWord(lhsBits - 1) + 1); - - // Get some facts about the RHS - uint32_t rhsBits = RHS.getActiveBits(); - uint32_t rhsWords = !rhsBits ? 0 : (whichWord(rhsBits - 1) + 1); - assert(rhsWords && "Performing remainder operation by zero ???"); - - // Check the degenerate cases - if (lhsWords == 0) { - // 0 % Y ===> 0 - return ap_private(0); - } else if (lhsWords < rhsWords || this->ult(RHS)) { - // X % Y ===> X, iff X < Y - return *this; - } else if (*this == RHS) { - // X % X == 0; - return ap_private(0); - } else if (lhsWords == 1) { - // All high words are zero, just use native remainder - return ap_private(pVal[0] % RHS.get_pVal(0)); - } - - // We have to compute it the hard way. Invoke the Knuth divide algorithm. - ap_private Remainder(0); - ap_private_ops::divide(*this, lhsWords, RHS, rhsWords, (ap_private*)(0), - &Remainder); - return Remainder; - } - - INLINE ap_private urem(uint64_t RHS) const { - // Get some facts about the LHS - uint32_t lhsBits = getActiveBits(); - uint32_t lhsWords = !lhsBits ? 0 : (whichWord(lhsBits - 1) + 1); - // Get some facts about the RHS - uint32_t rhsWords = 1; //! rhsBits ? 0 : (ap_private<_AP_W, - //! _AP_S>::whichWord(rhsBits - 1) + 1); - assert(rhsWords && "Performing remainder operation by zero ???"); - // Check the degenerate cases - if (lhsWords == 0) { - // 0 % Y ===> 0 - return ap_private(0); - } else if (lhsWords < rhsWords || this->ult(RHS)) { - // X % Y ===> X, iff X < Y - return *this; - } else if (*this == RHS) { - // X % X == 0; - return ap_private(0); - } else if (lhsWords == 1) { - // All high words are zero, just use native remainder - return ap_private(pVal[0] % RHS); - } - - // We have to compute it the hard way. Invoke the Knuth divide algorithm. - ap_private Remainder(0); - divide(*this, lhsWords, RHS, (ap_private*)(0), &Remainder); - return Remainder; - } - - /// Signed remainder operation on ap_private. - /// @brief Function for signed remainder operation. - INLINE ap_private srem(const ap_private& RHS) const { - if (isNegative()) { - ap_private lhs = -(*this); - if (RHS.isNegative()) { - ap_private rhs = -RHS; - return -(lhs.urem(rhs)); - } else - return -(lhs.urem(RHS)); - } else if (RHS.isNegative()) { - ap_private rhs = -RHS; - return this->urem(rhs); - } - return this->urem(RHS); - } - - /// Signed remainder operation on ap_private. - /// @brief Function for signed remainder operation. - INLINE ap_private srem(int64_t RHS) const { - if (isNegative()) - if (RHS < 0) - return -((-(*this)).urem(-RHS)); - else - return -((-(*this)).urem(RHS)); - else if (RHS < 0) - return this->urem(-RHS); - return this->urem(RHS); - } - - /// Compares this ap_private with RHS for the validity of the equality - /// relationship. - /// @returns true if *this == Val - /// @brief Equality comparison. - template - INLINE bool eq(const ap_private<_AP_W, _AP_S1>& RHS) const { - return (*this) == RHS; - } - - /// Compares this ap_private with RHS for the validity of the inequality - /// relationship. 
- /// @returns true if *this != Val - /// @brief Inequality comparison - template - INLINE bool ne(const ap_private<_AP_W, _AP_S1>& RHS) const { - return !((*this) == RHS); - } - - /// Regards both *this and RHS as unsigned quantities and compares them for - /// the validity of the less-than relationship. - /// @returns true if *this < RHS when both are considered unsigned. - /// @brief Unsigned less than comparison - template - INLINE bool ult(const ap_private<_AP_W, _AP_S1>& RHS) const { - // Get active bit length of both operands - uint32_t n1 = getActiveBits(); - uint32_t n2 = RHS.getActiveBits(); - - // If magnitude of LHS is less than RHS, return true. - if (n1 < n2) return true; - - // If magnitude of RHS is greather than LHS, return false. - if (n2 < n1) return false; - - // If they bot fit in a word, just compare the low order word - if (n1 <= APINT_BITS_PER_WORD && n2 <= APINT_BITS_PER_WORD) - return pVal[0] < RHS.get_pVal(0); - - // Otherwise, compare all words - uint32_t topWord = whichWord(AESL_std::max(n1, n2) - 1); - for (int i = topWord; i >= 0; --i) { - if (pVal[i] > RHS.get_pVal(i)) return false; - if (pVal[i] < RHS.get_pVal(i)) return true; - } - return false; - } - - INLINE bool ult(uint64_t RHS) const { - // Get active bit length of both operands - uint32_t n1 = getActiveBits(); - uint32_t n2 = - 64 - ap_private_ops::CountLeadingZeros_64(RHS); // RHS.getActiveBits(); - - // If magnitude of LHS is less than RHS, return true. - if (n1 < n2) return true; - - // If magnitude of RHS is greather than LHS, return false. - if (n2 < n1) return false; - - // If they bot fit in a word, just compare the low order word - if (n1 <= APINT_BITS_PER_WORD && n2 <= APINT_BITS_PER_WORD) - return pVal[0] < RHS; - assert(0); - } - - template - INLINE bool slt(const ap_private<_AP_W, _AP_S1>& RHS) const { - ap_private lhs(*this); - ap_private<_AP_W, _AP_S1> rhs(RHS); - bool lhsNeg = isNegative(); - bool rhsNeg = rhs.isNegative(); - if (lhsNeg) { - // Sign bit is set so perform two's complement to make it positive - lhs.flip(); - lhs++; - } - if (rhsNeg) { - // Sign bit is set so perform two's complement to make it positive - rhs.flip(); - rhs++; - } - - // Now we have unsigned values to compare so do the comparison if necessary - // based on the negativeness of the values. - if (lhsNeg) - if (rhsNeg) - return lhs.ugt(rhs); - else - return true; - else if (rhsNeg) - return false; - else - return lhs.ult(rhs); - } - - /// Regards both *this and RHS as unsigned quantities and compares them for - /// validity of the less-or-equal relationship. - /// @returns true if *this <= RHS when both are considered unsigned. - /// @brief Unsigned less or equal comparison - template - INLINE bool ule(const ap_private<_AP_W, _AP_S1>& RHS) const { - return ult(RHS) || eq(RHS); - } - - /// Regards both *this and RHS as signed quantities and compares them for - /// validity of the less-or-equal relationship. - /// @returns true if *this <= RHS when both are considered signed. - /// @brief Signed less or equal comparison - template - INLINE bool sle(const ap_private<_AP_W, _AP_S1>& RHS) const { - return slt(RHS) || eq(RHS); - } - - /// Regards both *this and RHS as unsigned quantities and compares them for - /// the validity of the greater-than relationship. - /// @returns true if *this > RHS when both are considered unsigned. 
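The multiword ult above compares active bit lengths first and then falls back to a word-by-word scan from the most significant word. The core scan, as a standalone sketch (ult_words is an illustrative name):

    #include <cassert>
    #include <cstdint>

    // Multiword unsigned "less than" over n little-endian 64-bit words,
    // scanning from the most significant word down, as ult above does.
    static bool ult_words(const uint64_t* a, const uint64_t* b, int n) {
        for (int i = n - 1; i >= 0; --i) {
            if (a[i] != b[i]) return a[i] < b[i];
        }
        return false; // equal
    }

    int main() {
        uint64_t a[2] = {5, 1}; // value 2^64 + 5
        uint64_t b[2] = {0, 2}; // value 2^65
        assert(ult_words(a, b, 2));
        assert(!ult_words(b, a, 2));
        return 0;
    }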
-  /// @brief Unsigned greater than comparison
-  template <bool _AP_S1>
-  INLINE bool ugt(const ap_private<_AP_W, _AP_S1>& RHS) const {
-    return !ult(RHS) && !eq(RHS);
-  }
-
-  /// Regards both *this and RHS as signed quantities and compares them for
-  /// the validity of the greater-than relationship.
-  /// @returns true if *this > RHS when both are considered signed.
-  /// @brief Signed greater than comparison
-  template <bool _AP_S1>
-  INLINE bool sgt(const ap_private<_AP_W, _AP_S1>& RHS) const {
-    return !slt(RHS) && !eq(RHS);
-  }
-
-  /// Regards both *this and RHS as unsigned quantities and compares them for
-  /// validity of the greater-or-equal relationship.
-  /// @returns true if *this >= RHS when both are considered unsigned.
-  /// @brief Unsigned greater or equal comparison
-  template <bool _AP_S1>
-  INLINE bool uge(const ap_private<_AP_W, _AP_S>& RHS) const {
-    return !ult(RHS);
-  }
-
-  /// Regards both *this and RHS as signed quantities and compares them for
-  /// validity of the greater-or-equal relationship.
-  /// @returns true if *this >= RHS when both are considered signed.
-  /// @brief Signed greater or equal comparison
-  template <bool _AP_S1>
-  INLINE bool sge(const ap_private<_AP_W, _AP_S1>& RHS) const {
-    return !slt(RHS);
-  }
-
-  // Sign extend to a new width.
-  template <int _AP_W1, bool _AP_S1>
-  INLINE void cpSext(const ap_private<_AP_W1, _AP_S1>& that) {
-    assert(_AP_W1 < BitWidth && "Invalid ap_private SignExtend request");
-    assert(_AP_W1 <= MAX_INT_BITS && "Too many bits");
-    // If the sign bit isn't set, this is the same as zext.
-    if (!that.isNegative()) {
-      cpZext(that);
-      return;
-    }
-
-    // The sign bit is set. First, get some facts
-    enum { wordBits = _AP_W1 % APINT_BITS_PER_WORD };
-    const int _AP_N1 = ap_private<_AP_W1, _AP_S1>::_AP_N;
-    // Mask the high order word appropriately
-    if (_AP_N1 == _AP_N) {
-      enum { newWordBits = _AP_W % APINT_BITS_PER_WORD };
-      // The extension is contained to the wordsBefore-1th word.
-      static const uint64_t mask = wordBits ? (~0ULL << (wordBits)) : 0ULL;
-      for (int i = 0; i < _AP_N; ++i) pVal[i] = that.get_pVal(i);
-      pVal[_AP_N - 1] |= mask;
-      return;
-    }
-
-    enum { newWordBits = _AP_W % APINT_BITS_PER_WORD };
-    // The extension is contained to the wordsBefore-1th word.
-    static const uint64_t mask = wordBits ? (~0ULL << (wordBits)) : 0ULL;
-    int i;
-    for (i = 0; i < _AP_N1; ++i) pVal[i] = that.get_pVal(i);
-    pVal[i - 1] |= mask;
-    for (; i < _AP_N - 1; i++) pVal[i] = ~0ULL;
-    pVal[i] = ~0ULL;
-    clearUnusedBits();
-    return;
-  }
-
-  // Zero extend to a new width.
-  template <int _AP_W1, bool _AP_S1>
-  INLINE void cpZext(const ap_private<_AP_W1, _AP_S1>& that) {
-    assert(_AP_W1 < BitWidth && "Invalid ap_private ZeroExtend request");
-    assert(_AP_W1 <= MAX_INT_BITS && "Too many bits");
-    const int _AP_N1 = ap_private<_AP_W1, _AP_S1>::_AP_N;
-    int i = 0;
-    for (; i < _AP_N1; ++i) pVal[i] = that.get_pVal(i);
-    for (; i < _AP_N; ++i) pVal[i] = 0;
-    clearUnusedBits();
-  }
-
-  template <int _AP_W1, bool _AP_S1>
-  INLINE void cpZextOrTrunc(const ap_private<_AP_W1, _AP_S1>& that) {
-    if (BitWidth > _AP_W1)
-      cpZext(that);
-    else {
-      for (int i = 0; i < _AP_N; ++i) pVal[i] = that.get_pVal(i);
-      clearUnusedBits();
-    }
-  }
-
-  template <int _AP_W1, bool _AP_S1>
-  INLINE void cpSextOrTrunc(const ap_private<_AP_W1, _AP_S1>& that) {
-    if (BitWidth > _AP_W1)
-      cpSext(that);
-    else {
-      for (int i = 0; i < _AP_N; ++i) pVal[i] = that.get_pVal(i);
-      clearUnusedBits();
-    }
-  }
-
-  /// @}
-  /// @name Value Characterization Functions
-  /// @{
-
-  /// @returns the total number of bits.
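The cpSext path above ORs a precomputed high mask into the top word when the source value is negative. The same masking on a single 64-bit word, as a self-contained sketch (sext64 is an illustrative name):

    #include <cassert>
    #include <cstdint>

    // Sign-extend a w-bit value held in a uint64_t to full 64 bits by
    // OR-ing a high mask when the sign bit is set -- the single-word
    // analogue of the cpSext masking above.
    static int64_t sext64(uint64_t v, unsigned w) {
        assert(w > 0 && w < 64);
        uint64_t sign = 1ULL << (w - 1);
        if (v & sign) v |= (~0ULL << w); // fill the high bits with ones
        return (int64_t)v;
    }

    int main() {
        assert(sext64(0xF, 4) == -1); // 4-bit 1111 -> -1
        assert(sext64(0x7, 4) == 7);  // sign bit clear: same as zext
        return 0;
    }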
- INLINE uint32_t getBitWidth() const { return BitWidth; } - - /// Here one word's bitwidth equals to that of uint64_t. - /// @returns the number of words to hold the integer value of this ap_private. - /// @brief Get the number of words. - INLINE uint32_t getNumWords() const { - return (BitWidth + APINT_BITS_PER_WORD - 1) / APINT_BITS_PER_WORD; - } - - /// This function returns the number of active bits which is defined as the - /// bit width minus the number of leading zeros. This is used in several - /// computations to see how "wide" the value is. - /// @brief Compute the number of active bits in the value - INLINE uint32_t getActiveBits() const { - uint32_t bits = BitWidth - countLeadingZeros(); - return bits ? bits : 1; - } - - /// This method attempts to return the value of this ap_private as a zero - /// extended - /// uint64_t. The bitwidth must be <= 64 or the value must fit within a - /// uint64_t. Otherwise an assertion will result. - /// @brief Get zero extended value - INLINE uint64_t getZExtValue() const { - assert(getActiveBits() <= 64 && "Too many bits for uint64_t"); - return *pVal; - } - - /// This method attempts to return the value of this ap_private as a sign - /// extended - /// int64_t. The bit width must be <= 64 or the value must fit within an - /// int64_t. Otherwise an assertion will result. - /// @brief Get sign extended value - INLINE int64_t getSExtValue() const { - assert(getActiveBits() <= 64 && "Too many bits for int64_t"); - return int64_t(pVal[0]); - } - - /// This method determines how many bits are required to hold the ap_private - /// equivalent of the string given by \p str of length \p slen. - /// @brief Get bits required for string value. - INLINE static uint32_t getBitsNeeded(const char* str, uint32_t slen, - uint8_t radix) { - assert(str != 0 && "Invalid value string"); - assert(slen > 0 && "Invalid string length"); - - // Each computation below needs to know if its negative - uint32_t isNegative = str[0] == '-'; - if (isNegative) { - slen--; - str++; - } - // For radixes of power-of-two values, the bits required is accurately and - // easily computed - if (radix == 2) return slen + isNegative; - if (radix == 8) return slen * 3 + isNegative; - if (radix == 16) return slen * 4 + isNegative; - - // Otherwise it must be radix == 10, the hard case - assert(radix == 10 && "Invalid radix"); - - // Convert to the actual binary value. - // ap_private<_AP_W, _AP_S> tmp(sufficient, str, slen, radix); - - // Compute how many bits are required. - // return isNegative + tmp.logBase2() + 1; - return isNegative + slen * 4; - } - - /// countLeadingZeros - This function is an ap_private version of the - /// countLeadingZeros_{32,64} functions in MathExtras.h. It counts the number - /// of zeros from the most significant bit to the first one bit. - /// @returns BitWidth if the value is zero. - /// @returns the number of zeros from the most significant bit to the first - /// one bits. - INLINE uint32_t countLeadingZeros() const { - enum { - msw_bits = (BitWidth % APINT_BITS_PER_WORD) - ? 
(BitWidth % APINT_BITS_PER_WORD) - : APINT_BITS_PER_WORD, - excessBits = APINT_BITS_PER_WORD - msw_bits - }; - uint32_t Count = ap_private_ops::CountLeadingZeros_64(pVal[_AP_N - 1]); - if (Count >= excessBits) Count -= excessBits; - if (!pVal[_AP_N - 1]) { - for (int i = _AP_N - 1; i; --i) { - if (!pVal[i - 1]) - Count += APINT_BITS_PER_WORD; - else { - Count += ap_private_ops::CountLeadingZeros_64(pVal[i - 1]); - break; - } - } - } - return Count; - } - - /// countLeadingOnes - This function counts the number of contiguous 1 bits - /// in the high order bits. The count stops when the first 0 bit is reached. - /// @returns 0 if the high order bit is not set - /// @returns the number of 1 bits from the most significant to the least - /// @brief Count the number of leading one bits. - INLINE uint32_t countLeadingOnes() const { - if (isSingleWord()) - return countLeadingOnes_64(get_VAL(), APINT_BITS_PER_WORD - BitWidth); - - uint32_t highWordBits = BitWidth % APINT_BITS_PER_WORD; - uint32_t shift = - (highWordBits == 0 ? 0 : APINT_BITS_PER_WORD - highWordBits); - int i = _AP_N - 1; - uint32_t Count = countLeadingOnes_64(get_pVal(i), shift); - if (Count == highWordBits) { - for (i--; i >= 0; --i) { - if (get_pVal(i) == ~0ULL) - Count += APINT_BITS_PER_WORD; - else { - Count += countLeadingOnes_64(get_pVal(i), 0); - break; - } - } - } - return Count; - } - - /// countTrailingZeros - This function is an ap_private version of the - /// countTrailingZoers_{32,64} functions in MathExtras.h. It counts - /// the number of zeros from the least significant bit to the first set bit. - /// @returns BitWidth if the value is zero. - /// @returns the number of zeros from the least significant bit to the first - /// one bit. - /// @brief Count the number of trailing zero bits. - INLINE uint32_t countTrailingZeros() const { - uint32_t Count = 0; - uint32_t i = 0; - for (; i < _AP_N && get_pVal(i) == 0; ++i) Count += APINT_BITS_PER_WORD; - if (i < _AP_N) Count += ap_private_ops::CountTrailingZeros_64(get_pVal(i)); - return AESL_std::min(Count, BitWidth); - } - /// countPopulation - This function is an ap_private version of the - /// countPopulation_{32,64} functions in MathExtras.h. It counts the number - /// of 1 bits in the ap_private value. - /// @returns 0 if the value is zero. - /// @returns the number of set bits. - /// @brief Count the number of bits set. - INLINE uint32_t countPopulation() const { - uint32_t Count = 0; - for (int i = 0; i < _AP_N - 1; ++i) - Count += ap_private_ops::CountPopulation_64(pVal[i]); - Count += ap_private_ops::CountPopulation_64(pVal[_AP_N - 1] & mask); - return Count; - } - - /// @} - /// @name Conversion Functions - /// @ - - /// This is used internally to convert an ap_private to a string. - /// @brief Converts an ap_private to a std::string - INLINE std::string toString(uint8_t radix, bool wantSigned) const; - - /// Considers the ap_private to be unsigned and converts it into a string in - /// the - /// radix given. The radix can be 2, 8, 10 or 16. - /// @returns a character interpretation of the ap_private - /// @brief Convert unsigned ap_private to string representation. - INLINE std::string toStringUnsigned(uint8_t radix = 10) const { - return toString(radix, false); - } - - /// Considers the ap_private to be unsigned and converts it into a string in - /// the - /// radix given. The radix can be 2, 8, 10 or 16. - /// @returns a character interpretation of the ap_private - /// @brief Convert unsigned ap_private to string representation. 
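countLeadingZeros above scans words from the most significant end, charging a full 64 bits for each all-zero word. A portable single-file sketch of the same scan (clz64 and clz_words are illustrative stand-ins for CountLeadingZeros_64):

    #include <cassert>
    #include <cstdint>

    // Portable leading-zero count for one 64-bit word.
    static uint32_t clz64(uint64_t x) {
        if (x == 0) return 64;
        uint32_t n = 0;
        for (uint64_t m = 1ULL << 63; !(x & m); m >>= 1) ++n;
        return n;
    }

    // Leading-zero count over an n-word little-endian value, scanning
    // from the top word as countLeadingZeros above does.
    static uint32_t clz_words(const uint64_t* v, int n) {
        uint32_t count = 0;
        for (int i = n - 1; i >= 0; --i) {
            if (v[i]) return count + clz64(v[i]);
            count += 64;
        }
        return count; // all words zero
    }

    int main() {
        uint64_t v[2] = {0xFF, 0}; // top word empty, 56 zeros in low word
        assert(clz_words(v, 2) == 64 + 56);
        return 0;
    }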
- INLINE std::string toStringSigned(uint8_t radix = 10) const { - return toString(radix, true); - } - - /// @brief Converts this ap_private to a double value. - INLINE double roundToDouble(bool isSigned) const { - // Handle the simple case where the value is contained in one uint64_t. - if (isSingleWord() || getActiveBits() <= APINT_BITS_PER_WORD) { - uint64_t val = pVal[0]; - if (isSigned) { - int64_t sext = ((int64_t(val)) << (64 - BitWidth)) >> (64 - BitWidth); - return double(sext); - } else - return double(val); - } - - // Determine if the value is negative. - bool isNeg = isSigned ? (*this)[BitWidth - 1] : false; - - // Construct the absolute value if we're negative. - ap_private<_AP_W, _AP_S> Tmp(isNeg ? -(*this) : (*this)); - - // Figure out how many bits we're using. - uint32_t n = Tmp.getActiveBits(); - - // The exponent (without bias normalization) is just the number of bits - // we are using. Note that the sign bit is gone since we constructed the - // absolute value. - uint64_t exp = n; - - // Return infinity for exponent overflow - if (exp > 1023) { - if (!isSigned || !isNeg) - return std::numeric_limits::infinity(); - else - return -std::numeric_limits::infinity(); - } - exp += 1023; // Increment for 1023 bias - - // Number of bits in mantissa is 52. To obtain the mantissa value, we must - // extract the high 52 bits from the correct words in pVal. - uint64_t mantissa; - unsigned hiWord = whichWord(n - 1); - if (hiWord == 0) { - mantissa = Tmp.get_pVal(0); - if (n > 52) - (mantissa) >>= (n - 52); // shift down, we want the top 52 bits. - } else { - assert(hiWord > 0 && "High word is negative?"); - uint64_t hibits = (Tmp.get_pVal(hiWord)) - << (52 - n % APINT_BITS_PER_WORD); - uint64_t lobits = - (Tmp.get_pVal(hiWord - 1)) >> (11 + n % APINT_BITS_PER_WORD); - mantissa = hibits | lobits; - } - - // The leading bit of mantissa is implicit, so get rid of it. - uint64_t sign = isNeg ? (1ULL << (APINT_BITS_PER_WORD - 1)) : 0; - union { - double __D; - uint64_t __I; - } __T; - __T.__I = sign | ((exp) << 52) | mantissa; - return __T.__D; - } - - /// @brief Converts this unsigned ap_private to a double value. - INLINE double roundToDouble() const { return roundToDouble(false); } - - /// @brief Converts this signed ap_private to a double value. - INLINE double signedRoundToDouble() const { return roundToDouble(true); } - - /// The conversion does not do a translation from integer to double, it just - /// re-interprets the bits as a double. Note that it is valid to do this on - /// any bit width. Exactly 64 bits will be translated. - /// @brief Converts ap_private bits to a double - INLINE double bitsToDouble() const { - union { - uint64_t __I; - double __D; - } __T; - __T.__I = pVal[0]; - return __T.__D; - } - - /// The conversion does not do a translation from integer to float, it just - /// re-interprets the bits as a float. Note that it is valid to do this on - /// any bit width. Exactly 32 bits will be translated. - /// @brief Converts ap_private bits to a double - INLINE float bitsToFloat() const { - union { - uint32_t __I; - float __F; - } __T; - __T.__I = uint32_t(pVal[0]); - return __T.__F; - } - - /// The conversion does not do a translation from double to integer, it just - /// re-interprets the bits of the double. Note that it is valid to do this on - /// any bit width but bits from V may get truncated. - /// @brief Converts a double to ap_private bits. 
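roundToDouble above packs sign, biased exponent, and 52-bit mantissa into a uint64_t and reinterprets the bits. A sketch of the same packing, with memcpy in place of the header's union punning (pack_double is an illustrative name):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    // Assemble an IEEE-754 double from sign, biased exponent, and 52-bit
    // mantissa, the way roundToDouble above builds __T.__I.
    static double pack_double(bool neg, uint64_t biased_exp, uint64_t mantissa) {
        uint64_t bits = (neg ? 1ULL << 63 : 0) | ((biased_exp & 0x7FF) << 52) |
                        (mantissa & ((1ULL << 52) - 1));
        double d;
        std::memcpy(&d, &bits, sizeof d); // defined-behavior reinterpretation
        return d;
    }

    int main() {
        // 1.0: sign 0, exponent 0 + bias 1023, mantissa 0 (implicit leading 1)
        assert(pack_double(false, 1023, 0) == 1.0);
        assert(pack_double(true, 1023, 0) == -1.0);
        return 0;
    }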
- INLINE ap_private& doubleToBits(double __V) { - union { - uint64_t __I; - double __D; - } __T; - __T.__D = __V; - pVal[0] = __T.__I; - return *this; - } - - /// The conversion does not do a translation from float to integer, it just - /// re-interprets the bits of the float. Note that it is valid to do this on - /// any bit width but bits from V may get truncated. - /// @brief Converts a float to ap_private bits. - INLINE ap_private& floatToBits(float __V) { - union { - uint32_t __I; - float __F; - } __T; - __T.__F = __V; - pVal[0] = __T.__I; - } - - // Reduce operation - //----------------------------------------------------------- - INLINE bool and_reduce() const { return isMaxValue(); } - - INLINE bool nand_reduce() const { return isMinValue(); } - - INLINE bool or_reduce() const { return (bool)countPopulation(); } - - INLINE bool nor_reduce() const { return countPopulation() == 0; } - - INLINE bool xor_reduce() const { - unsigned int i = countPopulation(); - return (i % 2) ? true : false; - } - - INLINE bool xnor_reduce() const { - unsigned int i = countPopulation(); - return (i % 2) ? false : true; - } - INLINE std::string to_string(uint8_t radix = 16, bool sign = false) const { - return toString(radix, radix == 10 ? _AP_S : sign); - } -}; // End of class ap_private <_AP_W, _AP_S, false> - -namespace ap_private_ops { - -enum { APINT_BITS_PER_WORD = 64 }; -template -INLINE bool operator==(uint64_t V1, const ap_private<_AP_W, _AP_S>& V2) { - return V2 == V1; -} - -template -INLINE bool operator!=(uint64_t V1, const ap_private<_AP_W, _AP_S>& V2) { - return V2 != V1; -} - -template -INLINE bool get(const ap_private<_AP_W, _AP_S>& a) { - static const uint64_t mask = 1ULL << (index & 0x3f); - return ((mask & a.get_pVal((index) >> 6)) != 0); -} - -template -INLINE void set(ap_private<_AP_W, _AP_S>& a, - const ap_private& mark1 = 0, - const ap_private& mark2 = 0) { - enum { - APINT_BITS_PER_WORD = 64, - lsb_word = lsb_index / APINT_BITS_PER_WORD, - msb_word = msb_index / APINT_BITS_PER_WORD, - msb = msb_index % APINT_BITS_PER_WORD, - lsb = lsb_index % APINT_BITS_PER_WORD - }; - if (msb_word == lsb_word) { - const uint64_t mask = ~0ULL >> - (lsb) << (APINT_BITS_PER_WORD - msb + lsb - 1) >> - (APINT_BITS_PER_WORD - msb - 1); - // a.set_pVal(msb_word, a.get_pVal(msb_word) | mask); - a.get_pVal(msb_word) |= mask; - } else { - const uint64_t lsb_mask = ~0ULL >> (lsb) << (lsb); - const uint64_t msb_mask = ~0ULL << (APINT_BITS_PER_WORD - msb - 1) >> - (APINT_BITS_PER_WORD - msb - 1); - // a.set_pVal(lsb_word, a.get_pVal(lsb_word) | lsb_mask); - a.get_pVal(lsb_word) |= lsb_mask; - for (int i = lsb_word + 1; i < msb_word; i++) { - a.set_pVal(i, ~0ULL); - // a.get_pVal(i)=0; - } - // a.set_pVal(msb_word, a.get_pVal(msb_word) | msb_mask); - - a.get_pVal(msb_word) |= msb_mask; - } - a.clearUnusedBits(); -} - -template -INLINE void clear(ap_private<_AP_W, _AP_S>& a, - const ap_private& mark1 = 0, - const ap_private& mark2 = 0) { - enum { - APINT_BITS_PER_WORD = 64, - lsb_word = lsb_index / APINT_BITS_PER_WORD, - msb_word = msb_index / APINT_BITS_PER_WORD, - msb = msb_index % APINT_BITS_PER_WORD, - lsb = lsb_index % APINT_BITS_PER_WORD - }; - if (msb_word == lsb_word) { - const uint64_t mask = - ~(~0ULL >> (lsb) << (APINT_BITS_PER_WORD - msb + lsb - 1) >> - (APINT_BITS_PER_WORD - msb - 1)); - // a.set_pVal(msb_word, a.get_pVal(msb_word) & mask); - a.get_pVal(msb_word) &= mask; - } else { - const uint64_t lsb_mask = ~(~0ULL >> (lsb) << (lsb)); - const uint64_t msb_mask = ~(~0ULL << (APINT_BITS_PER_WORD - 
msb - 1) >> - (APINT_BITS_PER_WORD - msb - 1)); - // a.set_pVal(lsb_word, a.get_pVal(lsb_word) & lsb_mask); - a.get_pVal(lsb_word) &= lsb_mask; - for (int i = lsb_word + 1; i < msb_word; i++) { - // a.set_pVal(i, 0); - a.get_pVal(i) = 0; - } - // a.set_pVal(msb_word, a.get_pVal(msb_word) & msb_mask); - a.get_pVal(msb_word) &= msb_mask; - } - a.clearUnusedBits(); -} - -template -INLINE void set(ap_private<_AP_W, _AP_S>& a, - const ap_private& mark = 0) { - enum { APINT_BITS_PER_WORD = 64, word = index / APINT_BITS_PER_WORD }; - static const uint64_t mask = 1ULL << (index % APINT_BITS_PER_WORD); - // a.set_pVal(word, a.get_pVal(word) | mask); - a.get_pVal(word) |= mask; - a.clearUnusedBits(); -} - -template -INLINE void clear(ap_private<_AP_W, _AP_S>& a, - const ap_private& mark = 0) { - enum { APINT_BITS_PER_WORD = 64, word = index / APINT_BITS_PER_WORD }; - static const uint64_t mask = ~(1ULL << (index % APINT_BITS_PER_WORD)); - // a.set_pVal(word, a.get_pVal(word) & mask); - a.get_pVal(word) &= mask; - a.clearUnusedBits(); -} - -} // End of ap_private_ops namespace - -template -INLINE std::string ap_private<_AP_W, _AP_S, false>::toString( - uint8_t radix, bool wantSigned) const { - assert((radix == 10 || radix == 8 || radix == 16 || radix == 2) && - "Radix should be 2, 8, 10, or 16!"); - static const char* digits[] = {"0", "1", "2", "3", "4", "5", "6", "7", - "8", "9", "A", "B", "C", "D", "E", "F"}; - std::string result; - - if (radix != 10) { - // For the 2, 8 and 16 bit cases, we can just shift instead of divide - // because the number of bits per digit (1,3 and 4 respectively) divides - // equaly. We just shift until there value is zero. - - // First, check for a zero value and just short circuit the logic below. - if (*this == (uint64_t)(0)) - result = "0"; - else { - ap_private<_AP_W, false> tmp(*this); - size_t insert_at = 0; - bool leading_zero = true; - if (wantSigned && isNegative()) { - // They want to print the signed version and it is a negative value - // Flip the bits and add one to turn it into the equivalent positive - // value and put a '-' in the result. - tmp.flip(); - tmp++; - tmp.clearUnusedBitsToZero(); - result = "-"; - insert_at = 1; - leading_zero = false; - } - switch (radix) { - case 2: - result += "0b"; - break; - case 8: - result += "0o"; - break; - case 16: - result += "0x"; - break; - default: - assert("invalid radix" && 0); - } - insert_at += 2; - // Just shift tmp right for each digit width until it becomes zero - uint32_t shift = (radix == 16 ? 4 : (radix == 8 ? 3 : 1)); - uint64_t mask = radix - 1; - ap_private<_AP_W, false> zero(0); - unsigned bits = 0; - while (tmp.ne(zero)) { - uint64_t digit = tmp.get_VAL() & mask; - result.insert(insert_at, digits[digit]); - tmp = tmp.lshr(shift); - ++bits; - } - bits *= shift; - if (bits < _AP_W && leading_zero) result.insert(insert_at, digits[0]); - } - return result; - } - - ap_private<_AP_W, false> tmp(*this); - ap_private<_AP_W, false> divisor(radix); - ap_private<_AP_W, false> zero(0); - size_t insert_at = 0; - if (wantSigned && isNegative()) { - // They want to print the signed version and it is a negative value - // Flip the bits and add one to turn it into the equivalent positive - // value and put a '-' in the result. 
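The radix-2/8/16 branch of toString above replaces division with shift-and-mask digit extraction, one digit per log2(radix) bits. The same loop for hex on a plain uint64_t (to_hex is an illustrative name):

    #include <cassert>
    #include <cstdint>
    #include <string>

    // Extract hex digits low-to-high with (v & 0xF), v >>= 4, inserting
    // each digit at the front, as the power-of-two branch above does.
    static std::string to_hex(uint64_t v) {
        static const char* digits = "0123456789ABCDEF";
        if (v == 0) return "0x0";
        std::string out;
        while (v) {
            out.insert(0, 1, digits[v & 0xF]); // low 4 bits -> one digit
            v >>= 4;
        }
        return "0x" + out;
    }

    int main() {
        assert(to_hex(0) == "0x0");
        assert(to_hex(0xDEADBEEFULL) == "0xDEADBEEF");
        return 0;
    }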
- tmp.flip(); - tmp++; - tmp.clearUnusedBitsToZero(); - result = "-"; - insert_at = 1; - } - if (tmp == ap_private<_AP_W, false>(0)) - result = "0"; - else - while (tmp.ne(zero)) { - ap_private<_AP_W, false> APdigit(0); - ap_private<_AP_W, false> tmp2(0); - ap_private_ops::divide(tmp, tmp.getNumWords(), divisor, - divisor.getNumWords(), &tmp2, &APdigit); - uint64_t digit = APdigit.getZExtValue(); - assert(digit < radix && "divide failed"); - result.insert(insert_at, digits[digit]); - tmp = tmp2; - } - - return result; -} // End of ap_private<_AP_W, _AP_S, false>::toString() - -template -std::ostream &operator<<(std::ostream &os, const ap_private<_AP_W, _AP_S> &x) { - std::ios_base::fmtflags ff = std::cout.flags(); - if (ff & std::cout.hex) { - os << x.toString(16, false); // don't print sign - } else if (ff & std::cout.oct) { - os << x.toString(8, false); // don't print sign - } else { - os << x.toString(10, _AP_S); - } - return os; -} - -// ------------------------------------------------------------ // -// XXX moved here from ap_int_sim.h XXX // -// ------------------------------------------------------------ // - -/// Concatination reference. -/// Proxy class which allows concatination to be used as rvalue(for reading) and -/// lvalue(for writing) -// ---------------------------------------------------------------- -// template -// struct ap_concat_ref { -//#ifdef _MSC_VER -//#pragma warning(disable : 4521 4522) -//#endif -// enum { -// _AP_WR = _AP_W1 + _AP_W2, -// }; -// _AP_T1& mbv1; -// _AP_T2& mbv2; -// -// INLINE ap_concat_ref(const ap_concat_ref<_AP_W1, _AP_T1, _AP_W2, _AP_T2>& -// ref) -// : mbv1(ref.mbv1), mbv2(ref.mbv2) {} -// -// INLINE ap_concat_ref(_AP_T1& bv1, _AP_T2& bv2) : mbv1(bv1), mbv2(bv2) {} -// -// template -// INLINE ap_concat_ref& operator=(const ap_private<_AP_W3, _AP_S3>& val) { -// ap_private<_AP_W1 + _AP_W2, false> vval(val); -// int W_ref1 = mbv1.length(); -// int W_ref2 = mbv2.length(); -// ap_private<_AP_W1, false> mask1(-1); -// mask1 >>= _AP_W1 - W_ref1; -// ap_private<_AP_W2, false> mask2(-1); -// mask2 >>= _AP_W2 - W_ref2; -// mbv1.set(ap_private<_AP_W1, false>((vval >> W_ref2) & mask1)); -// mbv2.set(ap_private<_AP_W2, false>(vval & mask2)); -// return *this; -// } -// -// INLINE ap_concat_ref& operator=(unsigned long long val) { -// ap_private<_AP_W1 + _AP_W2, false> tmpVal(val); -// return operator=(tmpVal); -// } -// -// template -// INLINE ap_concat_ref& operator=( -// const ap_concat_ref<_AP_W3, _AP_T3, _AP_W4, _AP_T4>& val) { -// ap_private<_AP_W1 + _AP_W2, false> tmpVal(val); -// return operator=(tmpVal); -// } -// -// INLINE ap_concat_ref& operator=( -// const ap_concat_ref<_AP_W1, _AP_T1, _AP_W2, _AP_T2>& val) { -// ap_private<_AP_W1 + _AP_W2, false> tmpVal(val); -// return operator=(tmpVal); -// } -// -// template -// INLINE ap_concat_ref& operator=(const _private_bit_ref<_AP_W3, _AP_S3>& -// val) { -// ap_private<_AP_W1 + _AP_W2, false> tmpVal(val); -// return operator=(tmpVal); -// } -// -// template -// INLINE ap_concat_ref& operator=(const _private_range_ref<_AP_W3, _AP_S3>& -// val) { -// ap_private<_AP_W1 + _AP_W2, false> tmpVal(val); -// return operator=(tmpVal); -// } -// -// template -// INLINE ap_concat_ref& operator=( -// const af_range_ref<_AP_W3, _AP_I3, _AP_S3, _AP_Q3, _AP_O3, _AP_N3>& val) -// { -// return operator=((const ap_private<_AP_W3, false>)(val)); -// } -// -// template -// INLINE ap_concat_ref& operator=( -// const ap_fixed_base<_AP_W3, _AP_I3, _AP_S3, _AP_Q3, _AP_O3, _AP_N3>& -// val) { -// return 
operator=(val.to_ap_private()); -// } -// -// template -// INLINE ap_concat_ref& operator=( -// const af_bit_ref<_AP_W3, _AP_I3, _AP_S3, _AP_Q3, _AP_O3, _AP_N3>& val) { -// return operator=((unsigned long long)(bool)(val)); -// } -// -// INLINE operator ap_private<_AP_WR, false>() const { return get(); } -// -// INLINE operator unsigned long long() const { return get().to_uint64(); } -// -// template -// INLINE ap_concat_ref<_AP_WR, ap_concat_ref, _AP_W3, -// _private_range_ref<_AP_W3, _AP_S3> > -// operator,(const _private_range_ref<_AP_W3, _AP_S3> &a2) { -// return ap_concat_ref<_AP_WR, ap_concat_ref, _AP_W3, -// _private_range_ref<_AP_W3, _AP_S3> >( -// *this, const_cast<_private_range_ref<_AP_W3, _AP_S3>&>(a2)); -// } -// -// template -// INLINE -// ap_concat_ref<_AP_WR, ap_concat_ref, _AP_W3, ap_private<_AP_W3, _AP_S3> -// > -// operator,(ap_private<_AP_W3, _AP_S3> &a2) { -// return ap_concat_ref<_AP_WR, ap_concat_ref, _AP_W3, -// ap_private<_AP_W3, _AP_S3> >(*this, a2); -// } -// -// template -// INLINE -// ap_concat_ref<_AP_WR, ap_concat_ref, _AP_W3, ap_private<_AP_W3, _AP_S3> -// > -// operator,(const ap_private<_AP_W3, _AP_S3> &a2) { -// return ap_concat_ref<_AP_WR, ap_concat_ref, _AP_W3, -// ap_private<_AP_W3, _AP_S3> >( -// *this, const_cast&>(a2)); -// } -// -// template -// INLINE ap_concat_ref<_AP_WR, ap_concat_ref, 1, _private_bit_ref<_AP_W3, -// _AP_S3> > -// operator,(const _private_bit_ref<_AP_W3, _AP_S3> &a2) { -// return ap_concat_ref<_AP_WR, ap_concat_ref, 1, _private_bit_ref<_AP_W3, -// _AP_S3> >( -// *this, const_cast<_private_bit_ref<_AP_W3, _AP_S3>&>(a2)); -// } -// -// template -// INLINE ap_concat_ref<_AP_WR, ap_concat_ref, _AP_W3 + _AP_W4, -// ap_concat_ref<_AP_W3, _AP_T3, _AP_W4, _AP_T4> > -// operator,(const ap_concat_ref<_AP_W3, _AP_T3, _AP_W4, _AP_T4> &a2) { -// return ap_concat_ref<_AP_WR, ap_concat_ref, _AP_W3 + _AP_W4, -// ap_concat_ref<_AP_W3, _AP_T3, _AP_W4, _AP_T4> >( -// *this, const_cast&>(a2)); -// } -// -// template -// INLINE ap_concat_ref< -// _AP_WR, ap_concat_ref, _AP_W3, -// af_range_ref<_AP_W3, _AP_I3, _AP_S3, _AP_Q3, _AP_O3, _AP_N3> > -// operator,( -// const af_range_ref<_AP_W3, _AP_I3, _AP_S3, _AP_Q3, _AP_O3, _AP_N3> &a2) -// { -// return ap_concat_ref< -// _AP_WR, ap_concat_ref, _AP_W3, -// af_range_ref<_AP_W3, _AP_I3, _AP_S3, _AP_Q3, _AP_O3, _AP_N3> >( -// *this, -// const_cast< -// af_range_ref<_AP_W3, _AP_I3, _AP_S3, _AP_Q3, _AP_O3, -// _AP_N3>&>(a2)); -// } -// -// template -// INLINE -// ap_concat_ref<_AP_WR, ap_concat_ref, 1, -// af_bit_ref<_AP_W3, _AP_I3, _AP_S3, _AP_Q3, _AP_O3, _AP_N3> -// > -// operator,(const af_bit_ref<_AP_W3, _AP_I3, _AP_S3, _AP_Q3, _AP_O3, -// _AP_N3> -// &a2) { -// return ap_concat_ref< -// _AP_WR, ap_concat_ref, 1, -// af_bit_ref<_AP_W3, _AP_I3, _AP_S3, _AP_Q3, _AP_O3, _AP_N3> >( -// *this, -// const_cast&>( -// a2)); -// } -// -// template -// INLINE ap_private operator&( -// const ap_private<_AP_W3, _AP_S3>& a2) { -// return get() & a2; -// } -// -// template -// INLINE ap_private operator|( -// const ap_private<_AP_W3, _AP_S3>& a2) { -// return get() | a2; -// } -// -// template -// INLINE ap_private operator^( -// const ap_private<_AP_W3, _AP_S3>& a2) { -// return ap_private(get() ^ a2); -// } -// -// INLINE const ap_private<_AP_WR, false> get() const { -// ap_private<_AP_W1 + _AP_W2, false> tmpVal = -// ap_private<_AP_W1 + _AP_W2, false>(mbv1.get()); -// ap_private<_AP_W1 + _AP_W2, false> tmpVal2 = -// ap_private<_AP_W1 + _AP_W2, false>(mbv2.get()); -// int W_ref2 = mbv2.length(); -// tmpVal <<= 
W_ref2; -// tmpVal |= tmpVal2; -// return tmpVal; -// } -// -// INLINE const ap_private<_AP_WR, false> get() { -// ap_private<_AP_W1 + _AP_W2, false> tmpVal = -// ap_private<_AP_W1 + _AP_W2, false>(mbv1.get()); -// ap_private<_AP_W1 + _AP_W2, false> tmpVal2 = -// ap_private<_AP_W1 + _AP_W2, false>(mbv2.get()); -// int W_ref2 = mbv2.length(); -// tmpVal <<= W_ref2; -// tmpVal |= tmpVal2; -// return tmpVal; -// } -// -// template -// INLINE void set(const ap_private<_AP_W3, false>& val) { -// ap_private<_AP_W1 + _AP_W2, false> vval(val); -// int W_ref1 = mbv1.length(); -// int W_ref2 = mbv2.length(); -// ap_private<_AP_W1, false> mask1(-1); -// mask1 >>= _AP_W1 - W_ref1; -// ap_private<_AP_W2, false> mask2(-1); -// mask2 >>= _AP_W2 - W_ref2; -// mbv1.set(ap_private<_AP_W1, false>((vval >> W_ref2) & mask1)); -// mbv2.set(ap_private<_AP_W2, false>(vval & mask2)); -// } -// -// INLINE int length() const { return mbv1.length() + mbv2.length(); } -// -// INLINE std::string to_string(uint8_t radix = 2) const { -// return get().to_string(radix); -// } -//}; // struct ap_concat_ref. - -/// Range(slice) reference -/// Proxy class, which allows part selection to be used as rvalue(for reading) -/// and lvalue(for writing) -//------------------------------------------------------------ -template -struct _private_range_ref { -#ifdef _MSC_VER -#pragma warning(disable : 4521 4522) -#endif - ap_private<_AP_W, _AP_S>& d_bv; - int l_index; - int h_index; - - public: - /// copy ctor. - INLINE _private_range_ref(const _private_range_ref<_AP_W, _AP_S>& ref) - : d_bv(ref.d_bv), l_index(ref.l_index), h_index(ref.h_index) {} - - /// direct ctor. - INLINE _private_range_ref(ap_private<_AP_W, _AP_S>* bv, int h, int l) - : d_bv(*bv), l_index(l), h_index(h) { - _AP_WARNING(h < 0 || l < 0, - "Higher bound (%d) and lower bound (%d) cannot be " - "negative.", - h, l); - _AP_WARNING(h >= _AP_W || l >= _AP_W, - "Higher bound (%d) or lower bound (%d) out of range (%d).", h, l, - _AP_W); - } - - /// compound or assignment. - template - INLINE _private_range_ref<_AP_W, _AP_S>& operator|=( - const _private_range_ref<_AP_W2, _AP_S2>& ref) { - _AP_WARNING((h_index - l_index) != (ref.h_index - ref.l_index), - "Bitsize mismach for ap_private<>.range() &= " - "ap_private<>.range()."); - this->d_bv |= ref.d_bv; - return *this; - } - - /// compound or assignment with root type. - template - INLINE _private_range_ref<_AP_W, _AP_S>& operator|=( - const _AP_ROOT_TYPE<_AP_W2, _AP_S2>& ref) { - _AP_WARNING((h_index - l_index + 1) != _AP_W2, - "Bitsize mismach for ap_private<>.range() |= _AP_ROOT_TYPE<>."); - this->d_bv |= ref.V; - return *this; - } - - /// compound and assignment. - template - INLINE _private_range_ref<_AP_W, _AP_S>& operator&=( - const _private_range_ref<_AP_W2, _AP_S2>& ref) { - _AP_WARNING((h_index - l_index) != (ref.h_index - ref.l_index), - "Bitsize mismach for ap_private<>.range() &= " - "ap_private<>.range()."); - this->d_bv &= ref.d_bv; - return *this; - }; - - /// compound and assignment with root type. - template - INLINE _private_range_ref<_AP_W, _AP_S>& operator&=( - const _AP_ROOT_TYPE<_AP_W2, _AP_S2>& ref) { - _AP_WARNING((h_index - l_index + 1) != _AP_W2, - "Bitsize mismach for ap_private<>.range() &= _AP_ROOT_TYPE<>."); - this->d_bv &= ref.V; - return *this; - } - - /// compound xor assignment. 
- template - INLINE _private_range_ref<_AP_W, _AP_S>& operator^=( - const _private_range_ref<_AP_W2, _AP_S2>& ref) { - _AP_WARNING((h_index - l_index) != (ref.h_index - ref.l_index), - "Bitsize mismach for ap_private<>.range() ^= " - "ap_private<>.range()."); - this->d_bv ^= ref.d_bv; - return *this; - }; - - /// compound xor assignment with root type. - template - INLINE _private_range_ref<_AP_W, _AP_S>& operator^=( - const _AP_ROOT_TYPE<_AP_W2, _AP_S2>& ref) { - _AP_WARNING((h_index - l_index + 1) != _AP_W2, - "Bitsize mismach for ap_private<>.range() ^= _AP_ROOT_TYPE<>."); - this->d_bv ^= ref.V; - return *this; - } - - /// @name convertors. - // @{ - INLINE operator ap_private<_AP_W, false>() const { - ap_private<_AP_W, false> val(0); - if (h_index >= l_index) { - if (_AP_W > 64) { - val = d_bv; - ap_private<_AP_W, false> mask(-1); - mask >>= _AP_W - (h_index - l_index + 1); - val >>= l_index; - val &= mask; - } else { - const static uint64_t mask = (~0ULL >> (64 > _AP_W ? (64 - _AP_W) : 0)); - val = (d_bv >> l_index) & (mask >> (_AP_W - (h_index - l_index + 1))); - } - } else { - for (int i = 0, j = l_index; j >= 0 && j >= h_index; j--, i++) - if ((d_bv)[j]) val.set(i); - } - return val; - } - - INLINE operator unsigned long long() const { return to_uint64(); } - // @} - - template - INLINE _private_range_ref& operator=(const ap_private<_AP_W2, _AP_S2>& val) { - ap_private<_AP_W, false> vval = ap_private<_AP_W, false>(val); - if (l_index > h_index) { - for (int i = 0, j = l_index; j >= 0 && j >= h_index; j--, i++) - (vval)[i] ? d_bv.set(j) : d_bv.clear(j); - } else { - if (_AP_W > 64) { - ap_private<_AP_W, false> mask(-1); - if (l_index > 0) { - mask <<= l_index; - vval <<= l_index; - } - if (h_index < _AP_W - 1) { - ap_private<_AP_W, false> mask2(-1); - mask2 >>= _AP_W - h_index - 1; - mask &= mask2; - vval &= mask2; - } - mask.flip(); - d_bv &= mask; - d_bv |= vval; - } else { - unsigned shift = 64 - _AP_W; - uint64_t mask = ~0ULL >> (shift); - if (l_index > 0) { - vval = mask & vval << l_index; - mask = mask & mask << l_index; - } - if (h_index < _AP_W - 1) { - uint64_t mask2 = mask; - mask2 >>= (_AP_W - h_index - 1); - mask &= mask2; - vval &= mask2; - } - mask = ~mask; - d_bv &= mask; - d_bv |= vval; - } - } - return *this; - } // operator=(const ap_private<>&) - - INLINE _private_range_ref& operator=(unsigned long long val) { - const ap_private<_AP_W, _AP_S> vval = val; - return operator=(vval); - } - - template - INLINE _private_range_ref& operator=( - const _private_bit_ref<_AP_W2, _AP_S2>& val) { - return operator=((unsigned long long)(bool)val); - } - - template - INLINE _private_range_ref& operator=( - const _private_range_ref<_AP_W2, _AP_S2>& val) { - const ap_private<_AP_W, false> tmpVal(val); - return operator=(tmpVal); - } - -// template -// INLINE _private_range_ref& operator=( -// const ap_concat_ref<_AP_W3, _AP_T3, _AP_W4, _AP_T4>& val) { -// const ap_private<_AP_W, false> tmpVal(val); -// return operator=(tmpVal); -// } - - // TODO from ap_int_base, ap_bit_ref and ap_range_ref. 
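The range assignment above clears the target field with a mask and ORs in the shifted value. The single-word version of that mask/shift insertion, as a sketch (set_field is an illustrative name):

    #include <cassert>
    #include <cstdint>

    // Write val into bits [h:l] of word -- the single-word analogue of
    // the mask/shift logic in _private_range_ref::operator= above.
    static uint64_t set_field(uint64_t word, unsigned h, unsigned l,
                              uint64_t val) {
        assert(h >= l && h < 64);
        unsigned w = h - l + 1;
        uint64_t mask = (w == 64) ? ~0ULL : (((1ULL << w) - 1) << l);
        return (word & ~mask) | ((val << l) & mask);
    }

    int main() {
        assert(set_field(0xFFFF, 7, 4, 0x0) == 0xFF0F); // clear bits [7:4]
        assert(set_field(0x0, 3, 0, 0xA) == 0xA);       // set bits [3:0]
        return 0;
    }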
- - template - INLINE _private_range_ref& operator=( - const ap_fixed_base<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& val) { - return operator=(val.to_ap_int_base().V); - } - - template - INLINE _private_range_ref& operator=( - const af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& val) { - return operator=(val.operator ap_int_base<_AP_W2, false>().V); - } - - template - INLINE _private_range_ref& operator=( - const af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>& val) { - return operator=((unsigned long long)(bool)val); - } - -// template -// INLINE ap_concat_ref<_AP_W, _private_range_ref, _AP_W2, -// _private_range_ref<_AP_W2, _AP_S2> > -// operator,(const _private_range_ref<_AP_W2, _AP_S2> &a2) { -// return ap_concat_ref<_AP_W, _private_range_ref, _AP_W2, -// _private_range_ref<_AP_W2, _AP_S2> >( -// *this, const_cast<_private_range_ref<_AP_W2, _AP_S2>&>(a2)); -// } -// -// template -// INLINE ap_concat_ref<_AP_W, _private_range_ref, _AP_W2, -// ap_private<_AP_W2, _AP_S2> > -// operator,(ap_private<_AP_W2, _AP_S2> &a2) { -// return ap_concat_ref<_AP_W, _private_range_ref, _AP_W2, -// ap_private<_AP_W2, _AP_S2> >(*this, a2); -// } -// -// INLINE -// ap_concat_ref<_AP_W, _private_range_ref, _AP_W, ap_private<_AP_W, _AP_S> > -// operator,(ap_private<_AP_W, _AP_S>& a2) { -// return ap_concat_ref<_AP_W, _private_range_ref, _AP_W, -// ap_private<_AP_W, _AP_S> >(*this, a2); -// } -// -// template -// INLINE ap_concat_ref<_AP_W, _private_range_ref, 1, -// _private_bit_ref<_AP_W2, _AP_S2> > -// operator,(const _private_bit_ref<_AP_W2, _AP_S2> &a2) { -// return ap_concat_ref<_AP_W, _private_range_ref, 1, -// _private_bit_ref<_AP_W2, _AP_S2> >( -// *this, const_cast<_private_bit_ref<_AP_W2, _AP_S2>&>(a2)); -// } -// -// template -// INLINE ap_concat_ref<_AP_W, _private_range_ref, _AP_W2 + _AP_W3, -// ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3> > -// operator,(const ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3> &a2) { -// return ap_concat_ref<_AP_W, _private_range_ref, _AP_W2 + _AP_W3, -// ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3> >( -// *this, const_cast&>(a2)); -// } -// -// template -// INLINE ap_concat_ref< -// _AP_W, _private_range_ref, _AP_W2, -// af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> > -// operator,( -// const af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> &a2) { -// return ap_concat_ref< -// _AP_W, _private_range_ref, _AP_W2, -// af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> >( -// *this, -// const_cast< -// af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2>&>(a2)); -// } -// -// template -// INLINE -// ap_concat_ref<_AP_W, _private_range_ref, 1, -// af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> > -// operator,(const af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> -// &a2) { -// return ap_concat_ref< -// _AP_W, _private_range_ref, 1, -// af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> >( -// *this, -// const_cast&>( -// a2)); -// } - - template - INLINE bool operator==(const _private_range_ref<_AP_W2, _AP_S2>& op2) { - ap_private<_AP_W, false> lhs = get(); - ap_private<_AP_W2, false> rhs = op2.get(); - return lhs == rhs; - } - - template - INLINE bool operator!=(const _private_range_ref<_AP_W2, _AP_S2>& op2) { - ap_private<_AP_W, false> lhs = get(); - ap_private<_AP_W2, false> rhs = op2.get(); - return lhs != rhs; - } - - template - INLINE bool operator>(const _private_range_ref<_AP_W2, _AP_S2>& op2) { - ap_private<_AP_W, false> lhs = get(); - 
ap_private<_AP_W2, false> rhs = op2.get(); - return lhs > rhs; - } - - template - INLINE bool operator>=(const _private_range_ref<_AP_W2, _AP_S2>& op2) { - ap_private<_AP_W, false> lhs = get(); - ap_private<_AP_W2, false> rhs = op2.get(); - return lhs >= rhs; - } - - template - INLINE bool operator<(const _private_range_ref<_AP_W2, _AP_S2>& op2) { - ap_private<_AP_W, false> lhs = get(); - ap_private<_AP_W2, false> rhs = op2.get(); - return lhs < rhs; - } - - template - INLINE bool operator<=(const _private_range_ref<_AP_W2, _AP_S2>& op2) { - ap_private<_AP_W, false> lhs = get(); - ap_private<_AP_W2, false> rhs = op2.get(); - return lhs <= rhs; - } - - template - INLINE void set(const ap_private<_AP_W2, false>& val) { - ap_private<_AP_W, _AP_S> vval = val; - if (l_index > h_index) { - for (int i = 0, j = l_index; j >= 0 && j >= h_index; j--, i++) - (vval)[i] ? d_bv.set(j) : d_bv.clear(j); - } else { - if (_AP_W > 64) { - ap_private<_AP_W, _AP_S> mask(-1); - if (l_index > 0) { - ap_private<_AP_W, false> mask1(-1); - mask1 >>= _AP_W - l_index; - mask1.flip(); - mask = mask1; - // vval&=mask1; - vval <<= l_index; - } - if (h_index < _AP_W - 1) { - ap_private<_AP_W, false> mask2(-1); - mask2 <<= h_index + 1; - mask2.flip(); - mask &= mask2; - vval &= mask2; - } - mask.flip(); - d_bv &= mask; - d_bv |= vval; - } else { - uint64_t mask = ~0ULL >> (64 - _AP_W); - if (l_index > 0) { - uint64_t mask1 = mask; - mask1 = mask & (mask1 >> (_AP_W - l_index)); - vval = mask & (vval << l_index); - mask = ~mask1 & mask; - // vval&=mask1; - } - if (h_index < _AP_W - 1) { - uint64_t mask2 = ~0ULL >> (64 - _AP_W); - mask2 = mask & (mask2 << (h_index + 1)); - mask &= ~mask2; - vval &= ~mask2; - } - d_bv &= (~mask & (~0ULL >> (64 - _AP_W))); - d_bv |= vval; - } - } - } - - INLINE ap_private<_AP_W, false> get() const { - ap_private<_AP_W, false> val(0); - if (h_index < l_index) { - for (int i = 0, j = l_index; j >= 0 && j >= h_index; j--, i++) - if ((d_bv)[j]) val.set(i); - } else { - val = d_bv; - val >>= l_index; - if (h_index < _AP_W - 1) { - if (_AP_W <= 64) { - const static uint64_t mask = - (~0ULL >> (64 > _AP_W ? (64 - _AP_W) : 0)); - val &= (mask >> (_AP_W - (h_index - l_index + 1))); - } else { - ap_private<_AP_W, false> mask(-1); - mask >>= _AP_W - (h_index - l_index + 1); - val &= mask; - } - } - } - return val; - } - - INLINE ap_private<_AP_W, false> get() { - ap_private<_AP_W, false> val(0); - if (h_index < l_index) { - for (int i = 0, j = l_index; j >= 0 && j >= h_index; j--, i++) - if ((d_bv)[j]) val.set(i); - } else { - val = d_bv; - val >>= l_index; - if (h_index < _AP_W - 1) { - if (_AP_W <= 64) { - static const uint64_t mask = ~0ULL >> (64 > _AP_W ? (64 - _AP_W) : 0); - return val &= ((mask) >> (_AP_W - (h_index - l_index + 1))); - } else { - ap_private<_AP_W, false> mask(-1); - mask >>= _AP_W - (h_index - l_index + 1); - val &= mask; - } - } - } - return val; - } - - INLINE int length() const { - return h_index >= l_index ? 
h_index - l_index + 1 : l_index - h_index + 1; - } - - INLINE int to_int() const { - ap_private<_AP_W, false> val = get(); - return val.to_int(); - } - - INLINE unsigned int to_uint() const { - ap_private<_AP_W, false> val = get(); - return val.to_uint(); - } - - INLINE long to_long() const { - ap_private<_AP_W, false> val = get(); - return val.to_long(); - } - - INLINE unsigned long to_ulong() const { - ap_private<_AP_W, false> val = get(); - return val.to_ulong(); - } - - INLINE ap_slong to_int64() const { - ap_private<_AP_W, false> val = get(); - return val.to_int64(); - } - - INLINE ap_ulong to_uint64() const { - ap_private<_AP_W, false> val = get(); - return val.to_uint64(); - } - - INLINE std::string to_string(uint8_t radix = 2) const { - return get().to_string(radix); - } - - INLINE bool and_reduce() { - bool ret = true; - bool reverse = l_index > h_index; - unsigned low = reverse ? h_index : l_index; - unsigned high = reverse ? l_index : h_index; - for (unsigned i = low; i != high; ++i) ret &= d_bv[i]; - return ret; - } - - INLINE bool or_reduce() { - bool ret = false; - bool reverse = l_index > h_index; - unsigned low = reverse ? h_index : l_index; - unsigned high = reverse ? l_index : h_index; - for (unsigned i = low; i != high; ++i) ret |= d_bv[i]; - return ret; - } - - INLINE bool xor_reduce() { - bool ret = false; - bool reverse = l_index > h_index; - unsigned low = reverse ? h_index : l_index; - unsigned high = reverse ? l_index : h_index; - for (unsigned i = low; i != high; ++i) ret ^= d_bv[i]; - return ret; - } -}; // struct _private_range_ref. - -/// Bit reference -/// Proxy class, which allows bit selection to be used as rvalue(for reading) -/// and lvalue(for writing) -//-------------------------------------------------------------- -template -struct _private_bit_ref { -#ifdef _MSC_VER -#pragma warning(disable : 4521 4522) -#endif - ap_private<_AP_W, _AP_S>& d_bv; - int d_index; - - public: - // copy ctor. - INLINE _private_bit_ref(const _private_bit_ref<_AP_W, _AP_S>& ref) - : d_bv(ref.d_bv), d_index(ref.d_index) {} - - // director ctor. 
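The and/or/xor reduce methods above walk the selected bit range of the underlying vector. An inclusive-range sketch on a plain word (xor_reduce_bits is an illustrative name); note the removed loops iterate with `i != high`, which as written leaves the high bit out of the reduction:

    #include <cassert>
    #include <cstdint>

    // XOR-reduce bits [h:l] of a word, inclusive of both endpoints.
    static bool xor_reduce_bits(uint64_t v, unsigned h, unsigned l) {
        assert(h >= l && h < 64);
        bool ret = false;
        for (unsigned i = l; i <= h; ++i) ret ^= (v >> i) & 1;
        return ret;
    }

    int main() {
        assert(xor_reduce_bits(0x6, 3, 0) == false); // two set bits
        assert(xor_reduce_bits(0x7, 3, 0) == true);  // three set bits
        return 0;
    }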
- INLINE _private_bit_ref(ap_private<_AP_W, _AP_S>& bv, int index = 0) - : d_bv(bv), d_index(index) { - _AP_WARNING(d_index < 0, "Index of bit vector (%d) cannot be negative.\n", - d_index); - _AP_WARNING(d_index >= _AP_W, - "Index of bit vector (%d) out of range (%d).\n", d_index, _AP_W); - } - - INLINE operator bool() const { return d_bv.get_bit(d_index); } - - INLINE bool to_bool() const { return operator bool(); } - - template - INLINE _private_bit_ref& operator=(const T& val) { - if (!!val) - d_bv.set(d_index); - else - d_bv.clear(d_index); - return *this; - } - -// template -// INLINE ap_concat_ref<1, _private_bit_ref, _AP_W2, ap_private<_AP_W2, -// _AP_S2> > -// operator,(ap_private<_AP_W2, _AP_S2> &a2) const { -// return ap_concat_ref<1, _private_bit_ref, _AP_W2, ap_private<_AP_W2, -// _AP_S2> >( -// const_cast<_private_bit_ref<_AP_W, _AP_S>&>(*this), a2); -// } -// -// template -// INLINE ap_concat_ref<1, _private_bit_ref, _AP_W2, -// _private_range_ref<_AP_W2, -// _AP_S2> > -// operator,(const _private_range_ref<_AP_W2, _AP_S2> &a2) const { -// return ap_concat_ref<1, _private_bit_ref, _AP_W2, -// _private_range_ref<_AP_W2, -// _AP_S2> >( -// const_cast<_private_bit_ref<_AP_W, _AP_S>&>(*this), -// const_cast<_private_range_ref<_AP_W2, _AP_S2>&>(a2)); -// } -// -// template -// INLINE ap_concat_ref<1, _private_bit_ref, 1, _private_bit_ref<_AP_W2, -// _AP_S2> > operator,( -// const _private_bit_ref<_AP_W2, _AP_S2> &a2) const { -// return ap_concat_ref<1, _private_bit_ref, 1, -// _private_bit_ref<_AP_W2, _AP_S2> >( -// const_cast<_private_bit_ref<_AP_W, _AP_S>&>(*this), -// const_cast<_private_bit_ref<_AP_W2, _AP_S2>&>(a2)); -// } -// -// INLINE ap_concat_ref<1, _private_bit_ref, 1, _private_bit_ref> -// operator,( -// const _private_bit_ref &a2) const { -// return ap_concat_ref<1, _private_bit_ref, 1, _private_bit_ref>( -// const_cast<_private_bit_ref<_AP_W, _AP_S>&>(*this), -// const_cast<_private_bit_ref&>(a2)); -// } -// -// template -// INLINE ap_concat_ref<1, _private_bit_ref, _AP_W2 + _AP_W3, -// ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3> > -// operator,(const ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3> &a2) const { -// return ap_concat_ref<1, _private_bit_ref, _AP_W2 + _AP_W3, -// ap_concat_ref<_AP_W2, _AP_T2, _AP_W3, _AP_T3> >( -// const_cast<_private_bit_ref<_AP_W, _AP_S>&>(*this), -// const_cast&>(a2)); -// } -// -// template -// INLINE ap_concat_ref< -// 1, _private_bit_ref, _AP_W2, -// af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> > -// operator,(const af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, -// _AP_N2> -// &a2) const { -// return ap_concat_ref< -// 1, _private_bit_ref, _AP_W2, -// af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, _AP_N2> >( -// const_cast<_private_bit_ref<_AP_W, _AP_S>&>(*this), -// const_cast< -// af_range_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, -// _AP_N2>&>(a2)); -// } -// -// template -// INLINE -// ap_concat_ref<1, _private_bit_ref, 1, -// af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, -// _AP_N2> > -// operator,(const af_bit_ref<_AP_W2, _AP_I2, _AP_S2, _AP_Q2, _AP_O2, -// _AP_N2> -// &a2) const { -// return ap_concat_ref<1, _private_bit_ref, 1, af_bit_ref<_AP_W2, -// _AP_I2, _AP_S2, -// _AP_Q2, _AP_O2, -// _AP_N2> >( -// const_cast<_private_bit_ref<_AP_W, _AP_S>&>(*this), -// const_cast&>( -// a2)); -// } - - template - INLINE bool operator==(const _private_bit_ref<_AP_W2, _AP_S2>& op) const { - return get() == op.get(); - } - - template - INLINE bool operator!=(const _private_bit_ref<_AP_W2, _AP_S2>& 
op) const { - return get() != op.get(); - } - - INLINE bool get() const { return operator bool(); } - - // template - // INLINE void set(const ap_private<_AP_W3, false>& val) { - // operator=(val); - // } - - // INLINE bool operator~() const { - // bool bit = (d_bv)[d_index]; - // return bit ? false : true; - // } - - INLINE int length() const { return 1; } - - // INLINE std::string to_string() const { - // bool val = get(); - // return val ? "1" : "0"; - // } - -}; // struct _private_bit_ref. - -// char a[100]; -// char* ptr = a; -// ap_int<2> n = 3; -// char* ptr2 = ptr + n*2; -// avoid ambiguous errors -#define OP_BIN_MIX_PTR(BIN_OP) \ - template \ - INLINE PTR_TYPE* operator BIN_OP(PTR_TYPE* i_op, \ - const ap_private<_AP_W, _AP_S>& op) { \ - typename ap_private<_AP_W, _AP_S>::ValType op2 = op; \ - return i_op BIN_OP op2; \ - } \ - template \ - INLINE PTR_TYPE* operator BIN_OP(const ap_private<_AP_W, _AP_S>& op, \ - PTR_TYPE* i_op) { \ - typename ap_private<_AP_W, _AP_S>::ValType op2 = op; \ - return op2 BIN_OP i_op; \ - } - -OP_BIN_MIX_PTR(+) -OP_BIN_MIX_PTR(-) -#undef OP_BIN_MIX_PTR - -// float OP ap_int -// when ap_int's width > 64, then trunc ap_int to ap_int<64> -#define OP_BIN_MIX_FLOAT(BIN_OP, C_TYPE) \ - template \ - INLINE C_TYPE operator BIN_OP(C_TYPE i_op, \ - const ap_private<_AP_W, _AP_S>& op) { \ - typename ap_private<_AP_W, _AP_S>::ValType op2 = op; \ - return i_op BIN_OP op2; \ - } \ - template \ - INLINE C_TYPE operator BIN_OP(const ap_private<_AP_W, _AP_S>& op, \ - C_TYPE i_op) { \ - typename ap_private<_AP_W, _AP_S>::ValType op2 = op; \ - return op2 BIN_OP i_op; \ - } - -#define OPS_MIX_FLOAT(C_TYPE) \ - OP_BIN_MIX_FLOAT(*, C_TYPE) \ - OP_BIN_MIX_FLOAT(/, C_TYPE) \ - OP_BIN_MIX_FLOAT(+, C_TYPE) \ - OP_BIN_MIX_FLOAT(-, C_TYPE) - -OPS_MIX_FLOAT(float) -OPS_MIX_FLOAT(double) -#undef OP_BIN_MIX_FLOAT -#undef OPS_MIX_FLOAT - -/// Operators mixing Integers with AP_Int -// ---------------------------------------------------------------- - -// partially specialize template argument _AP_C in order that: -// for _AP_W > 64, we will explicitly convert operand with native data type -// into corresponding ap_private -// for _AP_W <= 64, we will implicitly convert operand with ap_private into -// (unsigned) long long -#define OP_BIN_MIX_INT(BIN_OP, C_TYPE, _AP_WI, _AP_SI, RTYPE) \ - template \ - INLINE \ - typename ap_private<_AP_WI, _AP_SI>::template RType<_AP_W, _AP_S>::RTYPE \ - operator BIN_OP(C_TYPE i_op, const ap_private<_AP_W, _AP_S>& op) { \ - return ap_private<_AP_WI, _AP_SI>(i_op).operator BIN_OP(op); \ - } \ - template \ - INLINE \ - typename ap_private<_AP_W, _AP_S>::template RType<_AP_WI, _AP_SI>::RTYPE \ - operator BIN_OP(const ap_private<_AP_W, _AP_S>& op, C_TYPE i_op) { \ - return op.operator BIN_OP(ap_private<_AP_WI, _AP_SI>(i_op)); \ - } - -#define OP_REL_MIX_INT(REL_OP, C_TYPE, _AP_W2, _AP_S2) \ - template \ - INLINE bool operator REL_OP(const ap_private<_AP_W, _AP_S>& op, \ - C_TYPE op2) { \ - return op.operator REL_OP(ap_private<_AP_W2, _AP_S2>(op2)); \ - } \ - template \ - INLINE bool operator REL_OP(C_TYPE op2, \ - const ap_private<_AP_W, _AP_S, false>& op) { \ - return ap_private<_AP_W2, _AP_S2>(op2).operator REL_OP(op); \ - } - -#define OP_ASSIGN_MIX_INT(ASSIGN_OP, C_TYPE, _AP_W2, _AP_S2) \ - template \ - INLINE ap_private<_AP_W, _AP_S>& operator ASSIGN_OP( \ - ap_private<_AP_W, _AP_S>& op, C_TYPE op2) { \ - return op.operator ASSIGN_OP(ap_private<_AP_W2, _AP_S2>(op2)); \ - } - -#define OP_BIN_SHIFT_INT(BIN_OP, C_TYPE, _AP_WI, _AP_SI, RTYPE) \ - template \ 
-      C_TYPE operator BIN_OP(C_TYPE i_op,                                  \
-                             const ap_private<_AP_W, _AP_S, false>& op) {  \
-    return i_op BIN_OP(op.get_VAL());                                      \
-  }                                                                        \
-  template <int _AP_W, bool _AP_S>                                         \
-  INLINE                                                                   \
-  typename ap_private<_AP_W, _AP_S>::template RType<_AP_WI, _AP_SI>::RTYPE \
-  operator BIN_OP(const ap_private<_AP_W, _AP_S>& op, C_TYPE i_op) {       \
-    return op.operator BIN_OP(i_op);                                       \
-  }
-
-#define OP_ASSIGN_RSHIFT_INT(ASSIGN_OP, C_TYPE, _AP_W2, _AP_S2) \
-  template <int _AP_W, bool _AP_S>                              \
-  INLINE ap_private<_AP_W, _AP_S>& operator ASSIGN_OP(          \
-      ap_private<_AP_W, _AP_S>& op, C_TYPE op2) {               \
-    op = op.operator>>(op2);                                    \
-    return op;                                                  \
-  }
-
-#define OP_ASSIGN_LSHIFT_INT(ASSIGN_OP, C_TYPE, _AP_W2, _AP_S2) \
-  template <int _AP_W, bool _AP_S>                              \
-  INLINE ap_private<_AP_W, _AP_S>& operator ASSIGN_OP(          \
-      ap_private<_AP_W, _AP_S>& op, C_TYPE op2) {               \
-    op = op.operator<<(op2);                                    \
-    return op;                                                  \
-  }
-
-#define OPS_MIX_INT(C_TYPE, _AP_W2, _AP_S2)                  \
-  OP_BIN_MIX_INT(*, C_TYPE, (_AP_W2), (_AP_S2), mult)        \
-  OP_BIN_MIX_INT(+, C_TYPE, (_AP_W2), (_AP_S2), plus)        \
-  OP_BIN_MIX_INT(-, C_TYPE, (_AP_W2), (_AP_S2), minus)       \
-  OP_BIN_MIX_INT(/, C_TYPE, (_AP_W2), (_AP_S2), div)         \
-  OP_BIN_MIX_INT(%, C_TYPE, (_AP_W2), (_AP_S2), mod)         \
-  OP_BIN_MIX_INT(&, C_TYPE, (_AP_W2), (_AP_S2), logic)       \
-  OP_BIN_MIX_INT(|, C_TYPE, (_AP_W2), (_AP_S2), logic)       \
-  OP_BIN_MIX_INT(^, C_TYPE, (_AP_W2), (_AP_S2), logic)       \
-  OP_BIN_SHIFT_INT(>>, C_TYPE, (_AP_W2), (_AP_S2), arg1)     \
-  OP_BIN_SHIFT_INT(<<, C_TYPE, (_AP_W2), (_AP_S2), arg1)     \
-                                                             \
-  OP_ASSIGN_MIX_INT(+=, C_TYPE, (_AP_W2), (_AP_S2))          \
-  OP_ASSIGN_MIX_INT(-=, C_TYPE, (_AP_W2), (_AP_S2))          \
-  OP_ASSIGN_MIX_INT(*=, C_TYPE, (_AP_W2), (_AP_S2))          \
-  OP_ASSIGN_MIX_INT(/=, C_TYPE, (_AP_W2), (_AP_S2))          \
-  OP_ASSIGN_MIX_INT(%=, C_TYPE, (_AP_W2), (_AP_S2))          \
-  OP_ASSIGN_MIX_INT(&=, C_TYPE, (_AP_W2), (_AP_S2))          \
-  OP_ASSIGN_MIX_INT(|=, C_TYPE, (_AP_W2), (_AP_S2))          \
-  OP_ASSIGN_MIX_INT(^=, C_TYPE, (_AP_W2), (_AP_S2))          \
-  OP_ASSIGN_RSHIFT_INT(>>=, C_TYPE, (_AP_W2), (_AP_S2))      \
-  OP_ASSIGN_LSHIFT_INT(<<=, C_TYPE, (_AP_W2), (_AP_S2))      \
-                                                             \
-  OP_REL_MIX_INT(>, C_TYPE, (_AP_W2), (_AP_S2))              \
-  OP_REL_MIX_INT(<, C_TYPE, (_AP_W2), (_AP_S2))              \
-  OP_REL_MIX_INT(>=, C_TYPE, (_AP_W2), (_AP_S2))             \
-  OP_REL_MIX_INT(<=, C_TYPE, (_AP_W2), (_AP_S2))             \
-  OP_REL_MIX_INT(==, C_TYPE, (_AP_W2), (_AP_S2))             \
-  OP_REL_MIX_INT(!=, C_TYPE, (_AP_W2), (_AP_S2))
-
-OPS_MIX_INT(bool, 1, false)
-OPS_MIX_INT(char, 8, CHAR_IS_SIGNED)
-OPS_MIX_INT(signed char, 8, true)
-OPS_MIX_INT(unsigned char, 8, false)
-OPS_MIX_INT(short, sizeof(short) * 8, true)
-OPS_MIX_INT(unsigned short, sizeof(unsigned short) * 8, false)
-OPS_MIX_INT(int, sizeof(int) * 8, true)
-OPS_MIX_INT(unsigned int, sizeof(unsigned int) * 8, false)
-OPS_MIX_INT(long, sizeof(long) * 8, true)
-OPS_MIX_INT(unsigned long, sizeof(unsigned long) * 8, false)
-OPS_MIX_INT(ap_slong, sizeof(ap_slong) * 8, true)
-OPS_MIX_INT(ap_ulong, sizeof(ap_ulong) * 8, false)
-
-#undef OP_BIN_MIX_INT
-#undef OP_BIN_SHIFT_INT
-#undef OP_ASSIGN_MIX_INT
-#undef OP_ASSIGN_RSHIFT_INT
-#undef OP_ASSIGN_LSHIFT_INT
-#undef OP_REL_MIX_INT
-#undef OPS_MIX_INT
-
-#define OP_BIN_MIX_RANGE(BIN_OP, RTYPE)                                      \
-  template <int _AP_W1, bool _AP_S1, int _AP_W2, bool _AP_S2>                \
-  INLINE typename ap_private<_AP_W1, _AP_S1>::template RType<_AP_W2,         \
-                                                             _AP_S2>::RTYPE  \
-  operator BIN_OP(const _private_range_ref<_AP_W1, _AP_S1>& op1,             \
-                  const ap_private<_AP_W2, _AP_S2>& op2) {                   \
-    return ap_private<_AP_W1, false>(op1).operator BIN_OP(op2);              \
-  }                                                                          \
-  template <int _AP_W1, bool _AP_S1, int _AP_W2, bool _AP_S2>                \
-  INLINE typename ap_private<_AP_W1, _AP_S1>::template RType<_AP_W2,         \
-                                                             _AP_S2>::RTYPE  \
-  operator BIN_OP(const ap_private<_AP_W1, _AP_S1>& op1,                     \
-                  const _private_range_ref<_AP_W2, _AP_S2>& op2) {           \
-    return op1.operator BIN_OP(ap_private<_AP_W2, false>(op2));              \
-  }
-
-#define OP_ASSIGN_MIX_RANGE(ASSIGN_OP)                                 \
-  template <int _AP_W1, bool _AP_S1, int _AP_W2, bool _AP_S2>          \
-  INLINE ap_private<_AP_W1, _AP_S1>& operator ASSIGN_OP(               \
-      ap_private<_AP_W1, _AP_S1>& op1,                                 \
-      const _private_range_ref<_AP_W2, _AP_S2>& op2) {                 \
-    return op1.operator ASSIGN_OP(ap_private<_AP_W2, false>(op2));     \
-  }                                                                    \
-  template <int _AP_W1, bool _AP_S1, int _AP_W2, bool _AP_S2>          \
-  INLINE _private_range_ref<_AP_W1, _AP_S1>& operator ASSIGN_OP(       \
-      _private_range_ref<_AP_W1, _AP_S1>& op1,                         \
-      ap_private<_AP_W2, _AP_S2>& op2) {                               \
-    ap_private<_AP_W1, false> tmp(op1);                                \
-    tmp.operator ASSIGN_OP(op2);                                       \
-    op1 = tmp;                                                         \
-    return op1;                                                        \
-  }
-
-#define OP_REL_MIX_RANGE(REL_OP)                                              \
-  template <int _AP_W1, bool _AP_S1, int _AP_W2, bool _AP_S2>                 \
-  INLINE bool operator REL_OP(const _private_range_ref<_AP_W1, _AP_S1>& op1,  \
-                              const ap_private<_AP_W2, _AP_S2>& op2) {        \
-    return ap_private<_AP_W1, false>(op1).operator REL_OP(op2);               \
-  }                                                                           \
-  template <int _AP_W1, bool _AP_S1, int _AP_W2, bool _AP_S2>                 \
-  INLINE bool operator REL_OP(const ap_private<_AP_W1, _AP_S1>& op1,          \
-                              const _private_range_ref<_AP_W2, _AP_S2>& op2) {\
-    return op1.operator REL_OP(op2.operator ap_private<_AP_W2, false>());     \
-  }
-
-OP_BIN_MIX_RANGE(+, plus)
-OP_BIN_MIX_RANGE(-, minus)
-OP_BIN_MIX_RANGE(*, mult)
-OP_BIN_MIX_RANGE(/, div)
-OP_BIN_MIX_RANGE(%, mod)
-OP_BIN_MIX_RANGE(&, logic)
-OP_BIN_MIX_RANGE(|, logic)
-OP_BIN_MIX_RANGE(^, logic)
-OP_BIN_MIX_RANGE(>>, arg1)
-OP_BIN_MIX_RANGE(<<, arg1)
-#undef OP_BIN_MIX_RANGE
-
-OP_ASSIGN_MIX_RANGE(+=)
-OP_ASSIGN_MIX_RANGE(-=)
-OP_ASSIGN_MIX_RANGE(*=)
-OP_ASSIGN_MIX_RANGE(/=)
-OP_ASSIGN_MIX_RANGE(%=)
-OP_ASSIGN_MIX_RANGE(&=)
-OP_ASSIGN_MIX_RANGE(|=)
-OP_ASSIGN_MIX_RANGE(^=)
-OP_ASSIGN_MIX_RANGE(>>=)
-OP_ASSIGN_MIX_RANGE(<<=)
-#undef OP_ASSIGN_MIX_RANGE
-
-OP_REL_MIX_RANGE(>)
-OP_REL_MIX_RANGE(<)
-OP_REL_MIX_RANGE(>=)
-OP_REL_MIX_RANGE(<=)
-OP_REL_MIX_RANGE(==)
-OP_REL_MIX_RANGE(!=)
-#undef OP_REL_MIX_RANGE
-
-#define OP_BIN_MIX_BIT(BIN_OP, RTYPE)                                          \
-  template <int _AP_W1, bool _AP_S1, int _AP_W2, bool _AP_S2>                  \
-  INLINE typename ap_private<1, false>::template RType<_AP_W2, _AP_S2>::RTYPE  \
-  operator BIN_OP(const _private_bit_ref<_AP_W1, _AP_S1>& op1,                 \
-                  const ap_private<_AP_W2, _AP_S2>& op2) {                     \
-    return ap_private<1, false>(op1).operator BIN_OP(op2);                     \
-  }                                                                            \
-  template <int _AP_W1, bool _AP_S1, int _AP_W2, bool _AP_S2>                  \
-  INLINE typename ap_private<_AP_W1, _AP_S1>::template RType<1, false>::RTYPE  \
-  operator BIN_OP(const ap_private<_AP_W1, _AP_S1>& op1,                       \
-                  const _private_bit_ref<_AP_W2, _AP_S2>& op2) {               \
-    return op1.operator BIN_OP(ap_private<1, false>(op2));                     \
-  }
-
-#define OP_ASSIGN_MIX_BIT(ASSIGN_OP)                             \
-  template <int _AP_W1, bool _AP_S1, int _AP_W2, bool _AP_S2>    \
-  INLINE ap_private<_AP_W1, _AP_S1>& operator ASSIGN_OP(         \
-      ap_private<_AP_W1, _AP_S1>& op1,                           \
-      _private_bit_ref<_AP_W2, _AP_S2>& op2) {                   \
-    return op1.operator ASSIGN_OP(ap_private<1, false>(op2));    \
-  }                                                              \
-  template <int _AP_W1, bool _AP_S1, int _AP_W2, bool _AP_S2>    \
-  INLINE _private_bit_ref<_AP_W1, _AP_S1>& operator ASSIGN_OP(   \
-      _private_bit_ref<_AP_W1, _AP_S1>& op1,                     \
-      ap_private<_AP_W2, _AP_S2>& op2) {                         \
-    ap_private<1, false> tmp(op1);                               \
-    tmp.operator ASSIGN_OP(op2);                                 \
-    op1 = tmp;                                                   \
-    return op1;                                                  \
-  }
-
-#define OP_REL_MIX_BIT(REL_OP)                                                \
-  template <int _AP_W1, bool _AP_S1, int _AP_W2, bool _AP_S2>                 \
-  INLINE bool operator REL_OP(const _private_bit_ref<_AP_W1, _AP_S1>& op1,    \
-                              const ap_private<_AP_W2, _AP_S2>& op2) {        \
-    return ap_private<_AP_W1, false>(op1).operator REL_OP(op2);               \
-  }                                                                           \
-  template <int _AP_W1, bool _AP_S1, int _AP_W2, bool _AP_S2>                 \
-  INLINE bool operator REL_OP(const ap_private<_AP_W1, _AP_S1>& op1,          \
-                              const _private_bit_ref<_AP_W2, _AP_S2>& op2) {  \
-    return op1.operator REL_OP(ap_private<1, false>(op2));                    \
-  }
-
-OP_ASSIGN_MIX_BIT(+=)
-OP_ASSIGN_MIX_BIT(-=)
-OP_ASSIGN_MIX_BIT(*=)
-OP_ASSIGN_MIX_BIT(/=)
-OP_ASSIGN_MIX_BIT(%=)
-OP_ASSIGN_MIX_BIT(&=)
-OP_ASSIGN_MIX_BIT(|=)
-OP_ASSIGN_MIX_BIT(^=)
-OP_ASSIGN_MIX_BIT(>>=)
-OP_ASSIGN_MIX_BIT(<<=)
-#undef OP_ASSIGN_MIX_BIT
-
-OP_BIN_MIX_BIT(+, plus)
-OP_BIN_MIX_BIT(-, minus)
-OP_BIN_MIX_BIT(*, mult)
-OP_BIN_MIX_BIT(/, div)
-OP_BIN_MIX_BIT(%, mod)
-OP_BIN_MIX_BIT(&, logic)
-OP_BIN_MIX_BIT(|, logic)
-OP_BIN_MIX_BIT(^, logic)
-OP_BIN_MIX_BIT(>>, arg1)
-OP_BIN_MIX_BIT(<<, arg1)
-#undef OP_BIN_MIX_BIT
-
-OP_REL_MIX_BIT(>)
-OP_REL_MIX_BIT(<)
-OP_REL_MIX_BIT(<=)
-OP_REL_MIX_BIT(>=)
-OP_REL_MIX_BIT(==)
-OP_REL_MIX_BIT(!=)
-#undef OP_REL_MIX_BIT
-
-#define REF_REL_OP_MIX_INT(REL_OP, C_TYPE, _AP_W2, _AP_S2)                    \
-  template <int _AP_W, bool _AP_S>                                            \
-  INLINE bool operator REL_OP(const _private_range_ref<_AP_W, _AP_S>& op,     \
-                              C_TYPE op2) {                                   \
-    return (ap_private<_AP_W, false>(op))                                     \
-        .operator REL_OP(ap_private<_AP_W2, _AP_S2>(op2));                    \
-  }                                                                           \
-  template <int _AP_W, bool _AP_S>                                            \
-  INLINE bool operator REL_OP(C_TYPE op2,                                     \
-                              const _private_range_ref<_AP_W, _AP_S>& op) {   \
-    return ap_private<_AP_W2, _AP_S2>(op2).operator REL_OP(                   \
-        ap_private<_AP_W, false>(op));                                        \
-  }                                                                           \
-  template <int _AP_W, bool _AP_S>                                            \
-  INLINE bool operator REL_OP(const _private_bit_ref<_AP_W, _AP_S>& op,       \
-                              C_TYPE op2) {                                   \
-    return (bool(op))REL_OP op2;                                              \
-  }                                                                           \
-  template <int _AP_W, bool _AP_S>                                            \
-  INLINE bool operator REL_OP(C_TYPE op2,                                     \
-                              const _private_bit_ref<_AP_W, _AP_S>& op) {     \
-    return op2 REL_OP(bool(op));                                              \
-  }
-
-#define REF_REL_MIX_INT(C_TYPE, _AP_W2, _AP_S2)        \
-  REF_REL_OP_MIX_INT(>, C_TYPE, (_AP_W2), (_AP_S2))    \
-  REF_REL_OP_MIX_INT(<, C_TYPE, (_AP_W2), (_AP_S2))    \
-  REF_REL_OP_MIX_INT(>=, C_TYPE, (_AP_W2), (_AP_S2))   \
-  REF_REL_OP_MIX_INT(<=, C_TYPE, (_AP_W2), (_AP_S2))   \
-  REF_REL_OP_MIX_INT(==, C_TYPE, (_AP_W2), (_AP_S2))   \
-  REF_REL_OP_MIX_INT(!=, C_TYPE, (_AP_W2), (_AP_S2))
-
-REF_REL_MIX_INT(bool, 1, false)
-REF_REL_MIX_INT(char, 8, CHAR_IS_SIGNED)
-REF_REL_MIX_INT(signed char, 8, true)
-REF_REL_MIX_INT(unsigned char, 8, false)
-REF_REL_MIX_INT(short, sizeof(short) * 8, true)
-REF_REL_MIX_INT(unsigned short, sizeof(unsigned short) * 8, false)
-REF_REL_MIX_INT(int, sizeof(int) * 8, true)
-REF_REL_MIX_INT(unsigned int, sizeof(unsigned int) * 8, false)
-REF_REL_MIX_INT(long, sizeof(long) * 8, true)
-REF_REL_MIX_INT(unsigned long, sizeof(unsigned long) * 8, false)
-REF_REL_MIX_INT(ap_slong, sizeof(ap_slong) * 8, true)
-REF_REL_MIX_INT(ap_ulong, sizeof(ap_ulong) * 8, false)
-#undef REF_REL_OP_MIX_INT
-#undef REF_REL_MIX_INT
-#define REF_BIN_OP_MIX_INT(BIN_OP, RTYPE, C_TYPE, _AP_W2, _AP_S2)            \
-  template <int _AP_W, bool _AP_S>                                            \
-  INLINE                                                                      \
-  typename ap_private<_AP_W, false>::template RType<_AP_W2, _AP_S2>::RTYPE    \
-  operator BIN_OP(const _private_range_ref<_AP_W, _AP_S>& op,                 \
-                  C_TYPE op2) {                                               \
-    return (ap_private<_AP_W, false>(op))                                     \
-        .operator BIN_OP(ap_private<_AP_W2, _AP_S2>(op2));                    \
-  }                                                                           \
-  template <int _AP_W, bool _AP_S>                                            \
-  INLINE                                                                      \
-  typename ap_private<_AP_W2, _AP_S2>::template RType<_AP_W, false>::RTYPE    \
-  operator BIN_OP(C_TYPE op2,                                                 \
-                  const _private_range_ref<_AP_W, _AP_S>& op) {               \
-    return ap_private<_AP_W2, _AP_S2>(op2).operator BIN_OP(                   \
-        ap_private<_AP_W, false>(op));                                        \
-  }
-
-#define REF_BIN_MIX_INT(C_TYPE, _AP_W2, _AP_S2)                 \
-  REF_BIN_OP_MIX_INT(+, plus, C_TYPE, (_AP_W2), (_AP_S2))       \
-  REF_BIN_OP_MIX_INT(-, minus, C_TYPE, (_AP_W2), (_AP_S2))      \
-  REF_BIN_OP_MIX_INT(*, mult, C_TYPE, (_AP_W2), (_AP_S2))       \
-  REF_BIN_OP_MIX_INT(/, div, C_TYPE, (_AP_W2), (_AP_S2))        \
-  REF_BIN_OP_MIX_INT(%, mod, C_TYPE, (_AP_W2), (_AP_S2))        \
-  REF_BIN_OP_MIX_INT(&, logic, C_TYPE, (_AP_W2), (_AP_S2))      \
-  REF_BIN_OP_MIX_INT(|, logic, C_TYPE, (_AP_W2), (_AP_S2))      \
-  REF_BIN_OP_MIX_INT(^, logic, C_TYPE, (_AP_W2), (_AP_S2))      \
-  REF_BIN_OP_MIX_INT(>>, arg1, C_TYPE, (_AP_W2), (_AP_S2))      \
-  REF_BIN_OP_MIX_INT(<<, arg1, C_TYPE, (_AP_W2), (_AP_S2))
-
-REF_BIN_MIX_INT(bool, 1, false)
-REF_BIN_MIX_INT(char, 8, CHAR_IS_SIGNED)
-REF_BIN_MIX_INT(signed char, 8, true)
-REF_BIN_MIX_INT(unsigned char, 8, false)
-REF_BIN_MIX_INT(short, sizeof(short) * 8, true)
-REF_BIN_MIX_INT(unsigned short, sizeof(unsigned short) * 8, false)
-REF_BIN_MIX_INT(int, sizeof(int) * 8, true)
-REF_BIN_MIX_INT(unsigned int, sizeof(unsigned int) * 8, false)
-REF_BIN_MIX_INT(long, sizeof(long) * 8, true)
-REF_BIN_MIX_INT(unsigned long, sizeof(unsigned long) * 8, false)
-REF_BIN_MIX_INT(ap_slong, sizeof(ap_slong) * 8, true)
-REF_BIN_MIX_INT(ap_ulong, sizeof(ap_ulong) * 8, false)
-#undef REF_BIN_OP_MIX_INT
-#undef REF_BIN_MIX_INT
-
-#define REF_BIN_OP(BIN_OP, RTYPE)                                             \
-  template <int _AP_W, bool _AP_S, int _AP_W2, bool _AP_S2>                   \
-  INLINE                                                                      \
-  typename ap_private<_AP_W, false>::template RType<_AP_W2, false>::RTYPE     \
-  operator BIN_OP(const _private_range_ref<_AP_W, _AP_S>& lhs,                \
-                  const _private_range_ref<_AP_W2, _AP_S2>& rhs) {            \
-    return ap_private<_AP_W, false>(lhs).operator BIN_OP(                     \
-        ap_private<_AP_W2, false>(rhs));                                      \
-  }
-
-REF_BIN_OP(+, plus)
-REF_BIN_OP(-, minus)
-REF_BIN_OP(*, mult)
-REF_BIN_OP(/, div)
-REF_BIN_OP(%, mod)
-REF_BIN_OP(&, logic)
-REF_BIN_OP(|, logic)
-REF_BIN_OP(^, logic)
-REF_BIN_OP(>>, arg1)
-REF_BIN_OP(<<, arg1)
-#undef REF_BIN_OP
-
-//************************************************************************
-// Implement
-//     ap_private = ap_concat_ref OP ap_concat_ref
-// for operators +, -, *, /, %, >>, <<, &, |, ^
-// Without these operators the operands are converted to int64 and
-// larger results lose information (the higher-order bits).
-//
-//                       operand OP
-//                      /          \
-//           left-concat            right-concat
-//             /      \               /       \
-//
-// _AP_LW1, _AP_LT1 (width and type of left-concat's left side)
-// _AP_LW2, _AP_LT2 (width and type of left-concat's right side)
-// Similarly for the RHS of operand OP: _AP_RW1, _AP_RW2, _AP_RT1, _AP_RT2
-//
-// In Verilog 2001 the result of a concatenation is always unsigned, even
-// when both sides are signed.
-//************************************************************************
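[Editor's note: the comment above is the key rationale for this whole block of overloads. A minimal sketch of the failure mode it prevents, written against the public `ap_int.h` wrapper (which builds on these `ap_private` operators in older Vivado HLS toolchains); values and widths are illustrative only:]

```cpp
#include "ap_int.h"

int main() {
    ap_uint<40> hi = -1; // all 40 bits set
    ap_uint<40> lo = 0;
    // Concatenation yields an 80-bit value; the overloads above let it take
    // part in arithmetic directly instead of collapsing to a 64-bit integer.
    ap_uint<80> cat = (hi, lo);   // hi occupies bits [79:40], lo bits [39:0]
    ap_uint<81> sum = cat + cat;  // full-width add: no high-order bits lost
    return ((sum >> 80) == 1) ? 0 : 1; // the carried-out top bit survives
}
```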
-
-#endif // ifndef __AP_PRIVATE_H__
-
-// -*- cpp -*-
diff --git a/hls4ml/hls4ml/templates/vivado/ap_types/hls_math.h b/hls4ml/hls4ml/templates/vivado/ap_types/hls_math.h
deleted file mode 100644
index f129971..0000000
--- a/hls4ml/hls4ml/templates/vivado/ap_types/hls_math.h
+++ /dev/null
@@ -1,27 +0,0 @@
-#ifndef X_HLS_MATH_H
-#define X_HLS_MATH_H
-
-#include <cmath>
-#include "ap_fixed.h"
-
-namespace hls {
-
-template <class T>
-static T exp(const T x) {
-    return (T) std::exp(x.to_double());
-}
-
-template <class T> T sin(T x) { return (T) std::sin(x.to_double()); };
-
-template <class T> T cos(T x) { return (T) std::cos(x.to_double()); };
-
-template <class T> T asin(T x) { return (T) std::asin(x.to_double()); };
-
-template <class T> T acos(T x) { return (T) std::acos(x.to_double()); };
-
-template <class T> T atan(T x) { return (T) std::atan(x.to_double()); };
-
-// Delegate to std::atan2; calling hls::atan2 here would recurse on itself.
-template <class T> T atan2(T x, T y) { return (T) std::atan2(x.to_double(), y.to_double()); };
-
-}
-#endif
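[Editor's note: these wrappers evaluate in double precision and quantize the result back to the fixed-point return type. A minimal sketch of the consequence, assuming the `ap_types` directory is on the include path:]

```cpp
#include "ap_fixed.h"
#include "hls_math.h"
#include <iostream>

int main() {
    ap_fixed<18, 8> x = 1.5; // 8 integer bits, 10 fractional bits
    // Computed as std::exp(x.to_double()) and then truncated to 10
    // fractional bits, so expect ~4.4814 rather than exact e^1.5 = 4.48169.
    ap_fixed<18, 8> y = hls::exp(x);
    std::cout << y.to_double() << std::endl;
    return 0;
}
```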
diff --git a/hls4ml/hls4ml/templates/vivado/ap_types/hls_stream.h b/hls4ml/hls4ml/templates/vivado/ap_types/hls_stream.h
deleted file mode 100644
index f516c39..0000000
--- a/hls4ml/hls4ml/templates/vivado/ap_types/hls_stream.h
+++ /dev/null
@@ -1,263 +0,0 @@
-/*
-#- (c) Copyright 2011-2018 Xilinx, Inc. All rights reserved.
-#-
-#- This file contains confidential and proprietary information
-#- of Xilinx, Inc. and is protected under U.S. and
-#- international copyright and other intellectual property
-#- laws.
-#-
-#- DISCLAIMER
-#- This disclaimer is not a license and does not grant any
-#- rights to the materials distributed herewith. Except as
-#- otherwise provided in a valid license issued to you by
-#- Xilinx, and to the maximum extent permitted by applicable
-#- law: (1) THESE MATERIALS ARE MADE AVAILABLE "AS IS" AND
-#- WITH ALL FAULTS, AND XILINX HEREBY DISCLAIMS ALL WARRANTIES
-#- AND CONDITIONS, EXPRESS, IMPLIED, OR STATUTORY, INCLUDING
-#- BUT NOT LIMITED TO WARRANTIES OF MERCHANTABILITY, NON-
-#- INFRINGEMENT, OR FITNESS FOR ANY PARTICULAR PURPOSE; and
-#- (2) Xilinx shall not be liable (whether in contract or tort,
-#- including negligence, or under any other theory of
-#- liability) for any loss or damage of any kind or nature
-#- related to, arising under or in connection with these
-#- materials, including for any direct, or any indirect,
-#- special, incidental, or consequential loss or damage
-#- (including loss of data, profits, goodwill, or any type of
-#- loss or damage suffered as a result of any action brought
-#- by a third party) even if such damage or loss was
-#- reasonably foreseeable or Xilinx had been advised of the
-#- possibility of the same.
-#-
-#- CRITICAL APPLICATIONS
-#- Xilinx products are not designed or intended to be fail-
-#- safe, or for use in any application requiring fail-safe
-#- performance, such as life-support or safety devices or
-#- systems, Class III medical devices, nuclear facilities,
-#- applications related to the deployment of airbags, or any
-#- other applications that could lead to death, personal
-#- injury, or severe property or environmental damage
-#- (individually and collectively, "Critical
-#- Applications"). Customer assumes the sole risk and
-#- liability of any use of Xilinx products in Critical
-#- Applications, subject only to applicable laws and
-#- regulations governing limitations on product liability.
-#-
-#- THIS COPYRIGHT NOTICE AND DISCLAIMER MUST BE RETAINED AS
-#- PART OF THIS FILE AT ALL TIMES.
-#- ************************************************************************
-
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
-*/
-
-#ifndef X_HLS_STREAM_SIM_H
-#define X_HLS_STREAM_SIM_H
-
-/*
- * This file contains a C++ model of hls::stream.
- * It defines C simulation model.
- */
-#ifndef __cplusplus
-
-#error C++ is required to include this header file
-
-#else
-
-//////////////////////////////////////////////
-// C level simulation models for hls::stream
-//////////////////////////////////////////////
-#include <deque>
-#include <iostream>
-#include <sstream>
-#include <string>
-#include <typeinfo>
-
-#ifdef HLS_STREAM_THREAD_SAFE
-#include <condition_variable>
-#include <mutex>
-#endif
-
-#ifndef _MSC_VER
-#include <cxxabi.h>
-#include <stdlib.h>
-#endif
-
-namespace hls {
-
-template<typename __STREAM_T__>
-class stream
-{
-  protected:
-    std::string _name;
-    std::deque<__STREAM_T__> _data; // container for the elements
-#ifdef HLS_STREAM_THREAD_SAFE
-    std::mutex _mutex;
-    std::condition_variable _condition_var;
-#endif
-
-  public:
-    /// Constructors
-    // Keep consistent with the synthesis model's constructors
-    stream() {
-      static unsigned _counter = 1;
-      std::stringstream ss;
-#ifndef _MSC_VER
-      char* _demangle_name = abi::__cxa_demangle(typeid(*this).name(), 0, 0, 0);
-      if (_demangle_name) {
-        _name = _demangle_name;
-        free(_demangle_name);
-      }
-      else {
-        _name = "hls_stream";
-      }
-#else
-      _name = typeid(*this).name();
-#endif
-
-      ss << _counter++;
-      _name += "." + ss.str();
-    }
-
-    stream(const std::string name) {
-      // default constructor,
-      // capacity set to predefined maximum
-      _name = name;
-    }
-
-  /// Make copy constructor and assignment operator private
-  private:
-    stream(const stream< __STREAM_T__ >& chn):
-      _name(chn._name), _data(chn._data) {
-    }
-
-    stream& operator = (const stream< __STREAM_T__ >& chn) {
-      _name = chn._name;
-      _data = chn._data;
-      return *this;
-    }
-
-  public:
-    /// Overload >> and << operators to implement read() and write()
-    void operator >> (__STREAM_T__& rdata) {
-      read(rdata);
-    }
-
-    void operator << (const __STREAM_T__& wdata) {
-      write(wdata);
-    }
-
-  public:
-    /// Destructor
-    /// Check status of the queue
-    virtual ~stream() {
-      if (!_data.empty())
-      {
-        std::cout << "WARNING: Hls::stream '"
-                  << _name
-                  << "' contains leftover data,"
-                  << " which may result in RTL simulation hanging."
-                  << std::endl;
-      }
-    }
-
-    /// Status of the queue
-    bool empty() {
-#ifdef HLS_STREAM_THREAD_SAFE
-      std::lock_guard<std::mutex> lg(_mutex);
-#endif
-      return _data.empty();
-    }
-
-    bool full() const { return false; }
-
-    /// Blocking read
-    void read(__STREAM_T__& head) {
-      head = read();
-    }
-
-#ifdef HLS_STREAM_THREAD_SAFE
-    __STREAM_T__ read() {
-      std::unique_lock<std::mutex> ul(_mutex);
-      while (_data.empty()) {
-        _condition_var.wait(ul);
-      }
-
-      __STREAM_T__ elem;
-      elem = _data.front();
-      _data.pop_front();
-      return elem;
-    }
-#else
-    __STREAM_T__ read() {
-      __STREAM_T__ elem;
-      if (_data.empty()) {
-        std::cout << "WARNING: Hls::stream '"
-                  << _name
-                  << "' is read while empty,"
-                  << " which may result in RTL simulation hanging."
-                  << std::endl;
-        elem = __STREAM_T__();
-      } else {
-        elem = _data.front();
-        _data.pop_front();
-      }
-      return elem;
-    }
-#endif
-
-    /// Blocking write
-    void write(const __STREAM_T__& tail) {
-#ifdef HLS_STREAM_THREAD_SAFE
-      std::unique_lock<std::mutex> ul(_mutex);
-#endif
-      _data.push_back(tail);
-#ifdef HLS_STREAM_THREAD_SAFE
-      _condition_var.notify_one();
-#endif
-    }
-
-    /// Nonblocking read
-    bool read_nb(__STREAM_T__& head) {
-#ifdef HLS_STREAM_THREAD_SAFE
-      std::lock_guard<std::mutex> lg(_mutex);
-#endif
-      bool is_empty = _data.empty();
-      if (is_empty) {
-        head = __STREAM_T__();
-      } else {
-        __STREAM_T__ elem(_data.front());
-        _data.pop_front();
-        head = elem;
-      }
-      return !is_empty;
-    }
-
-    /// Nonblocking write
-    bool write_nb(const __STREAM_T__& tail) {
-      bool is_full = full();
-      write(tail);
-      return !is_full;
-    }
-
-    /// Fifo size
-    size_t size() {
-      return _data.size();
-    }
-};
-
-} // namespace hls
-
-#endif // __cplusplus
-#endif // X_HLS_STREAM_SIM_H
-
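[Editor's note: a minimal sketch of how this C-simulation model behaves, grounded in the member functions above; the stream name and values are arbitrary:]

```cpp
#include "hls_stream.h"
#include <cassert>

int main() {
    hls::stream<int> s("my_stream");
    s.write(42);            // blocking write: always succeeds in C sim (unbounded deque)
    s << 7;                 // operator<< forwards to write()
    int v;
    s.read(v);              // blocking read; warns (and default-constructs) if empty
    assert(v == 42);
    bool ok = s.read_nb(v); // non-blocking read: returns false when the FIFO is empty
    assert(ok && v == 7);
    assert(s.empty());      // leftover data at destruction triggers the WARNING above
    return 0;
}
```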
diff --git a/hls4ml/hls4ml/templates/vivado/ap_types/utils/x_hls_utils.h b/hls4ml/hls4ml/templates/vivado/ap_types/utils/x_hls_utils.h
deleted file mode 100644
index 3e751c3..0000000
--- a/hls4ml/hls4ml/templates/vivado/ap_types/utils/x_hls_utils.h
+++ /dev/null
@@ -1,80 +0,0 @@
-#ifndef X_HLS_UTILS_H
-#define X_HLS_UTILS_H
-#include "ap_fixed.h"
-#include <limits>
-
-namespace hls {
-
-  template <class T>
-  class numeric_limits {
-  public:
-    static T max() { return std::numeric_limits<T>::max(); }
-    static T min() { return std::numeric_limits<T>::min(); }
-    static T epsilon() { return std::numeric_limits<T>::epsilon(); }
-  };
-
-  template <int W, int I, ap_q_mode Q, ap_o_mode O>
-  class numeric_limits<ap_fixed<W, I, Q, O> > {
-  public:
-    static ap_fixed<W, I, Q, O> max() {
-      ap_int<W> m = ::hls::numeric_limits<ap_int<W> >::max();
-      ap_fixed<W, I, Q, O> x;
-      x(W-1,0) = m(W-1,0);
-      return x;
-    }
-    static ap_fixed<W, I, Q, O> min() {
-      ap_int<W> m = ::hls::numeric_limits<ap_int<W> >::min();
-      ap_fixed<W, I, Q, O> x;
-      x(W-1,0) = m(W-1,0);
-      return x;
-    }
-    static ap_fixed<W, I, Q, O> epsilon() {
-      ap_fixed<W, I, Q, O> x = 0;
-      x[0] = 1;
-      return x;
-    }
-  };
-
-  template <int W, int I, ap_q_mode Q, ap_o_mode O>
-  class numeric_limits<ap_ufixed<W, I, Q, O> > {
-  public:
-    static ap_ufixed<W, I, Q, O> max() {
-      ap_uint<W> m = ::hls::numeric_limits<ap_uint<W> >::max();
-      ap_ufixed<W, I, Q, O> x;
-      x(W-1,0) = m(W-1,0);
-      return x;
-    }
-    static ap_ufixed<W, I, Q, O> min() { return 0; }
-    static ap_ufixed<W, I, Q, O> epsilon() {
-      ap_ufixed<W, I, Q, O> x = 0;
-      x[0] = 1;
-      return x;
-    }
-  };
-
-  template <int W>
-  class numeric_limits<ap_int<W> > {
-  public:
-    static ap_int<W> max() { ap_int<W> m = min(); return ~m; }
-    static ap_int<W> min() { ap_int<W> m = 0; m[W-1] = 1; return m; }
-    static ap_int<W> epsilon() {
-      ap_int<W> x = 0;
-      x[0] = 1;
-      return x;
-    }
-  };
-
-  template <int W>
-  class numeric_limits<ap_uint<W> > {
-  public:
-    static ap_uint<W> max() { ap_uint<W> zero = 0; return ~zero; }
-    static ap_uint<W> min() { return 0; }
-    static ap_uint<W> epsilon() {
-      ap_uint<W> x = 0;
-      x[0] = 1;
-      return x;
-    }
-  };
-}
-
-#endif
diff --git a/hls4ml/hls4ml/templates/vivado/build_lib.sh b/hls4ml/hls4ml/templates/vivado/build_lib.sh
deleted file mode 100755
index 19f2d0a..0000000
--- a/hls4ml/hls4ml/templates/vivado/build_lib.sh
+++ /dev/null
@@ -1,17 +0,0 @@
-#!/bin/bash
-
-CC=g++
-if [[ "$OSTYPE" == "linux-gnu" ]]; then
-    CFLAGS="-O3 -fPIC -std=c++11 -fno-gnu-unique"
-elif [[ "$OSTYPE" == "darwin"* ]]; then
-    CFLAGS="-O3 -fPIC -std=c++11"
-fi
-LDFLAGS=
-INCFLAGS="-Ifirmware/ap_types/"
-PROJECT=myproject
-LIB_STAMP=mystamp
-
-${CC} ${CFLAGS} ${INCFLAGS} -c firmware/${PROJECT}.cpp -o ${PROJECT}.o
-${CC} ${CFLAGS} ${INCFLAGS} -c ${PROJECT}_bridge.cpp -o ${PROJECT}_bridge.o
-${CC} ${CFLAGS} ${INCFLAGS} -shared ${PROJECT}.o ${PROJECT}_bridge.o -o firmware/${PROJECT}-${LIB_STAMP}.so
-rm -f *.o
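[Editor's note: a minimal sketch of the fixed-point `numeric_limits` specializations above; the chosen width/integer-bit split is arbitrary:]

```cpp
#include "ap_fixed.h"
#include "utils/x_hls_utils.h"
#include <iostream>

int main() {
    // ap_fixed<8, 5>: 8 bits total, 5 integer bits (incl. sign), 3 fractional
    ap_fixed<8, 5> hi  = hls::numeric_limits<ap_fixed<8, 5> >::max();     //  15.875
    ap_fixed<8, 5> lo  = hls::numeric_limits<ap_fixed<8, 5> >::min();     // -16.0
    ap_fixed<8, 5> eps = hls::numeric_limits<ap_fixed<8, 5> >::epsilon(); //   0.125 (one LSB)
    std::cout << hi.to_double() << " " << lo.to_double() << " " << eps.to_double() << std::endl;
    return 0;
}
```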
diff --git a/hls4ml/hls4ml/templates/vivado/build_prj.tcl b/hls4ml/hls4ml/templates/vivado/build_prj.tcl
deleted file mode 100644
index 82b3c5a..0000000
--- a/hls4ml/hls4ml/templates/vivado/build_prj.tcl
+++ /dev/null
@@ -1,250 +0,0 @@
-#################
-#    HLS4ML
-#################
-array set opt {
-    reset      0
-    csim       1
-    synth      1
-    cosim      1
-    validation 1
-    export     0
-    vsynth     0
-    fifo_opt   0
-}
-
-set tcldir [file dirname [info script]]
-source [file join $tcldir project.tcl]
-
-proc remove_recursive_log_wave {} {
-    set tcldir [file dirname [info script]]
-    source [file join $tcldir project.tcl]
-
-    set filename ${project_name}_prj/solution1/sim/verilog/${project_name}.tcl
-    set timestamp [clock format [clock seconds] -format {%Y%m%d%H%M%S}]
-    set temp $filename.new.$timestamp
-    # set backup $filename.bak.$timestamp
-
-    set in [open $filename r]
-    set out [open $temp w]
-
-    # line-by-line, read the original file
-    while {[gets $in line] != -1} {
-        if {[string equal "$line" "log_wave -r /"]} {
-            set line { }
-        }
-        puts $out $line
-    }
-
-    close $in
-    close $out
-
-    # move the new data to the proper filename
-    file delete -force $filename
-    file rename -force $temp $filename
-}
-
-proc add_vcd_instructions_tcl {} {
-    set tcldir [file dirname [info script]]
-    source [file join $tcldir project.tcl]
-
-    set filename ${project_name}_prj/solution1/sim/verilog/${project_name}.tcl
-    set timestamp [clock format [clock seconds] -format {%Y%m%d%H%M%S}]
-    set temp $filename.new.$timestamp
-    # set backup $filename.bak.$timestamp
-
-    set in [open $filename r]
-    set out [open $temp w]
-
-    # line-by-line, read the original file
-    while {[gets $in line] != -1} {
-        if {[string equal "$line" "log_wave -r /"]} {
-            set line {source "../../../../project.tcl"
-                if {[string equal "$backend" "vivadoaccelerator"]} {
-                    current_scope [get_scopes -regex "/apatb_${project_name}_axi_top/AESL_inst_${project_name}_axi/${project_name}_U0.*"]
-                    set scopes [get_scopes -regexp {layer(\d*)_.*data_0_V_U.*}]
-                    append scopes { }
-                    current_scope "/apatb_${project_name}_axi_top/AESL_inst_${project_name}_axi"
-                    append scopes [get_scopes -regexp {(in_local_V_data.*_0_.*)}]
-                    append scopes { }
-                    append scopes [get_scopes -regexp {(out_local_V_data.*_0_.*)}]
-                } else {
-                    current_scope [get_scopes -regex "/apatb_${project_name}_top/AESL_inst_${project_name}"]
-                    set scopes [get_scopes -regexp {layer(\d*)_.*data_0_V_U.*}]
-                }
-                open_vcd fifo_opt.vcd
-                foreach scope $scopes {
-                    current_scope $scope
-                    if {[catch [get_objects usedw]] == 0} {
-                        puts "$scope skipped"
-                        continue
-                    }
-                    set usedw [get_objects usedw]
-                    set depth [get_objects DEPTH]
-                    add_wave $usedw
-                    log_vcd $usedw
-                    log_wave $usedw
-                    add_wave $depth
-                    log_vcd $depth
-                    log_wave $depth
-                }
-            }
-        }
-
-        if {[string equal "$line" "quit"]} {
-            set line {flush_vcd
                close_vcd
                quit
            }
-        }
-        # then write the transformed line
-        puts $out $line
-    }
-
-    close $in
-    close $out
-
-    # move the new data to the proper filename
-    file delete -force $filename
-    file rename -force $temp $filename
-}
-
-foreach arg $::argv {
-    foreach o [lsort [array names opt]] {
-        regexp "$o=+(\\w+)" $arg unused opt($o)
-    }
-}
-
-proc report_time { op_name time_start time_end } {
-    set time_taken [expr $time_end - $time_start]
-    set time_s [expr ($time_taken / 1000) % 60]
-    set time_m [expr ($time_taken / (1000*60)) % 60]
-    set time_h [expr ($time_taken / (1000*60*60)) % 24]
-    puts "***** ${op_name} COMPLETED IN ${time_h}h${time_m}m${time_s}s *****"
-}
-
-# Compare file content: 1 = same, 0 = different
-proc compare_files {file_1 file_2} {
-    # Check if files exist, error otherwise
-    if {! ([file exists $file_1] && [file exists $file_2])} {
-        return 0
-    }
-    # Files with different sizes are obviously different
-    if {[file size $file_1] != [file size $file_2]} {
-        return 0
-    }
-
-    # String compare the content of the files
-    set fh_1 [open $file_1 r]
-    set fh_2 [open $file_2 r]
-    set equal [string equal [read $fh_1] [read $fh_2]]
-    close $fh_1
-    close $fh_2
-    return $equal
-}
-
-file mkdir tb_data
-set CSIM_RESULTS "./tb_data/csim_results.log"
-set RTL_COSIM_RESULTS "./tb_data/rtl_cosim_results.log"
-
-if {$opt(reset)} {
-    open_project -reset ${project_name}_prj
-} else {
-    open_project ${project_name}_prj
-}
-set_top ${project_name}
-add_files firmware/${project_name}.cpp -cflags "-std=c++0x"
-add_files -tb ${project_name}_test.cpp -cflags "-std=c++0x"
-add_files -tb firmware/weights
-add_files -tb tb_data
-if {$opt(reset)} {
-    open_solution -reset "solution1"
-} else {
-    open_solution "solution1"
-}
-catch {config_array_partition -maximum_size 8192}
-config_compile -name_max_length 80
-set_part $part
-config_schedule -enable_dsp_full_reg=false
-create_clock -period $clock_period -name default
-set_clock_uncertainty $clock_uncertainty default
-
-
-if {$opt(csim)} {
-    puts "***** C SIMULATION *****"
-    set time_start [clock clicks -milliseconds]
-    csim_design
-    set time_end [clock clicks -milliseconds]
-    report_time "C SIMULATION" $time_start $time_end
-}
-
-if {$opt(synth)} {
-    puts "***** C/RTL SYNTHESIS *****"
-    set time_start [clock clicks -milliseconds]
-    csynth_design
-    set time_end [clock clicks -milliseconds]
-    report_time "C/RTL SYNTHESIS" $time_start $time_end
-}
-
-if {$opt(cosim)} {
-    puts "***** C/RTL SIMULATION *****"
-    # TODO: This is a workaround (Xilinx defines __RTL_SIMULATION__ only for SystemC testbenches).
-    add_files -tb ${project_name}_test.cpp -cflags "-std=c++0x -DRTL_SIM"
-    set time_start [clock clicks -milliseconds]
-
-    cosim_design -trace_level all -setup
-
-    if {$opt(fifo_opt)} {
-        puts "\[hls4ml\] - FIFO optimization started"
-        add_vcd_instructions_tcl
-    }
-
-    remove_recursive_log_wave
-    set old_pwd [pwd]
-    cd ${project_name}_prj/solution1/sim/verilog/
-    source run_sim.tcl
-    cd $old_pwd
-
-    set time_end [clock clicks -milliseconds]
-    puts "INFO:"
-    if {[string equal "$backend" "vivadoaccelerator"]} {
-        puts [read [open ${project_name}_prj/solution1/sim/report/${project_name}_axi_cosim.rpt r]]
-    } else {
-        puts [read [open ${project_name}_prj/solution1/sim/report/${project_name}_cosim.rpt r]]
-    }
-    report_time "C/RTL SIMULATION" $time_start $time_end
-}
-
-if {$opt(validation)} {
-    puts "***** C/RTL VALIDATION *****"
-    if {[compare_files $CSIM_RESULTS $RTL_COSIM_RESULTS]} {
-        puts "INFO: Test PASSED"
-    } else {
-        puts "ERROR: Test failed"
-        puts "ERROR: - csim log: $CSIM_RESULTS"
-        puts "ERROR: - RTL-cosim log: $RTL_COSIM_RESULTS"
-        exit 1
-    }
-}
-
-if {$opt(export)} {
-    puts "***** EXPORT IP *****"
-    set time_start [clock clicks -milliseconds]
-    export_design -format ip_catalog -version $version
-    set time_end [clock clicks -milliseconds]
-    report_time "EXPORT IP" $time_start $time_end
-}
-
-if {$opt(vsynth)} {
-    puts "***** VIVADO SYNTHESIS *****"
-    if {[file exist ${project_name}_prj/solution1/syn/vhdl]} {
-        set time_start [clock clicks -milliseconds]
-        exec vivado -mode batch -source vivado_synth.tcl >@ stdout
-        set time_end [clock clicks -milliseconds]
-        report_time "VIVADO SYNTHESIS" $time_start $time_end
-    } else {
-        puts "ERROR: Cannot find generated VHDL files. Did you run C synthesis?"
-        exit 1
-    }
-}
-
-exit
diff --git a/hls4ml/hls4ml/templates/vivado/firmware/defines.h b/hls4ml/hls4ml/templates/vivado/firmware/defines.h
deleted file mode 100644
index 1f11b02..0000000
--- a/hls4ml/hls4ml/templates/vivado/firmware/defines.h
+++ /dev/null
@@ -1,14 +0,0 @@
-#ifndef DEFINES_H_
-#define DEFINES_H_
-
-#include "ap_fixed.h"
-#include "ap_int.h"
-#include "nnet_utils/nnet_types.h"
-#include <cstddef>
-#include <cstdio>
-
-// hls-fpga-machine-learning insert numbers
-
-// hls-fpga-machine-learning insert layer-precision
-
-#endif
diff --git a/hls4ml/hls4ml/templates/vivado/firmware/myproject.cpp b/hls4ml/hls4ml/templates/vivado/firmware/myproject.cpp
deleted file mode 100644
index 133c62c..0000000
--- a/hls4ml/hls4ml/templates/vivado/firmware/myproject.cpp
+++ /dev/null
@@ -1,25 +0,0 @@
-#include <iostream>
-
-#include "myproject.h"
-#include "parameters.h"
-
-void myproject(
-    // hls-fpga-machine-learning insert header
-) {
-
-    // hls-fpga-machine-learning insert IO
-
-#ifndef __SYNTHESIS__
-    static bool loaded_weights = false;
-    if (!loaded_weights) {
-        // hls-fpga-machine-learning insert load weights
-        loaded_weights = true;
-    }
-#endif
-
-    // ****************************************
-    // NETWORK INSTANTIATION
-    // ****************************************
-
-    // hls-fpga-machine-learning insert layers
-}
diff --git a/hls4ml/hls4ml/templates/vivado/firmware/myproject.h b/hls4ml/hls4ml/templates/vivado/firmware/myproject.h
deleted file mode 100644
index 4900e97..0000000
--- a/hls4ml/hls4ml/templates/vivado/firmware/myproject.h
+++ /dev/null
@@ -1,15 +0,0 @@
-#ifndef MYPROJECT_H_
-#define MYPROJECT_H_
-
-#include "ap_fixed.h"
-#include "ap_int.h"
-#include "hls_stream.h"
-
-#include "defines.h"
-
-// Prototype of top level function for C-synthesis
-void myproject(
-    // hls-fpga-machine-learning insert header
-);
-
-#endif
diff --git a/hls4ml/hls4ml/templates/vivado/firmware/parameters.h b/hls4ml/hls4ml/templates/vivado/firmware/parameters.h
deleted file mode 100644
index 2d9dded..0000000
--- a/hls4ml/hls4ml/templates/vivado/firmware/parameters.h
+++ /dev/null
@@ -1,15 +0,0 @@
-#ifndef PARAMETERS_H_
-#define PARAMETERS_H_
-
-#include "ap_fixed.h"
-#include "ap_int.h"
-
-#include "nnet_utils/nnet_code_gen.h"
-#include "nnet_utils/nnet_helpers.h"
-// hls-fpga-machine-learning insert includes
-
-// hls-fpga-machine-learning insert weights
-
-// hls-fpga-machine-learning insert layer-config
-
-#endif
diff --git a/hls4ml/hls4ml/templates/vivado/myproject_bridge.cpp b/hls4ml/hls4ml/templates/vivado/myproject_bridge.cpp
deleted file mode 100644
index 35c1997..0000000
--- a/hls4ml/hls4ml/templates/vivado/myproject_bridge.cpp
+++ /dev/null
@@ -1,66 +0,0 @@
-#ifndef MYPROJECT_BRIDGE_H_
-#define MYPROJECT_BRIDGE_H_
-
-#include "firmware/myproject.h"
-#include "firmware/nnet_utils/nnet_helpers.h"
-#include <algorithm>
-#include <map>
-
-// hls-fpga-machine-learning insert bram
-
-namespace nnet {
-bool trace_enabled = false;
-std::map<std::string, void *> *trace_outputs = NULL;
-size_t trace_type_size = sizeof(double);
-} // namespace nnet
-
-extern "C" {
-
-struct trace_data {
-    const char *name;
-    void *data;
-};
-
-void allocate_trace_storage(size_t element_size) {
-    nnet::trace_enabled = true;
-    nnet::trace_outputs = new std::map<std::string, void *>;
-    nnet::trace_type_size = element_size;
-    // hls-fpga-machine-learning insert trace_outputs
-}
-
-void free_trace_storage() {
-    for (std::map<std::string, void *>::iterator i = nnet::trace_outputs->begin(); i != nnet::trace_outputs->end(); i++) {
-        void *ptr = i->second;
-        free(ptr);
-    }
-    nnet::trace_outputs->clear();
-    delete nnet::trace_outputs;
-    nnet::trace_outputs = NULL;
-    nnet::trace_enabled = false;
-}
-
-void collect_trace_output(struct trace_data *c_trace_outputs) {
-    int ii = 0;
-    for (std::map<std::string, void *>::iterator i = nnet::trace_outputs->begin(); i != nnet::trace_outputs->end(); i++) {
-        c_trace_outputs[ii].name = i->first.c_str();
-        c_trace_outputs[ii].data = i->second;
-        ii++;
-    }
-}
-
-// Wrapper of top level function for Python bridge
-void myproject_float(
-    // hls-fpga-machine-learning insert header #float
-) {
-
-    // hls-fpga-machine-learning insert wrapper #float
-}
-
-void myproject_double(
-    // hls-fpga-machine-learning insert header #double
-) {
-    // hls-fpga-machine-learning insert wrapper #double
-}
-}
-
-#endif
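[Editor's note: a minimal sketch of how a host program would drive the tracing C API above; it assumes you link against the shared library produced by build_lib.sh, and the buffer count of 16 is an arbitrary illustrative bound:]

```cpp
#include <cstddef>

// Declarations matching the C API exported by myproject_bridge.cpp
extern "C" {
struct trace_data {
    const char *name;
    void *data;
};
void allocate_trace_storage(size_t element_size);
void free_trace_storage();
void collect_trace_output(struct trace_data *c_trace_outputs);
}

int main() {
    allocate_trace_storage(sizeof(double)); // one buffer per traced layer
    // ... call myproject_double(...) here so the traced layers fill their buffers ...
    trace_data traces[16]; // must be at least the number of traced layers
    collect_trace_output(traces);
    // traces[i].name / traces[i].data now reference layer names and outputs
    free_trace_storage();
    return 0;
}
```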
diff --git a/hls4ml/hls4ml/templates/vivado/myproject_test.cpp b/hls4ml/hls4ml/templates/vivado/myproject_test.cpp
deleted file mode 100644
index 2fd9747..0000000
--- a/hls4ml/hls4ml/templates/vivado/myproject_test.cpp
+++ /dev/null
@@ -1,92 +0,0 @@
-#include <algorithm>
-#include <fstream>
-#include <iostream>
-#include <map>
-#include <math.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <vector>
-
-#include "firmware/myproject.h"
-#include "firmware/nnet_utils/nnet_helpers.h"
-
-// hls-fpga-machine-learning insert bram
-
-#define CHECKPOINT 5000
-
-namespace nnet {
-bool trace_enabled = true;
-std::map<std::string, void *> *trace_outputs = NULL;
-size_t trace_type_size = sizeof(double);
-} // namespace nnet
-
-int main(int argc, char **argv) {
-    // load input data from text file
-    std::ifstream fin("tb_data/tb_input_features.dat");
-    // load predictions from text file
-    std::ifstream fpr("tb_data/tb_output_predictions.dat");
-
-#ifdef RTL_SIM
-    std::string RESULTS_LOG = "tb_data/rtl_cosim_results.log";
-#else
-    std::string RESULTS_LOG = "tb_data/csim_results.log";
-#endif
-    std::ofstream fout(RESULTS_LOG);
-
-    std::string iline;
-    std::string pline;
-    int e = 0;
-
-    if (fin.is_open() && fpr.is_open()) {
-        while (std::getline(fin, iline) && std::getline(fpr, pline)) {
-            if (e % CHECKPOINT == 0)
-                std::cout << "Processing input " << e << std::endl;
-            char *cstr = const_cast<char *>(iline.c_str());
-            char *current;
-            std::vector<float> in;
-            current = strtok(cstr, " ");
-            while (current != NULL) {
-                in.push_back(atof(current));
-                current = strtok(NULL, " ");
-            }
-            cstr = const_cast<char *>(pline.c_str());
-            std::vector<float> pr;
-            current = strtok(cstr, " ");
-            while (current != NULL) {
-                pr.push_back(atof(current));
-                current = strtok(NULL, " ");
-            }
-
-            // hls-fpga-machine-learning insert data
-
-            // hls-fpga-machine-learning insert top-level-function
-
-            if (e % CHECKPOINT == 0) {
-                std::cout << "Predictions" << std::endl;
-                // hls-fpga-machine-learning insert predictions
-                std::cout << "Quantized predictions" << std::endl;
-                // hls-fpga-machine-learning insert quantized
-            }
-            e++;
-
-            // hls-fpga-machine-learning insert tb-output
-        }
-        fin.close();
-        fpr.close();
-    } else {
-        std::cout << "INFO: Unable to open input/predictions file, using default input." << std::endl;
-
-        // hls-fpga-machine-learning insert zero
-
-        // hls-fpga-machine-learning insert top-level-function
-
-        // hls-fpga-machine-learning insert output
-
-        // hls-fpga-machine-learning insert tb-output
-    }
-
-    fout.close();
-    std::cout << "INFO: Saved inference results to file: " << RESULTS_LOG << std::endl;
-
-    return 0;
-}
diff --git a/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_activation.h b/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_activation.h
deleted file mode 100644
index 8baadf2..0000000
--- a/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_activation.h
+++ /dev/null
@@ -1,777 +0,0 @@
-#ifndef NNET_ACTIVATION_H_
-#define NNET_ACTIVATION_H_
-
-#include "ap_fixed.h"
-#include "nnet_common.h"
-#include <cmath>
-
-namespace nnet {
-
-struct activ_config {
-    // IO size
-    static const unsigned n_in = 10;
-
-    // Internal info
-    static const unsigned table_size = 1024;
-
-    // Resource reuse info
-    static const unsigned io_type = io_parallel;
-    static const unsigned reuse_factor = 1;
-
-    // Internal data type definitions
-    typedef ap_fixed<18, 8> table_t;
-};
-
-// *************************************************
-//       LINEAR Activation -- See Issue 53
-// *************************************************
-template <class data_T, class res_T, typename CONFIG_T> void linear(data_T data[CONFIG_T::n_in], res_T res[CONFIG_T::n_in]) {
-    #pragma HLS PIPELINE
-
-    for (int ii = 0; ii < CONFIG_T::n_in; ii++) {
-        res[ii] = data[ii];
-    }
-}
-
-// *************************************************
-//       RELU Activation
-// *************************************************
-template <class data_T, class res_T, typename CONFIG_T> void relu(data_T data[CONFIG_T::n_in], res_T res[CONFIG_T::n_in]) {
-    #pragma HLS PIPELINE
-
-    data_T datareg;
-    for (int ii = 0; ii < CONFIG_T::n_in; ii++) {
-        datareg = data[ii];
-        if (datareg > 0)
-            res[ii] = datareg;
-        else
-            res[ii] = 0;
-    }
-}
-
-template <class data_T, class res_T, int MAX_INT, typename CONFIG_T>
-void relu_max(data_T data[CONFIG_T::n_in], res_T res[CONFIG_T::n_in]) {
-    #pragma HLS PIPELINE
-
-    data_T datareg;
-    for (int ii = 0; ii < CONFIG_T::n_in; ii++) {
-        datareg = data[ii];
-        if (datareg < 0)
-            res[ii] = 0;
-        else if (datareg > MAX_INT)
-            res[ii] = MAX_INT;
-        else
-            res[ii] = datareg;
-    }
-}
-
-template <class data_T, class res_T, typename CONFIG_T> void relu6(data_T data[CONFIG_T::n_in], res_T res[CONFIG_T::n_in]) {
-    relu_max<data_T, res_T, 6, CONFIG_T>(data, res);
-}
-
-template <class data_T, class res_T, typename CONFIG_T> void relu1(data_T data[CONFIG_T::n_in], res_T res[CONFIG_T::n_in]) {
-    relu_max<data_T, res_T, 1, CONFIG_T>(data, res);
-}
-
-// *************************************************
-//       Sigmoid Activation
-// *************************************************
-inline float sigmoid_fcn_float(float input) { return 1.0 / (1 + std::exp(-input)); }
-
-template <typename CONFIG_T, int N_TABLE> void init_sigmoid_table(typename CONFIG_T::table_t table_out[N_TABLE]) {
-    // Default logistic sigmoid function:
-    //   result = 1/(1+e^(-x))
-    for (int ii = 0; ii < N_TABLE; ii++) {
-        // First, convert from table index to X-value (signed 8-bit, range -8 to +8)
-        float in_val = 2 * 8.0 * (ii - float(N_TABLE) / 2.0) / float(N_TABLE);
-        // Next, compute lookup table function
-        typename CONFIG_T::table_t real_val = sigmoid_fcn_float(in_val);
-        // std::cout << "Lookup table In Value: " << in_val << " Result: " << real_val << std::endl;
-        table_out[ii] = real_val;
-    }
-}
-
-template <class data_T, class res_T, typename CONFIG_T>
-void sigmoid(data_T data[CONFIG_T::n_in], res_T res[CONFIG_T::n_in]) {
-    // Initialize the lookup table
-#ifdef __HLS_SYN__
-    bool initialized = false;
-    typename CONFIG_T::table_t sigmoid_table[CONFIG_T::table_size];
-#else
-    static bool initialized = false;
-    static typename CONFIG_T::table_t sigmoid_table[CONFIG_T::table_size];
-#endif
-    if (!initialized) {
-        init_sigmoid_table<CONFIG_T, CONFIG_T::table_size>(sigmoid_table);
- initialized = true; - } - - #pragma HLS PIPELINE - - // Index into the lookup table based on data - int data_round; - int index; - for (int ii = 0; ii < CONFIG_T::n_in; ii++) { - data_round = data[ii] * CONFIG_T::table_size / 16; - index = data_round + 8 * CONFIG_T::table_size / 16; - if (index < 0) - index = 0; - if (index > CONFIG_T::table_size - 1) - index = CONFIG_T::table_size - 1; - res[ii] = (res_T)sigmoid_table[index]; - } -} - -// ************************************************* -// Softmax Activation -// ************************************************* - -enum class softmax_implementation { latency = 0, legacy = 1, stable = 2, argmax = 3 }; - -inline float exp_fcn_float(float input) { return std::exp(input); } - -template inline float softmax_real_val_from_idx(unsigned i) { - // Treat the index as the top N bits - static constexpr int N = ceillog2(CONFIG_T::table_size); // number of address bits for table - data_T x(0); - x(x.width - 1, x.width - N) = i; - return (float)x; -} - -template inline unsigned softmax_idx_from_real_val(data_T x) { - // Slice the top N bits to get an index into the table - static constexpr int N = ceillog2(CONFIG_T::table_size); // number of address bits for table - ap_uint y = x(x.width - 1, x.width - N); // slice the top N bits of input - return (unsigned)y(N - 1, 0); -} - -template -void init_exp_table(typename CONFIG_T::exp_table_t table_out[CONFIG_T::table_size]) { - // The template data_T is the data type used to address the table - for (unsigned i = 0; i < CONFIG_T::table_size; i++) { - // Slicing bits for address is going to round towards 0, so take the central value - float x = softmax_real_val_from_idx(i); - typename CONFIG_T::exp_table_t exp_x = exp_fcn_float(x); - table_out[i] = exp_x; - } -} - -template -void init_invert_table(typename CONFIG_T::inv_table_t table_out[CONFIG_T::table_size]) { - // The template data_T is the data type used to address the table - for (unsigned i = 0; i < CONFIG_T::table_size; i++) { - float x = softmax_real_val_from_idx(i); - typename CONFIG_T::inv_table_t inv_x = 1 / x; - table_out[i] = inv_x; - } -} - -template -void softmax_latency(data_T data[CONFIG_T::n_in], res_T res[CONFIG_T::n_in]) { - #pragma HLS pipeline - // Initialize the lookup tables -#ifdef __HLS_SYN__ - bool initialized = false; - typename CONFIG_T::exp_table_t exp_table[CONFIG_T::table_size]; - typename CONFIG_T::inv_table_t invert_table[CONFIG_T::table_size]; -#else - static bool initialized = false; - static typename CONFIG_T::exp_table_t exp_table[CONFIG_T::table_size]; - static typename CONFIG_T::inv_table_t invert_table[CONFIG_T::table_size]; - -#endif - if (!initialized) { - // Note we are exponentiating the inputs, which have type data_T - init_exp_table(exp_table); - // Note we are inverting the exponentials, which have type exp_table_t - init_invert_table(invert_table); - initialized = true; - } - - // Calculate all the e^x's - typename CONFIG_T::exp_table_t exp_res[CONFIG_T::n_in]; - #pragma HLS array_partition variable=exp_res complete - typename CONFIG_T::exp_table_t exp_sum(0); - for (unsigned i = 0; i < CONFIG_T::n_in; i++) { - #pragma HLS unroll - unsigned x = softmax_idx_from_real_val(data[i]); - exp_res[i] = exp_table[x]; - } - - // Explicitly sum the results with an adder tree. 
- // Rounding & Saturation mode, which improve accuracy, prevent Vivado from expression balancing - Op_add op_add; - exp_sum = - reduce>(exp_res, op_add); - - typename CONFIG_T::inv_table_t inv_exp_sum = - invert_table[softmax_idx_from_real_val(exp_sum)]; - for (unsigned i = 0; i < CONFIG_T::n_in; i++) { - #pragma HLS unroll - res[i] = exp_res[i] * inv_exp_sum; - } -} - -template -void softmax_stable(data_T data[CONFIG_T::n_in], res_T res[CONFIG_T::n_in]) { - #pragma HLS pipeline - // Initialize the lookup tables -#ifdef __HLS_SYN__ - bool initialized = false; - typename CONFIG_T::exp_table_t exp_table[CONFIG_T::table_size]; - typename CONFIG_T::inv_table_t invert_table[CONFIG_T::table_size]; -#else - static bool initialized = false; - static typename CONFIG_T::exp_table_t exp_table[CONFIG_T::table_size]; - static typename CONFIG_T::inv_table_t invert_table[CONFIG_T::table_size]; - -#endif - if (!initialized) { - // Note we are exponentiating the inputs, which have type data_T - init_exp_table(exp_table); - // Note we are inverting the exponentials, which have type exp_table_t - init_invert_table(invert_table); - initialized = true; - } - - // Find the max and compute all delta(x_i, x_max) - Op_max op_max; - data_T x_max = reduce>(data, op_max); - - // For the diffs, use the same type as the input but force rounding and saturation - ap_fixed d_xi_xmax[CONFIG_T::n_in]; - for (unsigned i = 0; i < CONFIG_T::n_in; i++) { - #pragma HLS unroll - d_xi_xmax[i] = data[i] - x_max; - } - - // Calculate all the e^x's - typename CONFIG_T::exp_table_t exp_res[CONFIG_T::n_in]; - #pragma HLS array_partition variable=exp_res complete - typename CONFIG_T::exp_table_t exp_sum(0); - for (unsigned i = 0; i < CONFIG_T::n_in; i++) { - #pragma HLS unroll - unsigned x = softmax_idx_from_real_val(d_xi_xmax[i]); - exp_res[i] = exp_table[x]; - } - - // Explicitly sum the results with an adder tree. 
- // Rounding & Saturation mode, which improve accuracy, prevent Vivado from expression balancing - Op_add op_add; - exp_sum = - reduce>(exp_res, op_add); - - typename CONFIG_T::inv_table_t inv_exp_sum = - invert_table[softmax_idx_from_real_val(exp_sum)]; - for (unsigned i = 0; i < CONFIG_T::n_in; i++) { - #pragma HLS unroll - res[i] = exp_res[i] * inv_exp_sum; - } -} - -template void init_exp_table_legacy(typename CONFIG_T::table_t table_out[N_TABLE]) { - for (int ii = 0; ii < N_TABLE; ii++) { - // First, convert from table index to X-value (signed 8-bit, range -8 to +8) - float in_val = 2 * 8.0 * (ii - float(N_TABLE) / 2.0) / float(N_TABLE); - // Next, compute lookup table function - typename CONFIG_T::table_t real_val = exp_fcn_float(in_val); - // std::cout << "Lookup table In Value: " << in_val << " Result: " << real_val << std::endl; - table_out[ii] = real_val; - } -} - -template void init_invert_table_legacy(typename CONFIG_T::table_t table_out[N_TABLE]) { - // Inversion function: - // result = 1/x - for (int ii = 0; ii < N_TABLE; ii++) { - // First, convert from table index to X-value (signed 8-bit, range 0 to +64) - float in_val = 64.0 * ii / float(N_TABLE); - // Next, compute lookup table function - if (in_val > 0.0) - table_out[ii] = 1.0 / in_val; - else - table_out[ii] = 0.0; - } -} - -template -void softmax_legacy(data_T data[CONFIG_T::n_in], res_T res[CONFIG_T::n_in]) { - // Initialize the lookup table -#ifdef __HLS_SYN__ - bool initialized = false; - typename CONFIG_T::table_t exp_table[CONFIG_T::table_size]; - typename CONFIG_T::table_t invert_table[CONFIG_T::table_size]; -#else - static bool initialized = false; - static typename CONFIG_T::table_t exp_table[CONFIG_T::table_size]; - static typename CONFIG_T::table_t invert_table[CONFIG_T::table_size]; -#endif - if (!initialized) { - init_exp_table_legacy(exp_table); - init_invert_table_legacy(invert_table); - initialized = true; - } - - #pragma HLS PIPELINE - - // Index into the lookup table based on data for exponentials - typename CONFIG_T::table_t exp_res[CONFIG_T::n_in]; // different, independent, fixed point precision - typename CONFIG_T::table_t exp_diff_res; // different, independent, fixed point precision - data_T data_cache[CONFIG_T::n_in]; - int data_round; - int index; - for (int ii = 0; ii < CONFIG_T::n_in; ii++) { - data_cache[ii] = data[ii]; - exp_res[ii] = 0; - } - - for (int ii = 0; ii < CONFIG_T::n_in; ii++) { - for (int jj = 0; jj < CONFIG_T::n_in; jj++) { - if (ii == jj) - exp_diff_res = 1; - else { - data_round = (data_cache[jj] - data_cache[ii]) * CONFIG_T::table_size / 16; - index = data_round + 8 * CONFIG_T::table_size / 16; - if (index < 0) - index = 0; - if (index > CONFIG_T::table_size - 1) - index = CONFIG_T::table_size - 1; - exp_diff_res = exp_table[index]; - } - exp_res[ii] += exp_diff_res; - } - } - - // Second loop to invert - for (int ii = 0; ii < CONFIG_T::n_in; ii++) { - int exp_res_index = exp_res[ii] * CONFIG_T::table_size / 64; - if (exp_res_index < 0) - exp_res_index = 0; - if (exp_res_index > CONFIG_T::table_size - 1) - exp_res_index = CONFIG_T::table_size - 1; - // typename CONFIG_T::table_t exp_res_invert = invert_table[exp_res_index]; - res[ii] = (res_T)invert_table[exp_res_index]; - } -} - -template -void softmax_argmax(data_T data[CONFIG_T::n_in], res_T res[CONFIG_T::n_in]) { - for (int i = 0; i < CONFIG_T::n_in; i++) { - #pragma HLS UNROLL - res[i] = (res_T)0; - } - - data_T maximum = data[0]; - int idx = 0; - - for (int i = 1; i < CONFIG_T::n_in; i++) { - #pragma HLS PIPELINE - 
if (data[i] > maximum) { - maximum = data[i]; - idx = i; - } - } - - res[idx] = (res_T)1; -} - -template -void softmax(data_T data[CONFIG_T::n_in], res_T res[CONFIG_T::n_in]) { - #pragma HLS inline - switch (CONFIG_T::implementation) { - case softmax_implementation::latency: - softmax_latency(data, res); - break; - case softmax_implementation::stable: - softmax_stable(data, res); - break; - case softmax_implementation::legacy: - softmax_legacy(data, res); - break; - case softmax_implementation::argmax: - softmax_argmax(data, res); - break; - } -} - -// ************************************************* -// TanH Activation -// ************************************************* -template void init_tanh_table(typename CONFIG_T::table_t table_out[N_TABLE]) { - // Implement tanh lookup - for (int ii = 0; ii < N_TABLE; ii++) { - // First, convert from table index to X-value (signed 8-bit, range -4 to +4) - float in_val = 2 * 4.0 * (ii - float(N_TABLE) / 2.0) / float(N_TABLE); - // Next, compute lookup table function - typename CONFIG_T::table_t real_val = tanh(in_val); - // std::cout << "Tanh: Lookup table Index: " << ii<< " In Value: " << in_val << " Result: " << real_val << - // std::endl; - table_out[ii] = real_val; - } -} - -template void tanh(data_T data[CONFIG_T::n_in], res_T res[CONFIG_T::n_in]) { - // Initialize the lookup table -#ifdef __HLS_SYN__ - bool initialized = false; - typename CONFIG_T::table_t tanh_table[CONFIG_T::table_size]; -#else - static bool initialized = false; - static typename CONFIG_T::table_t tanh_table[CONFIG_T::table_size]; -#endif - if (!initialized) { - init_tanh_table(tanh_table); - initialized = true; - } - - #pragma HLS PIPELINE - - // Index into the lookup table based on data - int data_round; - int index; - for (int ii = 0; ii < CONFIG_T::n_in; ii++) { - data_round = data[ii] * CONFIG_T::table_size / 8; - index = data_round + 4 * CONFIG_T::table_size / 8; - // std::cout << "Input: " << data[ii] << " Round: " << data_round << " Index: " << index << std::endl; - if (index < 0) - index = 0; - if (index > CONFIG_T::table_size - 1) - index = CONFIG_T::table_size - 1; - res[ii] = (res_T)tanh_table[index]; - } -} - -// ************************************************* -// Hard sigmoid Activation -// ************************************************* -template -void hard_sigmoid(data_T data[CONFIG_T::n_in], res_T res[CONFIG_T::n_in]) { - #pragma HLS PIPELINE - - for (int ii = 0; ii < CONFIG_T::n_in; ii++) { - auto datareg = CONFIG_T::slope * data[ii] + CONFIG_T::shift; - if (datareg > 1) - datareg = 1; - else if (datareg < 0) - datareg = 0; - res[ii] = datareg; - } -} - -template -void hard_tanh(data_T data[CONFIG_T::n_in], res_T res[CONFIG_T::n_in]) { - if (CONFIG_T::io_type == io_parallel) { - #pragma HLS PIPELINE - } - - for (int ii = 0; ii < CONFIG_T::n_in; ii++) { - auto sigmoid = CONFIG_T::slope * data[ii] + CONFIG_T::shift; - if (sigmoid > 1) - sigmoid = 1; - else if (sigmoid < 0) - sigmoid = 0; - res[ii] = 2 * sigmoid - 1; - } -} - -// ************************************************* -// Leaky RELU Activation -// ************************************************* -template -void leaky_relu(data_T data[CONFIG_T::n_in], data_T alpha, res_T res[CONFIG_T::n_in]) { - #pragma HLS PIPELINE - - data_T datareg; - for (int ii = 0; ii < CONFIG_T::n_in; ii++) { - datareg = data[ii]; - if (datareg > 0) - res[ii] = datareg; - else - res[ii] = alpha * datareg; - } -} - -// ************************************************* -// Thresholded RELU Activation -// 
************************************************* -template -void thresholded_relu(data_T data[CONFIG_T::n_in], data_T theta, res_T res[CONFIG_T::n_in]) { - #pragma HLS PIPELINE - - data_T datareg; - for (int ii = 0; ii < CONFIG_T::n_in; ii++) { - datareg = data[ii]; - if (datareg > theta) - res[ii] = datareg; - else - res[ii] = 0; - } -} - -// ************************************************* -// Softplus Activation -// ************************************************* -inline float softplus_fcn_float(float input) { return std::log(std::exp(input) + 1.); } - -template void init_softplus_table(typename CONFIG_T::table_t table_out[N_TABLE]) { - // Default softplus function: - // result = log(exp(x) + 1) - for (int ii = 0; ii < N_TABLE; ii++) { - // First, convert from table index to X-value (signed 8-bit, range -8 to +8) - float in_val = 2 * 8.0 * (ii - float(N_TABLE) / 2.0) / float(N_TABLE); - // Next, compute lookup table function - typename CONFIG_T::table_t real_val = softplus_fcn_float(in_val); - // std::cout << "Lookup table In Value: " << in_val << " Result: " << real_val << std::endl; - table_out[ii] = real_val; - } -} - -template -void softplus(data_T data[CONFIG_T::n_in], res_T res[CONFIG_T::n_in]) { - // Initialize the lookup table -#ifdef __HLS_SYN__ - bool initialized = false; - typename CONFIG_T::table_t softplus_table[CONFIG_T::table_size]; -#else - static bool initialized = false; - static typename CONFIG_T::table_t softplus_table[CONFIG_T::table_size]; -#endif - if (!initialized) { - init_softplus_table(softplus_table); - initialized = true; - } - - #pragma HLS PIPELINE - - // Index into the lookup table based on data - int data_round; - int index; - for (int ii = 0; ii < CONFIG_T::n_in; ii++) { - data_round = data[ii] * CONFIG_T::table_size / 16; - index = data_round + 8 * CONFIG_T::table_size / 16; - if (index < 0) - index = 0; - if (index > CONFIG_T::table_size - 1) - index = CONFIG_T::table_size - 1; - res[ii] = (res_T)softplus_table[index]; - } -} - -// ************************************************* -// Softsign Activation -// ************************************************* -inline float softsign_fcn_float(float input) { return input / (std::abs(input) + 1.); } - -template void init_softsign_table(typename CONFIG_T::table_t table_out[N_TABLE]) { - // Default softsign function: - // result = x / (abs(x) + 1) - for (int ii = 0; ii < N_TABLE; ii++) { - // First, convert from table index to X-value (signed 8-bit, range -8 to +8) - float in_val = 2 * 8.0 * (ii - float(N_TABLE) / 2.0) / float(N_TABLE); - // Next, compute lookup table function - typename CONFIG_T::table_t real_val = softsign_fcn_float(in_val); - // std::cout << "Lookup table In Value: " << in_val << " Result: " << real_val << std::endl; - table_out[ii] = real_val; - } -} - -template -void softsign(data_T data[CONFIG_T::n_in], res_T res[CONFIG_T::n_in]) { - // Initialize the lookup table -#ifdef __HLS_SYN__ - bool initialized = false; - typename CONFIG_T::table_t softsign_table[CONFIG_T::table_size]; -#else - static bool initialized = false; - static typename CONFIG_T::table_t softsign_table[CONFIG_T::table_size]; -#endif - if (!initialized) { - init_softsign_table(softsign_table); - initialized = true; - } - - #pragma HLS PIPELINE - - // Index into the lookup table based on data - int data_round; - int index; - for (int ii = 0; ii < CONFIG_T::n_in; ii++) { - data_round = data[ii] * CONFIG_T::table_size / 16; - index = data_round + 8 * CONFIG_T::table_size / 16; - if (index < 0) - index = 0; - if (index > 
CONFIG_T::table_size - 1) - index = CONFIG_T::table_size - 1; - res[ii] = (res_T)softsign_table[index]; - } -} - -// ************************************************* -// ELU Activation -// ************************************************* -inline float elu_fcn_float(float input) { return std::exp(input) - 1.; } - -template void init_elu_table(typename CONFIG_T::table_t table_out[N_TABLE]) { - // Default ELU function: - // result = alpha * (e^(x) - 1) - for (int ii = 0; ii < N_TABLE; ii++) { - // First, convert from table index to X-value (signed 8-bit, range -8 to 0) - float in_val = -8.0 * ii / float(N_TABLE); - // Next, compute lookup table function - typename CONFIG_T::table_t real_val = elu_fcn_float(in_val); - // std::cout << "Lookup table In Value: " << in_val << " Result: " << real_val << std::endl; - table_out[ii] = real_val; - } -} - -template -void elu(data_T data[CONFIG_T::n_in], const res_T alpha, res_T res[CONFIG_T::n_in]) { - // Initialize the lookup table -#ifdef __HLS_SYN__ - bool initialized = false; - typename CONFIG_T::table_t elu_table[CONFIG_T::table_size]; -#else - static bool initialized = false; - static typename CONFIG_T::table_t elu_table[CONFIG_T::table_size]; -#endif - if (!initialized) { - init_elu_table(elu_table); - initialized = true; - } - - #pragma HLS PIPELINE - - data_T datareg; - // Index into the lookup table based on data - int index; - for (int ii = 0; ii < CONFIG_T::n_in; ii++) { - datareg = data[ii]; - if (datareg >= 0) { - res[ii] = datareg; - } else { - index = datareg * CONFIG_T::table_size / -8; - if (index > CONFIG_T::table_size - 1) - index = CONFIG_T::table_size - 1; - res[ii] = alpha * elu_table[index]; - } - } -} - -template void elu(data_T data[CONFIG_T::n_in], res_T res[CONFIG_T::n_in]) { - elu(data, 1.0, res); -} - -// ************************************************* -// SELU Activation -// ************************************************* -inline float selu_fcn_float(float input) { - return 1.0507009873554804934193349852946 * (1.6732632423543772848170429916717 * (std::exp(input) - 1.)); -} - -template void init_selu_table(typename CONFIG_T::table_t table_out[N_TABLE]) { - // Default SELU function: - // result = 1.05 * (1.673 * (e^(x) - 1)) - for (int ii = 0; ii < N_TABLE; ii++) { - // First, convert from table index to X-value (signed 8-bit, range -8 to 0) - float in_val = -8.0 * ii / float(N_TABLE); - // Next, compute lookup table function - typename CONFIG_T::table_t real_val = selu_fcn_float(in_val); - // std::cout << "Lookup table In Value: " << in_val << " Result: " << real_val << std::endl; - table_out[ii] = real_val; - } -} - -template void selu(data_T data[CONFIG_T::n_in], res_T res[CONFIG_T::n_in]) { - // Initialize the lookup table -#ifdef __HLS_SYN__ - bool initialized = false; - typename CONFIG_T::table_t selu_table[CONFIG_T::table_size]; -#else - static bool initialized = false; - static typename CONFIG_T::table_t selu_table[CONFIG_T::table_size]; -#endif - if (!initialized) { - init_selu_table(selu_table); - initialized = true; - } - - #pragma HLS PIPELINE - - data_T datareg; - // Index into the lookup table based on data - int index; - for (int ii = 0; ii < CONFIG_T::n_in; ii++) { - datareg = data[ii]; - if (datareg >= 0) { - res[ii] = res_T(1.0507009873554804934193349852946) * datareg; - } else { - index = datareg * CONFIG_T::table_size / -8; - if (index > CONFIG_T::table_size - 1) - index = CONFIG_T::table_size - 1; - res[ii] = selu_table[index]; - } - } -} - -// ************************************************* -// 
PReLU Activation -// ************************************************* -template -void prelu(data_T data[CONFIG_T::n_in], data_T alpha[CONFIG_T::n_in], res_T res[CONFIG_T::n_in]) { - #pragma HLS PIPELINE - - data_T datareg; - for (int ii = 0; ii < CONFIG_T::n_in; ii++) { - datareg = data[ii]; - if (datareg > 0) - res[ii] = datareg; - else - res[ii] = alpha[ii] * datareg; - } -} - -// ************************************************* -// Binary TanH Activation -// ************************************************* -template -void binary_tanh(data_T data[CONFIG_T::n_in], res_T res[CONFIG_T::n_in]) { - #pragma HLS PIPELINE - - data_T datareg; - res_T cache; - for (int ii = 0; ii < CONFIG_T::n_in; ii++) { - datareg = data[ii]; - if (datareg > 0) - cache = 1; - else - cache = -1; - - res[ii] = (res_T)cache; - } -} - -// ************************************************* -// Ternary TanH Activation -// ************************************************* -template -void ternary_tanh(data_T data[CONFIG_T::n_in], res_T res[CONFIG_T::n_in]) { - #pragma HLS PIPELINE - - data_T datareg; - res_T cache; - for (int ii = 0; ii < CONFIG_T::n_in; ii++) { - datareg = 2 * data[ii]; - if (datareg > 1) - cache = 1; - else if (datareg > -1 && datareg <= 1) - cache = 0; - else - cache = -1; - - res[ii] = (res_T)cache; - } -} - -} // namespace nnet - -#endif diff --git a/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_activation_stream.h b/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_activation_stream.h deleted file mode 100644 index b72809e..0000000 --- a/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_activation_stream.h +++ /dev/null @@ -1,777 +0,0 @@ -#ifndef NNET_ACTIVATION_STREAM_H_ -#define NNET_ACTIVATION_STREAM_H_ - -#include "ap_fixed.h" -#include "hls_stream.h" -#include "nnet_activation.h" -#include "nnet_common.h" -#include "nnet_stream.h" -#include "nnet_types.h" -#include - -namespace nnet { - -// ************************************************* -// LINEAR Activation -// ************************************************* -template void linear(hls::stream &data, hls::stream &res) { -LinearActLoop: - for (int i = 0; i < CONFIG_T::n_in / res_T::size; i++) { - #pragma HLS PIPELINE - - data_T in_data = data.read(); - res_T out_data; - PRAGMA_DATA_PACK(out_data) - - LinearPackLoop: - for (int j = 0; j < res_T::size; j++) { - #pragma HLS UNROLL - out_data[j] = in_data[j]; - } - - res.write(out_data); - } -} - -// ************************************************* -// RELU Activation -// ************************************************* -template void relu(hls::stream &data, hls::stream &res) { -ReLUActLoop: - for (int i = 0; i < CONFIG_T::n_in / res_T::size; i++) { - #pragma HLS PIPELINE - - data_T in_data = data.read(); - res_T out_data; - PRAGMA_DATA_PACK(out_data) - - ReLUPackLoop: - for (int j = 0; j < res_T::size; j++) { - #pragma HLS UNROLL - if (in_data[j] > 0) - out_data[j] = in_data[j]; - else - out_data[j] = 0; - } - - res.write(out_data); - } -} - -// ************************************************* -// Sigmoid Activation -// ************************************************* - -template void sigmoid(hls::stream &data, hls::stream &res) { - // Initialize the lookup table -#ifdef __HLS_SYN__ - bool initialized = false; - typename CONFIG_T::table_t sigmoid_table[CONFIG_T::table_size]; -#else - static bool initialized = false; - static typename CONFIG_T::table_t sigmoid_table[CONFIG_T::table_size]; -#endif - if (!initialized) { - init_sigmoid_table(sigmoid_table); - initialized = true; - } - 
-SigmoidActLoop: - for (int i = 0; i < CONFIG_T::n_in / res_T::size; i++) { - #pragma HLS PIPELINE - - data_T in_data = data.read(); - res_T out_data; - PRAGMA_DATA_PACK(out_data) - - SigmoidPackLoop: - for (int j = 0; j < res_T::size; j++) { - #pragma HLS UNROLL - int data_round = in_data[j] * CONFIG_T::table_size / 16; - int index = data_round + 8 * CONFIG_T::table_size / 16; - if (index < 0) - index = 0; - else if (index > CONFIG_T::table_size - 1) - index = CONFIG_T::table_size - 1; - out_data[j] = sigmoid_table[index]; - } - - res.write(out_data); - } -} - -// ************************************************* -// Softmax Activation -// ************************************************* - -template -void softmax_latency(hls::stream &data, hls::stream &res) { - // Initialize the lookup tables -#ifdef __HLS_SYN__ - bool initialized = false; - typename CONFIG_T::exp_table_t exp_table[CONFIG_T::table_size]; - typename CONFIG_T::inv_table_t invert_table[CONFIG_T::table_size]; -#else - static bool initialized = false; - static typename CONFIG_T::exp_table_t exp_table[CONFIG_T::table_size]; - static typename CONFIG_T::inv_table_t invert_table[CONFIG_T::table_size]; - -#endif - if (!initialized) { - // Note we are exponentiating the inputs, which have type data_T - init_exp_table(exp_table); - // Note we are inverting the exponentials, which have type exp_table_t - init_invert_table(invert_table); - initialized = true; - } - - constexpr unsigned multiplier_limit = DIV_ROUNDUP(data_T::size, CONFIG_T::reuse_factor); - constexpr unsigned ii = data_T::size / multiplier_limit; - - // Calculate all the e^x's - typename CONFIG_T::exp_table_t exp_res[data_T::size]; - #pragma HLS array_partition variable=exp_res complete - typename CONFIG_T::exp_table_t exp_sum(0); -SoftmaxExpLoop: - for (unsigned i = 0; i < CONFIG_T::n_in / data_T::size; i++) { - #pragma HLS PIPELINE II=ii - - data_T in_pack = data.read(); - SoftmaxExpPackLoop: - for (unsigned j = 0; j < data_T::size; j++) { - #pragma HLS UNROLL - unsigned x = softmax_idx_from_real_val(in_pack[j]); - exp_res[j] = exp_table[x]; - } - - // Explicitly sum the results with an adder tree. 
- // Rounding & Saturation mode, which improve accuracy, prevent Vivado from expression balancing - Op_add op_add; - exp_sum = - reduce>(exp_res, op_add); - - typename CONFIG_T::inv_table_t inv_exp_sum = - invert_table[softmax_idx_from_real_val(exp_sum)]; - - res_T out_pack; - PRAGMA_DATA_PACK(out_pack) - - SoftmaxInvPackLoop: - for (unsigned j = 0; j < res_T::size; j++) { - #pragma HLS UNROLL - #pragma HLS ALLOCATION operation instances=mul limit=multiplier_limit - out_pack[j] = exp_res[j] * inv_exp_sum; - } - res.write(out_pack); - } -} - -template -void softmax_stable(hls::stream &data, hls::stream &res) { - // Initialize the lookup tables -#ifdef __HLS_SYN__ - bool initialized = false; - typename CONFIG_T::exp_table_t exp_table[CONFIG_T::table_size]; - typename CONFIG_T::inv_table_t invert_table[CONFIG_T::table_size]; -#else - static bool initialized = false; - static typename CONFIG_T::exp_table_t exp_table[CONFIG_T::table_size]; - static typename CONFIG_T::inv_table_t invert_table[CONFIG_T::table_size]; - -#endif - if (!initialized) { - // Note we are exponentiating the inputs, which have type data_T - init_exp_table(exp_table); - // Note we are inverting the exponentials, which have type exp_table_t - init_invert_table(invert_table); - initialized = true; - } - - constexpr unsigned multiplier_limit = DIV_ROUNDUP(data_T::size, CONFIG_T::reuse_factor); - constexpr unsigned ii = data_T::size / multiplier_limit; - - typename data_T::value_type data_array[data_T::size]; -#pragma HLS ARRAY_PARTITION variable=data_array complete -SoftmaxArrayLoop: - for (unsigned i = 0; i < CONFIG_T::n_in / data_T::size; i++) { - #pragma HLS PIPELINE II=ii - - data_T in_pack = data.read(); - SoftmaxArrayPackLoop: - for (unsigned j = 0; j < data_T::size; j++) { - #pragma HLS UNROLL - data_array[j] = in_pack[j]; - } - - // Find the max and compute all delta(x_i, x_max) - Op_max op_max; - typename data_T::value_type x_max = - reduce>(data_array, op_max); - - // For the diffs, use the same type as the input but force rounding and saturation - ap_fixed d_xi_xmax[data_T::size]; - for (unsigned j = 0; j < data_T::size; j++) { - #pragma HLS UNROLL - d_xi_xmax[j] = data_array[j] - x_max; - } - - // Calculate all the e^x's - typename CONFIG_T::exp_table_t exp_res[data_T::size]; - #pragma HLS ARRAY_PARTITION variable=exp_res complete - typename CONFIG_T::exp_table_t exp_sum(0); - for (unsigned j = 0; j < data_T::size; j++) { - #pragma HLS UNROLL - unsigned x = softmax_idx_from_real_val(d_xi_xmax[j]); - exp_res[j] = exp_table[x]; - } - - // Explicitly sum the results with an adder tree. 
- // Rounding & Saturation mode, which improve accuracy, prevent Vivado from expression balancing - Op_add op_add; - exp_sum = - reduce>(exp_res, op_add); - - typename CONFIG_T::inv_table_t inv_exp_sum = - invert_table[softmax_idx_from_real_val(exp_sum)]; - - res_T out_pack; - PRAGMA_DATA_PACK(out_pack) - - SoftmaxInvPackLoop: - for (unsigned j = 0; j < res_T::size; j++) { - #pragma HLS UNROLL - #pragma HLS ALLOCATION operation instances=mul limit=multiplier_limit - out_pack[j] = exp_res[j] * inv_exp_sum; - } - res.write(out_pack); - } -} - -template -void softmax_legacy(hls::stream &data, hls::stream &res) { - // Initialize the lookup table -#ifdef __HLS_SYN__ - bool initialized = false; - typename CONFIG_T::table_t exp_table[CONFIG_T::table_size]; - typename CONFIG_T::table_t invert_table[CONFIG_T::table_size]; -#else - static bool initialized = false; - static typename CONFIG_T::table_t exp_table[CONFIG_T::table_size]; - static typename CONFIG_T::table_t invert_table[CONFIG_T::table_size]; -#endif - if (!initialized) { - init_exp_table_legacy(exp_table); - init_invert_table_legacy(invert_table); - initialized = true; - } - - // Index into the lookup table based on data for exponentials - typename CONFIG_T::table_t exp_res[data_T::size]; - typename CONFIG_T::table_t exp_diff_res; - typename data_T::value_type data_cache[data_T::size]; - -SoftmaxInitLoop: - for (unsigned s = 0; s < CONFIG_T::n_in / data_T::size; s++) { - #pragma HLS PIPELINE - data_T in_pack = data.read(); - SoftmaxInitPackLoop: - for (unsigned j = 0; j < data_T::size; j++) { - #pragma HLS UNROLL - data_cache[j] = in_pack[j]; - exp_res[j] = 0; - } - - SoftmaxExpLoop: - for (int i = 0; i < data_T::size; i++) { - #pragma HLS UNROLL - SoftmaxExpInner: - for (int j = 0; j < data_T::size; j++) { - #pragma HLS UNROLL - - if (i == j) { - exp_diff_res = 1; - } else { - int data_round = (data_cache[j] - data_cache[i]) * CONFIG_T::table_size / 16; - int index = data_round + 8 * CONFIG_T::table_size / 16; - if (index < 0) - index = 0; - if (index > CONFIG_T::table_size - 1) - index = CONFIG_T::table_size - 1; - exp_diff_res = exp_table[index]; - } - - exp_res[i] += exp_diff_res; - } - } - - res_T out_pack; - PRAGMA_DATA_PACK(out_pack) - - SoftmaxInvPackLoop: - for (unsigned j = 0; j < res_T::size; j++) { - #pragma HLS UNROLL - - int exp_res_index = exp_res[j] * CONFIG_T::table_size / 64; - if (exp_res_index < 0) - exp_res_index = 0; - if (exp_res_index > CONFIG_T::table_size - 1) - exp_res_index = CONFIG_T::table_size - 1; - - out_pack[j] = (typename res_T::value_type)invert_table[exp_res_index]; - } - res.write(out_pack); - } -} - -template -void softmax_argmax(hls::stream &data, hls::stream &res) { - for (int i = 0; i < CONFIG_T::n_in / res_T::size; i++) { - #pragma HLS PIPELINE - data_T in_data = data.read(); - res_T out_data; - - for (int i = 0; i < res_T::size; i++) { - #pragma HLS UNROLL - out_data[i] = (typename res_T::value_type)0; - } - - typename data_T::value_type maximum = in_data[0]; - int idx = 0; - - for (int i = 1; i < res_T::size; i++) { - #pragma HLS PIPELINE - if (in_data[i] > maximum) { - maximum = in_data[i]; - idx = i; - } - } - - out_data[idx] = (typename res_T::value_type)1; - res.write(out_data); - } -} - -template void softmax(hls::stream &data, hls::stream &res) { - assert(CONFIG_T::axis == -1); - - switch (CONFIG_T::implementation) { - case softmax_implementation::latency: - softmax_latency(data, res); - break; - case softmax_implementation::stable: - softmax_stable(data, res); - break; - case 
softmax_implementation::legacy: - softmax_legacy(data, res); - break; - case softmax_implementation::argmax: - softmax_argmax(data, res); - break; - } -} - -// ************************************************* -// TanH Activation -// ************************************************* - -template void tanh(hls::stream &data, hls::stream &res) { - // Initialize the lookup table -#ifdef __HLS_SYN__ - bool initialized = false; - typename CONFIG_T::table_t tanh_table[CONFIG_T::table_size]; -#else - static bool initialized = false; - static typename CONFIG_T::table_t tanh_table[CONFIG_T::table_size]; -#endif - if (!initialized) { - init_tanh_table(tanh_table); - initialized = true; - } - -TanHActLoop: - for (int i = 0; i < CONFIG_T::n_in / res_T::size; i++) { - #pragma HLS PIPELINE - - data_T in_data = data.read(); - res_T out_data; - PRAGMA_DATA_PACK(out_data) - - TanHPackLoop: - for (int j = 0; j < res_T::size; j++) { - #pragma HLS UNROLL - int data_round = in_data[j] * CONFIG_T::table_size / 8; - int index = data_round + 4 * CONFIG_T::table_size / 8; - if (index < 0) - index = 0; - else if (index > CONFIG_T::table_size - 1) - index = CONFIG_T::table_size - 1; - out_data[j] = tanh_table[index]; - } - - res.write(out_data); - } -} - -// ************************************************* -// Hard sigmoid Activation -// ************************************************* - -template -void hard_sigmoid(hls::stream &data, hls::stream &res) { - -HardSigmoidActLoop: - for (int i = 0; i < CONFIG_T::n_in / res_T::size; i++) { - #pragma HLS PIPELINE - - data_T in_data = data.read(); - res_T out_data; - PRAGMA_DATA_PACK(out_data) - - HardSigmoidPackLoop: - for (int j = 0; j < res_T::size; j++) { - #pragma HLS UNROLL - auto datareg = CONFIG_T::slope * in_data[j] + CONFIG_T::shift; - if (datareg > 1) - datareg = 1; - else if (datareg < 0) - datareg = 0; - out_data[j] = datareg; - } - - res.write(out_data); - } -} - -template void hard_tanh(hls::stream &data, hls::stream &res) { - -HardSigmoidActLoop: - for (int i = 0; i < CONFIG_T::n_in / res_T::size; i++) { - #pragma HLS PIPELINE - - data_T in_data = data.read(); - res_T out_data; - PRAGMA_DATA_PACK(out_data) - - HardSigmoidPackLoop: - for (int j = 0; j < res_T::size; j++) { - #pragma HLS UNROLL - auto sigmoid = CONFIG_T::slope * in_data[j] + CONFIG_T::shift; - if (sigmoid > 1) - sigmoid = 1; - else if (sigmoid < 0) - sigmoid = 0; - out_data[j] = 2 * sigmoid - 1; - } - - res.write(out_data); - } -} - -// ************************************************* -// Leaky RELU Activation -// ************************************************* - -template -void leaky_relu(hls::stream &data, typename data_T::value_type alpha, hls::stream &res) { -LeakyReLUActLoop: - for (int i = 0; i < CONFIG_T::n_in / res_T::size; i++) { - #pragma HLS PIPELINE - - data_T in_data = data.read(); - res_T out_data; - PRAGMA_DATA_PACK(out_data) - - LeakyReLUPackLoop: - for (int j = 0; j < res_T::size; j++) { - #pragma HLS UNROLL - if (in_data[j] > 0) - out_data[j] = in_data[j]; - else - out_data[j] = alpha * in_data[j]; - } - res.write(out_data); - } -} - -// ************************************************* -// Thresholded RELU Activation -// ************************************************* - -template -void thresholded_relu(hls::stream &data, typename data_T::value_type theta, hls::stream &res) { -ThresholdedReLUActLoop: - for (int i = 0; i < CONFIG_T::n_in / res_T::size; i++) { - #pragma HLS PIPELINE - - data_T in_data = data.read(); - res_T out_data; - PRAGMA_DATA_PACK(out_data) - - 
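// hard_sigmoid, hard_tanh, leaky_relu, and thresholded_relu are all
// table-free: at most one multiply-add plus comparisons per element.
// Reference versions in plain C++ (float in place of ap_fixed; the Keras
// defaults slope = 0.2, shift = 0.5 are assumed for the hard family):

#include <algorithm>

float hard_sigmoid_ref(float x, float slope = 0.2f, float shift = 0.5f) {
    return std::min(1.0f, std::max(0.0f, slope * x + shift));
}

float hard_tanh_ref(float x) {
    return 2.0f * hard_sigmoid_ref(x) - 1.0f; // rescale [0, 1] to [-1, 1]
}

float leaky_relu_ref(float x, float alpha) {
    return x > 0 ? x : alpha * x;
}

float thresholded_relu_ref(float x, float theta) {
    return x > theta ? x : 0.0f; // pass-through only above the threshold
}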
ThresholdedReLUPackLoop: - for (int j = 0; j < res_T::size; j++) { - #pragma HLS UNROLL - if (in_data[j] > theta) - out_data[j] = in_data[j]; - else - out_data[j] = 0; - } - - res.write(out_data); - } -} - -// ************************************************* -// Softplus Activation -// ************************************************* - -template void softplus(hls::stream &data, hls::stream &res) { - // Initialize the lookup table -#ifdef __HLS_SYN__ - bool initialized = false; - typename CONFIG_T::table_t softplus_table[CONFIG_T::table_size]; -#else - static bool initialized = false; - static typename CONFIG_T::table_t softplus_table[CONFIG_T::table_size]; -#endif - if (!initialized) { - init_softplus_table(softplus_table); - initialized = true; - } - -SoftplusActLoop: - for (int i = 0; i < CONFIG_T::n_in / res_T::size; i++) { - #pragma HLS PIPELINE - - data_T in_data = data.read(); - res_T out_data; - PRAGMA_DATA_PACK(out_data) - - SoftplusPackLoop: - for (int j = 0; j < res_T::size; j++) { - #pragma HLS UNROLL - int data_round = in_data[j] * CONFIG_T::table_size / 16; - int index = data_round + 8 * CONFIG_T::table_size / 16; - if (index < 0) - index = 0; - else if (index > CONFIG_T::table_size - 1) - index = CONFIG_T::table_size - 1; - out_data[j] = softplus_table[index]; - } - res.write(out_data); - } -} - -// ************************************************* -// Softsign Activation -// ************************************************* - -template void softsign(hls::stream &data, hls::stream &res) { - // Initialize the lookup table -#ifdef __HLS_SYN__ - bool initialized = false; - typename CONFIG_T::table_t softsign_table[CONFIG_T::table_size]; -#else - static bool initialized = false; - static typename CONFIG_T::table_t softsign_table[CONFIG_T::table_size]; -#endif - if (!initialized) { - init_softsign_table(softsign_table); - initialized = true; - } - -SoftsignActLoop: - for (int i = 0; i < CONFIG_T::n_in / res_T::size; i++) { - #pragma HLS PIPELINE - - data_T in_data = data.read(); - res_T out_data; - PRAGMA_DATA_PACK(out_data) - - SoftsignPackLoop: - for (int j = 0; j < res_T::size; j++) { - #pragma HLS UNROLL - int data_round = in_data[j] * CONFIG_T::table_size / 16; - int index = data_round + 8 * CONFIG_T::table_size / 16; - if (index < 0) - index = 0; - else if (index > CONFIG_T::table_size - 1) - index = CONFIG_T::table_size - 1; - out_data[j] = softsign_table[index]; - } - res.write(out_data); - } -} - -// ************************************************* -// ELU Activation -// ************************************************* -template -void elu(hls::stream &data, typename data_T::value_type alpha, hls::stream &res) { - // Initialize the lookup table -#ifdef __HLS_SYN__ - bool initialized = false; - typename CONFIG_T::table_t elu_table[CONFIG_T::table_size]; -#else - static bool initialized = false; - static typename CONFIG_T::table_t elu_table[CONFIG_T::table_size]; -#endif - if (!initialized) { - init_elu_table(elu_table); - initialized = true; - } - -EluActLoop: - for (int i = 0; i < CONFIG_T::n_in / res_T::size; i++) { - #pragma HLS PIPELINE - - data_T in_data = data.read(); - res_T out_data; - PRAGMA_DATA_PACK(out_data) - - EluPackLoop: - for (int j = 0; j < res_T::size; j++) { - #pragma HLS UNROLL - - typename data_T::value_type datareg = in_data[j]; - if (datareg >= 0) { - out_data[j] = datareg; - } else { - int index = datareg * CONFIG_T::table_size / -8; - if (index > CONFIG_T::table_size - 1) - index = CONFIG_T::table_size - 1; - out_data[j] = alpha * 
elu_table[index]; - } - } - res.write(out_data); - } -} - -template void elu(hls::stream &data, hls::stream &res) { - elu(data, 1.0, res); -} - -// ************************************************* -// SELU Activation -// ************************************************* - -template void selu(hls::stream &data, hls::stream &res) { - // Initialize the lookup table -#ifdef __HLS_SYN__ - bool initialized = false; - typename CONFIG_T::table_t selu_table[CONFIG_T::table_size]; -#else - static bool initialized = false; - static typename CONFIG_T::table_t selu_table[CONFIG_T::table_size]; -#endif - if (!initialized) { - init_selu_table(selu_table); - initialized = true; - } - -SeluActLoop: - for (int i = 0; i < CONFIG_T::n_in / res_T::size; i++) { - #pragma HLS PIPELINE - - data_T in_data = data.read(); - res_T out_data; - PRAGMA_DATA_PACK(out_data) - - SeluPackLoop: - for (int j = 0; j < res_T::size; j++) { - #pragma HLS UNROLL - - typename data_T::value_type datareg = in_data[j]; - if (datareg >= 0) { - out_data[j] = (typename data_T::value_type)1.0507009873554804934193349852946 * datareg; - } else { - int index = datareg * CONFIG_T::table_size / -8; - if (index > CONFIG_T::table_size - 1) - index = CONFIG_T::table_size - 1; - out_data[j] = selu_table[index]; - } - } - res.write(out_data); - } -} - -// ************************************************* -// PReLU Activation -// ************************************************* - -template -void prelu(hls::stream &data, typename data_T::value_type alpha[CONFIG_T::n_in], hls::stream &res) { -PReLUActLoop: - for (int i = 0; i < CONFIG_T::n_in / res_T::size; i++) { - #pragma HLS PIPELINE - - data_T in_data = data.read(); - res_T out_data; - PRAGMA_DATA_PACK(out_data) - - PReLUPackLoop: - for (int j = 0; j < res_T::size; j++) { - #pragma HLS UNROLL - if (in_data[j] > 0) - out_data[j] = in_data[j]; - else - out_data[j] = alpha[i * res_T::size + j] * in_data[j]; - } - res.write(out_data); - } -} - -// ************************************************* -// Binary TanH Activation -// ************************************************* -template -void binary_tanh(hls::stream &data, hls::stream &res) { -PReLUActLoop: - for (int i = 0; i < CONFIG_T::n_in / res_T::size; i++) { - #pragma HLS PIPELINE - - data_T in_data = data.read(); - res_T out_data; - PRAGMA_DATA_PACK(out_data) - - PReLUPackLoop: - for (int j = 0; j < res_T::size; j++) { - #pragma HLS UNROLL - if (in_data[j] > 0) - out_data[j] = (typename res_T::value_type)1; - else - out_data[j] = (typename res_T::value_type) - 1; - } - res.write(out_data); - } -} - -// ************************************************* -// Ternary TanH Activation -// ************************************************* -template -void ternary_tanh(hls::stream &data, hls::stream &res) { -PReLUActLoop: - for (int i = 0; i < CONFIG_T::n_in / res_T::size; i++) { - #pragma HLS PIPELINE - - data_T in_data = data.read(); - res_T out_data; - PRAGMA_DATA_PACK(out_data) - - PReLUPackLoop: - for (int j = 0; j < res_T::size; j++) { - #pragma HLS UNROLL - if (in_data[j] > 1) - out_data[j] = (typename res_T::value_type)1; - else if (in_data[j] <= -1) - out_data[j] = (typename res_T::value_type) - 1; - else - out_data[j] = (typename res_T::value_type)0; - } - res.write(out_data); - } -} - -} // namespace nnet - -#endif diff --git a/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_array.h b/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_array.h deleted file mode 100644 index d179102..0000000 --- 
a/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_array.h +++ /dev/null @@ -1,52 +0,0 @@ -#ifndef NNET_ARRAY_H_ -#define NNET_ARRAY_H_ - -#include - -namespace nnet { - -struct transpose_config { - static const unsigned height = 10; - static const unsigned width = 10; - static const unsigned depth = 10; - static constexpr unsigned perm[3] = {2, 0, 1}; -}; - -template -void transpose_2d(data_T data[CONFIG_T::height * CONFIG_T::width], res_T data_t[CONFIG_T::height * CONFIG_T::width]) { - #pragma HLS PIPELINE - - for (int i = 0; i < CONFIG_T::height; i++) { - for (int j = 0; j < CONFIG_T::width; j++) { - data_t[j * CONFIG_T::height + i] = data[i * CONFIG_T::width + j]; - } - } -} - -template -void transpose_3d(data_T data[CONFIG_T::depth * CONFIG_T::height * CONFIG_T::width], - res_T data_t[CONFIG_T::depth * CONFIG_T::height * CONFIG_T::width]) { - unsigned dims[3] = {CONFIG_T::depth, CONFIG_T::height, CONFIG_T::width}; - unsigned dims_t[3]; - dims_t[0] = dims[CONFIG_T::perm[0]]; - dims_t[1] = dims[CONFIG_T::perm[1]]; - dims_t[2] = dims[CONFIG_T::perm[2]]; - - int idx[3] = {0}, idx_t[3] = {0}; - for (idx[0] = 0; idx[0] < dims[0]; idx[0]++) { - for (idx[1] = 0; idx[1] < dims[1]; idx[1]++) { - for (idx[2] = 0; idx[2] < dims[2]; idx[2]++) { - idx_t[0] = idx[CONFIG_T::perm[0]]; - idx_t[1] = idx[CONFIG_T::perm[1]]; - idx_t[2] = idx[CONFIG_T::perm[2]]; - - data_t[idx_t[0] * dims_t[1] * dims_t[2] + idx_t[1] * dims_t[2] + idx_t[2]] = - data[idx[0] * dims[1] * dims[2] + idx[1] * dims[2] + idx[2]]; - } - } - } -} - -} // namespace nnet - -#endif diff --git a/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_batchnorm.h b/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_batchnorm.h deleted file mode 100644 index d8be45b..0000000 --- a/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_batchnorm.h +++ /dev/null @@ -1,124 +0,0 @@ -#ifndef NNET_BATCHNORM_H_ -#define NNET_BATCHNORM_H_ - -#include "hls_stream.h" -#include "nnet_common.h" -#include "nnet_dense.h" -#include - -namespace nnet { - -struct batchnorm_config { - // Internal data type definitions - typedef float bias_t; - typedef float scale_t; - - // Layer Sizes - static const unsigned n_in = 10; - static const unsigned n_filt = -1; - static const unsigned n_scale_bias = 10; - - // Resource reuse info - static const unsigned io_type = io_parallel; - static const unsigned reuse_factor = 1; - static const bool store_weights_in_bram = false; - static const unsigned n_zeros = 0; - // partitioning arrays cyclically to go with roll factors? 
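// normalize() below consumes only per-channel scale and bias because the
// four trained batch-norm parameters fold into two at inference time. A
// sketch of that offline folding (performed by the converter, not in HLS;
// argument names are illustrative):

#include <cmath>

void fold_batchnorm_params(const float *gamma, const float *beta,
                           const float *mean, const float *var, float eps,
                           float *scale, float *bias, int n_channels) {
    for (int c = 0; c < n_channels; c++) {
        // y = gamma * (x - mean) / sqrt(var + eps) + beta  ==  scale * x + bias
        scale[c] = gamma[c] / std::sqrt(var[c] + eps);
        bias[c] = beta[c] - mean[c] * scale[c];
    }
}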
- template using product = nnet::product::mult; -}; - -template -void normalize(data_T data[CONFIG_T::n_in], res_T res[CONFIG_T::n_in], - typename CONFIG_T::scale_t scale[CONFIG_T::n_scale_bias], - typename CONFIG_T::bias_t bias[CONFIG_T::n_scale_bias]) { - data_T cache; - - // Use a function_instantiate in case it helps to explicitly optimize unchanging weights/biases - #pragma HLS function_instantiate variable=scale,bias - - // For parallel inputs: - // - completely partition arrays -- target fabric - // - if we have an unroll factor, limit number of multipliers - #pragma HLS PIPELINE II=CONFIG_T::reuse_factor - - // #pragma HLS ARRAY_PARTITION variable=weights complete // remove this line for now, it breaks compression sometimes - #pragma HLS ARRAY_PARTITION variable=scale complete - #pragma HLS ARRAY_PARTITION variable=bias complete - - #pragma HLS ALLOCATION operation instances=mul limit=CONFIG_T::multiplier_limit - -// Calcuate result -Result: - for (int ires = 0; ires < CONFIG_T::n_in; ires++) { - if (CONFIG_T::n_filt == -1) { - res[ires] = CONFIG_T::template product::product(data[ires], scale[ires]) + - bias[ires]; - } else { - int norm_index = ires % CONFIG_T::n_filt; - res[ires] = - CONFIG_T::template product::product(data[ires], scale[norm_index]) + - bias[norm_index]; - } - } -} - -// **************************************************** -// Merged Batch Normalization and Quantized Tanh -// **************************************************** -struct batchnorm_quantized_tanh_config { - // Layer Sizes - static const unsigned n_in = 10; - static const unsigned n_filt = -1; - static const unsigned n_scale_bias = 10; - - // Resource reuse info - static const unsigned io_type = io_parallel; - static const unsigned reuse_factor = 1; - static const unsigned n_zeros = 0; -}; - -template -void normalize_binary_tanh(data_T data[CONFIG_T::n_in], ap_uint<1> res[CONFIG_T::n_in], - data_T threshold[CONFIG_T::n_scale_bias]) { - #pragma HLS PIPELINE - #pragma HLS ARRAY_PARTITION variable=res complete - - data_T datareg; - ap_uint<1> cache; - for (int ii = 0; ii < CONFIG_T::n_in; ii++) { - datareg = data[ii]; - int norm_index = CONFIG_T::n_filt == -1 ? ii : ii % CONFIG_T::n_filt; - if (datareg >= threshold[norm_index]) - cache = 1; - else - cache = 0; - - res[ii] = cache; - } -} - -template -void normalize_ternary_tanh(data_T data[CONFIG_T::n_in], ap_int<2> res[CONFIG_T::n_in], - data_T threshold_hi[CONFIG_T::n_scale_bias], data_T threshold_lo[CONFIG_T::n_scale_bias]) { - #pragma HLS PIPELINE - #pragma HLS ARRAY_PARTITION variable=res complete - - data_T datareg; - ap_int<2> cache; - for (int ii = 0; ii < CONFIG_T::n_in; ii++) { - datareg = data[ii]; - int norm_index = CONFIG_T::n_filt == -1 ? 
ii : ii % CONFIG_T::n_filt; - if (datareg > threshold_hi[norm_index]) - cache = 1; - else if (datareg <= threshold_lo[norm_index]) - cache = -1; - else - cache = 0; - - res[ii] = cache; - } -} - -} // namespace nnet - -#endif diff --git a/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_batchnorm_stream.h b/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_batchnorm_stream.h deleted file mode 100644 index a064677..0000000 --- a/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_batchnorm_stream.h +++ /dev/null @@ -1,123 +0,0 @@ -#ifndef NNET_BATCHNORM_STREAM_H_ -#define NNET_BATCHNORM_STREAM_H_ - -#include "hls_stream.h" -#include "nnet_common.h" -#include "nnet_mult.h" -#include "nnet_types.h" - -namespace nnet { - -// **************************************************** -// Streaming Batch Normalization -// **************************************************** - -template -void normalize(hls::stream &data, hls::stream &res, typename CONFIG_T::scale_t scale[CONFIG_T::n_scale_bias], - typename CONFIG_T::bias_t bias[CONFIG_T::n_scale_bias]) { - #pragma HLS ARRAY_PARTITION variable=scale complete - #pragma HLS ARRAY_PARTITION variable=bias complete - - constexpr unsigned ii = CONFIG_T::n_in / CONFIG_T::multiplier_limit; - #pragma HLS ALLOCATION operation instances=mul limit=CONFIG_T::multiplier_limit - -BatchNormLoop: - for (int i = 0; i < CONFIG_T::n_in / data_T::size; i++) { - #pragma HLS PIPELINE II=ii - - data_T in_data = data.read(); - res_T out_data; - PRAGMA_DATA_PACK(out_data) - - BatchNormpack: - for (int j = 0; j < data_T::size; j++) { - #pragma HLS UNROLL - int norm_index; - if (CONFIG_T::n_filt == -1) { - norm_index = i * data_T::size + j; - } else { - norm_index = j % CONFIG_T::n_filt; - } - out_data[j] = CONFIG_T::template product::product( - in_data[j], scale[norm_index]) + - bias[norm_index]; - } - - res.write(out_data); - } -} - -// **************************************************** -// Merged Batch Normalization and Quantized Tanh -// **************************************************** -template -void normalize_binary_tanh(hls::stream &data, hls::stream, CONFIG_T::n_scale_bias>> &res, - typename data_T::value_type threshold[CONFIG_T::n_scale_bias]) { - #pragma HLS ARRAY_PARTITION variable=threshold complete - -BinaryNormLoop: - for (int i = 0; i < CONFIG_T::n_in / data_T::size; i++) { - #pragma HLS PIPELINE - - data_T in_data = data.read(); - nnet::array, CONFIG_T::n_scale_bias> out_data; - PRAGMA_DATA_PACK(out_data) - - BatchNormPack: - for (int j = 0; j < data_T::size; j++) { - #pragma HLS UNROLL - int norm_index; - if (CONFIG_T::n_filt == -1) { - norm_index = i * data_T::size + j; - } else { - norm_index = j % CONFIG_T::n_filt; - } - out_data[j] = (in_data[j] >= threshold[norm_index]) ? 
1 : 0; - } - - res.write(out_data); - } -} - -template -void normalize_ternary_tanh(hls::stream &data, hls::stream, CONFIG_T::n_scale_bias>> &res, - typename data_T::value_type threshold_hi[CONFIG_T::n_scale_bias], - typename data_T::value_type threshold_lo[CONFIG_T::n_scale_bias]) { - #pragma HLS ARRAY_PARTITION variable=threshold_hi complete - #pragma HLS ARRAY_PARTITION variable=threshold_lo complete - -TernaryNormLoop: - for (int i = 0; i < CONFIG_T::n_in / data_T::size; i++) { - #pragma HLS PIPELINE - - data_T in_data = data.read(); - nnet::array, CONFIG_T::n_scale_bias> out_data; - PRAGMA_DATA_PACK(out_data) - - BatchNormPack: - for (int j = 0; j < data_T::size; j++) { - #pragma HLS UNROLL - - int norm_index; - if (CONFIG_T::n_filt == -1) { - norm_index = i * data_T::size + j; - } else { - norm_index = j % CONFIG_T::n_filt; - } - - if (in_data[j] > threshold_hi[norm_index]) { - out_data[j] = 1; - } else if (in_data[j] <= threshold_lo[norm_index]) { - out_data[j] = -1; - } else { - out_data[j] = 0; - } - } - - res.write(out_data); - } -} - -} // namespace nnet - -#endif diff --git a/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_code_gen.h b/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_code_gen.h deleted file mode 100644 index e4db436..0000000 --- a/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_code_gen.h +++ /dev/null @@ -1,32 +0,0 @@ -#ifndef NNET_INSTR_GEN_H_ -#define NNET_INSTR_GEN_H_ - -#include "nnet_helpers.h" -#include - -namespace nnet { - -template class FillConv1DBuffer { - public: - static void fill_buffer(data_T data[CONFIG_T::in_width * CONFIG_T::n_chan], - data_T buffer[CONFIG_T::n_pixels][CONFIG_T::filt_width * CONFIG_T::n_chan], - const unsigned partition) { - // To be implemented in subclasses - } -}; - -template class FillConv2DBuffer { - public: - static void - fill_buffer(data_T data[CONFIG_T::in_height * CONFIG_T::in_width * CONFIG_T::n_chan], - data_T buffer[CONFIG_T::n_pixels][CONFIG_T::filt_height * CONFIG_T::filt_width * CONFIG_T::n_chan], - const unsigned partition) { - // To be implemented in subclasses - } -}; - -// hls4ml insert code - -} // namespace nnet - -#endif diff --git a/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_common.h b/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_common.h deleted file mode 100644 index e942a1d..0000000 --- a/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_common.h +++ /dev/null @@ -1,76 +0,0 @@ -#ifndef NNET_COMMON_H_ -#define NNET_COMMON_H_ - -#include "ap_fixed.h" - -// This is a substitute for "ceil(n/(float)d)". -#define DIV_ROUNDUP(n, d) ((n + d - 1) / d) -#define MIN(n, d) (n > d ? d : n) -#define MAX(n, d) (n > d ? n : d) - -#define STRINGIFY(x) #x -#define EXPAND_STRING(x) STRINGIFY(x) - -#ifndef __VITIS_HLS__ -#define DATA_PACK_TXT HLS DATA_PACK variable = -#define DATA_PACK_PRAGMA(variable) DATA_PACK_TXT variable -#define PRAGMA_DATA_PACK(variable) _Pragma(EXPAND_STRING(DATA_PACK_PRAGMA(variable))) -#else -#define PRAGMA_DATA_PACK(variable) -#endif - -namespace nnet { - -// Common type definitions -enum io_type { io_parallel = 0, io_stream }; -enum strategy { latency, resource }; -enum class conv_implementation { linebuffer = 0, encoded = 1, pointwise = 2 }; - -/* --- - * Balanced tree reduce implementation. - * For use in scenarios where Vivado cannot expression balance - * Reduces an array of inputs to a single value using the template binary operator 'Op', - * for example summing all elements with Op_add, or finding the maximum with Op_max - * Use only when the input array is fully unrolled. 
Or, slice out a fully unrolled section - * before applying and accumulate the result over the rolled dimension. - * --- */ -template T reduce(const T *x, Op op) { - static constexpr int leftN = pow2(floorlog2(N - 1)) > 0 ? pow2(floorlog2(N - 1)) : 0; - static constexpr int rightN = N - leftN > 0 ? N - leftN : 0; - if (N == 1) { - return x[0]; - } - if (N == 2) { - return op(x[0], x[1]); - } - return op(reduce(x, op), reduce(x + leftN, op)); -} - -template class Op_add { - public: - T operator()(T a, T b) { return a + b; } -}; - -template class Op_and { - public: - T operator()(T a, T b) { return a && b; } -}; - -template class Op_or { - public: - T operator()(T a, T b) { return a || b; } -}; - -template class Op_max { - public: - T operator()(T a, T b) { return a >= b ? a : b; } -}; - -template class Op_min { - public: - T operator()(T a, T b) { return a <= b ? a : b; } -}; - -} // namespace nnet - -#endif diff --git a/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_conv1d.h b/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_conv1d.h deleted file mode 100644 index 0f2e89a..0000000 --- a/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_conv1d.h +++ /dev/null @@ -1,76 +0,0 @@ -#ifndef NNET_CONV1D_H_ -#define NNET_CONV1D_H_ - -#include "nnet_common.h" -#include "nnet_conv1d_latency.h" -#include "nnet_conv1d_resource.h" -#include - -namespace nnet { - -struct conv1d_config { - // Internal data type definitions - typedef float bias_t; - typedef float weight_t; - typedef float accum_t; - - // Convolutional parameters - static const unsigned pad_left = 0; - static const unsigned pad_right = 0; - static const unsigned in_width = 10; - static const unsigned n_chan = 0; - static const unsigned filt_width = 1; - static const unsigned kernel_size = filt_width; - static const unsigned n_filt = 1; - static const unsigned stride_width = 1; - static const unsigned dilation = 1; - static const unsigned out_width = 10; //(N_IN + PAD_LEFT * PAD_RIGHT - (DILATION * (FILT_WIDTH - 1) + 1)) / STRIDE + 1 - - static const unsigned reuse_factor = 1; - static const bool store_weights_in_bram = false; - static const unsigned n_zeros = 0; // not used yet -}; - -template -void conv_1d_cl(data_T data[CONFIG_T::in_width * CONFIG_T::n_chan], res_T res[CONFIG_T::out_width * CONFIG_T::n_filt], - typename CONFIG_T::weight_t weights[CONFIG_T::filt_width * CONFIG_T::n_chan * CONFIG_T::n_filt], - typename CONFIG_T::bias_t biases[CONFIG_T::n_filt]) { - #pragma HLS INLINE region - - if (CONFIG_T::strategy == nnet::latency) { - conv_1d_latency_cl(data, res, weights, biases); - } else { - conv_1d_resource_cl(data, res, weights, biases); - } -} - -template -void pointwise_conv_1d_cl(data_T data[CONFIG_T::in_width * CONFIG_T::n_chan], - res_T res[CONFIG_T::out_width * CONFIG_T::n_filt], - typename CONFIG_T::weight_t weights[CONFIG_T::n_chan * CONFIG_T::n_filt], - typename CONFIG_T::bias_t biases[CONFIG_T::n_filt]) { - assert(CONFIG_T::filt_width == 1); - - #pragma HLS INLINE region - - if (CONFIG_T::strategy == nnet::latency) { - if (CONFIG_T::implementation == conv_implementation::pointwise) { - // Use pointwise unrolled implementation - if (CONFIG_T::reuse_factor > 1 && CONFIG_T::reuse_factor <= 120) { - pointwise_conv_1d_latency_cl_split_by_rf(data, res, weights, biases); - } else { - assert(CONFIG_T::reuse_factor == 1); - pointwise_conv_1d_latency_cl(data, res, weights, biases); - } - } else { - // Use standard unrolled implementation - conv_1d_latency_cl(data, res, weights, biases); - } - } else { - conv_1d_resource_cl(data, res, 
weights, biases); - } -} - -} // namespace nnet - -#endif diff --git a/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_conv1d_latency.h b/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_conv1d_latency.h deleted file mode 100644 index aabc869..0000000 --- a/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_conv1d_latency.h +++ /dev/null @@ -1,439 +0,0 @@ -#ifndef NNET_CONV1D_LATENCY_H_ -#define NNET_CONV1D_LATENCY_H_ - -#include "nnet_common.h" -#include "nnet_mult.h" -#include - -namespace nnet { - -template -void conv_1d_latency_cl(data_T data[CONFIG_T::in_width * CONFIG_T::n_chan], - res_T res[CONFIG_T::out_width * CONFIG_T::n_filt], - typename CONFIG_T::weight_t weights[CONFIG_T::filt_width * CONFIG_T::n_chan * CONFIG_T::n_filt], - typename CONFIG_T::bias_t biases[CONFIG_T::n_filt]) { - constexpr unsigned mult_n_in = CONFIG_T::filt_width * CONFIG_T::n_chan; - constexpr unsigned mult_n_out = CONFIG_T::n_filt; - - data_T data_buf[CONFIG_T::n_pixels][mult_n_in]; - #pragma HLS ARRAY_PARTITION variable=data_buf complete dim=0 - - typename CONFIG_T::accum_t mult[mult_n_in * mult_n_out]; - #pragma HLS ARRAY_PARTITION variable=mult complete - - typename CONFIG_T::accum_t acc[mult_n_out]; - #pragma HLS ARRAY_PARTITION variable=acc complete - - #pragma HLS ARRAY_PARTITION variable=weights complete - #pragma HLS ARRAY_PARTITION variable=biases complete - - // Limit multipliers to control parallelization - #pragma HLS ALLOCATION operation instances=mul limit=CONFIG_T::mult_config::multiplier_limit - -PartitionLoop: - for (int i_part = 0; i_part < CONFIG_T::n_partitions; i_part++) { - #pragma HLS PIPELINE II=CONFIG_T::reuse_factor rewind - - CONFIG_T::template fill_buffer::fill_buffer(data, data_buf, i_part); - - PixelLoop: - for (unsigned i_pxl = 0; i_pxl < CONFIG_T::n_pixels; i_pxl++) { - #pragma HLS UNROLL - - data_T cache; - - // Do the matrix-multiply - Product1: - for (int i_in = 0; i_in < mult_n_in; i_in++) { - #pragma HLS UNROLL - cache = data_buf[i_pxl][i_in]; - Product2: - for (int i_out = 0; i_out < mult_n_out; i_out++) { - #pragma HLS UNROLL - mult[i_in * mult_n_out + i_out] = - CONFIG_T::mult_config::template product::product( - cache, weights[i_in * mult_n_out + i_out]); - } - } - - // Initialize accumulator with input biases - ResetAccum: - for (int i_acc = 0; i_acc < mult_n_out; i_acc++) { - #pragma HLS UNROLL - acc[i_acc] = (typename CONFIG_T::accum_t)biases[i_acc]; - } - - // Accumulate multiplication result - Accum1: - for (int i_in = 0; i_in < mult_n_in; i_in++) { - #pragma HLS UNROLL - Accum2: - for (int i_out = 0; i_out < mult_n_out; i_out++) { - #pragma HLS UNROLL - acc[i_out] += mult[i_in * mult_n_out + i_out]; - } - } - - // Cast to "res_t" type - Result: - for (int i_res = 0; i_res < mult_n_out; i_res++) { - #pragma HLS UNROLL - *(res++) = cast(acc[i_res]); - } - } - } -} - -template -void pointwise_conv_1d_latency_cl(data_T data[CONFIG_T::in_width * CONFIG_T::n_chan / CONFIG_T::reuse_factor], - res_T res[CONFIG_T::out_width * CONFIG_T::n_filt / CONFIG_T::reuse_factor], - typename CONFIG_T::weight_t weights[CONFIG_T::n_chan * CONFIG_T::n_filt], - typename CONFIG_T::bias_t biases[CONFIG_T::n_filt]) { - assert(CONFIG_T::filt_width == 1); - - typename CONFIG_T::accum_t mult[CONFIG_T::out_width * CONFIG_T::n_filt * CONFIG_T::n_chan / CONFIG_T::reuse_factor]; - typename CONFIG_T::accum_t acc[CONFIG_T::out_width / CONFIG_T::reuse_factor][CONFIG_T::n_filt]; - - #pragma HLS ARRAY_PARTITION variable=mult complete dim=0 - #pragma HLS ARRAY_PARTITION variable=acc complete dim=0 - - // Use 
a function_instantiate in case it helps to explicitly optimize unchanging weights/biases - #pragma HLS function_instantiate variable=weights,biases - - // Parallel mode - #pragma HLS PIPELINE II=CONFIG_T::reuse_factor - #pragma HLS ARRAY_PARTITION variable=weights complete dim=0 - #pragma HLS ARRAY_PARTITION variable=biases complete dim=0 - - // Limit multipliers to control parallelization - int multiplier_limit = - ceil((float(CONFIG_T::out_width) / float(CONFIG_T::reuse_factor) * CONFIG_T::n_filt * CONFIG_T::n_chan) / - float(CONFIG_T::reuse_factor)); -#pragma HLS ALLOCATION operation instances=mul limit=multiplier_limit - -// Convolve, saving all multiplication results to accumulate later -ConvOut: - for (int ii = 0; ii < CONFIG_T::out_width / CONFIG_T::reuse_factor; ii++) { - ConvFilt: - for (int ff = 0; ff < CONFIG_T::n_filt; ff++) { - ConvChan: - for (int cc = 0; cc < CONFIG_T::n_chan; cc++) { - #pragma HLS UNROLL - int index_mult = ii * CONFIG_T::n_filt * CONFIG_T::n_chan + ff * CONFIG_T::n_chan + cc; - int index_weight = cc * CONFIG_T::n_filt + ff; - int index_data = (ii * CONFIG_T::stride_width - CONFIG_T::pad_left) * CONFIG_T::n_chan + cc; - - if ((ii * CONFIG_T::stride_width) < CONFIG_T::pad_left || - (ii * CONFIG_T::stride_width) >= (CONFIG_T::pad_left + CONFIG_T::in_width)) { - mult[index_mult] = 0; - } else { - mult[index_mult] = data[index_data] * weights[index_weight]; - } - } // end channel loop - } // end filter loop - } // end output loop - - // Initialize accumulator with input biases - for (int ii = 0; ii < CONFIG_T::out_width / CONFIG_T::reuse_factor; ii++) { - for (int ff = 0; ff < CONFIG_T::n_filt; ff++) { - #pragma HLS UNROLL - acc[ii][ff] = biases[ff]; - } - } - -// Accumulate multiplication result -AccumOut: - for (int ii = 0; ii < CONFIG_T::out_width / CONFIG_T::reuse_factor; ii++) { - AccumFilt: - for (int ff = 0; ff < CONFIG_T::n_filt; ff++) { - // Do "dot product" sum within filter and sum over channels - AccumChan: - for (int cc = 0; cc < CONFIG_T::n_chan; cc++) { - int index_mult = ii * CONFIG_T::n_filt * CONFIG_T::n_chan + ff * CONFIG_T::n_chan + cc; - acc[ii][ff] += mult[index_mult]; - } // end channel loop - } // end filter loop - } // end output loop - - // Cast to "res_t" type - for (int ii = 0; ii < CONFIG_T::out_width / CONFIG_T::reuse_factor; ii++) { - for (int ff = 0; ff < CONFIG_T::n_filt; ff++) { - #pragma HLS UNROLL - res[ii * CONFIG_T::n_filt + ff] = (res_T)(acc[ii][ff]); - } - } -} - -template -void pointwise_conv_1d_latency_cl_split_by_rf(data_T data[CONFIG_T::in_width * CONFIG_T::n_chan], - res_T res[CONFIG_T::out_width * CONFIG_T::n_filt], - typename CONFIG_T::weight_t weights[CONFIG_T::n_chan * CONFIG_T::n_filt], - typename CONFIG_T::bias_t biases[CONFIG_T::n_filt]) { - - data_T data_tmp[CONFIG_T::reuse_factor][CONFIG_T::in_width * CONFIG_T::n_chan / CONFIG_T::reuse_factor]; - #pragma HLS ARRAY_PARTITION variable=data_tmp complete dim=0 - res_T res_tmp[CONFIG_T::reuse_factor][CONFIG_T::out_width * CONFIG_T::n_filt / CONFIG_T::reuse_factor]; - #pragma HLS ARRAY_PARTITION variable=res_tmp complete dim=0 - -RFInputLoop: - for (int jj = 0; jj < CONFIG_T::reuse_factor; jj++) { - #pragma HLS UNROLL - InnerInputLoop: - for (int ii = 0; ii < CONFIG_T::in_width * CONFIG_T::n_chan / CONFIG_T::reuse_factor; ii++) { - #pragma HLS UNROLL - data_tmp[jj][ii] = data[jj * CONFIG_T::in_width * CONFIG_T::n_chan / CONFIG_T::reuse_factor + ii]; - } - } - - pointwise_conv_1d_latency_cl(data_tmp[0], res_tmp[0], weights, biases); - 
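// The call chain that follows is a statically unrolled loop: HLS must see
// one distinct kernel invocation per reuse-factor slice at compile time,
// hence the explicit enumeration up to the RF <= 120 ceiling the caller
// checks. In plain C++ the intent is just this (sketch, names illustrative):

#include <cstddef>

// 'kernel' consumes n_in/rf inputs and produces n_out/rf outputs per slice.
void split_by_rf_ref(const float *data, float *res, std::size_t rf,
                     std::size_t n_in, std::size_t n_out,
                     void (*kernel)(const float *, float *)) {
    for (std::size_t s = 0; s < rf; s++)
        kernel(data + s * (n_in / rf), res + s * (n_out / rf));
}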
pointwise_conv_1d_latency_cl(data_tmp[1], res_tmp[1], weights, biases); - if (CONFIG_T::reuse_factor > 2) - pointwise_conv_1d_latency_cl(data_tmp[2], res_tmp[2], weights, biases); - if (CONFIG_T::reuse_factor > 3) - pointwise_conv_1d_latency_cl(data_tmp[3], res_tmp[3], weights, biases); - if (CONFIG_T::reuse_factor > 4) - pointwise_conv_1d_latency_cl(data_tmp[4], res_tmp[4], weights, biases); - if (CONFIG_T::reuse_factor > 5) - pointwise_conv_1d_latency_cl(data_tmp[5], res_tmp[5], weights, biases); - if (CONFIG_T::reuse_factor > 6) - pointwise_conv_1d_latency_cl(data_tmp[6], res_tmp[6], weights, biases); - if (CONFIG_T::reuse_factor > 7) - pointwise_conv_1d_latency_cl(data_tmp[7], res_tmp[7], weights, biases); - if (CONFIG_T::reuse_factor > 8) - pointwise_conv_1d_latency_cl(data_tmp[8], res_tmp[8], weights, biases); - if (CONFIG_T::reuse_factor > 9) - pointwise_conv_1d_latency_cl(data_tmp[9], res_tmp[9], weights, biases); - if (CONFIG_T::reuse_factor > 10) - pointwise_conv_1d_latency_cl(data_tmp[10], res_tmp[10], weights, biases); - if (CONFIG_T::reuse_factor > 11) - pointwise_conv_1d_latency_cl(data_tmp[11], res_tmp[11], weights, biases); - if (CONFIG_T::reuse_factor > 12) - pointwise_conv_1d_latency_cl(data_tmp[12], res_tmp[12], weights, biases); - if (CONFIG_T::reuse_factor > 13) - pointwise_conv_1d_latency_cl(data_tmp[13], res_tmp[13], weights, biases); - if (CONFIG_T::reuse_factor > 14) - pointwise_conv_1d_latency_cl(data_tmp[14], res_tmp[14], weights, biases); - if (CONFIG_T::reuse_factor > 15) - pointwise_conv_1d_latency_cl(data_tmp[15], res_tmp[15], weights, biases); - if (CONFIG_T::reuse_factor > 16) - pointwise_conv_1d_latency_cl(data_tmp[16], res_tmp[16], weights, biases); - if (CONFIG_T::reuse_factor > 17) - pointwise_conv_1d_latency_cl(data_tmp[17], res_tmp[17], weights, biases); - if (CONFIG_T::reuse_factor > 18) - pointwise_conv_1d_latency_cl(data_tmp[18], res_tmp[18], weights, biases); - if (CONFIG_T::reuse_factor > 19) - pointwise_conv_1d_latency_cl(data_tmp[19], res_tmp[19], weights, biases); - if (CONFIG_T::reuse_factor > 20) - pointwise_conv_1d_latency_cl(data_tmp[20], res_tmp[20], weights, biases); - if (CONFIG_T::reuse_factor > 21) - pointwise_conv_1d_latency_cl(data_tmp[21], res_tmp[21], weights, biases); - if (CONFIG_T::reuse_factor > 22) - pointwise_conv_1d_latency_cl(data_tmp[22], res_tmp[22], weights, biases); - if (CONFIG_T::reuse_factor > 23) - pointwise_conv_1d_latency_cl(data_tmp[23], res_tmp[23], weights, biases); - if (CONFIG_T::reuse_factor > 24) - pointwise_conv_1d_latency_cl(data_tmp[24], res_tmp[24], weights, biases); - if (CONFIG_T::reuse_factor > 25) - pointwise_conv_1d_latency_cl(data_tmp[25], res_tmp[25], weights, biases); - if (CONFIG_T::reuse_factor > 26) - pointwise_conv_1d_latency_cl(data_tmp[26], res_tmp[26], weights, biases); - if (CONFIG_T::reuse_factor > 27) - pointwise_conv_1d_latency_cl(data_tmp[27], res_tmp[27], weights, biases); - if (CONFIG_T::reuse_factor > 28) - pointwise_conv_1d_latency_cl(data_tmp[28], res_tmp[28], weights, biases); - if (CONFIG_T::reuse_factor > 29) - pointwise_conv_1d_latency_cl(data_tmp[29], res_tmp[29], weights, biases); - if (CONFIG_T::reuse_factor > 30) - pointwise_conv_1d_latency_cl(data_tmp[30], res_tmp[30], weights, biases); - if (CONFIG_T::reuse_factor > 31) - pointwise_conv_1d_latency_cl(data_tmp[31], res_tmp[31], weights, biases); - if (CONFIG_T::reuse_factor > 32) - pointwise_conv_1d_latency_cl(data_tmp[32], res_tmp[32], weights, biases); - if (CONFIG_T::reuse_factor > 33) - 
-        pointwise_conv_1d_latency_cl(data_tmp[33], res_tmp[33], weights, biases);
-    if (CONFIG_T::reuse_factor > 34)
-        pointwise_conv_1d_latency_cl(data_tmp[34], res_tmp[34], weights, biases);
-    if (CONFIG_T::reuse_factor > 35)
-        pointwise_conv_1d_latency_cl(data_tmp[35], res_tmp[35], weights, biases);
-    if (CONFIG_T::reuse_factor > 36)
-        pointwise_conv_1d_latency_cl(data_tmp[36], res_tmp[36], weights, biases);
-    if (CONFIG_T::reuse_factor > 37)
-        pointwise_conv_1d_latency_cl(data_tmp[37], res_tmp[37], weights, biases);
-    if (CONFIG_T::reuse_factor > 38)
-        pointwise_conv_1d_latency_cl(data_tmp[38], res_tmp[38], weights, biases);
-    if (CONFIG_T::reuse_factor > 39)
-        pointwise_conv_1d_latency_cl(data_tmp[39], res_tmp[39], weights, biases);
-    if (CONFIG_T::reuse_factor > 40)
-        pointwise_conv_1d_latency_cl(data_tmp[40], res_tmp[40], weights, biases);
-    if (CONFIG_T::reuse_factor > 41)
-        pointwise_conv_1d_latency_cl(data_tmp[41], res_tmp[41], weights, biases);
-    if (CONFIG_T::reuse_factor > 42)
-        pointwise_conv_1d_latency_cl(data_tmp[42], res_tmp[42], weights, biases);
-    if (CONFIG_T::reuse_factor > 43)
-        pointwise_conv_1d_latency_cl(data_tmp[43], res_tmp[43], weights, biases);
-    if (CONFIG_T::reuse_factor > 44)
-        pointwise_conv_1d_latency_cl(data_tmp[44], res_tmp[44], weights, biases);
-    if (CONFIG_T::reuse_factor > 45)
-        pointwise_conv_1d_latency_cl(data_tmp[45], res_tmp[45], weights, biases);
-    if (CONFIG_T::reuse_factor > 46)
-        pointwise_conv_1d_latency_cl(data_tmp[46], res_tmp[46], weights, biases);
-    if (CONFIG_T::reuse_factor > 47)
-        pointwise_conv_1d_latency_cl(data_tmp[47], res_tmp[47], weights, biases);
-    if (CONFIG_T::reuse_factor > 48)
-        pointwise_conv_1d_latency_cl(data_tmp[48], res_tmp[48], weights, biases);
-    if (CONFIG_T::reuse_factor > 49)
-        pointwise_conv_1d_latency_cl(data_tmp[49], res_tmp[49], weights, biases);
-    if (CONFIG_T::reuse_factor > 50)
-        pointwise_conv_1d_latency_cl(data_tmp[50], res_tmp[50], weights, biases);
-    if (CONFIG_T::reuse_factor > 51)
-        pointwise_conv_1d_latency_cl(data_tmp[51], res_tmp[51], weights, biases);
-    if (CONFIG_T::reuse_factor > 52)
-        pointwise_conv_1d_latency_cl(data_tmp[52], res_tmp[52], weights, biases);
-    if (CONFIG_T::reuse_factor > 53)
-        pointwise_conv_1d_latency_cl(data_tmp[53], res_tmp[53], weights, biases);
-    if (CONFIG_T::reuse_factor > 54)
-        pointwise_conv_1d_latency_cl(data_tmp[54], res_tmp[54], weights, biases);
-    if (CONFIG_T::reuse_factor > 55)
-        pointwise_conv_1d_latency_cl(data_tmp[55], res_tmp[55], weights, biases);
-    if (CONFIG_T::reuse_factor > 56)
-        pointwise_conv_1d_latency_cl(data_tmp[56], res_tmp[56], weights, biases);
-    if (CONFIG_T::reuse_factor > 57)
-        pointwise_conv_1d_latency_cl(data_tmp[57], res_tmp[57], weights, biases);
-    if (CONFIG_T::reuse_factor > 58)
-        pointwise_conv_1d_latency_cl(data_tmp[58], res_tmp[58], weights, biases);
-    if (CONFIG_T::reuse_factor > 59)
-        pointwise_conv_1d_latency_cl(data_tmp[59], res_tmp[59], weights, biases);
-    if (CONFIG_T::reuse_factor > 60)
-        pointwise_conv_1d_latency_cl(data_tmp[60], res_tmp[60], weights, biases);
-    if (CONFIG_T::reuse_factor > 61)
-        pointwise_conv_1d_latency_cl(data_tmp[61], res_tmp[61], weights, biases);
-    if (CONFIG_T::reuse_factor > 62)
-        pointwise_conv_1d_latency_cl(data_tmp[62], res_tmp[62], weights, biases);
-    if (CONFIG_T::reuse_factor > 63)
-        pointwise_conv_1d_latency_cl(data_tmp[63], res_tmp[63], weights, biases);
-    if (CONFIG_T::reuse_factor > 64)
-        pointwise_conv_1d_latency_cl(data_tmp[64], res_tmp[64], weights, biases);
-    if (CONFIG_T::reuse_factor > 65)
-
pointwise_conv_1d_latency_cl(data_tmp[65], res_tmp[65], weights, biases); - if (CONFIG_T::reuse_factor > 66) - pointwise_conv_1d_latency_cl(data_tmp[66], res_tmp[66], weights, biases); - if (CONFIG_T::reuse_factor > 67) - pointwise_conv_1d_latency_cl(data_tmp[67], res_tmp[67], weights, biases); - if (CONFIG_T::reuse_factor > 68) - pointwise_conv_1d_latency_cl(data_tmp[68], res_tmp[68], weights, biases); - if (CONFIG_T::reuse_factor > 69) - pointwise_conv_1d_latency_cl(data_tmp[69], res_tmp[69], weights, biases); - if (CONFIG_T::reuse_factor > 70) - pointwise_conv_1d_latency_cl(data_tmp[70], res_tmp[70], weights, biases); - if (CONFIG_T::reuse_factor > 71) - pointwise_conv_1d_latency_cl(data_tmp[71], res_tmp[71], weights, biases); - if (CONFIG_T::reuse_factor > 72) - pointwise_conv_1d_latency_cl(data_tmp[72], res_tmp[72], weights, biases); - if (CONFIG_T::reuse_factor > 73) - pointwise_conv_1d_latency_cl(data_tmp[73], res_tmp[73], weights, biases); - if (CONFIG_T::reuse_factor > 74) - pointwise_conv_1d_latency_cl(data_tmp[74], res_tmp[74], weights, biases); - if (CONFIG_T::reuse_factor > 75) - pointwise_conv_1d_latency_cl(data_tmp[75], res_tmp[75], weights, biases); - if (CONFIG_T::reuse_factor > 76) - pointwise_conv_1d_latency_cl(data_tmp[76], res_tmp[76], weights, biases); - if (CONFIG_T::reuse_factor > 77) - pointwise_conv_1d_latency_cl(data_tmp[77], res_tmp[77], weights, biases); - if (CONFIG_T::reuse_factor > 78) - pointwise_conv_1d_latency_cl(data_tmp[78], res_tmp[78], weights, biases); - if (CONFIG_T::reuse_factor > 79) - pointwise_conv_1d_latency_cl(data_tmp[79], res_tmp[79], weights, biases); - if (CONFIG_T::reuse_factor > 80) - pointwise_conv_1d_latency_cl(data_tmp[80], res_tmp[80], weights, biases); - if (CONFIG_T::reuse_factor > 81) - pointwise_conv_1d_latency_cl(data_tmp[81], res_tmp[81], weights, biases); - if (CONFIG_T::reuse_factor > 82) - pointwise_conv_1d_latency_cl(data_tmp[82], res_tmp[82], weights, biases); - if (CONFIG_T::reuse_factor > 83) - pointwise_conv_1d_latency_cl(data_tmp[83], res_tmp[83], weights, biases); - if (CONFIG_T::reuse_factor > 84) - pointwise_conv_1d_latency_cl(data_tmp[84], res_tmp[84], weights, biases); - if (CONFIG_T::reuse_factor > 85) - pointwise_conv_1d_latency_cl(data_tmp[85], res_tmp[85], weights, biases); - if (CONFIG_T::reuse_factor > 86) - pointwise_conv_1d_latency_cl(data_tmp[86], res_tmp[86], weights, biases); - if (CONFIG_T::reuse_factor > 87) - pointwise_conv_1d_latency_cl(data_tmp[87], res_tmp[87], weights, biases); - if (CONFIG_T::reuse_factor > 88) - pointwise_conv_1d_latency_cl(data_tmp[88], res_tmp[88], weights, biases); - if (CONFIG_T::reuse_factor > 89) - pointwise_conv_1d_latency_cl(data_tmp[89], res_tmp[89], weights, biases); - if (CONFIG_T::reuse_factor > 90) - pointwise_conv_1d_latency_cl(data_tmp[90], res_tmp[90], weights, biases); - if (CONFIG_T::reuse_factor > 91) - pointwise_conv_1d_latency_cl(data_tmp[91], res_tmp[91], weights, biases); - if (CONFIG_T::reuse_factor > 92) - pointwise_conv_1d_latency_cl(data_tmp[92], res_tmp[92], weights, biases); - if (CONFIG_T::reuse_factor > 93) - pointwise_conv_1d_latency_cl(data_tmp[93], res_tmp[93], weights, biases); - if (CONFIG_T::reuse_factor > 94) - pointwise_conv_1d_latency_cl(data_tmp[94], res_tmp[94], weights, biases); - if (CONFIG_T::reuse_factor > 95) - pointwise_conv_1d_latency_cl(data_tmp[95], res_tmp[95], weights, biases); - if (CONFIG_T::reuse_factor > 96) - pointwise_conv_1d_latency_cl(data_tmp[96], res_tmp[96], weights, biases); - if (CONFIG_T::reuse_factor > 97) - 
pointwise_conv_1d_latency_cl(data_tmp[97], res_tmp[97], weights, biases); - if (CONFIG_T::reuse_factor > 98) - pointwise_conv_1d_latency_cl(data_tmp[98], res_tmp[98], weights, biases); - if (CONFIG_T::reuse_factor > 99) - pointwise_conv_1d_latency_cl(data_tmp[99], res_tmp[99], weights, biases); - if (CONFIG_T::reuse_factor > 100) - pointwise_conv_1d_latency_cl(data_tmp[100], res_tmp[100], weights, biases); - if (CONFIG_T::reuse_factor > 101) - pointwise_conv_1d_latency_cl(data_tmp[101], res_tmp[101], weights, biases); - if (CONFIG_T::reuse_factor > 102) - pointwise_conv_1d_latency_cl(data_tmp[102], res_tmp[102], weights, biases); - if (CONFIG_T::reuse_factor > 103) - pointwise_conv_1d_latency_cl(data_tmp[103], res_tmp[103], weights, biases); - if (CONFIG_T::reuse_factor > 104) - pointwise_conv_1d_latency_cl(data_tmp[104], res_tmp[104], weights, biases); - if (CONFIG_T::reuse_factor > 105) - pointwise_conv_1d_latency_cl(data_tmp[105], res_tmp[105], weights, biases); - if (CONFIG_T::reuse_factor > 106) - pointwise_conv_1d_latency_cl(data_tmp[106], res_tmp[106], weights, biases); - if (CONFIG_T::reuse_factor > 107) - pointwise_conv_1d_latency_cl(data_tmp[107], res_tmp[107], weights, biases); - if (CONFIG_T::reuse_factor > 108) - pointwise_conv_1d_latency_cl(data_tmp[108], res_tmp[108], weights, biases); - if (CONFIG_T::reuse_factor > 109) - pointwise_conv_1d_latency_cl(data_tmp[109], res_tmp[109], weights, biases); - if (CONFIG_T::reuse_factor > 110) - pointwise_conv_1d_latency_cl(data_tmp[110], res_tmp[110], weights, biases); - if (CONFIG_T::reuse_factor > 111) - pointwise_conv_1d_latency_cl(data_tmp[111], res_tmp[111], weights, biases); - if (CONFIG_T::reuse_factor > 112) - pointwise_conv_1d_latency_cl(data_tmp[112], res_tmp[112], weights, biases); - if (CONFIG_T::reuse_factor > 113) - pointwise_conv_1d_latency_cl(data_tmp[113], res_tmp[113], weights, biases); - if (CONFIG_T::reuse_factor > 114) - pointwise_conv_1d_latency_cl(data_tmp[114], res_tmp[114], weights, biases); - if (CONFIG_T::reuse_factor > 115) - pointwise_conv_1d_latency_cl(data_tmp[115], res_tmp[115], weights, biases); - if (CONFIG_T::reuse_factor > 116) - pointwise_conv_1d_latency_cl(data_tmp[116], res_tmp[116], weights, biases); - if (CONFIG_T::reuse_factor > 117) - pointwise_conv_1d_latency_cl(data_tmp[117], res_tmp[117], weights, biases); - if (CONFIG_T::reuse_factor > 118) - pointwise_conv_1d_latency_cl(data_tmp[118], res_tmp[118], weights, biases); - if (CONFIG_T::reuse_factor > 119) - pointwise_conv_1d_latency_cl(data_tmp[119], res_tmp[119], weights, biases); - -RFOutputLoop: - for (int jj = 0; jj < CONFIG_T::reuse_factor; jj++) { - #pragma HLS UNROLL - InnerOutputLoop: - for (int ii = 0; ii < CONFIG_T::out_width * CONFIG_T::n_filt / CONFIG_T::reuse_factor; ii++) { - #pragma HLS UNROLL - res[jj * CONFIG_T::out_width * CONFIG_T::n_filt / CONFIG_T::reuse_factor + ii] = res_tmp[jj][ii]; - } - } -} - -} // namespace nnet -#endif diff --git a/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_conv1d_resource.h b/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_conv1d_resource.h deleted file mode 100644 index 6e70158..0000000 --- a/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_conv1d_resource.h +++ /dev/null @@ -1,103 +0,0 @@ -#ifndef NNET_CONV1D_RESOURCE_H_ -#define NNET_CONV1D_RESOURCE_H_ - -#include "nnet_common.h" -#include "nnet_dense.h" - -namespace nnet { - -template -void conv_1d_resource_cl(data_T data[CONFIG_T::in_width * CONFIG_T::n_chan], - res_T res[CONFIG_T::out_width * CONFIG_T::n_filt], - typename 
CONFIG_T::weight_t weights[CONFIG_T::filt_width * CONFIG_T::n_chan * CONFIG_T::n_filt], - typename CONFIG_T::bias_t biases[CONFIG_T::n_filt]) { - constexpr unsigned mult_n_in = CONFIG_T::filt_width * CONFIG_T::n_chan; - constexpr unsigned mult_n_out = CONFIG_T::n_filt; - constexpr unsigned block_factor = DIV_ROUNDUP(mult_n_in * mult_n_out, CONFIG_T::reuse_factor); - constexpr unsigned multscale = block_factor / mult_n_out; - - assert((block_factor % mult_n_out == 0 || CONFIG_T::reuse_factor >= mult_n_in) && - "The current Reuse Factor is not allowed"); - assert((CONFIG_T::reuse_factor <= CONFIG_T::filt_width * CONFIG_T::n_chan) && - "This function is correct only for RF <= FILT_WIDTH * N_CHAN"); - - data_T data_buf[CONFIG_T::n_pixels][mult_n_in]; - #pragma HLS ARRAY_PARTITION variable=data_buf complete dim=0 - - #pragma HLS ARRAY_RESHAPE variable=weights block factor=block_factor - #pragma HLS ARRAY_PARTITION variable=biases complete - - typename CONFIG_T::accum_t acc[CONFIG_T::n_pixels][mult_n_out]; - #pragma HLS ARRAY_PARTITION variable=acc complete dim=0 - -PartitionLoop: - for (unsigned i_part = 0; i_part < CONFIG_T::n_partitions; i_part++) { - //#pragma HLS UNROLL // We don't want this loop unrolled - - CONFIG_T::template fill_buffer::fill_buffer(data, data_buf, i_part); - - PixelInitAccumLoop: - for (unsigned i_pxl = 0; i_pxl < CONFIG_T::n_pixels; i_pxl++) { - #pragma HLS UNROLL - - InitAccumLoop: - for (unsigned i_acc = 0; i_acc < mult_n_out; i_acc++) { - #pragma HLS UNROLL - acc[i_pxl][i_acc] = (typename CONFIG_T::accum_t)biases[i_acc]; - } - } - - ReuseLoop: - for (unsigned i_rf = 0; i_rf < CONFIG_T::reuse_factor; i_rf++) { - #pragma HLS PIPELINE II=1 rewind - - unsigned i_w = i_rf; - unsigned i_in = i_rf; - unsigned i_out = 0; - unsigned i_acc = 0; - - MultLoop: - for (unsigned i_blk = 0; i_blk < block_factor; i_blk++) { - #pragma HLS UNROLL - - PixelMultLoop: - for (unsigned i_pxl = 0; i_pxl < CONFIG_T::n_pixels; i_pxl++) { - #pragma HLS UNROLL - - acc[i_pxl][i_out] += static_cast( - CONFIG_T::mult_config::template product::product( - data_buf[i_pxl][i_in], weights[i_w])); - } - - // Increment i_w - i_w += CONFIG_T::reuse_factor; - // Increment i_in - i_in += CONFIG_T::reuse_factor; - if (i_in >= mult_n_in) { - i_in = i_rf; - } - // Increment i_out - if (i_acc + 1 >= multscale) { - i_acc = 0; - i_out++; - } else { - i_acc++; - } - } - } - - PixelResultLoop: - for (unsigned i_pxl = 0; i_pxl < CONFIG_T::n_pixels; i_pxl++) { - #pragma HLS UNROLL - // Cast to "res_t" type - ResultLoop: - for (unsigned i_res = 0; i_res < mult_n_out; i_res++) { - #pragma HLS UNROLL - *(res++) = cast(acc[i_pxl][i_res]); - } - } - } -} - -} // namespace nnet -#endif diff --git a/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_conv1d_stream.h b/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_conv1d_stream.h deleted file mode 100644 index b23c330..0000000 --- a/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_conv1d_stream.h +++ /dev/null @@ -1,89 +0,0 @@ -#ifndef NNET_CONV1D_STREAM_H_ -#define NNET_CONV1D_STREAM_H_ - -#include "hls_stream.h" -#include "nnet_common.h" -#include "nnet_conv_stream.h" - -namespace nnet { - -template -void compute_scaled_indices_1d(const unsigned w_idx, ap_uint *pixel_idx) { - unsigned wp_idx = w_idx * (data_T::size / CONFIG_T::n_chan); - -ComputeIndex: - for (unsigned p = 0; p < data_T::size / CONFIG_T::n_chan; p++) { - #pragma HLS UNROLL - unsigned sw_idx = - CONFIG_T::template scale_index::scale_index( - wp_idx + p); - pixel_idx[p] = CONFIG_T::pixels[sw_idx]; - } -} - 
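// Whichever streaming schedule is chosen below (encoded or line-buffer),
// the arithmetic being implemented is a plain strided 1-D convolution.
// A reference model in ordinary C++, assuming the channels-last data and
// (kernel, channel, filter) weight layout these kernels index with;
// useful as a testbench oracle:

void conv1d_ref(const float *data, float *res, const float *weights,
                const float *biases, int in_width, int n_chan, int filt_width,
                int n_filt, int stride) {
    int out_width = (in_width - filt_width) / stride + 1; // valid padding
    for (int ow = 0; ow < out_width; ow++) {
        for (int f = 0; f < n_filt; f++) {
            float acc = biases[f];
            for (int k = 0; k < filt_width; k++)
                for (int c = 0; c < n_chan; c++)
                    acc += data[(ow * stride + k) * n_chan + c] *
                           weights[(k * n_chan + c) * n_filt + f];
            res[ow * n_filt + f] = acc;
        }
    }
}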
-template -void conv_1d_encoded_cl(hls::stream &data, hls::stream &res, - typename CONFIG_T::weight_t weights[CONFIG_T::filt_width * CONFIG_T::n_chan * CONFIG_T::n_filt], - typename CONFIG_T::bias_t biases[CONFIG_T::n_filt]) { - assert(CONFIG_T::pad_left == 0 && CONFIG_T::pad_right == 0); - - hls::stream data_window[CONFIG_T::filt_width * CONFIG_T::n_chan]; - const int win_depth = CONFIG_T::out_width; - for (unsigned i_out = 0; i_out < CONFIG_T::filt_width * CONFIG_T::n_chan; i_out++) { - #pragma HLS STREAM variable=data_window[i_out] depth=win_depth - } - - #pragma HLS ARRAY_PARTITION variable=CONFIG_T::pixels complete - - res_T res_pack; - PRAGMA_DATA_PACK(res_pack) - unsigned outputs_ready = 0; - - ap_uint pixel_idx[data_T::size / CONFIG_T::n_chan]; - #pragma HLS ARRAY_PARTITION variable=pixel_idx complete - -ReadInputWidth: - for (unsigned i_iw = 0; i_iw < CONFIG_T::in_width / (data_T::size / CONFIG_T::n_chan); i_iw++) { - #pragma HLS LOOP_FLATTEN - if (CONFIG_T::strategy == nnet::latency && data_T::size / CONFIG_T::n_chan == 1) { - #pragma HLS PIPELINE II=CONFIG_T::reuse_factor - } - compute_scaled_indices_1d(i_iw, pixel_idx); - compute_output_encoded(data.read(), data_window, res, res_pack, outputs_ready, weights, - biases, pixel_idx); - } -} - -template -void conv_1d_buffer_cl(hls::stream &data, hls::stream &res, - typename CONFIG_T::weight_t weights[CONFIG_T::filt_width * CONFIG_T::n_chan * CONFIG_T::n_filt], - typename CONFIG_T::bias_t biases[CONFIG_T::n_filt]) { - assert(CONFIG_T::pad_left == 0 && CONFIG_T::pad_right == 0); - -ReadInputWidth: - for (unsigned i_iw = 0; i_iw < CONFIG_T::in_width; i_iw++) { - #pragma HLS LOOP_FLATTEN - if (CONFIG_T::strategy == nnet::latency) { - #pragma HLS PIPELINE II=CONFIG_T::reuse_factor - } - compute_output_buffer_1d(data.read(), res, weights, biases); - } -} - -template -void conv_1d_cl(hls::stream &data, hls::stream &res, - typename CONFIG_T::weight_t weights[CONFIG_T::filt_width * CONFIG_T::n_chan * CONFIG_T::n_filt], - typename CONFIG_T::bias_t biases[CONFIG_T::n_filt]) { - #pragma HLS inline recursive - switch (CONFIG_T::implementation) { - case conv_implementation::linebuffer: - conv_1d_buffer_cl(data, res, weights, biases); - break; - case conv_implementation::encoded: - conv_1d_encoded_cl(data, res, weights, biases); - break; - } -} - -} // namespace nnet -#endif diff --git a/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_conv2d.h b/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_conv2d.h deleted file mode 100644 index 71a88f4..0000000 --- a/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_conv2d.h +++ /dev/null @@ -1,75 +0,0 @@ -#ifndef NNET_CONV2D_H_ -#define NNET_CONV2D_H_ - -#include "nnet_common.h" -#include "nnet_conv2d_latency.h" -#include "nnet_conv2d_resource.h" -#include - -namespace nnet { - -struct conv2d_config { - // Internal data type definitions - typedef float bias_t; - typedef float weight_t; - typedef float accum_t; - - // Convolutional parameters - static const unsigned pad_top = 0; - static const unsigned pad_bottom = 0; - static const unsigned pad_left = 0; - static const unsigned pad_right = 0; - static const unsigned in_height = 10; - static const unsigned in_width = 10; - static const unsigned n_chan = 1; - static const unsigned filt_height = 1; - static const unsigned filt_width = 1; - static const unsigned kernel_size = filt_height * filt_width; - static const unsigned n_filt = 1; - static const unsigned stride_height = 1; - static const unsigned stride_width = 1; - static const unsigned out_height = 10; - 
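// The out_height/out_width fields here are precomputed by the converter
// from the standard convolution shape relation (note the pads add). A
// sketch, with a static_assert checking this struct's defaults:

constexpr unsigned conv_out_dim(unsigned in, unsigned pad_lo, unsigned pad_hi,
                                unsigned filt, unsigned stride, unsigned dilation) {
    return (in + pad_lo + pad_hi - (dilation * (filt - 1) + 1)) / stride + 1;
}
static_assert(conv_out_dim(10, 0, 0, 1, 1, 1) == 10, "10-wide input, 1x1 filter");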
static const unsigned out_width = 10; - static const unsigned dilation_height = 1; - static const unsigned dilation_width = 1; - - static const unsigned reuse_factor = 1; - static const bool store_weights_in_bram = false; - static const unsigned n_zeros = 0; // not used yet -}; - -template -void conv_2d_cl( - data_T data[CONFIG_T::in_height * CONFIG_T::in_width * CONFIG_T::n_chan], - res_T res[CONFIG_T::out_height * CONFIG_T::out_width * CONFIG_T::n_filt], - typename CONFIG_T::weight_t weights[CONFIG_T::filt_height * CONFIG_T::filt_width * CONFIG_T::n_chan * CONFIG_T::n_filt], - typename CONFIG_T::bias_t biases[CONFIG_T::n_filt]) { - #pragma HLS INLINE region - - if (CONFIG_T::strategy == nnet::latency) { - conv_2d_latency_cl(data, res, weights, biases); - } else { - conv_2d_resource_cl(data, res, weights, biases); - } -} - -template -void pointwise_conv_2d_cl(data_T data[CONFIG_T::in_height * CONFIG_T::in_width * CONFIG_T::n_chan], - res_T res[CONFIG_T::out_height * CONFIG_T::out_width * CONFIG_T::n_filt], - typename CONFIG_T::weight_t weights[CONFIG_T::n_chan * CONFIG_T::n_filt], - typename CONFIG_T::bias_t biases[CONFIG_T::n_filt]) { - assert(CONFIG_T::filt_width == 1); - - #pragma HLS INLINE region - - // Nothing special to be done for io_parallel implementation - if (CONFIG_T::strategy == nnet::latency) { - conv_2d_latency_cl(data, res, weights, biases); - } else { - conv_2d_resource_cl(data, res, weights, biases); - } -} - -} // namespace nnet - -#endif diff --git a/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_conv2d_latency.h b/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_conv2d_latency.h deleted file mode 100644 index 5114af7..0000000 --- a/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_conv2d_latency.h +++ /dev/null @@ -1,89 +0,0 @@ -#ifndef NNET_CONV2D_LATENCY_H_ -#define NNET_CONV2D_LATENCY_H_ - -#include "nnet_common.h" -#include "nnet_mult.h" -#include - -namespace nnet { - -template -void conv_2d_latency_cl( - data_T data[CONFIG_T::in_height * CONFIG_T::in_width * CONFIG_T::n_chan], - res_T res[CONFIG_T::out_height * CONFIG_T::out_width * CONFIG_T::n_filt], - typename CONFIG_T::weight_t weights[CONFIG_T::filt_height * CONFIG_T::filt_width * CONFIG_T::n_chan * CONFIG_T::n_filt], - typename CONFIG_T::bias_t biases[CONFIG_T::n_filt]) { - constexpr unsigned mult_n_in = CONFIG_T::filt_height * CONFIG_T::filt_width * CONFIG_T::n_chan; - constexpr unsigned mult_n_out = CONFIG_T::n_filt; - - data_T data_buf[CONFIG_T::n_pixels][mult_n_in]; - #pragma HLS ARRAY_PARTITION variable=data_buf complete dim=0 - - typename CONFIG_T::accum_t mult[mult_n_in * mult_n_out]; - #pragma HLS ARRAY_PARTITION variable=mult complete - - typename CONFIG_T::accum_t acc[mult_n_out]; - #pragma HLS ARRAY_PARTITION variable=acc complete - - #pragma HLS ARRAY_PARTITION variable=weights complete - #pragma HLS ARRAY_PARTITION variable=biases complete - - // Limit multipliers to control parallelization - #pragma HLS ALLOCATION operation instances=mul limit=CONFIG_T::mult_config::multiplier_limit - -PartitionLoop: - for (int i_part = 0; i_part < CONFIG_T::n_partitions; i_part++) { - #pragma HLS PIPELINE II=CONFIG_T::reuse_factor rewind - - CONFIG_T::template fill_buffer::fill_buffer(data, data_buf, i_part); - - PixelLoop: - for (unsigned i_pxl = 0; i_pxl < CONFIG_T::n_pixels; i_pxl++) { - #pragma HLS UNROLL - - data_T cache; - - // Do the matrix-multiply - Product1: - for (int i_in = 0; i_in < mult_n_in; i_in++) { - #pragma HLS UNROLL - cache = data_buf[i_pxl][i_in]; - Product2: - for (int i_out = 0; i_out < 
mult_n_out; i_out++) { - #pragma HLS UNROLL - mult[i_in * mult_n_out + i_out] = - CONFIG_T::mult_config::template product::product( - cache, weights[i_in * mult_n_out + i_out]); - } - } - - // Initialize accumulator with input biases - ResetAccum: - for (int i_acc = 0; i_acc < mult_n_out; i_acc++) { - #pragma HLS UNROLL - acc[i_acc] = (typename CONFIG_T::accum_t)biases[i_acc]; - } - - // Accumulate multiplication result - Accum1: - for (int i_in = 0; i_in < mult_n_in; i_in++) { - #pragma HLS UNROLL - Accum2: - for (int i_out = 0; i_out < mult_n_out; i_out++) { - #pragma HLS UNROLL - acc[i_out] += mult[i_in * mult_n_out + i_out]; - } - } - - // Cast to "res_t" type - Result: - for (int i_res = 0; i_res < mult_n_out; i_res++) { - #pragma HLS UNROLL - *(res++) = cast(acc[i_res]); - } - } - } -} - -} // namespace nnet -#endif diff --git a/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_conv2d_resource.h b/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_conv2d_resource.h deleted file mode 100644 index eb7e18e..0000000 --- a/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_conv2d_resource.h +++ /dev/null @@ -1,105 +0,0 @@ -#ifndef NNET_CONV2D_RESOURCE_H_ -#define NNET_CONV2D_RESOURCE_H_ - -#include "nnet_common.h" -#include "nnet_dense.h" - -namespace nnet { - -template -void conv_2d_resource_cl( - data_T data[CONFIG_T::in_height * CONFIG_T::in_width * CONFIG_T::n_chan], - res_T res[CONFIG_T::out_height * CONFIG_T::out_width * CONFIG_T::n_filt], - typename CONFIG_T::weight_t weights[CONFIG_T::filt_height * CONFIG_T::filt_width * CONFIG_T::n_chan * CONFIG_T::n_filt], - typename CONFIG_T::bias_t biases[CONFIG_T::n_filt]) { - constexpr unsigned mult_n_in = CONFIG_T::filt_height * CONFIG_T::filt_width * CONFIG_T::n_chan; - constexpr unsigned mult_n_out = CONFIG_T::n_filt; - constexpr unsigned block_factor = DIV_ROUNDUP(mult_n_in * mult_n_out, CONFIG_T::reuse_factor); - - constexpr unsigned multscale = block_factor / mult_n_out; - - assert((block_factor % mult_n_out == 0 || CONFIG_T::reuse_factor >= mult_n_in) && - "The current Reuse Factor is not allowed"); - assert((CONFIG_T::reuse_factor <= CONFIG_T::filt_height * CONFIG_T::filt_width * CONFIG_T::n_chan) && - "This function is correct only for RF <= FILT_HEIGHT * FILT_WIDTH * N_CHAN"); - - data_T data_buf[CONFIG_T::n_pixels][mult_n_in]; - #pragma HLS ARRAY_PARTITION variable=data_buf complete dim=0 - - #pragma HLS ARRAY_RESHAPE variable=weights block factor=block_factor - #pragma HLS ARRAY_PARTITION variable=biases complete - - typename CONFIG_T::accum_t acc[CONFIG_T::n_pixels][mult_n_out]; - #pragma HLS ARRAY_PARTITION variable=acc complete dim=0 - -PartitionLoop: - for (unsigned i_part = 0; i_part < CONFIG_T::n_partitions; i_part++) { - //#pragma HLS UNROLL // We don't want this loop unrolled - - CONFIG_T::template fill_buffer::fill_buffer(data, data_buf, i_part); - - PixelInitAccumLoop: - for (unsigned i_pxl = 0; i_pxl < CONFIG_T::n_pixels; i_pxl++) { - #pragma HLS UNROLL - - InitAccumLoop: - for (unsigned i_acc = 0; i_acc < mult_n_out; i_acc++) { - #pragma HLS UNROLL - acc[i_pxl][i_acc] = (typename CONFIG_T::accum_t)biases[i_acc]; - } - } - - ReuseLoop: - for (unsigned i_rf = 0; i_rf < CONFIG_T::reuse_factor; i_rf++) { - #pragma HLS PIPELINE II=1 rewind - - unsigned i_w = i_rf; - unsigned i_in = i_rf; - unsigned i_out = 0; - unsigned i_acc = 0; - - MultLoop: - for (unsigned i_blk = 0; i_blk < block_factor; i_blk++) { - #pragma HLS UNROLL - - PixelMultLoop: - for (unsigned i_pxl = 0; i_pxl < CONFIG_T::n_pixels; i_pxl++) { - #pragma HLS UNROLL - - 
acc[i_pxl][i_out] += static_cast( - CONFIG_T::mult_config::template product::product( - data_buf[i_pxl][i_in], weights[i_w])); - } - - // Increment i_w - i_w += CONFIG_T::reuse_factor; - // Increment i_in - i_in += CONFIG_T::reuse_factor; - if (i_in >= mult_n_in) { - i_in = i_rf; - } - // Increment i_out - if (i_acc + 1 >= multscale) { - i_acc = 0; - i_out++; - } else { - i_acc++; - } - } - } - - PixelResultLoop: - for (unsigned i_pxl = 0; i_pxl < CONFIG_T::n_pixels; i_pxl++) { - #pragma HLS UNROLL - // Cast to "res_t" type - ResultLoop: - for (unsigned i_res = 0; i_res < mult_n_out; i_res++) { - #pragma HLS UNROLL - *(res++) = cast(acc[i_pxl][i_res]); - } - } - } -} - -} // namespace nnet -#endif diff --git a/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_conv2d_stream.h b/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_conv2d_stream.h deleted file mode 100644 index 8a4fb6b..0000000 --- a/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_conv2d_stream.h +++ /dev/null @@ -1,112 +0,0 @@ -#ifndef NNET_CONV2D_STREAM_H_ -#define NNET_CONV2D_STREAM_H_ - -#include "ap_shift_reg.h" -#include "hls_stream.h" -#include "nnet_common.h" -#include "nnet_conv_stream.h" - -namespace nnet { - -template -void compute_scaled_indices_2d(const unsigned h_idx, const unsigned w_idx, - ap_uint *pixel_idx) { - const unsigned sh_idx = CONFIG_T::template scale_index_height::scale_index(h_idx); - unsigned wp_idx = w_idx * (data_T::size / CONFIG_T::n_chan); - -ComputeIndex: - for (unsigned p = 0; p < data_T::size / CONFIG_T::n_chan; p++) { - #pragma HLS UNROLL - - unsigned sw_idx = CONFIG_T::template scale_index_width::scale_index(wp_idx + p); - pixel_idx[p] = CONFIG_T::pixels[sh_idx * CONFIG_T::min_width + sw_idx]; - } -} - -template -void conv_2d_encoded_cl( - hls::stream &data, hls::stream &res, - typename CONFIG_T::weight_t weights[CONFIG_T::filt_height * CONFIG_T::filt_width * CONFIG_T::n_chan * CONFIG_T::n_filt], - typename CONFIG_T::bias_t biases[CONFIG_T::n_filt]) { - assert(CONFIG_T::pad_top == 0 && CONFIG_T::pad_bottom == 0 && CONFIG_T::pad_left == 0 && CONFIG_T::pad_right == 0); - assert(CONFIG_T::filt_height == CONFIG_T::filt_width); - - hls::stream data_window[CONFIG_T::filt_height * CONFIG_T::filt_width * CONFIG_T::n_chan]; - const int win_depth = CONFIG_T::filt_height * CONFIG_T::out_width; - for (unsigned i_out = 0; i_out < CONFIG_T::filt_height * CONFIG_T::filt_width * CONFIG_T::n_chan; i_out++) { - #pragma HLS STREAM variable=data_window[i_out] depth=win_depth - } - - #pragma HLS ARRAY_PARTITION variable=CONFIG_T::pixels complete - - res_T res_pack; - PRAGMA_DATA_PACK(res_pack) - unsigned outputs_ready = 0; - - ap_uint pixel_idx[data_T::size / CONFIG_T::n_chan]; - #pragma HLS ARRAY_PARTITION variable=pixel_idx complete - -ReadInputHeight: - for (unsigned i_ih = 0; i_ih < CONFIG_T::in_height; i_ih++) { - ReadInputWidth: - for (unsigned i_iw = 0; i_iw < CONFIG_T::in_width / (data_T::size / CONFIG_T::n_chan); i_iw++) { - #pragma HLS LOOP_FLATTEN - if (CONFIG_T::strategy == nnet::latency && data_T::size / CONFIG_T::n_chan == 1) { - #pragma HLS PIPELINE II=CONFIG_T::reuse_factor - } - compute_scaled_indices_2d(i_ih, i_iw, pixel_idx); - compute_output_encoded(data.read(), data_window, res, res_pack, outputs_ready, weights, - biases, pixel_idx); - } - } -} - -// Line Buffer -template -void conv_2d_buffer_cl( - hls::stream &data, hls::stream &res, - typename CONFIG_T::weight_t weights[CONFIG_T::filt_height * CONFIG_T::filt_width * CONFIG_T::n_chan * CONFIG_T::n_filt], - typename CONFIG_T::bias_t 
biases[CONFIG_T::n_filt]) { - assert(CONFIG_T::pad_top == 0 && CONFIG_T::pad_bottom == 0 && CONFIG_T::pad_left == 0 && CONFIG_T::pad_right == 0); - - static ap_shift_reg line_buffer[MAX(CONFIG_T::filt_height - 1, 1)] - [CONFIG_T::n_chan]; - #pragma HLS ARRAY_PARTITION variable = line_buffer complete dim = 2 - -ReadInputHeight: - for (unsigned i_ih = 0; i_ih < CONFIG_T::in_height; i_ih++) { - ReadInputWidth: - for (unsigned i_iw = 0; i_iw < CONFIG_T::in_width; i_iw++) { - #pragma HLS LOOP_FLATTEN - if (CONFIG_T::strategy == nnet::latency) { - #pragma HLS PIPELINE II=CONFIG_T::reuse_factor - } - if (CONFIG_T::filt_height > 1) { - compute_output_buffer_2d(data.read(), line_buffer, res, weights, biases); - } else { - compute_output_buffer_1d(data.read(), res, weights, biases); - } - } - } -} - -template -void conv_2d_cl( - hls::stream &data, hls::stream &res, - typename CONFIG_T::weight_t weights[CONFIG_T::filt_height * CONFIG_T::filt_width * CONFIG_T::n_chan * CONFIG_T::n_filt], - typename CONFIG_T::bias_t biases[CONFIG_T::n_filt]) { - #pragma HLS inline recursive - switch (CONFIG_T::implementation) { - case conv_implementation::linebuffer: - conv_2d_buffer_cl(data, res, weights, biases); - break; - case conv_implementation::encoded: - conv_2d_encoded_cl(data, res, weights, biases); - break; - } -} - -} // namespace nnet -#endif diff --git a/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_conv_stream.h b/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_conv_stream.h deleted file mode 100644 index b763938..0000000 --- a/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_conv_stream.h +++ /dev/null @@ -1,394 +0,0 @@ -#ifndef NNET_CONV_STREAM_H_ -#define NNET_CONV_STREAM_H_ - -#include "ap_shift_reg.h" -#include "hls_stream.h" -#include "nnet_common.h" -#include "nnet_dense.h" - -namespace nnet { - -// ************************************************* -// Encoded Implementation (Vlad's) -// ************************************************* -template unsigned scale_index_K_gte_S(const unsigned idx) { - #pragma HLS INLINE - - if (idx < K - S) { - return idx; - } - - constexpr unsigned nW = ((W - K) / S) * S + K; // Nearest W without unused pixels on the right - constexpr unsigned sW = (DIV_ROUNDUP(K, S) - 1) * S + K; // Scaled W that behaves like original W - if (idx >= nW) { - return sW; - } - - const unsigned r = nW - idx; - if (r <= K - S) { - return sW - r; - } - - return K - S + (idx - (K - S)) % S; -} - -template unsigned scale_index_K_lt_S(const unsigned idx) { - #pragma HLS INLINE - - if (idx < S - K) { - return idx; - } - - constexpr unsigned nW = ((W - K) / S) * S + K; // Nearest W without unused pixels on the right - constexpr unsigned sW = (DIV_ROUNDUP(S, K) - 1) * S + K; // Scaled W that behaves like original W - if (idx >= nW) { - return sW; - } - - const unsigned r = nW - idx; - if (r <= S - K) { - return sW - r; - } - - return S - K + (idx - (S - K)) % S; -} - -template class scale_index_regular { - public: - static unsigned scale_index(const unsigned idx) { - #pragma HLS INLINE - - if (K >= S) { - return scale_index_K_gte_S(idx); - } else { - return scale_index_K_lt_S(idx); - } - } -}; - -template class scale_index_unscaled { - public: - static unsigned scale_index(const unsigned idx) { - #pragma HLS INLINE - return idx; - } -}; - -template -void mult_buffer(hls::stream data_window[CONFIG_T::kernel_size * CONFIG_T::n_chan], - res_T &res_pack, hls::stream &res_stream, unsigned &outputs_ready, - typename CONFIG_T::weight_t weights[CONFIG_T::kernel_size * CONFIG_T::n_chan * 
CONFIG_T::n_filt], - typename CONFIG_T::bias_t biases[CONFIG_T::n_filt]) { - #pragma HLS INLINE - - typename data_T::value_type data[CONFIG_T::kernel_size * CONFIG_T::n_chan]; - #pragma HLS ARRAY_PARTITION variable = data complete - typename res_T::value_type res[CONFIG_T::n_filt]; - #pragma HLS ARRAY_PARTITION variable = res complete - -InitData: - for (int id = 0; id < CONFIG_T::kernel_size * CONFIG_T::n_chan; id++) { - #pragma HLS UNROLL - data[id] = data_window[id].read(); - } - - #pragma HLS INLINE recursive - if (CONFIG_T::strategy == nnet::latency) { - dense_latency( - data, res, weights, biases); - } else { - dense_resource( - data, res, weights, biases); - } - -CastLoop: - for (unsigned jj = 0; jj < CONFIG_T::n_filt; jj++) { - #pragma HLS UNROLL - if (res_T::size / CONFIG_T::n_filt == 1) { - res_pack[jj] = res[jj]; - } else { - res_pack[outputs_ready * CONFIG_T::n_filt + jj] = res[jj]; - } - } - - if (res_T::size / CONFIG_T::n_filt == 1) { - res_stream.write(res_pack); - } else { - if (outputs_ready == (res_T::size / CONFIG_T::n_filt) - 1) { - res_stream.write(res_pack); - outputs_ready = 0; - } else { - outputs_ready++; - } - } -} - -template -void compute_output_encoded(const data_T &in_elem, - hls::stream data_window[CONFIG_T::kernel_size * CONFIG_T::n_chan], - hls::stream &res, res_T &res_pack, unsigned &outputs_ready, - typename CONFIG_T::weight_t weights[CONFIG_T::kernel_size * CONFIG_T::n_chan * CONFIG_T::n_filt], - typename CONFIG_T::bias_t biases[CONFIG_T::n_filt], ap_uint *pixel_idx) { - #pragma HLS INLINE - -MultLoop: - for (unsigned p = 0; p < data_T::size / CONFIG_T::n_chan; p++) { - #pragma HLS PIPELINE II = CONFIG_T::reuse_factor - CopyDataFilt: - for (unsigned f = 0; f < CONFIG_T::kernel_size; f++) { - #pragma HLS UNROLL - CopyDataChan: - for (unsigned c = 0; c < CONFIG_T::n_chan; c++) { - #pragma HLS UNROLL - if (pixel_idx[p][f]) - data_window[f * CONFIG_T::n_chan + c].write(in_elem[p * CONFIG_T::n_chan + c]); - } - } - if (pixel_idx[p][CONFIG_T::kernel_size - 1]) { - mult_buffer(data_window, res_pack, res, outputs_ready, weights, biases); - } - } -} - -// ************************************************* -// Line Buffer Implementation (Phil's) -// ************************************************* -template -void kernel_shift_1d(const data_T &in_elem, - typename data_T::value_type kernel_window[CONFIG_T::filt_width * CONFIG_T::n_chan]) { - #pragma HLS inline - - // Shift kernel_window by one step to the left (manual shift operation) - static const int filt_width = CONFIG_T::filt_width - 1; -KernelShiftWidth: - for (int i_iw = 0; i_iw < filt_width; i_iw++) { - #pragma HLS PIPELINE II = 1 - KernelShiftChannel: - for (unsigned i_ic = 0; i_ic < CONFIG_T::n_chan; i_ic++) { - #pragma HLS UNROLL - // Shift every element in kernel_window to the left - kernel_window[i_iw * CONFIG_T::n_chan + i_ic] = kernel_window[(i_iw + 1) * CONFIG_T::n_chan + i_ic]; - } - } - - // Insert shift_buffer column into right-most column of kernel - static const int lastheight = (CONFIG_T::filt_width - 1) * CONFIG_T::n_chan; -KernelPushChannel: - for (int i_ic = 0; i_ic < CONFIG_T::n_chan; i_ic++) { - #pragma HLS UNROLL - kernel_window[lastheight + i_ic] = in_elem[i_ic]; - } -} - -template -void kernel_shift_2d( - typename data_T::value_type shift_buffer[CONFIG_T::filt_height][CONFIG_T::n_chan], - typename data_T::value_type kernel_window[CONFIG_T::filt_width * CONFIG_T::filt_height * CONFIG_T::n_chan]) { - #pragma HLS inline - - // Shift kernel_window by one step to the left (manual shift 
operation) - static const int filt_width = CONFIG_T::filt_width - 1; -KernelShiftWidth: - for (int i_iw = 0; i_iw < filt_width; i_iw++) { - #pragma HLS PIPELINE II = 1 - KernelShiftHeight: - for (unsigned i_ih = 0; i_ih < CONFIG_T::filt_height; i_ih++) { - KernelShiftChannel: - for (unsigned i_ic = 0; i_ic < CONFIG_T::n_chan; i_ic++) { - // Shift every element in kernel_window to the left - kernel_window[i_ih * CONFIG_T::filt_width * CONFIG_T::n_chan + i_iw * CONFIG_T::n_chan + i_ic] = - kernel_window[i_ih * CONFIG_T::filt_width * CONFIG_T::n_chan + (i_iw + 1) * CONFIG_T::n_chan + i_ic]; - } - } - } - - // Insert shift_buffer column into right-most column of kernel - static const int lastheight = (CONFIG_T::filt_width - 1) * CONFIG_T::n_chan; -KernelPushHeight: - for (int i_ih = 0; i_ih < CONFIG_T::filt_height; i_ih++) { - #pragma HLS UNROLL - KernelPushChannel: - for (int i_ic = 0; i_ic < CONFIG_T::n_chan; i_ic++) { - kernel_window[lastheight + i_ih * CONFIG_T::filt_width * CONFIG_T::n_chan + i_ic] = shift_buffer[i_ih][i_ic]; - } - } -} - -template -void shift_line_buffer( - const data_T &in_elem, - ap_shift_reg line_buffer[MAX(CONFIG_T::filt_height - 1, 1)] - [CONFIG_T::n_chan], - typename data_T::value_type kernel_window[CONFIG_T::filt_height * CONFIG_T::filt_width * CONFIG_T::n_chan]) { - - #pragma HLS PIPELINE - - // Temporary buffer for popped (shifted) elements - typename data_T::value_type shift_buffer[CONFIG_T::filt_height][CONFIG_T::n_chan]; - #pragma HLS ARRAY_PARTITION variable = shift_buffer complete dim = 0 - -UpdateBuffer: - for (int i_ic = 0; i_ic < CONFIG_T::n_chan; i_ic++) { - #pragma HLS UNROLL - - // Insert pixel(s) at end of shift buffer - shift_buffer[CONFIG_T::filt_height - 1][i_ic] = in_elem[i_ic]; - } - -LineBufferDataIn: - for (int i_ic = 0; i_ic < CONFIG_T::n_chan; i_ic++) { - // Shift the shift buffer into the line buffer - LineBufferShift: - for (unsigned i_ih = 1; i_ih < CONFIG_T::filt_height; i_ih++) { - #pragma HLS UNROLL - typename data_T::value_type pop_elem = line_buffer[i_ih - 1][i_ic].shift( - shift_buffer[CONFIG_T::filt_height - i_ih][i_ic]); // Shift the line buffer, return the popped pixel - shift_buffer[CONFIG_T::filt_height - i_ih - 1][i_ic] = - pop_elem; // Popped element placed back into shift_buffer, one row up. 
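For reference, the line-buffer data movement implemented here with ap_shift_reg can be modeled on the host with ordinary FIFOs. The sketch below is an illustration only: it assumes a single channel and a 3x3 kernel, uses std::deque as a stand-in for ap_shift_reg, and fills the not-yet-warm buffers with zeros. It reproduces the same cascade (the new pixel enters at the bottom, each buffer pops the pixel from one row up) and reports when a full window is available.

    #include <cstdio>
    #include <deque>

    int main() {
        const int H = 4, W = 5, K = 3;       // toy image and kernel sizes
        std::deque<int> line_buffer[K - 1];  // stand-ins for ap_shift_reg, depth W
        int window[K][K] = {};               // kernel_window equivalent

        for (int y = 0; y < H; y++) {
            for (int x = 0; x < W; x++) {
                int in = y * W + x; // fake pixel value
                // Column of pixels entering the window: newest at the bottom,
                // each line buffer supplying the pixel from one row higher.
                int column[K];
                column[K - 1] = in;
                for (int r = K - 2; r >= 0; r--) {
                    line_buffer[r].push_back(column[r + 1]);
                    if ((int)line_buffer[r].size() > W) {
                        column[r] = line_buffer[r].front(); // popped pixel, one row up
                        line_buffer[r].pop_front();
                    } else {
                        column[r] = 0; // buffer not yet full (top of image)
                    }
                }
                // Shift the window left and insert the new column on the right,
                // as kernel_shift_2d does.
                for (int r = 0; r < K; r++) {
                    for (int c = 0; c < K - 1; c++)
                        window[r][c] = window[r][c + 1];
                    window[r][K - 1] = column[r];
                }
                if (y >= K - 1 && x >= K - 1)
                    std::printf("full window at (%d,%d), top-left pixel=%d\n", y, x, window[0][0]);
            }
        }
        return 0;
    }

The design point this makes concrete: only filt_height - 1 image rows are ever stored, so the stream implementation's memory cost scales with in_width rather than with the whole image.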
- } - } - kernel_shift_2d(shift_buffer, kernel_window); -} - -template -void compute_output_buffer_2d( - const data_T &in_elem, - ap_shift_reg line_buffer[MAX(CONFIG_T::filt_height - 1, 1)] - [CONFIG_T::n_chan], - hls::stream &res_stream, - typename CONFIG_T::weight_t weights[CONFIG_T::kernel_size * CONFIG_T::n_chan * CONFIG_T::n_filt], - typename CONFIG_T::bias_t biases[CONFIG_T::n_filt]) { - #pragma HLS INLINE OFF - - // Thresholds - const static int lShiftX = CONFIG_T::filt_width - 1; - const static int lShiftY = CONFIG_T::filt_height - 1; - - // Counters - static int pX = 0; // Pixel X - static int pY = 0; // Pixel Y - - static int sX = 0; // Stride X - static int sY = 0; // Stride Y - - static typename data_T::value_type kernel_data[CONFIG_T::filt_height * CONFIG_T::filt_width * CONFIG_T::n_chan]; - #pragma HLS ARRAY_PARTITION variable = kernel_data complete - - typename res_T::value_type res_out[CONFIG_T::n_filt]; - #pragma HLS ARRAY_PARTITION variable = res_out complete dim = 0 - - res_T res_pack; - PRAGMA_DATA_PACK(res_pack) - - // Add pixel to buffer - nnet::shift_line_buffer(in_elem, line_buffer, kernel_data); - - // Check to see if we have a full kernel - if ((sX - lShiftX) == 0 && (sY - lShiftY) == 0 && pY > lShiftY - 1 && pX > lShiftX - 1) { - - // Dense multiply - // #pragma HLS INLINE recursive - if (CONFIG_T::strategy == nnet::latency) { - dense_latency( - kernel_data, res_out, weights, biases); - } else { - dense_resource( - kernel_data, res_out, weights, biases); - } - - // Pack output - CastLoop: - for (unsigned i_ic = 0; i_ic < CONFIG_T::n_filt; i_ic++) { - #pragma HLS UNROLL - res_pack[i_ic] = res_out[i_ic]; - } - - // Write output to stream when output ready - res_stream.write(res_pack); - } - - // Counter Housekeeping - if (pX + 1 == CONFIG_T::in_width) // Includes padding, end of line (padded) - { - pX = 0; - sX = 0; - if (pY + 1 == CONFIG_T::in_height) { // Reached bottom of image - pY = 0; - sY = 0; - } else { - pY = pY + 1; - // Update stride (threshold) ? subtract stride : increment stride - sY = ((sY - lShiftY) == 0) ? sY - CONFIG_T::stride_height + 1 : sY + 1; - } - } else { - pX = pX + 1; - // Update stride (threshold) ? subtract stride : increment stride - sX = ((sX - lShiftX) == 0) ? 
sX - CONFIG_T::stride_width + 1 : sX + 1; - } -} - -// Conv 1D compute output -template -void compute_output_buffer_1d( - const data_T &in_elem, hls::stream &res_stream, - typename CONFIG_T::weight_t weights[CONFIG_T::kernel_size * CONFIG_T::n_chan * CONFIG_T::n_filt], - typename CONFIG_T::bias_t biases[CONFIG_T::n_filt]) { - #pragma HLS INLINE - - // Thresholds - const static int lShiftX = CONFIG_T::filt_width - 1; - - // Counters - static int pX = 0; // pixel counter - static int sX = 0; // stride counter - - static typename data_T::value_type kernel_data[CONFIG_T::filt_width * CONFIG_T::n_chan]; - #pragma HLS ARRAY_PARTITION variable = kernel_data complete - - typename res_T::value_type res_out[CONFIG_T::n_filt]; - #pragma HLS ARRAY_PARTITION variable = res_out complete dim = 0 - - res_T res_pack; - PRAGMA_DATA_PACK(res_pack) - - // Add pixel to buffer - nnet::kernel_shift_1d(in_elem, kernel_data); - - // Check to see if we have a full kernel - if ((sX - lShiftX) == 0 && pX > lShiftX - 1) { - - // Dense multiply - #pragma HLS INLINE recursive - if (CONFIG_T::strategy == nnet::latency) { - dense_latency( - kernel_data, res_out, weights, biases); - } else { - dense_resource( - kernel_data, res_out, weights, biases); - } - - // Pack output - CastLoop: - for (unsigned i_ic = 0; i_ic < CONFIG_T::n_filt; i_ic++) { - #pragma HLS UNROLL - res_pack[i_ic] = res_out[i_ic]; - } - - // Write output to stream when output ready - res_stream.write(res_pack); - } - - // Counter Housekeeping - if (pX + 1 == CONFIG_T::in_width) // Includes padding, end of line (padded) - { - pX = 0; - sX = 0; - } else { - pX = pX + 1; - // Update stride (threshold) ? subtract stride : increment stride - sX = ((sX - lShiftX) == 0) ? sX - CONFIG_T::stride_width + 1 : sX + 1; - } -} - -} // namespace nnet -#endif diff --git a/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_dense.h b/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_dense.h deleted file mode 100644 index c5155d8..0000000 --- a/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_dense.h +++ /dev/null @@ -1,49 +0,0 @@ -#ifndef NNET_DENSE_H_ -#define NNET_DENSE_H_ - -#include "hls_stream.h" -#include "nnet_common.h" -#include "nnet_dense_latency.h" -#include "nnet_dense_resource.h" -#include "nnet_helpers.h" -#include "nnet_mult.h" -#include - -namespace nnet { - -struct dense_config { - // Internal data type definitions - typedef float bias_t; - typedef float weight_t; - typedef float accum_t; - - // Layer Sizes - static const unsigned n_in = 10; - static const unsigned n_out = 10; - - // Resource reuse info - static const unsigned io_type = io_parallel; - static const unsigned strategy = latency; - static const unsigned reuse_factor = 1; - static const bool store_weights_in_bram = false; - static const unsigned n_zeros = 0; - // partitioning arrays cyclically to go with roll factors? 
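Whatever strategy or reuse factor a config selects, every dense_* variant in these headers computes the same function. A plain-float behavioral reference, using the weights[i * n_out + j] layout that dense_latency below makes explicit (illustrative only, not synthesizable HLS and not part of hls4ml):

    #include <cstdio>

    // Behavioral reference for the dense_* variants:
    //   res[j] = biases[j] + sum_i data[i] * weights[i * N_OUT + j]
    // with plain float stand-ins for the fixed-point types.
    template <int N_IN, int N_OUT>
    void dense_reference(const float data[N_IN], float res[N_OUT],
                         const float weights[N_IN * N_OUT], const float biases[N_OUT]) {
        for (int j = 0; j < N_OUT; j++)
            res[j] = biases[j];
        for (int i = 0; i < N_IN; i++)
            for (int j = 0; j < N_OUT; j++)
                res[j] += data[i] * weights[i * N_OUT + j];
    }

    int main() {
        const float data[2] = {1.f, 2.f};
        const float weights[2 * 3] = {1, 2, 3, 4, 5, 6}; // w[i * 3 + j]
        const float biases[3] = {0.5f, 0.5f, 0.5f};
        float res[3];
        dense_reference<2, 3>(data, res, weights, biases);
        for (float r : res)
            std::printf("%g\n", r); // 9.5 12.5 15.5
        return 0;
    }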
- // Product function to use - template using product = nnet::product::mult; -}; - -template -void dense(data_T data[CONFIG_T::n_in], res_T res[CONFIG_T::n_out], - typename CONFIG_T::weight_t weights[CONFIG_T::n_in * CONFIG_T::n_out], - typename CONFIG_T::bias_t biases[CONFIG_T::n_out]) { - #pragma HLS inline - if (CONFIG_T::strategy == nnet::latency) { - dense_latency(data, res, weights, biases); - } else { - dense_resource(data, res, weights, biases); - } -} - -} // namespace nnet - -#endif diff --git a/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_dense_compressed.h b/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_dense_compressed.h deleted file mode 100644 index 029b748..0000000 --- a/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_dense_compressed.h +++ /dev/null @@ -1,90 +0,0 @@ -#ifndef NNET_COMPRESSED_LAYER_H_ -#define NNET_COMPRESSED_LAYER_H_ - -#include "hls_stream.h" -#include "nnet_common.h" -#include "nnet_dense.h" -#include - -namespace nnet { - -template -void fill_mult(typename CONFIG_T::index_t index, typename CONFIG_T::accum_t mult[CONFIG_T::n_out], - typename CONFIG_T::accum_t weight) { - for (unsigned k = 0; k < CONFIG_T::n_out; k++) { - #pragma HLS UNROLL - if (k == index) - mult[k] += weight; - } -} - -template -void dense_compressed(data_T data[CONFIG_T::n_in], res_T res[CONFIG_T::n_out], - typename CONFIG_T::weight_t weights[CONFIG_T::n_nonzeros], - typename CONFIG_T::bias_t biases[CONFIG_T::n_out]) { - - const int multiplier_limit = DIV_ROUNDUP(CONFIG_T::n_nonzeros, CONFIG_T::reuse_factor); - - typename CONFIG_T::accum_t acc[CONFIG_T::n_out]; - #pragma HLS ARRAY_PARTITION variable=acc complete - #pragma HLS ARRAY_PARTITION variable=biases complete - #pragma HLS ARRAY_RESHAPE variable=weights block factor=multiplier_limit - -#ifdef __VITIS_HLS__ - #pragma HLS AGGREGATE variable=weights -#else - #pragma HLS data_pack variable=weights struct_level -#endif - -InitAccum: - for (unsigned i = 0; i < CONFIG_T::n_out; i++) { - #pragma HLS UNROLL - acc[i] = (typename CONFIG_T::accum_t)(biases[i]); - } - - // Do the compressed matrix-multiply - const int rufactor = CONFIG_T::reuse_factor; -ReuseLoop: - for (unsigned ir = 0; ir < rufactor; ir++) { - #pragma HLS PIPELINE II=1 rewind - - typename CONFIG_T::accum_t mult[CONFIG_T::n_out]; - #pragma HLS ARRAY_PARTITION variable=mult complete - - ResetMult: - for (int imult = 0; imult < CONFIG_T::n_out; imult++) { - #pragma HLS UNROLL - mult[imult] = 0; - } - - CompressedMultLoop: - for (unsigned im = 0; im < multiplier_limit; im++) { - #pragma HLS UNROLL - unsigned w = im * rufactor + ir; - auto row = weights[w].row_index; - auto col = weights[w].col_index; - auto weight_cache = weights[w].weight; - data_T data_cache = data[row]; - // mult[col] += weight_cache * data_cache; - typename CONFIG_T::accum_t prod = - CONFIG_T::template product::product(data_cache, weight_cache); - fill_mult(col, mult, prod); - } - - for (int im = 0; im < CONFIG_T::n_out; im++) { - acc[im] += mult[im]; - } - } - -// Cast to "res_t" type -ResultLoop: - for (unsigned i = 0; i < CONFIG_T::n_out; i++) { - #pragma HLS UNROLL - // res[i] = (res_T) (acc[i]); - res[i] = cast(acc[i]); - } -} - -} // namespace nnet - -#endif diff --git a/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_dense_latency.h b/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_dense_latency.h deleted file mode 100644 index 02802c4..0000000 --- a/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_dense_latency.h +++ /dev/null @@ -1,72 +0,0 @@ -#ifndef NNET_DENSE_LATENCY_H_ -#define 
NNET_DENSE_LATENCY_H_ - -#include "hls_stream.h" -#include "nnet_common.h" -#include "nnet_helpers.h" -#include "nnet_mult.h" -#include - -namespace nnet { - -template -void dense_latency(data_T data[CONFIG_T::n_in], res_T res[CONFIG_T::n_out], - typename CONFIG_T::weight_t weights[CONFIG_T::n_in * CONFIG_T::n_out], - typename CONFIG_T::bias_t biases[CONFIG_T::n_out]) { - data_T cache; - typename CONFIG_T::accum_t mult[CONFIG_T::n_in * CONFIG_T::n_out]; - typename CONFIG_T::accum_t acc[CONFIG_T::n_out]; - - // Use a function_instantiate in case it helps to explicitly optimize unchanging weights/biases - #pragma HLS function_instantiate variable=weights,biases - - // For parallel inputs: - // - completely partition arrays -- target fabric - // - if we have an unroll factor, limit number of multipliers - #pragma HLS PIPELINE II=CONFIG_T::reuse_factor - - // #pragma HLS ARRAY_PARTITION variable=weights complete // remove this line for now, it breaks compression sometimes - #pragma HLS ARRAY_PARTITION variable=biases complete - #pragma HLS ARRAY_PARTITION variable=mult complete - #pragma HLS ARRAY_PARTITION variable=acc complete - - #pragma HLS ALLOCATION operation instances=mul limit=CONFIG_T::multiplier_limit - -// Do the matrix-multiply -Product1: - for (int ii = 0; ii < CONFIG_T::n_in; ii++) { - cache = data[ii]; - Product2: - for (int jj = 0; jj < CONFIG_T::n_out; jj++) { - int index = ii * CONFIG_T::n_out + jj; - mult[index] = CONFIG_T::template product::product(cache, weights[index]); - } - } - -// Initialize accumulator with input biases -ResetAccum: - for (int iacc = 0; iacc < CONFIG_T::n_out; iacc++) { - acc[iacc] = (typename CONFIG_T::accum_t)biases[iacc]; - } - -// Accumulate multiplication result -Accum1: - for (int ii = 0; ii < CONFIG_T::n_in; ii++) { - Accum2: - for (int jj = 0; jj < CONFIG_T::n_out; jj++) { - int index = ii * CONFIG_T::n_out + jj; - acc[jj] += mult[index]; - } - } - -// Cast to "res_t" type -Result: - for (int ires = 0; ires < CONFIG_T::n_out; ires++) { - // res[ires] = (res_T) (acc[ires]); - res[ires] = cast(acc[ires]); - } -} - -} // namespace nnet - -#endif diff --git a/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_dense_resource.h b/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_dense_resource.h deleted file mode 100644 index 88de947..0000000 --- a/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_dense_resource.h +++ /dev/null @@ -1,263 +0,0 @@ -#ifndef NNET_DENSE_RESOURCE_H_ -#define NNET_DENSE_RESOURCE_H_ - -#include "hls_stream.h" -#include "nnet_common.h" -#include "nnet_mult.h" -#include -#include - -namespace nnet { - -template -void dense_resource_rf_leq_nin(data_T data[CONFIG_T::n_in], res_T res[CONFIG_T::n_out], - typename CONFIG_T::weight_t weights[CONFIG_T::n_in * CONFIG_T::n_out], - typename CONFIG_T::bias_t biases[CONFIG_T::n_out]) { - - const int rufactor = CONFIG_T::reuse_factor; - const int multfactor = MIN(CONFIG_T::n_in, CONFIG_T::reuse_factor); - const int multiplier_limit = DIV_ROUNDUP(CONFIG_T::n_in * CONFIG_T::n_out, multfactor); - const int block_factor = DIV_ROUNDUP(CONFIG_T::n_in * CONFIG_T::n_out, CONFIG_T::reuse_factor); - const int multscale = multiplier_limit / CONFIG_T::n_out; - const int nin = CONFIG_T::n_in; - const int nout = CONFIG_T::n_out; - - assert((multiplier_limit % nout == 0 || rufactor >= nin) && "The current Reuse Factor is not allowed"); - assert((multiplier_limit == block_factor) && "This function is correct only for RF <= N_IN"); - - #pragma HLS function_instantiate variable=weights,biases - //#pragma HLS 
RESOURCE variable=weights core=RAM_2P_BRAM // Commenting out the designation; HLS seems to choose correctly
-    #pragma HLS ARRAY_RESHAPE variable=weights block factor=block_factor
-    #pragma HLS ARRAY_PARTITION variable=biases complete
-
-    typename CONFIG_T::accum_t acc[CONFIG_T::n_out];
-    #pragma HLS ARRAY_PARTITION variable=acc complete
-
-InitAccum:
-    for (int iacc = 0; iacc < nout; iacc++) {
-        #pragma HLS UNROLL
-        acc[iacc] = (typename CONFIG_T::accum_t)biases[iacc];
-    }
-
-ReuseLoop:
-    for (int ir = 0; ir < rufactor; ir++) {
-        #pragma HLS PIPELINE II=1 rewind
-
-        int w_index = ir;
-        int in_index = ir;
-        int out_index = 0;
-        int acc_step = 0;
-
-    MultLoop:
-        for (int im = 0; im < block_factor; im++) {
-            #pragma HLS UNROLL
-
-            acc[out_index] += static_cast<typename CONFIG_T::accum_t>(
-                CONFIG_T::template product<data_T, typename CONFIG_T::weight_t>::product(data[in_index], weights[w_index]));
-
-            // Increment w_index
-            w_index += rufactor;
-            // Increment in_index
-            in_index += rufactor;
-            if (in_index >= nin) {
-                in_index = ir;
-            }
-            // Increment out_index
-            if (acc_step + 1 >= multscale) {
-                acc_step = 0;
-                out_index++;
-            } else {
-                acc_step++;
-            }
-        }
-    }
-
-// Cast to "res_t" type
-Result:
-    for (int ires = 0; ires < CONFIG_T::n_out; ires++) {
-        #pragma HLS UNROLL
-        res[ires] = cast<data_T, res_T, CONFIG_T>(acc[ires]);
-    }
-}
-
-template <class data_T, class res_T, typename CONFIG_T>
-void dense_resource_rf_gt_nin_rem0(data_T data[CONFIG_T::n_in], res_T res[CONFIG_T::n_out],
-                                   typename CONFIG_T::weight_t weights[CONFIG_T::n_in * CONFIG_T::n_out],
-                                   typename CONFIG_T::bias_t biases[CONFIG_T::n_out]) {
-
-    const int rufactor = MIN(CONFIG_T::reuse_factor, CONFIG_T::n_in * CONFIG_T::n_out);
-    const int multfactor = MIN(CONFIG_T::n_in, CONFIG_T::reuse_factor);
-    const int multiplier_limit = DIV_ROUNDUP(CONFIG_T::n_in * CONFIG_T::n_out, multfactor);
-    const int block_factor = DIV_ROUNDUP(CONFIG_T::n_in * CONFIG_T::n_out, CONFIG_T::reuse_factor);
-    const int multscale = multiplier_limit / CONFIG_T::n_out;
-    const int nin = CONFIG_T::n_in;
-    const int nout = CONFIG_T::n_out;
-
-    assert((multiplier_limit % nout == 0 || rufactor >= nin) && "The current Reuse Factor is not allowed");
-    assert((rufactor > nin && rufactor % nin == 0) && "This function is correct only for RF > N_IN && RF % N_IN == 0");
-
-    #pragma HLS function_instantiate variable=weights,biases
-    //#pragma HLS RESOURCE variable=weights core=RAM_2P_BRAM // Commenting out the designation; HLS seems to choose correctly
-    #pragma HLS ARRAY_RESHAPE variable=weights block factor=block_factor
-    #pragma HLS ARRAY_PARTITION variable=biases complete
-
-    typename CONFIG_T::accum_t acc[CONFIG_T::n_out];
-    #pragma HLS ARRAY_PARTITION variable=acc complete
-
-InitAccum:
-    for (int iacc = 0; iacc < nout; iacc++) {
-        #pragma HLS UNROLL
-        acc[iacc] = (typename CONFIG_T::accum_t)biases[iacc];
-    }
-
-    int w_index;
-    int in_index = 0;
-    int out_index;
-    int outstep = 0;
-    const int outscale = rufactor / nin;
-
-    int outidx[rufactor];
-IndexLoop:
-    for (int ir = 0; ir < rufactor; ir++) {
-        outidx[ir] = outstep;
-        if ((ir + 1) % nin == 0) {
-            outstep++;
-        }
-    }
-
-ReuseLoop:
-    for (int ir = 0; ir < rufactor; ir++) {
-        #pragma HLS PIPELINE II=1 rewind
-
-        w_index = ir;
-        out_index = outidx[ir] /*outstep*/;
-
-    MultLoop:
-        for (int im = 0; im < block_factor; im++) {
-            #pragma HLS UNROLL
-            acc[out_index] += static_cast<typename CONFIG_T::accum_t>(
-                CONFIG_T::template product<data_T, typename CONFIG_T::weight_t>::product(data[in_index], weights[w_index]));
-
-            w_index += rufactor;
-            if (w_index >= CONFIG_T::n_in * CONFIG_T::n_out)
-                break; // check out of bounds
-            out_index += outscale;
-        }
-
-        in_index++;
-        if (in_index >= nin) {
-            in_index = 0;
-            // outstep++; // This causes a huge increase in scheduling and RTL generation times, hence the above workaround.
-        }
-    }
-
-// Cast to "res_t" type
-Result:
-    for (int ires = 0; ires < CONFIG_T::n_out; ires++) {
-        #pragma HLS UNROLL
-        res[ires] = cast<data_T, res_T, CONFIG_T>(acc[ires]);
-    }
-}
-
-template <class data_T, class res_T, typename CONFIG_T>
-void dense_resource_rf_gt_nin(data_T data[CONFIG_T::n_in], res_T res[CONFIG_T::n_out],
-                              typename CONFIG_T::weight_t weights[CONFIG_T::n_in * CONFIG_T::n_out],
-                              typename CONFIG_T::bias_t biases[CONFIG_T::n_out]) {
-
-    const int rufactor = CONFIG_T::reuse_factor;
-    const int multfactor = MIN(CONFIG_T::n_in, CONFIG_T::reuse_factor);
-    const int multiplier_limit = DIV_ROUNDUP(CONFIG_T::n_in * CONFIG_T::n_out, multfactor);
-    const int block_factor = DIV_ROUNDUP(CONFIG_T::n_in * CONFIG_T::n_out, CONFIG_T::reuse_factor);
-    const int multscale = multiplier_limit / CONFIG_T::n_out;
-    const int nin = CONFIG_T::n_in;
-    const int nout = CONFIG_T::n_out;
-
-    assert((multiplier_limit % nout == 0 || rufactor >= nin) && "The current Reuse Factor is not allowed");
-    assert((rufactor > nin) && "This function is correct only for RF > N_IN");
-
-    #pragma HLS function_instantiate variable=weights,biases
-    //#pragma HLS RESOURCE variable=weights core=RAM_2P_BRAM // Commenting out the designation; HLS seems to choose correctly
-    #pragma HLS ARRAY_RESHAPE variable=weights block factor=block_factor
-    #pragma HLS ARRAY_PARTITION variable=biases complete
-
-    typename CONFIG_T::accum_t acc[CONFIG_T::n_out];
-    #pragma HLS ARRAY_PARTITION variable=acc complete
-
-InitAccum:
-    for (int iacc = 0; iacc < nout; iacc++) {
-        #pragma HLS UNROLL
-        acc[iacc] = (typename CONFIG_T::accum_t)biases[iacc];
-    }
-
-ReuseLoop:
-    for (int ir = 0; ir < rufactor; ir++) {
-        #pragma HLS PIPELINE II=1 rewind
-        typename CONFIG_T::accum_t tmpmult[block_factor];
-        #pragma HLS ARRAY_PARTITION variable=tmpmult complete
-
-    MultLoop:
-        for (int im = 0; im < block_factor; im++) {
-            #pragma HLS UNROLL
-            int w_index = ir + rufactor * im;
-            int in_index = w_index % nin;
-            if (w_index >= CONFIG_T::n_in * CONFIG_T::n_out)
-                continue; // check out of bounds
-            tmpmult[im] =
-                CONFIG_T::template product<data_T, typename CONFIG_T::weight_t>::product(data[in_index], weights[w_index]);
-        }
-
-        typename CONFIG_T::accum_t mult[multiplier_limit];
-        #pragma HLS ARRAY_PARTITION variable=mult complete
-
-    ResetMult:
-        for (int imult = 0; imult < multiplier_limit; imult++) {
-            #pragma HLS UNROLL
-            mult[imult] = 0;
-        }
-
-    AccumLoop1:
-        for (int im = 0; im < block_factor; im++) {
-            #pragma HLS UNROLL
-            int w_index = ir + rufactor * im;
-            int out_index = w_index / multfactor;
-            if (out_index >= multiplier_limit)
-                continue; // check out of bounds
-            mult[out_index] += tmpmult[im];
-        }
-
-    AccumLoop2:
-        for (int im = 0; im < multiplier_limit; im++) {
-            #pragma HLS UNROLL
-            // int out_index = im/multscale; // This is the general case
-            // acc[out_index] += mult[im];
-            acc[im] += mult[im]; // If RF > N_IN then multiplier_limit == n_out
-        }
-    }
-
-// Cast to "res_t" type
-Result:
-    for (int ires = 0; ires < CONFIG_T::n_out; ires++) {
-        #pragma HLS UNROLL
-        res[ires] = cast<data_T, res_T, CONFIG_T>(acc[ires]);
-    }
-}
-
-template <class data_T, class res_T, typename CONFIG_T>
-void dense_resource(data_T data[CONFIG_T::n_in], res_T res[CONFIG_T::n_out],
-                    typename CONFIG_T::weight_t weights[CONFIG_T::n_in * CONFIG_T::n_out],
-                    typename CONFIG_T::bias_t biases[CONFIG_T::n_out]) {
-
-    #pragma HLS INLINE recursive
-
-    if (CONFIG_T::reuse_factor <= CONFIG_T::n_in) {
-        dense_resource_rf_leq_nin<data_T, res_T, CONFIG_T>(data, res, weights, biases);
-    } else if (CONFIG_T::reuse_factor % CONFIG_T::n_in == 0) {
dense_resource_rf_gt_nin_rem0(data, res, weights, biases); - } else { - dense_resource_rf_gt_nin(data, res, weights, biases); - } -} - -} // namespace nnet - -#endif diff --git a/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_dense_stream.h b/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_dense_stream.h deleted file mode 100644 index ad3a972..0000000 --- a/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_dense_stream.h +++ /dev/null @@ -1,68 +0,0 @@ -#ifndef NNET_DENSE_STREAM_H_ -#define NNET_DENSE_STREAM_H_ - -#include "hls_stream.h" -#include "nnet_common.h" -#include "nnet_types.h" -#include -#include - -namespace nnet { - -template -void dense_wrapper(data_T data[CONFIG_T::n_in], res_T res[CONFIG_T::n_out], - typename CONFIG_T::weight_t weights[CONFIG_T::n_in * CONFIG_T::n_out], - typename CONFIG_T::bias_t biases[CONFIG_T::n_out]) { - #pragma HLS INLINE recursive - if (CONFIG_T::strategy == nnet::latency) { - #pragma HLS PIPELINE II=CONFIG_T::reuse_factor - dense_latency(data, res, weights, biases); - } else { - dense_resource(data, res, weights, biases); - } -} - -template -void dense(hls::stream &data_stream, hls::stream &res_stream, - typename CONFIG_T::weight_t weights[CONFIG_T::n_in * CONFIG_T::n_out], - typename CONFIG_T::bias_t biases[CONFIG_T::n_out]) { - typename data_T::value_type data[CONFIG_T::n_in]; - #pragma HLS ARRAY_PARTITION variable=data complete - - typename res_T::value_type res[CONFIG_T::n_out]; - #pragma HLS ARRAY_PARTITION variable=res complete - -DataPrepare: - for (int i_in = 0; i_in < CONFIG_T::n_in / data_T::size; i_in++) { - if (CONFIG_T::n_in / data_T::size > 1) { - #pragma HLS PIPELINE - } - data_T data_pack = data_stream.read(); - DataPack: - for (int i_pack = 0; i_pack < data_T::size; i_pack++) { - #pragma HLS UNROLL - data[i_in * data_T::size + i_pack] = data_pack[i_pack]; - } - } - - dense_wrapper(data, res, weights, biases); - -ResWrite: - for (unsigned i_out = 0; i_out < CONFIG_T::n_out / res_T::size; i_out++) { - if (CONFIG_T::n_out / res_T::size > 1) { - #pragma HLS PIPELINE - } - res_T res_pack; - PRAGMA_DATA_PACK(res_pack) - ResPack: - for (int i_pack = 0; i_pack < res_T::size; i_pack++) { - #pragma HLS UNROLL - res_pack[i_pack] = res[i_out * res_T::size + i_pack]; - } - res_stream.write(res_pack); - } -} - -} // namespace nnet - -#endif diff --git a/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_embed.h b/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_embed.h deleted file mode 100644 index dfc77af..0000000 --- a/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_embed.h +++ /dev/null @@ -1,45 +0,0 @@ -#ifndef NNET_EMBED_H_ -#define NNET_EMBED_H_ - -#include "nnet_common.h" -#include "nnet_helpers.h" - -namespace nnet { - -struct embed_config { - // Internal data type definitions - typedef float embeddings_t; - - // Layer Sizes - static const unsigned n_in = 10; - static const unsigned n_out = 16; - static const unsigned vocab_size = 50; - - // Resource reuse info - static const unsigned io_type = io_parallel; - static const unsigned reuse_factor = 1; -}; - -template -void embedding(data_T data[CONFIG_T::n_in], res_T res[CONFIG_T::n_in * CONFIG_T::n_out], - typename CONFIG_T::embeddings_t embeddings[CONFIG_T::vocab_size * CONFIG_T::n_out]) { - - #pragma HLS PIPELINE II=CONFIG_T::reuse_factor - // This can save a few cycles, but it will create a large multiplexer due to - // non-constant access pattern, so let's leave it out - //#pragma HLS ARRAY_PARTITION variable=embeddings complete - -InputSequence: - for (int j = 0; j < CONFIG_T::n_in; j++) { - 
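The embedding layer in this loop is a pure table lookup: each input index selects one row of the embeddings array, with no arithmetic at all. A host-side model with toy sizes (not hls4ml code; the on-chip array is replaced by a plain float array):

    #include <cstdio>

    // Plain C++ model of the embedding lookup: token j reads row data[j] of the
    // embeddings table into the output.
    int main() {
        const int n_in = 3, n_out = 4, vocab_size = 5;
        float embeddings[vocab_size * n_out];
        for (int i = 0; i < vocab_size * n_out; i++)
            embeddings[i] = 0.1f * i; // fake trained table

        int data[n_in] = {4, 0, 2}; // token indices
        float res[n_in * n_out];
        for (int j = 0; j < n_in; j++)
            for (int i = 0; i < n_out; i++)
                res[j * n_out + i] = embeddings[data[j] * n_out + i];

        for (int j = 0; j < n_in; j++)
            std::printf("token %d -> row starting at %.1f\n", data[j], res[j * n_out]);
        return 0;
    }

This is why the header below leaves the embeddings array unpartitioned by default: the data-dependent row index would otherwise force a wide multiplexer.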
#pragma HLS UNROLL - DenseEmbedding: - for (int i = 0; i < CONFIG_T::n_out; i++) { - #pragma HLS UNROLL - res[j * CONFIG_T::n_out + i] = embeddings[data[j] * CONFIG_T::n_out + i]; - } - } -} - -} // namespace nnet - -#endif diff --git a/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_embed_stream.h b/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_embed_stream.h deleted file mode 100644 index 79ae9bc..0000000 --- a/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_embed_stream.h +++ /dev/null @@ -1,33 +0,0 @@ -#ifndef NNET_EMBED_STREAM_H_ -#define NNET_EMBED_STREAM_H_ - -#include "hls_stream.h" -#include "nnet_common.h" -#include "nnet_helpers.h" - -namespace nnet { - -template -void embedding(hls::stream &data, hls::stream &res, - typename CONFIG_T::embeddings_t embeddings[CONFIG_T::vocab_size * CONFIG_T::n_out]) { - data_T in_data = data.read(); - -InputSequence: - for (int j = 0; j < data_T::size; j++) { - #pragma HLS PIPELINE II=CONFIG_T::reuse_factor - - res_T res_pack; - PRAGMA_DATA_PACK(res_pack) - - DenseEmbedding: - for (int i = 0; i < CONFIG_T::n_out; i++) { - #pragma HLS UNROLL - res_pack[i] = embeddings[in_data[j] * CONFIG_T::n_out + i]; - } - res.write(res_pack); - } -} - -} // namespace nnet - -#endif diff --git a/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_garnet.h b/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_garnet.h deleted file mode 100644 index 1fcd554..0000000 --- a/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_garnet.h +++ /dev/null @@ -1,816 +0,0 @@ -#ifndef NNET_GARNET_H_ -#define NNET_GARNET_H_ - -#include "hls_math.h" -#include "hls_stream.h" -#include "nnet_common.h" - -namespace nnet { -namespace garnet_utils { - -template -inline typename std::enable_if::value>::type -initialize_edge_weights_table(typename CONFIG_T::edge_weight_t edge_weights_table[]) { - typedef ap_uint index_t; - - unsigned const table_size = (1 << CONFIG_T::distance_width); - - index_t index; - typename CONFIG_T::distance_t distance; - - // edge_weight_t is ap_ufixed with 0 iwidth -> let index 0 be a saturated version of 1 - edge_weights_table[0] = ap_ufixed(1.); - - for (unsigned iw = 1; iw < table_size; ++iw) { - index = iw; - distance.range(CONFIG_T::distance_width - 1, 0) = index.range(CONFIG_T::distance_width - 1, 0); - edge_weights_table[iw] = hls::exp(-distance * distance); - } -} - -template -inline typename std::enable_if::value>::type -initialize_edge_weights_table(typename CONFIG_T::edge_weight_t edge_weights_table[]) { - unsigned const table_size = (1 << CONFIG_T::distance_width); - double const step = 64. / table_size; - - typename CONFIG_T::distance_t v = -32.; - for (unsigned iw = 0; iw < table_size; ++iw) { - edge_weights_table[iw] = std::exp(-v * v); - v += step; - } -} - -template -inline typename std::enable_if::value, typename CONFIG_T::edge_weight_t>::type -get_edge_weight(typename CONFIG_T::distance_t distance, typename CONFIG_T::edge_weight_t edge_weights_table[]) { - typedef ap_uint index_t; - - index_t index(distance.range(CONFIG_T::distance_width - 1, 0)); - - return edge_weights_table[index]; -} - -template -inline - typename std::enable_if::value, typename CONFIG_T::edge_weight_t>::type - get_edge_weight(typename CONFIG_T::distance_t distance, typename CONFIG_T::edge_weight_t edge_weights_table[]) { - unsigned const table_size = (1 << CONFIG_T::distance_width); - double const step = 64. / table_size; - - int index = (distance + 32.) 
/ step; - if (index < 0) - index = 0; - else if (index >= table_size) - index = table_size - 1; - - return edge_weights_table[index]; -} - -template typename CONFIG_T::edge_weight_t compute_edge_weight(typename CONFIG_T::distance_t distance) { - if (CONFIG_T::is_stack) { - #pragma HLS INLINE OFF - } -#ifdef __SYNTHESIS__ - typename CONFIG_T::edge_weight_t edge_weights_table[1 << CONFIG_T::distance_width]; - // unsigned const reshape_factor = CONFIG_T::n_aggregators * CONFIG_T::n_in_features * (CONFIG_T::n_vertices / - // CONFIG_T::reuse_factor); - // #pragma HLS ARRAY_RESHAPE variable=edge_weights_table cyclic factor=reshape_factor dim=1 - bool initialized = false; -#else - static typename CONFIG_T::edge_weight_t edge_weights_table[1 << CONFIG_T::distance_width]; - static bool initialized = false; -#endif - if (not initialized) { - initialize_edge_weights_table(edge_weights_table); - initialized = true; - } - - return get_edge_weight(distance, edge_weights_table); -} - -template -inline typename std::enable_if::value, dividend_T>::type normalize_log2(dividend_T dividend, - exponent_T exponent) { - #pragma HLS INLINE - return dividend >> exponent; -} - -template -inline typename std::enable_if::value, dividend_T>::type normalize_log2(dividend_T dividend, - exponent_T exponent) { - #pragma HLS INLINE - return dividend / std::pow(2., exponent); -} - -template struct Means { - typedef E edge_weight_t; - - edge_weight_t edge_weight_mean[CONFIG_T::n_aggregators]; - typename CONFIG_T::aggr_t weighted_feature_mean[CONFIG_T::n_aggregators * CONFIG_T::n_in_features]; - - Means() { - #pragma HLS INLINE - #pragma HLS ARRAY_PARTITION variable=edge_weight_mean complete - #pragma HLS ARRAY_PARTITION variable=weighted_feature_mean complete - #pragma HLS UNROLL region - - Aggregators: - for (unsigned ia = 0; ia < CONFIG_T::n_aggregators; ++ia) { - edge_weight_mean[ia] = 0.; - - InFeatures: - for (unsigned ix = 0; ix < CONFIG_T::n_in_features; ++ix) { - unsigned const iax = ia * CONFIG_T::n_in_features + ix; - weighted_feature_mean[iax] = 0.; - } - } - } - - void set_weight(unsigned, edge_weight_t const &) { - #pragma HLS INLINE - } - - void add_means_normalized(Means const &local) { - #pragma HLS INLINE - // Always called within a pipelined region - no UNROLL needed - - unsigned const log2_unroll_factor = CONFIG_T::n_vertices_width - CONFIG_T::log2_reuse_factor; - - Aggregators: - for (unsigned ia = 0; ia < CONFIG_T::n_aggregators; ++ia) { - edge_weight_mean[ia] += normalize_log2(local.edge_weight_mean[ia], log2_unroll_factor); - - InFeatures: - for (unsigned ix = 0; ix < CONFIG_T::n_in_features; ++ix) { - unsigned const iax = ia * CONFIG_T::n_in_features + ix; - weighted_feature_mean[iax] += normalize_log2(local.weighted_feature_mean[iax], log2_unroll_factor); - } - } - } - - template - typename std::enable_if::type set_means_normalized(nvtx_T const nvtx, arrays_T const &accum) { - #pragma HLS INLINE - #pragma HLS UNROLL region - - // accum comes divided by unroll factor - typename T::norm_t nvtx_norm = (T::n_vertices / T::reuse_factor) / nvtx; - - Aggregators: - for (unsigned ia = 0; ia < T::n_aggregators; ++ia) { - edge_weight_mean[ia] = accum.edge_weight_mean[ia] * nvtx_norm; - - InFeatures: - for (unsigned ix = 0; ix < T::n_in_features; ++ix) { - unsigned const iax = ia * T::n_in_features + ix; - - weighted_feature_mean[iax] = accum.weighted_feature_mean[iax] * nvtx_norm; - } - } - } - - template - typename std::enable_if::type set_means_normalized(nvtx_T const nvtx, arrays_T const &accum) { - #pragma 
HLS INLINE - #pragma HLS UNROLL region - - Aggregators: - for (unsigned ia = 0; ia < T::n_aggregators; ++ia) { - - edge_weight_mean[ia] = normalize_log2(accum.edge_weight_mean[ia], T::log2_reuse_factor); - - InFeatures: - for (unsigned ix = 0; ix < T::n_in_features; ++ix) { - unsigned const iax = ia * T::n_in_features + ix; - - weighted_feature_mean[iax] = normalize_log2(accum.weighted_feature_mean[iax], T::log2_reuse_factor); - } - } - } -}; - -template struct WeightsAndMeans : public Means { - typedef E edge_weight_t; - - edge_weight_t edge_weights[CONFIG_T::n_vertices * CONFIG_T::n_aggregators]; - - WeightsAndMeans() : Means() { - #pragma HLS INLINE - unsigned const reshape_factor = CONFIG_T::n_aggregators * (CONFIG_T::n_vertices / CONFIG_T::reuse_factor); - #pragma HLS ARRAY_PARTITION variable=edge_weights cyclic factor=reshape_factor - } - - void set_weight(unsigned iva, edge_weight_t const &weight) { - #pragma HLS INLINE - edge_weights[iva] = weight; - } -}; - -template struct OutputBiasNormalizer; - -template -struct OutputBiasNormalizer::type> { - typedef typename CONFIG_T::output_transform_biases_t biases_t; - - biases_t const (&output_biases)[CONFIG_T::n_out_features]; - - OutputBiasNormalizer(nvtx_T const) : output_biases{CONFIG_T::output_transform_biases} { - #pragma HLS INLINE - } -}; - -template -struct OutputBiasNormalizer::type> { - typedef typename CONFIG_T::output_transform_biases_t biases_t; - - biases_t output_biases[CONFIG_T::n_out_features]; - - OutputBiasNormalizer(nvtx_T const nvtx) { - #pragma HLS ARRAY_PARTITION variable=output_biases complete - #pragma HLS UNROLL region - - // Cannot add a loop label here due to a Vivado HLS bug, apparently - for (unsigned io = 0; io < CONFIG_T::n_out_features; ++io) { - typename CONFIG_T::aggr_t bias = CONFIG_T::output_transform_biases[io]; - bias *= nvtx; - output_biases[io] = normalize_log2(bias, CONFIG_T::n_vertices_width); - } - } -}; - -template struct InputDataGetter { - typedef data_T data_t; - - data_T const *dataref; - - InputDataGetter(data_T const *d) : dataref{d} { - #pragma HLS INLINE - } - data_T const &get(unsigned iv, unsigned ix) const { - #pragma HLS INLINE - unsigned const ivx = iv * CONFIG_T::n_in_features + ix; - return dataref[ivx]; - } -}; - -template struct SingleVertexDataGetter { - typedef data_T data_t; - - data_T const (&dataref)[CONFIG_T::n_in_features]; - - SingleVertexDataGetter(data_T const (&d)[CONFIG_T::n_in_features]) : dataref{d} { - #pragma HLS INLINE - } - data_T const &get(unsigned, unsigned ix) const { - #pragma HLS INLINE - return dataref[ix]; - } -}; - -template struct OutputResSetter { - typedef res_T res_t; - - res_T *resref; - - OutputResSetter(res_T *r) : resref{r} { - #pragma HLS INLINE - } - void set(unsigned iv, unsigned io, res_T const &acc) { - #pragma HLS INLINE - unsigned const ivo = iv * CONFIG_T::n_out_features + io; - resref[ivo] = acc; - } -}; - -template struct SingleVertexResSetter { - typedef res_T res_t; - - res_T (&resref)[CONFIG_T::n_out_features]; - - SingleVertexResSetter(res_T (&r)[CONFIG_T::n_out_features]) : resref{r} { - #pragma HLS INLINE - } - void set(unsigned, unsigned io, res_T const &acc) { - #pragma HLS INLINE - resref[io] = acc; - } -}; - -template -inline void compute_weights_aggregates(data_getter_T const &data_getter, unsigned iv, arrays_local_T &arrays_local, - arrays_T &arrays) { - #pragma HLS INLINE - -Aggregators: - for (unsigned ia = 0; ia < CONFIG_T::n_aggregators; ++ia) { - typename CONFIG_T::distance_t distance = 
CONFIG_T::aggregator_distance_biases[ia]; - - InFeatures1: - for (unsigned ix = 0; ix < CONFIG_T::n_in_features; ++ix) { - unsigned const iax = ia * CONFIG_T::n_in_features + ix; - - typename CONFIG_T::distance_t incr = data_getter.get(iv, ix) * CONFIG_T::aggregator_distance_weights[iax]; - - distance += incr; - } - - typename CONFIG_T::edge_weight_t edge_weight = - garnet_utils::compute_edge_weight(distance); - - arrays_local.edge_weight_mean[ia] += edge_weight; - - InFeatures2: - for (unsigned ix = 0; ix < CONFIG_T::n_in_features; ++ix) { - unsigned const iax = ia * CONFIG_T::n_in_features + ix; - - typename data_getter_T::data_t incr = data_getter.get(iv, ix) * edge_weight; - - arrays_local.weighted_feature_mean[iax] += incr; - } - - unsigned const iva = iv * CONFIG_T::n_aggregators + ia; - arrays.set_weight(iva, edge_weight); - } -} - -template -inline typename CONFIG_T::aggr_t compute_output_base_core(arrays_T const &arrays, unsigned io, unsigned ia) { - #pragma HLS INLINE - #pragma HLS UNROLL region - - unsigned const ioa = io * CONFIG_T::n_aggregators + ia; - typename CONFIG_T::aggr_t aggr = arrays.edge_weight_mean[ia] * CONFIG_T::input_transform_biases[ioa]; - -InFeatures: - for (unsigned ix = 0; ix < CONFIG_T::n_in_features; ++ix) { - unsigned const ioax = ioa * CONFIG_T::n_in_features + ix; - unsigned const iax = ia * CONFIG_T::n_in_features + ix; - - aggr += arrays.weighted_feature_mean[iax] * CONFIG_T::input_transform_weights[ioax]; - } - - return aggr; -} - -template -inline void compute_output_base(arrays_T const &arrays, - typename CONFIG_T::aggr_t output_base[CONFIG_T::n_out_features * CONFIG_T::n_aggregators]) { - #pragma HLS INLINE - #pragma HLS UNROLL region - -OutFeatures: - for (unsigned io = 0; io < CONFIG_T::n_out_features; ++io) { - Aggregators: - for (unsigned ia = 0; ia < CONFIG_T::n_aggregators; ++ia) { - unsigned const ioa = io * CONFIG_T::n_aggregators + ia; - - output_base[ioa] = compute_output_base_core(arrays, io, ia); - } - } -} - -template -inline void -compute_vertex_output(arrays_T const &arrays, unsigned iv, - typename CONFIG_T::aggr_t const output_base[CONFIG_T::n_out_features * CONFIG_T::n_aggregators], - res_setter_T &res_setter) { - #pragma HLS INLINE - - typename arrays_T::edge_weight_t edge_weights[CONFIG_T::n_aggregators]; - #pragma HLS ARRAY_PARTITION variable=edge_weights complete - -Aggregators1: - for (unsigned ia = 0; ia < CONFIG_T::n_aggregators; ++ia) { - unsigned const iva = iv * CONFIG_T::n_aggregators + ia; - - edge_weights[ia] = arrays.edge_weights[iva]; - } - -OutFeatures: - for (unsigned io = 0; io < CONFIG_T::n_out_features; ++io) { - typename res_setter_T::res_t acc = CONFIG_T::output_transform_biases[io]; - - Aggregators2: - for (unsigned ia = 0; ia < CONFIG_T::n_aggregators; ++ia) { - unsigned const ioa = io * CONFIG_T::n_aggregators + ia; - - typename res_setter_T::res_t incr = edge_weights[ia] * output_base[ioa]; - acc += incr; - } - - res_setter.set(iv, io, acc); - } -} - -template -void aggregate(data_T const data[CONFIG_T::n_vertices * CONFIG_T::n_in_features], nvtx_T const nvtx, arrays_T &arrays) { - InputDataGetter data_getter(data); - - unsigned const unroll_factor = CONFIG_T::n_vertices >> CONFIG_T::log2_reuse_factor; - - Means means_accum; - -VerticesOuter: - for (unsigned ivv = 0; ivv < CONFIG_T::reuse_factor; ++ivv) { - #pragma HLS PIPELINE - - if (ivv * unroll_factor >= nvtx) - break; - - Means means_local; - - VerticesInner: - for (unsigned ir = 0; ir < unroll_factor; ++ir) { - unsigned iv = ivv * unroll_factor + 
ir; - - if (iv == nvtx) - break; - - compute_weights_aggregates(data_getter, iv, means_local, arrays); - } - - means_accum.add_means_normalized(means_local); - } - - arrays.set_means_normalized(nvtx, means_accum); -} - -template -void distribute(nvtx_T const nvtx, arrays_T const &arrays, res_T res[CONFIG_T::n_vertices * CONFIG_T::n_out_features]) { - OutputResSetter res_setter(res); - - typename CONFIG_T::aggr_t output_base[CONFIG_T::n_out_features * CONFIG_T::n_aggregators]; - #pragma HLS ARRAY_PARTITION variable=output_base complete - - compute_output_base(arrays, output_base); - - unsigned const unroll_factor = CONFIG_T::n_vertices >> CONFIG_T::log2_reuse_factor; - -VerticesOuter: - for (unsigned ivv = 0; ivv < CONFIG_T::reuse_factor; ++ivv) { - #pragma HLS PIPELINE - - if (ivv * unroll_factor >= nvtx) - break; - - VerticesInner: - for (unsigned ir = 0; ir < unroll_factor; ++ir) { - unsigned iv = ivv * unroll_factor + ir; - - if (iv == nvtx) - break; - - compute_vertex_output(arrays, iv, output_base, res_setter); - } - } -} - -template -void set_output(output_biases_T const &output_transform_biases, arrays_T const &arrays, - res_T res[CONFIG_T::n_out_features]) { - #pragma HLS PIPELINE - -OutFeatures: - for (unsigned io = 0; io < CONFIG_T::n_out_features; ++io) { - res_T acc = output_transform_biases.output_biases[io]; - - Aggregators: - for (unsigned ia = 0; ia < CONFIG_T::n_aggregators; ++ia) { - typename CONFIG_T::aggr_t aggr = compute_output_base_core(arrays, io, ia); - - acc += arrays.edge_weight_mean[ia] * aggr; - } - - res[io] = acc; - } -} - -template -void distribute_aggregate(nvtx_T const nvtx, prev_arrays_T const &prev_arrays, current_arrays_T ¤t_arrays) { - typedef typename prev_layer_t::output_t data_T; - - typename prev_layer_t::aggr_t prev_output_base[prev_layer_t::n_out_features * prev_layer_t::n_aggregators]; - #pragma HLS ARRAY_PARTITION variable=prev_output_base complete - - compute_output_base(prev_arrays, prev_output_base); - - unsigned const unroll_factor = current_layer_t::n_vertices >> current_layer_t::log2_reuse_factor; - - Means means_accum; - -VerticesOuter: - for (unsigned ivv = 0; ivv < current_layer_t::reuse_factor; ++ivv) { - #pragma HLS PIPELINE - - if (ivv * unroll_factor >= nvtx) - break; - - Means means_local; - - VerticesInner: - for (unsigned ir = 0; ir < unroll_factor; ++ir) { - unsigned iv = ivv * unroll_factor + ir; - - if (iv == nvtx) - break; - - data_T data[prev_layer_t::n_out_features]; - #pragma HLS ARRAY_PARTITION variable=data complete - - SingleVertexResSetter res_setter(data); - - compute_vertex_output(prev_arrays, iv, prev_output_base, res_setter); - - SingleVertexDataGetter data_getter(data); - - compute_weights_aggregates(data_getter, iv, means_local, current_arrays); - } - - means_accum.add_means_normalized(means_local); - } - - current_arrays.set_means_normalized(nvtx, means_accum); -} - -template -inline typename std::enable_if::value>::type -sublayer(nvtx_T const nvtx, prev_arrays_T const &prev_arrays, last_arrays_T &last_arrays) { - #pragma HLS INLINE - - distribute_aggregate(nvtx, prev_arrays, last_arrays); -} - -template -inline typename std::enable_if::value>::type -sublayer(nvtx_T const nvtx, prev_arrays_T const &prev_arrays, last_arrays_T &last_arrays) { - #pragma HLS INLINE - - WeightsAndMeans current_arrays; - - distribute_aggregate(nvtx, prev_arrays, current_arrays); - - sublayer(nvtx, current_arrays, last_arrays); -} -} // namespace garnet_utils - -struct garnet_config { - // Layer specs - static const unsigned 
n_vertices_width = 8; - static const unsigned n_vertices = (1 << n_vertices_width); - static const unsigned n_in_features = 4; - static const unsigned n_propagate = 4; - static const unsigned n_aggregators = 4; - static const unsigned n_out_features = 4; - static const unsigned distance_width = 12; - - // Internal data type definitions - typedef float input_transform_weights_t; - typedef float input_transform_biases_t; - typedef float output_transform_weights_t; - typedef float output_transform_biases_t; - typedef float aggregator_distance_weights_t; - typedef float aggregator_distance_biases_t; - - typedef float norm_t; - typedef float distance_t; - typedef float edge_weight_t; - typedef float edge_weight_aggr_t; - typedef float aggr_t; - typedef float output_t; - - /* static const input_transform_weights_t (&input_transform_weights)[n_out_features * n_aggregators * n_in_features]; */ - /* static const input_transform_biases_t (&input_transform_biases)[n_out_features * n_aggregators]; */ - /* static const aggregator_distance_weights_t (&aggregator_distance_weights)[n_aggregators * n_in_features]; */ - /* static const aggregator_distance_biases_t (&aggregator_distance_biases)[n_aggregators]; */ - /* static const output_transform_biases_t (&output_transform_biases)[n_out_features]; */ - - enum OutputCollapse { no_collapse, collapse_mean, collapse_max }; - - static const unsigned output_collapse = no_collapse; - - static const bool mean_by_nvert = false; - static const bool is_stack = false; - - // Optimization specs - static const unsigned reuse_factor = 64; - static const unsigned log2_reuse_factor = 6; -}; - -// vertices -> vertices -template -typename std::enable_if::type -garnet(data_T const data[CONFIG_T::n_vertices * CONFIG_T::n_in_features], nvtx_T const nvtx[1], - res_T res[CONFIG_T::n_vertices * CONFIG_T::n_out_features]) { - #pragma HLS DATAFLOW - - garnet_utils::WeightsAndMeans arrays; - - garnet_utils::aggregate(data, nvtx[0], arrays); - - garnet_utils::distribute(nvtx[0], arrays, res); -} - -// vertices -> out features -template -typename std::enable_if::type -garnet(data_T const data[CONFIG_T::n_vertices * CONFIG_T::n_in_features], nvtx_T const nvtx[1], - res_T res[CONFIG_T::n_out_features]) { - #pragma HLS DATAFLOW - - garnet_utils::Means arrays; - - garnet_utils::aggregate(data, nvtx[0], arrays); - - garnet_utils::OutputBiasNormalizer normalize_bias(nvtx[0]); - - garnet_utils::set_output(normalize_bias, arrays, res); -} - -// vertices -> vertices -template -typename std::enable_if::type -garnet_stack(data_T const data[CONFIG_T::n_vertices * CONFIG_T::n_in_features], nvtx_T const nvtx[1], - res_T res[CONFIG_T::n_vertices * CONFIG_T::n_out_features]) { - #pragma HLS DATAFLOW - - typedef typename CONFIG_T::template sublayer_t<0> first_layer_t; - unsigned const ilast = CONFIG_T::n_sublayers - 1; - typedef typename CONFIG_T::template sublayer_t last_layer_t; - - garnet_utils::WeightsAndMeans arrays_first; - garnet_utils::Means arrays_last; - - garnet_utils::aggregate(data, nvtx[0], arrays_first); - - garnet_utils::sublayer(nvtx[0], arrays_first, - arrays_last); - - garnet_utils::distribute(nvtx[0], arrays_last, res); -} - -// vertices -> out features -template -typename std::enable_if::type -garnet_stack(data_T const data[CONFIG_T::n_vertices * CONFIG_T::n_in_features], nvtx_T const nvtx[1], - res_T res[CONFIG_T::n_out_features]) { - #pragma HLS DATAFLOW - - typedef typename CONFIG_T::template sublayer_t<0> first_layer_t; - unsigned const ilast = CONFIG_T::n_sublayers - 1; - 
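The garnet_config struct above fixes the layer geometry, the working types, and the optimization knobs consumed by the garnet entry points. Purely as an assumed illustration (not part of this patch): a generated project might specialize it as in the sketch below. The header path, the type widths, and the template parameter order are guesses, and the static weight/bias arrays hinted at by the commented-out declarations in garnet_config would also have to be defined for this to link.

```cpp
// Hypothetical user-side configuration sketch; all names and widths here are
// illustrative assumptions, not values taken from this patch.
#include "ap_fixed.h"
#include "ap_int.h"
#include "nnet_garnet.h" // assumed location of the template code in this file

typedef ap_fixed<16, 6> input_t;
typedef ap_fixed<16, 6> result_t;

struct my_garnet_config : public nnet::garnet_config {
    // Average over vertices at the output, selecting the second overload
    static const unsigned output_collapse = collapse_mean;

    // Swap the float placeholders for synthesizable fixed-point types
    typedef ap_fixed<18, 8> distance_t;
    typedef ap_ufixed<10, 0> edge_weight_t;
    typedef ap_fixed<18, 8> aggr_t;
    typedef ap_fixed<16, 6> output_t;
    // NOTE: the static weight and bias arrays are omitted here; a real
    // generated config must define them (see the commented-out declarations).
};

// nvtx is passed as a one-element array holding the live vertex count
void garnet_layer(input_t data[my_garnet_config::n_vertices * my_garnet_config::n_in_features],
                  ap_uint<16> nvtx[1],
                  result_t res[my_garnet_config::n_out_features]) {
    // Template parameter order (data_T, nvtx_T, res_T, CONFIG_T) is assumed
    nnet::garnet<input_t, ap_uint<16>, result_t, my_garnet_config>(data, nvtx, res);
}
```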
typedef typename CONFIG_T::template sublayer_t last_layer_t; - - garnet_utils::WeightsAndMeans arrays_first; - garnet_utils::Means arrays_last; - - garnet_utils::aggregate(data, nvtx[0], arrays_first); - - garnet_utils::sublayer(nvtx[0], arrays_first, - arrays_last); - - garnet_utils::OutputBiasNormalizer normalize_bias(nvtx[0]); - - garnet_utils::set_output(normalize_bias, arrays_last, res); -} - -/* Reference (dumb) implementation returning (Vertices, Features) */ -template -typename std::enable_if::type -garnet_ref(data_T const data[CONFIG_T::n_vertices * CONFIG_T::n_in_features], nvtx_T const nvtx[1], - res_T res[CONFIG_T::n_vertices * CONFIG_T::n_out_features]) { - typename CONFIG_T::edge_weight_t edge_weights[CONFIG_T::n_vertices * CONFIG_T::n_aggregators]; - typename CONFIG_T::aggr_t propagated_features[CONFIG_T::n_vertices * CONFIG_T::n_propagate]; - - for (unsigned iv = 0; iv < CONFIG_T::n_vertices; ++iv) { - if (iv == nvtx[0]) - break; - - for (unsigned ip = 0; ip < CONFIG_T::n_propagate; ++ip) { - unsigned const ivp = iv * CONFIG_T::n_propagate + ip; - - propagated_features[ivp] = CONFIG_T::input_transform_biases[ip]; - - for (unsigned ix = 0; ix < CONFIG_T::n_in_features; ++ix) { - unsigned const ivx = iv * CONFIG_T::n_in_features + ix; - unsigned const ipx = ip * CONFIG_T::n_in_features + ix; - - propagated_features[ivp] += data[ivx] * CONFIG_T::input_transform_weights[ipx]; - } - } - - for (unsigned ia = 0; ia < CONFIG_T::n_aggregators; ++ia) { - unsigned const iva = iv * CONFIG_T::n_aggregators + ia; - - typename CONFIG_T::aggr_t distance = CONFIG_T::aggregator_distance_biases[ia]; - - for (unsigned ix = 0; ix < CONFIG_T::n_in_features; ++ix) { - unsigned const ivx = iv * CONFIG_T::n_in_features + ix; - unsigned const iax = ia * CONFIG_T::n_in_features + ix; - - distance += data[ivx] * CONFIG_T::aggregator_distance_weights[iax]; - } - - edge_weights[iva] = garnet_utils::compute_edge_weight(distance); - } - } - - typename CONFIG_T::aggr_t aggregated_features[CONFIG_T::n_aggregators * CONFIG_T::n_propagate]; - - for (unsigned ia = 0; ia < CONFIG_T::n_aggregators; ++ia) { - for (unsigned ip = 0; ip < CONFIG_T::n_propagate; ++ip) { - unsigned const iap = ia * CONFIG_T::n_propagate + ip; - - aggregated_features[iap] = 0.; - - for (unsigned iv = 0; iv < CONFIG_T::n_vertices; ++iv) { - if (iv == nvtx[0]) - break; - - unsigned const iva = iv * CONFIG_T::n_aggregators + ia; - unsigned const ivp = iv * CONFIG_T::n_propagate + ip; - - aggregated_features[iap] += edge_weights[iva] * propagated_features[ivp]; - } - } - } - - for (unsigned ia = 0; ia < CONFIG_T::n_aggregators; ++ia) { - for (unsigned ip = 0; ip < CONFIG_T::n_propagate; ++ip) { - unsigned const iap = ia * CONFIG_T::n_propagate + ip; - - if (CONFIG_T::mean_by_nvert) - aggregated_features[iap] /= nvtx[0]; - else { - // Not using right shift in case aggr_t is float or double - aggregated_features[iap] /= CONFIG_T::n_vertices; - } - } - } - - for (unsigned iv = 0; iv < CONFIG_T::n_vertices; ++iv) { - if (iv == nvtx[0]) - break; - - for (unsigned io = 0; io < CONFIG_T::n_out_features; ++io) { - unsigned const ivo = iv * CONFIG_T::n_out_features + io; - - typename CONFIG_T::aggr_t acc = CONFIG_T::output_transform_biases[io]; - - for (unsigned ia = 0; ia < CONFIG_T::n_aggregators; ++ia) { - unsigned const iva = iv * CONFIG_T::n_aggregators + ia; - unsigned const ioa = io * CONFIG_T::n_aggregators + ia; - - typename CONFIG_T::aggr_t aggr = 0.; - - for (unsigned ip = 0; ip < CONFIG_T::n_propagate; ++ip) { - unsigned const iap = ia 
* CONFIG_T::n_propagate + ip; - unsigned const ioap = ioa * CONFIG_T::n_propagate + ip; - - aggr += CONFIG_T::output_transform_weights[ioap] * aggregated_features[iap]; - } - - acc += edge_weights[iva] * aggr; - } - - res[ivo] = acc; - } - } -} - -/* Reference (dumb) implementation returning (Features) - output averaged over vertices already */ -template -typename std::enable_if::type -garnet_ref(data_T const data[CONFIG_T::n_vertices * CONFIG_T::n_in_features], nvtx_T const nvtx[1], - res_T res[CONFIG_T::n_out_features]) { - typename CONFIG_T::aggr_t vertex_res[CONFIG_T::n_vertices * CONFIG_T::n_out_features]; - - garnet_ref(data, nvtx, vertex_res); - - for (unsigned io = 0; io < CONFIG_T::n_out_features; ++io) { - typename CONFIG_T::aggr_t acc = 0.; - - for (unsigned iv = 0; iv < CONFIG_T::n_vertices; ++iv) { - if (iv == nvtx[0]) - break; - - unsigned const ivo = iv * CONFIG_T::n_out_features + io; - - acc += vertex_res[ivo]; - } - - if (CONFIG_T::mean_by_nvert) - acc /= nvtx[0]; - else { - // Not using right shift in case aggr_t is float or double - acc /= CONFIG_T::n_vertices; - } - - res[io] = acc; - } -} - -} // namespace nnet - -#endif diff --git a/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_helpers.h b/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_helpers.h deleted file mode 100644 index b8c2a48..0000000 --- a/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_helpers.h +++ /dev/null @@ -1,382 +0,0 @@ -#ifndef NNET_HELPERS_H -#define NNET_HELPERS_H - -#include "hls_stream.h" -#include -#include -#include -#include -#include -#include -#include -#include - -namespace nnet { - -#ifndef __SYNTHESIS__ - -#ifndef WEIGHTS_DIR -#define WEIGHTS_DIR "weights" -#endif - -template void load_weights_from_txt(T *w, const char *fname) { - - std::string full_path = std::string(WEIGHTS_DIR) + "/" + std::string(fname); - std::ifstream infile(full_path.c_str(), std::ios::binary); - - if (infile.fail()) { - std::cerr << "ERROR: file " << std::string(fname) << " does not exist" << std::endl; - exit(1); - } - - std::string line; - if (std::getline(infile, line)) { - std::istringstream iss(line); - std::string token; - - size_t i = 0; - while (std::getline(iss, token, ',')) { - std::istringstream(token) >> w[i]; - i++; - } - - if (SIZE != i) { - std::cerr << "ERROR: Expected " << SIZE << " values"; - std::cerr << " but read only " << i << " values" << std::endl; - } - } -} - -template void load_compressed_weights_from_txt(T *w, const char *fname) { - - std::string full_path = std::string(WEIGHTS_DIR) + "/" + std::string(fname); - std::ifstream infile(full_path.c_str(), std::ios::binary); - - if (infile.fail()) { - std::cerr << "ERROR: file " << std::string(fname) << " does not exist" << std::endl; - exit(1); - } - - std::string line; - if (std::getline(infile, line)) { - std::istringstream iss(line); - std::string token; - std::string extra_chars = "} "; - - size_t i = 0; - while (std::getline(iss, token, '{')) { - if (token.length() == 0) { - continue; - } - for (char c : extra_chars) { - token.erase(std::remove(token.begin(), token.end(), c), token.end()); - } - if (token.back() == ',') { - token.erase(token.end() - 1); - } - - std::replace(token.begin(), token.end(), ',', ' '); - std::istringstream structss(token); - - if (!(structss >> w[i].row_index >> w[i].col_index >> w[i].weight)) { - std::cerr << "ERROR: Unable to parse file " << std::string(fname); - exit(1); - } - i++; - } - - if (SIZE != i) { - std::cerr << "ERROR: Expected " << SIZE << " values"; - std::cerr << " but read only " << i << " 
values" << std::endl; - } - } -} - -template void load_exponent_weights_from_txt(T *w, const char *fname) { - - std::string full_path = std::string(WEIGHTS_DIR) + "/" + std::string(fname); - std::ifstream infile(full_path.c_str(), std::ios::binary); - - if (infile.fail()) { - std::cerr << "ERROR: file " << std::string(fname) << " does not exist" << std::endl; - exit(1); - } - - std::string line; - if (std::getline(infile, line)) { - std::istringstream iss(line); - std::string token; - std::string extra_chars = "} "; - - size_t i = 0; - while (std::getline(iss, token, '{')) { - if (token.length() == 0) { - continue; - } - for (char c : extra_chars) { - token.erase(std::remove(token.begin(), token.end(), c), token.end()); - } - if (token.back() == ',') { - token.erase(token.end() - 1); - } - - std::replace(token.begin(), token.end(), ',', ' '); - std::istringstream structss(token); - - if (!(structss >> w[i].sign >> w[i].weight)) { - std::cerr << "ERROR: Unable to parse file " << std::string(fname); - exit(1); - } - i++; - } - - if (SIZE != i) { - std::cerr << "ERROR: Expected " << SIZE << " values"; - std::cerr << " but read only " << i << " values" << std::endl; - } - } -} -template void convert_data(srcType *src, dstType *dst) { - for (size_t i = 0; i < SIZE; i++) { - dst[i] = dstType(src[i]); - } -} - -template void convert_data(srcType *src, hls::stream &dst) { - for (size_t i = 0; i < SIZE / dstType::size; i++) { - dstType ctype; - for (size_t j = 0; j < dstType::size; j++) { - ctype[j] = typename dstType::value_type(src[i * dstType::size + j]); - } - dst.write(ctype); - } -} - -template void convert_data(hls::stream &src, dstType *dst) { - for (size_t i = 0; i < SIZE / srcType::size; i++) { - srcType ctype = src.read(); - for (size_t j = 0; j < srcType::size; j++) { - dst[i * srcType::size + j] = dstType(ctype[j]); - } - } -} - -extern bool trace_enabled; -extern std::map *trace_outputs; -extern size_t trace_type_size; - -template void save_output_array(data_T *data, save_T *ptr, size_t layer_size) { - for (int i = 0; i < layer_size; i++) { - ptr[i] = save_T(data[i]); - } -} - -template void save_output_array(hls::stream &data, save_T *ptr, size_t layer_size) { - for (size_t i = 0; i < layer_size / data_T::size; i++) { - data_T ctype = data.read(); - for (size_t j = 0; j < data_T::size; j++) { - ptr[i * data_T::size + j] = save_T(ctype[j]); - } - data.write(ctype); - } -} - -// We don't want to include save_T in this function because it will be inserted into myproject.cpp -// so a workaround with element size is used -template void save_layer_output(data_T *data, const char *layer_name, size_t layer_size) { - if (!trace_enabled) - return; - - if (trace_outputs) { - if (trace_outputs->count(layer_name) > 0) { - if (trace_type_size == 4) { - save_output_array(data, (float *)(*trace_outputs)[layer_name], layer_size); - } else if (trace_type_size == 8) { - save_output_array(data, (double *)(*trace_outputs)[layer_name], layer_size); - } else { - std::cout << "Unknown trace type!" << std::endl; - } - } else { - std::cout << "Layer name: " << layer_name << " not found in debug storage!" 
<< std::endl; - } - } else { - std::ostringstream filename; - filename << "./tb_data/" << layer_name << "_output.log"; // TODO if run as a shared lib, path should be ../tb_data - std::fstream out; - out.open(filename.str(), std::ios::app); - assert(out.is_open()); - for (int i = 0; i < layer_size; i++) { - out << float(data[i]) << " "; // We don't care about precision in text files - } - out << std::endl; - out.close(); - } -} - -template void save_layer_output(hls::stream &data, const char *layer_name, size_t layer_size) { - if (!trace_enabled) - return; - - if (trace_outputs) { - if (trace_outputs->count(layer_name) > 0) { - if (trace_type_size == 4) { - save_output_array(data, (float *)(*trace_outputs)[layer_name], layer_size); - } else if (trace_type_size == 8) { - save_output_array(data, (double *)(*trace_outputs)[layer_name], layer_size); - } else { - std::cout << "Unknown trace type!" << std::endl; - } - } else { - std::cout << "Layer name: " << layer_name << " not found in debug storage!" << std::endl; - } - } else { - std::ostringstream filename; - filename << "./tb_data/" << layer_name << "_output.log"; // TODO if run as a shared lib, path should be ../tb_data - std::fstream out; - out.open(filename.str(), std::ios::app); - assert(out.is_open()); - for (size_t i = 0; i < layer_size / data_T::size; i++) { - data_T ctype = data.read(); - for (size_t j = 0; j < data_T::size; j++) { - out << float(ctype[j]) << " "; // We don't care about precision in text files - } - data.write(ctype); - } - out << std::endl; - out.close(); - } -} - -#endif - -template void copy_data(std::vector src, dst_T dst[SIZE]) { - typename std::vector::const_iterator in_begin = src.cbegin() + OFFSET; - typename std::vector::const_iterator in_end = in_begin + SIZE; - std::copy(in_begin, in_end, dst); -} - -template -void copy_data(std::vector src, hls::stream &dst) { - typename std::vector::const_iterator in_begin = src.cbegin() + OFFSET; - typename std::vector::const_iterator in_end = in_begin + SIZE; - - size_t i_pack = 0; - dst_T dst_pack; - for (typename std::vector::const_iterator i = in_begin; i != in_end; ++i) { - dst_pack[i_pack++] = typename dst_T::value_type(*i); - if (i_pack == dst_T::size) { - i_pack = 0; - dst.write(dst_pack); - } - } -} - -template void copy_data_axi(std::vector src, dst_T dst[SIZE]) { - for (auto i = 0; i < SIZE; i++) - if (i == SIZE - 1) { - dst[i].data = src[i]; - dst[i].last = 1; - } else { - dst[i].data = src[i]; - dst[i].last = 0; - } -} - -template void print_result(res_T result[SIZE], std::ostream &out, bool keep = false) { - for (int i = 0; i < SIZE; i++) { - out << result[i] << " "; - } - out << std::endl; -} - -template void print_result(hls::stream &result, std::ostream &out, bool keep = false) { - for (int i = 0; i < SIZE / res_T::size; i++) { - res_T res_pack = result.read(); - for (int j = 0; j < res_T::size; j++) { - out << res_pack[j] << " "; - } - if (keep) - result.write(res_pack); - } - out << std::endl; -} - -template void fill_zero(data_T data[SIZE]) { std::fill_n(data, SIZE, 0.); } - -template void fill_zero(hls::stream &data) { - for (int i = 0; i < SIZE / data_T::size; i++) { - data_T data_pack; - for (int j = 0; j < data_T::size; j++) { - data_pack[j] = 0.; - } - data.write(data_pack); - } -} - -template int read_file_1D(const char *filename, dataType data[nrows]) { - FILE *fp; - fp = fopen(filename, "r"); - if (fp == 0) { - return -1; - } - // Read data from file - float newval; - for (int ii = 0; ii < nrows; ii++) { - if (fscanf(fp, "%f\n", &newval) != 
0) { - data[ii] = newval; - } else { - return -2; - } - } - fclose(fp); - return 0; -} - -template -int read_file_2D(const char *filename, dataType data[nrows][ncols]) { - FILE *fp; - fp = fopen(filename, "r"); - if (fp == 0) { - return -1; - } - // Read data from file - float newval; - for (int ii = 0; ii < nrows; ii++) { - for (int jj = 0; jj < ncols; jj++) { - if (fscanf(fp, "%f\n", &newval) != 0) { - data[ii][jj] = newval; - } else { - return -2; - } - } - } - fclose(fp); - return 0; -} - -template void change_type(hls::stream &in, hls::stream &out) { - in_T datareg; - hls::stream input_trunc; - for (int ii = 0; ii < N_IN; ii++) { - out << (out_T)in.read(); - } -} - -template void hls_stream_debug(hls::stream &data, hls::stream &res) { - data_T datareg; - for (int ii = 0; ii < N_IN; ii++) { - datareg = data.read(); - std::cout << "[" << ii << "]: " << datareg << std::endl; - res << datareg; - } -} - -constexpr int ceillog2(int x) { return (x <= 2) ? 1 : 1 + ceillog2((x + 1) / 2); } - -constexpr int floorlog2(int x) { return (x < 2) ? 0 : 1 + floorlog2(x / 2); } - -constexpr int pow2(int x) { return x == 0 ? 1 : 2 * pow2(x - 1); } - -} // namespace nnet - -#endif diff --git a/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_image.h b/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_image.h deleted file mode 100644 index eeb4548..0000000 --- a/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_image.h +++ /dev/null @@ -1,41 +0,0 @@ -#ifndef NNET_IMAGE_H_ -#define NNET_IMAGE_H_ - -#include "hls_stream.h" -#include "nnet_common.h" -#include - -namespace nnet { - -struct resize_config { - static const unsigned height = 10; - static const unsigned width = 10; - static const unsigned n_chan = 10; - static const unsigned new_height = 10; - static const unsigned new_width = 10; -}; - -template -void resize_nearest(data_T image[CONFIG_T::height * CONFIG_T::width * CONFIG_T::n_chan], - data_T resized[CONFIG_T::new_height * CONFIG_T::new_width * CONFIG_T::n_chan]) { - int y_ratio = (int)((CONFIG_T::height << 16) / CONFIG_T::new_height) + 1; - int x_ratio = (int)((CONFIG_T::width << 16) / CONFIG_T::new_width) + 1; - int x2, y2; - - #pragma HLS PIPELINE - - for (int i = 0; i < CONFIG_T::new_height; i++) { - for (int j = 0; j < CONFIG_T::new_width; j++) { - x2 = ((j * x_ratio) >> 16); - y2 = ((i * y_ratio) >> 16); - for (int k = 0; k < CONFIG_T::n_chan; k++) { - resized[(i * CONFIG_T::new_width * CONFIG_T::n_chan) + j * CONFIG_T::n_chan + k] = - image[(y2 * CONFIG_T::width * CONFIG_T::n_chan) + x2 * CONFIG_T::n_chan + k]; - } - } - } -} - -} // namespace nnet - -#endif diff --git a/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_image_stream.h b/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_image_stream.h deleted file mode 100644 index a23a93d..0000000 --- a/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_image_stream.h +++ /dev/null @@ -1,66 +0,0 @@ -#ifndef NNET_IMAGE_STREAM_H_ -#define NNET_IMAGE_STREAM_H_ - -#include "hls_stream.h" -#include "nnet_common.h" - -namespace nnet { - -template void resize_nearest(hls::stream &image, hls::stream &resized) { - assert(CONFIG_T::new_height % CONFIG_T::height == 0); - assert(CONFIG_T::new_width % CONFIG_T::width == 0); - constexpr unsigned ratio_height = CONFIG_T::new_height / CONFIG_T::height; - constexpr unsigned ratio_width = CONFIG_T::new_width / CONFIG_T::width; - -ImageHeight: - for (unsigned h = 0; h < CONFIG_T::height; h++) { - #pragma HLS PIPELINE - - data_T data_in_row[CONFIG_T::width]; - - ImageWidth: - for (unsigned i = 0; i < CONFIG_T::width; i++) { - 
#pragma HLS UNROLL - - data_T in_data = image.read(); - - ImageChan: - for (unsigned j = 0; j < CONFIG_T::n_chan; j++) { - #pragma HLS UNROLL - - data_in_row[i][j] = in_data[j]; - } - } - - ResizeHeight: - for (unsigned i = 0; i < ratio_height; i++) { - #pragma HLS UNROLL - - ImageWidth2: - for (unsigned l = 0; l < CONFIG_T::width; l++) { - #pragma HLS UNROLL - - ResizeWidth: - for (unsigned j = 0; j < ratio_width; j++) { - #pragma HLS UNROLL - - data_T out_data; - PRAGMA_DATA_PACK(out_data) - - ResizeChan: - for (unsigned k = 0; k < CONFIG_T::n_chan; k++) { - #pragma HLS UNROLL - - out_data[k] = data_in_row[l][k]; - } - - resized.write(out_data); - } - } - } - } -} - -} // namespace nnet - -#endif diff --git a/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_math.h b/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_math.h deleted file mode 100644 index c021d8e..0000000 --- a/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_math.h +++ /dev/null @@ -1,178 +0,0 @@ -#ifndef NNET_MATH_H_ -#define NNET_MATH_H_ - -#include "hls_math.h" - -namespace nnet { - -// This header defines the functions that return type different from the input -// For example, hls::sin(x) returns ap_fixed -// By ensuring we return the same type we can avoid casting issues in expressions - -template T sin(T x) { return (T)hls::sin(x); }; - -template T cos(T x) { return (T)hls::cos(x); }; - -template T asin(T x) { return (T)hls::asin(x); }; - -template T acos(T x) { return (T)hls::acos(x); }; - -template T atan(T x) { return (T)hls::atan(x); }; - -template T atan2(T x, T y) { return (T)hls::atan2(x, y); }; - -template void init_sincos_table(T table[1 << (W - I - 3)][2]) { - unsigned int NTE = 1 << (W - I - 3); // No of table entries - double step = M_PI / (4 * NTE); // Interval between angles - double y = 0; - // double scaled_angle = 0; - - for (unsigned int i = 0; i < NTE; i++) { - table[i][0] = std::cos(y); - table[i][1] = std::sin(y); - y += step; - // scaled_angle = y/(2*M_PI); - // printf("cos(%f) = %23.22f, sin(%f) = %23.22f index = %d, scaled angle = %13.12f \n", y, cos(y), y, sin(y), i, - // scaled_angle); - } -} - -template void sincos_lut(const T &input, T output[2]) { - - #pragma HLS INLINE - - // This implementation is based on ac_sincos_lut.h from AC math library - - static bool flag = true; - if (flag && T::width - T::iwidth > 12) { -#if !defined(__SYNTHESIS__) && defined(SINCOS_LUT_DEBUG) - std::cout << "FILE : " << __FILE__ << ", LINE : " << __LINE__ << std::endl; - std::cout << "Warning: The output of sincos_lut will not be accurate" << std::endl; -#endif - flag = false; - } - // Datatype for lookup table entries - typedef ap_ufixed luttype; - // Datatype for posinput which is used to handle negative inputs - typedef ap_ufixed posinputtype; - - typedef ap_uint<9> lutindextype; // 9 bits required for indexing into 512 entry table - typedef ap_uint<3> octanttype; // 3 bits required for octant value range of 0 thru 7 - T outputtemp[2]; - lutindextype luTdex = 0; - posinputtype posinput = input; - - // Initialize the lookup table -#ifdef __SYNTHESIS__ - bool initialized = false; - luttype sincos[512][2]; -#else - static bool initialized = false; - static luttype sincos[512][2]; -#endif - if (!initialized) { - init_sincos_table(sincos); - initialized = true; - } - - // Leaving this commented out makes the table to to BRAM - //#pragma HLS ARRAY_PARTITION variable=sincos complete dim=0 - - typedef ap_uint lutindextype1; - // Extracting (MSB-3:LSB) bits of scaled input to determine the lookup table index - 
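init_sincos_table above builds a single 512-entry quarter-wave table over [0, pi/4); the octant selects that follow map it to the full circle through eight-fold symmetry. The plain-C++, double-precision sketch below models that folding scheme with illustrative names; the HLS version additionally handles rounding of the index and the exact 0 and ±1 edge cases.

```cpp
#include <cmath>
#include <cstdio>

static const double PI = 3.14159265358979323846;
static const unsigned NTE = 512;              // table entries, as in init_sincos_table
static double table_sin[NTE], table_cos[NTE]; // quarter-wave tables over [0, pi/4)

void init_table() {
    double step = PI / (4 * NTE);             // interval between angles
    for (unsigned i = 0; i < NTE; i++) {
        table_cos[i] = std::cos(i * step);
        table_sin[i] = std::sin(i * step);
    }
}

// t is the angle scaled to "turns" (angle / 2*pi), assumed in [0, 1)
void sincos_model(double t, double &s, double &c) {
    unsigned oct = (unsigned)(t * 8.0) & 7;   // octant 0..7
    double f = t * 8.0 - std::floor(t * 8.0); // position inside the octant
    unsigned idx = (unsigned)(f * NTE);
    if (oct & 1)
        idx = NTE - 1 - idx;                  // reflect the index in odd octants
    double ts = table_sin[idx], tc = table_cos[idx];
    // Octant-to-quadrant mapping, following the selects in sincos_lut
    switch (oct) {
    case 0: s =  ts; c =  tc; break;
    case 1: s =  tc; c =  ts; break;
    case 2: s =  tc; c = -ts; break;
    case 3: s =  ts; c = -tc; break;
    case 4: s = -ts; c = -tc; break;
    case 5: s = -tc; c = -ts; break;
    case 6: s = -tc; c =  ts; break;
    default: s = -ts; c =  tc; break;         // octant 7
    }
}

int main() {
    init_table();
    double s, c;
    sincos_model(0.2, s, c);                  // 0.2 turns = 72 degrees
    std::printf("sin %f (ref %f), cos %f (ref %f)\n",
                s, std::sin(0.4 * PI), c, std::cos(0.4 * PI));
}
```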
lutindextype1 luTdex1 = posinput.range(AP_MAX(T::width - T::iwidth - 3, 1), 0); // Extracting the lookup table index - - if (T::width - T::iwidth >= 4 && T::width - T::iwidth <= 12) { - luTdex(8, 12 - (T::width - T::iwidth)) = luTdex1; // stride - } - // Approximation for the scaled inputs whose number of bits are greater than 12 - else if (T::width - T::iwidth > 12) { - // Lookup table index for the scaled inputs whose number of bits are greater than 12 - luTdex = luTdex1 / (1 << (AP_MAX(T::width - T::iwidth - 12, 0))); - if ((luTdex1 % (1 << (AP_MAX(T::width - T::iwidth - 12, 0)))) > (1 << (AP_MAX(T::width - T::iwidth - 13, 0)))) { - luTdex = luTdex + 1; - } - typedef ap_ufixed - datatype; - datatype x = (datatype)luTdex1; - x = x >> AP_MAX(T::width - T::iwidth - 12, 0); - if (x > 511.5) { - luTdex = 511; - } - if (luTdex1 <= 1 << (AP_MAX(T::width - T::iwidth - 13, 0)) && luTdex1 != 0) { - luTdex = 1; - } - } - - if (T::width - T::iwidth >= 3) { - // Getting the octant 0-7 by extracting the first 3 bits from MSB side of scaled input where - // octant 0 corresponds to [0-PI/4), - // octant 1 corresponds to [PI/4-2PI/4), - // octant 2 corresponds to [2PI/4-3PI/4) and so on - // octanttype octant = posinput.template slc<3>(T::width-T::iwidth-3); - octanttype octant = posinput(T::width - T::iwidth - 1, T::width - T::iwidth - 3); - luTdex = (octant[0] == 1) ? (lutindextype)(512 - luTdex) : (lutindextype)(luTdex); - // imaginary part is sine - outputtemp[1] = ((octant == 0) | (octant == 3)) ? (T)sincos[luTdex][1] - : ((octant == 2) | (octant == 1)) ? (T)sincos[luTdex][0] - : ((octant == 7) | (octant == 4)) ? (T)-sincos[luTdex][1] - : (T)-sincos[luTdex][0]; - // real part is cosine - outputtemp[0] = ((octant == 6) | (octant == 1)) ? (T)sincos[luTdex][1] - : ((octant == 3) | (octant == 4)) ? (T)-sincos[luTdex][0] - : ((octant == 2) | (octant == 5)) ? (T)-sincos[luTdex][1] - : (T)sincos[luTdex][0]; - // Below two are the cases when the output corresponds to + or - (0 or 1) for which there is no entry in the lookup - // table - output[1] = ((posinput == 0.125) | (posinput == 0.375)) ? T(0.7071067811865475244008) - : ((posinput == 0.625) | (posinput == 0.875)) ? T(-0.7071067811865475244008) - : outputtemp[1]; - output[0] = ((posinput == 0.125) | (posinput == 0.875)) ? T(0.7071067811865475244008) - : ((posinput == 0.375) | (posinput == 0.625)) ? T(-0.7071067811865475244008) - : outputtemp[0]; - } - - if (T::width - T::iwidth <= 2) { - output[1] = (posinput == 0) ? (T)0 - : (posinput == 0.25) ? (T)1 - : (posinput == 0.5) ? (T)0 - : (posinput == 0.75) ? (T)-1 - : outputtemp[1]; - output[0] = (posinput == 0) ? (T)1 - : (posinput == 0.25) ? (T)0 - : (posinput == 0.5) ? (T)-1 - : (posinput == 0.75) ? 
(T)0 - : outputtemp[0]; - } - -#if !defined(__SYNTHESIS__) && defined(SINCOS_LUT_DEBUG) - std::cout << "FILE : " << __FILE__ << ", LINE : " << __LINE__ << std::endl; - std::cout << "============AP_FIXED SINCOS======================" << std::endl; - std::cout << "positive input is = " << posinput << std::endl; - std::cout << "lut index is = " << luTdex << std::endl; - std::cout << "sin value is = " << output[1] << std::endl; - std::cout << "cos value is = " << output[0] << std::endl; - std::cout << "=================================================" << std::endl; -#endif -} - -template T sin_lut(const T input) { - #pragma HLS INLINE - T sincos_res[2]; - T scaled_input = input * ap_ufixed<16, 0>(0.15915494309); // 1/(2*pi) - sincos_lut(scaled_input, sincos_res); - return sincos_res[1]; -} - -template T cos_lut(const T input) { - #pragma HLS INLINE - T sincos_res[2]; - T scaled_input = input * ap_ufixed<16, 0>(0.15915494309); // 1/(2*pi) - sincos_lut(scaled_input, sincos_res); - return sincos_res[0]; -} - -} // namespace nnet - -#endif diff --git a/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_merge.h b/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_merge.h deleted file mode 100644 index 8005682..0000000 --- a/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_merge.h +++ /dev/null @@ -1,256 +0,0 @@ -#ifndef NNET_MERGE_H_ -#define NNET_MERGE_H_ - -#include "hls_stream.h" -#include "nnet_common.h" -#include "nnet_mult.h" -#include - -namespace nnet { - -struct merge_config { - static const unsigned n_elem = 10; -}; - -struct dot_config { - static const unsigned n_in = 10; - static const unsigned n_out = 1; - static const unsigned reuse_factor = 1; - typedef float accum_t; - // Product function to use - template using product = nnet::product::mult; -}; - -struct concat_config { - static const unsigned n_elem1_0 = 10; - static const unsigned n_elem1_1 = 10; - static const unsigned n_elem1_2 = 10; - static const unsigned n_elem2_0 = 10; - static const unsigned n_elem2_1 = 10; - static const unsigned n_elem2_2 = 10; - - static const unsigned axis = -1; -}; - -template -void add(input1_T data1[CONFIG_T::n_elem], input2_T data2[CONFIG_T::n_elem], res_T res[CONFIG_T::n_elem]) { - #pragma HLS PIPELINE - - for (int ii = 0; ii < CONFIG_T::n_elem; ii++) { - res[ii] = data1[ii] + data2[ii]; - } -} - -template -void subtract(input1_T data1[CONFIG_T::n_elem], input2_T data2[CONFIG_T::n_elem], res_T res[CONFIG_T::n_elem]) { - #pragma HLS PIPELINE - - for (int ii = 0; ii < CONFIG_T::n_elem; ii++) { - res[ii] = data1[ii] - data2[ii]; - } -} - -template -void multiply(input1_T data1[CONFIG_T::n_elem], input2_T data2[CONFIG_T::n_elem], res_T res[CONFIG_T::n_elem]) { - #pragma HLS PIPELINE - - for (int ii = 0; ii < CONFIG_T::n_elem; ii++) { - res[ii] = data1[ii] * data2[ii]; - } -} - -template -void average(input1_T data1[CONFIG_T::n_elem], input2_T data2[CONFIG_T::n_elem], res_T res[CONFIG_T::n_elem]) { - #pragma HLS PIPELINE - - for (int ii = 0; ii < CONFIG_T::n_elem; ii++) { - res[ii] = (data1[ii] + data2[ii]) / (res_T)2; - } -} - -template -void maximum(input1_T data1[CONFIG_T::n_elem], input2_T data2[CONFIG_T::n_elem], res_T res[CONFIG_T::n_elem]) { - #pragma HLS PIPELINE - - for (int ii = 0; ii < CONFIG_T::n_elem; ii++) { - res[ii] = (data1[ii] > data2[ii]) ? 
data1[ii] : data2[ii]; - } -} - -template -void minimum(input1_T data1[CONFIG_T::n_elem], input2_T data2[CONFIG_T::n_elem], res_T res[CONFIG_T::n_elem]) { - #pragma HLS PIPELINE - - for (int ii = 0; ii < CONFIG_T::n_elem; ii++) { - res[ii] = (data1[ii] < data2[ii]) ? data1[ii] : data2[ii]; - } -} - -template -void dot1d(input1_T data1[CONFIG_T::n_in], input2_T data2[CONFIG_T::n_in], res_T res[CONFIG_T::n_out]) { - #pragma HLS PIPELINE II=CONFIG_T::reuse_factor - - #pragma HLS ALLOCATION operation instances=mul limit=CONFIG_T::multiplier_limit - - typename CONFIG_T::accum_t mult[CONFIG_T::n_in]; - #pragma HLS ARRAY_PARTITION variable=mult complete - typename CONFIG_T::accum_t acc = 0; - -Product: - for (int i_mult = 0; i_mult < CONFIG_T::n_in; i_mult++) { - #pragma HLS UNROLL - mult[i_mult] = CONFIG_T::template product::product(data1[i_mult], data2[i_mult]); - } - -Accum: - for (int i_acc = 0; i_acc < CONFIG_T::n_in; i_acc++) { - #pragma HLS UNROLL - acc += mult[i_acc]; - } - - res[0] = cast(acc); -} - -template -void concatenate1d(input1_T data1[CONFIG_T::n_elem1_0], input2_T data2[CONFIG_T::n_elem2_0], - res_T res[CONFIG_T::n_elem1_0 + CONFIG_T::n_elem2_0]) { - #pragma HLS PIPELINE - - for (int ii = 0; ii < CONFIG_T::n_elem1_0; ii++) { - res[ii] = data1[ii]; - } - for (int ii = 0; ii < CONFIG_T::n_elem2_0; ii++) { - res[CONFIG_T::n_elem1_0 + ii] = data2[ii]; - } -} - -template -void concatenate2d_0(input1_T data1[CONFIG_T::n_elem1_0 * CONFIG_T::n_elem1_1], - input2_T data2[CONFIG_T::n_elem2_0 * CONFIG_T::n_elem2_1], - res_T res[CONFIG_T::n_elem1_0 * CONFIG_T::n_elem1_1 + CONFIG_T::n_elem2_0 * CONFIG_T::n_elem2_1]) { - #pragma HLS PIPELINE - - for (int ii = 0; ii < CONFIG_T::n_elem1_0 * CONFIG_T::n_elem1_1; ii++) { - res[ii] = data1[ii]; - } - for (int ii = 0; ii < CONFIG_T::n_elem2_0 * CONFIG_T::n_elem2_1; ii++) { - res[CONFIG_T::n_elem1_0 * CONFIG_T::n_elem1_1 + ii] = data2[ii]; - } -} - -template -void concatenate2d_1(input1_T data1[CONFIG_T::n_elem1_0 * CONFIG_T::n_elem1_1], - input2_T data2[CONFIG_T::n_elem2_0 * CONFIG_T::n_elem2_1], - res_T res[CONFIG_T::n_elem1_0 * CONFIG_T::n_elem1_1 + CONFIG_T::n_elem2_0 * CONFIG_T::n_elem2_1]) { - #pragma HLS PIPELINE - - for (int ii = 0; ii < CONFIG_T::n_elem1_0; ii++) { - for (int jj = 0; jj < CONFIG_T::n_elem1_1; jj++) { - res[ii * (CONFIG_T::n_elem1_1 + CONFIG_T::n_elem2_1) + jj] = data1[ii * CONFIG_T::n_elem1_1 + jj]; - } - for (int jj = 0; jj < CONFIG_T::n_elem2_1; jj++) { - res[ii * (CONFIG_T::n_elem1_1 + CONFIG_T::n_elem2_1) + CONFIG_T::n_elem1_1 + jj] = - data2[ii * CONFIG_T::n_elem2_1 + jj]; - } - } -} - -template -void concatenate2d(input1_T data1[CONFIG_T::n_elem1_0 * CONFIG_T::n_elem1_1], - input2_T data2[CONFIG_T::n_elem2_0 * CONFIG_T::n_elem2_1], - res_T res[CONFIG_T::n_elem1_0 * CONFIG_T::n_elem1_1 + CONFIG_T::n_elem2_0 * CONFIG_T::n_elem2_1]) { - #pragma HLS INLINE - - if (CONFIG_T::axis == 2 || CONFIG_T::axis == -1) { - concatenate2d_1(data1, data2, res); - } else { - concatenate2d_0(data1, data2, res); - } -} - -template -void concatenate3d_0(input1_T data1[CONFIG_T::n_elem1_0 * CONFIG_T::n_elem1_1 * CONFIG_T::n_elem1_2], - input2_T data2[CONFIG_T::n_elem2_0 * CONFIG_T::n_elem2_1 * CONFIG_T::n_elem2_2], - res_T res[CONFIG_T::n_elem1_0 * CONFIG_T::n_elem1_1 * CONFIG_T::n_elem1_2 + - CONFIG_T::n_elem2_0 * CONFIG_T::n_elem2_1 * CONFIG_T::n_elem2_2]) { - #pragma HLS PIPELINE - - for (int ii = 0; ii < CONFIG_T::n_elem1_0 * CONFIG_T::n_elem1_1 * CONFIG_T::n_elem1_2; ii++) { - res[ii] = data1[ii]; - } - for (int ii = 0; ii < 
CONFIG_T::n_elem2_0 * CONFIG_T::n_elem2_1 * CONFIG_T::n_elem2_2; ii++) { - res[CONFIG_T::n_elem1_0 * CONFIG_T::n_elem1_1 * CONFIG_T::n_elem1_2 + ii] = data2[ii]; - } -} - -template -void concatenate3d_1(input1_T data1[CONFIG_T::n_elem1_0 * CONFIG_T::n_elem1_1 * CONFIG_T::n_elem1_2], - input2_T data2[CONFIG_T::n_elem2_0 * CONFIG_T::n_elem2_1 * CONFIG_T::n_elem2_2], - res_T res[CONFIG_T::n_elem1_0 * CONFIG_T::n_elem1_1 * CONFIG_T::n_elem1_2 + - CONFIG_T::n_elem2_0 * CONFIG_T::n_elem2_1 * CONFIG_T::n_elem2_2]) { - #pragma HLS PIPELINE - - for (int ii = 0; ii < CONFIG_T::n_elem1_0; ii++) { - for (int jj = 0; jj < CONFIG_T::n_elem1_1; jj++) { - for (int kk = 0; kk < CONFIG_T::n_elem1_2; kk++) { - int res_idx = - ii * (CONFIG_T::n_elem1_1 + CONFIG_T::n_elem2_1) * CONFIG_T::n_elem1_2 + jj * CONFIG_T::n_elem1_2 + kk; - int data_idx = ii * CONFIG_T::n_elem1_1 * CONFIG_T::n_elem1_2 + jj * CONFIG_T::n_elem1_2 + kk; - res[res_idx] = data1[data_idx]; - } - } - for (int jj = 0; jj < CONFIG_T::n_elem2_1; jj++) { - for (int kk = 0; kk < CONFIG_T::n_elem2_2; kk++) { - int res_idx = ii * (CONFIG_T::n_elem1_1 + CONFIG_T::n_elem2_1) * CONFIG_T::n_elem1_2 + - (jj + CONFIG_T::n_elem1_1) * CONFIG_T::n_elem1_2 + kk; - int data_idx = ii * CONFIG_T::n_elem2_1 * CONFIG_T::n_elem2_2 + jj * CONFIG_T::n_elem2_2 + kk; - res[res_idx] = data2[data_idx]; - } - } - } -} - -template -void concatenate3d_2(input1_T data1[CONFIG_T::n_elem1_0 * CONFIG_T::n_elem1_1 * CONFIG_T::n_elem1_2], - input2_T data2[CONFIG_T::n_elem2_0 * CONFIG_T::n_elem2_1 * CONFIG_T::n_elem2_2], - res_T res[CONFIG_T::n_elem1_0 * CONFIG_T::n_elem1_1 * CONFIG_T::n_elem1_2 + - CONFIG_T::n_elem2_0 * CONFIG_T::n_elem2_1 * CONFIG_T::n_elem2_2]) { - #pragma HLS PIPELINE - - for (int ii = 0; ii < CONFIG_T::n_elem1_0; ii++) { - for (int jj = 0; jj < CONFIG_T::n_elem1_1; jj++) { - for (int kk = 0; kk < CONFIG_T::n_elem1_2; kk++) { - int res_idx = ii * CONFIG_T::n_elem1_1 * (CONFIG_T::n_elem1_2 + CONFIG_T::n_elem2_2) + - jj * (CONFIG_T::n_elem1_2 + CONFIG_T::n_elem2_2) + kk; - int data_idx = ii * CONFIG_T::n_elem1_1 * CONFIG_T::n_elem1_2 + jj * CONFIG_T::n_elem1_2 + kk; - res[res_idx] = data1[data_idx]; - } - for (int kk = 0; kk < CONFIG_T::n_elem1_2; kk++) { - int res_idx = ii * CONFIG_T::n_elem1_1 * (CONFIG_T::n_elem1_2 + CONFIG_T::n_elem2_2) + - jj * (CONFIG_T::n_elem1_2 + CONFIG_T::n_elem2_2) + kk + CONFIG_T::n_elem1_2; - int data_idx = ii * CONFIG_T::n_elem2_1 * CONFIG_T::n_elem2_2 + jj * CONFIG_T::n_elem2_2 + kk; - res[res_idx] = data2[data_idx]; - } - } - } -} - -template -void concatenate3d(input1_T data1[CONFIG_T::n_elem1_0 * CONFIG_T::n_elem1_1 * CONFIG_T::n_elem1_2], - input2_T data2[CONFIG_T::n_elem2_0 * CONFIG_T::n_elem2_1 * CONFIG_T::n_elem2_2], - res_T res[CONFIG_T::n_elem1_0 * CONFIG_T::n_elem1_1 * CONFIG_T::n_elem1_2 + - CONFIG_T::n_elem2_0 * CONFIG_T::n_elem2_1 * CONFIG_T::n_elem2_2]) { - #pragma HLS INLINE - - if (CONFIG_T::axis == 3 || CONFIG_T::axis == -1) { - concatenate3d_2(data1, data2, res); - } else if (CONFIG_T::axis == 2 || CONFIG_T::axis == -2) { - concatenate3d_1(data1, data2, res); - } else { - concatenate3d_0(data1, data2, res); - } -} - -} // namespace nnet - -#endif diff --git a/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_merge_stream.h b/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_merge_stream.h deleted file mode 100644 index a57ec78..0000000 --- a/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_merge_stream.h +++ /dev/null @@ -1,370 +0,0 @@ -#ifndef NNET_MERGE_STREAM_H_ -#define NNET_MERGE_STREAM_H_ - -#include "hls_stream.h" 
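The stream merge functions in this header (below) all follow one pattern: read one pack from each input stream, combine the packs element-wise in a fully unrolled inner loop, and write one output pack. A host-side behavioral model of add(), with std::queue standing in for hls::stream and an assumed pack size, looks roughly like this:

```cpp
#include <array>
#include <cstdio>
#include <queue>

constexpr int PACK = 4;   // stands in for input1_T::size == res_T::size
constexpr int N_ELEM = 8; // stands in for CONFIG_T::n_elem

using pack_t = std::array<float, PACK>;
using stream_t = std::queue<pack_t>;

// Mirrors the AddLoop / AddPack structure of nnet::add
void add_model(stream_t &data1, stream_t &data2, stream_t &res) {
    for (int i = 0; i < N_ELEM / PACK; i++) { // AddLoop: one pack per iteration
        pack_t a = data1.front(); data1.pop();
        pack_t b = data2.front(); data2.pop();
        pack_t out;
        for (int j = 0; j < PACK; j++)        // AddPack: fully unrolled in HLS
            out[j] = a[j] + b[j];
        res.push(out);
    }
}

int main() {
    stream_t a, b, r;
    a.push({1, 2, 3, 4});     a.push({5, 6, 7, 8});
    b.push({10, 20, 30, 40}); b.push({50, 60, 70, 80});
    add_model(a, b, r);
    while (!r.empty()) {
        for (float v : r.front()) std::printf("%g ", v);
        r.pop();
    }
    std::printf("\n");
}
```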
-#include "nnet_common.h" -#include - -namespace nnet { - -template -void add(hls::stream &data1, hls::stream &data2, hls::stream &res) { - assert(input1_T::size == input2_T::size && input1_T::size == res_T::size); - -AddLoop: - for (int i = 0; i < CONFIG_T::n_elem / input1_T::size; i++) { - #pragma HLS PIPELINE - - input1_T in_data1 = data1.read(); - input2_T in_data2 = data2.read(); - res_T out_data; - PRAGMA_DATA_PACK(out_data) - - AddPack: - for (int j = 0; j < res_T::size; j++) { - #pragma HLS UNROLL - out_data[j] = in_data1[j] + in_data2[j]; - } - - res.write(out_data); - } -} - -template -void subtract(hls::stream &data1, hls::stream &data2, hls::stream &res) { - assert(input1_T::size == input2_T::size && input1_T::size == res_T::size); - -SubtractLoop: - for (int i = 0; i < CONFIG_T::n_elem / input1_T::size; i++) { - #pragma HLS PIPELINE - - input1_T in_data1 = data1.read(); - input2_T in_data2 = data2.read(); - res_T out_data; - PRAGMA_DATA_PACK(out_data) - - SubtractPack: - for (int j = 0; j < res_T::size; j++) { - #pragma HLS UNROLL - out_data[j] = in_data1[j] - in_data2[j]; - } - - res.write(out_data); - } -} - -template -void multiply(hls::stream &data1, hls::stream &data2, hls::stream &res) { - assert(input1_T::size == input2_T::size && input1_T::size == res_T::size); - -MultiplyLoop: - for (int i = 0; i < CONFIG_T::n_elem / input1_T::size; i++) { - #pragma HLS PIPELINE II=CONFIG_T::reuse_factor - - input1_T in_data1 = data1.read(); - input2_T in_data2 = data2.read(); - res_T out_data; - PRAGMA_DATA_PACK(out_data) - - MultiplyPack: - for (int j = 0; j < res_T::size; j++) { - #pragma HLS UNROLL - out_data[j] = in_data1[j] * in_data2[j]; - } - - res.write(out_data); - } -} - -template -void average(hls::stream &data1, hls::stream &data2, hls::stream &res) { - assert(input1_T::size == input2_T::size && input1_T::size == res_T::size); - -AverageLoop: - for (int i = 0; i < CONFIG_T::n_elem / input1_T::size; i++) { - #pragma HLS PIPELINE II=CONFIG_T::reuse_factor - - input1_T in_data1 = data1.read(); - input2_T in_data2 = data2.read(); - res_T out_data; - PRAGMA_DATA_PACK(out_data) - - AveragePack: - for (int j = 0; j < res_T::size; j++) { - #pragma HLS UNROLL - out_data[j] = (in_data1[j] + in_data2[j]) / (typename res_T::value_type)2; - } - - res.write(out_data); - } -} - -template -void maximum(hls::stream &data1, hls::stream &data2, hls::stream &res) { - assert(input1_T::size == input2_T::size && input1_T::size == res_T::size); - -MaximumLoop: - for (int i = 0; i < CONFIG_T::n_elem / input1_T::size; i++) { - #pragma HLS PIPELINE II=CONFIG_T::reuse_factor - - input1_T in_data1 = data1.read(); - input2_T in_data2 = data2.read(); - res_T out_data; - PRAGMA_DATA_PACK(out_data) - - MaximumPack: - for (int j = 0; j < res_T::size; j++) { - #pragma HLS UNROLL - out_data[j] = (in_data1[j] > in_data2[j]) ? in_data1[j] : in_data2[j]; - } - - res.write(out_data); - } -} - -template -void minimum(hls::stream &data1, hls::stream &data2, hls::stream &res) { - assert(input1_T::size == input2_T::size && input1_T::size == res_T::size); - -MinimumLoop: - for (int i = 0; i < CONFIG_T::n_elem / input1_T::size; i++) { - #pragma HLS PIPELINE II=CONFIG_T::reuse_factor - - input1_T in_data1 = data1.read(); - input2_T in_data2 = data2.read(); - res_T out_data; - PRAGMA_DATA_PACK(out_data) - - MinimumPack: - for (int j = 0; j < res_T::size; j++) { - #pragma HLS UNROLL - out_data[j] = (in_data1[j] < in_data2[j]) ? 
in_data1[j] : in_data2[j]; - } - - res.write(out_data); - } -} - -template -void concatenate3d_0(hls::stream &data1, hls::stream &data2, hls::stream &res) { -ConcatLoopHeight1: - for (int i = 0; i < CONFIG_T::n_elem1_0; i++) { - ConcatLoopWidth1: - for (int j = 0; j < CONFIG_T::n_elem1_1; j++) { - #pragma HLS PIPELINE II=1 - - input1_T in_data1 = data1.read(); - res_T out_data; - PRAGMA_DATA_PACK(out_data) - - ConcatPackInput1: - for (int k = 0; k < input1_T::size; k++) { - #pragma HLS UNROLL - out_data[k] = in_data1[k]; - } - - res.write(out_data); - } - } -ConcatLoopHeight2: - for (int i = 0; i < CONFIG_T::n_elem2_0; i++) { - ConcatLoopWidth2: - for (int j = 0; j < CONFIG_T::n_elem2_1; j++) { - #pragma HLS PIPELINE II=1 - - input2_T in_data2 = data2.read(); - res_T out_data; - PRAGMA_DATA_PACK(out_data) - - ConcatPackInput2: - for (int k = 0; k < input2_T::size; k++) { - #pragma HLS UNROLL - out_data[k] = in_data2[k]; - } - - res.write(out_data); - } - } -} - -template -void concatenate3d_1(hls::stream &data1, hls::stream &data2, hls::stream &res) { -ConcatLoopHeight: - for (int i = 0; i < CONFIG_T::n_elem1_0; i++) { - ConcatLoopWidth1: - for (int j = 0; j < CONFIG_T::n_elem1_1; j++) { - #pragma HLS PIPELINE II=1 - - input1_T in_data1 = data1.read(); - res_T out_data; - PRAGMA_DATA_PACK(out_data) - - ConcatPackInput1: - for (int k = 0; k < input1_T::size; k++) { - #pragma HLS UNROLL - out_data[k] = in_data1[k]; - } - - res.write(out_data); - } - ConcatLoopWidth2: - for (int j = 0; j < CONFIG_T::n_elem2_1; j++) { - #pragma HLS PIPELINE II=1 - - input2_T in_data2 = data2.read(); - res_T out_data; - PRAGMA_DATA_PACK(out_data) - - ConcatPackInput2: - for (int k = 0; k < input2_T::size; k++) { - #pragma HLS UNROLL - out_data[k] = in_data2[k]; - } - - res.write(out_data); - } - } -} - -template -void concatenate3d_2(hls::stream &data1, hls::stream &data2, hls::stream &res) { -ConcatLoopHeight: - for (int i = 0; i < CONFIG_T::n_elem1_0; i++) { - ConcatLoopWidth: - for (int j = 0; j < CONFIG_T::n_elem1_1; j++) { - #pragma HLS PIPELINE II=1 - - input1_T in_data1 = data1.read(); - input2_T in_data2 = data2.read(); - res_T out_data; - PRAGMA_DATA_PACK(out_data) - - ConcatPackInput1: - for (int k = 0; k < input1_T::size; k++) { - #pragma HLS UNROLL - out_data[k] = in_data1[k]; - } - - ConcatPackInput2: - for (int k = 0; k < input2_T::size; k++) { - #pragma HLS UNROLL - out_data[input1_T::size + k] = in_data2[k]; - } - - res.write(out_data); - } - } -} - -template -void concatenate3d(hls::stream &data1, hls::stream &data2, hls::stream &res) { - if (CONFIG_T::axis == 3 || CONFIG_T::axis == -1) { - concatenate3d_2(data1, data2, res); - } else if (CONFIG_T::axis == 2 || CONFIG_T::axis == -2) { - concatenate3d_1(data1, data2, res); - } else { - concatenate3d_0(data1, data2, res); - } -} - -template -void concatenate2d_0(hls::stream &data1, hls::stream &data2, hls::stream &res) { -ConcatLoopHeight1: - for (int i = 0; i < CONFIG_T::n_elem1_0; i++) { - #pragma HLS PIPELINE II=1 - - input1_T in_data1 = data1.read(); - res_T out_data; - PRAGMA_DATA_PACK(out_data) - - ConcatPackInput1: - for (int k = 0; k < input1_T::size; k++) { - #pragma HLS UNROLL - out_data[k] = in_data1[k]; - } - - res.write(out_data); - } -ConcatLoopHeight2: - for (int i = 0; i < CONFIG_T::n_elem2_0; i++) { - #pragma HLS PIPELINE II=1 - - input2_T in_data2 = data2.read(); - res_T out_data; - PRAGMA_DATA_PACK(out_data) - - ConcatPackInput2: - for (int k = 0; k < input2_T::size; k++) { - #pragma HLS UNROLL - out_data[k] = in_data2[k]; - } 
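Concatenation along the channel axis (concatenate3d_2 above) is the simplest streaming case: the two inputs stay aligned per spatial position, and each output pack is just the channel-wise widening of one pack from each stream. A plain-C++ sketch under assumed sizes:

```cpp
#include <array>
#include <cstdio>
#include <queue>

constexpr int CH1 = 3, CH2 = 2; // input1_T::size, input2_T::size
constexpr int H = 2, W = 2;     // CONFIG_T::n_elem1_0, CONFIG_T::n_elem1_1

template <int N> using pack_t = std::array<float, N>;

void concat3d_2_model(std::queue<pack_t<CH1>> &d1, std::queue<pack_t<CH2>> &d2,
                      std::queue<pack_t<CH1 + CH2>> &res) {
    for (int i = 0; i < H; i++) {
        for (int j = 0; j < W; j++) {
            pack_t<CH1> a = d1.front(); d1.pop();
            pack_t<CH2> b = d2.front(); d2.pop();
            pack_t<CH1 + CH2> out;
            for (int k = 0; k < CH1; k++) out[k] = a[k];       // ConcatPackInput1
            for (int k = 0; k < CH2; k++) out[CH1 + k] = b[k]; // ConcatPackInput2
            res.push(out);
        }
    }
}

int main() {
    std::queue<pack_t<CH1>> d1;
    std::queue<pack_t<CH2>> d2;
    std::queue<pack_t<CH1 + CH2>> res;
    for (int p = 0; p < H * W; p++) {
        d1.push({1.f * p, 2.f * p, 3.f * p});
        d2.push({10.f * p, 20.f * p});
    }
    concat3d_2_model(d1, d2, res);
    std::printf("wrote %zu packs of width %d\n", res.size(), CH1 + CH2);
}
```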
- - res.write(out_data); - } -} - -template -void concatenate2d_1(hls::stream &data1, hls::stream &data2, hls::stream &res) { -ConcatLoopHeight: - for (int i = 0; i < CONFIG_T::n_elem1_0; i++) { - #pragma HLS PIPELINE II=1 - - input1_T in_data1 = data1.read(); - input2_T in_data2 = data2.read(); - res_T out_data; - PRAGMA_DATA_PACK(out_data) - - ConcatPackInput1: - for (int k = 0; k < input1_T::size; k++) { - #pragma HLS UNROLL - out_data[k] = in_data1[k]; - } - - ConcatPackInput2: - for (int k = 0; k < input2_T::size; k++) { - #pragma HLS UNROLL - out_data[input1_T::size + k] = in_data2[k]; - } - - res.write(out_data); - } -} - -template -void concatenate2d(hls::stream &data1, hls::stream &data2, hls::stream &res) { - if (CONFIG_T::axis == 2 || CONFIG_T::axis == -1) { - concatenate2d_1(data1, data2, res); - } else { - concatenate2d_0(data1, data2, res); - } -} - -template -void concatenate1d(hls::stream &data1, hls::stream &data2, hls::stream &res) { - res_T out_data; - PRAGMA_DATA_PACK(out_data) -ConcatLoop1: - for (int i = 0; i < CONFIG_T::n_elem1_0 / input1_T::size; i++) { - #pragma HLS PIPELINE - input1_T in_data1 = data1.read(); - ConcatPack1: - for (int j = 0; j < input1_T::size; j++) { - #pragma HLS UNROLL - out_data[j + (i * input1_T::size)] = in_data1[j]; - } - } -ConcatLoop2: - for (int i = 0; i < CONFIG_T::n_elem2_0 / input2_T::size; i++) { - #pragma HLS PIPELINE - input2_T in_data2 = data2.read(); - ConcatPack2: - for (int j = 0; j < input2_T::size; j++) { - #pragma HLS UNROLL - out_data[j + (i * input2_T::size) + (CONFIG_T::n_elem1_0)] = in_data2[j]; - } - } - res.write(out_data); -} -} // namespace nnet - -#endif diff --git a/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_mult.h b/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_mult.h deleted file mode 100644 index 00d1c6d..0000000 --- a/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_mult.h +++ /dev/null @@ -1,116 +0,0 @@ -#ifndef NNET_MULT_H_ -#define NNET_MULT_H_ - -#include "hls_stream.h" -#include "nnet_common.h" -#include "nnet_helpers.h" -#include -#include - -namespace nnet { - -namespace product { - -/* --- - * different methods to perform the product of input and weight, depending on the - * types of each. 
- * --- */ - -class Product {}; - -template class both_binary : public Product { - public: - static x_T product(x_T a, w_T w) { - // specialisation for 1-bit weights and incoming data - #pragma HLS INLINE - return a == w; - } -}; - -template class weight_binary : public Product { - public: - static auto product(x_T a, w_T w) -> decltype(-a) { - // Specialisation for 1-bit weights, arbitrary data - #pragma HLS INLINE - if (w == 0) - return -a; - else - return a; - } -}; - -template class data_binary : public Product { - public: - static auto product(x_T a, w_T w) -> decltype(-w) { - // Specialisation for 1-bit data, arbitrary weight - #pragma HLS INLINE - if (a == 0) - return -w; - else - return w; - } -}; - -template class weight_ternary : public Product { - public: - static auto product(x_T a, w_T w) -> decltype(-a) { - // Specialisation for 2-bit weights, arbitrary data - #pragma HLS INLINE - if (w == 0) - return 0; - else if (w == -1) - return -a; - else - return a; // if(w == 1) - } -}; - -template class mult : public Product { - public: - static auto product(x_T a, w_T w) -> decltype(a * w) { - // 'Normal' product - #pragma HLS INLINE - return a * w; - } -}; - -template class weight_exponential : public Product { - public: - using r_T = ap_fixed<2 * (decltype(w_T::weight)::width + x_T::width), (decltype(w_T::weight)::width + x_T::width)>; - static r_T product(x_T a, w_T w) { - // Shift product for exponential weights - #pragma HLS INLINE - - // Shift by the exponent. Negative weights shift right - r_T y = static_cast(a) << w.weight; - - // Negate or not depending on weight sign - return w.sign == 1 ? y : static_cast(-y); - } -}; - -} // namespace product - -template -inline typename std::enable_if>::value && - std::is_same>::value, - ap_int>::type -cast(typename CONFIG_T::accum_t x) { - return (ap_int)(x - CONFIG_T::n_in / 2) * 2; -} - -template -inline typename std::enable_if< - std::is_same>::value && !std::is_same>::value, res_T>::type -cast(typename CONFIG_T::accum_t x) { - return (res_T)x; -} - -template -inline typename std::enable_if<(!std::is_same>::value), res_T>::type cast(typename CONFIG_T::accum_t x) { - return (res_T)x; -} - -} // namespace nnet - -#endif diff --git a/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_padding.h b/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_padding.h deleted file mode 100644 index e48a2fb..0000000 --- a/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_padding.h +++ /dev/null @@ -1,145 +0,0 @@ -#ifndef NNET_PADDING_H_ -#define NNET_PADDING_H_ - -#include - -namespace nnet { - -struct padding1d_config { - static const unsigned n_chan = 10; - static const unsigned in_width = 10; - static const unsigned out_width = 10; - static const unsigned pad_left = 0; - static const unsigned pad_right = 0; -}; - -template -void zeropad1d_cf(data_T data[CONFIG_T::n_chan * CONFIG_T::in_width], data_T res[CONFIG_T::n_chan * CONFIG_T::out_width]) { - #pragma HLS PIPELINE - - for (int j = 0; j < CONFIG_T::n_chan; j++) { - for (int i = 0; i < CONFIG_T::pad_left; i++) { - *(res++) = 0; - } - - for (int i = 0; i < CONFIG_T::in_width; i++) { - *(res++) = (res_T) * (data++); - } - - for (int i = 0; i < CONFIG_T::pad_right; i++) { - *(res++) = 0; - } - } -} - -template -void zeropad1d_cl(data_T data[CONFIG_T::n_chan * CONFIG_T::in_width], res_T res[CONFIG_T::n_chan * CONFIG_T::out_width]) { - #pragma HLS PIPELINE - - for (int i = 0; i < CONFIG_T::pad_left; i++) { - for (int j = 0; j < CONFIG_T::n_chan; j++) { - *(res++) = 0; - } - } - - for (int i = 0; i < 
CONFIG_T::in_width; i++) { - for (int j = 0; j < CONFIG_T::n_chan; j++) { - *(res++) = (res_T) * (data++); - } - } - - for (int i = 0; i < CONFIG_T::pad_right; i++) { - for (int j = 0; j < CONFIG_T::n_chan; j++) { - *(res++) = 0; - } - } -} - -struct padding2d_config { - static const unsigned n_chan = 10; - static const unsigned in_height = 10; - static const unsigned in_width = 10; - static const unsigned out_height = 10; - static const unsigned out_width = 10; - static const unsigned pad_top = 0; - static const unsigned pad_bottom = 0; - static const unsigned pad_left = 0; - static const unsigned pad_right = 0; -}; - -template -void zeropad2d_cf(data_T data[CONFIG_T::n_chan * CONFIG_T::in_height * CONFIG_T::in_width], - data_T res[CONFIG_T::n_chan * CONFIG_T::out_height * CONFIG_T::out_width]) { - #pragma HLS PIPELINE - - for (int k = 0; k < CONFIG_T::n_chan; k++) { - - for (int i = 0; i < CONFIG_T::pad_top; i++) { - for (int j = 0; j < CONFIG_T::out_width; j++) { - *(res++) = 0; - } - } - - for (int i = 0; i < CONFIG_T::in_height; i++) { - for (int j = 0; j < CONFIG_T::pad_left; j++) { - *(res++) = 0; - } - for (int j = 0; j < CONFIG_T::in_width; j++) { - *(res++) = (res_T) * (data++); - } - for (int j = 0; j < CONFIG_T::pad_right; j++) { - *(res++) = 0; - } - } - - for (int i = 0; i < CONFIG_T::pad_bottom; i++) { - for (int j = 0; j < CONFIG_T::out_width; j++) { - *(res++) = 0; - } - } - } -} - -template -void zeropad2d_cl(data_T data[CONFIG_T::n_chan * CONFIG_T::in_height * CONFIG_T::in_width], - res_T res[CONFIG_T::n_chan * CONFIG_T::out_height * CONFIG_T::out_width]) { - #pragma HLS PIPELINE - - for (int i = 0; i < CONFIG_T::pad_top; i++) { - for (int j = 0; j < CONFIG_T::out_width; j++) { - for (int k = 0; k < CONFIG_T::n_chan; k++) { - *(res++) = 0; - } - } - } - - for (int i = 0; i < CONFIG_T::in_height; i++) { - for (int j = 0; j < CONFIG_T::pad_left; j++) { - for (int k = 0; k < CONFIG_T::n_chan; k++) { - *(res++) = 0; - } - } - for (int j = 0; j < CONFIG_T::in_width; j++) { - for (int k = 0; k < CONFIG_T::n_chan; k++) { - *(res++) = (res_T) * (data++); - } - } - for (int j = 0; j < CONFIG_T::pad_right; j++) { - for (int k = 0; k < CONFIG_T::n_chan; k++) { - *(res++) = 0; - } - } - } - - for (int i = 0; i < CONFIG_T::pad_bottom; i++) { - for (int j = 0; j < CONFIG_T::out_width; j++) { - for (int k = 0; k < CONFIG_T::n_chan; k++) { - *(res++) = 0; - } - } - } -} - -} // namespace nnet - -#endif diff --git a/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_padding_stream.h b/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_padding_stream.h deleted file mode 100644 index 9df5d54..0000000 --- a/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_padding_stream.h +++ /dev/null @@ -1,85 +0,0 @@ -#ifndef NNET_PADDING_STREAM_H_ -#define NNET_PADDING_STREAM_H_ - -#include - -namespace nnet { - -template void fill_zero(hls::stream &res) { - #pragma HLS INLINE - res_T res_part; - for (int c = 0; c < CONFIG_T::n_chan; c++) { - #pragma HLS UNROLL - res_part[c] = 0; - } - res.write(res_part); -} - -template void fill_data(hls::stream &data, hls::stream &res) { - #pragma HLS INLINE - data_T data_part = data.read(); - res_T res_part; - for (int c = 0; c < CONFIG_T::n_chan; c++) { - #pragma HLS UNROLL - res_part[c] = data_part[c]; - } - res.write(res_part); -} - -template -void zeropad1d_cl(hls::stream &data, hls::stream &res) { -PadLeft: - for (int i = 0; i < CONFIG_T::pad_left; i++) { - fill_zero(res); - } - -CopyMain: - for (int i = 0; i < CONFIG_T::in_width; i++) { - fill_data(data, res); - } - 
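The streaming padder interleaves fill_zero and fill_data so that exactly pad_left + in_width + pad_right packs are produced per row, matching the out_width of the array version. An equivalent host-side model of the channels-last 1-D case, with illustrative sizes:

```cpp
#include <cstdio>

constexpr int N_CHAN = 2, IN_W = 4, PAD_L = 1, PAD_R = 2;
constexpr int OUT_W = PAD_L + IN_W + PAD_R; // mirrors padding1d_config::out_width

// Channels-last layout: data and res are [width][channel] flattened
void zeropad1d_cl_model(const float data[IN_W * N_CHAN], float res[OUT_W * N_CHAN]) {
    int o = 0;
    for (int i = 0; i < PAD_L; i++)  // PadLeft: whole zero pixels
        for (int c = 0; c < N_CHAN; c++) res[o++] = 0.f;
    for (int i = 0; i < IN_W; i++)   // CopyMain: pass input pixels through
        for (int c = 0; c < N_CHAN; c++) res[o++] = data[i * N_CHAN + c];
    for (int i = 0; i < PAD_R; i++)  // PadRight: whole zero pixels
        for (int c = 0; c < N_CHAN; c++) res[o++] = 0.f;
}

int main() {
    float in[IN_W * N_CHAN] = {1, 2, 3, 4, 5, 6, 7, 8};
    float out[OUT_W * N_CHAN];
    zeropad1d_cl_model(in, out);
    for (int i = 0; i < OUT_W * N_CHAN; i++) std::printf("%g ", out[i]);
    std::printf("\n");
}
```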
-PadRight:
-    for (int i = 0; i < CONFIG_T::pad_right; i++) {
-        fill_zero(res);
-    }
-}
-
-template
-void zeropad2d_cl(hls::stream &data, hls::stream &res) {
-
-PadTop:
-    for (int i = 0; i < CONFIG_T::pad_top; i++) {
-    PadTopWidth:
-        for (int j = 0; j < CONFIG_T::out_width; j++) {
-            fill_zero(res);
-        }
-    }
-
-PadMain:
-    for (int i = 0; i < CONFIG_T::in_height; i++) {
-    PadLeft:
-        for (int j = 0; j < CONFIG_T::pad_left; j++) {
-            fill_zero(res);
-        }
-    CopyMain:
-        for (int j = 0; j < CONFIG_T::in_width; j++) {
-            fill_data(data, res);
-        }
-    PadRight:
-        for (int j = 0; j < CONFIG_T::pad_right; j++) {
-            fill_zero(res);
-        }
-    }
-
-PadBottom:
-    for (int i = 0; i < CONFIG_T::pad_bottom; i++) {
-    PadBottomWidth:
-        for (int j = 0; j < CONFIG_T::out_width; j++) {
-            fill_zero(res);
-        }
-    }
-}
-
-} // namespace nnet
-
-#endif
diff --git a/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_pooling.h b/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_pooling.h
deleted file mode 100644
index 7401dfe..0000000
--- a/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_pooling.h
+++ /dev/null
@@ -1,375 +0,0 @@
-#ifndef NNET_POOLING_H_
-#define NNET_POOLING_H_
-
-#include "nnet_helpers.h"
-#include
-
-namespace nnet {
-
-// Return the maximum value from an array
-template T max(T x[N]) {
-    T y = x[0];
-    for (int i = 1; i < N; i++) {
-        y = x[i] > y ? x[i] : y;
-    }
-    return y;
-}
-
-template ap_int avg(ap_int (&x)[N]) {
-    // Use a wider accumulator than the input to avoid overflow
-    ap_int tmp = 0;
-    for (int i = 0; i < N; i++) {
-        tmp += x[i];
-    }
-    tmp /= N;
-    // Now cast back to original type
-    ap_int y = tmp;
-    return y;
-}
-
-template ap_uint avg(ap_uint (&x)[N]) {
-    // Use a wider accumulator than the input to avoid overflow
-    ap_uint tmp = 0;
-    for (int i = 0; i < N; i++) {
-        tmp += x[i];
-    }
-    tmp /= N;
-    // Now cast back to original type
-    ap_uint y = tmp;
-    return y;
-}
-
-template
-ap_fixed avg(ap_fixed (&x)[N]) {
-    // Use a wider accumulator than the input to avoid overflow
-    ap_fixed tmp = 0;
-    for (int i = 0; i < N; i++) {
-        tmp += x[i];
-    }
-    tmp /= N;
-    // Now cast back to original type
-    ap_fixed y = tmp;
-    return y;
-}
-
-template
-ap_ufixed avg(ap_ufixed (&x)[N]) {
-    // Use a wider accumulator than the input to avoid overflow
-    ap_ufixed tmp = 0;
-    for (int i = 0; i < N; i++) {
-        tmp += x[i];
-    }
-    tmp /= N;
-    // Now cast back to original type
-    ap_ufixed y = tmp;
-    return y;
-}
-
-// Return the mean value of an array
-template T avg(T (&x)[N]) {
-    T y = 0;
-    for (int i = 0; i < N; i++) {
-        y += x[i];
-    }
-    y /= N;
-    return y;
-}
-
-// Enumeration for pooling operation (max, avg, l2norm pooling)
-enum Pool_Op { Max, Average }; // L2Norm };
-template T pool_op(T (&x)[N]) {
-    switch (op) {
-    case Max:
-        return max(x);
-    case Average:
-        return avg(x);
-        // case L2Norm: return l2norm(x);
-    }
-}
-
-template T pad_val() {
-    /*---
-    *- In TensorFlow, pooling ignores the value in the padded cells
-    *- For Avg pooling, return 0 (the divisor is modified to the
-    *- area overlapping the unpadded image).
-    *- For max pooling, return the most negative value for the type.
- *- TODO this is not really generic, it assumes fixed point or integer T - ---*/ - switch (op) { - case Max: { - T x = 0; - x[x.width - 1] = 1; - return x; - break; - } - case Average: - return 0; - } -} - -struct pooling1d_config { - // IO size - static const unsigned n_in = 10; - static const unsigned pool_width = 2; - static const unsigned stride_width = 2; - static const unsigned n_out = (n_in - pool_width) / stride_width + 1; - static const unsigned pad_left = 0; - static const unsigned pad_right = 0; - static const bool count_pad = false; - // Pooling function - static const Pool_Op pool_op = Max; -}; - -template constexpr int pool_op_limit_1d() { - return CONFIG_T::n_in * CONFIG_T::n_filt / CONFIG_T::reuse_factor; -} - -template -void pooling1d_cl(data_T data[CONFIG_T::n_in * CONFIG_T::n_filt], res_T res[CONFIG_T::n_out * CONFIG_T::n_filt]) { - #pragma HLS PIPELINE II=CONFIG_T::reuse_factor - - // TODO partition the arrays according to the reuse factor - const int limit = pool_op_limit_1d(); - #pragma HLS ALLOCATION function instances=CONFIG_T::pool_op limit=limit - // Add any necessary padding - unsigned padded_width = CONFIG_T::n_in + CONFIG_T::pad_left + CONFIG_T::pad_right; - if (CONFIG_T::pad_left == 0 && CONFIG_T::pad_right == 0) { - padded_width -= padded_width - (padded_width / CONFIG_T::stride_width * CONFIG_T::stride_width); - } - - for (int ff = 0; ff < CONFIG_T::n_filt; ff++) { - // Loop over input image x in steps of stride - for (int ii = 0; ii < padded_width; ii += CONFIG_T::stride_width) { - data_T pool[CONFIG_T::pool_width]; - #pragma HLS ARRAY_PARTITION variable=pool complete dim=0 - // Keep track of number of pixels in image vs padding region - unsigned img_overlap = 0; - // Loop over pool window x - for (int jj = 0; jj < CONFIG_T::stride_width; jj++) { - if (ii + jj < CONFIG_T::pad_left || ii + jj >= (padded_width - CONFIG_T::pad_right)) { - // Add padding - pool[jj] = pad_val(); - if (CONFIG_T::count_pad) - img_overlap++; - } else { - pool[jj] = data[(ii + jj - CONFIG_T::pad_left) * CONFIG_T::n_filt + ff]; - img_overlap++; - } - } - // do the pooling - // TODO in the case of average pooling, need to reduce width to area of pool window - // not overlapping padding region - res[(ii / CONFIG_T::stride_width) * CONFIG_T::n_filt + ff] = - pool_op(pool); - // If the pool op is Average, the zero-padding needs to be removed from the results - if (CONFIG_T::pool_op == Average) { - data_T rescale = static_cast(CONFIG_T::pool_width) / img_overlap; - res[(ii / CONFIG_T::stride_width) * CONFIG_T::n_filt + ff] *= rescale; - } - } - } -} - -template -void global_pooling1d_cl(data_T data[CONFIG_T::n_in * CONFIG_T::n_filt], res_T res[CONFIG_T::n_filt]) { - #pragma HLS PIPELINE II=CONFIG_T::reuse_factor - - assert(CONFIG_T::pad_left == 0 && CONFIG_T::pad_right == 0); - assert(CONFIG_T::pool_width == CONFIG_T::stride_width); - - // TODO partition the arrays according to the reuse factor - const int limit = pool_op_limit_1d(); - #pragma HLS ALLOCATION function instances=CONFIG_T::pool_op limit=limit - - for (int ff = 0; ff < CONFIG_T::n_filt; ff++) { - data_T pool[CONFIG_T::n_in]; - #pragma HLS ARRAY_PARTITION variable=pool complete dim=0 - for (int jj = 0; jj < CONFIG_T::n_in; jj++) { - pool[jj] = data[jj * CONFIG_T::n_filt + ff]; - } - // do the pooling - res[ff] = pool_op(pool); - } -} - -struct pooling2d_config { - // IO size - static const unsigned in_height = 10; - static const unsigned in_width = 10; - static const unsigned n_filt = 4; - static const unsigned stride_height 
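// [Editor's note: illustrative sketch, not part of the original patch.]
// The out_height / out_width members below follow the standard "valid"
// pooling size formula:
//
//     out = (in - pool) / stride + 1
//
// Worked example: in_width = 10, pool_width = 2, stride_width = 2 gives
// out_width = (10 - 2) / 2 + 1 = 5, i.e. non-overlapping 2x2 windows halve
// each spatial dimension of a 2D input.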
= 2; - static const unsigned stride_width = 2; - static const unsigned pool_height = 2; - static const unsigned pool_width = 2; - static const unsigned out_height = (in_height - pool_height) / stride_height + 1; - static const unsigned out_width = (in_width - pool_width) / stride_width + 1; - // Padding - static const unsigned pad_top = 0; - static const unsigned pad_bottom = 0; - static const unsigned pad_left = 0; - static const unsigned pad_right = 0; - static const bool count_pad = false; - // Pooling function - static const Pool_Op pool_op = Max; - // Reuse factor - static const unsigned reuse_factor = 1; - - // Internal data type definitions - typedef float accum_t; -}; - -template constexpr int pool_op_limit() { - return (CONFIG_T::out_height * CONFIG_T::out_width) * CONFIG_T::n_filt / CONFIG_T::reuse_factor; -} - -template -void pooling2d_cl(data_T data[CONFIG_T::in_height * CONFIG_T::in_width * CONFIG_T::n_filt], - res_T res[CONFIG_T::out_height * CONFIG_T::out_width * CONFIG_T::n_filt]) { - #pragma HLS PIPELINE II=CONFIG_T::reuse_factor - - // TODO partition the arrays according to the reuse factor - const int limit = pool_op_limit(); - #pragma HLS ALLOCATION function instances=CONFIG_T::pool_op limit=limit - // Add any necessary padding - unsigned padded_height = CONFIG_T::in_height + CONFIG_T::pad_top + CONFIG_T::pad_bottom; - unsigned padded_width = CONFIG_T::in_width + CONFIG_T::pad_left + CONFIG_T::pad_right; - if (CONFIG_T::pad_top == 0 && CONFIG_T::pad_bottom == 0 && CONFIG_T::pad_left == 0 && CONFIG_T::pad_right == 0) { - padded_height -= padded_height - (padded_height / CONFIG_T::stride_height * CONFIG_T::stride_height); - padded_width -= padded_width - (padded_width / CONFIG_T::stride_width * CONFIG_T::stride_width); - } - - for (int ff = 0; ff < CONFIG_T::n_filt; ff++) { - // Loop over input image y in steps of stride - for (int ii = 0; ii < padded_height; ii += CONFIG_T::stride_height) { - // Loop over input image x in steps of stride - for (int jj = 0; jj < padded_width; jj += CONFIG_T::stride_width) { - data_T pool[CONFIG_T::pool_height * CONFIG_T::pool_width]; - #pragma HLS ARRAY_PARTITION variable=pool complete dim=0 - // Keep track of number of pixels in image vs padding region - unsigned img_overlap = 0; - // Loop over pool window y - for (int kk = 0; kk < CONFIG_T::stride_height; kk++) { - // Loop over pool window x - for (int ll = 0; ll < CONFIG_T::stride_width; ll++) { - if (ii + kk < CONFIG_T::pad_top || ii + kk >= (padded_height - CONFIG_T::pad_bottom) || - jj + ll < CONFIG_T::pad_left || jj + ll >= (padded_width - CONFIG_T::pad_right)) { - // Add padding - pool[kk * CONFIG_T::stride_width + ll] = pad_val(); - if (CONFIG_T::count_pad) - img_overlap++; - } else { - pool[kk * CONFIG_T::stride_width + ll] = - data[(ii + kk - CONFIG_T::pad_top) * CONFIG_T::in_width * CONFIG_T::n_filt + - (jj + ll - CONFIG_T::pad_left) * CONFIG_T::n_filt + ff]; - img_overlap++; - } - } - } - // do the pooling - // TODO in the case of average pooling, need to reduce height * width to area of pool window - // not overlapping padding region - res[(ii / CONFIG_T::stride_height) * CONFIG_T::out_width * CONFIG_T::n_filt + - (jj / CONFIG_T::stride_width) * CONFIG_T::n_filt + ff] = - pool_op(pool); - // If the pool op is Average, the zero-padding needs to be removed from the results - if (CONFIG_T::pool_op == Average) { - data_T rescale = - static_cast(CONFIG_T::pool_height) * static_cast(CONFIG_T::pool_width) / img_overlap; - res[(ii / CONFIG_T::stride_height) * CONFIG_T::out_width * 
CONFIG_T::n_filt + - (jj / CONFIG_T::stride_width) * CONFIG_T::n_filt + ff] *= rescale; - } - } - } - } -} - -template -void pooling2d_cf(data_T data[CONFIG_T::in_height * CONFIG_T::in_width * CONFIG_T::n_filt], - res_T res[CONFIG_T::out_height * CONFIG_T::out_width * CONFIG_T::n_filt]) { - #pragma HLS PIPELINE II=CONFIG_T::reuse_factor - - // TODO partition the arrays according to the reuse factor - const int limit = pool_op_limit(); - #pragma HLS ALLOCATION function instances=CONFIG_T::pool_op limit=limit - // Add any necessary padding - unsigned padded_height = CONFIG_T::in_height + CONFIG_T::pad_top + CONFIG_T::pad_bottom; - unsigned padded_width = CONFIG_T::in_width + CONFIG_T::pad_left + CONFIG_T::pad_right; - if (CONFIG_T::pad_top == 0 && CONFIG_T::pad_bottom == 0 && CONFIG_T::pad_left == 0 && CONFIG_T::pad_right == 0) { - padded_height -= padded_height - (padded_height / CONFIG_T::stride_height * CONFIG_T::stride_height); - padded_width -= padded_width - (padded_width / CONFIG_T::stride_width * CONFIG_T::stride_width); - } - - for (int ff = 0; ff < CONFIG_T::n_filt; ff++) { - // Loop over input image y in steps of stride - for (int ii = 0; ii < padded_height; ii += CONFIG_T::stride_height) { - // Loop over input image x in steps of stride - for (int jj = 0; jj < padded_width; jj += CONFIG_T::stride_width) { - data_T pool[CONFIG_T::pool_height * CONFIG_T::pool_width]; - #pragma HLS ARRAY_PARTITION variable=pool complete dim=0 - // Keep track of number of pixels in image vs padding region - unsigned img_overlap = 0; - // Loop over pool window y - for (int kk = 0; kk < CONFIG_T::stride_height; kk++) { - // Loop over pool window x - for (int ll = 0; ll < CONFIG_T::stride_width; ll++) { - if (ii + kk < CONFIG_T::pad_top || ii + kk >= (padded_height - CONFIG_T::pad_bottom) || - jj + ll < CONFIG_T::pad_left || jj + ll >= (padded_width - CONFIG_T::pad_right)) { - // Add padding - pool[kk * CONFIG_T::stride_width + ll] = pad_val(); - if (CONFIG_T::count_pad) - img_overlap++; - } else { - pool[kk * CONFIG_T::stride_width + ll] = - data[(ii + kk - CONFIG_T::pad_top) * CONFIG_T::in_width + - ff * CONFIG_T::in_width * CONFIG_T::in_height + ll + jj - CONFIG_T::pad_left]; - img_overlap++; - } - } - } - // do the pooling - // TODO in the case of average pooling, need to reduce height * width to area of pool window - // not overlapping padding region - res[(ii / CONFIG_T::stride_height) * CONFIG_T::out_width + (jj / CONFIG_T::stride_width) + - ff * CONFIG_T::out_height * CONFIG_T::out_width] = - pool_op(pool); - // If the pool op is Average, the zero-padding needs to be removed from the results - if (CONFIG_T::pool_op == Average) { - data_T rescale = - static_cast(CONFIG_T::pool_height) * static_cast(CONFIG_T::pool_width) / img_overlap; - res[(ii / CONFIG_T::stride_height) * CONFIG_T::out_width + (jj / CONFIG_T::stride_width) + - ff * CONFIG_T::out_height * CONFIG_T::out_width] *= rescale; - } - } - } - } -} - -template -void global_pooling2d_cl(data_T data[CONFIG_T::in_height * CONFIG_T::in_width * CONFIG_T::n_filt], - res_T res[CONFIG_T::n_filt]) { - assert(CONFIG_T::pad_left == 0 && CONFIG_T::pad_right == 0); - assert(CONFIG_T::pad_top == 0 && CONFIG_T::pad_bottom == 0); - assert(CONFIG_T::pool_width == CONFIG_T::stride_width); - assert(CONFIG_T::pool_height == CONFIG_T::stride_height); - - #pragma HLS PIPELINE II=CONFIG_T::reuse_factor - - const int limit = pool_op_limit(); - #pragma HLS ALLOCATION instances=pool_op limit=limit function - -FiltLoop: - for (int filt = 0; filt < CONFIG_T::n_filt; 
filt++) { - data_T pool[CONFIG_T::in_height * CONFIG_T::in_width]; - - InputLoop: - for (int i = 0; i < CONFIG_T::in_height * CONFIG_T::in_width; i++) { - pool[i] = data[i * CONFIG_T::n_filt + filt]; - } - - res[filt] = static_cast(pool_op(pool)); - } -} - -} // namespace nnet - -#endif diff --git a/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_pooling_stream.h b/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_pooling_stream.h deleted file mode 100644 index 13d5979..0000000 --- a/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_pooling_stream.h +++ /dev/null @@ -1,609 +0,0 @@ -#ifndef NNET_POOLING_STREAM_H_ -#define NNET_POOLING_STREAM_H_ - -#include "ap_shift_reg.h" -#include "hls_stream.h" -#include "nnet_common.h" -#include "nnet_conv_stream.h" -#include "nnet_pooling.h" -#include "utils/x_hls_utils.h" - -namespace nnet { - -// ************************************************* -// Max/average pooling -// ************************************************* - -template T reduce_pool(T x[N]) { - #pragma HLS INLINE - if (CONFIG_T::pool_op == Max) { - Op_max op_max; - return reduce>(x, op_max); - } else { - Op_add op_add; - T sum = reduce>(x, op_add); - return sum / N; - } -} - -template void init_pool_table(unsigned table[TABLE_SIZE]) { - for (unsigned ii = 0; ii < TABLE_SIZE; ii++) { - table[ii] = ii % POOL_SIZE; - } -} - -template -void compute_pool_encoded_2d( - const unsigned h_idx, const unsigned w_idx, const data_T &in_elem, - hls::stream data_window[CONFIG_T::pool_height * CONFIG_T::pool_width * CONFIG_T::n_filt], - hls::stream &res, res_T &res_pack, unsigned &outputs_ready) { - // Nearest H without unused pixels on the right - constexpr unsigned nH = - ((CONFIG_T::in_height - CONFIG_T::pool_height) / CONFIG_T::stride_height) * CONFIG_T::stride_height + - CONFIG_T::pool_height; - // Scaled H that behaves like original H - constexpr unsigned sH = - (DIV_ROUNDUP(CONFIG_T::pool_height, CONFIG_T::stride_height) - 1) * CONFIG_T::stride_height + CONFIG_T::pool_height; - // Nearest W without unused pixels on the right - constexpr unsigned nW = ((CONFIG_T::in_width - CONFIG_T::pool_width) / CONFIG_T::stride_width) * CONFIG_T::stride_width + - CONFIG_T::pool_width; - // Scaled W that behaves like original W - constexpr unsigned sW = - (DIV_ROUNDUP(CONFIG_T::pool_width, CONFIG_T::stride_width) - 1) * CONFIG_T::stride_width + CONFIG_T::pool_width; - -#ifdef __SYNTHESIS__ - bool initialized = false; - unsigned pool_table_height[CONFIG_T::in_height]; - unsigned pool_table_width[CONFIG_T::in_width]; -#else - static bool initialized = false; - static unsigned pool_table_height[CONFIG_T::in_height]; - static unsigned pool_table_width[CONFIG_T::in_width]; -#endif - if (!initialized) { - init_pool_table(pool_table_height); - init_pool_table(pool_table_width); - initialized = true; - } - - #pragma HLS INLINE - - if (data_T::size / CONFIG_T::n_filt > 1) { - #pragma HLS ARRAY_PARTITION variable=pool_table_height complete - #pragma HLS ARRAY_PARTITION variable=pool_table_width complete - } - - typename CONFIG_T::accum_t pool_window[CONFIG_T::pool_height * CONFIG_T::pool_width]; - #pragma HLS ARRAY_PARTITION variable=pool_window complete - - const unsigned sh_idx = pool_table_height[h_idx] * CONFIG_T::pool_width; - const unsigned wp_idx = w_idx * (data_T::size / CONFIG_T::n_filt); - -PixelLoop: - for (unsigned p = 0; p < data_T::size / CONFIG_T::n_filt; p++) { - #pragma HLS PIPELINE - - ap_uint filt_mask = 0; - if ((h_idx < nH) && (wp_idx + p < nW)) { - filt_mask = sh_idx + pool_table_width[wp_idx + p] + 1; - 
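// [Editor's note: illustrative sketch, not part of the original patch.]
// init_pool_table fills table[i] = i % POOL_SIZE, so pool_table_height and
// pool_table_width map an absolute pixel coordinate to its offset inside the
// current pooling window; filt_mask then encodes (window offset + 1), with 0
// reserved for pixels outside the pooled region (beyond nH or nW). Worked
// 1D analogue for pool_width = stride_width = 2, in_width = 5: nW = 4, so
// pixels 0..4 map to masks 1,2,1,2,0, and a window's result is emitted each
// time the mask reaches the pool size.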
} - - CopyDataFilt: - for (unsigned c = 0; c < CONFIG_T::n_filt; c++) { - if (filt_mask > 0) - data_window[c * CONFIG_T::pool_height * CONFIG_T::pool_width + filt_mask.to_uint() - 1].write( - in_elem[p * CONFIG_T::n_filt + c]); - } - - if (filt_mask == CONFIG_T::pool_height * CONFIG_T::pool_width) { - FiltLoop: - for (unsigned c = 0; c < CONFIG_T::n_filt; c++) { - PoolLoop: - for (unsigned f = 0; f < CONFIG_T::pool_height * CONFIG_T::pool_width; f++) { - pool_window[f] = data_window[c * CONFIG_T::pool_height * CONFIG_T::pool_width + f].read(); - } - if (res_T::size / CONFIG_T::n_filt == - 1) { // Saves resources if we don't pack output, compiler will remove the else branch - res_pack[c] = - reduce_pool( - pool_window); - } else { - res_pack[outputs_ready * CONFIG_T::n_filt + c] = - reduce_pool( - pool_window); - } - } - if (res_T::size / CONFIG_T::n_filt == - 1) { // Saves resources if we don't pack output, compiler will remove the else branch - res.write(res_pack); - } else { - if (outputs_ready == (res_T::size / CONFIG_T::n_filt) - 1) { - res.write(res_pack); - outputs_ready = 0; - } else { - outputs_ready++; - } - } - } - } -} - -template -void pooling2d_encoded_cl(hls::stream &data, hls::stream &res) { - assert(CONFIG_T::pad_top == 0 && CONFIG_T::pad_bottom == 0 && CONFIG_T::pad_left == 0 && CONFIG_T::pad_right == 0); - assert(CONFIG_T::pool_height == CONFIG_T::stride_height && CONFIG_T::pool_width == CONFIG_T::stride_width); - - res_T res_pack; - PRAGMA_DATA_PACK(res_pack) - unsigned outputs_ready = 0; - - hls::stream data_window[CONFIG_T::pool_height * CONFIG_T::pool_width * CONFIG_T::n_filt]; - constexpr int win_depth = CONFIG_T::pool_height * CONFIG_T::out_width; - for (unsigned i_out = 0; i_out < CONFIG_T::pool_height * CONFIG_T::pool_width * CONFIG_T::n_filt; i_out++) { - #pragma HLS STREAM variable=data_window[i_out] depth=win_depth - } - - constexpr int pack_factor = data_T::size / CONFIG_T::n_filt; - -ReadInputHeight: - for (unsigned i_ih = 0; i_ih < CONFIG_T::in_height; i_ih++) { - ReadInputWidth: - for (unsigned i_iw = 0; i_iw < CONFIG_T::in_width / (pack_factor); i_iw++) { - #pragma HLS LOOP_FLATTEN - if (res_T::size / CONFIG_T::n_filt == 1) { - #pragma HLS PIPELINE II=pack_factor - } - compute_pool_encoded_2d(i_ih, i_iw, data.read(), data_window, res, res_pack, - outputs_ready); - } - } -} - -// ************************************************* -// Line Buffer Implementation (Phil's) -// ************************************************* -template -void compute_pool_buffer_2d(const data_T &in_elem, - ap_shift_reg - line_buffer[MAX(CONFIG_T::pool_height - 1, 1)][CONFIG_T::n_filt], - hls::stream &res) { - #pragma HLS INLINE - const static int lShiftX = CONFIG_T::pool_width - 1; - const static int lShiftY = CONFIG_T::pool_height - 1; - static int pX = 0; // pixel X - static int pY = 0; // pixel Y - static int sX = 0; // stride X - static int sY = 0; // stride Y - - typename CONFIG_T::accum_t pool_window[CONFIG_T::pool_height * CONFIG_T::pool_width]; - #pragma HLS ARRAY_PARTITION variable=pool_window complete - - static typename data_T::value_type kernel_data[CONFIG_T::pool_height * CONFIG_T::pool_width * CONFIG_T::n_filt]; - #pragma HLS ARRAY_PARTITION variable = kernel_data complete dim = 0 - - res_T res_pack; - PRAGMA_DATA_PACK(res_pack) - - // Add pixel into line buffer, return pooling kernels - nnet::shift_line_buffer(in_elem, line_buffer, kernel_data); - - // Can compute pooling output - if ((sX - lShiftX) == 0 && (sY - lShiftY) == 0 && pY > lShiftY - 1 && pX > lShiftX - 
1) { - FiltLoop: - for (unsigned i_ic = 0; i_ic < CONFIG_T::n_filt; i_ic++) { - #pragma HLS PIPELINE - - // Retrieve data for current channel - PoolLoop: - for (unsigned i_ihw = 0; i_ihw < CONFIG_T::pool_height * CONFIG_T::pool_width; i_ihw++) { - pool_window[i_ihw] = kernel_data[i_ihw * CONFIG_T::n_filt + i_ic]; - } - - // Compute Pooling - res_pack[i_ic] = - reduce_pool(pool_window); - } - - // Write to output - res.write(res_pack); - } - - // Counter Housekeeping - if (pX + 1 == CONFIG_T::in_width) // Includes padding, end of line (padded) - { - pX = 0; - sX = 0; - if (pY + 1 == CONFIG_T::in_height) { // Reached bottom of image - pY = 0; - sY = 0; - } else { // Next line - pY = pY + 1; - // Update stride (threshold) ? subtract stride : increment stride - sY = ((sY - lShiftY) == 0) ? sY - CONFIG_T::stride_height + 1 : sY + 1; - } - } else { - pX = pX + 1; - // Update stride (threshold) ? subtract stride : increment stride - sX = ((sX - lShiftX) == 0) ? sX - CONFIG_T::stride_width + 1 : sX + 1; - } -} - -template -void pooling2d_buffer_cl(hls::stream &data, hls::stream &res) { - assert(CONFIG_T::pad_top == 0 && CONFIG_T::pad_bottom == 0 && CONFIG_T::pad_left == 0 && CONFIG_T::pad_right == 0); - assert(CONFIG_T::pool_height == CONFIG_T::stride_height && CONFIG_T::pool_width == CONFIG_T::stride_width); - - static ap_shift_reg line_buffer[MAX(CONFIG_T::pool_height - 1, 1)] - [CONFIG_T::n_filt]; - #pragma HLS ARRAY_PARTITION variable = line_buffer complete dim = 2 - -ReadInputHeight: - for (unsigned i_ih = 0; i_ih < CONFIG_T::in_height; i_ih++) { - ReadInputWidth: - for (unsigned i_iw = 0; i_iw < CONFIG_T::in_width; i_iw++) { - #pragma HLS LOOP_FLATTEN - #pragma HLS PIPELINE - - compute_pool_buffer_2d(data.read(), line_buffer, res); - } - } -} - -template -void pooling2d_cl(hls::stream &data, hls::stream &res) { - #pragma HLS inline recursive - switch (CONFIG_T::implementation) { - case conv_implementation::linebuffer: - pooling2d_buffer_cl(data, res); - break; - case conv_implementation::encoded: - pooling2d_encoded_cl(data, res); - break; - } -} - -// ************************************************* -// Pooling 1D -// ************************************************* - -template -void compute_pool_encoded_1d(const unsigned w_idx, const data_T &in_elem, - hls::stream data_window[CONFIG_T::pool_width * CONFIG_T::n_filt], - hls::stream &res, res_T &res_pack, unsigned &outputs_ready) { - // Nearest W without unused pixels on the right - constexpr unsigned nW = - ((CONFIG_T::n_in - CONFIG_T::pool_width) / CONFIG_T::stride_width) * CONFIG_T::stride_width + CONFIG_T::pool_width; - // Scaled W that behaves like original W - constexpr unsigned sW = - (DIV_ROUNDUP(CONFIG_T::pool_width, CONFIG_T::stride_width) - 1) * CONFIG_T::stride_width + CONFIG_T::pool_width; - -#ifdef __SYNTHESIS__ - bool initialized = false; - unsigned pool_table_width[CONFIG_T::n_in]; -#else - static bool initialized = false; - static unsigned pool_table_width[CONFIG_T::n_in]; -#endif - if (!initialized) { - init_pool_table(pool_table_width); - initialized = true; - } - - #pragma HLS INLINE - - if (data_T::size / CONFIG_T::n_filt > 1) { - #pragma HLS ARRAY_PARTITION variable=pool_table_width complete - } - - typename CONFIG_T::accum_t pool_window[CONFIG_T::pool_width]; - #pragma HLS ARRAY_PARTITION variable=pool_window complete - - const unsigned wp_idx = w_idx * (data_T::size / CONFIG_T::n_filt); - -PixelLoop: - for (unsigned p = 0; p < data_T::size / CONFIG_T::n_filt; p++) { - #pragma HLS PIPELINE - - ap_uint filt_mask = 0; 
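// [Editor's note: illustrative sketch, not part of the original patch.]
// pooling1d_cl / pooling2d_cl above dispatch on CONFIG_T::implementation at
// compile time, so only the chosen variant is synthesized. A hypothetical
// config selecting the line-buffer implementation (the `implementation`
// member is assumed to be supplied by the generated config, as it is for the
// convolution configs):
//
//     struct my_pool_cfg : nnet::pooling2d_config {
//         static const unsigned in_height = 8, in_width = 8, n_filt = 16;
//         static const unsigned pool_height = 2, pool_width = 2;
//         static const unsigned stride_height = 2, stride_width = 2;
//         static const unsigned out_height = 4, out_width = 4;
//         static const nnet::conv_implementation implementation =
//             nnet::conv_implementation::linebuffer;
//     };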
- if (wp_idx + p < nW) { - filt_mask = pool_table_width[wp_idx + p] + 1; - } - - CopyDataFilt: - for (unsigned c = 0; c < CONFIG_T::n_filt; c++) { - if (filt_mask > 0) - data_window[c * CONFIG_T::pool_width + filt_mask.to_uint() - 1].write(in_elem[p * CONFIG_T::n_filt + c]); - } - - if (filt_mask == CONFIG_T::pool_width) { - FiltLoop: - for (unsigned c = 0; c < CONFIG_T::n_filt; c++) { - PoolLoop: - for (unsigned f = 0; f < CONFIG_T::pool_width; f++) { - pool_window[f] = data_window[c * CONFIG_T::pool_width + f].read(); - } - if (res_T::size / CONFIG_T::n_filt == - 1) { // Saves resources if we don't pack output, compiler will remove the else branch - res_pack[c] = reduce_pool(pool_window); - } else { - res_pack[outputs_ready * CONFIG_T::n_filt + c] = - reduce_pool(pool_window); - } - } - if (res_T::size / CONFIG_T::n_filt == - 1) { // Saves resources if we don't pack output, compiler will remove the else branch - res.write(res_pack); - } else { - if (outputs_ready == (res_T::size / CONFIG_T::n_filt) - 1) { - res.write(res_pack); - outputs_ready = 0; - } else { - outputs_ready++; - } - } - } - } -} - -template -void pooling1d_encoded_cl(hls::stream &data, hls::stream &res) { - assert(CONFIG_T::pad_left == 0 && CONFIG_T::pad_right == 0); - assert(CONFIG_T::pool_width == CONFIG_T::stride_width); - - res_T res_pack; - PRAGMA_DATA_PACK(res_pack) - unsigned outputs_ready = 0; - - hls::stream data_window[CONFIG_T::pool_width * CONFIG_T::n_filt]; - constexpr int win_depth = CONFIG_T::n_out; - for (unsigned i_out = 0; i_out < CONFIG_T::pool_width * CONFIG_T::n_filt; i_out++) { - #pragma HLS STREAM variable=data_window[i_out] depth=win_depth - } - - constexpr int pack_factor = data_T::size / CONFIG_T::n_filt; - -ReadInputWidth: - for (unsigned i_iw = 0; i_iw < CONFIG_T::n_in / (pack_factor); i_iw++) { - #pragma HLS LOOP_FLATTEN - if (res_T::size / CONFIG_T::n_filt == 1) { - #pragma HLS PIPELINE II=pack_factor - } - compute_pool_encoded_1d(i_iw, data.read(), data_window, res, res_pack, outputs_ready); - } -} - -// ************************************************* -// Line Buffer Implementation (Phil's) 1D -// ************************************************* -template -void compute_pool_buffer_1d(const data_T &in_elem, hls::stream &res) { - #pragma HLS INLINE - const static int lShiftX = CONFIG_T::pool_width - 1; - // Counters - static int pX = 0; - static int sX = 0; - - typename CONFIG_T::accum_t pool_window[CONFIG_T::pool_width]; - #pragma HLS ARRAY_PARTITION variable=pool_window complete - - static typename data_T::value_type kernel_data[CONFIG_T::pool_width * CONFIG_T::n_filt]; - #pragma HLS ARRAY_PARTITION variable = kernel_data complete dim = 0 - - res_T res_pack; - PRAGMA_DATA_PACK(res_pack) - - // Add pixel into line buffer, return pooling kernels - // 1D case line buffer not necessary. 
Put directly into the kernel_data buffer - nnet::kernel_shift_1d(in_elem, kernel_data); - - // Can compute pooling output - if ((sX - lShiftX) == 0 && pX > lShiftX - 1) { - FiltLoop: - for (unsigned i_ic = 0; i_ic < CONFIG_T::n_filt; i_ic++) { - #pragma HLS PIPELINE - - // Retrieve data for current channel - PoolLoop: - for (unsigned i_iw = 0; i_iw < CONFIG_T::pool_width; i_iw++) { - pool_window[i_iw] = kernel_data[i_iw * CONFIG_T::n_filt + i_ic]; - } - - // Compute Pooling - res_pack[i_ic] = reduce_pool(pool_window); - } - - // Write to output - res.write(res_pack); - } - - // Counter Housekeeping - if (pX + 1 == CONFIG_T::n_in) // Includes padding, end of line (padded) - { - pX = 0; - sX = 0; - } else { - pX = pX + 1; - // Update stride (threshold) ? subtract stride : increment stride - sX = ((sX - lShiftX) == 0) ? sX - CONFIG_T::stride_width + 1 : sX + 1; - } -} - -template -void pooling1d_buffer_cl(hls::stream &data, hls::stream &res) { - assert(CONFIG_T::pad_left == 0 && CONFIG_T::pad_right == 0); - -ReadInputWidth: - for (unsigned i_iw = 0; i_iw < CONFIG_T::n_in; i_iw++) { - #pragma HLS LOOP_FLATTEN - #pragma HLS PIPELINE - compute_pool_buffer_1d(data.read(), res); - } -} - -template -void pooling1d_cl(hls::stream &data, hls::stream &res) { - #pragma HLS inline recursive - switch (CONFIG_T::implementation) { - case conv_implementation::linebuffer: - pooling1d_buffer_cl(data, res); - break; - case conv_implementation::encoded: - pooling1d_encoded_cl(data, res); - break; - } -} - -// ************************************************* -// Global max/average pooling -// ************************************************* - -template T reduce_global_pool(T x, T y[N]) { - #pragma HLS INLINE - if (CONFIG_T::pool_op == Max) { - Op_max op_max; - T y_max = reduce>(y, op_max); - return (x > y_max) ? 
x : y_max; - } else { - Op_add op_add; - T y_sum = reduce>(y, op_add); - return x + y_sum; - } -} - -template -void compute_global_pool(const data_T &in_elem, typename CONFIG_T::accum_t data_window[CONFIG_T::n_filt]) { -PoolFilt: - for (unsigned c = 0; c < CONFIG_T::n_filt; c++) { - #pragma HLS UNROLL - - typename CONFIG_T::accum_t data_pack[data_T::size / CONFIG_T::n_filt]; - #pragma HLS ARRAY_PARTITION variable=data_pack complete dim=0 - - PixelLoop: - for (unsigned p = 0; p < data_T::size / CONFIG_T::n_filt; p++) { - #pragma HLS UNROLL - data_pack[p] = in_elem[p * CONFIG_T::n_filt + c]; - } - data_window[c] = reduce_global_pool( - data_window[c], data_pack); - } -} - -template -void global_pooling2d_cl(hls::stream &data, hls::stream &res) { - assert(CONFIG_T::pad_top == 0 && CONFIG_T::pad_bottom == 0 && CONFIG_T::pad_left == 0 && CONFIG_T::pad_right == 0); - assert(CONFIG_T::pool_height == CONFIG_T::stride_height && CONFIG_T::pool_width == CONFIG_T::stride_width); - - typename CONFIG_T::accum_t data_window[CONFIG_T::n_filt]; - #pragma HLS ARRAY_PARTITION variable=data_window complete - - typename CONFIG_T::accum_t init = 0; - if (CONFIG_T::pool_op == Max) { - init = hls::numeric_limits::min(); - } - -PoolInitLoop: - for (unsigned i_init = 0; i_init < CONFIG_T::n_filt; i_init++) { - #pragma HLS UNROLL - data_window[i_init] = init; - } - -ReadInputHeight: - for (unsigned i_ih = 0; i_ih < CONFIG_T::in_height; i_ih++) { - ReadInputWidth: - for (unsigned i_iw = 0; i_iw < CONFIG_T::in_width / (data_T::size / CONFIG_T::n_filt); i_iw++) { - #pragma HLS LOOP_FLATTEN - compute_global_pool(data.read(), data_window); - } - } - - if (CONFIG_T::pool_op == Max) { - MaxPoolRes: - for (unsigned i_res = 0; i_res < CONFIG_T::n_filt / res_T::size; i_res++) { - #pragma HLS PIPELINE - - res_T res_pack; - PRAGMA_DATA_PACK(res_pack) - MaxPoolPack: - for (unsigned i_pack = 0; i_pack < res_T::size; i_pack++) { - #pragma HLS UNROLL - res_pack[i_pack] = data_window[i_pack]; - } - res.write(res_pack); - } - } else { - AvgPoolRes: - for (unsigned i_res = 0; i_res < CONFIG_T::n_filt / res_T::size; i_res++) { - #pragma HLS PIPELINE - - res_T res_pack; - PRAGMA_DATA_PACK(res_pack) - AvgPoolPack: - for (unsigned i_pack = 0; i_pack < res_T::size; i_pack++) { - #pragma HLS UNROLL - res_pack[i_pack] = data_window[i_pack] / (CONFIG_T::in_height * CONFIG_T::in_width); - } - res.write(res_pack); - } - } -} - -template -void global_pooling1d_cl(hls::stream &data, hls::stream &res) { - assert(CONFIG_T::pad_left == 0 && CONFIG_T::pad_right == 0); - assert(CONFIG_T::pool_width == CONFIG_T::stride_width); - - typename CONFIG_T::accum_t data_window[CONFIG_T::n_filt]; - #pragma HLS ARRAY_PARTITION variable=data_window complete - - typename CONFIG_T::accum_t init = 0; - if (CONFIG_T::pool_op == Max) { - init = hls::numeric_limits::min(); - } - -PoolInitLoop: - for (unsigned i_init = 0; i_init < CONFIG_T::n_filt; i_init++) { - #pragma HLS UNROLL - data_window[i_init] = init; - } - -ReadInput: - for (unsigned i_iw = 0; i_iw < CONFIG_T::n_in / (data_T::size / CONFIG_T::n_filt); i_iw++) { - #pragma HLS LOOP_FLATTEN - compute_global_pool(data.read(), data_window); - } - - if (CONFIG_T::pool_op == Max) { - MaxPoolRes: - for (unsigned i_res = 0; i_res < CONFIG_T::n_filt / res_T::size; i_res++) { - #pragma HLS PIPELINE - - res_T res_pack; - PRAGMA_DATA_PACK(res_pack) - MaxPoolPack: - for (unsigned i_pack = 0; i_pack < res_T::size; i_pack++) { - #pragma HLS UNROLL - res_pack[i_pack] = data_window[i_pack]; - } - res.write(res_pack); - } - } 
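// [Editor's note: illustrative sketch, not part of the original patch.]
// reduce_global_pool folds each packed input word into a running per-filter
// accumulator: a running max for Max pooling, a running sum for Average
// pooling, with the division by the pixel count done once at the end (the
// AvgPoolPack loops below). Equivalent scalar pseudocode for one filter:
//
//     acc = (op == Max) ? std::numeric_limits<T>::lowest() : T(0);
//     for (int i = 0; i < n_in; i++)
//         acc = (op == Max) ? std::max(acc, x[i]) : acc + x[i];
//     result = (op == Max) ? acc : acc / n_in; // divide once at the end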
else { - AvgPoolRes: - for (unsigned i_res = 0; i_res < CONFIG_T::n_filt / res_T::size; i_res++) { - #pragma HLS PIPELINE - - res_T res_pack; - PRAGMA_DATA_PACK(res_pack) - AvgPoolPack: - for (unsigned i_pack = 0; i_pack < res_T::size; i_pack++) { - #pragma HLS UNROLL - res_pack[i_pack] = data_window[i_pack] / CONFIG_T::n_in; - } - res.write(res_pack); - } - } -} - -} // namespace nnet - -#endif diff --git a/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_recr_activations.h b/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_recr_activations.h deleted file mode 100644 index f68d806..0000000 --- a/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_recr_activations.h +++ /dev/null @@ -1,56 +0,0 @@ -#ifndef NNET_RECR_ACTIVATION_H_ -#define NNET_RECR_ACTIVATION_H_ - -#include "hls_stream.h" -#include "nnet_activation.h" -#include "nnet_common.h" -#include "nnet_helpers.h" -#include - -namespace nnet { - -namespace activation { - -template class Activation { - public: - // ************************************************* - // Blank Activation - // ************************************************* - static void activation(data_T data[CONFIG_T::n_in], res_T res[CONFIG_T::n_in]) {} // Nothing to do here -}; - -template class relu : public Activation { - public: - // ************************************************* - // Relu Activation - // ************************************************* - static void activation(data_T data[CONFIG_T::n_in], res_T res[CONFIG_T::n_in]) { - nnet::relu(data, res); - } -}; - -template class sigmoid : public Activation { - public: - // ************************************************* - // Sigmoid Activation - // ************************************************* - static void activation(data_T data[CONFIG_T::n_in], res_T res[CONFIG_T::n_in]) { - nnet::sigmoid(data, res); - } -}; - -template class tanh : public Activation { - public: - // ************************************************* - // TanH Activation - // ************************************************* - static void activation(data_T data[CONFIG_T::n_in], res_T res[CONFIG_T::n_in]) { - nnet::tanh(data, res); - } -}; - -} // namespace activation - -} // namespace nnet - -#endif diff --git a/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_recurrent.h b/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_recurrent.h deleted file mode 100644 index 6e86814..0000000 --- a/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_recurrent.h +++ /dev/null @@ -1,571 +0,0 @@ -#ifndef NNET_RECURSIVE_H_ -#define NNET_RECURSIVE_H_ - -#include "hls_stream.h" -#include "nnet_activation.h" -#include "nnet_common.h" -#include "nnet_dense.h" -#include "nnet_recr_activations.h" - -namespace nnet { - -struct lstm_config { - // Internal data type definitions - typedef float weight_t; - typedef float bias_t; - - // Layer Sizes - static const unsigned n_in = 2; - static const unsigned n_parts = 20; - static const unsigned n_out = 2; - static const unsigned n_state = 2; - static const unsigned n_4state = 8; - static const unsigned table_size = 1024; - - // Resource reuse info - static const unsigned io_type = io_parallel; - static const unsigned reuse_factor = 1; - static const unsigned n_zeros = 0; - static const bool store_weights_in_bram = false; - static const bool use_static = true; - - template using activation_recr = nnet::activation::relu; - template using activation = nnet::activation::relu; -}; -// Long Short term Memory NN (LSTM) -// Resources: -// https://github.com/nicodjimenez/lstm/blob/master/lstm.py -// 
https://github.com/llSourcell/LSTM_Networks/blob/master/LSTM%20Demo.ipynb
- //  https://en.wikipedia.org/wiki/Long_short-term_memory
- // Notes:
- //  - LSTM naming conventions adopted from the above links
- //      - s_newstate = activation(U*input + W*state)
- //      - h_output = activation(U*input + W*state)*activation(s_newstate)
- //  - If softmax is needed on the output, perform it *outside* of this operation
- // Originally there was a version that allowed the state in each layer to be saved; this was moved above (it requires a
- // LARGE dense network at the end)
-template
-void lstm(bool reset_state, data_T data[CONFIG_T::n_in], res_T h_newstate[CONFIG_T::n_state],
-          res_T s_newstate[CONFIG_T::n_state], typename CONFIG_T::weight_t param[CONFIG_T::n_state * 4 * CONFIG_T::n_in],
-          typename CONFIG_T::weight_t param_r[CONFIG_T::n_state * 4 * CONFIG_T::n_state],
-          typename CONFIG_T::bias_t param_b[CONFIG_T::n_state * 4],
-          typename CONFIG_T::bias_t param_br[CONFIG_T::n_state * 4]) {
-    // Initialize the state variable -- will maintain state between function calls
-
-    typename CONFIG_T::accum_t tmpres[CONFIG_T::n_state * 4];
-    typename CONFIG_T::accum_t tmpres_state[CONFIG_T::n_state * 4];
-    typename CONFIG_T::accum_t tmpres_ifo[CONFIG_T::n_state * 3];   // activated i,f,o matrices (keras notation)
-    typename CONFIG_T::accum_t tmpres_c[CONFIG_T::n_state];         // activated c-matrix (keras notation)
-    typename CONFIG_T::accum_t inputacc_ifo[CONFIG_T::n_state * 3]; // i,f,o matrices (keras notation)
-    typename CONFIG_T::accum_t inputacc_c[CONFIG_T::n_state];       // c-matrix (keras notation)
-    typename CONFIG_T::accum_t s_actstate[CONFIG_T::n_state];
-
-    #pragma HLS ARRAY_PARTITION variable=h_newstate complete
-    #pragma HLS ARRAY_PARTITION variable=s_newstate complete
-    #pragma HLS ARRAY_PARTITION variable=tmpres complete
-    #pragma HLS ARRAY_PARTITION variable=tmpres_state complete
-    #pragma HLS ARRAY_PARTITION variable=tmpres_ifo complete
-    #pragma HLS ARRAY_PARTITION variable=tmpres_c complete
-    #pragma HLS ARRAY_PARTITION variable=inputacc_ifo complete
-    #pragma HLS ARRAY_PARTITION variable=inputacc_c complete
-    #pragma HLS ARRAY_PARTITION variable=s_actstate complete
-
-    nnet::dense(data, tmpres, param, param_b);
-    nnet::dense(h_newstate, tmpres_state, param_r, param_br);
-
-    for (int iacc = 0; iacc < (3 * CONFIG_T::n_state); iacc++) {
-        #pragma HLS UNROLL
-        int index = iacc;
-        if (iacc > 2 * CONFIG_T::n_state - 1)
-            index = iacc + CONFIG_T::n_state;
-        inputacc_ifo[iacc] = tmpres[index] + tmpres_state[index];
-    }
-    for (int iacc = 0; iacc < (CONFIG_T::n_state); iacc++) {
-        #pragma HLS UNROLL
-        int index = iacc + CONFIG_T::n_state * 2;
-        inputacc_c[iacc] = tmpres[index] + tmpres_state[index];
-    }
-
-    CONFIG_T::template activation_recr::activation(
-        inputacc_ifo, tmpres_ifo);
-
-    // Now for the c-matrix (candidate state)
-    CONFIG_T::template activation::activation(
-        inputacc_c, tmpres_c);
-
-    // Operation: s=g*i+sold*f (update state with buffer to avoid timing issues)
-    for (int iacc = 0; iacc < (CONFIG_T::n_state); iacc++) {
-        #pragma HLS UNROLL
-        s_newstate[iacc] = tmpres_c[iacc] * tmpres_ifo[iacc] + s_newstate[iacc] * tmpres_ifo[iacc + (CONFIG_T::n_state)];
-    }
-    // Operation: h=act(s)*o
-    CONFIG_T::template activation::activation(
-        s_newstate, s_actstate);
-
-    for (int iacc = 0; iacc < CONFIG_T::n_state; iacc++) {
-        #pragma HLS UNROLL
-        h_newstate[iacc] = tmpres_ifo[iacc + 2 * (CONFIG_T::n_state)] * s_actstate[iacc];
-    }
-}
-
-template
-void lstm_static(bool reset_state, data_T data[CONFIG_T::n_in], res_T h_newstate[CONFIG_T::n_state],
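// [Editor's note: illustrative sketch, not part of the original patch.]
// The lstm() cell above follows the Keras gate order. With the fused
// pre-activation z = W*x(t) + U*h(t-1) + b split into four n_state-wide
// blocks [i, f, c, o]:
//
//     i(t) = sigma(z_i)    f(t) = sigma(z_f)    o(t) = sigma(z_o)
//     c~(t) = act(z_c)
//     s(t) = c~(t) * i(t) + s(t-1) * f(t)    // the s_newstate update loop
//     h(t) = o(t) * act(s(t))                // the h_newstate update loop
//
// activation_recr supplies sigma and activation supplies act (tanh in a
// typical Keras configuration); the `index` shuffle skips the c block when
// assembling the concatenated i/f/o accumulator.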
- res_T s_newstate[CONFIG_T::n_state], - typename CONFIG_T::weight_t param[CONFIG_T::n_state * 4 * CONFIG_T::n_in], - typename CONFIG_T::weight_t param_r[CONFIG_T::n_state * 4 * CONFIG_T::n_state], - typename CONFIG_T::bias_t param_b[CONFIG_T::n_state * 4], - typename CONFIG_T::bias_t param_br[CONFIG_T::n_state * 4]) { - static res_T h_state[CONFIG_T::n_state]; - static res_T s_state[CONFIG_T::n_state]; - // Initialize the state variable -- will maintain state between function calls - typename CONFIG_T::accum_t tmpres[CONFIG_T::n_state * 4]; - typename CONFIG_T::accum_t tmpres_state[CONFIG_T::n_state * 4]; - typename CONFIG_T::accum_t tmpres_ifo[CONFIG_T::n_state * 3]; // activated i,f,o matrices (keras notation) - typename CONFIG_T::accum_t tmpres_c[CONFIG_T::n_state]; // activated c-matrix (keras notation) - typename CONFIG_T::accum_t inputacc_ifo[CONFIG_T::n_state * 3]; // i,f,o matrices (keras notation) - typename CONFIG_T::accum_t inputacc_c[CONFIG_T::n_state]; // c-matrix (keras notation) - typename CONFIG_T::accum_t s_actstate[CONFIG_T::n_state]; - - #pragma HLS ARRAY_PARTITION variable=h_newstate complete - #pragma HLS ARRAY_PARTITION variable=s_newstate complete - #pragma HLS ARRAY_PARTITION variable=h_state complete - #pragma HLS ARRAY_PARTITION variable=s_state complete - #pragma HLS ARRAY_PARTITION variable=tmpres complete - #pragma HLS ARRAY_PARTITION variable=tmpres_state complete - #pragma HLS ARRAY_PARTITION variable=tmpres_ifo complete - #pragma HLS ARRAY_PARTITION variable=tmpres_c complete - #pragma HLS ARRAY_PARTITION variable=inputacc_ifo complete - #pragma HLS ARRAY_PARTITION variable=inputacc_c complete - #pragma HLS ARRAY_PARTITION variable=s_actstate complete - - if (reset_state) { - for (int i_state = 0; i_state < (CONFIG_T::n_state); i_state++) { - #pragma HLS UNROLL - s_state[i_state] = 0; - h_state[i_state] = 0; - } - } - - nnet::dense(data, tmpres, param, param_b); - nnet::dense(h_state, tmpres_state, param_r, - param_br); - - for (int iacc = 0; iacc < (3 * CONFIG_T::n_state); iacc++) { - #pragma HLS UNROLL - int index = iacc; - if (iacc > 2 * CONFIG_T::n_state - 1) - index = iacc + CONFIG_T::n_state; - inputacc_ifo[iacc] = tmpres[index] + tmpres_state[index]; - } - for (int iacc = 0; iacc < (CONFIG_T::n_state); iacc++) { - #pragma HLS UNROLL - int index = iacc + CONFIG_T::n_state * 2; - inputacc_c[iacc] = tmpres[index] + tmpres_state[index]; - } - - CONFIG_T::template activation_recr::activation( - inputacc_ifo, tmpres_ifo); - - // Now for the confusion matrix - CONFIG_T::template activation::activation( - inputacc_c, tmpres_c); - - // Operation: s=g*i+sold*f (update state with buffer to avoid timing issues) - for (int iacc = 0; iacc < (CONFIG_T::n_state); iacc++) { - #pragma HLS UNROLL - s_state[iacc] = tmpres_c[iacc] * tmpres_ifo[iacc] + s_state[iacc] * tmpres_ifo[iacc + (CONFIG_T::n_state)]; - s_newstate[iacc] = s_state[iacc]; - } - // Operation: h=act(s)*o - CONFIG_T::template activation::activation( - s_state, s_actstate); - - for (int iacc = 0; iacc < CONFIG_T::n_state; iacc++) { - #pragma HLS UNROLL - h_state[iacc] = tmpres_ifo[iacc + 2 * (CONFIG_T::n_state)] * s_actstate[iacc]; - h_newstate[iacc] = h_state[iacc]; - } -} - -template -void lstm_stack(data_T data[CONFIG_T::n_sequence * CONFIG_T::n_in], res_T res[CONFIG_T::n_sequence_out * CONFIG_T::n_state], - typename CONFIG_T::weight_t param[CONFIG_T::n_state * 4 * CONFIG_T::n_in], - typename CONFIG_T::weight_t param_r[CONFIG_T::n_state * 4 * CONFIG_T::n_state], - typename CONFIG_T::bias_t 
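// [Editor's note: illustrative sketch, not part of the original patch.]
// lstm_stack (below) unrolls the time dimension in software: the cell is
// invoked once per time step, with reset_state true only on the first step.
// Outputs either stream out every step (n_sequence_out > 1, i.e.
// return_sequences) or only after the last step (n_sequence_out == 1).
// Hypothetical timeline for n_sequence = 3:
//
//     t=0: lstm(reset=true,  x0) -> h0, s0
//     t=1: lstm(reset=false, x1) -> h1, s1
//     t=2: lstm(reset=false, x2) -> h2, s2   // res = h2 if n_sequence_out == 1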
param_b[CONFIG_T::n_state * 4], - typename CONFIG_T::bias_t param_br[CONFIG_T::n_state * 4]) { - - res_T h_newstate[CONFIG_T::n_state]; - res_T s_newstate[CONFIG_T::n_state]; - data_T data_in[CONFIG_T::n_in]; - bool reset_state = true; - - #pragma HLS ARRAY_PARTITION variable=h_newstate complete - #pragma HLS ARRAY_PARTITION variable=s_newstate complete - - for (int ii = 0; ii < CONFIG_T::n_state; ii++) { - #pragma HLS UNROLL - h_newstate[ii] = 0; - s_newstate[ii] = 0; - } - for (int iloop = 0; iloop < CONFIG_T::n_sequence; iloop++) { - for (int j = 0; j < CONFIG_T::n_in; j++) { - #pragma HLS UNROLL - data_in[j] = data[j + iloop * CONFIG_T::n_in]; - } - if (CONFIG_T::use_static) - nnet::lstm_static(reset_state, data_in, h_newstate, s_newstate, param, param_r, param_b, - param_br); - else - nnet::lstm(reset_state, data_in, h_newstate, s_newstate, param, param_r, param_b, - param_br); - if (CONFIG_T::n_sequence_out > 1) - for (int i = CONFIG_T::n_state * iloop, j = 0; i < (CONFIG_T::n_state * (iloop + 1)); i++, j++) { - #pragma HLS UNROLL - res[i] = h_newstate[j]; - } - reset_state = false; - } - if (CONFIG_T::n_sequence_out == 1) - for (int i = 0; i < (CONFIG_T::n_state); i++) { - #pragma HLS UNROLL - res[i] = h_newstate[i]; - } -} - -template -void lstm_stack(hls::stream &data_stream, hls::stream &res_stream, - typename CONFIG_T::weight_t param[CONFIG_T::n_state * 4 * CONFIG_T::n_in], - typename CONFIG_T::weight_t param_r[CONFIG_T::n_state * 4 * CONFIG_T::n_state], - typename CONFIG_T::bias_t param_b[CONFIG_T::n_state * 4], - typename CONFIG_T::bias_t param_br[CONFIG_T::n_state * 4]) { - - typename res_T::value_type h_newstate[CONFIG_T::n_state]; - typename res_T::value_type s_newstate[CONFIG_T::n_state]; - #pragma HLS ARRAY_PARTITION variable=h_newstate complete - #pragma HLS ARRAY_PARTITION variable=s_newstate complete - - for (int ii = 0; ii < CONFIG_T::n_state; ii++) { - #pragma HLS UNROLL - h_newstate[ii] = 0; - s_newstate[ii] = 0; - } - - typename data_T::value_type data_in[CONFIG_T::n_in]; - bool reset_state = true; - -DataPropagation: - for (int i_in = 0; i_in < CONFIG_T::n_sequence * CONFIG_T::n_in / data_T::size; i_in++) { - if (CONFIG_T::n_sequence * CONFIG_T::n_in / data_T::size > 1) { - // #pragma HLS PIPELINE - } - data_T data_pack = data_stream.read(); - DataPack: - for (int i_pack = 0; i_pack < data_T::size; i_pack++) { - #pragma HLS UNROLL - data_in[i_pack] = data_pack[i_pack]; - } - if (CONFIG_T::use_static) - nnet::lstm_static( - reset_state, data_in, h_newstate, s_newstate, param, param_r, param_b, param_br); - else - nnet::lstm( - reset_state, data_in, h_newstate, s_newstate, param, param_r, param_b, param_br); - if (CONFIG_T::n_sequence_out > 1) { - res_T res_pack; - PRAGMA_DATA_PACK(res_pack) - ResPack_sequences: - for (int i_pack = 0; i_pack < res_T::size; i_pack++) { - #pragma HLS UNROLL - res_pack[i_pack] = h_newstate[i_pack]; - } - res_stream.write(res_pack); - } - reset_state = false; - } - - if (CONFIG_T::n_sequence_out == 1) { - res_T res_pack; - PRAGMA_DATA_PACK(res_pack) - ResPack: - for (int i_pack = 0; i_pack < res_T::size; i_pack++) { - #pragma HLS UNROLL - res_pack[i_pack] = h_newstate[i_pack]; - } - res_stream.write(res_pack); - } -} - -// Struct for the GRU template - -struct gru_config { - // Internal data type definitions - typedef float weight_t; - typedef float bias_t; - typedef float accum_t; - - // Layer Sizes - static const unsigned n_in = 2; - static const unsigned n_out = 2; - static const unsigned n_state = 2; - static const unsigned 
n_sequence = 2; - static const unsigned n_4state = 8; - static const unsigned table_size = 1024; - - // Resource reuse info - static const unsigned io_type = io_parallel; - static const unsigned reuse_factor = 1; - static const bool store_weights_in_bram = false; - static const bool use_static = true; - static const unsigned n_zeros = 0; - - template using activation_recr = nnet::activation::relu; - template using activation = nnet::activation::relu; -}; - -template -void gru(bool reset_state, data_T data[CONFIG_T::n_in], res_T h_newstate[CONFIG_T::n_state], - typename CONFIG_T::weight_t param[CONFIG_T::n_state * 3 * CONFIG_T::n_in], // TODO - Check the layout of the param - // weights - refer page in copy!! - typename CONFIG_T::weight_t param_zr[CONFIG_T::n_state * 3 * CONFIG_T::n_state], - typename CONFIG_T::bias_t param_b[CONFIG_T::n_state * 3], - typename CONFIG_T::bias_t param_br[CONFIG_T::n_state * 3]) { - // Initialize the state variable -- will maintain state between function calls - typename CONFIG_T::accum_t tmpres[CONFIG_T::n_state * 3]; - typename CONFIG_T::accum_t tmpres_state_zr[CONFIG_T::n_state * 3]; - typename CONFIG_T::accum_t tmpres_state_h[CONFIG_T::n_state]; - typename CONFIG_T::accum_t tmpres_zr[CONFIG_T::n_state * 2]; // activated i,f,o matrices (keras notation) - typename CONFIG_T::accum_t tmpres_h[CONFIG_T::n_state]; // activated c-matrix (keras notation) - typename CONFIG_T::accum_t inputacc_zr[CONFIG_T::n_state * 2]; // i,f,o matrices (keras notation) - typename CONFIG_T::accum_t inputacc_h[CONFIG_T::n_state]; // c-matrix (keras notation) - - #pragma HLS ARRAY_PARTITION variable=h_newstate complete - #pragma HLS ARRAY_PARTITION variable=tmpres complete - #pragma HLS ARRAY_PARTITION variable=tmpres_state_zr complete - #pragma HLS ARRAY_PARTITION variable=tmpres_state_h complete - #pragma HLS ARRAY_PARTITION variable=tmpres_zr complete - #pragma HLS ARRAY_PARTITION variable=tmpres_h complete - #pragma HLS ARRAY_PARTITION variable=inputacc_zr complete - #pragma HLS ARRAY_PARTITION variable=inputacc_h complete - - nnet::dense(data, tmpres, param, param_b); - nnet::dense(h_newstate, tmpres_state_zr, param_zr, - param_br); - - // Adding the individual vectors from the multiplication of tmpres = Wx*x(t); tmpres_state_zr = Wh*h(t-1); tmpres - // initialized with biases -- DONE - for (int iacc = 0; iacc < (2 * CONFIG_T::n_state); iacc++) { - #pragma HLS UNROLL - int index = iacc; - inputacc_zr[iacc] = tmpres[index] + tmpres_state_zr[index]; - } - - // Activation function Sub layer -- START - CONFIG_T::template activation_recr::activation(inputacc_zr, tmpres_zr); - - // Activation function Sub layer -- END - - // Hadamrd product of r(t) = inputacc_zr[2*n_state:n_state] and h(t-1) = h_newstate - for (int iacc = 0; iacc < (CONFIG_T::n_state); iacc++) { - #pragma HLS UNROLL - tmpres_state_h[iacc] = tmpres_zr[iacc + (CONFIG_T::n_state)] * tmpres_state_zr[iacc + (2 * CONFIG_T::n_state)]; - } - - // Assuming reset_after is false - for (int iacc = 0; iacc < (CONFIG_T::n_state); iacc++) { - #pragma HLS UNROLL - int index = iacc + CONFIG_T::n_state * 2; - inputacc_h[iacc] = tmpres[index] + tmpres_state_h[iacc]; - } - - // Now run the activation on this guy - CONFIG_T::template activation::activation(inputacc_h, tmpres_h); - - // Mix the stat with the previous state - for (int iacc = 0; iacc < (CONFIG_T::n_state); iacc++) { - #pragma HLS UNROLL - h_newstate[iacc] = (res_T)(tmpres_h[iacc] * (1 - tmpres_zr[iacc]) + h_newstate[iacc] * tmpres_zr[iacc]); - } -} - -template -void 
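// [Editor's note: illustrative sketch, not part of the original patch.]
// The gru() cell above uses the fused gate layout [z, r, h]. Writing W for
// the input kernel, U for the recurrent kernel and sigma for activation_recr,
// the loops implement:
//
//     z(t)  = sigma(W_z*x + U_z*h(t-1) + b_z)                // update gate
//     r(t)  = sigma(W_r*x + U_r*h(t-1) + b_r)                // reset gate
//     h~(t) = act(W_h*x + b_h + r(t) * (U_h*h(t-1) + br_h))  // candidate
//     h(t)  = (1 - z(t)) * h~(t) + z(t) * h(t-1)             // final mix
//
// with r(t) applied to the recurrent contribution via the tmpres_state_h
// Hadamard product.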
gru_static(bool reset_state, data_T data[CONFIG_T::n_in], res_T h_newstate[CONFIG_T::n_state], - typename CONFIG_T::weight_t param[CONFIG_T::n_state * 3 * CONFIG_T::n_in], - typename CONFIG_T::weight_t param_zr[CONFIG_T::n_state * 3 * CONFIG_T::n_state], - typename CONFIG_T::bias_t param_b[CONFIG_T::n_state * 3], - typename CONFIG_T::bias_t param_br[CONFIG_T::n_state * 3]) { - // Initialize the state variable -- will maintain state between function calls - - static res_T h_state[CONFIG_T::n_state]; - typename CONFIG_T::accum_t tmpres[CONFIG_T::n_state * 3]; - typename CONFIG_T::accum_t tmpres_state_zr[CONFIG_T::n_state * 3]; - typename CONFIG_T::accum_t tmpres_state_h[CONFIG_T::n_state]; - typename CONFIG_T::accum_t tmpres_zr[CONFIG_T::n_state * 2]; // activated i,f,o matrices (keras notation) - typename CONFIG_T::accum_t tmpres_h[CONFIG_T::n_state]; // activated c-matrix (keras notation) - typename CONFIG_T::accum_t inputacc_zr[CONFIG_T::n_state * 2]; // i,f,o matrices (keras notation) - typename CONFIG_T::accum_t inputacc_h[CONFIG_T::n_state]; // c-matrix (keras notation) - - #pragma HLS ARRAY_PARTITION variable=h_state complete - #pragma HLS ARRAY_PARTITION variable=h_newstate complete - #pragma HLS ARRAY_PARTITION variable=tmpres complete - #pragma HLS ARRAY_PARTITION variable=tmpres_state_zr complete - #pragma HLS ARRAY_PARTITION variable=tmpres_state_h complete - #pragma HLS ARRAY_PARTITION variable=tmpres_zr complete - #pragma HLS ARRAY_PARTITION variable=tmpres_h complete - #pragma HLS ARRAY_PARTITION variable=inputacc_zr complete - #pragma HLS ARRAY_PARTITION variable=inputacc_h complete - - if (reset_state) { - for (int i_h_state = 0; i_h_state < (CONFIG_T::n_state); i_h_state++) { - #pragma HLS UNROLL - h_state[i_h_state] = 0; - } - } - - nnet::dense(data, tmpres, param, param_b); - nnet::dense(h_state, tmpres_state_zr, param_zr, - param_br); - - // Adding the individual vectors from the multiplication of tmpres = Wx*x(t); tmpres_state_zr = Wh*h(t-1); tmpres - // initialized with biases -- DONE - for (int iacc = 0; iacc < (2 * CONFIG_T::n_state); iacc++) { - #pragma HLS UNROLL - int index = iacc; - inputacc_zr[iacc] = tmpres[index] + tmpres_state_zr[index]; - } - - // Activation function Sub layer -- START - CONFIG_T::template activation_recr::activation(inputacc_zr, tmpres_zr); - - // Activation function Sub layer -- END - - // Hadamrd product of r(t) = inputacc_zr[2*n_state:n_state] and h(t-1) = h_newstate - for (int iacc = 0; iacc < (CONFIG_T::n_state); iacc++) { - #pragma HLS UNROLL - tmpres_state_h[iacc] = tmpres_zr[iacc + (CONFIG_T::n_state)] * tmpres_state_zr[iacc + (2 * CONFIG_T::n_state)]; - } - - // Assuming reset_after is false - for (int iacc = 0; iacc < (CONFIG_T::n_state); iacc++) { - #pragma HLS UNROLL - int index = iacc + CONFIG_T::n_state * 2; - inputacc_h[iacc] = tmpres[index] + tmpres_state_h[iacc]; - } - - // Now run the activation on this guy - CONFIG_T::template activation::activation(inputacc_h, tmpres_h); - - // Mix the stat with the previous state - for (int iacc = 0; iacc < (CONFIG_T::n_state); iacc++) { - #pragma HLS UNROLL - h_state[iacc] = (res_T)(tmpres_h[iacc] * (1 - tmpres_zr[iacc]) + h_state[iacc] * tmpres_zr[iacc]); - h_newstate[iacc] = h_state[iacc]; - } -} - -template -void gru_stack(data_T data[CONFIG_T::n_sequence * CONFIG_T::n_in], res_T res[CONFIG_T::n_sequence_out * CONFIG_T::n_state], - typename CONFIG_T::weight_t param[CONFIG_T::n_state * 3 * CONFIG_T::n_in], - typename CONFIG_T::weight_t param_zr[CONFIG_T::n_state * 3 * 
CONFIG_T::n_state], - typename CONFIG_T::bias_t param_b[CONFIG_T::n_state * 3], - typename CONFIG_T::bias_t param_br[CONFIG_T::n_state * 3]) { - - res_T h_state[CONFIG_T::n_state]; - data_T data_in[CONFIG_T::n_in]; - bool reset_state = true; - - #pragma HLS ARRAY_PARTITION variable=h_state complete - #pragma HLS ARRAY_PARTITION variable=data_in complete - - for (int ii = 0; ii < CONFIG_T::n_state; ii++) { - #pragma HLS UNROLL - h_state[ii] = 0; - } - for (int iloop = 0; iloop < CONFIG_T::n_sequence; iloop++) { - for (int j = 0; j < CONFIG_T::n_in; j++) { - #pragma HLS UNROLL - data_in[j] = data[j + iloop * CONFIG_T::n_in]; - } - if (CONFIG_T::use_static) - nnet::gru_static(reset_state, data_in, h_state, param, param_zr, param_b, param_br); - else - nnet::gru(reset_state, data_in, h_state, param, param_zr, param_b, param_br); - if (CONFIG_T::n_sequence_out > 1) - for (int i = CONFIG_T::n_state * iloop, j = 0; i < (CONFIG_T::n_state * (iloop + 1)); i++, j++) { - #pragma HLS UNROLL - res[i] = h_state[j]; - } - reset_state = false; - } - if (CONFIG_T::n_sequence_out == 1) - for (int i = 0; i < (CONFIG_T::n_state); i++) { - #pragma HLS UNROLL - res[i] = h_state[i]; - } -} - -template -void gru_stack(hls::stream &data_stream, hls::stream &res_stream, - typename CONFIG_T::weight_t param[CONFIG_T::n_state * 3 * CONFIG_T::n_in], - typename CONFIG_T::weight_t param_zr[CONFIG_T::n_state * 3 * CONFIG_T::n_state], - typename CONFIG_T::bias_t param_b[CONFIG_T::n_state * 3], - typename CONFIG_T::bias_t param_br[CONFIG_T::n_state * 3]) { - - typename res_T::value_type h_newstate[CONFIG_T::n_state]; - #pragma HLS ARRAY_PARTITION variable=h_newstate complete - for (int ii = 0; ii < CONFIG_T::n_state; ii++) { - #pragma HLS UNROLL - h_newstate[ii] = 0; - } - - typename data_T::value_type data_in[CONFIG_T::n_in]; - bool reset_state = true; - -DataPropagation: - for (int i_in = 0; i_in < CONFIG_T::n_sequence * CONFIG_T::n_in / data_T::size; i_in++) { - if (CONFIG_T::n_sequence * CONFIG_T::n_in / data_T::size > 1) { - // #pragma HLS PIPELINE - } - data_T data_pack = data_stream.read(); - DataPack: - for (int i_pack = 0; i_pack < data_T::size; i_pack++) { - #pragma HLS UNROLL - data_in[i_pack] = data_pack[i_pack]; - } - if (CONFIG_T::use_static) - nnet::gru_static( - reset_state, data_in, h_newstate, param, param_zr, param_b, param_br); - else - nnet::gru(reset_state, data_in, h_newstate, - param, param_zr, param_b, param_br); - if (CONFIG_T::n_sequence_out > 1) { - res_T res_pack; - PRAGMA_DATA_PACK(res_pack) - ResPack_sequences: - for (int i_pack = 0; i_pack < res_T::size; i_pack++) { - #pragma HLS UNROLL - res_pack[i_pack] = h_newstate[i_pack]; - } - res_stream.write(res_pack); - } - reset_state = false; - } - - if (CONFIG_T::n_sequence_out == 1) { - res_T res_pack; - PRAGMA_DATA_PACK(res_pack) - ResPack: - for (int i_pack = 0; i_pack < res_T::size; i_pack++) { - #pragma HLS UNROLL - res_pack[i_pack] = h_newstate[i_pack]; - } - res_stream.write(res_pack); - } -} - -} // namespace nnet - -#endif diff --git a/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_sepconv1d_stream.h b/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_sepconv1d_stream.h deleted file mode 100644 index 254fc50..0000000 --- a/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_sepconv1d_stream.h +++ /dev/null @@ -1,119 +0,0 @@ -#ifndef NNET_SEPARABLE_CONV1D_STREAM_H_ -#define NNET_SEPARABLE_CONV1D_STREAM_H_ - -#include "hls_stream.h" -#include "nnet_common.h" -#include "nnet_conv1d_stream.h" -#include "nnet_sepconv_stream.h" - -namespace nnet { - 
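// [Editor's note: illustrative sketch, not part of the original patch.]
// The separable 1D convolution in this header is realized as a depthwise
// stage (one filt_width-tap filter per channel) streamed into a pointwise
// (1x1) stage that mixes channels. Multiply count per output position:
//
//     standard conv:  filt_width * n_chan * n_filt
//     separable conv: filt_width * n_chan + n_chan * n_filt
//
// e.g. filt_width = 3, n_chan = 16, n_filt = 32 gives 1536 vs 560 multiplies,
// roughly a 2.7x reduction, at the cost of the intermediate depthwise_res
// FIFO between the two dataflow stages in separable_conv_1d_cl below.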
-template -void depthwise_conv_1d_encoded_cl(hls::stream &data, hls::stream &res, - typename CONFIG_T::weight_t weights[CONFIG_T::filt_width * CONFIG_T::n_chan], - typename CONFIG_T::bias_t biases[CONFIG_T::n_chan]) { - assert(CONFIG_T::pad_left == 0 && CONFIG_T::pad_right == 0); - - hls::stream data_window[CONFIG_T::filt_width * CONFIG_T::n_chan]; - const int win_depth = CONFIG_T::out_width; - for (unsigned i_out = 0; i_out < CONFIG_T::filt_width * CONFIG_T::n_chan; i_out++) { - #pragma HLS STREAM variable=data_window[i_out] depth=win_depth - } - - #pragma HLS ARRAY_PARTITION variable=CONFIG_T::pixels complete - - res_T res_pack; - PRAGMA_DATA_PACK(res_pack) - unsigned outputs_ready = 0; - - ap_uint pixel_idx[data_T::size / CONFIG_T::n_chan]; - #pragma HLS ARRAY_PARTITION variable=pixel_idx complete - -ReadInputWidth: - for (unsigned i_iw = 0; i_iw < CONFIG_T::in_width / (data_T::size / CONFIG_T::n_chan); i_iw++) { - #pragma HLS LOOP_FLATTEN - if (CONFIG_T::strategy == nnet::latency && data_T::size / CONFIG_T::n_chan == 1) { - #pragma HLS PIPELINE II=CONFIG_T::reuse_factor - } - compute_scaled_indices_1d(i_iw, pixel_idx); - compute_depthwise_output_encoded(data.read(), data_window, res, res_pack, outputs_ready, - weights, biases, pixel_idx); - } -} - -template -void depthwise_conv_1d_buffer_cl(hls::stream &data, hls::stream &res, - typename CONFIG_T::weight_t weights[CONFIG_T::filt_width * CONFIG_T::n_chan], - typename CONFIG_T::bias_t biases[CONFIG_T::n_chan]) { - assert(CONFIG_T::pad_left == 0 && CONFIG_T::pad_right == 0); - -ReadInputWidth: - for (unsigned i_iw = 0; i_iw < CONFIG_T::in_width; i_iw++) { - #pragma HLS LOOP_FLATTEN - if (CONFIG_T::strategy == nnet::latency) { - #pragma HLS PIPELINE II=CONFIG_T::reuse_factor - } - compute_depthwise_output_buffer_1d(data.read(), res, weights, biases); - } -} - -template -void depthwise_conv_1d_cl(hls::stream &data, hls::stream &res, - typename CONFIG_T::weight_t weights[CONFIG_T::filt_width * CONFIG_T::n_chan], - typename CONFIG_T::bias_t biases[CONFIG_T::n_chan]) { - #pragma HLS inline recursive - switch (CONFIG_T::implementation) { - case conv_implementation::linebuffer: - depthwise_conv_1d_buffer_cl(data, res, weights, biases); - break; - case conv_implementation::encoded: - depthwise_conv_1d_encoded_cl(data, res, weights, biases); - break; - } -} - -template -void pointwise_conv_1d_cl(hls::stream &data, hls::stream &res, - typename CONFIG_T::weight_t weights[CONFIG_T::n_chan * CONFIG_T::n_filt], - typename CONFIG_T::bias_t biases[CONFIG_T::n_filt]) { - assert(CONFIG_T::pad_left == 0 && CONFIG_T::pad_right == 0); - assert(CONFIG_T::filt_width == 1); - - #pragma HLS ARRAY_PARTITION variable=weights complete - #pragma HLS ARRAY_PARTITION variable=biases complete - -ReadInputWidth: - for (unsigned i_iw = 0; i_iw < CONFIG_T::in_width / (data_T::size / CONFIG_T::n_chan); i_iw++) { - if (CONFIG_T::strategy == nnet::latency && data_T::size / CONFIG_T::n_chan == 1) { - #pragma HLS PIPELINE II=CONFIG_T::reuse_factor - } - if (i_iw % CONFIG_T::stride_width == 0) { - pointwise_mult_buffer(data.read(), res, weights, biases); - } else { - data.read(); - } - } -} - -template -void separable_conv_1d_cl(hls::stream &data, hls::stream &res, - typename CONFIG_T::depthwise_config::weight_t - depthwise_weights[CONFIG_T::depthwise_config::filt_width * CONFIG_T::depthwise_config::n_chan], - typename CONFIG_T::pointwise_config::weight_t - pointwise_weights[CONFIG_T::pointwise_config::n_chan * CONFIG_T::pointwise_config::n_filt], - typename 
CONFIG_T::depthwise_config::bias_t depthwise_biases[CONFIG_T::depthwise_config::n_chan], - typename CONFIG_T::pointwise_config::bias_t pointwise_biases[CONFIG_T::pointwise_config::n_filt]) { - #pragma HLS DATAFLOW - - hls::stream depthwise_res; - unsigned res_depth = CONFIG_T::depthwise_config::out_width; - #pragma HLS STREAM variable=depthwise_res depth=res_depth - - depthwise_conv_1d_cl(data, depthwise_res, depthwise_weights, - depthwise_biases); - pointwise_conv_1d_cl(depthwise_res, res, pointwise_weights, - pointwise_biases); -} - -} // namespace nnet -#endif diff --git a/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_sepconv2d_stream.h b/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_sepconv2d_stream.h deleted file mode 100644 index d56ed6d..0000000 --- a/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_sepconv2d_stream.h +++ /dev/null @@ -1,143 +0,0 @@ -#ifndef NNET_SEPARABLE_CONV2D_STREAM_H_ -#define NNET_SEPARABLE_CONV2D_STREAM_H_ - -#include "hls_stream.h" -#include "nnet_common.h" -#include "nnet_conv2d_stream.h" -#include "nnet_sepconv_stream.h" -#include "nnet_types.h" - -namespace nnet { - -template -void depthwise_conv_2d_encoded_cl( - hls::stream &data, hls::stream &res, - typename CONFIG_T::weight_t weights[CONFIG_T::filt_height * CONFIG_T::filt_width * CONFIG_T::n_chan], - typename CONFIG_T::bias_t biases[CONFIG_T::n_chan]) { - assert(CONFIG_T::pad_top == 0 && CONFIG_T::pad_bottom == 0 && CONFIG_T::pad_left == 0 && CONFIG_T::pad_right == 0); - assert(CONFIG_T::filt_height == CONFIG_T::filt_width); - - hls::stream data_window[CONFIG_T::filt_height * CONFIG_T::filt_width * CONFIG_T::n_chan]; - const int win_depth = CONFIG_T::filt_height * CONFIG_T::out_width; - for (unsigned i_out = 0; i_out < CONFIG_T::filt_height * CONFIG_T::filt_width * CONFIG_T::n_chan; i_out++) { - #pragma HLS STREAM variable=data_window[i_out] depth=win_depth - } - - #pragma HLS ARRAY_PARTITION variable=CONFIG_T::pixels complete - - res_T res_pack; - PRAGMA_DATA_PACK(res_pack) - unsigned outputs_ready = 0; - - ap_uint pixel_idx[data_T::size / CONFIG_T::n_chan]; - #pragma HLS ARRAY_PARTITION variable=pixel_idx complete - -ReadInputHeight: - for (unsigned i_ih = 0; i_ih < CONFIG_T::in_height; i_ih++) { - ReadInputWidth: - for (unsigned i_iw = 0; i_iw < CONFIG_T::in_width / (data_T::size / CONFIG_T::n_chan); i_iw++) { - #pragma HLS LOOP_FLATTEN - if (CONFIG_T::strategy == nnet::latency && data_T::size / CONFIG_T::n_chan == 1) { - #pragma HLS PIPELINE II=CONFIG_T::reuse_factor - } - compute_scaled_indices_2d(i_ih, i_iw, pixel_idx); - compute_depthwise_output_encoded(data.read(), data_window, res, res_pack, outputs_ready, - weights, biases, pixel_idx); - } - } -} - -// Line Buffer Implementation (Phil's) -template -void depthwise_conv_2d_buffer_cl( - hls::stream &data, hls::stream &res, - typename CONFIG_T::weight_t weights[CONFIG_T::filt_height * CONFIG_T::filt_width * CONFIG_T::n_chan], - typename CONFIG_T::bias_t biases[CONFIG_T::n_chan]) { - assert(CONFIG_T::pad_top == 0 && CONFIG_T::pad_bottom == 0 && CONFIG_T::pad_left == 0 && CONFIG_T::pad_right == 0); - - static ap_shift_reg line_buffer[CONFIG_T::filt_height - 1] - [CONFIG_T::n_chan]; - #pragma HLS ARRAY_PARTITION variable = line_buffer complete dim = 2 - -ReadInputHeight: - for (unsigned i_ih = 0; i_ih < CONFIG_T::in_height; i_ih++) { - ReadInputWidth: - for (unsigned i_iw = 0; i_iw < CONFIG_T::in_width; i_iw++) { - #pragma HLS LOOP_FLATTEN - if (CONFIG_T::strategy == nnet::latency) { - #pragma HLS PIPELINE II=CONFIG_T::reuse_factor - } - if 
(CONFIG_T::filt_height > 1) { - compute_depthwise_output_buffer_2d(data.read(), line_buffer, res, weights, biases); - } else { - compute_depthwise_output_buffer_1d(data.read(), res, weights, biases); - } - } - } -} - -template -void depthwise_conv_2d_cl( - hls::stream &data, hls::stream &res, - typename CONFIG_T::weight_t weights[CONFIG_T::filt_height * CONFIG_T::filt_width * CONFIG_T::n_chan], - typename CONFIG_T::bias_t biases[CONFIG_T::n_chan]) { - #pragma HLS inline recursive - switch (CONFIG_T::implementation) { - case conv_implementation::linebuffer: - depthwise_conv_2d_buffer_cl(data, res, weights, biases); - break; - case conv_implementation::encoded: - depthwise_conv_2d_encoded_cl(data, res, weights, biases); - break; - } -} - -template -void pointwise_conv_2d_cl(hls::stream &data, hls::stream &res, - typename CONFIG_T::weight_t weights[CONFIG_T::n_chan * CONFIG_T::n_filt], - typename CONFIG_T::bias_t biases[CONFIG_T::n_filt]) { - assert(CONFIG_T::pad_top == 0 && CONFIG_T::pad_bottom == 0 && CONFIG_T::pad_left == 0 && CONFIG_T::pad_right == 0); - assert(CONFIG_T::filt_height == 1 && CONFIG_T::filt_width == 1); - - #pragma HLS ARRAY_PARTITION variable=weights complete - #pragma HLS ARRAY_PARTITION variable=biases complete - -ReadInputHeight: - for (unsigned i_ih = 0; i_ih < CONFIG_T::in_height; i_ih++) { - ReadInputWidth: - for (unsigned i_iw = 0; i_iw < CONFIG_T::in_width / (data_T::size / CONFIG_T::n_chan); i_iw++) { - if (CONFIG_T::strategy == nnet::latency && data_T::size / CONFIG_T::n_chan == 1) { - #pragma HLS PIPELINE II=CONFIG_T::reuse_factor - } - if (i_ih % CONFIG_T::stride_height == 0 && i_iw % CONFIG_T::stride_width == 0) { - pointwise_mult_buffer(data.read(), res, weights, biases); - } else { - data.read(); - } - } - } -} - -template -void separable_conv_2d_cl(hls::stream &data, hls::stream &res, - typename CONFIG_T::depthwise_config::weight_t - depthwise_weights[CONFIG_T::depthwise_config::filt_height * - CONFIG_T::depthwise_config::filt_width * CONFIG_T::depthwise_config::n_chan], - typename CONFIG_T::pointwise_config::weight_t - pointwise_weights[CONFIG_T::pointwise_config::n_chan * CONFIG_T::pointwise_config::n_filt], - typename CONFIG_T::depthwise_config::bias_t depthwise_biases[CONFIG_T::depthwise_config::n_chan], - typename CONFIG_T::pointwise_config::bias_t pointwise_biases[CONFIG_T::pointwise_config::n_filt]) { - #pragma HLS DATAFLOW - - hls::stream depthwise_res; - unsigned res_depth = CONFIG_T::depthwise_config::out_height * CONFIG_T::depthwise_config::out_width; - #pragma HLS STREAM variable=depthwise_res depth=res_depth - - depthwise_conv_2d_cl(data, depthwise_res, depthwise_weights, - depthwise_biases); - pointwise_conv_2d_cl(depthwise_res, res, pointwise_weights, - pointwise_biases); -} - -} // namespace nnet -#endif diff --git a/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_sepconv_stream.h b/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_sepconv_stream.h deleted file mode 100644 index 9c16de1..0000000 --- a/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_sepconv_stream.h +++ /dev/null @@ -1,306 +0,0 @@ -#ifndef NNET_SEPARABLE_CONV_STREAM_H_ -#define NNET_SEPARABLE_CONV_STREAM_H_ - -#include "hls_stream.h" -#include "nnet_common.h" -#include "nnet_conv_stream.h" - -namespace nnet { - -template -void depthwise_product(data_T data[CONFIG_T::kernel_size * CONFIG_T::n_chan], res_T res[CONFIG_T::n_chan], - typename CONFIG_T::weight_t weights[CONFIG_T::kernel_size * CONFIG_T::n_chan], - typename CONFIG_T::bias_t biases[CONFIG_T::n_chan]) { - #pragma HLS 
INLINE - - typename CONFIG_T::accum_t mult[CONFIG_T::kernel_size * CONFIG_T::n_chan]; - typename CONFIG_T::accum_t acc[CONFIG_T::n_chan]; - - // Use a function_instantiate in case it helps to explicitly optimize unchanging weights/biases - #pragma HLS function_instantiate variable=weights - - #pragma HLS PIPELINE II=CONFIG_T::reuse_factor - - #pragma HLS ARRAY_PARTITION variable=mult complete - - #pragma HLS ALLOCATION operation instances=mul limit=CONFIG_T::multiplier_limit - -// Do the matrix-multiply -Product: - for (int ii = 0; ii < CONFIG_T::kernel_size * CONFIG_T::n_chan; ii++) { - #pragma HLS UNROLL - mult[ii] = CONFIG_T::mult_config::template product::product( - data[ii], weights[ii]); - } - -// Initialize accumulator with input biases -ResetAccum: - for (int iacc = 0; iacc < CONFIG_T::n_chan; iacc++) { - #pragma HLS UNROLL - acc[iacc] = (typename CONFIG_T::accum_t)biases[iacc]; - } - -// Accumulate multiplication result -Accum1: - for (int ii = 0; ii < CONFIG_T::kernel_size; ii++) { - Accum2: - for (int jj = 0; jj < CONFIG_T::n_chan; jj++) { - int index = ii * CONFIG_T::n_chan + jj; - acc[jj] += mult[index]; - } - } - -// Cast to "res_t" type -Result: - for (int ires = 0; ires < CONFIG_T::n_chan; ires++) { - #pragma HLS UNROLL - res[ires] = cast(acc[ires]); - } -} - -template -void depthwise_mult_buffer(hls::stream data_window[CONFIG_T::kernel_size * CONFIG_T::n_chan], - res_T &res_pack, hls::stream &res_stream, unsigned &outputs_ready, - typename CONFIG_T::weight_t weights[CONFIG_T::kernel_size * CONFIG_T::n_chan], - typename CONFIG_T::bias_t biases[CONFIG_T::n_chan]) { - #pragma HLS INLINE - - typename data_T::value_type data[CONFIG_T::kernel_size * CONFIG_T::n_chan]; - #pragma HLS ARRAY_PARTITION variable=data complete - typename res_T::value_type res[CONFIG_T::n_chan]; - #pragma HLS ARRAY_PARTITION variable=res complete - -InitData: - for (int id = 0; id < CONFIG_T::kernel_size * CONFIG_T::n_chan; id++) { - #pragma HLS UNROLL - data[id] = data_window[id].read(); - } - - #pragma HLS INLINE recursive - if (CONFIG_T::strategy == nnet::latency) { - depthwise_product(data, res, weights, biases); - } else { - assert("Resource strategy for DepthwiseConv2D is not supported." 
&& false); - } - -CastLoop: - for (unsigned jj = 0; jj < CONFIG_T::n_chan; jj++) { - #pragma HLS UNROLL - if (res_T::size / CONFIG_T::n_chan == 1) { - res_pack[jj] = res[jj]; - } else { - res_pack[outputs_ready * CONFIG_T::n_chan + jj] = res[jj]; - } - } - - if (res_T::size / CONFIG_T::n_chan == 1) { - res_stream.write(res_pack); - } else { - if (outputs_ready == (res_T::size / CONFIG_T::n_chan) - 1) { - res_stream.write(res_pack); - outputs_ready = 0; - } else { - outputs_ready++; - } - } -} - -template -void compute_depthwise_output_encoded( - const data_T &in_elem, hls::stream data_window[CONFIG_T::kernel_size * CONFIG_T::n_chan], - hls::stream &res, res_T &res_pack, unsigned &outputs_ready, - typename CONFIG_T::weight_t weights[CONFIG_T::kernel_size * CONFIG_T::n_chan], - typename CONFIG_T::bias_t biases[CONFIG_T::n_chan], ap_uint *pixel_idx) { - #pragma HLS INLINE - -MultLoop: - for (unsigned p = 0; p < data_T::size / CONFIG_T::n_chan; p++) { - #pragma HLS PIPELINE II=CONFIG_T::reuse_factor - CopyDataFilt: - for (unsigned f = 0; f < CONFIG_T::kernel_size; f++) { - #pragma HLS UNROLL - CopyDataChan: - for (unsigned c = 0; c < CONFIG_T::n_chan; c++) { - #pragma HLS UNROLL - if (pixel_idx[p][f]) - data_window[f * CONFIG_T::n_chan + c].write(in_elem[p * CONFIG_T::n_chan + c]); - } - } - if (pixel_idx[p][CONFIG_T::kernel_size - 1]) { - depthwise_mult_buffer(data_window, res_pack, res, outputs_ready, weights, biases); - } - } -} - -template -void pointwise_mult_buffer(const data_T &data_pack, hls::stream &res_stream, - typename CONFIG_T::weight_t weights[CONFIG_T::n_chan * CONFIG_T::n_filt], - typename CONFIG_T::bias_t biases[CONFIG_T::n_filt]) { - #pragma HLS INLINE - - typename data_T::value_type data[CONFIG_T::n_chan]; - #pragma HLS ARRAY_PARTITION variable=data complete - - typename res_T::value_type res[CONFIG_T::n_filt]; - #pragma HLS ARRAY_PARTITION variable=res complete - - res_T res_pack; - PRAGMA_DATA_PACK(res_pack) - -InitData: - for (int id = 0; id < CONFIG_T::n_chan; id++) { - #pragma HLS UNROLL - data[id] = data_pack[id]; - } - - #pragma HLS INLINE recursive - if (CONFIG_T::strategy == nnet::latency) { - dense_latency( - data, res, weights, biases); - } else { - dense_resource( - data, res, weights, biases); - } - -CastLoop: - for (unsigned jj = 0; jj < CONFIG_T::n_filt; jj++) { - #pragma HLS UNROLL - res_pack[jj] = res[jj]; - } - - res_stream.write(res_pack); -} - -// Line Buffer Implementation (Phil's) -template -void compute_depthwise_output_buffer_1d(const data_T &in_elem, hls::stream &res_stream, - typename CONFIG_T::weight_t weights[CONFIG_T::kernel_size * CONFIG_T::n_chan], - typename CONFIG_T::bias_t biases[CONFIG_T::n_chan]) { - #pragma HLS INLINE - - // Thresholds - const static int lShiftX = CONFIG_T::filt_width - 1; - - // Counters - static int pX = 0; - static int sX = 0; - - static typename data_T::value_type kernel_data[CONFIG_T::filt_width * CONFIG_T::n_chan]; - #pragma HLS ARRAY_PARTITION variable=kernel_data complete - - typename res_T::value_type res_out[CONFIG_T::n_chan]; - #pragma HLS ARRAY_PARTITION variable=res_out complete dim = 0 - - res_T res_pack; - PRAGMA_DATA_PACK(res_pack) - - // Add pixel to buffer - nnet::kernel_shift_1d(in_elem, kernel_data); - - // Check to see if we have a full kernel - if ((sX - lShiftX) == 0 && pX > lShiftX - 1) { - // Dense multiply - #pragma HLS INLINE recursive - if (CONFIG_T::strategy == nnet::latency) { - depthwise_product(kernel_data, res_out, - weights, biases); - } else { - assert("Resource strategy for 
DepthwiseConv1D is not supported." && false); - } - - // Pack output - CastLoop: - for (unsigned i_ic = 0; i_ic < CONFIG_T::n_filt; i_ic++) { - #pragma HLS UNROLL - res_pack[i_ic] = res_out[i_ic]; - } - - // Write output to stream when output ready - res_stream.write(res_pack); - } - - // Pointer Housekeeping - if (pX + 1 == CONFIG_T::in_width) // Includes padding, end of line (padded) - { - pX = 0; - sX = 0; - } else { - pX = pX + 1; - sX = ((sX - lShiftX) == 0) ? sX - CONFIG_T::stride_width + 1 : sX + 1; - } -} - -template -void compute_depthwise_output_buffer_2d(const data_T &in_elem, - ap_shift_reg - line_buffer[MAX(CONFIG_T::filt_height - 1, 1)][CONFIG_T::n_chan], - hls::stream &res_stream, - typename CONFIG_T::weight_t weights[CONFIG_T::kernel_size * CONFIG_T::n_chan], - typename CONFIG_T::bias_t biases[CONFIG_T::n_chan]) { - #pragma HLS INLINE - - // Thresholds - const static int lShiftX = CONFIG_T::filt_width - 1; - const static int lShiftY = CONFIG_T::filt_height - 1; - - // counters - static int pX = 0; // pixel X - static int pY = 0; // pixel Y - - static int sX = 0; // stride X - static int sY = 0; // stride Y - - static typename data_T::value_type kernel_data[CONFIG_T::filt_height * CONFIG_T::filt_width * CONFIG_T::n_chan]; - #pragma HLS ARRAY_PARTITION variable=kernel_data complete - - typename res_T::value_type res_out[CONFIG_T::n_chan]; - #pragma HLS ARRAY_PARTITION variable=res_out complete dim = 0 - - res_T res_pack; - PRAGMA_DATA_PACK(res_pack) - - // Add pixel to buffer - nnet::shift_line_buffer(in_elem, line_buffer, kernel_data); - - // Check to see if we have a full kernel - if ((sX - lShiftX) == 0 && (sY - lShiftY) == 0 && pY > lShiftY - 1 && pX > lShiftX - 1) { - // Dense multiply - #pragma HLS INLINE recursive - if (CONFIG_T::strategy == nnet::latency) { - depthwise_product(kernel_data, res_out, - weights, biases); - } else { - assert("Resource strategy for DepthwiseConv2D is not supported." && false); - } - - // Pack output - CastLoop: - for (unsigned i_ic = 0; i_ic < CONFIG_T::n_filt; i_ic++) { - #pragma HLS UNROLL - res_pack[i_ic] = res_out[i_ic]; - } - - // Write output to stream when output ready - res_stream.write(res_pack); - } - - // Pointer Housekeeping - if (pX + 1 == CONFIG_T::in_width) // Includes padding, end of line (padded) - { - pX = 0; - sX = 0; - if (pY + 1 == CONFIG_T::in_height) { // Reached bottom of image - pY = 0; - sY = 0; - } else { - pY = pY + 1; - sY = ((sY - lShiftY) == 0) ? sY - CONFIG_T::stride_height + 1 : sY + 1; - } - } else { - pX = pX + 1; - sX = ((sX - lShiftX) == 0) ? 
sX - CONFIG_T::stride_width + 1 : sX + 1; - } -} - -} // namespace nnet -#endif diff --git a/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_stream.h b/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_stream.h deleted file mode 100644 index 900db16..0000000 --- a/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_stream.h +++ /dev/null @@ -1,207 +0,0 @@ - -#ifndef NNET_STREAM_H -#define NNET_STREAM_H - -#include "hls_stream.h" -#include "nnet_common.h" - -namespace nnet { - -struct broadcast_config { - static const unsigned in_height = 1; - static const unsigned in_width = 1; - static const unsigned in_chan = 3; - static const unsigned out_height = 2; - static const unsigned out_width = 2; - static const unsigned out_chan = 3; -}; - -template -void clone_stream(hls::stream &data, hls::stream &res1, hls::stream &res2) { -CloneLoop: - for (int i = 0; i < N / data_T::size; i++) { - #pragma HLS PIPELINE - - data_T in_data = data.read(); - res_T out_data1; - res_T out_data2; - PRAGMA_DATA_PACK(out_data1) - PRAGMA_DATA_PACK(out_data2) - - ClonePack: - for (int j = 0; j < data_T::size; j++) { - #pragma HLS UNROLL - out_data1[j] = in_data[j]; - out_data2[j] = in_data[j]; - } - - res1.write(out_data1); - res2.write(out_data2); - } -} - -template -void clone_stream(hls::stream &data, hls::stream &res1, hls::stream &res2, hls::stream &res3) { -CloneLoop: - for (int i = 0; i < N / data_T::size; i++) { - #pragma HLS PIPELINE - - data_T in_data = data.read(); - res_T out_data1; - res_T out_data2; - res_T out_data3; - PRAGMA_DATA_PACK(out_data1) - PRAGMA_DATA_PACK(out_data2) - PRAGMA_DATA_PACK(out_data3) - - ClonePack: - for (int j = 0; j < data_T::size; j++) { - #pragma HLS UNROLL - out_data1[j] = in_data[j]; - out_data2[j] = in_data[j]; - out_data3[j] = in_data[j]; - } - - res1.write(out_data1); - res2.write(out_data2); - res3.write(out_data3); - } -} - -template void repack_stream(hls::stream &data, hls::stream &res) { - if (data_T::size == res_T::size) { - for (int i = 0; i < N / data_T::size; i++) { - #pragma HLS PIPELINE - - data_T in_data = data.read(); - res_T out_data; - PRAGMA_DATA_PACK(out_data) - - for (int j = 0; j < data_T::size; j++) { - #pragma HLS UNROLL - out_data[j] = in_data[j]; - } - - res.write(out_data); - } - } else if (data_T::size > res_T::size) { - constexpr unsigned pack_diff = data_T::size / res_T::size; - for (int i = 0; i < N / data_T::size; i++) { - if (N / data_T::size > 1) { - #pragma HLS PIPELINE - } - - data_T in_data = data.read(); - res_T out_data; - PRAGMA_DATA_PACK(out_data) - - for (int j = 0; j < pack_diff; j++) { - #pragma HLS PIPELINE - - res_T out_data; - for (int k = 0; k < res_T::size; k++) { - #pragma HLS UNROLL - out_data[k] = in_data[j * res_T::size + k]; - } - res.write(out_data); - } - } - } else { // data_T::size < res_T::size - res_T out_data; - constexpr unsigned pack_diff = res_T::size / data_T::size; - unsigned pack_cnt = 0; - for (int i = 0; i < N / data_T::size; i++) { - #pragma HLS PIPELINE - - data_T in_data = data.read(); - for (int j = 0; j < data_T::size; j++) { - #pragma HLS UNROLL - out_data[pack_cnt * data_T::size + j] = in_data[j]; - } - - if (pack_cnt == pack_diff - 1) { - res.write(out_data); - pack_cnt = 0; - } else { - pack_cnt++; - } - } - } -} - -template -void broadcast_stream_1x1xC(hls::stream &data, hls::stream &res) { - assert(CONFIG_T::in_height == 1 && CONFIG_T::in_width == 1 && CONFIG_T::in_chan == CONFIG_T::out_chan); - int n_dupl = (CONFIG_T::out_height * CONFIG_T::out_width * CONFIG_T::out_chan) / - (CONFIG_T::in_height * 
CONFIG_T::in_width * CONFIG_T::in_chan); -BroadcastLoop: - for (int i = 0; i < CONFIG_T::in_height * CONFIG_T::in_width * CONFIG_T::in_chan / data_T::size; i++) { - #pragma HLS PIPELINE - data_T in_data = data.read(); - for (int j = 0; j < n_dupl; j++) { - #pragma HLS PIPELINE - res_T out_data; - PRAGMA_DATA_PACK(out_data) - for (int k = 0; k < res_T::size; k++) { - #pragma HLS UNROLL - out_data[k] = in_data[k]; - } - res.write(out_data); - } - } -} - -template -void broadcast_stream_HxWx1(hls::stream &data, hls::stream &res) { - assert(CONFIG_T::in_chan == 1 && CONFIG_T::in_height == CONFIG_T::out_height && - CONFIG_T::in_width == CONFIG_T::out_width); -BroadcastLoop: - for (int i = 0; i < CONFIG_T::in_height * CONFIG_T::in_width * CONFIG_T::in_chan / data_T::size; i++) { - #pragma HLS PIPELINE - data_T in_data = data.read(); - res_T out_data; - PRAGMA_DATA_PACK(out_data) - for (int k = 0; k < res_T::size; k++) { - #pragma HLS UNROLL - out_data[k] = in_data[0]; - } - res.write(out_data); - } -} - -template -void broadcast_stream(hls::stream &data, hls::stream &res) { - if (CONFIG_T::in_height == 1 && CONFIG_T::in_width == 1 && CONFIG_T::in_chan == CONFIG_T::out_chan) { - broadcast_stream_1x1xC(data, res); - } else if (CONFIG_T::in_chan == 1 && CONFIG_T::in_height == CONFIG_T::out_height && - CONFIG_T::in_width == CONFIG_T::out_width) { - broadcast_stream_HxWx1(data, res); - } -} - -template -void transpose_2d(hls::stream &data, hls::stream &res) { - typename data_T::value_type data_array[CONFIG_T::height * CONFIG_T::width]; - #pragma HLS ARRAY_PARTITION variable=data_array complete - - for (int i = 0; i < CONFIG_T::height * CONFIG_T::width / data_T::size; i++) { - #pragma HLS PIPELINE - data_T in_data = data.read(); - for (int j = 0; j < data_T::size; j++) { - data_array[i * data_T::size + j] = typename data_T::value_type(in_data[j]); - } - } - - for (int i = 0; i < CONFIG_T::height * CONFIG_T::width / res_T::size; i++) { - #pragma HLS PIPELINE - res_T out_data; - PRAGMA_DATA_PACK(out_data) - for (int j = 0; j < res_T::size; j++) { - out_data[j] = typename res_T::value_type(data_array[j * data_T::size + i]); - } - res.write(out_data); - } -} -} // namespace nnet - -#endif diff --git a/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_types.h b/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_types.h deleted file mode 100644 index 0fcac13..0000000 --- a/hls4ml/hls4ml/templates/vivado/nnet_utils/nnet_types.h +++ /dev/null @@ -1,64 +0,0 @@ -#ifndef NNET_TYPES_H_ -#define NNET_TYPES_H_ - -#include -#include -#include - -namespace nnet { - -// Fixed-size array -template struct array { - typedef T value_type; - static const unsigned size = N; - - T data[N]; - - T &operator[](size_t pos) { return data[pos]; } - - const T &operator[](size_t pos) const { return data[pos]; } - - array &operator=(const array &other) { - if (&other == this) - return *this; - - assert(N == other.size && "Array sizes must match."); - - for (unsigned i = 0; i < N; i++) { - #pragma HLS UNROLL - data[i] = other[i]; - } - return *this; - } -}; - -// Generic lookup-table implementation, for use in approximations of math functions -template class lookup_table { - public: - lookup_table(T from, T to) : range_start(from), range_end(to), base_div(ap_uint<16>(N) / T(to - from)) { - T step = (range_end - range_start) / ap_uint<16>(N); - for (size_t i = 0; i < N; i++) { - T num = range_start + ap_uint<16>(i) * step; - T sample = func(num); - samples[i] = sample; - } - } - - T operator()(T n) const { - int index = (n - range_start) * 
base_div;
-        if (index < 0)
-            index = 0;
-        else if (index > N - 1)
-            index = N - 1;
-        return samples[index];
-    }
-
-  private:
-    T samples[N];
-    const T range_start, range_end;
-    ap_fixed<20, 16> base_div;
-};
-
-} // namespace nnet
-
-#endif
diff --git a/hls4ml/hls4ml/templates/vivado/vivado_synth.tcl b/hls4ml/hls4ml/templates/vivado/vivado_synth.tcl
deleted file mode 100644
index 4634b16..0000000
--- a/hls4ml/hls4ml/templates/vivado/vivado_synth.tcl
+++ /dev/null
@@ -1,6 +0,0 @@
-set tcldir [file dirname [info script]]
-source [file join $tcldir project.tcl]
-
-add_files ${project_name}_prj/solution1/syn/vhdl
-synth_design -top ${project_name} -part $part
-report_utilization -file vivado_synth.rpt
diff --git a/hls4ml/hls4ml/templates/vivado_accelerator/alveo/krnl_rtl_src/krnl_rtl_axi_read_master.sv b/hls4ml/hls4ml/templates/vivado_accelerator/alveo/krnl_rtl_src/krnl_rtl_axi_read_master.sv
deleted file mode 100644
index db64a31..0000000
--- a/hls4ml/hls4ml/templates/vivado_accelerator/alveo/krnl_rtl_src/krnl_rtl_axi_read_master.sv
+++ /dev/null
@@ -1,278 +0,0 @@
-/**
-* Copyright (C) 2019-2021 Xilinx, Inc
-*
-* Licensed under the Apache License, Version 2.0 (the "License"). You may
-* not use this file except in compliance with the License. A copy of the
-* License is located at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-* License for the specific language governing permissions and limitations
-* under the License.
-*/
-
-///////////////////////////////////////////////////////////////////////////////
-// Description: This is a multi-threaded AXI4 read master. Each channel will
-// issue commands with a different ID. As a result data may arrive out of
-// order. The amount of data requested is equal to the ctrl_length variable.
-// Prog full is set and sampled such that the FIFO will never overflow. Thus
-// rready can always be asserted for better timing.
-///////////////////////////////////////////////////////////////////////////////
-
-`default_nettype none
-
-module krnl_rtl_axi_read_master #(
-  parameter integer C_ID_WIDTH        = 0,   // Must be >= $clog2(C_NUM_CHANNELS)
-  parameter integer C_ADDR_WIDTH      = 64,
-  parameter integer C_DATA_WIDTH      = 32,
-  parameter integer C_NUM_CHANNELS    = 1,   // Only 2 tested.
-  parameter integer C_LENGTH_WIDTH    = 32,
-  parameter integer C_BURST_LEN       = 256, // Max AXI burst length for read commands
-  parameter integer C_LOG_BURST_LEN   = 8,
-  parameter integer C_MAX_OUTSTANDING = 3
-)
-(
-  // System signals
-  input wire aclk,
-  input wire areset,
-  // Control signals
-  input wire ctrl_start,
-  output wire ctrl_done,
-  input wire [C_NUM_CHANNELS-1:0][C_ADDR_WIDTH-1:0] ctrl_offset,
-  input wire [C_LENGTH_WIDTH-1:0] ctrl_length,
-  input wire [C_NUM_CHANNELS-1:0] ctrl_prog_full,
-  // AXI4 master interface
-  output wire arvalid,
-  input wire arready,
-  output wire [C_ADDR_WIDTH-1:0] araddr,
-  output wire [C_ID_WIDTH-1:0] arid,
-  output wire [7:0] arlen,
-  output wire [2:0] arsize,
-  input wire rvalid,
-  output wire rready,
-  input wire [C_DATA_WIDTH - 1:0] rdata,
-  input wire rlast,
-  input wire [C_ID_WIDTH - 1:0] rid,
-  input wire [1:0] rresp,
-  // AXI4-Stream master interface, 1 interface per channel.
- output wire [C_NUM_CHANNELS-1:0] m_tvalid, - input wire [C_NUM_CHANNELS-1:0] m_tready, - output wire [C_NUM_CHANNELS-1:0][C_DATA_WIDTH-1:0] m_tdata, - output wire [C_NUM_CHANNELS-1:0] m_tlast -); - -timeunit 1ps; -timeprecision 1ps; - -/////////////////////////////////////////////////////////////////////////////// -// Local Parameters -/////////////////////////////////////////////////////////////////////////////// -localparam integer LP_MAX_OUTSTANDING_CNTR_WIDTH = $clog2(C_MAX_OUTSTANDING+1); -localparam integer LP_TRANSACTION_CNTR_WIDTH = C_LENGTH_WIDTH-C_LOG_BURST_LEN; - -/////////////////////////////////////////////////////////////////////////////// -// Variables -/////////////////////////////////////////////////////////////////////////////// -// Control logic -logic [C_NUM_CHANNELS-1:0] done = '0; -logic [LP_TRANSACTION_CNTR_WIDTH-1:0] num_full_bursts; -logic num_partial_bursts; -logic start = 1'b0; -logic [LP_TRANSACTION_CNTR_WIDTH-1:0] num_transactions; -logic has_partial_burst; -logic [C_LOG_BURST_LEN-1:0] final_burst_len; -logic single_transaction; -logic ar_idle = 1'b1; -logic ar_done; -// AXI Read Address Channel -logic fifo_stall; -logic arxfer; -logic arvalid_r = 1'b0; -logic [C_NUM_CHANNELS-1:0][C_ADDR_WIDTH-1:0] addr; -logic [C_ID_WIDTH-1:0] id = {C_ID_WIDTH{1'b1}}; -logic [LP_TRANSACTION_CNTR_WIDTH-1:0] ar_transactions_to_go; -logic ar_final_transaction; -logic [C_NUM_CHANNELS-1:0] incr_ar_to_r_cnt; -logic [C_NUM_CHANNELS-1:0] decr_ar_to_r_cnt; -logic [C_NUM_CHANNELS-1:0] stall_ar; -logic [C_NUM_CHANNELS-1:0][LP_MAX_OUTSTANDING_CNTR_WIDTH-1:0] outstanding_vacancy_count; -// AXI Data Channel -logic [C_NUM_CHANNELS-1:0] tvalid; -logic [C_NUM_CHANNELS-1:0][C_DATA_WIDTH-1:0] tdata; -logic [C_NUM_CHANNELS-1:0] tlast; -logic rxfer; -logic [C_NUM_CHANNELS-1:0] decr_r_transaction_cntr; -logic [C_NUM_CHANNELS-1:0][LP_TRANSACTION_CNTR_WIDTH-1:0] r_transactions_to_go; -logic [C_NUM_CHANNELS-1:0] r_final_transaction; -/////////////////////////////////////////////////////////////////////////////// -// Control Logic -/////////////////////////////////////////////////////////////////////////////// - -always @(posedge aclk) begin - for (int i = 0; i < C_NUM_CHANNELS; i++) begin - done[i] <= rxfer & rlast & (rid == i) & r_final_transaction[i] ? 1'b1 : - ctrl_done ? 1'b0 : done[i]; - end -end -assign ctrl_done = &done; - -// Determine how many full burst to issue and if there are any partial bursts. -assign num_full_bursts = ctrl_length[C_LOG_BURST_LEN+:C_LENGTH_WIDTH-C_LOG_BURST_LEN]; -assign num_partial_bursts = ctrl_length[0+:C_LOG_BURST_LEN] ? 1'b1 : 1'b0; - -always @(posedge aclk) begin - start <= ctrl_start; - num_transactions <= (num_partial_bursts == 1'b0) ? num_full_bursts - 1'b1 : num_full_bursts; - has_partial_burst <= num_partial_bursts; - final_burst_len <= ctrl_length[0+:C_LOG_BURST_LEN] - 1'b1; -end - -// Special case if there is only 1 AXI transaction. -assign single_transaction = (num_transactions == {LP_TRANSACTION_CNTR_WIDTH{1'b0}}) ? 1'b1 : 1'b0; - -/////////////////////////////////////////////////////////////////////////////// -// AXI Read Address Channel -/////////////////////////////////////////////////////////////////////////////// -assign arvalid = arvalid_r; -assign araddr = addr[id]; -assign arlen = ar_final_transaction || (start & single_transaction) ? 
final_burst_len : C_BURST_LEN - 1; -assign arsize = $clog2((C_DATA_WIDTH/8)); -assign arid = id; - -assign arxfer = arvalid & arready; -assign fifo_stall = ctrl_prog_full[id]; - -always @(posedge aclk) begin - if (areset) begin - arvalid_r <= 1'b0; - end - else begin - arvalid_r <= ~ar_idle & ~stall_ar[id] & ~arvalid_r & ~fifo_stall ? 1'b1 : - arready ? 1'b0 : arvalid_r; - end -end - -// When ar_idle, there are no transactions to issue. -always @(posedge aclk) begin - if (areset) begin - ar_idle <= 1'b1; - end - else begin - ar_idle <= start ? 1'b0 : - ar_done ? 1'b1 : - ar_idle; - end -end - -// each channel is assigned a different id. The transactions are interleaved. -always @(posedge aclk) begin - if (start) begin - id <= {C_ID_WIDTH{1'b1}}; - end - else begin - id <= arxfer ? id - 1'b1 : id; - end -end - - -// Increment to next address after each transaction is issued. -always @(posedge aclk) begin - for (int i = 0; i < C_NUM_CHANNELS; i++) begin - addr[i] <= ctrl_start ? ctrl_offset[i] : - arxfer && (id == i) ? addr[i] + C_BURST_LEN*C_DATA_WIDTH/8 : - addr[i]; - end -end - -// Counts down the number of transactions to send. -krnl_rtl_counter #( - .C_WIDTH ( LP_TRANSACTION_CNTR_WIDTH ) , - .C_INIT ( {LP_TRANSACTION_CNTR_WIDTH{1'b0}} ) -) -inst_ar_transaction_cntr ( - .clk ( aclk ) , - .clken ( 1'b1 ) , - .rst ( areset ) , - .load ( start ) , - .incr ( 1'b0 ) , - .decr ( arxfer && id == '0 ) , - .load_value ( num_transactions ) , - .count ( ar_transactions_to_go ) , - .is_zero ( ar_final_transaction ) -); - -assign ar_done = ar_final_transaction && arxfer && id == 1'b0; - -always_comb begin - for (int i = 0; i < C_NUM_CHANNELS; i++) begin - incr_ar_to_r_cnt[i] = rxfer & rlast & (rid == i); - decr_ar_to_r_cnt[i] = arxfer & (arid == i); - end -end - -// Keeps track of the number of outstanding transactions. Stalls -// when the value is reached so that the FIFO won't overflow. -krnl_rtl_counter #( - .C_WIDTH ( LP_MAX_OUTSTANDING_CNTR_WIDTH ) , - .C_INIT ( C_MAX_OUTSTANDING[0+:LP_MAX_OUTSTANDING_CNTR_WIDTH] ) -) -inst_ar_to_r_transaction_cntr[C_NUM_CHANNELS-1:0] ( - .clk ( aclk ) , - .clken ( 1'b1 ) , - .rst ( areset ) , - .load ( 1'b0 ) , - .incr ( incr_ar_to_r_cnt ) , - .decr ( decr_ar_to_r_cnt ) , - .load_value ( {LP_MAX_OUTSTANDING_CNTR_WIDTH{1'b0}} ) , - .count ( outstanding_vacancy_count ) , - .is_zero ( stall_ar ) -); - -/////////////////////////////////////////////////////////////////////////////// -// AXI Read Channel -/////////////////////////////////////////////////////////////////////////////// -assign m_tvalid = tvalid; -assign m_tdata = tdata; -assign m_tlast = tlast; - -always_comb begin - for (int i = 0; i < C_NUM_CHANNELS; i++) begin - tvalid[i] = rvalid && (rid == i); - tdata[i] = rdata; - tlast[i] = rlast; - end -end - -// rready can remain high for optimal timing because ar transactions are not issued -// unless there is enough space in the FIFO. 
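The throttling described in the comment above can be modeled as a small credit counter: each AR request consumes a credit, each fully drained burst (rlast) returns one, and issue stalls at zero so the data FIFO can never overflow. A minimal C++ model with hypothetical names; the RTL realizes this with the inst_ar_to_r_transaction_cntr instances above.

#include <cstdio>

int main() {
    int credits = 3;       // C_MAX_OUTSTANDING
    bool stall_ar = false;
    auto issue_ar = [&] { if (!stall_ar) { credits--; stall_ar = (credits == 0); } };
    auto retire_r = [&] { credits++; stall_ar = false; };

    issue_ar(); issue_ar(); issue_ar(); // three bursts in flight
    printf("stalled: %d\n", stall_ar);  // 1 -> no more AR until data drains
    retire_r();                         // one burst fully read out of the FIFO
    printf("stalled: %d\n", stall_ar);  // 0 -> issuing may resume
    return 0;
}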
-assign rready = 1'b1; -assign rxfer = rready & rvalid; - -always_comb begin - for (int i = 0; i < C_NUM_CHANNELS; i++) begin - decr_r_transaction_cntr[i] = rxfer & rlast & (rid == i); - end -end -krnl_rtl_counter #( - .C_WIDTH ( LP_TRANSACTION_CNTR_WIDTH ) , - .C_INIT ( {LP_TRANSACTION_CNTR_WIDTH{1'b0}} ) -) -inst_r_transaction_cntr[C_NUM_CHANNELS-1:0] ( - .clk ( aclk ) , - .clken ( 1'b1 ) , - .rst ( areset ) , - .load ( start ) , - .incr ( 1'b0 ) , - .decr ( decr_r_transaction_cntr ) , - .load_value ( num_transactions ) , - .count ( r_transactions_to_go ) , - .is_zero ( r_final_transaction ) -); - - -endmodule : krnl_rtl_axi_read_master - -`default_nettype wire diff --git a/hls4ml/hls4ml/templates/vivado_accelerator/alveo/krnl_rtl_src/krnl_rtl_axi_write_master.sv b/hls4ml/hls4ml/templates/vivado_accelerator/alveo/krnl_rtl_src/krnl_rtl_axi_write_master.sv deleted file mode 100644 index 5c5a974..0000000 --- a/hls4ml/hls4ml/templates/vivado_accelerator/alveo/krnl_rtl_src/krnl_rtl_axi_write_master.sv +++ /dev/null @@ -1,276 +0,0 @@ -/** -* Copyright (C) 2019-2021 Xilinx, Inc -* -* Licensed under the Apache License, Version 2.0 (the "License"). You may -* not use this file except in compliance with the License. A copy of the -* License is located at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -* License for the specific language governing permissions and limitations -* under the License. -*/ - -//////////////////////////////////////////////////////////// -// Description: AXI4 Write Master. Takes a stream of data in, -// appends address information and sends it out. 
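This write master shares its burst bookkeeping with the read master above: ctrl_length beats are split into full bursts of C_BURST_LEN plus at most one partial burst, and the down-counters are loaded with the burst count minus one. A short C++ sketch of that arithmetic, using assumed example values:

#include <cstdio>

int main() {
    const unsigned C_BURST_LEN = 256, C_LOG_BURST_LEN = 8;
    unsigned ctrl_length = 1000; // total data beats to transfer (example value)

    unsigned num_full_bursts   = ctrl_length >> C_LOG_BURST_LEN;         // 3
    bool     has_partial_burst = (ctrl_length & (C_BURST_LEN - 1)) != 0; // true: 232 leftover beats
    unsigned num_transactions  = has_partial_burst ? num_full_bursts
                                                   : num_full_bursts - 1; // counter load value
    // AxLEN is beats-minus-one; on an exact multiple the 8-bit wrap (0 - 1) gives 255,
    // which is again a full-burst AxLEN, so the same mux covers both cases.
    unsigned final_burst_len = ((ctrl_length & (C_BURST_LEN - 1)) - 1u) & 0xFF;

    printf("bursts=%u (counter load %u), last AxLEN=%u\n",
           num_full_bursts + has_partial_burst, num_transactions, final_burst_len);
    return 0;
}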
-`default_nettype none - -module krnl_rtl_axi_write_master #( - parameter integer C_ADDR_WIDTH = 64, - parameter integer C_DATA_WIDTH = 32, - parameter integer C_MAX_LENGTH_WIDTH = 32, - parameter integer C_BURST_LEN = 256, - parameter integer C_LOG_BURST_LEN = 8 -) -( - // Control interface - input wire ctrl_start, - input wire [C_ADDR_WIDTH-1:0] ctrl_offset, - input wire [C_MAX_LENGTH_WIDTH-1:0] ctrl_length, - output wire ctrl_done, - - // AXI4-Stream interface - input wire s_tvalid, - input wire [C_DATA_WIDTH-1:0] s_tdata, - output wire s_tready, - - // AXI Interface - input wire aclk, - input wire areset, - - output wire [C_ADDR_WIDTH-1:0] awaddr, - output wire [7:0] awlen, - output wire [2:0] awsize, - output wire awvalid, - input wire awready, - - output wire [C_DATA_WIDTH-1:0] wdata, - output wire [C_DATA_WIDTH/8-1:0] wstrb, - output wire wlast, - output wire wvalid, - input wire wready, - - input wire [1:0] bresp, - input wire bvalid, - output wire bready -); - -timeunit 1ps; -timeprecision 1ps; - -///////////////////////////////////////////////////////////////////////////// -// Local Parameters -///////////////////////////////////////////////////////////////////////////// -localparam integer LP_LOG_MAX_W_TO_AW = 8; // Allow up to 256 outstanding w to aw transactions -localparam integer LP_TRANSACTION_CNTR_WIDTH = C_MAX_LENGTH_WIDTH-C_LOG_BURST_LEN; - -///////////////////////////////////////////////////////////////////////////// -// Variables -///////////////////////////////////////////////////////////////////////////// -logic [LP_TRANSACTION_CNTR_WIDTH-1:0] num_full_bursts; -logic num_partial_bursts; -logic start = 1'b0; -logic [LP_TRANSACTION_CNTR_WIDTH-1:0] num_transactions; -logic has_partial_burst; -logic [C_LOG_BURST_LEN-1:0] final_burst_len; -logic single_transaction; - -logic wxfer; // Unregistered write data transfer -logic wfirst = 1'b1; -logic load_burst_cntr; -logic [C_LOG_BURST_LEN-1:0] wxfers_to_go; // Used for simulation debug -logic [LP_TRANSACTION_CNTR_WIDTH-1:0] w_transactions_to_go; -logic w_final_transaction; -logic w_almost_final_transaction = 1'b0; - -logic awxfer; -logic awvalid_r = 1'b0; -logic [C_ADDR_WIDTH-1:0] addr; -logic wfirst_d1 = 1'b0; -logic wfirst_pulse = 1'b0; -logic [LP_LOG_MAX_W_TO_AW-1:0] dbg_w_to_aw_outstanding; -logic idle_aw; -logic [LP_TRANSACTION_CNTR_WIDTH-1:0] aw_transactions_to_go; -logic aw_final_transaction; - -wire bxfer; -logic [LP_TRANSACTION_CNTR_WIDTH-1:0] b_transactions_to_go; -logic b_final_transaction; - -///////////////////////////////////////////////////////////////////////////// -// Control logic -///////////////////////////////////////////////////////////////////////////// -// Count the number of transfers and assert done when the last bvalid is received. -assign num_full_bursts = ctrl_length[C_LOG_BURST_LEN+:C_MAX_LENGTH_WIDTH-C_LOG_BURST_LEN]; -assign num_partial_bursts = ctrl_length[0+:C_LOG_BURST_LEN] ? 1'b1 : 1'b0; - -always @(posedge aclk) begin - start <= ctrl_start; - num_transactions <= (num_partial_bursts == 1'b0) ? num_full_bursts - 1'b1 : num_full_bursts; - has_partial_burst <= num_partial_bursts; - final_burst_len <= ctrl_length[0+:C_LOG_BURST_LEN] - 1'b1; -end - -assign ctrl_done = bxfer & b_final_transaction; -assign single_transaction = (num_transactions == {LP_TRANSACTION_CNTR_WIDTH{1'b0}}) ? 
1'b1 : 1'b0;
-
-/////////////////////////////////////////////////////////////////////////////
-// AXI Write Data Channel
-/////////////////////////////////////////////////////////////////////////////
-assign wvalid = s_tvalid;
-assign wdata = s_tdata;
-assign wstrb = {(C_DATA_WIDTH/8){1'b1}};
-assign s_tready = wready;
-
-assign wxfer = wvalid & wready;
-
-always @(posedge aclk) begin
-  if (areset) begin
-    wfirst <= 1'b1;
-  end
-  else begin
-    wfirst <= wxfer ? wlast : wfirst;
-  end
-end
-
-// Load burst counter with partial burst if on final transaction or if there is only 1 transaction
-assign load_burst_cntr = (wxfer & wlast & w_almost_final_transaction) || (start & single_transaction);
-
-krnl_rtl_counter #(
-  .C_WIDTH ( C_LOG_BURST_LEN         ) ,
-  .C_INIT  ( {C_LOG_BURST_LEN{1'b1}} )
-)
-inst_burst_cntr (
-  .clk        ( aclk            ) ,
-  .clken      ( 1'b1            ) ,
-  .rst        ( areset          ) ,
-  .load       ( load_burst_cntr ) ,
-  .incr       ( 1'b0            ) ,
-  .decr       ( wxfer           ) ,
-  .load_value ( final_burst_len ) ,
-  .count      ( wxfers_to_go    ) ,
-  .is_zero    ( wlast           )
-);
-
-krnl_rtl_counter #(
-  .C_WIDTH ( LP_TRANSACTION_CNTR_WIDTH         ) ,
-  .C_INIT  ( {LP_TRANSACTION_CNTR_WIDTH{1'b0}} )
-)
-inst_w_transaction_cntr (
-  .clk        ( aclk                 ) ,
-  .clken      ( 1'b1                 ) ,
-  .rst        ( areset               ) ,
-  .load       ( start                ) ,
-  .incr       ( 1'b0                 ) ,
-  .decr       ( wxfer & wlast        ) ,
-  .load_value ( num_transactions     ) ,
-  .count      ( w_transactions_to_go ) ,
-  .is_zero    ( w_final_transaction  )
-);
-
-always @(posedge aclk) begin
-  w_almost_final_transaction <= (w_transactions_to_go == 1) ? 1'b1 : 1'b0;
-end
-
-/////////////////////////////////////////////////////////////////////////////
-// AXI Write Address Channel
-/////////////////////////////////////////////////////////////////////////////
-// The address channel samples the data channel and sends out transactions when
-// the first beat of wdata is asserted. This ensures that address requests are not
-// sent without data on the way.
-
-assign awvalid = awvalid_r;
-assign awxfer = awvalid & awready;
-
-always @(posedge aclk) begin
-  if (areset) begin
-    awvalid_r <= 1'b0;
-  end
-  else begin
-    awvalid_r <= ~idle_aw & ~awvalid_r ? 1'b1 :
-                 awready               ? 1'b0 :
-                                         awvalid_r;
-  end
-end
-
-assign awaddr = addr;
-
-always @(posedge aclk) begin
-  addr <= ctrl_start ? ctrl_offset :
-          awxfer     ? addr + C_BURST_LEN*C_DATA_WIDTH/8 :
-                       addr;
-end
-
-assign awlen = aw_final_transaction || (start & single_transaction) ?
final_burst_len : C_BURST_LEN - 1; -assign awsize = $clog2((C_DATA_WIDTH/8)); - -krnl_rtl_counter #( - .C_WIDTH (LP_LOG_MAX_W_TO_AW), - .C_INIT ({LP_LOG_MAX_W_TO_AW{1'b0}}) -) -inst_w_to_aw_cntr ( - .clk ( aclk ) , - .clken ( 1'b1 ) , - .rst ( areset ) , - .load ( 1'b0 ) , - .incr ( wfirst_pulse ) , - .decr ( awxfer ) , - .load_value ( ) , - .count ( dbg_w_to_aw_outstanding ) , - .is_zero ( idle_aw ) -); - -always @(posedge aclk) begin - wfirst_d1 <= wvalid & wfirst; -end - -always @(posedge aclk) begin - wfirst_pulse <= wvalid & wfirst & ~wfirst_d1; -end - -krnl_rtl_counter #( - .C_WIDTH ( LP_TRANSACTION_CNTR_WIDTH ) , - .C_INIT ( {LP_TRANSACTION_CNTR_WIDTH{1'b0}} ) -) -inst_aw_transaction_cntr ( - .clk ( aclk ) , - .clken ( 1'b1 ) , - .rst ( areset ) , - .load ( start ) , - .incr ( 1'b0 ) , - .decr ( awxfer ) , - .load_value ( num_transactions ) , - .count ( aw_transactions_to_go ) , - .is_zero ( aw_final_transaction ) -); - -///////////////////////////////////////////////////////////////////////////// -// AXI Write Response Channel -///////////////////////////////////////////////////////////////////////////// - -assign bready = 1'b1; -assign bxfer = bready & bvalid; - -krnl_rtl_counter #( - .C_WIDTH ( LP_TRANSACTION_CNTR_WIDTH ) , - .C_INIT ( {LP_TRANSACTION_CNTR_WIDTH{1'b0}} ) -) -inst_b_transaction_cntr ( - .clk ( aclk ) , - .clken ( 1'b1 ) , - .rst ( areset ) , - .load ( start ) , - .incr ( 1'b0 ) , - .decr ( bxfer ) , - .load_value ( num_transactions ) , - .count ( b_transactions_to_go ) , - .is_zero ( b_final_transaction ) -); - -endmodule : krnl_rtl_axi_write_master - -`default_nettype wire diff --git a/hls4ml/hls4ml/templates/vivado_accelerator/alveo/krnl_rtl_src/krnl_rtl_control_s_axi.v b/hls4ml/hls4ml/templates/vivado_accelerator/alveo/krnl_rtl_src/krnl_rtl_control_s_axi.v deleted file mode 100644 index c4a76ef..0000000 --- a/hls4ml/hls4ml/templates/vivado_accelerator/alveo/krnl_rtl_src/krnl_rtl_control_s_axi.v +++ /dev/null @@ -1,422 +0,0 @@ -/** -* Copyright (C) 2019-2021 Xilinx, Inc -* -* Licensed under the Apache License, Version 2.0 (the "License"). You may -* not use this file except in compliance with the License. A copy of the -* License is located at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -* License for the specific language governing permissions and limitations -* under the License. 
-*/ - -`timescale 1ns/1ps -module krnl_rtl_control_s_axi -#(parameter - C_S_AXI_ADDR_WIDTH = 6, - C_S_AXI_DATA_WIDTH = 32 -)( - // axi4 lite slave signals - input wire ACLK, - input wire ARESET, - input wire ACLK_EN, - input wire [C_S_AXI_ADDR_WIDTH-1:0] AWADDR, - input wire AWVALID, - output wire AWREADY, - input wire [C_S_AXI_DATA_WIDTH-1:0] WDATA, - input wire [C_S_AXI_DATA_WIDTH/8-1:0] WSTRB, - input wire WVALID, - output wire WREADY, - output wire [1:0] BRESP, - output wire BVALID, - input wire BREADY, - input wire [C_S_AXI_ADDR_WIDTH-1:0] ARADDR, - input wire ARVALID, - output wire ARREADY, - output wire [C_S_AXI_DATA_WIDTH-1:0] RDATA, - output wire [1:0] RRESP, - output wire RVALID, - input wire RREADY, - output wire interrupt, - // user signals - output wire ap_start, - input wire ap_done, - input wire ap_ready, - input wire ap_idle, - output wire [63:0] fifo_in, - output wire [63:0] fifo_out, - output wire [31:0] length_r_in, - output wire [31:0] length_r_out -); -//------------------------Address Info------------------- -// 0x00 : Control signals -// bit 0 - ap_start (Read/Write/COH) -// bit 1 - ap_done (Read/COR) -// bit 2 - ap_idle (Read) -// bit 3 - ap_ready (Read) -// bit 7 - auto_restart (Read/Write) -// others - reserved -// 0x04 : Global Interrupt Enable Register -// bit 0 - Global Interrupt Enable (Read/Write) -// others - reserved -// 0x08 : IP Interrupt Enable Register (Read/Write) -// bit 0 - Channel 0 (ap_done) -// bit 1 - Channel 1 (ap_ready) -// others - reserved -// 0x0c : IP Interrupt Status Register (Read/TOW) -// bit 0 - Channel 0 (ap_done) -// bit 1 - Channel 1 (ap_ready) -// others - reserved -// 0x10 : Data signal of fifo_in -// bit 31~0 - a[31:0] (Read/Write) -// 0x14 : Data signal of fifo_in -// bit 31~0 - a[63:32] (Read/Write) -// 0x18 : reserved -// 0x1c : Data signal of fifo_out -// bit 31~0 - b[31:0] (Read/Write) -// 0x20 : Data signal of fifo_out -// bit 31~0 - b[63:32] (Read/Write) -// 0x24 : reserved -// 0x28 : Data signal of length_r_in -// bit 31~0 - length_r[31:0] (Read/Write) -// 0x2c : reserved -// 0x30 : Data signal of length_r_out -// bit 31~0 - length_r[31:0] (Read/Write) -// 0x34 : reserved -// (SC = Self Clear, COR = Clear on Read, TOW = Toggle on Write, COH = Clear on Handshake) - -//------------------------Parameter---------------------- -localparam - ADDR_AP_CTRL = 6'h00, - ADDR_GIE = 6'h04, - ADDR_IER = 6'h08, - ADDR_ISR = 6'h0c, - ADDR_FIFO_IN_DATA_0 = 6'h10, - ADDR_FIFO_IN_DATA_1 = 6'h14, - ADDR_FIFO_IN_CTRL = 6'h18, - ADDR_FIFO_OUT_DATA_0 = 6'h1c, - ADDR_FIFO_OUT_DATA_1 = 6'h20, - ADDR_FIFO_OUT_CTRL = 6'h24, - ADDR_LENGTH_R_IN_DATA_0 = 6'h28, - ADDR_LENGTH_R_IN_CTRL = 6'h2c, - ADDR_LENGTH_R_OUT_DATA_0 = 6'h30, - ADDR_LENGTH_R_OUT_CTRL = 6'h34, - WRIDLE = 2'd0, - WRDATA = 2'd1, - WRRESP = 2'd2, - RDIDLE = 2'd0, - RDDATA = 2'd1, - ADDR_BITS = 6; - -//------------------------Local signal------------------- - reg [1:0] wstate = WRIDLE; - reg [1:0] wnext; - reg [ADDR_BITS-1:0] waddr; - wire [31:0] wmask; - wire aw_hs; - wire w_hs; - reg [1:0] rstate = RDIDLE; - reg [1:0] rnext; - reg [31:0] rdata; - wire ar_hs; - wire [ADDR_BITS-1:0] raddr; - // internal registers - wire int_ap_idle; - wire int_ap_ready; - reg int_ap_done = 1'b0; - reg int_ap_start = 1'b0; - reg int_auto_restart = 1'b0; - reg int_gie = 2'b0; - reg [1:0] int_ier = 2'b0; - reg [1:0] int_isr = 2'b0; - reg [63:0] int_fifo_in = 64'b0; - reg [63:0] int_fifo_out = 64'b0; - reg [63:0] int_length_r_in = 32'b0; - reg [31:0] int_length_r_out = 32'b0; - 
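To make the address map above concrete, here is a short host-side sketch of how a driver would program these registers; the poke helper and the register store are stand-ins of our own, not a real driver API. The 64-bit buffer pointers span two 32-bit data registers.

#include <cstdint>
#include <cstdio>
#include <map>

std::map<uint32_t, uint32_t> regs; // stand-in for the AXI4-Lite slave

void poke(uint32_t addr, uint32_t val) { regs[addr] = val; }

int main() {
    uint64_t fifo_in = 0x0000000400001000ULL; // example device address
    poke(0x10, (uint32_t)fifo_in);         // fifo_in[31:0]
    poke(0x14, (uint32_t)(fifo_in >> 32)); // fifo_in[63:32]
    poke(0x28, 1024);                      // length_r_in, in data beats
    poke(0x00, 1);                         // ap_start (bit 0, clear-on-handshake)

    uint64_t readback = ((uint64_t)regs[0x14] << 32) | regs[0x10];
    printf("fifo_in reassembled: 0x%llx\n", (unsigned long long)readback);
    return 0;
}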
-//------------------------Instantiation------------------ - -//------------------------AXI write fsm------------------ -assign AWREADY = (~ARESET) & (wstate == WRIDLE); -assign WREADY = (wstate == WRDATA); -assign BRESP = 2'b00; // OKAY -assign BVALID = (wstate == WRRESP); -assign wmask = { {8{WSTRB[3]}}, {8{WSTRB[2]}}, {8{WSTRB[1]}}, {8{WSTRB[0]}} }; -assign aw_hs = AWVALID & AWREADY; -assign w_hs = WVALID & WREADY; - -// wstate -always @(posedge ACLK) begin - if (ARESET) - wstate <= WRIDLE; - else if (ACLK_EN) - wstate <= wnext; -end - -// wnext -always @(*) begin - case (wstate) - WRIDLE: - if (AWVALID) - wnext = WRDATA; - else - wnext = WRIDLE; - WRDATA: - if (WVALID) - wnext = WRRESP; - else - wnext = WRDATA; - WRRESP: - if (BREADY) - wnext = WRIDLE; - else - wnext = WRRESP; - default: - wnext = WRIDLE; - endcase -end - -// waddr -always @(posedge ACLK) begin - if (ACLK_EN) begin - if (aw_hs) - waddr <= AWADDR[ADDR_BITS-1:0]; - end -end - -//------------------------AXI read fsm------------------- -assign ARREADY = (~ARESET) && (rstate == RDIDLE); -assign RDATA = rdata; -assign RRESP = 2'b00; // OKAY -assign RVALID = (rstate == RDDATA); -assign ar_hs = ARVALID & ARREADY; -assign raddr = ARADDR[ADDR_BITS-1:0]; - -// rstate -always @(posedge ACLK) begin - if (ARESET) - rstate <= RDIDLE; - else if (ACLK_EN) - rstate <= rnext; -end - -// rnext -always @(*) begin - case (rstate) - RDIDLE: - if (ARVALID) - rnext = RDDATA; - else - rnext = RDIDLE; - RDDATA: - if (RREADY & RVALID) - rnext = RDIDLE; - else - rnext = RDDATA; - default: - rnext = RDIDLE; - endcase -end - -// rdata -always @(posedge ACLK) begin - if (ACLK_EN) begin - if (ar_hs) begin - rdata <= 1'b0; - case (raddr) - ADDR_AP_CTRL: begin - rdata[0] <= int_ap_start; - rdata[1] <= int_ap_done; - rdata[2] <= int_ap_idle; - rdata[3] <= int_ap_ready; - rdata[7] <= int_auto_restart; - end - ADDR_GIE: begin - rdata <= int_gie; - end - ADDR_IER: begin - rdata <= int_ier; - end - ADDR_ISR: begin - rdata <= int_isr; - end - ADDR_FIFO_IN_DATA_0: begin - rdata <= int_fifo_in[31:0]; - end - ADDR_FIFO_IN_DATA_1: begin - rdata <= int_fifo_in[63:32]; - end - ADDR_FIFO_OUT_DATA_0: begin - rdata <= int_fifo_out[31:0]; - end - ADDR_FIFO_OUT_DATA_1: begin - rdata <= int_fifo_out[63:32]; - end - ADDR_LENGTH_R_IN_DATA_0: begin - rdata <= int_length_r_in[31:0]; - end - ADDR_LENGTH_R_OUT_DATA_0: begin - rdata <= int_length_r_out[31:0]; - end - endcase - end - end -end - - -//------------------------Register logic----------------- -assign interrupt = int_gie & (|int_isr); -assign ap_start = int_ap_start; -assign int_ap_idle = ap_idle; -assign int_ap_ready = ap_ready; -assign fifo_in = int_fifo_in; -assign fifo_out = int_fifo_out; -assign length_r_in = int_length_r_in; -assign length_r_out = int_length_r_out; -// int_ap_start -always @(posedge ACLK) begin - if (ARESET) - int_ap_start <= 1'b0; - else if (ACLK_EN) begin - if (w_hs && waddr == ADDR_AP_CTRL && WSTRB[0] && WDATA[0]) - int_ap_start <= 1'b1; - else if (int_ap_ready) - int_ap_start <= int_auto_restart; // clear on handshake/auto restart - end -end - -// int_ap_done -always @(posedge ACLK) begin - if (ARESET) - int_ap_done <= 1'b0; - else if (ACLK_EN) begin - if (ap_done) - int_ap_done <= 1'b1; - else if (ar_hs && raddr == ADDR_AP_CTRL) - int_ap_done <= 1'b0; // clear on read - end -end - -// int_auto_restart -always @(posedge ACLK) begin - if (ARESET) - int_auto_restart <= 1'b0; - else if (ACLK_EN) begin - if (w_hs && waddr == ADDR_AP_CTRL && WSTRB[0]) - int_auto_restart <= WDATA[7]; - end -end - 
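The always blocks above implement the standard ap_ctrl handshake policies: ap_start is clear-on-handshake (or reloaded when auto_restart is set) and ap_done is sticky until the status register is read. A compact C++ model of those policies, with names of our choosing:

#include <cstdio>

struct CtrlModel {
    bool ap_start = false, ap_done = false, auto_restart = false;
    void host_sets_start()   { ap_start = true; }
    void kernel_ready()      { ap_start = auto_restart; } // clear-on-handshake, unless auto restart
    void kernel_done()       { ap_done = true; }
    bool host_reads_status() { bool d = ap_done; ap_done = false; return d; } // clear-on-read
};

int main() {
    CtrlModel c;
    c.auto_restart = true;
    c.host_sets_start();
    c.kernel_ready();                           // start persists: auto restart re-arms it
    c.kernel_done();
    printf("start=%d\n", c.ap_start);           // 1
    printf("done=%d\n", c.host_reads_status()); // 1, and the flag clears
    printf("done=%d\n", c.host_reads_status()); // 0
    return 0;
}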
-// int_gie -always @(posedge ACLK) begin - if (ARESET) - int_gie <= 1'b0; - else if (ACLK_EN) begin - if (w_hs && waddr == ADDR_GIE && WSTRB[0]) - int_gie <= WDATA[0]; - end -end - -// int_ier -always @(posedge ACLK) begin - if (ARESET) - int_ier <= 1'b0; - else if (ACLK_EN) begin - if (w_hs && waddr == ADDR_IER && WSTRB[0]) - int_ier <= WDATA[1:0]; - end -end - -// int_isr[0] -always @(posedge ACLK) begin - if (ARESET) - int_isr[0] <= 1'b0; - else if (ACLK_EN) begin - if (int_ier[0] & ap_done) - int_isr[0] <= 1'b1; - else if (w_hs && waddr == ADDR_ISR && WSTRB[0]) - int_isr[0] <= int_isr[0] ^ WDATA[0]; // toggle on write - end -end - -// int_isr[1] -always @(posedge ACLK) begin - if (ARESET) - int_isr[1] <= 1'b0; - else if (ACLK_EN) begin - if (int_ier[1] & ap_ready) - int_isr[1] <= 1'b1; - else if (w_hs && waddr == ADDR_ISR && WSTRB[0]) - int_isr[1] <= int_isr[1] ^ WDATA[1]; // toggle on write - end -end - -// int_fifo_in[31:0] -always @(posedge ACLK) begin - if (ARESET) - int_fifo_in[31:0] <= 0; - else if (ACLK_EN) begin - if (w_hs && waddr == ADDR_FIFO_IN_DATA_0) - int_fifo_in[31:0] <= (WDATA[31:0] & wmask) | (int_fifo_in[31:0] & ~wmask); - end -end - -// int_fifo_in[63:32] -always @(posedge ACLK) begin - if (ARESET) - int_fifo_in[63:32] <= 0; - else if (ACLK_EN) begin - if (w_hs && waddr == ADDR_FIFO_IN_DATA_1) - int_fifo_in[63:32] <= (WDATA[31:0] & wmask) | (int_fifo_in[63:32] & ~wmask); - end -end - -// int_fifo_out[31:0] -always @(posedge ACLK) begin - if (ARESET) - int_fifo_out[31:0] <= 0; - else if (ACLK_EN) begin - if (w_hs && waddr == ADDR_FIFO_OUT_DATA_0) - int_fifo_out[31:0] <= (WDATA[31:0] & wmask) | (int_fifo_out[31:0] & ~wmask); - end -end - -// int_fifo_out[63:32] -always @(posedge ACLK) begin - if (ARESET) - int_fifo_out[63:32] <= 0; - else if (ACLK_EN) begin - if (w_hs && waddr == ADDR_FIFO_OUT_DATA_1) - int_fifo_out[63:32] <= (WDATA[31:0] & wmask) | (int_fifo_out[63:32] & ~wmask); - end -end - -// int_length_r_in[31:0] -always @(posedge ACLK) begin - if (ARESET) - int_length_r_in[31:0] <= 0; - else if (ACLK_EN) begin - if (w_hs && waddr == ADDR_LENGTH_R_IN_DATA_0) - int_length_r_in[31:0] <= (WDATA[31:0] & wmask) | (int_length_r_in[31:0] & ~wmask); - end -end - - -// int_length_r_out[31:0] -always @(posedge ACLK) begin - if (ARESET) - int_length_r_out[31:0] <= 0; - else if (ACLK_EN) begin - if (w_hs && waddr == ADDR_LENGTH_R_OUT_DATA_0) - int_length_r_out[31:0] <= (WDATA[31:0] & wmask) | (int_length_r_out[31:0] & ~wmask); - end -end - - -//------------------------Memory logic------------------- - -endmodule diff --git a/hls4ml/hls4ml/templates/vivado_accelerator/alveo/krnl_rtl_src/krnl_rtl_counter.sv b/hls4ml/hls4ml/templates/vivado_accelerator/alveo/krnl_rtl_src/krnl_rtl_counter.sv deleted file mode 100644 index f3436b5..0000000 --- a/hls4ml/hls4ml/templates/vivado_accelerator/alveo/krnl_rtl_src/krnl_rtl_counter.sv +++ /dev/null @@ -1,87 +0,0 @@ -/** -* Copyright (C) 2019-2021 Xilinx, Inc -* -* Licensed under the Apache License, Version 2.0 (the "License"). You may -* not use this file except in compliance with the License. A copy of the -* License is located at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -* License for the specific language governing permissions and limitations -* under the License. 
-*/
-
-//-----------------------------------------------------------------------------
-// Simple up/down counter with reset.
-//-----------------------------------------------------------------------------
-`default_nettype none
-`timescale 1ps/1ps
-module krnl_rtl_counter #(
-  parameter integer       C_WIDTH = 4,
-  parameter [C_WIDTH-1:0] C_INIT  = {C_WIDTH{1'b0}}
-)
-(
-  input wire                clk,
-  input wire                clken,
-  input wire                rst,
-  input wire                load,
-  input wire                incr,
-  input wire                decr,
-  input wire  [C_WIDTH-1:0] load_value,
-  output wire [C_WIDTH-1:0] count,
-  output wire               is_zero
-);
-
-  localparam [C_WIDTH-1:0] LP_ZERO = {C_WIDTH{1'b0}};
-  localparam [C_WIDTH-1:0] LP_ONE  = {{C_WIDTH-1{1'b0}},1'b1};
-  localparam [C_WIDTH-1:0] LP_MAX  = {C_WIDTH{1'b1}};
-
-  reg [C_WIDTH-1:0] count_r   = C_INIT;
-  reg               is_zero_r = (C_INIT == LP_ZERO);
-
-  assign count = count_r;
-
-  always @(posedge clk) begin
-    if (rst) begin
-      count_r <= C_INIT;
-    end
-    else if (clken) begin
-      if (load) begin
-        count_r <= load_value;
-      end
-      else if (incr & ~decr) begin
-        count_r <= count_r + 1'b1;
-      end
-      else if (~incr & decr) begin
-        count_r <= count_r - 1'b1;
-      end
-      else
-        count_r <= count_r;
-    end
-  end
-
-  assign is_zero = is_zero_r;
-
-  always @(posedge clk) begin
-    if (rst) begin
-      is_zero_r <= (C_INIT == LP_ZERO);
-    end
-    else if (clken) begin
-      if (load) begin
-        is_zero_r <= (load_value == LP_ZERO);
-      end
-      else begin
-        is_zero_r <= incr ^ decr ? (decr && (count_r == LP_ONE)) || (incr && (count_r == LP_MAX)) : is_zero_r;
-      end
-    end
-    else begin
-      is_zero_r <= is_zero_r;
-    end
-  end
-
-
-endmodule : krnl_rtl_counter
-`default_nettype wire
diff --git a/hls4ml/hls4ml/templates/vivado_accelerator/alveo/krnl_rtl_src/krnl_rtl_int.sv b/hls4ml/hls4ml/templates/vivado_accelerator/alveo/krnl_rtl_src/krnl_rtl_int.sv
deleted file mode 100644
index bcc3b7a..0000000
--- a/hls4ml/hls4ml/templates/vivado_accelerator/alveo/krnl_rtl_src/krnl_rtl_int.sv
+++ /dev/null
@@ -1,415 +0,0 @@
-/**
-* Copyright (C) 2019-2021 Xilinx, Inc
-*
-* Licensed under the Apache License, Version 2.0 (the "License"). You may
-* not use this file except in compliance with the License. A copy of the
-* License is located at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-* License for the specific language governing permissions and limitations
-* under the License.
-*/
-
-///////////////////////////////////////////////////////////////////////////////
-// Description: This is an example of how to create an RTL Kernel. The function
-// of this module is to add two 32-bit values and produce a result. The values
-// are read from one AXI4 memory mapped master, processed and then written out.
-//
-// Data flow: axi_read_master->fifo->adder->fifo->axi_write_master
-///////////////////////////////////////////////////////////////////////////////
-
-// default_nettype of none prevents implicit wire declaration.
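One detail of the krnl_rtl_counter module above that is easy to miss: is_zero is a registered lookahead, computed from the current count and the incr/decr pulses rather than by comparing the counter to zero combinationally, which keeps the flag off the critical path. A C++ model of that update rule, assuming a 4-bit counter:

#include <cstdint>
#include <cstdio>

int main() {
    const uint8_t MAX = 0xF; // C_WIDTH = 4
    uint8_t count = 2;
    bool is_zero = false;
    auto step = [&](bool incr, bool decr) {
        if (incr ^ decr) // the flag only changes on a net increment or decrement
            is_zero = (decr && count == 1) || (incr && count == MAX);
        count = (uint8_t)((count + incr - decr) & MAX); // wraps, like the RTL
    };
    step(false, true); // 2 -> 1, is_zero stays false
    step(false, true); // 1 -> 0, is_zero now true
    printf("count=%d is_zero=%d\n", (int)count, (int)is_zero);
    return 0;
}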
-`default_nettype none -`timescale 1 ns / 1 ps - -module krnl_rtl_int #( - parameter integer C_S_AXI_CONTROL_DATA_WIDTH = 32, - parameter integer C_S_AXI_CONTROL_ADDR_WIDTH = 6, - parameter integer C_M_AXI_GMEM_ID_WIDTH = 1, - parameter integer C_M_AXI_GMEM_ADDR_WIDTH = 64, - parameter integer C_M_AXI_GMEM_DATA_WIDTH = 32 -) -( - // System signals - input wire ap_clk, - input wire ap_rst_n, - // AXI4 master interface - output wire m_axi_gmem_AWVALID, - input wire m_axi_gmem_AWREADY, - output wire [C_M_AXI_GMEM_ADDR_WIDTH-1:0] m_axi_gmem_AWADDR, - output wire [C_M_AXI_GMEM_ID_WIDTH - 1:0] m_axi_gmem_AWID, - output wire [7:0] m_axi_gmem_AWLEN, - output wire [2:0] m_axi_gmem_AWSIZE, - // Tie-off AXI4 transaction options that are not being used. - output wire [1:0] m_axi_gmem_AWBURST, - output wire [1:0] m_axi_gmem_AWLOCK, - output wire [3:0] m_axi_gmem_AWCACHE, - output wire [2:0] m_axi_gmem_AWPROT, - output wire [3:0] m_axi_gmem_AWQOS, - output wire [3:0] m_axi_gmem_AWREGION, - output wire m_axi_gmem_WVALID, - input wire m_axi_gmem_WREADY, - output wire [C_M_AXI_GMEM_DATA_WIDTH-1:0] m_axi_gmem_WDATA, - output wire [C_M_AXI_GMEM_DATA_WIDTH/8-1:0] m_axi_gmem_WSTRB, - output wire m_axi_gmem_WLAST, - output wire m_axi_gmem_ARVALID, - input wire m_axi_gmem_ARREADY, - output wire [C_M_AXI_GMEM_ADDR_WIDTH-1:0] m_axi_gmem_ARADDR, - output wire [C_M_AXI_GMEM_ID_WIDTH-1:0] m_axi_gmem_ARID, - output wire [7:0] m_axi_gmem_ARLEN, - output wire [2:0] m_axi_gmem_ARSIZE, - output wire [1:0] m_axi_gmem_ARBURST, - output wire [1:0] m_axi_gmem_ARLOCK, - output wire [3:0] m_axi_gmem_ARCACHE, - output wire [2:0] m_axi_gmem_ARPROT, - output wire [3:0] m_axi_gmem_ARQOS, - output wire [3:0] m_axi_gmem_ARREGION, - input wire m_axi_gmem_RVALID, - output wire m_axi_gmem_RREADY, - input wire [C_M_AXI_GMEM_DATA_WIDTH - 1:0] m_axi_gmem_RDATA, - input wire m_axi_gmem_RLAST, - input wire [C_M_AXI_GMEM_ID_WIDTH - 1:0] m_axi_gmem_RID, - input wire [1:0] m_axi_gmem_RRESP, - input wire m_axi_gmem_BVALID, - output wire m_axi_gmem_BREADY, - input wire [1:0] m_axi_gmem_BRESP, - input wire [C_M_AXI_GMEM_ID_WIDTH - 1:0] m_axi_gmem_BID, - - // AXI4-Lite slave interface - input wire s_axi_control_AWVALID, - output wire s_axi_control_AWREADY, - input wire [C_S_AXI_CONTROL_ADDR_WIDTH-1:0] s_axi_control_AWADDR, - input wire s_axi_control_WVALID, - output wire s_axi_control_WREADY, - input wire [C_S_AXI_CONTROL_DATA_WIDTH-1:0] s_axi_control_WDATA, - input wire [C_S_AXI_CONTROL_DATA_WIDTH/8-1:0] s_axi_control_WSTRB, - input wire s_axi_control_ARVALID, - output wire s_axi_control_ARREADY, - input wire [C_S_AXI_CONTROL_ADDR_WIDTH-1:0] s_axi_control_ARADDR, - output wire s_axi_control_RVALID, - input wire s_axi_control_RREADY, - output wire [C_S_AXI_CONTROL_DATA_WIDTH-1:0] s_axi_control_RDATA, - output wire [1:0] s_axi_control_RRESP, - output wire s_axi_control_BVALID, - input wire s_axi_control_BREADY, - output wire [1:0] s_axi_control_BRESP, - output wire interrupt -); -/////////////////////////////////////////////////////////////////////////////// -// Local Parameters (constants) -/////////////////////////////////////////////////////////////////////////////// -localparam integer LP_NUM_READ_CHANNELS = 1; -localparam integer LP_LENGTH_WIDTH = 32; -localparam integer LP_DW_BYTES = C_M_AXI_GMEM_DATA_WIDTH/8; -localparam integer LP_AXI_BURST_LEN = 4096/LP_DW_BYTES < 256 ? 
4096/LP_DW_BYTES : 256; -localparam integer LP_LOG_BURST_LEN = $clog2(LP_AXI_BURST_LEN); -localparam integer LP_RD_MAX_OUTSTANDING = 3; -localparam integer LP_RD_FIFO_DEPTH = LP_AXI_BURST_LEN*(LP_RD_MAX_OUTSTANDING + 1); -localparam integer LP_WR_FIFO_DEPTH = LP_AXI_BURST_LEN; - - -/////////////////////////////////////////////////////////////////////////////// -// Variables -/////////////////////////////////////////////////////////////////////////////// -logic areset = 1'b0; -logic ap_start; -logic ap_start_pulse; -logic ap_start_r; -logic ap_ready; -logic ap_done; -logic ap_idle = 1'b1; -logic [C_M_AXI_GMEM_ADDR_WIDTH-1:0] fifo_in; -logic [C_M_AXI_GMEM_ADDR_WIDTH-1:0] fifo_out; -logic [LP_LENGTH_WIDTH-1:0] length_r_in; -logic [LP_LENGTH_WIDTH-1:0] length_r_out; - -logic read_done; -logic [LP_NUM_READ_CHANNELS-1:0] rd_tvalid; -logic [LP_NUM_READ_CHANNELS-1:0] rd_tready_n; -logic [LP_NUM_READ_CHANNELS-1:0] [C_M_AXI_GMEM_DATA_WIDTH-1:0] rd_tdata; -logic [LP_NUM_READ_CHANNELS-1:0] rd_tlast; -logic [LP_NUM_READ_CHANNELS-1:0] ctrl_rd_fifo_prog_full; -logic [LP_NUM_READ_CHANNELS-1:0] rd_fifo_tvalid_n; -logic [LP_NUM_READ_CHANNELS-1:0] rd_fifo_tready; -logic [LP_NUM_READ_CHANNELS-1:0] [C_M_AXI_GMEM_DATA_WIDTH-1:0] rd_fifo_tdata; -logic [LP_NUM_READ_CHANNELS-1:0] rd_fifo_tlast; - -logic NN_inf_tvalid; -logic NN_inf_tready_n; -logic [C_M_AXI_GMEM_DATA_WIDTH-1:0] NN_inf_tdata; -logic wr_fifo_tvalid_n; -logic wr_fifo_tready; -logic [C_M_AXI_GMEM_DATA_WIDTH-1:0] wr_fifo_tdata; - -/////////////////////////////////////////////////////////////////////////////// -// RTL Logic -/////////////////////////////////////////////////////////////////////////////// -// Tie-off unused AXI protocol features -assign m_axi_gmem_AWID = {C_M_AXI_GMEM_ID_WIDTH{1'b0}}; -assign m_axi_gmem_AWBURST = 2'b01; -assign m_axi_gmem_AWLOCK = 2'b00; -assign m_axi_gmem_AWCACHE = 4'b0011; -assign m_axi_gmem_AWPROT = 3'b000; -assign m_axi_gmem_AWQOS = 4'b0000; -assign m_axi_gmem_AWREGION = 4'b0000; -assign m_axi_gmem_ARBURST = 2'b01; -assign m_axi_gmem_ARLOCK = 2'b00; -assign m_axi_gmem_ARCACHE = 4'b0011; -assign m_axi_gmem_ARPROT = 3'b000; -assign m_axi_gmem_ARQOS = 4'b0000; -assign m_axi_gmem_ARREGION = 4'b0000; - -// Register and invert reset signal for better timing. -always @(posedge ap_clk) begin - areset <= ~ap_rst_n; -end - -// create pulse when ap_start transitions to 1 -always @(posedge ap_clk) begin - begin - ap_start_r <= ap_start; - end -end - -assign ap_start_pulse = ap_start & ~ap_start_r; - -// ap_idle is asserted when done is asserted, it is de-asserted when ap_start_pulse -// is asserted -always @(posedge ap_clk) begin - if (areset) begin - ap_idle <= 1'b1; - end - else begin - ap_idle <= ap_done ? 1'b1 : - ap_start_pulse ? 
1'b0 : - ap_idle; - end -end - -assign ap_ready = ap_done; - -// AXI4-Lite slave -krnl_rtl_control_s_axi #( - .C_S_AXI_ADDR_WIDTH( C_S_AXI_CONTROL_ADDR_WIDTH ), - .C_S_AXI_DATA_WIDTH( C_S_AXI_CONTROL_DATA_WIDTH ) -) -inst_krnl_control_s_axi ( - .AWVALID ( s_axi_control_AWVALID ) , - .AWREADY ( s_axi_control_AWREADY ) , - .AWADDR ( s_axi_control_AWADDR ) , - .WVALID ( s_axi_control_WVALID ) , - .WREADY ( s_axi_control_WREADY ) , - .WDATA ( s_axi_control_WDATA ) , - .WSTRB ( s_axi_control_WSTRB ) , - .ARVALID ( s_axi_control_ARVALID ) , - .ARREADY ( s_axi_control_ARREADY ) , - .ARADDR ( s_axi_control_ARADDR ) , - .RVALID ( s_axi_control_RVALID ) , - .RREADY ( s_axi_control_RREADY ) , - .RDATA ( s_axi_control_RDATA ) , - .RRESP ( s_axi_control_RRESP ) , - .BVALID ( s_axi_control_BVALID ) , - .BREADY ( s_axi_control_BREADY ) , - .BRESP ( s_axi_control_BRESP ) , - .ACLK ( ap_clk ) , - .ARESET ( areset ) , - .ACLK_EN ( 1'b1 ) , - .ap_start ( ap_start ) , - .interrupt ( interrupt ) , - .ap_ready ( ap_ready ) , - .ap_done ( ap_done ) , - .ap_idle ( ap_idle ) , - .fifo_in ( fifo_in[0+:C_M_AXI_GMEM_ADDR_WIDTH] ) , - .fifo_out ( fifo_out[0+:C_M_AXI_GMEM_ADDR_WIDTH] ) , - .length_r_in ( length_r_in[0+:LP_LENGTH_WIDTH] ) , - .length_r_out ( length_r_out[0+:LP_LENGTH_WIDTH] ) -); - -// AXI4 Read Master -krnl_rtl_axi_read_master #( - .C_ADDR_WIDTH ( C_M_AXI_GMEM_ADDR_WIDTH ) , - .C_DATA_WIDTH ( C_M_AXI_GMEM_DATA_WIDTH ) , - .C_ID_WIDTH ( C_M_AXI_GMEM_ID_WIDTH ) , - .C_NUM_CHANNELS ( LP_NUM_READ_CHANNELS ) , - .C_LENGTH_WIDTH ( LP_LENGTH_WIDTH ) , - .C_BURST_LEN ( LP_AXI_BURST_LEN ) , - .C_LOG_BURST_LEN ( LP_LOG_BURST_LEN ) , - .C_MAX_OUTSTANDING ( LP_RD_MAX_OUTSTANDING ) -) -inst_axi_read_master ( - .aclk ( ap_clk ) , - .areset ( areset ) , - - .ctrl_start ( ap_start_pulse ) , - .ctrl_done ( read_done ) , - .ctrl_offset ( fifo_in ) , - .ctrl_length ( length_r_in ) , - .ctrl_prog_full ( ctrl_rd_fifo_prog_full ) , - - .arvalid ( m_axi_gmem_ARVALID ) , - .arready ( m_axi_gmem_ARREADY ) , - .araddr ( m_axi_gmem_ARADDR ) , - .arid ( m_axi_gmem_ARID ) , - .arlen ( m_axi_gmem_ARLEN ) , - .arsize ( m_axi_gmem_ARSIZE ) , - .rvalid ( m_axi_gmem_RVALID ) , - .rready ( m_axi_gmem_RREADY ) , - .rdata ( m_axi_gmem_RDATA ) , - .rlast ( m_axi_gmem_RLAST ) , - .rid ( m_axi_gmem_RID ) , - .rresp ( m_axi_gmem_RRESP ) , - - .m_tvalid ( rd_tvalid ) , - .m_tready ( ~rd_tready_n ) , - .m_tdata ( rd_tdata ) , - .m_tlast ( rd_tlast ) -); - -// xpm_fifo_sync: Synchronous FIFO -// Xilinx Parameterized Macro, Version 2016.4 -xpm_fifo_sync # ( - .FIFO_MEMORY_TYPE ("auto"), //string; "auto", "block", "distributed", or "ultra"; - .ECC_MODE ("no_ecc"), //string; "no_ecc" or "en_ecc"; - .FIFO_WRITE_DEPTH (LP_RD_FIFO_DEPTH), //positive integer - .WRITE_DATA_WIDTH (C_M_AXI_GMEM_DATA_WIDTH+1), //positive integer - .WR_DATA_COUNT_WIDTH ($clog2(LP_RD_FIFO_DEPTH)+1), //positive integer, Not used - .PROG_FULL_THRESH (LP_AXI_BURST_LEN-2), //positive integer - .FULL_RESET_VALUE (1), //positive integer; 0 or 1 - .READ_MODE ("fwft"), //string; "std" or "fwft"; - .FIFO_READ_LATENCY (1), //positive integer; - .READ_DATA_WIDTH (C_M_AXI_GMEM_DATA_WIDTH+1), //positive integer - .RD_DATA_COUNT_WIDTH ($clog2(LP_RD_FIFO_DEPTH)+1), //positive integer, not used - .PROG_EMPTY_THRESH (10), //positive integer, not used - .DOUT_RESET_VALUE ("0"), //string, don't care - .WAKEUP_TIME (0) //positive integer; 0 or 2; - -) inst_rd_xpm_fifo_sync[LP_NUM_READ_CHANNELS-1:0] ( - .sleep ( 1'b0 ) , - .rst ( areset ) , - .wr_clk ( ap_clk ) , - .wr_en ( rd_tvalid ) , - .din 
( {rd_tlast,rd_tdata} ) , - .full ( rd_tready_n ) , - .prog_full ( ctrl_rd_fifo_prog_full) , - .wr_data_count ( ) , - .overflow ( ) , - .wr_rst_busy ( ) , - .rd_en ( rd_fifo_tready ) , - .dout ( {rd_fifo_tlast,rd_fifo_tdata} ) , - .empty ( rd_fifo_tvalid_n ) , - .prog_empty ( ) , - .rd_data_count ( ) , - .underflow ( ) , - .rd_rst_busy ( ) , - .injectsbiterr ( 1'b0 ) , - .injectdbiterr ( 1'b0 ) , - .sbiterr ( ) , - .dbiterr ( ) - -); - -// NN inference -myproject_axi_0 #() -hls4ml_IP ( - .ap_clk ( ap_clk ) , - .ap_rst_n ( ap_rst_n ) , - - .in_r_TVALID ( ~rd_fifo_tvalid_n ) , - .in_r_TREADY ( rd_fifo_tready ) , - .in_r_TDATA ( rd_fifo_tdata ) , - .in_r_TLAST ( rd_fifo_tlast ) , - - .out_r_TVALID ( NN_inf_tvalid ) , - .out_r_TREADY ( ~NN_inf_tready_n ) , - .out_r_TDATA ( NN_inf_tdata ) -); - -// xpm_fifo_sync: Synchronous FIFO -// Xilinx Parameterized Macro, Version 2016.4 -xpm_fifo_sync # ( - .FIFO_MEMORY_TYPE ("auto"), //string; "auto", "block", "distributed", or "ultra"; - .ECC_MODE ("no_ecc"), //string; "no_ecc" or "en_ecc"; - .FIFO_WRITE_DEPTH (LP_WR_FIFO_DEPTH), //positive integer - .WRITE_DATA_WIDTH (C_M_AXI_GMEM_DATA_WIDTH), //positive integer - .WR_DATA_COUNT_WIDTH ($clog2(LP_WR_FIFO_DEPTH)), //positive integer, Not used - .PROG_FULL_THRESH (10), //positive integer, Not used - .FULL_RESET_VALUE (1), //positive integer; 0 or 1 - .READ_MODE ("fwft"), //string; "std" or "fwft"; - .FIFO_READ_LATENCY (1), //positive integer; - .READ_DATA_WIDTH (C_M_AXI_GMEM_DATA_WIDTH), //positive integer - .RD_DATA_COUNT_WIDTH ($clog2(LP_WR_FIFO_DEPTH)), //positive integer, not used - .PROG_EMPTY_THRESH (10), //positive integer, not used - .DOUT_RESET_VALUE ("0"), //string, don't care - .WAKEUP_TIME (0) //positive integer; 0 or 2; - -) inst_wr_xpm_fifo_sync ( - .sleep ( 1'b0 ) , - .rst ( areset ) , - .wr_clk ( ap_clk ) , - .wr_en ( NN_inf_tvalid ) , - .din ( NN_inf_tdata ) , - .full ( NN_inf_tready_n ) , - .prog_full ( ) , - .wr_data_count ( ) , - .overflow ( ) , - .wr_rst_busy ( ) , - .rd_en ( wr_fifo_tready ) , - .dout ( wr_fifo_tdata ) , - .empty ( wr_fifo_tvalid_n ) , - .prog_empty ( ) , - .rd_data_count ( ) , - .underflow ( ) , - .rd_rst_busy ( ) , - .injectsbiterr ( 1'b0 ) , - .injectdbiterr ( 1'b0 ) , - .sbiterr ( ) , - .dbiterr ( ) - -); - - -// AXI4 Write Master -krnl_rtl_axi_write_master #( - .C_ADDR_WIDTH ( C_M_AXI_GMEM_ADDR_WIDTH ) , - .C_DATA_WIDTH ( C_M_AXI_GMEM_DATA_WIDTH ) , - .C_MAX_LENGTH_WIDTH ( LP_LENGTH_WIDTH ) , - .C_BURST_LEN ( LP_AXI_BURST_LEN ) , - .C_LOG_BURST_LEN ( LP_LOG_BURST_LEN ) -) -inst_axi_write_master ( - .aclk ( ap_clk ) , - .areset ( areset ) , - - .ctrl_start ( ap_start_pulse ) , - .ctrl_offset ( fifo_out ) , - .ctrl_length ( length_r_out ) , - .ctrl_done ( ap_done ) , - - .awvalid ( m_axi_gmem_AWVALID ) , - .awready ( m_axi_gmem_AWREADY ) , - .awaddr ( m_axi_gmem_AWADDR ) , - .awlen ( m_axi_gmem_AWLEN ) , - .awsize ( m_axi_gmem_AWSIZE ) , - - .s_tvalid ( ~wr_fifo_tvalid_n ) , - .s_tready ( wr_fifo_tready ) , - .s_tdata ( wr_fifo_tdata ) , - - .wvalid ( m_axi_gmem_WVALID ) , - .wready ( m_axi_gmem_WREADY ) , - .wdata ( m_axi_gmem_WDATA ) , - .wstrb ( m_axi_gmem_WSTRB ) , - .wlast ( m_axi_gmem_WLAST ) , - - .bvalid ( m_axi_gmem_BVALID ) , - .bready ( m_axi_gmem_BREADY ) , - .bresp ( m_axi_gmem_BRESP ) -); - -endmodule : krnl_rtl_int - -`default_nettype wire diff --git a/hls4ml/hls4ml/templates/vivado_accelerator/alveo/krnl_rtl_src/myproject_kernel.v b/hls4ml/hls4ml/templates/vivado_accelerator/alveo/krnl_rtl_src/myproject_kernel.v deleted file mode 100644 index 
7d57e6b..0000000 --- a/hls4ml/hls4ml/templates/vivado_accelerator/alveo/krnl_rtl_src/myproject_kernel.v +++ /dev/null @@ -1,169 +0,0 @@ -/** -* Copyright (C) 2019-2021 Xilinx, Inc -* -* Licensed under the Apache License, Version 2.0 (the "License"). You may -* not use this file except in compliance with the License. A copy of the -* License is located at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -* License for the specific language governing permissions and limitations -* under the License. -*/ - -/////////////////////////////////////////////////////////////////////////////// -// Description: This is a wrapper of module "krnl_rtl_int" -/////////////////////////////////////////////////////////////////////////////// - -// default_nettype of none prevents implicit wire declaration. -`default_nettype none -`timescale 1 ns / 1 ps - -module krnl_rtl #( - parameter integer C_S_AXI_CONTROL_DATA_WIDTH = 32, - parameter integer C_S_AXI_CONTROL_ADDR_WIDTH = 6, - parameter integer C_M_AXI_GMEM_ID_WIDTH = 1, - parameter integer C_M_AXI_GMEM_ADDR_WIDTH = 64, - parameter integer C_M_AXI_GMEM_DATA_WIDTH = 32 -) -( - // System signals - input wire ap_clk, - input wire ap_rst_n, - // AXI4 master interface - output wire m_axi_gmem_AWVALID, - input wire m_axi_gmem_AWREADY, - output wire [C_M_AXI_GMEM_ADDR_WIDTH-1:0] m_axi_gmem_AWADDR, - output wire [C_M_AXI_GMEM_ID_WIDTH - 1:0] m_axi_gmem_AWID, - output wire [7:0] m_axi_gmem_AWLEN, - output wire [2:0] m_axi_gmem_AWSIZE, - // Tie-off AXI4 transaction options that are not being used. - output wire [1:0] m_axi_gmem_AWBURST, - output wire [1:0] m_axi_gmem_AWLOCK, - output wire [3:0] m_axi_gmem_AWCACHE, - output wire [2:0] m_axi_gmem_AWPROT, - output wire [3:0] m_axi_gmem_AWQOS, - output wire [3:0] m_axi_gmem_AWREGION, - output wire m_axi_gmem_WVALID, - input wire m_axi_gmem_WREADY, - output wire [C_M_AXI_GMEM_DATA_WIDTH-1:0] m_axi_gmem_WDATA, - output wire [C_M_AXI_GMEM_DATA_WIDTH/8-1:0] m_axi_gmem_WSTRB, - output wire m_axi_gmem_WLAST, - output wire m_axi_gmem_ARVALID, - input wire m_axi_gmem_ARREADY, - output wire [C_M_AXI_GMEM_ADDR_WIDTH-1:0] m_axi_gmem_ARADDR, - output wire [C_M_AXI_GMEM_ID_WIDTH-1:0] m_axi_gmem_ARID, - output wire [7:0] m_axi_gmem_ARLEN, - output wire [2:0] m_axi_gmem_ARSIZE, - output wire [1:0] m_axi_gmem_ARBURST, - output wire [1:0] m_axi_gmem_ARLOCK, - output wire [3:0] m_axi_gmem_ARCACHE, - output wire [2:0] m_axi_gmem_ARPROT, - output wire [3:0] m_axi_gmem_ARQOS, - output wire [3:0] m_axi_gmem_ARREGION, - input wire m_axi_gmem_RVALID, - output wire m_axi_gmem_RREADY, - input wire [C_M_AXI_GMEM_DATA_WIDTH - 1:0] m_axi_gmem_RDATA, - input wire m_axi_gmem_RLAST, - input wire [C_M_AXI_GMEM_ID_WIDTH - 1:0] m_axi_gmem_RID, - input wire [1:0] m_axi_gmem_RRESP, - input wire m_axi_gmem_BVALID, - output wire m_axi_gmem_BREADY, - input wire [1:0] m_axi_gmem_BRESP, - input wire [C_M_AXI_GMEM_ID_WIDTH - 1:0] m_axi_gmem_BID, - - // AXI4-Lite slave interface - input wire s_axi_control_AWVALID, - output wire s_axi_control_AWREADY, - input wire [C_S_AXI_CONTROL_ADDR_WIDTH-1:0] s_axi_control_AWADDR, - input wire s_axi_control_WVALID, - output wire s_axi_control_WREADY, - input wire [C_S_AXI_CONTROL_DATA_WIDTH-1:0] s_axi_control_WDATA, - input wire [C_S_AXI_CONTROL_DATA_WIDTH/8-1:0] s_axi_control_WSTRB, - input wire 
s_axi_control_ARVALID, - output wire s_axi_control_ARREADY, - input wire [C_S_AXI_CONTROL_ADDR_WIDTH-1:0] s_axi_control_ARADDR, - output wire s_axi_control_RVALID, - input wire s_axi_control_RREADY, - output wire [C_S_AXI_CONTROL_DATA_WIDTH-1:0] s_axi_control_RDATA, - output wire [1:0] s_axi_control_RRESP, - output wire s_axi_control_BVALID, - input wire s_axi_control_BREADY, - output wire [1:0] s_axi_control_BRESP, - output wire interrupt -); - -krnl_rtl_int #( - .C_S_AXI_CONTROL_DATA_WIDTH ( C_S_AXI_CONTROL_DATA_WIDTH ), - .C_S_AXI_CONTROL_ADDR_WIDTH ( C_S_AXI_CONTROL_ADDR_WIDTH ), - .C_M_AXI_GMEM_ID_WIDTH ( C_M_AXI_GMEM_ID_WIDTH ), - .C_M_AXI_GMEM_ADDR_WIDTH ( C_M_AXI_GMEM_ADDR_WIDTH ), - .C_M_AXI_GMEM_DATA_WIDTH ( C_M_AXI_GMEM_DATA_WIDTH ) -) -inst_krnl_rtl_int ( - .ap_clk ( ap_clk ), - .ap_rst_n ( ap_rst_n ), - .m_axi_gmem_AWVALID ( m_axi_gmem_AWVALID ), - .m_axi_gmem_AWREADY ( m_axi_gmem_AWREADY ), - .m_axi_gmem_AWADDR ( m_axi_gmem_AWADDR ), - .m_axi_gmem_AWID ( m_axi_gmem_AWID ), - .m_axi_gmem_AWLEN ( m_axi_gmem_AWLEN ), - .m_axi_gmem_AWSIZE ( m_axi_gmem_AWSIZE ), - .m_axi_gmem_AWBURST ( m_axi_gmem_AWBURST ), - .m_axi_gmem_AWLOCK ( m_axi_gmem_AWLOCK ), - .m_axi_gmem_AWCACHE ( m_axi_gmem_AWCACHE ), - .m_axi_gmem_AWPROT ( m_axi_gmem_AWPROT ), - .m_axi_gmem_AWQOS ( m_axi_gmem_AWQOS ), - .m_axi_gmem_AWREGION ( m_axi_gmem_AWREGION ), - .m_axi_gmem_WVALID ( m_axi_gmem_WVALID ), - .m_axi_gmem_WREADY ( m_axi_gmem_WREADY ), - .m_axi_gmem_WDATA ( m_axi_gmem_WDATA ), - .m_axi_gmem_WSTRB ( m_axi_gmem_WSTRB ), - .m_axi_gmem_WLAST ( m_axi_gmem_WLAST ), - .m_axi_gmem_ARVALID ( m_axi_gmem_ARVALID ), - .m_axi_gmem_ARREADY ( m_axi_gmem_ARREADY ), - .m_axi_gmem_ARADDR ( m_axi_gmem_ARADDR ), - .m_axi_gmem_ARID ( m_axi_gmem_ARID ), - .m_axi_gmem_ARLEN ( m_axi_gmem_ARLEN ), - .m_axi_gmem_ARSIZE ( m_axi_gmem_ARSIZE ), - .m_axi_gmem_ARBURST ( m_axi_gmem_ARBURST ), - .m_axi_gmem_ARLOCK ( m_axi_gmem_ARLOCK ), - .m_axi_gmem_ARCACHE ( m_axi_gmem_ARCACHE ), - .m_axi_gmem_ARPROT ( m_axi_gmem_ARPROT ), - .m_axi_gmem_ARQOS ( m_axi_gmem_ARQOS ), - .m_axi_gmem_ARREGION ( m_axi_gmem_ARREGION ), - .m_axi_gmem_RVALID ( m_axi_gmem_RVALID ), - .m_axi_gmem_RREADY ( m_axi_gmem_RREADY ), - .m_axi_gmem_RDATA ( m_axi_gmem_RDATA ), - .m_axi_gmem_RLAST ( m_axi_gmem_RLAST ), - .m_axi_gmem_RID ( m_axi_gmem_RID ), - .m_axi_gmem_RRESP ( m_axi_gmem_RRESP ), - .m_axi_gmem_BVALID ( m_axi_gmem_BVALID ), - .m_axi_gmem_BREADY ( m_axi_gmem_BREADY ), - .m_axi_gmem_BRESP ( m_axi_gmem_BRESP ), - .m_axi_gmem_BID ( m_axi_gmem_BID ), - .s_axi_control_AWVALID ( s_axi_control_AWVALID ), - .s_axi_control_AWREADY ( s_axi_control_AWREADY ), - .s_axi_control_AWADDR ( s_axi_control_AWADDR ), - .s_axi_control_WVALID ( s_axi_control_WVALID ), - .s_axi_control_WREADY ( s_axi_control_WREADY ), - .s_axi_control_WDATA ( s_axi_control_WDATA ), - .s_axi_control_WSTRB ( s_axi_control_WSTRB ), - .s_axi_control_ARVALID ( s_axi_control_ARVALID ), - .s_axi_control_ARREADY ( s_axi_control_ARREADY ), - .s_axi_control_ARADDR ( s_axi_control_ARADDR ), - .s_axi_control_RVALID ( s_axi_control_RVALID ), - .s_axi_control_RREADY ( s_axi_control_RREADY ), - .s_axi_control_RDATA ( s_axi_control_RDATA ), - .s_axi_control_RRESP ( s_axi_control_RRESP ), - .s_axi_control_BVALID ( s_axi_control_BVALID ), - .s_axi_control_BREADY ( s_axi_control_BREADY ), - .s_axi_control_BRESP ( s_axi_control_BRESP ), - .interrupt ( interrupt ) -); -endmodule : krnl_rtl - -`default_nettype wire diff --git a/hls4ml/hls4ml/templates/vivado_accelerator/alveo/python_drivers/axi_stream_driver.py 
b/hls4ml/hls4ml/templates/vivado_accelerator/alveo/python_drivers/axi_stream_driver.py
deleted file mode 100644
index c589bcf..0000000
--- a/hls4ml/hls4ml/templates/vivado_accelerator/alveo/python_drivers/axi_stream_driver.py
+++ /dev/null
@@ -1,101 +0,0 @@
-from datetime import datetime
-
-import numpy as np
-from pynq import Overlay, allocate
-
-
-class NeuralNetworkOverlay(Overlay):
-    def __init__(self, xclbin_name, dtbo=None, download=True, ignore_version=False, device=None):
-        super().__init__(xclbin_name, dtbo=dtbo, download=download, ignore_version=ignore_version, device=device)
-        self.input_buffer = None
-        self.output_buffer = None
-
-    def allocate_mem(self, X_shape, y_shape, dtype=np.float32, trg_in=None, trg_out=None):
-        """Buffer allocation in the accelerator's memory.
-
-        Args:
-            X_shape (list): Input buffer shape.
-            y_shape (list): Output buffer shape.
-            dtype (dtype, optional): The data type of the elements of the input/output tensors. Must be an instance of
-                numpy dtype. Defaults to np.float32.
-
-                It should be set depending on the interface of the accelerator; if it uses 'float'
-                data type for the 'data' AXI-Stream field, 'np.float32' dtype must be used. If instead it uses
-                'ap_fixed', 'np.intA' is the correct dtype to use. Note that A cannot be any integer value; it can
-                only assume power-of-2 values, i.e., {..., 8, 16, 32, ...}. Check the `numpy` documentation for more
-                information. In this case the encoding/decoding has to be computed by the host machine. For example for
-                'ap_fixed<16,6>' type the following two functions are the correct ones to use to encode/decode
-                'float' -> 'ap_fixed<16,6>'::
-
-                    def encode(xi):
-                        return np.int16(round(xi * 2**10))  # note 2**10 = 2**(A-B)
-                    def decode(yi):
-                        return yi * 2**-10
-                    encode_v = np.vectorize(encode)  # to apply them element-wise
-                    decode_v = np.vectorize(decode)
-
-            trg_in (optional): Input buffer target memory. By default the v++ command sets it to HBM[0] for
-                alveo-u50. Defaults to None.
-            trg_out (optional): Output buffer target memory. By default the v++ command sets it to HBM[0] for
-                alveo-u50. Defaults to None.
-        """
-        self.input_buffer = allocate(shape=X_shape, dtype=dtype, target=trg_in)
-        self.output_buffer = allocate(shape=y_shape, dtype=dtype, target=trg_out)
-
-    def predict(self, X, y_shape, dtype=np.float32, debug=False, profile=False, encode=None, decode=None):
-        """Obtain the predictions of the NN implemented in the FPGA.
-
-        Args:
-            X (ndarray): The input tensor.
-            y_shape (list): The shape of the output tensor, needed by the accelerator to set the TLAST bit properly.
-            dtype (dtype, optional): The data type of the elements of the input/output tensors. Must be an instance of
-                numpy dtype. Defaults to np.float32.
-            debug (bool, optional): If set, the function will print information about the data transfers status.
-                Defaults to False.
-            profile (bool, optional): If set, the function will print the performance of the algorithm in terms of
-                inferences/s. Defaults to False.
-            encode (Callable, optional): Function to transform the input tensor. Defaults to None.
-            decode (Callable, optional): Function to transform the output tensor. Defaults to None.
-
-        Returns:
-            np.ndarray: An array with shape ``y_shape`` and data type ``dtype``.
- """ - self.allocate_mem(X_shape=X.shape, y_shape=y_shape, dtype=dtype) - if profile: - timea = datetime.now() - if encode is not None: - X = encode(X) - in_size = np.prod(X.shape) - out_size = np.prod(y_shape) - self.input_buffer[:] = X - self.input_buffer.sync_to_device() - if debug: - print("Send OK") - self.krnl_rtl_1.call(self.input_buffer, self.output_buffer, in_size, out_size) - if debug: - print("Kernel call OK") - self.output_buffer.sync_from_device() - if debug: - print("Recieve OK") - result = self.output_buffer.copy() - if profile: - timeb = datetime.now() - dts, rate = self._print_dt(timea, timeb, len(X)) - self.input_buffer.flush() - self.output_buffer.flush() - self.free() - return result, dts, rate - self.input_buffer.flush() - self.output_buffer.flush() - return result - - def free_overlay(self): - self.free() - - def _print_dt(self, timea, timeb, N): - dt = timeb - timea - dts = dt.seconds + dt.microseconds * 10**-6 - rate = N / dts - print(f"Classified {N} samples in {dts} seconds ({rate} inferences / s)") - print(f"Or {1 / rate * 1e6} us / inferences") - return dts, rate diff --git a/hls4ml/hls4ml/templates/vivado_accelerator/alveo/tcl_scripts/axi_stream_design.tcl b/hls4ml/hls4ml/templates/vivado_accelerator/alveo/tcl_scripts/axi_stream_design.tcl deleted file mode 100644 index 97da885..0000000 --- a/hls4ml/hls4ml/templates/vivado_accelerator/alveo/tcl_scripts/axi_stream_design.tcl +++ /dev/null @@ -1,109 +0,0 @@ -set tcldir [file dirname [info script]] -source [file join $tcldir project.tcl] - -create_project project_1 ${project_name}_vivado_accelerator -part ${part} -force - -set_property ip_repo_paths ${project_name}_prj [current_project] -update_ip_catalog - - -add_files -scan_for_includes [list src/krnl_rtl_int.sv src/krnl_rtl_axi_read_master.sv src/krnl_rtl_counter.sv src/${project_name}_kernel.v src/krnl_rtl_axi_write_master.sv src/krnl_rtl_control_s_axi.v] -import_files [list src/krnl_rtl_int.sv src/krnl_rtl_axi_read_master.sv src/krnl_rtl_counter.sv src/${project_name}_kernel.v src/krnl_rtl_axi_write_master.sv src/krnl_rtl_control_s_axi.v] - - - -create_ip -vlnv xilinx.com:hls:${project_name}_axi:1.0 -module_name ${project_name}_axi_0 - - -ipx::package_project -root_dir hls4ml_IP -vendor fastmachinelearning.org -library hls4ml -taxonomy /UserIP -import_files -set_current false -ipx::unload_core hls4ml_IP/component.xml -ipx::edit_ip_in_project -upgrade true -name tmp_edit_project -directory hls4ml_IP hls4ml_IP/component.xml -ipx::associate_bus_interfaces -busif m_axi_gmem -clock ap_clk [ipx::current_core] -ipx::associate_bus_interfaces -busif s_axi_control -clock ap_clk [ipx::current_core] -ipx::add_bus_parameter FREQ_HZ [ipx::get_bus_interfaces ap_clk -of_objects [ipx::current_core]] - - - -set_property value_resolve_type user [ipx::get_bus_parameters -of [::ipx::get_bus_interfaces -of [ipx::current_core] *clk*] "FREQ_HZ"] - - - -ipx::add_register CTRL [ipx::get_address_blocks reg0 -of_objects [ipx::get_memory_maps s_axi_control -of_objects [ipx::current_core]]] -ipx::add_register GIER [ipx::get_address_blocks reg0 -of_objects [ipx::get_memory_maps s_axi_control -of_objects [ipx::current_core]]] -ipx::add_register IP_IER [ipx::get_address_blocks reg0 -of_objects [ipx::get_memory_maps s_axi_control -of_objects [ipx::current_core]]] -ipx::add_register IP_ISR [ipx::get_address_blocks reg0 -of_objects [ipx::get_memory_maps s_axi_control -of_objects [ipx::current_core]]] -ipx::add_register fifo_in [ipx::get_address_blocks reg0 -of_objects [ipx::get_memory_maps 
diff --git a/hls4ml/hls4ml/templates/vivado_accelerator/alveo/tcl_scripts/axi_stream_design.tcl b/hls4ml/hls4ml/templates/vivado_accelerator/alveo/tcl_scripts/axi_stream_design.tcl
deleted file mode 100644
index 97da885..0000000
--- a/hls4ml/hls4ml/templates/vivado_accelerator/alveo/tcl_scripts/axi_stream_design.tcl
+++ /dev/null
@@ -1,109 +0,0 @@
-set tcldir [file dirname [info script]]
-source [file join $tcldir project.tcl]
-
-create_project project_1 ${project_name}_vivado_accelerator -part ${part} -force
-
-set_property ip_repo_paths ${project_name}_prj [current_project]
-update_ip_catalog
-
-
-add_files -scan_for_includes [list src/krnl_rtl_int.sv src/krnl_rtl_axi_read_master.sv src/krnl_rtl_counter.sv src/${project_name}_kernel.v src/krnl_rtl_axi_write_master.sv src/krnl_rtl_control_s_axi.v]
-import_files [list src/krnl_rtl_int.sv src/krnl_rtl_axi_read_master.sv src/krnl_rtl_counter.sv src/${project_name}_kernel.v src/krnl_rtl_axi_write_master.sv src/krnl_rtl_control_s_axi.v]
-
-
-
-create_ip -vlnv xilinx.com:hls:${project_name}_axi:1.0 -module_name ${project_name}_axi_0
-
-
-ipx::package_project -root_dir hls4ml_IP -vendor fastmachinelearning.org -library hls4ml -taxonomy /UserIP -import_files -set_current false
-ipx::unload_core hls4ml_IP/component.xml
-ipx::edit_ip_in_project -upgrade true -name tmp_edit_project -directory hls4ml_IP hls4ml_IP/component.xml
-ipx::associate_bus_interfaces -busif m_axi_gmem -clock ap_clk [ipx::current_core]
-ipx::associate_bus_interfaces -busif s_axi_control -clock ap_clk [ipx::current_core]
-ipx::add_bus_parameter FREQ_HZ [ipx::get_bus_interfaces ap_clk -of_objects [ipx::current_core]]
-
-
-
-set_property value_resolve_type user [ipx::get_bus_parameters -of [::ipx::get_bus_interfaces -of [ipx::current_core] *clk*] "FREQ_HZ"]
-
-
-
-ipx::add_register CTRL [ipx::get_address_blocks reg0 -of_objects [ipx::get_memory_maps s_axi_control -of_objects [ipx::current_core]]]
-ipx::add_register GIER [ipx::get_address_blocks reg0 -of_objects [ipx::get_memory_maps s_axi_control -of_objects [ipx::current_core]]]
-ipx::add_register IP_IER [ipx::get_address_blocks reg0 -of_objects [ipx::get_memory_maps s_axi_control -of_objects [ipx::current_core]]]
-ipx::add_register IP_ISR [ipx::get_address_blocks reg0 -of_objects [ipx::get_memory_maps s_axi_control -of_objects [ipx::current_core]]]
-ipx::add_register fifo_in [ipx::get_address_blocks reg0 -of_objects [ipx::get_memory_maps s_axi_control -of_objects [ipx::current_core]]]
-ipx::add_register fifo_out [ipx::get_address_blocks reg0 -of_objects [ipx::get_memory_maps s_axi_control -of_objects [ipx::current_core]]]
-ipx::add_register length_r_in [ipx::get_address_blocks reg0 -of_objects [ipx::get_memory_maps s_axi_control -of_objects [ipx::current_core]]]
-ipx::add_register length_r_out [ipx::get_address_blocks reg0 -of_objects [ipx::get_memory_maps s_axi_control -of_objects [ipx::current_core]]]
-
-
-# Commands to set the description, address offset and size
-
-# CTRL register properties
-set_property Description "Control Signals" [ipx::get_registers CTRL -of_objects [ipx::get_address_blocks reg0 -of_objects [ipx::get_memory_maps s_axi_control -of_objects [ipx::current_core]]]]
-set_property Address_Offset 0x000 [ipx::get_registers CTRL -of_objects [ipx::get_address_blocks reg0 -of_objects [ipx::get_memory_maps s_axi_control -of_objects [ipx::current_core]]]]
-set_property Size 32 [ipx::get_registers CTRL -of_objects [ipx::get_address_blocks reg0 -of_objects [ipx::get_memory_maps s_axi_control -of_objects [ipx::current_core]]]]
-
-# GIER register properties
-set_property Description "Global Interrupt Enable Register" [ipx::get_registers GIER -of_objects [ipx::get_address_blocks reg0 -of_objects [ipx::get_memory_maps s_axi_control -of_objects [ipx::current_core]]]]
-set_property Address_Offset 0x004 [ipx::get_registers GIER -of_objects [ipx::get_address_blocks reg0 -of_objects [ipx::get_memory_maps s_axi_control -of_objects [ipx::current_core]]]]
-set_property Size 32 [ipx::get_registers GIER -of_objects [ipx::get_address_blocks reg0 -of_objects [ipx::get_memory_maps s_axi_control -of_objects [ipx::current_core]]]]
-
-# IP_IER register properties
-set_property Description "IP Interrupt Enable Register" [ipx::get_registers IP_IER -of_objects [ipx::get_address_blocks reg0 -of_objects [ipx::get_memory_maps s_axi_control -of_objects [ipx::current_core]]]]
-set_property Address_Offset 0x008 [ipx::get_registers IP_IER -of_objects [ipx::get_address_blocks reg0 -of_objects [ipx::get_memory_maps s_axi_control -of_objects [ipx::current_core]]]]
-set_property Size 32 [ipx::get_registers IP_IER -of_objects [ipx::get_address_blocks reg0 -of_objects [ipx::get_memory_maps s_axi_control -of_objects [ipx::current_core]]]]
-
-# IP_ISR register properties
-set_property Description "IP Interrupt Status Register" [ipx::get_registers IP_ISR -of_objects [ipx::get_address_blocks reg0 -of_objects [ipx::get_memory_maps s_axi_control -of_objects [ipx::current_core]]]]
-set_property Address_Offset 0x00C [ipx::get_registers IP_ISR -of_objects [ipx::get_address_blocks reg0 -of_objects [ipx::get_memory_maps s_axi_control -of_objects [ipx::current_core]]]]
-set_property Size 32 [ipx::get_registers IP_ISR -of_objects [ipx::get_address_blocks reg0 -of_objects [ipx::get_memory_maps s_axi_control -of_objects [ipx::current_core]]]]
-
-# fifo_in register properties
-set_property Description "fifo_in pointer argument" [ipx::get_registers fifo_in -of_objects [ipx::get_address_blocks reg0 -of_objects [ipx::get_memory_maps s_axi_control -of_objects [ipx::current_core]]]]
-set_property Address_Offset 0x010 [ipx::get_registers fifo_in -of_objects [ipx::get_address_blocks reg0 -of_objects [ipx::get_memory_maps s_axi_control -of_objects [ipx::current_core]]]]
-set_property Size 64 [ipx::get_registers fifo_in -of_objects [ipx::get_address_blocks reg0 -of_objects [ipx::get_memory_maps s_axi_control -of_objects [ipx::current_core]]]]
-
-# fifo_out register
properties -set_property Description "fifo_out pointer argument" [ipx::get_registers fifo_out -of_objects [ipx::get_address_blocks reg0 -of_objects [ipx::get_memory_maps s_axi_control -of_objects [ipx::current_core]]]] -set_property Address_Offset 0x01C [ipx::get_registers fifo_out -of_objects [ipx::get_address_blocks reg0 -of_objects [ipx::get_memory_maps s_axi_control -of_objects [ipx::current_core]]]] -set_property Size 64 [ipx::get_registers fifo_out -of_objects [ipx::get_address_blocks reg0 -of_objects [ipx::get_memory_maps s_axi_control -of_objects [ipx::current_core]]]] - -# length_r_in register properties -set_property Description "length_r_in value" [ipx::get_registers length_r_in -of_objects [ipx::get_address_blocks reg0 -of_objects [ipx::get_memory_maps s_axi_control -of_objects [ipx::current_core]]]] -set_property Address_Offset 0x028 [ipx::get_registers length_r_in -of_objects [ipx::get_address_blocks reg0 -of_objects [ipx::get_memory_maps s_axi_control -of_objects [ipx::current_core]]]] -set_property Size 32 [ipx::get_registers length_r_in -of_objects [ipx::get_address_blocks reg0 -of_objects [ipx::get_memory_maps s_axi_control -of_objects [ipx::current_core]]]] - -# length_r_out register properties -set_property Description "length_r_out value" [ipx::get_registers length_r_out -of_objects [ipx::get_address_blocks reg0 -of_objects [ipx::get_memory_maps s_axi_control -of_objects [ipx::current_core]]]] -set_property Address_Offset 0x030 [ipx::get_registers length_r_out -of_objects [ipx::get_address_blocks reg0 -of_objects [ipx::get_memory_maps s_axi_control -of_objects [ipx::current_core]]]] -set_property Size 32 [ipx::get_registers length_r_out -of_objects [ipx::get_address_blocks reg0 -of_objects [ipx::get_memory_maps s_axi_control -of_objects [ipx::current_core]]]] - -ipx::add_register_parameter ASSOCIATED_BUSIF [ipx::get_registers fifo_in -of_objects [ipx::get_address_blocks reg0 -of_objects [ipx::get_memory_maps s_axi_control -of_objects [ipx::current_core]]]] -ipx::add_register_parameter ASSOCIATED_BUSIF [ipx::get_registers fifo_out -of_objects [ipx::get_address_blocks reg0 -of_objects [ipx::get_memory_maps s_axi_control -of_objects [ipx::current_core]]]] - -# Commands to set m_axi_gmem as value in the register ASSOCIATED_BUSIF parameters -set_property Value m_axi_gmem [ipx::get_register_parameters ASSOCIATED_BUSIF -of_objects [ipx::get_registers fifo_in -of_objects [ipx::get_address_blocks reg0 -of_objects [ipx::get_memory_maps s_axi_control -of_objects [ipx::current_core]]]]] -set_property Value m_axi_gmem [ipx::get_register_parameters ASSOCIATED_BUSIF -of_objects [ipx::get_registers fifo_out -of_objects [ipx::get_address_blocks reg0 -of_objects [ipx::get_memory_maps s_axi_control -of_objects [ipx::current_core]]]]] - -set core [ipx::current_core] - - -set_property xpm_libraries {XPM_CDC XPM_MEMORY XPM_FIFO} $core -set_property sdx_kernel true $core -set_property sdx_kernel_type rtl $core - - - -set_property core_revision 2 [ipx::current_core] -ipx::update_source_project_archive -component [ipx::current_core] -ipx::create_xgui_files [ipx::current_core] -ipx::update_checksums [ipx::current_core] -ipx::save_core [ipx::current_core] -ipx::check_integrity -quiet [ipx::current_core] -ipx::archive_core hls4ml_IP/fastmachinelearning.org_hls4ml_krnl_rtl_1.0.zip [ipx::current_core] -current_project project_1 - - -package_xo -force -xo_path xo_files/${project_name}_kernel.xo -kernel_name krnl_rtl -ip_directory hls4ml_IP diff --git 
a/hls4ml/hls4ml/templates/vivado_accelerator/build_lib.sh b/hls4ml/hls4ml/templates/vivado_accelerator/build_lib.sh
deleted file mode 100644
index 69a2bac..0000000
--- a/hls4ml/hls4ml/templates/vivado_accelerator/build_lib.sh
+++ /dev/null
@@ -1,17 +0,0 @@
-#!/bin/bash
-
-CC=g++
-if [[ "$OSTYPE" == "linux-gnu" ]]; then
-    CFLAGS="-O3 -fPIC -std=c++11 -fno-gnu-unique"
-elif [[ "$OSTYPE" == "darwin"* ]]; then
-    CFLAGS="-O3 -fPIC -std=c++11"
-fi
-INCFLAGS="-Ifirmware/ap_types/"
-PROJECT=myproject
-LIB_STAMP=mystamp
-
-${CC} ${CFLAGS} ${INCFLAGS} -c firmware/${PROJECT}.cpp -o ${PROJECT}.o
-${CC} ${CFLAGS} ${INCFLAGS} -c firmware/${PROJECT}_axi.cpp -o ${PROJECT}_axi.o
-${CC} ${CFLAGS} ${INCFLAGS} -c ${PROJECT}_bridge.cpp -o ${PROJECT}_bridge.o
-${CC} ${CFLAGS} ${INCFLAGS} -shared ${PROJECT}.o ${PROJECT}_axi.o ${PROJECT}_bridge.o -o firmware/${PROJECT}-${LIB_STAMP}.so
-rm -f *.o
diff --git a/hls4ml/hls4ml/templates/vivado_accelerator/myproject_axi.cpp b/hls4ml/hls4ml/templates/vivado_accelerator/myproject_axi.cpp
deleted file mode 100644
index 05797f1..0000000
--- a/hls4ml/hls4ml/templates/vivado_accelerator/myproject_axi.cpp
+++ /dev/null
@@ -1,14 +0,0 @@
-// hls-fpga-machine-learning insert include
-
-void myproject_axi(input_axi_t in[N_IN], output_axi_t out[N_OUT]) {
-
-    // hls-fpga-machine-learning insert interface
-
-    // hls-fpga-machine-learning insert local vars
-
-    // hls-fpga-machine-learning insert enqueue
-
-    // hls-fpga-machine-learning insert call
-
-    // hls-fpga-machine-learning insert dequeue
-}
diff --git a/hls4ml/hls4ml/templates/vivado_accelerator/myproject_axi.h b/hls4ml/hls4ml/templates/vivado_accelerator/myproject_axi.h
deleted file mode 100644
index a60dab3..0000000
--- a/hls4ml/hls4ml/templates/vivado_accelerator/myproject_axi.h
+++ /dev/null
@@ -1,10 +0,0 @@
-#ifndef MYPROJECT_AXI_H_
-#define MYPROJECT_AXI_H_
-
-#include
-// hls-fpga-machine-learning insert include
-
-// hls-fpga-machine-learning insert definitions
-
-void myproject_axi(input_axi_t in[N_IN], output_axi_t out[N_OUT]);
-#endif
diff --git a/hls4ml/hls4ml/templates/vivado_accelerator/pynq-z2/python_drivers/axi_stream_driver.py b/hls4ml/hls4ml/templates/vivado_accelerator/pynq-z2/python_drivers/axi_stream_driver.py
deleted file mode 100644
index 1aac79f..0000000
--- a/hls4ml/hls4ml/templates/vivado_accelerator/pynq-z2/python_drivers/axi_stream_driver.py
+++ /dev/null
@@ -1,75 +0,0 @@
-from datetime import datetime
-
-import numpy as np
-from pynq import Overlay, allocate
-
-
-class NeuralNetworkOverlay(Overlay):
-    def __init__(
-        self, bitfile_name, x_shape, y_shape, dtype=np.float32, dtbo=None, download=True, ignore_version=False, device=None
-    ):
-        super().__init__(bitfile_name, dtbo=dtbo, download=download, ignore_version=ignore_version, device=device)
-        self.sendchannel = self.hier_0.axi_dma_0.sendchannel
-        self.recvchannel = self.hier_0.axi_dma_0.recvchannel
-        self.input_buffer = allocate(shape=x_shape, dtype=dtype)
-        self.output_buffer = allocate(shape=y_shape, dtype=dtype)
-
-    def _print_dt(self, timea, timeb, N):
-        dt = timeb - timea
-        dts = dt.seconds + dt.microseconds * 10**-6
-        rate = N / dts
-        print(f"Classified {N} samples in {dts} seconds ({rate} inferences / s)")
-        return dts, rate
-
-    def predict(self, X, debug=False, profile=False, encode=None, decode=None):
-        """
-        Obtain the predictions of the NN implemented in the FPGA.
-        Parameters:
-        - X : the input vector. Should be a numpy ndarray.
-        - dtype : the data type of the elements of the input/output vectors.
-          Note: it should be set depending on the interface of the accelerator; if it uses 'float'
-          types for the 'data' AXI-Stream field, 'np.float32' dtype is the correct one to use.
-          If instead it uses 'ap_fixed', 'np.intA' is the correct one to use (note that A cannot be
-          any integer value; it can only assume {..., 8, 16, 32, ...} values. Check the `numpy`
-          docs for more info).
-          In this case the encoding/decoding has to be computed by the PS. For example for
-          'ap_fixed<16,6>' type the following two functions are the correct ones to use to encode/decode
-          'float' -> 'ap_fixed<16,6>':
-          ```
-          def encode(xi):
-              return np.int16(round(xi * 2**10))  # note 2**10 = 2**(A-B)
-          def decode(yi):
-              return yi * 2**-10
-          encode_v = np.vectorize(encode)  # to apply them element-wise
-          decode_v = np.vectorize(decode)
-          ```
-        - profile : boolean. Set it to `True` to print the performance of the algorithm in terms of `inferences/s`.
-        - encode/decode: function pointers. See `dtype` section for more information.
-        - return: an output array based on `np.ndarray` with a shape equal to `y_shape` and a `dtype` equal to
-          the namesake parameter.
-        """
-        if profile:
-            timea = datetime.now()
-        if encode is not None:
-            X = encode(X)
-        self.input_buffer[:] = X
-        self.sendchannel.transfer(self.input_buffer)
-        self.recvchannel.transfer(self.output_buffer)
-        if debug:
-            print("Transfer OK")
-        self.sendchannel.wait()
-        if debug:
-            print("Send OK")
-        self.recvchannel.wait()
-        if debug:
-            print("Receive OK")
-        # result = self.output_buffer.copy()
-        if decode is not None:
-            self.output_buffer = decode(self.output_buffer)
-
-        if profile:
-            timeb = datetime.now()
-            dts, rate = self._print_dt(timea, timeb, len(X))
-            return self.output_buffer, dts, rate
-        else:
-            return self.output_buffer
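The encode/decode pair shown in the docstring above generalizes to any `ap_fixed<W,I>`: with W total bits and I integer bits there are W-I fractional bits, so one unit corresponds to 2**(W-I) codes. A small sketch of that generalization (my helper, not part of the shipped driver; it assumes the stream carries raw two's-complement words and does no saturation, unlike `ap_fixed` with `AP_SAT`):

```
import numpy as np

def make_codec(total_bits=16, integer_bits=6):
    # Fixed-point <-> float helpers for ap_fixed<total_bits, integer_bits>.
    frac_bits = total_bits - integer_bits  # e.g. 10 for ap_fixed<16,6>
    int_type = {8: np.int8, 16: np.int16, 32: np.int32}[total_bits]

    def encode(x):
        return np.round(np.asarray(x) * 2.0**frac_bits).astype(int_type)

    def decode(y):
        return np.asarray(y) * 2.0**-frac_bits

    return encode, decode

encode, decode = make_codec(16, 6)  # matches the ap_fixed<16,6> example above
```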
diff --git a/hls4ml/hls4ml/templates/vivado_accelerator/pynq-z2/tcl_scripts/axi_lite_design.tcl b/hls4ml/hls4ml/templates/vivado_accelerator/pynq-z2/tcl_scripts/axi_lite_design.tcl
deleted file mode 100644
index c14aafb..0000000
--- a/hls4ml/hls4ml/templates/vivado_accelerator/pynq-z2/tcl_scripts/axi_lite_design.tcl
+++ /dev/null
@@ -1,26 +0,0 @@
-set tcldir [file dirname [info script]]
-source [file join $tcldir project.tcl]
-
-create_project project_1 ${project_name}_vivado_accelerator -part xc7z020clg400-1 -force
-
-set_property board_part tul.com.tw:pynq-z2:part0:1.0 [current_project]
-set_property ip_repo_paths ${project_name}_prj [current_project]
-update_ip_catalog
-
-# Create Block Designer design
-create_bd_design "design_1"
-create_bd_cell -type ip -vlnv xilinx.com:ip:processing_system7:5.5 processing_system7_0
-apply_bd_automation -rule xilinx.com:bd_rule:processing_system7 -config {make_external "FIXED_IO, DDR" apply_board_preset "1" Master "Disable" Slave "Disable" } [get_bd_cells processing_system7_0]
-create_bd_cell -type ip -vlnv xilinx.com:hls:${project_name}_axi:1.0 ${project_name}_axi_0
-apply_bd_automation -rule xilinx.com:bd_rule:axi4 -config { Clk_master {Auto} Clk_slave {Auto} Clk_xbar {Auto} Master {/processing_system7_0/M_AXI_GP0} Slave {/${project_name}_axi_0/s_axi_AXILiteS} ddr_seg {Auto} intc_ip {New AXI Interconnect} master_apm {0}} [get_bd_intf_pins ${project_name}_axi_0/s_axi_AXILiteS]
-
-make_wrapper -files [get_files ./${project_name}_vivado_accelerator/project_1.srcs/sources_1/bd/design_1/design_1.bd] -top
-add_files -norecurse ./${project_name}_vivado_accelerator/project_1.srcs/sources_1/bd/design_1/hdl/design_1_wrapper.v
-
-reset_run impl_1
-reset_run synth_1
-launch_runs impl_1 -to_step write_bitstream -jobs 6
-wait_on_run -timeout 360 impl_1
-
-open_run impl_1
-report_utilization -file util.rpt -hierarchical -hierarchical_percentages
diff --git a/hls4ml/hls4ml/templates/vivado_accelerator/pynq-z2/tcl_scripts/axi_stream_design.tcl b/hls4ml/hls4ml/templates/vivado_accelerator/pynq-z2/tcl_scripts/axi_stream_design.tcl
deleted file mode 100644
index c5549dc..0000000
--- a/hls4ml/hls4ml/templates/vivado_accelerator/pynq-z2/tcl_scripts/axi_stream_design.tcl
+++ /dev/null
@@ -1,59 +0,0 @@
-#@todo: try to remove startgroup and endgroup and see if it works
-set tcldir [file dirname [info script]]
-source [file join $tcldir project.tcl]
-
-create_project project_1 ${project_name}_vivado_accelerator -part xc7z020clg400-1 -force
-
-set_property board_part tul.com.tw:pynq-z2:part0:1.0 [current_project]
-set_property ip_repo_paths ${project_name}_prj [current_project]
-update_ip_catalog
-
-create_bd_design "design_1"
-
-startgroup
-create_bd_cell -type ip -vlnv xilinx.com:ip:processing_system7:5.5 processing_system7_0
-endgroup
-
-apply_bd_automation -rule xilinx.com:bd_rule:processing_system7 -config {make_external "FIXED_IO, DDR" apply_board_preset "1" Master "Disable" Slave "Disable" } [get_bd_cells processing_system7_0]
-
-startgroup
-set_property -dict [list CONFIG.PCW_USE_S_AXI_HP0 {1}] [get_bd_cells processing_system7_0]
-endgroup
-
-startgroup
-create_bd_cell -type ip -vlnv xilinx.com:ip:axi_dma:7.1 axi_dma_0
-endgroup
-
-set_property -dict [list CONFIG.c_s_axis_s2mm_tdata_width.VALUE_SRC USER] [get_bd_cells axi_dma_0]
-set_property -dict [list CONFIG.c_include_sg {0} CONFIG.c_sg_length_width {26} CONFIG.c_sg_include_stscntrl_strm {0} CONFIG.c_m_axi_mm2s_data_width ${bit_width_hls_input} CONFIG.c_m_axis_mm2s_tdata_width ${bit_width_hls_input} CONFIG.c_mm2s_burst_size {256} CONFIG.c_s_axis_s2mm_tdata_width ${bit_width_hls_output} CONFIG.c_s_axis_s2mm_data_width ${bit_width_hls_output} CONFIG.c_s2mm_burst_size {256}] [get_bd_cells axi_dma_0]
-
-startgroup
-apply_bd_automation -rule xilinx.com:bd_rule:axi4 -config { Clk_master {Auto} Clk_slave {Auto} Clk_xbar {Auto} Master {/processing_system7_0/M_AXI_GP0} Slave {/axi_dma_0/S_AXI_LITE} ddr_seg {Auto} intc_ip {New AXI Interconnect} master_apm {0}} [get_bd_intf_pins axi_dma_0/S_AXI_LITE]
-
-apply_bd_automation -rule xilinx.com:bd_rule:axi4 -config { Clk_master {Auto} Clk_slave {Auto} Clk_xbar {Auto} Master {/axi_dma_0/M_AXI_MM2S} Slave {/processing_system7_0/S_AXI_HP0} ddr_seg {Auto} intc_ip {New AXI Interconnect} master_apm {0}} [get_bd_intf_pins processing_system7_0/S_AXI_HP0]
-endgroup
-
-apply_bd_automation -rule xilinx.com:bd_rule:axi4 -config { Clk_master {Auto} Clk_slave {/processing_system7_0/FCLK_CLK0 (100 MHz)} Clk_xbar {/processing_system7_0/FCLK_CLK0 (100 MHz)} Master {/axi_dma_0/M_AXI_S2MM} Slave {/processing_system7_0/S_AXI_HP0} ddr_seg {Auto} intc_ip {/axi_mem_intercon} master_apm {0}} [get_bd_intf_pins axi_dma_0/M_AXI_S2MM]
-
-startgroup
-create_bd_cell -type ip -vlnv xilinx.com:hls:${project_name}_axi:1.0 ${project_name}_axi_0
-endgroup
-
-connect_bd_intf_net [get_bd_intf_pins axi_dma_0/M_AXIS_MM2S] [get_bd_intf_pins ${project_name}_axi_0/in_r]
-connect_bd_intf_net [get_bd_intf_pins ${project_name}_axi_0/out_r] [get_bd_intf_pins axi_dma_0/S_AXIS_S2MM]
-
-apply_bd_automation -rule xilinx.com:bd_rule:clkrst -config { Clk {/processing_system7_0/FCLK_CLK0 (100 MHz)} Freq {100} Ref_Clk0 {} Ref_Clk1 {} Ref_Clk2 {}} [get_bd_pins ${project_name}_axi_0/ap_clk]
-
-group_bd_cells hier_0 [get_bd_cells axi_dma_0] [get_bd_cells ${project_name}_axi_0]
-
-make_wrapper -files [get_files ./${project_name}_vivado_accelerator/project_1.srcs/sources_1/bd/design_1/design_1.bd] -top
-
-add_files -norecurse ./${project_name}_vivado_accelerator/project_1.srcs/sources_1/bd/design_1/hdl/design_1_wrapper.v
-
-reset_run impl_1
-reset_run synth_1
-launch_runs impl_1 -to_step write_bitstream -jobs 6
-wait_on_run -timeout 360 impl_1
-
-open_run impl_1
-report_utilization -file util.rpt -hierarchical -hierarchical_percentages
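One sizing constraint implied by `CONFIG.c_sg_length_width {26}` in the script above: in simple (non-scatter-gather) mode this sets the width of the DMA buffer-length register, so a single transfer is capped at 2**26 - 1 bytes (an assumption worth verifying against the AXI DMA product guide for your IP version). A quick host-side check, with illustrative sizes:

```
LENGTH_WIDTH = 26                     # matches c_sg_length_width above
MAX_XFER_BYTES = 2**LENGTH_WIDTH - 1  # roughly 64 MiB per DMA transfer

n_samples, n_features, bytes_per_word = 10_000, 16, 4
assert n_samples * n_features * bytes_per_word <= MAX_XFER_BYTES
```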
diff --git a/hls4ml/hls4ml/templates/vivado_accelerator/zcu102/python_drivers/axi_stream_driver.py b/hls4ml/hls4ml/templates/vivado_accelerator/zcu102/python_drivers/axi_stream_driver.py
deleted file mode 100644
index 1aac79f..0000000
--- a/hls4ml/hls4ml/templates/vivado_accelerator/zcu102/python_drivers/axi_stream_driver.py
+++ /dev/null
@@ -1,75 +0,0 @@
-from datetime import datetime
-
-import numpy as np
-from pynq import Overlay, allocate
-
-
-class NeuralNetworkOverlay(Overlay):
-    def __init__(
-        self, bitfile_name, x_shape, y_shape, dtype=np.float32, dtbo=None, download=True, ignore_version=False, device=None
-    ):
-        super().__init__(bitfile_name, dtbo=dtbo, download=download, ignore_version=ignore_version, device=device)
-        self.sendchannel = self.hier_0.axi_dma_0.sendchannel
-        self.recvchannel = self.hier_0.axi_dma_0.recvchannel
-        self.input_buffer = allocate(shape=x_shape, dtype=dtype)
-        self.output_buffer = allocate(shape=y_shape, dtype=dtype)
-
-    def _print_dt(self, timea, timeb, N):
-        dt = timeb - timea
-        dts = dt.seconds + dt.microseconds * 10**-6
-        rate = N / dts
-        print(f"Classified {N} samples in {dts} seconds ({rate} inferences / s)")
-        return dts, rate
-
-    def predict(self, X, debug=False, profile=False, encode=None, decode=None):
-        """
-        Obtain the predictions of the NN implemented in the FPGA.
-        Parameters:
-        - X : the input vector. Should be a numpy ndarray.
-        - dtype : the data type of the elements of the input/output vectors.
-          Note: it should be set depending on the interface of the accelerator; if it uses 'float'
-          types for the 'data' AXI-Stream field, 'np.float32' dtype is the correct one to use.
-          If instead it uses 'ap_fixed', 'np.intA' is the correct one to use (note that A cannot be
-          any integer value; it can only assume {..., 8, 16, 32, ...} values. Check the `numpy`
-          docs for more info).
-          In this case the encoding/decoding has to be computed by the PS. For example for
-          'ap_fixed<16,6>' type the following two functions are the correct ones to use to encode/decode
-          'float' -> 'ap_fixed<16,6>':
-          ```
-          def encode(xi):
-              return np.int16(round(xi * 2**10))  # note 2**10 = 2**(A-B)
-          def decode(yi):
-              return yi * 2**-10
-          encode_v = np.vectorize(encode)  # to apply them element-wise
-          decode_v = np.vectorize(decode)
-          ```
-        - profile : boolean. Set it to `True` to print the performance of the algorithm in terms of `inferences/s`.
-        - encode/decode: function pointers. See `dtype` section for more information.
-        - return: an output array based on `np.ndarray` with a shape equal to `y_shape` and a `dtype` equal to
-          the namesake parameter.
- """ - if profile: - timea = datetime.now() - if encode is not None: - X = encode(X) - self.input_buffer[:] = X - self.sendchannel.transfer(self.input_buffer) - self.recvchannel.transfer(self.output_buffer) - if debug: - print("Transfer OK") - self.sendchannel.wait() - if debug: - print("Send OK") - self.recvchannel.wait() - if debug: - print("Receive OK") - # result = self.output_buffer.copy() - if decode is not None: - self.output_buffer = decode(self.output_buffer) - - if profile: - timeb = datetime.now() - dts, rate = self._print_dt(timea, timeb, len(X)) - return self.output_buffer, dts, rate - else: - return self.output_buffer diff --git a/hls4ml/hls4ml/templates/vivado_accelerator/zcu102/tcl_scripts/axi_stream_design.tcl b/hls4ml/hls4ml/templates/vivado_accelerator/zcu102/tcl_scripts/axi_stream_design.tcl deleted file mode 100644 index 5d886c6..0000000 --- a/hls4ml/hls4ml/templates/vivado_accelerator/zcu102/tcl_scripts/axi_stream_design.tcl +++ /dev/null @@ -1,58 +0,0 @@ -#@todo: try to remove startgroup and endgroup and see if it work -set tcldir [file dirname [info script]] -source [file join $tcldir project.tcl] - -create_project project_1 ${project_name}_vivado_accelerator -part xczu9eg-ffvb1156-2-e -force - -set_property board_part xilinx.com:zcu102:part0:3.3 [current_project] -set_property ip_repo_paths ${project_name}_prj [current_project] -update_ip_catalog - -create_bd_design "design_1" -set_property ip_repo_paths ${project_name}_prj/solution1/impl/ip [current_project] -update_ip_catalog - -startgroup -create_bd_cell -type ip -vlnv xilinx.com:ip:zynq_ultra_ps_e:3.3 zynq_ultra_ps_e_0 -endgroup - -apply_bd_automation -rule xilinx.com:bd_rule:zynq_ultra_ps_e -config {apply_board_preset "1" } [get_bd_cells zynq_ultra_ps_e_0] - -set_property -dict [list CONFIG.PSU__USE__S_AXI_GP0 {1} CONFIG.PSU__SAXIGP0__DATA_WIDTH {32}] [get_bd_cells zynq_ultra_ps_e_0] - -startgroup -create_bd_cell -type ip -vlnv xilinx.com:ip:axi_dma:7.1 axi_dma_0 -endgroup -set_property -dict [list CONFIG.c_m_axi_s2mm_data_width.VALUE_SRC USER CONFIG.c_s_axis_s2mm_tdata_width.VALUE_SRC USER] [get_bd_cells axi_dma_0] -set_property -dict [list CONFIG.c_include_sg {0} CONFIG.c_sg_length_width {26} CONFIG.c_sg_include_stscntrl_strm {0} CONFIG.c_m_axi_mm2s_data_width ${bit_width_hls_input} CONFIG.c_m_axis_mm2s_tdata_width ${bit_width_hls_input} CONFIG.c_mm2s_burst_size {256} CONFIG.c_m_axi_s2mm_data_width ${bit_width_hls_output} CONFIG.c_s_axis_s2mm_tdata_width ${bit_width_hls_output} CONFIG.c_s2mm_burst_size {256}] [get_bd_cells axi_dma_0] - -startgroup -apply_bd_automation -rule xilinx.com:bd_rule:axi4 -config { Clk_master {Auto} Clk_slave {Auto} Clk_xbar {Auto} Master {/zynq_ultra_ps_e_0/M_AXI_HPM0_FPD} Slave {/axi_dma_0/S_AXI_LITE} ddr_seg {Auto} intc_ip {New AXI Interconnect} master_apm {0}} [get_bd_intf_pins axi_dma_0/S_AXI_LITE] -apply_bd_automation -rule xilinx.com:bd_rule:axi4 -config { Clk_master {Auto} Clk_slave {Auto} Clk_xbar {Auto} Master {/axi_dma_0/M_AXI_MM2S} Slave {/zynq_ultra_ps_e_0/S_AXI_HPC0_FPD} ddr_seg {Auto} intc_ip {New AXI SmartConnect} master_apm {0}} [get_bd_intf_pins zynq_ultra_ps_e_0/S_AXI_HPC0_FPD] -endgroup - -startgroup -apply_bd_automation -rule xilinx.com:bd_rule:axi4 -config { Clk_master {Auto} Clk_slave {/zynq_ultra_ps_e_0/pl_clk0 (99 MHz)} Clk_xbar {/zynq_ultra_ps_e_0/pl_clk0 (99 MHz)} Master {/axi_dma_0/M_AXI_S2MM} Slave {/zynq_ultra_ps_e_0/S_AXI_HPC0_FPD} ddr_seg {Auto} intc_ip {/axi_smc} master_apm {0}} [get_bd_intf_pins axi_dma_0/M_AXI_S2MM] -apply_bd_automation -rule 
xilinx.com:bd_rule:axi4 -config { Clk_master {Auto} Clk_slave {/zynq_ultra_ps_e_0/pl_clk0 (99 MHz)} Clk_xbar {/zynq_ultra_ps_e_0/pl_clk0 (99 MHz)} Master {/zynq_ultra_ps_e_0/M_AXI_HPM1_FPD} Slave {/axi_dma_0/S_AXI_LITE} ddr_seg {Auto} intc_ip {/ps8_0_axi_periph} master_apm {0}} [get_bd_intf_pins zynq_ultra_ps_e_0/M_AXI_HPM1_FPD] -endgroup - -startgroup -create_bd_cell -type ip -vlnv xilinx.com:hls:${project_name}_axi:1.0 ${project_name}_axi_0 -endgroup -connect_bd_intf_net [get_bd_intf_pins axi_dma_0/M_AXIS_MM2S] [get_bd_intf_pins ${project_name}_axi_0/in_r] -connect_bd_intf_net [get_bd_intf_pins axi_dma_0/S_AXIS_S2MM] [get_bd_intf_pins ${project_name}_axi_0/out_r] - -apply_bd_automation -rule xilinx.com:bd_rule:clkrst -config { Clk {/zynq_ultra_ps_e_0/pl_clk0 (99 MHz)} Freq {100} Ref_Clk0 {} Ref_Clk1 {} Ref_Clk2 {}} [get_bd_pins ${project_name}_axi_0/ap_clk] -group_bd_cells hier_0 [get_bd_cells axi_dma_0] [get_bd_cells ${project_name}_axi_0] - -make_wrapper -files [get_files ./${project_name}_vivado_accelerator/project_1.srcs/sources_1/bd/design_1/design_1.bd] -top - -add_files -norecurse ./${project_name}_vivado_accelerator/project_1.srcs/sources_1/bd/design_1/hdl/design_1_wrapper.v - -reset_run impl_1 -reset_run synth_1 -launch_runs impl_1 -to_step write_bitstream -jobs 6 -wait_on_run -timeout 360 impl_1 - -open_run impl_1 -report_utilization -file util.rpt -hierarchical -hierarchical_percentages diff --git a/hls4ml/hls4ml/utils/__init__.py b/hls4ml/hls4ml/utils/__init__.py deleted file mode 100644 index f03cacb..0000000 --- a/hls4ml/hls4ml/utils/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from hls4ml.utils.config import config_from_keras_model, config_from_onnx_model, config_from_pytorch_model # noqa: F401 -from hls4ml.utils.example_models import fetch_example_list, fetch_example_model # noqa: F401 -from hls4ml.utils.plot import plot_model # noqa: F401 diff --git a/hls4ml/hls4ml/utils/config.py b/hls4ml/hls4ml/utils/config.py deleted file mode 100644 index 5d7ca1a..0000000 --- a/hls4ml/hls4ml/utils/config.py +++ /dev/null @@ -1,363 +0,0 @@ -import json - -import qkeras - -import hls4ml - - -def create_config(output_dir='my-hls-test', project_name='myproject', backend='Vivado', version='1.0.0', **kwargs): - """Create an initial configuration to guide the conversion process. - - The resulting configuration will contain general information about the project (like project name and output directory) - as well as the backend-specific configuration (part numbers, clocks etc). Extra arguments of this function will be - passed to the backend's ``create_initial_config``. For the possible list of arguments, check the documentation of each - backend. - - Args: - output_dir (str, optional): The output directory to which the generated project will be written. - Defaults to 'my-hls-test'. - project_name (str, optional): The name of the project, that will be used as a top-level function in HLS designs. - Defaults to 'myproject'. - backend (str, optional): The backend to use. Defaults to 'Vivado'. - version (str, optional): Optional string to version the generated project for backends that support it. - Defaults to '1.0.0'. - - Raises: - Exception: Raised if unknown backend is specified. - - Returns: - dict: The conversion configuration. 
- """ - backend_list = hls4ml.backends.get_available_backends() - if backend.lower() not in backend_list: - raise Exception(f'Unknown backend: {backend}') - - backend = hls4ml.backends.get_backend(backend) - - backend_config = backend.create_initial_config(**kwargs) - - config = {} - config['OutputDir'] = output_dir - config['ProjectName'] = project_name - config['Backend'] = backend.name - config['Version'] = version - config.update(backend_config) - - return config - - -def _get_precision_from_quantizer(quantizer): - if isinstance(quantizer, str): - quantizer_obj = qkeras.get_quantizer(quantizer) - quantizer = {} - # Some activations are classes with get_config method - if hasattr(quantizer_obj, 'get_config'): - quantizer['class_name'] = quantizer_obj.__class__.__name__ - quantizer['config'] = quantizer_obj.get_config() - # Some activations are just functions - else: - quantizer['class_name'] = quantizer_obj.__name__ - - supported_quantizers = [ - 'quantized_bits', - 'quantized_relu', - 'quantized_tanh', - 'quantized_sigmoid', - 'quantized_po2', - 'quantized_relu_po2', - 'linear', - ] - signed = True - rnd = "AP_TRN" - overflow = "AP_WRAP" - - if quantizer['class_name'] in supported_quantizers: - bits = int(quantizer['config']['bits']) - # if integer isn't specified, it should be the same as bits - integer = int(quantizer['config'].get('integer', bits - 1)) + 1 - # for quantizers use the following default rounding and overflow - rnd = "AP_RND_CONV" - overflow = "AP_SAT" - if quantizer['class_name'] in ('quantized_relu', 'quantized_relu_po2'): - signed = False - integer -= 1 - elif quantizer['class_name'] == 'quantized_tanh': - overflow = "AP_SAT_SYM" if quantizer['config']['symmetric'] else "AP_SAT" - integer = 1 - elif quantizer['class_name'] == 'quantized_sigmoid': - integer = 0 - signed = False - - elif quantizer['class_name'] in ['binary', 'stochastic_binary', 'binary_tanh']: - bits = 2 - integer = 2 - - elif quantizer['class_name'] in ['ternary', 'stochastic_ternary', 'ternary_tanh']: - bits = 2 - integer = 2 - else: - raise Exception('ERROR: Unsupported quantizer: {}'.format(quantizer['class_name'])) - - decimal = bits - integer - - if decimal > 0: - return hls4ml.model.types.FixedPrecisionType( - width=bits, integer=integer, signed=signed, rounding_mode=rnd, saturation_mode=overflow - ) - else: - return hls4ml.model.types.IntegerPrecisionType(width=integer, signed=signed) - - -def config_from_keras_model( - model, granularity='model', backend=None, default_precision='fixed<16,6>', default_reuse_factor=1 -): - """Create an HLS conversion config given the Keras model. - - This function serves as the initial step in creating the custom conversion configuration. - Users are advised to inspect the returned object to tweak the conversion configuration. - The return object can be passed as `hls_config` parameter to `convert_from_keras_model`. - - Args: - model: Keras model - granularity (str, optional): Granularity of the created config. Defaults to 'model'. - Can be set to 'model', 'type' and 'name'. - - Granularity can be used to generate a more verbose config that can be fine-tuned. - The default granularity ('model') will generate config keys that apply to the whole - model, so changes to the keys will affect the entire model. 'type' granularity will - generate config keys that affect all layers of a given type, while the 'name' granularity - will generate config keys for every layer separately, allowing for highly specific - configuration tweaks. 
- backend(str, optional): Name of the backend to use - default_precision (str, optional): Default precision to use. Defaults to 'fixed<16,6>'. - default_reuse_factor (int, optional): Default reuse factor. Defaults to 1. - - Raises: - Exception: If Keras model has layers not supported by hls4ml. - - Returns: - [dict]: The created config. - """ - if granularity.lower() not in ['model', 'type', 'name']: - raise Exception( - f'Invalid configuration granularity specified, expected "model", "type" or "name" got "{granularity}"' - ) - - if backend is not None: - backend = hls4ml.backends.get_backend(backend) - - # This is a list of dictionaries to hold all the layer info we need to generate HLS - layer_list = [] - - if isinstance(model, dict): - model_arch = model - else: - model_arch = json.loads(model.to_json()) - - reader = hls4ml.converters.KerasModelReader(model) - - layer_list, _, _, _ = hls4ml.converters.parse_keras_model(model_arch, reader) - - def make_layer_config(layer): - cls_name = layer['class_name'] - if 'config' in layer.keys(): - if 'activation' in layer['config'].keys(): - if layer['config']['activation'] == 'softmax': - cls_name = 'Softmax' - - layer_cls = hls4ml.model.layers.layer_map[cls_name] - if backend is not None: - layer_cls = backend.create_layer_class(layer_cls) - - layer_config = {} - - config_attrs = [a for a in layer_cls.expected_attributes if a.configurable] - for attr in config_attrs: - if isinstance(attr, hls4ml.model.attributes.TypeAttribute): - precision_cfg = layer_config.setdefault('Precision', {}) - name = attr.name - if name.endswith('_t'): - name = name[:-2] - if attr.default is None: - precision_cfg[name] = default_precision - else: - precision_cfg[name] = str(attr.default) - else: - if attr.default is not None: - layer_config[attr.config_name] = attr.default - - quantizers = {qname: qclass for qname, qclass in layer.items() if 'quantizer' in qname and qclass is not None} - for qname, qclass in quantizers.items(): - pname = qname.lower().split('_quantizer')[0] - if pname == 'activation': - pname = 'result' - if isinstance(qclass, dict): - precision = _get_precision_from_quantizer(qclass) - else: - precision = qclass.hls_type - # TODO In the next version of this function, these should not be exposed to user to tweak - layer_config['Precision'][pname] = str(precision) - - if layer['class_name'] in ['GarNet', 'GarNetStack']: - # Define default precisions for various internal arrays (can be overridden from the config file) - import math - - log2_reuse = int(math.log(default_reuse_factor, 2.0)) - n_vertices_width = int(math.log(layer['n_vertices'], 2.0)) - - # We always give 10 digits for the subintegral part - fwidth = 10 - # Integral precision for aggr_t depends on how large the temporary sum for weighed feature mean will be - aggr_intw = max(log2_reuse, n_vertices_width - log2_reuse) + 3 # safety factor 2**3 - aggr_w = aggr_intw + fwidth - # edge_weight_aggr_t does not need the safety factor - ew_aggr_intw = aggr_intw - 3 - ew_aggr_w = ew_aggr_intw + fwidth - - layer_config['Precision'] = {} - layer_config['Precision']['edge_weight'] = 'ap_ufixed<10,0,AP_TRN,AP_SAT>' - layer_config['Precision']['edge_weight_aggr'] = f'ap_ufixed<{ew_aggr_w},{ew_aggr_intw},AP_TRN,AP_SAT>' - layer_config['Precision']['aggr'] = f'ap_fixed<{aggr_w},{aggr_intw},AP_TRN,AP_SAT>' - layer_config['Precision']['norm'] = 'ap_ufixed<14,4,AP_TRN,AP_SAT>' - - layer_config['ReuseFactor'] = default_reuse_factor - - elif layer['class_name'] == 'Input': - dtype = layer['config']['dtype'] - if 
dtype.startswith('int') or dtype.startswith('uint'):
-                typename = dtype[: dtype.index('int') + 3]
-                width = int(dtype[dtype.index('int') + 3 :])
-                layer_config['Precision']['result'] = f'ap_{typename}<{width}>'
-            # elif bool, q[u]int, ...
-
-        return layer_config
-
-    config = {}
-
-    model_config = {}
-    model_config['Precision'] = default_precision
-    model_config['ReuseFactor'] = default_reuse_factor
-    model_config['Strategy'] = 'Latency'
-    model_config['BramFactor'] = 1_000_000_000
-    model_config['TraceOutput'] = False
-
-    config['Model'] = model_config
-
-    if granularity.lower() == 'type':
-        type_config = {}
-        for layer in layer_list:
-            if layer['class_name'] in type_config:
-                continue
-            layer_config = make_layer_config(layer)
-            type_config[layer['class_name']] = layer_config
-
-        config['LayerType'] = type_config
-
-    elif granularity.lower() == 'name':
-        name_config = {}
-        for layer in layer_list:
-            layer_config = make_layer_config(layer)
-            name_config[layer['name']] = layer_config
-
-        config['LayerName'] = name_config
-
-    return config
-
-
-def config_from_pytorch_model(
-    model,
-    granularity='model',
-    backend=None,
-    default_precision='ap_fixed<16,6>',
-    default_reuse_factor=1,
-    inputs_channel_last=False,
-    transpose_outputs=True,
-):
-    """Create an HLS conversion config given the PyTorch model.
-
-    This function serves as the initial step in creating the custom conversion configuration.
-    Users are advised to inspect the returned object to tweak the conversion configuration.
-    The return object can be passed as `hls_config` parameter to `convert_from_pytorch_model`.
-
-    Args:
-        model: PyTorch model
-        granularity (str, optional): Granularity of the created config. Defaults to 'model'.
-            Can be set to 'model', 'type' and 'name'.
-
-            Granularity can be used to generate a more verbose config that can be fine-tuned.
-            The default granularity ('model') will generate config keys that apply to the whole
-            model, so changes to the keys will affect the entire model. 'type' granularity will
-            generate config keys that affect all layers of a given type, while the 'name' granularity
-            will generate config keys for every layer separately, allowing for highly specific
-            configuration tweaks.
-        backend (str, optional): Name of the backend to use
-        default_precision (str, optional): Default precision to use. Defaults to 'ap_fixed<16,6>'.
-        default_reuse_factor (int, optional): Default reuse factor. Defaults to 1.
-        inputs_channel_last (bool, optional): Set to 'True' if input to the model comes in the
-            'channels_last' format. Defaults to 'False'. If False, inputs will be transposed internally.
-        transpose_outputs (bool, optional): Set to 'False' if the output should not be transposed from
-            channels_last into channels_first data format. Defaults to 'True'. If False, outputs need
-            to be transposed manually.
-
-    Raises:
-        Exception: If PyTorch model has layers not supported by hls4ml.
-
-    Returns:
-        [dict]: The created config.
-    """
-
-    config = {}
-
-    model_config = {}
-    model_config['Precision'] = default_precision
-    model_config['ReuseFactor'] = default_reuse_factor
-    model_config['InputsChannelLast'] = inputs_channel_last
-    model_config['TransposeOutputs'] = transpose_outputs
-    model_config['Strategy'] = 'Latency'
-
-    config['Model'] = model_config
-
-    return config
-
-
-def config_from_onnx_model(
-    model, granularity='model', backend=None, default_precision='ap_fixed<16,6>', default_reuse_factor=1
-):
-    """Create an HLS conversion config given the ONNX model.
-
-    This function serves as the initial step in creating the custom conversion configuration.
-    Users are advised to inspect the returned object to tweak the conversion configuration.
-    The return object can be passed as `hls_config` parameter to `convert_from_onnx_model`.
-
-    Args:
-        model: ONNX model
-        granularity (str, optional): Granularity of the created config. Defaults to 'model'.
-            Can be set to 'model', 'type' and 'name'.
-
-            Granularity can be used to generate a more verbose config that can be fine-tuned.
-            The default granularity ('model') will generate config keys that apply to the whole
-            model, so changes to the keys will affect the entire model. 'type' granularity will
-            generate config keys that affect all layers of a given type, while the 'name' granularity
-            will generate config keys for every layer separately, allowing for highly specific
-            configuration tweaks.
-        backend (str, optional): Name of the backend to use
-        default_precision (str, optional): Default precision to use. Defaults to 'ap_fixed<16,6>'.
-        default_reuse_factor (int, optional): Default reuse factor. Defaults to 1.
-
-    Raises:
-        Exception: If ONNX model has layers not supported by hls4ml.
-
-    Returns:
-        [dict]: The created config.
-    """
-
-    config = {}
-
-    model_config = {}
-    model_config['Precision'] = default_precision
-    model_config['ReuseFactor'] = default_reuse_factor
-    model_config['Strategy'] = 'Latency'
-
-    config['Model'] = model_config
-
-    return config
diff --git a/hls4ml/hls4ml/utils/example_models.py b/hls4ml/hls4ml/utils/example_models.py
deleted file mode 100644
index 5fefbd6..0000000
--- a/hls4ml/hls4ml/utils/example_models.py
+++ /dev/null
@@ -1,186 +0,0 @@
-import json
-import pprint
-from urllib.request import urlretrieve
-
-import yaml
-
-from .config import create_config
-
-
-def _load_data_config_avai(model_name):
-    """
-    Check data and configuration availability for each model from this file:
-
-    https://github.com/hls-fpga-machine-learning/example-models/blob/master/available_data_config.json
-    """
-
-    link_to_list = (
-        'https://raw.githubusercontent.com/hls-fpga-machine-learning/example-models/master/available_data_config.json'
-    )
-
-    temp_file, _ = urlretrieve(link_to_list)
-
-    # Read data from file:
-    data = json.load(open(temp_file))
-
-    return data[model_name]
-
-
-def _data_is_available(model_name):
-    data = _load_data_config_avai(model_name)
-
-    return data['example_data']
-
-
-def _config_is_available(model_name):
-    data = _load_data_config_avai(model_name)
-
-    return data['example_config']
-
-
-def _create_default_config(model_name, model_config, backend):
-    # Initialize the configuration file
-    config = create_config(backend=backend)
-
-    # Additional configuration parameters
-    config[model_config] = model_name
-    config['HLSConfig']['Model'] = {}
-    config['HLSConfig']['Model']['Precision'] = 'ap_fixed<16,6>'
-    config['HLSConfig']['Model']['ReuseFactor'] = 1
-
-    return config
-
-
-def _filter_name(model_name):
-    """
-    Need to get "garnet_1layer" from "garnet_1layer.json" for loading of data and configuration files
-    """
-    filtered_name = None
-
-    if model_name.endswith('.json') or model_name.endswith('.onnx'):
-        filtered_name = model_name[:-5]
-    elif model_name.endswith('.pt') or model_name.endswith('.pb'):
-        filtered_name = model_name[:-3]
-
-    return filtered_name
-
-
-def _load_example_data(model_name):
-    print("Downloading input & output example files ...")
-
-    filtered_name = _filter_name(model_name)
-
-    input_file_name = filtered_name + "_input.dat"
-    output_file_name =
filtered_name + "_output.dat" - - link_to_input = ( - 'https://raw.githubusercontent.com/hls-fpga-machine-learning/example-models/master/data/' + input_file_name - ) - link_to_output = ( - 'https://raw.githubusercontent.com/hls-fpga-machine-learning/example-models/master/data/' + output_file_name - ) - - urlretrieve(link_to_input, input_file_name) - urlretrieve(link_to_output, output_file_name) - - -def _load_example_config(model_name): - print("Downloading configuration files ...") - - filtered_name = _filter_name(model_name) - - config_name = filtered_name + "_config.yml" - - link_to_config = ( - 'https://raw.githubusercontent.com/hls-fpga-machine-learning/example-models/master/config-files/' + config_name - ) - - # Load the configuration as dictionary from file - urlretrieve(link_to_config, config_name) - - # Load the configuration from local yml file - with open(config_name) as ymlfile: - config = yaml.safe_load(ymlfile) - - return config - - -def fetch_example_model(model_name, backend='Vivado'): - """ - Download an example model (and example data & configuration if available) from github repo to working directory, - and return the corresponding configuration: - - https://github.com/hls-fpga-machine-learning/example-models - - Use fetch_example_list() to see all the available models. - - Args: - model_name (str): Name of the example model in the repo. Example: fetch_example_model('KERAS_3layer.json') - backend (str, optional): Name of the backend to use for model conversion. - - Return: - dict: Dictionary that stores the configuration to the model - """ - - # Initilize the download link and model type - download_link = 'https://raw.githubusercontent.com/hls-fpga-machine-learning/example-models/master/' - model_type = None - model_config = None - - # Check for model's type to update link - if '.json' in model_name: - model_type = 'keras' - model_config = 'KerasJson' - elif '.pt' in model_name: - model_type = 'pytorch' - model_config = 'PytorchModel' - elif '.onnx' in model_name: - model_type = 'onnx' - model_config = 'OnnxModel' - elif '.pb' in model_name: - model_type = 'tensorflow' - model_config = 'TensorFlowModel' - else: - raise TypeError('Model type is not supported in hls4ml.') - - download_link_model = download_link + model_type + '/' + model_name - - # Download the example model - print("Downloading example model files ...") - urlretrieve( - download_link_model, - model_name, - ) - - # Check if the example data and configuration for the model are available - if _data_is_available(model_name): - _load_example_data(model_name) - - if _config_is_available(model_name): - config = _load_example_config(model_name) - else: - config = _create_default_config(model_name, model_config, backend) - - # If the model is a keras model then have to download its weight file as well - if model_type == 'keras': - model_weight_name = model_name[:-5] + "_weights.h5" - - download_link_weight = download_link + model_type + '/' + model_weight_name - urlretrieve(download_link_weight, model_weight_name) - - config['KerasH5'] = model_weight_name # Set configuration for the weight file - - return config - - -def fetch_example_list(): - link_to_list = 'https://raw.githubusercontent.com/hls-fpga-machine-learning/example-models/master/available_models.json' - - temp_file, _ = urlretrieve(link_to_list) - - # Read data from file: - data = json.load(open(temp_file)) - - # Print in fancy format - pp = pprint.PrettyPrinter(indent=4) - pp.pprint(data) diff --git a/hls4ml/hls4ml/utils/fixed_point_utils.py 
deleted file mode 100644
index 0ca29d7..0000000
--- a/hls4ml/hls4ml/utils/fixed_point_utils.py
+++ /dev/null
@@ -1,138 +0,0 @@
-import math
-import sys
-
-'''
-A helper class for handling fixed point methods
-Currently, very limited, allowing only:
-    - Conversion to float
-    - Exponents
-    - Reciprocals
-Used primarily for generating the softmax look-up table
-by using bit manipulation (see Vivado-equivalent implementation)
-'''
-
-
-class FixedPointEmulator:
-    '''
-    Default constructor
-    Args:
-        - N : Total number of bits in the fixed point number
-        - I : Integer bits in the fixed point number
-        - F = N-I : Fractional bits in the number
-        - signed : True/False - If True, use 2's complement when converting to float
-        - self.integer_bits : Bits corresponding to the integer part of the number
-        - self.decimal_bits : Bits corresponding to the decimal part of the number
-    '''
-
-    def __init__(self, N, I, signed=True, integer_bits=None, decimal_bits=None):  # noqa E741
-        self.N = N
-        self.I = I  # noqa E741
-        self.F = N - I
-        self.signed = signed
-        self.integer_bits = [0] * self.I if integer_bits is None else integer_bits
-        self.decimal_bits = [0] * self.F if decimal_bits is None else decimal_bits
-
-    '''
-    Converts the fixed point number stored in self.bits to a floating point
-    Args:
-        - None
-    Returns:
-        - val : float, the floating point equivalent of the fixed point number
-    Description:
-        1. Check if the number is signed: if so, a set MSB contributes -2.0^(I-1) to the
-           intermediate result (0.0 if not set); otherwise it contributes +2.0^(I-1)
-        2. Traverse through the remaining integer bits, incrementing the result by 2.0^(i) (using left shift)
-        3. Traverse through the decimal bits, incrementing the result by 2.0^(-i) (using pow)
-    Note:
-        - This function uses left shifts instead of integer powers of 2.
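    A small worked sketch of the conversion (bit values are illustrative)::

        # A signed fixed<4,2> number with bits 10.01:
        # MSB set => -2^(I-1) = -2.0, second decimal bit set => +2^-2
        fp = FixedPointEmulator(N=4, I=2, signed=True, integer_bits=[1, 0], decimal_bits=[0, 1])
        print(fp.to_float())  # -2.0 + 0.25 = -1.75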
-    '''
-
-    def to_float(self):
-        val = float(int(self.integer_bits[0]) << (self.I - 1))
-        val = -val if self.signed else val
-
-        for i in range(self.I - 1, 0, -1):
-            val += float(int(self.integer_bits[self.I - i]) << (i - 1))
-
-        for i in range(0, self.F):
-            if self.decimal_bits[i]:
-                val += pow(2, -(i + 1))
-
-        return val
-
-    '''
-    Sets the top bits of the current number
-    Args:
-        - bits : Values the top bits should be set to
-    '''
-
-    def set_msb_bits(self, bits):
-        for i in range(0, len(bits)):
-            if i < self.I:
-                self.integer_bits[i] = bits[i]
-            elif i >= self.I and i < self.N:
-                self.decimal_bits[i - self.I] = bits[i]
-
-    '''
-    Returns e^x, where x is the current fixed point number
-    Args:
-        - None
-    Returns:
-        - Float : e^x, rounded to some number of decimal places
    Notice:
-        - If e^x overflows, the maximum float value is used
-    '''
-
-    def exp_float(self, sig_figs=12):
-        try:
-            return round(math.exp(self.to_float()), sig_figs)
-        except OverflowError:
-            return round(sys.float_info.max, sig_figs)
-
-    '''
-    Returns 1/x, where x is the current fixed point number
-    Args:
-        - None
-    Returns:
-        - Float : 1/x, rounded to some number of decimal places
-    '''
-
-    def inv_float(self, sig_figs=12):
-        if self.to_float() != 0:
-            return round(1.0 / self.to_float(), sig_figs)
-        else:
-            return round(sys.float_info.max, sig_figs)
-
-
-'''
-    Converts unsigned integer i to N-bit binary number
-    Args:
-        - i : Number to be converted
-        - N : Number of bits to be used
-    Note:
-        - N must be at least log2(i)+1
-'''
-
-
-def uint_to_binary(i, N):
-    # Gets the binary representation of the number
-    bits = [int(b) for b in list(f'{i:0b}')]
-
-    # Zero padding, so exactly N bits are used
-    while len(bits) < N:
-        bits.insert(0, 0)
-
-    return bits
-
-
-'''
-    Returns log2(i), rounding up
-    Args:
-        - i : Number
-    Returns:
-        - val : representing ceil(log2(i))
-'''
-
-
-def ceil_log2(i):
-    # (i - 1).bit_length() rounds up for non-power-of-two i, matching the docstring
-    return (i - 1).bit_length()
diff --git a/hls4ml/hls4ml/utils/plot.py b/hls4ml/hls4ml/utils/plot.py
deleted file mode 100644
index e3424bb..0000000
--- a/hls4ml/hls4ml/utils/plot.py
+++ /dev/null
@@ -1,224 +0,0 @@
-# Heavily inspired by Keras's plot_model
-"""Utilities related to model visualization."""
-
-import os
-import sys
-
-try:
-    import pydot
-except ImportError:
-    pydot = None
-
-
-def check_pydot():
-    """Returns True if PyDot and Graphviz are available."""
-    if pydot is None:
-        return False
-    try:
-        # Attempt to create an image of a blank graph
-        # to check the pydot/graphviz installation.
-        pydot.Dot.create(pydot.Dot())
-        return True
-    except OSError:
-        return False
-
-
-def add_edge(dot, src, dst):
-    if not dot.get_edge(src, dst):
-        dot.add_edge(pydot.Edge(src, dst))
-
-
-def model_to_dot(
-    model, show_shapes=False, show_layer_names=True, show_precision=False, rankdir='TB', dpi=96, subgraph=False
-):
-    """Convert an HLS model to dot format.
-
-    Arguments:
-        model: A HLS model instance.
-        show_shapes: whether to display shape information.
-        show_layer_names: whether to display layer names.
-        show_precision: whether to display precision of layer's variables.
-        rankdir: `rankdir` argument passed to PyDot,
-            a string specifying the format of the plot:
-            'TB' creates a vertical plot;
-            'LR' creates a horizontal plot.
-        dpi: Dots per inch.
-        subgraph: whether to return a `pydot.Cluster` instance.
-
-    Returns:
-        A `pydot.Dot` instance representing the HLS model or
-        a `pydot.Cluster` instance representing the nested model if
-        `subgraph=True`.
-
-    Raises:
-        ImportError: if graphviz or pydot are not available.
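    A hedged usage sketch (assumes pydot/Graphviz are installed and ``model`` is an hls4ml ``ModelGraph``)::

        from hls4ml.utils.plot import plot_model

        # plot_model() below wraps model_to_dot(); passing to_file=None returns an
        # IPython Image for in-line notebook display instead of writing a file.
        plot_model(model, to_file='model.png', show_shapes=True, show_precision=True, rankdir='LR')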
- """ - - if not check_pydot(): - if 'IPython.core.magics.namespace' in sys.modules: - # We don't raise an exception here in order to avoid crashing notebook - # tests where graphviz is not available. - print('Failed to import pydot. You must install pydot' ' and graphviz for `pydotprint` to work.') - return - else: - raise ImportError('Failed to import pydot. You must install pydot' ' and graphviz for `pydotprint` to work.') - - if subgraph: - dot = pydot.Cluster(style='dashed', graph_name=model.name) - dot.set('label', model.name) - dot.set('labeljust', 'l') - else: - dot = pydot.Dot() - dot.set('rankdir', rankdir) - dot.set('concentrate', True) - dot.set('dpi', dpi) - dot.set_node_defaults(shape='record') - - layers = model.get_layers() - - # Create graph nodes. - for i, layer in enumerate(layers): - # layer_id = str(id(layer)) - layer_id = str(layer.index) - - # Append a wrapped layer's label to node's label, if it exists. - layer_name = layer.name - class_name = layer.class_name - - # Create node's label. - if show_layer_names: - # label = '{}: {}'.format(class_name, layer_name) - # label = '{}\\l{}\\l'.format(class_name, layer_name) - label = f'{class_name}
    {layer_name}' - else: - label = class_name - - # Rebuild the label as a table including input/output shapes. - if show_shapes: - - def format_shape(shape): - return str(tuple(shape)).replace(str(None), '?') - - input_labels = '?' - try: - output_labels = format_shape(layer.get_output_variable().shape) - except AttributeError: - output_labels = '?' - if class_name != 'Input': - if len(layer.inputs) > 1: - input_shapes = [] - for i in layer.inputs: - input_layer = layer.get_input_variable(i) - if input_layer is not None: - input_shapes.append(input_layer.shape) - else: - input_shapes.append('?') - formatted_shapes = [format_shape(ishape) for ishape in input_shapes] - input_labels = ', '.join(formatted_shapes) - else: - input_layer = layer.get_input_variable() - if input_layer is not None: - input_labels = format_shape(input_layer.shape) - label = f'{label}\n|{{input: {input_labels}|output: {output_labels}}}' - - # Rebuild the label as a table including tensor precision. - if show_precision: - - def format_precision(precision): - return str(precision).replace('<', '<').replace('>', '>') - - precision_labels = [] - tensors = {} - tensors.update(layer.weights) - if len(layer.variables) == 1: - # A bit cleaner output - tensors['output'] = layer.get_output_variable() - else: - tensors.update(layer.variables) - for tensor_name, var in tensors.items(): - if show_shapes: - # tensor_label = '{} {}: {}'.format(tensor_name, - tensor_label = '{} {}:{}'.format( - tensor_name, format_shape(var.shape), format_precision(var.type.precision) - ) - else: - # tensor_label = '{}: {}'.format(tensor_name, - tensor_label = '{}:{}'.format( - tensor_name, format_precision(var.type.precision) - ) - precision_labels.append(tensor_label) - # precision_label = '
    '.join(precision_labels) - precision_label = ''.join(precision_labels) - precision_label = '' + precision_label + '
    ' - label = f'{label}|{{{precision_label}}}' - - label = '<' + label + '>' - node = pydot.Node(layer_id, label=label) - dot.add_node(node) - - # Connect nodes with edges. - for layer in layers: - layer_id = str(layer.index) - for input_name in layer.inputs: - input_layer = layer.get_input_node(input_name) - if input_layer is not None: - input_layer_id = str(input_layer.index) - add_edge(dot, input_layer_id, layer_id) - - return dot - - -def plot_model( - model, to_file='model.png', show_shapes=False, show_layer_names=True, show_precision=False, rankdir='TB', dpi=96 -): - """Converts a HLS model to dot format and save to a file. - - Arguments: - model: A HLS model instance - to_file: File name of the plot image. - show_shapes: whether to display shape information. - show_layer_names: whether to display layer names. - show_precision: whether to display precision of layer's variables. - rankdir: `rankdir` argument passed to PyDot, - a string specifying the format of the plot: - 'TB' creates a vertical plot; - 'LR' creates a horizontal plot. - dpi: Dots per inch. - - Returns: - A Jupyter notebook Image object if Jupyter is installed. - This enables in-line display of the model plots in notebooks. - """ - dot = model_to_dot( - model, - show_shapes=show_shapes, - show_layer_names=show_layer_names, - show_precision=show_precision, - rankdir=rankdir, - dpi=dpi, - ) - if dot is None: - return - - if to_file is not None: - _, extension = os.path.splitext(to_file) - if not extension: - extension = 'png' - else: - extension = extension[1:] - # Save image to disk. - dot.write(to_file, format=extension) - else: - # Return the image as a Jupyter Image object, to be displayed in-line. - # Note that we cannot easily detect whether the code is running in a - # notebook, and thus we always return the Image if Jupyter is available. - try: - import tempfile - - from IPython import display - - temp = tempfile.NamedTemporaryFile(suffix='.png') - dot.write(temp.name, format='png') - return display.Image(filename=temp.name) - except ImportError: - pass diff --git a/hls4ml/hls4ml/utils/string_utils.py b/hls4ml/hls4ml/utils/string_utils.py deleted file mode 100644 index fa341cd..0000000 --- a/hls4ml/hls4ml/utils/string_utils.py +++ /dev/null @@ -1,25 +0,0 @@ -import re - - -def convert_to_snake_case(pascal_case): - """Convert string in PascalCase to snake_case - - Args: - pascal_case (str): string to convert - - Returns: - str: converted string - """ - return re.sub(r'(? N - 1 - idx = N - 1 - end - - x_approx = range_start + step * idx - return Float32(fun(x_approx)) -end -""" - - -def init_pysr_lut_functions(init_defaults=False, function_definitions=None): - """Register LUT-based approximations with PySR. - - Functions should be in the form of:: - - (x) = math_lut(, x, N=, range_start=, range_end=) - - where ```` is a given name that can be used with PySR, ```` is the math function to approximate - (`sin`, `cos`, `log`,...), ```` is the size of the lookup table, and ```` and ```` are the - ranges in which the function will be approximated. It is **strongly** recommended to use a power-of-two as a range. - - Registered functions can be passed by name to ``PySRRegressor`` (as ``unary_operators``). - - Args: - init_defaults (bool, optional): Register the most frequently used functions (sin, cos, tan, log, exp). - Defaults to False. - function_definitions (list, optional): List of strings with function definitions to register with PySR. - Defaults to None. 
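    A sketch of typical use (assumes a working PySR/Julia installation; the ``cosh_lut`` definition is illustrative)::

        init_pysr_lut_functions(
            init_defaults=True,
            function_definitions=['cosh_lut(x) = math_lut(cosh, x, N=1024, range_start=-4, range_end=4)'],
        )
        # 'sin_lut', ..., 'cosh_lut' can now be listed in PySRRegressor(unary_operators=[...])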
- """ - from pysr.julia_helpers import init_julia - - Main = init_julia() - Main.eval(math_lut_julia) - - if init_defaults: - Main.eval('sin_lut(x) = math_lut(sin, x, N=1024, range_start=-4, range_end=4)') - Main.eval('cos_lut(x) = math_lut(cos, x, N=1024, range_start=-4, range_end=4)') - Main.eval('tan_lut(x) = math_lut(tan, x, N=1024, range_start=-4, range_end=4)') - - Main.eval('log_lut(x) = math_lut(log, x, N=1024, range_start=0, range_end=8)') - Main.eval('exp_lut(x) = math_lut(exp, x, N=1024, range_start=0, range_end=16)') - - for func in function_definitions or []: - register_pysr_lut_function(func, Main) - - -def register_pysr_lut_function(func, julia_main=None): - if julia_main is None: - from pysr.julia_helpers import init_julia - - Main = init_julia() - else: - Main = julia_main - - Main.eval(func) - - -class LUTFunction: - def __init__(self, name, math_func, range_start, range_end, table_size=1024) -> None: - self.name = name - self.math_func = math_func - self.range_start = range_start - self.range_end = range_end - self.table_size = table_size - - -_binary_ops = {'/': 'x / y', '*': 'x * y', '+': 'x + y', '-': 'x - y', 'pow': 'x**y', 'pow_abs': 'Abs(x) ** y'} - - -_unary_ops = { - 'abs': 'Abs', - 'mod': 'sympy.Mod(x, 2)', - 'erf': 'sympy.erf', - 'erfc': 'sympy.erfc', - 'log': 'sympy.log(x)', - 'log10': 'sympy.log(x, 10)', - 'log2': 'sympy.log(x, 2)', - 'log1p': 'sympy.log(x + 1)', - 'log_abs': 'sympy.log(Abs(x))', - 'log10_abs': 'sympy.log(Abs(x), 10)', - 'log2_abs': 'sympy.log(Abs(x), 2)', - 'log1p_abs': 'sympy.log(Abs(x) + 1)', - 'floor': 'sympy.floor', - 'ceil': 'sympy.ceiling', - 'sqrt': 'sympy.sqrt(x)', - 'sqrt_abs': 'sympy.sqrt(Abs(x))', - 'square': 'x**2', - 'cube': 'x**3', - 'neg': '-x', - 'cos': 'sympy.cos', - 'sin': 'sympy.sin', - 'tan': 'sympy.tan', - 'cosh': 'sympy.cosh', - 'sinh': 'sympy.sinh', - 'tanh': 'sympy.tanh', - 'exp': 'sympy.exp', - 'acos': 'sympy.acos', - 'asin': 'sympy.asin', - 'atan': 'sympy.atan', - 'acosh': 'sympy.acosh(x)', - 'acosh_abs': 'sympy.acosh(Abs(x) + 1)', - 'asinh': 'sympy.asinh', - 'atanh': 'sympy.atanh(sympy.Mod(x + 1, 2) - 1)', - 'atanh_clip': 'sympy.atanh(sympy.Mod(x + 1, 2) - 1)', - 'sign': 'sympy.sign', -} - - -def generate_operator_complexity( - part, precision, unary_operators=None, binary_operators=None, hls_include_path=None, hls_libs_path=None -): - """Generates HLS projects and synthesizes them to obtain operator complexity (clock cycles per given math operation). - - This function can be used to obtain a list of operator complexity for a given FPGA part at a given precision. - - Args: - part (str): FPGA part number to use. - precision (str): Precision to use. - unary_operators (list, optional): List of unary operators to evaluate. Defaults to None. - binary_operators (list, optional): List of binary operators to evaluate. Defaults to None. - hls_include_path (str, optional): Path to the HLS include files. Defaults to None. - hls_libs_path (str, optional): Path to the HLS libs. Defaults to None. - - Returns: - dict: Dictionary of obtained operator complexities. 
- """ - - from sympy.parsing.sympy_parser import parse_expr as parse_sympy_expr - - from hls4ml.converters import convert_from_symbolic_expression - - if unary_operators is None: - unary_ops = _unary_ops - else: - unary_ops = {fn_name: sympy_expr for fn_name, sympy_expr in _unary_ops.items() if fn_name in unary_operators} - if binary_operators is None: - binary_ops = _binary_ops - else: - binary_ops = {fn_name: sympy_expr for fn_name, sympy_expr in _binary_ops.items() if fn_name in binary_operators} - - complexity_of_operators = {} - - with tempfile.TemporaryDirectory() as tmp_dir: - for fn_name, sympy_expr in binary_ops.items(): - print(f'Estimating complexity of {fn_name}') - equation = sympy_expr.replace('x', 'x0').replace('y', 'x1') - expression = parse_sympy_expr(equation) - hls_model = convert_from_symbolic_expression( - expression, - n_symbols=2, - output_dir=tmp_dir, - precision=precision, - part=part, - hls_include_path=hls_include_path, - hls_libs_path=hls_libs_path, - ) - hls_model.write() - subprocess.run( - ['vivado_hls', '-f', 'build_prj.tcl', '"reset=1 synth=1 csim=0 cosim=0 validation=0 export=0"'], - cwd=tmp_dir, - stdout=subprocess.DEVNULL, - stderr=subprocess.STDOUT, - ) - result = subprocess.check_output( - ['awk', 'NR==32', 'myproject_prj/solution1/syn/report/myproject_csynth.rpt'], cwd=tmp_dir - ) - cc = result.decode('utf-8').replace(' ', '').split('|')[1] - complexity_of_operators[fn_name] = max(int(cc), 1) - - for fn_name, sympy_expr in unary_ops.items(): - print(f'Estimating complexity of {fn_name}') - equation = sympy_expr.replace('sympy.', '') - if 'x' in equation and fn_name != 'exp': - equation = equation.replace('x', 'x0') - else: - equation += '(x0)' - expression = parse_sympy_expr(equation) - hls_model = convert_from_symbolic_expression( - expression, - n_symbols=1, - output_dir=tmp_dir, - precision=precision, - part=part, - hls_include_path=hls_include_path, - hls_libs_path=hls_libs_path, - ) - hls_model.write() - subprocess.run( - ['vivado_hls', '-f', 'build_prj.tcl', '"reset=1 synth=1 csim=0 cosim=0 validation=0 export=0"'], - cwd=tmp_dir, - stdout=subprocess.DEVNULL, - stderr=subprocess.STDOUT, - ) - result = subprocess.check_output( - ['awk', 'NR==32', 'myproject_prj/solution1/syn/report/myproject_csynth.rpt'], cwd=tmp_dir - ) - cc = result.decode('utf-8').replace(' ', '').split('|')[1] - complexity_of_operators[fn_name] = max(int(cc), 1) - - return complexity_of_operators diff --git a/hls4ml/hls4ml/writer/__init__.py b/hls4ml/hls4ml/writer/__init__.py deleted file mode 100644 index f4eed94..0000000 --- a/hls4ml/hls4ml/writer/__init__.py +++ /dev/null @@ -1,12 +0,0 @@ -from hls4ml.writer.quartus_writer import QuartusWriter -from hls4ml.writer.symbolic_writer import SymbolicExpressionWriter -from hls4ml.writer.vitis_writer import VitisWriter -from hls4ml.writer.vivado_accelerator_writer import VivadoAcceleratorWriter -from hls4ml.writer.vivado_writer import VivadoWriter -from hls4ml.writer.writers import Writer, get_writer, register_writer # noqa: F401 - -register_writer('Vivado', VivadoWriter) -register_writer('VivadoAccelerator', VivadoAcceleratorWriter) -register_writer('Vitis', VitisWriter) -register_writer('Quartus', QuartusWriter) -register_writer('SymbolicExpression', SymbolicExpressionWriter) diff --git a/hls4ml/hls4ml/writer/quartus_writer.py b/hls4ml/hls4ml/writer/quartus_writer.py deleted file mode 100644 index a5ba6dc..0000000 --- a/hls4ml/hls4ml/writer/quartus_writer.py +++ /dev/null @@ -1,1360 +0,0 @@ -import glob -import os -import 
tarfile
-from collections import OrderedDict
-from shutil import copyfile, copytree, rmtree
-
-import numpy as np
-import yaml
-
-from hls4ml.backends import get_backend
-from hls4ml.model.layers import Conv1D, Conv2D, Conv2DBatchnorm, Dense
-from hls4ml.utils.fixed_point_utils import FixedPointEmulator, ceil_log2, uint_to_binary
-from hls4ml.writer.writers import Writer
-
-config_filename = 'hls4ml_config.yml'
-
-
-class QuartusWriter(Writer):
-    def next_pow2(self, x):
-        return 1 << (x - 1).bit_length()
-
-    def __make_dat_file(self, original_path, project_path):
-        """
-        Convert other input/output data types into a dat file, which is
-        a text file with the flattened matrix printed out. Note that ' ' is
-        assumed to be the delimiter.
-        """
-
-        # Take in data from currently supported data files
-        if original_path[-3:] == "npy":
-            data = np.load(original_path)
-        else:
-            raise Exception("Unsupported input/output data files.")
-
-        # Flatten the data, keeping only the first dimension
-        data = data.reshape(data.shape[0], -1)
-
-        def print_data(f):
-            for i in range(data.shape[0]):
-                for j in range(data.shape[1]):
-                    f.write(str(data[i][j]) + " ")
-                f.write("\n")
-
-        # Print out in dat file
-        with open(project_path, "w") as f:
-            print_data(f)
-
-    def get_max_reuse_factor(self, model):
-        max_rf = 0
-        for layer in model.get_layers():
-            rf = int(layer.get_attr('reuse_factor'))
-            if rf > max_rf:
-                max_rf = rf
-        return max_rf
-
-    def print_array_to_cpp(self, var, layer, odir):
-        """Write a weights array to C++ header files.
-
-        Args:
-            var (WeightVariable): Weight to write
-            layer (Layer): Instance of the layer to which the weights belong
-            odir (str): Output directory
-        """
-        h_file = open(f"{odir}/firmware/weights/{var.name}.h", "w")
-
-        # meta data
-        h_file.write(f"//Numpy array shape {var.shape}\n")
-        h_file.write(f"//Min {np.min(var.min):.12f}\n")
-        h_file.write(f"//Max {np.max(var.max):.12f}\n")
-        h_file.write(f"//Number of zeros {var.nzeros}\n")
-        h_file.write("\n")
-
-        h_file.write(f"#ifndef {var.name.upper()}_H_\n")
-        h_file.write(f"#define {var.name.upper()}_H_\n")
-        h_file.write("\n")
-
-        rf = int(layer.get_attr('reuse_factor', 1))
-        weight_header = '#ifdef __INTELFPGA_COMPILER__\n'
-
-        weight_size = 0
-        if isinstance(layer, (Conv2D, Conv2DBatchnorm)):
-            weight_size = (
-                layer.get_attr('impl_filt_height')
-                * layer.get_attr('impl_filt_width')
-                * layer.get_attr('n_filt')
-                * layer.get_attr('n_chan')
-            )
-        elif isinstance(layer, (Conv1D)):
-            weight_size = layer.get_attr('impl_filt_width') * layer.get_attr('n_filt') * layer.get_attr('n_chan')
-        elif isinstance(layer, (Dense)):
-            weight_size = layer.get_attr('n_in') * layer.get_attr('n_out')
-
-        if rf == 1 or var.name[0] == 'b' or weight_size <= 2048 or (var.name[0] == 'w' and var.type.precision.width < 3):
-            weight_header += 'hls_init_on_powerup\n'
-        else:
-            block_factor = (layer.get_attr('n_in') * layer.get_attr('n_out')) / rf
-            nbanks = int(2 ** np.ceil(np.log2(block_factor)) / 2)
-            var_width = int(np.ceil(var.type.precision.width / 8))
-            bwidth = self.next_pow2(var_width)
-            weight_header += (
-                f'hls_bankwidth({bwidth})\nhls_numbanks({nbanks})\nhls_max_replicates(1)\nhls_memory_impl("BLOCK_RAM")\n'
-            )
-        weight_header += '#endif\n'
-        if var.storage.lower() == 'bram':
-            weight_header += 'static '
-        else:
-            weight_header += 'static const '
-        h_file.write(weight_header + var.definition_cpp() + " = {")
-
-        # fill c++ array.
- # not including internal brackets for multidimensional case - sep = '' - for x in var: - h_file.write(sep + x) - sep = ", " - h_file.write("};\n") - h_file.write("\n#endif\n") - h_file.close() - - def write_project_dir(self, model): - """Write the base project directory - - Args: - model (ModelGraph): the hls4ml model. - """ - if not os.path.isdir(f"{model.config.get_output_dir()}/firmware/weights"): - os.makedirs(f"{model.config.get_output_dir()}/firmware/weights") - - def write_project_cpp(self, model): - """Write the main architecture source file (myproject.cpp) - - Args: - model (ModelGraph): the hls4ml model. - """ - project_name = model.config.get_project_name() - - filedir = os.path.dirname(os.path.abspath(__file__)) - f = open(os.path.join(filedir, '../templates/quartus/firmware/myproject.cpp')) - fout = open(f'{model.config.get_output_dir()}/firmware/{project_name}.cpp', 'w') - - model_inputs = model.get_input_variables() - model_outputs = model.get_output_variables() - model_brams = [var for var in model.get_weight_variables() if var.storage.lower() == 'bram'] - - io_type = model.config.get_config_value('IOType') - indent = ' ' - brams_str = ', \n'.join([indent + b.definition_cpp(as_reference=False) for b in model_brams]) - - for line in f.readlines(): - # Add headers to weights and biases - if 'myproject' in line: - newline = line.replace('myproject', project_name) - - # Intel HLS 'streams' need to be passed by reference to top-level entity or declared as global variables - # Streams cannot be declared inside a function - # Therefore, layer connections (inputs/outputs) are declared here - elif '// hls-fpga-machine-learning insert inter-task streams' in line: - newline = line - if io_type == 'io_stream': - for layer in model.get_layers(): - vars = layer.get_variables() - for var in vars: - def_cpp = var.definition_cpp() - if def_cpp is not None: - newline += def_cpp + ';\n' - - # Instantiate GCC top-level function, to be used during GCC compilation / hls4ml.predict() - elif '// hls-fpga-machine-learning instantiate GCC top-level' in line: - newline = line - if io_type == 'io_stream': - newline += f'void {project_name}(\n' - for inp in model_inputs: - newline += indent + f'stream_in<{inp.type.name}> &{inp.name}_stream,\n' - for out in model_outputs: - newline += indent + f'stream_out<{out.type.name}> &{out.name}_stream' - if model_brams: - newline += ',\n' + brams_str - newline += '\n) {\n' - if io_type == 'io_parallel': - newline = f'output_data {project_name}(\n' - newline += indent + 'input_data inputs' - if model_brams: - newline += ',\n' + brams_str - newline += '\n) {\n' - - # Instantiate HLS top-level function, to be used during HLS synthesis - elif '// hls-fpga-machine-learning instantiate HLS top-level' in line: - newline = line - if io_type == 'io_stream': - newline += f'component void {project_name}(\n' - for inp in model_inputs: - newline += indent + f'stream_in<{inp.type.name}> &{inp.name}_stream,\n' - for out in model_outputs: - newline += indent + f'stream_out<{out.type.name}> &{out.name}_stream' - if model_brams: - newline += ',\n' + brams_str - newline += '\n) {\n' - if io_type == 'io_parallel': - newline += f'component output_data {project_name}(\n' - newline += indent + 'input_data inputs' - if model_brams: - newline += ',\n' + brams_str - newline += '\n) {\n' - - # Insert HLS pragmas such as maximum frequency, initiation interval etc. 
- elif '// hls-fpga-machine-learning insert cpragmas' in line: - newline = line - if io_type == 'io_parallel': - newline += 'hls_max_concurrency(0)\n' - newline += f'hls_component_ii({self.get_max_reuse_factor(model)})\n' - clock_mhz = 1000 / (model.config.get_config_value('ClockPeriod')) - newline += f'hls_scheduler_target_fmax_mhz({np.ceil(clock_mhz).astype(int)})\n' - - # In io_parallel, an output (struct) is returned from the top-level function - # Therefore, it needs to be initialised before returning - # In io_stream, the input is of type 'stream_in' and output is of type 'stream_out' - # However, individual layers accept the type 'stream' - # Therefore, data is first read from 'stream_in', written to 'stream' and propagated through network - elif '// hls-fpga-machine-learning initialize input/output' in line: - if io_type == 'io_stream': - newline = line - for inp in model_inputs: - newline += indent + f'for (size_t i = 0; i < {inp.size_cpp()} / {inp.type.name}::size; i++) {{\n' - newline += indent + f' {inp.type.name} tmp = {inp.name}_stream.read();\n' - newline += indent + f' {inp.name}.write(tmp);\n' - newline += indent + '}\n' - else: - newline = line - newline += indent + 'hls_register output_data outputs;\n' - - # Insert weights - elif '// hls-fpga-machine-learning insert weights' in line: - newline = line - for layer in model.get_layers(): - for w in layer.get_weights(): - if w not in model_brams: - newline += f'#include "weights/{w.name}.h"\n' - - # Insert test weights - elif '// hls-fpga-machine-learning insert test weights' in line: - newline = line - for layer in model.get_layers(): - for w in layer.get_weights(): - newline += f'#include "weights/{w.name}_test.h"\n' - - # Neural net instantiation - elif '// hls-fpga-machine-learning insert layers' in line: - newline = line + '\n' - model_inputs = model.get_input_variables() - model_outputs = model.get_output_variables() - for layer in model.get_layers(): - if io_type != 'io_stream': - vars = layer.get_variables() - for var in vars: - if var not in model_inputs and var not in model_outputs: - def_cpp = var.definition_cpp() - if def_cpp is not None: - newline += ' ' + def_cpp + ';\n' - func = layer.get_attr('function_cpp', None) - if func: - newline += ' ' + func + '\n' - if model.config.trace_output and layer.get_attr('trace', False): - newline += '#ifndef HLS_SYNTHESIS\n' - for var in vars: - newline += ' nnet::save_layer_output<{}>({}, "{}", {});\n'.format( - var.type.name, var.name, layer.name, var.size_cpp() - ) - newline += '#endif\n' - newline += '\n' - - # In io_parallel, a return is required; for more details see myproject.cpp & myproject.h - elif '// hls-fpga-machine-learning return' in line: - if io_type == 'io_stream': - newline = line - for out in model_outputs: - newline += indent + f'for (size_t i = 0; i < {out.size_cpp()} / {out.type.name}::size; i++) {{\n' - newline += indent + f' {out.type.name} tmp = {out.name}.read();\n' - newline += indent + f' {out.name}_stream.write(tmp);\n' - newline += indent + '}\n' - newline += '}\n' - else: - newline = line - newline += indent + 'return outputs;\n' - newline += '}\n' - - # Just copy line - else: - newline = line - - fout.write(newline) - - f.close() - fout.close() - - def write_project_header(self, model): - """Write the main architecture header file (myproject.h) - - Args: - model (ModelGraph): the hls4ml model. 
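    (The ``write_*`` methods are normally driven by the backend's write flow; a minimal standalone
    sketch, assuming ``model`` is a ``ModelGraph`` configured for the Quartus backend)::

        writer = QuartusWriter()
        writer.write_project_dir(model)     # creates <output_dir>/firmware/weights
        writer.write_project_cpp(model)     # writes <project>.cpp from the template
        writer.write_project_header(model)  # writes <project>.h from the template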
- """ - - project_name = model.config.get_project_name() - - filedir = os.path.dirname(os.path.abspath(__file__)) - f = open(os.path.join(filedir, '../templates/quartus/firmware/myproject.h')) - fout = open(f'{model.config.get_output_dir()}/firmware/{project_name}.h', 'w') - - model_inputs = model.get_input_variables() - model_outputs = model.get_output_variables() - model_brams = [var for var in model.get_weight_variables() if var.storage.lower() == 'bram'] - - # io_parallel and io_stream instantiate the top-level function differently - io_type = model.config.get_config_value('IOType') - indent = ' ' - brams_str = ', \n'.join([indent + b.definition_cpp(as_reference=False) for b in model_brams]) - - for line in f.readlines(): - if 'MYPROJECT' in line: - newline = line.replace('MYPROJECT', format(project_name.upper())) - - elif 'myproject' in line: - newline = line.replace('myproject', project_name) - - elif '// hls-fpga-machine-learning instantiate GCC top-level' in line: - newline = line - # For io_stream, input and output are passed by reference; see myproject.h & myproject.cpp for more details - - if io_type == 'io_stream': - newline += f'void {project_name}(\n' - for inp in model_inputs: - newline += indent + f'stream_in<{inp.type.name}> &{inp.name}_stream,\n' - for out in model_outputs: - newline += indent + f'stream_out<{out.type.name}> &{out.name}_stream' - if model_brams: - newline += ',\n' + brams_str - newline += '\n);\n' - # In io_parallel, a struct is returned; see myproject.h & myproject.cpp for more details - else: - newline += f'output_data {project_name}(\n' - newline += indent + 'input_data inputs' - if model_brams: - newline += ',\n' + brams_str - newline += '\n);\n' - - # Similar to GCC instantiation, but with the keyword 'component' - elif '// hls-fpga-machine-learning instantiate HLS top-level' in line: - newline = line - if io_type == 'io_stream': - newline += f'component void {project_name}(\n' - for inp in model_inputs: - newline += indent + f'stream_in<{inp.type.name}> &{inp.name}_stream,\n' - for out in model_outputs: - newline += indent + f'stream_out<{out.type.name}> &{out.name}_stream' - if model_brams: - newline += ',\n' + brams_str - newline += '\n);\n' - else: - newline += f'component output_data {project_name}(\n' - newline += indent + 'input_data inputs' - if model_brams: - newline += ',\n' + brams_str - newline += '\n);\n' - - elif '// hls-fpga-machine-learning insert cpragmas' in line: - newline = line - if io_type == 'io_parallel': - newline += 'hls_max_concurrency(0)\n' - newline += f'hls_component_ii({self.get_max_reuse_factor(model)})\n' - clock_mhz = 1000 / (model.config.get_config_value('ClockPeriod')) - newline += f'hls_scheduler_target_fmax_mhz({np.ceil(clock_mhz).astype(int)})\n' - - # For io_stream, no inputs/outputs are instantiated, as they are passed by reference - # For io_parallel, input/output structs are required - elif '// hls-fpga-machine-learning insert inputs' in line: - newline = line - if io_type != 'io_stream': - newline += 'struct input_data { \n' - for inp in model_inputs: - newline += indent + inp.definition_cpp() + ';\n' - newline += '};\n' - elif '// hls-fpga-machine-learning insert outputs' in line: - newline = line - if io_type != 'io_stream': - newline += 'struct output_data { \n' - for out in model_outputs: - newline += indent + out.definition_cpp() + ';\n' - newline += '};\n' - # Simply copy line, if no inserts are required - else: - newline = line - - fout.write(newline) - - f.close() - fout.close() - - def 
write_defines(self, model): - """Write the C++ type definitions file (defines.h) - - Args: - model (ModelGraph): the hls4ml model. - """ - filedir = os.path.dirname(os.path.abspath(__file__)) - f = open(os.path.join(filedir, '../templates/quartus/firmware/defines.h')) - fout = open(f'{model.config.get_output_dir()}/firmware/defines.h', 'w') - - for line in f.readlines(): - # Insert numbers - if '// hls-fpga-machine-learning insert numbers' in line: - newline = line - - defines_list = [] - for layer in model.get_layers(): - defines = '' - for k, v in layer.get_output_variable().get_shape(): - defines += f'#define {k} {v}\n' - - defines_list.append(defines) - - newline += ''.join(defines_list) - - elif '// hls-fpga-machine-learning insert layer-precision' in line: - newline = line - all_precision = OrderedDict() - for layer in model.get_layers(): - layer_precision = layer.get_layer_precision() - for type_name, type_var in layer_precision.items(): - # Ensure that layer's types doesn't override existing types - # This can happen in case of InplaceVariable types - if type_name not in all_precision: - all_precision[type_name] = type_var - for used_type in all_precision.values(): - newline += used_type.definition_cpp() - - else: - newline = line - fout.write(newline) - f.close() - fout.close() - - def write_parameters(self, model): - """Write the C++ layer config file (parameters.h) - - Args: - model (ModelGraph): the hls4ml model. - """ - filedir = os.path.dirname(os.path.abspath(__file__)) - f = open(os.path.join(filedir, '../templates/quartus/firmware/parameters.h')) - fout = open(f'{model.config.get_output_dir()}/firmware/parameters.h', 'w') - - for line in f.readlines(): - if '// hls-fpga-machine-learning insert includes' in line: - newline = line - for include in sorted(set(sum((layer.get_attr('include_header', []) for layer in model.get_layers()), []))): - newline += '#include "%s"\n' % include - - elif "// hls-fpga-machine-learning insert layer-config" in line: - newline = line - for layer in model.get_layers(): - config = layer.get_attr('config_cpp', None) - if config: - newline += config + '\n' - else: - newline = line - fout.write(newline) - f.close() - fout.close() - - def write_weights(self, model): - """Write the weights into header files - - Args: - model (ModelGraph): the hls4ml model. - """ - for layer in model.get_layers(): - for weights in layer.get_weights(): - self.print_array_to_cpp(weights, layer, model.config.get_output_dir()) - - def write_testbench_parallel(self, model): - """Write the testbench file for io_parallel (myproject_test.cpp and input/output .dat files) - - Args: - model (ModelGraph): the hls4ml model. - """ - if len(model.get_output_variables()) != 1: - print("WARNING: The testbench only supports one output variable. 
Leaving empty testbench") - return - - outvar = model.get_output_variables()[0] - - filedir = os.path.dirname(os.path.abspath(__file__)) - - if not os.path.exists(f'{model.config.get_output_dir()}/tb_data/'): - os.mkdir(f'{model.config.get_output_dir()}/tb_data/') - - input_data = model.config.get_config_value('InputData') - output_predictions = model.config.get_config_value('OutputPredictions') - - if input_data: - if input_data[-3:] == "dat": - copyfile(input_data, f'{model.config.get_output_dir()}/tb_data/tb_input_features.dat') - else: - self.__make_dat_file(input_data, f'{model.config.get_output_dir()}/tb_data/tb_input_features.dat') - - if output_predictions: - if output_predictions[-3:] == "dat": - copyfile(output_predictions, f'{model.config.get_output_dir()}/tb_data/tb_output_predictions.dat') - else: - self.__make_dat_file( - output_predictions, f'{model.config.get_output_dir()}/tb_data/tb_output_predictions.dat' - ) - - f = open(os.path.join(filedir, '../templates/quartus/myproject_test_parallel.cpp')) - fout = open(f'{model.config.get_output_dir()}/{model.config.get_project_name()}_test.cpp', 'w') - - model_brams = [var for var in model.get_weight_variables() if var.storage.lower() == 'bram'] - - for line in f.readlines(): - indent = ' ' * (len(line) - len(line.lstrip(' '))) - - if 'myproject' in line: - newline = line.replace('myproject', model.config.get_project_name()) - elif '// hls-fpga-machine-learning insert bram' in line: - newline = line - for bram in model_brams: - newline += f'#include \"firmware/weights/{bram.name}.h\"\n' - elif '// hls-fpga-machine-learning insert data' in line: - newline = line - newline += ' std::vector::const_iterator in_begin = in.cbegin();\n' - newline += ' std::vector::const_iterator in_end;\n' - newline += ' inputs.emplace_back();\n' - for inp in model.get_input_variables(): - newline += f' in_end = in_begin + ({inp.size_cpp()});\n' - newline += f' std::copy(in_begin, in_end, inputs.back().{inp.member_name});\n' - newline += ' in_begin = in_end;\n' - newline += ' outputs.emplace_back();\n' - elif '// hls-fpga-machine-learning insert zero' in line: - newline = line - newline += indent + 'for(int i = 0; i < num_iterations; i++) {\n' - for inp in model.get_input_variables(): - newline += indent + ' inputs.emplace_back();\n' - newline += indent + ' outputs.emplace_back();\n' - newline += indent + f' std::fill_n(inputs[i].{inp.member_name}, {inp.size_cpp()}, 0.0);\n' - newline += indent + '}\n' - - elif '// hls-fpga-machine-learning insert top-level-function' in line: - newline = line - newline += indent + 'for(int i = 0; i < num_iterations; i++) {\n' - newline += indent + f' ihc_hls_enqueue(&outputs[i], {model.config.get_project_name()}, inputs[i]' - if model_brams: - bram_vars = ','.join([b.name for b in model_brams]) - newline += f', {bram_vars});\n' - else: - newline += ');\n' - newline += indent + '}\n' - elif 'hls-fpga-machine-learning insert run' in line: - newline = line - newline += ' ' + f'ihc_hls_component_run_all({model.config.get_project_name()});\n' - elif '// hls-fpga-machine-learning insert predictions' in line: - newline = line - newline += indent + f'for(int i = 0; i < {outvar.size_cpp()}; i++) {{\n' - newline += indent + ' std::cout << predictions[j][i] << " ";\n' - newline += indent + '}\n' - newline += indent + 'std::cout << std::endl;\n' - elif '// hls-fpga-machine-learning insert tb-output' in line: - newline = line - newline += indent + f'for(int i = 0; i < {outvar.size_cpp()}; i++) {{\n' - newline += indent + f' fout << 
outputs[j].{outvar.member_name}[i] << " ";\n' - newline += indent + '}\n' - newline += indent + 'fout << std::endl;\n' - elif ( - '// hls-fpga-machine-learning insert output' in line - or '// hls-fpga-machine-learning insert quantized' in line - ): - newline = line - newline += indent + f'for(int i = 0; i < {outvar.size_cpp()}; i++) {{\n' - newline += indent + f' std::cout << outputs[j].{outvar.member_name}[i] << " ";\n' - newline += indent + '}\n' - newline += indent + 'std::cout << std::endl;\n' - else: - newline = line - - fout.write(newline) - - f.close() - fout.close() - - def write_testbench_stream(self, model): - """Write the testbench file for io_stream (myproject_test.cpp and input/output .dat files) - - Args: - model (ModelGraph): the hls4ml model. - """ - if len(model.get_output_variables()) != 1: - print("WARNING: The testbench only supports one output variable. Leaving empty testbench") - return - - outvar = model.get_output_variables()[0] - - model_inputs = model.get_input_variables() - model_outputs = model.get_output_variables() - - filedir = os.path.dirname(os.path.abspath(__file__)) - - if not os.path.exists(f'{model.config.get_output_dir()}/tb_data/'): - os.mkdir(f'{model.config.get_output_dir()}/tb_data/') - - input_data = model.config.get_config_value('InputData') - output_predictions = model.config.get_config_value('OutputPredictions') - - if input_data: - if input_data[-3:] == "dat": - copyfile(input_data, f'{model.config.get_output_dir()}/tb_data/tb_input_features.dat') - else: - self.__make_dat_file(input_data, f'{model.config.get_output_dir()}/tb_data/tb_input_features.dat') - - if output_predictions: - if output_predictions[-3:] == "dat": - copyfile(output_predictions, f'{model.config.get_output_dir()}/tb_data/tb_output_predictions.dat') - else: - self.__make_dat_file( - output_predictions, f'{model.config.get_output_dir()}/tb_data/tb_output_predictions.dat' - ) - - f = open(os.path.join(filedir, '../templates/quartus/myproject_test_stream.cpp')) - fout = open(f'{model.config.get_output_dir()}/{model.config.get_project_name()}_test.cpp', 'w') - - model_brams = [var for var in model.get_weight_variables() if var.storage.lower() == 'bram'] - - for line in f.readlines(): - indent = ' ' * (len(line) - len(line.lstrip(' '))) - - if 'myproject' in line: - newline = line.replace('myproject', model.config.get_project_name()) - - elif '// hls-fpga-machine-learning insert bram' in line: - newline = line - for bram in model_brams: - newline += f'#include \"firmware/weights/{bram.name}.h\"\n' - - elif '// hls-fpga-machine learning instantiate inputs and outputs' in line: - newline = line - for inp in model_inputs: - newline += indent + f'stream_in<{inp.type.name}> {inp.name}_input;\n' - for out in model_outputs: - newline += indent + f'stream_out<{out.type.name}> {out.name}_output;\n' - - # TODO - This is one-input specific (are multiple model inputs needed at all?) 
- elif '// hls-fpga-machine-learning insert data' in line: - newline = line - c = 0 - for inp in model_inputs: - newline += indent + f'float vals_{c}[{inp.size_cpp()}]; \n' - newline += indent + f'for (int j = 0 ; j < {inp.size_cpp()} ; j++) {{\n' - newline += indent + indent + f'vals_{c}[j] = in[j]; \n' - newline += indent + '}\n' - newline += ( - indent - + f'nnet::convert_data(vals_{c}, {inp.name}_input);\n' - ) - c += 1 - - elif '// hls-fpga-machine-learning insert zero' in line: - newline = line - c = 0 - for inp in model_inputs: - newline += indent + f'float vals_{c}[{inp.size_cpp()}]; \n' - newline += indent + f'for (int j = 0 ; j < {inp.size_cpp()} ; j++) {{\n' - newline += indent + indent + f'vals_{c}[j] = 0.0; \n' - newline += indent + '}\n' - newline += ( - indent - + f'nnet::convert_data(vals_{c}, {inp.name}_input);\n' - ) - c += 1 - - elif '// hls-fpga-machine-learning insert top-level-function' in line: - newline = line - input_params = ', '.join([f'{i.name}_input' for i in model_inputs]) - output_params = ', '.join([f'{o.name}_output' for o in model_outputs]) - newline += ( - indent + f'ihc_hls_enqueue_noret(&{model.config.get_project_name()}, {input_params}, {output_params}' - ) - if model_brams: - bram_vars = ','.join([b.name for b in model_brams]) - newline += f', {bram_vars});\n' - else: - newline += ');\n' - - elif 'hls-fpga-machine-learning insert run' in line: - newline = line - newline += indent + f'ihc_hls_component_run_all({model.config.get_project_name()});\n' - - elif '// hls-fpga-machine-learning convert output' in line: - newline = line - newline += indent + f'float res[{outvar.size_cpp()}];\n' - newline += indent + 'nnet::convert_data_back<{}, float, {}>({}_output, res);\n'.format( - outvar.type.name, outvar.size_cpp(), outvar.name - ) - - elif '// hls-fpga-machine-learning insert tb-output' in line: - newline += indent + f'for(int i = 0; i < {outvar.size_cpp()}; i++) {{\n' - newline += indent + ' fout << res[i] << " ";\n' - newline += indent + '}\n' - newline += indent + 'fout << std::endl;\n' - - elif '// hls-fpga-machine-learning print predictions' in line: - newline = line - newline += indent + f'for(int i = 0; i < {outvar.size_cpp()}; i++) {{\n' - newline += indent + ' std::cout << predictions[iteration][i] << " ";\n' - newline += indent + '}\n' - newline += indent + 'std::cout << std::endl;\n' - - elif '// hls-fpga-machine-learning print output' in line: - newline = line - newline += indent + f'for(int i = 0; i < {outvar.size_cpp()}; i++) {{\n' - newline += indent + ' std::cout << res[i] << " "; \n' - newline += indent + '} \n' - newline += indent + 'std::cout << std::endl; \n' - else: - newline = line - - fout.write(newline) - - f.close() - fout.close() - - def write_test_bench(self, model): - """Write the testbench - - Args: - model (ModelGraph): the hls4ml model. - """ - # TODO - This function only works with one model input - # (NOT one data point - it works as expected with multiple data points) - io_type = model.config.get_config_value('IOType') - if io_type == 'io_parallel': - self.write_testbench_parallel(model) - elif io_type == 'io_stream': - self.write_testbench_stream(model) - - def write_bridge(self, model): - """Write the Python-C++ bridge (myproject_bridge.cpp) - - Args: - model (ModelGraph): the hls4ml model. 
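    (A hedged sketch of where the bridge fits: ``ModelGraph.compile()`` builds it into a shared
    library, which ``predict()`` then calls through ctypes; the exact flow is simplified here)::

        hls_model.compile()       # compiles <project>_bridge.cpp via build_lib.sh
        y = hls_model.predict(X)  # dispatches to the generated float/double wrappers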
- """ - - filedir = os.path.dirname(os.path.abspath(__file__)) - f = open(os.path.join(filedir, '../templates/quartus/myproject_bridge.cpp')) - fout = open(f'{model.config.get_output_dir()}/{model.config.get_project_name()}_bridge.cpp', 'w') - - model_inputs = model.get_input_variables() - model_outputs = model.get_output_variables() - model_brams = [var for var in model.get_weight_variables() if var.storage.lower() == 'bram'] - - io_type = model.config.get_config_value('IOType') - indent = ' ' - - for line in f.readlines(): - if 'MYPROJECT' in line: - newline = line.replace('MYPROJECT', format(model.config.get_project_name().upper())) - - elif 'myproject' in line: - newline = line.replace('myproject', format(model.config.get_project_name())) - - elif '// hls-fpga-machine-learning insert bram' in line: - newline = line - for bram in model_brams: - newline += f'#include \"firmware/weights/{bram.name}.h\"\n' - - elif '// hls-fpga-machine-learning insert header' in line: - dtype = line.split('#', 1)[1].strip() - if io_type == 'io_stream': - inputs_str = ', '.join([f'{dtype} {i.name}[{i.size_cpp()}]' for i in model_inputs]) - outputs_str = ', '.join([f'{dtype} {o.name}[{o.size_cpp()}]' for o in model_outputs]) - else: - inputs_str = ', '.join([f'{dtype} {i.member_name}[{i.size_cpp()}]' for i in model_inputs]) - outputs_str = ', '.join([f'{dtype} {o.member_name}[{o.size_cpp()}]' for o in model_outputs]) - - insize_str = ', '.join([f'unsigned short &const_size_in_{i}' for i in range(1, len(model_inputs) + 1)]) - outsize_str = ', '.join([f'unsigned short &const_size_out_{o}' for o in range(1, len(model_outputs) + 1)]) - - newline = '' - newline += indent + inputs_str + ',\n' - newline += indent + outputs_str + ',\n' - newline += indent + insize_str + ',\n' - newline += indent + outsize_str + '\n' - - elif '// hls-fpga-machine-learning insert wrapper' in line: - bram_params = '' - if model_brams: - bram_params = ', ' + ','.join([b.name for b in model_brams]) - - dtype = line.split('#', 1)[1].strip() - if io_type == 'io_stream': - newline = '' - for i in model_inputs: - # Initialise stream object and store input data (C-array) to a 'stream' object - newline += indent + f'stream_in<{i.type.name}> {i.name}_input;\n' - newline += indent + 'nnet::convert_data<{}, {}, {}>({}, {}_input);\n'.format( - dtype, i.type.name, i.size_cpp(), i.name, i.name - ) - - # Initialise stream output - for o in model_outputs: - newline += '\n' - newline += indent + f'stream_out<{o.type.name}> {o.name}_output;\n' - - # Execute top-level function - input_params = ', '.join([f'{i.name}_input' for i in model_inputs]) - output_params = ', '.join([f'{o.name}_output' for o in model_outputs]) - - top_level = ( - indent + f'{model.config.get_project_name()}({input_params}, {output_params}{bram_params});\n' - ) - newline += top_level - newline += '\n' - - # Store data from 'stream' output to C-array, to be then returned and handled in Python - for o in model_outputs: - newline += indent + 'nnet::convert_data_back<{}, {}, {}>({}_output, {});\n'.format( - o.type.name, dtype, o.size_cpp(), o.name, o.name - ) - - else: - # Convert input data from C-array to HLS type - newline = '' - newline += indent + 'input_data inputs_ap;\n' - for i in model_inputs: - newline += indent + 'nnet::convert_data<{}, {}, {}>({}, inputs_ap.{});\n'.format( - dtype, i.type.name, i.size_cpp(), i.member_name, i.member_name - ) - newline += '\n' - - # Initialise HLS output - newline += indent + 'output_data outputs_ap;\n' - - # Execute top-level function - 
top_level = indent + f'outputs_ap = {model.config.get_project_name()}(inputs_ap{bram_params});\n' - newline += top_level - newline += '\n' - - # Convert HLS outputs back to C-array - for o in model_outputs: - newline += indent + 'nnet::convert_data_back<{}, {}, {}>(outputs_ap.{}, {});\n'.format( - o.type.name, dtype, o.size_cpp(), o.member_name, o.member_name - ) - elif '// hls-fpga-machine-learning insert trace_outputs' in line: - newline = '' - for layer in model.get_layers(): - func = layer.get_attr('function_cpp') - if func and model.config.trace_output and layer.get_attr('trace', False): - vars = layer.get_variables() - for var in vars: - newline += ( - indent - + 'nnet::trace_outputs->insert(std::pair(' - + f'"{layer.name}", (void *) malloc({var.size_cpp()} * element_size)));\n' - ) - - else: - newline = line - fout.write(newline) - - f.close() - fout.close() - - def write_build_script(self, model): - """Write the build scripts (Makefile, build_lib.sh) - - Args: - model (ModelGraph): the hls4ml model. - """ - - # Makefile - filedir = os.path.dirname(os.path.abspath(__file__)) - f = open(os.path.join(filedir, '../templates/quartus/Makefile')) - fout = open(f'{model.config.get_output_dir()}/Makefile', 'w') - - for line in f.readlines(): - line = line.replace('myproject', model.config.get_project_name()) - - if 'DEVICE :=' in line: - line = 'DEVICE := {}\n'.format(model.config.get_config_value('Part')) - - fout.write(line) - f.close() - fout.close() - - # build_lib.sh - f = open(os.path.join(filedir, '../templates/quartus/build_lib.sh')) - fout = open(f'{model.config.get_output_dir()}/build_lib.sh', 'w') - - for line in f.readlines(): - line = line.replace('myproject', model.config.get_project_name()) - line = line.replace('mystamp', model.config.get_config_value('Stamp')) - - fout.write(line) - f.close() - fout.close() - - def write_nnet_utils(self, model): - """Copy the nnet_utils, AP types headers and any custom source to the project output directory - - Args: - model (ModelGraph): the hls4ml model. 
- """ - - # nnet_utils - filedir = os.path.dirname(os.path.abspath(__file__)) - - srcpath = os.path.join(filedir, '../templates/quartus/firmware/nnet_utils/') - dstpath = f'{model.config.get_output_dir()}/firmware/nnet_utils/' - - if not os.path.exists(dstpath): - os.mkdir(dstpath) - - headers = [os.path.basename(h) for h in glob.glob(srcpath + '*.h')] - - for h in headers: - copyfile(srcpath + h, dstpath + h) - - # ac_types - filedir = os.path.dirname(os.path.abspath(__file__)) - - srcpath = os.path.join(filedir, '../templates/quartus/ac_types/') - dstpath = f'{model.config.get_output_dir()}/firmware/ac_types/' - - if os.path.exists(dstpath): - rmtree(dstpath) - - copytree(srcpath, dstpath) - - # custom source - filedir = os.path.dirname(os.path.abspath(__file__)) - - custom_source = get_backend('Quartus').get_custom_source() - for dst, srcpath in custom_source.items(): - dstpath = f'{model.config.get_output_dir()}/firmware/{dst}' - copyfile(srcpath, dstpath) - - def __get_table_size(self, model, activation): - for layer in model.get_layers(): - if ( - layer.get_attr('activation') == activation or layer.get_attr('recurrent_activation') == activation - ) and layer.get_attr('table_size') is not None: - return int(layer.get_attr('table_size')) - return 1024 - - def __get_table_header(self, table_name, table_size): - table_header = '#ifdef __INTELFPGA_COMPILER__\n' - table_header += 'hls_init_on_powerup\n' - table_header += '#endif\n' - table_header += f'static const typename CONFIG_T::table_t {table_name}[{table_size}] = {{' - return table_header - - def __write_elu_table(self, model, path): - table_name = 'elu_table' - table_size = self.__get_table_size(model, 'elu') - - h_file = open(f'{path}/{table_name}.tb', 'w') - h_file.write(self.__get_table_header(table_name, table_size)) - - sep = '' - for i in range(table_size): - in_val = -8.0 * i / float(table_size) - real_val = np.exp(in_val) - 1.0 - h_file.write(sep + str(real_val)) - sep = ", " - - h_file.write('};\n') - h_file.close() - - def __write_sigmoid_table(self, model, path): - MAX_VALUE = 8 - MIN_VALUE = 0 - - table_name = 'sigmoid_table' - table_size = self.__get_table_size(model, 'sigmoid') - - h_file = open(f'{path}/{table_name}.tb', 'w') - h_file.write(self.__get_table_header(table_name, table_size)) - - sep = '' - for i in range(int(table_size)): - in_val = ( - i * (MAX_VALUE - MIN_VALUE) / float(table_size) - + (MAX_VALUE - MIN_VALUE) / (float(table_size) * 2) - + MIN_VALUE - ) - real_val = 1.0 / (1 + np.exp(-in_val)) - if real_val >= 0.5: - h_file.write(sep + str(real_val)) - sep = ", " - - h_file.write('};\n') - h_file.close() - - def __write_tanh_table(self, model, path): - MAX_VALUE = 4 - MIN_VALUE = 0 - - table_name = 'tanh_table' - table_size = self.__get_table_size(model, 'tanh') - - h_file = open(f'{path}/{table_name}.tb', 'w') - h_file.write(self.__get_table_header(table_name, table_size)) - - sep = '' - for i in range(table_size): - in_val = ( - i * (MAX_VALUE - MIN_VALUE) / float(table_size) - + (MAX_VALUE - MIN_VALUE) / (float(table_size) * 2) - + MIN_VALUE - ) - real_val = np.tanh(in_val) - if real_val >= 0: - h_file.write(sep + str(real_val)) - sep = ", " - - h_file.write('};\n') - h_file.close() - - def __write_softplus_table(self, model, path): - table_name = 'softplus_table' - table_size = self.__get_table_size(model, 'softplus') - - h_file = open(f'{path}/{table_name}.tb', 'w') - h_file.write(self.__get_table_header(table_name, table_size)) - - sep = '' - for i in range(table_size): - in_val = 2 * 8.0 * (i - 
float(table_size) / 2.0) / float(table_size) - real_val = np.log(np.exp(in_val) + 1.0) - h_file.write(sep + str(real_val)) - sep = ", " - - h_file.write('};\n') - h_file.close() - - def __write_softsign_table(self, model, path): - MAX_VALUE = 8 - MIN_VALUE = 0 - table_name = 'softsign_table' - table_size = self.__get_table_size(model, 'softsign') - - h_file = open(f'{path}/{table_name}.tb', 'w') - h_file.write(self.__get_table_header(table_name, table_size)) - - sep = '' - for i in range(table_size): - in_val = ( - i * (MAX_VALUE - MIN_VALUE) / float(table_size) - + (MAX_VALUE - MIN_VALUE) / (float(table_size) * 2) - + MIN_VALUE - ) - - real_val = in_val / (np.fabs(in_val) + 1.0) - if real_val >= 0: - h_file.write(sep + str(real_val)) - sep = ", " - - h_file.write('};\n') - h_file.close() - - def __write_selu_table(self, model, path): - table_name = 'selu_table' - table_size = self.__get_table_size(model, 'selu') - - h_file = open(f'{path}/{table_name}.tb', 'w') - h_file.write(self.__get_table_header(table_name, table_size)) - - sep = '' - for i in range(table_size): - in_val = -8.0 * i / float(table_size) - real_val = 1.0507009873554804934193349852946 * (1.6732632423543772848170429916717 * (np.exp(in_val) - 1.0)) - h_file.write(sep + str(real_val)) - sep = ", " - - h_file.write('};\n') - h_file.close() - - def __write_exp_table(self, model, path): - table_name = 'exp_table' - table_size = self.__get_table_size(model, 'softmax') - - h_file = open(f'{path}/{table_name}.tb', 'w') - h_file.write(self.__get_table_header(table_name, table_size)) - - # Default fixed point precision - # 6 bits for integer part, 10 bits for decimal - total, 16 - fp_bits = 16 - fp_integer = 6 - fp_signed = True - - # Exp table should use the same precision as exp_table, as seen in Vivado code - # init_exp_table(exp_table); - for layer in model.get_layers(): - if layer.name == 'softmax': - ac_type = layer.get_input_variable().type - if ac_type is not None: - try: - fp_bits = ac_type.precision.integer + ac_type.precision.fractional - fp_integer = ac_type.precision.integer - fp_signed = ac_type.precision.signed - except Exception: - # FixedPrecisionType wasn't correctly stored in layer attributes, use default values - pass - if fp_signed is False: - raise Exception('Softmax types need to be signed') - - sep = '' - N = ceil_log2(table_size) - for i in range(table_size): - f = FixedPointEmulator(fp_bits, fp_integer, signed=fp_signed) - b = uint_to_binary(i, N) - if i == 0: - b.insert(0, 0) - else: - b.insert(0, 1) - f.set_msb_bits(b) - real_val = f.exp_float() - h_file.write(sep + str(real_val)) - sep = ", " - - h_file.write('};\n') - h_file.close() - - def __write_invert_table(self, model, path): - table_name = 'invert_table' - table_size = self.__get_table_size(model, 'softmax') - - h_file = open(f'{path}/{table_name}.tb', 'w') - h_file.write(self.__get_table_header(table_name, table_size)) - - # Default fixed point precision, in case values from layer attributes cannot be extracted - # 8 bits for integer part, 10 bits for decimal - total, 18 - fp_bits = 18 - fp_integer = 8 - fp_signed = True - - # Invert table should use the same precision as exp_table, as seen in Vivado code - # init_invert_table(invert_table); - for layer in model.get_layers(): - if layer.name == 'softmax': - ac_type = layer.get_attr('exp_table_t') - if ac_type is not None: - try: - fp_bits = ac_type.precision.integer + ac_type.precision.fractional - fp_integer = ac_type.precision.integer - fp_signed = ac_type.precision.signed - except Exception: - 
# FixedPrecisionType wasn't correctly stored in layer attributes, use default values - pass - if fp_signed is False: - raise Exception('Softmax types need to be signed') - - sep = '' - N = ceil_log2(table_size) - for i in range(table_size): - f = FixedPointEmulator(fp_bits, fp_integer, signed=fp_signed) - b = uint_to_binary(i, N) - b.insert(0, 0) - f.set_msb_bits(b) - real_val = f.inv_float() - h_file.write(sep + str(real_val)) - sep = ", " - - h_file.write('};\n') - h_file.close() - - def __write_exp_table_latency(self, model, path): - table_name = 'exp_table_latency' - table_size = self.__get_table_size(model, 'softmax') - - h_file = open(f'{path}/{table_name}.tb', 'w') - h_file.write(self.__get_table_header(table_name, table_size)) - - # Default fixed point precision - # 6 bits for integer part, 10 bits for decimal - total, 16 - fp_bits = 16 - fp_integer = 6 - fp_signed = True - - # Exp table should use the same precision as exp_table, as seen in Vivado code - # init_exp_table(exp_table); - for layer in model.get_layers(): - if layer.name == 'softmax': - ac_type = layer.get_input_variable().type - if ac_type is not None: - try: - fp_bits = ac_type.precision.integer + ac_type.precision.fractional - fp_integer = ac_type.precision.integer - fp_signed = ac_type.precision.signed - except Exception: - # FixedPrecisionType wasn't correctly stored in layer attributes, use default values - pass - - sep = '' - N = ceil_log2(table_size) - for i in range(table_size): - f = FixedPointEmulator(fp_bits, fp_integer, signed=fp_signed) - f.set_msb_bits(uint_to_binary(i, N)) - real_val = f.exp_float() - h_file.write(sep + str(real_val)) - sep = ", " - - h_file.write('};\n') - h_file.close() - - def __write_invert_table_latency(self, model, path): - table_name = 'invert_table_latency' - table_size = self.__get_table_size(model, 'softmax') - - h_file = open(f'{path}/{table_name}.tb', 'w') - h_file.write(self.__get_table_header(table_name, table_size)) - - # Default fixed point precision, in case values from layer attributes cannot be extracted - # 8 bits for integer part, 10 bits for decimal - total, 18 - fp_bits = 18 - fp_integer = 8 - fp_signed = True - - # Invert table should use the same precision as exp_table, as seen in Vivado code - # init_invert_table(invert_table); - for layer in model.get_layers(): - if layer.name == 'softmax': - ac_type = layer.get_attr('exp_table_t') - if ac_type is not None: - try: - fp_bits = ac_type.precision.integer + ac_type.precision.fractional - fp_integer = ac_type.precision.integer - fp_signed = ac_type.precision.signed - except Exception: - # FixedPrecisionType wasn't correctly stored in layer attributes, use default values - pass - - sep = '' - N = ceil_log2(table_size) - for i in range(table_size): - f = FixedPointEmulator(fp_bits, fp_integer, signed=fp_signed) - f.set_msb_bits(uint_to_binary(i, N)) - real_val = f.inv_float() - h_file.write(sep + str(real_val)) - sep = ", " - - h_file.write('};\n') - h_file.close() - - def __write_exp_table_legacy(self, model, path): - table_name = 'exp_table_legacy' - table_size = self.__get_table_size(model, 'softmax') - - h_file = open(f'{path}/{table_name}.tb', 'w') - h_file.write(self.__get_table_header(table_name, table_size)) - - sep = '' - for i in range(table_size): - in_val = 2 * 8.0 * (i - float(table_size) / 2.0) / float(table_size) - real_val = np.exp(in_val) - h_file.write(sep + str(real_val)) - sep = ", " - - h_file.write('};\n') - h_file.close() - - def __write_invert_table_legacy(self, model, path): - table_name = 
'invert_table_legacy' - table_size = self.__get_table_size(model, 'softmax') - - h_file = open(f'{path}/{table_name}.tb', 'w') - h_file.write(self.__get_table_header(table_name, table_size)) - - sep = '' - for i in range(table_size): - real_val = 0 - in_val = 64.0 * i / float(table_size) - if in_val > 0.0: - real_val = 1.0 / in_val - h_file.write(sep + str(real_val)) - sep = ", " - - h_file.write('};\n') - h_file.close() - - def write_activation_tables(self, model): - """Write the lookup tables for activation functions - - Args: - model (ModelGraph): the hls4ml model. - """ - # Output path - dstpath = f'{model.config.get_output_dir()}/firmware/nnet_utils/activation_tables' - if not os.path.exists(dstpath): - os.mkdir(dstpath) - - # Tables - # TODO - Only write tables needed by model, not all of them - self.__write_elu_table(model, dstpath) - self.__write_sigmoid_table(model, dstpath) - self.__write_tanh_table(model, dstpath) - self.__write_softplus_table(model, dstpath) - self.__write_softsign_table(model, dstpath) - self.__write_selu_table(model, dstpath) - self.__write_exp_table(model, dstpath) - self.__write_invert_table(model, dstpath) - self.__write_exp_table_latency(model, dstpath) - self.__write_invert_table_latency(model, dstpath) - self.__write_exp_table_legacy(model, dstpath) - self.__write_invert_table_legacy(model, dstpath) - - def write_yml(self, model): - """Write the config to the YAML file - - Args: - model (ModelGraph): the hls4ml model. - """ - - def keras_model_representer(dumper, keras_model): - model_path = model.config.get_output_dir() + '/keras_model.h5' - keras_model.save(model_path) - return dumper.represent_scalar('!keras_model', model_path) - - try: - from tensorflow.keras import Model as KerasModel - - yaml.add_multi_representer(KerasModel, keras_model_representer) - except Exception: - pass - - with open(model.config.get_output_dir() + '/' + config_filename, 'w') as file: - yaml.dump(model.config.config, file) - - def write_tar(self, model): - """Write the generated project as a .tar.gz archive - - Args: - model (ModelGraph): the hls4ml model. - """ - - with tarfile.open(model.config.get_output_dir() + '.tar.gz', mode='w:gz') as archive: - archive.add(model.config.get_output_dir(), recursive=True) - - def write_hls(self, model): - print('Writing HLS project') - self.write_project_dir(model) - self.write_project_cpp(model) - self.write_project_header(model) - self.write_weights(model) - self.write_defines(model) - self.write_parameters(model) - self.write_test_bench(model) - self.write_bridge(model) - self.write_build_script(model) - self.write_nnet_utils(model) - self.write_activation_tables(model) - self.write_yml(model) - self.write_tar(model) - print('Done') diff --git a/hls4ml/hls4ml/writer/symbolic_writer.py b/hls4ml/hls4ml/writer/symbolic_writer.py deleted file mode 100644 index 8ab5c53..0000000 --- a/hls4ml/hls4ml/writer/symbolic_writer.py +++ /dev/null @@ -1,114 +0,0 @@ -import glob -import os -from shutil import copyfile, copytree, rmtree - -from hls4ml.backends import get_backend -from hls4ml.writer.vivado_writer import VivadoWriter - - -class SymbolicExpressionWriter(VivadoWriter): - def write_nnet_utils(self, model): - """Copy the nnet_utils, AP types headers and any custom source to the project output directory - - Args: - model (ModelGraph): the hls4ml model. 
- """ - - # nnet_utils - filedir = os.path.dirname(os.path.abspath(__file__)) - - srcpath = os.path.join(filedir, '../templates/vivado/nnet_utils/') - dstpath = f'{model.config.get_output_dir()}/firmware/nnet_utils/' - - if not os.path.exists(dstpath): - os.mkdir(dstpath) - - headers = [os.path.basename(h) for h in glob.glob(srcpath + '*.h')] - - for h in headers: - copyfile(srcpath + h, dstpath + h) - - # ap_types - filedir = os.path.dirname(os.path.abspath(__file__)) - - srcpath = model.config.get_config_value('HLSIncludePath') - if not os.path.exists(srcpath): - srcpath = os.path.join(filedir, '../templates/vivado/ap_types/') - dstpath = f'{model.config.get_output_dir()}/firmware/ap_types/' - - if os.path.exists(dstpath): - rmtree(dstpath) - - copytree(srcpath, dstpath) - - # custom source - filedir = os.path.dirname(os.path.abspath(__file__)) - - custom_source = get_backend('Vivado').get_custom_source() - for dst, srcpath in custom_source.items(): - dstpath = f'{model.config.get_output_dir()}/firmware/{dst}' - copyfile(srcpath, dstpath) - - def write_build_script(self, model): - """Write the TCL/Shell build scripts (project.tcl, build_prj.tcl, vivado_synth.tcl, build_lib.sh) - - Args: - model (ModelGraph): the hls4ml model. - """ - - filedir = os.path.dirname(os.path.abspath(__file__)) - - # build_prj.tcl - f = open(f'{model.config.get_output_dir()}/project.tcl', 'w') - f.write('variable project_name\n') - f.write(f'set project_name "{model.config.get_project_name()}"\n') - f.write('variable backend\n') - f.write('set backend "vivado"\n') - f.write('variable part\n') - f.write('set part "{}"\n'.format(model.config.get_config_value('Part'))) - f.write('variable clock_period\n') - f.write('set clock_period {}\n'.format(model.config.get_config_value('ClockPeriod'))) - f.close() - - # build_prj.tcl - srcpath = os.path.join(filedir, '../templates/vivado/build_prj.tcl') - dstpath = f'{model.config.get_output_dir()}/build_prj.tcl' - copyfile(srcpath, dstpath) - - # vivado_synth.tcl - srcpath = os.path.join(filedir, '../templates/vivado/vivado_synth.tcl') - dstpath = f'{model.config.get_output_dir()}/vivado_synth.tcl' - copyfile(srcpath, dstpath) - - # build_lib.sh - f = open(os.path.join(filedir, '../templates/symbolic/build_lib.sh')) - fout = open(f'{model.config.get_output_dir()}/build_lib.sh', 'w') - - for line in f.readlines(): - line = line.replace('myproject', model.config.get_project_name()) - line = line.replace('mystamp', model.config.get_config_value('Stamp')) - line = line.replace('mylibspath', model.config.get_config_value('HLSLibsPath')) - - if 'LDFLAGS=' in line and not os.path.exists(model.config.get_config_value('HLSLibsPath')): - line = 'LDFLAGS=\n' - - fout.write(line) - f.close() - fout.close() - - def write_hls(self, model): - print('Writing HLS project') - self.write_project_dir(model) - self.write_project_cpp(model) - self.write_project_header(model) - # self.write_weights(model) # No weights to write - self.write_defines(model) - self.write_parameters(model) - self.write_test_bench(model) - self.write_bridge(model) - self.write_build_script(model) - self.write_nnet_utils(model) - self.write_generated_code(model) - self.write_yml(model) - self.write_tar(model) - print('Done') diff --git a/hls4ml/hls4ml/writer/vitis_writer.py b/hls4ml/hls4ml/writer/vitis_writer.py deleted file mode 100644 index 106cdbf..0000000 --- a/hls4ml/hls4ml/writer/vitis_writer.py +++ /dev/null @@ -1,32 +0,0 @@ -import glob -import os -from shutil import copy - -from hls4ml.writer.vivado_writer 
import VivadoWriter - - -class VitisWriter(VivadoWriter): - def __init__(self): - super().__init__() - - def write_nnet_utils_overrides(self, model): - ################### - # nnet_utils - ################### - - filedir = os.path.dirname(os.path.abspath(__file__)) - - srcpath = os.path.join(filedir, '../templates/vitis/nnet_utils/') - dstpath = f'{model.config.get_output_dir()}/firmware/nnet_utils/' - - headers = [os.path.basename(h) for h in glob.glob(srcpath + '*.h')] - - for h in headers: - copy(srcpath + h, dstpath + h) - - def write_hls(self, model): - """ - Write the HLS project. Calls the steps from VivadoWriter, adapted for Vitis - """ - super().write_hls(model) - self.write_nnet_utils_overrides(model) diff --git a/hls4ml/hls4ml/writer/vivado_accelerator_writer.py b/hls4ml/hls4ml/writer/vivado_accelerator_writer.py deleted file mode 100644 index 3b4e5fd..0000000 --- a/hls4ml/hls4ml/writer/vivado_accelerator_writer.py +++ /dev/null @@ -1,430 +0,0 @@ -import os -from distutils.dir_util import copy_tree -from shutil import copyfile - -from hls4ml.writer.vivado_writer import VivadoWriter - - -class VivadoAcceleratorWriter(VivadoWriter): - def __init__(self): - super().__init__() - self.vivado_accelerator_config = None - - def write_axi_wrapper(self, model): - '''Write a top level HLS C++ file to wrap the hls4ml project with AXI interfaces - Args: - model : The ModelGraph to write the wrapper for - ''' - inp_axi_t, out_axi_t, inp, out = self.vivado_accelerator_config.get_corrected_types() - indent = ' ' - - ####################### - # myproject_axi.h - ####################### - - filedir = os.path.dirname(os.path.abspath(__file__)) - f = open(os.path.join(filedir, '../templates/vivado_accelerator/myproject_axi.h')) - fout = open(f'{model.config.get_output_dir()}/firmware/{model.config.get_project_name()}_axi.h', 'w') - - for line in f.readlines(): - if 'MYPROJECT' in line: - newline = line.replace('MYPROJECT', format(model.config.get_project_name().upper())) - elif '// hls-fpga-machine-learning insert include' in line: - newline = f'#include "{model.config.get_project_name()}.h"\n' - elif 'myproject' in line: - newline = line.replace('myproject', model.config.get_project_name()) - elif '// hls-fpga-machine-learning insert definitions' in line: - newline = '' - newline += f'static const unsigned N_IN = {inp.size()};\n' - newline += f'static const unsigned N_OUT = {out.size()};\n' - if self.vivado_accelerator_config.get_interface() == 'axi_stream': - newline += f'typedef {inp_axi_t} T_in;\n' - newline += f'typedef {out_axi_t} T_out;\n' - newline += ( - 'typedef struct in_struct {\n' - + indent - + 'T_in data;\n' - + indent - + 'ap_uint<1> last;\n' - + indent - + 'in_struct(const T_in& data, const ap_uint<1>& last){this->data = data; this->last = last;};\n' - + indent - + 'in_struct(){this->data = 0; this->last = 0;};\n' - + indent - + 'friend std::ostream& operator<<(std::ostream& stream, const in_struct& in)\n' - + indent - + '{ return stream << "{ data: " << in.data << ", last: " << in.last << " }" << std::endl; }\n' - + indent - + 'operator float() const {return this->data;}\n' - + indent - + 'operator double() const {return this->data;}\n' - + indent - + 'in_struct(float data) {this->data = data; this->last = 0;}\n' - + indent - + 'in_struct(double data) {this->data = data; this->last = 0;}\n' - + '} input_axi_t;\n' - ) - newline += ( - 'typedef struct out_struct {\n' - + indent - + 'T_out data;\n' - + indent - + 'ap_uint<1> last;\n' - + indent - + 'out_struct(const T_out& data, 
const ap_uint<1>& last){this->data = data; this->last = last;};\n' - + indent - + 'out_struct(){this->data = 0; this->last = 0;};\n' - + indent - + 'friend std::ostream& operator<<(std::ostream& stream, const out_struct& out)\n' - + indent - + '{ return stream << "{ data: " << out.data << ", last: " << out.last << " }" << std::endl; }\n' - + indent - + 'operator float() const {return this->data;}\n' - + indent - + 'operator double() const {return this->data;}\n' - + indent - + 'out_struct(float data) {this->data = data; this->last = 0;}\n' - + indent - + 'out_struct(double data) {this->data = data; this->last = 0;}\n' - + '} output_axi_t;\n' - ) - else: - newline += f'typedef {inp_axi_t} input_axi_t;\n' - newline += f'typedef {out_axi_t} output_axi_t;\n' - else: - newline = line - fout.write(newline) - f.close() - fout.close() - - ####################### - # myproject_axi.cpp - ####################### - - f = open(os.path.join(filedir, '../templates/vivado_accelerator/myproject_axi.cpp')) - fout = open(f'{model.config.get_output_dir()}/firmware/{model.config.get_project_name()}_axi.cpp', 'w') - - io_type = model.config.get_config_value("IOType") - - for line in f.readlines(): - if 'myproject' in line: - newline = line.replace('myproject', model.config.get_project_name()) - elif '// hls-fpga-machine-learning insert include' in line: - newline = f'#include "{model.config.get_project_name()}_axi.h"\n' - elif '// hls-fpga-machine-learning insert local vars' in line: - newline = '' - if self.vivado_accelerator_config.get_interface() == 'axi_stream': - newline += indent + 'bool is_last = false;\n' - if io_type == 'io_parallel': - newline += indent + inp.type.name + ' in_local[N_IN];\n' - newline += indent + out.type.name + ' out_local[N_OUT];\n' - elif io_type == 'io_stream': - newline += indent + 'hls::stream<' + inp.type.name + '> in_local("input_1");\n' - newline += indent + 'hls::stream<' + out.type.name + '> out_local("output_1");\n\n' - newline += indent + '#pragma HLS STREAM variable=in_local depth={}\n'.format( - model.get_input_variables()[0].pragma[1] - ) - newline += indent + '#pragma HLS STREAM variable=out_local depth={}\n'.format( - model.get_output_variables()[0].pragma[1] - ) - elif '// hls-fpga-machine-learning insert call' in line: - newline = indent + f'{model.config.get_project_name()}(in_local, out_local);\n' - elif '// hls-fpga-machine-learning insert interface' in line: - if self.vivado_accelerator_config.get_interface() == 'axi_lite': - newline = '' - newline += indent + '#pragma HLS INTERFACE ap_ctrl_none port=return\n' - newline += indent + '#pragma HLS INTERFACE s_axilite port=in\n' - newline += indent + '#pragma HLS INTERFACE s_axilite port=out\n' - elif self.vivado_accelerator_config.get_interface() == 'axi_master': - newline = '' - newline += indent + '#pragma HLS INTERFACE s_axilite port=return bundle=CTRL_BUS\n' - newline += indent + '#pragma HLS INTERFACE m_axi depth={} port=in offset=slave bundle=IN_BUS\n'.format( - model.get_input_variables()[0].pragma[1] - ) - newline += indent + '#pragma HLS INTERFACE m_axi depth={} port=out offset=slave bundle=OUT_BUS\n'.format( - model.get_output_variables()[0].pragma[1] - ) - elif self.vivado_accelerator_config.get_interface() == 'axi_stream': - newline = '' - newline += indent + '#pragma HLS INTERFACE axis port=in\n' - newline += indent + '#pragma HLS INTERFACE axis port=out\n' - newline += indent + '#pragma HLS INTERFACE ap_ctrl_none port=return\n' - if model.config.get_config_value("IOType") == 'io_stream': - newline += 
indent + '#pragma HLS DATAFLOW\n' - elif '// hls-fpga-machine-learning insert enqueue' in line: - io_type = model.config.get_config_value("IOType") - if io_type == 'io_parallel': - newline = '' - newline += indent + 'for(unsigned i = 0; i < N_IN; i++){\n' - if self.vivado_accelerator_config.get_interface() == 'axi_stream': - newline += indent + indent + '#pragma HLS PIPELINE\n' - newline += indent + indent + 'in_local[i] = in[i].data; // Read input with cast\n' - newline += indent + indent + 'is_last |= (in[i].last == 1)? true: false;\n' - else: - newline += indent + indent + '#pragma HLS UNROLL\n' - newline += indent + indent + 'in_local[i] = in[i]; // Read input with cast\n' - newline += indent + '}\n' - elif io_type == 'io_stream': - newline = '' - newline += indent + 'for(unsigned i = 0; i < N_IN / {input_t}::size; ++i) {{\n' - # newline += indent + indent + '#pragma HLS PIPELINE\n' - newline += indent + indent + '{input_t} ctype;\n' - newline += indent + indent + '#pragma HLS DATA_PACK variable=ctype\n' - newline += indent + indent + 'for(unsigned j = 0; j < {input_t}::size; j++) {{\n' - # newline += indent + indent + indent + '#pragma HLS UNROLL\n' - if self.vivado_accelerator_config.get_interface() == 'axi_stream': - newline += ( - indent - + indent - + indent - + 'ctype[j] = typename {input_t}::value_type(in[i * {input_t}::size + j].data);\n' - ) - newline += ( - indent + indent + indent + 'is_last |= (in[i * input_t::size + j].last == 1)? true : false;\n' - ) - else: - newline += ( - indent - + indent - + indent - + 'ctype[j] = typename {input_t}::value_type(in[i * {input_t}::size + j]);\n' - ) - newline += indent + indent + '}}\n' - newline += indent + indent + 'in_local.write(ctype);\n' - newline += indent + '}}\n' - newline = newline.format(input_t=inp.type.name) - elif '// hls-fpga-machine-learning insert dequeue' in line: - io_type = model.config.get_config_value("IOType") - if io_type == 'io_parallel': - newline = '' - newline += indent + 'for(unsigned i = 0; i < N_OUT; i++){\n' - if self.vivado_accelerator_config.get_interface() == 'axi_stream': - newline += indent + indent + '#pragma HLS PIPELINE\n' - newline += indent + indent + 'out[i].data = out_local[i]; // Write output with cast\n' - newline += indent + indent + 'out[i].last = (is_last && (i == N_OUT - 1))? true : false;\n' - else: - newline += indent + indent + '#pragma HLS UNROLL\n' - newline += indent + indent + 'out[i] = out_local[i]; // Write output with cast\n' - newline += indent + '}\n' - elif io_type == 'io_stream': - newline = '' - newline += indent + 'for(unsigned i = 0; i < N_OUT / {result_t}::size; ++i) {{\n' - # newline += indent + indent + '#pragma HLS PIPELINE\n' - newline += indent + indent + '{result_t} ctype = out_local.read();\n' - newline += indent + indent + 'for(unsigned j = 0; j < {result_t}::size; j++) {{\n' - # newline += indent + indent + indent + '#pragma HLS UNROLL\n' - if self.vivado_accelerator_config.get_interface() == 'axi_stream': - newline += ( - indent - + indent - + indent - + 'bool last = (is_last && (i * {result_t}::size + j == N_OUT - 1)) ? 
true : false;\n' - ) - newline += ( - indent + indent + indent + 'out[i * {result_t}::size + j] = output_axi_t(ctype[j], last);\n' - ) - else: - newline += indent + indent + indent + 'out[i * {result_t}::size + j] = output_axi_t(ctype[j]);\n' - newline += indent + indent + '}}\n' - newline += indent + '}}\n' - newline = newline.format(result_t=out.type.name) - else: - newline = line - fout.write(newline) - f.close() - fout.close() - - def modify_build_script(self, model): - ''' - Modify the build_prj.tcl and build_lib.sh scripts to add the extra wrapper files and set the top function - ''' - filedir = os.path.dirname(os.path.abspath(__file__)) - oldfile = f'{model.config.get_output_dir()}/build_prj.tcl' - newfile = f'{model.config.get_output_dir()}/build_prj_axi.tcl' - f = open(oldfile) - fout = open(newfile, 'w') - - for line in f.readlines(): - if 'set_top' in line: - newline = line[:-1] + '_axi\n' # remove the newline from the line end and append _axi for the new top - newline += f'add_files firmware/{model.config.get_project_name()}_axi.cpp -cflags "-std=c++0x"\n' - elif f'{model.config.get_project_name()}_cosim' in line: - newline = line.replace( - f'{model.config.get_project_name()}_cosim', - f'{model.config.get_project_name()}_axi_cosim', - ) - elif '${project_name}.tcl' in line: - newline = line.replace('${project_name}.tcl', '${project_name}_axi.tcl') - else: - newline = line - fout.write(newline) - - f.close() - fout.close() - os.rename(newfile, oldfile) - - ################### - # build_lib.sh - ################### - - f = open(os.path.join(filedir, '../templates/vivado_accelerator/build_lib.sh')) - fout = open(f'{model.config.get_output_dir()}/build_lib.sh', 'w') - - for line in f.readlines(): - line = line.replace('myproject', model.config.get_project_name()) - line = line.replace('mystamp', model.config.get_config_value('Stamp')) - - fout.write(line) - f.close() - fout.close() - - def write_wrapper_test(self, model): - ################### - # write myproject_test_wrapper.cpp - ################### - oldfile = f'{model.config.get_output_dir()}/{model.config.get_project_name()}_test.cpp' - newfile = f'{model.config.get_output_dir()}/{model.config.get_project_name()}_test_wrapper.cpp' - - f = open(oldfile) - fout = open(newfile, 'w') - - inp = model.get_input_variables()[0] - out = model.get_output_variables()[0] - - for line in f.readlines(): - if f'{model.config.get_project_name()}.h' in line: - newline = line.replace(f'{model.config.get_project_name()}.h', f'{model.config.get_project_name()}_axi.h') - elif inp.definition_cpp() in line: - newline = line.replace( - inp.definition_cpp(), 'input_axi_t inputs[N_IN]' - ) # TODO instead of replacing strings, how about we use proper variables and their definition? 
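# A minimal sketch (illustrative, not part of this patch) of the rewrite-in-place
# idiom used by modify_build_script() above and write_wrapper_test() here: stream
# an already generated file through textual substitutions into a sibling file,
# then rename the result over the original. The file names and substitution pairs
# below are assumptions for illustration, not the actual hls4ml helpers.
import os

def rewrite_in_place(path, substitutions):
    tmp = path + '.new'
    with open(path) as fin, open(tmp, 'w') as fout:
        for line in fin:
            for old, new in substitutions:
                line = line.replace(old, new)  # apply each (old, new) pair
            fout.write(line)
    os.rename(tmp, path)  # swap in only once the rewrite is complete

# Hypothetical usage mirroring the AXI wrapper edits above:
# rewrite_in_place('myproject_test.cpp',
#                  [('myproject.h', 'myproject_axi.h'), ('copy_data', 'copy_data_axi')])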
- elif out.definition_cpp() in line: - newline = line.replace(out.definition_cpp(), 'output_axi_t outputs[N_OUT]') - elif 'unsigned short' in line: - newline = '' - elif f'{model.config.get_project_name()}(' in line: - indent_amount = line.split(model.config.get_project_name())[0] - newline = indent_amount + f'{model.config.get_project_name()}_axi(inputs,outputs);\n' - elif inp.size_cpp() in line or inp.name in line or inp.type.name in line: - newline = ( - line.replace(inp.size_cpp(), 'N_IN').replace(inp.name, 'inputs').replace(inp.type.name, 'input_axi_t') - ) - elif out.size_cpp() in line or out.name in line or out.type.name in line: - newline = ( - line.replace(out.size_cpp(), 'N_OUT').replace(out.name, 'outputs').replace(out.type.name, 'output_axi_t') - ) - else: - newline = line - if self.vivado_accelerator_config.get_interface() == 'axi_stream': - if 'nnet::fill_zero' in line: - indent = line.split('n')[0] - newline = indent + 'inputs[N_IN-1].last = 1;\n' - if 'copy_data' in line: - newline = newline.replace('copy_data', 'copy_data_axi') - fout.write(newline) - - f.close() - fout.close() - os.rename(newfile, oldfile) - - ################### - # write myproject_bridge_wrapper.cpp - ################### - oldfile = f'{model.config.get_output_dir()}/{model.config.get_project_name()}_bridge.cpp' - newfile = f'{model.config.get_output_dir()}/{model.config.get_project_name()}_bridge_wrapper.cpp' - - f = open(oldfile) - fout = open(newfile, 'w') - - inp = model.get_input_variables()[0] - out = model.get_output_variables()[0] - - for line in f.readlines(): - if f'{model.config.get_project_name()}.h' in line: - newline = line.replace(f'{model.config.get_project_name()}.h', f'{model.config.get_project_name()}_axi.h') - elif inp.definition_cpp(name_suffix='_ap') in line: - newline = line.replace(inp.definition_cpp(name_suffix='_ap'), f'input_axi_t {inp.name}_ap[N_IN]') - elif out.definition_cpp(name_suffix='_ap') in line: - newline = line.replace(out.definition_cpp(name_suffix='_ap'), f'output_axi_t {out.name}_ap[N_OUT]') - elif f'{model.config.get_project_name()}(' in line: - indent_amount = line.split(model.config.get_project_name())[0] - newline = indent_amount + '{}_axi({}_ap,{}_ap);\n'.format( - model.config.get_project_name(), inp.name, out.name - ) - elif inp.size_cpp() in line or inp.name in line or inp.type.name in line: - newline = line.replace(inp.size_cpp(), 'N_IN').replace(inp.type.name, 'input_axi_t') - elif out.size_cpp() in line or out.name in line or out.type.name in line: - newline = line.replace(out.size_cpp(), 'N_OUT').replace(out.type.name, 'output_axi_t') - else: - newline = line - fout.write(newline) - - f.close() - fout.close() - os.rename(newfile, oldfile) - - def write_board_script(self, model): - ''' - Write the tcl scripts and kernel sources to create a Vivado IPI project for the VivadoAccelerator - ''' - filedir = os.path.dirname(os.path.abspath(__file__)) - copyfile( - os.path.join(filedir, self.vivado_accelerator_config.get_tcl_file_path()), - f'{model.config.get_output_dir()}/design.tcl', - ) - # Generic alveo board - if self.vivado_accelerator_config.get_board().startswith('alveo'): - src_dir = os.path.join(filedir, self.vivado_accelerator_config.get_krnl_rtl_src_dir()) - dst_dir = os.path.abspath(model.config.get_output_dir()) + '/src' - copy_tree(src_dir, dst_dir) - - ################### - # project.tcl - ################### - f = open(f'{model.config.get_output_dir()}/project.tcl', 'w') - f.write('variable project_name\n') - f.write(f'set project_name 
"{model.config.get_project_name()}"\n') - f.write('variable backend\n') - f.write('set backend "vivadoaccelerator"\n') - f.write('variable part\n') - f.write(f'set part "{self.vivado_accelerator_config.get_part()}"\n') - f.write('variable clock_period\n') - f.write('set clock_period {}\n'.format(model.config.get_config_value('ClockPeriod'))) - f.write('variable clock_uncertainty\n') - f.write('set clock_uncertainty {}\n'.format(model.config.get_config_value('ClockUncertainty', '12.5%'))) - f.write('variable version\n') - f.write('set version "{}"\n'.format(model.config.get_config_value('Version', '1.0.0'))) - if self.vivado_accelerator_config.get_interface() == 'axi_stream': - in_bit, out_bit = self.vivado_accelerator_config.get_io_bitwidth() - f.write(f'set bit_width_hls_output {in_bit}\n') - f.write(f'set bit_width_hls_input {out_bit}\n') - f.close() - - def write_driver(self, model): - filedir = os.path.dirname(os.path.abspath(__file__)) - copyfile( - os.path.join(filedir, self.vivado_accelerator_config.get_driver_path()), - ('{}/' + self.vivado_accelerator_config.get_driver_file()).format(model.config.get_output_dir()), - ) - - def write_new_tar(self, model): - os.remove(model.config.get_output_dir() + '.tar.gz') - super().write_tar(model) - - def write_hls(self, model): - """ - Write the HLS project. Calls the VivadoBackend writer, and extra steps for VivadoAccelerator/AXI interface - """ - # TODO temporarily move config import here to avoid cyclic dependency, until config is moved to its own package - from hls4ml.backends import VivadoAcceleratorConfig - - self.vivado_accelerator_config = VivadoAcceleratorConfig( - model.config, model.get_input_variables(), model.get_output_variables() - ) - super().write_hls(model) - self.write_board_script(model) - self.write_driver(model) - self.write_wrapper_test(model) - self.write_axi_wrapper(model) - self.modify_build_script(model) - self.write_new_tar(model) diff --git a/hls4ml/hls4ml/writer/vivado_writer.py b/hls4ml/hls4ml/writer/vivado_writer.py deleted file mode 100644 index 412bb8d..0000000 --- a/hls4ml/hls4ml/writer/vivado_writer.py +++ /dev/null @@ -1,728 +0,0 @@ -import glob -import os -import tarfile -from collections import OrderedDict -from shutil import copyfile, copytree, rmtree - -import numpy as np -import yaml - -from hls4ml.writer.writers import Writer - -config_filename = 'hls4ml_config.yml' - - -class VivadoWriter(Writer): - def print_array_to_cpp(self, var, odir, write_txt_file=True): - """Write a weights array to C++ header files. - - Args: - var (WeightVariable): Weight to write - odir (str): Output directory - write_txt_file (bool, optional): Write txt files in addition to .h files. Defaults to True. - """ - - h_file = open(f"{odir}/firmware/weights/{var.name}.h", "w") - if write_txt_file: - txt_file = open(f"{odir}/firmware/weights/{var.name}.txt", "w") - - # meta data - h_file.write(f"//Numpy array shape {var.shape}\n") - h_file.write(f"//Min {np.min(var.min):.12f}\n") - h_file.write(f"//Max {np.max(var.max):.12f}\n") - h_file.write(f"//Number of zeros {var.nzeros}\n") - h_file.write("\n") - - h_file.write(f"#ifndef {var.name.upper()}_H_\n") - h_file.write(f"#define {var.name.upper()}_H_\n") - h_file.write("\n") - - if write_txt_file: - h_file.write("#ifndef __SYNTHESIS__\n") - h_file.write(var.definition_cpp() + ";\n") - h_file.write("#else\n") - - h_file.write(var.definition_cpp() + " = {") - - # fill c++ array. 
- # not including internal brackets for multidimensional case - sep = '' - for x in var: - h_file.write(sep + x) - if write_txt_file: - txt_file.write(sep + x) - sep = ", " - h_file.write("};\n") - if write_txt_file: - h_file.write("#endif\n") - txt_file.close() - h_file.write("\n#endif\n") - h_file.close() - - def write_project_dir(self, model): - """Write the base project directory - - Args: - model (ModelGraph): the hls4ml model. - """ - if not os.path.isdir(f"{model.config.get_output_dir()}/firmware/weights"): - os.makedirs(f"{model.config.get_output_dir()}/firmware/weights") - - @staticmethod - def _make_array_pragma(variable): - """ - Layers in hls_model.py can specify output array partitioning through the `pragma` attribute. - If `pragma` is a string: options are 'partition', 'reshape', or 'stream'. - If `pragma` is a tuple: (mode, type, factor) where mode is 'partition' or 'reshape', type is - 'complete', 'cyclic', or 'block', and factor is an integer only used when the type is not 'complete'. - """ - - config = variable.pragma - if type(config) is tuple: - mode = config[0] - if mode in ['partition', 'reshape']: - typ = config[1] - if typ != 'complete': - factor = config[2] - elif mode == 'stream': - depth = config[1] - else: - mode = config - typ = 'complete' - factor = 0 - - if mode in ['partition', 'reshape']: - if typ == 'complete': - template = '#pragma HLS ARRAY_{mode} variable={name} {type} dim={dim}' - else: - template = '#pragma HLS ARRAY_{mode} variable={name} {type} factor={factor} dim={dim}' - - return template.format(mode=mode.upper(), name=variable.name, type=typ, factor=factor, dim=0) - - elif mode == 'stream': - return f'#pragma HLS STREAM variable={variable.name} depth={depth}' - - def write_project_cpp(self, model): - """Write the main architecture source file (myproject.cpp) - - Args: - model (ModelGraph): the hls4ml model. 
- """ - - filedir = os.path.dirname(os.path.abspath(__file__)) - - f = open(os.path.join(filedir, '../templates/vivado/firmware/myproject.cpp')) - fout = open(f'{model.config.get_output_dir()}/firmware/{model.config.get_project_name()}.cpp', 'w') - - model_inputs = model.get_input_variables() - model_outputs = model.get_output_variables() - model_brams = [var for var in model.get_weight_variables() if var.storage.lower() == 'bram'] - - indent = ' ' - - for line in f.readlines(): - # Add headers to weights and biases - if 'myproject' in line: - newline = line.replace('myproject', model.config.get_project_name()) - elif '// hls-fpga-machine-learning insert header' in line: - inputs_str = ', '.join([i.definition_cpp(as_reference=True) for i in model_inputs]) - outputs_str = ', '.join([o.definition_cpp(as_reference=True) for o in model_outputs]) - brams_str = ', \n'.join([indent + b.definition_cpp(as_reference=False) for b in model_brams]) - - newline = '' - newline += indent + inputs_str + ',\n' - newline += indent + outputs_str - if len(model_brams) > 0: - newline += ',\n' + brams_str - newline += '\n' - - elif '// hls-fpga-machine-learning insert load weights' in line: - newline = line - for layer in model.get_layers(): - for w in layer.get_weights(): - if w.weight_class == 'CompressedWeightVariable': - newline += indent + ' nnet::load_compressed_weights_from_txt<{}, {}>({}, "{}.txt");\n'.format( - w.type.name, w.nonzeros, w.name, w.name - ) - elif w.weight_class == 'ExponentWeightVariable': - newline += indent + ' nnet::load_exponent_weights_from_txt<{}, {}>({}, "{}.txt");\n'.format( - w.type.name, w.data_length, w.name, w.name - ) - else: - newline += indent + ' nnet::load_weights_from_txt<{}, {}>({}, "{}.txt");\n'.format( - w.type.name, w.data_length, w.name, w.name - ) - - # Add input/output type - elif '// hls-fpga-machine-learning insert IO' in line: - newline = line - all_inputs = [i.name for i in model_inputs] - all_outputs = [o.name for o in model_outputs] - all_brams = [b.name for b in model_brams] - io_type = model.config.get_config_value("IOType") - - if io_type == 'io_parallel': - for i in model_inputs: - newline += indent + self._make_array_pragma(i) + '\n' - for o in model_outputs: - newline += indent + self._make_array_pragma(o) + '\n' - # TODO discussed adding a handle for setting the interface mode for individual input and output arrays - # Probably the handle doesn't need to be exposed to the user but should be just set in hls_model.py - newline += indent + '#pragma HLS INTERFACE ap_vld port={},{} \n'.format( - ','.join(all_inputs), ','.join(all_outputs) - ) - if model.config.pipeline_style.lower() == 'dataflow': - newline += indent + '#pragma HLS DATAFLOW \n' - else: - newline += indent + '#pragma HLS PIPELINE \n' - if io_type == 'io_stream': - newline += indent + '#pragma HLS INTERFACE axis port={},{} \n'.format( - ','.join(all_inputs), ','.join(all_outputs) - ) - if all_brams: - newline += indent + '#pragma HLS INTERFACE bram port={} \n'.format(','.join(all_brams)) - newline += indent + '#pragma HLS DATAFLOW \n' - - elif '// hls-fpga-machine-learning insert layers' in line: - newline = line + '\n' - for layer in model.get_layers(): - vars = layer.get_variables() - for var in vars: - if var not in model_inputs and var not in model_outputs: - def_cpp = var.definition_cpp() - if def_cpp is not None: - newline += ' ' + def_cpp + ';\n' - if var.pragma: - newline += ' ' + self._make_array_pragma(var) + '\n' - func = layer.get_attr('function_cpp', None) - if func: - if not 
isinstance(func, (list, set)): - func = [func] - if len(func) == 1: - newline += ' ' + func[0] + ' // ' + layer.name + '\n' - else: - newline += ' // ' + layer.name + '\n' - for line in func: - newline += ' ' + line + '\n' - if model.config.trace_output and layer.get_attr('trace', False): - newline += '#ifndef __SYNTHESIS__\n' - for var in vars: - newline += ' nnet::save_layer_output<{}>({}, "{}", {});\n'.format( - var.type.name, var.name, layer.name, var.size_cpp() - ) - newline += '#endif\n' - newline += '\n' - - # Just copy line - else: - newline = line - - fout.write(newline) - - f.close() - fout.close() - - def write_project_header(self, model): - """Write the main architecture header file (myproject.h) - - Args: - model (ModelGraph): the hls4ml model. - """ - - filedir = os.path.dirname(os.path.abspath(__file__)) - f = open(os.path.join(filedir, '../templates/vivado/firmware/myproject.h')) - fout = open(f'{model.config.get_output_dir()}/firmware/{model.config.get_project_name()}.h', 'w') - - model_inputs = model.get_input_variables() - model_outputs = model.get_output_variables() - model_brams = [var for var in model.get_weight_variables() if var.storage.lower() == 'bram'] - - indent = ' ' - - for line in f.readlines(): - if 'MYPROJECT' in line: - newline = line.replace('MYPROJECT', format(model.config.get_project_name().upper())) - elif 'myproject' in line: - newline = line.replace('myproject', model.config.get_project_name()) - elif '// hls-fpga-machine-learning insert header' in line: - inputs_str = ', '.join([i.definition_cpp(as_reference=True) for i in model_inputs]) - outputs_str = ', '.join([o.definition_cpp(as_reference=True) for o in model_outputs]) - brams_str = ', \n'.join([indent + b.definition_cpp(as_reference=False) for b in model_brams]) - - newline = '' - newline += indent + inputs_str + ',\n' - newline += indent + outputs_str - if len(model_brams) > 0: - newline += ',\n' + brams_str - newline += '\n' - else: - newline = line - fout.write(newline) - - f.close() - fout.close() - - def write_defines(self, model): - """Write the C++ type definitions file (defines.h) - - Args: - model (ModelGraph): the hls4ml model. - """ - filedir = os.path.dirname(os.path.abspath(__file__)) - f = open(os.path.join(filedir, '../templates/vivado/firmware/defines.h')) - fout = open(f'{model.config.get_output_dir()}/firmware/defines.h', 'w') - - for line in f.readlines(): - # Insert numbers - if '// hls-fpga-machine-learning insert numbers' in line: - newline = line - - defines_list = [] - for layer in model.get_layers(): - defines = '' - for k, v in layer.get_output_variable().get_shape(): - defines += f'#define {k} {v}\n' - - defines_list.append(defines) - - newline += ''.join(defines_list) - - elif '// hls-fpga-machine-learning insert layer-precision' in line: - newline = line - all_precision = OrderedDict() - for layer in model.get_layers(): - layer_precision = layer.get_layer_precision() - for type_name, type_var in layer_precision.items(): - # Ensure that layer's types doesn't override existing types - # This can happen in case of InplaceVariable types - if type_name not in all_precision: - all_precision[type_name] = type_var - for used_type in all_precision.values(): - newline += used_type.definition_cpp() - - else: - newline = line - fout.write(newline) - f.close() - fout.close() - - def write_parameters(self, model): - """Write the C++ layer config file (parameters.h) - - Args: - model (ModelGraph): the hls4ml model. 
- """ - filedir = os.path.dirname(os.path.abspath(__file__)) - f = open(os.path.join(filedir, '../templates/vivado/firmware/parameters.h')) - fout = open(f'{model.config.get_output_dir()}/firmware/parameters.h', 'w') - - for line in f.readlines(): - if '// hls-fpga-machine-learning insert includes' in line: - newline = line - for include in sorted(set(sum((layer.get_attr('include_header', []) for layer in model.get_layers()), []))): - newline += '#include "%s"\n' % include - - elif '// hls-fpga-machine-learning insert weights' in line: - newline = line - for layer in model.get_layers(): - for w in layer.get_weights(): - if w.storage.lower() != 'bram': - newline += f'#include "weights/{w.name}.h"\n' - - elif "// hls-fpga-machine-learning insert layer-config" in line: - newline = line - for layer in model.get_layers(): - config = layer.get_attr('config_cpp', None) - if config: - newline += '// ' + layer.name + '\n' - newline += config + '\n' - else: - newline = line - fout.write(newline) - f.close() - fout.close() - - def write_weights(self, model): - """Write the weights into header files - - Args: - model (ModelGraph): the hls4ml model. - """ - for layer in model.get_layers(): - for weights in layer.get_weights(): - self.print_array_to_cpp(weights, model.config.get_output_dir()) - - def __make_dat_file(self, original_path, project_path): - """ - Convert other input/output data types into a dat file, which is - a text file with the falttened matrix printed out. Note that ' ' is - assumed to be the delimiter. - """ - - # Take in data from current supported data files - if original_path[-3:] == "npy": - data = np.load(original_path) - else: - raise Exception("Unsupported input/output data files.") - - # Faltten data, just keep first dimension - data = data.reshape(data.shape[0], -1) - - def print_data(f): - for i in range(data.shape[0]): - for j in range(data.shape[1]): - f.write(str(data[i][j]) + " ") - f.write("\n") - - # Print out in dat file - with open(project_path, "w") as f: - print_data(f) - - def write_test_bench(self, model): - """Write the testbench files (myproject_test.cpp and input/output .dat files) - - Args: - model (ModelGraph): the hls4ml model. 
- """ - - filedir = os.path.dirname(os.path.abspath(__file__)) - - if not os.path.exists(f'{model.config.get_output_dir()}/tb_data/'): - os.mkdir(f'{model.config.get_output_dir()}/tb_data/') - - input_data = model.config.get_config_value('InputData') - output_predictions = model.config.get_config_value('OutputPredictions') - - if input_data: - if input_data[-3:] == "dat": - copyfile(input_data, f'{model.config.get_output_dir()}/tb_data/tb_input_features.dat') - else: - self.__make_dat_file(input_data, f'{model.config.get_output_dir()}/tb_data/tb_input_features.dat') - - if output_predictions: - if output_predictions[-3:] == "dat": - copyfile(output_predictions, f'{model.config.get_output_dir()}/tb_data/tb_output_predictions.dat') - else: - self.__make_dat_file( - output_predictions, f'{model.config.get_output_dir()}/tb_data/tb_output_predictions.dat' - ) - - f = open(os.path.join(filedir, '../templates/vivado/myproject_test.cpp')) - fout = open(f'{model.config.get_output_dir()}/{model.config.get_project_name()}_test.cpp', 'w') - - model_inputs = model.get_input_variables() - model_outputs = model.get_output_variables() - model_brams = [var for var in model.get_weight_variables() if var.storage.lower() == 'bram'] - - for line in f.readlines(): - indent = ' ' * (len(line) - len(line.lstrip(' '))) - - # Insert numbers - if 'myproject' in line: - newline = line.replace('myproject', model.config.get_project_name()) - elif '// hls-fpga-machine-learning insert bram' in line: - newline = line - for bram in model_brams: - newline += f'#include \"firmware/weights/{bram.name}.h\"\n' - elif '// hls-fpga-machine-learning insert data' in line: - newline = line - offset = 0 - for inp in model_inputs: - newline += ' ' + inp.definition_cpp() + ';\n' - newline += ' nnet::copy_data(in, {});\n'.format( - inp.type.name, offset, inp.size_cpp(), inp.name - ) - offset += inp.size() - for out in model_outputs: - newline += ' ' + out.definition_cpp() + ';\n' - elif '// hls-fpga-machine-learning insert zero' in line: - newline = line - for inp in model_inputs: - newline += ' ' + inp.definition_cpp() + ';\n' - newline += f' nnet::fill_zero<{inp.type.name}, {inp.size_cpp()}>({inp.name});\n' - for out in model_outputs: - newline += ' ' + out.definition_cpp() + ';\n' - elif '// hls-fpga-machine-learning insert top-level-function' in line: - newline = line - - input_vars = ','.join([i.name for i in model_inputs]) - output_vars = ','.join([o.name for o in model_outputs]) - bram_vars = ','.join([b.name for b in model_brams]) - - # Concatenate the input, output, and bram variables. 
Filter out empty/null values - all_vars = ','.join(filter(None, [input_vars, output_vars, bram_vars])) - - top_level = indent + f'{model.config.get_project_name()}({all_vars});\n' - - newline += top_level - elif '// hls-fpga-machine-learning insert predictions' in line: - newline = line - for out in model_outputs: - newline += indent + f'for(int i = 0; i < {out.size_cpp()}; i++) {{\n' - newline += indent + ' std::cout << pr[i] << " ";\n' - newline += indent + '}\n' - newline += indent + 'std::cout << std::endl;\n' - elif '// hls-fpga-machine-learning insert tb-output' in line: - newline = line - for out in model_outputs: - newline += indent + 'nnet::print_result<{}, {}>({}, fout);\n'.format( - out.type.name, out.size_cpp(), out.name - ) # TODO enable this - elif ( - '// hls-fpga-machine-learning insert output' in line - or '// hls-fpga-machine-learning insert quantized' in line - ): - newline = line - for out in model_outputs: - newline += indent + 'nnet::print_result<{}, {}>({}, std::cout, true);\n'.format( - out.type.name, out.size_cpp(), out.name - ) - else: - newline = line - fout.write(newline) - f.close() - fout.close() - - def write_bridge(self, model): - """Write the Python-C++ bridge (myproject_bridge.cpp) - - Args: - model (ModelGraph): the hls4ml model. - """ - - filedir = os.path.dirname(os.path.abspath(__file__)) - f = open(os.path.join(filedir, '../templates/vivado/myproject_bridge.cpp')) - fout = open(f'{model.config.get_output_dir()}/{model.config.get_project_name()}_bridge.cpp', 'w') - - model_inputs = model.get_input_variables() - model_outputs = model.get_output_variables() - model_brams = [var for var in model.get_weight_variables() if var.storage.lower() == 'bram'] - - indent = ' ' - - for line in f.readlines(): - if 'MYPROJECT' in line: - newline = line.replace('MYPROJECT', format(model.config.get_project_name().upper())) - elif 'myproject' in line: - newline = line.replace('myproject', format(model.config.get_project_name())) - elif '// hls-fpga-machine-learning insert bram' in line: - newline = line - for bram in model_brams: - newline += f'#include \"firmware/weights/{bram.name}.h\"\n' - elif '// hls-fpga-machine-learning insert header' in line: - dtype = line.split('#', 1)[1].strip() - inputs_str = ', '.join([f'{dtype} {i.name}[{i.size_cpp()}]' for i in model_inputs]) - outputs_str = ', '.join([f'{dtype} {o.name}[{o.size_cpp()}]' for o in model_outputs]) - - newline = '' - newline += indent + inputs_str + ',\n' - newline += indent + outputs_str + '\n' - elif '// hls-fpga-machine-learning insert wrapper' in line: - dtype = line.split('#', 1)[1].strip() - newline = '' - for i in model_inputs: - newline += indent + '{var};\n'.format(var=i.definition_cpp(name_suffix='_ap')) - newline += indent + 'nnet::convert_data<{}, {}, {}>({}, {}_ap);\n'.format( - dtype, i.type.name, i.size_cpp(), i.name, i.name - ) - newline += '\n' - - for o in model_outputs: - newline += indent + '{var};\n'.format(var=o.definition_cpp(name_suffix='_ap')) - - newline += '\n' - - input_vars = ','.join([i.name + '_ap' for i in model_inputs]) - bram_vars = ','.join([b.name for b in model_brams]) - output_vars = ','.join([o.name + '_ap' for o in model_outputs]) - - # Concatenate the input, output, and bram variables. 
-                all_vars = ','.join(filter(None, [input_vars, output_vars, bram_vars]))
-
-                top_level = indent + f'{model.config.get_project_name()}({all_vars});\n'
-                newline += top_level
-
-                newline += '\n'
-
-                for o in model_outputs:
-                    newline += indent + 'nnet::convert_data<{}, {}, {}>({}_ap, {});\n'.format(
-                        o.type.name, dtype, o.size_cpp(), o.name, o.name
-                    )
-            elif '// hls-fpga-machine-learning insert trace_outputs' in line:
-                newline = ''
-                for layer in model.get_layers():
-                    func = layer.get_attr('function_cpp', None)
-                    if func and model.config.trace_output and layer.get_attr('trace', False):
-                        vars = layer.get_variables()
-                        for var in vars:
-                            newline += (
-                                indent
-                                + 'nnet::trace_outputs->insert(std::pair<std::string, void *>('
-                                + f'"{layer.name}", (void *) malloc({var.size_cpp()} * element_size)));\n'
-                            )
-
-            else:
-                newline = line
-            fout.write(newline)
-
-        f.close()
-        fout.close()
-
-    def write_build_script(self, model):
-        """Write the TCL/Shell build scripts (project.tcl, build_prj.tcl, vivado_synth.tcl, build_lib.sh)
-
-        Args:
-            model (ModelGraph): the hls4ml model.
-        """
-
-        filedir = os.path.dirname(os.path.abspath(__file__))
-
-        # project.tcl
-        f = open(f'{model.config.get_output_dir()}/project.tcl', 'w')
-        f.write('variable project_name\n')
-        f.write(f'set project_name "{model.config.get_project_name()}"\n')
-        f.write('variable backend\n')
-        f.write('set backend "vivado"\n')
-        f.write('variable part\n')
-        f.write('set part "{}"\n'.format(model.config.get_config_value('Part')))
-        f.write('variable clock_period\n')
-        f.write('set clock_period {}\n'.format(model.config.get_config_value('ClockPeriod')))
-        f.write('variable clock_uncertainty\n')
-        f.write('set clock_uncertainty {}\n'.format(model.config.get_config_value('ClockUncertainty', '12.5%')))
-        f.write('variable version\n')
-        f.write('set version "{}"\n'.format(model.config.get_config_value('Version', '1.0.0')))
-        f.close()
-
-        # build_prj.tcl
-        srcpath = os.path.join(filedir, '../templates/vivado/build_prj.tcl')
-        dstpath = f'{model.config.get_output_dir()}/build_prj.tcl'
-        copyfile(srcpath, dstpath)
-
-        # vivado_synth.tcl
-        srcpath = os.path.join(filedir, '../templates/vivado/vivado_synth.tcl')
-        dstpath = f'{model.config.get_output_dir()}/vivado_synth.tcl'
-        copyfile(srcpath, dstpath)
-
-        # build_lib.sh
-        f = open(os.path.join(filedir, '../templates/vivado/build_lib.sh'))
-        fout = open(f'{model.config.get_output_dir()}/build_lib.sh', 'w')
-
-        for line in f.readlines():
-            line = line.replace('myproject', model.config.get_project_name())
-            line = line.replace('mystamp', model.config.get_config_value('Stamp'))
-
-            fout.write(line)
-        f.close()
-        fout.close()
-
-    def write_nnet_utils(self, model):
-        """Copy the nnet_utils, AP types headers and any custom source to the project output directory
-
-        Args:
-            model (ModelGraph): the hls4ml model.
- """ - - # nnet_utils - filedir = os.path.dirname(os.path.abspath(__file__)) - - srcpath = os.path.join(filedir, '../templates/vivado/nnet_utils/') - dstpath = f'{model.config.get_output_dir()}/firmware/nnet_utils/' - - if not os.path.exists(dstpath): - os.mkdir(dstpath) - - headers = [os.path.basename(h) for h in glob.glob(srcpath + '*.h')] - - for h in headers: - copyfile(srcpath + h, dstpath + h) - - # ap_types - filedir = os.path.dirname(os.path.abspath(__file__)) - - srcpath = os.path.join(filedir, '../templates/vivado/ap_types/') - dstpath = f'{model.config.get_output_dir()}/firmware/ap_types/' - - if os.path.exists(dstpath): - rmtree(dstpath) - - copytree(srcpath, dstpath) - - # custom source - filedir = os.path.dirname(os.path.abspath(__file__)) - - custom_source = model.config.backend.get_custom_source() - for dst, srcpath in custom_source.items(): - dstpath = f'{model.config.get_output_dir()}/firmware/{dst}' - copyfile(srcpath, dstpath) - - def write_generated_code(self, model): - """Write the generated code (nnet_code_gen.h) - - Args: - model (ModelGraph): the hls4ml model. - """ - path = f'{model.config.get_output_dir()}/firmware/nnet_utils/nnet_code_gen.h' - f = open(path) - contents = f.readlines() - f.close() - f = open(path, 'w') - - for line in contents: - if '// hls4ml insert code' in line: - newline = line - for layer in model.get_layers(): - for generated_code in layer.code.values(): - newline += str(generated_code) - else: - newline = line - f.write(newline) - f.close() - - def write_yml(self, model): - """Write the config to the YAML file - - Args: - model (ModelGraph): the hls4ml model. - """ - - def keras_model_representer(dumper, keras_model): - model_path = model.config.get_output_dir() + '/keras_model.h5' - keras_model.save(model_path) - return dumper.represent_scalar('!keras_model', model_path) - - try: - from tensorflow.keras import Model as KerasModel - - yaml.add_multi_representer(KerasModel, keras_model_representer) - except Exception: - pass - - with open(model.config.get_output_dir() + '/' + config_filename, 'w') as file: - yaml.dump(model.config.config, file) - - def write_tar(self, model): - """Write the generated project as a .tar.gz archive - - Args: - model (ModelGraph): the hls4ml model. 
- """ - - with tarfile.open(model.config.get_output_dir() + '.tar.gz', mode='w:gz') as archive: - archive.add(model.config.get_output_dir(), recursive=True) - - def write_hls(self, model): - print('Writing HLS project') - self.write_project_dir(model) - self.write_project_cpp(model) - self.write_project_header(model) - self.write_weights(model) - self.write_defines(model) - self.write_parameters(model) - self.write_test_bench(model) - self.write_bridge(model) - self.write_build_script(model) - self.write_nnet_utils(model) - self.write_generated_code(model) - self.write_yml(model) - self.write_tar(model) - print('Done') diff --git a/hls4ml/hls4ml/writer/writers.py b/hls4ml/hls4ml/writer/writers.py deleted file mode 100644 index 54caec1..0000000 --- a/hls4ml/hls4ml/writer/writers.py +++ /dev/null @@ -1,20 +0,0 @@ -class Writer: - def __init__(self): - pass - - def write_hls(self, model): - raise NotImplementedError - - -writer_map = {} - - -def register_writer(name, writer_cls): - if name in writer_map: - raise Exception(f'Writer {name} already registered') - - writer_map[name] = writer_cls - - -def get_writer(name): - return writer_map[name]() diff --git a/hls4ml/pyproject.toml b/hls4ml/pyproject.toml deleted file mode 100644 index 6402ab0..0000000 --- a/hls4ml/pyproject.toml +++ /dev/null @@ -1,10 +0,0 @@ -[build-system] -# AVOID CHANGING REQUIRES: IT WILL BE UPDATED BY PYSCAFFOLD! -requires = ["setuptools>=46.1.0", "setuptools_scm[toml]>=5", "wheel"] -build-backend = "setuptools.build_meta" - -[tool.setuptools_scm] -# See configuration details in https://github.com/pypa/setuptools_scm -version_scheme = "release-branch-semver" -git_describe_command = "git describe --dirty --tags --long --match v* --first-parent" -write_to = "hls4ml/_version.py" diff --git a/hls4ml/scripts/hls4ml b/hls4ml/scripts/hls4ml deleted file mode 100755 index 84b1188..0000000 --- a/hls4ml/scripts/hls4ml +++ /dev/null @@ -1,328 +0,0 @@ -#!/usr/bin/env python - -import argparse -import json -import os -import sys - -import h5py -import yaml - -import hls4ml - -config_filename = 'hls4ml_config.yml' - -hls4ml_description = """ - - ╔╧╧╧╗────o - hls ║ 4 ║ ml - Machine learning inference in FPGAs - o────╚╤╤╤╝ -""" - - -def main(): - parser = argparse.ArgumentParser(description=hls4ml_description, formatter_class=argparse.RawDescriptionHelpFormatter) - subparsers = parser.add_subparsers() - - config_parser = subparsers.add_parser('config', help='Create a conversion configuration file') - convert_parser = subparsers.add_parser('convert', help='Convert Keras or ONNX model to HLS') - build_parser = subparsers.add_parser('build', help='Build generated HLS project') - report_parser = subparsers.add_parser('report', help='Show synthesis report of an HLS project') - - config_parser.add_argument( - '-m', - '--model', - help='Model file to convert (Keras .h5 or .json file, ONNX .onnx file, or TensorFlow .pb file)', - default=None, - ) - config_parser.add_argument( - '-w', '--weights', help='Optional weights file (if Keras .json file is provided))', default=None - ) - config_parser.add_argument('-p', '--project', help='Project name', default='myproject') - config_parser.add_argument('-d', '--dir', help='Project output directory', default='my-hls-test') - config_parser.add_argument('-f', '--fpga', help='FPGA part', default='xcku115-flvb2104-2-i') - config_parser.add_argument('-bo', '--board', help='Board used.', default='pynq-z2') - config_parser.add_argument( - '-ba', '--backend', help='Backend to use (Vivado, VivadoAccelerator, 
Quartus)', default='Vivado'
-    )
-    config_parser.add_argument('-c', '--clock', help='Clock frequency (ns)', type=int, default=5)
-    config_parser.add_argument(
-        '-g', '--granularity', help='Granularity of configuration. One of "model", "type" or "name"', default='model'
-    )
-    config_parser.add_argument('-x', '--precision', help='Default precision', default='ap_fixed<16,6>')
-    config_parser.add_argument('-r', '--reuse-factor', help='Default reuse factor', type=int, default=1)
-    config_parser.add_argument('-o', '--output', help='Output file name', default=None)
-    config_parser.set_defaults(func=_config)
-
-    convert_parser.add_argument('-c', '--config', help='Configuration file', default=None)
-    convert_parser.set_defaults(func=_convert)
-
-    build_parser.add_argument('-p', '--project', help='Project directory', default=None)
-    build_parser.add_argument(
-        '-l', '--list-options', help='List available build options for a given project', action='store_true', default=False
-    )
-    build_parser.set_defaults(func=_build)
-
-    report_parser.add_argument('-p', '--project', help='Project directory', default=None)
-    report_parser.add_argument(
-        '-l', '--list-options', help='List available report options for a given project', action='store_true', default=False
-    )
-    report_parser.set_defaults(func=_report)
-
-    parser.add_argument('--version', action='version', version=f'%(prog)s {hls4ml.__version__}')
-
-    args, extra_args = parser.parse_known_args()
-    if hasattr(args, 'func'):
-        args.func(args, extra_args)
-    else:
-        print(hls4ml_description)
-        parser.print_usage()
-
-
-def _config(args, extra_args):
-    if args.model is None:
-        print('Model file (-m or --model) must be provided.')
-        sys.exit(1)
-
-    config = hls4ml.utils.config.create_config(
-        backend=args.backend,
-        output_dir=args.dir,
-        project_name=args.project,
-        part=args.fpga,
-        board=args.board,
-        clock_period=args.clock,
-    )
-
-    if args.model.endswith('.h5'):
-        config['KerasH5'] = args.model
-
-        with h5py.File(args.model, mode='r') as h5file:
-            # Load the configuration from h5 using json's decode
-            model_arch = h5file.attrs.get('model_config')
-            if model_arch is None:
-                print('No model found in the provided h5 file.')
-                sys.exit(1)
-            else:
-                model_arch = json.loads(model_arch.decode('utf-8'))
-
-        config['HLSConfig'] = hls4ml.utils.config_from_keras_model(
-            model_arch,
-            granularity=args.granularity,
-            default_precision=args.precision,
-            default_reuse_factor=args.reuse_factor,
-        )
-    elif args.model.endswith('.json'):
-        if args.weights is None:
-            print('Weights file (-w or --weights) must be provided when parsing from JSON file.')
-            sys.exit(1)
-        config['KerasJson'] = args.model
-        config['KerasH5'] = args.weights
-
-        with open(args.model) as json_file:
-            model_arch = json.load(json_file)
-        config['HLSConfig'] = hls4ml.utils.config_from_keras_model(
-            model_arch,
-            granularity=args.granularity,
-            default_precision=args.precision,
-            default_reuse_factor=args.reuse_factor,
-        )
-    elif args.model.endswith('.onnx'):
-        print('Creating configuration for ONNX models is not supported yet.')
-        sys.exit(1)
-    elif args.model.endswith('.pb'):
-        print('Creating configuration for TensorFlow models is not supported yet.')
-        sys.exit(1)
-
-    if args.output is not None:
-        outname = args.output
-        if not outname.endswith('.yml'):
-            outname += '.yml'
-        print(f'Writing config to {outname}')
-        with open(outname, 'w') as outfile:
-            yaml.dump(config, outfile, default_flow_style=False, sort_keys=False)
-    else:
-        print('Config output:')
-        yaml.dump(config, sys.stdout,
default_flow_style=False, sort_keys=False) - - -def _convert(args, extra_args): - model = hls4ml.converters.convert_from_config(args.config) - - if model is not None: - model.write() - - -def _build(args, extra_args): - if args.project is None: - print('Project directory (-p or --project) must be provided.') - sys.exit(1) - - try: - yamlConfig = hls4ml.converters.parse_yaml_config(args.project + '/' + config_filename) - except Exception: - print(f'Project configuration file not found in "{args.project}".') - sys.exit(1) - - backend_map = {} - backend_map['vivado'] = _build_vivado - backend_map['quartus'] = _build_quartus - - backend = yamlConfig.get('Backend') - - if backend.lower() in backend_map: - backend_map[backend.lower()](args, extra_args) - else: - print(f'Backend {backend} does not support building projects.') - - -def _build_vivado(args, extra_args): - vivado_parser = argparse.ArgumentParser(prog=f'hls4ml build -p {args.project}', add_help=False) - vivado_parser.add_argument('-c', '--simulation', help='Run C simulation', action='store_true', default=False) - vivado_parser.add_argument('-s', '--synthesis', help='Run C/RTL synthesis', action='store_true', default=False) - vivado_parser.add_argument('-r', '--co-simulation', help='Run C/RTL co-simulation', action='store_true', default=False) - vivado_parser.add_argument('-v', '--validation', help='Run C/RTL validation', action='store_true', default=False) - vivado_parser.add_argument('-e', '--export', help='Export IP (implies -s)', action='store_true', default=False) - vivado_parser.add_argument( - '-l', '--vivado-synthesis', help='Run Vivado synthesis (implies -s)', action='store_true', default=False - ) - vivado_parser.add_argument( - '-a', - '--all', - help='Run C simulation, C/RTL synthesis, C/RTL co-simulation and Vivado synthesis', - action='store_true', - ) - vivado_parser.add_argument('--reset', help='Remove any previous builds', action='store_true', default=False) - - if args.list_options: - vivado_parser.print_help() - sys.exit(0) - - vivado_args = vivado_parser.parse_args(extra_args) - - reset = int(vivado_args.reset) - csim = int(vivado_args.simulation) - synth = int(vivado_args.synthesis) - cosim = int(vivado_args.co_simulation) - validation = int(vivado_args.validation) - export = int(vivado_args.export) - vsynth = int(vivado_args.vivado_synthesis) - if vivado_args.all: - csim = synth = cosim = validation = export = vsynth = 1 - - # Check if vivado_hls is available - if 'linux' in sys.platform or 'darwin' in sys.platform: - found = os.system('command -v vivado_hls > /dev/null') - if found != 0: - print('Vivado HLS installation not found. 
Make sure "vivado_hls" is on PATH.') - sys.exit(1) - - os.system( - ( - 'cd {dir} && vivado_hls -f build_prj.tcl "reset={reset} csim={csim} synth={synth} cosim={cosim} ' - 'validation={validation} export={export} vsynth={vsynth}"' - ).format( - dir=args.project, - reset=reset, - csim=csim, - synth=synth, - cosim=cosim, - validation=validation, - export=export, - vsynth=vsynth, - ) - ) - - -def _build_quartus(args, extra_args): - quartus_parser = argparse.ArgumentParser(prog=f'hls4ml build -p {args.project}', add_help=False) - quartus_parser.add_argument( - '-s', '--synthesis', help='Compile project and run C/RTL synthesis', action='store_true', default=False - ) - quartus_parser.add_argument( - '-q', '--quartus-synthesis', help='Run Quartus synthesis (implies -s)', action='store_true', default=False - ) - quartus_parser.add_argument( - '-a', '--all', help='Run C simulation, C/RTL synthesis, Quartus synthesis', action='store_true' - ) - - if args.list_options: - quartus_parser.print_help() - sys.exit(0) - - quartus_args = quartus_parser.parse_args(extra_args) - - synth = int(quartus_args.synthesis) - qsynth = int(quartus_args.quartus_synthesis) - if quartus_args.all: - synth = qsynth = 1 - - yamlConfig = hls4ml.converters.parse_yaml_config(args.project + '/' + config_filename) - project_name = yamlConfig['ProjectName'] - - curr_dir = os.getcwd() - - os.chdir(yamlConfig['OutputDir']) - if synth: - os.system(f'make {project_name}-fpga') - os.system(f'./{project_name}-fpga') - - if qsynth: - found = os.system('command -v quartus_sh > /dev/null') - if found != 0: - print('Quartus installation not found. Make sure "quartus_sh" is on PATH.') - sys.exit(1) - os.chdir(project_name + '-fpga.prj/quartus') - os.system('quartus_sh --flow compile quartus_compile') - - os.chdir(curr_dir) - - -def _report(args, extra_args): - if args.project is None: - print('Project directory (-p or --project) must be provided.') - sys.exit(1) - - try: - yamlConfig = hls4ml.converters.parse_yaml_config(args.project + '/' + config_filename) - except Exception: - print(f'Project configuration file not found in "{args.project}".') - sys.exit(1) - - backend_map = {} - backend_map['vivado'] = _report_vivado - backend_map['quartus'] = _report_quartus - - backend = yamlConfig.get('Backend') - - if backend.lower() in backend_map: - backend_map[backend.lower()](args, extra_args) - else: - print(f'Backend {backend} does not support reading reports.') - - -def _report_vivado(args, extra_args): - vivado_parser = argparse.ArgumentParser(prog=f'hls4ml report -p {args.project}', add_help=False) - vivado_parser.add_argument('-f', '--full', help='Show full report', action='store_true', default=False) - - if args.list_options: - vivado_parser.print_help() - else: - vivado_args = vivado_parser.parse_args(extra_args) - hls4ml.report.read_vivado_report(args.project, vivado_args.full) - - -def _report_quartus(args, extra_args): - quartus_parser = argparse.ArgumentParser(prog=f'hls4ml report -p {args.project}', add_help=False) - quartus_parser.add_argument( - '-b', '--open-browser', help='Open a web browser with the report', action='store_true', default=False - ) - - if args.list_options: - quartus_parser.print_help() - else: - quartus_args = quartus_parser.parse_args(extra_args) - hls4ml.report.read_quartus_report(args.project, quartus_args.open_browser) - - -if __name__ == "__main__": - main() diff --git a/hls4ml/setup.cfg b/hls4ml/setup.cfg deleted file mode 100644 index d442412..0000000 --- a/hls4ml/setup.cfg +++ /dev/null @@ -1,62 +0,0 @@ 
-[metadata] -name = hls4ml -description = Machine learning in FPGAs using HLS -long_description = file: README.md -long_description_content_type = text/markdown -url = https://fastmachinelearning.org/hls4ml -author = hls4ml Team -license = Apache-2.0 -license_files = LICENSE -classifiers = - Development Status :: 4 - Beta - Intended Audience :: Developers - Intended Audience :: Science/Research - License :: OSI Approved :: Apache Software License - Programming Language :: C++ - Programming Language :: Python :: 3 - Programming Language :: Python :: 3 :: Only - Topic :: Software Development :: Libraries - Topic :: Software Development :: Libraries :: Python Modules -description_file = README.md - -[options] -packages = find: -install_requires = - calmjs.parse - h5py - numpy - onnx>=1.4.0 - pydigitalwavetools==1.1 - pyyaml - qkeras - tabulate - tensorflow -python_requires = >=3.8 -include_package_data = True -scripts = scripts/hls4ml - -[options.entry_points] -pytest_randomly.random_seeder = - hls4ml = hls4ml:reseed - -[options.extras_require] -profiling = - matplotlib - pandas - seaborn -sr = - sympy -testing = - pytest - pytest-cov - pytest-randomly - qonnx - torch - -[check-manifest] -ignore = - .github/** - docs/** - .pre-commit-config.yaml - Jenkinsfile - hls4ml/_version.py diff --git a/hls4ml/setup.py b/hls4ml/setup.py deleted file mode 100644 index 1abbd06..0000000 --- a/hls4ml/setup.py +++ /dev/null @@ -1,4 +0,0 @@ -import setuptools - -if __name__ == "__main__": - setuptools.setup() diff --git a/hls4ml/test/build-prj.sh b/hls4ml/test/build-prj.sh deleted file mode 100755 index e85a2cf..0000000 --- a/hls4ml/test/build-prj.sh +++ /dev/null @@ -1,155 +0,0 @@ -#!/bin/bash - -basedir=vivado_prj -vivadodir=/opt/Xilinx -vivadover=2020.1 -parallel=1 - -csim="csim=0" -synth="synth=0" -cosim="cosim=0" -validation="validation=0" -vsynth="vsynth=0" -export="export=0" -reset="reset=0" - -function print_usage { - echo "Usage: `basename $0` [OPTION]" - echo "" - echo "Builds Vivado HLS projects found in the current directory." - echo "" - echo "Options are:" - echo " -d DIR" - echo " Base directory of projects to build. Defaults to 'vivado_prj'." - echo " -i DIR" - echo " Base directory of Vivado installation. Defaults to '/opt/Xilinx'." - echo " -v VERSION" - echo " Vivado HLS version to use. Defaults to '2020.1'." - echo " -p N" - echo " Run with N parallel tasks. Defaults to 1." - echo " -c" - echo " Run C simulation." - echo " -s" - echo " Run C/RTL synthesis." - echo " -r" - echo " Run C/RTL cosimulation." - echo " -t" - echo " Run C/RTL validation." - echo " -l" - echo " Run Vivado (logic) synthesis." - echo " -e" - echo " Export IP." - echo " -n" - echo " Create new project (reset any existing)." - echo " -h" - echo " Prints this help message." -} - -function run_vivado { - dir=$1 - opt=$2 - echo "Building project in ${dir} with options: ${opt}" - cd ${dir} - cmd="vivado_hls -f build_prj.tcl \"${opt}\" &> build_prj.log" - eval ${cmd} - if [ $? -eq 1 ]; then - touch BUILD_FAILED - fi - cd .. - return ${failed} -} - -function check_status { - dir=$1 - cd ${dir} - if [ -f BUILD_FAILED ]; then - echo "" - echo "Building project ${dir} (${opt}) failed. Log:" - cat build_prj.log - echo "" - failed=1 - fi - cd .. 
-} - -while getopts ":d:i:v:p:csrtlenh" opt; do - case "$opt" in - d) basedir=$OPTARG - ;; - i) vivadodir=$OPTARG - ;; - v) vivadover=$OPTARG - ;; - p) parallel=$OPTARG - ;; - c) csim="csim=1" - ;; - s) synth="synth=1" - ;; - r) cosim="cosim=1" - ;; - t) validation="validation=1" - ;; - l) vsynth="vsynth=1" - ;; - e) export="export=1" - ;; - n) reset="reset=1" - ;; - h) - print_usage - exit - ;; - :) - echo "Option -$OPTARG requires an argument." - exit 1 - ;; - esac -done - -if [ ! -d "${basedir}" ]; then - echo "Specified directory '${basedir}' does not exist." - exit 1 -fi - -#rundir=`pwd` - -cd "${basedir}" - -# Use .tar.gz archives to create separate project directories -for archive in *.tar.gz ; do - filename="${archive%%.*}" - dir="${filename}-${vivadover}" - tarpath=`tar -tf "${archive}" | grep -m1 "${filename}"` - slashes="${tarpath//[^\/]}" - mkdir -p "${dir}" && tar -xzf "${archive}" -C "${dir}" --strip-components ${#slashes} -done - -source ${vivadodir}/Vivado/${vivadover}/settings64.sh - -opt="${reset} ${csim} ${synth} ${cosim} ${validation} ${vsynth} ${export}" - -if [ "${parallel}" -gt 1 ]; then - # Run in parallel - ( - for dir in *-${vivadover}/ ; do - ((n=n%parallel)); ((n++==0)) && wait - run_vivado "${dir}" "${opt}" & - done - wait - ) -else - # Run sequentially - for dir in *-${vivadover}/ ; do - run_vivado "${dir}" "${opt}" - done -fi - -# Check for build errors -for dir in *-${vivadover}/ ; do - check_status "${dir}" "${opt}" -done - -#cd "${rundir}" - -exit ${failed} diff --git a/hls4ml/test/cleanup.sh b/hls4ml/test/cleanup.sh deleted file mode 100755 index a2851b5..0000000 --- a/hls4ml/test/cleanup.sh +++ /dev/null @@ -1,64 +0,0 @@ -#!/bin/bash - -failed=0 -basedir=vivado_prj -all=0 - -function print_usage { - echo "Usage: `basename $0` [OPTION]" - echo "" - echo "Cleans up the projects in provided directory." - echo "" - echo "Options are:" - echo " -d DIR" - echo " Base directory where projects are located." - echo " -a" - echo " Remove all projects, even the failed ones." - echo " -h" - echo " Prints this help message." -} - -while getopts ":d:ah" opt; do - case "$opt" in - d) basedir=$OPTARG - ;; - a) all=1 - ;; - h) - print_usage - exit - ;; - esac -done - -if [ ! -d "${basedir}" ]; then - echo "Specified directory '${basedir}' does not exist." - exit 1 -fi - -if [ "${all}" -eq 1 ]; then - rm -rf "${basedir}" - exit $? -fi - -#rundir=`pwd` - -cd "${basedir}" - -rm -f *.tar.gz - -# Delete -for dir in */ ; do - if [ ! -f "${dir}BUILD_FAILED" ]; then - rm -rf "${dir}" - if [ $? -eq 0 ]; then - echo "Removed ${dir%/}." - else - failed=1 - fi - fi -done - -#cd "${rundir}" - -exit ${failed} diff --git a/hls4ml/test/compare-reports.sh b/hls4ml/test/compare-reports.sh deleted file mode 100755 index 4dc0ad0..0000000 --- a/hls4ml/test/compare-reports.sh +++ /dev/null @@ -1,123 +0,0 @@ -#!/bin/bash - -failed=0 -latency=0 -utilization=0 - -function print_usage { - echo "Usage: `basename $0` [OPTION] ORIGINAL_REPORT NEW_REPORT" - echo "" - echo "Compares two synthesis reports." - echo "" - echo "Options are:" - echo " -l" - echo " Compare latency." - echo " -u" - echo " Compare utilization estimates." - echo " -h" - echo " Prints this help message." -} - -while getopts ":luh" opt; do - case "$opt" in - l) latency=1 - ;; - u) utilization=1 - ;; - h) - print_usage - exit - ;; - esac -done - -shift $((OPTIND-1)) - -report_files=("$@") -if [[ ! ${#report_files[@]} -eq 2 ]]; then - echo "Report files not specified." 
-    exit 1
-fi
-
-if [[ "${latency}" -eq 0 ]] && [[ "${utilization}" -eq 0 ]]; then
-    echo "Argument -l or -u must be provided."
-    exit 1
-fi
-
-original="${report_files[0]}"
-new="${report_files[1]}"
-
-rptname_orig=()
-reports_orig=()
-report=""
-while IFS='' read -r line || [[ -n "${line}" ]]; do
-    if [[ "${line}" == "Synthesis report"* ]] && [[ "${report}" != "" ]]; then
-        rptname_orig+=("${line}")
-        reports_orig+=("${report}")
-        report=""
-    fi
-    report+="${line}"$'\n'
-done < "${original}"
-
-rptname_new=()
-reports_new=()
-report=""
-while IFS='' read -r line || [[ -n "${line}" ]]; do
-    if [[ "${line}" == "Synthesis report"* ]] && [[ "${report}" != "" ]]; then
-        rptname_new+=("${line}")
-        reports_new+=("${report}")
-        report=""
-    fi
-    report+="${line}"$'\n'
-done < "${new}"
-
-for idx_orig in "${!rptname_orig[@]}"; do
-    rptname="${rptname_orig[$idx_orig]}"
-    idx_new="${idx_orig}"
-    for j in "${!rptname_new[@]}"; do
-        if [[ "${rptname_new[$j]}" = "${rptname}" ]]; then
-            idx_new="${j}"
-        fi
-    done
-
-    report_orig="${reports_orig[$idx_orig]}"
-    report_new="${reports_new[$idx_new]}"
-
-    if [ "${latency}" -eq 1 ]; then
-        latency_orig=$(grep -A7 "+ Latency" <<< "${report_orig}")
-        latency_new=$(grep -A7 "+ Latency" <<< "${report_new}")
-        if [[ "${latency_orig}" != "${latency_new}" ]]; then
-            failed=1
-            echo "${rptname} has changed"
-            echo ""
-            left="Original:"$'\n'
-            left+="${latency_orig}"
-            right="New:"$'\n'
-            right+="${latency_new}"
-            column <(echo "${left}") <(echo "${right}")
-            echo ""
-            echo ""
-            echo ""
-        fi
-    fi
-
-    if [ "${utilization}" -eq 1 ]; then
-        utilization_orig=$(grep -B3 -A13 "|DSP" <<< "${report_orig}")
-        utilization_new=$(grep -B3 -A13 "|DSP" <<< "${report_new}")
-        if [[ "${utilization_orig}" != "${utilization_new}" ]]; then
-            failed=1
-            echo "${rptname} has changed"
-            echo ""
-            left="Original:"$'\n'
-            left+="${utilization_orig}"
-            right="New:"$'\n'
-            right+="${utilization_new}"
-            column <(echo "${left}") <(echo "${right}")
-            echo ""
-            echo ""
-            echo ""
-        fi
-    fi
-done
-
-exit ${failed}
diff --git a/hls4ml/test/convert-keras-models.sh b/hls4ml/test/convert-keras-models.sh
deleted file mode 100755
index 51fdd68..0000000
--- a/hls4ml/test/convert-keras-models.sh
+++ /dev/null
@@ -1,81 +0,0 @@
-#!/bin/bash
-
-models_var=HLS4ML_KERAS_MODELS
-models_file=
-exec=echo
-dir=
-
-function print_usage {
-   echo "Usage: `basename $0` [OPTION]"
-   echo ""
-   echo "Reads the model names from the ${models_var} environment variable"
-   echo "or provided file name and optionally starts the conversion."
-   echo ""
-   echo "Options are:"
-   echo "   -f FILENAME"
-   echo "      File name to read models from. If not specified, reads from ${models_var}"
-   echo "      environment variable."
-   echo "   -x"
-   echo "      Execute the commands instead of just printing them."
-   echo "   -d DIR"
-   echo "      Output directory passed to keras-to-hls script."
-   echo "   -h"
-   echo "      Prints this help message."
-}
-
-while getopts ":f:xd:h" opt; do
-   case "$opt" in
-   f) models_file=$OPTARG
-      ;;
-   x) exec=eval
-      ;;
-   d) dir="-d $OPTARG"
-      ;;
-   h)
-      print_usage
-      exit
-      ;;
-   :)
-      echo "Option -$OPTARG requires an argument."
-      exit 1
-      ;;
-   esac
-done
-
-if [ -z ${models_file} ]; then
-   if [ -z ${!models_var+x} ] ; then
-      echo "No file provided and ${models_var} variable not set. Nothing to do."
-      exit 1
-   else
-      IFS=";" read -ra model_line <<< "${!models_var}"
-   fi
-else
-   readarray model_line < "${models_file}"
-fi
-
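For orientation, the loop below turns one model-list entry into a keras-to-hls.sh invocation; using an entry that appears in keras-models.txt, the mapping is roughly (ignoring ${dir} and spacing):

    # entry:   KERAS_3layer b:pynq-z2 B:VivadoAccelerator x:xc7z020clg400-1 s:Resource
    # becomes: ./keras-to-hls.sh -x xc7z020clg400-1 -b pynq-z2 -B VivadoAccelerator -g Resource KERAS_3layer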
[[ "${line}" = \#* ]] ; then - IFS=" " read -ra model_def <<< "${line}" - for (( i=1; i<"${#model_def[@]}"; i++ )); - do - if [[ "${model_def[$i]}" == x:* ]] ; then params[0]="-x ${model_def[$i]:2} "; fi - if [[ "${model_def[$i]}" == b:* ]] ; then params[1]="-b ${model_def[$i]:2} "; fi - if [[ "${model_def[$i]}" == B:* ]] ; then params[2]="-B ${model_def[$i]:2} "; fi - if [[ "${model_def[$i]}" == c:* ]] ; then params[3]="-c ${model_def[$i]:2} "; fi - if [[ "${model_def[$i]}" == io:s ]] ; then params[4]="-s "; fi - if [[ "${model_def[$i]}" == r:* ]] ; then params[5]="-r ${model_def[$i]:2} "; fi - if [[ "${model_def[$i]}" == s:* ]] ; then params[6]="-g ${model_def[$i]:2} "; fi - if [[ "${model_def[$i]}" == i:* ]] ; then params[7]="-t ${model_def[$i]:2} "; fi - if [[ "${model_def[$i]}" == y:* ]] ; then params[8]="-y ${model_def[$i]:2} "; fi - done - params[9]=${model_def[0]} - cmd="./keras-to-hls.sh ${dir} ${params[0]}${params[1]}${params[2]}${params[3]}${params[4]}${params[5]}${params[6]}${params[7]}${params[8]}${params[9]}" - ${exec} "${cmd}" - fi -done - -#cd "${rundir}" - -exit ${failed} diff --git a/hls4ml/test/convert-onnx-models.sh b/hls4ml/test/convert-onnx-models.sh deleted file mode 100755 index b5d9c54..0000000 --- a/hls4ml/test/convert-onnx-models.sh +++ /dev/null @@ -1,79 +0,0 @@ -#!/bin/bash - -models_var=HLS4ML_ONNX_MODELS -models_file= -exec=echo -dir= - -function print_usage { - echo "Usage: `basename $0` [OPTION]" - echo "" - echo "Reads the model names from the ${models_var} environment variable" - echo "or provided file name and optionally starts the conversion." - echo "" - echo "Options are:" - echo " -f FINENAME" - echo " File name to read models from. If not specified, reads from ${models_var}" - echo " environment variable." - echo " -x" - echo " Execute the commands instead of just printing them." - echo " -d DIR" - echo " Output directory passed to onnx-to-hls script." - echo " -h" - echo " Prints this help message." -} - -while getopts ":f:xd:h" opt; do - case "$opt" in - f) models_file=$OPTARG - ;; - x) exec=eval - ;; - d) dir="-d $OPTARG" - ;; - h) - print_usage - exit - ;; - :) - echo "Option -$OPTARG requires an argument." - exit 1 - ;; - esac -done - -if [ -z ${models_file} ]; then - if [ -z ${!models_var+x} ] ; then - echo "No file provided and ${models_var} variable not set. Nothing to do." - exit 1 - else - IFS=";" read -ra model_line <<< "${!models_var}" - fi -else - readarray model_line < "${models_file}" -fi - -for line in "${model_line[@]}" -do - params=("" "" "" "" "" "") - if [[ ${line} = *[![:space:]]* ]] && ! 
[[ "${line}" = \#* ]] ; then - IFS=" " read -ra model_def <<< "${line}" - for (( i=1; i<"${#model_def[@]}"; i++ )); - do - if [[ "${model_def[$i]}" == x:* ]] ; then params[0]="-x ${model_def[$i]:2} "; fi - if [[ "${model_def[$i]}" == c:* ]] ; then params[1]="-c ${model_def[$i]:2} "; fi - if [[ "${model_def[$i]}" == io:s ]] ; then params[2]="-s "; fi - if [[ "${model_def[$i]}" == r:* ]] ; then params[3]="-r ${model_def[$i]:2} "; fi - if [[ "${model_def[$i]}" == i:* ]] ; then params[4]="-t ${model_def[$i]:2} "; fi - done - params[5]=${model_def[0]} - - cmd="./onnx-to-hls.sh ${dir} ${params[0]}${params[1]}${params[2]}${params[3]}${params[4]}${params[5]}" - - ${exec} "${cmd}" - fi -done - -#cd "${rundir}" - -exit ${failed} diff --git a/hls4ml/test/convert-pytorch-models.sh b/hls4ml/test/convert-pytorch-models.sh deleted file mode 100755 index 2efcd8c..0000000 --- a/hls4ml/test/convert-pytorch-models.sh +++ /dev/null @@ -1,79 +0,0 @@ -#!/bin/bash - -models_var=HLS4ML_PYTORCH_MODELS -models_file= -exec=echo -dir= - -function print_usage { - echo "Usage: `basename $0` [OPTION]" - echo "" - echo "Reads the model names from the ${models_var} environment variable" - echo "or provided file name and optionally starts the conversion." - echo "" - echo "Options are:" - echo " -f FINENAME" - echo " File name to read models from. If not specified, reads from ${models_var}" - echo " environment variable." - echo " -x" - echo " Execute the commands instead of just printing them." - echo " -d DIR" - echo " Output directory passed to keras-to-hls script." - echo " -h" - echo " Prints this help message." -} - -while getopts ":f:xd:h" opt; do - case "$opt" in - f) models_file=$OPTARG - ;; - x) exec=eval - ;; - d) dir="-d $OPTARG" - ;; - h) - print_usage - exit - ;; - :) - echo "Option -$OPTARG requires an argument." - exit 1 - ;; - esac -done - -if [ -z ${models_file} ]; then - if [ -z ${!models_var+x} ] ; then - echo "No file provided and ${models_var} variable not set. Nothing to do." - exit 1 - else - IFS=";" read -ra model_line <<< "${!models_var}" - fi -else - readarray model_line < "${models_file}" -fi - -for line in "${model_line[@]}" -do - params=("" "" "" "" "" "") - if [[ ${line} = *[![:space:]]* ]] && ! [[ "${line}" = \#* ]] ; then - IFS=" " read -ra model_def <<< "${line}" - for (( i=1; i<"${#model_def[@]}"; i++ )); - do - if [[ "${model_def[$i]}" == x:* ]] ; then params[0]="-x ${model_def[$i]:2} "; fi - if [[ "${model_def[$i]}" == c:* ]] ; then params[1]="-c ${model_def[$i]:2} "; fi - if [[ "${model_def[$i]}" == io:s ]] ; then params[2]="-s "; fi - if [[ "${model_def[$i]}" == r:* ]] ; then params[3]="-r ${model_def[$i]:2} "; fi - if [[ "${model_def[$i]}" == i:* ]] ; then params[4]="-t ${model_def[$i]:2} "; fi - done - params[5]=${model_def[0]} - - cmd="./pytorch-to-hls.sh ${dir} ${params[0]}${params[1]}${params[2]}${params[3]}${params[4]}${params[5]}" - - ${exec} "${cmd}" - fi -done - -#cd "${rundir}" - -exit ${failed} diff --git a/hls4ml/test/gather-reports.sh b/hls4ml/test/gather-reports.sh deleted file mode 100755 index 3d0ebd1..0000000 --- a/hls4ml/test/gather-reports.sh +++ /dev/null @@ -1,78 +0,0 @@ -#!/bin/bash - -failed=0 -basedir=vivado_prj -full=0 -brief=0 - -function print_usage { - echo "Usage: `basename $0` [OPTION]" - echo "" - echo "Prints synthesis reports found in projects in the provided directory." - echo "" - echo "Options are:" - echo " -d DIR" - echo " Base directory where projects are located." 
- echo " -b" - echo " Print only summary of performance and utilization estimates." - echo " -f" - echo " Print whole report." - echo " -h" - echo " Prints this help message." -} - -while getopts ":d:bfh" opt; do - case "$opt" in - d) basedir=$OPTARG - ;; - b) brief=1 - ;; - f) full=1 - ;; - h) - print_usage - exit - ;; - esac -done - -if [ "${brief}" -eq "${full}" ]; then - echo "Argument -b or -f must be provided." - exit 1 -fi - -if [ ! -d "${basedir}" ]; then - echo "Specified directory '${basedir}' does not exist." - exit 1 -fi - -#rundir=`pwd` - -cd "${basedir}" - -for dir in */ ; do - cd ${dir} - prjdir="myproject_prj" - prjname="myproject" - for subdir in *_prj/ ; do - prjdir=${subdir} - prjname="${prjdir%_prj/}" - done - prjdir="${prjdir}solution1/syn/report" - if [ -d "$prjdir" ]; then - echo "Synthesis report for ${dir%/}" - if [ "${brief}" -eq 1 ]; then - sed "/* DSP48/Q" "${prjdir}/${prjname}_csynth.rpt" - else - cat "${prjdir}/${prjname}_csynth.rpt" - fi - else - echo "No report files found in ${dir}." - failed=1 - fi - cd .. -done - -#cd "${rundir}" - -exit ${failed} diff --git a/hls4ml/test/hls4ml-keras-test.sh b/hls4ml/test/hls4ml-keras-test.sh deleted file mode 100755 index de11278..0000000 --- a/hls4ml/test/hls4ml-keras-test.sh +++ /dev/null @@ -1,27 +0,0 @@ -#!/bin/bash - -VIVADO_INSTALL_DIR=/opt/Xilinx -VIVADO_VERSION=2020.1 - -# If running in docker image we would first need to activate the proper conda environment -#. activate hls4ml-py36 - -# Convert models in keras-models.txt -./convert-keras-models.sh -x -f keras-models.txt - -# Alternatively, keras-to-hls script can be called, with the model name(s) specified, i.e.: -#./keras-to-hls.sh KERAS_1layer KERAS_conv1d_small -./keras-to-hls.sh -b alveo-u250 -B VivadoAccelerator -x xcu250-figd2104-2L-e KERAS_3layer -./keras-to-hls.sh -b pynq-z2 -B VivadoAccelerator -x xc7z020clg400-1 KERAS_3layer -# KERAS_3layer b:pynq-z2 B:VivadoAccelerator x:xc7z020clg400-1 s:Resource - -# Build the projects generated by keras-to-hls script. -# Remove parameter -s to disable synthesis. -p controls the number of parallel tasks -./build-prj.sh -i ${VIVADO_INSTALL_DIR} -v ${VIVADO_VERSION} -c -s -p 2 - -# Go through the generated reports and print out basic information. -# Reports are available if synthesis is enabled. -./gather-reports.sh -b - -# Clean-up at the end -#./cleanup.sh diff --git a/hls4ml/test/hls4ml-onnx-test.sh b/hls4ml/test/hls4ml-onnx-test.sh deleted file mode 100755 index 7e163fa..0000000 --- a/hls4ml/test/hls4ml-onnx-test.sh +++ /dev/null @@ -1,33 +0,0 @@ -#!/bin/bash - -VIVADO_INSTALL_DIR=/opt/Xilinx -VIVADO_VERSION=2017.2 - -# If running in docker image we would first need to activate the proper conda environment -#. activate hls4ml-py36 - -# Convert models in onnx-models.txt -./convert-onnx-models.sh -x -p 3 -f onnx-models.txt - -# Same for Python 2 -#. activate hls4ml-py27 -./convert-onnx-models.sh -x -p 2 -f onnx-models.txt - -# Alternatively, onnx-to-hls script can be called, with the model name(s) specified, i.e.: -#./onnx-to-hls.sh -p 3 three_layer_keras conv1d_small_keras -#./onnx-to-hls.sh -p 2 three_layer_keras conv1d_small_keras - -# Check if there is any difference between files generated by Python 2 and Python 3 -# Not needed if there were no changes in onnx-to-hls.py or hls-writer.py -./py-diff.sh -r 2 - -# Build the projects generated by onnx-to-hls script. -# Remove parameter -s to disable synthesis. 
-p controls the number of parallel tasks
-./build-prj.sh -i ${VIVADO_INSTALL_DIR} -v ${VIVADO_VERSION} -c -s -p 2
-
-# Go through the generated reports and print out basic information.
-# Reports are available if synthesis is enabled.
-./gather-reports.sh -b
-
-# Clean-up at the end
-#./cleanup.sh
diff --git a/hls4ml/test/hls4ml-pytorch-test.sh b/hls4ml/test/hls4ml-pytorch-test.sh
deleted file mode 100755
index 0fb39df..0000000
--- a/hls4ml/test/hls4ml-pytorch-test.sh
+++ /dev/null
@@ -1,33 +0,0 @@
-#!/bin/bash
-
-VIVADO_INSTALL_DIR=/opt/Xilinx
-VIVADO_VERSION=2017.2
-
-# If running in docker image we would first need to activate the proper conda environment
-#. activate hls4ml-py36
-
-# Convert models in pytorch-models.txt
-./convert-pytorch-models.sh -x -p 3 -f pytorch-models.txt
-
-# Same for Python 2
-#. activate hls4ml-py27
-./convert-pytorch-models.sh -x -p 2 -f pytorch-models.txt
-
-# Alternatively, pytorch-to-hls script can be called, with the model name(s) specified, i.e.:
-#./pytorch-to-hls.sh -p 3 two_layer_model three_layer_model
-#./pytorch-to-hls.sh -p 2 two_layer_model three_layer_model
-
-# Check if there is any difference between files generated by Python 2 and Python 3
-# Not needed if there were no changes in pytorch-to-hls.py or hls-writer.py
-./py-diff.sh -r 2
-
-# Build the projects generated by pytorch-to-hls script.
-# Remove parameter -s to disable synthesis. -p controls the number of parallel tasks
-./build-prj.sh -i ${VIVADO_INSTALL_DIR} -v ${VIVADO_VERSION} -c -s -p 2
-
-# Go through the generated reports and print out basic information.
-# Reports are available if synthesis is enabled.
-./gather-reports.sh -b
-
-# Clean-up at the end
-#./cleanup.sh
diff --git a/hls4ml/test/keras-models.txt b/hls4ml/test/keras-models.txt
deleted file mode 100644
index 9734477..0000000
--- a/hls4ml/test/keras-models.txt
+++ /dev/null
@@ -1,53 +0,0 @@
-# Keras models from examples directory that will be used for testing
-#
-# Syntax:
-# MODEL_NAME[:WEIGHTS_FILE] [x:PART] [b:BOARD] [B:BACKEND] [c:CLOCK_PERIOD] [io:s] [r:REUSE_FACTOR] [t:AP_TYPE] [s:STRATEGY] [y:CONFIG_FILE]
-# where
-# MODEL_NAME - Name of the file containing json model (without ".json")
-# WEIGHTS_FILE - Name of the HDF5 file containing model weights (without ".h5")
-# x:PART - FPGA part number to use
-# b:BOARD - name of one board defined in supported_board.json file
-# B:BACKEND - name of the backend to be used (Vivado, VivadoAccelerator)
-# c:CLOCK_PERIOD - Clock period
-# io:s - Use streaming I/O, otherwise use parallel I/O
-# r:REUSE_FACTOR - Reuse factor
-# s:STRATEGY - Latency-optimized or Resource-optimized strategy
-# t:AP_TYPE - Default precision
-# y:CONFIG_FILE - YAML config file to copy HLSConfig from
-#
-# Lines starting with "#" are ignored.
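An illustrative entry combining most of these options (values are hypothetical) would read:

    # KERAS_3layer:KERAS_3layer_weights x:xcvu9p-flgb2104-2-e c:5 io:s r:4 t:ap_fixed<18,8> s:Resource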
-#
-
-KERAS_1layer
-KERAS_3layer
-#KERAS_3layer:KERAS_3layer_70pruned_retrained_weights
-#KERAS_conv1d
-#KERAS_conv1d_small
-#KERAS_conv2d_model
-#KERAS_dense_16x100x100x100x100x100x5
-KERAS_3layer_batch_norm
-KERAS_3layer_binary_smaller
-KERAS_3layer_ternary_small
-
-# Pynq backend
-KERAS_3layer b:pynq-z2 B:VivadoAccelerator x:xc7z020clg400-1 s:Resource
-garnet_1layer x:xcku115-flvb2104-2-i y:garnet_1layer_config
-
-
-# Resource strategy
-KERAS_3layer r:2 s:Resource
-qkeras_mnist_dense r:112 s:Resource
-
-#Fails synthesis due to a problem with loop unrolling
-#jetTagger_Conv2D_Small:jetTagger_Conv2D_Small
-
-# Streaming IO
-#KERAS_1layer io:s
-KERAS_3layer io:s
-KERAS_conv1d_small io:s
-KERAS_conv2d_model io:s
-jetTagger_Conv2D_Small io:s
-jetTagger_Conv2D_Small_NoBatchNorm io:s
-
-
-#KERAS_1layer x:xcku115-flvf1924-2-i
diff --git a/hls4ml/test/keras-to-hls.sh b/hls4ml/test/keras-to-hls.sh
deleted file mode 100755
index 3b0ef7e..0000000
--- a/hls4ml/test/keras-to-hls.sh
+++ /dev/null
@@ -1,148 +0,0 @@
-#!/bin/bash
-
-pycmd=python
-part="xcvu9p-flgb2104-2-e"
-board="None"
-backend="Vivado"
-clock=5
-io=io_parallel
-rf=1
-strategy="Latency"
-type="ap_fixed<16,6>"
-yml=""
-basedir=vivado_prj
-precision="float"
-sanitizer="[^A-Za-z0-9._]"
-
-function print_usage {
-   echo "Usage: `basename $0` [OPTION] MODEL[:H5FILE]..."
-   echo ""
-   echo "MODEL is the name of the model json file without extension. Optionally"
-   echo "a H5 file with weights can be provided using the MODEL:H5FILE syntax."
-   echo "By default, it is assumed that weights are stored in MODEL_weights.h5."
-   echo "Multiple models can be specified."
-   echo ""
-   echo "Options are:"
-   echo "   -x PART"
-   echo "      FPGA device part number. Defaults to 'xcvu9p-flgb2104-2-e'."
-   echo "   -b BOARD"
-   echo "      Board used. Defaults to 'pynq-z2'."
-   echo "   -B BACKEND"
-   echo "      Backend to use for the generation of the code. Defaults to 'Vivado'."
-   echo "   -c CLOCK"
-   echo "      Clock period to use. Defaults to 5."
-   echo "   -s"
-   echo "      Use streaming I/O. If not specified uses parallel I/O."
-   echo "   -r FACTOR"
-   echo "      Reuse factor. Defaults to 1."
-   echo "   -g STRATEGY"
-   echo "      Strategy. 'Latency' or 'Resource'."
-   echo "   -t TYPE"
-   echo "      Default precision. Defaults to 'ap_fixed<16,6>'."
-   echo "   -d DIR"
-   echo "      Output directory."
-   echo "   -y FILE"
-   echo "      YAML config file to take HLS config from. If specified, -r, -g and -t are ignored."
-   echo "   -h"
-   echo "      Prints this help message."
-}
-
-while getopts ":x:b:B:c:sr:g:t:d:y:p:h" opt; do
-   case "$opt" in
-   x) part=$OPTARG
-      ;;
-   b) board=$OPTARG
-      ;;
-   B) backend=$OPTARG
-      ;;
-   c) clock=$OPTARG
-      ;;
-   s) io=io_stream
-      ;;
-   r) rf=$OPTARG
-      ;;
-   g) strategy=$OPTARG
-      ;;
-   t) type=$OPTARG
-      ;;
-   d) basedir=$OPTARG
-      ;;
-   y) yml=$OPTARG
-      ;;
-   p) precision=$OPTARG
-      ;;
-   h)
-      print_usage
-      exit
-      ;;
-   :)
-      echo "Option -$OPTARG requires an argument."
-      exit 1
-      ;;
-   esac
-done
-
-shift $((OPTIND-1))
-
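For reference, with the defaults above the loop below emits a per-model YAML config shaped roughly like this (values vary with the chosen flags; the model name is one from keras-models.txt):

    KerasJson: ../example-models/keras/KERAS_3layer.json
    KerasH5: ../example-models/keras/KERAS_3layer_weights.h5
    OutputDir: vivado_prj/KERAS_3layer-backendVivado-boardNone-xcvu9p_flgb2104_2_e-c5-io_parallel-rf1-ap_fixed_16_6_-Latency
    ProjectName: myproject
    Part: xcvu9p-flgb2104-2-e
    Board: None
    Backend: Vivado
    ClockPeriod: 5

    IOType: io_parallel
    HLSConfig:
      Model:
        ReuseFactor: 1
        Precision: ap_fixed<16,6>
        Strategy: Latency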
-models=("$@")
-if [[ ${#models[@]} -eq 0 ]]; then
-   echo "No models specified."
-   exit 1
-fi
-
-mkdir -p "${basedir}"
-
-for model in "${models[@]}"
-do
-   name=${model}
-   h5=${name}"_weights"
-   IFS=":" read -ra model_h5_pair <<< "${model}" # If models are provided in "json:h5" format
-   if [[ ${#model_h5_pair[@]} -eq 2 ]]; then
-      name="${model_h5_pair[0]}"
-      h5="${model_h5_pair[1]}"
-   fi
-
-   echo "Creating config file for model '${model}'"
-   base=`echo "${h5}" | sed -e 's/\(_weights\)*$//g'`
-   file="${basedir}/${base}.yml"
-   prjdir="${basedir}/${base}-backend${backend}-board${board//${sanitizer}/_}-${part//${sanitizer}/_}-c${clock}-${io}-rf${rf}-${type//${sanitizer}/_}-${strategy}"
-
-   hlscfg=""
-   if [ ! -z "${yml}" ]; then
-      hlscfg=`sed -ne '/HLSConfig/,$p' ../example-models/config-files/${yml}`
-   fi
-   echo "KerasJson: ../example-models/keras/${name}.json" > ${file}
-   echo "KerasH5: ../example-models/keras/${h5}.h5" >> ${file}
-   echo "OutputDir: ${prjdir}" >> ${file}
-   echo "ProjectName: myproject" >> ${file}
-   echo "Part: ${part}" >> ${file}
-   echo "Board: ${board}" >> ${file}
-   echo "Backend: ${backend}" >> ${file}
-   echo "ClockPeriod: ${clock}" >> ${file}
-   echo "" >> ${file}
-   echo "IOType: ${io}" >> ${file}
-   if [ -z "${hlscfg}" ]
-   then
-      echo "HLSConfig:" >> ${file}
-      echo "  Model:" >> ${file}
-      echo "    ReuseFactor: ${rf}" >> ${file}
-      echo "    Precision: ${type} " >> ${file}
-      echo "    Strategy: ${strategy} " >> ${file}
-   else
-      echo "${hlscfg}" >> ${file}
-   fi
-   # Adding VivadoAccelerator config to file
-   if [ "${backend}" = "VivadoAccelerator" ];
-   then
-      echo "AcceleratorConfig:" >> ${file}
-      echo "  Board: ${board}" >> ${file}
-      echo "  Precision:" >> ${file}
-      echo "    Input: ${precision}" >> ${file}
-      echo "    Output: ${precision}" >> ${file}
-   fi
-
-   ${pycmd} ../scripts/hls4ml convert -c ${file} || exit 1
-   rm ${file}
-   rm -rf "${prjdir}"
-   echo ""
-done
diff --git a/hls4ml/test/onnx-models.txt b/hls4ml/test/onnx-models.txt
deleted file mode 100644
index 6175e8b..0000000
--- a/hls4ml/test/onnx-models.txt
+++ /dev/null
@@ -1,27 +0,0 @@
-# ONNX models from examples directory that will be used for testing
-#
-# Syntax:
-# MODEL_NAME [x:PART] [c:CLOCK_PERIOD] [io:s] [r:REUSE_FACTOR] [t:AP_TYPE] [s:STRATEGY]
-# where
-# MODEL_NAME - Name of the file containing the model (without ".onnx")
-# x:PART - FPGA part number to use
-# c:CLOCK_PERIOD - Clock period
-# io:s - Use streaming I/O, otherwise use parallel I/O
-# r:REUSE_FACTOR - Reuse factor
-# s:STRATEGY - Latency-optimized or Resource-optimized strategy
-# t:AP_TYPE - Default precision
-#
-# Lines starting with "#" are ignored.
-#
-
-conv1d_small_keras
-conv2d_small_keras
-conv2d_small_keras
-conv2d_small_mp_keras
-dense_big_keras
-three_layer_bn_keras
-three_layer_bn_pytorch
-three_layer_keras
-three_layer_pytorch
-two_layer_keras
-two_layer_pytorch
diff --git a/hls4ml/test/onnx-to-hls.sh b/hls4ml/test/onnx-to-hls.sh
deleted file mode 100755
index a0cfedf..0000000
--- a/hls4ml/test/onnx-to-hls.sh
+++ /dev/null
@@ -1,98 +0,0 @@
-#!/bin/bash
-
-pycmd=python
-part="xc7vx690tffg1927-2"
-clock=5
-io=io_parallel
-rf=1
-strategy="Latency"
-type="ap_fixed<16,6>"
-basedir=vivado_prj
-
-sanitizer="[^A-Za-z0-9._]"
-
-function print_usage {
-   echo "Usage: `basename $0` [OPTION] MODEL..."
-   echo ""
-   echo "MODEL is the name of the model onnx file without extension. Multiple"
-   echo "models can be specified."
-   echo ""
-   echo "Options are:"
-   echo "   -x DEVICE"
-   echo "      Xilinx device part number. Defaults to 'xc7vx690tffg1927-2'."
-   echo "   -c CLOCK"
-   echo "      Clock period to use. Defaults to 5."
- echo " -s" - echo " Use streaming I/O. If not specified uses parallel I/O." - echo " -r FACTOR" - echo " Reuse factor. Defaults to 1." - echo " -g STRATEGY" - echo " Strategy. 'Latency' or 'Resource'." - echo " -t TYPE" - echo " Default precision. Defaults to 'ap_fixed<16,6>'." - echo " -d DIR" - echo " Output directory." - echo " -h" - echo " Prints this help message." -} - -while getopts ":x:c:sr:g:t:d:h" opt; do - case "$opt" in - x) part=$OPTARG - ;; - c) clock=$OPTARG - ;; - s) io=io_stream - ;; - r) rf=$OPTARG - ;; - g) strategy=$OPTARG - ;; - t) type=$OPTARG - ;; - d) basedir=$OPTARG - ;; - h) - print_usage - exit - ;; - :) - echo "Option -$OPTARG requires an argument." - exit 1 - ;; - esac -done - -shift $((OPTIND-1)) - -models=("$@") -if [[ ${#models[@]} -eq 0 ]]; then - echo "No models specified." - exit 1 -fi - -mkdir -p "${basedir}" - -for model in "${models[@]}" -do - echo "Creating config file for model '${model}'" - base=${model%.*} - file="${basedir}/${base}.yml" - - echo "OnnxModel: ../example-models/onnx/${model}.onnx" > ${file} - echo "OutputDir: ${basedir}/${base}-${part//${sanitizer}/_}-c${clock}-${io}-rf${rf}-${type//${sanitizer}/_}-${strategy}" >> ${file} - echo "ProjectName: myproject" >> ${file} - echo "Part: ${part}" >> ${file} - echo "ClockPeriod: ${clock}" >> ${file} - echo "" >> ${file} - echo "IOType: ${io}" >> ${file} - echo "HLSConfig:" >> ${file} - echo " Model:" >> ${file} - echo " ReuseFactor: ${rf}" >> ${file} - echo " Precision: ${type} " >> ${file} - echo " Strategy: ${strategy} " >> ${file} - - ${pycmd} ../scripts/hls4ml convert -c ${file} || exit 1 - rm ${file} - echo "" -done diff --git a/hls4ml/test/pytest/ci-template.yml b/hls4ml/test/pytest/ci-template.yml deleted file mode 100644 index bbe8df4..0000000 --- a/hls4ml/test/pytest/ci-template.yml +++ /dev/null @@ -1,23 +0,0 @@ -.pytest: - stage: test - image: gitlab-registry.cern.ch/fastmachinelearning/hls4ml-testing:0.4.base - tags: - - k8s-default - before_script: - - source ~/.bashrc - - if [ $EXAMPLEMODEL == 1 ]; then git submodule init; git submodule update; fi - - conda activate hls4ml-testing - - pip install .[testing,sr] - script: - - cd test/pytest - - pytest $PYTESTFILE -rA --cov-report xml --cov-report term --cov=hls4ml --junitxml=report.xml --randomly-seed=42 --randomly-dont-reorganize --randomly-dont-reset-seed - artifacts: - when: always - reports: - junit: - - test/pytest/report.xml - coverage_report: - coverage_format: cobertura - path: test/pytest/coverage.xml - paths: - - test/pytest/hls4mlprj*.tar.gz diff --git a/hls4ml/test/pytest/generate_ci_yaml.py b/hls4ml/test/pytest/generate_ci_yaml.py deleted file mode 100644 index 6d816a7..0000000 --- a/hls4ml/test/pytest/generate_ci_yaml.py +++ /dev/null @@ -1,36 +0,0 @@ -import glob - -import yaml - -''' -Create a Gitlab CI yml file with a separate entry for each test_* file -in the pytests directory to parallelise the CI jobs. 
-''' - -template = """ -pytest.{}: - extends: .pytest - variables: - PYTESTFILE: {} - EXAMPLEMODEL: {} -""" - - -def uses_example_model(test_filename): - with open(test_filename) as f: - content = f.read() - return 'example-models' in content - - -yml = None -tests = glob.glob('test_*.py') -for test in tests: - name = test.replace('test_', '').replace('.py', '') - new_yml = yaml.safe_load(template.format(name, f'test_{name}.py', int(uses_example_model(test)))) - if yml is None: - yml = new_yml - else: - yml.update(new_yml) - -yamlfile = open('pytests.yml', 'w') -yaml.safe_dump(yml, yamlfile) diff --git a/hls4ml/test/pytest/test_activations.py b/hls4ml/test/pytest/test_activations.py deleted file mode 100644 index caaaed6..0000000 --- a/hls4ml/test/pytest/test_activations.py +++ /dev/null @@ -1,55 +0,0 @@ -from pathlib import Path - -import numpy as np -import pytest -from tensorflow.keras.layers import ELU, Activation, Input, LeakyReLU, ReLU, ThresholdedReLU -from tensorflow.keras.models import Model - -import hls4ml - -test_root_path = Path(__file__).parent - -# Variable 'name' is simply used as an identifier for the activation - - -@pytest.mark.parametrize('backend', ['Vivado', 'Vitis', 'Quartus']) -@pytest.mark.parametrize('shape, io_type', [((8,), 'io_parallel'), ((8,), 'io_stream'), ((8, 8, 3), 'io_stream')]) -@pytest.mark.parametrize( - 'activation, name', - [ - (ReLU(), 'relu'), - (LeakyReLU(alpha=1.5), 'leaky_relu'), - (ThresholdedReLU(theta=0.75), 'threshold_relu'), - (ELU(alpha=1.25), 'elu'), - (Activation('selu'), 'selu'), - # Tensorflow exception of multi-dimensional PReLU (8, 8, 3) - # (PReLU(alpha_initializer='zeros'), 'prelu'), - (Activation('softplus'), 'softplus'), - (Activation('softsign'), 'softsign'), - (Activation(activation='tanh'), 'tanh'), - (Activation('sigmoid'), 'sigmoid'), - # Theano and Tensorflow might have different definitions for hard sigmoid - # Result is likely to be different when |x| > 1 (see TF/Theano docs) - (Activation('hard_sigmoid'), 'hard_sigmoid'), - ], -) -def test_activations(backend, activation, name, shape, io_type): - # Subtract 0.5 to include negative values - X = np.random.rand(1000, *shape) - 0.5 - - input = Input(shape=shape) - activation = activation(input) - keras_model = Model(inputs=input, outputs=activation) - - hls_config = hls4ml.utils.config_from_keras_model(keras_model) - output_dir = str(test_root_path / 'hls4mlprj_activations_{}_{}_{}_{}').format(backend, io_type, str(shape), name) - - hls_model = hls4ml.converters.convert_from_keras_model( - keras_model, hls_config=hls_config, io_type=io_type, output_dir=output_dir, backend=backend - ) - hls_model.compile() - - keras_prediction = keras_model.predict(X) - hls_prediction = hls_model.predict(X).reshape(keras_prediction.shape) - - np.testing.assert_allclose(hls_prediction, keras_prediction, rtol=2e-2, atol=2e-2) diff --git a/hls4ml/test/pytest/test_batchnorm.py b/hls4ml/test/pytest/test_batchnorm.py deleted file mode 100644 index c0ef070..0000000 --- a/hls4ml/test/pytest/test_batchnorm.py +++ /dev/null @@ -1,49 +0,0 @@ -from pathlib import Path - -import numpy as np -import pytest -from tensorflow.keras.layers import BatchNormalization -from tensorflow.keras.models import Sequential - -import hls4ml - -test_root_path = Path(__file__).parent - -in_shape = 16 -atol = 5e-3 - - -@pytest.fixture(scope='module') -def data(): - np.random.seed(0) - X = np.random.rand(100, in_shape) - return X - - -@pytest.fixture(scope='module') -def model(request): - model = Sequential() - 
model.add(BatchNormalization(input_shape=(in_shape,), center=request.param, scale=request.param)) - model.compile() - return model - - -@pytest.mark.parametrize('io_type', ['io_parallel', 'io_stream']) -@pytest.mark.parametrize('backend', ['Vivado', 'Vitis', 'Quartus']) -@pytest.mark.parametrize('model', [True, False], indirect=True) -def test_batchnorm(model, data, backend, io_type): - default_precision = 'ac_fixed<32, 1, true>' if backend == 'Quartus' else 'ac_fixed<32, 1>' - - center = model.layers[0].center - scale = model.layers[0].scale - config = hls4ml.utils.config_from_keras_model(model, default_precision=default_precision, granularity='name') - output_dir = str(test_root_path / f'hls4mlprj_batchnorm_{backend}_{io_type}_center{center}_scale{scale}') - hls_model = hls4ml.converters.convert_from_keras_model( - model, backend=backend, hls_config=config, io_type=io_type, output_dir=output_dir - ) - hls_model.compile() - - # Predict - y_keras = np.squeeze(model.predict(data)) - y_hls = hls_model.predict(data) - np.testing.assert_allclose(y_keras, y_hls, rtol=0, atol=atol, verbose=True) diff --git a/hls4ml/test/pytest/test_batchnorm_pytorch.py b/hls4ml/test/pytest/test_batchnorm_pytorch.py deleted file mode 100644 index a7a0c80..0000000 --- a/hls4ml/test/pytest/test_batchnorm_pytorch.py +++ /dev/null @@ -1,43 +0,0 @@ -from pathlib import Path - -import numpy as np -import pytest -import torch -from torch import nn - -import hls4ml - -test_root_path = Path(__file__).parent - -in_shape = 16 -atol = 5e-3 - - -@pytest.fixture(scope='module') -def data(): - np.random.seed(0) - X = np.random.rand(100, in_shape) - return X - - -@pytest.mark.parametrize('io_type', ['io_parallel', 'io_stream']) -@pytest.mark.parametrize('backend', ['Vivado', 'Vitis', 'Quartus']) -def test_batchnorm(data, backend, io_type): - model = nn.Sequential( - nn.BatchNorm1d(in_shape), - ).to() - model.eval() - - default_precision = 'ac_fixed<32, 1, true>' if backend == 'Quartus' else 'ac_fixed<32, 1>' - - config = hls4ml.utils.config_from_pytorch_model(model, default_precision=default_precision, granularity='name') - output_dir = str(test_root_path / f'hls4mlprj_batchnorm_{backend}_{io_type}') - hls_model = hls4ml.converters.convert_from_pytorch_model( - model, (None, in_shape), backend=backend, hls_config=config, io_type=io_type, output_dir=output_dir - ) - hls_model.compile() - - # Predict - pytorch_prediction = model(torch.Tensor(data)).detach().numpy() - hls_prediction = hls_model.predict(data) - np.testing.assert_allclose(pytorch_prediction, hls_prediction, rtol=0, atol=atol, verbose=True) diff --git a/hls4ml/test/pytest/test_binary_cnn.py b/hls4ml/test/pytest/test_binary_cnn.py deleted file mode 100644 index 7114e47..0000000 --- a/hls4ml/test/pytest/test_binary_cnn.py +++ /dev/null @@ -1,101 +0,0 @@ -from pathlib import Path - -import numpy as np -import pytest -from qkeras import QActivation, QBatchNormalization, QConv2D, QDense -from tensorflow.keras.layers import Flatten, Input, MaxPooling2D -from tensorflow.keras.models import Model -from tensorflow.keras.regularizers import l2 - -import hls4ml - -test_root_path = Path(__file__).parent - - -@pytest.mark.parametrize( - 'backend,io_type,strategy', - [ - ('Quartus', 'io_parallel', 'resource'), - ('Quartus', 'io_stream', 'resource'), - ('Vivado', 'io_parallel', 'resource'), - ('Vivado', 'io_parallel', 'latency'), - ('Vivado', 'io_stream', 'latency'), - ('Vivado', 'io_stream', 'resource'), - ('Vitis', 'io_parallel', 'resource'), - ('Vitis', 'io_parallel', 'latency'), 
- ('Vitis', 'io_stream', 'latency'), - ('Vitis', 'io_stream', 'resource'), - ], -) -def test_binary_cnn(backend, io_type, strategy): - x_in = Input(shape=(28, 28, 1)) - - x = QConv2D( - 4, - (3, 3), - kernel_quantizer="binary", - name="conv2d_1", - kernel_regularizer=l2(0.0001), - use_bias=True, - bias_quantizer='quantized_bits(5,2)', - )(x_in) - x = QBatchNormalization()(x) - x = QActivation("binary", name="act1")(x) - - x = QConv2D(8, (3, 3), kernel_quantizer="binary", name="conv2d_2", kernel_regularizer=l2(0.0001), use_bias=False)(x) - x = QBatchNormalization()(x) - x = QActivation("binary", name="act2")(x) - x = MaxPooling2D(pool_size=(2, 2))(x) - - x = QConv2D(8, (3, 3), kernel_quantizer="binary", name="conv2d_3", kernel_regularizer=l2(0.0001), use_bias=False)(x) - x = QBatchNormalization()(x) - x = QActivation("binary", name="act3")(x) - x = MaxPooling2D(pool_size=(2, 2))(x) - - x = Flatten()(x) - - x = QDense(10, kernel_quantizer="binary", name="q_dense_6", use_bias=False)(x) - x = QBatchNormalization()(x) - x = QActivation("binary_tanh", name="act4")(x) - - x = QDense(10, kernel_quantizer="binary", activation="linear", name="q_dense_7", use_bias=False)(x) - - model2 = Model(inputs=x_in, outputs=x) - - model2.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"]) - - model2.summary() - - hls_config = hls4ml.utils.config_from_keras_model(model2, granularity="name", default_precision='fixed<32,12>') - hls_config["Model"]["Strategy"] = strategy - - # hls_config["LayerName"]["q_dense_7_softmax"]["Implementation"] = "legacy" - - hls_config["LayerName"]["conv2d_1"]["ReuseFactor"] = 9 - hls_config["LayerName"]["conv2d_2"]["ReuseFactor"] = 36 - hls_config["LayerName"]["conv2d_3"]["ReuseFactor"] = 72 - hls_config["LayerName"]["q_dense_6"]["ReuseFactor"] = 2000 - hls_config["LayerName"]["q_dense_7"]["ReuseFactor"] = 100 - - if backend == 'Quartus' and io_type == 'io_parallel': - # The Winograd implementation does not support binary weights, so fall back to im2col - hls_config["LayerName"]["conv2d_1"]["Implementation"] = "im2col" - hls_config["LayerName"]["conv2d_2"]["Implementation"] = "im2col" - hls_config["LayerName"]["conv2d_3"]["Implementation"] = "im2col" - - output_dir = str(test_root_path / f"hls4mlprj_binary_cnn_{backend}_{io_type}_{strategy}") - hls_model = hls4ml.converters.convert_from_keras_model( - model2, - hls_config=hls_config, - output_dir=output_dir, - backend=backend, - io_type=io_type, - ) - - X = np.random.rand(100, 28, 28, 1) - - hls_model.compile() - y = model2.predict(X) - y_hls = hls_model.predict(X) - - np.testing.assert_allclose(y_hls, y, rtol=1e-2, atol=0.01) diff --git a/hls4ml/test/pytest/test_bram_factor.py b/hls4ml/test/pytest/test_bram_factor.py deleted file mode 100644 index 8aa608e..0000000 --- a/hls4ml/test/pytest/test_bram_factor.py +++ /dev/null @@ -1,56 +0,0 @@ -from pathlib import Path - -import numpy as np -import pytest -import tensorflow as tf -from tensorflow.keras.layers import Activation, Dense - -import hls4ml - -test_root_path = Path(__file__).parent - - -@pytest.mark.parametrize('backend', ['Vivado', 'Quartus']) -@pytest.mark.parametrize('io_type', ['io_parallel', 'io_stream']) -def test_bram_factor(backend, io_type): - '''A copy of the test_dense from test_keras_api.py with BramFactor set to 0''' - model = tf.keras.models.Sequential() - model.add( - Dense( - 2, - input_shape=(1,), - name='Dense', - use_bias=True, - kernel_initializer=tf.keras.initializers.RandomUniform(minval=1, maxval=10), - bias_initializer='zeros', -
kernel_regularizer=None, - bias_regularizer=None, - activity_regularizer=None, - kernel_constraint=None, - bias_constraint=None, - ) - ) - model.add(Activation(activation='elu', name='Activation')) - model.compile(optimizer='adam', loss='mse') - - X_input = np.random.rand(100, 1) - - keras_prediction = model.predict(X_input) - - config = hls4ml.utils.config_from_keras_model(model) - config["Model"]["BramFactor"] = 0 - output_dir = str(test_root_path / f'hls4mlprj_bram_factor_{backend}_{io_type}') - - hls_model = hls4ml.converters.convert_from_keras_model( - model, hls_config=config, output_dir=output_dir, io_type=io_type, backend=backend - ) - - hls_model.compile() - - hls_prediction = hls_model.predict(X_input) - - np.testing.assert_allclose(hls_prediction, keras_prediction, rtol=1e-2, atol=0.01) - - # Check that the weights are actually remote (stored in BRAM) - model_brams = [var for var in hls_model.get_weight_variables() if var.storage.lower() == 'bram'] - assert len(model_brams) == 2 diff --git a/hls4ml/test/pytest/test_causalpadding.py b/hls4ml/test/pytest/test_causalpadding.py deleted file mode 100644 index c076c99..0000000 --- a/hls4ml/test/pytest/test_causalpadding.py +++ /dev/null @@ -1,36 +0,0 @@ -from pathlib import Path - -import numpy as np -import pytest -from tensorflow.keras.layers import Conv1D -from tensorflow.keras.models import Sequential - -import hls4ml - -test_root_path = Path(__file__).parent - -atol = 5e-3 - - -@pytest.mark.parametrize('io_type', ['io_stream', 'io_parallel']) -@pytest.mark.parametrize('backend', ['Vivado', 'Vitis', 'Quartus']) -def test_causalpadding(io_type, backend): - model = Sequential() - model.add(Conv1D(1, 5, padding="causal", input_shape=(100, 1))) - model.compile() - - data = np.random.randint(0, 10, 100).astype(float) - data = np.expand_dims(data, axis=0) - data = np.expand_dims(data, axis=-1) - - config = hls4ml.utils.config_from_keras_model(model, default_precision='ap_fixed<32,16>', granularity='name') - odir = str(test_root_path / f'hls4mlprj_causalpadding_{backend}_{io_type}') - hls_model = hls4ml.converters.convert_from_keras_model( - model, hls_config=config, io_type=io_type, output_dir=odir, backend=backend - ) - hls_model.compile() - - # Predict - y_keras = model.predict(data).flatten() - y_hls = hls_model.predict(data).flatten() - np.testing.assert_allclose(y_keras, y_hls, rtol=0, atol=atol, verbose=True) diff --git a/hls4ml/test/pytest/test_clone_flatten.py b/hls4ml/test/pytest/test_clone_flatten.py deleted file mode 100644 index 12f3098..0000000 --- a/hls4ml/test/pytest/test_clone_flatten.py +++ /dev/null @@ -1,60 +0,0 @@ -from pathlib import Path - -import numpy as np -import pytest -from tensorflow.keras.layers import Concatenate, Flatten, Input -from tensorflow.keras.models import Model - -import hls4ml - -test_root_path = Path(__file__).parent - - -@pytest.fixture(scope='module') -def data(): - X = np.random.randint(-5, 5, (1, 2, 3), dtype='int32') - return X - - -@pytest.fixture(scope='module') -def keras_model(): - inp1 = Input(shape=(2, 3), name='input_1') - x = Flatten()(inp1) - y = Flatten()(inp1) - out = Concatenate(axis=1)([x, y]) - model = Model(inputs=inp1, outputs=out) - return model - - -@pytest.fixture -@pytest.mark.parametrize('io_type', ['io_stream']) -@pytest.mark.parametrize('backend', ['Vivado', 'Quartus']) -def hls_model(keras_model, backend, io_type): - hls_config = hls4ml.utils.config_from_keras_model( - keras_model, - default_precision='ap_int<6>', - granularity='name', - ) - output_dir = str(test_root_path /
f'hls4mlprj_clone_flatten_{backend}_{io_type}') - hls_model = hls4ml.converters.convert_from_keras_model( - keras_model, - hls_config=hls_config, - io_type=io_type, - backend=backend, - output_dir=output_dir, - ) - - hls_model.compile() - return hls_model - - -@pytest.mark.parametrize('io_type', ['io_stream']) -@pytest.mark.parametrize('backend', ['Vivado', 'Quartus']) -def test_accuracy(data, keras_model, hls_model): - X = data - model = keras_model - # model under test predictions and accuracy - y_keras = model.predict(X) - y_hls4ml = hls_model.predict(X.astype('float32')).reshape(y_keras.shape) - # "accuracy" of hls4ml predictions vs keras - np.testing.assert_array_equal(y_keras, y_hls4ml, verbose=True) diff --git a/hls4ml/test/pytest/test_cnn_mnist.py b/hls4ml/test/pytest/test_cnn_mnist.py deleted file mode 100644 index ab3365f..0000000 --- a/hls4ml/test/pytest/test_cnn_mnist.py +++ /dev/null @@ -1,92 +0,0 @@ -from pathlib import Path - -import numpy as np -import pytest -from sklearn.metrics import accuracy_score -from tensorflow.keras.datasets import mnist -from tensorflow.keras.layers import Activation, AveragePooling2D, Conv2D, Dense, Flatten, MaxPooling2D -from tensorflow.keras.models import Sequential -from tensorflow.keras.utils import to_categorical - -import hls4ml - -test_root_path = Path(__file__).parent - - -@pytest.fixture(scope='module') -def mnist_data(): - (x_train, y_train), (x_test, y_test) = mnist.load_data() - x_train = x_train.astype("float32") / 255.0 - x_test = x_test.astype("float32") / 255.0 - x_train = np.expand_dims(x_train, -1) - x_test = np.expand_dims(x_test, -1) - y_train = to_categorical(y_train, 10) - y_test = to_categorical(y_test, 10) - x_test, y_test = x_test[:1000], y_test[:1000] - return x_train, y_train, x_test, y_test - - -@pytest.fixture(scope='module') -def keras_model(mnist_data): - # The aim of this model is to test different CNN parameters, including: - # The common filter sizes, 3x3 and 5x5 - # A non-power of 2 number of filters - # Both Average and Max Pooling - # Both Same and Valid Padding - x_train, y_train, x_test, y_test = mnist_data - keras_model = Sequential() - keras_model.add(Conv2D(4, (3, 3), input_shape=(28, 28, 1), padding='same')) - keras_model.add(Activation('relu')) - keras_model.add(MaxPooling2D()) - keras_model.add(Conv2D(6, (5, 5), padding='valid')) - keras_model.add(Activation('relu')) - keras_model.add(AveragePooling2D()) - keras_model.add(Flatten()) - keras_model.add(Dense(10, kernel_initializer='lecun_uniform')) - keras_model.add(Activation('softmax', name='softmax')) - keras_model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) - keras_model.fit(x_train, y_train, batch_size=32, epochs=5, verbose=0) - return keras_model - - -@pytest.mark.parametrize( - 'backend,io_type,strategy', - [ - ('Quartus', 'io_parallel', 'resource'), - ('Quartus', 'io_stream', 'resource'), - ('Vivado', 'io_parallel', 'resource'), - ('Vivado', 'io_parallel', 'latency'), - ('Vivado', 'io_stream', 'latency'), - ('Vivado', 'io_stream', 'resource'), - ('Vitis', 'io_parallel', 'resource'), - ('Vitis', 'io_parallel', 'latency'), - ('Vitis', 'io_stream', 'latency'), - ('Vitis', 'io_stream', 'resource'), - ], -) -def test_mnist_cnn(keras_model, mnist_data, backend, io_type, strategy): - x_train, y_train, x_test, y_test = mnist_data - - hls_config = hls4ml.utils.config_from_keras_model(keras_model, granularity='name', backend=backend) - hls_config['Model']['Strategy'] = strategy -
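# Editor's note (assumption, not from the original test): hls4ml offers several
# softmax implementations; 'stable' subtracts the running maximum before
# exponentiation, spending a little extra logic to keep the outputs accurate
# enough for the argmax-based accuracy comparison below.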
hls_config['LayerName']['softmax']['Implementation'] = 'stable' - output_dir = str(test_root_path / f'hls4mlprj_cnn_mnist_{backend}_{io_type}_{strategy}') - - hls_model = hls4ml.converters.convert_from_keras_model( - keras_model, hls_config=hls_config, output_dir=output_dir, backend=backend, io_type=io_type - ) - hls_model.compile() - - # Model under test predictions and accuracy - y_keras = keras_model.predict(x_test) - y_hls4ml = hls_model.predict(x_test) - - acc_keras = accuracy_score(np.argmax(y_test, axis=1), np.argmax(y_keras, axis=1)) - acc_hls4ml = accuracy_score(np.argmax(y_test, axis=1), np.argmax(y_hls4ml, axis=1)) - rel_diff = abs(acc_keras - acc_hls4ml) / acc_keras - - print(f'Accuracy keras: {acc_keras}') - print(f'Accuracy hls4ml: {acc_hls4ml}') - print(f'Relative difference: {rel_diff}') - - assert acc_keras > 0.95 and rel_diff < 0.03 diff --git a/hls4ml/test/pytest/test_cnn_mnist_qkeras.py b/hls4ml/test/pytest/test_cnn_mnist_qkeras.py deleted file mode 100644 index b4c28c7..0000000 --- a/hls4ml/test/pytest/test_cnn_mnist_qkeras.py +++ /dev/null @@ -1,104 +0,0 @@ -from pathlib import Path - -import numpy as np -import pytest -import tensorflow as tf -from qkeras.utils import _add_supported_quantized_objects -from sklearn.metrics import accuracy_score -from tensorflow.keras.models import model_from_json - -import hls4ml - -co = {} -_add_supported_quantized_objects(co) - -test_root_path = Path(__file__).parent -example_model_path = (test_root_path / '../../example-models').resolve() - - -@pytest.fixture(scope='module') -def mnist_data(): - (x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data() - # Scale images to the [0, 1] range - x_train = x_train.astype("float32") / 255 - x_test = x_test.astype("float32") / 255 - # Make sure images have shape (28, 28, 1) - x_train = np.expand_dims(x_train, -1) - x_test = np.expand_dims(x_test, -1) - y_train = tf.keras.utils.to_categorical(y_train, 10) - y_test = tf.keras.utils.to_categorical(y_test, 10) - return x_train, y_train, x_test, y_test - - -@pytest.fixture(scope='module') -def mnist_model(): - model_path = example_model_path / 'keras/qkeras_mnist_cnn.json' - with model_path.open('r') as f: - jsons = f.read() - model = model_from_json(jsons, custom_objects=co) - model.load_weights(example_model_path / 'keras/qkeras_mnist_cnn_weights.h5') - return model - - -@pytest.fixture -@pytest.mark.parametrize( - 'backend,io_type,strategy', - [ - ('Quartus', 'io_parallel', 'resource'), - ('Quartus', 'io_stream', 'resource'), - ('Vivado', 'io_parallel', 'resource'), - ('Vivado', 'io_parallel', 'latency'), - ('Vivado', 'io_stream', 'latency'), - ('Vivado', 'io_stream', 'resource'), - ('Vitis', 'io_parallel', 'resource'), - ('Vitis', 'io_parallel', 'latency'), - ('Vitis', 'io_stream', 'latency'), - ('Vitis', 'io_stream', 'resource'), - ], -) -def hls_model(mnist_model, backend, io_type, strategy): - keras_model = mnist_model - hls_config = hls4ml.utils.config_from_keras_model(keras_model, granularity='name') - hls_config['Model']['Strategy'] = strategy - hls_config['LayerName']['softmax']['Strategy'] = 'Stable' - output_dir = str(test_root_path / f'hls4mlprj_cnn_mnist_qkeras_{backend}_{io_type}_{strategy}') - - hls_model = hls4ml.converters.convert_from_keras_model( - keras_model, hls_config=hls_config, output_dir=output_dir, backend=backend, io_type=io_type - ) - - hls_model.compile() - return hls_model - - -@pytest.mark.parametrize( - 'backend,io_type,strategy', - [ - ('Quartus', 'io_parallel', 'resource'), - ('Quartus', 
'io_stream', 'resource'), - ('Vivado', 'io_parallel', 'resource'), - ('Vivado', 'io_parallel', 'latency'), - ('Vivado', 'io_stream', 'latency'), - ('Vivado', 'io_stream', 'resource'), - ('Vitis', 'io_parallel', 'resource'), - ('Vitis', 'io_parallel', 'latency'), - ('Vitis', 'io_stream', 'latency'), - ('Vitis', 'io_stream', 'resource'), - ], -) -def test_accuracy(mnist_data, mnist_model, hls_model): - x_train, y_train, x_test, y_test = mnist_data - x_test, y_test = x_test[:1000], y_test[:1000] - # model under test predictions and accuracy - y_keras = mnist_model.predict(x_test) - y_hls4ml = hls_model.predict(x_test) - - acc_keras = accuracy_score(np.argmax(y_test, axis=1), np.argmax(y_keras, axis=1)) - acc_hls4ml = accuracy_score(np.argmax(y_test, axis=1), np.argmax(y_hls4ml, axis=1)) - rel_diff = abs(acc_keras - acc_hls4ml) / acc_keras - - print(f'Accuracy keras: {acc_keras}') - print(f'Accuracy hls4ml: {acc_hls4ml}') - print(f'Relative difference: {rel_diff}') - - assert acc_keras > 0.92 and rel_diff < 0.01 diff --git a/hls4ml/test/pytest/test_conv1d.py b/hls4ml/test/pytest/test_conv1d.py deleted file mode 100644 index 79beb01..0000000 --- a/hls4ml/test/pytest/test_conv1d.py +++ /dev/null @@ -1,107 +0,0 @@ -from pathlib import Path - -import numpy as np -import pytest -from sklearn.metrics import accuracy_score -from tensorflow.keras.models import model_from_json - -import hls4ml - -test_root_path = Path(__file__).parent -example_model_path = (test_root_path / '../../example-models').resolve() - - -@pytest.fixture(scope='module') -def data(): - X = np.random.rand(100, 10, 4) - return X - - -@pytest.fixture(scope='module') -def keras_model(): - model_path = example_model_path / 'keras/KERAS_conv1d_small.json' - with model_path.open('r') as f: - jsons = f.read() - model = model_from_json(jsons) - model.load_weights(example_model_path / 'keras/KERAS_conv1d_small_weights.h5') - return model - - -@pytest.fixture -@pytest.mark.parametrize( - 'backend,io_type,strategy', - [ - ('Quartus', 'io_parallel', 'resource'), - ('Quartus', 'io_stream', 'resource'), - ('Vivado', 'io_parallel', 'resource'), - ('Vivado', 'io_parallel', 'latency'), - ('Vivado', 'io_stream', 'latency'), - ('Vivado', 'io_stream', 'resource'), - ('Vitis', 'io_parallel', 'resource'), - ('Vitis', 'io_parallel', 'latency'), - ('Vitis', 'io_stream', 'latency'), - ('Vitis', 'io_stream', 'resource'), - ], -) -def hls_model(keras_model, backend, io_type, strategy): - default_precision = ( - 'ap_fixed<16,3,AP_RND_CONV,AP_SAT>' if backend == 'Vivado' else 'ac_fixed<16,3,true,AC_RND_CONV,AC_SAT>' - ) - fc1_weight_precision = 'ap_fixed<16,3>' if backend == 'Vivado' else 'ac_fixed<16,3,true>' - fc1_result_precision = ( - 'ap_fixed<16,6,AP_RND_CONV,AP_SAT>' if backend == 'Vivado' else 'ac_fixed<16,6,true,AC_RND_CONV,AC_SAT>' - ) - output_softmax_weight_precision = 'ap_fixed<16,6>' if backend == 'Vivado' else 'ac_fixed<16,6,true>' - output_softmax_result_precision = ( - 'ap_fixed<16,6,AP_RND_CONV,AP_SAT>' if backend == 'Vivado' else 'ac_fixed<16,6,true,AC_RND_CONV,AC_SAT>' - ) - - # Default config - hls_config = hls4ml.utils.config_from_keras_model(keras_model) - hls_config['Model']['Strategy'] = strategy - hls_config['Model']['ReuseFactor'] = 1 - hls_config['Model']['Precision'] = default_precision - - # Some model-specific precision tuning - hls_config['LayerName'] = {} - hls_config['LayerName']['fc1_relu'] = {'Precision': {'weight': fc1_weight_precision, 'result': fc1_result_precision}} - hls_config['LayerName']['output_softmax'] = { -
'Precision': {'weight': output_softmax_weight_precision, 'result': output_softmax_result_precision} - } - hls_config['LayerName']['output_softmax_softmax'] = {'Strategy': 'Stable'} - - output_dir = str(test_root_path / f'hls4mlprj_conv1d_{backend}_{io_type}_{strategy}') - hls_model = hls4ml.converters.convert_from_keras_model( - keras_model, hls_config=hls_config, backend=backend, io_type=io_type, output_dir=output_dir - ) - hls_model.compile() - return hls_model - - -@pytest.mark.parametrize( - 'backend,io_type,strategy', - [ - ('Quartus', 'io_parallel', 'resource'), - ('Quartus', 'io_stream', 'resource'), - ('Vivado', 'io_parallel', 'resource'), - ('Vivado', 'io_parallel', 'latency'), - ('Vivado', 'io_stream', 'latency'), - ('Vivado', 'io_stream', 'resource'), - ('Vitis', 'io_parallel', 'resource'), - ('Vitis', 'io_parallel', 'latency'), - ('Vitis', 'io_stream', 'latency'), - ('Vitis', 'io_stream', 'resource'), - ], -) -def test_accuracy(data, keras_model, hls_model): - X = data - model = keras_model - - # Model under test predictions and accuracy - y_keras = model.predict(X) - y_hls4ml = hls_model.predict(X) - - # "Accuracy" of hls4ml predictions vs keras - rel_acc = accuracy_score(np.argmax(y_keras, axis=1), np.argmax(y_hls4ml, axis=1)) - print(f'hls4ml accuracy relative to keras: {rel_acc}') - assert rel_acc > 0.98 diff --git a/hls4ml/test/pytest/test_conv1d_narrow.py b/hls4ml/test/pytest/test_conv1d_narrow.py deleted file mode 100644 index e3ed8f7..0000000 --- a/hls4ml/test/pytest/test_conv1d_narrow.py +++ /dev/null @@ -1,61 +0,0 @@ -from pathlib import Path - -import numpy as np -import pytest -from tensorflow.keras.layers import Conv1D -from tensorflow.keras.models import Sequential - -import hls4ml - -test_root_path = Path(__file__).parent - - -@pytest.fixture(scope='module') -def data(): - X = np.random.rand(10, 11, 3) - return X - - -@pytest.fixture(scope='module') -def model(): - model = Sequential() - model.add(Conv1D(5, 9, input_shape=(11, 3))) - model.compile() - return model - - -@pytest.mark.parametrize( - 'narrowset', - [ - ('io_stream', 'latency', 'Encoded'), - ('io_stream', 'resource', 'Encoded'), - ('io_stream', 'latency', 'LineBuffer'), - ('io_stream', 'resource', 'LineBuffer'), - ], -) -@pytest.mark.filterwarnings("error") -def test_narrow(data, model, narrowset, capfd): - ''' - Check that the implementation does not have leftover data. 
- ''' - io_type = narrowset[0] - strategy = narrowset[1] - conv = narrowset[2] - X = data - - output_dir = str(test_root_path / f'hls4mlprj_conv1d_narrow_{io_type}_{strategy}_{conv}') - - config = hls4ml.utils.config_from_keras_model(model) - config['Model']['Strategy'] = strategy - config['Model']['ConvImplementation'] = conv - - hls_model = hls4ml.converters.convert_from_keras_model(model, hls_config=config, io_type=io_type, output_dir=output_dir) - hls_model.compile() - - # model under test predictions and accuracy - y_keras = model.predict(X) - y_hls4ml = hls_model.predict(X) - - out, _ = capfd.readouterr() - assert "leftover data" not in out - np.testing.assert_allclose(y_keras.ravel(), y_hls4ml.ravel(), atol=0.05) diff --git a/hls4ml/test/pytest/test_conv2d_narrow.py b/hls4ml/test/pytest/test_conv2d_narrow.py deleted file mode 100644 index 74042fd..0000000 --- a/hls4ml/test/pytest/test_conv2d_narrow.py +++ /dev/null @@ -1,61 +0,0 @@ -from pathlib import Path - -import numpy as np -import pytest -from tensorflow.keras.layers import Conv2D -from tensorflow.keras.models import Sequential - -import hls4ml - -test_root_path = Path(__file__).parent - - -@pytest.fixture(scope='module') -def data(): - X = np.random.rand(10, 5, 5, 3) - return X - - -@pytest.fixture(scope='module') -def model(): - model = Sequential() - model.add(Conv2D(5, (4, 4), input_shape=(5, 5, 3))) - model.compile() - return model - - -@pytest.mark.parametrize( - 'narrowset', - [ - ('io_stream', 'latency', 'Encoded'), - ('io_stream', 'resource', 'Encoded'), - ('io_stream', 'latency', 'LineBuffer'), - ('io_stream', 'resource', 'LineBuffer'), - ], -) -@pytest.mark.filterwarnings("error") -def test_narrow(data, model, narrowset, capfd): - ''' - Check that the implementation does not have leftover data. 
- ''' - io_type = narrowset[0] - strategy = narrowset[1] - conv = narrowset[2] - X = data - - output_dir = str(test_root_path / f'hls4mlprj_conv2d_narrow_{io_type}_{strategy}_{conv}') - - config = hls4ml.utils.config_from_keras_model(model) - config['Model']['Strategy'] = strategy - config['Model']['ConvImplementation'] = conv - - hls_model = hls4ml.converters.convert_from_keras_model(model, hls_config=config, io_type=io_type, output_dir=output_dir) - hls_model.compile() - - # model under test predictions and accuracy - y_keras = model.predict(X) - y_hls4ml = hls_model.predict(X) - - out, _ = capfd.readouterr() - assert "leftover data" not in out - np.testing.assert_allclose(y_keras.ravel(), y_hls4ml.ravel(), atol=0.05) diff --git a/hls4ml/test/pytest/test_embed.py b/hls4ml/test/pytest/test_embed.py deleted file mode 100644 index fd8e39c..0000000 --- a/hls4ml/test/pytest/test_embed.py +++ /dev/null @@ -1,51 +0,0 @@ -from pathlib import Path - -import numpy as np -import pytest -from tensorflow.keras.layers import Embedding, Input -from tensorflow.keras.models import Model - -import hls4ml - -test_root_path = Path(__file__).parent - - -@pytest.fixture(scope='module') -def data(): - X = np.random.randint(10, size=(32, 100)) - return X - - -@pytest.fixture(scope='module') -def keras_model(): - inputs = Input(shape=(100,), name='embedding_input') - embedding = Embedding(13, 8, input_length=100, name='embedding')(inputs) - model = Model(inputs=inputs, outputs=embedding) - return model - - -@pytest.fixture -@pytest.mark.parametrize('backend', ['Vivado', 'Vitis', 'Quartus']) -@pytest.mark.parametrize('io_type', ['io_parallel', 'io_stream']) -def hls_model(keras_model, backend, io_type): - hls_config = hls4ml.utils.config_from_keras_model(keras_model, default_precision='ap_fixed<16,6>', granularity='name') - hls_config['LayerName']['embedding_input']['Precision']['result'] = 'ap_uint<4>' - out_dir = str(test_root_path / 'hls4mlprj_embed_{}_{}').format(backend, io_type) - hls_model = hls4ml.converters.convert_from_keras_model( - keras_model, backend=backend, hls_config=hls_config, io_type=io_type, output_dir=out_dir - ) - - hls_model.compile() - return hls_model - - -@pytest.mark.parametrize('backend', ['Vivado', 'Vitis', 'Quartus']) -@pytest.mark.parametrize('io_type', ['io_parallel', 'io_stream']) -def test_embedding_accuracy(data, keras_model, hls_model): - X = data - model = keras_model - # model under test predictions and accuracy - y_keras = model.predict(X) - y_hls4ml = hls_model.predict(X.astype(float)).reshape(y_keras.shape) - # "accuracy" of hls4ml predictions vs keras - np.testing.assert_allclose(y_keras, y_hls4ml, rtol=0, atol=1e-03, verbose=True) diff --git a/hls4ml/test/pytest/test_extensions.py b/hls4ml/test/pytest/test_extensions.py deleted file mode 100644 index 0820a58..0000000 --- a/hls4ml/test/pytest/test_extensions.py +++ /dev/null @@ -1,180 +0,0 @@ -from pathlib import Path - -import numpy as np -import pytest -import tensorflow as tf - -import hls4ml - -test_root_path = Path(__file__).parent - - -# Keras implementation of a custom layer -class KReverse(tf.keras.layers.Layer): - '''Keras implementation of a hypothetical custom layer''' - - def __init__(self): - super().__init__() - - def call(self, inputs): - return tf.reverse(inputs, axis=[-1]) - - -# hls4ml layer implementation -class HReverse(hls4ml.model.layers.Layer): - '''hls4ml implementation of a hypothetical custom layer''' - - def initialize(self): - inp = self.get_input_variable() - shape = inp.shape - dims = 
inp.dim_names - self.add_output_variable(shape, dims) - - -# hls4ml optimizer pass to remove consecutive HReverse layers -class RemoveDuplicateReverse(hls4ml.model.optimizer.OptimizerPass): - '''OptimizerPass to remove consecutive HReverse layers.''' - - def match(self, node): - return isinstance(node, HReverse) and isinstance(node.get_input_node(), HReverse) - - def transform(self, model, node): - first = node.get_input_node() - second = node - - model.remove_node(first, rewire=True) - model.remove_node(second, rewire=True) - return True - - -# Parser for converter -def parse_reverse_layer(keras_layer, input_names, input_shapes, data_reader): - layer = {} - layer['class_name'] = 'HReverse' - layer['name'] = keras_layer['config']['name'] - layer['n_in'] = input_shapes[0][1] - - if input_names is not None: - layer['inputs'] = input_names - - return layer, [shape for shape in input_shapes[0]] - - -# HLS Templates - No specific pragmas used; generic enough for both Intel and Vivado - -rev_config_template = """struct config{index} : nnet::reverse_config {{ - static const unsigned n_in = {n_in}; -}};\n""" - -rev_function_template = 'nnet::reverse<{input_t}, {config}>({input}, {output});' -rev_include_list = ['nnet_utils/nnet_reverse.h'] - - -class HReverseConfigTemplate(hls4ml.backends.template.LayerConfigTemplate): - def __init__(self): - super().__init__(HReverse) - self.template = rev_config_template - - def format(self, node): - params = self._default_config_params(node) - return self.template.format(**params) - - -class HReverseFunctionTemplate(hls4ml.backends.template.FunctionCallTemplate): - def __init__(self): - super().__init__(HReverse, include_header=rev_include_list) - self.template = rev_function_template - - def format(self, node): - params = self._default_function_params(node) - return self.template.format(**params) - - -rev_hls = """#ifndef NNET_REVERSE_H_ -#define NNET_REVERSE_H_ - -#include "nnet_common.h" - -namespace nnet { - -struct reverse_config { - static const unsigned n_in = 10; -}; - -template<class data_T, typename CONFIG_T> -void reverse( - data_T input[CONFIG_T::n_in], - data_T reversed[CONFIG_T::n_in] -) { - for (int i = 0; i < CONFIG_T::n_in; i++) { - reversed[CONFIG_T::n_in - 1 - i] = input[i]; - } -} - -} - -#endif -""" - - -@pytest.fixture(scope='session', autouse=True) -def register_custom_layer(): - # Register the converter for the custom Keras layer - hls4ml.converters.register_keras_layer_handler('KReverse', parse_reverse_layer) - - # Register the layer in hls4ml's IR - hls4ml.model.layers.register_layer('HReverse', HReverse) - - -@pytest.mark.parametrize('backend_id', ['Vivado', 'Vitis', 'Quartus']) -def test_extensions(tmp_path, backend_id): - # Register the optimization passes (if any) - backend = hls4ml.backends.get_backend(backend_id) - ip_flow = hls4ml.model.flow.get_flow(backend.get_default_flow()) - # Add the pass into the main optimization flow - optimize_flow = [flow for flow in ip_flow.requires if ':optimize' in flow][0] - optimizer_name = f'{backend_id.lower()}:remove_duplicate_reverse' - backend.register_pass(optimizer_name, RemoveDuplicateReverse, flow=optimize_flow) - - # Register template passes for the given backend - backend.register_template(HReverseConfigTemplate) - backend.register_template(HReverseFunctionTemplate) - - # Register HLS implementation - p = tmp_path / 'nnet_reverse.h' - p.write_text(rev_hls) - backend.register_source(p) - - # Test if it works - kmodel = tf.keras.models.Sequential( - [ - tf.keras.layers.Input(shape=(8,)), - KReverse(), - tf.keras.layers.ReLU(), - # These two
should be removed by the optimizer - KReverse(), - KReverse(), - ] - ) - - x = np.random.randint(-5, 5, (8,), dtype='int32') - kres = kmodel(x) - - hmodel = hls4ml.converters.convert_from_keras_model( - kmodel, - output_dir=str(test_root_path / f'hls4mlprj_extensions_{backend_id}'), - backend=backend_id, - io_type='io_parallel', - hls_config={'Model': {'Precision': 'ap_int<6>', 'ReuseFactor': 1}}, - ) - - hmodel.compile() - hres = hmodel.predict(x.astype('float32')) - - # Check if the optimizer pass was applied - assert optimizer_name in hmodel._applied_flows[0][optimize_flow] - - # Remove the registered pass from the flow - hls4ml.model.flow.update_flow(optimize_flow, remove_optimizers=[optimizer_name]) - - np.testing.assert_array_equal(kres, hres) diff --git a/hls4ml/test/pytest/test_flows.py b/hls4ml/test/pytest/test_flows.py deleted file mode 100644 index 2eec893..0000000 --- a/hls4ml/test/pytest/test_flows.py +++ /dev/null @@ -1,121 +0,0 @@ -import pytest - -import hls4ml - -''' -Tests for model flows. -Construct some dummy optimizer passes and flows that do nothing. -Passes record their label to the model. -Tests check that the order of applied passes matches the expectations -''' - - -class DummyPass(hls4ml.model.optimizer.OptimizerPass): - def __init__(self, label): - self.label = label - - def match(self, node): - return True - - def transform(self, model, node): - if getattr(model, 'test_flow_passes', None) is None: - model.test_flow_passes = [] - model.test_flow_passes.append(self.label) - return False - - -class DummyPassA(DummyPass): - def __init__(self): - super().__init__('A') - - -class DummyPassB(DummyPass): - def __init__(self): - super().__init__('B') - - -class DummyPassC(DummyPass): - def __init__(self): - super().__init__('C') - - -hls4ml.model.optimizer.register_pass('A', DummyPassA) -hls4ml.model.optimizer.register_pass('B', DummyPassB) -hls4ml.model.optimizer.register_pass('C', DummyPassC) - -DummyFlowA = hls4ml.model.flow.register_flow('A', ['A']) -DummyFlowB = hls4ml.model.flow.register_flow('B', ['B']) -DummyFlowC = hls4ml.model.flow.register_flow('C', ['C']) -DummyFlowAB = hls4ml.model.flow.register_flow('AB', ['A', 'B']) -DummyFlowBReqA = hls4ml.model.flow.register_flow('BReqA', ['B'], requires=[DummyFlowA]) -DummyFlowCReqBReqA = hls4ml.model.flow.register_flow('CReqBReqA', ['C'], requires=[DummyFlowBReqA]) - - -def dummy_flow_model(): - layers = [{'class_name': 'Input', 'name': 'layer0_input', 'input_shape': [1]}] - config = {'HLSConfig': {'Model': {'Precision': 'ap_fixed<32,16>', 'ReuseFactor': 1}, 'Flows': []}} - model = hls4ml.model.ModelGraph(config, layers) - return model - - -class FlowTester: - index = 0 - - def __init__(self, flows_to_apply, expected_pass_order, reapply): - self.flows_to_apply = flows_to_apply - self.expected_pass_order = expected_pass_order - self.reapply = reapply - self.index = FlowTester.index - FlowTester.index += 1 - - def run(self): - model = dummy_flow_model() - model.test_flow_passes = [] - for flow in self.flows_to_apply: - model.apply_flow(flow, self.reapply) - self.observed_pass_order = model.test_flow_passes - return self.observed_pass_order == self.expected_pass_order - - -flow_tests = [ - FlowTester(['A', 'B', 'C'], ['A', 'B', 'C'], 'single'), # independent flows in order - FlowTester(['A', 'A'], ['A', 'A'], 'single'), # same flow twice, single application - FlowTester(['A', 'A'], ['A', 'A'], 'all'), # same flow twice with reapply - FlowTester(['A', 'A'], ['A'], 'none'), # same flow twice with none - FlowTester(['BReqA'], ['A', 'B'], 'single'),
# one flow with a dependency - FlowTester(['CReqBReqA'], ['A', 'B', 'C'], 'single'), # one flow with dependency chain - FlowTester(['CReqBReqA', 'A'], ['A', 'B', 'C', 'A'], 'single'), # one flow with dependency chain, repeat dependency - FlowTester(['CReqBReqA', 'A'], ['A', 'B', 'C', 'A'], 'all'), # one flow with dependency chain, repeat dependency - FlowTester(['CReqBReqA', 'A'], ['A', 'B', 'C'], 'none'), # one flow with dependency chain, repeat dependency - FlowTester(['A', 'CReqBReqA'], ['A', 'B', 'C'], 'single'), # one flow with dependency chain, repeat dependency - FlowTester(['A', 'CReqBReqA'], ['A', 'A', 'B', 'C'], 'all'), # one flow with dependency chain, repeat dependency - FlowTester(['A', 'CReqBReqA'], ['A', 'B', 'C'], 'none'), # one flow with dependency chain, repeat dependency - FlowTester(['A', 'BReqA'], ['A', 'B'], 'single'), # second flow dependency already run - FlowTester(['A', 'BReqA'], ['A', 'A', 'B'], 'all'), # second flow dependency reapply - FlowTester(['A', 'BReqA'], ['A', 'B'], 'none'), # second flow dependency no reapply - FlowTester(['A', 'A', 'BReqA'], ['A', 'A', 'A', 'B'], 'all'), # second flow dependency reapply - FlowTester(['A', 'A', 'BReqA'], ['A', 'B'], 'none'), # second flow dependency no reapply - FlowTester(['A', 'A', 'BReqA'], ['A', 'A', 'B'], 'single'), # second flow dependency skip requirements - FlowTester(['A', 'BReqA', 'CReqBReqA'], ['A', 'B', 'C'], 'single'), # two flows depending on earlier flows - FlowTester(['A', 'BReqA', 'CReqBReqA'], ['A', 'B', 'C'], 'none'), # two flows depending on earlier flows - FlowTester(['A', 'BReqA', 'CReqBReqA'], ['A', 'A', 'B', 'A', 'B', 'C'], 'all'), # three flows depending on earlier flows - FlowTester(['CReqBReqA', 'BReqA', 'A'], ['A', 'B', 'C', 'B', 'A'], 'single'), # three flows depending on earlier flows - FlowTester(['CReqBReqA', 'BReqA', 'A'], ['A', 'B', 'C'], 'none'), # three flows depending on earlier flows - FlowTester(['CReqBReqA', 'BReqA', 'A'], ['A', 'B', 'C', 'A', 'B', 'A'], 'all'), # three flows depending on earlier flows - FlowTester( - ['A', 'CReqBReqA', 'BReqA', 'A'], ['A', 'B', 'C', 'B', 'A'], 'single' - ), # three flows depending on earlier flows - FlowTester(['A', 'CReqBReqA', 'BReqA', 'A'], ['A', 'B', 'C'], 'none'), # three flows depending on earlier flows - FlowTester( - ['A', 'CReqBReqA', 'BReqA', 'A'], ['A', 'A', 'B', 'C', 'A', 'B', 'A'], 'all' - ), # three flows depending on earlier flows -] - - -@pytest.mark.parametrize('tester', flow_tests) -def test_flows(tester): - success = tester.run() - i = tester.index - expected = tester.expected_pass_order - observed = tester.observed_pass_order - assert success, f'Tester {i} fails: expected ({expected}), observed ({observed})' diff --git a/hls4ml/test/pytest/test_garnet.py b/hls4ml/test/pytest/test_garnet.py deleted file mode 100644 index 67ddf77..0000000 --- a/hls4ml/test/pytest/test_garnet.py +++ /dev/null @@ -1,105 +0,0 @@ -from pathlib import Path - -import numpy as np -import pytest -from tensorflow.keras.layers import Input -from tensorflow.keras.models import Model - -import hls4ml -from contrib.garnet import GarNet, GarNetStack - -test_root_path = Path(__file__).parent - -vmax = 16 -feat = 3 - - -@pytest.fixture(scope='module') -def garnet_models(): - x = Input(shape=(vmax, feat)) - n = Input(shape=(1,), dtype='uint16') - inputs = [x, n] - outputs = GarNet( - 8, - 8, - 16, - simplified=True, - collapse='mean', - input_format='xn', - output_activation=None, - name='gar_1', - quantize_transforms=False, - )(inputs) - model =
Model(inputs=inputs, outputs=outputs) - model.summary() - - config = hls4ml.utils.config_from_keras_model(model, granularity='name') - config['Model'] = {} - config['Model']['ReuseFactor'] = 1 - config['Model']['Strategy'] = 'Latency' - config['Model']['Precision'] = 'ap_fixed<32,6>' - config['LayerName']['gar_1']['Precision'] = {'default': 'ap_fixed<32, 6, AP_RND, AP_SAT>', 'result': 'ap_fixed<32, 6>'} - - cfg = hls4ml.converters.create_config(output_dir=str(test_root_path / 'hls4mlprj_garnet'), part='xc7z020clg400-1') - cfg['HLSConfig'] = config - cfg['KerasModel'] = model - - hls_model = hls4ml.converters.keras_to_hls(cfg) - hls_model.compile() - return model, hls_model - - -@pytest.fixture(scope='module') -def garnet_stack_models(): - x = Input(shape=(vmax, feat)) - n = Input(shape=(1,), dtype='uint16') - inputs = [x, n] - outputs = GarNetStack( - ([4, 4, 8]), - ([4, 4, 8]), - ([8, 8, 16]), - simplified=True, - collapse='mean', - input_format='xn', - output_activation=None, # output_activation_transform was added back in contrib/garnet.py - name='gar_1', - quantize_transforms=None, # should be False rather than None; to be fixed in contrib/garnet.py - )(inputs) - model = Model(inputs=inputs, outputs=outputs) - model.summary() - - config = hls4ml.utils.config_from_keras_model(model, granularity='name') - config['Model'] = {} - config['Model']['ReuseFactor'] = 1 - config['Model']['Strategy'] = 'Latency' - config['Model']['Precision'] = 'ap_fixed<32,6>' - # config should now have precisions specified for ['LayerName']['gar_1']['Precision']['norm', 'aggr', etc.] - cfg = hls4ml.converters.create_config(output_dir=str(test_root_path / 'hls4mlprj_garnet'), part='xc7z020clg400-1') - cfg['HLSConfig'] = config - cfg['KerasModel'] = model - - hls_model = hls4ml.converters.keras_to_hls(cfg) - hls_model.compile() - return model, hls_model - - -@pytest.mark.parametrize('batch', [1, 3]) -def test_accuracy(garnet_models, batch): - model, hls_model = garnet_models - x = [np.random.rand(batch, vmax, feat), np.random.randint(0, vmax, size=(batch, 1))] - y = model.predict(x) - x_hls = [x[0], x[1].astype(np.float64)] - y_hls = hls_model.predict(x_hls).reshape(y.shape) - - np.testing.assert_allclose(y_hls, y, rtol=0, atol=0.1) - - -@pytest.mark.parametrize('batch', [1, 3]) -def test_accuracy_stack(garnet_stack_models, batch): - model, hls_model = garnet_stack_models - x = [np.random.rand(batch, vmax, feat), np.random.randint(0, vmax, size=(batch, 1))] - y = model.predict(x) - x_hls = [x[0], x[1].astype(np.float64)] - y_hls = hls_model.predict(x_hls).reshape(y.shape) - - np.testing.assert_allclose(y_hls, y, rtol=0, atol=0.1) diff --git a/hls4ml/test/pytest/test_globalpooling.py b/hls4ml/test/pytest/test_globalpooling.py deleted file mode 100644 index c402a53..0000000 --- a/hls4ml/test/pytest/test_globalpooling.py +++ /dev/null @@ -1,124 +0,0 @@ -from pathlib import Path - -import numpy as np -import pytest -from tensorflow.keras.layers import GlobalAveragePooling1D, GlobalAveragePooling2D, GlobalMaxPooling1D, GlobalMaxPooling2D -from tensorflow.keras.models import Sequential - -import hls4ml - -test_root_path = Path(__file__).parent - -in_shape = 18 -in_filt = 6 -atol = 5e-3 - - -@pytest.fixture(scope='module') -def data_1d(): - return np.random.rand(100, in_shape, in_filt) - - -@pytest.fixture(scope='module') -def keras_model_1d(request): - model_type = request.param['model_type'] - keepdims = request.param['keepdims'] - model = Sequential() - if model_type == 'avg': -
model.add(GlobalAveragePooling1D(input_shape=(in_shape, in_filt), keepdims=keepdims)) - elif model_type == 'max': - model.add(GlobalMaxPooling1D(input_shape=(in_shape, in_filt), keepdims=keepdims)) - model.compile() - return model, model_type, keepdims - - -@pytest.mark.parametrize('backend', ['Quartus', 'Vitis', 'Vivado']) -@pytest.mark.parametrize( - 'keras_model_1d', - [ - {'model_type': 'max', 'keepdims': True}, - {'model_type': 'max', 'keepdims': False}, - {'model_type': 'avg', 'keepdims': True}, - {'model_type': 'avg', 'keepdims': False}, - ], - ids=[ - 'model_type-max-keepdims-True', - 'model_type-max-keepdims-False', - 'model_type-avg-keepdims-True', - 'model_type-avg-keepdims-False', - ], - indirect=True, -) -@pytest.mark.parametrize('io_type', ['io_parallel', 'io_stream']) -def test_global_pool1d(backend, keras_model_1d, data_1d, io_type): - model, model_type, keepdims = keras_model_1d - - config = hls4ml.utils.config_from_keras_model(model, default_precision='ap_fixed<32,9>', granularity='name') - - hls_model = hls4ml.converters.convert_from_keras_model( - model, - hls_config=config, - io_type=io_type, - output_dir=str(test_root_path / f'hls4mlprj_globalpool1d_{backend}_{io_type}_{model_type}_keepdims{keepdims}'), - backend=backend, - ) - hls_model.compile() - - y_keras = model.predict(data_1d) - y_hls = hls_model.predict(data_1d).reshape(y_keras.shape) - np.testing.assert_allclose(y_keras, y_hls, rtol=0, atol=atol, verbose=True) - - -@pytest.fixture(scope='module') -def data_2d(): - return np.random.rand(100, in_shape, in_shape, in_filt) - - -@pytest.fixture(scope='module') -def keras_model_2d(request): - model_type = request.param['model_type'] - keepdims = request.param['keepdims'] - model = Sequential() - if model_type == 'avg': - model.add(GlobalAveragePooling2D(input_shape=(in_shape, in_shape, in_filt), keepdims=keepdims)) - elif model_type == 'max': - model.add(GlobalMaxPooling2D(input_shape=(in_shape, in_shape, in_filt), keepdims=keepdims)) - model.compile() - return model, model_type, keepdims - - -@pytest.mark.parametrize('backend', ['Quartus', 'Vitis', 'Vivado']) -@pytest.mark.parametrize( - 'keras_model_2d', - [ - {'model_type': 'max', 'keepdims': True}, - {'model_type': 'max', 'keepdims': False}, - {'model_type': 'avg', 'keepdims': True}, - {'model_type': 'avg', 'keepdims': False}, - ], - ids=[ - 'model_type-max-keepdims-True', - 'model_type-max-keepdims-False', - 'model_type-avg-keepdims-True', - 'model_type-avg-keepdims-False', - ], - indirect=True, -) -@pytest.mark.parametrize('io_type', ['io_parallel', 'io_stream']) -def test_global_pool2d(backend, keras_model_2d, data_2d, io_type): - model, model_type, keepdims = keras_model_2d - - config = hls4ml.utils.config_from_keras_model(model, default_precision='ap_fixed<32,9>', granularity='name') - - hls_model = hls4ml.converters.convert_from_keras_model( - model, - hls_config=config, - io_type=io_type, - output_dir=str(test_root_path / f'hls4mlprj_globalpool2d_{backend}_{io_type}_{model_type}_keepdims{keepdims}'), - backend=backend, - ) - hls_model.compile() - - y_keras = model.predict(data_2d) - y_hls = hls_model.predict(data_2d).reshape(y_keras.shape) - np.testing.assert_allclose(y_keras, y_hls, rtol=0, atol=atol, verbose=True) diff --git a/hls4ml/test/pytest/test_graph.py b/hls4ml/test/pytest/test_graph.py deleted file mode 100644 index f419823..0000000 --- a/hls4ml/test/pytest/test_graph.py +++ /dev/null @@ -1,226 +0,0 @@ -from pathlib import Path - -import numpy as np -import pytest -import tensorflow as tf -
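# Editor's note (not in the original file): with the dummy weights defined below,
# w = [2] and b = [1], every Dense layer computes y = 2*x + 1. Starting from the
# zero input used in test_graph_manipulation, the two-layer base model therefore
# yields 3, inserting a third Dense yields 7, and removing a layer yields 1,
# which are exactly the `expected` values in the parametrization further down.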
-import hls4ml - -test_root_path = Path(__file__).parent - - -w = np.array([2]) -b = np.array([1]) - - -def base_model(output_dir='hls4mlprj_graph_base_model', iotype='io_parallel'): - layers = [ - {'class_name': 'Input', 'name': 'layer0_input', 'input_shape': [1]}, - {'class_name': 'Dense', 'name': 'layer0', 'n_in': 1, 'n_out': 1, 'weight_data': w, 'bias_data': b}, - {'class_name': 'Dense', 'name': 'layer1', 'n_in': 1, 'n_out': 1, 'weight_data': w, 'bias_data': b}, - ] - config = {'HLSConfig': {'Model': {'Precision': 'ap_fixed<32,16>', 'ReuseFactor': 1}, 'Flows': []}} - config['OutputDir'] = output_dir - config['ProjectName'] = 'myprj' - config['IOType'] = iotype - config['Backend'] = 'Vivado' - model = hls4ml.model.ModelGraph(config, layers) - return model - - -def branch_model(output_dir='hls4mlprj_graph_branch_model', iotype='io_parallel'): - layers = [ - {'class_name': 'Input', 'name': 'layer0_input0', 'input_shape': [1], 'inputs': 'input'}, - {'class_name': 'Input', 'name': 'layer0_input1', 'input_shape': [1], 'inputs': 'input'}, - {'class_name': 'Merge', 'name': 'layer0', 'inputs': ['layer0_input0', 'layer0_input1'], 'op': 'add'}, - {'class_name': 'Merge', 'name': 'layer1', 'inputs': ['layer0_input1', 'layer0'], 'op': 'add'}, - {'class_name': 'Merge', 'name': 'layer2', 'inputs': ['layer0_input0', 'layer1'], 'op': 'add'}, - ] - config = {'HLSConfig': {'Model': {'Precision': 'ap_fixed<32,16>', 'ReuseFactor': 1}}} - config['OutputDir'] = output_dir - config['ProjectName'] = 'myprj' - config['IOType'] = iotype - model = hls4ml.model.ModelGraph(config, layers, inputs=['layer0_input0', 'layer0_input1']) - return model - - -def do_nop(model, node, layers): - return model, layers - - -def do_insert(model, node, layers): - after, before = node[0], node[1] - new_node = model.make_node('Dense', 'layer2', {'n_in': 1, 'n_out': 1, 'weight_data': w, 'bias_data': b}, [after]) - if before is not None: - before = [x for x in model.graph.values() if x.name == before][0] - model.insert_node(new_node, before=before) - iInsert = np.argwhere(layers == after)[0][0] + 1 - layers = np.insert(layers, iInsert, 'layer2') - return model, layers - - -def do_remove(model, node, layers): - node_obj = [n for n in list(model.get_layers()) if n.name == node][0] - model.remove_node(node_obj) - iRemove = np.argwhere(layers == node)[0][0] - layers = np.delete(layers, iRemove) - return model, layers - - -def do_replace(model, node, layers): - old_node = model.graph.get(node) - new_node = model.make_node('Dense', 'layer2', {'n_in': 1, 'n_out': 1, 'weight_data': w, 'bias_data': b}, old_node.inputs) - model.replace_node(old_node, new_node) - iInsert = np.argwhere(layers == node)[0][0] - layers = np.delete(layers, iInsert) - layers = np.insert(layers, iInsert, 'layer2') - return model, layers - - -graph_ops = {'insert': do_insert, 'remove': do_remove, 'replace': do_replace, 'nop': do_nop} - - -@pytest.mark.parametrize( - 'parameters', - [ - (base_model, 'nop', None, [3], False), # 0 - (base_model, 'insert', ('layer0_input', None), [7], False), # 1 - (base_model, 'insert', ('layer0', None), [7], False), # 2 - (base_model, 'insert', ('layer1', None), [7], False), # 3 - (base_model, 'remove', 'layer0', [1], False), # 4 - (base_model, 'remove', 'layer1', [1], False), # 5 - (base_model, 'replace', 'layer0', [3], False), # 6 - (base_model, 'replace', 'layer1', [3], False), - ], -) # 7 -@pytest.mark.parametrize('iotype', ['io_parallel', 'io_stream']) -def test_graph_manipulation(parameters, iotype): - model, op, node, expected, 
skip_layers_check = parameters[0], parameters[1], parameters[2], parameters[3], parameters[4] - odir = str(test_root_path / f'hls4mlprj_graph_{model.__name__}_{op}_{node}') - model = model(odir, iotype) - original_layers = np.array([layer.name for layer in list(model.get_layers())]) - model, expected_layers = graph_ops[op](model, node, original_layers) - model.compile() - hls4ml.utils.plot_model(model, show_shapes=True, show_precision=True, to_file=f'{odir}/model.png') - X = np.zeros((1, 1)) - y = model.predict(X) - # check the output - expected = np.array(expected) - np.testing.assert_array_equal(y, expected) - # check the order - actual_layers = np.array([layer.name for layer in list(model.get_layers())]) - if not skip_layers_check: # skip check for this model since order changes - np.testing.assert_array_equal(expected_layers, actual_layers) - - -@pytest.mark.parametrize('iotype', ['io_parallel', 'io_stream']) -@pytest.mark.parametrize('batch', [1, 100]) -def test_graph_branch(iotype, batch): - odir = str(test_root_path / f'hls4mlprj_graph_branch_model_{iotype}_batch{batch}') - model = branch_model(odir, iotype) - model.compile() - hls4ml.utils.plot_model(model, show_shapes=True, show_precision=True, to_file=f'{odir}/model.png') - X0 = np.random.rand(batch, 1) - X1 = np.random.rand(batch, 1) - y_expected = 2 * (X0 + X1) - y = model.predict([X0, X1]).reshape(y_expected.shape) - # check the output - np.testing.assert_allclose(y, y_expected, rtol=1, atol=2**-16) - - -@pytest.mark.parametrize('iotype', ['io_parallel', 'io_stream']) -def test_final_reshape(iotype): - '''Test case for a model with a Reshape as the final layer''' - inputs = tf.keras.layers.Input(shape=(1, 1, 1)) # 1 input pixel - conv = tf.keras.layers.Conv2D(6, 1) # 6 filters, 1x1 kernel - x = conv(inputs) - conv.set_weights([np.linspace(1, 6, 6).reshape(1, 1, 1, 6), np.zeros(6)]) # ascending int weights, 0 bias - x = tf.keras.layers.Reshape((3, 2))(x) # reshape the (1,1,6) output to (3,2) - model = tf.keras.models.Model(inputs=inputs, outputs=x) - - # create the ModelGraph - config = hls4ml.utils.config_from_keras_model(model, granularity='model') - odir = str(test_root_path / f'hls4mlprj_graph_final_reshape_{iotype}') - hls_model = hls4ml.converters.convert_from_keras_model( - model, output_dir=odir, backend='Vivado', io_type=iotype, hls_config=config - ) - hls_model.compile() - - # Test on ascending integers. 
The weights mean that each output pixel/neuron has - # a different value - X = np.linspace(-4, 4, 9).reshape(9, 1, 1, 1) - y = model.predict(X) - y_hls = hls_model.predict(X).reshape(y.shape) - # because of integer inputs and integer weights, we can expect exact matching - np.testing.assert_allclose(y, y_hls, rtol=0) - - -@pytest.mark.parametrize( - 'shapes, layer', - [ - (((2, 2, 3), (2, 2, 1)), tf.keras.layers.Concatenate), - (((2, 2, 1), (2, 2, 3)), tf.keras.layers.Concatenate), - (((2, 2, 3), (2, 2, 1)), tf.keras.layers.Add), - (((2, 2, 1), (2, 2, 3)), tf.keras.layers.Add), - (((1, 1, 2), (3, 4, 2)), tf.keras.layers.Add), - (((3, 4, 2), (1, 1, 2)), tf.keras.layers.Add), - ], -) -def test_broadcast_stream(shapes, layer): - '''Test case for stream broadcast before Add but not before Concatenate''' - input1 = tf.keras.layers.Input(shape=shapes[0]) - input2 = tf.keras.layers.Input(shape=shapes[1]) - inputs = [input1, input2] - outputs = layer()(inputs) - model = tf.keras.models.Model(inputs=inputs, outputs=outputs) - - # create the ModelGraph - config = hls4ml.utils.config_from_keras_model(model, granularity='model', default_precision='ap_fixed<32,16>') - odir = str( - test_root_path - / 'hls4mlprj_graph_broadcast_shapes_{}_{}_stream_{}'.format( - str(shapes[0]).replace(' ', '').replace(',', '_').replace('(', '').replace(')', ''), - str(shapes[1]).replace(' ', '').replace(',', '_').replace('(', '').replace(')', ''), - layer.__name__.lower(), - ) - ) - hls_model = hls4ml.converters.convert_from_keras_model( - model, output_dir=odir, backend='Vivado', io_type='io_stream', hls_config=config - ) - hls_model.compile() - - # Test with integers (for exact agreement) - X1 = np.random.randint(0, 100, size=(1,) + shapes[0]).astype(float) - X2 = np.random.randint(0, 100, size=(1,) + shapes[1]).astype(float) - y = model.predict([X1, X2]) - y_hls = hls_model.predict([X1, X2]).reshape(y.shape) - np.testing.assert_allclose(y, y_hls, rtol=0) - - -@pytest.mark.parametrize('batch', [1, 32]) -def test_multiple_outputs(batch): - '''Test case for multiple outputs''' - input1 = tf.keras.layers.Input(shape=(10,)) - inputs = [input1] - output1 = tf.keras.layers.Dense(5, kernel_initializer='ones', use_bias=False)(input1) - output2 = tf.keras.layers.Dense(2, kernel_initializer='ones', use_bias=False)(input1) - outputs = [output1, output2] - model = tf.keras.models.Model(inputs=inputs, outputs=outputs) - - # create the ModelGraph - config = hls4ml.utils.config_from_keras_model(model, granularity='model', default_precision='ap_fixed<32,16>') - odir = str(test_root_path / 'hls4mlprj_graph_multiple_outputs') - hls_model = hls4ml.converters.convert_from_keras_model( - model, output_dir=odir, backend='Vivado', io_type='io_parallel', hls_config=config - ) - hls_model.compile() - - # Test with integers (for exact agreement) - X1 = np.random.randint(0, 100, size=(batch, 10)).astype(float) - y = model.predict(X1) - y_hls = hls_model.predict(X1) - # test trace as well - y_hls, hls_trace = hls_model.trace(X1) - for y_i, y_hls_i in zip(y, y_hls): - y_hls_i = y_hls_i.reshape(y_i.shape) - np.testing.assert_allclose(y_i, y_hls_i, rtol=0) diff --git a/hls4ml/test/pytest/test_keras_api.py b/hls4ml/test/pytest/test_keras_api.py deleted file mode 100644 index 64f6830..0000000 --- a/hls4ml/test/pytest/test_keras_api.py +++ /dev/null @@ -1,479 +0,0 @@ -import math -from pathlib import Path - -import numpy as np -import pytest -import tensorflow as tf -from tensorflow.keras.layers import ( - ELU, - Activation, - AveragePooling1D, - 
AveragePooling2D, - Conv1D, - Conv2D, - Dense, - DepthwiseConv1D, - DepthwiseConv2D, - LeakyReLU, - MaxPooling1D, - MaxPooling2D, - PReLU, -) - -import hls4ml - -test_root_path = Path(__file__).parent - - -@pytest.mark.parametrize('backend', ['Vivado', 'Vitis', 'Quartus']) -@pytest.mark.parametrize('io_type', ['io_parallel', 'io_stream']) -def test_dense(backend, io_type): - model = tf.keras.models.Sequential() - model.add( - Dense( - 2, - input_shape=(1,), - name='Dense', - use_bias=True, - kernel_initializer=tf.keras.initializers.RandomUniform(minval=1, maxval=10), - bias_initializer='zeros', - kernel_regularizer=None, - bias_regularizer=None, - activity_regularizer=None, - kernel_constraint=None, - bias_constraint=None, - ) - ) - model.add(Activation(activation='elu', name='Activation')) - model.compile(optimizer='adam', loss='mse') - - X_input = np.random.rand(100, 1) - - keras_prediction = model.predict(X_input) - - config = hls4ml.utils.config_from_keras_model(model) - output_dir = str(test_root_path / f'hls4mlprj_keras_api_dense_{backend}_{io_type}') - - hls_model = hls4ml.converters.convert_from_keras_model( - model, hls_config=config, output_dir=output_dir, backend=backend, io_type=io_type - ) - - hls_model.compile() - - hls_prediction = hls_model.predict(X_input) - - np.testing.assert_allclose(hls_prediction, keras_prediction, rtol=1e-2, atol=0.01) - - assert len(model.layers) + 1 == len(hls_model.get_layers()) - assert list(hls_model.get_layers())[0].attributes['class_name'] == "InputLayer" - assert list(hls_model.get_layers())[1].attributes["class_name"] == model.layers[0]._name - assert list(hls_model.get_layers())[2].attributes['class_name'] == model.layers[1]._name - assert list(hls_model.get_layers())[0].attributes['input_shape'] == list(model.layers[0].input_shape[1:]) - assert list(hls_model.get_layers())[1].attributes['n_in'] == model.layers[0].input_shape[1:][0] - assert list(hls_model.get_layers())[1].attributes['n_out'] == model.layers[0].output_shape[1:][0] - assert list(hls_model.get_layers())[2].attributes['activation'] == str(model.layers[1].activation).split()[1] - assert list(hls_model.get_layers())[1].attributes['activation'] == str(model.layers[0].activation).split()[1] - - -# TODO: add ThresholdedReLU test when it can be made to pass -# https://github.com/fastmachinelearning/hls4ml/issues/376 -@pytest.mark.parametrize( - "activation_function", - [ - Activation(activation='relu', name='Activation'), - LeakyReLU(alpha=1.0), - ELU(alpha=1.0), - PReLU( - alpha_initializer="zeros", - ), - Activation(activation='sigmoid', name='Activation'), - ], -) -# ThresholdedReLU(theta=1.0)]) -@pytest.mark.parametrize('backend', ['Vivado', 'Vitis', 'Quartus']) -@pytest.mark.parametrize('io_type', ['io_parallel', 'io_stream']) -def test_activations(activation_function, backend, io_type): - model = tf.keras.models.Sequential() - model.add(Dense(64, input_shape=(1,), name='Dense', kernel_initializer='lecun_uniform', kernel_regularizer=None)) - model.add(activation_function) - - model.compile(optimizer='adam', loss='mse') - X_input = np.random.rand(100, 1) - keras_prediction = model.predict(X_input) - config = hls4ml.utils.config_from_keras_model(model) - output_dir = str( - test_root_path / f'hls4mlprj_keras_api_activations_{activation_function.__class__.__name__}_{backend}_{io_type}' - ) - hls_model = hls4ml.converters.convert_from_keras_model( - model, hls_config=config, output_dir=output_dir, backend=backend, io_type=io_type - ) - hls_model.compile() - hls_prediction = 
hls_model.predict(X_input) - - np.testing.assert_allclose(hls_prediction, keras_prediction, rtol=1e-2, atol=0.01) - - assert len(model.layers) + 1 == len(hls_model.get_layers()) - - assert list(hls_model.get_layers())[2].attributes['class_name'] == activation_function.__class__.__name__ - - -padds_options = ['same', 'valid'] - - -@pytest.mark.parametrize('padds', padds_options) -@pytest.mark.parametrize('backend', ['Vivado', 'Vitis', 'Quartus']) -@pytest.mark.parametrize('io_type', ['io_parallel', 'io_stream']) -def test_conv1d(padds, backend, io_type): - model = tf.keras.models.Sequential() - input_shape = (10, 128, 4) - model.add( - Conv1D( - filters=32, - kernel_size=3, - strides=1, - padding=padds, - activation='relu', - input_shape=input_shape[1:], - kernel_initializer='normal', - use_bias=False, - data_format='channels_last', - ) - ) - model.add(Activation(activation='relu')) - model.compile(optimizer='adam', loss='mse') - - X_input = np.random.rand(10, 128, 4) - keras_prediction = model.predict(X_input) - - config = hls4ml.utils.config_from_keras_model(model) - output_dir = str(test_root_path / f'hls4mlprj_keras_api_conv1d_{padds}_{backend}_{io_type}') - hls_model = hls4ml.converters.convert_from_keras_model( - model, hls_config=config, output_dir=output_dir, backend=backend, io_type=io_type - ) - hls_model.compile() - hls_prediction = hls_model.predict(X_input).reshape(keras_prediction.shape) - - # 5e-2 might be too high - np.testing.assert_allclose(hls_prediction, keras_prediction, rtol=0, atol=5e-2) - - if not (backend in ['Vivado', 'Vitis'] and io_type == 'io_stream' and padds == 'same'): - # Vivado/Vitis inserts an additional layer for 'same' padding in io_stream - assert len(model.layers) + 2 == len(hls_model.get_layers()) - assert list(hls_model.get_layers())[1].attributes['name'] == model.layers[0]._name - assert list(hls_model.get_layers())[1].attributes['class_name'] == 'Conv1D' - assert list(hls_model.get_layers())[1].attributes['activation'] == str(model.layers[0].activation).split()[1] - assert list(hls_model.get_layers())[1].attributes["in_width"] == model.layers[0]._batch_input_shape[1] - assert list(hls_model.get_layers())[1].attributes['filt_width'] == model.layers[0].kernel_size[0] - assert list(hls_model.get_layers())[1].attributes['n_chan'] == model.layers[0].input_shape[2] - assert list(hls_model.get_layers())[1].attributes['n_filt'] == model.layers[0].filters - assert list(hls_model.get_layers())[1].attributes['stride_width'] == model.layers[0].strides[0] - assert list(hls_model.get_layers())[1].attributes['padding'] == model.layers[0].padding - assert list(hls_model.get_layers())[1].attributes['data_format'] == model.layers[0].data_format - assert list(hls_model.get_layers())[1].attributes["out_width"] == list(model.layers[0].output_shape)[1] - - out_width = math.ceil(float(model.layers[0]._batch_input_shape[2]) / float(model.layers[0].strides[0])) - pad_along_width = max( - (out_width - 1) * model.layers[0].strides[0] - + model.layers[0].kernel_size[0] - - model.layers[0]._batch_input_shape[2], - 0, - ) - pad_left = pad_along_width // 2 - pad_right = pad_along_width - pad_left - - if model.layers[0].padding == 'same': - assert list(hls_model.get_layers())[1].attributes['pad_left'] == pad_left - assert list(hls_model.get_layers())[1].attributes['pad_right'] == pad_right - elif model.layers[0].padding == 'valid': - assert list(hls_model.get_layers())[1].attributes['pad_left'] == 0 - assert list(hls_model.get_layers())[1].attributes['pad_right'] == 0 - -
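# A minimal standalone sketch of the 'same'-padding arithmetic asserted above
# (editor's illustration; same_pad_1d is a hypothetical helper, not part of the
# original test suite). For input width w, kernel size k and stride s, Keras
# produces out_width == ceil(w / s) and places any odd padding element on the right.
import math

def same_pad_1d(w, k, s):
    out_w = math.ceil(w / s)               # output width under 'same' padding
    pad = max((out_w - 1) * s + k - w, 0)  # total padding required
    return pad // 2, pad - pad // 2        # (pad_left, pad_right)

assert same_pad_1d(128, 3, 1) == (1, 1)  # matches the stride-1, kernel-3 Conv1D above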
-chans_options = ['channels_last'] -padds_options = ['same', 'valid'] - - -@pytest.mark.parametrize('chans', chans_options) -@pytest.mark.parametrize('padds', padds_options) -@pytest.mark.parametrize('backend', ['Vivado', 'Vitis', 'Quartus']) -@pytest.mark.parametrize('io_type', ['io_parallel', 'io_stream']) -def test_conv2d(chans, padds, backend, io_type): - model = tf.keras.models.Sequential() - input_shape = (28, 28, 3) - model.add( - Conv2D( - filters=32, - kernel_size=(4, 4), - strides=(4, 4), - padding=padds, - input_shape=input_shape, - kernel_initializer='normal', - use_bias=False, - data_format=chans, - ) - ) - model.compile(optimizer='adam', loss='mse') - - X_input = np.random.rand(100, *input_shape) - keras_prediction = model.predict(X_input) - - config = hls4ml.utils.config_from_keras_model(model) - output_dir = str(test_root_path / f'hls4mlprj_keras_api_conv2d_{backend}_{chans}_{padds}_{io_type}') - hls_model = hls4ml.converters.convert_from_keras_model( - model, hls_config=config, output_dir=output_dir, backend=backend, io_type=io_type - ) - hls_model.compile() - hls_prediction = hls_model.predict(X_input).reshape(keras_prediction.shape) - - # A high tolerance, simply to verify correct functionality - np.testing.assert_allclose(hls_prediction, keras_prediction, rtol=0, atol=5e-2) - - assert len(model.layers) + 1 == len(hls_model.get_layers()) - assert list(hls_model.get_layers())[1].attributes['name'] == model.layers[0]._name - assert list(hls_model.get_layers())[1].attributes['class_name'] == 'Conv2D' - assert list(hls_model.get_layers())[1].attributes['activation'] == str(model.layers[0].activation).split()[1] - assert list(hls_model.get_layers())[1].attributes['filt_width'] == model.layers[0].kernel_size[1] - assert list(hls_model.get_layers())[1].attributes['filt_height'] == model.layers[0].kernel_size[0] - assert list(hls_model.get_layers())[1].attributes['n_filt'] == model.layers[0].filters - assert list(hls_model.get_layers())[1].attributes['stride_width'] == model.layers[0].strides[1] - assert list(hls_model.get_layers())[1].attributes['stride_height'] == model.layers[0].strides[0] - assert list(hls_model.get_layers())[1].attributes['padding'] == model.layers[0].padding - assert list(hls_model.get_layers())[1].attributes['data_format'] == model.layers[0].data_format - - if model.layers[0].data_format == 'channels_first': - assert list(hls_model.get_layers())[1].attributes['n_chan'] == model.layers[0]._batch_input_shape[1] - assert list(hls_model.get_layers())[1].attributes['in_height'] == model.layers[0]._batch_input_shape[2] - assert list(hls_model.get_layers())[1].attributes['in_width'] == model.layers[0]._batch_input_shape[3] - assert list(hls_model.get_layers())[1].attributes['out_height'] == model.layers[0].output_shape[2] - assert list(hls_model.get_layers())[1].attributes['out_width'] == model.layers[0].output_shape[3] - elif model.layers[0].data_format == 'channels_last': - assert list(hls_model.get_layers())[1].attributes['n_chan'] == model.layers[0]._batch_input_shape[3] - assert list(hls_model.get_layers())[1].attributes['in_height'] == model.layers[0]._batch_input_shape[1] - assert list(hls_model.get_layers())[1].attributes['in_width'] == model.layers[0]._batch_input_shape[2] - assert list(hls_model.get_layers())[1].attributes['out_height'] == model.layers[0].output_shape[1] - assert list(hls_model.get_layers())[1].attributes['out_width'] == model.layers[0].output_shape[2] - - if model.layers[0].padding == 'same': - if model.layers[0].data_format == 
'channels_first':
-            out_height = model.layers[0].output_shape[2]
-            out_width = model.layers[0].output_shape[3]
-            pad_along_height = max(
-                (out_height - 1) * model.layers[0].strides[0]
-                + model.layers[0].kernel_size[0]
-                - model.layers[0]._batch_input_shape[2],
-                0,
-            )
-            pad_along_width = max(
-                (out_width - 1) * model.layers[0].strides[1]
-                + model.layers[0].kernel_size[1]
-                - model.layers[0]._batch_input_shape[3],
-                0,
-            )
-        elif model.layers[0].data_format == 'channels_last':
-            out_height = model.layers[0].output_shape[1]
-            out_width = model.layers[0].output_shape[2]
-            pad_along_height = max(
-                (out_height - 1) * model.layers[0].strides[0]
-                + model.layers[0].kernel_size[0]
-                - model.layers[0]._batch_input_shape[1],
-                0,
-            )
-            pad_along_width = max(
-                (out_width - 1) * model.layers[0].strides[1]
-                + model.layers[0].kernel_size[1]
-                - model.layers[0]._batch_input_shape[2],
-                0,
-            )
-        pad_top = pad_along_height // 2
-        pad_bottom = pad_along_height - pad_top
-        pad_left = pad_along_width // 2
-        pad_right = pad_along_width - pad_left
-        assert list(hls_model.get_layers())[1].attributes['pad_top'] == pad_top
-        assert list(hls_model.get_layers())[1].attributes['pad_bottom'] == pad_bottom
-        assert list(hls_model.get_layers())[1].attributes['pad_left'] == pad_left
-        assert list(hls_model.get_layers())[1].attributes['pad_right'] == pad_right
-    elif model.layers[0].padding == 'valid':
-        assert list(hls_model.get_layers())[1].attributes['pad_top'] == 0
-        assert list(hls_model.get_layers())[1].attributes['pad_bottom'] == 0
-        assert list(hls_model.get_layers())[1].attributes['pad_left'] == 0
-        assert list(hls_model.get_layers())[1].attributes['pad_right'] == 0
-
-
-# Currently only Vivado and Vitis are supported for io_stream.
-@pytest.mark.parametrize('backend', ['Vivado', 'Vitis'])
-@pytest.mark.parametrize('io_type', ['io_stream'])
-def test_depthwise2d(backend, io_type):
-    '''
-    Test proper handling of DepthwiseConv2D
-    '''
-    X = np.random.rand(10, 32, 32, 3)
-    X = np.round(X * 2**10) * 2**-10  # make it an exact ap_fixed<16,6> (multiples of 2**-10 fit its 10 fractional bits)
-    model = tf.keras.models.Sequential()
-    model.add(DepthwiseConv2D(kernel_size=(3, 3), input_shape=(32, 32, 3)))
-    model.compile()
-
-    config = hls4ml.utils.config_from_keras_model(model, granularity='name', default_precision='fixed<32,12>')
-    output_dir = str(test_root_path / f'hls4mlprj_keras_api_depthwiseconv2d_{backend}_{io_type}')
-    hls_model = hls4ml.converters.convert_from_keras_model(
-        model, hls_config=config, output_dir=output_dir, backend=backend, io_type=io_type
-    )
-    hls_model.compile()
-
-    y_qkeras = model.predict(X)
-    y_hls4ml = hls_model.predict(X)
-
-    np.testing.assert_allclose(y_qkeras, y_hls4ml.reshape(y_qkeras.shape), rtol=1e-2, atol=0.01)
-
-
-# Currently only Vivado and Vitis are supported for io_stream.
-@pytest.mark.parametrize('backend', ['Vivado', 'Vitis'])
-@pytest.mark.parametrize('io_type', ['io_stream'])
-def test_depthwise1d(backend, io_type):
-    '''
-    Test proper handling of DepthwiseConv1D.
- ''' - X = np.random.rand(10, 32, 3) - X = np.round(X * 2**10) * 2**-10 # make it an exact ap_fixed<16,6> - model = tf.keras.models.Sequential() - model.add(DepthwiseConv1D(kernel_size=3, input_shape=(32, 3))) - model.compile() - - config = hls4ml.utils.config_from_keras_model(model, granularity='name') - output_dir = str(test_root_path / f'hls4mlprj_keras_api_depthwiseconv1d_{backend}_{io_type}') - hls_model = hls4ml.converters.convert_from_keras_model( - model, hls_config=config, output_dir=output_dir, backend=backend, io_type=io_type - ) - hls_model.compile() - - y_qkeras = model.predict(X) - y_hls4ml = hls_model.predict(X) - - np.testing.assert_allclose(y_qkeras, y_hls4ml.reshape(y_qkeras.shape), rtol=1e-2, atol=0.01) - - -pooling_layers = [MaxPooling1D, MaxPooling2D, AveragePooling1D, AveragePooling2D] - - -@pytest.mark.parametrize('pooling', pooling_layers) -@pytest.mark.parametrize('padds', padds_options) -@pytest.mark.parametrize('chans', chans_options) -@pytest.mark.parametrize('backend', ['Vivado', 'Vitis', 'Quartus']) -def test_pooling(pooling, padds, chans, backend): - assert '1D' in pooling.__name__ or '2D' in pooling.__name__ - - input_shape = (18, 15, 3) if '2D' in pooling.__name__ else (121, 3) - X_input = np.random.rand(100, *input_shape) - - keras_model = tf.keras.models.Sequential() - keras_model.add(pooling(padding=padds, input_shape=input_shape)) - keras_model.compile() - - hls_cfg = hls4ml.utils.config_from_keras_model(keras_model) - output_dir = str( - test_root_path / f'hls4mlprj_keras_api_pooling_{pooling.__name__}_channels_{chans}_padds_{padds}_backend_{backend}' - ) - hls_model = hls4ml.converters.convert_from_keras_model( - keras_model, hls_config=hls_cfg, output_dir=output_dir, backend=backend - ) - hls_model.compile() - - # Verify accuracy - keras_prediction = keras_model.predict(X_input) - hls_prediction = hls_model.predict(X_input).reshape(keras_prediction.shape) - np.testing.assert_allclose(hls_prediction, keras_prediction, rtol=0, atol=3e-2) - - # Verify correct parsing of layer - hls_pool = list(hls_model.get_layers())[-1] - ker_pool = keras_model.layers[-1] - if '2D' in pooling.__name__: - assert hls_pool.attributes['name'] == ker_pool._name - assert hls_pool.attributes['class_name'][-2] == str(2) - assert hls_pool.attributes['stride_height'] == ker_pool.strides[0] - assert hls_pool.attributes['stride_width'] == ker_pool.strides[1] - assert hls_pool.attributes['pool_height'] == ker_pool.pool_size[1] - assert hls_pool.attributes['pool_width'] == ker_pool.pool_size[0] - assert hls_pool.attributes['padding'] == ker_pool.padding - - if hls_pool.attributes['data_format'] == 'channels_last': - assert hls_pool.attributes['in_height'] == ker_pool.input_shape[1] - assert hls_pool.attributes['in_width'] == ker_pool.input_shape[2] - assert hls_pool.attributes['n_filt'] == ker_pool.input_shape[3] - elif hls_pool.attributes['data_format'] == 'channels_first': - assert hls_pool.attributes['in_height'] == ker_pool.input_shape[2] - assert hls_pool.attributes['in_width'] == ker_pool.input_shape[3] - assert hls_pool.attributes['n_filt'] == ker_pool.input_shape[1] - - if hls_pool.attributes['padding'] == 'same': - # Height - in_height = ker_pool.input_shape[1] - if ker_pool.data_format == 'channels_first': - in_height = ker_pool.input_shape[2] - out_height = int(math.ceil(float(in_height) / float(ker_pool.strides[0]))) - assert out_height == hls_pool.attributes['out_height'] - if in_height % ker_pool.strides[0] == 0: - pad_along_height = max(ker_pool.pool_size[1] - 
ker_pool.strides[0], 0)
-            else:
-                pad_along_height = max(ker_pool.pool_size[1] - (in_height % ker_pool.strides[0]), 0)
-            pad_top = pad_along_height // 2
-            pad_bottom = pad_along_height - pad_top
-            assert pad_bottom == hls_pool.attributes['pad_bottom']
-            assert pad_top == hls_pool.attributes['pad_top']
-
-            # Width
-            in_width = ker_pool.input_shape[2]
-            if ker_pool.data_format == 'channels_first':
-                in_width = ker_pool.input_shape[3]
-            out_width = int(math.ceil(float(in_width) / float(ker_pool.strides[1])))
-            assert out_width == hls_pool.attributes['out_width']
-            if in_width % ker_pool.strides[1] == 0:
-                pad_along_width = max(ker_pool.pool_size[0] - ker_pool.strides[1], 0)
-            else:
-                pad_along_width = max(ker_pool.pool_size[0] - (in_width % ker_pool.strides[1]), 0)
-            pad_left = pad_along_width // 2
-            pad_right = pad_along_width - pad_left
-            assert pad_left == hls_pool.attributes['pad_left']
-            assert pad_right == hls_pool.attributes['pad_right']
-
-        elif hls_pool.attributes['padding'] == 'valid':
-            if hls_pool.attributes['data_format'] == 'channels_first':
-                in_height = ker_pool.input_shape[2]
-                in_width = ker_pool.input_shape[3]
-            elif hls_pool.attributes['data_format'] == 'channels_last':
-                in_height = ker_pool.input_shape[1]
-                in_width = ker_pool.input_shape[2]
-
-            out_width = int(math.ceil(float(in_width - ker_pool.pool_size[0] + 1) / float(ker_pool.strides[1])))
-            out_height = int(math.ceil(float(in_height - ker_pool.pool_size[1] + 1) / float(ker_pool.strides[0])))
-
-            assert hls_pool.attributes['out_height'] == out_height
-            assert hls_pool.attributes['out_width'] == out_width
-            assert hls_pool.attributes['pad_top'] == 0
-            assert hls_pool.attributes['pad_bottom'] == 0
-            assert hls_pool.attributes['pad_left'] == 0
-            assert hls_pool.attributes['pad_right'] == 0
-
-    elif '1D' in pooling.__name__:
-        assert hls_pool.attributes['name'] == ker_pool._name
-        assert hls_pool.attributes['class_name'][-2] == str(1)
-        assert hls_pool.attributes['n_in'] == ker_pool.input_shape[1]
-        assert hls_pool.attributes['n_filt'] == ker_pool.input_shape[2]
-        assert hls_pool.attributes['pool_width'] == ker_pool.pool_size[0]
-        assert hls_pool.attributes['stride_width'] == ker_pool.strides[0]
-        assert hls_pool.attributes['padding'] == ker_pool.padding
-
-        out_same = math.ceil(float(ker_pool.input_shape[1]) / float(ker_pool.strides[0]))
-        out_valid = math.ceil(float(ker_pool.input_shape[1] - ker_pool.pool_size[0] + 1) / ker_pool.strides[0])
-
-        if hls_pool.attributes['padding'] == 'same':
-            assert hls_pool.attributes['n_out'] == out_same
-            if ker_pool.input_shape[1] % ker_pool.strides[0] == 0:
-                pad_along_width = max(ker_pool.pool_size[0] - ker_pool.strides[0], 0)
-            else:
-                pad_along_width = max(ker_pool.pool_size[0] - (ker_pool.input_shape[1] % ker_pool.strides[0]), 0)
-            assert hls_pool.attributes['pad_left'] == pad_along_width // 2
-            assert hls_pool.attributes['pad_right'] == pad_along_width - pad_along_width // 2
-
-        elif hls_pool.attributes['padding'] == 'valid':
-            assert hls_pool.attributes['n_out'] == out_valid
-            assert hls_pool.attributes['pad_left'] == 0
-            assert hls_pool.attributes['pad_right'] == 0
diff --git a/hls4ml/test/pytest/test_keras_h5_loader.py b/hls4ml/test/pytest/test_keras_h5_loader.py
deleted file mode 100644
index b53bb3a..0000000
--- a/hls4ml/test/pytest/test_keras_h5_loader.py
+++ /dev/null
@@ -1,39 +0,0 @@
-from pathlib import Path
-
-import numpy as np
-import pytest
-import tensorflow as tf
-
-import hls4ml
-
-test_root_path = Path(__file__).parent
-
-
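The same convention drives the pooling geometry checked in test_pooling above; a minimal standalone sketch of the 1D case, assuming the Keras defaults (pool_size=2, strides=pool_size) for the layers used there:

import math

# n_in matches the 1D input length in test_pooling above; pool_size = stride = 2 are the Keras defaults.
n_in, pool_size, stride = 121, 2, 2
out_same = math.ceil(n_in / stride)  # 61 outputs with 'same' padding
out_valid = math.ceil((n_in - pool_size + 1) / stride)  # 60 outputs with 'valid' padding
remainder = n_in % stride
pad_along_width = max(pool_size - (remainder if remainder else stride), 0)
pad_left = pad_along_width // 2
pad_right = pad_along_width - pad_left
assert (out_same, out_valid, pad_left, pad_right) == (61, 60, 0, 1)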
-@pytest.mark.parametrize('backend', ['Vivado', 'Vitis', 'Quartus'])
-def test_keras_h5_loader(backend):
-    input_shape = (10,)
-    model = tf.keras.models.Sequential(
-        [
-            tf.keras.layers.InputLayer(input_shape=input_shape),
-            tf.keras.layers.Activation(activation='relu'),
-        ]
-    )
-
-    hls_config = hls4ml.utils.config_from_keras_model(model, granularity='name')
-
-    config = {
-        'OutputDir': str(test_root_path / f'hls4mlprj_KerasH5_loader_test_{backend}'),
-        'ProjectName': f'KerasH5_loader_test_{backend}',
-        'Backend': backend,
-        'ClockPeriod': 25.0,
-        'IOType': 'io_parallel',
-        'HLSConfig': hls_config,
-        'KerasH5': str(test_root_path / f'hls4mlprj_KerasH5_loader_test_{backend}/model.h5'),
-    }
-
-    model.save(config['KerasH5'])
-    hls_model = hls4ml.converters.keras_to_hls(config)
-    hls_model.compile()
-    data = np.random.rand(1000, 10).astype(np.float32)
-    pred = hls_model.predict(data)
-    np.testing.assert_allclose(pred, model.predict(data), rtol=5e-3, atol=5e-3)
diff --git a/hls4ml/test/pytest/test_keras_nested_model.py b/hls4ml/test/pytest/test_keras_nested_model.py
deleted file mode 100755
index 8c4670a..0000000
--- a/hls4ml/test/pytest/test_keras_nested_model.py
+++ /dev/null
@@ -1,173 +0,0 @@
-""" Test that nested models in Keras are properly parsed and expanded by the optimizers.
-"""
-
-from pathlib import Path
-
-import numpy as np
-import pytest
-from tensorflow.keras.layers import Dense, Input
-from tensorflow.keras.models import Model, Sequential
-
-import hls4ml
-
-test_root_path = Path(__file__).parent
-
-
-def make_nested_model(input_shape):
-    """
-    This model will have the following architecture:
-    Functional (fun_model)
-      Dense (fun_first_dense)
-      Sequential (seq_sub)
-        Dense
-        Dense
-      Dense (fun_middle_dense)
-      Functional (fun_sub)
-        Dense
-        Dense
-      Dense (fun_last_dense)
-    """
-    seq_sub = Sequential(name='seq_sub')
-    seq_sub.add(Dense(5, activation='linear', input_shape=(5,), name='seq_sub_dense_1'))
-    seq_sub.add(Dense(3, activation='linear', name='seq_sub_dense_2'))
-
-    fun_input = Input(shape=(8,), name='fun_input')
-    fun_x = Dense(7, activation='linear', name='fun_sub_dense_1')(fun_input)
-    fun_x = Dense(6, activation='linear', name='fun_sub_dense_2')(fun_x)
-    fun_sub = Model(inputs=fun_input, outputs=fun_x, name='fun_sub')
-
-    input = Input(shape=input_shape, name='model_input')
-    x = Dense(5, activation='linear', name='fun_first_dense')(input)
-    x = seq_sub(x)
-    x = Dense(8, activation='linear', name='fun_middle_dense')(x)
-    x = fun_sub(x)
-    x = Dense(4, activation='linear', name='fun_last_dense')(x)
-    fun_model = Model(inputs=input, outputs=x, name='fun_model')
-
-    return fun_model
-
-
-def make_sub_nested_model(input_shape):
-    """
-    The following abomination will create this hierarchy:
-    Sequential
-      Dense (first_dense)
-      Functional (fun_model)
-        Dense (fun_first_dense)
-        Sequential (fun_model_seq_sub)
-          Dense
-          Dense
-        Dense (fun_middle_dense)
-        Functional (fun_model_fun_sub)
-          Dense
-          Dense
-        Dense (fun_last_dense)
-      Dense (middle_dense)
-      Sequential (seq_model)
-        Dense
-        Functional (seq_model_fun_sub)
-          Dense
-          Dense
-        Dense
-        Sequential (seq_model_seq_sub)
-          Dense
-          Dense
-        Dense
-      Dense (last_dense)
-    """
-    fun_model_seq_sub = Sequential(name='fun_model_seq_sub')
-    fun_model_seq_sub.add(Dense(5, activation='linear', input_shape=(5,), name='fun_seq_sub_dense_1'))
-    fun_model_seq_sub.add(Dense(3, activation='linear', name='fun_seq_sub_dense_2'))
-
-    fun_fun_input = Input(shape=(8,), name='fun_fun_input')
-    fun_fun_x = Dense(7, activation='linear',
name='fun_fun_sub_dense_1')(fun_fun_input) - fun_fun_x = Dense(6, activation='linear', name='fun_fun_sub_dense_2')(fun_fun_x) - fun_model_fun_sub = Model(inputs=fun_fun_input, outputs=fun_fun_x, name='fun_model_fun_sub') - - fun_input = Input(shape=(10,), name='fun_input') - fun_x = Dense(5, activation='linear', name='fun_first_dense')(fun_input) - fun_x = fun_model_seq_sub(fun_x) - fun_x = Dense(8, activation='linear', name='fun_middle_dense')(fun_x) - fun_x = fun_model_fun_sub(fun_x) - fun_x = Dense(4, activation='linear', name='fun_last_dense')(fun_x) - fun_model = Model(inputs=fun_input, outputs=fun_x, name='fun_model') - - seq_fun_input = Input(shape=(2,), name='seq_fun_input') - seq_fun_x = Dense(9, activation='linear', name='seq_fun_sub_dense_1')(seq_fun_input) - seq_fun_x = Dense(3, activation='linear', name='seq_fun_sub_dense_2')(seq_fun_x) - seq_model_fun_sub = Model(inputs=seq_fun_input, outputs=seq_fun_x, name='seq_model_fun_sub') - - seq_model_seq_sub = Sequential(name='seq_model_seq_sub') - seq_model_seq_sub.add(Dense(5, activation='linear', input_shape=(2,), name='seq_seq_sub_dense_1')) - seq_model_seq_sub.add(Dense(7, activation='linear', name='seq_seq_sub_dense_2')) - - seq_model = Sequential(name='seq_model') - seq_model.add(Dense(2, activation='linear', input_shape=(6,), name='seq_first_dense')) - seq_model.add(seq_model_fun_sub) - seq_model.add(Dense(2, activation='linear', name='seq_middle_dense')) - seq_model.add(seq_model_seq_sub) - seq_model.add(Dense(2, activation='linear', name='seq_last_dense')) - - model = Sequential() - model.add(Dense(10, activation='linear', input_shape=input_shape, name='first_dense')) - model.add(fun_model) - model.add(Dense(6, activation='linear', name='middle_dense')) - model.add(seq_model) - model.add(Dense(4, activation='linear', name='last_dense')) - - return model - - -def randX(batch_size, N): - return np.random.rand(batch_size, N) - - -@pytest.fixture(scope='module') -def randX_20_15(): - return randX(20, 15) - - -@pytest.mark.parametrize('backend', ['Vivado', 'Quartus']) -@pytest.mark.parametrize('io_type', ['io_parallel', 'io_stream']) -def test_nested_model(randX_20_15, backend, io_type): - n_in = 15 - input_shape = (n_in,) - keras_model = make_nested_model(input_shape) - keras_model.compile(optimizer='adam', loss='mae') - - config = hls4ml.utils.config_from_keras_model(keras_model, default_precision='fixed<24,12>') - prj_name = f'hls4mlprj_nested_model_{backend}_{io_type}' - output_dir = str(test_root_path / prj_name) - hls_model = hls4ml.converters.convert_from_keras_model( - keras_model, hls_config=config, output_dir=output_dir, io_type=io_type, backend=backend - ) - hls_model.compile() - - X = randX_20_15 - y_keras = keras_model.predict(X) - y_hls4ml = hls_model.predict(X) - - np.testing.assert_allclose(y_keras.ravel(), y_hls4ml.ravel(), rtol=1e-2, atol=0.02) - - -@pytest.mark.parametrize('backend', ['Vivado', 'Quartus']) -@pytest.mark.parametrize('io_type', ['io_parallel', 'io_stream']) -def test_sub_nested_model(randX_20_15, backend, io_type): - n_in = 15 - input_shape = (n_in,) - keras_model = make_sub_nested_model(input_shape) - keras_model.compile(optimizer='adam', loss='mae') - - config = hls4ml.utils.config_from_keras_model(keras_model, default_precision='fixed<24,12>') - prj_name = f'hls4mlprj_sub_nested_model_{backend}_{io_type}' - output_dir = str(test_root_path / prj_name) - hls_model = hls4ml.converters.convert_from_keras_model( - keras_model, hls_config=config, output_dir=output_dir, io_type=io_type, 
backend=backend - ) - hls_model.compile() - - X = randX_20_15 - y_keras = keras_model.predict(X) - y_hls4ml = hls_model.predict(X) - - np.testing.assert_allclose(y_keras.ravel(), y_hls4ml.ravel(), rtol=1e-2, atol=0.02) diff --git a/hls4ml/test/pytest/test_merge.py b/hls4ml/test/pytest/test_merge.py deleted file mode 100644 index cd6c0eb..0000000 --- a/hls4ml/test/pytest/test_merge.py +++ /dev/null @@ -1,155 +0,0 @@ -from pathlib import Path - -import numpy as np -import pytest -import tensorflow as tf -from tensorflow.keras.layers import Add, Average, Concatenate, Dot, Input, Maximum, Minimum, Multiply, Subtract - -import hls4ml - -test_root_path = Path(__file__).parent - - -@pytest.mark.parametrize('merge_layer', [Add, Average, Maximum, Minimum, Multiply, Subtract]) -@pytest.mark.parametrize('io_type', ['io_parallel', 'io_stream']) -@pytest.mark.parametrize('backend', ['Vivado', 'Vitis', 'Quartus']) -def test_merge(merge_layer, io_type, backend): - input_shape = (10, 10, 3) - - in1 = Input(shape=input_shape) - in2 = Input(shape=input_shape) - out = merge_layer()([in1, in2]) - - model = tf.keras.models.Model(inputs=[in1, in2], outputs=out) - model.compile() - - config = hls4ml.utils.config_from_keras_model(model, default_precision='ap_fixed<32,16>') - output_dir = str(test_root_path / f'hls4mlprj_merge_{merge_layer.__name__.lower()}_{backend}_{io_type}') - hls_model = hls4ml.converters.convert_from_keras_model( - model, hls_config=config, output_dir=output_dir, io_type=io_type, backend=backend - ) - hls_model.compile() - - X_input1 = np.random.rand(100, *input_shape) - X_input2 = np.random.rand(100, *input_shape) - - keras_prediction = model.predict([X_input1, X_input2]) - hls_prediction = hls_model.predict([X_input1, X_input2]).reshape(keras_prediction.shape) - - np.testing.assert_allclose(hls_prediction, keras_prediction, rtol=0, atol=0.001) - - -@pytest.mark.parametrize('axes', [1]) -@pytest.mark.parametrize('io_type', ['io_parallel']) # No io_stream implementation yet -@pytest.mark.parametrize('backend', ['Vivado', 'Vitis', 'Quartus']) -def test_dot(axes, io_type, backend): - # Only 1D implemented - input_shape = (10,) - - in1 = Input(shape=input_shape) - in2 = Input(shape=input_shape) - out = Dot(axes=axes)([in1, in2]) - - model = tf.keras.models.Model(inputs=[in1, in2], outputs=out) - model.compile() - - config = hls4ml.utils.config_from_keras_model(model, default_precision='ap_fixed<32,16>') - output_dir = str(test_root_path / f'hls4mlprj_dot_axes_{str(axes)}_{backend}_{io_type}') - hls_model = hls4ml.converters.convert_from_keras_model( - model, hls_config=config, output_dir=output_dir, io_type=io_type, backend=backend - ) - hls_model.compile() - - X_input1 = np.random.rand(100, *input_shape) - X_input2 = np.random.rand(100, *input_shape) - - keras_prediction = model.predict([X_input1, X_input2]) - hls_prediction = hls_model.predict([X_input1, X_input2]).reshape(keras_prediction.shape) - - np.testing.assert_allclose(hls_prediction, keras_prediction, rtol=0, atol=0.001) - - -@pytest.mark.parametrize('io_type', ['io_parallel', 'io_stream']) -@pytest.mark.parametrize('backend', ['Vivado', 'Vitis', 'Quartus']) -def test_concatenate1d(io_type, backend): - input_shape = (10,) - - in1 = Input(shape=input_shape) - in2 = Input(shape=input_shape) - out = Concatenate()([in1, in2]) - - model = tf.keras.models.Model(inputs=[in1, in2], outputs=out) - model.compile() - - config = hls4ml.utils.config_from_keras_model(model, default_precision='ap_fixed<32,16>') - output_dir = str(test_root_path / 
f'hls4mlprj_concatenate1d_{backend}_{io_type}') - hls_model = hls4ml.converters.convert_from_keras_model( - model, hls_config=config, output_dir=output_dir, io_type=io_type, backend=backend - ) - hls_model.compile() - - X_input1 = np.random.rand(100, *input_shape) - X_input2 = np.random.rand(100, *input_shape) - - keras_prediction = model.predict([X_input1, X_input2]) - hls_prediction = hls_model.predict([X_input1, X_input2]).reshape(keras_prediction.shape) - - np.testing.assert_allclose(hls_prediction, keras_prediction, rtol=0, atol=0.001) - - -@pytest.mark.parametrize('axis', [1, 2]) -@pytest.mark.parametrize('io_type', ['io_parallel', 'io_stream']) -@pytest.mark.parametrize('backend', ['Vivado', 'Vitis', 'Quartus']) -def test_concatenate2d(axis, io_type, backend): - input_shape = (10, 3) - - in1 = Input(shape=input_shape) - in2 = Input(shape=input_shape) - out = Concatenate(axis=axis)([in1, in2]) - - model = tf.keras.models.Model(inputs=[in1, in2], outputs=out) - model.compile() - - config = hls4ml.utils.config_from_keras_model(model, default_precision='ap_fixed<32,16>') - output_dir = str(test_root_path / f'hls4mlprj_concatenate2d_axis_{str(axis)}_{io_type}_{backend}') - hls_model = hls4ml.converters.convert_from_keras_model( - model, hls_config=config, output_dir=output_dir, io_type=io_type, backend=backend - ) - hls_model.compile() - - X_input1 = np.random.rand(100, *input_shape) - X_input2 = np.random.rand(100, *input_shape) - - keras_prediction = model.predict([X_input1, X_input2]) - hls_prediction = hls_model.predict([X_input1, X_input2]).reshape(keras_prediction.shape) - - np.testing.assert_allclose(hls_prediction, keras_prediction, rtol=0, atol=0.001) - - -@pytest.mark.parametrize('axis', [1, 2, 3]) -@pytest.mark.parametrize('io_type', ['io_parallel', 'io_stream']) -@pytest.mark.parametrize('backend', ['Vivado', 'Vitis', 'Quartus']) -def test_concatenate3d(axis, io_type, backend): - input_shape = (10, 10, 3) - - in1 = Input(shape=input_shape) - in2 = Input(shape=input_shape) - out = Concatenate(axis=axis)([in1, in2]) - - model = tf.keras.models.Model(inputs=[in1, in2], outputs=out) - model.compile() - - config = hls4ml.utils.config_from_keras_model(model, default_precision='ap_fixed<32,16>') - output_dir = str(test_root_path / f'hls4mlprj_concatenate3d_axis_{str(axis)}_{io_type}_{backend}') - hls_model = hls4ml.converters.convert_from_keras_model( - model, hls_config=config, output_dir=output_dir, io_type=io_type, backend=backend - ) - hls_model.compile() - - X_input1 = np.random.rand(100, *input_shape) - X_input2 = np.random.rand(100, *input_shape) - - keras_prediction = model.predict([X_input1, X_input2]) - hls_prediction = hls_model.predict([X_input1, X_input2]).reshape(keras_prediction.shape) - - np.testing.assert_allclose(hls_prediction, keras_prediction, rtol=0, atol=0.001) diff --git a/hls4ml/test/pytest/test_merge_pytorch.py b/hls4ml/test/pytest/test_merge_pytorch.py deleted file mode 100644 index 17aa4d0..0000000 --- a/hls4ml/test/pytest/test_merge_pytorch.py +++ /dev/null @@ -1,72 +0,0 @@ -from pathlib import Path - -import numpy as np -import pytest -import torch -import torch.nn as nn - -import hls4ml - -test_root_path = Path(__file__).parent - - -class MergeModule(nn.Module): - def __init__(self, merge_op): - super().__init__() - self.op = getattr(torch, merge_op) - - def forward(self, x, y): - return self.op(x, y) - - -class ConcatModule(nn.Module): - def __init__(self): - super().__init__() - - def forward(self, x, y): - # In this test the shape will be (batch, 3, 
10, 10), but since we test with channels_last data format, this
-        # will be equivalent to the Keras default of concatenation along the last axis (axis=-1)
-        return torch.cat([x, y], dim=1)
-
-
-@pytest.mark.parametrize('merge_op', ['cat', 'add', 'mul', 'sub', 'minimum', 'maximum'])
-@pytest.mark.parametrize('io_type', ['io_parallel', 'io_stream'])
-@pytest.mark.parametrize('backend', ['Vivado', 'Vitis', 'Quartus'])
-def test_merge(merge_op, io_type, backend):
-    input_shape = (3, 10, 10)
-
-    if merge_op == 'cat':  # Meow!
-        model = ConcatModule()
-    else:
-        model = MergeModule(merge_op)
-    model.eval()
-
-    batch_input_shape = (None,) + input_shape
-    config = hls4ml.utils.config_from_pytorch_model(
-        model, default_precision='ap_fixed<32,16>', inputs_channel_last=True, transpose_outputs=False
-    )
-    output_dir = str(test_root_path / f'hls4mlprj_merge_pytorch_{merge_op}_{backend}_{io_type}')
-    hls_model = hls4ml.converters.convert_from_pytorch_model(
-        model,
-        [batch_input_shape, batch_input_shape],
-        hls_config=config,
-        output_dir=output_dir,
-        io_type=io_type,
-        backend=backend,
-    )
-    hls_model.compile()
-
-    X_input1 = np.random.rand(100, *input_shape)
-    X_input2 = np.random.rand(100, *input_shape)
-
-    X_input1_cl = np.ascontiguousarray(np.transpose(X_input1, axes=[0, 2, 3, 1]))
-    X_input2_cl = np.ascontiguousarray(np.transpose(X_input2, axes=[0, 2, 3, 1]))
-
-    pytorch_prediction = model(torch.Tensor(X_input1), torch.Tensor(X_input2)).detach().numpy()
-    hls_prediction = hls_model.predict([X_input1_cl, X_input2_cl])
-
-    output_shape = pytorch_prediction.shape
-    output_shape_cl = [output_shape[0], output_shape[2], output_shape[3], output_shape[1]]
-    hls_prediction = np.transpose(hls_prediction.reshape(output_shape_cl), axes=[0, 3, 1, 2])
-
-    np.testing.assert_allclose(hls_prediction, pytorch_prediction, rtol=0, atol=0.001)
diff --git a/hls4ml/test/pytest/test_multi_dense.py b/hls4ml/test/pytest/test_multi_dense.py
deleted file mode 100644
index 558ab2a..0000000
--- a/hls4ml/test/pytest/test_multi_dense.py
+++ /dev/null
@@ -1,65 +0,0 @@
-from pathlib import Path
-
-import numpy as np
-import pytest
-import tensorflow as tf
-from tensorflow.keras.layers import Dense
-
-import hls4ml
-
-test_root_path = Path(__file__).parent
-
-
-@pytest.mark.parametrize(
-    'backend, io_type',
-    [
-        ('Quartus', 'io_parallel'),
-        ('Vivado', 'io_parallel'),
-        ('Vitis', 'io_parallel'),
-        ('Vivado', 'io_stream'),
-        ('Vitis', 'io_stream'),
-    ],
-)
-def test_multi_dense(backend, io_type):
-    model = tf.keras.models.Sequential()
-    model.add(
-        Dense(
-            4,
-            input_shape=(
-                8,
-                8,
-            ),
-            name='Dense',
-            use_bias=True,
-            kernel_initializer=tf.keras.initializers.RandomUniform(minval=1, maxval=10),
-            bias_initializer='zeros',
-            kernel_regularizer=None,
-            bias_regularizer=None,
-            activity_regularizer=None,
-            kernel_constraint=None,
-            bias_constraint=None,
-            activation='relu',
-        )
-    )
-    model.compile(optimizer='adam', loss='mse')
-
-    X_input = np.random.rand(100, 8, 8)
-
-    keras_prediction = model.predict(X_input)
-
-    default_precision = 'ap_fixed<32, 16>' if backend in ['Vivado', 'Vitis'] else 'ac_fixed<32, 16, true>'
-    config = hls4ml.utils.config_from_keras_model(model, default_precision=default_precision)
-    output_dir = str(test_root_path / f'hls4mlprj_multi_dense_{backend}_{io_type}')
-
-    hls_model = hls4ml.converters.convert_from_keras_model(
-        model, hls_config=config, output_dir=output_dir, backend=backend, io_type=io_type
-    )
-
-    hls_model.compile()
-
-    hls_prediction =
hls_model.predict(X_input).reshape(keras_prediction.shape) - - np.testing.assert_allclose(hls_prediction, keras_prediction, rtol=1e-2, atol=0.01) - - assert list(hls_model.get_layers())[1].class_name == 'PointwiseConv1D' diff --git a/hls4ml/test/pytest/test_pointwiseconv.py b/hls4ml/test/pytest/test_pointwiseconv.py deleted file mode 100644 index cbe2036..0000000 --- a/hls4ml/test/pytest/test_pointwiseconv.py +++ /dev/null @@ -1,156 +0,0 @@ -from pathlib import Path - -import numpy as np -import pytest -import tensorflow as tf -from tensorflow.keras.layers import Conv1D, Conv2D - -import hls4ml - -test_root_path = Path(__file__).parent - -padds_options = ['same', 'valid'] -chans_options = ['channels_last'] -io_type_options = ['io_parallel', 'io_stream'] -strides1d_options = [(1,), (2,)] -strides2d_options = [(1, 1), (2, 2)] -strategy_options = ['Latency', 'Resource'] - - -@pytest.mark.parametrize('chans', chans_options) -@pytest.mark.parametrize('padds', padds_options) -@pytest.mark.parametrize('strides', strides1d_options) -@pytest.mark.parametrize( - 'backend, io_type, strategy, conv_impl', - [ - ('Quartus', 'io_parallel', 'resource', 'LineBuffer'), - ('Vivado', 'io_parallel', 'resource', 'LineBuffer'), - ('Vitis', 'io_parallel', 'resource', 'LineBuffer'), - ('Vivado', 'io_parallel', 'latency', 'LineBuffer'), - ('Vitis', 'io_parallel', 'latency', 'LineBuffer'), - ('Vivado', 'io_parallel', 'latency', 'Pointwise'), - ('Vitis', 'io_parallel', 'latency', 'Pointwise'), - ('Vivado', 'io_stream', 'latency', 'LineBuffer'), - ('Vivado', 'io_stream', 'resource', 'LineBuffer'), - ('Vitis', 'io_stream', 'latency', 'LineBuffer'), - ('Vitis', 'io_stream', 'resource', 'LineBuffer'), - ], -) -def test_pointwiseconv1d(chans, padds, strides, backend, io_type, strategy, conv_impl): - model = tf.keras.models.Sequential() - input_shape = (28, 3) - model.add( - Conv1D( - filters=32, - kernel_size=(1,), - strides=strides, - padding=padds, - input_shape=input_shape, - kernel_initializer='normal', - use_bias=False, - data_format=chans, - name='pointwise1d', - ) - ) - model.compile(optimizer='adam', loss='mse') - - X_input = np.random.rand(100, *input_shape) - keras_prediction = model.predict(X_input) - - default_precision = 'ac_fixed<32,16,true>' if backend == 'Quartus' else 'ap_fixed<32,16>' - config = hls4ml.utils.config_from_keras_model(model, default_precision=default_precision, granularity='name') - config['Model']['Strategy'] = strategy - config['LayerName']['pointwise1d']['ConvImplementation'] = conv_impl - - output_dir = str( - test_root_path - / f'hls4mlprj_pointwise1d_{chans}_strides_{strides[0]}_{padds}_padding_{backend}_{io_type}_{strategy}_{conv_impl}' - ) - hls_model = hls4ml.converters.convert_from_keras_model( - model, hls_config=config, output_dir=output_dir, io_type=io_type, backend=backend - ) - hls_model.compile() - hls_prediction = hls_model.predict(X_input).reshape(keras_prediction.shape) - - assert 'Pointwise' in list(hls_model.graph.values())[1].class_name - np.testing.assert_allclose(hls_prediction, keras_prediction, rtol=0, atol=0.001) - - -@pytest.mark.parametrize('chans', chans_options) -@pytest.mark.parametrize('padds', padds_options) -@pytest.mark.parametrize('strides', strides2d_options) -@pytest.mark.parametrize( - 'backend, io_type, strategy', - [ - ('Quartus', 'io_parallel', 'resource'), - ('Vivado', 'io_parallel', 'resource'), - ('Vivado', 'io_parallel', 'latency'), - ('Vivado', 'io_stream', 'latency'), - ('Vivado', 'io_stream', 'resource'), - ], -) -def 
test_pointwiseconv2d(chans, padds, strides, backend, io_type, strategy): - model = tf.keras.models.Sequential() - input_shape = (28, 28, 3) - model.add( - Conv2D( - filters=32, - kernel_size=(1, 1), - strides=strides, - padding=padds, - input_shape=input_shape, - kernel_initializer='normal', - use_bias=False, - data_format=chans, - name='pointwise2d', - ) - ) - - model.compile(optimizer='adam', loss='mse') - X_input = np.random.rand(100, *input_shape) - keras_prediction = model.predict(X_input) - - default_precision = 'ac_fixed<32, 9, true>' if backend == 'Quartus' else 'ap_fixed<32, 9>' - - config = hls4ml.utils.config_from_keras_model(model, default_precision=default_precision) - config['Model']['Strategy'] = strategy - stride_cfg = str(strides).replace(', ', '_').replace('(', '').replace(')', '') - output_dir = str( - test_root_path / f'hls4mlprj_pointwise2d_{chans}_strides_{stride_cfg}_{padds}_padding_{backend}_{io_type}_{strategy}' - ) - - hls_model = hls4ml.converters.convert_from_keras_model( - model, hls_config=config, output_dir=output_dir, io_type=io_type, backend=backend - ) - hls_model.compile() - hls_prediction = hls_model.predict(X_input).reshape(keras_prediction.shape) - - assert 'Pointwise' in list(hls_model.graph.values())[1].class_name - np.testing.assert_allclose(hls_prediction, keras_prediction, rtol=0, atol=0.001) - - -@pytest.mark.parametrize('strategy', ['Latency', 'Resource']) -def test_pointwise_config(strategy): - model = tf.keras.models.Sequential() - input_shape = (8, 8, 3) - model.add( - Conv2D( - filters=8, - kernel_size=(1, 1), - input_shape=input_shape, - kernel_initializer='normal', - use_bias=False, - name='conv2d_1x1', - ) - ) - - model.compile(optimizer='adam', loss='mse') - - config = hls4ml.utils.config_from_keras_model(model, granularity='name') - config['Model']['Strategy'] = strategy - config['LayerName']['conv2d_1x1']['Strategy'] = strategy # Will fail if the strategy is not lowercase - output_dir = str(test_root_path / f'hls4mlprj_pointwise2d_config_{strategy}') - - hls_model = hls4ml.converters.convert_from_keras_model(model, hls_config=config, output_dir=output_dir) - # Model will fail to compile if strategy was set incorrectly - hls_model.compile() diff --git a/hls4ml/test/pytest/test_pooling.py b/hls4ml/test/pytest/test_pooling.py deleted file mode 100644 index 1f95869..0000000 --- a/hls4ml/test/pytest/test_pooling.py +++ /dev/null @@ -1,124 +0,0 @@ -from pathlib import Path - -import numpy as np -import pytest -from tensorflow.keras.layers import AveragePooling1D, AveragePooling2D, MaxPooling1D, MaxPooling2D -from tensorflow.keras.models import Sequential - -import hls4ml - -test_root_path = Path(__file__).parent - -in_shape = 124 -in_filt = 5 -atol = 5e-3 - - -@pytest.fixture(scope='module') -def data_1d(): - return np.random.rand(100, in_shape, in_filt) - - -@pytest.fixture(scope='module') -def keras_model_1d(request): - model_type = request.param['model_type'] - pads = request.param['padding'] - model = Sequential() - if model_type == 'avg': - model.add(AveragePooling1D(pool_size=3, input_shape=(in_shape, in_filt), padding=pads)) - elif model_type == 'max': - model.add(MaxPooling1D(pool_size=3, input_shape=(in_shape, in_filt), padding=pads)) - model.compile() - return model, model_type, pads - - -@pytest.mark.parametrize('backend', ['Quartus', 'Vitis', 'Vivado']) -@pytest.mark.parametrize( - 'keras_model_1d', - [ - {'model_type': 'max', 'padding': 'valid'}, - {'model_type': 'max', 'padding': 'same'}, - {'model_type': 'avg', 'padding': 
'valid'},
-        {'model_type': 'avg', 'padding': 'same'},
-    ],
-    ids=[
-        'model_type-max-padding-valid',
-        'model_type-max-padding-same',
-        'model_type-avg-padding-valid',
-        'model_type-avg-padding-same',
-    ],
-    indirect=True,
-)
-@pytest.mark.parametrize('io_type', ['io_parallel'])
-def test_pool1d(backend, keras_model_1d, data_1d, io_type):
-    model, model_type, padding = keras_model_1d
-
-    config = hls4ml.utils.config_from_keras_model(model, default_precision='ap_fixed<32,9>', granularity='name')
-
-    hls_model = hls4ml.converters.convert_from_keras_model(
-        model,
-        hls_config=config,
-        io_type=io_type,
-        output_dir=str(test_root_path / f'hls4mlprj_globalpool1d_{backend}_{io_type}_{model_type}_padding_{padding}'),
-        backend=backend,
-    )
-    hls_model.compile()
-
-    y_keras = model.predict(data_1d)
-    y_hls = hls_model.predict(data_1d).reshape(y_keras.shape)
-    np.testing.assert_allclose(y_keras, y_hls, rtol=0, atol=atol, verbose=True)
-
-
-@pytest.fixture(scope='module')
-def data_2d():
-    return np.random.rand(100, in_shape, in_shape, in_filt)
-
-
-@pytest.fixture(scope='module')
-def keras_model_2d(request):
-    model_type = request.param['model_type']
-    pads = request.param['padding']
-    model = Sequential()
-    if model_type == 'avg':
-        model.add(AveragePooling2D(input_shape=(in_shape, in_shape, in_filt), padding=pads))
-    elif model_type == 'max':
-        model.add(MaxPooling2D(input_shape=(in_shape, in_shape, in_filt), padding=pads))
-    model.compile()
-    return model, model_type, pads
-
-
-@pytest.mark.parametrize('backend', ['Quartus', 'Vitis', 'Vivado'])
-@pytest.mark.parametrize(
-    'keras_model_2d',
-    [
-        {'model_type': 'max', 'padding': 'valid'},
-        {'model_type': 'max', 'padding': 'same'},
-        {'model_type': 'avg', 'padding': 'valid'},
-        {'model_type': 'avg', 'padding': 'same'},
-    ],
-    ids=[
-        'model_type-max-padding-valid',
-        'model_type-max-padding-same',
-        'model_type-avg-padding-valid',
-        'model_type-avg-padding-same',
-    ],
-    indirect=True,
-)
-@pytest.mark.parametrize('io_type', ['io_parallel'])
-def test_pool2d(backend, keras_model_2d, data_2d, io_type):
-    model, model_type, padding = keras_model_2d
-
-    config = hls4ml.utils.config_from_keras_model(model, default_precision='ap_fixed<32,9>', granularity='name')
-
-    hls_model = hls4ml.converters.convert_from_keras_model(
-        model,
-        hls_config=config,
-        io_type=io_type,
-        output_dir=str(test_root_path / f'hls4mlprj_globalpool2d_{backend}_{io_type}_{model_type}_padding_{padding}'),
-        backend=backend,
-    )
-    hls_model.compile()
-
-    y_keras = model.predict(data_2d)
-    y_hls = hls_model.predict(data_2d).reshape(y_keras.shape)
-    np.testing.assert_allclose(y_keras, y_hls, rtol=0, atol=atol, verbose=True)
diff --git a/hls4ml/test/pytest/test_precision_parsing.py b/hls4ml/test/pytest/test_precision_parsing.py
deleted file mode 100644
index 5569a3a..0000000
--- a/hls4ml/test/pytest/test_precision_parsing.py
+++ /dev/null
@@ -1,29 +0,0 @@
-import pytest
-
-import hls4ml
-
-
-@pytest.mark.parametrize(
-    'prec_pair',
-    [
-        ('ap_fixed<3, 2>', True),
-        ('ap_ufixed<3, 2>', False),
-        ('ac_fixed<3, 2, true>', True),
-        ('ac_fixed<3, 2, false>', False),
-        ('ac_fixed<3, 2, 1>', True),
-        ('ac_fixed<3, 2, 0>', False),
-        ('ap_int<3, 2>', True),
-        ('ap_uint<3>', False),
-        ('ac_int<3, TRue>', True),
-        ('ac_int<3, FALse>', False),
-        ('ac_int<3, 1>', True),
-        ('ac_int<3, 0>', False),
-    ],
-)
-def test_sign_parsing(prec_pair):
-    '''Test that convert_precision_string determines the signedness correctly'''
-    strprec = prec_pair[0]
-    signed = prec_pair[1]
-
-    evalprec =
hls4ml.backends.fpga.fpga_backend.FPGABackend.convert_precision_string(strprec) - assert evalprec.signed == signed diff --git a/hls4ml/test/pytest/test_pytorch_api.py b/hls4ml/test/pytest/test_pytorch_api.py deleted file mode 100644 index f9bc175..0000000 --- a/hls4ml/test/pytest/test_pytorch_api.py +++ /dev/null @@ -1,742 +0,0 @@ -import math -from pathlib import Path - -import numpy as np -import pytest -import torch -import torch.nn as nn -from torch.nn import AvgPool1d, AvgPool2d, MaxPool1d, MaxPool2d - -from hls4ml.converters import convert_from_pytorch_model -from hls4ml.utils.config import config_from_pytorch_model - -test_root_path = Path(__file__).parent - - -class LinearModel(nn.Module): - def __init__(self): - super().__init__() - self.linear = nn.Linear(1, 1) - - def forward(self, x): - return self.linear(x) - - -@pytest.mark.parametrize('backend', ['Vivado', 'Quartus']) -@pytest.mark.parametrize('io_type', ['io_parallel', 'io_stream']) -def test_linear(backend, io_type): - model = LinearModel() - model.eval() - - X_input = np.random.rand(1) - - pytorch_prediction = model(torch.Tensor(X_input)).detach().numpy() - - config = config_from_pytorch_model(model) - output_dir = str(test_root_path / f'hls4mlprj_pytorch_api_linear_{backend}_{io_type}') - - hls_model = convert_from_pytorch_model( - model, (None, 1), hls_config=config, output_dir=output_dir, backend=backend, io_type=io_type - ) - - hls_model.compile() - - hls_prediction = hls_model.predict(X_input) - - np.testing.assert_allclose(hls_prediction, pytorch_prediction, rtol=1e-2, atol=0.01) - - from torch.fx import symbolic_trace - - traced_model = symbolic_trace(model) - - nNodes = 0 - for _node in traced_model.graph.nodes: - nNodes += 1 - - assert nNodes - 1 == len(hls_model.get_layers()) - assert list(hls_model.get_layers())[0].attributes['class_name'] == "InputLayer" - assert list(hls_model.get_layers())[1].attributes["class_name"] == "Dense" - assert list(hls_model.get_layers())[0].attributes['input_shape'] == [1] - assert list(hls_model.get_layers())[1].attributes['n_in'] == 1 - assert list(hls_model.get_layers())[1].attributes['n_out'] == 1 - - -# TODO: add ThresholdedReLU test when it can be made to pass -@pytest.mark.parametrize( - "activation_function", - [ - nn.ReLU(), - nn.LeakyReLU(negative_slope=1.0), - nn.ELU(alpha=1.0), - nn.PReLU(init=0.25), - nn.Sigmoid(), - nn.Threshold(threshold=1.0, value=0.0), - ], -) -@pytest.mark.parametrize('backend', ['Vivado', 'Quartus']) -@pytest.mark.parametrize('io_type', ['io_parallel', 'io_stream']) -def test_activations(activation_function, backend, io_type): - model = torch.nn.Sequential(nn.Linear(1, 1), activation_function).to() - model.eval() - - X_input = np.random.rand(1) - - pytorch_prediction = model(torch.Tensor(X_input)).detach().numpy() - - config = config_from_pytorch_model(model) - output_dir = str( - test_root_path / f'hls4mlprj_pytorch_api_activations_{activation_function.__class__.__name__}_{backend}_{io_type}' - ) - hls_model = convert_from_pytorch_model( - model, (None, 1), hls_config=config, output_dir=output_dir, backend=backend, io_type=io_type - ) - hls_model.compile() - - hls_prediction = hls_model.predict(X_input) - - np.testing.assert_allclose(hls_prediction, pytorch_prediction, rtol=1e-2, atol=0.01) - - from torch.fx import symbolic_trace - - traced_model = symbolic_trace(model) - - nNodes = 0 - for _node in traced_model.graph.nodes: - nNodes += 1 - - assert nNodes - 1 == len(hls_model.get_layers()) - - if activation_function.__class__.__name__ == 
'ReLU' or activation_function.__class__.__name__ == 'Sigmoid': - assert list(hls_model.get_layers())[2].attributes['class_name'] == 'Activation' - elif activation_function.__class__.__name__ == 'Threshold': - assert list(hls_model.get_layers())[2].attributes['class_name'] == 'ThresholdedReLU' - else: - assert list(hls_model.get_layers())[2].attributes['class_name'] == activation_function.__class__.__name__ - - -class ReLuModel(nn.Module): - def __init__(self): - super().__init__() - - def forward(self, x): - return nn.functional.relu(x) - - -class LeakyReLuModel(nn.Module): - def __init__(self): - super().__init__() - - def forward(self, x): - return nn.functional.leaky_relu(x, negative_slope=1.0) - - -class EluModel(nn.Module): - def __init__(self): - super().__init__() - - def forward(self, x): - return nn.functional.elu(x, alpha=1.0) - - -class ThresholdModel(nn.Module): - def __init__(self): - super().__init__() - - def forward(self, x): - return nn.functional.threshold(x, threshold=1.0, value=0.0) - - -class SigmoidModel(nn.Module): - def __init__(self): - super().__init__() - - def forward(self, x): - return nn.functional.sigmoid(x) - - -@pytest.mark.parametrize( - "activation_function", - [ - ReLuModel(), - LeakyReLuModel(), - EluModel(), - SigmoidModel(), - ThresholdModel(), - ], -) -@pytest.mark.parametrize('backend', ['Vivado', 'Quartus']) -@pytest.mark.parametrize('io_type', ['io_parallel', 'io_stream']) -def test_activation_functionals(activation_function, backend, io_type): - model = activation_function - model.eval() - - X_input = np.random.rand(1) - - pytorch_prediction = model(torch.Tensor(X_input)).detach().numpy() - - config = config_from_pytorch_model(model) - output_dir = str(test_root_path / f'hls4mlprj_pytorch_api_activations_functional_relu_{backend}_{io_type}') - hls_model = convert_from_pytorch_model( - model, (None, 1), hls_config=config, output_dir=output_dir, backend=backend, io_type=io_type - ) - hls_model.compile() - - hls_prediction = hls_model.predict(X_input) - - np.testing.assert_allclose(hls_prediction, pytorch_prediction, rtol=0, atol=0.05) - - from torch.fx import symbolic_trace - - traced_model = symbolic_trace(model) - - nNodes = 0 - for _node in traced_model.graph.nodes: - nNodes += 1 - - assert nNodes - 1 == len(hls_model.get_layers()) - - -padds_options = [0, 1] - - -@pytest.mark.parametrize('padds', padds_options) -@pytest.mark.parametrize('backend', ['Vivado', 'Quartus']) -@pytest.mark.parametrize('io_type', ['io_parallel', 'io_stream']) -def test_conv1d(padds, backend, io_type): - n_in = 2 - n_out = 2 - kernel_size = 3 - size_in = 4 - - model = torch.nn.Sequential(nn.Conv1d(n_in, n_out, kernel_size, padding=padds), nn.ReLU()).to() - model.eval() - - X_input = np.random.rand(1, n_in, size_in) - pytorch_prediction = model(torch.Tensor(X_input)).detach().numpy() - - if io_type == 'io_stream': - X_input = np.ascontiguousarray(X_input.transpose(0, 2, 1)) - config = config_from_pytorch_model(model, inputs_channel_last=True, transpose_outputs=False) - else: - config = config_from_pytorch_model(model, inputs_channel_last=False, transpose_outputs=True) - - output_dir = str(test_root_path / f'hls4mlprj_pytorch_api_conv1d_{padds}_{backend}_{io_type}') - hls_model = convert_from_pytorch_model( - model, (None, n_in, size_in), hls_config=config, output_dir=output_dir, backend=backend, io_type=io_type - ) - hls_model.compile() - - from torch.fx import symbolic_trace - - traced_model = symbolic_trace(model) - nNodes = 0 - convNode = None - reluNode = None - for 
_node in traced_model.graph.nodes:
-        nNodes += 1
-        if nNodes == 2:
-            convNode = _node
-        if nNodes == 3:
-            reluNode = _node
-
-    if io_type == 'io_stream':
-        # Vivado inserts an additional layer for 'same' padding in io_stream
-        if backend == "Vivado" and padds == 1:
-            assert nNodes == len(hls_model.get_layers())
-        else:
-            assert nNodes - 1 == len(hls_model.get_layers())
-    else:
-        assert nNodes + 1 == len(hls_model.get_layers())
-
-    children = {c[0]: c[1] for c in model.named_children()}
-    class_object_conv = children[convNode.target]
-    class_object_relu = children[reluNode.target]
-
-    out_width = int(
-        (size_in + 2 * padds - class_object_conv.dilation[0] * (class_object_conv.kernel_size[0] - 1) - 1)
-        / class_object_conv.stride[0]
-        + 1
-    )  # following https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html
-
-    if io_type == 'io_stream':
-        hls_prediction = np.transpose(np.reshape(hls_model.predict(X_input), (1, out_width, n_out)), (0, 2, 1))
-    else:
-        hls_prediction = np.reshape(hls_model.predict(X_input), (1, n_out, out_width))
-    # results are not very good at the moment
-    np.testing.assert_allclose(hls_prediction, pytorch_prediction, rtol=0, atol=5e-2)
-
-    # if not (backend == 'Vivado' and io_type == 'io_stream' and padds == 1):
-    conv_index = 2
-    act_index = 3
-    if io_type == "io_stream" and not (backend == "Vivado" and padds == 1):
-        conv_index = 1
-        act_index = 2
-    assert list(hls_model.get_layers())[conv_index].attributes['name'] == convNode.name
-    assert list(hls_model.get_layers())[conv_index].attributes['class_name'] == 'Conv1D'
-    assert list(hls_model.get_layers())[act_index].attributes['activation'] == class_object_relu.__class__.__name__
-    if io_type == "io_stream" and backend == "Vivado" and padds == 1:
-        assert list(hls_model.get_layers())[conv_index].attributes["in_width"] == size_in + 2
-    else:
-        assert list(hls_model.get_layers())[conv_index].attributes["in_width"] == size_in
-    assert list(hls_model.get_layers())[conv_index].attributes['filt_width'] == class_object_conv.kernel_size[0]
-    assert list(hls_model.get_layers())[conv_index].attributes['n_chan'] == class_object_conv.in_channels
-    assert list(hls_model.get_layers())[conv_index].attributes['n_filt'] == class_object_conv.out_channels
-    assert list(hls_model.get_layers())[conv_index].attributes['stride_width'] == class_object_conv.stride[0]
-    if list(hls_model.get_layers())[conv_index].attributes['padding'] == 'valid':
-        padding = 0
-    else:
-        padding = 1
-    if io_type == "io_stream" and backend == "Vivado" and padds == 1:
-        padding = 1
-        padds = 0
-
-    assert padding == class_object_conv.padding[0]
-    assert list(hls_model.get_layers())[conv_index].attributes['data_format'] == 'channels_last'
-    assert list(hls_model.get_layers())[conv_index].attributes["out_width"] == out_width
-
-    pad_along_width = max((out_width - 1) * class_object_conv.stride[0] + class_object_conv.kernel_size[0] - size_in, 0)
-    pad_left = pad_along_width // 2
-    pad_right = pad_along_width - pad_left
-
-    if padds == 1:
-        assert list(hls_model.get_layers())[conv_index].attributes['pad_left'] == pad_left
-        assert list(hls_model.get_layers())[conv_index].attributes['pad_right'] == pad_right
-    elif padds == 0:
-        assert list(hls_model.get_layers())[conv_index].attributes['pad_left'] == 0
-        assert list(hls_model.get_layers())[conv_index].attributes['pad_right'] == 0
-
-
-padds_options = [0, 1]
-
-
-@pytest.mark.parametrize('padds', padds_options)
-@pytest.mark.parametrize('backend', ['Vivado', 'Quartus'])
-@pytest.mark.parametrize('io_type', ['io_parallel', 'io_stream'])
-def test_conv2d(padds, backend, io_type):
-    n_in = 2
-    n_out = 2
-    kernel_size = 3
-    size_in_width = 4
-    size_in_height = 4
-
-    model = torch.nn.Sequential(nn.Conv2d(n_in, n_out, kernel_size, padding=padds), nn.ReLU()).to()
-    model.eval()
-
-    X_input = np.random.rand(100, n_in, size_in_height, size_in_width)
-    pytorch_prediction = model(torch.Tensor(X_input)).detach().numpy()
-
-    if io_type == 'io_stream':
-        X_input = np.ascontiguousarray(X_input.transpose(0, 2, 3, 1))
-        config = config_from_pytorch_model(model, inputs_channel_last=True, transpose_outputs=False)
-    else:
-        config = config_from_pytorch_model(model, inputs_channel_last=False, transpose_outputs=True)
-
-    output_dir = str(test_root_path / f'hls4mlprj_pytorch_api_conv2d_{padds}_{backend}_{io_type}')
-    hls_model = convert_from_pytorch_model(
-        model,
-        (None, n_in, size_in_height, size_in_width),
-        hls_config=config,
-        output_dir=output_dir,
-        backend=backend,
-        io_type=io_type,
-    )
-    hls_model.compile()
-
-    from torch.fx import symbolic_trace
-
-    traced_model = symbolic_trace(model)
-    nNodes = 0
-    convNode = None
-    reluNode = None
-    for _node in traced_model.graph.nodes:
-        nNodes += 1
-        if nNodes == 2:
-            convNode = _node
-        if nNodes == 3:
-            reluNode = _node
-    # if io_type == 'io_stream':
-    #    assert nNodes -1 == len(hls_model.get_layers())
-    # else:
-    #    assert nNodes == len(hls_model.get_layers())
-
-    children = {c[0]: c[1] for c in model.named_children()}
-    class_object_conv = children[convNode.target]
-    class_object_relu = children[reluNode.target]
-
-    from hls4ml.converters.utils import compute_padding_2d
-
-    padding = 'valid' if padds == 0 else 'same'
-    out_dims_hls = compute_padding_2d(
-        padding,
-        size_in_height,
-        size_in_width,
-        1,
-        1,
-        kernel_size,
-        kernel_size,
-    )
-
-    out_width = int(
-        (
-            size_in_width
-            + 2 * class_object_conv.padding[1]
-            - class_object_conv.dilation[1] * (class_object_conv.kernel_size[1] - 1)
-            - 1
-        )
-        / class_object_conv.stride[1]
-        + 1
-    )  # following https://pytorch.org/docs/stable/generated/torch.nn.Conv2d.html
-    assert out_dims_hls[0] == out_width
-    out_height = int(
-        (
-            size_in_height
-            + 2 * class_object_conv.padding[0]
-            - class_object_conv.dilation[0] * (class_object_conv.kernel_size[0] - 1)
-            - 1
-        )
-        / class_object_conv.stride[0]
-        + 1
-    )  # following https://pytorch.org/docs/stable/generated/torch.nn.Conv2d.html
-    assert out_dims_hls[1] == out_height
-
-    if io_type == 'io_stream':
-        hls_prediction = np.transpose(
-            np.reshape(hls_model.predict(X_input), (100, out_height, out_width, n_out)), (0, 3, 1, 2)
-        )
-    else:
-        hls_prediction = np.reshape(hls_model.predict(X_input), (100, n_out, out_height, out_width))
-    # results are not very good at the moment
-    np.testing.assert_allclose(hls_prediction, pytorch_prediction, rtol=0, atol=5e-2)
-
-    if not (backend == 'Vivado' and io_type == 'io_stream' and padds == 1):
-        # Vivado inserts an additional layer for 'same' padding in io_stream
-        conv_index = 2
-        act_index = 3
-        if io_type == "io_stream":
-            conv_index = 1
-            act_index = 2
-        assert list(hls_model.get_layers())[conv_index].attributes['name'] == convNode.name
-        assert list(hls_model.get_layers())[conv_index].attributes['class_name'] == 'Conv2D'
-        assert list(hls_model.get_layers())[act_index].attributes['activation'] == class_object_relu.__class__.__name__
-        assert list(hls_model.get_layers())[conv_index].attributes["in_width"] == size_in_width
-        assert list(hls_model.get_layers())[conv_index].attributes["in_height"] == size_in_height
-        assert
list(hls_model.get_layers())[conv_index].attributes['filt_width'] == class_object_conv.kernel_size[1] - assert list(hls_model.get_layers())[conv_index].attributes['filt_height'] == class_object_conv.kernel_size[0] - assert list(hls_model.get_layers())[conv_index].attributes['n_chan'] == class_object_conv.in_channels - assert list(hls_model.get_layers())[conv_index].attributes['n_filt'] == class_object_conv.out_channels - assert list(hls_model.get_layers())[conv_index].attributes['stride_width'] == class_object_conv.stride[1] - assert list(hls_model.get_layers())[conv_index].attributes['stride_height'] == class_object_conv.stride[0] - if list(hls_model.get_layers())[conv_index].attributes['padding'] == 'valid': - padding = 0 - else: - padding = 1 - assert padding == class_object_conv.padding[0] - assert list(hls_model.get_layers())[conv_index].attributes['data_format'] == 'channels_last' - - pad_along_width = max( - (out_width - 1) * class_object_conv.stride[1] + class_object_conv.kernel_size[1] - size_in_width, 0 - ) - pad_along_height = max( - (out_height - 1) * class_object_conv.stride[0] + class_object_conv.kernel_size[0] - size_in_height, 0 - ) - - pad_top = pad_along_height // 2 - pad_bottom = pad_along_height - pad_top - pad_left = pad_along_width // 2 - pad_right = pad_along_width - pad_left - - if padds == 1: - assert list(hls_model.get_layers())[conv_index].attributes['pad_left'] == pad_left - assert list(hls_model.get_layers())[conv_index].attributes['pad_right'] == pad_right - assert list(hls_model.get_layers())[conv_index].attributes['pad_top'] == pad_top - assert list(hls_model.get_layers())[conv_index].attributes['pad_bottom'] == pad_bottom - elif padds == 0: - assert list(hls_model.get_layers())[conv_index].attributes['pad_left'] == 0 - assert list(hls_model.get_layers())[conv_index].attributes['pad_right'] == 0 - assert list(hls_model.get_layers())[conv_index].attributes['pad_top'] == 0 - assert list(hls_model.get_layers())[conv_index].attributes['pad_bottom'] == 0 - - -padds_options = [0, 1] -pooling_layers = [MaxPool1d, MaxPool2d, AvgPool1d, AvgPool2d] - - -@pytest.mark.parametrize('pooling', pooling_layers) -@pytest.mark.parametrize('padds', padds_options) -@pytest.mark.parametrize('backend', ['Vivado', 'Quartus']) -def test_pooling(pooling, padds, backend): - assert '1d' in pooling.__name__ or '2d' in pooling.__name__ - - if '2d' in pooling.__name__: - n_in = 2 - size_in_height = 15 - size_in_width = 18 - else: - n_in = 2 - size_in_width = 121 - size_in_height = 0 - - input_shape = (1, n_in, size_in_height, size_in_width) if '2d' in pooling.__name__ else (1, n_in, size_in_width) - input_shape_forHLS = ( - (None, n_in, size_in_height, size_in_width) if '2d' in pooling.__name__ else (None, n_in, size_in_width) - ) - X_input = np.random.rand(*input_shape) - - model = torch.nn.Sequential(pooling(2, padding=padds)).to() - model.eval() - pytorch_prediction = model(torch.Tensor(X_input)).detach().numpy() - - config = config_from_pytorch_model(model) - output_dir = str(test_root_path / f'hls4mlprj_pytorch_api_pooling_{pooling.__name__}_padds_{padds}_backend_{backend}') - hls_model = convert_from_pytorch_model( - model, input_shape_forHLS, hls_config=config, output_dir=output_dir, backend=backend - ) - hls_model.compile() - - from torch.fx import symbolic_trace - - traced_model = symbolic_trace(model) - nNodes = 0 - poolNode = None - for _node in traced_model.graph.nodes: - nNodes += 1 - if nNodes == 2: - poolNode = _node - assert nNodes + 1 == len(hls_model.get_layers()) - 
children = {c[0]: c[1] for c in model.named_children()}
-    class_object_pool = children[poolNode.target]
-
-    if "Max" in pooling.__name__:
-        out_height = int(
-            math.floor(
-                float(size_in_height + 2 * padds - class_object_pool.dilation * (class_object_pool.kernel_size - 1) - 1)
-                / float(class_object_pool.stride)
-                + 1
-            )
-        )
-        out_width = int(
-            math.floor(
-                float(size_in_width + 2 * padds - class_object_pool.dilation * (class_object_pool.kernel_size - 1) - 1)
-                / float(class_object_pool.stride)
-                + 1
-            )
-        )
-    else:
-        if '2d' in pooling.__name__:
-            out_height = int(
-                math.floor((size_in_height + 2 * padds - class_object_pool.kernel_size) / class_object_pool.stride + 1)
-            )
-            out_width = int(
-                math.floor((size_in_width + 2 * padds - class_object_pool.kernel_size) / class_object_pool.stride + 1)
-            )
-        else:
-            out_height = int(
-                math.floor((size_in_height + 2 * padds - class_object_pool.kernel_size[0]) / class_object_pool.stride[0] + 1)
-            )
-            out_width = int(
-                math.floor((size_in_width + 2 * padds - class_object_pool.kernel_size[0]) / class_object_pool.stride[0] + 1)
-            )
-
-    if '2d' in pooling.__name__:
-        hls_prediction = np.reshape(hls_model.predict(X_input), (1, n_in, out_height, out_width))
-
-    else:
-        pred = hls_model.predict(X_input)
-        hls_prediction = np.reshape(pred, (1, n_in, out_width))
-
-    # results are not very good at the moment
-    np.testing.assert_allclose(hls_prediction, pytorch_prediction, rtol=0, atol=5e-2)
-
-    # Verify correct parsing of layer
-    hls_pool = list(hls_model.get_layers())[-2]
-    if '2d' in pooling.__name__:
-        assert hls_pool.attributes['name'] == poolNode.name
-        assert hls_pool.attributes['class_name'][-2] == str(2)
-        assert hls_pool.attributes['stride_height'] == class_object_pool.stride
-        assert hls_pool.attributes['stride_width'] == class_object_pool.stride
-        assert hls_pool.attributes['pool_height'] == class_object_pool.kernel_size
-        assert hls_pool.attributes['pool_width'] == class_object_pool.kernel_size
-        # Parenthesized so the assert checks the full comparison rather than a bare truthy string
-        assert hls_pool.attributes['padding'] == ('valid' if class_object_pool.padding == 0 else 'same')
-
-    elif '1d' in pooling.__name__:
-        if "Max" in pooling.__name__:
-            assert hls_pool.attributes['name'] == poolNode.name
-            assert hls_pool.attributes['class_name'][-2] == str(1)
-            assert hls_pool.attributes['pool_width'] == class_object_pool.kernel_size
-            assert hls_pool.attributes['stride_width'] == class_object_pool.stride
-            assert hls_pool.attributes['padding'] == ('valid' if class_object_pool.padding == 0 else 'same')
-
-        else:
-            assert hls_pool.attributes['name'] == poolNode.name
-            assert hls_pool.attributes['class_name'][-2] == str(1)
-            assert hls_pool.attributes['pool_width'] == class_object_pool.kernel_size[0]
-            assert hls_pool.attributes['stride_width'] == class_object_pool.stride[0]
-            # Direction normalized to match the other branches: padding == 0 means 'valid'
-            assert hls_pool.attributes['padding'] == ('valid' if class_object_pool.padding == 0 else 'same')
-
-
-class BatchNormModel(nn.Module):
-    def __init__(self):
-        super().__init__()
-        self.linear = nn.Linear(5, 8)
-        self.relu = nn.ReLU()
-        self.bn = nn.BatchNorm1d(8)
-
-    def forward(self, x):
-        x = self.linear(x)
-        x = self.relu(x)  # This is to prevent merging of BN into Linear
-        return self.bn(x)
-
-
-@pytest.mark.parametrize('backend', ['Vivado', 'Quartus'])
-@pytest.mark.parametrize('io_type', ['io_parallel', 'io_stream'])
-def test_bn(backend, io_type):
-    model = BatchNormModel()
-    model.eval()
-
-    X_input = np.random.rand(1, 5)
-
-    pytorch_prediction = model(torch.Tensor(X_input)).detach().numpy().flatten()
-
-    config = config_from_pytorch_model(model)
-    output_dir =
str(test_root_path / f'hls4mlprj_pytorch_api_bn_{backend}_{io_type}') - - hls_model = convert_from_pytorch_model( - model, (None, 5), hls_config=config, output_dir=output_dir, backend=backend, io_type=io_type - ) - - hls_model.compile() - - hls_prediction = hls_model.predict(X_input).flatten() - - np.testing.assert_allclose(hls_prediction, pytorch_prediction, rtol=1e-2, atol=0.01) - - assert list(hls_model.get_layers())[3].attributes['class_name'] == 'BatchNormalization' - assert list(hls_model.get_layers())[3].attributes['n_in'] == 8 - assert list(hls_model.get_layers())[3].attributes['n_out'] == 8 - - -class SqueezeModel(nn.Module): - def __init__(self): - super().__init__() - self.linear = nn.Linear(5, 3, bias=False) - self.bn = nn.BatchNorm1d(3) - nn.init.ones_(self.linear.weight) # This test is not about precision, so put 1's here - - def forward(self, x): - x = torch.unsqueeze(x, dim=1) # (1, 5) -> (1, 1, 5) - x = self.linear(x) # (1, 1, 3) - x = torch.squeeze(x) # (3,) - x = torch.relu(x) # (3,) - return x - - -@pytest.mark.parametrize('backend', ['Vivado', 'Quartus']) -@pytest.mark.parametrize('io_type', ['io_parallel', 'io_stream']) -def test_squeeze(backend, io_type): - model = SqueezeModel() - model.eval() - - X_input = np.random.rand(1, 5) - - pytorch_prediction = model(torch.Tensor(X_input)).detach().numpy().flatten() - - config = config_from_pytorch_model(model) - del config['Model']['InputsChannelLast'] # We don't want anything touched for this test - output_dir = str(test_root_path / f'hls4mlprj_pytorch_api_squeeze_{backend}_{io_type}') - - hls_model = convert_from_pytorch_model( - model, (None, 5), hls_config=config, output_dir=output_dir, backend=backend, io_type=io_type - ) - - hls_model.compile() - - hls_prediction = hls_model.predict(X_input).flatten() - - np.testing.assert_allclose(hls_prediction, pytorch_prediction, rtol=1e-2, atol=0.01) - - if io_type == 'io_parallel': - assert list(hls_model.get_layers())[1].attributes['class_name'] == 'Reshape' - assert list(hls_model.get_layers())[1].attributes['target_shape'] == [1, 5] - assert list(hls_model.get_layers())[3].attributes['class_name'] == 'Reshape' - assert list(hls_model.get_layers())[3].attributes['target_shape'] == [3] - elif io_type == 'io_stream': - assert list(hls_model.get_layers())[1].class_name == 'Repack' - assert list(hls_model.get_layers())[1].attributes['target_shape'] == [1, 5] - assert list(hls_model.get_layers())[3].attributes['class_name'] == 'Reshape' # Exists as in-place variable - assert list(hls_model.get_layers())[3].attributes['target_shape'] == [3] - - -@pytest.mark.parametrize('backend', ['Vivado', 'Quartus']) -def test_flatten(backend): - input = torch.randn(1, 1, 5, 5) - model = nn.Sequential(nn.Conv2d(1, 32, 5, 1, 1), nn.Flatten(), nn.ReLU()) - pytorch_prediction = model(input).detach().numpy() - input_shape = (None, 1, 5, 5) - - config = config_from_pytorch_model(model) - output_dir = str(test_root_path / f'hls4mlprj_pytorch_api_flatten_backend_{backend}') - hls_model = convert_from_pytorch_model(model, input_shape, hls_config=config, output_dir=output_dir, backend=backend) - hls_model.compile() - - pred = hls_model.predict(input.detach().numpy()) - hls_prediction = np.reshape(pred, (1, 288)) - - np.testing.assert_allclose(hls_prediction, pytorch_prediction, rtol=0, atol=5e-2) - - -class ModelSkippedLayers(nn.Module): - def __init__(self): - super().__init__() - self.conv1 = nn.Conv1d(in_channels=3, out_channels=6, kernel_size=3, bias=False) - self.relu1 = nn.ReLU() - self.conv2 = 
nn.Conv1d(in_channels=6, out_channels=5, kernel_size=3, bias=False) - self.relu2 = nn.ReLU() - self.dropout1 = nn.Dropout() # Should be skipped - self.flatten = nn.Flatten() - self.fc1 = nn.Linear(in_features=5 * 4, out_features=6, bias=False) - self.dropout2 = nn.Dropout() # Should be skipped - self.fc2 = nn.Linear(in_features=6, out_features=5, bias=False) - - def forward(self, x): - x = self.conv1(x) - x = self.relu1(x) - x = self.conv2(x) - x = self.relu2(x) - x = self.dropout1(x) - x = self.flatten(x) - x = self.fc1(x) - x = self.dropout2(x) - x = self.fc2(x) - return x - - -@pytest.mark.parametrize('backend', ['Vivado', 'Quartus']) -@pytest.mark.parametrize('io_type', ['io_parallel', 'io_stream']) -def test_skipped_layers(backend, io_type): - model = ModelSkippedLayers() - model.eval() - - input_shape = (3, 8) - batch_input_shape = (None,) + input_shape - config = config_from_pytorch_model( - model, default_precision='ap_fixed<32,16>', inputs_channel_last=True, transpose_outputs=False - ) - output_dir = str(test_root_path / f'hls4mlprj_pytorch_api_skipped_{backend}_{io_type}') - hls_model = convert_from_pytorch_model( - model, - batch_input_shape, - hls_config=config, - output_dir=output_dir, - io_type=io_type, - backend=backend, - ) - - hls_model.compile() - - input = torch.randn(10, 3, 8) - hls_input = np.ascontiguousarray(torch.permute(input, (0, 2, 1)).detach().numpy()) # Transpose to channels_last - - pytorch_prediction = model(input).detach().numpy().flatten() - hls_prediction = hls_model.predict(hls_input).flatten() - - np.testing.assert_allclose(hls_prediction, pytorch_prediction, rtol=0, atol=5e-2) diff --git a/hls4ml/test/pytest/test_qkeras.py b/hls4ml/test/pytest/test_qkeras.py deleted file mode 100644 index 69ca477..0000000 --- a/hls4ml/test/pytest/test_qkeras.py +++ /dev/null @@ -1,572 +0,0 @@ -import warnings -from pathlib import Path - -import numpy as np -import pytest -from qkeras.qconv2d_batchnorm import QConv2DBatchnorm -from qkeras.qconvolutional import QDepthwiseConv2D, QSeparableConv1D, QSeparableConv2D -from qkeras.qlayers import QActivation, QDense -from qkeras.quantizers import ( - binary, - quantized_bits, - quantized_po2, - quantized_relu, - quantized_sigmoid, - quantized_tanh, - ternary, -) -from qkeras.utils import _add_supported_quantized_objects -from sklearn.datasets import fetch_openml -from sklearn.model_selection import train_test_split -from sklearn.preprocessing import LabelEncoder, StandardScaler -from tensorflow.keras.layers import BatchNormalization, Input -from tensorflow.keras.models import Model, Sequential, model_from_json -from tensorflow.keras.utils import to_categorical - -import hls4ml - -co = {} -_add_supported_quantized_objects(co) - - -warnings.filterwarnings("ignore", message="numpy.dtype size changed") -warnings.filterwarnings("ignore", message="numpy.ufunc size changed") - -test_root_path = Path(__file__).parent -example_model_path = (test_root_path / '../../example-models').resolve() - - -@pytest.fixture(scope='module') -def get_jettagging_data(): - ''' - Download the jet tagging dataset - ''' - print("Fetching data from openml") - data = fetch_openml('hls4ml_lhc_jets_hlf') - X, y = data['data'], data['target'] - le = LabelEncoder() - y = le.fit_transform(y) - y = to_categorical(y, 5) - X_train_val, X_test, y_train_val, y_test = train_test_split(X, y, test_size=0.2, random_state=42) - scaler = StandardScaler() - X_train_val = scaler.fit_transform(X_train_val) - X_test = scaler.transform(X_test) - return X_train_val, X_test, 
y_train_val, y_test
-
-
-@pytest.fixture(scope='module')
-def load_jettagging_model():
-    '''
-    Load the 3 hidden layer QKeras example model trained on the jet tagging dataset
-    '''
-    model_path = example_model_path / 'keras/qkeras_3layer.json'
-    with model_path.open('r') as f:
-        jsons = f.read()
-    model = model_from_json(jsons, custom_objects=co)
-    model.load_weights(example_model_path / 'keras/qkeras_3layer_weights.h5')
-    return model
-
-
-# TODO - Parametrize for Quartus (different strategies?)
-@pytest.fixture
-@pytest.mark.parametrize('strategy', ['latency', 'resource'])
-def convert(load_jettagging_model, strategy):
-    '''
-    Convert a QKeras model trained on the jet tagging dataset
-    '''
-    model = load_jettagging_model
-
-    config = hls4ml.utils.config_from_keras_model(model, granularity='name')
-    config['Model']['Strategy'] = strategy
-    config['LayerName']['softmax']['exp_table_t'] = 'ap_fixed<18,8>'
-    config['LayerName']['softmax']['inv_table_t'] = 'ap_fixed<18,4>'
-    hls_model = hls4ml.converters.convert_from_keras_model(
-        model,
-        hls_config=config,
-        output_dir=str(test_root_path / f'hls4mlprj_qkeras_accuracy_{strategy}'),
-        part='xcu250-figd2104-2L-e',
-    )
-    hls_model.compile()
-    return hls_model
-
-
-@pytest.mark.parametrize('strategy', ['latency', 'resource'])
-def test_accuracy(convert, load_jettagging_model, get_jettagging_data, strategy):
-    '''
-    Test the hls4ml-evaluated accuracy of a 3 hidden layer QKeras model trained on
-    the jet tagging dataset. QKeras model accuracy is required to be over 70%, and
-    hls4ml accuracy is required to be within 1% of the QKeras model accuracy.
-    '''
-    print("Test accuracy")
-    from sklearn.metrics import accuracy_score
-
-    X_train_val, X_test, y_train_val, y_test = get_jettagging_data
-
-    hls_model = convert
-    model = load_jettagging_model
-
-    y_qkeras = model.predict(np.ascontiguousarray(X_test))
-    y_hls4ml = hls_model.predict(np.ascontiguousarray(X_test))
-
-    acc_qkeras = accuracy_score(np.argmax(y_test, axis=1), np.argmax(y_qkeras, axis=1))
-    acc_hls4ml = accuracy_score(np.argmax(y_test, axis=1), np.argmax(y_hls4ml, axis=1))
-    rel_diff = abs(acc_qkeras - acc_hls4ml) / acc_qkeras
-
-    print(f'Accuracy qkeras: {acc_qkeras}')
-    print(f'Accuracy hls4ml: {acc_hls4ml}')
-    print(f'Relative difference: {rel_diff}')
-
-    assert acc_qkeras > 0.7 and rel_diff < 0.01
-
-
-def randX(batch_size, N):
-    return np.random.rand(batch_size, N)
-
-
-@pytest.fixture(scope='module')
-def randX_100_16():
-    return randX(100, 16)
-
-
-# TODO: include wider bitwidths when that can be made to pass
-# Note 4-bit test can still fail sometimes depending on random seed
-# https://github.com/fastmachinelearning/hls4ml/issues/381
-# @pytest.mark.parametrize('bits', [4, 6, 8])
-@pytest.mark.parametrize('bits,alpha', [(4, 1), (4, 'auto_po2')])
-@pytest.mark.parametrize('backend', ['Vivado', 'Vitis', 'Quartus'])
-@pytest.mark.parametrize('io_type', ['io_parallel', 'io_stream'])
-def test_single_dense_activation_exact(randX_100_16, bits, alpha, backend, io_type):
-    '''
-    Test a single Dense -> Activation layer topology for
-    bit exactness with number of bits parameter
-    '''
-    X = randX_100_16
-    model = Sequential()
-    model.add(
-        QDense(
-            16,
-            input_shape=(16,),
-            name='fc1',
-            kernel_quantizer=quantized_bits(bits, 0, alpha=alpha),
-            bias_quantizer=quantized_bits(bits, 0, alpha=1),
-            kernel_initializer='lecun_uniform',
-        )
-    )
-    model.add(QActivation(activation=quantized_relu(bits, 0), name='relu1'))
-    model.compile()
-
-    config = hls4ml.utils.config_from_keras_model(model,
granularity='name') - output_dir = str(test_root_path / f'hls4mlprj_qkeras_single_dense_activation_exact_{bits}_{alpha}_{backend}_{io_type}') - hls_model = hls4ml.converters.convert_from_keras_model( - model, hls_config=config, output_dir=output_dir, backend=backend, io_type=io_type - ) - hls_model.compile() - - y_qkeras = model.predict(X) - y_hls4ml = hls_model.predict(X) - # Goal is to get it passing with all equal - # np.testing.assert_array_equal(y_qkeras, y_hls4ml) - # For now allow matching within 1 bit - np.testing.assert_allclose(y_qkeras.ravel(), y_hls4ml.ravel(), atol=2**-bits, rtol=1.0) - - -@pytest.fixture -def make_btnn(test_no, N, kernel_quantizer, bias_quantizer, activation_quantizer, use_batchnorm, is_xnor): - shape = (N,) - model = Sequential() - model.add(QDense(10, input_shape=shape, kernel_quantizer=kernel_quantizer, bias_quantizer=bias_quantizer, name='dense')) - if use_batchnorm: - model.add(BatchNormalization(name='bn')) - model.add(QActivation(activation=activation_quantizer)) - model.compile() - return model, is_xnor, test_no - - -@pytest.fixture(scope='module') -def randX_100_10(): - return randX(100, 10) - - -@pytest.mark.parametrize( - 'quantizer', [(quantized_tanh(8)), (quantized_sigmoid(5)), (quantized_sigmoid(7, use_real_sigmoid=True))] -) -@pytest.mark.parametrize('backend', ['Vivado', 'Quartus']) -@pytest.mark.parametrize('io_type', ['io_parallel', 'io_stream']) -def test_quantizer_special(randX_1000_1, quantizer, backend, io_type): - ''' - Test a single quantizer (tanh or sigmoid) as an Activation function. - Checks the type inference through the conversion is correct without just - using the same logic. - ''' - X = randX_1000_1 - X = np.round(X * 2**10) * 2**-10 # make it an exact ap_fixed<16,6> - model = Sequential() - model.add(QActivation(input_shape=(1,), activation=quantizer, name='quantizer')) - model.compile() - - config = hls4ml.utils.config_from_keras_model(model, granularity='name') - output_dir = str( - test_root_path / f'hls4mlprj_qkeras_quantizer_{quantizer.__class__.__name__}_{quantizer.bits}_{backend}_{io_type}' - ) - hls_model = hls4ml.converters.convert_from_keras_model( - model, hls_config=config, output_dir=output_dir, backend=backend, io_type=io_type - ) - hls_model.compile() - - y_qkeras = model.predict(X) - y_hls4ml = hls_model.predict(X) - # Goal is to get it passing with all equal - np.testing.assert_allclose(y_qkeras, y_hls4ml, rtol=1e-2, atol=0.02) - - -@pytest.mark.parametrize( - 'test_no,N,kernel_quantizer,bias_quantizer,activation_quantizer,use_batchnorm,is_xnor', - [ - (1, 10, ternary(alpha=1), quantized_bits(5, 2), 'binary_tanh', False, False), - (2, 10, binary(), quantized_bits(5, 2), 'binary_tanh', False, True), - (3, 10, ternary(alpha='auto'), quantized_bits(5, 2), binary(), True, True), - (4, 10, ternary(alpha='auto'), quantized_bits(5, 2), 'ternary', True, False), - (5, 10, ternary(alpha='auto'), quantized_bits(5, 2), ternary(threshold=0.2), True, False), - (6, 10, ternary(alpha='auto'), quantized_bits(5, 2), ternary(threshold=0.8), True, False), - (7, 10, binary(), quantized_bits(5, 2), binary(), False, True), - ], -) -@pytest.mark.parametrize('backend', ['Vivado', 'Vitis', 'Quartus']) -@pytest.mark.parametrize('io_type', ['io_parallel', 'io_stream']) -def test_btnn(make_btnn, randX_100_10, backend, io_type): - model, is_xnor, test_no = make_btnn - X = randX_100_10 - cfg = hls4ml.utils.config_from_keras_model(model, granularity='name', backend=backend) - output_dir = str(test_root_path / 
f'hls4mlprj_btnn_{test_no}_{backend}_{io_type}') - hls_model = hls4ml.converters.convert_from_keras_model( - model, output_dir=output_dir, hls_config=cfg, backend=backend, io_type=io_type - ) - hls_model.compile() - y_hls = hls_model.predict(X) - # hls4ml may return XNOR binary - if is_xnor: - y_hls = np.where(y_hls == 0, -1, 1) - y_ker = model.predict(X) - wrong = (y_hls != y_ker).ravel() - assert sum(wrong) / len(wrong) < 0.005 - - -@pytest.fixture(scope='module') -def randX_1000_1(): - return randX(1000, 1) - - -# TODO: include quantized_relu tests when they are made to pass -# https://github.com/fastmachinelearning/hls4ml/issues/377 -@pytest.mark.parametrize( - 'quantizer', - [ - (quantized_bits(8, 0)), - (quantized_bits(8, 4)), - (quantized_bits(4, 2)), - (quantized_bits(4, 0)), - (quantized_bits(10, 0)), - (quantized_relu(4)), - (quantized_relu(4, 2)), - (quantized_relu(8)), - (quantized_relu(8, 4)), - (quantized_relu(10)), - (quantized_relu(10, 5)), - ], -) -@pytest.mark.parametrize('backend', ['Vivado', 'Vitis', 'Quartus']) -@pytest.mark.parametrize('io_type', ['io_parallel', 'io_stream']) -def test_quantizer(randX_1000_1, quantizer, backend, io_type): - ''' - Test a single quantizer as an Activation function. - Checks the type inference through the conversion is correct without just - using the same logic. - ''' - X = randX_1000_1 - X = np.round(X * 2**10) * 2**-10 # make it an exact ap_fixed<16,6> - model = Sequential() - model.add(QActivation(input_shape=(1,), activation=quantizer, name='quantizer')) - model.compile() - - config = hls4ml.utils.config_from_keras_model(model, granularity='name') - output_dir = str( - test_root_path - / 'hls4mlprj_qkeras_quantizer_{}_{}_{}_{}_{}'.format( - quantizer.__class__.__name__, quantizer.bits, quantizer.integer, backend, io_type - ) - ) - hls_model = hls4ml.converters.convert_from_keras_model( - model, hls_config=config, output_dir=output_dir, backend=backend, io_type=io_type - ) - hls_model.compile() - - y_qkeras = model.predict(X) - y_hls4ml = hls_model.predict(X) - # Goal is to get it passing with all equal - np.testing.assert_array_equal(y_qkeras, y_hls4ml) - - -@pytest.mark.parametrize( - 'weight_quantizer,activation_quantizer,', - [ - ('binary', 'binary'), - ('ternary', 'ternary'), - ('quantized_bits(4, 0, alpha=1)', 'quantized_relu(2, 0)'), - ('quantized_bits(4, 0, alpha=1)', 'quantized_relu(4, 0)'), - ('quantized_bits(4, 0, alpha=1)', 'quantized_relu(8, 0)'), - ], -) -def test_qactivation_kwarg(randX_100_10, activation_quantizer, weight_quantizer): - if activation_quantizer in ['binary', 'ternary']: - name = 'bnbt_qdense_alpha' - else: - name = f'qdense_{eval(activation_quantizer).__class__.__name__}' - - inputs = Input(shape=(10,)) - - outputs = QDense( - 10, - activation=activation_quantizer, - name='qdense', - kernel_quantizer=weight_quantizer, - bias_quantizer=weight_quantizer, - kernel_initializer='lecun_uniform', - )(inputs) - model = Model(inputs, outputs) - - config = hls4ml.utils.config_from_keras_model(model, granularity='name') - - out_dir = str(test_root_path / f'hls4mlprj_qactivation_kwarg_{activation_quantizer}') - - hls_model = hls4ml.converters.convert_from_keras_model(model, hls_config=config, output_dir=out_dir) - hls_model.compile() - - # Verify if activation in hls_model - assert name in [layer.name for layer in hls_model.get_layers()] - - # Output tests - X = randX_100_10 - X = np.round(X * 2**10) * 2**-10 - y_qkeras = model.predict(X) - y_hls4ml = hls_model.predict(X) - if hasattr(eval(activation_quantizer), 
'bits'):
-        np.testing.assert_allclose(
-            y_qkeras.ravel(), y_hls4ml.ravel(), atol=2 ** -(eval(activation_quantizer).bits - 1), rtol=1.0
-        )
-    else:
-        if activation_quantizer == 'binary':
-            y_hls4ml = np.where(y_hls4ml == 0, -1, 1)
-        wrong = (y_hls4ml != y_qkeras).ravel()
-        assert sum(wrong) / len(wrong) <= 0.005
-
-
-@pytest.fixture(scope='module')
-def randX_100_8_8_1():
-    return np.random.rand(100, 8, 8, 1)
-
-
-@pytest.mark.parametrize('backend', ['Vivado', 'Vitis', 'Quartus'])
-@pytest.mark.parametrize('io_type', ['io_parallel', 'io_stream'])
-def test_qconv2dbn(randX_100_8_8_1, backend, io_type):
-    '''
-    Test proper handling of QConv2DBatchnorm.
-    '''
-    X = randX_100_8_8_1
-    X = np.round(X * 2**10) * 2**-10  # make it an exact ap_fixed<16,6>
-    model = Sequential()
-    model.add(
-        QConv2DBatchnorm(
-            4,
-            kernel_size=(3, 3),
-            input_shape=(8, 8, 1),
-            kernel_quantizer='quantized_bits(8, 0, alpha=1)',
-            kernel_initializer='ones',
-            bias_quantizer='quantized_bits(8, 0, alpha=1)',
-            bias_initializer='zeros',
-            activation='quantized_relu(8, 0)',
-        )
-    )
-    model.compile()
-
-    config = hls4ml.utils.config_from_keras_model(model, granularity='name', default_precision='fixed<24,8>')
-    output_dir = str(test_root_path / f'hls4mlprj_qkeras_qconv2dbn_{backend}_{io_type}')
-    hls_model = hls4ml.converters.convert_from_keras_model(
-        model, hls_config=config, output_dir=output_dir, backend=backend, io_type=io_type
-    )
-    hls_model.compile()
-
-    y_qkeras = model.predict(X)
-    y_hls4ml = hls_model.predict(X)
-
-    np.testing.assert_array_equal(y_qkeras, y_hls4ml.reshape(y_qkeras.shape))
-
-
-@pytest.fixture(scope='module')
-def randX_10_32_32_3():
-    return np.random.rand(10, 32, 32, 3)
-
-
-# Currently only Vivado and Vitis are supported for io_stream.
-# Note: QKeras only supports the 2d version of depthwise convolution.
-@pytest.mark.parametrize('backend', ['Vivado', 'Vitis'])
-@pytest.mark.parametrize('io_type', ['io_stream'])
-def test_qdepthwiseconv2d(randX_10_32_32_3, backend, io_type):
-    '''
-    Test proper handling of QDepthwiseConv2D.
- ''' - X = randX_10_32_32_3 - X = np.round(X * 2**10) * 2**-10 # make it an exact ap_fixed<16,6> - model = Sequential() - model.add( - QDepthwiseConv2D( - kernel_size=(3, 3), - input_shape=(32, 32, 3), - depthwise_quantizer='quantized_bits(6, 0, alpha=1)', - bias_quantizer='quantized_bits(4, 0, alpha=1)', - bias_initializer='he_normal', - activation='quantized_relu(3, 0)', - ) - ) - model.compile() - - config = hls4ml.utils.config_from_keras_model(model, granularity='name', default_precision='fixed<24,8>') - output_dir = str(test_root_path / f'hls4mlprj_qkeras_qdepthwiseconv2d_{backend}_{io_type}') - hls_model = hls4ml.converters.convert_from_keras_model( - model, hls_config=config, output_dir=output_dir, backend=backend, io_type=io_type - ) - hls_model.compile() - - y_qkeras = model.predict(X) - y_hls4ml = hls_model.predict(X) - - np.testing.assert_allclose(y_qkeras, y_hls4ml.reshape(y_qkeras.shape), rtol=1e-2, atol=0.01) - - -@pytest.mark.parametrize('backend', ['Vivado', 'Vitis', 'Quartus']) -@pytest.mark.parametrize('io_type', ['io_parallel', 'io_stream']) -@pytest.mark.parametrize('strategy', ['Latency', 'Resource']) -def test_quantised_po2_bit_width(backend, io_type, strategy): - input_shape = 26 - output_shape = 6 - X = np.random.rand(100, input_shape) - - # Set a high bit-width, so that we ensure HLS doesn't allocate 2**bits for the multiplication - # The biggest allowed bit-width in Vivado HLS is 65,536 (2**16) - keras_model = Sequential() - keras_model.add( - QDense(output_shape, input_shape=(input_shape,), name='dense', kernel_quantizer=quantized_po2(18, max_value=2**20)) - ) - - # Set weights to same high random number - weights = keras_model.layers[0].get_weights() - weights[0] = (2**18) * np.random.rand(input_shape, output_shape) - keras_model.layers[0].set_weights(weights) - - # Assert output is the same and bit-width is not over-allocated [it would throw a run-time error] - keras_model.compile() - y_keras = keras_model.predict(X) - - hls_config = hls4ml.utils.config_from_keras_model( - keras_model, granularity='name', default_precision='ap_fixed<64, 32>', default_reuse_factor=1 - ) - hls_config['Model']['Strategy'] = strategy - output_dir = str(test_root_path / f'hls4mlprj_qkeras_quantised_po2_{backend}_{io_type}_{strategy}') - hls_model = hls4ml.converters.convert_from_keras_model( - keras_model, hls_config=hls_config, output_dir=output_dir, backend=backend, io_type=io_type - ) - hls_model.compile() - y_hls = hls_model.predict(np.ascontiguousarray(X)) - - np.testing.assert_allclose(y_hls.flatten(), y_keras.flatten(), rtol=2e-2) - - -@pytest.mark.parametrize('backend', ['Vivado', 'Vitis']) -@pytest.mark.parametrize('io_type', ['io_stream']) -def test_qseparableconv1d(backend, io_type): - ''' - Test proper handling of QSeparableConv1D. 
-    '''
-    x_in = Input((13, 20), name='input_layer')
-    x = QSeparableConv1D(
-        5,
-        3,
-        depthwise_quantizer=quantized_bits(8, 3, alpha=1),
-        pointwise_quantizer=quantized_bits(8, 3, alpha=1),
-        bias_quantizer=quantized_bits(8, 3, alpha=1),
-        name='qsepconv_1',
-    )(x_in)
-    model = Model(inputs=x_in, outputs=x)
-
-    config = hls4ml.utils.config_from_keras_model(
-        model, granularity='name', backend=backend, default_precision='fixed<23,7>'
-    )
-
-    # Use 8 bits for input
-    config['LayerName']['input_layer']['Precision']['result'] = 'fixed<8,1>'
-    # default_precision will be used for accum_t and result_t of the conv layer, so we don't need to set them here
-    # We need <15,4> for the result of the depthwise step
-    config['LayerName']['qsepconv_1']['Precision']['dw_output'] = 'fixed<15,4>'
-
-    output_dir = str(test_root_path / f'hls4mlprj_qsepconv1d_{backend}_{io_type}')
-    hls_model = hls4ml.converters.convert_from_keras_model(
-        model,
-        hls_config=config,
-        output_dir=output_dir,
-        io_type=io_type,
-    )
-    hls_model.compile()
-
-    data = np.random.rand(100, 13, 20)
-    input_quantizer = quantized_bits(8, 0, alpha=1)
-    dataq = input_quantizer(data).numpy()
-
-    y_qkeras = model.predict(dataq)
-    y_hls4ml = hls_model.predict(dataq)
-
-    np.testing.assert_allclose(y_qkeras, y_hls4ml.reshape(y_qkeras.shape), rtol=0, atol=0)
-
-
-@pytest.mark.parametrize('backend', ['Vivado', 'Vitis'])
-@pytest.mark.parametrize('io_type', ['io_stream'])
-def test_qseparableconv2d(backend, io_type):
-    '''
-    Test proper handling of QSeparableConv2D.
-    '''
-    x_in = Input((13, 21, 20), name='input_layer')
-    x = QSeparableConv2D(
-        5,
-        3,
-        depthwise_quantizer=quantized_bits(8, 3, alpha=1),
-        pointwise_quantizer=quantized_bits(8, 3, alpha=1),
-        bias_quantizer=quantized_bits(8, 3, alpha=1),
-        name='qsepconv_1',
-    )(x_in)
-    model = Model(inputs=x_in, outputs=x)
-
-    config = hls4ml.utils.config_from_keras_model(
-        model, granularity='name', backend=backend, default_precision='fixed<23,7>'
-    )
-
-    # Use 8 bits for input
-    config['LayerName']['input_layer']['Precision']['result'] = 'fixed<8,1>'
-    # default_precision will be used for accum_t and result_t of the conv layer, so we don't need to set them here
-    # We need <15,4> for the result of the depthwise step
-    config['LayerName']['qsepconv_1']['Precision']['dw_output'] = 'fixed<15,4>'
-
-    output_dir = str(test_root_path / f'hls4mlprj_qsepconv2d_{backend}_{io_type}')
-    hls_model = hls4ml.converters.convert_from_keras_model(
-        model,
-        hls_config=config,
-        output_dir=output_dir,
-        io_type=io_type,
-    )
-    hls_model.compile()
-
-    data = np.random.rand(100, 13, 21, 20)
-    input_quantizer = quantized_bits(8, 0, alpha=1)
-    dataq = input_quantizer(data).numpy()
-
-    y_qkeras = model.predict(dataq)
-    y_hls4ml = hls_model.predict(dataq)
-
-    np.testing.assert_allclose(y_qkeras, y_hls4ml.reshape(y_qkeras.shape), rtol=0, atol=0)
diff --git a/hls4ml/test/pytest/test_report.py b/hls4ml/test/pytest/test_report.py
deleted file mode 100644
index 5181dbe..0000000
--- a/hls4ml/test/pytest/test_report.py
+++ /dev/null
@@ -1,76 +0,0 @@
-import os
-import shutil
-from pathlib import Path
-
-import pytest
-from tensorflow.keras.layers import Dense
-from tensorflow.keras.models import Sequential
-
-import hls4ml
-
-test_root_path = Path(__file__).parent
-
-
-@pytest.mark.parametrize('backend', ['Vivado'])
-def test_report(backend, capsys):
-    model = Sequential()
-    model.add(Dense(5, input_shape=(16,), name='fc1', activation='relu'))
-
-    config = hls4ml.utils.config_from_keras_model(model, granularity='model')
-
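-    # [Editor's aside, added for clarity; not in the original file] In a real run the
-    # reports parsed below would come from an actual build, e.g.:
-    #     report = hls_model.build(synth=True, vsynth=True)
-    #     hls4ml.report.print_vivado_report(report)
-    # This test instead copies pre-generated report files into the project tree so
-    # that parse_vivado_report() can be exercised without a Vivado HLS installation.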
- output_dir = str(test_root_path / f'hls4mlprj_report_{backend}') - test_report_dir = test_root_path / 'test_report' - - hls_model = hls4ml.converters.convert_from_keras_model( - model, io_type='io_stream', hls_config=config, output_dir=output_dir, part='xc7z020clg400-1', backend=backend - ) - hls_model.write() - - # to actually generate the reports (using Vivado 2020.1) - # hls_model.build(synth=True, vsynth=True) - - # copy pregenerated reports - os.makedirs(f'{output_dir}/myproject_prj/solution1/syn/report', exist_ok=True) - shutil.copy(test_report_dir / 'vivado_hls.app', f'{output_dir}/myproject_prj/vivado_hls.app') - shutil.copy( - test_report_dir / 'myproject_csynth.rpt', f'{output_dir}/myproject_prj/solution1/syn/report/myproject_csynth.rpt' - ) - shutil.copy( - test_report_dir / 'myproject_csynth.xml', f'{output_dir}/myproject_prj/solution1/syn/report/myproject_csynth.xml' - ) - shutil.copy(test_report_dir / 'vivado_synth.rpt', f'{output_dir}/vivado_synth.rpt') - - report = hls4ml.report.parse_vivado_report(output_dir) # or report = hls_model.build(...) - - capsys.readouterr() # capture to clear - hls4ml.report.print_vivado_report(report) - captured = capsys.readouterr() # capture again to test - - assert ( - captured.out - == '\n' - + '======================================================\n' - + '== C Synthesis report\n' - + '======================================================\n\n' - + ' - Performance estimates:\n' - + ' Best-case latency: 10 (50.0 ns)\n' - + ' Worst-case latency: 10 (50.0 ns)\n' - + ' Interval Min: 8\n' - + ' Interval Max: 8\n' - + ' Estimated Clock Period: 4.049\n\n' - + ' - Resource estimates:\n' - + ' BRAM_18K: 0 / 280 (0.0%)\n' - + ' DSP: 73 / 220 (33.2%)\n' - + ' FF: 7969 / 106400 (7.5%)\n' - + ' LUT: 2532 / 53200 (4.8%)\n' - + ' URAM: N/A\n\n' - + '======================================================\n' - + '== Vivado Synthesis report\n' - + '======================================================\n\n' - + ' - Resource utilization:\n' - + ' BRAM_18K: 0\n' - + ' DSP48E: 66\n' - + ' FF: 2428\n' - + ' LUT: 1526\n' - + ' URAM: N/A\n\n' - ) diff --git a/hls4ml/test/pytest/test_report/myproject_csynth.rpt b/hls4ml/test/pytest/test_report/myproject_csynth.rpt deleted file mode 100644 index 8354501..0000000 --- a/hls4ml/test/pytest/test_report/myproject_csynth.rpt +++ /dev/null @@ -1,196 +0,0 @@ - - -================================================================ -== Vivado HLS Report for 'myproject' -================================================================ -* Date: Sat Mar 18 22:59:37 2023 - -* Version: 2020.1 (Build 2897737 on Wed May 27 20:21:37 MDT 2020) -* Project: myproject_prj -* Solution: solution1 -* Product family: zynq -* Target device: xc7z020-clg400-1 - - -================================================================ -== Performance Estimates -================================================================ -+ Timing: - * Summary: - +--------+---------+----------+------------+ - | Clock | Target | Estimated| Uncertainty| - +--------+---------+----------+------------+ - |ap_clk | 5.00 ns | 4.049 ns | 0.62 ns | - +--------+---------+----------+------------+ - -+ Latency: - * Summary: - +---------+---------+-----------+-----------+-----+-----+----------+ - | Latency (cycles) | Latency (absolute) | Interval | Pipeline | - | min | max | min | max | min | max | Type | - +---------+---------+-----------+-----------+-----+-----+----------+ - | 10| 10| 50.000 ns | 50.000 ns | 8| 8| dataflow | - 
+---------+---------+-----------+-----------+-----+-----+----------+ - - + Detail: - * Instance: - +-----------------------------------------------------+----------------------------------------------------+---------+---------+-----------+-----------+-----+-----+----------+ - | | | Latency (cycles) | Latency (absolute) | Interval | Pipeline | - | Instance | Module | min | max | min | max | min | max | Type | - +-----------------------------------------------------+----------------------------------------------------+---------+---------+-----------+-----------+-----+-----+----------+ - |dense_array_array_ap_fixed_16_6_5_3_0_5u_config2_U0 |dense_array_array_ap_fixed_16_6_5_3_0_5u_config2_s | 7| 7| 35.000 ns | 35.000 ns | 7| 7| none | - |relu_array_array_ap_fixed_5u_relu_config3_U0 |relu_array_array_ap_fixed_5u_relu_config3_s | 2| 2| 10.000 ns | 10.000 ns | 1| 1| function | - +-----------------------------------------------------+----------------------------------------------------+---------+---------+-----------+-----------+-----+-----+----------+ - - * Loop: - N/A - - - -================================================================ -== Utilization Estimates -================================================================ -* Summary: -+-----------------+---------+-------+--------+-------+-----+ -| Name | BRAM_18K| DSP48E| FF | LUT | URAM| -+-----------------+---------+-------+--------+-------+-----+ -|DSP | -| -| -| -| -| -|Expression | -| -| 0| 2| -| -|FIFO | 0| -| 25| 140| -| -|Instance | 0| 73| 7944| 2390| -| -|Memory | -| -| -| -| -| -|Multiplexer | -| -| -| -| -| -|Register | -| -| -| -| -| -+-----------------+---------+-------+--------+-------+-----+ -|Total | 0| 73| 7969| 2532| 0| -+-----------------+---------+-------+--------+-------+-----+ -|Available | 280| 220| 106400| 53200| 0| -+-----------------+---------+-------+--------+-------+-----+ -|Utilization (%) | 0| 33| 7| 4| 0| -+-----------------+---------+-------+--------+-------+-----+ - -+ Detail: - * Instance: - +-----------------------------------------------------+----------------------------------------------------+---------+-------+------+------+-----+ - | Instance | Module | BRAM_18K| DSP48E| FF | LUT | URAM| - +-----------------------------------------------------+----------------------------------------------------+---------+-------+------+------+-----+ - |dense_array_array_ap_fixed_16_6_5_3_0_5u_config2_U0 |dense_array_array_ap_fixed_16_6_5_3_0_5u_config2_s | 0| 73| 7860| 2134| 0| - |relu_array_array_ap_fixed_5u_relu_config3_U0 |relu_array_array_ap_fixed_5u_relu_config3_s | 0| 0| 84| 256| 0| - +-----------------------------------------------------+----------------------------------------------------+---------+-------+------+------+-----+ - |Total | | 0| 73| 7944| 2390| 0| - +-----------------------------------------------------+----------------------------------------------------+---------+-------+------+------+-----+ - - * DSP48E: - N/A - - * Memory: - N/A - - * FIFO: - +-------------------------+---------+---+----+-----+------+-----+---------+ - | Name | BRAM_18K| FF| LUT| URAM| Depth| Bits| Size:D*B| - +-------------------------+---------+---+----+-----+------+-----+---------+ - |layer2_out_V_data_0_V_U | 0| 5| 0| -| 1| 16| 16| - |layer2_out_V_data_1_V_U | 0| 5| 0| -| 1| 16| 16| - |layer2_out_V_data_2_V_U | 0| 5| 0| -| 1| 16| 16| - |layer2_out_V_data_3_V_U | 0| 5| 0| -| 1| 16| 16| - |layer2_out_V_data_4_V_U | 0| 5| 0| -| 1| 16| 16| - +-------------------------+---------+---+----+-----+------+-----+---------+ - 
|Total | 0| 25| 0| 0| 5| 80| 80| - +-------------------------+---------+---+----+-----+------+-----+---------+ - - * Expression: - +--------------+----------+-------+---+----+------------+------------+ - | Variable Name| Operation| DSP48E| FF| LUT| Bitwidth P0| Bitwidth P1| - +--------------+----------+-------+---+----+------------+------------+ - |ap_idle | and | 0| 0| 2| 1| 1| - +--------------+----------+-------+---+----+------------+------------+ - |Total | | 0| 0| 2| 1| 1| - +--------------+----------+-------+---+----+------------+------------+ - - * Multiplexer: - N/A - - * Register: - N/A - - - -================================================================ -== Interface -================================================================ -* Summary: -+------------------------------+-----+-----+------------+-----------------------+--------------+ -| RTL Ports | Dir | Bits| Protocol | Source Object | C Type | -+------------------------------+-----+-----+------------+-----------------------+--------------+ -|fc1_input_V_data_0_V_TDATA | in | 16| axis | fc1_input_V_data_0_V | pointer | -|fc1_input_V_data_0_V_TVALID | in | 1| axis | fc1_input_V_data_0_V | pointer | -|fc1_input_V_data_0_V_TREADY | out | 1| axis | fc1_input_V_data_0_V | pointer | -|fc1_input_V_data_1_V_TDATA | in | 16| axis | fc1_input_V_data_1_V | pointer | -|fc1_input_V_data_1_V_TVALID | in | 1| axis | fc1_input_V_data_1_V | pointer | -|fc1_input_V_data_1_V_TREADY | out | 1| axis | fc1_input_V_data_1_V | pointer | -|fc1_input_V_data_2_V_TDATA | in | 16| axis | fc1_input_V_data_2_V | pointer | -|fc1_input_V_data_2_V_TVALID | in | 1| axis | fc1_input_V_data_2_V | pointer | -|fc1_input_V_data_2_V_TREADY | out | 1| axis | fc1_input_V_data_2_V | pointer | -|fc1_input_V_data_3_V_TDATA | in | 16| axis | fc1_input_V_data_3_V | pointer | -|fc1_input_V_data_3_V_TVALID | in | 1| axis | fc1_input_V_data_3_V | pointer | -|fc1_input_V_data_3_V_TREADY | out | 1| axis | fc1_input_V_data_3_V | pointer | -|fc1_input_V_data_4_V_TDATA | in | 16| axis | fc1_input_V_data_4_V | pointer | -|fc1_input_V_data_4_V_TVALID | in | 1| axis | fc1_input_V_data_4_V | pointer | -|fc1_input_V_data_4_V_TREADY | out | 1| axis | fc1_input_V_data_4_V | pointer | -|fc1_input_V_data_5_V_TDATA | in | 16| axis | fc1_input_V_data_5_V | pointer | -|fc1_input_V_data_5_V_TVALID | in | 1| axis | fc1_input_V_data_5_V | pointer | -|fc1_input_V_data_5_V_TREADY | out | 1| axis | fc1_input_V_data_5_V | pointer | -|fc1_input_V_data_6_V_TDATA | in | 16| axis | fc1_input_V_data_6_V | pointer | -|fc1_input_V_data_6_V_TVALID | in | 1| axis | fc1_input_V_data_6_V | pointer | -|fc1_input_V_data_6_V_TREADY | out | 1| axis | fc1_input_V_data_6_V | pointer | -|fc1_input_V_data_7_V_TDATA | in | 16| axis | fc1_input_V_data_7_V | pointer | -|fc1_input_V_data_7_V_TVALID | in | 1| axis | fc1_input_V_data_7_V | pointer | -|fc1_input_V_data_7_V_TREADY | out | 1| axis | fc1_input_V_data_7_V | pointer | -|fc1_input_V_data_8_V_TDATA | in | 16| axis | fc1_input_V_data_8_V | pointer | -|fc1_input_V_data_8_V_TVALID | in | 1| axis | fc1_input_V_data_8_V | pointer | -|fc1_input_V_data_8_V_TREADY | out | 1| axis | fc1_input_V_data_8_V | pointer | -|fc1_input_V_data_9_V_TDATA | in | 16| axis | fc1_input_V_data_9_V | pointer | -|fc1_input_V_data_9_V_TVALID | in | 1| axis | fc1_input_V_data_9_V | pointer | -|fc1_input_V_data_9_V_TREADY | out | 1| axis | fc1_input_V_data_9_V | pointer | -|fc1_input_V_data_10_V_TDATA | in | 16| axis | fc1_input_V_data_10_V | pointer | -|fc1_input_V_data_10_V_TVALID | in 
| 1| axis | fc1_input_V_data_10_V | pointer | -|fc1_input_V_data_10_V_TREADY | out | 1| axis | fc1_input_V_data_10_V | pointer | -|fc1_input_V_data_11_V_TDATA | in | 16| axis | fc1_input_V_data_11_V | pointer | -|fc1_input_V_data_11_V_TVALID | in | 1| axis | fc1_input_V_data_11_V | pointer | -|fc1_input_V_data_11_V_TREADY | out | 1| axis | fc1_input_V_data_11_V | pointer | -|fc1_input_V_data_12_V_TDATA | in | 16| axis | fc1_input_V_data_12_V | pointer | -|fc1_input_V_data_12_V_TVALID | in | 1| axis | fc1_input_V_data_12_V | pointer | -|fc1_input_V_data_12_V_TREADY | out | 1| axis | fc1_input_V_data_12_V | pointer | -|fc1_input_V_data_13_V_TDATA | in | 16| axis | fc1_input_V_data_13_V | pointer | -|fc1_input_V_data_13_V_TVALID | in | 1| axis | fc1_input_V_data_13_V | pointer | -|fc1_input_V_data_13_V_TREADY | out | 1| axis | fc1_input_V_data_13_V | pointer | -|fc1_input_V_data_14_V_TDATA | in | 16| axis | fc1_input_V_data_14_V | pointer | -|fc1_input_V_data_14_V_TVALID | in | 1| axis | fc1_input_V_data_14_V | pointer | -|fc1_input_V_data_14_V_TREADY | out | 1| axis | fc1_input_V_data_14_V | pointer | -|fc1_input_V_data_15_V_TDATA | in | 16| axis | fc1_input_V_data_15_V | pointer | -|fc1_input_V_data_15_V_TVALID | in | 1| axis | fc1_input_V_data_15_V | pointer | -|fc1_input_V_data_15_V_TREADY | out | 1| axis | fc1_input_V_data_15_V | pointer | -|layer3_out_V_data_0_V_TDATA | out | 16| axis | layer3_out_V_data_0_V | pointer | -|layer3_out_V_data_0_V_TVALID | out | 1| axis | layer3_out_V_data_0_V | pointer | -|layer3_out_V_data_0_V_TREADY | in | 1| axis | layer3_out_V_data_0_V | pointer | -|layer3_out_V_data_1_V_TDATA | out | 16| axis | layer3_out_V_data_1_V | pointer | -|layer3_out_V_data_1_V_TVALID | out | 1| axis | layer3_out_V_data_1_V | pointer | -|layer3_out_V_data_1_V_TREADY | in | 1| axis | layer3_out_V_data_1_V | pointer | -|layer3_out_V_data_2_V_TDATA | out | 16| axis | layer3_out_V_data_2_V | pointer | -|layer3_out_V_data_2_V_TVALID | out | 1| axis | layer3_out_V_data_2_V | pointer | -|layer3_out_V_data_2_V_TREADY | in | 1| axis | layer3_out_V_data_2_V | pointer | -|layer3_out_V_data_3_V_TDATA | out | 16| axis | layer3_out_V_data_3_V | pointer | -|layer3_out_V_data_3_V_TVALID | out | 1| axis | layer3_out_V_data_3_V | pointer | -|layer3_out_V_data_3_V_TREADY | in | 1| axis | layer3_out_V_data_3_V | pointer | -|layer3_out_V_data_4_V_TDATA | out | 16| axis | layer3_out_V_data_4_V | pointer | -|layer3_out_V_data_4_V_TVALID | out | 1| axis | layer3_out_V_data_4_V | pointer | -|layer3_out_V_data_4_V_TREADY | in | 1| axis | layer3_out_V_data_4_V | pointer | -|ap_clk | in | 1| ap_ctrl_hs | myproject | return value | -|ap_rst_n | in | 1| ap_ctrl_hs | myproject | return value | -|ap_start | in | 1| ap_ctrl_hs | myproject | return value | -|ap_done | out | 1| ap_ctrl_hs | myproject | return value | -|ap_ready | out | 1| ap_ctrl_hs | myproject | return value | -|ap_idle | out | 1| ap_ctrl_hs | myproject | return value | -+------------------------------+-----+-----+------------+-----------------------+--------------+ - diff --git a/hls4ml/test/pytest/test_report/myproject_csynth.xml b/hls4ml/test/pytest/test_report/myproject_csynth.xml deleted file mode 100644 index 711a5ec..0000000 --- a/hls4ml/test/pytest/test_report/myproject_csynth.xml +++ /dev/null @@ -1,878 +0,0 @@ - - - -2020.1 - - - -ns -zynq -xc7z020-clg400-1 -myproject -5.00 -0.62 - - - -dataflow - -ns -4.049 - - -clock cycles -10 -10 -10 -50.000 ns -50.000 ns -50.000 ns -8 -8 -8 - - - - - -0 -73 -7969 -2532 -0 - - -280 -220 -106400 
-53200 -0 - - - - - -fc1_input_V_data_0_V_TDATA -fc1_input_V_data_0_V -pointer - -axis -register, both mode -in -16 -data -int - - -fc1_input_V_data_0_V_TVALID -fc1_input_V_data_0_V -pointer - -axis -register, both mode -in -1 -control -int - - -fc1_input_V_data_0_V_TREADY -fc1_input_V_data_0_V -pointer - -axis -register, both mode -out -1 -control -int - - -fc1_input_V_data_1_V_TDATA -fc1_input_V_data_1_V -pointer - -axis -register, both mode -in -16 -data -int - - -fc1_input_V_data_1_V_TVALID -fc1_input_V_data_1_V -pointer - -axis -register, both mode -in -1 -control -int - - -fc1_input_V_data_1_V_TREADY -fc1_input_V_data_1_V -pointer - -axis -register, both mode -out -1 -control -int - - -fc1_input_V_data_2_V_TDATA -fc1_input_V_data_2_V -pointer - -axis -register, both mode -in -16 -data -int - - -fc1_input_V_data_2_V_TVALID -fc1_input_V_data_2_V -pointer - -axis -register, both mode -in -1 -control -int - - -fc1_input_V_data_2_V_TREADY -fc1_input_V_data_2_V -pointer - -axis -register, both mode -out -1 -control -int - - -fc1_input_V_data_3_V_TDATA -fc1_input_V_data_3_V -pointer - -axis -register, both mode -in -16 -data -int - - -fc1_input_V_data_3_V_TVALID -fc1_input_V_data_3_V -pointer - -axis -register, both mode -in -1 -control -int - - -fc1_input_V_data_3_V_TREADY -fc1_input_V_data_3_V -pointer - -axis -register, both mode -out -1 -control -int - - -fc1_input_V_data_4_V_TDATA -fc1_input_V_data_4_V -pointer - -axis -register, both mode -in -16 -data -int - - -fc1_input_V_data_4_V_TVALID -fc1_input_V_data_4_V -pointer - -axis -register, both mode -in -1 -control -int - - -fc1_input_V_data_4_V_TREADY -fc1_input_V_data_4_V -pointer - -axis -register, both mode -out -1 -control -int - - -fc1_input_V_data_5_V_TDATA -fc1_input_V_data_5_V -pointer - -axis -register, both mode -in -16 -data -int - - -fc1_input_V_data_5_V_TVALID -fc1_input_V_data_5_V -pointer - -axis -register, both mode -in -1 -control -int - - -fc1_input_V_data_5_V_TREADY -fc1_input_V_data_5_V -pointer - -axis -register, both mode -out -1 -control -int - - -fc1_input_V_data_6_V_TDATA -fc1_input_V_data_6_V -pointer - -axis -register, both mode -in -16 -data -int - - -fc1_input_V_data_6_V_TVALID -fc1_input_V_data_6_V -pointer - -axis -register, both mode -in -1 -control -int - - -fc1_input_V_data_6_V_TREADY -fc1_input_V_data_6_V -pointer - -axis -register, both mode -out -1 -control -int - - -fc1_input_V_data_7_V_TDATA -fc1_input_V_data_7_V -pointer - -axis -register, both mode -in -16 -data -int - - -fc1_input_V_data_7_V_TVALID -fc1_input_V_data_7_V -pointer - -axis -register, both mode -in -1 -control -int - - -fc1_input_V_data_7_V_TREADY -fc1_input_V_data_7_V -pointer - -axis -register, both mode -out -1 -control -int - - -fc1_input_V_data_8_V_TDATA -fc1_input_V_data_8_V -pointer - -axis -register, both mode -in -16 -data -int - - -fc1_input_V_data_8_V_TVALID -fc1_input_V_data_8_V -pointer - -axis -register, both mode -in -1 -control -int - - -fc1_input_V_data_8_V_TREADY -fc1_input_V_data_8_V -pointer - -axis -register, both mode -out -1 -control -int - - -fc1_input_V_data_9_V_TDATA -fc1_input_V_data_9_V -pointer - -axis -register, both mode -in -16 -data -int - - -fc1_input_V_data_9_V_TVALID -fc1_input_V_data_9_V -pointer - -axis -register, both mode -in -1 -control -int - - -fc1_input_V_data_9_V_TREADY -fc1_input_V_data_9_V -pointer - -axis -register, both mode -out -1 -control -int - - -fc1_input_V_data_10_V_TDATA -fc1_input_V_data_10_V -pointer - -axis -register, both mode -in -16 -data -int - - 
-fc1_input_V_data_10_V_TVALID -fc1_input_V_data_10_V -pointer - -axis -register, both mode -in -1 -control -int - - -fc1_input_V_data_10_V_TREADY -fc1_input_V_data_10_V -pointer - -axis -register, both mode -out -1 -control -int - - -fc1_input_V_data_11_V_TDATA -fc1_input_V_data_11_V -pointer - -axis -register, both mode -in -16 -data -int - - -fc1_input_V_data_11_V_TVALID -fc1_input_V_data_11_V -pointer - -axis -register, both mode -in -1 -control -int - - -fc1_input_V_data_11_V_TREADY -fc1_input_V_data_11_V -pointer - -axis -register, both mode -out -1 -control -int - - -fc1_input_V_data_12_V_TDATA -fc1_input_V_data_12_V -pointer - -axis -register, both mode -in -16 -data -int - - -fc1_input_V_data_12_V_TVALID -fc1_input_V_data_12_V -pointer - -axis -register, both mode -in -1 -control -int - - -fc1_input_V_data_12_V_TREADY -fc1_input_V_data_12_V -pointer - -axis -register, both mode -out -1 -control -int - - -fc1_input_V_data_13_V_TDATA -fc1_input_V_data_13_V -pointer - -axis -register, both mode -in -16 -data -int - - -fc1_input_V_data_13_V_TVALID -fc1_input_V_data_13_V -pointer - -axis -register, both mode -in -1 -control -int - - -fc1_input_V_data_13_V_TREADY -fc1_input_V_data_13_V -pointer - -axis -register, both mode -out -1 -control -int - - -fc1_input_V_data_14_V_TDATA -fc1_input_V_data_14_V -pointer - -axis -register, both mode -in -16 -data -int - - -fc1_input_V_data_14_V_TVALID -fc1_input_V_data_14_V -pointer - -axis -register, both mode -in -1 -control -int - - -fc1_input_V_data_14_V_TREADY -fc1_input_V_data_14_V -pointer - -axis -register, both mode -out -1 -control -int - - -fc1_input_V_data_15_V_TDATA -fc1_input_V_data_15_V -pointer - -axis -register, both mode -in -16 -data -int - - -fc1_input_V_data_15_V_TVALID -fc1_input_V_data_15_V -pointer - -axis -register, both mode -in -1 -control -int - - -fc1_input_V_data_15_V_TREADY -fc1_input_V_data_15_V -pointer - -axis -register, both mode -out -1 -control -int - - -layer3_out_V_data_0_V_TDATA -layer3_out_V_data_0_V -pointer - -axis -register, both mode -out -16 -data -int - - -layer3_out_V_data_0_V_TVALID -layer3_out_V_data_0_V -pointer - -axis -register, both mode -out -1 -control -int - - -layer3_out_V_data_0_V_TREADY -layer3_out_V_data_0_V -pointer - -axis -register, both mode -in -1 -control -int - - -layer3_out_V_data_1_V_TDATA -layer3_out_V_data_1_V -pointer - -axis -register, both mode -out -16 -data -int - - -layer3_out_V_data_1_V_TVALID -layer3_out_V_data_1_V -pointer - -axis -register, both mode -out -1 -control -int - - -layer3_out_V_data_1_V_TREADY -layer3_out_V_data_1_V -pointer - -axis -register, both mode -in -1 -control -int - - -layer3_out_V_data_2_V_TDATA -layer3_out_V_data_2_V -pointer - -axis -register, both mode -out -16 -data -int - - -layer3_out_V_data_2_V_TVALID -layer3_out_V_data_2_V -pointer - -axis -register, both mode -out -1 -control -int - - -layer3_out_V_data_2_V_TREADY -layer3_out_V_data_2_V -pointer - -axis -register, both mode -in -1 -control -int - - -layer3_out_V_data_3_V_TDATA -layer3_out_V_data_3_V -pointer - -axis -register, both mode -out -16 -data -int - - -layer3_out_V_data_3_V_TVALID -layer3_out_V_data_3_V -pointer - -axis -register, both mode -out -1 -control -int - - -layer3_out_V_data_3_V_TREADY -layer3_out_V_data_3_V -pointer - -axis -register, both mode -in -1 -control -int - - -layer3_out_V_data_4_V_TDATA -layer3_out_V_data_4_V -pointer - -axis -register, both mode -out -16 -data -int - - -layer3_out_V_data_4_V_TVALID -layer3_out_V_data_4_V -pointer - -axis -register, both 
mode
-out
-1
-control
-int
-
-
-layer3_out_V_data_4_V_TREADY
-layer3_out_V_data_4_V
-pointer
-
-axis
-register, both mode
-in
-1
-control
-int
-
-
-ap_clk
-myproject
-return value
-
-ap_ctrl_hs
-
-in
-1
-control
-
-
-ap_rst_n
-myproject
-return value
-
-ap_ctrl_hs
-
-in
-1
-control
-
-
-ap_start
-myproject
-return value
-
-ap_ctrl_hs
-
-in
-1
-control
-
-
-ap_done
-myproject
-return value
-
-ap_ctrl_hs
-
-out
-1
-control
-
-
-ap_ready
-myproject
-return value
-
-ap_ctrl_hs
-
-out
-1
-control
-
-
-ap_idle
-myproject
-return value
-
-ap_ctrl_hs
-
-out
-1
-control
-
-
-
-
diff --git a/hls4ml/test/pytest/test_report/vivado_hls.app b/hls4ml/test/pytest/test_report/vivado_hls.app
deleted file mode 100644
index c57b8a4..0000000
--- a/hls4ml/test/pytest/test_report/vivado_hls.app
+++ /dev/null
@@ -1,15 +0,0 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
diff --git a/hls4ml/test/pytest/test_report/vivado_synth.rpt b/hls4ml/test/pytest/test_report/vivado_synth.rpt
deleted file mode 100644
index 971f3e5..0000000
--- a/hls4ml/test/pytest/test_report/vivado_synth.rpt
+++ /dev/null
@@ -1,184 +0,0 @@
-Copyright 1986-2020 Xilinx, Inc. All Rights Reserved.
-------------------------------------------------------------------------------------
-| Tool Version : Vivado v.2020.1 (lin64) Build 2902540 Wed May 27 19:54:35 MDT 2020
-| Date         : Sun Mar 19 07:48:36 2023
-| Host         : mulder.t2.ucsd.edu running 64-bit unknown
-| Command      : report_utilization -file vivado_synth.rpt
-| Design       : myproject
-| Device       : 7z020clg400-1
-| Design State : Synthesized
-------------------------------------------------------------------------------------
-
-Utilization Design Information
-
-Table of Contents
------------------
-1. Slice Logic
-1.1 Summary of Registers by Type
-2. Memory
-3. DSP
-4. IO and GT Specific
-5. Clocking
-6. Specific Feature
-7. Primitives
-8. Black Boxes
-9. Instantiated Netlists
-
-1. Slice Logic
---------------
-
-+----------------------------+------+-------+-----------+-------+
-| Site Type                  | Used | Fixed | Available | Util% |
-+----------------------------+------+-------+-----------+-------+
-| Slice LUTs*                | 1526 | 0     | 53200     | 2.87  |
-| LUT as Logic               | 1478 | 0     | 53200     | 2.78  |
-| LUT as Memory              | 48   | 0     | 17400     | 0.28  |
-| LUT as Distributed RAM     | 0    | 0     |           |       |
-| LUT as Shift Register      | 48   | 0     |           |       |
-| Slice Registers            | 2428 | 0     | 106400    | 2.28  |
-| Register as Flip Flop      | 2428 | 0     | 106400    | 2.28  |
-| Register as Latch          | 0    | 0     | 106400    | 0.00  |
-| F7 Muxes                   | 0    | 0     | 26600     | 0.00  |
-| F8 Muxes                   | 0    | 0     | 13300     | 0.00  |
-+----------------------------+------+-------+-----------+-------+
-* Warning! The Final LUT count, after physical optimizations and full implementation, is typically lower. Run opt_design after synthesis, if not already completed, for a more realistic count.
-
-
-1.1 Summary of Registers by Type
---------------------------------
-
-+-------+--------------+-------------+--------------+
-| Total | Clock Enable | Synchronous | Asynchronous |
-+-------+--------------+-------------+--------------+
-| 0     | _            | -           | -            |
-| 0     | _            | -           | Set          |
-| 0     | _            | -           | Reset        |
-| 0     | _            | Set         | -            |
-| 0     | _            | Reset       | -            |
-| 0     | Yes          | -           | -            |
-| 0     | Yes          | -           | Set          |
-| 0     | Yes          | -           | Reset        |
-| 18    | Yes          | Set         | -            |
-| 2410  | Yes          | Reset       | -            |
-+-------+--------------+-------------+--------------+
-
-
-2. Memory
----------
-
-+----------------+------+-------+-----------+-------+
-| Site Type      | Used | Fixed | Available | Util% |
-+----------------+------+-------+-----------+-------+
-| Block RAM Tile | 0    | 0     | 140       | 0.00  |
-| RAMB36/FIFO*   | 0    | 0     | 140       | 0.00  |
-| RAMB18         | 0    | 0     | 280       | 0.00  |
-+----------------+------+-------+-----------+-------+
-* Note: Each Block RAM Tile only has one FIFO logic available and therefore can accommodate only one FIFO36E1 or one FIFO18E1. However, if a FIFO18E1 occupies a Block RAM Tile, that tile can still accommodate a RAMB18E1
-
-
-3. DSP
-------
-
-+----------------+------+-------+-----------+-------+
-| Site Type      | Used | Fixed | Available | Util% |
-+----------------+------+-------+-----------+-------+
-| DSPs           | 66   | 0     | 220       | 30.00 |
-| DSP48E1 only   | 66   |       |           |       |
-+----------------+------+-------+-----------+-------+
-
-
-4. IO and GT Specific
----------------------
-
-+-----------------------------+------+-------+-----------+--------+
-| Site Type                   | Used | Fixed | Available | Util%  |
-+-----------------------------+------+-------+-----------+--------+
-| Bonded IOB                  | 384  | 0     | 125       | 307.20 |
-| Bonded IPADs                | 0    | 0     | 2         | 0.00   |
-| Bonded IOPADs               | 0    | 0     | 130       | 0.00   |
-| PHY_CONTROL                 | 0    | 0     | 4         | 0.00   |
-| PHASER_REF                  | 0    | 0     | 4         | 0.00   |
-| OUT_FIFO                    | 0    | 0     | 16        | 0.00   |
-| IN_FIFO                     | 0    | 0     | 16        | 0.00   |
-| IDELAYCTRL                  | 0    | 0     | 4         | 0.00   |
-| IBUFDS                      | 0    | 0     | 121       | 0.00   |
-| PHASER_OUT/PHASER_OUT_PHY   | 0    | 0     | 16        | 0.00   |
-| PHASER_IN/PHASER_IN_PHY     | 0    | 0     | 16        | 0.00   |
-| IDELAYE2/IDELAYE2_FINEDELAY | 0    | 0     | 200       | 0.00   |
-| ILOGIC                      | 0    | 0     | 125       | 0.00   |
-| OLOGIC                      | 0    | 0     | 125       | 0.00   |
-+-----------------------------+------+-------+-----------+--------+
-
-
-5. Clocking
------------
-
-+------------+------+-------+-----------+-------+
-| Site Type  | Used | Fixed | Available | Util% |
-+------------+------+-------+-----------+-------+
-| BUFGCTRL   | 1    | 0     | 32        | 3.13  |
-| BUFIO      | 0    | 0     | 16        | 0.00  |
-| MMCME2_ADV | 0    | 0     | 4         | 0.00  |
-| PLLE2_ADV  | 0    | 0     | 4         | 0.00  |
-| BUFMRCE    | 0    | 0     | 8         | 0.00  |
-| BUFHCE     | 0    | 0     | 72        | 0.00  |
-| BUFR       | 0    | 0     | 16        | 0.00  |
-+------------+------+-------+-----------+-------+
-
-
-6. Specific Feature
--------------------
-
-+-------------+------+-------+-----------+-------+
-| Site Type   | Used | Fixed | Available | Util% |
-+-------------+------+-------+-----------+-------+
-| BSCANE2     | 0    | 0     | 4         | 0.00  |
-| CAPTUREE2   | 0    | 0     | 1         | 0.00  |
-| DNA_PORT    | 0    | 0     | 1         | 0.00  |
-| EFUSE_USR   | 0    | 0     | 1         | 0.00  |
-| FRAME_ECCE2 | 0    | 0     | 1         | 0.00  |
-| ICAPE2      | 0    | 0     | 2         | 0.00  |
-| STARTUPE2   | 0    | 0     | 1         | 0.00  |
-| XADC        | 0    | 0     | 1         | 0.00  |
-+-------------+------+-------+-----------+-------+
-
-
-7. Primitives
--------------
-
-+----------+------+---------------------+
-| Ref Name | Used | Functional Category |
-+----------+------+---------------------+
-| FDRE     | 2410 | Flop & Latch        |
-| LUT2     | 864  | LUT                 |
-| LUT3     | 671  | LUT                 |
-| LUT4     | 499  | LUT                 |
-| CARRY4   | 295  | CarryLogic          |
-| IBUF     | 280  | IO                  |
-| OBUF     | 104  | IO                  |
-| DSP48E1  | 66   | Block Arithmetic    |
-| LUT1     | 63   | LUT                 |
-| SRL16E   | 48   | Distributed Memory  |
-| LUT5     | 43   | LUT                 |
-| LUT6     | 34   | LUT                 |
-| FDSE     | 18   | Flop & Latch        |
-| BUFG     | 1    | Clock               |
-+----------+------+---------------------+
-
-
-8. Black Boxes
---------------
-
-+----------+------+
-| Ref Name | Used |
-+----------+------+
-
-
-9. Instantiated Netlists
--------------------------
-
-+----------+------+
-| Ref Name | Used |
-+----------+------+
-
-
diff --git a/hls4ml/test/pytest/test_reshape.py b/hls4ml/test/pytest/test_reshape.py
deleted file mode 100755
index 3c421c1..0000000
--- a/hls4ml/test/pytest/test_reshape.py
+++ /dev/null
@@ -1,49 +0,0 @@
-""" Test that reshape is properly handled by optimizers.
-"""
-
-from pathlib import Path
-
-import numpy as np
-import pytest
-import tensorflow as tf
-
-import hls4ml
-
-test_root_path = Path(__file__).parent
-
-
-def randX(batch_size, N):
-    return np.random.rand(batch_size, N)
-
-
-@pytest.fixture(scope='module')
-def randX_20_10():
-    return randX(20, 10)
-
-
-@pytest.mark.parametrize('backend', ['Vivado', 'Quartus'])
-@pytest.mark.parametrize('io_type', ['io_parallel', 'io_stream'])
-def test_reshape_parallel(randX_20_10, backend, io_type):
-    model = tf.keras.models.Sequential(
-        [
-            tf.keras.layers.Input(shape=(10,)),
-            tf.keras.layers.Dense(10 * 3),
-            tf.keras.layers.Reshape((10, 3)),
-            tf.keras.layers.ReLU(),
-        ]
-    )
-    model.compile(optimizer='adam', loss='mse')
-    config = hls4ml.utils.config_from_keras_model(model)
-    prj_name = f'hls4mlprj_reshape_{backend}_{io_type}'
-    output_dir = str(test_root_path / prj_name)
-    hls_model = hls4ml.converters.convert_from_keras_model(
-        model, hls_config=config, output_dir=output_dir, io_type=io_type, backend=backend
-    )
-    hls_model.compile()
-
-    X = randX_20_10
-    y_keras = model.predict(X)
-    y_hls4ml = hls_model.predict(X)
-
-    # check that the values are close
-    np.testing.assert_allclose(y_keras.ravel(), y_hls4ml.ravel(), atol=0.02)
diff --git a/hls4ml/test/pytest/test_rnn.py b/hls4ml/test/pytest/test_rnn.py
deleted file mode 100644
index 3e6e978..0000000
--- a/hls4ml/test/pytest/test_rnn.py
+++ /dev/null
@@ -1,126 +0,0 @@
-from pathlib import Path
-
-import numpy as np
-import pytest
-from tensorflow.keras.layers import GRU, LSTM, Input, SimpleRNN
-from tensorflow.keras.models import Model, Sequential
-
-import hls4ml
-
-test_root_path = Path(__file__).parent
-
-rnn_layers = [SimpleRNN, LSTM, GRU]
-
-
-@pytest.mark.parametrize('rnn_layer', rnn_layers)
-@pytest.mark.parametrize('return_sequences', [True, False])
-def test_rnn_parsing(rnn_layer, return_sequences):
-    time_steps = 3
-    input_size = 8
-    input_shape = (time_steps, input_size)
-
-    model_input = Input(shape=input_shape)
-    model_output = rnn_layer(64, return_sequences=return_sequences)(model_input)
-
-    model = Model(model_input, model_output)
-    model.compile(optimizer='adam', loss='mse')
-
-    config = hls4ml.utils.config_from_keras_model(model, granularity='name')
-    prj_name = f'hls4mlprj_rnn_{rnn_layer.__name__.lower()}_seq_{int(return_sequences)}'
-    output_dir = str(test_root_path / prj_name)
-    hls_model = hls4ml.converters.convert_from_keras_model(model, hls_config=config, output_dir=output_dir)
-
-    hls_layer = list(hls_model.get_layers())[1]  # 0 is input, 1 is the RNN layer
-    keras_layer = model.layers[1]
-
-    # Basic sanity check, I/O, activations
-    assert hls_layer.class_name == rnn_layer.__name__
-    assert hls_layer.attributes['n_out'] == keras_layer.units
-    assert hls_layer.attributes['activation'] == keras_layer.activation.__name__
-    if 'recurrent_activation' in hls_layer.attributes:  # SimpleRNN doesn't have this
-        assert hls_layer.attributes['recurrent_activation'] == keras_layer.recurrent_activation.__name__
-    assert hls_layer.get_input_variable().shape == list(input_shape)
-    assert hls_layer.get_output_variable().shape == model_output.shape.as_list()[1:]  # Ignore the batch size
-
-    # Compare weights
-    hls_weights = list(hls_layer.get_weights())  # [weights, recurrent_weights, bias, recurrent_bias]
-    rnn_weights = keras_layer.get_weights()  # [weights, recurrent_weights, bias]
-
-    assert hls_weights[0].data.shape == rnn_weights[0].shape
-    assert hls_weights[1].data.shape == rnn_weights[1].shape
-    if 'gru' in rnn_layer.__name__.lower():
-        # GRU has both bias and recurrent bias
-        assert hls_weights[2].data.shape == rnn_weights[2][0].shape
-        assert hls_weights[3].data.shape == rnn_weights[2][1].shape
-    else:
-        # LSTM and SimpleRNN only have bias
-        assert hls_weights[2].data.shape == rnn_weights[2].shape
-
-    np.testing.assert_array_equal(hls_weights[0].data, rnn_weights[0])
-    np.testing.assert_array_equal(hls_weights[1].data, rnn_weights[1])
-    if 'gru' in rnn_layer.__name__.lower():
-        np.testing.assert_array_equal(hls_weights[2].data, rnn_weights[2][0])
-        np.testing.assert_array_equal(hls_weights[3].data, rnn_weights[2][1])
-    else:
-        np.testing.assert_array_equal(hls_weights[2].data, rnn_weights[2])
-
-
-@pytest.mark.parametrize(
-    'rnn_layer, backend, io_type',
-    [
-        (SimpleRNN, 'Quartus', 'io_parallel'),
-        (LSTM, 'Vivado', 'io_parallel'),
-        (LSTM, 'Vitis', 'io_parallel'),
-        (LSTM, 'Quartus', 'io_parallel'),
-        (LSTM, 'Vivado', 'io_stream'),
-        (LSTM, 'Vitis', 'io_stream'),
-        (GRU, 'Vivado', 'io_parallel'),
-        (GRU, 'Vivado', 'io_stream'),
-        (GRU, 'Vitis', 'io_parallel'),
-        (GRU, 'Vitis', 'io_stream'),
-        (GRU, 'Quartus', 'io_parallel'),
-        (GRU, 'Quartus', 'io_stream'),
-    ],
-)
-@pytest.mark.parametrize('return_sequences', [True, False])
-@pytest.mark.parametrize('static', [True, False])
-@pytest.mark.parametrize('strategy', ['latency', 'resource'])
-def test_rnn_accuracy(rnn_layer, return_sequences, backend, io_type, strategy, static):
-    # Subtract 0.5 to include negative values
-    input_shape = (12, 8)
-    X = np.random.rand(50, *input_shape) - 0.5
-
-    layer_name = rnn_layer.__name__.lower()
-    keras_model = Sequential()
-    keras_model.add(
-        rnn_layer(
-            units=32,
-            input_shape=input_shape,
-            kernel_initializer='lecun_uniform',
-            recurrent_initializer='lecun_uniform',
-            bias_initializer='lecun_uniform',
-            return_sequences=return_sequences,
-            name=layer_name,
-        )
-    )
-    keras_model.compile()
-
-    default_precision = 'ap_fixed<32, 16>' if backend in ['Vivado', 'Vitis'] else 'ac_fixed<32, 16, true>'
-    hls_config = hls4ml.utils.config_from_keras_model(
-        keras_model, granularity='name', default_precision=default_precision, backend=backend
-    )
-    hls_config['LayerName'][layer_name]['static'] = static
-    hls_config['LayerName'][layer_name]['Strategy'] = strategy
-    prj_name = 'hls4mlprj_rnn_accuracy_{}_static_{}_ret_seq_{}_{}_{}_{}'.format(
-        rnn_layer.__name__.lower(), int(static), int(return_sequences), backend, io_type, strategy
-    )
-    output_dir = str(test_root_path / prj_name)
-
-    hls_model = hls4ml.converters.convert_from_keras_model(
-        keras_model, hls_config=hls_config, output_dir=output_dir, backend=backend, io_type=io_type
-    )
-    hls_model.compile()
-
-    keras_prediction = keras_model.predict(X)
-    hls_prediction = hls_model.predict(X)
-    np.testing.assert_allclose(hls_prediction.flatten(), keras_prediction.flatten(), rtol=0.0, atol=5e-2)
diff --git a/hls4ml/test/pytest/test_sepconv2d.py b/hls4ml/test/pytest/test_sepconv2d.py
deleted file mode 100644
index 1ce85c5..0000000
--- a/hls4ml/test/pytest/test_sepconv2d.py
+++ /dev/null
@@ -1,63 +0,0 @@
-from pathlib import Path
-
-import numpy as np
-import pytest
-import tensorflow as tf
-from tensorflow.keras.layers import SeparableConv2D
-
-import hls4ml
-
-test_root_path = Path(__file__).parent
-
-keras_conv2d = [SeparableConv2D]
-padds_options = ['same', 'valid']
-chans_options = ['channels_last']
-io_type_options = ['io_stream']
-strides_options = [(1, 1), (2, 2)]
-kernel_options = [(2, 2), (3, 3)]
-bias_options = [False]
-
-
-@pytest.mark.parametrize("conv2d", keras_conv2d)
-@pytest.mark.parametrize("chans", chans_options)
-@pytest.mark.parametrize("padds", padds_options)
-@pytest.mark.parametrize("strides", strides_options)
-@pytest.mark.parametrize("kernels", kernel_options)
-@pytest.mark.parametrize("bias", bias_options)
-@pytest.mark.parametrize("io_type", io_type_options)
-@pytest.mark.parametrize('backend', ['Vivado', 'Vitis'])
-def test_sepconv2d(conv2d, chans, padds, strides, kernels, bias, io_type, backend):
-    model = tf.keras.models.Sequential()
-    input_shape = (28, 28, 3)
-    model.add(
-        conv2d(
-            filters=32,
-            kernel_size=kernels,
-            strides=strides,
-            padding=padds,
-            input_shape=input_shape,
-            kernel_initializer='normal',
-            use_bias=bias,
-            data_format=chans,
-        )
-    )
-
-    model.compile(optimizer='adam', loss='mse')
-    X_input = np.random.rand(100, *input_shape)
-    keras_prediction = model.predict(X_input)
-    config = hls4ml.utils.config_from_keras_model(model, default_precision='ap_fixed<32,16>')
-    stride_cfg = str(strides).replace(', ', '_').replace('(', '').replace(')', '')
-    kernel_cfg = str(kernels).replace(', ', '_').replace('(', '').replace(')', '')
-    output_dir = str(
-        test_root_path
-        / 'hls4mlprj_{}_{}_strides_{}_kernels_{}_{}_padding_{}_{}'.format(
-            conv2d.__name__.lower(), chans, stride_cfg, kernel_cfg, padds, backend, io_type
-        )
-    )
-    hls_model = hls4ml.converters.convert_from_keras_model(
-        model, hls_config=config, output_dir=output_dir, io_type=io_type, backend=backend
-    )
-    hls_model.compile()
-    hls_prediction = hls_model.predict(X_input).reshape(keras_prediction.shape)
-
-    np.testing.assert_allclose(hls_prediction, keras_prediction, rtol=0, atol=0.001)
diff --git a/hls4ml/test/pytest/test_sequential_parsing_pytorch.py b/hls4ml/test/pytest/test_sequential_parsing_pytorch.py
deleted file mode 100644
index 569c6a5..0000000
--- a/hls4ml/test/pytest/test_sequential_parsing_pytorch.py
+++ /dev/null
@@ -1,83 +0,0 @@
-from collections import OrderedDict
-from pathlib import Path
-
-import pytest
-import torch.nn as nn
-
-from hls4ml.converters import convert_from_pytorch_model
-from hls4ml.utils.config import config_from_pytorch_model
-
-test_root_path = Path(__file__).parent
-
-# Model with unnamed Sequential and no named layers
-seq_unnamed = nn.Sequential(nn.Conv2d(1, 20, 5), nn.ReLU(), nn.Conv2d(20, 64, 5), nn.ReLU())
-
-# Model with unnamed Sequential and named layers
-seq_named = nn.Sequential(
-    OrderedDict(
-        [('conv_1', nn.Conv2d(1, 20, 5)), ('relu_1', nn.ReLU()), ('conv_2', nn.Conv2d(20, 64, 5)), ('relu_2', nn.ReLU())]
-    )
-)
-
-
-# Model with named Sequential and no named layers
-class SeqModelUnnamedLayers(nn.Module):
-    def __init__(self):
-        super().__init__()
-        self.layer = nn.Sequential(nn.Conv2d(1, 20, 5), nn.ReLU(), nn.Conv2d(20, 64, 5), nn.ReLU())
-
-    def forward(self, x):
-        output = self.layer(x)
-        return output
-
-
-# Model with named Sequential and named layers
-class SeqModelNamedLayers(nn.Module):
-    def __init__(self):
-        super().__init__()
-        self.layer = nn.Sequential(
-            OrderedDict(
-                [
-                    ('conv_1', nn.Conv2d(1, 20, 5)),
-                    ('relu_1', nn.ReLU()),
-                    ('conv_2', nn.Conv2d(20, 64, 5)),
-                    ('relu_2', nn.ReLU()),
-                ]
-            )
-        )
-
-    def forward(self, x):
-        output = self.layer(x)
-        return output
-
-
-@pytest.mark.parametrize('backend', ['Vivado'])
-@pytest.mark.parametrize('io_type', ['io_parallel'])
-@pytest.mark.parametrize('named_layers', [True, False])
-def test_unnamed_seq(backend, io_type, named_layers):
-    if named_layers:
-        model = seq_named
-    else:
-        model = seq_unnamed
-    config = config_from_pytorch_model(model)
-    output_dir = str(test_root_path / f'hls4mlprj_pytorch_seq_unnamed_{backend}_{io_type}_{named_layers}')
-
-    convert_from_pytorch_model(
-        model, (None, 1, 5, 5), hls_config=config, output_dir=output_dir, backend=backend, io_type=io_type
-    )
-
-
-@pytest.mark.parametrize('backend', ['Vivado'])
-@pytest.mark.parametrize('io_type', ['io_parallel'])
-@pytest.mark.parametrize('named_layers', [True, False])
-def test_named_seq(backend, io_type, named_layers):
-    if named_layers:
-        model = SeqModelNamedLayers()
-    else:
-        model = SeqModelUnnamedLayers()
-    config = config_from_pytorch_model(model)
-    output_dir = str(test_root_path / f'hls4mlprj_pytorch_seq_named_{backend}_{io_type}_{named_layers}')
-
-    convert_from_pytorch_model(
-        model, (None, 1, 5, 5), hls_config=config, output_dir=output_dir, backend=backend, io_type=io_type
-    )
diff --git a/hls4ml/test/pytest/test_softmax.py b/hls4ml/test/pytest/test_softmax.py
deleted file mode 100644
index 1568063..0000000
--- a/hls4ml/test/pytest/test_softmax.py
+++ /dev/null
@@ -1,98 +0,0 @@
-from pathlib import Path
-
-import numpy as np
-import pytest
-import tensorflow as tf
-from sklearn.metrics import accuracy_score
-
-import hls4ml
-
-test_root_path = Path(__file__).parent
-
-
-def flat_distribution(shape):
-    return np.random.rand(*shape)
-
-
-def high_accuracy_distribution(shape):
-    '''Start with a flat distribution, then pick a random member of each row to amplify'''
-    x = np.random.rand(*shape)
-    imax = np.random.randint(0, shape[1], size=shape[0])
-    x[np.arange(shape[0]), imax] *= 10
-    return x
-
-
-@pytest.fixture()
-def generate_data(function, input_shape):
-    return function((1000, *input_shape))
-
-
-@pytest.mark.parametrize('backend', ['Vivado', 'Vitis', 'Quartus'])
-@pytest.mark.parametrize('strategy', ['stable', 'argmax'])
-@pytest.mark.parametrize(
-    'function,input_shape,io_type',
-    [
-        (flat_distribution, (8,), 'io_parallel'),
-        (high_accuracy_distribution, (8,), 'io_parallel'),
-        (flat_distribution, (8,), 'io_stream'),
-        (high_accuracy_distribution, (8,), 'io_stream'),
-        (flat_distribution, (8, 8, 3), 'io_stream'),
-        (high_accuracy_distribution, (8, 8, 3), 'io_stream'),
-    ],
-)
-def test_softmax(backend, strategy, generate_data, input_shape, io_type, function):
-    X = generate_data
-    model = tf.keras.models.Sequential()
-    model.add(tf.keras.layers.Activation(input_shape=input_shape, activation='softmax', name='softmax'))
-    model.compile()
-
-    f_type = 'ac_fixed<18,8,true,AC_RND,AC_SAT>' if backend == 'Quartus' else 'ap_fixed<18,8,AP_RND,AP_SAT>'
-    cfg = hls4ml.utils.config_from_keras_model(model, granularity='name')
-    cfg['LayerName']['softmax']['Strategy'] = strategy
-    cfg['LayerName']['softmax']['inv_table_t'] = f_type
-    cfg['LayerName']['softmax']['exp_table_t'] = f_type
-
-    odir = str(test_root_path / 'hls4mlprj_softmax_{}_{}_{}_{}_{}').format(
-        backend, io_type, strategy, function.__name__, str(input_shape)
-    )
-    hls_model = hls4ml.converters.convert_from_keras_model(
-        model, hls_config=cfg, io_type=io_type, output_dir=odir, backend=backend
-    )
-    hls_model.compile()
-
-    y_keras = model.predict(X)
-    y_hls4ml = hls_model.predict(X).reshape(y_keras.shape)
-    acc_hls4ml = accuracy_score(np.argmax(y_keras, axis=-1).ravel(), np.argmax(y_hls4ml, axis=-1).ravel())
-
-    print(f'Accuracy hls4ml relative to keras: {acc_hls4ml}')
-
-    assert acc_hls4ml >= 0.98
-
-
-@pytest.mark.parametrize('backend', ['Vivado', 'Vitis', 'Quartus'])
-@pytest.mark.parametrize('io_type', ['io_parallel', 'io_stream'])
-def test_softmax_skipped(backend, io_type):
-    X = np.random.rand(100, 10)
-    model = tf.keras.models.Sequential()
-    model.add(tf.keras.layers.Dense(14, input_shape=(10,), name='dense'))
-    model.add(tf.keras.layers.Activation(activation='softmax', name='softmax'))
-    model.compile()
-
-    cfg = hls4ml.utils.config_from_keras_model(model, granularity='name')
-    cfg['LayerName']['softmax']['skip'] = True
-
-    odir = str(test_root_path / 'hls4mlprj_softmax_skipped_{}_{}').format(backend, io_type)
-    hls_model = hls4ml.converters.convert_from_keras_model(
-        model, hls_config=cfg, io_type=io_type, output_dir=odir, backend=backend
-    )
-    hls_model.compile()
-
-    # Verify Softmax was removed
-    hls_layers = list(hls_model.get_layers())  # 0 is Input, 1 is Dense, 2 is Softmax (if not removed)
-    assert len(hls_layers) == 2
-
-    # Verify hls4ml output is equal to Dense output
-    y_keras = model.predict(X)
-    y_hls4ml = hls_model.predict(X).reshape(y_keras.shape)
-    keras_trace = hls4ml.model.profiling.get_ymodel_keras(model, X)
-    np.testing.assert_allclose(y_hls4ml, keras_trace['dense'], rtol=0, atol=2e-2)
diff --git a/hls4ml/test/pytest/test_softsign.py b/hls4ml/test/pytest/test_softsign.py
deleted file mode 100644
index a23e89e..0000000
--- a/hls4ml/test/pytest/test_softsign.py
+++ /dev/null
@@ -1,33 +0,0 @@
-from pathlib import Path
-
-import numpy as np
-import pytest
-import tensorflow as tf
-from sklearn.metrics import accuracy_score
-
-import hls4ml
-
-test_root_path = Path(__file__).parent
-
-
-@pytest.mark.parametrize('backend', ['Vivado', 'Vitis', 'Quartus'])
-@pytest.mark.parametrize('input_shape, io_type', [((8,), 'io_parallel'), ((8,), 'io_stream'), ((8, 8, 3), 'io_stream')])
-def test_softsign(backend, input_shape, io_type):
-    X = np.random.rand(1000, *input_shape)
-    model = tf.keras.models.Sequential()
-    model.add(tf.keras.layers.Activation(input_shape=input_shape, activation='softsign', name='softsign'))
-    model.compile()
-
-    cfg = hls4ml.utils.config_from_keras_model(model, granularity='name')
-    odir = str(test_root_path / f'hls4mlprj_softsign_{backend}_{io_type}_{str(input_shape)}')
-    hls_model = hls4ml.converters.convert_from_keras_model(
-        model, hls_config=cfg, io_type=io_type, output_dir=odir, backend=backend
-    )
-    hls_model.compile()
-
-    y_keras = model.predict(X)
-    y_hls4ml = hls_model.predict(X).reshape(y_keras.shape)
-    acc_hls4ml = accuracy_score(np.argmax(y_keras, axis=-1).ravel(), np.argmax(y_hls4ml, axis=-1).ravel())
-
-    print(f'Accuracy hls4ml relative to keras: {acc_hls4ml}')
-    assert acc_hls4ml >= 0.96
diff --git a/hls4ml/test/pytest/test_sr.py b/hls4ml/test/pytest/test_sr.py
deleted file mode 100644
index e4c922c..0000000
--- a/hls4ml/test/pytest/test_sr.py
+++ /dev/null
@@ -1,71 +0,0 @@
-from pathlib import Path
-
-import numpy as np
-import pytest
-
-import hls4ml
-
-test_root_path = Path(__file__).parent
-
-
-@pytest.fixture(scope='module')
-def data():
-    X = 2 * np.random.rand(100, 5)
-    y = 2.5382 * np.cos(X[:, 3]) + X[:, 0] ** 2 - 0.5
-
-    return X, y
-
-
-def test_hlssr(data):
-    expr = 'x0**2 + 2.5382*cos_lut(x3) - 0.5'
-
-    lut_functions = {'cos_lut': {'math_func': 'cos', 'range_start': -4, 'range_end': 4, 'table_size': 2048}}
-
-    output_dir = str(test_root_path / 'hls4mlprj_sr')
-
-    hls_model = hls4ml.converters.convert_from_symbolic_expression(
-        expr,
-        n_symbols=5,
-        precision='ap_fixed<18,6>',
-        output_dir=output_dir,
-        lut_functions=lut_functions,
-        hls_include_path='',
-        hls_libs_path='',
-    )
-    hls_model.write()
-    hls_model.compile()
-
-    X, y = data
-    y_hls = hls_model.predict(X)
-    y_hls = y_hls.reshape(y.shape)
-
-    np.testing.assert_allclose(y, y_hls, rtol=1e-2, atol=1e-2, verbose=True)
-
-
-def test_pysr_luts(data):
-    try:
-        from pysr import PySRRegressor
-    except ImportError:
-        pytest.skip('Failed to import PySR, test will be skipped.')
-
-    function_definitions = ['cos_lut(x) = math_lut(cos, x, N=1024, range_start=-4, range_end=4)']
-    hls4ml.utils.symbolic_utils.init_pysr_lut_functions(init_defaults=True, function_definitions=function_definitions)
-
-    model = PySRRegressor(
-        model_selection='best',  # Result is mix of simplicity+accuracy
-        niterations=10,
-        binary_operators=['+', '*'],
-        unary_operators=[
-            'cos_lut',
-        ],
-        loss='loss(x, y) = (x - y)^2',
-        temp_equation_file=True,
-    )
-
-    X, y = data
-
-    model.fit(X, y)
-
-    eq = str(model.sympy())
-
-    assert 'cos_lut' in eq
diff --git a/hls4ml/test/pytest/test_trace.py b/hls4ml/test/pytest/test_trace.py
deleted file mode 100644
index 14e218f..0000000
--- a/hls4ml/test/pytest/test_trace.py
+++ /dev/null
@@ -1,56 +0,0 @@
-from pathlib import Path
-
-import numpy as np
-import pytest
-import tensorflow as tf
-from tensorflow.keras.layers import Activation, Dense
-
-import hls4ml
-import hls4ml.model.profiling
-
-test_root_path = Path(__file__).parent
-
-
-@pytest.mark.parametrize('backend', ['Vivado', 'Vitis', 'Quartus'])
-@pytest.mark.parametrize('activation', ['relu', None])
-def test_trace(backend, activation):
-    '''Test the tracing feature with a simple Keras model.'''
-    model = tf.keras.models.Sequential()
-    model.add(
-        Dense(
-            2,
-            input_shape=(1,),
-            name='Dense',
-            activation=activation,
-            use_bias=True,
-            kernel_initializer=tf.keras.initializers.RandomUniform(minval=1, maxval=10),
-            bias_initializer='zeros',
-            kernel_regularizer=None,
-            bias_regularizer=None,
-            activity_regularizer=None,
-            kernel_constraint=None,
-            bias_constraint=None,
-        )
-    )
-    model.add(Activation(activation='elu', name='Activation'))
-    model.compile(optimizer='adam', loss='mse')
-
-    X_input = np.random.rand(100, 1)
-
-    keras_prediction = model.predict(X_input)
-
-    config = hls4ml.utils.config_from_keras_model(model, granularity='name')
-    for layer in config['LayerName'].keys():
-        config['LayerName'][layer]['Trace'] = True
-
-    output_dir = str(test_root_path / f'hls4mlprj_trace_{backend}')
-
-    hls_model = hls4ml.converters.convert_from_keras_model(model, hls_config=config, output_dir=output_dir, backend=backend)
-
-    hls_model.compile()
-    hls4ml_pred, hls4ml_trace = hls_model.trace(X_input)
-    keras_trace = hls4ml.model.profiling.get_ymodel_keras(model, X_input)
-    assert keras_trace.keys() == hls4ml_trace.keys()
-    for key in hls4ml_trace.keys():
-        np.testing.assert_allclose(hls4ml_trace[key], keras_trace[key], rtol=1e-2, atol=0.01)
-    np.testing.assert_allclose(hls4ml_pred, keras_prediction, rtol=1e-2, atol=0.01)
diff --git a/hls4ml/test/pytest/test_transpose_concat.py b/hls4ml/test/pytest/test_transpose_concat.py
deleted file mode 100644
index db3e031..0000000
--- a/hls4ml/test/pytest/test_transpose_concat.py
+++ /dev/null
@@ -1,56 +0,0 @@
-from pathlib import Path
-
-import numpy as np
-import pytest
-from tensorflow.keras.layers import Activation, Concatenate, Input, Permute
-from tensorflow.keras.models import Model
-
-import hls4ml
-
-test_root_path = Path(__file__).parent
-
-
-@pytest.fixture(scope='module')
-def data():
-    X = np.random.rand(100, 2, 3)
-    return X
-
-
-@pytest.fixture(scope='module')
-def keras_model():
-    inp = Input(shape=(2, 3), name='input_1')
-    x = Permute((2, 1))(inp)
-    y = Concatenate(axis=1)([x, x])
-    x = Activation('relu', name='relu')(x)
-    out = Concatenate(axis=1)([x, y])
-    model = Model(inputs=inp, outputs=out)
-    return model
-
-
-@pytest.fixture
-@pytest.mark.parametrize('io_type', ['io_stream', 'io_parallel'])
-@pytest.mark.parametrize('backend', ['Vivado', 'Vitis', 'Quartus'])
-def hls_model(keras_model, backend, io_type):
-    hls_config = hls4ml.utils.config_from_keras_model(
-        keras_model, default_precision='ap_fixed<16,3,AP_RND_CONV,AP_SAT>', granularity='name'
-    )
-    hls_config['LayerName']['relu']['Precision'] = 'ap_ufixed<17,3>'
-    output_dir = str(test_root_path / f'hls4mlprj_transpose_{backend}_{io_type}')
-    hls_model = hls4ml.converters.convert_from_keras_model(
-        keras_model, hls_config=hls_config, io_type=io_type, backend=backend, output_dir=output_dir
-    )
-
-    hls_model.compile()
-    return hls_model
-
-
-@pytest.mark.parametrize('io_type', ['io_stream', 'io_parallel'])
-@pytest.mark.parametrize('backend', ['Vivado', 'Vitis', 'Quartus'])
-def test_accuracy(data, keras_model, hls_model):
-    X = data
-    model = keras_model
-    # model under test predictions and accuracy
-    y_keras = model.predict(X)
-    y_hls4ml = hls_model.predict(X).reshape(y_keras.shape)
-    # "accuracy" of hls4ml predictions vs keras
-    np.testing.assert_allclose(y_keras, y_hls4ml, rtol=0, atol=1e-04, verbose=True)
diff --git a/hls4ml/test/pytest/test_upsampling.py b/hls4ml/test/pytest/test_upsampling.py
deleted file mode 100644
index 8ec5cab..0000000
--- a/hls4ml/test/pytest/test_upsampling.py
+++ /dev/null
@@ -1,69 +0,0 @@
-from pathlib import Path
-
-import numpy as np
-import pytest
-from tensorflow.keras.layers import UpSampling1D, UpSampling2D
-from tensorflow.keras.models import Sequential
-
-import hls4ml
-
-test_root_path = Path(__file__).parent
-
-in_height = 6
-in_width = 8
-in_feat = 4
-
-size = 2
-atol = 5e-3
-
-
-@pytest.fixture(scope='module')
-def data_1d():
-    X = np.random.rand(100, in_width, in_feat)
-    return X
-
-
-@pytest.fixture(scope='module')
-def data_2d():
-    X = np.random.rand(100, in_height, in_width, in_feat)
-    return X
-
-
-@pytest.fixture(scope='module')
-def keras_model_1d():
-    model = Sequential()
-    model.add(UpSampling1D(input_shape=(in_width, in_feat), size=size))
-    model.compile()
-    return model
-
-
-@pytest.fixture(scope='module')
-def keras_model_2d():
-    model = Sequential()
-    model.add(UpSampling2D(input_shape=(in_height, in_width, in_feat), size=(size, size)))
-    model.compile()
-    return model
-
-
-@pytest.mark.parametrize('io_type', ['io_stream', 'io_parallel'])
-@pytest.mark.parametrize('backend', ['Vivado', 'Vitis', 'Quartus'])
-@pytest.mark.parametrize('model_type', ['1d', '2d'])
-def test_upsampling(keras_model_1d, keras_model_2d, data_1d, data_2d, model_type, io_type, backend):
-    if model_type == '1d':
-        model = keras_model_1d
-        data = data_1d
-    else:
-        model = keras_model_2d
-        data = data_2d
-
-    config = hls4ml.utils.config_from_keras_model(model, default_precision='ap_fixed<32,1>', granularity='name')
-    odir = str(test_root_path / f'hls4mlprj_upsampling_{model_type}_{backend}_{io_type}')
-    hls_model = hls4ml.converters.convert_from_keras_model(
-        model, hls_config=config, io_type=io_type, output_dir=odir, backend=backend
-    )
-    hls_model.compile()
-
-    # Predict
-    y_keras = model.predict(data).flatten()
-    y_hls = hls_model.predict(data).flatten()
-    np.testing.assert_allclose(y_keras, y_hls, rtol=0, atol=atol, verbose=True)
diff --git a/hls4ml/test/pytest/test_zeropadding.py b/hls4ml/test/pytest/test_zeropadding.py
deleted file mode 100644
index 962a333..0000000
--- a/hls4ml/test/pytest/test_zeropadding.py
+++ /dev/null
@@ -1,73 +0,0 @@
-from pathlib import Path
-
-import numpy as np
-import pytest
-from tensorflow.keras.layers import ZeroPadding1D, ZeroPadding2D
-from tensorflow.keras.models import Sequential
-
-import hls4ml
-
-test_root_path = Path(__file__).parent
-
-in_height = 6
-in_width = 8
-in_feat = 4
-
-pad_t = 1
-pad_b = 2
-pad_l = 3
-pad_r = 4
-
-atol = 5e-3
-
-
-@pytest.fixture(scope='module')
-def data_1d():
-    X = np.random.rand(100, in_width, in_feat)
-    return X
-
-
-@pytest.fixture(scope='module')
-def data_2d():
-    X = np.random.rand(100, in_height, in_width, in_feat)
-    return X
-
-
-@pytest.fixture(scope='module')
-def keras_model_1d():
-    model = Sequential()
-    model.add(ZeroPadding1D(input_shape=(in_width, in_feat), padding=(pad_l, pad_r)))
-    model.compile()
-    return model
-
-
-@pytest.fixture(scope='module')
-def keras_model_2d():
-    model = Sequential()
-    model.add(ZeroPadding2D(input_shape=(in_height, in_width, in_feat), padding=((pad_t, pad_b), (pad_l, pad_r))))
-    model.compile()
-    return model
-
-
-@pytest.mark.parametrize('io_type', ['io_stream', 'io_parallel'])
-@pytest.mark.parametrize('backend', ['Vivado', 'Vitis', 'Quartus'])
-@pytest.mark.parametrize('model_type', ['1d', '2d'])
-def test_zeropadding(keras_model_1d, keras_model_2d, data_1d, data_2d, model_type, io_type, backend):
-    if model_type == '1d':
-        model = keras_model_1d
-        data = data_1d
-    else:
-        model = keras_model_2d
-        data = data_2d
-
-    config = hls4ml.utils.config_from_keras_model(model, default_precision='ap_fixed<32,1>', granularity='name')
-    odir = str(test_root_path / f'hls4mlprj_zeropadding_{model_type}_{backend}_{io_type}')
-    hls_model = hls4ml.converters.convert_from_keras_model(
-        model, hls_config=config, io_type=io_type, output_dir=odir, backend=backend
-    )
-    hls_model.compile()
-
-    # Predict
-    y_keras = model.predict(data).flatten()
-    y_hls = hls_model.predict(data).flatten()
-    np.testing.assert_allclose(y_keras, y_hls, rtol=0, atol=atol, verbose=True)
diff --git a/hls4ml/test/pytorch-models.txt b/hls4ml/test/pytorch-models.txt
deleted file mode 100644
index deab196..0000000
--- a/hls4ml/test/pytorch-models.txt
+++ /dev/null
@@ -1,18 +0,0 @@
-# PyTorch models from examples directory that will be used for testing
-#
-# Syntax:
-# MODEL_NAME [x:XILINXPART] [c:CLOCK_PERIOD] [io:s] [r:REUSE_FACTOR] [t:AP_TYPE] [s:STRATEGY]
-# where
-#   MODEL_NAME     - Name of the file containing the model (without ".pt")
-#   x:XILINXPART   - Xilinx part number to use
-#   c:CLOCK_PERIOD - Clock period
-#   io:s           - Use streaming I/O, otherwise use parallel I/O
-#   r:REUSE_FACTOR - Reuse factor
-#   s:STRATEGY     - Latency-optimized or Resource-optimized strategy
-#   t:AP_TYPE      - Default precision
-#
-# Lines starting with "#" are ignored.
-#
-
-two_layer_model
-three_layer_model
diff --git a/hls4ml/test/pytorch-to-hls.sh b/hls4ml/test/pytorch-to-hls.sh
deleted file mode 100755
index 42551b3..0000000
--- a/hls4ml/test/pytorch-to-hls.sh
+++ /dev/null
@@ -1,98 +0,0 @@
-#!/bin/bash
-
-pycmd=python
-part="xc7vx690tffg1927-2"
-clock=5
-io=io_parallel
-rf=1
-strategy="Latency"
-type="ap_fixed<16,6>"
-basedir=vivado_prj
-
-sanitizer="[^A-Za-z0-9._]"
-
-function print_usage {
-   echo "Usage: `basename $0` [OPTION] MODEL..."
-   echo ""
-   echo "MODEL is the name of the model pt file without extension. Multiple"
-   echo "models can be specified."
-   echo ""
-   echo "Options are:"
-   echo "   -x DEVICE"
-   echo "      Xilinx device part number. Defaults to 'xc7vx690tffg1927-2'."
-   echo "   -c CLOCK"
-   echo "      Clock period to use. Defaults to 5."
-   echo "   -s"
-   echo "      Use streaming I/O. If not specified uses parallel I/O."
-   echo "   -r FACTOR"
-   echo "      Reuse factor. Defaults to 1."
-   echo "   -g STRATEGY"
-   echo "      Strategy. 'Latency' or 'Resource'."
-   echo "   -t TYPE"
-   echo "      Default precision. Defaults to 'ap_fixed<16,6>'."
-   echo "   -d DIR"
-   echo "      Output directory."
-   echo "   -h"
-   echo "      Prints this help message."
-}
-
-while getopts ":x:c:sr:g:t:d:h" opt; do
-    case "$opt" in
-        x) part=$OPTARG
-            ;;
-        c) clock=$OPTARG
-            ;;
-        s) io=io_stream
-            ;;
-        r) rf=$OPTARG
-            ;;
-        g) strategy=$OPTARG
-            ;;
-        t) type=$OPTARG
-            ;;
-        d) basedir=$OPTARG
-            ;;
-        h)
-            print_usage
-            exit
-            ;;
-        :)
-            echo "Option -$OPTARG requires an argument."
-            exit 1
-            ;;
-    esac
-done
-
-shift $((OPTIND-1))
-
-models=("$@")
-if [[ ${#models[@]} -eq 0 ]]; then
-    echo "No models specified."
-    exit 1
-fi
-
-mkdir -p "${basedir}"
-
-for model in "${models[@]}"
-do
-    echo "Creating config file for model '${model}'"
-    base=${model%.*}
-    file="${basedir}/${base}.yml"
-
-    echo "PytorchModel: ../example-models/pytorch/${model}.pt" > ${file}
-    echo "OutputDir: ${basedir}/${base}-${part//${sanitizer}/_}-c${clock}-${io}-rf${rf}-${type//${sanitizer}/_}-${strategy}" >> ${file}
-    echo "ProjectName: myproject" >> ${file}
-    echo "Part: ${part}" >> ${file}
-    echo "ClockPeriod: ${clock}" >> ${file}
-    echo "" >> ${file}
-    echo "IOType: ${io}" >> ${file}
-    echo "HLSConfig:" >> ${file}
-    echo "  Model:" >> ${file}
-    echo "    ReuseFactor: ${rf}" >> ${file}
-    echo "    Precision: ${type} " >> ${file}
-    echo "    Strategy: ${strategy} " >> ${file}
-
-    ${pycmd} ../scripts/hls4ml convert -c ${file} || exit 1
-    rm ${file}
-    echo ""
-done
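Note: the deleted pytorch-to-hls.sh drives conversion by writing a YAML config (PytorchModel, Part, ClockPeriod, IOType, HLSConfig) and invoking the hls4ml CLI. The same settings can be expressed with the Python API that the pytest suite above already uses. Below is a minimal sketch, not part of the patch, assuming the example .pt file is a pickled nn.Module (as torch.save(model, path) produces) and using a placeholder input shape; the part/clock_period/project_name keyword names are assumptions mapped from the YAML keys.

import torch

from hls4ml.converters import convert_from_pytorch_model
from hls4ml.utils.config import config_from_pytorch_model

# Load one of the example models the script iterates over (assumes the .pt
# file is a whole pickled module rather than a state_dict).
model = torch.load('../example-models/pytorch/two_layer_model.pt')

# Mirror the HLSConfig section of the generated YAML.
config = config_from_pytorch_model(model, default_precision='ap_fixed<16,6>')
config['Model']['ReuseFactor'] = 1
config['Model']['Strategy'] = 'Latency'

hls_model = convert_from_pytorch_model(
    model,
    (None, 16),  # placeholder input shape; the real shape depends on the model
    hls_config=config,
    output_dir='vivado_prj/two_layer_model',
    project_name='myproject',
    part='xc7vx690tffg1927-2',
    clock_period=5,
    io_type='io_parallel',
)
hls_model.compile()

For the pytorch-models.txt options, a hypothetical entry such as "two_layer_model c:10 io:s r:2 s:Resource" would correspond to clock_period=10, io_type='io_stream', ReuseFactor=2 and Strategy='Resource' in the sketch above.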