From b2b5b425ac5d87cb8ec32fc3108972b70f7ce4d4 Mon Sep 17 00:00:00 2001 From: Ruihang Lai Date: Tue, 3 Dec 2024 20:14:15 -0500 Subject: [PATCH] Add project code --- README.md | 33 + eval_script/run_15.sh | 26 + fx_translator.py | 1972 +++++++++++++++++++ models/_align_scripted.py | 1689 ++++++++++++++++ models/_bert_scripted.py | 385 ++++ models/_deberta_scripted.py | 1057 ++++++++++ models/_densenet_scripted.py | 252 +++ models/_monodepth_scripted.py | 338 ++++ models/_quantized_scripted.py | 491 +++++ models/_tridentnet_scripted.py | 2075 ++++++++++++++++++++ models/align.py | 1692 ++++++++++++++++ models/bart.py | 1305 ++++++++++++ models/bert.py | 388 ++++ models/blockdrop.py | 286 +++ models/deberta.py | 1058 ++++++++++ models/densenet.py | 255 +++ models/longformer.py | 1789 +++++++++++++++++ models/lstm.py | 171 ++ models/monodepth.py | 323 +++ models/quantized.py | 480 +++++ models/resnet.py | 289 +++ models/seq2seq.py | 152 ++ models/tridentnet.py | 2072 +++++++++++++++++++ reports/15745_Project_Milestone_Report.pdf | Bin 0 -> 184303 bytes reports/15745_Project_Proposal.pdf | Bin 0 -> 208886 bytes run.py | 122 ++ scripts/compile_longobj.sh | 10 +- test/example.py | 223 ++- timer.py | 86 + utils.py | 843 ++++++++ 30 files changed, 19846 insertions(+), 16 deletions(-) create mode 100755 eval_script/run_15.sh create mode 100644 fx_translator.py create mode 100644 models/_align_scripted.py create mode 100644 models/_bert_scripted.py create mode 100644 models/_deberta_scripted.py create mode 100644 models/_densenet_scripted.py create mode 100644 models/_monodepth_scripted.py create mode 100644 models/_quantized_scripted.py create mode 100644 models/_tridentnet_scripted.py create mode 100644 models/align.py create mode 100644 models/bart.py create mode 100644 models/bert.py create mode 100644 models/blockdrop.py create mode 100644 models/deberta.py create mode 100644 models/densenet.py create mode 100644 models/longformer.py create mode 100644 models/lstm.py create mode 100644 models/monodepth.py create mode 100644 models/quantized.py create mode 100644 models/resnet.py create mode 100644 models/seq2seq.py create mode 100644 models/tridentnet.py create mode 100644 reports/15745_Project_Milestone_Report.pdf create mode 100644 reports/15745_Project_Proposal.pdf create mode 100755 run.py create mode 100644 timer.py create mode 100644 utils.py diff --git a/README.md b/README.md index f41c8d6ca6f3..9d6454c48a83 100644 --- a/README.md +++ b/README.md @@ -1,3 +1,36 @@ +# 15-745 Project - MagPy with TVM + +This branch is the integration of MagPy with TVM, for eager-mode DNN +program compilation in TVM. + +## Project Reports + +* [Project proposal](reports/15745_Project_Proposal.pdf) +* [Project milestone report](reports/15745_Project_Milestone_Report.pdf) +* Project final report (TBA) + +## Reproduce the results + +Please follow the steps below to reproduce the evaluation results. + +**Step 1.** Install TVM via + +```bash +python -m pip install --pre -U -f https://mlc.ai/wheels mlc-ai-nightly-cu123 +``` + +**Step 2.** Install MagPy by following [the MagPy installation](#installation). + +**Step 3.** Run evaluation via + +```bash +./eval_script/run_15.sh +``` + + + +--- + # MagPy MagPy is a JIT compiler for PyTorch programs. It can extract the operator graph from PyTorch programs and optimize the graph with a wide range of deep learning graph compilers. 
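+A central piece of this patch is `fx_translator.py`, which translates a
+`torch.fx` graph into a TVM Relax `IRModule`. The sketch below shows one way to
+drive the translator's `from_fx` entry point directly; `TinyMLP` is a made-up
+toy module used only for illustration and is not part of this repository:
+
+```python
+import torch
+from torch import fx, nn
+
+from fx_translator import from_fx
+
+
+class TinyMLP(nn.Module):
+    """Toy module used only to illustrate the translator (not in this repo)."""
+
+    def __init__(self):
+        super().__init__()
+        self.linear = nn.Linear(16, 8)
+
+    def forward(self, x):
+        return torch.nn.functional.relu(self.linear(x))
+
+
+# Trace the module with torch.fx, then translate the traced graph to Relax.
+graph_module = fx.symbolic_trace(TinyMLP())
+mod = from_fx(graph_module, input_info=[((1, 16), "float32")])
+mod.show()  # prints the resulting Relax program
+```
+
+For a quick end-to-end check, a single model/batch-size/backend combination can
+also be run directly with the same flags that `eval_script/run_15.sh` passes,
+e.g. `python3 run.py --bs 1 --model resnet --compile sys-tvm` (with the same
+`LD_PRELOAD` preload the script sets).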
diff --git a/eval_script/run_15.sh b/eval_script/run_15.sh new file mode 100755 index 000000000000..31d25f5a56df --- /dev/null +++ b/eval_script/run_15.sh @@ -0,0 +1,26 @@ +#!/bin/bash + +set -euxo pipefail + +TIME_TAG=`date +%y%m%d-%H%M%S` + +export GPU_NUM_DEVICES=1 +export PJRT_DEVICE=GPU + +LOG_DIR=logs/e2e +mkdir -p $LOG_DIR + +for bs in 1 16 +do + for model in align bert deberta densenet monodepth quantized resnet tridentnet + for model in resnet + do + for compile in eager dynamo sys script sys-torchscript sys-tvm + do + rm -rf $LOG_DIR/$model.$bs.$compile.log + LD_PRELOAD=build/ldlong.v3.9.12.so python3 run.py --bs $bs --model $model --compile $compile 2>&1 | tee $LOG_DIR/$model.$bs.$compile.log + done + done +done + +wait diff --git a/fx_translator.py b/fx_translator.py new file mode 100644 index 000000000000..0dab05148a7e --- /dev/null +++ b/fx_translator.py @@ -0,0 +1,1972 @@ +# pylint: disable=invalid-name, inconsistent-return-statements, unidiomatic-typecheck +# pylint: disable=import-outside-toplevel +from functools import partial, reduce +from typing import Callable, Dict, List, Optional, Tuple, Union + +import tvm +from tvm import relax + + +class TorchFXImporter: + + import torch # type: ignore + from torch import fx + + def __init__(self) -> None: + import torch # type: ignore + from torch import fx + + self.env: Dict[fx.Node, relax.Expr] = {} + self.params: Dict[torch.Tensor, relax.Expr] = {} + self.block_builder: relax.BlockBuilder = None + self.convert_map: Dict[ + Union[torch.nn.Module, str], Callable[[fx.Node], relax.Var] + ] = self.create_convert_map() + self.named_modules: Dict[str, torch.Module] = None + + ########## Utilities ########## + + @staticmethod + def _convert_data_type( + input_type: Union[str, torch.dtype], env: Optional[Dict] = None + ): + """converts the PyTorch scalar type input_type to a TVM dtype.""" + import torch # type: ignore + + if env is not None and input_type in env: + input_type = env[input_type] + + input_type = input_type.lower() if isinstance(input_type, str) else input_type + if input_type in ["float", "float32", "torch.float32", torch.float32]: + return "float32" + elif input_type in ["float16", "torch.float16", torch.float16]: + return "float16" + elif input_type in ["int64", "torch.int64", torch.int64]: + return "int64" + elif input_type in ["int32", "torch.int32", torch.int32]: + return "int32" + elif input_type in ["bool", "torch.bool", torch.bool]: + return "bool" + else: + raise NotImplementedError( + "input_type {} is not handled yet".format(input_type) + ) + + @staticmethod + def _convert_torch_tensor_to_relax(tensor: torch.Tensor) -> relax.Var: + tensor = tensor.detach().cpu() + dtype = TorchFXImporter._convert_data_type(str(tensor.data.dtype)) + return relax.const(tensor.data.numpy(), dtype) + + @staticmethod + def shape_of(tensor): + """Get the shape of a tensor.""" + import torch # type: ignore + + if isinstance(tensor, relax.Expr): + if not isinstance(tensor.struct_info, relax.TensorStructInfo): + raise TypeError("The input Expr of shape_of should be a Tensor") + return tensor.struct_info.shape + elif isinstance(tensor, torch.Tensor): + return tensor.shape + raise ValueError("Unsupported type: {}".format(type(tensor))) + + def retrieve_args(self, node: fx.Node): + return self._retrieve_args(node.args) + + def _retrieve_args(self, node): + from torch import fx + + if isinstance(node, fx.Node): + return self.env[node] + elif isinstance(node, tuple): + return tuple(self._retrieve_args(x) for x in node) + elif 
isinstance(node, list): + return [self._retrieve_args(x) for x in node] + elif isinstance(node, dict): + return { + self._retrieve_args(k): self._retrieve_args(v) for k, v in node.items() + } + else: + return node + + ########## Unary Ops ########## + + def _unary_op(self, op: Callable) -> Callable: + from torch import fx + + def convert(node: fx.Node) -> relax.Var: + return self.block_builder.emit(op(self.env[node.args[0]])) + + return convert + + def _clamp(self, node: fx.Node) -> relax.Expr: + args = self.retrieve_args(node) + a_min = args[1] if len(args) > 1 else node.kwargs["min"] + a_max = args[2] if len(args) > 2 else node.kwargs["max"] + if not isinstance(a_min, (int, float)): + raise ValueError( + f"TVM only supports constant min value for torch.clamp/clip, " + f"but got {a_min} with type {type(a_min)}" + ) + if not isinstance(a_max, (int, float)): + raise ValueError( + f"TVM only supports constant max value for torch.clamp/clip, " + f"but got {a_max} with type {type(a_max)}" + ) + return self.block_builder.emit(relax.op.clip(args[0], a_min, a_max)) + + def _gelu(self, node: fx.Node) -> relax.Expr: + approximate = node.kwargs.get("approximate", "none") + if approximate == "none": + return self.block_builder.emit(relax.op.nn.gelu(self.env[node.args[0]])) + elif approximate == "tanh": + return self.block_builder.emit( + relax.op.nn.gelu_tanh(self.env[node.args[0]]) + ) + else: + raise KeyError( + "Unregonized approximate algorithm for gelu: {}.".format(approximate) + ) + + def _hardsigmoid(self, node: fx.Node) -> relax.Var: + args = self.retrieve_args(node) + x = args[0] + dtype = x.struct_info.dtype + x0 = relax.op.add(x, relax.const(3, dtype)) + x1 = relax.op.clip(x0, 0, 6) + return self.block_builder.emit(relax.op.divide(x1, relax.const(6, dtype))) + + def _hardswish(self, node: fx.Node) -> relax.Var: + args = self.retrieve_args(node) + x = args[0] + dtype = x.struct_info.dtype + x0 = relax.op.add(x, relax.const(3, dtype)) + x1 = relax.op.clip(x0, 0, 6) + x2 = relax.op.divide(x1, relax.const(6, dtype)) + return self.block_builder.emit(relax.op.multiply(x, x2)) + + def _leakyrelu(self, node: fx.Node) -> relax.Var: + x = self.env[node.args[0]] + alpha = ( + node.args[1] + if len(node.args) > 1 + else node.kwargs.get("negative_slope", 0.01) + ) + return self.block_builder.emit(relax.op.nn.leakyrelu(x, alpha)) + + def _log_softmax(self, node: fx.Node) -> relax.Var: + x = self.env[node.args[0]] + dim = node.args[1] if len(node.args) > 1 else node.kwargs.get("dim", -1) + return self.block_builder.emit(relax.op.nn.log_softmax(x, dim)) + + def _round(self, node: fx.Node) -> relax.Expr: + if node.kwargs.get("decimals", 0) != 0: + raise ValueError("specifying decimals for round is not supported yet") + arg = self.env[node.args[0]] + return self.block_builder.emit(relax.op.round(arg)) + + def _softmax(self, node: fx.Node) -> relax.Var: + x = self.env[node.args[0]] + dim = node.args[1] if len(node.args) > 1 else node.kwargs.get("dim", -1) + return self.block_builder.emit(relax.op.nn.softmax(x, dim)) + + def _tril_triu(self, op: Callable) -> Callable: + from torch import fx + + def convert(node: fx.Node) -> relax.Var: + x = self.env[node.args[0]] + k = node.args[1] if len(node.args) > 1 else node.kwargs.get("diagonal", 0) + assert isinstance(k, int) + return self.block_builder.emit(op(x, k)) + + return convert + + ########## Binary Ops ########## + + def _binary_op(self, relax_op: Callable, intrinsic_op: Callable) -> Callable: + from torch import fx + + def convert(node: fx.Node) -> 
relax.Var: + def promote_binary_op_args(lhs, rhs): + if isinstance(lhs, relax.Expr) and isinstance(rhs, relax.Expr): + return lhs, rhs + elif isinstance(lhs, relax.Expr): + assert isinstance(lhs.struct_info, relax.TensorStructInfo) + return lhs, relax.const(rhs, lhs.struct_info.dtype) + elif isinstance(rhs, relax.Expr): + assert isinstance(rhs.struct_info, relax.TensorStructInfo) + return relax.const(lhs, rhs.struct_info.dtype), rhs + else: + assert False + + def call_binary_op(op, lhs, rhs): + lhs, rhs = promote_binary_op_args(lhs, rhs) + return self.block_builder.emit(op(lhs, rhs)) + + lhs, rhs = self.retrieve_args(node) + if isinstance(lhs, relax.Var) or isinstance(rhs, relax.Var): + return call_binary_op(relax_op, lhs, rhs) + elif isinstance(lhs, relax.expr.Constant): + return call_binary_op( + relax_op, lhs, relax.const(rhs, dtype=lhs.struct_info.dtype) + ) + elif isinstance(rhs, relax.expr.Constant): + return call_binary_op( + relax_op, relax.const(lhs, dtype=rhs.struct_info.dtype), rhs + ) + return intrinsic_op(lhs, rhs) + + return convert + + ########## Neural Network ########## + + def _adaptive_avg_pool2d(self, node: fx.Node) -> relax.Var: + x = self.env[node.args[0]] + output_size = node.args[1] + return self.block_builder.emit( + relax.op.nn.adaptive_avg_pool2d(x, output_size, layout="NCHW") + ) + + def _addmm(self, node: fx.Node) -> relax.Var: + x = self.env[node.args[0]] + y = self.env[node.args[1]] + z = self.env[node.args[2]] + alpha = node.kwargs.get("alpha", 1) + beta = node.kwargs.get("beta", 1) + + res = None + if alpha != 0: + res = self.block_builder.emit( + relax.op.linear_algebra.matmul(y, z, out_dtype="float32") + ) + if alpha != 1: + dtype = res.struct_info.dtype + res = self.block_builder.emit( + relax.op.multiply(res, relax.const(alpha, dtype)) + ) + if beta != 0: + dtype = x.struct_info.dtype + if beta != 1: + bias = self.block_builder.emit( + relax.op.multiply(x, relax.const(beta, dtype)) + ) + else: + bias = x + res = ( + bias + if res is None + else self.block_builder.emit(relax.op.add(bias, res)) + ) + return res + + def _avg_pool2d_impl( + self, + x: relax.Expr, + kernel_size: Union[int, Tuple[int, int]] = (1, 1), + stride: Optional[Union[int, Tuple[int, int]]] = None, + padding: Optional[int] = 0, + ceil_mode: Optional[bool] = False, + ) -> relax.Var: + stride = kernel_size if stride is None or stride == [] else stride + return self.block_builder.emit( + relax.op.nn.avg_pool2d( + x, + pool_size=kernel_size, + strides=stride, + padding=padding, + ceil_mode=ceil_mode, + layout="NCHW", + ) + ) + + def _avg_pool2d(self, node: fx.Node) -> relax.Var: + args, kwargs = node.normalized_arguments(node) + x = self.env[args[0]] + kernel_size = args[1] if len(args) > 1 else kwargs["kernel_size"] + stride = args[2] if len(args) > 2 else kwargs.get("stride", None) + padding = args[3] if len(args) > 3 else kwargs.get("padding", 0) + ceil_mode = args[4] if len(args) > 4 else kwargs.get("ceil_mode", False) + return self._avg_pool2d_impl(x, kernel_size, stride, padding, ceil_mode) + + def _baddbmm(self, node: fx.Node) -> relax.Var: + x = self.env[node.args[0]] + batch1 = self.env[node.args[1]] + batch2 = self.env[node.args[2]] + alpha = node.kwargs.get("alpha", 1) + beta = node.kwargs.get("beta", 1) + + res = None + if alpha != 0: + res = self.block_builder.emit(relax.op.matmul(batch1, batch2)) + if alpha != 1: + dtype = res.struct_info.dtype + res = self.block_builder.emit( + relax.op.multiply(res, relax.const(alpha, dtype)) + ) + if beta != 0: + dtype = 
x.struct_info.dtype + if beta != 1: + bias = self.block_builder.emit( + relax.op.multiply(x, relax.const(beta, dtype)) + ) + else: + bias = x + res = ( + bias + if res is None + else self.block_builder.emit(relax.op.add(res, bias)) + ) + return res + + def _conv_transpose1d_impl( + self, + x: relax.Expr, + weight: relax.Expr, + bias: Optional[relax.Expr], + strides: Optional[Tuple], + padding: Optional[Tuple], + dilation: Optional[Tuple], + groups: Optional[Tuple], + ) -> relax.Var: + conv1d_transpose = self.block_builder.emit( + relax.op.nn.conv1d_transpose( + x, + weight, + strides=strides, + padding=padding, + dilation=dilation, + groups=groups, + data_layout="NCW", + kernel_layout="OIW", + out_dtype="float32", + ) + ) + + if bias is None: + return conv1d_transpose + + assert len(self.shape_of(bias)) == 1 + bias = relax.op.reshape(bias, (1, -1, 1)) + return self.block_builder.emit(relax.op.add(conv1d_transpose, bias)) + + def _conv_transpose1d(self, node: fx.Node) -> relax.Var: + args = self.retrieve_args(node) + x = args[0] + weight = args[1] + bias = args[2] if len(args) > 2 else None + stride = args[3] if len(args) > 3 else 1 + padding = args[4] if len(args) > 4 else 0 + dilation = args[5] if len(args) > 5 else 1 + groups = args[6] if len(args) > 6 else 1 + return self._conv_transpose1d_impl( + x, + weight, + bias=bias, + strides=stride, + padding=padding, + dilation=dilation, + groups=groups, + ) + + def _conv_transpose2d_impl( + self, + x: relax.Expr, + weight: relax.Expr, + bias: Optional[relax.Expr], + strides: Optional[Tuple], + padding: Optional[Tuple], + dilation: Optional[Tuple], + groups: Optional[Tuple], + ) -> relax.Var: + conv2d_transpose = self.block_builder.emit( + relax.op.nn.conv2d_transpose( + x, + weight, + strides=strides, + padding=padding, + dilation=dilation, + groups=groups, + data_layout="NCHW", + kernel_layout="OIHW", + out_dtype="float32", + ) + ) + + if bias is None: + return conv2d_transpose + + assert len(self.shape_of(bias)) == 1 + bias = relax.op.reshape(bias, (1, -1, 1, 1)) + return self.block_builder.emit(relax.op.add(conv2d_transpose, bias)) + + def _conv_transpose2d(self, node: fx.Node) -> relax.Var: + args = self.retrieve_args(node) + x = args[0] + weight = args[1] + bias = args[2] if len(args) > 2 else None + stride = args[3] if len(args) > 3 else 1 + padding = args[4] if len(args) > 4 else 0 + dilation = args[5] if len(args) > 5 else 1 + groups = args[6] if len(args) > 6 else 1 + return self._conv_transpose2d_impl( + x, + weight, + bias=bias, + strides=stride, + padding=padding, + dilation=dilation, + groups=groups, + ) + + def _conv1d_impl( + self, + x: relax.Expr, + weight: relax.Expr, + bias: Optional[relax.Expr], + strides: Optional[Tuple], + padding: Optional[Tuple], + dilation: Optional[Tuple], + groups: Optional[Tuple], + ) -> relax.Var: + conv1d = self.block_builder.emit( + relax.op.nn.conv1d( + x, + weight, + strides=strides, + padding=padding, + dilation=dilation, + groups=groups, + data_layout="NCW", + kernel_layout="OIW", + out_dtype="float32", + ) + ) + + if bias is None: + return conv1d + assert len(self.shape_of(bias)) == 1 + bias = relax.op.reshape(bias, (1, -1, 1)) + return self.block_builder.emit(relax.op.add(conv1d, bias)) + + def _conv1d(self, node: fx.Node) -> relax.Var: + args = self.retrieve_args(node) + x = args[0] + weight = args[1] + bias = args[2] if len(args) > 2 else None + stride = args[3] if len(args) > 3 else 1 + padding = args[4] if len(args) > 4 else 0 + dilation = args[5] if len(args) > 5 else 1 + groups = 
args[6] if len(args) > 6 else 1 + return self._conv1d_impl( + x, + weight, + bias=bias, + strides=stride, + padding=padding, + dilation=dilation, + groups=groups, + ) + + def _conv2d_impl( + self, + x: relax.Expr, + weight: relax.Expr, + bias: Optional[relax.Expr], + strides: Optional[Tuple], + padding: Optional[Tuple], + dilation: Optional[Tuple], + groups: Optional[Tuple], + ): + conv2d = self.block_builder.emit( + relax.op.nn.conv2d( + x, + weight, + strides=strides, + padding=padding, + dilation=dilation, + groups=groups, + data_layout="NCHW", + kernel_layout="OIHW", + out_dtype="float32", + ) + ) + + if bias is None: + return conv2d + assert len(self.shape_of(bias)) == 1 + bias = relax.op.reshape(bias, (1, -1, 1, 1)) + return self.block_builder.emit(relax.op.add(conv2d, bias)) + + def _conv2d(self, node: fx.Node) -> relax.Var: + args = self.retrieve_args(node) + x = args[0] + weight = args[1] + bias = args[2] if len(args) > 2 else None + stride = args[3] if len(args) > 3 else 1 + padding = args[4] if len(args) > 4 else 0 + dilation = args[5] if len(args) > 5 else 1 + groups = args[6] if len(args) > 6 else 1 + return self._conv2d_impl( + x, + weight, + bias=bias, + strides=stride, + padding=padding, + dilation=dilation, + groups=groups, + ) + + def _conv3d_impl( + self, + x: relax.Expr, + weight: relax.Expr, + bias: Optional[relax.Expr], + strides: Optional[Tuple], + padding: Optional[Tuple], + dilation: Optional[Tuple], + groups: Optional[Tuple], + ): + conv3d = self.block_builder.emit( + relax.op.nn.conv3d( + x, + weight, + strides=strides, + padding=padding, + dilation=dilation, + groups=groups, + data_layout="NCDHW", + kernel_layout="OIDHW", + out_dtype="float32", + ) + ) + + if bias is None: + return conv3d + assert len(self.shape_of(bias)) == 1 + bias = relax.op.reshape(bias, (1, -1, 1, 1, 1)) + return self.block_builder.emit(relax.op.add(conv3d, bias)) + + def _conv3d(self, node: fx.Node) -> relax.Var: + args = self.retrieve_args(node) + x = args[0] + weight = args[1] + bias = args[2] if len(args) > 2 else None + stride = args[3] if len(args) > 3 else 1 + padding = args[4] if len(args) > 4 else 0 + dilation = args[5] if len(args) > 5 else 1 + groups = args[6] if len(args) > 6 else 1 + return self._conv3d_impl( + x, + weight, + bias=bias, + strides=stride, + padding=padding, + dilation=dilation, + groups=groups, + ) + + def _einsum(self, node: fx.Node) -> relax.Var: + import torch # type: ignore + + args = self.retrieve_args(node) + operands = ( + args[1] if isinstance(args[1], (torch.Size, tuple, list)) else args[1:] + ) + return self.block_builder.emit(relax.op.einsum(operands, args[0])) + + def _embedding_impl( + self, + x, + weight, + ) -> relax.Var: + x = self.block_builder.emit(relax.op.astype(x, "int32")) + + ndim = x.struct_info.ndim + if ndim == 1: + return self.block_builder.emit(relax.op.take(weight, x, axis=0)) + else: + x_shape = x.struct_info.shape.values + emb_size = weight.struct_info.shape.values[-1] + x = self.block_builder.emit(relax.op.reshape(x, shape=[-1])) + embedding = self.block_builder.emit(relax.op.take(weight, x, axis=0)) + return self.block_builder.emit( + relax.op.reshape(embedding, [*x_shape, emb_size]) + ) + + def _layer_norm_impl(self, x, gamma, beta, eps, normalized_shape) -> relax.Var: + import numpy as np # type: ignore + from torch.fx.immutable_collections import immutable_list + + if isinstance(normalized_shape, (immutable_list, tuple)): + normalized_shape = tuple(normalized_shape) + else: + try: + normalized_shape = 
self.env[normalized_shape] + except TypeError: + normalized_shape = tuple(normalized_shape) + + dim_num = len(normalized_shape) + axes = list(range(-dim_num, 0)) + + if gamma is None: + shape_tuple = [int(s) for s in normalized_shape] + gamma = relax.const(np.ones(shape_tuple), x.struct_info.dtype) + if beta is None: + shape_tuple = [int(s) for s in normalized_shape] + beta = relax.const(np.zeros(shape_tuple), x.struct_info.dtype) + + return self.block_builder.emit( + relax.op.nn.layer_norm( + x, + gamma, + beta, + axes=axes, + epsilon=eps, + ) + ) + + def _layer_norm(self, node: fx.Node) -> relax.Var: + x = self.env[node.args[0]] + normalized_shape = node.args[1] + gamma = self.env[node.args[2]] if len(node.args) > 2 else None + beta = self.env[node.args[3]] if len(node.args) > 3 else None + eps = node.args[4] if len(node.args) > 4 else 1e-05 + return self._layer_norm_impl(x, gamma, beta, eps, normalized_shape) + + def _layer_norm_module(self, node: fx.Node) -> relax.Var: + import torch # type: ignore + + x = self.env[node.args[0]] + module = self.named_modules[node.target] + normalized_shape = module.normalized_shape + if module.elementwise_affine: + gamma = self.params[module.weight] + beta = self.params[module.bias] + else: + gamma = relax.const( + torch.ones_like(module.normalized_shape), x.struct_info.dtype + ) + beta = relax.const( + torch.zeros_like(module.normalized_shape), x.struct_info.dtype + ) + eps = module.eps + return self._layer_norm_impl(x, gamma, beta, eps, normalized_shape) + + def _linear(self, node: fx.Node) -> relax.Var: + args = self.retrieve_args(node) + x = args[0] + weight = args[1] + bias = args[2] if len(args) > 2 else None + return self.block_builder.emit(relax.op.linear(x, weight, bias, "float32")) + + def _max_pool2d_impl( + self, + x: relax.Expr, + kernel_size: Union[int, Tuple[int, int]] = (1, 1), + stride: Optional[Union[int, Tuple[int, int]]] = None, + padding: Optional[int] = 0, + dilation: Optional[int] = 1, + ceil_mode: Optional[bool] = False, + ) -> relax.Var: + stride = kernel_size if stride is None else stride + return self.block_builder.emit( + relax.op.nn.max_pool2d( + x, + pool_size=kernel_size, + strides=stride, + padding=padding, + dilation=dilation, + ceil_mode=ceil_mode, + layout="NCHW", + ) + ) + + def _max_pool2d(self, node: fx.Node) -> relax.Var: + args = self.retrieve_args(node) + x = args[0] + kernel_size = args[1] + stride = args[2] if len(args) > 2 else None + padding = args[3] if len(args) > 3 else 0 + dilation = args[4] if len(args) > 4 else 1 + ceil_mode = args[5] if len(args) > 5 else False + + return self._max_pool2d_impl( + x, kernel_size, stride, padding, dilation, ceil_mode + ) + + def _scaled_dot_product_attention(self, node: fx.Node) -> relax.Var: + transpose_S_H = lambda tensor: relax.op.permute_dims(tensor, [0, 2, 1, 3]) + query = transpose_S_H(self.env[node.args[0]]) + key = transpose_S_H(self.env[node.args[1]]) + value = transpose_S_H(self.env[node.args[2]]) + attn_mask = ( + node.args[3] if len(node.args) > 3 else node.kwargs.get("attn_mask", None) + ) + dropout_p = ( + node.args[4] if len(node.args) > 4 else node.kwargs.get("dropout_p", 0.0) + ) + assert dropout_p == 0.0, "Dropout is not supported" + is_causal = ( + node.args[5] if len(node.args) > 5 else node.kwargs.get("is_causal", False) + ) + causal_mask = "TopLeft" if is_causal else None + + if attn_mask is not None: + attn_mask = self.env[attn_mask] + msg = "Only a float mask is supported for the attn_mask input." 
+ assert "float" in attn_mask.struct_info.dtype, msg + + return self.block_builder.emit( + transpose_S_H( + relax.op.nn.attention( + query, key, value, bias=attn_mask, causal_mask=causal_mask + ) + ) + ) + + def _unbind(self, node: fx.Node) -> relax.Var: + x = self.env[node.args[0]] + dim = node.args[1] if len(node.args) > 1 else node.kwargs.get("dim", 0) + assert isinstance(dim, int), "Expected 2nd argument of unbind as int" + selections = self.shape_of(x)[dim].value + n_section = list(range(1, selections + 1)) + ret, split = [], self.block_builder.emit(relax.op.split(x, n_section, dim)) + for i in range(selections): + ret.append(self.block_builder.emit(relax.op.squeeze(split[i], axis=dim))) + return self.block_builder.emit(relax.Tuple(ret)) + + ########## Statistical ########## + + def _mean(self, node: fx.Node) -> relax.Var: + args = self.retrieve_args(node) + x = args[0] + dim = args[1] if len(node.args) > 1 else node.kwargs.get("dim", None) + keepdim = args[2] if len(node.args) > 2 else node.kwargs.get("keepdim", False) + return self.block_builder.emit(relax.op.mean(x, dim, keepdims=keepdim)) + + def _sum(self, node: fx.Node) -> relax.Var: + args = self.retrieve_args(node) + keepdim = node.kwargs["keepdim"] if "keepdim" in node.kwargs else False + if len(args) == 1: + return self.block_builder.emit(relax.op.sum(args[0], keepdims=keepdim)) + return self.block_builder.emit(relax.op.sum(args[0], args[1])) + + ########## Search ########## + + def _argmax_argmin(self, op: Callable) -> Callable: + from torch import fx + + def convert(node: fx.Node): + x = self.env[node.args[0]] + dim = node.args[1] if len(node.args) > 1 else node.kwargs.get("dim", None) + keepdim = ( + node.args[2] + if len(node.args) > 2 + else node.kwargs.get("keepdim", False) + ) + return self.block_builder.emit(op(x, dim, keepdim)) + + return convert + + ########## Manipulation ########## + + def _cat(self, node: fx.Node) -> relax.Var: + args = self.retrieve_args(node) + axis = args[1] if len(node.args) > 1 else node.kwargs.get("dim", 0) + return self.block_builder.emit(relax.op.concat(args[0], axis=axis)) + + def _cumsum(self, node: fx.Node) -> relax.Var: + x = self.env[node.args[0]] + + dim = node.args[1] if len(node.args) > 1 else node.kwargs.get("dim", None) + if "dtype" in node.kwargs: + dtype = self._convert_data_type(str(node.kwargs["dtype"]), self.env) + else: + dtype = None + if "out" in node.kwargs: + raise ValueError("specifying out for cumsum is not supported yet") + + return self.block_builder.emit(relax.op.cumsum(x, dim, dtype)) + + def _expand(self, node: fx.Node) -> relax.Var: + args = self.retrieve_args(node) + sizes = args[1:] if len(args) > 2 else args[1] + broadcast_shape, in_shape = [], self.shape_of(args[0]) + for idx, i in enumerate(sizes): + if isinstance(i, int) and i == -1: + broadcast_shape.append(in_shape[idx]) + else: + broadcast_shape.append(i) + return self.block_builder.emit(relax.op.broadcast_to(args[0], broadcast_shape)) + + def _permute(self, node: fx.Node) -> relax.Var: + import torch # type: ignore + + args = self.retrieve_args(node) + x = args[0] + dims = args[1] if isinstance(args[1], (torch.Size, tuple, list)) else args[1:] + return self.block_builder.emit(relax.op.permute_dims(x, dims)) + + def _repeat(self, node: fx.Node) -> relax.Var: + import torch # type: ignore + + args = self.retrieve_args(node) + x = args[0] + dims = args[1] if isinstance(args[1], (torch.Size, tuple, list)) else args[1:] + return self.block_builder.emit(relax.op.tile(x, dims)) + + def _reshape(self, node: 
fx.Node) -> relax.Var: + import torch # type: ignore + + args = self.retrieve_args(node) + x = args[0] + dims = args[1] if isinstance(args[1], (torch.Size, tuple, list)) else args[1:] + return self.block_builder.emit(relax.op.reshape(x, dims)) + + def _scatter(self, node: fx.Node) -> relax.Var: + x = self.env[node.args[0]] + if len(node.args) == 1: + dim = node.kwargs["dim"] + index = self.env[node.kwargs["index"]] + src = self.env[node.kwargs["src"]] + elif len(node.args) == 4: + dim = node.args[1] + index = self.env[node.args[2]] + src = self.env[node.args[3]] + else: + raise Exception("Unexpected args " + str(node.args)) + return self.block_builder.emit( + relax.op.scatter_elements(x, index, src, axis=dim) + ) + + def _split(self, node: fx.Node) -> relax.Var: + x = self.env[node.args[0]] + split_size = node.args[1] + dim = node.args[2] if len(node.args) > 2 else node.kwargs.get("dim", 0) + if isinstance(split_size, (list, tuple)): + n_section = [] + for s in split_size[:-1]: + cum_sum = 0 if not n_section else n_section[-1] + n_section.append(s + cum_sum) + else: + n_section = (self.shape_of(x)[dim].value + split_size - 1) // split_size + return self.block_builder.emit(relax.op.split(x, n_section, dim)) + + def _squeeze(self, node: fx.Node) -> relax.Var: + x = self.env[node.args[0]] + dim = node.args[1] if len(node.args) > 1 else node.kwargs.get("dim", None) + return self.block_builder.emit(relax.op.squeeze(x, dim)) + + def _stack(self, node: fx.Node) -> relax.Var: + args = self.retrieve_args(node) + axis = args[1] if len(node.args) > 1 else node.kwargs.get("dim", 0) + in_args = args[0] + assert all( + a.struct_info.shape[axis] == in_args[0].struct_info.shape[axis] + for a in in_args[1:] + ), "Expect all dim at {} to be the same, get {}".format( + axis, [a.struct_info.shape for a in args] + ) + cat = self.block_builder.emit(relax.op.concat(in_args, axis=axis)) + s_shape = [] + for idx, s in enumerate(cat.struct_info.shape): + if idx == axis: + s_shape.extend([len(in_args), in_args[0].struct_info.shape[axis]]) + else: + s_shape.append(s) + return self.block_builder.emit(relax.op.reshape(cat, s_shape)) + + def _tile(self, node: fx.Node) -> relax.Var: + import torch # type: ignore + + args = self.retrieve_args(node) + x = args[0] + dims = args[1] if isinstance(args[1], (torch.Size, tuple, list)) else args[1:] + return self.block_builder.emit(relax.op.tile(x, dims)) + + def _transpose(self, node: fx.Node) -> relax.Var: + args = self.retrieve_args(node) + full_idx = list(range(len(self.shape_of(args[0])))) + full_idx[args[1]], full_idx[args[2]] = full_idx[args[2]], full_idx[args[1]] + return self.block_builder.emit(relax.op.permute_dims(args[0], full_idx)) + + ########## Creation ########## + + def _to_copy(self, node: fx.Node) -> relax.Var: + import torch # type: ignore + + x = self.env[node.args[0]] + if len(node.args) == 2: + if isinstance(node.args[1], torch.dtype): + dtype = self._convert_data_type(node.args[1], self.env) + return self.block_builder.emit(relax.op.astype(x, dtype)) + elif "dtype" in node.kwargs: + dtype = self._convert_data_type(node.kwargs["dtype"], self.env) + return self.block_builder.emit(relax.op.astype(x, dtype)) + return x + + def _arange(self, node: fx.Node) -> relax.Var: + import torch # type: ignore + + start_end_step = [None, None, None] + if "start" in node.kwargs: + start_end_step[0] = node.kwargs["start"] + if "end" in node.kwargs: + start_end_step[1] = node.kwargs["end"] + if "step" in node.kwargs: + start_end_step[2] = node.kwargs["step"] + + if 
len(node.args) == 1: + assert start_end_step[1] is None + start_end_step[1] = node.args[0] + elif len(node.args) == 2: + assert start_end_step[0] is None + assert start_end_step[1] is None + start_end_step[0] = node.args[0] + start_end_step[1] = node.args[1] + elif len(node.args) == 3: + assert start_end_step[0] is None + assert start_end_step[1] is None + assert start_end_step[2] is None + start_end_step[0] = node.args[0] + start_end_step[1] = node.args[1] + start_end_step[2] = node.args[2] + + if start_end_step[0] is None: + start_end_step[0] = 0 + if start_end_step[2] is None: + start_end_step[2] = 1 + + if "dtype" in node.kwargs: + dtype = self._convert_data_type(str(node.kwargs["dtype"]), self.env) + elif any([isinstance(x, float) for x in start_end_step]): + dtype = self._convert_data_type(torch.get_default_dtype()) + else: + dtype = "int64" + start_end_step = [ + self.env[x] if isinstance(x, torch.fx.Node) else x for x in start_end_step + ] + return self.block_builder.emit(relax.op.arange(*start_end_step, dtype=dtype)) + + def _empty(self, node: fx.Node) -> relax.Var: + dtype = self._convert_data_type(str(node.kwargs["dtype"]), self.env) + return self.block_builder.emit(relax.op.zeros(node.args[0], dtype)) + + def _fill(self, node: fx.Node) -> relax.Var: + args = self.retrieve_args(node) + x = args[0] + dtype = x.struct_info.dtype + value = ( + args[1] if isinstance(args[1], relax.Expr) else relax.const(args[1], dtype) + ) + return self.block_builder.emit(relax.op.full(x.struct_info.shape, value, dtype)) + + def _new_ones(self, node: fx.Node) -> relax.Var: + args = self.retrieve_args(node) + self_var = args[0] + size = args[1] if isinstance(args[1], (list, tuple)) else args[1:] + if not isinstance(size, (list, tuple)): + size = (size,) + size = relax.ShapeExpr(size) + return self.block_builder.emit( + relax.op.full( + size, + relax.const(1, self_var.struct_info.dtype), + self_var.struct_info.dtype, + ) + ) + + ########## Others ########## + + def _getitem(self, node: fx.Node) -> relax.Var: + import torch + + x = self.env[node.args[0]] + if isinstance(x, (list, tuple, relax.ShapeExpr, relax.Tuple)): + return x[node.args[1]] + elif isinstance(x, relax.Var): + if isinstance(x.struct_info, relax.TupleStructInfo): + return self.block_builder.emit(relax.TupleGetItem(x, node.args[1])) + + assert isinstance(x.struct_info, relax.TensorStructInfo) + take_indices = [] + take_axes = [] + stride_begin = [] + stride_end = [] + stride = [] + stride_axes = [] + expand_dim = [] + i = 0 + shape = self.shape_of(x) + non_ellipsis_cnt = 0 + for index in node.args[1]: + if isinstance(index, (int, slice, torch.fx.Node)): + non_ellipsis_cnt += 1 + for index in node.args[1]: + if isinstance(index, int): + stride_begin.append(index) + stride_end.append(index + 1) + stride.append(1) + stride_axes.append(i) + i = i + 1 + elif isinstance(index, slice): + stride_begin.append(0 if index.start is None else index.start) + stride_end.append(shape[i] if index.stop is None else index.stop) + stride.append(1 if index.step is None else index.step) + stride_axes.append(i) + i = i + 1 + elif index is None: + expand_dim.append(len(stride_axes) + len(expand_dim)) + elif index is Ellipsis: + for _ in range(len(shape) - non_ellipsis_cnt): + stride_begin.append(0) + stride_end.append(shape[i]) + stride.append(1) + stride_axes.append(i) + i += 1 + elif isinstance(index, torch.fx.Node): + node_index = self.env[index] + if not isinstance(node_index, relax.Expr): + raise ValueError( + "Unsupported index type for relax.op.take: " + 
+ str(type(node_index)) + ) + take_indices.append(node_index) + take_axes.append(i) + i = i + 1 + else: + raise ValueError("Unsupported index type: " + str(type(index))) + while i < len(shape): + stride_begin.append(0) + stride_end.append(shape[i]) + stride.append(1) + stride_axes.append(i) + i += 1 + taken = x + if len(take_indices) > 1: + raise ValueError("Multiple tensors as index not yet supported") + for each_index, each_axis in zip(take_indices, take_axes): + taken = self.block_builder.emit( + relax.op.take(taken, each_index, each_axis) + ) + sliced = self.block_builder.emit( + relax.op.strided_slice( + taken, stride_axes, stride_begin, stride_end, stride + ) + ) + sliced_shape = list(self.shape_of(sliced)) + for i in expand_dim: + sliced_shape.insert(i, 1) + return self.block_builder.emit(relax.op.reshape(sliced, sliced_shape)) + elif isinstance(x, relax.Constant): + dtype = x.struct_info.dtype + return relax.const(x.data.numpy()[node.args[1]], dtype) + else: + assert False + + ########## Utilities ########## + + def _fetch_attr(self, model, target: str): + import torch # type: ignore + + target_atoms = target.split(".") + attr_itr = model + for i, atom in enumerate(target_atoms): + if not hasattr(attr_itr, atom): + raise RuntimeError( + f"Node referenced non existing target {'.'.join(target_atoms[:i])}" + ) + attr_itr = getattr(attr_itr, atom) + if isinstance(attr_itr, torch.Tensor): + # Its possible for the resulting tensor to be a parameter. + # If so, return the parameter instead. + if attr_itr in self.params: + return self.params[attr_itr] + return self._convert_torch_tensor_to_relax(attr_itr) + return attr_itr + + ########## Unary Ops ########## + + def _leakyrelu_module(self, node: fx.Node) -> relax.Var: + x = self.env[node.args[0]] + module = self.named_modules[node.target] + alpha = module.negative_slope + return self.block_builder.emit(relax.op.nn.leakyrelu(x, alpha)) + + def _log_softmax_module(self, node: fx.Node) -> relax.Var: + x = self.env[node.args[0]] + module = self.named_modules[node.target] + dim = module.dim + assert dim is not None + return self.block_builder.emit(relax.op.nn.log_softmax(x, dim)) + + def _softmax_module(self, node: fx.Node) -> relax.Var: + x = self.env[node.args[0]] + module = self.named_modules[node.target] + dim = module.dim + assert dim is not None + return self.block_builder.emit(relax.op.nn.softmax(x, dim)) + + def _inplace_tril_triu(self, op: Callable) -> Callable: + from torch import fx + + def convert(node: fx.Node) -> relax.Var: + x = self.env[node.args[0]] + k = node.args[1] if len(node.args) > 1 else 0 + assert isinstance(k, int) + + mutated = self.block_builder.emit(op(x, k)) + self.env[node.args[0]] = mutated + return mutated + + return convert + + ########## Neural Network ########## + + def _adaptive_avg_pool2d_module(self, node: fx.Node) -> relax.Var: + + module = self.named_modules[node.target] + x = self.env[node.args[0]] + output_size = module.output_size + return self.block_builder.emit( + relax.op.nn.adaptive_avg_pool2d(x, output_size, layout="NCHW") + ) + + def _avg_pool2d_module(self, node: fx.Node) -> relax.Var: + x = self.env[node.args[0]] + module = self.named_modules[node.target] + kernel_size = module.kernel_size + stride = module.stride + padding = module.padding + ceil_mode = module.ceil_mode + return self._avg_pool2d_impl(x, kernel_size, stride, padding, ceil_mode) + + def _batch_norm_2d_module(self, node: fx.Node) -> relax.Var: + x = self.env[node.args[0]] + module = self.named_modules[node.target] + weight = 
self.params[module.weight] + bias = self.params[module.bias] + running_mean = self._convert_torch_tensor_to_relax(module.running_mean) + running_var = self._convert_torch_tensor_to_relax(module.running_var) + eps = module.eps + + res_tuple = self.block_builder.emit( + relax.op.nn.batch_norm( + x, + weight, + bias, + running_mean, + running_var, + axis=1, + epsilon=eps, + ) + ) + + return self.block_builder.emit(relax.TupleGetItem(res_tuple, 0)) + + def _conv_transpose1d_module(self, node: fx.Node) -> relax.Var: + x = self.env[node.args[0]] + module = self.named_modules[node.target] + weight = self.params[module.weight] + bias = self.params.get(module.bias, None) + + return self._conv_transpose1d_impl( + x, + weight, + bias=bias, + strides=module.stride, + padding=module.padding, + dilation=module.dilation, + groups=module.groups, + ) + + def _conv_transpose2d_module(self, node: fx.Node) -> relax.Var: + x = self.env[node.args[0]] + module = self.named_modules[node.target] + weight = self.params[module.weight] + bias = self.params.get(module.bias, None) + + return self._conv_transpose2d_impl( + x, + weight, + bias=bias, + strides=module.stride, + padding=module.padding, + dilation=module.dilation, + groups=module.groups, + ) + + def _conv1d_module(self, node: fx.Node) -> relax.Var: + x = self.env[node.args[0]] + module = self.named_modules[node.target] + weight = self.params[module.weight] + bias = self.params.get(module.bias, None) + + return self._conv1d_impl( + x, + weight, + bias=bias, + strides=module.stride, + padding=module.padding, + dilation=module.dilation, + groups=module.groups, + ) + + def _conv2d_module(self, node: fx.Node) -> relax.Var: + x = self.env[node.args[0]] + module = self.named_modules[node.target] + weight = self.params[module.weight] + bias = self.params.get(module.bias, None) + + return self._conv2d_impl( + x, + weight, + bias=bias, + strides=module.stride, + padding=module.padding, + dilation=module.dilation, + groups=module.groups, + ) + + def _conv3d_module(self, node: fx.Node) -> relax.Var: + x = self.env[node.args[0]] + module = self.named_modules[node.target] + weight = self.params[module.weight] + bias = self.params.get(module.bias, None) + + return self._conv3d_impl( + x, + weight, + bias=bias, + strides=module.stride, + padding=module.padding, + dilation=module.dilation, + groups=module.groups, + ) + + def _cross_entropy(self, node: fx.Node) -> relax.Expr: + preds = self.env[node.args[0]] + targets = self.env[node.args[1]] + weights = self.env.get(node.kwargs["weight"], None) + reduction = node.kwargs["reduction"] + ignore_index = node.kwargs["ignore_index"] + + return self.block_builder.emit( + relax.op.nn.nll_loss( + relax.op.nn.log_softmax(preds), + targets, + weights, + reduction, + ignore_index, + ) + ) + + def _cross_entropy_module(self, node: fx.Node) -> relax.Expr: + preds = self.env[node.args[0]] + targets = self.env[node.args[1]] + module = self.named_modules[node.target] + + weights = module.weight + if weights is not None: + if weights in self.params: + weights = self.params[weights] + else: + weights = relax.const(weights.numpy(), preds.struct_info.dtype) + + reduction = module.reduction + ignore_index = module.ignore_index + + return self.block_builder.emit( + relax.op.nn.nll_loss( + relax.op.nn.log_softmax(preds), + targets, + weights, + reduction, + ignore_index, + ) + ) + + def _embedding_module(self, node: fx.Node) -> relax.Var: + x = self.env[node.args[0]] + module = self.named_modules[node.target] + weight = self.params[module.weight] + 
return self._embedding_impl(x, weight) + + def _group_norm_module(self, node: fx.Node) -> relax.Var: + import torch # type: ignore + + x = self.env[node.args[0]] + module = self.named_modules[node.target] + num_groups = module.num_groups + if module.affine: + gamma = self.params[module.weight] + beta = self.params[module.bias] + else: + gamma = relax.const(torch.ones_like(module.num_channels), x.checked_type) + beta = relax.const(torch.zeros_like(module.num_channels), x.checked_type) + eps = module.eps + + dim = len(self.shape_of(x)) + return self.block_builder.emit( + relax.op.nn.group_norm( + x, + gamma, + beta, + num_groups=num_groups, + channel_axis=1, + axes=list(range(2, dim)), + epsilon=eps, + ) + ) + + def _interpolate(self, node: fx.Node) -> relax.Var: + # torch.nn.functional.interpolate( + # input, size=None, scale_factor=None, mode='nearest', align_corners=None, + # recompute_scale_factor=None, antialias=False) + # (TODO) this is a temporary implementation for interpolate that only considers NCHW layout + # it basically replicates the implementation in tvm.relay.frontend.pytorch + data = self.env[node.args[0]] + size = ( + node.args[1] + if len(node.args) > 1 + else (node.kwargs["size"] if "size" in node.kwargs else None) + ) + scale_factor = ( + node.args[2] + if len(node.args) > 2 + else ( + node.kwargs["scale_factor"] if "scale_factor" in node.kwargs else None + ) + ) + method = ( + node.args[3] + if len(node.args) > 3 + else (node.kwargs["mode"] if "mode" in node.kwargs else "nearest") + ) + align_corners = ( + node.args[4] + if len(node.args) > 4 + else ( + node.kwargs["align_corners"] if "align_corners" in node.kwargs else None + ) + ) + recompute_scale_factor = ( + node.args[5] + if len(node.args) > 5 + else ( + node.kwargs["recompute_scale_factor"] + if "recompute_scale_factor" in node.kwargs + else None + ) + ) + antialias = ( + node.args[6] + if len(node.args) > 6 + else (node.kwargs["antialias"] if "antialias" in node.kwargs else False) + ) + + assert recompute_scale_factor is None + assert antialias is False + + if size is None: + shape = self.shape_of(data) + assert isinstance(shape, relax.ShapeExpr) + if isinstance(scale_factor, tuple): + assert len(scale_factor) == len(shape) - 2 + size = tuple( + int(shape[i].value * scale_factor[i - 2]) + for i in range(2, len(shape)) + ) + else: + size = tuple( + int(shape[i].value * scale_factor) for i in range(2, len(shape)) + ) + + if method.startswith("nearest"): + method = "nearest_neighbor" + elif method[0:2] == "bi": + method = method[2:] + + if method == "nearest_neighbor": + coord_trans = "asymmetric" + elif align_corners: + coord_trans = "align_corners" + else: + coord_trans = "half_pixel" + + return self.block_builder.emit( + relax.op.image.resize2d( + data, + size, + layout="NCHW", + method=method, + coordinate_transformation_mode=coord_trans, + ) + ) + + def _linear_module(self, node: fx.Node) -> relax.Var: + x = self.env[node.args[0]] + module = self.named_modules[node.target] + weight = self.params[module.weight] + bias = self.params.get(module.bias, None) + return self.block_builder.emit(relax.op.linear(x, weight, bias, "float32")) + + def _max_pool2d_module(self, node: fx.Node) -> relax.Var: + x = self.env[node.args[0]] + module = self.named_modules[node.target] + kernel_size = module.kernel_size + stride = module.stride + padding = module.padding + dilation = module.dilation + ceil_mode = module.ceil_mode + + return self._max_pool2d_impl( + x, kernel_size, stride, padding, dilation, ceil_mode + ) + + 
########## Manipulation ########## + + def _chunk(self, node: fx.Node) -> relax.Var: + x = self.env[node.args[0]] + chunks = node.args[1] + dim = node.args[2] if len(node.args) > 2 else node.kwargs.get("dim", 0) + return self.block_builder.emit(relax.op.split(x, chunks, dim)) + + def _flatten_impl(self, x, start_dim, end_dim) -> relax.Var: + shape = self.shape_of(x) + start_dim = start_dim if start_dim >= 0 else len(shape) + start_dim + end_dim = end_dim if end_dim >= 0 else len(shape) + end_dim + flattened = reduce( + lambda x, y: x * y, [shape[i] for i in range(start_dim, end_dim + 1)] + ) + new_shape = ( + [shape[i] for i in range(0, start_dim)] + + [flattened] + + [shape[i] for i in range(end_dim + 1, len(shape))] + ) + return self.block_builder.emit(relax.op.reshape(x, new_shape)) + + def _flatten(self, node: fx.Node) -> relax.Var: + x = self.env[node.args[0]] + start_dim = ( + node.args[1] if len(node.args) >= 2 else node.kwargs.get("start_dim", 0) + ) + end_dim = ( + node.args[2] if len(node.args) == 3 else node.kwargs.get("end_dim", -1) + ) + return self._flatten_impl(x, start_dim, end_dim) + + def _flatten_module(self, node: fx.Node) -> relax.Var: + x = self.env[node.args[0]] + module = self.named_modules[node.target] + start_dim = module.start_dim + end_dim = module.end_dim + return self._flatten_impl(x, start_dim, end_dim) + + def _size(self, node: fx.Node) -> relax.Expr: + x = self.env[node.args[0]] + shape = self.shape_of(x) + if len(node.args) == 1: + assert isinstance(shape, relax.ShapeExpr) + return shape + assert len(node.args) == 2 + idx = node.args[1] + return self.shape_of(x)[idx].value + + ########## Creation ########## + + def _inplace_fill(self, node: fx.Node) -> relax.Var: + args = self.retrieve_args(node) + x = args[0] + dtype = x.struct_info.dtype + value = ( + args[1] if isinstance(args[1], relax.Expr) else relax.const(args[1], dtype) + ) + filled = self.block_builder.emit( + relax.op.full(x.struct_info.shape, value, dtype) + ) + self.env[node.args[0]] = filled + return filled + + def _full(self, node: fx.Node) -> relax.Var: + import torch + + args = self.retrieve_args(node) + size = relax.ShapeExpr( + args[0] if isinstance(args[0], (list, tuple)) else (args[0],) + ) + dtype = self._convert_data_type( + node.kwargs.get("dtype", torch.get_default_dtype()), self.env + ) + value = ( + args[1] + if isinstance(args[1], relax.expr.Constant) + else relax.const(args[1], dtype) + ) + return self.block_builder.emit( + relax.op.full( + size, + value, + dtype, + ) + ) + + def _index_select(self, node: fx.Node) -> relax.Var: + x = self.env[node.args[0]] + dim = node.args[1] + index = self.env[node.args[2]] + return self.block_builder.emit(relax.op.take(x, index, dim)) + + def _inplace_masked_fill(self, node: fx.Node) -> relax.Var: + x = self.env[node.args[0]] + mask = self.env[node.args[1]] + value = node.args[2] + rx_value = relax.const(value) + values = self.block_builder.emit(relax.op.full_like(x, rx_value)) + output = self.block_builder.emit(relax.op.where(mask, values, x)) + self.env[node.args[0]] = output + return output + + def _masked_fill(self, node: fx.Node) -> relax.Var: + x = self.env[node.args[0]] + mask = self.env[node.args[1]] + rx_value = relax.const(node.args[2]) + values = self.block_builder.emit(relax.op.full_like(x, rx_value)) + return self.block_builder.emit(relax.op.where(mask, values, x)) + + def _masked_scatter(self, node: fx.Node) -> relax.Var: + x = self.env[node.args[0]] + mask = self.env[node.args[1]] + source = self.env[node.args[2]] + ndim = 
len(mask.struct_info.shape) + if ndim == 1: + index = self.block_builder.emit(relax.op.cumsum(mask, 0, dtype="int32")) + index = self.block_builder.emit( + relax.op.subtract(index, relax.const(1, "int32")) + ) + gathered_source = self.block_builder.emit( + relax.op.take(source, index, axis=0) + ) + else: + f_mask = self.block_builder.emit(relax.op.reshape(mask, [-1])) + index = self.block_builder.emit(relax.op.cumsum(f_mask, 0, dtype="int32")) + index = self.block_builder.emit( + relax.op.subtract(index, relax.const(1, "int32")) + ) + source_shape = [-1] + [ + s for idx, s in enumerate(source.struct_info.shape) if idx >= ndim + ] + f_source = self.block_builder.emit(relax.op.reshape(source, source_shape)) + gathered_source = self.block_builder.emit( + relax.op.take(f_source, index, axis=0) + ) + gathered_source = self.block_builder.emit( + relax.op.reshape(gathered_source, x.struct_info.shape) + ) + if ndim != len(x.struct_info.shape): + mask = self.block_builder.emit( + relax.op.broadcast_to(mask, x.struct_info.shape) + ) + return self.block_builder.emit(relax.op.where(mask, gathered_source, x)) + + def _ones(self, node: fx.Node) -> relax.Var: + import torch + + args = self.retrieve_args(node) + size = relax.ShapeExpr( + args[0] if isinstance(args[0], (list, tuple)) else (args[0],) + ) + dtype = self._convert_data_type( + node.kwargs.get("dtype", torch.get_default_dtype()), self.env + ) + return self.block_builder.emit( + relax.op.full( + size, + relax.const(1, dtype), + dtype, + ) + ) + + def _tensor(self, node: fx.Node) -> relax.Var: + dtype = node.kwargs.get("dtype", None) + if isinstance(node.args[0], float): + return relax.const(node.args[0], dtype if dtype is not None else "float32") + elif isinstance(node.args[0], int): + return relax.const(node.args[0], dtype if dtype is not None else "int64") + raise ValueError("torch.tensor with value not a float or int is not accepted") + + ########## DataType ########## + + def _float(self, node: fx.Node) -> relax.Var: + return self.block_builder.emit( + relax.op.astype(self.env[node.args[0]], "float32") + ) + + def _half(self, node: fx.Node) -> relax.Var: + return self.block_builder.emit( + relax.op.astype(self.env[node.args[0]], "float16") + ) + + def _to(self, node: fx.Node) -> relax.Var: + import torch + + x = self.env[node.args[0]] + if len(node.args) == 2: + if isinstance(node.args[1], torch.dtype): + dtype = TorchFXImporter._convert_data_type(node.args[1], self.env) + return self.block_builder.emit(relax.op.astype(x, dtype)) + elif "dtype" in node.kwargs: + dtype = TorchFXImporter._convert_data_type(node.kwargs["dtype"], self.env) + return self.block_builder.emit(relax.op.astype(x, dtype)) + return x + + def _type(self, node: fx.Node) -> relax.Var: + x = self.env[node.args[0]] + dtype = TorchFXImporter._convert_data_type(node.args[1], self.env) + return self.block_builder.emit(relax.op.astype(x, dtype)) + + ########## Others ########## + + def _getattr(self, node: fx.Node) -> relax.Var: + if isinstance(self.env[node.args[0]], relax.Expr): + if node.args[1] == "dtype": + return self.env[node.args[0]].struct_info.dtype + elif node.args[1] == "shape": + return self.shape_of(self.env[node.args[0]]) + return getattr(self.env[node.args[0]], node.args[1]) + + def _sym_size_int(self, node: fx.Node) -> relax.Expr: + x = self.env[node.args[0]] + shape = self.shape_of(x) + idx = node.args[1] + return self.block_builder.emit(relax.const(shape[idx].value, "int32")) + + def create_input_vars( + self, input_info: List[Tuple[Tuple[int], str]] + ) -> 
List[relax.Var]: + inputs = list() + for idx, (shape, dtype) in enumerate(input_info): + inputs.append( + relax.Var( + f"inp_{idx}", + relax.TensorStructInfo(shape, self._convert_data_type(dtype)), + ) + ) + return inputs + + def create_convert_map( + self, + ) -> Dict[Union[torch.nn.Module, str], Callable[[fx.Node], relax.Var]]: + import operator + + from torch import nn + + return { + ## call_module + # unary + nn.Dropout: lambda node: self.env[node.args[0]], + nn.GELU: self._gelu, + nn.Hardsigmoid: self._hardsigmoid, + nn.Hardswish: self._hardswish, + nn.Identity: lambda node: self.env[node.args[0]], + nn.LeakyReLU: self._leakyrelu_module, + nn.LogSoftmax: self._log_softmax_module, + nn.ReLU: self._unary_op(relax.op.nn.relu), + nn.ReLU6: lambda node: self.block_builder.emit( + relax.op.clip(self.env[node.args[0]], 0, 6) + ), + nn.Sigmoid: self._unary_op(relax.op.sigmoid), + nn.SiLU: self._unary_op(relax.op.nn.silu), + nn.Softmax: self._softmax_module, + nn.Tanh: self._unary_op(relax.op.tanh), + # neural network + nn.AdaptiveAvgPool2d: self._adaptive_avg_pool2d_module, + nn.AvgPool2d: self._avg_pool2d_module, + nn.BatchNorm2d: self._batch_norm_2d_module, + nn.Conv1d: self._conv1d_module, + nn.Conv2d: self._conv2d_module, + nn.Conv3d: self._conv3d_module, + nn.ConvTranspose1d: self._conv_transpose1d_module, + nn.ConvTranspose2d: self._conv_transpose2d_module, + nn.CrossEntropyLoss: self._cross_entropy_module, + nn.GroupNorm: self._group_norm_module, + nn.LayerNorm: self._layer_norm_module, + nn.Linear: self._linear_module, + nn.MaxPool2d: self._max_pool2d_module, + nn.modules.sparse.Embedding: self._embedding_module, + # tensor manipulation + nn.Flatten: self._flatten_module, + ## call_function and call_method + # unary + "acos": self._unary_op(relax.op.acos), + "acosh": self._unary_op(relax.op.acosh), + "asin": self._unary_op(relax.op.asin), + "asinh": self._unary_op(relax.op.asinh), + "atan": self._unary_op(relax.op.atan), + "atanh": self._unary_op(relax.op.atanh), + "clamp": self._clamp, + "cos": self._unary_op(relax.op.cos), + "cosh": self._unary_op(relax.op.cosh), + "dropout": lambda node: self.env[node.args[0]], + "exp": self._unary_op(relax.op.exp), + "gelu": self._gelu, + "hardsigmoid": self._hardsigmoid, + "hardswish": self._hardswish, + "leaky_relu": self._leakyrelu, + "log_softmax": self._log_softmax, + "neg": self._unary_op(relax.op.negative), + "relu": self._unary_op(relax.op.nn.relu), + "round": self._round, + "rsqrt": self._unary_op(relax.op.rsqrt), + "sigmoid": self._unary_op(relax.op.sigmoid), + "silu": self._unary_op(relax.op.nn.silu), + "sin": self._unary_op(relax.op.sin), + "sinh": self._unary_op(relax.op.sinh), + "softmax": self._softmax, + "sqrt": self._unary_op(relax.op.sqrt), + "tan": self._unary_op(relax.op.tan), + "tanh": self._unary_op(relax.op.tanh), + "tril_": self._inplace_tril_triu(relax.op.tril), + "tril": self._tril_triu(relax.op.tril), + "triu_": self._inplace_tril_triu(relax.op.triu), + "triu": self._tril_triu(relax.op.triu), + # binary + "add": self._binary_op(relax.op.add, operator.add), + "eq": self._binary_op(relax.op.equal, operator.eq), + "floordiv": self._binary_op(relax.op.floor_divide, operator.floordiv), + "iadd": self._binary_op(relax.op.add, operator.add), + "lt": self._binary_op(relax.op.less, operator.lt), + "matmul": self._binary_op( + partial(relax.op.linear_algebra.matmul, out_dtype="float32"), + operator.matmul, + ), + "max": self._binary_op(relax.op.maximum, max), + "mul": self._binary_op(relax.op.multiply, operator.mul), + "pow": 
self._binary_op(relax.op.power, operator.pow), + "sub": self._binary_op(relax.op.subtract, operator.sub), + "truediv": self._binary_op(relax.op.divide, operator.truediv), + # neural network + "adaptive_avg_pool2d": self._adaptive_avg_pool2d, + "addmm": self._addmm, + "avg_pool2d": self._avg_pool2d, + "baddbmm": self._baddbmm, + "bmm": self._binary_op( + partial(relax.op.linear_algebra.matmul, out_dtype="float32"), + operator.matmul, + ), + "conv_transpose1d": self._conv_transpose1d, + "conv_transpose2d": self._conv_transpose2d, + "conv1d": self._conv1d, + "conv2d": self._conv2d, + "conv3d": self._conv3d, + "cross_entropy": self._cross_entropy, + "einsum": self._einsum, + "interpolate": self._interpolate, + "layer_norm": self._layer_norm, + "linear": self._linear, + "max_pool2d": self._max_pool2d, + "scaled_dot_product_attention": self._scaled_dot_product_attention, + "stochastic_depth": lambda node: self.env[node.args[0]], + "unbind": self._unbind, + # statistical + "mean": self._mean, + "sum": self._sum, + # search + "argmax": self._argmax_argmin(relax.op.argmax), + "argmin": self._argmax_argmin(relax.op.argmin), + # tensor manipulation + "cat": self._cat, + "chunk": self._chunk, + "concat": self._cat, + "contiguous": lambda node: self.env[node.args[0]], + "cumsum": self._cumsum, + "expand": self._expand, + "flatten": self._flatten, + "permute": self._permute, + "repeat": self._repeat, + "reshape": self._reshape, + "scatter": self._scatter, + "size": self._size, + "split": self._split, + "squeeze": self._squeeze, + "stack": self._stack, + "tile": self._tile, + "transpose": self._transpose, + "unsqueeze": lambda node: self.block_builder.emit( + relax.op.expand_dims(self.env[node.args[0]], node.args[1]) + ), + "view": self._reshape, + # tensor creation + "arange": self._arange, + "empty": self._empty, + "fill_": self._inplace_fill, + "full": self._full, + "index_select": self._index_select, + "masked_fill_": self._inplace_masked_fill, + "masked_fill": self._masked_fill, + "masked_scatter": self._masked_scatter, + "new_ones": self._new_ones, + "ones": self._ones, + "tensor": self._tensor, + # datatype + "astype": self._type, + "float": self._float, + "half": self._half, + "to": self._to, + "type": self._type, + # other + "getattr": self._getattr, + "getitem": self._getitem, + "sym_size.int": self._sym_size_int, + } + + def update_convert_map(self, custom_convert_map: dict): + """Update self.convert_map with custom convert map + + Parameters + ---------- + custom_convert_map : Dictionary of str to Relax op + A custom op conversion map in the same format as self.convert_map + """ + + self.convert_map.update(custom_convert_map) + + def from_fx( + self, + model, + input_info: List[Tuple[Tuple[int], str]], + keep_params_as_input: bool, + unwrap_unit_return_tuple: bool, + no_bind_return_tuple: bool, + custom_convert_map: dict = None, + ) -> tvm.IRModule: + """Convert a PyTorch FX GraphModule to a Relax program.""" + from torch import fx + + if custom_convert_map: + custom_ops = set(custom_convert_map.keys()) + self.update_convert_map(custom_convert_map) + else: + custom_ops = set() + self.named_modules = dict(model.named_modules()) + + graph: fx.Graph = model.graph + + # Create input variables. + inputs = self.create_input_vars(input_info) + + # Initialize the block builder with a function and a dataflow block. 
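+        # When keep_params_as_input is True, each model parameter is appended to
+        # the function's input list (after the real runtime inputs) and recorded
+        # in self.params so the converters can reference it; its value is saved
+        # as a tvm.nd.array and later attached to the function as the "params"
+        # attribute, while "num_input" marks how many leading inputs are real
+        # runtime inputs rather than weights.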
+ func_name = "main" + self.block_builder = relax.BlockBuilder() + params = [] + if keep_params_as_input: + func_attrs = {"num_input": len(inputs)} + for name, param in sorted(model.named_parameters(), key=lambda x: x[0]): + shape = param.data.shape + dtype = self._convert_data_type(str(param.data.dtype)) + inputs.append(relax.Var(name, relax.TensorStructInfo(shape, dtype))) + self.params[param] = inputs[-1] + params.append(tvm.nd.array(param.data.cpu().numpy())) + else: + func_attrs = None + + with self.block_builder.function( + name=func_name, params=inputs.copy(), attrs=func_attrs + ): + output = None + with self.block_builder.dataflow(): + # Translate model parameters. + for _, param in model.named_parameters(): + shape = param.data.shape + dtype = self._convert_data_type(str(param.data.dtype)) + if dtype in ("float32", "float16"): + if not keep_params_as_input: + self.params[param] = self._convert_torch_tensor_to_relax( + param + ) + else: + raise ValueError( + "Unsupported data type for model parameters: %s" % dtype + ) + # Translate the model. + for node in graph.nodes: + if node.op == "placeholder": + assert ( + len(inputs) > 0 + ), "Provided inputs is less than actual inputs" + if ( + "grapharg" in node.meta + and node.meta["grapharg"].fake_tensor is None + ): + # Ignore sym input + continue + + self.env[node] = inputs.pop(0) + elif node.op == "output": + args = self.retrieve_args(node) + assert len(args) == 1 + + # return tuple + if isinstance(args[0], (tuple, list, relax.Tuple)): + # unit tuple + if unwrap_unit_return_tuple and len(args[0]) == 1: + output = self.block_builder.emit_output(args[0][0]) + elif no_bind_return_tuple: + output = [] + for ret in args[0]: + output.append(self.block_builder.emit_output(ret)) + + if output is None: + output = self.block_builder.emit_output(args[0]) + break + elif node.op == "get_attr": + self.env[node] = self._fetch_attr(model, node.target) + elif node.op == "call_module": + module = self.named_modules[node.target] + assert ( + type(module) in self.convert_map + ), f"Unsupported module type {type(module)}" + self.env[node] = self.convert_map[type(module)](node) + elif node.op == "call_function": + func_name = node.target.__name__ + assert ( + func_name in self.convert_map + ), f"Unsupported function type {func_name}" + if func_name in custom_ops: + self.env[node] = self.convert_map[func_name](node, self) + else: + self.env[node] = self.convert_map[func_name](node) + elif node.op == "call_method": + assert ( + node.target in self.convert_map + ), f"Unsupported function target {node.target}" + self.env[node] = self.convert_map[node.target](node) + else: + raise ValueError(f"Unsupported op {node.op}") + assert output is not None + self.block_builder.emit_func_output(output) + + mod = self.block_builder.get() + if keep_params_as_input: + mod["main"] = mod["main"].with_attr("params", params) + return mod + + +def from_fx( + model, + input_info: List[Tuple[Tuple[int], str]], + *, + keep_params_as_input: bool = False, + unwrap_unit_return_tuple: bool = False, + no_bind_return_tuple: bool = False, + custom_convert_map: dict = None, +) -> tvm.IRModule: + return TorchFXImporter().from_fx( + model, + input_info, + keep_params_as_input, + unwrap_unit_return_tuple, + no_bind_return_tuple, + custom_convert_map=custom_convert_map, + ) diff --git a/models/_align_scripted.py b/models/_align_scripted.py new file mode 100644 index 000000000000..a36acac6b646 --- /dev/null +++ b/models/_align_scripted.py @@ -0,0 +1,1689 @@ +# coding=utf-8 +# Copyright 2023 The 
Google Research Team Authors and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" PyTorch ALIGN model.""" + +import math +from dataclasses import dataclass +from typing import Any, Optional, Tuple, Union + +import torch +import torch.utils.checkpoint +from torch import nn + +from transformers.activations import ACT2FN +from transformers.modeling_outputs import ( + BaseModelOutputWithNoAttention, + BaseModelOutputWithPastAndCrossAttentions, + BaseModelOutputWithPoolingAndCrossAttentions, + BaseModelOutputWithPoolingAndNoAttention, +) +from transformers.modeling_utils import PreTrainedModel +from transformers.pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer +from transformers.utils import ( + ModelOutput, + add_start_docstrings, + add_start_docstrings_to_model_forward, + logging, + replace_return_docstrings, +) +from transformers.models.align.configuration_align import AlignConfig, AlignTextConfig, AlignVisionConfig +from utils import script_with_log + +logger = logging.get_logger(__name__) + +_CHECKPOINT_FOR_DOC = "kakaobrain/align-base" +_CONFIG_FOR_DOC = "AlignConfig" + + +ALIGN_PRETRAINED_MODEL_ARCHIVE_LIST = [ + "kakaobrain/align-base", + # See all ALIGN models at https://huggingface.co/models?filter=align +] + + +ALIGN_START_DOCSTRING = r""" + This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the + library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads + etc.) + + This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. + Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage + and behavior. + + Parameters: + config ([`AlignConfig`]): Model configuration class with all the parameters of the model. + Initializing with a config file does not load the weights associated with the model, only the + configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. +""" + +ALIGN_TEXT_INPUTS_DOCSTRING = r""" + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide + it. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. 
+ + [What are attention masks?](../glossary#attention-mask) + position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, + config.max_position_embeddings - 1]`. + + [What are position IDs?](../glossary#position-ids) + token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*): + Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, + 1]`: + + - 0 corresponds to a *sentence A* token, + - 1 corresponds to a *sentence B* token. + + [What are token type IDs?](../glossary#token-type-ids) + head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): + Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*): + Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This + is useful if you want more control over how to convert `input_ids` indices into associated vectors than the + model's internal embedding lookup matrix. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. +""" + +ALIGN_VISION_INPUTS_DOCSTRING = r""" + Args: + pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): + Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using + [`AutoImageProcessor`]. See [`EfficientNetImageProcessor.__call__`] for details. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. +""" + +ALIGN_INPUTS_DOCSTRING = r""" + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide + it. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, + config.max_position_embeddings - 1]`. 
+ + [What are position IDs?](../glossary#position-ids) + token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*): + Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, + 1]`: + + - 0 corresponds to a *sentence A* token, + - 1 corresponds to a *sentence B* token. + + [What are token type IDs?](../glossary#token-type-ids) + head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): + Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*): + Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This + is useful if you want more control over how to convert `input_ids` indices into associated vectors than the + model's internal embedding lookup matrix. + pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): + Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using + [`AutoImageProcessor`]. See [`EfficientNetImageProcessor.__call__`] for details. + return_loss (`bool`, *optional*): + Whether or not to return the contrastive loss. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. +""" + + +@dataclass +class AlignVisionModelOutput(ModelOutput): + """ + Base class for vision model's outputs that also contains image embeddings of the pooling of the last hidden states. + + Args: + image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)` *optional* returned when model is initialized with `with_projection=True`): + The image embeddings obtained by applying the projection layer to the pooler_output. + last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + Sequence of hidden-states at the output of the last layer of the model. + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. + """ + + image_embeds: Optional[torch.FloatTensor] = None + last_hidden_state: torch.FloatTensor = None + hidden_states: Optional[Tuple[torch.FloatTensor]] = None + + +@dataclass +class AlignTextModelOutput(ModelOutput): + """ + Base class for text model's outputs that also contains a pooling of the last hidden states. + + Args: + text_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)` *optional* returned when model is initialized with `with_projection=True`): + The text embeddings obtained by applying the projection layer to the pooler_output. 
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + Sequence of hidden-states at the output of the last layer of the model. + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. + """ + + text_embeds: Optional[torch.FloatTensor] = None + last_hidden_state: torch.FloatTensor = None + hidden_states: Optional[Tuple[torch.FloatTensor]] = None + attentions: Optional[Tuple[torch.FloatTensor]] = None + + +@dataclass +class AlignOutput(ModelOutput): + """ + Args: + loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`): + Contrastive loss for image-text similarity. + logits_per_image:(`torch.FloatTensor` of shape `(image_batch_size, text_batch_size)`): + The scaled dot product scores between `image_embeds` and `text_embeds`. This represents the image-text + similarity scores. + logits_per_text:(`torch.FloatTensor` of shape `(text_batch_size, image_batch_size)`): + The scaled dot product scores between `text_embeds` and `image_embeds`. This represents the text-image + similarity scores. + text_embeds(`torch.FloatTensor` of shape `(batch_size, output_dim`): + The text embeddings obtained by applying the projection layer to the pooled output of [`AlignTextModel`]. + image_embeds(`torch.FloatTensor` of shape `(batch_size, output_dim`): + The output of [`AlignVisionModel`]. + text_model_output(`BaseModelOutputWithPoolingAndCrossAttentions`): + The output of the [`AlignTextModel`]. + vision_model_output(`BaseModelOutputWithPoolingAndNoAttention`): + The output of the [`AlignVisionModel`]. 
+ """ + + loss: Optional[torch.FloatTensor] = None + logits_per_image: torch.FloatTensor = None + logits_per_text: torch.FloatTensor = None + text_embeds: torch.FloatTensor = None + image_embeds: torch.FloatTensor = None + text_model_output: BaseModelOutputWithPoolingAndCrossAttentions = None + vision_model_output: BaseModelOutputWithPoolingAndNoAttention = None + + def to_tuple(self) -> Tuple[Any]: + return tuple( + self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple() + for k in self.keys() + ) + + +# contrastive loss function, adapted from +# https://sachinruk.github.io/blog/pytorch/pytorch%20lightning/loss%20function/gpu/2021/03/07/CLIP.html +def contrastive_loss(logits: torch.Tensor) -> torch.Tensor: + return nn.functional.cross_entropy(logits, torch.arange(len(logits), device=logits.device), label_smoothing=0.1) + + +def align_loss(similarity: torch.Tensor) -> torch.Tensor: + caption_loss = contrastive_loss(similarity) + image_loss = contrastive_loss(similarity.t()) + return (caption_loss + image_loss) / 2.0 + + +# Copied from transformers.models.efficientnet.modeling_efficientnet.round_filters with EfficientNet -> AlignVision +def round_filters(config: AlignVisionConfig, num_channels: int): + r""" + Round number of filters based on depth multiplier. + """ + divisor = config.depth_divisor + num_channels *= config.width_coefficient + new_dim = max(divisor, int(num_channels + divisor / 2) // divisor * divisor) + + # Make sure that round down does not go down by more than 10%. + if new_dim < 0.9 * num_channels: + new_dim += divisor + + return int(new_dim) + + +# Copied from transformers.models.efficientnet.modeling_efficientnet.correct_pad +def correct_pad(kernel_size: Union[int, Tuple], adjust: bool = True): + r""" + Utility function to get the tuple padding value for the depthwise convolution. + + Args: + kernel_size (`int` or `tuple`): + Kernel size of the convolution layers. + adjust (`bool`, *optional*, defaults to `True`): + Adjusts padding value to apply to right and bottom sides of the input. + """ + if isinstance(kernel_size, int): + kernel_size = (kernel_size, kernel_size) + + correct = (kernel_size[0] // 2, kernel_size[1] // 2) + if adjust: + return (correct[1] - 1, correct[1], correct[0] - 1, correct[0]) + else: + return (correct[1], correct[1], correct[0], correct[0]) + + +# Copied from transformers.models.efficientnet.modeling_efficientnet.EfficientNetEmbeddings with EfficientNet->AlignVision +class AlignVisionEmbeddings(nn.Module): + r""" + A module that corresponds to the stem module of the original work. 
+ """ + + def __init__(self, config: AlignVisionConfig): + super().__init__() + + self.out_dim = round_filters(config, 32) + self.padding = nn.ZeroPad2d(padding=(0, 1, 0, 1)) + self.convolution = nn.Conv2d( + config.num_channels, self.out_dim, kernel_size=3, stride=2, padding="valid", bias=False + ) + self.batchnorm = nn.BatchNorm2d(self.out_dim, eps=config.batch_norm_eps, momentum=config.batch_norm_momentum) + self.activation = ACT2FN[config.hidden_act] + + def forward(self, pixel_values: torch.Tensor) -> torch.Tensor: + features = self.padding(pixel_values) + features = self.convolution(features) + features = self.batchnorm(features) + features = self.activation(features) + + return features + + +# Copied from transformers.models.efficientnet.modeling_efficientnet.EfficientNetDepthwiseConv2d with EfficientNet->AlignVision +class AlignVisionDepthwiseConv2d(nn.Conv2d): + def __init__( + self, + in_channels, + depth_multiplier=1, + kernel_size=3, + stride=1, + padding=0, + dilation=1, + bias=True, + padding_mode="zeros", + ): + out_channels = in_channels * depth_multiplier + super().__init__( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + groups=in_channels, + bias=bias, + padding_mode=padding_mode, + ) + + +# Copied from transformers.models.efficientnet.modeling_efficientnet.EfficientNetExpansionLayer with EfficientNet->AlignVision +class AlignVisionExpansionLayer(nn.Module): + r""" + This corresponds to the expansion phase of each block in the original implementation. + """ + + def __init__(self, config: AlignVisionConfig, in_dim: int, out_dim: int, stride: int): + super().__init__() + self.expand_conv = nn.Conv2d( + in_channels=in_dim, + out_channels=out_dim, + kernel_size=1, + padding="same", + bias=False, + ) + self.expand_bn = nn.BatchNorm2d(num_features=out_dim, eps=config.batch_norm_eps) + self.expand_act = ACT2FN[config.hidden_act] + + def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor: + # Expand phase + hidden_states = self.expand_conv(hidden_states) + hidden_states = self.expand_bn(hidden_states) + hidden_states = self.expand_act(hidden_states) + + return hidden_states + + +# Copied from transformers.models.efficientnet.modeling_efficientnet.EfficientNetDepthwiseLayer with with EfficientNet->AlignVision +class AlignVisionDepthwiseLayer(nn.Module): + r""" + This corresponds to the depthwise convolution phase of each block in the original implementation. 
+ """ + + def __init__( + self, + config: AlignVisionConfig, + in_dim: int, + stride: int, + kernel_size: int, + adjust_padding: bool, + ): + super().__init__() + self.stride = stride + conv_pad = "valid" if self.stride == 2 else "same" + padding = correct_pad(kernel_size, adjust=adjust_padding) + + self.depthwise_conv_pad = nn.ZeroPad2d(padding=padding) + self.depthwise_conv = AlignVisionDepthwiseConv2d( + in_dim, kernel_size=kernel_size, stride=stride, padding=conv_pad, bias=False + ) + self.depthwise_norm = nn.BatchNorm2d( + num_features=in_dim, eps=config.batch_norm_eps, momentum=config.batch_norm_momentum + ) + self.depthwise_act = ACT2FN[config.hidden_act] + + def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor: + # Depthwise convolution + if self.stride == 2: + hidden_states = self.depthwise_conv_pad(hidden_states) + + hidden_states = self.depthwise_conv(hidden_states) + hidden_states = self.depthwise_norm(hidden_states) + hidden_states = self.depthwise_act(hidden_states) + + return hidden_states + + +# Copied from transformers.models.efficientnet.modeling_efficientnet.EfficientNetSqueezeExciteLayer with with EfficientNet->AlignVision +class AlignVisionSqueezeExciteLayer(nn.Module): + r""" + This corresponds to the Squeeze and Excitement phase of each block in the original implementation. + """ + + def __init__(self, config: AlignVisionConfig, in_dim: int, expand_dim: int, expand: bool = False): + super().__init__() + self.dim = expand_dim if expand else in_dim + self.dim_se = max(1, int(in_dim * config.squeeze_expansion_ratio)) + + self.squeeze = nn.AdaptiveAvgPool2d(output_size=1) + self.reduce = nn.Conv2d( + in_channels=self.dim, + out_channels=self.dim_se, + kernel_size=1, + padding="same", + ) + self.expand = nn.Conv2d( + in_channels=self.dim_se, + out_channels=self.dim, + kernel_size=1, + padding="same", + ) + self.act_reduce = ACT2FN[config.hidden_act] + self.act_expand = nn.Sigmoid() + + def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor: + inputs = hidden_states + hidden_states = self.squeeze(hidden_states) + hidden_states = self.reduce(hidden_states) + hidden_states = self.act_reduce(hidden_states) + + hidden_states = self.expand(hidden_states) + hidden_states = self.act_expand(hidden_states) + hidden_states = torch.mul(inputs, hidden_states) + + return hidden_states + + +class AlignVisionFinalBlockLayer(nn.Module): + r""" + This corresponds to the final phase of each block in the original implementation. + """ + + def __init__( + self, config: AlignVisionConfig, in_dim: int, out_dim: int, stride: int, drop_rate: float, id_skip: bool + ): + super().__init__() + self.apply_dropout = stride == 1 and not id_skip + self.project_conv = nn.Conv2d( + in_channels=in_dim, + out_channels=out_dim, + kernel_size=1, + padding="same", + bias=False, + ) + self.project_bn = nn.BatchNorm2d( + num_features=out_dim, eps=config.batch_norm_eps, momentum=config.batch_norm_momentum + ) + self.dropout = nn.Dropout(p=drop_rate) + + def forward(self, embeddings: torch.FloatTensor, hidden_states: torch.FloatTensor) -> torch.Tensor: + hidden_states = self.project_conv(hidden_states) + hidden_states = self.project_bn(hidden_states) + + if self.apply_dropout: + hidden_states = self.dropout(hidden_states) + hidden_states = hidden_states + embeddings + + return hidden_states + + +class AlignVisionBlock(nn.Module): + r""" + This corresponds to the block module of original the EfficientNet vision encoder implementation. 
+ + Args: + config ([`AlignVisionConfig`]): + Model configuration class. + in_dim (`int`): + Number of input channels. + out_dim (`int`): + Number of output channels. + stride (`int`): + Stride size to be used in convolution layers. + expand_ratio (`int`): + Expand ratio to set the output dimensions for the expansion and squeeze-excite layers. + kernel_size (`int`): + Kernel size for the depthwise convolution layer. + drop_rate (`float`): + Dropout rate to be used in the final phase of each block. + id_skip (`bool`): + Whether to apply dropout and sum the final hidden states with the input embeddings during the final phase + of each block. Set to `True` for the first block of each stage. + adjust_padding (`bool`): + Whether to apply padding to only right and bottom side of the input kernel before the depthwise convolution + operation, set to `True` for inputs with odd input sizes. + """ + + def __init__( + self, + config: AlignVisionConfig, + in_dim: int, + out_dim: int, + stride: int, + expand_ratio: int, + kernel_size: int, + drop_rate: float, + id_skip: bool, + adjust_padding: bool, + ): + super().__init__() + self.expand_ratio = expand_ratio + self.expand = True if self.expand_ratio != 1 else False + expand_in_dim = in_dim * expand_ratio + + if self.expand: + self.expansion = AlignVisionExpansionLayer( + config=config, in_dim=in_dim, out_dim=expand_in_dim, stride=stride + ) + + self.depthwise_conv = script_with_log(AlignVisionDepthwiseLayer( + config=config, + in_dim=expand_in_dim if self.expand else in_dim, + stride=stride, + kernel_size=kernel_size, + adjust_padding=adjust_padding, + )) + self.squeeze_excite = script_with_log(AlignVisionSqueezeExciteLayer( + config=config, in_dim=in_dim, expand_dim=expand_in_dim, expand=self.expand + )) + self.projection = script_with_log(AlignVisionFinalBlockLayer( + config=config, + in_dim=expand_in_dim if self.expand else in_dim, + out_dim=out_dim, + stride=stride, + drop_rate=drop_rate, + id_skip=id_skip, + )) + + def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor: + embeddings = hidden_states + # Expansion and depthwise convolution phase + if self.expand_ratio != 1: + hidden_states = self.expansion(hidden_states) + hidden_states = self.depthwise_conv(hidden_states) + + # Squeeze and excite phase + hidden_states = self.squeeze_excite(hidden_states) + hidden_states = self.projection(embeddings, hidden_states) + return hidden_states + + +class AlignVisionEncoder(nn.Module): + r""" + Forward propogates the embeddings through each vision encoder (EfficientNet) block. + + Args: + config ([`AlignVisionConfig`]): + Model configuration class. + """ + + def __init__(self, config: AlignVisionConfig): + super().__init__() + self.depth_coefficient = config.depth_coefficient + + def round_repeats(repeats): + # Round number of block repeats based on depth multiplier. 
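+            # e.g. depth_coefficient = 1.8 and repeats = 2 gives ceil(3.6) = 4 repeated blocks.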
+ return int(math.ceil(self.depth_coefficient * repeats)) + + num_base_blocks = len(config.in_channels) + num_blocks = sum(round_repeats(n) for n in config.num_block_repeats) + + curr_block_num = 0 + blocks = [] + for i in range(num_base_blocks): + in_dim = round_filters(config, config.in_channels[i]) + out_dim = round_filters(config, config.out_channels[i]) + stride = config.strides[i] + kernel_size = config.kernel_sizes[i] + expand_ratio = config.expand_ratios[i] + + for j in range(round_repeats(config.num_block_repeats[i])): + id_skip = True if j == 0 else False + stride = 1 if j > 0 else stride + in_dim = out_dim if j > 0 else in_dim + adjust_padding = False if curr_block_num in config.depthwise_padding else True + drop_rate = config.drop_connect_rate * curr_block_num / num_blocks + + block = AlignVisionBlock( + config=config, + in_dim=in_dim, + out_dim=out_dim, + stride=stride, + kernel_size=kernel_size, + expand_ratio=expand_ratio, + drop_rate=drop_rate, + id_skip=id_skip, + adjust_padding=adjust_padding, + ) + blocks.append(block) + curr_block_num += 1 + + self.blocks = nn.ModuleList(blocks) + + def forward( + self, + hidden_states: torch.FloatTensor, + output_hidden_states: Optional[bool] = False, + return_dict: Optional[bool] = True, + ) -> BaseModelOutputWithPoolingAndNoAttention: + all_hidden_states = (hidden_states,) if output_hidden_states else None + + for block in self.blocks: + hidden_states = block(hidden_states) + if output_hidden_states: + all_hidden_states += (hidden_states,) + + if not return_dict: + return tuple(v for v in [hidden_states, all_hidden_states] if v is not None) + + return BaseModelOutputWithNoAttention( + last_hidden_state=hidden_states, + hidden_states=all_hidden_states, + ) + + +# Copied from transformers.models.bert.modeling_bert.BertEmbeddings with Bert->AlignText +class AlignTextEmbeddings(nn.Module): + """Construct the embeddings from word, position and token_type embeddings.""" + + def __init__(self, config): + super().__init__() + self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) + self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) + self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) + + # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load + # any TensorFlow checkpoint file + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + # position_ids (1, len position emb) is contiguous in memory and exported when serialized + self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") + self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1))) + self.register_buffer( + "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False + ) + + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + token_type_ids: Optional[torch.LongTensor] = None, + position_ids: Optional[torch.LongTensor] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + past_key_values_length: int = 0, + ) -> torch.Tensor: + if input_ids is not None: + input_shape = input_ids.size() + else: + assert inputs_embeds is not None + input_shape = inputs_embeds.size()[:-1] + + seq_length = input_shape[1] + + if position_ids is None: + position_ids = self.position_ids[:, past_key_values_length : seq_length + 
past_key_values_length] + + # Setting the token_type_ids to the registered buffer in constructor where it is all zeros, which usually occurs + # when its auto-generated, registered buffer helps users when tracing the model without passing token_type_ids, solves + # issue #5664 + if token_type_ids is None: + if hasattr(self, "token_type_ids"): + buffered_token_type_ids = self.token_type_ids[:, :seq_length] + buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length) + token_type_ids = buffered_token_type_ids_expanded + else: + token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) + + if inputs_embeds is None: + assert input_ids is not None + inputs_embeds = self.word_embeddings(input_ids) + token_type_embeddings = self.token_type_embeddings(token_type_ids) + + embeddings = inputs_embeds + token_type_embeddings + if self.position_embedding_type == "absolute": + position_embeddings = self.position_embeddings(position_ids) + embeddings += position_embeddings + embeddings = self.LayerNorm(embeddings) + embeddings = self.dropout(embeddings) + return embeddings + + +# Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->AlignText +class AlignTextSelfAttention(nn.Module): + def __init__(self, config, position_embedding_type=None): + super().__init__() + if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): + raise ValueError( + f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " + f"heads ({config.num_attention_heads})" + ) + + self.num_attention_heads = config.num_attention_heads + self.attention_head_size = int(config.hidden_size / config.num_attention_heads) + self.all_head_size = self.num_attention_heads * self.attention_head_size + + self.query = nn.Linear(config.hidden_size, self.all_head_size) + self.key = nn.Linear(config.hidden_size, self.all_head_size) + self.value = nn.Linear(config.hidden_size, self.all_head_size) + + self.dropout = nn.Dropout(config.attention_probs_dropout_prob) + self.position_embedding_type = position_embedding_type or getattr( + config, "position_embedding_type", "absolute" + ) + if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": + self.max_position_embeddings = config.max_position_embeddings + self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size) + + self.is_decoder = config.is_decoder + + def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor: + new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) + x = x.view(new_x_shape) + return x.permute(0, 2, 1, 3) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.FloatTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + past_key_value: Optional[Tuple[torch.FloatTensor, torch.FloatTensor]] = None, + output_attentions: Optional[bool] = False, + ) -> Tuple[torch.Tensor]: + mixed_query_layer = self.query(hidden_states) + + # If this is instantiated as a cross-attention module, the keys + # and values come from an encoder; the attention mask needs to be + # such that the encoder's padding tokens are not attended to. 
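+        # Four cases for the key/value source below: cached cross-attention K/V,
+        # fresh cross-attention K/V computed from encoder_hidden_states, decoder
+        # self-attention that extends the cached K/V, or plain self-attention over
+        # hidden_states.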
+ is_cross_attention = encoder_hidden_states is not None + + if is_cross_attention and past_key_value is not None: + # reuse k,v, cross_attentions + key_layer = past_key_value[0] + value_layer = past_key_value[1] + attention_mask = encoder_attention_mask + elif is_cross_attention: + assert encoder_hidden_states is not None + key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) + value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) + attention_mask = encoder_attention_mask + elif past_key_value is not None: + key_layer = self.transpose_for_scores(self.key(hidden_states)) + value_layer = self.transpose_for_scores(self.value(hidden_states)) + key_layer = torch.cat([past_key_value[0], key_layer], dim=2) + value_layer = torch.cat([past_key_value[1], value_layer], dim=2) + else: + key_layer = self.transpose_for_scores(self.key(hidden_states)) + value_layer = self.transpose_for_scores(self.value(hidden_states)) + + query_layer = self.transpose_for_scores(mixed_query_layer) + + use_cache = past_key_value is not None + if self.is_decoder: + # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. + # Further calls to cross_attention layer can then reuse all cross-attention + # key/value_states (first "if" case) + # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of + # all previous decoder key/value_states. Further calls to uni-directional self-attention + # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) + # if encoder bi-directional self-attention `past_key_value` is always `None` + past_key_value = (key_layer, value_layer) + + # Take the dot product between "query" and "key" to get the raw attention scores. + attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) + + if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": + query_length, key_length = query_layer.shape[2], key_layer.shape[2] + if use_cache: + position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view( + -1, 1 + ) + else: + position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1) + position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1) + distance = position_ids_l - position_ids_r + + positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1) + positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility + + if self.position_embedding_type == "relative_key": + relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) + attention_scores = attention_scores + relative_position_scores + elif self.position_embedding_type == "relative_key_query": + relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) + relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding) + attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key + + attention_scores = attention_scores / math.sqrt(self.attention_head_size) + if attention_mask is not None: + # Apply the attention mask is (precomputed for all layers in AlignTextModel forward() function) + attention_scores = attention_scores + attention_mask + + # Normalize the attention scores to probabilities. 
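+        # Softmax is taken over the last (key) dimension, so each query position's
+        # attention weights sum to 1.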
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1) + + # This is actually dropping out entire tokens to attend to, which might + # seem a bit unusual, but is taken from the original Transformer paper. + attention_probs = self.dropout(attention_probs) + + # Mask heads if we want to + if head_mask is not None: + attention_probs = attention_probs * head_mask + + context_layer = torch.matmul(attention_probs, value_layer) + + context_layer = context_layer.permute(0, 2, 1, 3).contiguous() + new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) + context_layer = context_layer.view(new_context_layer_shape) + + outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) + + if self.is_decoder: + outputs = outputs + (past_key_value,) + return outputs + + +# Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->AlignText +class AlignTextSelfOutput(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states) + hidden_states = self.LayerNorm(hidden_states + input_tensor) + return hidden_states + + +# Copied from transformers.models.bert.modeling_bert.BertAttention with Bert->AlignText +class AlignTextAttention(nn.Module): + def __init__(self, config, position_embedding_type=None): + super().__init__() + self.self = AlignTextSelfAttention(config, position_embedding_type=position_embedding_type) + self.output = script_with_log(AlignTextSelfOutput(config)) + self.pruned_heads = set() + + def prune_heads(self, heads): + if len(heads) == 0: + return + heads, index = find_pruneable_heads_and_indices( + heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads + ) + + # Prune linear layers + self.self.query = prune_linear_layer(self.self.query, index) + self.self.key = prune_linear_layer(self.self.key, index) + self.self.value = prune_linear_layer(self.self.value, index) + self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) + + # Update hyper params and store pruned heads + self.self.num_attention_heads = self.self.num_attention_heads - len(heads) + self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads + self.pruned_heads = self.pruned_heads.union(heads) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.FloatTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, + output_attentions: Optional[bool] = False, + ) -> Tuple[torch.Tensor]: + self_outputs = self.self( + hidden_states, + attention_mask, + head_mask, + encoder_hidden_states, + encoder_attention_mask, + past_key_value, + output_attentions, + ) + attention_output = self.output(self_outputs[0], hidden_states) + outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them + return outputs + + +# Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->AlignText +class AlignTextIntermediate(nn.Module): + def __init__(self, 
config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.intermediate_size) + if isinstance(config.hidden_act, str): + self.intermediate_act_fn = ACT2FN[config.hidden_act] + else: + self.intermediate_act_fn = config.hidden_act + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + hidden_states = self.dense(hidden_states) + hidden_states = self.intermediate_act_fn(hidden_states) + return hidden_states + + +# Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->AlignText +class AlignTextOutput(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.intermediate_size, config.hidden_size) + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states) + hidden_states = self.LayerNorm(hidden_states + input_tensor) + return hidden_states + + +# Copied from transformers.models.bert.modeling_bert.BertLayer with Bert->AlignText +class AlignTextLayer(nn.Module): + def __init__(self, config): + super().__init__() + self.chunk_size_feed_forward = config.chunk_size_feed_forward + self.seq_len_dim = 1 + self.attention = AlignTextAttention(config) + self.is_decoder = config.is_decoder + self.add_cross_attention = config.add_cross_attention + if self.add_cross_attention: + if not self.is_decoder: + raise ValueError(f"{self} should be used as a decoder model if cross attention is added") + self.crossattention = AlignTextAttention(config, position_embedding_type="absolute") + self.intermediate = script_with_log(AlignTextIntermediate(config)) + self.output = script_with_log(AlignTextOutput(config)) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.FloatTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, + output_attentions: Optional[bool] = False, + ) -> Tuple[torch.Tensor]: + # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 + self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None + self_attention_outputs = self.attention( + hidden_states, + attention_mask, + head_mask, + output_attentions=output_attentions, + past_key_value=self_attn_past_key_value, + ) + attention_output = self_attention_outputs[0] + + # if decoder, the last output is tuple of self-attn cache + if self.is_decoder: + outputs = self_attention_outputs[1:-1] + present_key_value = self_attention_outputs[-1] + else: + outputs = self_attention_outputs[1:] # add self attentions if we output attention weights + + cross_attn_present_key_value = None + if self.is_decoder and encoder_hidden_states is not None: + if not hasattr(self, "crossattention"): + raise ValueError( + f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers" + " by setting `config.add_cross_attention=True`" + ) + + # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple + cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None + cross_attention_outputs = self.crossattention( + attention_output, + attention_mask, + head_mask, + 
encoder_hidden_states, + encoder_attention_mask, + cross_attn_past_key_value, + output_attentions, + ) + attention_output = cross_attention_outputs[0] + outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights + + # add cross-attn cache to positions 3,4 of present_key_value tuple + cross_attn_present_key_value = cross_attention_outputs[-1] + present_key_value = present_key_value + cross_attn_present_key_value + + layer_output = apply_chunking_to_forward( + self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output + ) + outputs = (layer_output,) + outputs + + # if decoder, return the attn key/values as the last output + if self.is_decoder: + outputs = outputs + (present_key_value,) + + return outputs + + def feed_forward_chunk(self, attention_output): + intermediate_output = self.intermediate(attention_output) + layer_output = self.output(intermediate_output, attention_output) + return layer_output + + +# Copied from transformers.models.bert.modeling_bert.BertEncoder with Bert->AlignText +class AlignTextEncoder(nn.Module): + def __init__(self, config): + super().__init__() + self.config = config + self.layer = nn.ModuleList([AlignTextLayer(config) for _ in range(config.num_hidden_layers)]) + self.gradient_checkpointing = False + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.FloatTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = False, + output_hidden_states: Optional[bool] = False, + return_dict: Optional[bool] = True, + ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]: + all_hidden_states = () if output_hidden_states else None + all_self_attentions = () if output_attentions else None + all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None + + if self.gradient_checkpointing and self.training: + if use_cache: + logger.warning_once( + "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." 
+ ) + use_cache = False + + next_decoder_cache = () if use_cache else None + for i, layer_module in enumerate(self.layer): + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + layer_head_mask = head_mask[i] if head_mask is not None else None + past_key_value = past_key_values[i] if past_key_values is not None else None + + if self.gradient_checkpointing and self.training: + + def create_custom_forward(module): + def custom_forward(*inputs): + return module(*inputs, past_key_value, output_attentions) + + return custom_forward + + layer_outputs = torch.utils.checkpoint.checkpoint( + create_custom_forward(layer_module), + hidden_states, + attention_mask, + layer_head_mask, + encoder_hidden_states, + encoder_attention_mask, + ) + else: + layer_outputs = layer_module( + hidden_states, + attention_mask, + layer_head_mask, + encoder_hidden_states, + encoder_attention_mask, + past_key_value, + output_attentions, + ) + + hidden_states = layer_outputs[0] + if use_cache: + next_decoder_cache += (layer_outputs[-1],) + if output_attentions: + all_self_attentions = all_self_attentions + (layer_outputs[1],) + if self.config.add_cross_attention: + all_cross_attentions = all_cross_attentions + (layer_outputs[2],) + + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + if not return_dict: + return tuple( + v + for v in [ + hidden_states, + next_decoder_cache, + all_hidden_states, + all_self_attentions, + all_cross_attentions, + ] + if v is not None + ) + return BaseModelOutputWithPastAndCrossAttentions( + last_hidden_state=hidden_states, + past_key_values=next_decoder_cache, + hidden_states=all_hidden_states, + attentions=all_self_attentions, + cross_attentions=all_cross_attentions, + ) + + +# Copied from transformers.models.bert.modeling_bert.BertPooler with Bert -> AlignText +class AlignTextPooler(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + self.activation = nn.Tanh() + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + # We "pool" the model by simply taking the hidden state corresponding + # to the first token. + first_token_tensor = hidden_states[:, 0] + pooled_output = self.dense(first_token_tensor) + pooled_output = self.activation(pooled_output) + return pooled_output + + +class AlignPreTrainedModel(PreTrainedModel): + """ + An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained + models. 
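+    Weight initialization (`_init_weights`) draws Linear, Conv2d and Embedding weights from a normal distribution with std `config.initializer_range` and resets LayerNorm to weight 1 and bias 0.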
+ """ + + config_class = AlignConfig + base_model_prefix = "align" + supports_gradient_checkpointing = True + _keys_to_ignore_on_load_missing = [r"position_ids"] + + def _init_weights(self, module): + """Initialize the weights""" + if isinstance(module, (nn.Linear, nn.Conv2d)): + module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) + if module.bias is not None: + module.bias.data.zero_() + elif isinstance(module, AlignModel): + nn.init.xavier_uniform_(module.text_projection.weight) + module.text_projection.bias.data.zero_() + module.text_projection._is_hf_initialized = True + elif isinstance(module, nn.Embedding): + module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) + if module.padding_idx is not None: + module.weight.data[module.padding_idx].zero_() + if isinstance(module, nn.LayerNorm): + module.bias.data.zero_() + module.weight.data.fill_(1.0) + + def _set_gradient_checkpointing(self, module, value=False): + if isinstance(module, (AlignTextModel, AlignVisionModel)): + module.gradient_checkpointing = value + + +@add_start_docstrings( + """The text model from ALIGN without any head or projection on top.""", + ALIGN_START_DOCSTRING, +) +class AlignTextModel(AlignPreTrainedModel): + config_class = AlignTextConfig + + def __init__(self, config: AlignTextConfig, add_pooling_layer: bool = True): + super().__init__(config) + self.config = config + + self.embeddings = script_with_log(AlignTextEmbeddings(config)) + self.encoder = AlignTextEncoder(config) + + self.pooler = script_with_log(AlignTextPooler(config)) if add_pooling_layer else None + + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.embeddings.word_embeddings + + def set_input_embeddings(self, value): + self.embeddings.word_embeddings = value + + @add_start_docstrings_to_model_forward(ALIGN_TEXT_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=BaseModelOutputWithPoolingAndCrossAttentions, config_class=AlignTextConfig) + def forward( + self, + input_ids: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + token_type_ids: Optional[torch.Tensor] = None, + position_ids: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + inputs_embeds: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, BaseModelOutputWithPoolingAndCrossAttentions]: + r""" + Returns: + + Examples: + + ```python + >>> from transformers import AutoTokenizer, AlignTextModel + + >>> model = AlignTextModel.from_pretrained("kakaobrain/align-base") + >>> tokenizer = AutoTokenizer.from_pretrained("kakaobrain/align-base") + + >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt") + + >>> outputs = model(**inputs) + >>> last_hidden_state = outputs.last_hidden_state + >>> pooled_output = outputs.pooler_output # pooled (EOS token) states + ```""" + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if input_ids is not None and inputs_embeds is not None: + raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") + elif input_ids is not None: + 
input_shape = input_ids.size() + elif inputs_embeds is not None: + input_shape = inputs_embeds.size()[:-1] + else: + raise ValueError("You have to specify either input_ids or inputs_embeds") + + batch_size, seq_length = input_shape + device = input_ids.device if input_ids is not None else inputs_embeds.device + + if attention_mask is None: + attention_mask = torch.ones(((batch_size, seq_length)), device=device) + + if token_type_ids is None: + if hasattr(self.embeddings, "token_type_ids"): + buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length] + buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length) + token_type_ids = buffered_token_type_ids_expanded + else: + token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) + + # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] + # ourselves in which case we just need to make it broadcastable to all heads. + extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape) + + # Prepare head mask if needed + # 1.0 in head_mask indicate we keep the head + # attention_probs has shape bsz x n_heads x N x N + # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] + # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] + head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) + + embedding_output = self.embeddings( + input_ids=input_ids, + position_ids=position_ids, + token_type_ids=token_type_ids, + inputs_embeds=inputs_embeds, + ) + encoder_outputs = self.encoder( + embedding_output, + attention_mask=extended_attention_mask, + head_mask=head_mask, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + sequence_output = encoder_outputs[0] + pooled_output = self.pooler(sequence_output) if self.pooler is not None else None + + if not return_dict: + return (sequence_output, pooled_output) + encoder_outputs[1:] + + return BaseModelOutputWithPoolingAndCrossAttentions( + last_hidden_state=sequence_output, + pooler_output=pooled_output, + hidden_states=encoder_outputs.hidden_states, + attentions=encoder_outputs.attentions, + cross_attentions=encoder_outputs.cross_attentions, + ) + + +@add_start_docstrings( + """The vision model from ALIGN without any head or projection on top.""", + ALIGN_START_DOCSTRING, +) +class AlignVisionModel(AlignPreTrainedModel): + config_class = AlignVisionConfig + main_input_name = "pixel_values" + + def __init__(self, config: AlignVisionConfig): + super().__init__(config) + self.config = config + self.embeddings = script_with_log(AlignVisionEmbeddings(config)) + self.encoder = AlignVisionEncoder(config) + + # Final pooling layer + if config.pooling_type == "mean": + self.pooler = nn.AvgPool2d(config.hidden_dim, ceil_mode=True) + elif config.pooling_type == "max": + self.pooler = nn.MaxPool2d(config.hidden_dim, ceil_mode=True) + else: + raise ValueError(f"config.pooling must be one of ['mean', 'max'] got {config.pooling}") + + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self) -> nn.Module: + return self.vision_model.embeddings.convolution + + @add_start_docstrings_to_model_forward(ALIGN_VISION_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=BaseModelOutputWithPoolingAndNoAttention, config_class=AlignVisionConfig) + def forward( + self, + pixel_values: 
Optional[torch.FloatTensor] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, BaseModelOutputWithPoolingAndNoAttention]: + r""" + Returns: + + Examples: + + ```python + >>> from PIL import Image + >>> import requests + >>> from transformers import AutoProcessor, AlignVisionModel + + >>> model = AlignVisionModel.from_pretrained("kakaobrain/align-base") + >>> processor = AutoProcessor.from_pretrained("kakaobrain/align-base") + + >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" + >>> image = Image.open(requests.get(url, stream=True).raw) + + >>> inputs = processor(images=image, return_tensors="pt") + + >>> outputs = model(**inputs) + >>> last_hidden_state = outputs.last_hidden_state + >>> pooled_output = outputs.pooler_output # pooled CLS states + ```""" + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if pixel_values is None: + raise ValueError("You have to specify pixel_values") + + embedding_output = self.embeddings(pixel_values) + encoder_outputs = self.encoder( + embedding_output, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + # Apply pooling + last_hidden_state = encoder_outputs[0] + pooled_output = self.pooler(last_hidden_state) + # Reshape (batch_size, projection_dim, 1 , 1) -> (batch_size, projection_dim) + pooled_output = pooled_output.reshape(pooled_output.shape[:2]) + + if not return_dict: + return (last_hidden_state, pooled_output) + encoder_outputs[1:] + + return BaseModelOutputWithPoolingAndNoAttention( + last_hidden_state=last_hidden_state, + pooler_output=pooled_output, + hidden_states=encoder_outputs.hidden_states, + ) + + +@add_start_docstrings(ALIGN_START_DOCSTRING) +class AlignModel(AlignPreTrainedModel): + config_class = AlignConfig + + def __init__(self, config: AlignConfig): + super().__init__(config) + + if not isinstance(config.text_config, AlignTextConfig): + raise ValueError( + "config.text_config is expected to be of type AlignTextConfig but is of type" + f" {type(config.text_config)}." + ) + + if not isinstance(config.vision_config, AlignVisionConfig): + raise ValueError( + "config.vision_config is expected to be of type AlignVisionConfig but is of type" + f" {type(config.vision_config)}." 
+ ) + + text_config = config.text_config + vision_config = config.vision_config + + self.projection_dim = config.projection_dim + self.text_embed_dim = text_config.hidden_size + + self.text_model = AlignTextModel(text_config) + self.vision_model = AlignVisionModel(vision_config) + + self.text_projection = nn.Linear(self.text_embed_dim, self.projection_dim) + self.temperature = nn.Parameter(torch.ones([]) * self.config.temperature_init_value) + + # Initialize weights and apply final processing + self.post_init() + + @add_start_docstrings_to_model_forward(ALIGN_TEXT_INPUTS_DOCSTRING) + def get_text_features( + self, + input_ids: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + token_type_ids: Optional[torch.Tensor] = None, + position_ids: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + inputs_embeds: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> torch.FloatTensor: + r""" + Returns: + text_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The text embeddings obtained by + applying the projection layer to the pooled output of [`AlignTextModel`]. + + Examples: + + ```python + >>> from transformers import AutoTokenizer, AlignModel + + >>> model = AlignModel.from_pretrained("kakaobrain/align-base") + >>> tokenizer = AutoTokenizer.from_pretrained("kakaobrain/align-base") + + >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt") + >>> text_features = model.get_text_features(**inputs) + ```""" + # Use ALIGN model's config for some fields (if specified) instead of those of vision & text components. + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + text_outputs = self.text_model( + input_ids=input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + last_hidden_state = text_outputs[0][:, 0, :] + text_features = self.text_projection(last_hidden_state) + + return text_features + + @add_start_docstrings_to_model_forward(ALIGN_VISION_INPUTS_DOCSTRING) + def get_image_features( + self, + pixel_values: Optional[torch.FloatTensor] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> torch.FloatTensor: + r""" + Returns: + image_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The image embeddings obtained by + applying the projection layer to the pooled output of [`AlignVisionModel`]. 
+ + Examples: + + ```python + >>> from PIL import Image + >>> import requests + >>> from transformers import AutoProcessor, AlignModel + + >>> model = AlignModel.from_pretrained("kakaobrain/align-base") + >>> processor = AutoProcessor.from_pretrained("kakaobrain/align-base") + + >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" + >>> image = Image.open(requests.get(url, stream=True).raw) + + >>> inputs = processor(images=image, return_tensors="pt") + + >>> image_features = model.get_image_features(**inputs) + ```""" + # Use ALIGN model's config for some fields (if specified) instead of those of vision & text components. + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + vision_outputs = self.vision_model( + pixel_values=pixel_values, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + image_features = vision_outputs[1] # pooled_output + + return image_features + + @add_start_docstrings_to_model_forward(ALIGN_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=AlignOutput, config_class=AlignConfig) + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + pixel_values: Optional[torch.FloatTensor] = None, + attention_mask: Optional[torch.Tensor] = None, + token_type_ids: Optional[torch.Tensor] = None, + position_ids: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + inputs_embeds: Optional[torch.Tensor] = None, + return_loss: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, AlignOutput]: + r""" + Returns: + + Examples: + + ```python + >>> from PIL import Image + >>> import requests + >>> from transformers import AutoProcessor, AlignModel + + >>> model = AlignModel.from_pretrained("kakaobrain/align-base") + >>> processor = AutoProcessor.from_pretrained("kakaobrain/align-base") + + >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" + >>> image = Image.open(requests.get(url, stream=True).raw) + + >>> inputs = processor( + ... text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True + ... ) + + >>> outputs = model(**inputs) + >>> logits_per_image = outputs.logits_per_image # this is the image-text similarity score + >>> probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities + ```""" + # Use ALIGN model's config for some fields (if specified) instead of those of vision & text components. 
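+        # The remainder of this forward pass runs the vision and text towers,
+        # takes the pooled image embedding and the first-token text hidden state,
+        # projects the text state, L2-normalizes both embeddings, and computes the
+        # text-image similarity logits divided by the learned temperature.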
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + vision_outputs = self.vision_model( + pixel_values=pixel_values, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + text_outputs = self.text_model( + input_ids=input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + image_embeds = vision_outputs[1] + text_embeds = text_outputs[0][:, 0, :] + text_embeds = self.text_projection(text_embeds) + + # normalized features + image_embeds = image_embeds / image_embeds.norm(p=2, dim=-1, keepdim=True) + text_embeds = text_embeds / text_embeds.norm(p=2, dim=-1, keepdim=True) + + # cosine similarity as logits + logits_per_text = torch.matmul(text_embeds, image_embeds.t()) / self.temperature + logits_per_image = logits_per_text.t() + + loss = None + if return_loss: + loss = align_loss(logits_per_text) + + if not return_dict: + output = (logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs) + return ((loss,) + output) if loss is not None else output + + return AlignOutput( + loss=loss, + logits_per_image=logits_per_image, + logits_per_text=logits_per_text, + text_embeds=text_embeds, + image_embeds=image_embeds, + text_model_output=text_outputs, + vision_model_output=vision_outputs, + ) + + +model_name = "kakaobrain/align-base" +device = "cuda:0" +# device = "cpu" + +def _get_scripted_model(): + config = AlignConfig.from_pretrained(model_name) + model = AlignModel.from_pretrained("kakaobrain/align-base", + return_dict=False).cuda() + return model + +def get_input(batch_size): + seq_len = 64 + vocab_size = 30522 + inputs = { + 'input_ids': + torch.randint(low=0, + high=vocab_size, + size=(2, seq_len), + dtype=torch.int64), + 'token_type_ids': + torch.zeros((2, seq_len), dtype=torch.int64), + 'attention_mask': + torch.ones((2, seq_len), dtype=torch.int64), + 'pixel_values': + torch.randn([batch_size, 3, 289, 289]) + } + inputs = ( + inputs['input_ids'], + inputs['pixel_values'], + inputs['attention_mask'], + inputs['token_type_ids'], + ) + inputs = tuple((i.to(device) for i in inputs)) + return inputs, {} + + +if __name__ == "__main__": + with torch.no_grad(): + model = _get_scripted_model().eval() + input_args, input_kwargs = get_input(batch_size=1) + outputs = model(*input_args, **input_kwargs) + print(outputs) \ No newline at end of file diff --git a/models/_bert_scripted.py b/models/_bert_scripted.py new file mode 100644 index 000000000000..934cfa1140c5 --- /dev/null +++ b/models/_bert_scripted.py @@ -0,0 +1,385 @@ +import torch +from transformers import BertConfig +import logging +import numpy as np +# import onnx +import time + +import math +import os +import warnings +from dataclasses import dataclass +from typing import List, Optional, Tuple, Union + +import torch +import torch.utils.checkpoint +from torch import nn +from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss + +from transformers.activations import ACT2FN +from transformers.modeling_outputs import ( + BaseModelOutputWithPastAndCrossAttentions, + 
BaseModelOutputWithPoolingAndCrossAttentions, + CausalLMOutputWithCrossAttentions, + MaskedLMOutput, + MultipleChoiceModelOutput, + NextSentencePredictorOutput, + QuestionAnsweringModelOutput, + SequenceClassifierOutput, + TokenClassifierOutput, +) +from transformers.modeling_utils import PreTrainedModel +from transformers.pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer +from transformers.utils import ( + ModelOutput, + add_code_sample_docstrings, + add_start_docstrings, + add_start_docstrings_to_model_forward, + logging, + replace_return_docstrings, +) +from transformers.models.bert.configuration_bert import BertConfig +from utils import script_with_log + + +class BertSelfAttention(nn.Module): + def __init__(self, config, position_embedding_type=None): + super().__init__() + if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): + raise ValueError( + f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " + f"heads ({config.num_attention_heads})" + ) + + self.num_attention_heads = config.num_attention_heads + self.attention_head_size = int(config.hidden_size / config.num_attention_heads) + self.all_head_size = self.num_attention_heads * self.attention_head_size + + self.query = nn.Linear(config.hidden_size, self.all_head_size) + self.key = nn.Linear(config.hidden_size, self.all_head_size) + self.value = nn.Linear(config.hidden_size, self.all_head_size) + + self.dropout = nn.Dropout(config.attention_probs_dropout_prob) + self.position_embedding_type = position_embedding_type or getattr( + config, "position_embedding_type", "absolute" + ) + if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": + self.max_position_embeddings = config.max_position_embeddings + self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size) + + self.is_decoder = config.is_decoder + + def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor: + new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) + x = x.view(new_x_shape) + return x.permute(0, 2, 1, 3) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.FloatTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + past_key_value: Optional[Tuple[torch.FloatTensor, torch.FloatTensor]] = None, + output_attentions: Optional[bool] = False, + ) -> Tuple[torch.Tensor]: + mixed_query_layer = self.query(hidden_states) + + # If this is instantiated as a cross-attention module, the keys + # and values come from an encoder; the attention mask needs to be + # such that the encoder's padding tokens are not attended to. 
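+        # The four branches below cover: (1) cross-attention reusing a cached
+        # key/value pair, (2) cross-attention projected from encoder_hidden_states,
+        # (3) decoder self-attention that concatenates the cached key/value with the
+        # current projections, and (4) plain self-attention without any cache.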
+ is_cross_attention = encoder_hidden_states is not None + + if is_cross_attention and past_key_value is not None: + # reuse k,v, cross_attentions + key_layer = past_key_value[0] + value_layer = past_key_value[1] + attention_mask = encoder_attention_mask + elif is_cross_attention: + assert encoder_hidden_states is not None + key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) + value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) + attention_mask = encoder_attention_mask + elif past_key_value is not None: + key_layer = self.transpose_for_scores(self.key(hidden_states)) + value_layer = self.transpose_for_scores(self.value(hidden_states)) + key_layer = torch.cat([past_key_value[0], key_layer], dim=2) + value_layer = torch.cat([past_key_value[1], value_layer], dim=2) + else: + key_layer = self.transpose_for_scores(self.key(hidden_states)) + value_layer = self.transpose_for_scores(self.value(hidden_states)) + + query_layer = self.transpose_for_scores(mixed_query_layer) + + use_cache = past_key_value is not None + if self.is_decoder: + # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. + # Further calls to cross_attention layer can then reuse all cross-attention + # key/value_states (first "if" case) + # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of + # all previous decoder key/value_states. Further calls to uni-directional self-attention + # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) + # if encoder bi-directional self-attention `past_key_value` is always `None` + past_key_value = (key_layer, value_layer) + + # Take the dot product between "query" and "key" to get the raw attention scores. + attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) + + if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": + query_length, key_length = query_layer.shape[2], key_layer.shape[2] + if use_cache: + position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view( + -1, 1 + ) + else: + position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1) + position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1) + distance = position_ids_l - position_ids_r + + positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1) + positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility + + if self.position_embedding_type == "relative_key": + relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) + attention_scores = attention_scores + relative_position_scores + elif self.position_embedding_type == "relative_key_query": + relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) + relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding) + attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key + + attention_scores = attention_scores / math.sqrt(self.attention_head_size) + if attention_mask is not None: + # Apply the attention mask is (precomputed for all layers in BertModel forward() function) + attention_scores = attention_scores + attention_mask + + # Normalize the attention scores to probabilities. 
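+        # Together with the 1/sqrt(d_head) scaling and additive mask above, this is
+        # standard scaled dot-product attention:
+        #   probs = softmax(Q @ K^T / sqrt(d_head) + mask);  context = probs @ V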
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1) + + # This is actually dropping out entire tokens to attend to, which might + # seem a bit unusual, but is taken from the original Transformer paper. + attention_probs = self.dropout(attention_probs) + + # Mask heads if we want to + if head_mask is not None: + attention_probs = attention_probs * head_mask + + context_layer = torch.matmul(attention_probs, value_layer) + + context_layer = context_layer.permute(0, 2, 1, 3).contiguous() + # new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) + # for run dynamo.dynamic + new_context_layer_shape = context_layer.size()[:-2] + (768,) + context_layer = context_layer.view(new_context_layer_shape) + + outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) + + if self.is_decoder: + outputs = outputs + (past_key_value,) + return outputs + + +class BertSelfOutput(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states) + hidden_states = self.LayerNorm(hidden_states + input_tensor) + return hidden_states + + +class BertAttention(nn.Module): + def __init__(self, config, position_embedding_type=None): + super().__init__() + self.self = BertSelfAttention(config, position_embedding_type=position_embedding_type) + self.output = script_with_log(BertSelfOutput(config)) + self.pruned_heads = set() + + def prune_heads(self, heads): + if len(heads) == 0: + return + heads, index = find_pruneable_heads_and_indices( + heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads + ) + + # Prune linear layers + self.self.query = prune_linear_layer(self.self.query, index) + self.self.key = prune_linear_layer(self.self.key, index) + self.self.value = prune_linear_layer(self.self.value, index) + self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) + + # Update hyper params and store pruned heads + self.self.num_attention_heads = self.self.num_attention_heads - len(heads) + self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads + self.pruned_heads = self.pruned_heads.union(heads) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.FloatTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, + output_attentions: Optional[bool] = False, + ) -> Tuple[torch.Tensor]: + self_outputs = self.self( + hidden_states, + attention_mask, + head_mask, + encoder_hidden_states, + encoder_attention_mask, + past_key_value, + output_attentions, + ) + attention_output = self.output(self_outputs[0], hidden_states) + outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them + return outputs + + +class BertIntermediate(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.intermediate_size) + if isinstance(config.hidden_act, str): + self.intermediate_act_fn = ACT2FN[config.hidden_act] + else: + 
self.intermediate_act_fn = config.hidden_act + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + hidden_states = self.dense(hidden_states) + hidden_states = self.intermediate_act_fn(hidden_states) + return hidden_states + + +class BertOutput(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.intermediate_size, config.hidden_size) + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states) + hidden_states = self.LayerNorm(hidden_states + input_tensor) + return hidden_states + + +class BertLayer(nn.Module): + def __init__(self, config): + super().__init__() + self.chunk_size_feed_forward = config.chunk_size_feed_forward + self.seq_len_dim = 1 + self.attention = BertAttention(config) + self.is_decoder = config.is_decoder + self.add_cross_attention = config.add_cross_attention + if self.add_cross_attention: + if not self.is_decoder: + raise ValueError(f"{self} should be used as a decoder model if cross attention is added") + self.crossattention = BertAttention(config, position_embedding_type="absolute") + self.intermediate = script_with_log(BertIntermediate(config)) + self.output = script_with_log(BertOutput(config)) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.FloatTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, + output_attentions: Optional[bool] = False, + ) -> Tuple[torch.Tensor]: + # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 + self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None + self_attention_outputs = self.attention( + hidden_states, + attention_mask, + head_mask, + output_attentions=output_attentions, + past_key_value=self_attn_past_key_value, + ) + attention_output = self_attention_outputs[0] + + # if decoder, the last output is tuple of self-attn cache + if self.is_decoder: + outputs = self_attention_outputs[1:-1] + present_key_value = self_attention_outputs[-1] + else: + outputs = self_attention_outputs[1:] # add self attentions if we output attention weights + + cross_attn_present_key_value = None + if self.is_decoder and encoder_hidden_states is not None: + if not hasattr(self, "crossattention"): + raise ValueError( + f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers" + " by setting `config.add_cross_attention=True`" + ) + + # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple + cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None + cross_attention_outputs = self.crossattention( + attention_output, + attention_mask, + head_mask, + encoder_hidden_states, + encoder_attention_mask, + cross_attn_past_key_value, + output_attentions, + ) + attention_output = cross_attention_outputs[0] + outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights + + # add cross-attn cache to positions 3,4 of present_key_value tuple + cross_attn_present_key_value = cross_attention_outputs[-1] + present_key_value 
= present_key_value + cross_attn_present_key_value + + layer_output = apply_chunking_to_forward( + self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output + ) + outputs = (layer_output,) + outputs + + # if decoder, return the attn key/values as the last output + if self.is_decoder: + outputs = outputs + (present_key_value,) + + return outputs + + def feed_forward_chunk(self, attention_output): + intermediate_output = self.intermediate(attention_output) + layer_output = self.output(intermediate_output, attention_output) + return layer_output + +class BertModel(torch.nn.Module): + def __init__(self, config): + super().__init__() + self.layers = torch.nn.ModuleList( + [BertLayer(config) for _ in range(config.num_hidden_layers)] + ) + + def forward(self, x): + for layer in self.layers: + x = layer(x)[0] + return x[0] + + +def _get_scripted_model(): + model = BertModel(BertConfig(vocab_size=32768, hidden_size=768, + num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, return_dict=False)).cuda() + return model + +def get_input(batch_size, seq_len=256): + inputs = torch.randn((batch_size, seq_len, 768), device='cuda') + return (inputs,), {} + + +# if __name__ == '__main__': +# model = get_model() +# input_args, input_kwargs = get_input(8, 80) +# model(*input_args, **input_kwargs) diff --git a/models/_deberta_scripted.py b/models/_deberta_scripted.py new file mode 100644 index 000000000000..8fec2bf3d07d --- /dev/null +++ b/models/_deberta_scripted.py @@ -0,0 +1,1057 @@ +from transformers import AutoTokenizer, AutoConfig, AutoModel +import torch + +# dynamo compile error in torch 2.0.1: torch._dynamo.exc.BackendCompilerFailed: debug_wrapper raised ValueError: Cannot view a tensor with shape torch.Size([1, 512, 12, 64]) and strides (393216, 64, 32768, 1) as a tensor with shape (1, 512, 768)! 
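+# A minimal sketch of that failure mode (an illustration, not necessarily the exact
+# repro): after a permute the context tensor is non-contiguous, so .view() on the
+# merged head dimension hits the size/stride mismatch, while .reshape() (or
+# .contiguous().view()) succeeds:
+#
+#   x = torch.randn(1, 12, 512, 64).permute(0, 2, 1, 3)  # shape (1, 512, 12, 64), non-contiguous
+#   x.view(1, 512, 768)     # raises: view size is not compatible with input's size and stride
+#   x.reshape(1, 512, 768)  # copies when necessary and succeeds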
+# 10.4: dont know how to reproduce the bug + +from collections.abc import Sequence +from typing import Optional, Tuple, Union + +import torch +import torch.utils.checkpoint +from torch import nn +from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss + +from transformers.activations import ACT2FN +from transformers.modeling_outputs import ( + BaseModelOutput, + MaskedLMOutput, + QuestionAnsweringModelOutput, + SequenceClassifierOutput, + TokenClassifierOutput, +) +from transformers.modeling_utils import PreTrainedModel +from transformers.pytorch_utils import softmax_backward_data +from transformers.utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging +from transformers.models.deberta.configuration_deberta import DebertaConfig +from utils import script_with_log +logger = logging.get_logger(__name__) +_CONFIG_FOR_DOC = "DebertaConfig" +_CHECKPOINT_FOR_DOC = "microsoft/deberta-base" + +# Masked LM docstring +_CHECKPOINT_FOR_MASKED_LM = "lsanochkin/deberta-large-feedback" +_MASKED_LM_EXPECTED_OUTPUT = "' Paris'" +_MASKED_LM_EXPECTED_LOSS = "0.54" + +# QuestionAnswering docstring +_CHECKPOINT_FOR_QA = "Palak/microsoft_deberta-large_squad" +_QA_EXPECTED_OUTPUT = "' a nice puppet'" +_QA_EXPECTED_LOSS = 0.14 +_QA_TARGET_START_INDEX = 12 +_QA_TARGET_END_INDEX = 14 + + +DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = [ + "microsoft/deberta-base", + "microsoft/deberta-large", + "microsoft/deberta-xlarge", + "microsoft/deberta-base-mnli", + "microsoft/deberta-large-mnli", + "microsoft/deberta-xlarge-mnli", +] + + +class ContextPooler(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.pooler_hidden_size, config.pooler_hidden_size) + self.dropout = StableDropout(config.pooler_dropout) + self.config = config + + def forward(self, hidden_states): + # We "pool" the model by simply taking the hidden state corresponding + # to the first token. + + context_token = hidden_states[:, 0] + context_token = self.dropout(context_token) + pooled_output = self.dense(context_token) + pooled_output = ACT2FN[self.config.pooler_hidden_act](pooled_output) + return pooled_output + + @property + def output_dim(self): + return self.config.hidden_size + + +class XSoftmax(torch.autograd.Function): + """ + Masked Softmax which is optimized for saving memory + + Args: + input (`torch.tensor`): The input tensor that will apply softmax. + mask (`torch.IntTensor`): + The mask matrix where 0 indicate that element will be ignored in the softmax calculation. 
+ dim (int): The dimension that will apply softmax + + Example: + + ```python + >>> import torch + >>> from transformers.models.deberta.modeling_deberta import XSoftmax + + >>> # Make a tensor + >>> x = torch.randn([4, 20, 100]) + + >>> # Create a mask + >>> mask = (x > 0).int() + + >>> # Specify the dimension to apply softmax + >>> dim = -1 + + >>> y = XSoftmax.apply(x, mask, dim) + ```""" + + @staticmethod + def forward(self, input, mask, dim): + self.dim = dim + rmask = ~(mask.to(torch.bool)) + + output = input.masked_fill(rmask, torch.tensor(torch.finfo(input.dtype).min)) + output = torch.softmax(output, self.dim) + output.masked_fill_(rmask, 0) + self.save_for_backward(output) + return output + + @staticmethod + def backward(self, grad_output): + (output,) = self.saved_tensors + inputGrad = softmax_backward_data(self, grad_output, output, self.dim, output) + return inputGrad, None, None + + @staticmethod + def symbolic(g, self, mask, dim): + import torch.onnx.symbolic_helper as sym_help + from torch.onnx.symbolic_opset9 import masked_fill, softmax + + mask_cast_value = g.op("Cast", mask, to_i=sym_help.cast_pytorch_to_onnx["Long"]) + r_mask = g.op( + "Cast", + g.op("Sub", g.op("Constant", value_t=torch.tensor(1, dtype=torch.int64)), mask_cast_value), + to_i=sym_help.cast_pytorch_to_onnx["Byte"], + ) + output = masked_fill( + g, self, r_mask, g.op("Constant", value_t=torch.tensor(torch.finfo(self.type().dtype()).min)) + ) + output = softmax(g, output, dim) + return masked_fill(g, output, r_mask, g.op("Constant", value_t=torch.tensor(0, dtype=torch.bool))) + + +def xsoftmax_call(input, mask, dim): + rmask = ~(mask.to(torch.bool)) + + output = input.masked_fill(rmask, torch.tensor(torch.finfo(input.dtype).min)) + output = torch.softmax(output, dim) + output.masked_fill_(rmask, 0) + return output + +class DropoutContext(object): + def __init__(self): + self.dropout = 0 + self.mask = None + self.scale = 1 + self.reuse_mask = True + + +def get_mask(input, local_context): + if not isinstance(local_context, DropoutContext): + dropout = local_context + mask = None + else: + dropout = local_context.dropout + dropout *= local_context.scale + mask = local_context.mask if local_context.reuse_mask else None + + if dropout > 0 and mask is None: + mask = (1 - torch.empty_like(input).bernoulli_(1 - dropout)).to(torch.bool) + + if isinstance(local_context, DropoutContext): + if local_context.mask is None: + local_context.mask = mask + + return mask, dropout + + +class XDropout(torch.autograd.Function): + """Optimized dropout function to save computation and memory by using mask operation instead of multiplication.""" + + @staticmethod + def forward(ctx, input, local_ctx): + mask, dropout = get_mask(input, local_ctx) + ctx.scale = 1.0 / (1 - dropout) + if dropout > 0: + ctx.save_for_backward(mask) + return input.masked_fill(mask, 0) * ctx.scale + else: + return input + + @staticmethod + def backward(ctx, grad_output): + if ctx.scale > 1: + (mask,) = ctx.saved_tensors + return grad_output.masked_fill(mask, 0) * ctx.scale, None + else: + return grad_output, None + + @staticmethod + def symbolic(g: torch._C.Graph, input: torch._C.Value, local_ctx: Union[float, DropoutContext]) -> torch._C.Value: + from torch.onnx import symbolic_opset12 + + dropout_p = local_ctx + if isinstance(local_ctx, DropoutContext): + dropout_p = local_ctx.dropout + # StableDropout only calls this function when training. 
+ train = True + # TODO: We should check if the opset_version being used to export + # is > 12 here, but there's no good way to do that. As-is, if the + # opset_version < 12, export will fail with a CheckerError. + # Once https://github.com/pytorch/pytorch/issues/78391 is fixed, do something like: + # if opset_version < 12: + # return torch.onnx.symbolic_opset9.dropout(g, input, dropout_p, train) + return symbolic_opset12.dropout(g, input, dropout_p, train) + + +class StableDropout(nn.Module): + """ + Optimized dropout module for stabilizing the training + + Args: + drop_prob (float): the dropout probabilities + """ + + def __init__(self, drop_prob): + super().__init__() + self.drop_prob = drop_prob + self.count = 0 + self.context_stack = None + + def forward(self, x): + """ + Call the module + + Args: + x (`torch.tensor`): The input tensor to apply dropout + """ + if self.training and self.drop_prob > 0: + return XDropout.apply(x, self.get_context()) + return x + + def clear_context(self): + self.count = 0 + self.context_stack = None + + def init_context(self, reuse_mask=True, scale=1): + if self.context_stack is None: + self.context_stack = [] + self.count = 0 + for c in self.context_stack: + c.reuse_mask = reuse_mask + c.scale = scale + + def get_context(self): + if self.context_stack is not None: + if self.count >= len(self.context_stack): + self.context_stack.append(DropoutContext()) + ctx = self.context_stack[self.count] + ctx.dropout = self.drop_prob + self.count += 1 + return ctx + else: + return self.drop_prob + + +class DebertaLayerNorm(nn.Module): + """LayerNorm module in the TF style (epsilon inside the square root).""" + + def __init__(self, size, eps=1e-12): + super().__init__() + self.weight = nn.Parameter(torch.ones(size)) + self.bias = nn.Parameter(torch.zeros(size)) + self.variance_epsilon = eps + + def forward(self, hidden_states): + input_type = hidden_states.dtype + hidden_states = hidden_states.float() + mean = hidden_states.mean(-1, keepdim=True) + variance = (hidden_states - mean).pow(2).mean(-1, keepdim=True) + hidden_states = (hidden_states - mean) / torch.sqrt(variance + self.variance_epsilon) + hidden_states = hidden_states.to(input_type) + y = self.weight * hidden_states + self.bias + return y + + +class DebertaSelfOutput(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + self.LayerNorm = DebertaLayerNorm(config.hidden_size, config.layer_norm_eps) + self.dropout = StableDropout(config.hidden_dropout_prob) + + def forward(self, hidden_states, input_tensor): + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states) + hidden_states = self.LayerNorm(hidden_states + input_tensor) + return hidden_states + + +class DebertaAttention(nn.Module): + def __init__(self, config): + super().__init__() + self.self = DisentangledSelfAttention(config) + self.output = script_with_log(DebertaSelfOutput(config)) + self.config = config + + def forward( + self, + hidden_states, + attention_mask, + output_attentions=False, + query_states=None, + relative_pos=None, + rel_embeddings=None, + ): + self_output = self.self( + hidden_states, + attention_mask, + output_attentions, + query_states=query_states, + relative_pos=relative_pos, + rel_embeddings=rel_embeddings, + ) + if output_attentions: + self_output, att_matrix = self_output + if query_states is None: + query_states = hidden_states + attention_output = self.output(self_output, query_states) + + if output_attentions: + 
return (attention_output, att_matrix) + else: + return attention_output + + +# Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->Deberta +class DebertaIntermediate(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.intermediate_size) + if isinstance(config.hidden_act, str): + self.intermediate_act_fn = ACT2FN[config.hidden_act] + else: + self.intermediate_act_fn = config.hidden_act + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + hidden_states = self.dense(hidden_states) + hidden_states = self.intermediate_act_fn(hidden_states) + return hidden_states + + +class DebertaOutput(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.intermediate_size, config.hidden_size) + self.LayerNorm = DebertaLayerNorm(config.hidden_size, config.layer_norm_eps) + self.dropout = StableDropout(config.hidden_dropout_prob) + self.config = config + + def forward(self, hidden_states, input_tensor): + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states) + hidden_states = self.LayerNorm(hidden_states + input_tensor) + return hidden_states + + +class DebertaLayer(nn.Module): + def __init__(self, config): + super().__init__() + self.attention = DebertaAttention(config) + self.intermediate = script_with_log(DebertaIntermediate(config)) + self.output = script_with_log(DebertaOutput(config)) + + def forward( + self, + hidden_states, + attention_mask, + query_states=None, + relative_pos=None, + rel_embeddings=None, + output_attentions=False, + ): + attention_output = self.attention( + hidden_states, + attention_mask, + output_attentions=output_attentions, + query_states=query_states, + relative_pos=relative_pos, + rel_embeddings=rel_embeddings, + ) + if output_attentions: + attention_output, att_matrix = attention_output + intermediate_output = self.intermediate(attention_output) + layer_output = self.output(intermediate_output, attention_output) + if output_attentions: + return (layer_output, att_matrix) + else: + return layer_output + + +class DebertaEncoder(nn.Module): + """Modified BertEncoder with relative position bias support""" + + def __init__(self, config): + super().__init__() + self.layer = nn.ModuleList([DebertaLayer(config) for _ in range(config.num_hidden_layers)]) + self.relative_attention = getattr(config, "relative_attention", False) + if self.relative_attention: + self.max_relative_positions = getattr(config, "max_relative_positions", -1) + if self.max_relative_positions < 1: + self.max_relative_positions = config.max_position_embeddings + self.rel_embeddings = nn.Embedding(self.max_relative_positions * 2, config.hidden_size) + self.gradient_checkpointing = False + + def get_rel_embedding(self): + rel_embeddings = self.rel_embeddings.weight if self.relative_attention else None + return rel_embeddings + + def get_attention_mask(self, attention_mask): + if attention_mask.dim() <= 2: + extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2) + attention_mask = extended_attention_mask * extended_attention_mask.squeeze(-2).unsqueeze(-1) + attention_mask = attention_mask.byte() + elif attention_mask.dim() == 3: + attention_mask = attention_mask.unsqueeze(1) + + return attention_mask + + def get_rel_pos(self, hidden_states, query_states=None, relative_pos=None): + if self.relative_attention and relative_pos is None: + q = query_states.size(-2) if query_states is not None else hidden_states.size(-2) + relative_pos 
= build_relative_position(q, hidden_states.size(-2), hidden_states.device) + return relative_pos + + def forward( + self, + hidden_states, + attention_mask, + output_hidden_states=True, + output_attentions=False, + query_states=None, + relative_pos=None, + return_dict=True, + ): + attention_mask = self.get_attention_mask(attention_mask) + relative_pos = self.get_rel_pos(hidden_states, query_states, relative_pos) + + all_hidden_states = () if output_hidden_states else None + all_attentions = () if output_attentions else None + + if isinstance(hidden_states, Sequence): + next_kv = hidden_states[0] + else: + next_kv = hidden_states + rel_embeddings = self.get_rel_embedding() + for i, layer_module in enumerate(self.layer): + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + if self.gradient_checkpointing and self.training: + + def create_custom_forward(module): + def custom_forward(*inputs): + return module(*inputs, output_attentions) + + return custom_forward + + hidden_states = torch.utils.checkpoint.checkpoint( + create_custom_forward(layer_module), + next_kv, + attention_mask, + query_states, + relative_pos, + rel_embeddings, + ) + else: + hidden_states = layer_module( + next_kv, + attention_mask, + query_states=query_states, + relative_pos=relative_pos, + rel_embeddings=rel_embeddings, + output_attentions=output_attentions, + ) + + if output_attentions: + hidden_states, att_m = hidden_states + + if query_states is not None: + query_states = hidden_states + if isinstance(hidden_states, Sequence): + next_kv = hidden_states[i + 1] if i + 1 < len(self.layer) else None + else: + next_kv = hidden_states + + if output_attentions: + all_attentions = all_attentions + (att_m,) + + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + if not return_dict: + return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None) + return BaseModelOutput( + last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions + ) + + +def build_relative_position(query_size, key_size, device): + """ + Build relative position according to the query and key + + We assume the absolute position of query \\(P_q\\) is range from (0, query_size) and the absolute position of key + \\(P_k\\) is range from (0, key_size), The relative positions from query to key is \\(R_{q \\rightarrow k} = P_q - + P_k\\) + + Args: + query_size (int): the length of query + key_size (int): the length of key + + Return: + `torch.LongTensor`: A tensor with shape [1, query_size, key_size] + + """ + + q_ids = torch.arange(query_size, dtype=torch.long, device=device) + k_ids = torch.arange(key_size, dtype=torch.long, device=device) + rel_pos_ids = q_ids[:, None] - k_ids.view(1, -1).repeat(query_size, 1) + rel_pos_ids = rel_pos_ids[:query_size, :] + rel_pos_ids = rel_pos_ids.unsqueeze(0) + return rel_pos_ids + + +def c2p_dynamic_expand(c2p_pos, query_layer, relative_pos): + return c2p_pos.expand([query_layer.size(0), query_layer.size(1), query_layer.size(2), relative_pos.size(-1)]) + + +def p2c_dynamic_expand(c2p_pos, query_layer, key_layer): + return c2p_pos.expand([query_layer.size(0), query_layer.size(1), key_layer.size(-2), key_layer.size(-2)]) + + +def pos_dynamic_expand(pos_index, p2c_att, key_layer): + return pos_index.expand(p2c_att.size()[:2] + (pos_index.size(-2), key_layer.size(-2))) + + +class DisentangledSelfAttention(nn.Module): + """ + Disentangled self-attention module + + Parameters: + config (`str`): + A 
model config class instance with the configuration to build a new model. The schema is similar to + *BertConfig*, for more details, please refer [`DebertaConfig`] + + """ + + def __init__(self, config): + super().__init__() + if config.hidden_size % config.num_attention_heads != 0: + raise ValueError( + f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " + f"heads ({config.num_attention_heads})" + ) + self.num_attention_heads = config.num_attention_heads + self.attention_head_size = int(config.hidden_size / config.num_attention_heads) + self.all_head_size = self.num_attention_heads * self.attention_head_size + self.in_proj = nn.Linear(config.hidden_size, self.all_head_size * 3, bias=False) + self.q_bias = nn.Parameter(torch.zeros((self.all_head_size), dtype=torch.float)) + self.v_bias = nn.Parameter(torch.zeros((self.all_head_size), dtype=torch.float)) + self.pos_att_type = config.pos_att_type if config.pos_att_type is not None else [] + + self.relative_attention = getattr(config, "relative_attention", False) + self.talking_head = getattr(config, "talking_head", False) + + if self.talking_head: + self.head_logits_proj = nn.Linear(config.num_attention_heads, config.num_attention_heads, bias=False) + self.head_weights_proj = nn.Linear(config.num_attention_heads, config.num_attention_heads, bias=False) + + if self.relative_attention: + self.max_relative_positions = getattr(config, "max_relative_positions", -1) + if self.max_relative_positions < 1: + self.max_relative_positions = config.max_position_embeddings + self.pos_dropout = StableDropout(config.hidden_dropout_prob) + + if "c2p" in self.pos_att_type: + self.pos_proj = nn.Linear(config.hidden_size, self.all_head_size, bias=False) + if "p2c" in self.pos_att_type: + self.pos_q_proj = nn.Linear(config.hidden_size, self.all_head_size) + + self.dropout = StableDropout(config.attention_probs_dropout_prob) + + def transpose_for_scores(self, x): + new_x_shape = x.size()[:-1] + (self.num_attention_heads, -1) + x = x.view(new_x_shape) + return x.permute(0, 2, 1, 3) + + def forward( + self, + hidden_states, + attention_mask, + output_attentions=False, + query_states=None, + relative_pos=None, + rel_embeddings=None, + ): + """ + Call the module + + Args: + hidden_states (`torch.FloatTensor`): + Input states to the module usually the output from previous layer, it will be the Q,K and V in + *Attention(Q,K,V)* + + attention_mask (`torch.ByteTensor`): + An attention mask matrix of shape [*B*, *N*, *N*] where *B* is the batch size, *N* is the maximum + sequence length in which element [i,j] = *1* means the *i* th token in the input can attend to the *j* + th token. + + output_attentions (`bool`, optional): + Whether return the attention matrix. + + query_states (`torch.FloatTensor`, optional): + The *Q* state in *Attention(Q,K,V)*. + + relative_pos (`torch.LongTensor`): + The relative position encoding between the tokens in the sequence. It's of shape [*B*, *N*, *N*] with + values ranging in [*-max_relative_positions*, *max_relative_positions*]. + + rel_embeddings (`torch.FloatTensor`): + The embedding of relative distances. It's a tensor of shape [\\(2 \\times + \\text{max_relative_positions}\\), *hidden_size*]. 
+ + + """ + if query_states is None: + qp = self.in_proj(hidden_states) # .split(self.all_head_size, dim=-1) + query_layer, key_layer, value_layer = self.transpose_for_scores(qp).chunk(3, dim=-1) + else: + + def linear(w, b, x): + if b is not None: + return torch.matmul(x, w.t()) + b.t() + else: + return torch.matmul(x, w.t()) # + b.t() + + ws = self.in_proj.weight.chunk(self.num_attention_heads * 3, dim=0) + qkvw = [torch.cat([ws[i * 3 + k] for i in range(self.num_attention_heads)], dim=0) for k in range(3)] + qkvb = [None] * 3 + + q = linear(qkvw[0], qkvb[0], query_states.to(dtype=qkvw[0].dtype)) + k, v = [linear(qkvw[i], qkvb[i], hidden_states.to(dtype=qkvw[i].dtype)) for i in range(1, 3)] + query_layer, key_layer, value_layer = [self.transpose_for_scores(x) for x in [q, k, v]] + + query_layer = query_layer + self.transpose_for_scores(self.q_bias[None, None, :]) + value_layer = value_layer + self.transpose_for_scores(self.v_bias[None, None, :]) + + rel_att = None + # Take the dot product between "query" and "key" to get the raw attention scores. + scale_factor = 1 + len(self.pos_att_type) + scale = torch.sqrt(torch.tensor(query_layer.size(-1), dtype=torch.float) * scale_factor) + query_layer = query_layer / scale.to(dtype=query_layer.dtype) + attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) + if self.relative_attention: + rel_embeddings = self.pos_dropout(rel_embeddings) + rel_att = self.disentangled_att_bias(query_layer, key_layer, relative_pos, rel_embeddings, scale_factor) + + if rel_att is not None: + attention_scores = attention_scores + rel_att + + # bxhxlxd + if self.talking_head: + attention_scores = self.head_logits_proj(attention_scores.permute(0, 2, 3, 1)).permute(0, 3, 1, 2) + + attention_probs = XSoftmax.apply(attention_scores, attention_mask, -1) + # attention_probs = xsoftmax_call(attention_scores, attention_mask, -1) + attention_probs = self.dropout(attention_probs) + if self.talking_head: + attention_probs = self.head_weights_proj(attention_probs.permute(0, 2, 3, 1)).permute(0, 3, 1, 2) + + context_layer = torch.matmul(attention_probs, value_layer) + context_layer = context_layer.permute(0, 2, 1, 3).contiguous() + new_context_layer_shape = context_layer.size()[:-2] + (-1,) + context_layer = context_layer.view(new_context_layer_shape) + if output_attentions: + return (context_layer, attention_probs) + else: + return context_layer + + def disentangled_att_bias(self, query_layer, key_layer, relative_pos, rel_embeddings, scale_factor): + if relative_pos is None: + q = query_layer.size(-2) + relative_pos = build_relative_position(q, key_layer.size(-2), query_layer.device) + if relative_pos.dim() == 2: + relative_pos = relative_pos.unsqueeze(0).unsqueeze(0) + elif relative_pos.dim() == 3: + relative_pos = relative_pos.unsqueeze(1) + # bxhxqxk + elif relative_pos.dim() != 4: + raise ValueError(f"Relative position ids must be of dim 2 or 3 or 4. 
{relative_pos.dim()}") + + # att_span = min(max(query_layer.size(-2), key_layer.size(-2)), self.max_relative_positions) + # relative_pos = relative_pos.long().to(query_layer.device) + # rel_embeddings = rel_embeddings[ + # self.max_relative_positions - att_span : self.max_relative_positions + att_span, : + # ].unsqueeze(0) + + # enable dynamo dynamic shape mode + att_span = min(max(query_layer.size(-2), key_layer.size(-2)), 512) + # att_span = min(max(query_layer.size(-2), key_layer.size(-2)), self.max_relative_positions) + relative_pos = relative_pos.long().to(query_layer.device) + # rel_embeddings = rel_embeddings[ + # self.max_relative_positions - att_span : self.max_relative_positions + att_span, : + # ].unsqueeze(0) + rel_embeddings = rel_embeddings[ + 512 - att_span : 512 + att_span, : + ].unsqueeze(0) + + score = 0 + + # content->position + if "c2p" in self.pos_att_type: + pos_key_layer = self.pos_proj(rel_embeddings) + pos_key_layer = self.transpose_for_scores(pos_key_layer) + c2p_att = torch.matmul(query_layer, pos_key_layer.transpose(-1, -2)) + c2p_pos = torch.clamp(relative_pos + att_span, 0, att_span * 2 - 1) + c2p_att = torch.gather(c2p_att, dim=-1, index=c2p_dynamic_expand(c2p_pos, query_layer, relative_pos)) + score += c2p_att + + # position->content + if "p2c" in self.pos_att_type: + pos_query_layer = self.pos_q_proj(rel_embeddings) + pos_query_layer = self.transpose_for_scores(pos_query_layer) + pos_query_layer /= torch.sqrt(torch.tensor(pos_query_layer.size(-1), dtype=torch.float) * scale_factor) + if query_layer.size(-2) != key_layer.size(-2): + r_pos = build_relative_position(key_layer.size(-2), key_layer.size(-2), query_layer.device) + else: + r_pos = relative_pos + p2c_pos = torch.clamp(-r_pos + att_span, 0, att_span * 2 - 1) + p2c_att = torch.matmul(key_layer, pos_query_layer.transpose(-1, -2).to(dtype=key_layer.dtype)) + p2c_att = torch.gather( + p2c_att, dim=-1, index=p2c_dynamic_expand(p2c_pos, query_layer, key_layer) + ).transpose(-1, -2) + + if query_layer.size(-2) != key_layer.size(-2): + pos_index = relative_pos[:, :, :, 0].unsqueeze(-1) + p2c_att = torch.gather(p2c_att, dim=-2, index=pos_dynamic_expand(pos_index, p2c_att, key_layer)) + score += p2c_att + + return score + + +class DebertaEmbeddings(nn.Module): + """Construct the embeddings from word, position and token_type embeddings.""" + + def __init__(self, config): + super().__init__() + pad_token_id = getattr(config, "pad_token_id", 0) + self.embedding_size = getattr(config, "embedding_size", config.hidden_size) + self.word_embeddings = nn.Embedding(config.vocab_size, self.embedding_size, padding_idx=pad_token_id) + + self.position_biased_input = getattr(config, "position_biased_input", True) + if not self.position_biased_input: + self.position_embeddings = None + else: + self.position_embeddings = nn.Embedding(config.max_position_embeddings, self.embedding_size) + + if config.type_vocab_size > 0: + self.token_type_embeddings = nn.Embedding(config.type_vocab_size, self.embedding_size) + + if self.embedding_size != config.hidden_size: + self.embed_proj = nn.Linear(self.embedding_size, config.hidden_size, bias=False) + self.LayerNorm = script_with_log(DebertaLayerNorm(config.hidden_size, config.layer_norm_eps)) + self.dropout = StableDropout(config.hidden_dropout_prob) + self.config = config + + # position_ids (1, len position emb) is contiguous in memory and exported when serialized + self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1))) + + def forward(self, 
input_ids=None, token_type_ids=None, position_ids=None, mask=None, inputs_embeds=None): + if input_ids is not None: + input_shape = input_ids.size() + else: + input_shape = inputs_embeds.size()[:-1] + + seq_length = input_shape[1] + + if position_ids is None: + position_ids = self.position_ids[:, :seq_length] + + if token_type_ids is None: + token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) + + if inputs_embeds is None: + inputs_embeds = self.word_embeddings(input_ids) + + if self.position_embeddings is not None: + position_embeddings = self.position_embeddings(position_ids.long()) + else: + position_embeddings = torch.zeros_like(inputs_embeds) + + embeddings = inputs_embeds + if self.position_biased_input: + embeddings += position_embeddings + if self.config.type_vocab_size > 0: + token_type_embeddings = self.token_type_embeddings(token_type_ids) + embeddings += token_type_embeddings + + # if self.embedding_size != self.config.hidden_size: + # embeddings = self.embed_proj(embeddings) + + embeddings = self.LayerNorm(embeddings) + + if mask is not None: + if mask.dim() != embeddings.dim(): + if mask.dim() == 4: + mask = mask.squeeze(1).squeeze(1) + mask = mask.unsqueeze(2) + mask = mask.to(embeddings.dtype) + + embeddings = embeddings * mask + + embeddings = self.dropout(embeddings) + return embeddings + + +class DebertaPreTrainedModel(PreTrainedModel): + """ + An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained + models. + """ + + config_class = DebertaConfig + base_model_prefix = "deberta" + _keys_to_ignore_on_load_missing = ["position_ids"] + _keys_to_ignore_on_load_unexpected = ["position_embeddings"] + supports_gradient_checkpointing = True + + def _init_weights(self, module): + """Initialize the weights.""" + if isinstance(module, nn.Linear): + # Slightly different from the TF version which uses truncated_normal for initialization + # cf https://github.com/pytorch/pytorch/pull/5617 + module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) + if module.bias is not None: + module.bias.data.zero_() + elif isinstance(module, nn.Embedding): + module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) + if module.padding_idx is not None: + module.weight.data[module.padding_idx].zero_() + + def _set_gradient_checkpointing(self, module, value=False): + if isinstance(module, DebertaEncoder): + module.gradient_checkpointing = value + + +DEBERTA_START_DOCSTRING = r""" + The DeBERTa model was proposed in [DeBERTa: Decoding-enhanced BERT with Disentangled + Attention](https://arxiv.org/abs/2006.03654) by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. It's build + on top of BERT/RoBERTa with two improvements, i.e. disentangled attention and enhanced mask decoder. With those two + improvements, it out perform BERT/RoBERTa on a majority of tasks with 80GB pretraining data. + + This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. + Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage + and behavior. + + + Parameters: + config ([`DebertaConfig`]): Model configuration class with all the parameters of the model. + Initializing with a config file does not load the weights associated with the model, only the + configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. 
+""" + +DEBERTA_INPUTS_DOCSTRING = r""" + Args: + input_ids (`torch.LongTensor` of shape `({0})`): + Indices of input sequence tokens in the vocabulary. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*): + Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, + 1]`: + + - 0 corresponds to a *sentence A* token, + - 1 corresponds to a *sentence B* token. + + [What are token type IDs?](../glossary#token-type-ids) + position_ids (`torch.LongTensor` of shape `({0})`, *optional*): + Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, + config.max_position_embeddings - 1]`. + + [What are position IDs?](../glossary#position-ids) + inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*): + Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This + is useful if you want more control over how to convert *input_ids* indices into associated vectors than the + model's internal embedding lookup matrix. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. +""" + + +@add_start_docstrings( + "The bare DeBERTa Model transformer outputting raw hidden-states without any specific head on top.", + DEBERTA_START_DOCSTRING, +) +class DebertaModel(DebertaPreTrainedModel): + def __init__(self, config): + super().__init__(config) + + self.embeddings = DebertaEmbeddings(config) + self.encoder = DebertaEncoder(config) + self.z_steps = 0 + self.config = config + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.embeddings.word_embeddings + + def set_input_embeddings(self, new_embeddings): + self.embeddings.word_embeddings = new_embeddings + + def _prune_heads(self, heads_to_prune): + """ + Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base + class PreTrainedModel + """ + raise NotImplementedError("The prune function is not implemented in DeBERTa model.") + + @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=BaseModelOutput, + config_class=_CONFIG_FOR_DOC, + ) + def forward( + self, + input_ids: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + token_type_ids: Optional[torch.Tensor] = None, + position_ids: Optional[torch.Tensor] = None, + inputs_embeds: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, BaseModelOutput]: + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if input_ids is not None and inputs_embeds is not None: + raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") + elif input_ids is not None: + input_shape = input_ids.size() + elif inputs_embeds is not None: + input_shape = inputs_embeds.size()[:-1] + else: + raise ValueError("You have to specify either input_ids or inputs_embeds") + + device = input_ids.device if input_ids is not None else inputs_embeds.device + + if attention_mask is None: + attention_mask = torch.ones(input_shape, device=device) + if token_type_ids is None: + token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) + + embedding_output = self.embeddings( + input_ids=input_ids, + token_type_ids=token_type_ids, + position_ids=position_ids, + mask=attention_mask, + inputs_embeds=inputs_embeds, + ) + + encoder_outputs = self.encoder( + embedding_output, + attention_mask, + output_hidden_states=True, + output_attentions=output_attentions, + return_dict=return_dict, + ) + encoded_layers = encoder_outputs[1] + + if self.z_steps > 1: + hidden_states = encoded_layers[-2] + layers = [self.encoder.layer[-1] for _ in range(self.z_steps)] + query_states = encoded_layers[-1] + rel_embeddings = self.encoder.get_rel_embedding() + attention_mask = self.encoder.get_attention_mask(attention_mask) + rel_pos = self.encoder.get_rel_pos(embedding_output) + for layer in layers[1:]: + query_states = layer( + hidden_states, + attention_mask, + output_attentions=False, + query_states=query_states, + relative_pos=rel_pos, + rel_embeddings=rel_embeddings, + ) + encoded_layers.append(query_states) + + sequence_output = encoded_layers[-1] + + if not return_dict: + return (sequence_output,) + encoder_outputs[(1 if output_hidden_states else 2) :] + + return BaseModelOutput( + last_hidden_state=sequence_output, + hidden_states=encoder_outputs.hidden_states if output_hidden_states else None, + attentions=encoder_outputs.attentions, + ) + + + +model_name = "microsoft/deberta-base" +device = "cuda:0" + +def _get_scripted_model(): + config = AutoConfig.from_pretrained(model_name) + config.return_dict = False + model = DebertaModel(config).to(device) + print("model type", type(model)) + return model + +def get_input(batch_size, seq_len=256): + # tokenizer = AutoTokenizer.from_pretrained(model_name) + # inputs = tokenizer("Hello world! 
Hello world! Hello world! Hello world! Hello world!", return_tensors="pt").to(device) + # assert len(inputs) == 3 + # return (inputs['input_ids'], inputs['attention_mask'], inputs['token_type_ids']), {} + vocab_size = 50265 + input_ids = torch.randint(0, vocab_size, (batch_size, seq_len), dtype=torch.int64).to(device) + attention_mask = torch.ones((batch_size, seq_len), dtype=torch.int64).to(device) + token_type_ids = torch.zeros((batch_size, seq_len), dtype=torch.int64).to(device) + return (input_ids, attention_mask, token_type_ids), {} + + +if __name__ == "__main__": + model = _get_scripted_model() + input_args, input_kwargs = get_input(batch_size=1) + print([x.shape for x in input_args]) + outputs = model(*input_args, **input_kwargs) + print(outputs) + diff --git a/models/_densenet_scripted.py b/models/_densenet_scripted.py new file mode 100644 index 000000000000..6c54aad15b6e --- /dev/null +++ b/models/_densenet_scripted.py @@ -0,0 +1,252 @@ +# https://github.com/pytorch/vision/blob/f677ea31db8f45dbfec2fe5e519da82853815776/torchvision/models/densenet.py + +import re +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.utils.checkpoint as cp +from collections import OrderedDict +from utils import script_with_log + +__all__ = ['DenseNet', 'densenet121', 'densenet169', 'densenet201', 'densenet161'] + +model_urls = { + 'densenet121': 'https://download.pytorch.org/models/densenet121-a639ec97.pth', + 'densenet169': 'https://download.pytorch.org/models/densenet169-b2777c0a.pth', + 'densenet201': 'https://download.pytorch.org/models/densenet201-c1103571.pth', + 'densenet161': 'https://download.pytorch.org/models/densenet161-8d451a50.pth', +} + + +def _bn_function_factory(norm, relu, conv): + def bn_function(*inputs): + concated_features = torch.cat(inputs, 1) + bottleneck_output = conv(relu(norm(concated_features))) + return bottleneck_output + + return bn_function + + +class _DenseLayer(nn.Sequential): + def __init__(self, num_input_features, growth_rate, bn_size, drop_rate, memory_efficient=False): + super(_DenseLayer, self).__init__() + self.add_module('norm1', nn.BatchNorm2d(num_input_features)), + self.add_module('relu1', nn.ReLU(inplace=True)), + self.add_module('conv1', nn.Conv2d(num_input_features, bn_size * + growth_rate, kernel_size=1, stride=1, + bias=False)), + self.add_module('norm2', nn.BatchNorm2d(bn_size * growth_rate)), + self.add_module('relu2', nn.ReLU(inplace=True)), + self.add_module('conv2', nn.Conv2d(bn_size * growth_rate, growth_rate, + kernel_size=3, stride=1, padding=1, + bias=False)), + self.drop_rate = drop_rate + self.memory_efficient = memory_efficient + + def forward(self, *prev_features): + bn_function = _bn_function_factory(self.norm1, self.relu1, self.conv1) + if self.memory_efficient and any(prev_feature.requires_grad for prev_feature in prev_features): + bottleneck_output = cp.checkpoint(bn_function, *prev_features) + else: + bottleneck_output = bn_function(*prev_features) + new_features = self.conv2(self.relu2(self.norm2(bottleneck_output))) + if self.drop_rate > 0: + new_features = F.dropout(new_features, p=self.drop_rate, + training=self.training) + return new_features + + +class _DenseBlock(nn.Module): + def __init__(self, num_layers, num_input_features, bn_size, growth_rate, drop_rate, memory_efficient=False): + super(_DenseBlock, self).__init__() + for i in range(num_layers): + layer = _DenseLayer( + num_input_features + i * growth_rate, + growth_rate=growth_rate, + bn_size=bn_size, + drop_rate=drop_rate, + 
memory_efficient=memory_efficient, + ) + self.add_module('denselayer%d' % (i + 1), layer) + + def forward(self, init_features): + features = [init_features] + for name, layer in self.named_children(): + new_features = layer(*features) + features.append(new_features) + return torch.cat(features, 1) + + +class _Transition(nn.Sequential): + def __init__(self, num_input_features, num_output_features): + super(_Transition, self).__init__() + self.add_module('norm', nn.BatchNorm2d(num_input_features)) + self.add_module('relu', nn.ReLU(inplace=True)) + self.add_module('conv', nn.Conv2d(num_input_features, num_output_features, + kernel_size=1, stride=1, bias=False)) + self.add_module('pool', nn.AvgPool2d(kernel_size=2, stride=2)) + + +class DenseNet(nn.Module): + r"""Densenet-BC model class, based on + `"Densely Connected Convolutional Networks" `_ + + Args: + growth_rate (int) - how many filters to add each layer (`k` in paper) + block_config (list of 4 ints) - how many layers in each pooling block + num_init_features (int) - the number of filters to learn in the first convolution layer + bn_size (int) - multiplicative factor for number of bottle neck layers + (i.e. bn_size * k features in the bottleneck layer) + drop_rate (float) - dropout rate after each dense layer + num_classes (int) - number of classification classes + memory_efficient (bool) - If True, uses checkpointing. Much more memory efficient, + but slower. Default: *False*. See `"paper" `_ + """ + + def __init__(self, growth_rate=32, block_config=(6, 12, 24, 16), + num_init_features=64, bn_size=4, drop_rate=0, num_classes=1000, memory_efficient=False): + + super(DenseNet, self).__init__() + + # First convolution + self.features = nn.Sequential(OrderedDict([ + ('conv0', nn.Conv2d(3, num_init_features, kernel_size=7, stride=2, + padding=3, bias=False)), + ('norm0', nn.BatchNorm2d(num_init_features)), + ('relu0', nn.ReLU(inplace=True)), + ('pool0', nn.MaxPool2d(kernel_size=3, stride=2, padding=1)), + ])) + + # Each denseblock + num_features = num_init_features + for i, num_layers in enumerate(block_config): + block = _DenseBlock( + num_layers=num_layers, + num_input_features=num_features, + bn_size=bn_size, + growth_rate=growth_rate, + drop_rate=drop_rate, + memory_efficient=memory_efficient + ) + self.features.add_module('denseblock%d' % (i + 1), block) + num_features = num_features + num_layers * growth_rate + if i != len(block_config) - 1: + trans = script_with_log(_Transition(num_input_features=num_features, + num_output_features=num_features // 2)) + self.features.add_module('transition%d' % (i + 1), trans) + num_features = num_features // 2 + + # Final batch norm + self.features.add_module('norm5', nn.BatchNorm2d(num_features)) + + # Linear layer + self.classifier = nn.Linear(num_features, num_classes) + + # Official init from torch repo. + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight) + elif isinstance(m, nn.BatchNorm2d): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.Linear): + nn.init.constant_(m.bias, 0) + + def forward(self, x): + features = self.features(x) + out = F.relu(features, inplace=True) + out = F.adaptive_avg_pool2d(out, (1, 1)) + out = torch.flatten(out, 1) + out = self.classifier(out) + return out + + +def _load_state_dict(model, model_url, progress): + # '.'s are no longer allowed in module names, but previous _DenseLayer + # has keys 'norm.1', 'relu.1', 'conv.1', 'norm.2', 'relu.2', 'conv.2'. 
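+    # (For example, a checkpoint key such as 'features.denseblock1.denselayer1.norm.1.weight'
+    # is rewritten by the pattern below to 'features.denseblock1.denselayer1.norm1.weight'.)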
+ # They are also in the checkpoints in model_urls. This pattern is used + # to find such keys. + pattern = re.compile( + r'^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$') + + state_dict = load_state_dict_from_url(model_url, progress=progress) + for key in list(state_dict.keys()): + res = pattern.match(key) + if res: + new_key = res.group(1) + res.group(2) + state_dict[new_key] = state_dict[key] + del state_dict[key] + model.load_state_dict(state_dict) + + +def _densenet(arch, growth_rate, block_config, num_init_features, pretrained, progress, + **kwargs): + model = DenseNet(growth_rate, block_config, num_init_features, **kwargs) + if pretrained: + _load_state_dict(model, model_urls[arch], progress) + return model + + +def densenet121(pretrained=False, progress=True, **kwargs): + r"""Densenet-121 model from + `"Densely Connected Convolutional Networks" `_ + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + memory_efficient (bool) - If True, uses checkpointing. Much more memory efficient, + but slower. Default: *False*. See `"paper" `_ + """ + return _densenet('densenet121', 32, (6, 12, 24, 16), 64, pretrained, progress, + **kwargs) + + +def densenet161(pretrained=False, progress=True, **kwargs): + r"""Densenet-161 model from + `"Densely Connected Convolutional Networks" `_ + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + memory_efficient (bool) - If True, uses checkpointing. Much more memory efficient, + but slower. Default: *False*. See `"paper" `_ + """ + return _densenet('densenet161', 48, (6, 12, 36, 24), 96, pretrained, progress, + **kwargs) + + +def densenet169(pretrained=False, progress=True, **kwargs): + r"""Densenet-169 model from + `"Densely Connected Convolutional Networks" `_ + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + memory_efficient (bool) - If True, uses checkpointing. Much more memory efficient, + but slower. Default: *False*. See `"paper" `_ + """ + return _densenet('densenet169', 32, (6, 12, 32, 32), 64, pretrained, progress, + **kwargs) + + +def densenet201(pretrained=False, progress=True, **kwargs): + r"""Densenet-201 model from + `"Densely Connected Convolutional Networks" `_ + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + memory_efficient (bool) - If True, uses checkpointing. Much more memory efficient, + but slower. Default: *False*. 
See `"paper" `_ + """ + return _densenet('densenet201', 32, (6, 12, 48, 32), 64, pretrained, progress, + **kwargs) + + +def _get_scripted_model(): + model = densenet121(pretrained=False).cuda() + return model + +def get_input(batch_size): + return (torch.randn((batch_size, 3, 224, 224)).cuda(),), {} + diff --git a/models/_monodepth_scripted.py b/models/_monodepth_scripted.py new file mode 100644 index 000000000000..211a2e9411f5 --- /dev/null +++ b/models/_monodepth_scripted.py @@ -0,0 +1,338 @@ +# from paritybench +from __future__ import absolute_import, division, print_function +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F +from utils import script_with_log + + +class conv(nn.Module): + def __init__(self, num_in_layers, num_out_layers, kernel_size, stride): + super(conv, self).__init__() + self.kernel_size = kernel_size + self.conv_base = nn.Conv2d(num_in_layers, num_out_layers, kernel_size=kernel_size, stride=stride) + self.normalize = nn.BatchNorm2d(num_out_layers) + print("create class with torch.jit.ignore", self) + + @torch.jit.ignore + def _get_p(self) -> int: + p = int(np.floor((self.kernel_size-1)/2)) + return p + + def forward(self, x): + p = self._get_p() + p2d = (p, p, p, p) + x = self.conv_base(F.pad(x, p2d)) + x = self.normalize(x) + return F.elu(x, inplace=True) + + +class convblock(nn.Module): + def __init__(self, num_in_layers, num_out_layers, kernel_size): + super(convblock, self).__init__() + self.conv1 = conv(num_in_layers, num_out_layers, kernel_size, 1) + self.conv2 = conv(num_out_layers, num_out_layers, kernel_size, 2) + + def forward(self, x): + x = self.conv1(x) + return self.conv2(x) + + +class maxpool(nn.Module): + def __init__(self, kernel_size): + super(maxpool, self).__init__() + self.kernel_size = kernel_size + print("create class with torch.jit.ignore", self) + + @torch.jit.ignore + def _get_p(self) -> int: + p = int(np.floor((self.kernel_size-1)/2)) + return p + + def forward(self, x): + p = self._get_p() + p2d = (p, p, p, p) + return F.max_pool2d(F.pad(x, p2d), self.kernel_size, stride=2) + + +class resconv(nn.Module): + def __init__(self, num_in_layers, num_out_layers, stride): + super(resconv, self).__init__() + self.num_out_layers = num_out_layers + self.stride = stride + self.conv1 = conv(num_in_layers, num_out_layers, 1, 1) + self.conv2 = conv(num_out_layers, num_out_layers, 3, stride) + self.conv3 = nn.Conv2d(num_out_layers, 4*num_out_layers, kernel_size=1, stride=1) + self.conv4 = nn.Conv2d(num_in_layers, 4*num_out_layers, kernel_size=1, stride=stride) + self.normalize = nn.BatchNorm2d(4*num_out_layers) + + def forward(self, x): + # do_proj = x.size()[1] != self.num_out_layers or self.stride == 2 + do_proj = True + shortcut = [] + x_out = self.conv1(x) + x_out = self.conv2(x_out) + x_out = self.conv3(x_out) + if do_proj: + shortcut = self.conv4(x) + else: + shortcut = x + return F.elu(self.normalize(x_out + shortcut), inplace=True) + + +class resconv_basic(nn.Module): + # for resnet18 + def __init__(self, num_in_layers, num_out_layers, stride): + super(resconv_basic, self).__init__() + self.num_out_layers = num_out_layers + self.stride = stride + self.conv1 = conv(num_in_layers, num_out_layers, 3, stride) + self.conv2 = conv(num_out_layers, num_out_layers, 3, 1) + self.conv3 = nn.Conv2d(num_in_layers, num_out_layers, kernel_size=1, stride=stride) + self.normalize = nn.BatchNorm2d(num_out_layers) + + def forward(self, x): + # do_proj = x.size()[1] != self.num_out_layers or self.stride == 2 + do_proj = True 
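+        # Note: the shape-dependent test above is commented out and replaced by a constant,
+        # so the projection branch is always taken; presumably this keeps the control flow
+        # static and avoids graph breaks when the module is scripted or compiled.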
+ x_out = self.conv1(x) + x_out = self.conv2(x_out) + if do_proj: + shortcut = self.conv3(x) + else: + shortcut = x + return F.elu(self.normalize(x_out + shortcut), inplace=True) + + +def resblock(num_in_layers, num_out_layers, num_blocks, stride): + layers = [] + layers.append(resconv(num_in_layers, num_out_layers, stride)) + for i in range(1, num_blocks - 1): + layers.append(resconv(4 * num_out_layers, num_out_layers, 1)) + layers.append(resconv(4 * num_out_layers, num_out_layers, 1)) + return nn.Sequential(*layers) + + +def resblock_basic(num_in_layers, num_out_layers, num_blocks, stride): + layers = [] + layers.append(resconv_basic(num_in_layers, num_out_layers, stride)) + for i in range(1, num_blocks): + layers.append(resconv_basic(num_out_layers, num_out_layers, 1)) + return nn.Sequential(*layers) + + +class upconv(nn.Module): + def __init__(self, num_in_layers, num_out_layers, kernel_size, scale): + super(upconv, self).__init__() + self.scale = float(scale) + self.conv1 = conv(num_in_layers, num_out_layers, kernel_size, 1) + + def forward(self, x): + x = nn.functional.interpolate(x, scale_factor=self.scale, mode='bilinear', align_corners=True) + return self.conv1(x) + + +class get_disp(nn.Module): + def __init__(self, num_in_layers): + super(get_disp, self).__init__() + self.conv1 = nn.Conv2d(num_in_layers, 2, kernel_size=3, stride=1) + self.normalize = nn.BatchNorm2d(2) + self.sigmoid = torch.nn.Sigmoid() + + def forward(self, x): + p = 1 + p2d = (p, p, p, p) + x = self.conv1(F.pad(x, p2d)) + x = self.normalize(x) + return 0.3 * self.sigmoid(x) + + +class Resnet50_md(nn.Module): + def __init__(self, num_in_layers): + super(Resnet50_md, self).__init__() + # encoder + self.conv1 = conv(num_in_layers, 64, 7, 2) # H/2 - 64D + self.pool1 = maxpool(3) # H/4 - 64D + self.conv2 = resblock(64, 64, 3, 2) # H/8 - 256D + self.conv3 = resblock(256, 128, 4, 2) # H/16 - 512D + self.conv4 = resblock(512, 256, 6, 2) # H/32 - 1024D + self.conv5 = resblock(1024, 512, 3, 2) # H/64 - 2048D + + # decoder + self.upconv6 = upconv(2048, 512, 3, 2) + self.iconv6 = conv(1024 + 512, 512, 3, 1) + + self.upconv5 = upconv(512, 256, 3, 2) + self.iconv5 = conv(512+256, 256, 3, 1) + + self.upconv4 = upconv(256, 128, 3, 2) + self.iconv4 = conv(256+128, 128, 3, 1) + self.disp4_layer = get_disp(128) + + self.upconv3 = upconv(128, 64, 3, 2) + self.iconv3 = conv(64+64+2, 64, 3, 1) + self.disp3_layer = get_disp(64) + + self.upconv2 = upconv(64, 32, 3, 2) + self.iconv2 = conv(32+64+2, 32, 3, 1) + self.disp2_layer = get_disp(32) + + self.upconv1 = upconv(32, 16, 3, 2) + self.iconv1 = conv(16+2, 16, 3, 1) + self.disp1_layer = get_disp(16) + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.xavier_uniform_(m.weight) + + def forward(self, x): + # encoder + x1 = self.conv1(x) + x_pool1 = self.pool1(x1) + x2 = self.conv2(x_pool1) + x3 = self.conv3(x2) + x4 = self.conv4(x3) + x5 = self.conv5(x4) + + # skips + skip1 = x1 + skip2 = x_pool1 + skip3 = x2 + skip4 = x3 + skip5 = x4 + + # decoder + upconv6 = self.upconv6(x5) + concat6 = torch.cat((upconv6, skip5), 1) + iconv6 = self.iconv6(concat6) + + upconv5 = self.upconv5(iconv6) + concat5 = torch.cat((upconv5, skip4), 1) + iconv5 = self.iconv5(concat5) + + upconv4 = self.upconv4(iconv5) + concat4 = torch.cat((upconv4, skip3), 1) + iconv4 = self.iconv4(concat4) + self.disp4 = self.disp4_layer(iconv4) + self.udisp4 = nn.functional.interpolate(self.disp4, scale_factor=2, mode='bilinear', align_corners=True) + + upconv3 = self.upconv3(iconv4) + concat3 = 
torch.cat((upconv3, skip2, self.udisp4), 1) + iconv3 = self.iconv3(concat3) + self.disp3 = self.disp3_layer(iconv3) + self.udisp3 = nn.functional.interpolate(self.disp3, scale_factor=2, mode='bilinear', align_corners=True) + + upconv2 = self.upconv2(iconv3) + concat2 = torch.cat((upconv2, skip1, self.udisp3), 1) + iconv2 = self.iconv2(concat2) + self.disp2 = self.disp2_layer(iconv2) + self.udisp2 = nn.functional.interpolate(self.disp2, scale_factor=2, mode='bilinear', align_corners=True) + + upconv1 = self.upconv1(iconv2) + concat1 = torch.cat((upconv1, self.udisp2), 1) + iconv1 = self.iconv1(concat1) + self.disp1 = self.disp1_layer(iconv1) + return self.disp1, self.disp2, self.disp3, self.disp4 + + +class Resnet18_md(nn.Module): + def __init__(self, num_in_layers): + super(Resnet18_md, self).__init__() + # encoder + self.conv1 = conv(num_in_layers, 64, 7, 2) # H/2 - 64D + self.pool1 = maxpool(3) # H/4 - 64D + self.conv2 = resblock_basic(64, 64, 2, 2) # H/8 - 64D + self.conv3 = resblock_basic(64, 128, 2, 2) # H/16 - 128D + self.conv4 = resblock_basic(128, 256, 2, 2) # H/32 - 256D + self.conv5 = resblock_basic(256, 512, 2, 2) # H/64 - 512D + + # decoder + self.upconv6 = upconv(512, 512, 3, 2) + self.iconv6 = conv(256+512, 512, 3, 1) + + self.upconv5 = upconv(512, 256, 3, 2) + self.iconv5 = conv(128+256, 256, 3, 1) + + self.upconv4 = upconv(256, 128, 3, 2) + self.iconv4 = conv(64+128, 128, 3, 1) + self.disp4_layer = get_disp(128) + + self.upconv3 = upconv(128, 64, 3, 2) + self.iconv3 = conv(64+64 + 2, 64, 3, 1) + self.disp3_layer = get_disp(64) + + self.upconv2 = upconv(64, 32, 3, 2) + self.iconv2 = conv(64+32 + 2, 32, 3, 1) + self.disp2_layer = get_disp(32) + + self.upconv1 = upconv(32, 16, 3, 2) + self.iconv1 = conv(16+2, 16, 3, 1) + self.disp1_layer = get_disp(16) + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.xavier_uniform_(m.weight) + + def forward(self, x): + # encoder + x1 = self.conv1(x) + x_pool1 = self.pool1(x1) + x2 = self.conv2(x_pool1) + x3 = self.conv3(x2) + x4 = self.conv4(x3) + x5 = self.conv5(x4) + + # skips + skip1 = x1 + skip2 = x_pool1 + skip3 = x2 + skip4 = x3 + skip5 = x4 + + # decoder + upconv6 = self.upconv6(x5) + concat6 = torch.cat((upconv6, skip5), 1) + iconv6 = self.iconv6(concat6) + + upconv5 = self.upconv5(iconv6) + concat5 = torch.cat((upconv5, skip4), 1) + iconv5 = self.iconv5(concat5) + + upconv4 = self.upconv4(iconv5) + concat4 = torch.cat((upconv4, skip3), 1) + iconv4 = self.iconv4(concat4) + disp4 = self.disp4_layer(iconv4) + udisp4 = nn.functional.interpolate(disp4, scale_factor=2.0, mode='bilinear', align_corners=True) + + upconv3 = self.upconv3(iconv4) + concat3 = torch.cat((upconv3, skip2, udisp4), 1) + iconv3 = self.iconv3(concat3) + disp3 = self.disp3_layer(iconv3) + udisp3 = nn.functional.interpolate(disp3, scale_factor=2.0, mode='bilinear', align_corners=True) + + upconv2 = self.upconv2(iconv3) + concat2 = torch.cat((upconv2, skip1, udisp3), 1) + iconv2 = self.iconv2(concat2) + disp2 = self.disp2_layer(iconv2) + udisp2 = nn.functional.interpolate(disp2, scale_factor=2.0, mode='bilinear', align_corners=True) + + upconv1 = self.upconv1(iconv2) + concat1 = torch.cat((upconv1, udisp2), 1) + iconv1 = self.iconv1(concat1) + disp1 = self.disp1_layer(iconv1) + return disp1, disp2, disp3, disp4 + + +def get_model(): + return Resnet18_md(3).cuda() + +def _get_scripted_model(): + model = get_model() + model = script_with_log(model) + return model + + +def get_input(batch_size): + return (torch.randn((batch_size, 3, 256, 256)).cuda(),), 
{} + + \ No newline at end of file diff --git a/models/_quantized_scripted.py b/models/_quantized_scripted.py new file mode 100644 index 000000000000..db2f88a12443 --- /dev/null +++ b/models/_quantized_scripted.py @@ -0,0 +1,491 @@ +import math +import torch +import torch.nn as nn +import torch.nn.parallel +import torch.optim +import torch.utils.data +from collections import OrderedDict +import torchvision.transforms as transforms +import math +import torch.nn.functional as F +from torch.autograd import Variable +from torch.autograd.function import InplaceFunction +from utils import script_with_log + + +# models/modules/quantize.py + +class UniformQuantize(InplaceFunction): + + @classmethod + def forward(cls, ctx, input, num_bits=8, min_value=None, max_value=None, + stochastic=False, inplace=False, enforce_true_zero=False, num_chunks=None, out_half=False): + + num_chunks = num_chunks = input.shape[ + 0] if num_chunks is None else num_chunks + if min_value is None or max_value is None: + B = input.shape[0] + y = input.view(B // num_chunks, -1) + if min_value is None: + min_value = y.min(-1)[0].mean(-1) # C + #min_value = float(input.view(input.size(0), -1).min(-1)[0].mean()) + if max_value is None: + #max_value = float(input.view(input.size(0), -1).max(-1)[0].mean()) + max_value = y.max(-1)[0].mean(-1) # C + + ctx.inplace = inplace + ctx.num_bits = num_bits + ctx.min_value = min_value + ctx.max_value = max_value + ctx.stochastic = stochastic + + if ctx.inplace: + ctx.mark_dirty(input) + output = input + else: + output = input.clone() + + qmin = 0. + qmax = 2.**num_bits - 1. + #import pdb; pdb.set_trace() + scale = (max_value - min_value) / (qmax - qmin) + + scale = max(scale, 1e-8) + + if enforce_true_zero: + initial_zero_point = qmin - min_value / scale + zero_point = 0. 
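+            # Taken together with the shared clamp/round below, this path amounts to standard
+            # affine fake-quantization (optionally with stochastic rounding noise):
+            #   q     = round(clamp(x / scale + zero_point, qmin, qmax))
+            #   x_hat = (q - zero_point) * scale
+            # where zero_point = qmin - min_value / scale, clamped to [qmin, qmax], so that the
+            # real value 0.0 lands exactly on an integer level ("enforce_true_zero").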
+ # make zero exactly represented + if initial_zero_point < qmin: + zero_point = qmin + elif initial_zero_point > qmax: + zero_point = qmax + else: + zero_point = initial_zero_point + zero_point = int(zero_point) + output.div_(scale).add_(zero_point) + else: + output.add_(-min_value).div_(scale).add_(qmin) + + if ctx.stochastic: + noise = output.new(output.shape).uniform_(-0.5, 0.5) + output.add_(noise) + output.clamp_(qmin, qmax).round_() # quantize + + if enforce_true_zero: + output.add_(-zero_point).mul_(scale) # dequantize + else: + output.add_(-qmin).mul_(scale).add_(min_value) # dequantize + if out_half and num_bits <= 16: + output = output.half() + return output + + @staticmethod + def backward(ctx, grad_output): + # straight-through estimator + grad_input = grad_output + return grad_input, None, None, None, None, None, None + + +class UniformQuantizeGrad(InplaceFunction): + + @classmethod + def forward(cls, ctx, input, num_bits=8, min_value=None, max_value=None, stochastic=True, inplace=False): + ctx.inplace = inplace + ctx.num_bits = num_bits + ctx.min_value = min_value + ctx.max_value = max_value + ctx.stochastic = stochastic + return input + + @staticmethod + def backward(ctx, grad_output): + if ctx.min_value is None: + min_value = float(grad_output.min()) + # min_value = float(grad_output.view( + # grad_output.size(0), -1).min(-1)[0].mean()) + else: + min_value = ctx.min_value + if ctx.max_value is None: + max_value = float(grad_output.max()) + # max_value = float(grad_output.view( + # grad_output.size(0), -1).max(-1)[0].mean()) + else: + max_value = ctx.max_value + grad_input = UniformQuantize().apply(grad_output, ctx.num_bits, + min_value, max_value, ctx.stochastic, ctx.inplace) + return grad_input, None, None, None, None, None + + +def conv2d_biprec(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1, num_bits_grad=None): + out1 = F.conv2d(input.detach(), weight, bias, + stride, padding, dilation, groups) + out2 = F.conv2d(input, weight.detach(), bias.detach() if bias is not None else None, + stride, padding, dilation, groups) + out2 = quantize_grad(out2, num_bits=num_bits_grad) + return out1 + out2 - out1.detach() + + +def linear_biprec(input, weight, bias=None, num_bits_grad=None): + out1 = F.linear(input.detach(), weight, bias) + out2 = F.linear(input, weight.detach(), bias.detach() + if bias is not None else None) + out2 = quantize_grad(out2, num_bits=num_bits_grad) + return out1 + out2 - out1.detach() + + +def quantize(x, num_bits:int=8, min_value=None, max_value=None, num_chunks=None, stochastic=False, inplace=False): + return UniformQuantize().apply(x, num_bits, min_value, max_value, num_chunks, stochastic, inplace) + + +def quantize_grad(x, num_bits=8, min_value=None, max_value=None, stochastic=True, inplace=False): + return UniformQuantizeGrad().apply(x, num_bits, min_value, max_value, stochastic, inplace) + + +class QuantMeasure(nn.Module): + """docstring for QuantMeasure.""" + + def __init__(self, num_bits=8, momentum=0.1): + super(QuantMeasure, self).__init__() + self.register_buffer('running_min', torch.zeros(1)) + self.register_buffer('running_max', torch.zeros(1)) + self.momentum = momentum + self.num_bits = num_bits + + @torch.jit.ignore + def _fwd_quant(self, input, min_value, max_value) -> torch.Tensor: + return quantize(input, self.num_bits, min_value=min_value, max_value=max_value, num_chunks=16) + + + def forward(self, input): + if self.training: + min_value = input.detach().view( + input.size(0), -1).min(-1)[0].mean() + max_value = 
input.detach().view( + input.size(0), -1).max(-1)[0].mean() + self.running_min.mul_(self.momentum).add_( + min_value * (1 - self.momentum)) + self.running_max.mul_(self.momentum).add_( + max_value * (1 - self.momentum)) + else: + min_value = self.running_min + max_value = self.running_max + return self._fwd_quant(input, min_value, max_value) + + +class QConv2d(nn.Conv2d): + """docstring for QConv2d.""" + + def __init__(self, in_channels, out_channels, kernel_size, + stride=1, padding=0, dilation=1, groups=1, bias=True, num_bits=8, num_bits_weight=None, num_bits_grad=None, biprecision=False): + super(QConv2d, self).__init__(in_channels, out_channels, kernel_size, + stride, padding, dilation, groups, bias) + self.num_bits = num_bits + self.num_bits_weight = num_bits_weight or num_bits + self.num_bits_grad = num_bits_grad + self.quantize_input = QuantMeasure(self.num_bits) + self.biprecision = biprecision + print("create class with torch.jit.ignore", self) + + @torch.jit.ignore + def _quant_fwd(self, input: torch.Tensor) -> torch.Tensor: + qinput = self.quantize_input.forward(input) # add .forward as scriptmodule is not callable + qweight = quantize(self.weight, num_bits=self.num_bits_weight, + min_value=float(self.weight.min()), + max_value=float(self.weight.max())) + if self.bias is not None: + qbias = quantize(self.bias, num_bits=self.num_bits_weight) + else: + qbias = None + if not self.biprecision or self.num_bits_grad is None: + output = F.conv2d(qinput, qweight, qbias, self.stride, + self.padding, self.dilation, self.groups) + if self.num_bits_grad is not None: + output = quantize_grad(output, num_bits=self.num_bits_grad) + else: + output = conv2d_biprec(qinput, qweight, qbias, self.stride, + self.padding, self.dilation, self.groups, num_bits_grad=self.num_bits_grad) + return output + + + def forward(self, input): + return self._quant_fwd(input) + + +class QLinear(nn.Linear): + """docstring for QConv2d.""" + + def __init__(self, in_features, out_features, bias=True, num_bits=8, num_bits_weight=None, num_bits_grad=None, biprecision=False): + super(QLinear, self).__init__(in_features, out_features, bias) + self.num_bits = num_bits + self.num_bits_weight = num_bits_weight or num_bits + self.num_bits_grad = num_bits_grad + self.biprecision = biprecision + self.quantize_input = QuantMeasure(self.num_bits) + print("create class with torch.jit.ignore", self) + + @torch.jit.ignore + def forward(self, input): + qinput = self.quantize_input.forward(input) + qweight = quantize(self.weight, num_bits=self.num_bits_weight, + min_value=float(self.weight.min()), + max_value=float(self.weight.max())) + if self.bias is not None: + qbias = quantize(self.bias, num_bits=self.num_bits_weight) + else: + qbias = None + + if not self.biprecision or self.num_bits_grad is None: + output = F.linear(qinput, qweight, qbias) + if self.num_bits_grad is not None: + output = quantize_grad(output, num_bits=self.num_bits_grad) + else: + output = linear_biprec(qinput, qweight, qbias, self.num_bits_grad) + return output + + +class RangeBN(nn.Module): + # this is normalized RangeBN + + def __init__(self, num_features, dim=1, momentum=0.1, affine=True, num_chunks=16, eps=1e-5, num_bits=8, num_bits_grad=8): + super(RangeBN, self).__init__() + self.register_buffer('running_mean', torch.zeros(num_features)) + self.register_buffer('running_var', torch.zeros(num_features)) + + self.momentum = momentum + self.dim = dim + if affine: + self.bias = nn.Parameter(torch.Tensor(num_features)) + self.weight = 
nn.Parameter(torch.Tensor(num_features)) + self.num_bits = num_bits + self.num_bits_grad = num_bits_grad + self.quantize_input = QuantMeasure(self.num_bits) + self.eps = eps + self.num_chunks = num_chunks + self.reset_params() + + def reset_params(self): + if self.weight is not None: + self.weight.data.uniform_() + if self.bias is not None: + self.bias.data.zero_() + + def forward(self, x): + x = self.quantize_input(x) + if x.dim() == 2: # 1d + x = x.unsqueeze(-1,).unsqueeze(-1) + + if self.training: + B, C, H, W = x.shape + y = x.transpose(0, 1).contiguous() # C x B x H x W + y = y.view(C, self.num_chunks, B * H * W // self.num_chunks) + mean_max = y.max(-1)[0].mean(-1) # C + mean_min = y.min(-1)[0].mean(-1) # C + mean = y.view(C, -1).mean(-1) # C + scale_fix = (0.5 * 0.35) * (1 + (math.pi * math.log(4)) ** + 0.5) / ((2 * math.log(y.size(-1))) ** 0.5) + + scale = 1 / ((mean_max - mean_min) * scale_fix + self.eps) + + self.running_mean.detach().mul_(self.momentum).add_( + mean * (1 - self.momentum)) + + self.running_var.detach().mul_(self.momentum).add_( + scale * (1 - self.momentum)) + else: + mean = self.running_mean + scale = self.running_var + scale = quantize(scale, num_bits=self.num_bits, min_value=float( + scale.min()), max_value=float(scale.max())) + out = (x - mean.view(1, mean.size(0), 1, 1)) * \ + scale.view(1, scale.size(0), 1, 1) + + if self.weight is not None: + qweight = quantize(self.weight, num_bits=self.num_bits, + min_value=float(self.weight.min()), + max_value=float(self.weight.max())) + out = out * qweight.view(1, qweight.size(0), 1, 1) + + if self.bias is not None: + qbias = quantize(self.bias, num_bits=self.num_bits) + out = out + qbias.view(1, qbias.size(0), 1, 1) + if self.num_bits_grad is not None: + out = quantize_grad(out, num_bits=self.num_bits_grad) + + if out.size(3) == 1 and out.size(2) == 1: + out = out.squeeze(-1).squeeze(-1) + return out + + +# models/resnet_quantized_float_bn.py +NUM_BITS = 8 +NUM_BITS_WEIGHT = 8 +NUM_BITS_GRAD = 8 + + +def conv3x3(in_planes, out_planes, stride=1): + "3x3 convolution with padding" + return QConv2d(in_planes, out_planes, kernel_size=3, stride=stride, + padding=1, bias=False, num_bits=NUM_BITS, num_bits_weight=NUM_BITS_WEIGHT, num_bits_grad=NUM_BITS_GRAD) + + +def init_model(model): + for m in model.modules(): + if isinstance(m, QConv2d): + n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels + m.weight.data.normal_(0, math.sqrt(2. 
/ n)) + elif isinstance(m, nn.BatchNorm2d): + m.weight.data.fill_(1) + m.bias.data.zero_() + + +class BasicBlock(nn.Module): + expansion = 1 + + def __init__(self, inplanes, planes, stride=1, downsample=None): + super(BasicBlock, self).__init__() + self.conv1 = conv3x3(inplanes, planes, stride) + self.bn1 = nn.BatchNorm2d(planes) + self.relu = nn.ReLU(inplace=True) + self.conv2 = conv3x3(planes, planes) + self.bn2 = nn.BatchNorm2d(planes) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + residual = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + + if self.downsample is not None: + residual = self.downsample(x) + + out += residual + out = self.relu(out) + + return out + + +class Bottleneck(nn.Module): + expansion = 4 + + def __init__(self, inplanes, planes, stride=1, downsample=None): + super(Bottleneck, self).__init__() + self.conv1 = QConv2d(inplanes, planes, kernel_size=1, bias=False, + num_bits=NUM_BITS, num_bits_weight=NUM_BITS_WEIGHT, num_bits_grad=NUM_BITS_GRAD) + self.bn1 = nn.BatchNorm2d(planes) + self.conv2 = QConv2d(planes, planes, kernel_size=3, stride=stride, + padding=1, bias=False, num_bits=NUM_BITS, + num_bits_weight=NUM_BITS_WEIGHT, num_bits_grad=NUM_BITS_GRAD) + self.bn2 = nn.BatchNorm2d(planes) + self.conv3 = QConv2d(planes, planes * 4, kernel_size=1, bias=False, + num_bits=NUM_BITS, num_bits_weight=NUM_BITS_WEIGHT, num_bits_grad=NUM_BITS_GRAD) + self.bn3 = nn.BatchNorm2d(planes * 4) + self.relu = nn.ReLU(inplace=True) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + residual = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + out = self.relu(out) + + out = self.conv3(out) + out = self.bn3(out) + if self.downsample is not None: + residual = self.downsample(x) + + out += residual + out = self.relu(out) + + return out + + +class ResNet(nn.Module): + + def __init__(self): + super(ResNet, self).__init__() + + def _make_layer(self, block, planes, blocks, stride=1): + downsample = None + if stride != 1 or self.inplanes != planes * block.expansion: + downsample = nn.Sequential( + QConv2d(self.inplanes, planes * block.expansion, + kernel_size=1, stride=stride, bias=False, + num_bits=NUM_BITS, num_bits_weight=NUM_BITS_WEIGHT, num_bits_grad=NUM_BITS_GRAD), + nn.BatchNorm2d(planes * block.expansion), + ) + + layers = [] + layers.append(block(self.inplanes, planes, stride, downsample)) + self.inplanes = planes * block.expansion + for i in range(1, blocks): + layers.append(block(self.inplanes, planes)) + + return nn.Sequential(*layers) + + def forward(self, x): + x = self.conv1(x) + x = self.bn1(x) + x = self.relu(x) + x = self.maxpool(x) + + x = self.layer1(x) + x = self.layer2(x) + x = self.layer3(x) + x = self.layer4(x) + + x = self.avgpool(x) + x = x.view(x.size(0), -1) + x = self.fc(x) + + return x + + +class ResNet_imagenet(ResNet): + + def __init__(self, num_classes=1000, + block=Bottleneck, layers=[3, 4, 23, 3]): + super(ResNet_imagenet, self).__init__() + self.inplanes = 64 + self.conv1 = QConv2d(3, 64, kernel_size=7, stride=2, padding=3, + bias=False, num_bits=NUM_BITS, num_bits_weight=NUM_BITS_WEIGHT, num_bits_grad=NUM_BITS_GRAD) + self.bn1 = nn.BatchNorm2d(64) + self.relu = nn.ReLU(inplace=True) + self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + self.layer1 = self._make_layer(block, 64, layers[0]) + self.layer2 = self._make_layer(block, 128, 
layers[1], stride=2) + self.layer3 = self._make_layer(block, 256, layers[2], stride=2) + self.layer4 = self._make_layer(block, 512, layers[3], stride=2) + self.avgpool = nn.AvgPool2d(7) + self.fc = QLinear(512 * block.expansion, num_classes, num_bits=NUM_BITS, num_bits_weight=NUM_BITS_WEIGHT, num_bits_grad=NUM_BITS_GRAD) + + init_model(self) + self.regime = [ + {'epoch': 0, 'optimizer': 'SGD', 'lr': 1e-1, + 'weight_decay': 1e-4, 'momentum': 0.9}, + {'epoch': 30, 'lr': 1e-2}, + {'epoch': 60, 'lr': 1e-3, 'weight_decay': 0}, + {'epoch': 90, 'lr': 1e-4} + ] + + +def _get_scripted_model(): + model = ResNet_imagenet(block=BasicBlock, layers=[2, 2, 2, 2]).cuda() + model = script_with_log(model) + return model + + +def get_input(batch_size): + return (torch.randn(batch_size, 3, 224, 224).cuda(),), {} \ No newline at end of file diff --git a/models/_tridentnet_scripted.py b/models/_tridentnet_scripted.py new file mode 100644 index 000000000000..a27f8ad5218a --- /dev/null +++ b/models/_tridentnet_scripted.py @@ -0,0 +1,2075 @@ +# trident resnet in mmdetection, graph break due to numpy + +import warnings +import functools + +import torch.nn as nn +import torch.utils.checkpoint as cp +# from mmcv.cnn import build_conv_layer, build_norm_layer, build_plugin_layer +# from mmcv.runner import BaseModule +from torch.nn.modules.batchnorm import _BatchNorm + +import torch +from torch import nn as nn +from typing import Dict, Union, Tuple, Optional, List, Callable + +import math +import copy +import warnings +from abc import ABCMeta +from collections import defaultdict +from logging import FileHandler +from typing import Iterable, Optional + +# from mmcv.runner.dist_utils import master_only +# from mmcv.utils.logging import get_logger, logger_initialized, print_log +import inspect +from torch.nn.modules.utils import _pair +import numpy as np +import torch.nn.functional as F +from torch import distributed as dist +import logging +from utils import script_with_log + +CONV_LAYERS = { + 'Conv1d': nn.Conv1d, + 'Conv2d': nn.Conv2d, + 'Conv3d': nn.Conv3d, + 'Conv': nn.Conv2d, +} + +NORM_LAYERS = { + + 'BN': nn.BatchNorm2d, + 'BN1d': nn.BatchNorm1d, + 'BN2d': nn.BatchNorm2d, + 'BN3d': nn.BatchNorm3d, + # 'SyncBN': SyncBatchNorm, + 'GN': nn.GroupNorm, + 'LN': nn.LayerNorm, + 'IN': nn.InstanceNorm2d, + 'IN1d': nn.InstanceNorm1d, + 'IN2d': nn.InstanceNorm2d, + 'IN3d': nn.InstanceNorm3d, +} + + +def get_dist_info() -> Tuple[int, int]: + if dist.is_available() and dist.is_initialized(): + rank = dist.get_rank() + world_size = dist.get_world_size() + else: + rank = 0 + world_size = 1 + return rank, world_size + + +def master_only(func: Callable) -> Callable: + + @functools.wraps(func) + def wrapper(*args, **kwargs): + rank, _ = get_dist_info() + if rank == 0: + return func(*args, **kwargs) + + return wrapper + + + +logger_initialized: dict = {} + + +def get_logger(name, log_file=None, log_level=logging.INFO, file_mode='w'): + """Initialize and get a logger by name. + + If the logger has not been initialized, this method will initialize the + logger by adding one or two handlers, otherwise the initialized logger will + be directly returned. During initialization, a StreamHandler will always be + added. If `log_file` is specified and the process rank is 0, a FileHandler + will also be added. + + Args: + name (str): Logger name. + log_file (str | None): The log filename. If specified, a FileHandler + will be added to the logger. + log_level (int): The logger level. 
Note that only the process of + rank 0 is affected, and other processes will set the level to + "Error" thus be silent most of the time. + file_mode (str): The file mode used in opening log file. + Defaults to 'w'. + + Returns: + logging.Logger: The expected logger. + """ + logger = logging.getLogger(name) + if name in logger_initialized: + return logger + # handle hierarchical names + # e.g., logger "a" is initialized, then logger "a.b" will skip the + # initialization since it is a child of "a". + for logger_name in logger_initialized: + if name.startswith(logger_name): + return logger + + # handle duplicate logs to the console + # Starting in 1.8.0, PyTorch DDP attaches a StreamHandler (NOTSET) + # to the root logger. As logger.propagate is True by default, this root + # level handler causes logging messages from rank>0 processes to + # unexpectedly show up on the console, creating much unwanted clutter. + # To fix this issue, we set the root logger's StreamHandler, if any, to log + # at the ERROR level. + for handler in logger.root.handlers: + if type(handler) is logging.StreamHandler: + handler.setLevel(logging.ERROR) + + stream_handler = logging.StreamHandler() + handlers = [stream_handler] + + if dist.is_available() and dist.is_initialized(): + rank = dist.get_rank() + else: + rank = 0 + + # only rank 0 will add a FileHandler + if rank == 0 and log_file is not None: + # Here, the default behaviour of the official logger is 'a'. Thus, we + # provide an interface to change the file mode to the default + # behaviour. + file_handler = logging.FileHandler(log_file, file_mode) + handlers.append(file_handler) + + formatter = logging.Formatter( + '%(asctime)s - %(name)s - %(levelname)s - %(message)s') + for handler in handlers: + handler.setFormatter(formatter) + handler.setLevel(log_level) + logger.addHandler(handler) + + if rank == 0: + logger.setLevel(log_level) + else: + logger.setLevel(logging.ERROR) + + logger_initialized[name] = True + + return logger + + +def print_log(msg, logger=None, level=logging.INFO): + """Print a log message. + + Args: + msg (str): The message to be logged. + logger (logging.Logger | str | None): The logger to be used. + Some special loggers are: + + - "silent": no message will be printed. + - other str: the logger obtained with `get_root_logger(logger)`. + - None: The `print()` method will be used to print log messages. + level (int): Logging level. Only available when `logger` is a Logger + object or "root". + """ + if logger is None: + print(msg) + elif isinstance(logger, logging.Logger): + logger.log(level, msg) + elif logger == 'silent': + pass + elif isinstance(logger, str): + _logger = get_logger(logger) + _logger.log(level, msg) + else: + raise TypeError( + 'logger should be either a logging.Logger object, str, ' + f'"silent" or None, but got {type(logger)}') + + +def infer_abbr_norm(class_type): + """Infer abbreviation from the class name. + + When we build a norm layer with `build_norm_layer()`, we want to preserve + the norm type in variable names, e.g, self.bn1, self.gn. This method will + infer the abbreviation to map class types to abbreviations. + + Rule 1: If the class has the property "_abbr_", return the property. + Rule 2: If the parent class is _BatchNorm, GroupNorm, LayerNorm or + InstanceNorm, the abbreviation of this layer will be "bn", "gn", "ln" and + "in" respectively. + Rule 3: If the class name contains "batch", "group", "layer" or "instance", + the abbreviation of this layer will be "bn", "gn", "ln" and "in" + respectively. 
+ Rule 4: Otherwise, the abbreviation falls back to "norm". + + Args: + class_type (type): The norm layer type. + + Returns: + str: The inferred abbreviation. + """ + if not inspect.isclass(class_type): + raise TypeError( + f'class_type must be a type, but got {type(class_type)}') + if hasattr(class_type, '_abbr_'): + return class_type._abbr_ + # if issubclass(class_type, _InstanceNorm): # IN is a subclass of BN + # return 'in' + if issubclass(class_type, _BatchNorm): + return 'bn' + elif issubclass(class_type, nn.GroupNorm): + return 'gn' + elif issubclass(class_type, nn.LayerNorm): + return 'ln' + else: + class_name = class_type.__name__.lower() + if 'batch' in class_name: + return 'bn' + elif 'group' in class_name: + return 'gn' + elif 'layer' in class_name: + return 'ln' + elif 'instance' in class_name: + return 'in' + else: + return 'norm_layer' + +def build_norm_layer(cfg: Dict, + num_features: int, + postfix: Union[int, str] = '') -> Tuple[str, nn.Module]: + """Build normalization layer. + + Args: + cfg (dict): The norm layer config, which should contain: + + - type (str): Layer type. + - layer args: Args needed to instantiate a norm layer. + - requires_grad (bool, optional): Whether stop gradient updates. + num_features (int): Number of input channels. + postfix (int | str): The postfix to be appended into norm abbreviation + to create named layer. + + Returns: + tuple[str, nn.Module]: The first element is the layer name consisting + of abbreviation and postfix, e.g., bn1, gn. The second element is the + created norm layer. + """ + if not isinstance(cfg, dict): + raise TypeError('cfg must be a dict') + if 'type' not in cfg: + raise KeyError('the cfg dict must contain the key "type"') + cfg_ = cfg.copy() + + layer_type = cfg_.pop('type') + if layer_type not in NORM_LAYERS: + raise KeyError(f'Unrecognized norm type {layer_type}') + + norm_layer = NORM_LAYERS.get(layer_type) + abbr = infer_abbr_norm(norm_layer) + + assert isinstance(postfix, (int, str)) + name = abbr + str(postfix) + + requires_grad = cfg_.pop('requires_grad', True) + cfg_.setdefault('eps', 1e-5) + if layer_type != 'GN': + layer = norm_layer(num_features, **cfg_) + if layer_type == 'SyncBN' and hasattr(layer, '_specify_ddp_gpu_num'): + layer._specify_ddp_gpu_num(1) + else: + assert 'num_groups' in cfg_ + layer = norm_layer(num_channels=num_features, **cfg_) + + for param in layer.parameters(): + param.requires_grad = requires_grad + + return name, layer + +def build_conv_layer(cfg: Optional[Dict], *args, **kwargs) -> nn.Module: + """Build convolution layer. + + Args: + cfg (None or dict): The conv layer config, which should contain: + - type (str): Layer type. + - layer args: Args needed to instantiate an conv layer. + args (argument list): Arguments passed to the `__init__` + method of the corresponding conv layer. + kwargs (keyword arguments): Keyword arguments passed to the `__init__` + method of the corresponding conv layer. + + Returns: + nn.Module: Created conv layer. 
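+
+    Example (illustrative sketch only; uses the ``CONV_LAYERS`` registry defined above)::
+
+        >>> layer = build_conv_layer(dict(type='Conv'), 3, 8, kernel_size=3, padding=1)
+        >>> isinstance(layer, nn.Conv2d)
+        True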
+ """ + if cfg is None: + cfg_ = dict(type='Conv2d') + else: + if not isinstance(cfg, dict): + raise TypeError('cfg must be a dict') + if 'type' not in cfg: + raise KeyError('the cfg dict must contain the key "type"') + cfg_ = cfg.copy() + + layer_type = cfg_.pop('type') + if layer_type not in CONV_LAYERS: + raise KeyError(f'Unrecognized layer type {layer_type}') + else: + conv_layer = CONV_LAYERS.get(layer_type) + + layer = conv_layer(*args, **kwargs, **cfg_) + + return layer + +PLUGIN_LAYERS = {} + + +def infer_abbr_plugin(class_type: type) -> str: + """Infer abbreviation from the class name. + + This method will infer the abbreviation to map class types to + abbreviations. + + Rule 1: If the class has the property "abbr", return the property. + Rule 2: Otherwise, the abbreviation falls back to snake case of class + name, e.g. the abbreviation of ``FancyBlock`` will be ``fancy_block``. + + Args: + class_type (type): The norm layer type. + + Returns: + str: The inferred abbreviation. + """ + + def camel2snack(word): + """Convert camel case word into snack case. + + Modified from `inflection lib + `_. + + Example:: + + >>> camel2snack("FancyBlock") + 'fancy_block' + """ + + word = re.sub(r'([A-Z]+)([A-Z][a-z])', r'\1_\2', word) + word = re.sub(r'([a-z\d])([A-Z])', r'\1_\2', word) + word = word.replace('-', '_') + return word.lower() + + if not inspect.isclass(class_type): + raise TypeError( + f'class_type must be a type, but got {type(class_type)}') + if hasattr(class_type, '_abbr_'): + return class_type._abbr_ # type: ignore + else: + return camel2snack(class_type.__name__) + + +def build_plugin_layer(cfg: Dict, + postfix: Union[int, str] = '', + **kwargs) -> Tuple[str, nn.Module]: + """Build plugin layer. + + Args: + cfg (dict): cfg should contain: + + - type (str): identify plugin layer type. + - layer args: args needed to instantiate a plugin layer. + postfix (int, str): appended into norm abbreviation to + create named layer. Default: ''. + + Returns: + tuple[str, nn.Module]: The first one is the concatenation of + abbreviation and postfix. The second is the created plugin layer. + """ + if not isinstance(cfg, dict): + raise TypeError('cfg must be a dict') + if 'type' not in cfg: + raise KeyError('the cfg dict must contain the key "type"') + cfg_ = cfg.copy() + + layer_type = cfg_.pop('type') + if layer_type not in PLUGIN_LAYERS: + raise KeyError(f'Unrecognized plugin type {layer_type}') + + plugin_layer = PLUGIN_LAYERS.get(layer_type) + abbr = infer_abbr_plugin(plugin_layer) + + assert isinstance(postfix, (int, str)) + name = abbr + str(postfix) + + layer = plugin_layer(**kwargs, **cfg_) + + return name, layer + + +class BaseModule(nn.Module, metaclass=ABCMeta): + """Base module for all modules in openmmlab. + + ``BaseModule`` is a wrapper of ``torch.nn.Module`` with additional + functionality of parameter initialization. Compared with + ``torch.nn.Module``, ``BaseModule`` mainly adds three attributes. + + - ``init_cfg``: the config to control the initialization. + - ``init_weights``: The function of parameter initialization and recording + initialization information. + - ``_params_init_info``: Used to track the parameter initialization + information. This attribute only exists during executing the + ``init_weights``. + + Args: + init_cfg (dict, optional): Initialization config dict. 
+ """ + + def __init__(self, init_cfg: Optional[dict] = None): + """Initialize BaseModule, inherited from `torch.nn.Module`""" + + # NOTE init_cfg can be defined in different levels, but init_cfg + # in low levels has a higher priority. + + super().__init__() + # define default value of init_cfg instead of hard code + # in init_weights() function + self._is_init = False + + self.init_cfg = copy.deepcopy(init_cfg) + + # Backward compatibility in derived classes + # if pretrained is not None: + # warnings.warn('DeprecationWarning: pretrained is a deprecated \ + # key, please consider using init_cfg') + # self.init_cfg = dict(type='Pretrained', checkpoint=pretrained) + + @property + def is_init(self) -> bool: + return self._is_init + + def init_weights(self) -> None: + """Initialize the weights.""" + + is_top_level_module = False + # check if it is top-level module + if not hasattr(self, '_params_init_info'): + # The `_params_init_info` is used to record the initialization + # information of the parameters + # the key should be the obj:`nn.Parameter` of model and the value + # should be a dict containing + # - init_info (str): The string that describes the initialization. + # - tmp_mean_value (FloatTensor): The mean of the parameter, + # which indicates whether the parameter has been modified. + # this attribute would be deleted after all parameters + # is initialized. + self._params_init_info: defaultdict = defaultdict(dict) + is_top_level_module = True + + # Initialize the `_params_init_info`, + # When detecting the `tmp_mean_value` of + # the corresponding parameter is changed, update related + # initialization information + for name, param in self.named_parameters(): + self._params_init_info[param][ + 'init_info'] = f'The value is the same before and ' \ + f'after calling `init_weights` ' \ + f'of {self.__class__.__name__} ' + self._params_init_info[param][ + 'tmp_mean_value'] = param.data.mean() + + # pass `params_init_info` to all submodules + # All submodules share the same `params_init_info`, + # so it will be updated when parameters are + # modified at any level of the model. 
+ for sub_module in self.modules(): + sub_module._params_init_info = self._params_init_info + + # Get the initialized logger, if not exist, + # create a logger named `mmcv` + logger_names = list(logger_initialized.keys()) + logger_name = logger_names[0] if logger_names else 'mmcv' + + from ..cnn import initialize + from ..cnn.utils.weight_init import update_init_info + module_name = self.__class__.__name__ + if not self._is_init: + if self.init_cfg: + print_log( + f'initialize {module_name} with init_cfg {self.init_cfg}', + logger=logger_name) + initialize(self, self.init_cfg) + if isinstance(self.init_cfg, dict): + # prevent the parameters of + # the pre-trained model + # from being overwritten by + # the `init_weights` + if self.init_cfg['type'] == 'Pretrained': + return + + for m in self.children(): + if hasattr(m, 'init_weights'): + m.init_weights() + # users may overload the `init_weights` + update_init_info( + m, + init_info=f'Initialized by ' + f'user-defined `init_weights`' + f' in {m.__class__.__name__} ') + + self._is_init = True + else: + warnings.warn(f'init_weights of {self.__class__.__name__} has ' + f'been called more than once.') + + if is_top_level_module: + self._dump_init_info(logger_name) + + for sub_module in self.modules(): + del sub_module._params_init_info + + @master_only + def _dump_init_info(self, logger_name: str) -> None: + """Dump the initialization information to a file named + `initialization.log.json` in workdir. + + Args: + logger_name (str): The name of logger. + """ + + logger = get_logger(logger_name) + + with_file_handler = False + # dump the information to the logger file if there is a `FileHandler` + for handler in logger.handlers: + if isinstance(handler, FileHandler): + handler.stream.write( + 'Name of parameter - Initialization information\n') + for name, param in self.named_parameters(): + handler.stream.write( + f'\n{name} - {param.shape}: ' + f"\n{self._params_init_info[param]['init_info']} \n") + handler.stream.flush() + with_file_handler = True + if not with_file_handler: + for name, param in self.named_parameters(): + print_log( + f'\n{name} - {param.shape}: ' + f"\n{self._params_init_info[param]['init_info']} \n ", + logger=logger_name) + + def __repr__(self): + s = super().__repr__() + if self.init_cfg: + s += f'\ninit_cfg={self.init_cfg}' + return s + +class Sequential(BaseModule, nn.Sequential): + """Sequential module in openmmlab. + + Args: + init_cfg (dict, optional): Initialization config dict. + """ + + def __init__(self, *args, init_cfg: Optional[dict] = None): + BaseModule.__init__(self, init_cfg) + nn.Sequential.__init__(self, *args) + + +def kaiming_init(module: nn.Module, + a: float = 0, + mode: str = 'fan_out', + nonlinearity: str = 'relu', + bias: float = 0, + distribution: str = 'normal') -> None: + assert distribution in ['uniform', 'normal'] + if hasattr(module, 'weight') and module.weight is not None: + if distribution == 'uniform': + nn.init.kaiming_uniform_( + module.weight, a=a, mode=mode, nonlinearity=nonlinearity) + else: + nn.init.kaiming_normal_( + module.weight, a=a, mode=mode, nonlinearity=nonlinearity) + if hasattr(module, 'bias') and module.bias is not None: + nn.init.constant_(module.bias, bias) + + +class GeneralizedAttention(nn.Module): + """GeneralizedAttention module. + + See 'An Empirical Study of Spatial Attention Mechanisms in Deep Networks' + (https://arxiv.org/abs/1711.07971) for details. + + Args: + in_channels (int): Channels of the input feature map. + spatial_range (int): The spatial range. 
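The `kaiming_init` helper above is a thin wrapper around `torch.nn.init`; with its default arguments it is roughly equivalent to the following direct calls (a sketch, not code from this patch).

```python
# Roughly what kaiming_init(conv) does with its defaults
# (mode='fan_out', nonlinearity='relu', distribution='normal', bias=0).
import torch.nn as nn

conv = nn.Conv2d(3, 64, kernel_size=3)
nn.init.kaiming_normal_(conv.weight, a=0, mode='fan_out', nonlinearity='relu')
nn.init.constant_(conv.bias, 0)
```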
-1 indicates no spatial range + constraint. Default: -1. + num_heads (int): The head number of empirical_attention module. + Default: 9. + position_embedding_dim (int): The position embedding dimension. + Default: -1. + position_magnitude (int): A multiplier acting on coord difference. + Default: 1. + kv_stride (int): The feature stride acting on key/value feature map. + Default: 2. + q_stride (int): The feature stride acting on query feature map. + Default: 1. + attention_type (str): A binary indicator string for indicating which + items in generalized empirical_attention module are used. + Default: '1111'. + + - '1000' indicates 'query and key content' (appr - appr) item, + - '0100' indicates 'query content and relative position' + (appr - position) item, + - '0010' indicates 'key content only' (bias - appr) item, + - '0001' indicates 'relative position only' (bias - position) item. + """ + + _abbr_ = 'gen_attention_block' + + def __init__(self, + in_channels: int, + spatial_range: int = -1, + num_heads: int = 9, + position_embedding_dim: int = -1, + position_magnitude: int = 1, + kv_stride: int = 2, + q_stride: int = 1, + attention_type: str = '1111'): + + super().__init__() + + # hard range means local range for non-local operation + self.position_embedding_dim = ( + position_embedding_dim + if position_embedding_dim > 0 else in_channels) + + self.position_magnitude = position_magnitude + self.num_heads = num_heads + self.in_channels = in_channels + self.spatial_range = spatial_range + self.kv_stride = kv_stride + self.q_stride = q_stride + self.attention_type = [bool(int(_)) for _ in attention_type] + self.qk_embed_dim = in_channels // num_heads + out_c = self.qk_embed_dim * num_heads + + if self.attention_type[0] or self.attention_type[1]: + self.query_conv = nn.Conv2d( + in_channels=in_channels, + out_channels=out_c, + kernel_size=1, + bias=False) + self.query_conv.kaiming_init = True + + if self.attention_type[0] or self.attention_type[2]: + self.key_conv = nn.Conv2d( + in_channels=in_channels, + out_channels=out_c, + kernel_size=1, + bias=False) + self.key_conv.kaiming_init = True + + self.v_dim = in_channels // num_heads + self.value_conv = nn.Conv2d( + in_channels=in_channels, + out_channels=self.v_dim * num_heads, + kernel_size=1, + bias=False) + self.value_conv.kaiming_init = True + + if self.attention_type[1] or self.attention_type[3]: + self.appr_geom_fc_x = nn.Linear( + self.position_embedding_dim // 2, out_c, bias=False) + self.appr_geom_fc_x.kaiming_init = True + + self.appr_geom_fc_y = nn.Linear( + self.position_embedding_dim // 2, out_c, bias=False) + self.appr_geom_fc_y.kaiming_init = True + + if self.attention_type[2]: + stdv = 1.0 / math.sqrt(self.qk_embed_dim * 2) + appr_bias_value = -2 * stdv * torch.rand(out_c) + stdv + self.appr_bias = nn.Parameter(appr_bias_value) + + if self.attention_type[3]: + stdv = 1.0 / math.sqrt(self.qk_embed_dim * 2) + geom_bias_value = -2 * stdv * torch.rand(out_c) + stdv + self.geom_bias = nn.Parameter(geom_bias_value) + + self.proj_conv = nn.Conv2d( + in_channels=self.v_dim * num_heads, + out_channels=in_channels, + kernel_size=1, + bias=True) + self.proj_conv.kaiming_init = True + self.gamma = nn.Parameter(torch.zeros(1)) + + if self.spatial_range >= 0: + # only works when non local is after 3*3 conv + if in_channels == 256: + max_len = 84 + elif in_channels == 512: + max_len = 42 + + max_len_kv = int((max_len - 1.0) / self.kv_stride + 1) + local_constraint_map = np.ones( + (max_len, max_len, max_len_kv, max_len_kv), dtype=int) + for 
iy in range(max_len): + for ix in range(max_len): + local_constraint_map[ + iy, ix, + max((iy - self.spatial_range) // + self.kv_stride, 0):min((iy + self.spatial_range + + 1) // self.kv_stride + + 1, max_len), + max((ix - self.spatial_range) // + self.kv_stride, 0):min((ix + self.spatial_range + + 1) // self.kv_stride + + 1, max_len)] = 0 + + self.local_constraint_map = nn.Parameter( + torch.from_numpy(local_constraint_map).byte(), + requires_grad=False) + + if self.q_stride > 1: + self.q_downsample = nn.AvgPool2d( + kernel_size=1, stride=self.q_stride) + else: + self.q_downsample = None + + if self.kv_stride > 1: + self.kv_downsample = nn.AvgPool2d( + kernel_size=1, stride=self.kv_stride) + else: + self.kv_downsample = None + + self.init_weights() + + def get_position_embedding(self, + h, + w, + h_kv, + w_kv, + q_stride, + kv_stride, + device, + dtype, + feat_dim, + wave_length=1000): + # the default type of Tensor is float32, leading to type mismatch + # in fp16 mode. Cast it to support fp16 mode. + h_idxs = torch.linspace(0, h - 1, h).to(device=device, dtype=dtype) + h_idxs = h_idxs.view((h, 1)) * q_stride + + w_idxs = torch.linspace(0, w - 1, w).to(device=device, dtype=dtype) + w_idxs = w_idxs.view((w, 1)) * q_stride + + h_kv_idxs = torch.linspace(0, h_kv - 1, h_kv).to( + device=device, dtype=dtype) + h_kv_idxs = h_kv_idxs.view((h_kv, 1)) * kv_stride + + w_kv_idxs = torch.linspace(0, w_kv - 1, w_kv).to( + device=device, dtype=dtype) + w_kv_idxs = w_kv_idxs.view((w_kv, 1)) * kv_stride + + # (h, h_kv, 1) + h_diff = h_idxs.unsqueeze(1) - h_kv_idxs.unsqueeze(0) + h_diff *= self.position_magnitude + + # (w, w_kv, 1) + w_diff = w_idxs.unsqueeze(1) - w_kv_idxs.unsqueeze(0) + w_diff *= self.position_magnitude + + feat_range = torch.arange(0, feat_dim / 4).to( + device=device, dtype=dtype) + + dim_mat = torch.Tensor([wave_length]).to(device=device, dtype=dtype) + dim_mat = dim_mat**((4. 
/ feat_dim) * feat_range) + dim_mat = dim_mat.view((1, 1, -1)) + + embedding_x = torch.cat( + ((w_diff / dim_mat).sin(), (w_diff / dim_mat).cos()), dim=2) + + embedding_y = torch.cat( + ((h_diff / dim_mat).sin(), (h_diff / dim_mat).cos()), dim=2) + + return embedding_x, embedding_y + + def forward(self, x_input: torch.Tensor) -> torch.Tensor: + num_heads = self.num_heads + + # use empirical_attention + if self.q_downsample is not None: + x_q = self.q_downsample(x_input) + else: + x_q = x_input + n, _, h, w = x_q.shape + + if self.kv_downsample is not None: + x_kv = self.kv_downsample(x_input) + else: + x_kv = x_input + _, _, h_kv, w_kv = x_kv.shape + + if self.attention_type[0] or self.attention_type[1]: + proj_query = self.query_conv(x_q).view( + (n, num_heads, self.qk_embed_dim, h * w)) + proj_query = proj_query.permute(0, 1, 3, 2) + + if self.attention_type[0] or self.attention_type[2]: + proj_key = self.key_conv(x_kv).view( + (n, num_heads, self.qk_embed_dim, h_kv * w_kv)) + + if self.attention_type[1] or self.attention_type[3]: + position_embed_x, position_embed_y = self.get_position_embedding( + h, w, h_kv, w_kv, self.q_stride, self.kv_stride, + x_input.device, x_input.dtype, self.position_embedding_dim) + # (n, num_heads, w, w_kv, dim) + position_feat_x = self.appr_geom_fc_x(position_embed_x).\ + view(1, w, w_kv, num_heads, self.qk_embed_dim).\ + permute(0, 3, 1, 2, 4).\ + repeat(n, 1, 1, 1, 1) + + # (n, num_heads, h, h_kv, dim) + position_feat_y = self.appr_geom_fc_y(position_embed_y).\ + view(1, h, h_kv, num_heads, self.qk_embed_dim).\ + permute(0, 3, 1, 2, 4).\ + repeat(n, 1, 1, 1, 1) + + position_feat_x /= math.sqrt(2) + position_feat_y /= math.sqrt(2) + + # accelerate for saliency only + if (np.sum(self.attention_type) == 1) and self.attention_type[2]: + appr_bias = self.appr_bias.\ + view(1, num_heads, 1, self.qk_embed_dim).\ + repeat(n, 1, 1, 1) + + energy = torch.matmul(appr_bias, proj_key).\ + view(n, num_heads, 1, h_kv * w_kv) + + h = 1 + w = 1 + else: + # (n, num_heads, h*w, h_kv*w_kv), query before key, 540mb for + if not self.attention_type[0]: + energy = torch.zeros( + n, + num_heads, + h, + w, + h_kv, + w_kv, + dtype=x_input.dtype, + device=x_input.device) + + # attention_type[0]: appr - appr + # attention_type[1]: appr - position + # attention_type[2]: bias - appr + # attention_type[3]: bias - position + if self.attention_type[0] or self.attention_type[2]: + if self.attention_type[0] and self.attention_type[2]: + appr_bias = self.appr_bias.\ + view(1, num_heads, 1, self.qk_embed_dim) + energy = torch.matmul(proj_query + appr_bias, proj_key).\ + view(n, num_heads, h, w, h_kv, w_kv) + + elif self.attention_type[0]: + energy = torch.matmul(proj_query, proj_key).\ + view(n, num_heads, h, w, h_kv, w_kv) + + elif self.attention_type[2]: + appr_bias = self.appr_bias.\ + view(1, num_heads, 1, self.qk_embed_dim).\ + repeat(n, 1, 1, 1) + + energy += torch.matmul(appr_bias, proj_key).\ + view(n, num_heads, 1, 1, h_kv, w_kv) + + if self.attention_type[1] or self.attention_type[3]: + if self.attention_type[1] and self.attention_type[3]: + geom_bias = self.geom_bias.\ + view(1, num_heads, 1, self.qk_embed_dim) + + proj_query_reshape = (proj_query + geom_bias).\ + view(n, num_heads, h, w, self.qk_embed_dim) + + energy_x = torch.matmul( + proj_query_reshape.permute(0, 1, 3, 2, 4), + position_feat_x.permute(0, 1, 2, 4, 3)) + energy_x = energy_x.\ + permute(0, 1, 3, 2, 4).unsqueeze(4) + + energy_y = torch.matmul( + proj_query_reshape, + position_feat_y.permute(0, 1, 2, 4, 3)) + 
energy_y = energy_y.unsqueeze(5) + + energy += energy_x + energy_y + + elif self.attention_type[1]: + proj_query_reshape = proj_query.\ + view(n, num_heads, h, w, self.qk_embed_dim) + proj_query_reshape = proj_query_reshape.\ + permute(0, 1, 3, 2, 4) + position_feat_x_reshape = position_feat_x.\ + permute(0, 1, 2, 4, 3) + position_feat_y_reshape = position_feat_y.\ + permute(0, 1, 2, 4, 3) + + energy_x = torch.matmul(proj_query_reshape, + position_feat_x_reshape) + energy_x = energy_x.permute(0, 1, 3, 2, 4).unsqueeze(4) + + energy_y = torch.matmul(proj_query_reshape, + position_feat_y_reshape) + energy_y = energy_y.unsqueeze(5) + + energy += energy_x + energy_y + + elif self.attention_type[3]: + geom_bias = self.geom_bias.\ + view(1, num_heads, self.qk_embed_dim, 1).\ + repeat(n, 1, 1, 1) + + position_feat_x_reshape = position_feat_x.\ + view(n, num_heads, w * w_kv, self.qk_embed_dim) + + position_feat_y_reshape = position_feat_y.\ + view(n, num_heads, h * h_kv, self.qk_embed_dim) + + energy_x = torch.matmul(position_feat_x_reshape, geom_bias) + energy_x = energy_x.view(n, num_heads, 1, w, 1, w_kv) + + energy_y = torch.matmul(position_feat_y_reshape, geom_bias) + energy_y = energy_y.view(n, num_heads, h, 1, h_kv, 1) + + energy += energy_x + energy_y + + energy = energy.view(n, num_heads, h * w, h_kv * w_kv) + + if self.spatial_range >= 0: + cur_local_constraint_map = \ + self.local_constraint_map[:h, :w, :h_kv, :w_kv].\ + contiguous().\ + view(1, 1, h*w, h_kv*w_kv) + + energy = energy.masked_fill_(cur_local_constraint_map, + float('-inf')) + + attention = F.softmax(energy, 3) + + proj_value = self.value_conv(x_kv) + proj_value_reshape = proj_value.\ + view((n, num_heads, self.v_dim, h_kv * w_kv)).\ + permute(0, 1, 3, 2) + + out = torch.matmul(attention, proj_value_reshape).\ + permute(0, 1, 3, 2).\ + contiguous().\ + view(n, self.v_dim * self.num_heads, h, w) + + out = self.proj_conv(out) + + # output is downsampled, upsample back to input size + if self.q_downsample is not None: + out = F.interpolate( + out, + size=x_input.shape[2:], + mode='bilinear', + align_corners=False) + + out = self.gamma * out + x_input + return out + + def init_weights(self): + for m in self.modules(): + if hasattr(m, 'kaiming_init') and m.kaiming_init: + kaiming_init( + m, + mode='fan_in', + nonlinearity='leaky_relu', + bias=0, + distribution='uniform', + a=1) + + +PLUGIN_LAYERS['GeneralizedAttention'] = GeneralizedAttention + +class ResLayer(Sequential): + """ResLayer to build ResNet style backbone. + + Args: + block (nn.Module): block used to build ResLayer. + inplanes (int): inplanes of block. + planes (int): planes of block. + num_blocks (int): number of blocks. + stride (int): stride of the first block. Default: 1 + avg_down (bool): Use AvgPool instead of stride conv when + downsampling in the bottleneck. Default: False + conv_cfg (dict): dictionary to construct and config conv layer. + Default: None + norm_cfg (dict): dictionary to construct and config norm layer. + Default: dict(type='BN') + downsample_first (bool): Downsample at the first block or last block. + False for Hourglass, True for ResNet. 
Default: True + """ + + def __init__(self, + block, + inplanes, + planes, + num_blocks, + stride=1, + avg_down=False, + conv_cfg=None, + norm_cfg=dict(type='BN'), + downsample_first=True, + **kwargs): + self.block = block + + downsample = None + if stride != 1 or inplanes != planes * block.expansion: + downsample = [] + conv_stride = stride + if avg_down: + conv_stride = 1 + downsample.append( + nn.AvgPool2d( + kernel_size=stride, + stride=stride, + ceil_mode=True, + count_include_pad=False)) + downsample.extend([ + build_conv_layer( + conv_cfg, + inplanes, + planes * block.expansion, + kernel_size=1, + stride=conv_stride, + bias=False), + build_norm_layer(norm_cfg, planes * block.expansion)[1] + ]) + downsample = nn.Sequential(*downsample) + + layers = [] + if downsample_first: + layers.append( + block( + inplanes=inplanes, + planes=planes, + stride=stride, + downsample=downsample, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + **kwargs)) + inplanes = planes * block.expansion + for _ in range(1, num_blocks): + layers.append( + block( + inplanes=inplanes, + planes=planes, + stride=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + **kwargs)) + + else: # downsample_first=False is for HourglassModule + for _ in range(num_blocks - 1): + layers.append( + block( + inplanes=inplanes, + planes=inplanes, + stride=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + **kwargs)) + layers.append( + block( + inplanes=inplanes, + planes=planes, + stride=stride, + downsample=downsample, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + **kwargs)) + super(ResLayer, self).__init__(*layers) + +class BasicBlock(BaseModule): + expansion = 1 + + def __init__(self, + inplanes, + planes, + stride=1, + dilation=1, + downsample=None, + style='pytorch', + with_cp=False, + conv_cfg=None, + norm_cfg=dict(type='BN'), + dcn=None, + plugins=None, + init_cfg=None): + super(BasicBlock, self).__init__(init_cfg) + assert dcn is None, 'Not implemented yet.' + assert plugins is None, 'Not implemented yet.' 
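To make the `avg_down` branch of `ResLayer` above concrete, here is a small sketch of the downsample path it builds: a stride-2 average pool followed by a stride-1 1x1 convolution instead of a single strided 1x1 convolution (toy channel sizes, norm layer omitted).

```python
# Sketch of the avg_down downsample path assembled by ResLayer (toy sizes, no norm layer).
import torch
import torch.nn as nn

downsample = nn.Sequential(
    nn.AvgPool2d(kernel_size=2, stride=2, ceil_mode=True, count_include_pad=False),
    nn.Conv2d(64, 256, kernel_size=1, stride=1, bias=False),
)
print(downsample(torch.randn(1, 64, 56, 56)).shape)  # torch.Size([1, 256, 28, 28])
```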
+ + self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1) + self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2) + + self.conv1 = build_conv_layer( + conv_cfg, + inplanes, + planes, + 3, + stride=stride, + padding=dilation, + dilation=dilation, + bias=False) + self.add_module(self.norm1_name, norm1) + self.conv2 = build_conv_layer( + conv_cfg, planes, planes, 3, padding=1, bias=False) + self.add_module(self.norm2_name, norm2) + + self.relu = nn.ReLU(inplace=True) + self.downsample = downsample + self.stride = stride + self.dilation = dilation + self.with_cp = with_cp + + @property + def norm1(self): + """nn.Module: normalization layer after the first convolution layer""" + return getattr(self, self.norm1_name) + + @property + def norm2(self): + """nn.Module: normalization layer after the second convolution layer""" + return getattr(self, self.norm2_name) + + def forward(self, x): + """Forward function.""" + + def _inner_forward(x): + identity = x + + out = self.conv1(x) + out = self.norm1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.norm2(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + + return out + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(_inner_forward, x) + else: + out = _inner_forward(x) + + out = self.relu(out) + + return out + + +class Bottleneck(BaseModule): + expansion = 4 + + def __init__(self, + inplanes, + planes, + stride=1, + dilation=1, + downsample=None, + style='pytorch', + with_cp=False, + conv_cfg=None, + norm_cfg=dict(type='BN'), + dcn=None, + plugins=None, + init_cfg=None): + """Bottleneck block for ResNet. + + If style is "pytorch", the stride-two layer is the 3x3 conv layer, if + it is "caffe", the stride-two layer is the first 1x1 conv layer. 
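The `style` option described above only decides which convolution carries the stride; the mapping mirrors the `conv1_stride`/`conv2_stride` assignment that follows (a tiny sketch, not patch code).

```python
# 'pytorch': the 3x3 conv (conv2) is strided; 'caffe': the first 1x1 conv (conv1) is strided.
def bottleneck_strides(style: str, stride: int):
    if style == 'pytorch':
        return 1, stride  # (conv1_stride, conv2_stride)
    return stride, 1

assert bottleneck_strides('pytorch', 2) == (1, 2)
assert bottleneck_strides('caffe', 2) == (2, 1)
```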
+ """ + super(Bottleneck, self).__init__(init_cfg) + assert style in ['pytorch', 'caffe'] + assert dcn is None or isinstance(dcn, dict) + assert plugins is None or isinstance(plugins, list) + if plugins is not None: + allowed_position = ['after_conv1', 'after_conv2', 'after_conv3'] + assert all(p['position'] in allowed_position for p in plugins) + + self.inplanes = inplanes + self.planes = planes + self.stride = stride + self.dilation = dilation + self.style = style + self.with_cp = with_cp + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.dcn = dcn + self.with_dcn = dcn is not None + self.plugins = plugins + self.with_plugins = plugins is not None + + if self.with_plugins: + # collect plugins for conv1/conv2/conv3 + self.after_conv1_plugins = [ + plugin['cfg'] for plugin in plugins + if plugin['position'] == 'after_conv1' + ] + self.after_conv2_plugins = [ + plugin['cfg'] for plugin in plugins + if plugin['position'] == 'after_conv2' + ] + self.after_conv3_plugins = [ + plugin['cfg'] for plugin in plugins + if plugin['position'] == 'after_conv3' + ] + + if self.style == 'pytorch': + self.conv1_stride = 1 + self.conv2_stride = stride + else: + self.conv1_stride = stride + self.conv2_stride = 1 + + self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1) + self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2) + self.norm3_name, norm3 = build_norm_layer( + norm_cfg, planes * self.expansion, postfix=3) + + self.conv1 = build_conv_layer( + conv_cfg, + inplanes, + planes, + kernel_size=1, + stride=self.conv1_stride, + bias=False) + self.add_module(self.norm1_name, norm1) + fallback_on_stride = False + if self.with_dcn: + fallback_on_stride = dcn.pop('fallback_on_stride', False) + if not self.with_dcn or fallback_on_stride: + self.conv2 = build_conv_layer( + conv_cfg, + planes, + planes, + kernel_size=3, + stride=self.conv2_stride, + padding=dilation, + dilation=dilation, + bias=False) + else: + assert self.conv_cfg is None, 'conv_cfg must be None for DCN' + self.conv2 = build_conv_layer( + dcn, + planes, + planes, + kernel_size=3, + stride=self.conv2_stride, + padding=dilation, + dilation=dilation, + bias=False) + + self.add_module(self.norm2_name, norm2) + self.conv3 = build_conv_layer( + conv_cfg, + planes, + planes * self.expansion, + kernel_size=1, + bias=False) + self.add_module(self.norm3_name, norm3) + + self.relu = nn.ReLU(inplace=True) + self.downsample = downsample + + if self.with_plugins: + self.after_conv1_plugin_names = self.make_block_plugins( + planes, self.after_conv1_plugins) + self.after_conv2_plugin_names = self.make_block_plugins( + planes, self.after_conv2_plugins) + self.after_conv3_plugin_names = self.make_block_plugins( + planes * self.expansion, self.after_conv3_plugins) + + + self.seq1 = script_with_log(nn.Sequential(self.conv1, self.norm1, self.relu)) + self.seq2 = script_with_log(nn.Sequential(self.conv2, self.norm2, self.relu)) + self.seq3 = script_with_log(nn.Sequential(self.conv3, self.norm3)) + + def make_block_plugins(self, in_channels, plugins): + """make plugins for block. + + Args: + in_channels (int): Input channels of plugin. + plugins (list[dict]): List of plugins cfg to build. + + Returns: + list[str]: List of the names of plugin. 
+ """ + assert isinstance(plugins, list) + plugin_names = [] + for plugin in plugins: + plugin = plugin.copy() + name, layer = build_plugin_layer( + plugin, + in_channels=in_channels, + postfix=plugin.pop('postfix', '')) + assert not hasattr(self, name), f'duplicate plugin {name}' + self.add_module(name, layer) + plugin_names.append(name) + return plugin_names + + def forward_plugin(self, x, plugin_names): + out = x + for name in plugin_names: + out = getattr(self, name)(out) + return out + + @property + def norm1(self): + """nn.Module: normalization layer after the first convolution layer""" + return getattr(self, self.norm1_name) + + @property + def norm2(self): + """nn.Module: normalization layer after the second convolution layer""" + return getattr(self, self.norm2_name) + + @property + def norm3(self): + """nn.Module: normalization layer after the third convolution layer""" + return getattr(self, self.norm3_name) + + def _inner_forward(self, x): + identity = x + # out = self.conv1(x) + # out = self.norm1(out) + # out = self.relu(out) + out = self.seq1(x) + + if self.with_plugins: + out = self.forward_plugin(out, self.after_conv1_plugin_names) + + # out = self.conv2(out) + # out = self.norm2(out) + # out = self.relu(out) + out = self.seq2(out) + + if self.with_plugins: + out = self.forward_plugin(out, self.after_conv2_plugin_names) + + # out = self.conv3(out) + # out = self.norm3(out) + out = self.seq3(out) + + if self.with_plugins: + out = self.forward_plugin(out, self.after_conv3_plugin_names) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + + return out + + + def forward(self, x): + """Forward function.""" + if self.with_cp and x.requires_grad: + out = cp.checkpoint(self._inner_forward, x) + else: + out = self._inner_forward(x) + + out = self.relu(out) + + return out + + +class ResNet(BaseModule): + """ResNet backbone. + + Args: + depth (int): Depth of resnet, from {18, 34, 50, 101, 152}. + stem_channels (int | None): Number of stem channels. If not specified, + it will be the same as `base_channels`. Default: None. + base_channels (int): Number of base channels of res layer. Default: 64. + in_channels (int): Number of input image channels. Default: 3. + num_stages (int): Resnet stages. Default: 4. + strides (Sequence[int]): Strides of the first block of each stage. + dilations (Sequence[int]): Dilation of each stage. + out_indices (Sequence[int]): Output from which stages. + style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two + layer is the 3x3 conv layer, otherwise the stride-two layer is + the first 1x1 conv layer. + deep_stem (bool): Replace 7x7 conv in input stem with 3 3x3 conv + avg_down (bool): Use AvgPool instead of stride conv when + downsampling in the bottleneck. + frozen_stages (int): Stages to be frozen (stop grad and set eval mode). + -1 means not freezing any parameters. + norm_cfg (dict): Dictionary to construct and config norm layer. + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. + plugins (list[dict]): List of plugins for stages, each dict contains: + + - cfg (dict, required): Cfg dict to build plugin. + - position (str, required): Position inside block to insert + plugin, options are 'after_conv1', 'after_conv2', 'after_conv3'. + - stages (tuple[bool], optional): Stages to apply plugin, length + should be same as 'num_stages'. + with_cp (bool): Use checkpoint or not. 
Using checkpoint will save some + memory while slowing down the training speed. + zero_init_residual (bool): Whether to use zero init for last norm layer + in resblocks to let them behave as identity. + pretrained (str, optional): model pretrained path. Default: None + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None + + Example: + >>> from mmdet.models import ResNet + >>> import torch + >>> self = ResNet(depth=18) + >>> self.eval() + >>> inputs = torch.rand(1, 3, 32, 32) + >>> level_outputs = self.forward(inputs) + >>> for level_out in level_outputs: + ... print(tuple(level_out.shape)) + (1, 64, 8, 8) + (1, 128, 4, 4) + (1, 256, 2, 2) + (1, 512, 1, 1) + """ + + arch_settings = { + 18: (BasicBlock, (2, 2, 2, 2)), + 34: (BasicBlock, (3, 4, 6, 3)), + 50: (Bottleneck, (3, 4, 6, 3)), + 101: (Bottleneck, (3, 4, 23, 3)), + 152: (Bottleneck, (3, 8, 36, 3)) + } + + def __init__(self, + depth, + in_channels=3, + stem_channels=None, + base_channels=64, + num_stages=4, + strides=(1, 2, 2, 2), + dilations=(1, 1, 1, 1), + out_indices=(0, 1, 2, 3), + style='pytorch', + deep_stem=False, + avg_down=False, + frozen_stages=-1, + conv_cfg=None, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + dcn=None, + stage_with_dcn=(False, False, False, False), + plugins=None, + with_cp=False, + zero_init_residual=True, + pretrained=None, + init_cfg=None): + super(ResNet, self).__init__(init_cfg) + self.zero_init_residual = zero_init_residual + if depth not in self.arch_settings: + raise KeyError(f'invalid depth {depth} for resnet') + + block_init_cfg = None + assert not (init_cfg and pretrained), \ + 'init_cfg and pretrained cannot be specified at the same time' + if isinstance(pretrained, str): + warnings.warn('DeprecationWarning: pretrained is deprecated, ' + 'please use "init_cfg" instead') + self.init_cfg = dict(type='Pretrained', checkpoint=pretrained) + elif pretrained is None: + if init_cfg is None: + self.init_cfg = [ + dict(type='Kaiming', layer='Conv2d'), + dict( + type='Constant', + val=1, + layer=['_BatchNorm', 'GroupNorm']) + ] + block = self.arch_settings[depth][0] + if self.zero_init_residual: + if block is BasicBlock: + block_init_cfg = dict( + type='Constant', + val=0, + override=dict(name='norm2')) + elif block is Bottleneck: + block_init_cfg = dict( + type='Constant', + val=0, + override=dict(name='norm3')) + else: + raise TypeError('pretrained must be a str or None') + + self.depth = depth + if stem_channels is None: + stem_channels = base_channels + self.stem_channels = stem_channels + self.base_channels = base_channels + self.num_stages = num_stages + assert num_stages >= 1 and num_stages <= 4 + self.strides = strides + self.dilations = dilations + assert len(strides) == len(dilations) == num_stages + self.out_indices = out_indices + assert max(out_indices) < num_stages + self.style = style + self.deep_stem = deep_stem + self.avg_down = avg_down + self.frozen_stages = frozen_stages + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.with_cp = with_cp + self.norm_eval = norm_eval + self.dcn = dcn + self.stage_with_dcn = stage_with_dcn + if dcn is not None: + assert len(stage_with_dcn) == num_stages + self.plugins = plugins + self.block, stage_blocks = self.arch_settings[depth] + self.stage_blocks = stage_blocks[:num_stages] + self.inplanes = stem_channels + + self._make_stem_layer(in_channels, stem_channels) + + self.res_layers = [] + for i, num_blocks in enumerate(self.stage_blocks): + stride = strides[i] + dilation = dilations[i] + 
dcn = self.dcn if self.stage_with_dcn[i] else None + if plugins is not None: + stage_plugins = self.make_stage_plugins(plugins, i) + else: + stage_plugins = None + planes = base_channels * 2**i + res_layer = self.make_res_layer( + block=self.block, + inplanes=self.inplanes, + planes=planes, + num_blocks=num_blocks, + stride=stride, + dilation=dilation, + style=self.style, + avg_down=self.avg_down, + with_cp=with_cp, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + dcn=dcn, + plugins=stage_plugins, + init_cfg=block_init_cfg) + self.inplanes = planes * self.block.expansion + layer_name = f'layer{i + 1}' + self.add_module(layer_name, res_layer) + self.res_layers.append(layer_name) + + self._freeze_stages() + + self.feat_dim = self.block.expansion * base_channels * 2**( + len(self.stage_blocks) - 1) + + def make_stage_plugins(self, plugins, stage_idx): + """Make plugins for ResNet ``stage_idx`` th stage. + + Currently we support to insert ``context_block``, + ``empirical_attention_block``, ``nonlocal_block`` into the backbone + like ResNet/ResNeXt. They could be inserted after conv1/conv2/conv3 of + Bottleneck. + + An example of plugins format could be: + + Examples: + >>> plugins=[ + ... dict(cfg=dict(type='xxx', arg1='xxx'), + ... stages=(False, True, True, True), + ... position='after_conv2'), + ... dict(cfg=dict(type='yyy'), + ... stages=(True, True, True, True), + ... position='after_conv3'), + ... dict(cfg=dict(type='zzz', postfix='1'), + ... stages=(True, True, True, True), + ... position='after_conv3'), + ... dict(cfg=dict(type='zzz', postfix='2'), + ... stages=(True, True, True, True), + ... position='after_conv3') + ... ] + >>> self = ResNet(depth=18) + >>> stage_plugins = self.make_stage_plugins(plugins, 0) + >>> assert len(stage_plugins) == 3 + + Suppose ``stage_idx=0``, the structure of blocks in the stage would be: + + .. code-block:: none + + conv1-> conv2->conv3->yyy->zzz1->zzz2 + + Suppose 'stage_idx=1', the structure of blocks in the stage would be: + + .. code-block:: none + + conv1-> conv2->xxx->conv3->yyy->zzz1->zzz2 + + If stages is missing, the plugin would be applied to all stages. + + Args: + plugins (list[dict]): List of plugins cfg to build. The postfix is + required if multiple same type plugins are inserted. 
+ stage_idx (int): Index of stage to build + + Returns: + list[dict]: Plugins for current stage + """ + stage_plugins = [] + for plugin in plugins: + plugin = plugin.copy() + stages = plugin.pop('stages', None) + assert stages is None or len(stages) == self.num_stages + # whether to insert plugin into current stage + if stages is None or stages[stage_idx]: + stage_plugins.append(plugin) + + return stage_plugins + + def make_res_layer(self, **kwargs): + """Pack all blocks in a stage into a ``ResLayer``.""" + return ResLayer(**kwargs) + + @property + def norm1(self): + """nn.Module: the normalization layer named "norm1" """ + return getattr(self, self.norm1_name) + + def _make_stem_layer(self, in_channels, stem_channels): + if self.deep_stem: + self.stem = nn.Sequential( + build_conv_layer( + self.conv_cfg, + in_channels, + stem_channels // 2, + kernel_size=3, + stride=2, + padding=1, + bias=False), + build_norm_layer(self.norm_cfg, stem_channels // 2)[1], + nn.ReLU(inplace=True), + build_conv_layer( + self.conv_cfg, + stem_channels // 2, + stem_channels // 2, + kernel_size=3, + stride=1, + padding=1, + bias=False), + build_norm_layer(self.norm_cfg, stem_channels // 2)[1], + nn.ReLU(inplace=True), + build_conv_layer( + self.conv_cfg, + stem_channels // 2, + stem_channels, + kernel_size=3, + stride=1, + padding=1, + bias=False), + build_norm_layer(self.norm_cfg, stem_channels)[1], + nn.ReLU(inplace=True)) + else: + self.conv1 = build_conv_layer( + self.conv_cfg, + in_channels, + stem_channels, + kernel_size=7, + stride=2, + padding=3, + bias=False) + self.norm1_name, norm1 = build_norm_layer( + self.norm_cfg, stem_channels, postfix=1) + self.add_module(self.norm1_name, norm1) + self.relu = nn.ReLU(inplace=True) + self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + + def _freeze_stages(self): + if self.frozen_stages >= 0: + if self.deep_stem: + self.stem.eval() + for param in self.stem.parameters(): + param.requires_grad = False + else: + self.norm1.eval() + for m in [self.conv1, self.norm1]: + for param in m.parameters(): + param.requires_grad = False + + for i in range(1, self.frozen_stages + 1): + m = getattr(self, f'layer{i}') + m.eval() + for param in m.parameters(): + param.requires_grad = False + + def forward(self, x): + """Forward function.""" + if self.deep_stem: + x = self.stem(x) + else: + x = self.conv1(x) + x = self.norm1(x) + x = self.relu(x) + x = self.maxpool(x) + outs = [] + for i, layer_name in enumerate(self.res_layers): + res_layer = getattr(self, layer_name) + x = res_layer(x) + if i in self.out_indices: + outs.append(x) + return tuple(outs) + + def train(self, mode=True): + """Convert the model into training mode while keep normalization layer + freezed.""" + super(ResNet, self).train(mode) + self._freeze_stages() + if mode and self.norm_eval: + for m in self.modules(): + # trick: eval have effect on BatchNorm only + if isinstance(m, _BatchNorm): + m.eval() + + +class ResNetV1d(ResNet): + r"""ResNetV1d variant described in `Bag of Tricks + `_. + + Compared with default ResNet(ResNetV1b), ResNetV1d replaces the 7x7 conv in + the input stem with three 3x3 convs. And in the downsampling block, a 2x2 + avg_pool with stride 2 is added before conv, whose stride is changed to 1. + """ + + def __init__(self, **kwargs): + super(ResNetV1d, self).__init__( + deep_stem=True, avg_down=True, **kwargs) + +class TridentConv(BaseModule): + """Trident Convolution Module. + + Args: + in_channels (int): Number of channels in input. 
+ out_channels (int): Number of channels in output. + kernel_size (int): Size of convolution kernel. + stride (int, optional): Convolution stride. Default: 1. + trident_dilations (tuple[int, int, int], optional): Dilations of + different trident branch. Default: (1, 2, 3). + test_branch_idx (int, optional): In inference, all 3 branches will + be used if `test_branch_idx==-1`, otherwise only branch with + index `test_branch_idx` will be used. Default: 1. + bias (bool, optional): Whether to use bias in convolution or not. + Default: False. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None + """ + + def __init__(self, + in_channels, + out_channels, + kernel_size, + stride=1, + trident_dilations=(1, 2, 3), + test_branch_idx=1, + bias=False, + init_cfg=None): + super(TridentConv, self).__init__(init_cfg) + self.num_branch = len(trident_dilations) + self.with_bias = bias + self.test_branch_idx = test_branch_idx + self.stride = _pair(stride) + self.kernel_size = _pair(kernel_size) + self.paddings = _pair(trident_dilations) + self.dilations = trident_dilations + self.in_channels = in_channels + self.out_channels = out_channels + self.bias = bias + + self.weight = nn.Parameter( + torch.Tensor(out_channels, in_channels, *self.kernel_size)) + if bias: + self.bias = nn.Parameter(torch.Tensor(out_channels)) + else: + self.bias = None + + def extra_repr(self): + tmpstr = f'in_channels={self.in_channels}' + tmpstr += f', out_channels={self.out_channels}' + tmpstr += f', kernel_size={self.kernel_size}' + tmpstr += f', num_branch={self.num_branch}' + tmpstr += f', test_branch_idx={self.test_branch_idx}' + tmpstr += f', stride={self.stride}' + tmpstr += f', paddings={self.paddings}' + tmpstr += f', dilations={self.dilations}' + tmpstr += f', bias={self.bias}' + return tmpstr + + def forward(self, inputs): + if self.training or self.test_branch_idx == -1: + outputs = [ + F.conv2d(input, self.weight, self.bias, self.stride, padding, + dilation) for input, dilation, padding in zip( + inputs, self.dilations, self.paddings) + ] + else: + assert len(inputs) == 1 + outputs = [ + F.conv2d(inputs[0], self.weight, self.bias, self.stride, + self.paddings[self.test_branch_idx], + self.dilations[self.test_branch_idx]) + ] + + return outputs + + + +# Since TridentNet is defined over ResNet50 and ResNet101, here we +# only support TridentBottleneckBlock. +class TridentBottleneck(Bottleneck): + """BottleBlock for TridentResNet. + + Args: + trident_dilations (tuple[int, int, int]): Dilations of different + trident branch. + test_branch_idx (int): In inference, all 3 branches will be used + if `test_branch_idx==-1`, otherwise only branch with index + `test_branch_idx` will be used. + concat_output (bool): Whether to concat the output list to a Tensor. + `True` only in the last Block. 
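The key idea in `TridentConv` above is that all branches share a single weight tensor and differ only in dilation, with padding equal to the dilation so a 3x3 kernel preserves the spatial size; a small sketch.

```python
# One shared 3x3 kernel applied with three dilations; padding == dilation keeps H and W.
import torch
import torch.nn.functional as F

weight = torch.randn(64, 64, 3, 3)  # shared across all trident branches
x = torch.randn(1, 64, 56, 56)
branches = [F.conv2d(x, weight, None, stride=1, padding=d, dilation=d) for d in (1, 2, 3)]
print([tuple(b.shape) for b in branches])  # three (1, 64, 56, 56) outputs
```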
+ """ + + def __init__(self, trident_dilations, test_branch_idx, concat_output, + **kwargs): + + super(TridentBottleneck, self).__init__(**kwargs) + self.trident_dilations = trident_dilations + self.num_branch = len(trident_dilations) + self.concat_output = concat_output + self.test_branch_idx = test_branch_idx + self.conv2 = TridentConv( + self.planes, + self.planes, + kernel_size=3, + stride=self.conv2_stride, + bias=False, + trident_dilations=self.trident_dilations, + test_branch_idx=test_branch_idx, + init_cfg=dict( + type='Kaiming', + distribution='uniform', + mode='fan_in', + override=dict(name='conv2'))) + + def _inner_forward(self, x): + num_branch = ( + self.num_branch + if self.training or self.test_branch_idx == -1 else 1) + identity = x + if not isinstance(x, list): + x = (x, ) * num_branch + identity = x + if self.downsample is not None: + identity = [self.downsample(b) for b in x] + + out = [self.conv1(b) for b in x] + out = [self.norm1(b) for b in out] + out = [self.relu(b) for b in out] + + if self.with_plugins: + for k in range(len(out)): + out[k] = self.forward_plugin(out[k], + self.after_conv1_plugin_names) + + out = self.conv2(out) + out = [self.norm2(b) for b in out] + out = [self.relu(b) for b in out] + if self.with_plugins: + for k in range(len(out)): + out[k] = self.forward_plugin(out[k], + self.after_conv2_plugin_names) + + out = [self.conv3(b) for b in out] + out = [self.norm3(b) for b in out] + + if self.with_plugins: + for k in range(len(out)): + out[k] = self.forward_plugin(out[k], + self.after_conv3_plugin_names) + + out = [ + out_b + identity_b for out_b, identity_b in zip(out, identity) + ] + return out + + def forward(self, x): + if self.with_cp and x.requires_grad: + out = cp.checkpoint(self._inner_forward, x) + else: + out = self._inner_forward(x) + + out = [self.relu(b) for b in out] + if self.concat_output: + out = torch.cat(out, dim=0) + return out + +def make_trident_res_layer(block, + inplanes, + planes, + num_blocks, + stride=1, + trident_dilations=(1, 2, 3), + style='pytorch', + with_cp=False, + conv_cfg=None, + norm_cfg=dict(type='BN'), + dcn=None, + plugins=None, + test_branch_idx=-1): + """Build Trident Res Layers.""" + + downsample = None + if stride != 1 or inplanes != planes * block.expansion: + downsample = [] + conv_stride = stride + downsample.extend([ + build_conv_layer( + conv_cfg, + inplanes, + planes * block.expansion, + kernel_size=1, + stride=conv_stride, + bias=False), + build_norm_layer(norm_cfg, planes * block.expansion)[1] + ]) + downsample = nn.Sequential(*downsample) + + layers = [] + for i in range(num_blocks): + layers.append( + block( + inplanes=inplanes, + planes=planes, + stride=stride if i == 0 else 1, + trident_dilations=trident_dilations, + downsample=downsample if i == 0 else None, + style=style, + with_cp=with_cp, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + dcn=dcn, + plugins=plugins, + test_branch_idx=test_branch_idx, + concat_output=True if i == num_blocks - 1 else False)) + inplanes = planes * block.expansion + return nn.Sequential(*layers) + + +class TridentResNet(ResNet): + """The stem layer, stage 1 and stage 2 in Trident ResNet are identical to + ResNet, while in stage 3, Trident BottleBlock is utilized to replace the + normal BottleBlock to yield trident output. Different branch shares the + convolution weight but uses different dilations to achieve multi-scale + output. 
+ + / stage3(b0) \ + x - stem - stage1 - stage2 - stage3(b1) - output + \ stage3(b2) / + + Args: + depth (int): Depth of resnet, from {50, 101, 152}. + num_branch (int): Number of branches in TridentNet. + test_branch_idx (int): In inference, all 3 branches will be used + if `test_branch_idx==-1`, otherwise only branch with index + `test_branch_idx` will be used. + trident_dilations (tuple[int]): Dilations of different trident branch. + len(trident_dilations) should be equal to num_branch. + """ # noqa + + def __init__(self, depth, num_branch, test_branch_idx, trident_dilations, + **kwargs): + + assert num_branch == len(trident_dilations) + assert depth in (50, 101, 152) + super(TridentResNet, self).__init__(depth, **kwargs) + assert self.num_stages == 3 + self.test_branch_idx = test_branch_idx + self.num_branch = num_branch + + last_stage_idx = self.num_stages - 1 + stride = self.strides[last_stage_idx] + dilation = trident_dilations + dcn = self.dcn if self.stage_with_dcn[last_stage_idx] else None + if self.plugins is not None: + stage_plugins = self.make_stage_plugins(self.plugins, + last_stage_idx) + else: + stage_plugins = None + planes = self.base_channels * 2**last_stage_idx + res_layer = make_trident_res_layer( + TridentBottleneck, + inplanes=(self.block.expansion * self.base_channels * + 2**(last_stage_idx - 1)), + planes=planes, + num_blocks=self.stage_blocks[last_stage_idx], + stride=stride, + trident_dilations=dilation, + style=self.style, + with_cp=self.with_cp, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + dcn=dcn, + plugins=stage_plugins, + test_branch_idx=self.test_branch_idx) + + layer_name = f'layer{last_stage_idx + 1}' + + self.__setattr__(layer_name, res_layer) + self.res_layers.pop(last_stage_idx) + self.res_layers.insert(last_stage_idx, layer_name) + + self._freeze_stages() +# ''' + + +# from mmdet.models.backbones import TridentResNet +import torch + +def _get_scripted_model(): + plugins = [ + dict( + cfg=dict( + type='GeneralizedAttention', + spatial_range=-1, + num_heads=8, + attention_type='0010', + kv_stride=2), + position='after_conv2') + ] + + tridentresnet_config = dict( + num_branch=3, + test_branch_idx=1, + strides=(1, 2, 2), + dilations=(1, 1, 1), + trident_dilations=(1, 2, 3), + out_indices=(2, ), + plugins=plugins, + ) + + model = TridentResNet(50, num_stages=3, **tridentresnet_config).cuda() + return model + + +def get_input(batch_size): + return (torch.randn((batch_size, 3, 224, 224)).cuda(),), {} \ No newline at end of file diff --git a/models/align.py b/models/align.py new file mode 100644 index 000000000000..eaa835650514 --- /dev/null +++ b/models/align.py @@ -0,0 +1,1692 @@ +# coding=utf-8 +# Copyright 2023 The Google Research Team Authors and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
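For reference, a hypothetical driver for the `_get_scripted_model` / `get_input` entry points defined at the end of `models/_tridentnet_scripted.py` above; this mirrors what `run.py` is assumed to do and requires a CUDA device.

```python
# Hypothetical usage of the tridentnet entry points above (assumes a CUDA device).
import torch

model = _get_scripted_model()           # TridentResNet-50 with GeneralizedAttention plugins
model.eval()
args, kwargs = get_input(batch_size=1)  # ((1, 3, 224, 224) CUDA tensor,), {}
with torch.no_grad():
    outs = model(*args, **kwargs)       # tuple of feature maps from out_indices=(2,)
print([tuple(o.shape) for o in outs])
```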
+""" PyTorch ALIGN model.""" + +import math +from dataclasses import dataclass +from typing import Any, Optional, Tuple, Union + +import torch +import torch.utils.checkpoint +from torch import nn + +from transformers.activations import ACT2FN +from transformers.modeling_outputs import ( + BaseModelOutputWithNoAttention, + BaseModelOutputWithPastAndCrossAttentions, + BaseModelOutputWithPoolingAndCrossAttentions, + BaseModelOutputWithPoolingAndNoAttention, +) +from transformers.modeling_utils import PreTrainedModel +from transformers.pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer +from transformers.utils import ( + ModelOutput, + add_start_docstrings, + add_start_docstrings_to_model_forward, + logging, + replace_return_docstrings, +) +from transformers.models.align.configuration_align import AlignConfig, AlignTextConfig, AlignVisionConfig + + +logger = logging.get_logger(__name__) + +_CHECKPOINT_FOR_DOC = "kakaobrain/align-base" +_CONFIG_FOR_DOC = "AlignConfig" + + +ALIGN_PRETRAINED_MODEL_ARCHIVE_LIST = [ + "kakaobrain/align-base", + # See all ALIGN models at https://huggingface.co/models?filter=align +] + + +ALIGN_START_DOCSTRING = r""" + This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the + library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads + etc.) + + This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. + Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage + and behavior. + + Parameters: + config ([`AlignConfig`]): Model configuration class with all the parameters of the model. + Initializing with a config file does not load the weights associated with the model, only the + configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. +""" + +ALIGN_TEXT_INPUTS_DOCSTRING = r""" + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide + it. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, + config.max_position_embeddings - 1]`. + + [What are position IDs?](../glossary#position-ids) + token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*): + Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, + 1]`: + + - 0 corresponds to a *sentence A* token, + - 1 corresponds to a *sentence B* token. + + [What are token type IDs?](../glossary#token-type-ids) + head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): + Mask to nullify selected heads of the self-attention modules. 
Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*): + Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This + is useful if you want more control over how to convert `input_ids` indices into associated vectors than the + model's internal embedding lookup matrix. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. +""" + +ALIGN_VISION_INPUTS_DOCSTRING = r""" + Args: + pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): + Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using + [`AutoImageProcessor`]. See [`EfficientNetImageProcessor.__call__`] for details. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. +""" + +ALIGN_INPUTS_DOCSTRING = r""" + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide + it. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, + config.max_position_embeddings - 1]`. + + [What are position IDs?](../glossary#position-ids) + token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*): + Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, + 1]`: + + - 0 corresponds to a *sentence A* token, + - 1 corresponds to a *sentence B* token. + + [What are token type IDs?](../glossary#token-type-ids) + head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): + Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*): + Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This + is useful if you want more control over how to convert `input_ids` indices into associated vectors than the + model's internal embedding lookup matrix. 
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): + Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using + [`AutoImageProcessor`]. See [`EfficientNetImageProcessor.__call__`] for details. + return_loss (`bool`, *optional*): + Whether or not to return the contrastive loss. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. +""" + + +@dataclass +class AlignVisionModelOutput(ModelOutput): + """ + Base class for vision model's outputs that also contains image embeddings of the pooling of the last hidden states. + + Args: + image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)` *optional* returned when model is initialized with `with_projection=True`): + The image embeddings obtained by applying the projection layer to the pooler_output. + last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + Sequence of hidden-states at the output of the last layer of the model. + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. + """ + + image_embeds: Optional[torch.FloatTensor] = None + last_hidden_state: torch.FloatTensor = None + hidden_states: Optional[Tuple[torch.FloatTensor]] = None + + +@dataclass +class AlignTextModelOutput(ModelOutput): + """ + Base class for text model's outputs that also contains a pooling of the last hidden states. + + Args: + text_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)` *optional* returned when model is initialized with `with_projection=True`): + The text embeddings obtained by applying the projection layer to the pooler_output. + last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + Sequence of hidden-states at the output of the last layer of the model. + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + + Attentions weights after the attention softmax, used to compute the weighted average in the self-attention + heads. 
+ """ + + text_embeds: Optional[torch.FloatTensor] = None + last_hidden_state: torch.FloatTensor = None + hidden_states: Optional[Tuple[torch.FloatTensor]] = None + attentions: Optional[Tuple[torch.FloatTensor]] = None + + +@dataclass +class AlignOutput(ModelOutput): + """ + Args: + loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`): + Contrastive loss for image-text similarity. + logits_per_image:(`torch.FloatTensor` of shape `(image_batch_size, text_batch_size)`): + The scaled dot product scores between `image_embeds` and `text_embeds`. This represents the image-text + similarity scores. + logits_per_text:(`torch.FloatTensor` of shape `(text_batch_size, image_batch_size)`): + The scaled dot product scores between `text_embeds` and `image_embeds`. This represents the text-image + similarity scores. + text_embeds(`torch.FloatTensor` of shape `(batch_size, output_dim`): + The text embeddings obtained by applying the projection layer to the pooled output of [`AlignTextModel`]. + image_embeds(`torch.FloatTensor` of shape `(batch_size, output_dim`): + The output of [`AlignVisionModel`]. + text_model_output(`BaseModelOutputWithPoolingAndCrossAttentions`): + The output of the [`AlignTextModel`]. + vision_model_output(`BaseModelOutputWithPoolingAndNoAttention`): + The output of the [`AlignVisionModel`]. + """ + + loss: Optional[torch.FloatTensor] = None + logits_per_image: torch.FloatTensor = None + logits_per_text: torch.FloatTensor = None + text_embeds: torch.FloatTensor = None + image_embeds: torch.FloatTensor = None + text_model_output: BaseModelOutputWithPoolingAndCrossAttentions = None + vision_model_output: BaseModelOutputWithPoolingAndNoAttention = None + + def to_tuple(self) -> Tuple[Any]: + return tuple( + self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple() + for k in self.keys() + ) + + +# contrastive loss function, adapted from +# https://sachinruk.github.io/blog/pytorch/pytorch%20lightning/loss%20function/gpu/2021/03/07/CLIP.html +def contrastive_loss(logits: torch.Tensor) -> torch.Tensor: + return nn.functional.cross_entropy(logits, torch.arange(len(logits), device=logits.device), label_smoothing=0.1) + + +def align_loss(similarity: torch.Tensor) -> torch.Tensor: + caption_loss = contrastive_loss(similarity) + image_loss = contrastive_loss(similarity.t()) + return (caption_loss + image_loss) / 2.0 + + +# Copied from transformers.models.efficientnet.modeling_efficientnet.round_filters with EfficientNet -> AlignVision +def round_filters(config: AlignVisionConfig, num_channels: int): + r""" + Round number of filters based on depth multiplier. + """ + divisor = config.depth_divisor + num_channels *= config.width_coefficient + new_dim = max(divisor, int(num_channels + divisor / 2) // divisor * divisor) + + # Make sure that round down does not go down by more than 10%. + if new_dim < 0.9 * num_channels: + new_dim += divisor + + return int(new_dim) + + +# Copied from transformers.models.efficientnet.modeling_efficientnet.correct_pad +def correct_pad(kernel_size: Union[int, Tuple], adjust: bool = True): + r""" + Utility function to get the tuple padding value for the depthwise convolution. + + Args: + kernel_size (`int` or `tuple`): + Kernel size of the convolution layers. + adjust (`bool`, *optional*, defaults to `True`): + Adjusts padding value to apply to right and bottom sides of the input. 
+ """ + if isinstance(kernel_size, int): + kernel_size = (kernel_size, kernel_size) + + correct = (kernel_size[0] // 2, kernel_size[1] // 2) + if adjust: + return (correct[1] - 1, correct[1], correct[0] - 1, correct[0]) + else: + return (correct[1], correct[1], correct[0], correct[0]) + + +# Copied from transformers.models.efficientnet.modeling_efficientnet.EfficientNetEmbeddings with EfficientNet->AlignVision +class AlignVisionEmbeddings(nn.Module): + r""" + A module that corresponds to the stem module of the original work. + """ + + def __init__(self, config: AlignVisionConfig): + super().__init__() + + self.out_dim = round_filters(config, 32) + self.padding = nn.ZeroPad2d(padding=(0, 1, 0, 1)) + self.convolution = nn.Conv2d( + config.num_channels, self.out_dim, kernel_size=3, stride=2, padding="valid", bias=False + ) + self.batchnorm = nn.BatchNorm2d(self.out_dim, eps=config.batch_norm_eps, momentum=config.batch_norm_momentum) + self.activation = ACT2FN[config.hidden_act] + + def forward(self, pixel_values: torch.Tensor) -> torch.Tensor: + features = self.padding(pixel_values) + features = self.convolution(features) + features = self.batchnorm(features) + features = self.activation(features) + + return features + + +# Copied from transformers.models.efficientnet.modeling_efficientnet.EfficientNetDepthwiseConv2d with EfficientNet->AlignVision +class AlignVisionDepthwiseConv2d(nn.Conv2d): + def __init__( + self, + in_channels, + depth_multiplier=1, + kernel_size=3, + stride=1, + padding=0, + dilation=1, + bias=True, + padding_mode="zeros", + ): + out_channels = in_channels * depth_multiplier + super().__init__( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + groups=in_channels, + bias=bias, + padding_mode=padding_mode, + ) + + +# Copied from transformers.models.efficientnet.modeling_efficientnet.EfficientNetExpansionLayer with EfficientNet->AlignVision +class AlignVisionExpansionLayer(nn.Module): + r""" + This corresponds to the expansion phase of each block in the original implementation. + """ + + def __init__(self, config: AlignVisionConfig, in_dim: int, out_dim: int, stride: int): + super().__init__() + self.expand_conv = nn.Conv2d( + in_channels=in_dim, + out_channels=out_dim, + kernel_size=1, + padding="same", + bias=False, + ) + self.expand_bn = nn.BatchNorm2d(num_features=out_dim, eps=config.batch_norm_eps) + self.expand_act = ACT2FN[config.hidden_act] + + def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor: + # Expand phase + hidden_states = self.expand_conv(hidden_states) + hidden_states = self.expand_bn(hidden_states) + hidden_states = self.expand_act(hidden_states) + + return hidden_states + + +# Copied from transformers.models.efficientnet.modeling_efficientnet.EfficientNetDepthwiseLayer with with EfficientNet->AlignVision +class AlignVisionDepthwiseLayer(nn.Module): + r""" + This corresponds to the depthwise convolution phase of each block in the original implementation. 
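+
+    When `stride == 2` the input is first zero-padded explicitly (using the tuple returned by
+    `correct_pad`) and the depthwise convolution uses "valid" padding; otherwise "same" padding
+    is used and the explicit padding layer is skipped.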
+ """ + + def __init__( + self, + config: AlignVisionConfig, + in_dim: int, + stride: int, + kernel_size: int, + adjust_padding: bool, + ): + super().__init__() + self.stride = stride + conv_pad = "valid" if self.stride == 2 else "same" + padding = correct_pad(kernel_size, adjust=adjust_padding) + + self.depthwise_conv_pad = nn.ZeroPad2d(padding=padding) + self.depthwise_conv = AlignVisionDepthwiseConv2d( + in_dim, kernel_size=kernel_size, stride=stride, padding=conv_pad, bias=False + ) + self.depthwise_norm = nn.BatchNorm2d( + num_features=in_dim, eps=config.batch_norm_eps, momentum=config.batch_norm_momentum + ) + self.depthwise_act = ACT2FN[config.hidden_act] + + def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor: + # Depthwise convolution + if self.stride == 2: + hidden_states = self.depthwise_conv_pad(hidden_states) + + hidden_states = self.depthwise_conv(hidden_states) + hidden_states = self.depthwise_norm(hidden_states) + hidden_states = self.depthwise_act(hidden_states) + + return hidden_states + + +# Copied from transformers.models.efficientnet.modeling_efficientnet.EfficientNetSqueezeExciteLayer with with EfficientNet->AlignVision +class AlignVisionSqueezeExciteLayer(nn.Module): + r""" + This corresponds to the Squeeze and Excitement phase of each block in the original implementation. + """ + + def __init__(self, config: AlignVisionConfig, in_dim: int, expand_dim: int, expand: bool = False): + super().__init__() + self.dim = expand_dim if expand else in_dim + self.dim_se = max(1, int(in_dim * config.squeeze_expansion_ratio)) + + self.squeeze = nn.AdaptiveAvgPool2d(output_size=1) + self.reduce = nn.Conv2d( + in_channels=self.dim, + out_channels=self.dim_se, + kernel_size=1, + padding="same", + ) + self.expand = nn.Conv2d( + in_channels=self.dim_se, + out_channels=self.dim, + kernel_size=1, + padding="same", + ) + self.act_reduce = ACT2FN[config.hidden_act] + self.act_expand = nn.Sigmoid() + + def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor: + inputs = hidden_states + hidden_states = self.squeeze(hidden_states) + hidden_states = self.reduce(hidden_states) + hidden_states = self.act_reduce(hidden_states) + + hidden_states = self.expand(hidden_states) + hidden_states = self.act_expand(hidden_states) + hidden_states = torch.mul(inputs, hidden_states) + + return hidden_states + + +class AlignVisionFinalBlockLayer(nn.Module): + r""" + This corresponds to the final phase of each block in the original implementation. + """ + + def __init__( + self, config: AlignVisionConfig, in_dim: int, out_dim: int, stride: int, drop_rate: float, id_skip: bool + ): + super().__init__() + self.apply_dropout = stride == 1 and not id_skip + self.project_conv = nn.Conv2d( + in_channels=in_dim, + out_channels=out_dim, + kernel_size=1, + padding="same", + bias=False, + ) + self.project_bn = nn.BatchNorm2d( + num_features=out_dim, eps=config.batch_norm_eps, momentum=config.batch_norm_momentum + ) + self.dropout = nn.Dropout(p=drop_rate) + + def forward(self, embeddings: torch.FloatTensor, hidden_states: torch.FloatTensor) -> torch.Tensor: + hidden_states = self.project_conv(hidden_states) + hidden_states = self.project_bn(hidden_states) + + if self.apply_dropout: + hidden_states = self.dropout(hidden_states) + hidden_states = hidden_states + embeddings + + return hidden_states + + +class AlignVisionBlock(nn.Module): + r""" + This corresponds to the block module of original the EfficientNet vision encoder implementation. 
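+
+    Each block follows the MBConv (inverted bottleneck) pattern used by EfficientNet: an optional
+    1x1 expansion, a depthwise convolution, squeeze-and-excitation, and a 1x1 projection with
+    optional dropout and a residual connection (see `forward` below).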
+ + Args: + config ([`AlignVisionConfig`]): + Model configuration class. + in_dim (`int`): + Number of input channels. + out_dim (`int`): + Number of output channels. + stride (`int`): + Stride size to be used in convolution layers. + expand_ratio (`int`): + Expand ratio to set the output dimensions for the expansion and squeeze-excite layers. + kernel_size (`int`): + Kernel size for the depthwise convolution layer. + drop_rate (`float`): + Dropout rate to be used in the final phase of each block. + id_skip (`bool`): + Whether to apply dropout and sum the final hidden states with the input embeddings during the final phase + of each block. Set to `True` for the first block of each stage. + adjust_padding (`bool`): + Whether to apply padding to only right and bottom side of the input kernel before the depthwise convolution + operation, set to `True` for inputs with odd input sizes. + """ + + def __init__( + self, + config: AlignVisionConfig, + in_dim: int, + out_dim: int, + stride: int, + expand_ratio: int, + kernel_size: int, + drop_rate: float, + id_skip: bool, + adjust_padding: bool, + ): + super().__init__() + self.expand_ratio = expand_ratio + self.expand = True if self.expand_ratio != 1 else False + expand_in_dim = in_dim * expand_ratio + + if self.expand: + self.expansion = AlignVisionExpansionLayer( + config=config, in_dim=in_dim, out_dim=expand_in_dim, stride=stride + ) + + self.depthwise_conv = AlignVisionDepthwiseLayer( + config=config, + in_dim=expand_in_dim if self.expand else in_dim, + stride=stride, + kernel_size=kernel_size, + adjust_padding=adjust_padding, + ) + self.squeeze_excite = AlignVisionSqueezeExciteLayer( + config=config, in_dim=in_dim, expand_dim=expand_in_dim, expand=self.expand + ) + self.projection = AlignVisionFinalBlockLayer( + config=config, + in_dim=expand_in_dim if self.expand else in_dim, + out_dim=out_dim, + stride=stride, + drop_rate=drop_rate, + id_skip=id_skip, + ) + + def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor: + embeddings = hidden_states + # Expansion and depthwise convolution phase + if self.expand_ratio != 1: + hidden_states = self.expansion(hidden_states) + hidden_states = self.depthwise_conv(hidden_states) + + # Squeeze and excite phase + hidden_states = self.squeeze_excite(hidden_states) + hidden_states = self.projection(embeddings, hidden_states) + return hidden_states + + +class AlignVisionEncoder(nn.Module): + r""" + Forward propogates the embeddings through each vision encoder (EfficientNet) block. + + Args: + config ([`AlignVisionConfig`]): + Model configuration class. + """ + + def __init__(self, config: AlignVisionConfig): + super().__init__() + self.depth_coefficient = config.depth_coefficient + + def round_repeats(repeats): + # Round number of block repeats based on depth multiplier. 
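+            # e.g. with depth_coefficient=1.2 and repeats=4 this returns ceil(4.8) = 5.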
+ return int(math.ceil(self.depth_coefficient * repeats)) + + num_base_blocks = len(config.in_channels) + num_blocks = sum(round_repeats(n) for n in config.num_block_repeats) + + curr_block_num = 0 + blocks = [] + for i in range(num_base_blocks): + in_dim = round_filters(config, config.in_channels[i]) + out_dim = round_filters(config, config.out_channels[i]) + stride = config.strides[i] + kernel_size = config.kernel_sizes[i] + expand_ratio = config.expand_ratios[i] + + for j in range(round_repeats(config.num_block_repeats[i])): + id_skip = True if j == 0 else False + stride = 1 if j > 0 else stride + in_dim = out_dim if j > 0 else in_dim + adjust_padding = False if curr_block_num in config.depthwise_padding else True + drop_rate = config.drop_connect_rate * curr_block_num / num_blocks + + block = AlignVisionBlock( + config=config, + in_dim=in_dim, + out_dim=out_dim, + stride=stride, + kernel_size=kernel_size, + expand_ratio=expand_ratio, + drop_rate=drop_rate, + id_skip=id_skip, + adjust_padding=adjust_padding, + ) + blocks.append(block) + curr_block_num += 1 + + self.blocks = nn.ModuleList(blocks) + + def forward( + self, + hidden_states: torch.FloatTensor, + output_hidden_states: Optional[bool] = False, + return_dict: Optional[bool] = True, + ) -> BaseModelOutputWithPoolingAndNoAttention: + all_hidden_states = (hidden_states,) if output_hidden_states else None + + for block in self.blocks: + hidden_states = block(hidden_states) + if output_hidden_states: + all_hidden_states += (hidden_states,) + + if not return_dict: + return tuple(v for v in [hidden_states, all_hidden_states] if v is not None) + + return BaseModelOutputWithNoAttention( + last_hidden_state=hidden_states, + hidden_states=all_hidden_states, + ) + + +# Copied from transformers.models.bert.modeling_bert.BertEmbeddings with Bert->AlignText +class AlignTextEmbeddings(nn.Module): + """Construct the embeddings from word, position and token_type embeddings.""" + + def __init__(self, config): + super().__init__() + self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) + self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) + self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) + + # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load + # any TensorFlow checkpoint file + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + # position_ids (1, len position emb) is contiguous in memory and exported when serialized + self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") + self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1))) + self.register_buffer( + "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False + ) + + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + token_type_ids: Optional[torch.LongTensor] = None, + position_ids: Optional[torch.LongTensor] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + past_key_values_length: int = 0, + ) -> torch.Tensor: + if input_ids is not None: + input_shape = input_ids.size() + else: + input_shape = inputs_embeds.size()[:-1] + + seq_length = input_shape[1] + + if position_ids is None: + position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length] + + # Setting the 
token_type_ids to the registered buffer in constructor where it is all zeros, which usually occurs + # when its auto-generated, registered buffer helps users when tracing the model without passing token_type_ids, solves + # issue #5664 + if token_type_ids is None: + if hasattr(self, "token_type_ids"): + buffered_token_type_ids = self.token_type_ids[:, :seq_length] + buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length) + token_type_ids = buffered_token_type_ids_expanded + else: + token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) + + if inputs_embeds is None: + inputs_embeds = self.word_embeddings(input_ids) + token_type_embeddings = self.token_type_embeddings(token_type_ids) + + embeddings = inputs_embeds + token_type_embeddings + if self.position_embedding_type == "absolute": + position_embeddings = self.position_embeddings(position_ids) + embeddings += position_embeddings + embeddings = self.LayerNorm(embeddings) + embeddings = self.dropout(embeddings) + return embeddings + + +# Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->AlignText +class AlignTextSelfAttention(nn.Module): + def __init__(self, config, position_embedding_type=None): + super().__init__() + if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): + raise ValueError( + f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " + f"heads ({config.num_attention_heads})" + ) + + self.num_attention_heads = config.num_attention_heads + self.attention_head_size = int(config.hidden_size / config.num_attention_heads) + self.all_head_size = self.num_attention_heads * self.attention_head_size + + self.query = nn.Linear(config.hidden_size, self.all_head_size) + self.key = nn.Linear(config.hidden_size, self.all_head_size) + self.value = nn.Linear(config.hidden_size, self.all_head_size) + + self.dropout = nn.Dropout(config.attention_probs_dropout_prob) + self.position_embedding_type = position_embedding_type or getattr( + config, "position_embedding_type", "absolute" + ) + if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": + self.max_position_embeddings = config.max_position_embeddings + self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size) + + self.is_decoder = config.is_decoder + + def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor: + new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) + x = x.view(new_x_shape) + return x.permute(0, 2, 1, 3) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.FloatTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, + output_attentions: Optional[bool] = False, + ) -> Tuple[torch.Tensor]: + mixed_query_layer = self.query(hidden_states) + + # If this is instantiated as a cross-attention module, the keys + # and values come from an encoder; the attention mask needs to be + # such that the encoder's padding tokens are not attended to. 
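+        # The branches below choose where the keys/values come from: a cached cross-attention
+        # projection, a fresh projection of `encoder_hidden_states` (cross-attention), the current
+        # self-attention projection concatenated with the cache, or a plain self-attention
+        # projection when there is no cache.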
+ is_cross_attention = encoder_hidden_states is not None + + if is_cross_attention and past_key_value is not None: + # reuse k,v, cross_attentions + key_layer = past_key_value[0] + value_layer = past_key_value[1] + attention_mask = encoder_attention_mask + elif is_cross_attention: + key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) + value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) + attention_mask = encoder_attention_mask + elif past_key_value is not None: + key_layer = self.transpose_for_scores(self.key(hidden_states)) + value_layer = self.transpose_for_scores(self.value(hidden_states)) + key_layer = torch.cat([past_key_value[0], key_layer], dim=2) + value_layer = torch.cat([past_key_value[1], value_layer], dim=2) + else: + key_layer = self.transpose_for_scores(self.key(hidden_states)) + value_layer = self.transpose_for_scores(self.value(hidden_states)) + + query_layer = self.transpose_for_scores(mixed_query_layer) + + use_cache = past_key_value is not None + if self.is_decoder: + # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. + # Further calls to cross_attention layer can then reuse all cross-attention + # key/value_states (first "if" case) + # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of + # all previous decoder key/value_states. Further calls to uni-directional self-attention + # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) + # if encoder bi-directional self-attention `past_key_value` is always `None` + past_key_value = (key_layer, value_layer) + + # Take the dot product between "query" and "key" to get the raw attention scores. + attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) + + if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": + query_length, key_length = query_layer.shape[2], key_layer.shape[2] + if use_cache: + position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view( + -1, 1 + ) + else: + position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1) + position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1) + distance = position_ids_l - position_ids_r + + positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1) + positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility + + if self.position_embedding_type == "relative_key": + relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) + attention_scores = attention_scores + relative_position_scores + elif self.position_embedding_type == "relative_key_query": + relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) + relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding) + attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key + + attention_scores = attention_scores / math.sqrt(self.attention_head_size) + if attention_mask is not None: + # Apply the attention mask is (precomputed for all layers in AlignTextModel forward() function) + attention_scores = attention_scores + attention_mask + + # Normalize the attention scores to probabilities. 
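+        # `attention_scores` has shape (batch_size, num_heads, query_len, key_len); the softmax
+        # below normalizes over the key dimension.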
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1) + + # This is actually dropping out entire tokens to attend to, which might + # seem a bit unusual, but is taken from the original Transformer paper. + attention_probs = self.dropout(attention_probs) + + # Mask heads if we want to + if head_mask is not None: + attention_probs = attention_probs * head_mask + + context_layer = torch.matmul(attention_probs, value_layer) + + context_layer = context_layer.permute(0, 2, 1, 3).contiguous() + new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) + context_layer = context_layer.view(new_context_layer_shape) + + outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) + + if self.is_decoder: + outputs = outputs + (past_key_value,) + return outputs + + +# Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->AlignText +class AlignTextSelfOutput(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states) + hidden_states = self.LayerNorm(hidden_states + input_tensor) + return hidden_states + + +# Copied from transformers.models.bert.modeling_bert.BertAttention with Bert->AlignText +class AlignTextAttention(nn.Module): + def __init__(self, config, position_embedding_type=None): + super().__init__() + self.self = AlignTextSelfAttention(config, position_embedding_type=position_embedding_type) + self.output = AlignTextSelfOutput(config) + self.pruned_heads = set() + + def prune_heads(self, heads): + if len(heads) == 0: + return + heads, index = find_pruneable_heads_and_indices( + heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads + ) + + # Prune linear layers + self.self.query = prune_linear_layer(self.self.query, index) + self.self.key = prune_linear_layer(self.self.key, index) + self.self.value = prune_linear_layer(self.self.value, index) + self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) + + # Update hyper params and store pruned heads + self.self.num_attention_heads = self.self.num_attention_heads - len(heads) + self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads + self.pruned_heads = self.pruned_heads.union(heads) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.FloatTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, + output_attentions: Optional[bool] = False, + ) -> Tuple[torch.Tensor]: + self_outputs = self.self( + hidden_states, + attention_mask, + head_mask, + encoder_hidden_states, + encoder_attention_mask, + past_key_value, + output_attentions, + ) + attention_output = self.output(self_outputs[0], hidden_states) + outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them + return outputs + + +# Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->AlignText +class AlignTextIntermediate(nn.Module): + def __init__(self, config): + 
super().__init__() + self.dense = nn.Linear(config.hidden_size, config.intermediate_size) + if isinstance(config.hidden_act, str): + self.intermediate_act_fn = ACT2FN[config.hidden_act] + else: + self.intermediate_act_fn = config.hidden_act + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + hidden_states = self.dense(hidden_states) + hidden_states = self.intermediate_act_fn(hidden_states) + return hidden_states + + +# Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->AlignText +class AlignTextOutput(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.intermediate_size, config.hidden_size) + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states) + hidden_states = self.LayerNorm(hidden_states + input_tensor) + return hidden_states + + +# Copied from transformers.models.bert.modeling_bert.BertLayer with Bert->AlignText +class AlignTextLayer(nn.Module): + def __init__(self, config): + super().__init__() + self.chunk_size_feed_forward = config.chunk_size_feed_forward + self.seq_len_dim = 1 + self.attention = AlignTextAttention(config) + self.is_decoder = config.is_decoder + self.add_cross_attention = config.add_cross_attention + if self.add_cross_attention: + if not self.is_decoder: + raise ValueError(f"{self} should be used as a decoder model if cross attention is added") + self.crossattention = AlignTextAttention(config, position_embedding_type="absolute") + self.intermediate = AlignTextIntermediate(config) + self.output = AlignTextOutput(config) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.FloatTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, + output_attentions: Optional[bool] = False, + ) -> Tuple[torch.Tensor]: + # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 + self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None + self_attention_outputs = self.attention( + hidden_states, + attention_mask, + head_mask, + output_attentions=output_attentions, + past_key_value=self_attn_past_key_value, + ) + attention_output = self_attention_outputs[0] + + # if decoder, the last output is tuple of self-attn cache + if self.is_decoder: + outputs = self_attention_outputs[1:-1] + present_key_value = self_attention_outputs[-1] + else: + outputs = self_attention_outputs[1:] # add self attentions if we output attention weights + + cross_attn_present_key_value = None + if self.is_decoder and encoder_hidden_states is not None: + if not hasattr(self, "crossattention"): + raise ValueError( + f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers" + " by setting `config.add_cross_attention=True`" + ) + + # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple + cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None + cross_attention_outputs = self.crossattention( + attention_output, + attention_mask, + head_mask, + encoder_hidden_states, + 
encoder_attention_mask, + cross_attn_past_key_value, + output_attentions, + ) + attention_output = cross_attention_outputs[0] + outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights + + # add cross-attn cache to positions 3,4 of present_key_value tuple + cross_attn_present_key_value = cross_attention_outputs[-1] + present_key_value = present_key_value + cross_attn_present_key_value + + layer_output = apply_chunking_to_forward( + self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output + ) + outputs = (layer_output,) + outputs + + # if decoder, return the attn key/values as the last output + if self.is_decoder: + outputs = outputs + (present_key_value,) + + return outputs + + def feed_forward_chunk(self, attention_output): + intermediate_output = self.intermediate(attention_output) + layer_output = self.output(intermediate_output, attention_output) + return layer_output + + +# Copied from transformers.models.bert.modeling_bert.BertEncoder with Bert->AlignText +class AlignTextEncoder(nn.Module): + def __init__(self, config): + super().__init__() + self.config = config + self.layer = nn.ModuleList([AlignTextLayer(config) for _ in range(config.num_hidden_layers)]) + self.gradient_checkpointing = False + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.FloatTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = False, + output_hidden_states: Optional[bool] = False, + return_dict: Optional[bool] = True, + ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]: + all_hidden_states = () if output_hidden_states else None + all_self_attentions = () if output_attentions else None + all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None + + if self.gradient_checkpointing and self.training: + if use_cache: + logger.warning_once( + "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." 
+ ) + use_cache = False + + next_decoder_cache = () if use_cache else None + for i, layer_module in enumerate(self.layer): + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + layer_head_mask = head_mask[i] if head_mask is not None else None + past_key_value = past_key_values[i] if past_key_values is not None else None + + if self.gradient_checkpointing and self.training: + + def create_custom_forward(module): + def custom_forward(*inputs): + return module(*inputs, past_key_value, output_attentions) + + return custom_forward + + layer_outputs = torch.utils.checkpoint.checkpoint( + create_custom_forward(layer_module), + hidden_states, + attention_mask, + layer_head_mask, + encoder_hidden_states, + encoder_attention_mask, + ) + else: + layer_outputs = layer_module( + hidden_states, + attention_mask, + layer_head_mask, + encoder_hidden_states, + encoder_attention_mask, + past_key_value, + output_attentions, + ) + + hidden_states = layer_outputs[0] + if use_cache: + next_decoder_cache += (layer_outputs[-1],) + if output_attentions: + all_self_attentions = all_self_attentions + (layer_outputs[1],) + if self.config.add_cross_attention: + all_cross_attentions = all_cross_attentions + (layer_outputs[2],) + + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + if not return_dict: + return tuple( + v + for v in [ + hidden_states, + next_decoder_cache, + all_hidden_states, + all_self_attentions, + all_cross_attentions, + ] + if v is not None + ) + return BaseModelOutputWithPastAndCrossAttentions( + last_hidden_state=hidden_states, + past_key_values=next_decoder_cache, + hidden_states=all_hidden_states, + attentions=all_self_attentions, + cross_attentions=all_cross_attentions, + ) + + +# Copied from transformers.models.bert.modeling_bert.BertPooler with Bert -> AlignText +class AlignTextPooler(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + self.activation = nn.Tanh() + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + # We "pool" the model by simply taking the hidden state corresponding + # to the first token. + first_token_tensor = hidden_states[:, 0] + pooled_output = self.dense(first_token_tensor) + pooled_output = self.activation(pooled_output) + return pooled_output + + +class AlignPreTrainedModel(PreTrainedModel): + """ + An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained + models. 
+ """ + + config_class = AlignConfig + base_model_prefix = "align" + supports_gradient_checkpointing = True + _keys_to_ignore_on_load_missing = [r"position_ids"] + + def _init_weights(self, module): + """Initialize the weights""" + if isinstance(module, (nn.Linear, nn.Conv2d)): + module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) + if module.bias is not None: + module.bias.data.zero_() + elif isinstance(module, AlignModel): + nn.init.xavier_uniform_(module.text_projection.weight) + module.text_projection.bias.data.zero_() + module.text_projection._is_hf_initialized = True + elif isinstance(module, nn.Embedding): + module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) + if module.padding_idx is not None: + module.weight.data[module.padding_idx].zero_() + if isinstance(module, nn.LayerNorm): + module.bias.data.zero_() + module.weight.data.fill_(1.0) + + def _set_gradient_checkpointing(self, module, value=False): + if isinstance(module, (AlignTextModel, AlignVisionModel)): + module.gradient_checkpointing = value + + +@add_start_docstrings( + """The text model from ALIGN without any head or projection on top.""", + ALIGN_START_DOCSTRING, +) +class AlignTextModel(AlignPreTrainedModel): + config_class = AlignTextConfig + + def __init__(self, config: AlignTextConfig, add_pooling_layer: bool = True): + super().__init__(config) + self.config = config + + self.embeddings = AlignTextEmbeddings(config) + self.encoder = AlignTextEncoder(config) + + self.pooler = AlignTextPooler(config) if add_pooling_layer else None + + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.embeddings.word_embeddings + + def set_input_embeddings(self, value): + self.embeddings.word_embeddings = value + + @add_start_docstrings_to_model_forward(ALIGN_TEXT_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=BaseModelOutputWithPoolingAndCrossAttentions, config_class=AlignTextConfig) + def forward( + self, + input_ids: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + token_type_ids: Optional[torch.Tensor] = None, + position_ids: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + inputs_embeds: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, BaseModelOutputWithPoolingAndCrossAttentions]: + r""" + Returns: + + Examples: + + ```python + >>> from transformers import AutoTokenizer, AlignTextModel + + >>> model = AlignTextModel.from_pretrained("kakaobrain/align-base") + >>> tokenizer = AutoTokenizer.from_pretrained("kakaobrain/align-base") + + >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt") + + >>> outputs = model(**inputs) + >>> last_hidden_state = outputs.last_hidden_state + >>> pooled_output = outputs.pooler_output # pooled (EOS token) states + ```""" + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if input_ids is not None and inputs_embeds is not None: + raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") + elif input_ids is not None: + input_shape = input_ids.size() + 
elif inputs_embeds is not None: + input_shape = inputs_embeds.size()[:-1] + else: + raise ValueError("You have to specify either input_ids or inputs_embeds") + + batch_size, seq_length = input_shape + device = input_ids.device if input_ids is not None else inputs_embeds.device + + if attention_mask is None: + attention_mask = torch.ones(((batch_size, seq_length)), device=device) + + if token_type_ids is None: + if hasattr(self.embeddings, "token_type_ids"): + buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length] + buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length) + token_type_ids = buffered_token_type_ids_expanded + else: + token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) + + # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] + # ourselves in which case we just need to make it broadcastable to all heads. + extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape) + + # Prepare head mask if needed + # 1.0 in head_mask indicate we keep the head + # attention_probs has shape bsz x n_heads x N x N + # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] + # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] + head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) + + embedding_output = self.embeddings( + input_ids=input_ids, + position_ids=position_ids, + token_type_ids=token_type_ids, + inputs_embeds=inputs_embeds, + ) + encoder_outputs = self.encoder( + embedding_output, + attention_mask=extended_attention_mask, + head_mask=head_mask, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + sequence_output = encoder_outputs[0] + pooled_output = self.pooler(sequence_output) if self.pooler is not None else None + + if not return_dict: + return (sequence_output, pooled_output) + encoder_outputs[1:] + + return BaseModelOutputWithPoolingAndCrossAttentions( + last_hidden_state=sequence_output, + pooler_output=pooled_output, + hidden_states=encoder_outputs.hidden_states, + attentions=encoder_outputs.attentions, + cross_attentions=encoder_outputs.cross_attentions, + ) + + +@add_start_docstrings( + """The vision model from ALIGN without any head or projection on top.""", + ALIGN_START_DOCSTRING, +) +class AlignVisionModel(AlignPreTrainedModel): + config_class = AlignVisionConfig + main_input_name = "pixel_values" + + def __init__(self, config: AlignVisionConfig): + super().__init__(config) + self.config = config + self.embeddings = AlignVisionEmbeddings(config) + self.encoder = AlignVisionEncoder(config) + + # Final pooling layer + if config.pooling_type == "mean": + self.pooler = nn.AvgPool2d(config.hidden_dim, ceil_mode=True) + elif config.pooling_type == "max": + self.pooler = nn.MaxPool2d(config.hidden_dim, ceil_mode=True) + else: + raise ValueError(f"config.pooling must be one of ['mean', 'max'] got {config.pooling}") + + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self) -> nn.Module: + return self.vision_model.embeddings.convolution + + @add_start_docstrings_to_model_forward(ALIGN_VISION_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=BaseModelOutputWithPoolingAndNoAttention, config_class=AlignVisionConfig) + def forward( + self, + pixel_values: Optional[torch.FloatTensor] = None, + output_hidden_states: 
Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, BaseModelOutputWithPoolingAndNoAttention]: + r""" + Returns: + + Examples: + + ```python + >>> from PIL import Image + >>> import requests + >>> from transformers import AutoProcessor, AlignVisionModel + + >>> model = AlignVisionModel.from_pretrained("kakaobrain/align-base") + >>> processor = AutoProcessor.from_pretrained("kakaobrain/align-base") + + >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" + >>> image = Image.open(requests.get(url, stream=True).raw) + + >>> inputs = processor(images=image, return_tensors="pt") + + >>> outputs = model(**inputs) + >>> last_hidden_state = outputs.last_hidden_state + >>> pooled_output = outputs.pooler_output # pooled CLS states + ```""" + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if pixel_values is None: + raise ValueError("You have to specify pixel_values") + + embedding_output = self.embeddings(pixel_values) + encoder_outputs = self.encoder( + embedding_output, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + # Apply pooling + last_hidden_state = encoder_outputs[0] + pooled_output = self.pooler(last_hidden_state) + # Reshape (batch_size, projection_dim, 1 , 1) -> (batch_size, projection_dim) + pooled_output = pooled_output.reshape(pooled_output.shape[:2]) + + if not return_dict: + return (last_hidden_state, pooled_output) + encoder_outputs[1:] + + return BaseModelOutputWithPoolingAndNoAttention( + last_hidden_state=last_hidden_state, + pooler_output=pooled_output, + hidden_states=encoder_outputs.hidden_states, + ) + + +@add_start_docstrings(ALIGN_START_DOCSTRING) +class AlignModel(AlignPreTrainedModel): + config_class = AlignConfig + + def __init__(self, config: AlignConfig): + super().__init__(config) + + if not isinstance(config.text_config, AlignTextConfig): + raise ValueError( + "config.text_config is expected to be of type AlignTextConfig but is of type" + f" {type(config.text_config)}." + ) + + if not isinstance(config.vision_config, AlignVisionConfig): + raise ValueError( + "config.vision_config is expected to be of type AlignVisionConfig but is of type" + f" {type(config.vision_config)}." 
+ ) + + text_config = config.text_config + vision_config = config.vision_config + + self.projection_dim = config.projection_dim + self.text_embed_dim = text_config.hidden_size + + self.text_model = AlignTextModel(text_config) + self.vision_model = AlignVisionModel(vision_config) + + self.text_projection = nn.Linear(self.text_embed_dim, self.projection_dim) + self.temperature = nn.Parameter(torch.ones([]) * self.config.temperature_init_value) + + # Initialize weights and apply final processing + self.post_init() + + @add_start_docstrings_to_model_forward(ALIGN_TEXT_INPUTS_DOCSTRING) + def get_text_features( + self, + input_ids: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + token_type_ids: Optional[torch.Tensor] = None, + position_ids: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + inputs_embeds: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> torch.FloatTensor: + r""" + Returns: + text_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The text embeddings obtained by + applying the projection layer to the pooled output of [`AlignTextModel`]. + + Examples: + + ```python + >>> from transformers import AutoTokenizer, AlignModel + + >>> model = AlignModel.from_pretrained("kakaobrain/align-base") + >>> tokenizer = AutoTokenizer.from_pretrained("kakaobrain/align-base") + + >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt") + >>> text_features = model.get_text_features(**inputs) + ```""" + # Use ALIGN model's config for some fields (if specified) instead of those of vision & text components. + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + text_outputs = self.text_model( + input_ids=input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + last_hidden_state = text_outputs[0][:, 0, :] + text_features = self.text_projection(last_hidden_state) + + return text_features + + @add_start_docstrings_to_model_forward(ALIGN_VISION_INPUTS_DOCSTRING) + def get_image_features( + self, + pixel_values: Optional[torch.FloatTensor] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> torch.FloatTensor: + r""" + Returns: + image_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The image embeddings obtained by + applying the projection layer to the pooled output of [`AlignVisionModel`]. 
+ + Examples: + + ```python + >>> from PIL import Image + >>> import requests + >>> from transformers import AutoProcessor, AlignModel + + >>> model = AlignModel.from_pretrained("kakaobrain/align-base") + >>> processor = AutoProcessor.from_pretrained("kakaobrain/align-base") + + >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" + >>> image = Image.open(requests.get(url, stream=True).raw) + + >>> inputs = processor(images=image, return_tensors="pt") + + >>> image_features = model.get_image_features(**inputs) + ```""" + # Use ALIGN model's config for some fields (if specified) instead of those of vision & text components. + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + vision_outputs = self.vision_model( + pixel_values=pixel_values, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + image_features = vision_outputs[1] # pooled_output + + return image_features + + @add_start_docstrings_to_model_forward(ALIGN_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=AlignOutput, config_class=AlignConfig) + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + pixel_values: Optional[torch.FloatTensor] = None, + attention_mask: Optional[torch.Tensor] = None, + token_type_ids: Optional[torch.Tensor] = None, + position_ids: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + inputs_embeds: Optional[torch.Tensor] = None, + return_loss: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, AlignOutput]: + r""" + Returns: + + Examples: + + ```python + >>> from PIL import Image + >>> import requests + >>> from transformers import AutoProcessor, AlignModel + + >>> model = AlignModel.from_pretrained("kakaobrain/align-base") + >>> processor = AutoProcessor.from_pretrained("kakaobrain/align-base") + + >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" + >>> image = Image.open(requests.get(url, stream=True).raw) + + >>> inputs = processor( + ... text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True + ... ) + + >>> outputs = model(**inputs) + >>> logits_per_image = outputs.logits_per_image # this is the image-text similarity score + >>> probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities + ```""" + # Use ALIGN model's config for some fields (if specified) instead of those of vision & text components. 
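+        # The rest of this method follows the contrastive dual-encoder recipe: encode image and
+        # text, project the text [CLS] state, L2-normalize both embeddings, divide the cosine
+        # similarities by the learned temperature, and optionally compute the symmetric loss.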
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + vision_outputs = self.vision_model( + pixel_values=pixel_values, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + text_outputs = self.text_model( + input_ids=input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + image_embeds = vision_outputs[1] + text_embeds = text_outputs[0][:, 0, :] + text_embeds = self.text_projection(text_embeds) + + # normalized features + image_embeds = image_embeds / image_embeds.norm(p=2, dim=-1, keepdim=True) + text_embeds = text_embeds / text_embeds.norm(p=2, dim=-1, keepdim=True) + + # cosine similarity as logits + logits_per_text = torch.matmul(text_embeds, image_embeds.t()) / self.temperature + logits_per_image = logits_per_text.t() + + loss = None + if return_loss: + loss = align_loss(logits_per_text) + + if not return_dict: + output = (logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs) + return ((loss,) + output) if loss is not None else output + + return AlignOutput( + loss=loss, + logits_per_image=logits_per_image, + logits_per_text=logits_per_text, + text_embeds=text_embeds, + image_embeds=image_embeds, + text_model_output=text_outputs, + vision_model_output=vision_outputs, + ) + + +model_name = "kakaobrain/align-base" +device = "cuda:0" +# device = "cpu" + +def get_model(): + config = AlignConfig.from_pretrained(model_name) + model = AlignModel.from_pretrained("kakaobrain/align-base", + return_dict=False).cuda() + return model + + +def get_scripted_model(): + from ._align_scripted import _get_scripted_model + return _get_scripted_model() + + +def get_input(batch_size): + seq_len = 64 + vocab_size = 30522 + inputs = { + 'input_ids': + torch.randint(low=0, + high=vocab_size, + size=(2, seq_len), + dtype=torch.int64), + 'token_type_ids': + torch.zeros((2, seq_len), dtype=torch.int64), + 'attention_mask': + torch.ones((2, seq_len), dtype=torch.int64), + 'pixel_values': + torch.randn([batch_size, 3, 289, 289]) + } + inputs = ( + inputs['input_ids'], + inputs['pixel_values'], + inputs['attention_mask'], + inputs['token_type_ids'], + ) + inputs = tuple((i.to(device) for i in inputs)) + return inputs, {} + + +if __name__ == "__main__": + with torch.no_grad(): + model = get_model().eval() + input_args, input_kwargs = get_input(batch_size=1) + outputs = model(*input_args, **input_kwargs) + print(outputs) \ No newline at end of file diff --git a/models/bart.py b/models/bart.py new file mode 100644 index 000000000000..0faebc73509c --- /dev/null +++ b/models/bart.py @@ -0,0 +1,1305 @@ +from transformers import AutoTokenizer, AutoConfig, AutoModel +import torch + +# low speedup +import torch.utils.checkpoint +from torch import nn +from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss +from collections import OrderedDict, UserDict +from dataclasses import fields +from typing import List, Optional, Tuple, Union +import copy +import math +import random +from transformers.modeling_outputs import ( + BaseModelOutput, + 
BaseModelOutputWithPastAndCrossAttentions, + CausalLMOutputWithCrossAttentions, + Seq2SeqLMOutput, + Seq2SeqModelOutput, + Seq2SeqQuestionAnsweringModelOutput, + Seq2SeqSequenceClassifierOutput, +) +from transformers.activations import ACT2FN +from transformers.models.bart.configuration_bart import BartConfig +from transformers.modeling_utils import PreTrainedModel +from transformers.utils import ( + add_code_sample_docstrings, + add_end_docstrings, + add_start_docstrings, + add_start_docstrings_to_model_forward, + logging, + replace_return_docstrings, +) + +_CHECKPOINT_FOR_DOC = "facebook/bart-base" +_CONFIG_FOR_DOC = "BartConfig" + +# Base model docstring +_EXPECTED_OUTPUT_SHAPE = [1, 8, 768] + +# SequenceClassification docstring +_CHECKPOINT_FOR_SEQUENCE_CLASSIFICATION = "valhalla/bart-large-sst2" +_SEQ_CLASS_EXPECTED_LOSS = 0.0 +_SEQ_CLASS_EXPECTED_OUTPUT = "'POSITIVE'" + +# QuestionAsnwering docstring +_CHECKPOINT_FOR_QA = "valhalla/bart-large-finetuned-squadv1" +_QA_EXPECTED_LOSS = 0.59 +_QA_EXPECTED_OUTPUT = "' nice puppet'" + + +BART_PRETRAINED_MODEL_ARCHIVE_LIST = [ + "facebook/bart-large", + # see all BART models at https://huggingface.co/models?filter=bart +] + + +def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int): + """ + Shift input ids one token to the right. + """ + shifted_input_ids = input_ids.new_zeros(input_ids.shape) + shifted_input_ids[:, 1:] = input_ids[:, :-1].clone() + shifted_input_ids[:, 0] = decoder_start_token_id + + if pad_token_id is None: + raise ValueError("self.model.config.pad_token_id has to be defined.") + # replace possible -100 values in labels by `pad_token_id` + shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id) + + return shifted_input_ids + + +def _make_causal_mask( + input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0 +): + """ + Make causal mask used for bi-directional self-attention. + """ + bsz, tgt_len = input_ids_shape + mask = torch.full((tgt_len, tgt_len), torch.tensor(torch.finfo(dtype).min, device=device), device=device) + mask_cond = torch.arange(mask.size(-1), device=device) + mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0) + mask = mask.to(dtype) + + if past_key_values_length > 0: + mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1) + return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length) + + +def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None): + """ + Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. + """ + bsz, src_len = mask.size() + tgt_len = tgt_len if tgt_len is not None else src_len + + expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype) + + inverted_mask = 1.0 - expanded_mask + + return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min) + + +class BartLearnedPositionalEmbedding(nn.Embedding): + """ + This module learns positional embeddings up to a fixed maximum size. + """ + + def __init__(self, num_embeddings: int, embedding_dim: int): + # Bart is set up so that if padding_idx is specified then offset the embedding ids by 2 + # and adjust num_embeddings appropriately. 
Other models don't have this hack + self.offset = 2 + super().__init__(num_embeddings + self.offset, embedding_dim) + + def forward(self, input_ids: torch.Tensor, past_key_values_length: int = 0): + """`input_ids' shape is expected to be [bsz x seqlen].""" + + bsz, seq_len = input_ids.shape[:2] + positions = torch.arange( + past_key_values_length, past_key_values_length + seq_len, dtype=torch.long, device=self.weight.device + ).expand(bsz, -1) + + return super().forward(positions + self.offset) + + +class BartAttention(nn.Module): + """Multi-headed attention from 'Attention Is All You Need' paper""" + + def __init__( + self, + embed_dim: int, + num_heads: int, + dropout: float = 0.0, + is_decoder: bool = False, + bias: bool = True, + ): + super().__init__() + self.embed_dim = embed_dim + self.num_heads = num_heads + self.dropout = dropout + self.head_dim = embed_dim // num_heads + + if (self.head_dim * num_heads) != self.embed_dim: + raise ValueError( + f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}" + f" and `num_heads`: {num_heads})." + ) + self.scaling = self.head_dim**-0.5 + self.is_decoder = is_decoder + + self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) + self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) + self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) + self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) + + def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): + return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() + + def forward( + self, + hidden_states: torch.Tensor, + key_value_states: Optional[torch.Tensor] = None, + past_key_value: Optional[Tuple[torch.Tensor]] = None, + attention_mask: Optional[torch.Tensor] = None, + layer_head_mask: Optional[torch.Tensor] = None, + output_attentions: bool = False, + ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: + """Input shape: Batch x Time x Channel""" + + # if key_value_states are provided this layer is used as a cross-attention layer + # for the decoder + is_cross_attention = key_value_states is not None + + bsz, tgt_len, _ = hidden_states.size() + + # get query proj + query_states = self.q_proj(hidden_states) * self.scaling + # get key, value proj + # `past_key_value[0].shape[2] == key_value_states.shape[1]` + # is checking that the `sequence_length` of the `past_key_value` is the same as + # the provided `key_value_states` to support prefix tuning + if ( + is_cross_attention + and past_key_value is not None + and past_key_value[0].shape[2] == key_value_states.shape[1] + ): + # reuse k,v, cross_attentions + key_states = past_key_value[0] + value_states = past_key_value[1] + elif is_cross_attention: + # cross_attentions + key_states = self._shape(self.k_proj(key_value_states), -1, bsz) + value_states = self._shape(self.v_proj(key_value_states), -1, bsz) + elif past_key_value is not None: + # reuse k, v, self_attention + key_states = self._shape(self.k_proj(hidden_states), -1, bsz) + value_states = self._shape(self.v_proj(hidden_states), -1, bsz) + key_states = torch.cat([past_key_value[0], key_states], dim=2) + value_states = torch.cat([past_key_value[1], value_states], dim=2) + else: + # self_attention + key_states = self._shape(self.k_proj(hidden_states), -1, bsz) + value_states = self._shape(self.v_proj(hidden_states), -1, bsz) + + if self.is_decoder: + # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. 
+ # Further calls to cross_attention layer can then reuse all cross-attention + # key/value_states (first "if" case) + # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of + # all previous decoder key/value_states. Further calls to uni-directional self-attention + # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) + # if encoder bi-directional self-attention `past_key_value` is always `None` + past_key_value = (key_states, value_states) + + proj_shape = (bsz * self.num_heads, -1, self.head_dim) + query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) + key_states = key_states.reshape(*proj_shape) + value_states = value_states.reshape(*proj_shape) + + src_len = key_states.size(1) + attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) + + if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): + raise ValueError( + f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" + f" {attn_weights.size()}" + ) + + if attention_mask is not None: + if attention_mask.size() != (bsz, 1, tgt_len, src_len): + raise ValueError( + f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}" + ) + attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask + attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) + + attn_weights = nn.functional.softmax(attn_weights, dim=-1) + + if layer_head_mask is not None: + if layer_head_mask.size() != (self.num_heads,): + raise ValueError( + f"Head mask for a single layer should be of size {(self.num_heads,)}, but is" + f" {layer_head_mask.size()}" + ) + attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) + + if output_attentions: + # this operation is a bit awkward, but it's required to + # make sure that attn_weights keeps its gradient. + # In order to do so, attn_weights have to be reshaped + # twice and have to be reused in the following + attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len) + else: + attn_weights_reshaped = None + + attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) + + attn_output = torch.bmm(attn_probs, value_states) + + if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim): + raise ValueError( + f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is" + f" {attn_output.size()}" + ) + + attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) + attn_output = attn_output.transpose(1, 2) + + # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be + # partitioned across GPUs when using tensor-parallelism. 
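+        # At this point `attn_output` has shape (bsz, tgt_len, num_heads, head_dim);
+        # the reshape below merges the last two dimensions back into `embed_dim`.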
+ attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim) + + attn_output = self.out_proj(attn_output) + + return attn_output, attn_weights_reshaped, past_key_value + + +class BartEncoderLayer(nn.Module): + def __init__(self, config: BartConfig): + super().__init__() + self.embed_dim = config.d_model + self.self_attn = BartAttention( + embed_dim=self.embed_dim, + num_heads=config.encoder_attention_heads, + dropout=config.attention_dropout, + ) + self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) + self.dropout = config.dropout + self.activation_fn = ACT2FN[config.activation_function] + self.activation_dropout = config.activation_dropout + self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim) + self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim) + self.final_layer_norm = nn.LayerNorm(self.embed_dim) + + def forward( + self, + hidden_states: torch.FloatTensor, + attention_mask: torch.FloatTensor, + layer_head_mask: torch.FloatTensor, + output_attentions: Optional[bool] = False, + ) -> Tuple[torch.FloatTensor, Optional[torch.FloatTensor]]: + """ + Args: + hidden_states (`torch.FloatTensor`): input to the layer of shape `(seq_len, batch, embed_dim)` + attention_mask (`torch.FloatTensor`): attention mask of size + `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. + layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size + `(encoder_attention_heads,)`. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. + """ + residual = hidden_states + hidden_states, attn_weights, _ = self.self_attn( + hidden_states=hidden_states, + attention_mask=attention_mask, + layer_head_mask=layer_head_mask, + output_attentions=output_attentions, + ) + hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) + hidden_states = residual + hidden_states + hidden_states = self.self_attn_layer_norm(hidden_states) + + residual = hidden_states + hidden_states = self.activation_fn(self.fc1(hidden_states)) + hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) + hidden_states = self.fc2(hidden_states) + hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) + hidden_states = residual + hidden_states + hidden_states = self.final_layer_norm(hidden_states) + + if hidden_states.dtype == torch.float16 and ( + torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any() + ): + clamp_value = torch.finfo(hidden_states.dtype).max - 1000 + hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) + + outputs = (hidden_states,) + + if output_attentions: + outputs += (attn_weights,) + + return outputs + + +class BartDecoderLayer(nn.Module): + def __init__(self, config: BartConfig): + super().__init__() + self.embed_dim = config.d_model + + self.self_attn = BartAttention( + embed_dim=self.embed_dim, + num_heads=config.decoder_attention_heads, + dropout=config.attention_dropout, + is_decoder=True, + ) + self.dropout = config.dropout + self.activation_fn = ACT2FN[config.activation_function] + self.activation_dropout = config.activation_dropout + + self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) + self.encoder_attn = BartAttention( + self.embed_dim, + config.decoder_attention_heads, + dropout=config.attention_dropout, + is_decoder=True, + ) + 
self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim) + self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim) + self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim) + self.final_layer_norm = nn.LayerNorm(self.embed_dim) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + encoder_hidden_states: Optional[torch.Tensor] = None, + encoder_attention_mask: Optional[torch.Tensor] = None, + layer_head_mask: Optional[torch.Tensor] = None, + cross_attn_layer_head_mask: Optional[torch.Tensor] = None, + past_key_value: Optional[Tuple[torch.Tensor]] = None, + output_attentions: Optional[bool] = False, + use_cache: Optional[bool] = True, + ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: + """ + Args: + hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` + attention_mask (`torch.FloatTensor`): attention mask of size + `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. + encoder_hidden_states (`torch.FloatTensor`): + cross attention input to the layer of shape `(batch, seq_len, embed_dim)` + encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size + `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. + layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size + `(encoder_attention_heads,)`. + cross_attn_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of + size `(decoder_attention_heads,)`. + past_key_value (`Tuple(torch.FloatTensor)`): cached past key and value projection states + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. 
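+            use_cache (`bool`, *optional*):
+                If set to `True`, the present key/value states are appended as the last element of the output
+                tuple and can be reused to speed up decoding (see `past_key_value`).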
+ """ + residual = hidden_states + + # Self Attention + # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 + self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None + # add present self-attn cache to positions 1,2 of present_key_value tuple + hidden_states, self_attn_weights, present_key_value = self.self_attn( + hidden_states=hidden_states, + past_key_value=self_attn_past_key_value, + attention_mask=attention_mask, + layer_head_mask=layer_head_mask, + output_attentions=output_attentions, + ) + hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) + hidden_states = residual + hidden_states + hidden_states = self.self_attn_layer_norm(hidden_states) + + # Cross-Attention Block + cross_attn_present_key_value = None + cross_attn_weights = None + if encoder_hidden_states is not None: + residual = hidden_states + + # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple + cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None + hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn( + hidden_states=hidden_states, + key_value_states=encoder_hidden_states, + attention_mask=encoder_attention_mask, + layer_head_mask=cross_attn_layer_head_mask, + past_key_value=cross_attn_past_key_value, + output_attentions=output_attentions, + ) + hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) + hidden_states = residual + hidden_states + hidden_states = self.encoder_attn_layer_norm(hidden_states) + + # add cross-attn to positions 3,4 of present_key_value tuple + present_key_value = present_key_value + cross_attn_present_key_value + + # Fully Connected + residual = hidden_states + hidden_states = self.activation_fn(self.fc1(hidden_states)) + hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) + hidden_states = self.fc2(hidden_states) + hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) + hidden_states = residual + hidden_states + hidden_states = self.final_layer_norm(hidden_states) + + outputs = (hidden_states,) + + if output_attentions: + outputs += (self_attn_weights, cross_attn_weights) + + if use_cache: + outputs += (present_key_value,) + + return outputs + + +class BartClassificationHead(nn.Module): + """Head for sentence-level classification tasks.""" + + def __init__( + self, + input_dim: int, + inner_dim: int, + num_classes: int, + pooler_dropout: float, + ): + super().__init__() + self.dense = nn.Linear(input_dim, inner_dim) + self.dropout = nn.Dropout(p=pooler_dropout) + self.out_proj = nn.Linear(inner_dim, num_classes) + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + hidden_states = self.dropout(hidden_states) + hidden_states = self.dense(hidden_states) + hidden_states = torch.tanh(hidden_states) + hidden_states = self.dropout(hidden_states) + hidden_states = self.out_proj(hidden_states) + return hidden_states + + +class BartPretrainedModel(PreTrainedModel): + config_class = BartConfig + base_model_prefix = "model" + supports_gradient_checkpointing = True + _keys_to_ignore_on_load_unexpected = [r"encoder.version", r"decoder.version"] + _no_split_modules = [r"BartEncoderLayer", r"BartDecoderLayer"] + + def _init_weights(self, module): + std = self.config.init_std + if isinstance(module, nn.Linear): + module.weight.data.normal_(mean=0.0, std=std) + if 
module.bias is not None: + module.bias.data.zero_() + elif isinstance(module, nn.Embedding): + module.weight.data.normal_(mean=0.0, std=std) + if module.padding_idx is not None: + module.weight.data[module.padding_idx].zero_() + + def _set_gradient_checkpointing(self, module, value=False): + if isinstance(module, (BartDecoder, BartEncoder)): + module.gradient_checkpointing = value + + @property + def dummy_inputs(self): + pad_token = self.config.pad_token_id + input_ids = torch.tensor([[0, 6, 10, 4, 2], [0, 8, 12, 2, pad_token]], device=self.device) + dummy_inputs = { + "attention_mask": input_ids.ne(pad_token), + "input_ids": input_ids, + } + return dummy_inputs + + +class PretrainedBartModel(BartPretrainedModel): + def __init_subclass__(self): + warnings.warn( + "The class `PretrainedBartModel` has been depreciated, please use `BartPretrainedModel` instead.", + FutureWarning, + ) + + +BART_START_DOCSTRING = r""" + This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the + library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads + etc.) + + This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. + Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage + and behavior. + + Parameters: + config ([`BartConfig`]): + Model configuration class with all the parameters of the model. Initializing with a config file does not + load the weights associated with the model, only the configuration. Check out the + [`~PreTrainedModel.from_pretrained`] method to load the model weights. +""" + +BART_GENERATION_EXAMPLE = r""" + Summarization example: + + ```python + >>> from transformers import AutoTokenizer, BartForConditionalGeneration + + >>> model = BartForConditionalGeneration.from_pretrained("facebook/bart-large-cnn") + >>> tokenizer = AutoTokenizer.from_pretrained("facebook/bart-large-cnn") + + >>> ARTICLE_TO_SUMMARIZE = ( + ... "PG&E stated it scheduled the blackouts in response to forecasts for high winds " + ... "amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were " + ... "scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow." + ... ) + >>> inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors="pt") + + >>> # Generate Summary + >>> summary_ids = model.generate(inputs["input_ids"], num_beams=2, min_length=0, max_length=20) + >>> tokenizer.batch_decode(summary_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] + 'PG&E scheduled the blackouts in response to forecasts for high winds amid dry conditions' + ``` + + Mask filling example: + + ```python + >>> from transformers import AutoTokenizer, BartForConditionalGeneration + + >>> tokenizer = AutoTokenizer.from_pretrained("facebook/bart-base") + >>> model = BartForConditionalGeneration.from_pretrained("facebook/bart-base") + + >>> TXT = "My friends are but they eat too many carbs." 
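+    >>> # TXT must contain the tokenizer's mask token, i.e. "My friends are <mask> but they eat too many carbs."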
+ >>> input_ids = tokenizer([TXT], return_tensors="pt")["input_ids"] + >>> logits = model(input_ids).logits + + >>> masked_index = (input_ids[0] == tokenizer.mask_token_id).nonzero().item() + >>> probs = logits[0, masked_index].softmax(dim=0) + >>> values, predictions = probs.topk(5) + + >>> tokenizer.decode(predictions).split() + ['not', 'good', 'healthy', 'great', 'very'] + ``` +""" + +BART_INPUTS_DOCSTRING = r""" + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide + it. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): + Indices of decoder input sequence tokens in the vocabulary. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are decoder input IDs?](../glossary#decoder-input-ids) + + Bart uses the `eos_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values` + is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). + + For translation and summarization training, `decoder_input_ids` should be provided. If no + `decoder_input_ids` is provided, the model will create this tensor by shifting the `input_ids` to the right + for denoising pre-training following the paper. + decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): + Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also + be used by default. + + If you want to change padding behavior, you should read [`modeling_bart._prepare_decoder_attention_mask`] + and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more + information on the default strategy. + head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*): + Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + decoder_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): + Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): + Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0, + 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. 
+ + encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*): + Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`) + `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of + hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. + past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape + `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape + `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. + + Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention + blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. + + If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that + don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all + `decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`torch.FloatTensor` of shape + `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you + can choose to directly pass an embedded representation. This is useful if you want more control over how to + convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. + decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*): + Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded + representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be + input (see `past_key_values`). This is useful if you want more control over how to convert + `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix. + + If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds` takes the value + of `inputs_embeds`. + use_cache (`bool`, *optional*): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see + `past_key_values`). + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. +""" + + +class BartEncoder(BartPretrainedModel): + """ + Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a + [`BartEncoderLayer`]. 
+ + Args: + config: BartConfig + embed_tokens (nn.Embedding): output embedding + """ + + def __init__(self, config: BartConfig, embed_tokens: Optional[nn.Embedding] = None): + super().__init__(config) + + self.dropout = config.dropout + self.layerdrop = config.encoder_layerdrop + + embed_dim = config.d_model + self.padding_idx = config.pad_token_id + self.max_source_positions = config.max_position_embeddings + self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0 + + self.embed_tokens = nn.Embedding(config.vocab_size, embed_dim, self.padding_idx) + + if embed_tokens is not None: + self.embed_tokens.weight = embed_tokens.weight + + self.embed_positions = BartLearnedPositionalEmbedding( + config.max_position_embeddings, + embed_dim, + ) + self.layers = nn.ModuleList([BartEncoderLayer(config) for _ in range(config.encoder_layers)]) + self.layernorm_embedding = nn.LayerNorm(embed_dim) + + self.gradient_checkpointing = False + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.embed_tokens + + def set_input_embeddings(self, value): + self.embed_tokens = value + + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, BaseModelOutput]: + r""" + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you + provide it. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*): + Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. + This is useful if you want more control over how to convert `input_ids` indices into associated vectors + than the model's internal embedding lookup matrix. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors + for more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
+ """ + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + # retrieve input_ids and inputs_embeds + if input_ids is not None and inputs_embeds is not None: + raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") + elif input_ids is not None: + input = input_ids + input_ids = input_ids.view(-1, input_ids.shape[-1]) + elif inputs_embeds is not None: + input = inputs_embeds[:, :, -1] + else: + raise ValueError("You have to specify either input_ids or inputs_embeds") + + if inputs_embeds is None: + inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale + + embed_pos = self.embed_positions(input) + embed_pos = embed_pos.to(inputs_embeds.device) + + hidden_states = inputs_embeds + embed_pos + hidden_states = self.layernorm_embedding(hidden_states) + hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) + + # expand attention_mask + if attention_mask is not None: + # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] + attention_mask = _expand_mask(attention_mask, inputs_embeds.dtype) + + encoder_states = () if output_hidden_states else None + all_attentions = () if output_attentions else None + + # check if head_mask has a correct number of layers specified if desired + if head_mask is not None: + if head_mask.size()[0] != (len(self.layers)): + raise ValueError( + f"The head_mask should be specified for {len(self.layers)} layers, but it is for" + f" {head_mask.size()[0]}." + ) + + for idx, encoder_layer in enumerate(self.layers): + if output_hidden_states: + encoder_states = encoder_states + (hidden_states,) + # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) + dropout_probability = random.uniform(0, 1) + if self.training and (dropout_probability < self.layerdrop): # skip the layer + layer_outputs = (None, None) + else: + if self.gradient_checkpointing and self.training: + + def create_custom_forward(module): + def custom_forward(*inputs): + return module(*inputs, output_attentions) + + return custom_forward + + layer_outputs = torch.utils.checkpoint.checkpoint( + create_custom_forward(encoder_layer), + hidden_states, + attention_mask, + (head_mask[idx] if head_mask is not None else None), + ) + else: + layer_outputs = encoder_layer( + hidden_states, + attention_mask, + layer_head_mask=(head_mask[idx] if head_mask is not None else None), + output_attentions=output_attentions, + ) + + hidden_states = layer_outputs[0] + + if output_attentions: + all_attentions = all_attentions + (layer_outputs[1],) + + if output_hidden_states: + encoder_states = encoder_states + (hidden_states,) + + if not return_dict: + return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) + return BaseModelOutput( + last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions + ) + + +class BartDecoder(BartPretrainedModel): + """ + Transformer decoder consisting of *config.decoder_layers* layers. 
Each layer is a [`BartDecoderLayer`] + + Args: + config: BartConfig + embed_tokens (nn.Embedding): output embedding + """ + + def __init__(self, config: BartConfig, embed_tokens: Optional[nn.Embedding] = None): + super().__init__(config) + self.dropout = config.dropout + self.layerdrop = config.decoder_layerdrop + self.padding_idx = config.pad_token_id + self.max_target_positions = config.max_position_embeddings + self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0 + + self.embed_tokens = nn.Embedding(config.vocab_size, config.d_model, self.padding_idx) + + if embed_tokens is not None: + self.embed_tokens.weight = embed_tokens.weight + + self.embed_positions = BartLearnedPositionalEmbedding( + config.max_position_embeddings, + config.d_model, + ) + self.layers = nn.ModuleList([BartDecoderLayer(config) for _ in range(config.decoder_layers)]) + self.layernorm_embedding = nn.LayerNorm(config.d_model) + + self.gradient_checkpointing = False + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.embed_tokens + + def set_input_embeddings(self, value): + self.embed_tokens = value + + def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length): + # create causal mask + # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] + combined_attention_mask = None + if input_shape[-1] > 1: + combined_attention_mask = _make_causal_mask( + input_shape, + inputs_embeds.dtype, + device=inputs_embeds.device, + past_key_values_length=past_key_values_length, + ) + + if attention_mask is not None: + # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] + expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to( + inputs_embeds.device + ) + combined_attention_mask = ( + expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask + ) + + return combined_attention_mask + + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + encoder_attention_mask: Optional[torch.LongTensor] = None, + head_mask: Optional[torch.Tensor] = None, + cross_attn_head_mask: Optional[torch.Tensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]: + r""" + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you + provide it. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. 
+ + [What are attention masks?](../glossary#attention-mask) + encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*): + Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention + of the decoder. + encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*): + Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values + selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): + Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): + Mask to nullify selected heads of the cross-attention modules in the decoder to avoid performing + cross-attention on hidden heads. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of + shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of + shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. + + Contains pre-computed hidden-states (key and values in the self-attention blocks and in the + cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. + + If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those + that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of + all `decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`torch.FloatTensor` of + shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing + `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more + control over how to convert `input_ids` indices into associated vectors than the model's internal + embedding lookup matrix. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors + for more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
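+            use_cache (`bool`, *optional*):
+                If set to `True`, `past_key_values` key value states are returned and can be used to speed up
+                decoding (see `past_key_values`).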
+ """ + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + use_cache = use_cache if use_cache is not None else self.config.use_cache + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + # retrieve input_ids and inputs_embeds + if input_ids is not None and inputs_embeds is not None: + raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time") + elif input_ids is not None: + input = input_ids + input_shape = input.shape + input_ids = input_ids.view(-1, input_shape[-1]) + elif inputs_embeds is not None: + input_shape = inputs_embeds.size()[:-1] + input = inputs_embeds[:, :, -1] + else: + raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds") + + # past_key_values_length + past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 + + if inputs_embeds is None: + inputs_embeds = self.embed_tokens(input) * self.embed_scale + + attention_mask = self._prepare_decoder_attention_mask( + attention_mask, input_shape, inputs_embeds, past_key_values_length + ) + + # expand encoder attention mask + if encoder_hidden_states is not None and encoder_attention_mask is not None: + # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] + encoder_attention_mask = _expand_mask(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]) + + # embed positions + positions = self.embed_positions(input, past_key_values_length) + positions = positions.to(inputs_embeds.device) + + hidden_states = inputs_embeds + positions + hidden_states = self.layernorm_embedding(hidden_states) + + hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) + + if self.gradient_checkpointing and self.training: + if use_cache: + logger.warning_once( + "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." + ) + use_cache = False + + # decoder layers + all_hidden_states = () if output_hidden_states else None + all_self_attns = () if output_attentions else None + all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None + next_decoder_cache = () if use_cache else None + + # check if head_mask/cross_attn_head_mask has a correct number of layers specified if desired + for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ["head_mask", "cross_attn_head_mask"]): + if attn_mask is not None: + if attn_mask.size()[0] != (len(self.layers)): + raise ValueError( + f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for" + f" {head_mask.size()[0]}." 
+ ) + + for idx, decoder_layer in enumerate(self.layers): + # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) + if output_hidden_states: + all_hidden_states += (hidden_states,) + dropout_probability = random.uniform(0, 1) + if self.training and (dropout_probability < self.layerdrop): + continue + + past_key_value = past_key_values[idx] if past_key_values is not None else None + + if self.gradient_checkpointing and self.training: + + def create_custom_forward(module): + def custom_forward(*inputs): + # None for past_key_value + return module(*inputs, output_attentions, use_cache) + + return custom_forward + + layer_outputs = torch.utils.checkpoint.checkpoint( + create_custom_forward(decoder_layer), + hidden_states, + attention_mask, + encoder_hidden_states, + encoder_attention_mask, + head_mask[idx] if head_mask is not None else None, + cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None, + None, + ) + else: + layer_outputs = decoder_layer( + hidden_states, + attention_mask=attention_mask, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + layer_head_mask=(head_mask[idx] if head_mask is not None else None), + cross_attn_layer_head_mask=( + cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None + ), + past_key_value=past_key_value, + output_attentions=output_attentions, + use_cache=use_cache, + ) + hidden_states = layer_outputs[0] + if use_cache: + # next_decoder_cache.append(layer_outputs[3 if output_attentions else 1]) + next_decoder_cache += (layer_outputs[3 if output_attentions else 1],) + + if output_attentions: + all_self_attns += (layer_outputs[1],) + + if encoder_hidden_states is not None: + all_cross_attentions += (layer_outputs[2],) + + # add hidden states from the last decoder layer + if output_hidden_states: + all_hidden_states += (hidden_states,) + + next_cache = tuple(next_decoder_cache) if use_cache else None + if not return_dict: + return tuple( + v + for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions] + if v is not None + ) + return BaseModelOutputWithPastAndCrossAttentions( + last_hidden_state=hidden_states, + past_key_values=next_cache, + hidden_states=all_hidden_states, + attentions=all_self_attns, + cross_attentions=all_cross_attentions, + ) + + +@add_start_docstrings( + "The bare BART Model outputting raw hidden-states without any specific head on top.", + BART_START_DOCSTRING, +) +class BartModel(BartPretrainedModel): + _keys_to_ignore_on_load_missing = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight"] + + def __init__(self, config: BartConfig): + super().__init__(config) + + padding_idx, vocab_size = config.pad_token_id, config.vocab_size + self.shared = nn.Embedding(vocab_size, config.d_model, padding_idx) + + self.encoder = BartEncoder(config, self.shared) + self.decoder = BartDecoder(config, self.shared) + + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.shared + + def set_input_embeddings(self, value): + self.shared = value + self.encoder.embed_tokens = self.shared + self.decoder.embed_tokens = self.shared + + def get_encoder(self): + return self.encoder + + def get_decoder(self): + return self.decoder + + @add_start_docstrings_to_model_forward(BART_INPUTS_DOCSTRING) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=Seq2SeqModelOutput, + config_class=_CONFIG_FOR_DOC, + expected_output=_EXPECTED_OUTPUT_SHAPE, + 
) + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + decoder_input_ids: Optional[torch.LongTensor] = None, + decoder_attention_mask: Optional[torch.LongTensor] = None, + head_mask: Optional[torch.Tensor] = None, + decoder_head_mask: Optional[torch.Tensor] = None, + cross_attn_head_mask: Optional[torch.Tensor] = None, + encoder_outputs: Optional[List[torch.FloatTensor]] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + decoder_inputs_embeds: Optional[torch.FloatTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, Seq2SeqModelOutput]: + # different to other models, Bart automatically creates decoder_input_ids from + # input_ids if no decoder_input_ids are provided + if decoder_input_ids is None and decoder_inputs_embeds is None: + if input_ids is None: + raise ValueError( + "If no `decoder_input_ids` or `decoder_inputs_embeds` are " + "passed, `input_ids` cannot be `None`. Please pass either " + "`input_ids` or `decoder_input_ids` or `decoder_inputs_embeds`." + ) + + decoder_input_ids = shift_tokens_right( + input_ids, self.config.pad_token_id, self.config.decoder_start_token_id + ) + + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + use_cache = use_cache if use_cache is not None else self.config.use_cache + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if encoder_outputs is None: + encoder_outputs = self.encoder( + input_ids=input_ids, + attention_mask=attention_mask, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True + elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): + encoder_outputs = BaseModelOutput( + last_hidden_state=encoder_outputs[0], + hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, + attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, + ) + + # decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn) + decoder_outputs = self.decoder( + input_ids=decoder_input_ids, + attention_mask=decoder_attention_mask, + encoder_hidden_states=encoder_outputs[0], + encoder_attention_mask=attention_mask, + head_mask=decoder_head_mask, + cross_attn_head_mask=cross_attn_head_mask, + past_key_values=past_key_values, + inputs_embeds=decoder_inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + if not return_dict: + return decoder_outputs + encoder_outputs + + return Seq2SeqModelOutput( + last_hidden_state=decoder_outputs.last_hidden_state, + past_key_values=decoder_outputs.past_key_values, + decoder_hidden_states=decoder_outputs.hidden_states, + decoder_attentions=decoder_outputs.attentions, + cross_attentions=decoder_outputs.cross_attentions, + encoder_last_hidden_state=encoder_outputs.last_hidden_state, + encoder_hidden_states=encoder_outputs.hidden_states, + 
encoder_attentions=encoder_outputs.attentions, + ) + + +model_name = "facebook/bart-base" +device = "cuda:0" +# device = "cpu" + +def get_model(): + config = AutoConfig.from_pretrained(model_name) + config.return_dict = False + # model = AutoModel.from_config(config).to(device) + model = BartModel(config).to(device) + print(model) + return model + + +def get_input(batch_size): + # tokenizer = AutoTokenizer.from_pretrained(model_name) + # inputs = tokenizer("Hello world! Hello world! Hello world! Hello world! Hello world!", return_tensors="pt").to(device) + # print(inputs.keys()) + # print(inputs['input_ids'].shape, inputs['attention_mask'].shape) + # return (inputs['input_ids'], inputs['attention_mask']), {} + vocab_size = 50265 + seq_len = 256 + input_ids = torch.randint(0, vocab_size, (batch_size, seq_len), dtype=torch.int64).to(device) + attention_mask = torch.ones((batch_size, seq_len), dtype=torch.int64).to(device) + return (input_ids, attention_mask), {} + + +if __name__ == "__main__": + model = get_model() + input_args, input_kwargs = get_input(batch_size=1) + print([x.shape for x in input_args]) + outputs = model(*input_args, **input_kwargs) + print(outputs) diff --git a/models/bert.py b/models/bert.py new file mode 100644 index 000000000000..2cbe5d1af1e2 --- /dev/null +++ b/models/bert.py @@ -0,0 +1,388 @@ +import torch +from transformers import BertConfig +import logging +import numpy as np +# import onnx +import time + +import math +import os +import warnings +from dataclasses import dataclass +from typing import List, Optional, Tuple, Union + +import torch +import torch.utils.checkpoint +from torch import nn +from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss + +from transformers.activations import ACT2FN +from transformers.modeling_outputs import ( + BaseModelOutputWithPastAndCrossAttentions, + BaseModelOutputWithPoolingAndCrossAttentions, + CausalLMOutputWithCrossAttentions, + MaskedLMOutput, + MultipleChoiceModelOutput, + NextSentencePredictorOutput, + QuestionAnsweringModelOutput, + SequenceClassifierOutput, + TokenClassifierOutput, +) +from transformers.modeling_utils import PreTrainedModel +from transformers.pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer +from transformers.utils import ( + ModelOutput, + add_code_sample_docstrings, + add_start_docstrings, + add_start_docstrings_to_model_forward, + logging, + replace_return_docstrings, +) +from transformers.models.bert.configuration_bert import BertConfig + + + +class BertSelfAttention(nn.Module): + def __init__(self, config, position_embedding_type=None): + super().__init__() + if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): + raise ValueError( + f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " + f"heads ({config.num_attention_heads})" + ) + + self.num_attention_heads = config.num_attention_heads + self.attention_head_size = int(config.hidden_size / config.num_attention_heads) + self.all_head_size = self.num_attention_heads * self.attention_head_size + + self.query = nn.Linear(config.hidden_size, self.all_head_size) + self.key = nn.Linear(config.hidden_size, self.all_head_size) + self.value = nn.Linear(config.hidden_size, self.all_head_size) + + self.dropout = nn.Dropout(config.attention_probs_dropout_prob) + self.position_embedding_type = position_embedding_type or getattr( + config, "position_embedding_type", "absolute" + ) + if self.position_embedding_type == 
"relative_key" or self.position_embedding_type == "relative_key_query": + self.max_position_embeddings = config.max_position_embeddings + self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size) + + self.is_decoder = config.is_decoder + + def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor: + new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) + x = x.view(new_x_shape) + return x.permute(0, 2, 1, 3) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.FloatTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, + output_attentions: Optional[bool] = False, + ) -> Tuple[torch.Tensor]: + mixed_query_layer = self.query(hidden_states) + + # If this is instantiated as a cross-attention module, the keys + # and values come from an encoder; the attention mask needs to be + # such that the encoder's padding tokens are not attended to. + is_cross_attention = encoder_hidden_states is not None + + if is_cross_attention and past_key_value is not None: + # reuse k,v, cross_attentions + key_layer = past_key_value[0] + value_layer = past_key_value[1] + attention_mask = encoder_attention_mask + elif is_cross_attention: + key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) + value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) + attention_mask = encoder_attention_mask + elif past_key_value is not None: + key_layer = self.transpose_for_scores(self.key(hidden_states)) + value_layer = self.transpose_for_scores(self.value(hidden_states)) + key_layer = torch.cat([past_key_value[0], key_layer], dim=2) + value_layer = torch.cat([past_key_value[1], value_layer], dim=2) + else: + key_layer = self.transpose_for_scores(self.key(hidden_states)) + value_layer = self.transpose_for_scores(self.value(hidden_states)) + + query_layer = self.transpose_for_scores(mixed_query_layer) + + use_cache = past_key_value is not None + if self.is_decoder: + # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. + # Further calls to cross_attention layer can then reuse all cross-attention + # key/value_states (first "if" case) + # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of + # all previous decoder key/value_states. Further calls to uni-directional self-attention + # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) + # if encoder bi-directional self-attention `past_key_value` is always `None` + past_key_value = (key_layer, value_layer) + + # Take the dot product between "query" and "key" to get the raw attention scores. 
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) + + if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": + query_length, key_length = query_layer.shape[2], key_layer.shape[2] + if use_cache: + position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view( + -1, 1 + ) + else: + position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1) + position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1) + distance = position_ids_l - position_ids_r + + positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1) + positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility + + if self.position_embedding_type == "relative_key": + relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) + attention_scores = attention_scores + relative_position_scores + elif self.position_embedding_type == "relative_key_query": + relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) + relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding) + attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key + + attention_scores = attention_scores / math.sqrt(self.attention_head_size) + if attention_mask is not None: + # Apply the attention mask is (precomputed for all layers in BertModel forward() function) + attention_scores = attention_scores + attention_mask + + # Normalize the attention scores to probabilities. + attention_probs = nn.functional.softmax(attention_scores, dim=-1) + + # This is actually dropping out entire tokens to attend to, which might + # seem a bit unusual, but is taken from the original Transformer paper. 
+ attention_probs = self.dropout(attention_probs) + + # Mask heads if we want to + if head_mask is not None: + attention_probs = attention_probs * head_mask + + context_layer = torch.matmul(attention_probs, value_layer) + + context_layer = context_layer.permute(0, 2, 1, 3).contiguous() + # new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) + # MODIFIED for run dynamo.dynamic + new_context_layer_shape = context_layer.size()[:-2] + (768,) + context_layer = context_layer.view(new_context_layer_shape) + + outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) + + if self.is_decoder: + outputs = outputs + (past_key_value,) + return outputs + + +class BertSelfOutput(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states) + hidden_states = self.LayerNorm(hidden_states + input_tensor) + return hidden_states + + +class BertAttention(nn.Module): + def __init__(self, config, position_embedding_type=None): + super().__init__() + self.self = BertSelfAttention(config, position_embedding_type=position_embedding_type) + self.output = BertSelfOutput(config) + self.pruned_heads = set() + + def prune_heads(self, heads): + if len(heads) == 0: + return + heads, index = find_pruneable_heads_and_indices( + heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads + ) + + # Prune linear layers + self.self.query = prune_linear_layer(self.self.query, index) + self.self.key = prune_linear_layer(self.self.key, index) + self.self.value = prune_linear_layer(self.self.value, index) + self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) + + # Update hyper params and store pruned heads + self.self.num_attention_heads = self.self.num_attention_heads - len(heads) + self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads + self.pruned_heads = self.pruned_heads.union(heads) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.FloatTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, + output_attentions: Optional[bool] = False, + ) -> Tuple[torch.Tensor]: + self_outputs = self.self( + hidden_states, + attention_mask, + head_mask, + encoder_hidden_states, + encoder_attention_mask, + past_key_value, + output_attentions, + ) + attention_output = self.output(self_outputs[0], hidden_states) + outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them + return outputs + + +class BertIntermediate(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.intermediate_size) + if isinstance(config.hidden_act, str): + self.intermediate_act_fn = ACT2FN[config.hidden_act] + else: + self.intermediate_act_fn = config.hidden_act + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + hidden_states = self.dense(hidden_states) + hidden_states = self.intermediate_act_fn(hidden_states) + return 
hidden_states + + +class BertOutput(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.intermediate_size, config.hidden_size) + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states) + hidden_states = self.LayerNorm(hidden_states + input_tensor) + return hidden_states + + +class BertLayer(nn.Module): + def __init__(self, config): + super().__init__() + self.chunk_size_feed_forward = config.chunk_size_feed_forward + self.seq_len_dim = 1 + self.attention = BertAttention(config) + self.is_decoder = config.is_decoder + self.add_cross_attention = config.add_cross_attention + if self.add_cross_attention: + if not self.is_decoder: + raise ValueError(f"{self} should be used as a decoder model if cross attention is added") + self.crossattention = BertAttention(config, position_embedding_type="absolute") + self.intermediate = BertIntermediate(config) + self.output = BertOutput(config) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.FloatTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, + output_attentions: Optional[bool] = False, + ) -> Tuple[torch.Tensor]: + # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 + self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None + self_attention_outputs = self.attention( + hidden_states, + attention_mask, + head_mask, + output_attentions=output_attentions, + past_key_value=self_attn_past_key_value, + ) + attention_output = self_attention_outputs[0] + + # if decoder, the last output is tuple of self-attn cache + if self.is_decoder: + outputs = self_attention_outputs[1:-1] + present_key_value = self_attention_outputs[-1] + else: + outputs = self_attention_outputs[1:] # add self attentions if we output attention weights + + cross_attn_present_key_value = None + if self.is_decoder and encoder_hidden_states is not None: + if not hasattr(self, "crossattention"): + raise ValueError( + f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers" + " by setting `config.add_cross_attention=True`" + ) + + # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple + cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None + cross_attention_outputs = self.crossattention( + attention_output, + attention_mask, + head_mask, + encoder_hidden_states, + encoder_attention_mask, + cross_attn_past_key_value, + output_attentions, + ) + attention_output = cross_attention_outputs[0] + outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights + + # add cross-attn cache to positions 3,4 of present_key_value tuple + cross_attn_present_key_value = cross_attention_outputs[-1] + present_key_value = present_key_value + cross_attn_present_key_value + + layer_output = apply_chunking_to_forward( + self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output + ) + outputs = (layer_output,) + outputs + + # if decoder, return 
the attn key/values as the last output + if self.is_decoder: + outputs = outputs + (present_key_value,) + + return outputs + + def feed_forward_chunk(self, attention_output): + intermediate_output = self.intermediate(attention_output) + layer_output = self.output(intermediate_output, attention_output) + return layer_output + +class BertModel(torch.nn.Module): + def __init__(self, config): + super().__init__() + self.layers = torch.nn.ModuleList( + [BertLayer(config) for _ in range(config.num_hidden_layers)] + ) + + def forward(self, x): + for layer in self.layers: + x = layer(x)[0] + return x[0] + + +def get_model(): + model = BertModel(BertConfig(vocab_size=32768, hidden_size=768, + num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, return_dict=False)).cuda() + return model + +def get_scripted_model(): + from ._bert_scripted import _get_scripted_model + return _get_scripted_model() + +def get_input(batch_size, seq_len=256): + inputs = torch.randn((batch_size, seq_len, 768), device='cuda') + return (inputs,), {} + + +# if __name__ == '__main__': +# model = get_model() +# input_args, input_kwargs = get_input(8, 80) +# model(*input_args, **input_kwargs) diff --git a/models/blockdrop.py b/models/blockdrop.py new file mode 100644 index 000000000000..f2eb8d84c420 --- /dev/null +++ b/models/blockdrop.py @@ -0,0 +1,286 @@ +import torch.nn as nn +import math +import torch +import torch.nn.functional as F +from utils import read_bin + +data_dir = './data/blockdrop' + +class Identity(nn.Module): + def __init__(self): + super(Identity, self).__init__() + def forward(self, x): + return x + +class Flatten(nn.Module): + def __init__(self): + super(Flatten, self).__init__() + def forward(self, x): + return x.view(x.size(0), -1) + +def conv3x3(in_planes, out_planes, stride=1): + "3x3 convolution with padding" + return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False) + +class BasicBlock(nn.Module): + expansion = 1 + + def __init__(self, inplanes, planes, stride=1): + super(BasicBlock, self).__init__() + self.conv1 = conv3x3(inplanes, planes, stride) + self.bn1 = nn.BatchNorm2d(planes) + self.conv2 = conv3x3(planes, planes) + self.bn2 = nn.BatchNorm2d(planes) + + def forward(self, x): + + out = self.conv1(x) + out = self.bn1(out) + out = F.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + + return out + +class Bottleneck(nn.Module): + expansion = 4 + + def __init__(self, inplanes, planes, stride=1): + super(Bottleneck, self).__init__() + self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False) + self.bn1 = nn.BatchNorm2d(planes) + self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False) + self.bn2 = nn.BatchNorm2d(planes) + self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False) + self.bn3 = nn.BatchNorm2d(planes * 4) + self.relu = nn.ReLU(inplace=True) + + def forward(self, x): + residual = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + out = self.relu(out) + + out = self.conv3(out) + out = self.bn3(out) + + return out + +class DownsampleB(nn.Module): + + def __init__(self, nIn, nOut, stride): + super(DownsampleB, self).__init__() + self.avg = nn.AvgPool2d(stride) + self.expand_ratio = nOut // nIn + + def forward(self, x): + x = self.avg(x) + return torch.cat([x] + [x.mul(0)] * (self.expand_ratio - 1), 1) + +#--------------------------------------------------------------------------------------------------# 
+class FlatResNet(nn.Module): + + def seed(self, x): + # x = self.relu(self.bn1(self.conv1(x))) -- CIFAR + # x = self.maxpool(self.relu(self.bn1(self.conv1(x)))) -- ImageNet + raise NotImplementedError + + # run a variable policy batch through the resnet implemented as a full mask over the residual + # fast to train, non-indicative of time saving (use forward_single instead) + def forward(self, x, policy): + + x = self.seed(x) + + t = 0 + for segment, num_blocks in enumerate(self.layer_config): + for b in range(num_blocks): + action = policy[:,t].contiguous() + residual = self.ds[segment](x) if b==0 else x + + # early termination if all actions in the batch are zero + if action.data.sum() == 0: + x = residual + t += 1 + continue + + action_mask = action.float().view(-1,1,1,1) + fx = F.relu(residual + self.blocks[segment][b](x)) + x = fx*action_mask + residual*(1-action_mask) + t += 1 + + x = self.avgpool(x) + x = x.view(x.size(0), -1) + x = self.fc(x) + return x + + # run a single, fixed policy for all items in the batch + # policy is a (15,) vector. Use with batch_size=1 for profiling + def forward_single(self, x, policy): + x = self.seed(x) + + t = 0 + for segment, num_blocks in enumerate(self.layer_config): + for b in range(num_blocks): + residual = self.ds[segment](x) if b==0 else x + if policy[t]==1: + x = residual + self.blocks[segment][b](x) + x = F.relu(x) + else: + x = residual + t += 1 + + x = self.avgpool(x) + x = x.view(x.size(0), -1) + x = self.fc(x) + return x + + + def forward_full(self, x): + x = self.seed(x) + + for segment, num_blocks in enumerate(self.layer_config): + for b in range(num_blocks): + residual = self.ds[segment](x) if b==0 else x + x = F.relu(residual + self.blocks[segment][b](x)) + + x = self.avgpool(x) + x = x.view(x.size(0), -1) + x = self.fc(x) + return x + + + +# Smaller Flattened Resnet, tailored for CIFAR +class FlatResNet32(FlatResNet): + + def __init__(self, block, layers, num_classes=10): + super(FlatResNet32, self).__init__() + + self.inplanes = 16 + self.conv1 = conv3x3(3, 16) + self.bn1 = nn.BatchNorm2d(16) + self.relu = nn.ReLU(inplace=True) + self.avgpool = nn.AvgPool2d(8) + + strides = [1, 2, 2] + filt_sizes = [16, 32, 64] + self.blocks, self.ds = [], [] + for idx, (filt_size, num_blocks, stride) in enumerate(zip(filt_sizes, layers, strides)): + blocks, ds = self._make_layer(block, filt_size, num_blocks, stride=stride) + self.blocks.append(nn.ModuleList(blocks)) + self.ds.append(ds) + + self.blocks = nn.ModuleList(self.blocks) + self.ds = nn.ModuleList(self.ds) + self.fc = nn.Linear(64 * block.expansion, num_classes) + self.fc_dim = 64 * block.expansion + + self.layer_config = layers + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels + m.weight.data.normal_(0, math.sqrt(2. / n)) + elif isinstance(m, nn.BatchNorm2d): + m.weight.data.fill_(1) + m.bias.data.zero_() + + def seed(self, x): + x = self.relu(self.bn1(self.conv1(x))) + return x + + def _make_layer(self, block, planes, blocks, stride=1): + + downsample = nn.Sequential() + if stride != 1 or self.inplanes != planes * block.expansion: + downsample = DownsampleB(self.inplanes, planes * block.expansion, stride) + + layers = [block(self.inplanes, planes, stride)] + self.inplanes = planes * block.expansion + for i in range(1, blocks): + layers.append(block(self.inplanes, planes, 1)) + + return layers, downsample + + +# Regular Flattened Resnet, tailored for Imagenet etc. 
+class FlatResNet224(FlatResNet): + + def __init__(self, block, layers, num_classes=1000): + self.inplanes = 64 + super(FlatResNet224, self).__init__() + self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False) + self.bn1 = nn.BatchNorm2d(64) + self.relu = nn.ReLU(inplace=True) + self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + + strides = [1, 2, 2, 2] + filt_sizes = [64, 128, 256, 512] + self.blocks, self.ds = [], [] + for idx, (filt_size, num_blocks, stride) in enumerate(zip(filt_sizes, layers, strides)): + blocks, ds = self._make_layer(block, filt_size, num_blocks, stride=stride) + self.blocks.append(nn.ModuleList(blocks)) + self.ds.append(ds) + + self.blocks = nn.ModuleList(self.blocks) + self.ds = nn.ModuleList(self.ds) + + self.avgpool = nn.AvgPool2d(7) + self.fc = nn.Linear(512 * block.expansion, num_classes) + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels + m.weight.data.normal_(0, math.sqrt(2. / n)) + elif isinstance(m, nn.BatchNorm2d): + m.weight.data.fill_(1) + m.bias.data.zero_() + + self.layer_config = layers + + def seed(self, x): + x = self.maxpool(self.relu(self.bn1(self.conv1(x)))) + return x + + def _make_layer(self, block, planes, blocks, stride=1): + + downsample = nn.Sequential() + if stride != 1 or self.inplanes != planes * block.expansion: + downsample = nn.Sequential( + nn.Conv2d(self.inplanes, planes * block.expansion, + kernel_size=1, stride=stride, bias=False), + nn.BatchNorm2d(planes * block.expansion), + ) + + layers = [block(self.inplanes, planes, stride)] + self.inplanes = planes * block.expansion + for i in range(1, blocks): + layers.append(block(self.inplanes, planes)) + + return layers, downsample + + +num_layers = 15 + +def get_model(): + return FlatResNet32(BasicBlock, [5, 5, 5]).cuda() + +def get_input(batch_size): + return (torch.randn(batch_size, 3, 32, 32).cuda(), torch.randint(0, 2, (batch_size, 15)).cuda()), {} + +def get_dynamic_inputs(batch_size, num_inputs): + inp = torch.randn(batch_size, 3, 32, 32).cuda() + policies = read_bin(data_dir + '/policies').cuda() + sampled_indices = torch.randint(0, policies.shape[0], (num_inputs * batch_size,)).cuda() + policy = policies[sampled_indices] + policy = policy.view(num_inputs, batch_size, -1) + policy = [policy[i] for i in range(num_inputs)] + return [(inp, policy[i]) for i in range(num_inputs)], [{} for _ in range(num_inputs)] + + diff --git a/models/deberta.py b/models/deberta.py new file mode 100644 index 000000000000..ba7266efed7c --- /dev/null +++ b/models/deberta.py @@ -0,0 +1,1058 @@ +from transformers import AutoTokenizer, AutoConfig, AutoModel +import torch + +# dynamo compile error in torch 2.0.1: torch._dynamo.exc.BackendCompilerFailed: debug_wrapper raised ValueError: Cannot view a tensor with shape torch.Size([1, 512, 12, 64]) and strides (393216, 64, 32768, 1) as a tensor with shape (1, 512, 768)! 
+# 10.4: dont know how to reproduce the bug + +from collections.abc import Sequence +from typing import Optional, Tuple, Union + +import torch +import torch.utils.checkpoint +from torch import nn +from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss + +from transformers.activations import ACT2FN +from transformers.modeling_outputs import ( + BaseModelOutput, + MaskedLMOutput, + QuestionAnsweringModelOutput, + SequenceClassifierOutput, + TokenClassifierOutput, +) +from transformers.modeling_utils import PreTrainedModel +from transformers.pytorch_utils import softmax_backward_data +from transformers.utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging +from transformers.models.deberta.configuration_deberta import DebertaConfig + +logger = logging.get_logger(__name__) +_CONFIG_FOR_DOC = "DebertaConfig" +_CHECKPOINT_FOR_DOC = "microsoft/deberta-base" + +# Masked LM docstring +_CHECKPOINT_FOR_MASKED_LM = "lsanochkin/deberta-large-feedback" +_MASKED_LM_EXPECTED_OUTPUT = "' Paris'" +_MASKED_LM_EXPECTED_LOSS = "0.54" + +# QuestionAnswering docstring +_CHECKPOINT_FOR_QA = "Palak/microsoft_deberta-large_squad" +_QA_EXPECTED_OUTPUT = "' a nice puppet'" +_QA_EXPECTED_LOSS = 0.14 +_QA_TARGET_START_INDEX = 12 +_QA_TARGET_END_INDEX = 14 + + +DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = [ + "microsoft/deberta-base", + "microsoft/deberta-large", + "microsoft/deberta-xlarge", + "microsoft/deberta-base-mnli", + "microsoft/deberta-large-mnli", + "microsoft/deberta-xlarge-mnli", +] + + +class ContextPooler(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.pooler_hidden_size, config.pooler_hidden_size) + self.dropout = StableDropout(config.pooler_dropout) + self.config = config + + def forward(self, hidden_states): + # We "pool" the model by simply taking the hidden state corresponding + # to the first token. + + context_token = hidden_states[:, 0] + context_token = self.dropout(context_token) + pooled_output = self.dense(context_token) + pooled_output = ACT2FN[self.config.pooler_hidden_act](pooled_output) + return pooled_output + + @property + def output_dim(self): + return self.config.hidden_size + + +class XSoftmax(torch.autograd.Function): + """ + Masked Softmax which is optimized for saving memory + + Args: + input (`torch.tensor`): The input tensor that will apply softmax. + mask (`torch.IntTensor`): + The mask matrix where 0 indicate that element will be ignored in the softmax calculation. 
+ dim (int): The dimension that will apply softmax + + Example: + + ```python + >>> import torch + >>> from transformers.models.deberta.modeling_deberta import XSoftmax + + >>> # Make a tensor + >>> x = torch.randn([4, 20, 100]) + + >>> # Create a mask + >>> mask = (x > 0).int() + + >>> # Specify the dimension to apply softmax + >>> dim = -1 + + >>> y = XSoftmax.apply(x, mask, dim) + ```""" + + @staticmethod + def forward(self, input, mask, dim): + self.dim = dim + rmask = ~(mask.to(torch.bool)) + + output = input.masked_fill(rmask, torch.tensor(torch.finfo(input.dtype).min)) + output = torch.softmax(output, self.dim) + output.masked_fill_(rmask, 0) + self.save_for_backward(output) + return output + + @staticmethod + def backward(self, grad_output): + (output,) = self.saved_tensors + inputGrad = softmax_backward_data(self, grad_output, output, self.dim, output) + return inputGrad, None, None + + @staticmethod + def symbolic(g, self, mask, dim): + import torch.onnx.symbolic_helper as sym_help + from torch.onnx.symbolic_opset9 import masked_fill, softmax + + mask_cast_value = g.op("Cast", mask, to_i=sym_help.cast_pytorch_to_onnx["Long"]) + r_mask = g.op( + "Cast", + g.op("Sub", g.op("Constant", value_t=torch.tensor(1, dtype=torch.int64)), mask_cast_value), + to_i=sym_help.cast_pytorch_to_onnx["Byte"], + ) + output = masked_fill( + g, self, r_mask, g.op("Constant", value_t=torch.tensor(torch.finfo(self.type().dtype()).min)) + ) + output = softmax(g, output, dim) + return masked_fill(g, output, r_mask, g.op("Constant", value_t=torch.tensor(0, dtype=torch.bool))) + + +def xsoftmax_call(input, mask, dim): + rmask = ~(mask.to(torch.bool)) + + output = input.masked_fill(rmask, torch.tensor(torch.finfo(input.dtype).min)) + output = torch.softmax(output, dim) + output.masked_fill_(rmask, 0) + return output + +class DropoutContext(object): + def __init__(self): + self.dropout = 0 + self.mask = None + self.scale = 1 + self.reuse_mask = True + + +def get_mask(input, local_context): + if not isinstance(local_context, DropoutContext): + dropout = local_context + mask = None + else: + dropout = local_context.dropout + dropout *= local_context.scale + mask = local_context.mask if local_context.reuse_mask else None + + if dropout > 0 and mask is None: + mask = (1 - torch.empty_like(input).bernoulli_(1 - dropout)).to(torch.bool) + + if isinstance(local_context, DropoutContext): + if local_context.mask is None: + local_context.mask = mask + + return mask, dropout + + +class XDropout(torch.autograd.Function): + """Optimized dropout function to save computation and memory by using mask operation instead of multiplication.""" + + @staticmethod + def forward(ctx, input, local_ctx): + mask, dropout = get_mask(input, local_ctx) + ctx.scale = 1.0 / (1 - dropout) + if dropout > 0: + ctx.save_for_backward(mask) + return input.masked_fill(mask, 0) * ctx.scale + else: + return input + + @staticmethod + def backward(ctx, grad_output): + if ctx.scale > 1: + (mask,) = ctx.saved_tensors + return grad_output.masked_fill(mask, 0) * ctx.scale, None + else: + return grad_output, None + + @staticmethod + def symbolic(g: torch._C.Graph, input: torch._C.Value, local_ctx: Union[float, DropoutContext]) -> torch._C.Value: + from torch.onnx import symbolic_opset12 + + dropout_p = local_ctx + if isinstance(local_ctx, DropoutContext): + dropout_p = local_ctx.dropout + # StableDropout only calls this function when training. 
+ train = True + # TODO: We should check if the opset_version being used to export + # is > 12 here, but there's no good way to do that. As-is, if the + # opset_version < 12, export will fail with a CheckerError. + # Once https://github.com/pytorch/pytorch/issues/78391 is fixed, do something like: + # if opset_version < 12: + # return torch.onnx.symbolic_opset9.dropout(g, input, dropout_p, train) + return symbolic_opset12.dropout(g, input, dropout_p, train) + + +class StableDropout(nn.Module): + """ + Optimized dropout module for stabilizing the training + + Args: + drop_prob (float): the dropout probabilities + """ + + def __init__(self, drop_prob): + super().__init__() + self.drop_prob = drop_prob + self.count = 0 + self.context_stack = None + + def forward(self, x): + """ + Call the module + + Args: + x (`torch.tensor`): The input tensor to apply dropout + """ + if self.training and self.drop_prob > 0: + return XDropout.apply(x, self.get_context()) + return x + + def clear_context(self): + self.count = 0 + self.context_stack = None + + def init_context(self, reuse_mask=True, scale=1): + if self.context_stack is None: + self.context_stack = [] + self.count = 0 + for c in self.context_stack: + c.reuse_mask = reuse_mask + c.scale = scale + + def get_context(self): + if self.context_stack is not None: + if self.count >= len(self.context_stack): + self.context_stack.append(DropoutContext()) + ctx = self.context_stack[self.count] + ctx.dropout = self.drop_prob + self.count += 1 + return ctx + else: + return self.drop_prob + + +class DebertaLayerNorm(nn.Module): + """LayerNorm module in the TF style (epsilon inside the square root).""" + + def __init__(self, size, eps=1e-12): + super().__init__() + self.weight = nn.Parameter(torch.ones(size)) + self.bias = nn.Parameter(torch.zeros(size)) + self.variance_epsilon = eps + + def forward(self, hidden_states): + input_type = hidden_states.dtype + hidden_states = hidden_states.float() + mean = hidden_states.mean(-1, keepdim=True) + variance = (hidden_states - mean).pow(2).mean(-1, keepdim=True) + hidden_states = (hidden_states - mean) / torch.sqrt(variance + self.variance_epsilon) + hidden_states = hidden_states.to(input_type) + y = self.weight * hidden_states + self.bias + return y + + +class DebertaSelfOutput(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + self.LayerNorm = DebertaLayerNorm(config.hidden_size, config.layer_norm_eps) + self.dropout = StableDropout(config.hidden_dropout_prob) + + def forward(self, hidden_states, input_tensor): + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states) + hidden_states = self.LayerNorm(hidden_states + input_tensor) + return hidden_states + + +class DebertaAttention(nn.Module): + def __init__(self, config): + super().__init__() + self.self = DisentangledSelfAttention(config) + self.output = DebertaSelfOutput(config) + self.config = config + + def forward( + self, + hidden_states, + attention_mask, + output_attentions=False, + query_states=None, + relative_pos=None, + rel_embeddings=None, + ): + self_output = self.self( + hidden_states, + attention_mask, + output_attentions, + query_states=query_states, + relative_pos=relative_pos, + rel_embeddings=rel_embeddings, + ) + if output_attentions: + self_output, att_matrix = self_output + if query_states is None: + query_states = hidden_states + attention_output = self.output(self_output, query_states) + + if output_attentions: + return 
(attention_output, att_matrix) + else: + return attention_output + + +# Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->Deberta +class DebertaIntermediate(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.intermediate_size) + if isinstance(config.hidden_act, str): + self.intermediate_act_fn = ACT2FN[config.hidden_act] + else: + self.intermediate_act_fn = config.hidden_act + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + hidden_states = self.dense(hidden_states) + hidden_states = self.intermediate_act_fn(hidden_states) + return hidden_states + + +class DebertaOutput(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.intermediate_size, config.hidden_size) + self.LayerNorm = DebertaLayerNorm(config.hidden_size, config.layer_norm_eps) + self.dropout = StableDropout(config.hidden_dropout_prob) + self.config = config + + def forward(self, hidden_states, input_tensor): + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states) + hidden_states = self.LayerNorm(hidden_states + input_tensor) + return hidden_states + + +class DebertaLayer(nn.Module): + def __init__(self, config): + super().__init__() + self.attention = DebertaAttention(config) + self.intermediate = DebertaIntermediate(config) + self.output = DebertaOutput(config) + + def forward( + self, + hidden_states, + attention_mask, + query_states=None, + relative_pos=None, + rel_embeddings=None, + output_attentions=False, + ): + attention_output = self.attention( + hidden_states, + attention_mask, + output_attentions=output_attentions, + query_states=query_states, + relative_pos=relative_pos, + rel_embeddings=rel_embeddings, + ) + if output_attentions: + attention_output, att_matrix = attention_output + intermediate_output = self.intermediate(attention_output) + layer_output = self.output(intermediate_output, attention_output) + if output_attentions: + return (layer_output, att_matrix) + else: + return layer_output + + +class DebertaEncoder(nn.Module): + """Modified BertEncoder with relative position bias support""" + + def __init__(self, config): + super().__init__() + self.layer = nn.ModuleList([DebertaLayer(config) for _ in range(config.num_hidden_layers)]) + self.relative_attention = getattr(config, "relative_attention", False) + if self.relative_attention: + self.max_relative_positions = getattr(config, "max_relative_positions", -1) + if self.max_relative_positions < 1: + self.max_relative_positions = config.max_position_embeddings + self.rel_embeddings = nn.Embedding(self.max_relative_positions * 2, config.hidden_size) + self.gradient_checkpointing = False + + def get_rel_embedding(self): + rel_embeddings = self.rel_embeddings.weight if self.relative_attention else None + return rel_embeddings + + def get_attention_mask(self, attention_mask): + if attention_mask.dim() <= 2: + extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2) + attention_mask = extended_attention_mask * extended_attention_mask.squeeze(-2).unsqueeze(-1) + attention_mask = attention_mask.byte() + elif attention_mask.dim() == 3: + attention_mask = attention_mask.unsqueeze(1) + + return attention_mask + + def get_rel_pos(self, hidden_states, query_states=None, relative_pos=None): + if self.relative_attention and relative_pos is None: + q = query_states.size(-2) if query_states is not None else hidden_states.size(-2) + relative_pos = build_relative_position(q, 
hidden_states.size(-2), hidden_states.device) + return relative_pos + + def forward( + self, + hidden_states, + attention_mask, + output_hidden_states=True, + output_attentions=False, + query_states=None, + relative_pos=None, + return_dict=True, + ): + attention_mask = self.get_attention_mask(attention_mask) + relative_pos = self.get_rel_pos(hidden_states, query_states, relative_pos) + + all_hidden_states = () if output_hidden_states else None + all_attentions = () if output_attentions else None + + if isinstance(hidden_states, Sequence): + next_kv = hidden_states[0] + else: + next_kv = hidden_states + rel_embeddings = self.get_rel_embedding() + for i, layer_module in enumerate(self.layer): + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + if self.gradient_checkpointing and self.training: + + def create_custom_forward(module): + def custom_forward(*inputs): + return module(*inputs, output_attentions) + + return custom_forward + + hidden_states = torch.utils.checkpoint.checkpoint( + create_custom_forward(layer_module), + next_kv, + attention_mask, + query_states, + relative_pos, + rel_embeddings, + ) + else: + hidden_states = layer_module( + next_kv, + attention_mask, + query_states=query_states, + relative_pos=relative_pos, + rel_embeddings=rel_embeddings, + output_attentions=output_attentions, + ) + + if output_attentions: + hidden_states, att_m = hidden_states + + if query_states is not None: + query_states = hidden_states + if isinstance(hidden_states, Sequence): + next_kv = hidden_states[i + 1] if i + 1 < len(self.layer) else None + else: + next_kv = hidden_states + + if output_attentions: + all_attentions = all_attentions + (att_m,) + + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + if not return_dict: + return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None) + return BaseModelOutput( + last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions + ) + + +def build_relative_position(query_size, key_size, device): + """ + Build relative position according to the query and key + + We assume the absolute position of query \\(P_q\\) is range from (0, query_size) and the absolute position of key + \\(P_k\\) is range from (0, key_size), The relative positions from query to key is \\(R_{q \\rightarrow k} = P_q - + P_k\\) + + Args: + query_size (int): the length of query + key_size (int): the length of key + + Return: + `torch.LongTensor`: A tensor with shape [1, query_size, key_size] + + """ + + q_ids = torch.arange(query_size, dtype=torch.long, device=device) + k_ids = torch.arange(key_size, dtype=torch.long, device=device) + rel_pos_ids = q_ids[:, None] - k_ids.view(1, -1).repeat(query_size, 1) + rel_pos_ids = rel_pos_ids[:query_size, :] + rel_pos_ids = rel_pos_ids.unsqueeze(0) + return rel_pos_ids + + +def c2p_dynamic_expand(c2p_pos, query_layer, relative_pos): + return c2p_pos.expand([query_layer.size(0), query_layer.size(1), query_layer.size(2), relative_pos.size(-1)]) + + +def p2c_dynamic_expand(c2p_pos, query_layer, key_layer): + return c2p_pos.expand([query_layer.size(0), query_layer.size(1), key_layer.size(-2), key_layer.size(-2)]) + + +def pos_dynamic_expand(pos_index, p2c_att, key_layer): + return pos_index.expand(p2c_att.size()[:2] + (pos_index.size(-2), key_layer.size(-2))) + + +class DisentangledSelfAttention(nn.Module): + """ + Disentangled self-attention module + + Parameters: + config (`str`): + A model config class instance 
with the configuration to build a new model. The schema is similar to + *BertConfig*, for more details, please refer [`DebertaConfig`] + + """ + + def __init__(self, config): + super().__init__() + if config.hidden_size % config.num_attention_heads != 0: + raise ValueError( + f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " + f"heads ({config.num_attention_heads})" + ) + self.num_attention_heads = config.num_attention_heads + self.attention_head_size = int(config.hidden_size / config.num_attention_heads) + self.all_head_size = self.num_attention_heads * self.attention_head_size + self.in_proj = nn.Linear(config.hidden_size, self.all_head_size * 3, bias=False) + self.q_bias = nn.Parameter(torch.zeros((self.all_head_size), dtype=torch.float)) + self.v_bias = nn.Parameter(torch.zeros((self.all_head_size), dtype=torch.float)) + self.pos_att_type = config.pos_att_type if config.pos_att_type is not None else [] + + self.relative_attention = getattr(config, "relative_attention", False) + self.talking_head = getattr(config, "talking_head", False) + + if self.talking_head: + self.head_logits_proj = nn.Linear(config.num_attention_heads, config.num_attention_heads, bias=False) + self.head_weights_proj = nn.Linear(config.num_attention_heads, config.num_attention_heads, bias=False) + + if self.relative_attention: + self.max_relative_positions = getattr(config, "max_relative_positions", -1) + if self.max_relative_positions < 1: + self.max_relative_positions = config.max_position_embeddings + self.pos_dropout = StableDropout(config.hidden_dropout_prob) + + if "c2p" in self.pos_att_type: + self.pos_proj = nn.Linear(config.hidden_size, self.all_head_size, bias=False) + if "p2c" in self.pos_att_type: + self.pos_q_proj = nn.Linear(config.hidden_size, self.all_head_size) + + self.dropout = StableDropout(config.attention_probs_dropout_prob) + + def transpose_for_scores(self, x): + new_x_shape = x.size()[:-1] + (self.num_attention_heads, -1) + x = x.view(new_x_shape) + return x.permute(0, 2, 1, 3) + + def forward( + self, + hidden_states, + attention_mask, + output_attentions=False, + query_states=None, + relative_pos=None, + rel_embeddings=None, + ): + """ + Call the module + + Args: + hidden_states (`torch.FloatTensor`): + Input states to the module usually the output from previous layer, it will be the Q,K and V in + *Attention(Q,K,V)* + + attention_mask (`torch.ByteTensor`): + An attention mask matrix of shape [*B*, *N*, *N*] where *B* is the batch size, *N* is the maximum + sequence length in which element [i,j] = *1* means the *i* th token in the input can attend to the *j* + th token. + + output_attentions (`bool`, optional): + Whether return the attention matrix. + + query_states (`torch.FloatTensor`, optional): + The *Q* state in *Attention(Q,K,V)*. + + relative_pos (`torch.LongTensor`): + The relative position encoding between the tokens in the sequence. It's of shape [*B*, *N*, *N*] with + values ranging in [*-max_relative_positions*, *max_relative_positions*]. + + rel_embeddings (`torch.FloatTensor`): + The embedding of relative distances. It's a tensor of shape [\\(2 \\times + \\text{max_relative_positions}\\), *hidden_size*]. 
+ + + """ + if query_states is None: + qp = self.in_proj(hidden_states) # .split(self.all_head_size, dim=-1) + query_layer, key_layer, value_layer = self.transpose_for_scores(qp).chunk(3, dim=-1) + else: + + def linear(w, b, x): + if b is not None: + return torch.matmul(x, w.t()) + b.t() + else: + return torch.matmul(x, w.t()) # + b.t() + + ws = self.in_proj.weight.chunk(self.num_attention_heads * 3, dim=0) + qkvw = [torch.cat([ws[i * 3 + k] for i in range(self.num_attention_heads)], dim=0) for k in range(3)] + qkvb = [None] * 3 + + q = linear(qkvw[0], qkvb[0], query_states.to(dtype=qkvw[0].dtype)) + k, v = [linear(qkvw[i], qkvb[i], hidden_states.to(dtype=qkvw[i].dtype)) for i in range(1, 3)] + query_layer, key_layer, value_layer = [self.transpose_for_scores(x) for x in [q, k, v]] + + query_layer = query_layer + self.transpose_for_scores(self.q_bias[None, None, :]) + value_layer = value_layer + self.transpose_for_scores(self.v_bias[None, None, :]) + + rel_att = None + # Take the dot product between "query" and "key" to get the raw attention scores. + scale_factor = 1 + len(self.pos_att_type) + scale = torch.sqrt(torch.tensor(query_layer.size(-1), dtype=torch.float) * scale_factor) + query_layer = query_layer / scale.to(dtype=query_layer.dtype) + attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) + if self.relative_attention: + rel_embeddings = self.pos_dropout(rel_embeddings) + rel_att = self.disentangled_att_bias(query_layer, key_layer, relative_pos, rel_embeddings, scale_factor) + + if rel_att is not None: + attention_scores = attention_scores + rel_att + + # bxhxlxd + if self.talking_head: + attention_scores = self.head_logits_proj(attention_scores.permute(0, 2, 3, 1)).permute(0, 3, 1, 2) + + attention_probs = XSoftmax.apply(attention_scores, attention_mask, -1) + attention_probs = self.dropout(attention_probs) + if self.talking_head: + attention_probs = self.head_weights_proj(attention_probs.permute(0, 2, 3, 1)).permute(0, 3, 1, 2) + + context_layer = torch.matmul(attention_probs, value_layer) + context_layer = context_layer.permute(0, 2, 1, 3).contiguous() + new_context_layer_shape = context_layer.size()[:-2] + (-1,) + context_layer = context_layer.view(new_context_layer_shape) + if output_attentions: + return (context_layer, attention_probs) + else: + return context_layer + + def disentangled_att_bias(self, query_layer, key_layer, relative_pos, rel_embeddings, scale_factor): + if relative_pos is None: + q = query_layer.size(-2) + relative_pos = build_relative_position(q, key_layer.size(-2), query_layer.device) + if relative_pos.dim() == 2: + relative_pos = relative_pos.unsqueeze(0).unsqueeze(0) + elif relative_pos.dim() == 3: + relative_pos = relative_pos.unsqueeze(1) + # bxhxqxk + elif relative_pos.dim() != 4: + raise ValueError(f"Relative position ids must be of dim 2 or 3 or 4. 
{relative_pos.dim()}") + + # MODIFIED: enable dynamo dynamic shape mode + # att_span = min(max(query_layer.size(-2), key_layer.size(-2)), self.max_relative_positions) + # relative_pos = relative_pos.long().to(query_layer.device) + # rel_embeddings = rel_embeddings[ + # self.max_relative_positions - att_span : self.max_relative_positions + att_span, : + # ].unsqueeze(0) + + att_span = min(max(query_layer.size(-2), key_layer.size(-2)), 512) + relative_pos = relative_pos.long().to(query_layer.device) + rel_embeddings = rel_embeddings[ + 512 - att_span : 512 + att_span, : + ].unsqueeze(0) + + score = 0 + + # content->position + if "c2p" in self.pos_att_type: + pos_key_layer = self.pos_proj(rel_embeddings) + pos_key_layer = self.transpose_for_scores(pos_key_layer) + c2p_att = torch.matmul(query_layer, pos_key_layer.transpose(-1, -2)) + c2p_pos = torch.clamp(relative_pos + att_span, 0, att_span * 2 - 1) + c2p_att = torch.gather(c2p_att, dim=-1, index=c2p_dynamic_expand(c2p_pos, query_layer, relative_pos)) + score += c2p_att + + # position->content + if "p2c" in self.pos_att_type: + pos_query_layer = self.pos_q_proj(rel_embeddings) + pos_query_layer = self.transpose_for_scores(pos_query_layer) + pos_query_layer /= torch.sqrt(torch.tensor(pos_query_layer.size(-1), dtype=torch.float) * scale_factor) + if query_layer.size(-2) != key_layer.size(-2): + r_pos = build_relative_position(key_layer.size(-2), key_layer.size(-2), query_layer.device) + else: + r_pos = relative_pos + p2c_pos = torch.clamp(-r_pos + att_span, 0, att_span * 2 - 1) + p2c_att = torch.matmul(key_layer, pos_query_layer.transpose(-1, -2).to(dtype=key_layer.dtype)) + p2c_att = torch.gather( + p2c_att, dim=-1, index=p2c_dynamic_expand(p2c_pos, query_layer, key_layer) + ).transpose(-1, -2) + + if query_layer.size(-2) != key_layer.size(-2): + pos_index = relative_pos[:, :, :, 0].unsqueeze(-1) + p2c_att = torch.gather(p2c_att, dim=-2, index=pos_dynamic_expand(pos_index, p2c_att, key_layer)) + score += p2c_att + + return score + + +class DebertaEmbeddings(nn.Module): + """Construct the embeddings from word, position and token_type embeddings.""" + + def __init__(self, config): + super().__init__() + pad_token_id = getattr(config, "pad_token_id", 0) + self.embedding_size = getattr(config, "embedding_size", config.hidden_size) + self.word_embeddings = nn.Embedding(config.vocab_size, self.embedding_size, padding_idx=pad_token_id) + + self.position_biased_input = getattr(config, "position_biased_input", True) + if not self.position_biased_input: + self.position_embeddings = None + else: + self.position_embeddings = nn.Embedding(config.max_position_embeddings, self.embedding_size) + + if config.type_vocab_size > 0: + self.token_type_embeddings = nn.Embedding(config.type_vocab_size, self.embedding_size) + + if self.embedding_size != config.hidden_size: + self.embed_proj = nn.Linear(self.embedding_size, config.hidden_size, bias=False) + self.LayerNorm = DebertaLayerNorm(config.hidden_size, config.layer_norm_eps) + self.dropout = StableDropout(config.hidden_dropout_prob) + self.config = config + + # position_ids (1, len position emb) is contiguous in memory and exported when serialized + self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1))) + + def forward(self, input_ids=None, token_type_ids=None, position_ids=None, mask=None, inputs_embeds=None): + if input_ids is not None: + input_shape = input_ids.size() + else: + input_shape = inputs_embeds.size()[:-1] + + seq_length = input_shape[1] + + if 
position_ids is None: + position_ids = self.position_ids[:, :seq_length] + + if token_type_ids is None: + token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) + + if inputs_embeds is None: + inputs_embeds = self.word_embeddings(input_ids) + + if self.position_embeddings is not None: + position_embeddings = self.position_embeddings(position_ids.long()) + else: + position_embeddings = torch.zeros_like(inputs_embeds) + + embeddings = inputs_embeds + if self.position_biased_input: + embeddings += position_embeddings + if self.config.type_vocab_size > 0: + token_type_embeddings = self.token_type_embeddings(token_type_ids) + embeddings += token_type_embeddings + + # MODIFIED: for dynamo dynamic shape + # if self.embedding_size != self.config.hidden_size: + # embeddings = self.embed_proj(embeddings) + + embeddings = self.LayerNorm(embeddings) + + if mask is not None: + if mask.dim() != embeddings.dim(): + if mask.dim() == 4: + mask = mask.squeeze(1).squeeze(1) + mask = mask.unsqueeze(2) + mask = mask.to(embeddings.dtype) + + embeddings = embeddings * mask + + embeddings = self.dropout(embeddings) + return embeddings + + +class DebertaPreTrainedModel(PreTrainedModel): + """ + An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained + models. + """ + + config_class = DebertaConfig + base_model_prefix = "deberta" + _keys_to_ignore_on_load_missing = ["position_ids"] + _keys_to_ignore_on_load_unexpected = ["position_embeddings"] + supports_gradient_checkpointing = True + + def _init_weights(self, module): + """Initialize the weights.""" + if isinstance(module, nn.Linear): + # Slightly different from the TF version which uses truncated_normal for initialization + # cf https://github.com/pytorch/pytorch/pull/5617 + module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) + if module.bias is not None: + module.bias.data.zero_() + elif isinstance(module, nn.Embedding): + module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) + if module.padding_idx is not None: + module.weight.data[module.padding_idx].zero_() + + def _set_gradient_checkpointing(self, module, value=False): + if isinstance(module, DebertaEncoder): + module.gradient_checkpointing = value + + +DEBERTA_START_DOCSTRING = r""" + The DeBERTa model was proposed in [DeBERTa: Decoding-enhanced BERT with Disentangled + Attention](https://arxiv.org/abs/2006.03654) by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. It's build + on top of BERT/RoBERTa with two improvements, i.e. disentangled attention and enhanced mask decoder. With those two + improvements, it out perform BERT/RoBERTa on a majority of tasks with 80GB pretraining data. + + This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. + Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage + and behavior. + + + Parameters: + config ([`DebertaConfig`]): Model configuration class with all the parameters of the model. + Initializing with a config file does not load the weights associated with the model, only the + configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. +""" + +DEBERTA_INPUTS_DOCSTRING = r""" + Args: + input_ids (`torch.LongTensor` of shape `({0})`): + Indices of input sequence tokens in the vocabulary. + + Indices can be obtained using [`AutoTokenizer`]. 
See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*): + Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, + 1]`: + + - 0 corresponds to a *sentence A* token, + - 1 corresponds to a *sentence B* token. + + [What are token type IDs?](../glossary#token-type-ids) + position_ids (`torch.LongTensor` of shape `({0})`, *optional*): + Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, + config.max_position_embeddings - 1]`. + + [What are position IDs?](../glossary#position-ids) + inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*): + Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This + is useful if you want more control over how to convert *input_ids* indices into associated vectors than the + model's internal embedding lookup matrix. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. +""" + + +@add_start_docstrings( + "The bare DeBERTa Model transformer outputting raw hidden-states without any specific head on top.", + DEBERTA_START_DOCSTRING, +) +class DebertaModel(DebertaPreTrainedModel): + def __init__(self, config): + super().__init__(config) + + self.embeddings = DebertaEmbeddings(config) + self.encoder = DebertaEncoder(config) + self.z_steps = 0 + self.config = config + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.embeddings.word_embeddings + + def set_input_embeddings(self, new_embeddings): + self.embeddings.word_embeddings = new_embeddings + + def _prune_heads(self, heads_to_prune): + """ + Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base + class PreTrainedModel + """ + raise NotImplementedError("The prune function is not implemented in DeBERTa model.") + + @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=BaseModelOutput, + config_class=_CONFIG_FOR_DOC, + ) + def forward( + self, + input_ids: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + token_type_ids: Optional[torch.Tensor] = None, + position_ids: Optional[torch.Tensor] = None, + inputs_embeds: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, BaseModelOutput]: + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if input_ids is not None and inputs_embeds is not None: + raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") + elif input_ids is not None: + input_shape = input_ids.size() + elif inputs_embeds is not None: + input_shape = inputs_embeds.size()[:-1] + else: + raise ValueError("You have to specify either input_ids or inputs_embeds") + + device = input_ids.device if input_ids is not None else inputs_embeds.device + + if attention_mask is None: + attention_mask = torch.ones(input_shape, device=device) + if token_type_ids is None: + token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) + + embedding_output = self.embeddings( + input_ids=input_ids, + token_type_ids=token_type_ids, + position_ids=position_ids, + mask=attention_mask, + inputs_embeds=inputs_embeds, + ) + + encoder_outputs = self.encoder( + embedding_output, + attention_mask, + output_hidden_states=True, + output_attentions=output_attentions, + return_dict=return_dict, + ) + encoded_layers = encoder_outputs[1] + + if self.z_steps > 1: + hidden_states = encoded_layers[-2] + layers = [self.encoder.layer[-1] for _ in range(self.z_steps)] + query_states = encoded_layers[-1] + rel_embeddings = self.encoder.get_rel_embedding() + attention_mask = self.encoder.get_attention_mask(attention_mask) + rel_pos = self.encoder.get_rel_pos(embedding_output) + for layer in layers[1:]: + query_states = layer( + hidden_states, + attention_mask, + output_attentions=False, + query_states=query_states, + relative_pos=rel_pos, + rel_embeddings=rel_embeddings, + ) + encoded_layers.append(query_states) + + sequence_output = encoded_layers[-1] + + if not return_dict: + return (sequence_output,) + encoder_outputs[(1 if output_hidden_states else 2) :] + + return BaseModelOutput( + last_hidden_state=sequence_output, + hidden_states=encoder_outputs.hidden_states if output_hidden_states else None, + attentions=encoder_outputs.attentions, + ) + + + +model_name = "microsoft/deberta-base" +device = "cuda:0" + +def get_model(): + config = AutoConfig.from_pretrained(model_name) + config.return_dict = False + model = DebertaModel(config).to(device) + print("model type", type(model)) + return model + +def get_scripted_model(): + from ._deberta_scripted import _get_scripted_model + return _get_scripted_model() + + +def get_input(batch_size, 
seq_len=256): + # tokenizer = AutoTokenizer.from_pretrained(model_name) + # inputs = tokenizer("Hello world! Hello world! Hello world! Hello world! Hello world!", return_tensors="pt").to(device) + # assert len(inputs) == 3 + # return (inputs['input_ids'], inputs['attention_mask'], inputs['token_type_ids']), {} + vocab_size = 50265 + input_ids = torch.randint(0, vocab_size, (batch_size, seq_len), dtype=torch.int64).to(device) + attention_mask = torch.ones((batch_size, seq_len), dtype=torch.int64).to(device) + token_type_ids = torch.zeros((batch_size, seq_len), dtype=torch.int64).to(device) + return (input_ids, attention_mask, token_type_ids), {} + + +if __name__ == "__main__": + model = get_model() + input_args, input_kwargs = get_input(batch_size=1) + print([x.shape for x in input_args]) + outputs = model(*input_args, **input_kwargs) + print(outputs) + diff --git a/models/densenet.py b/models/densenet.py new file mode 100644 index 000000000000..22617d9eedca --- /dev/null +++ b/models/densenet.py @@ -0,0 +1,255 @@ +# https://github.com/pytorch/vision/blob/f677ea31db8f45dbfec2fe5e519da82853815776/torchvision/models/densenet.py + +import re +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.utils.checkpoint as cp +from collections import OrderedDict + + +__all__ = ['DenseNet', 'densenet121', 'densenet169', 'densenet201', 'densenet161'] + +model_urls = { + 'densenet121': 'https://download.pytorch.org/models/densenet121-a639ec97.pth', + 'densenet169': 'https://download.pytorch.org/models/densenet169-b2777c0a.pth', + 'densenet201': 'https://download.pytorch.org/models/densenet201-c1103571.pth', + 'densenet161': 'https://download.pytorch.org/models/densenet161-8d451a50.pth', +} + + +def _bn_function_factory(norm, relu, conv): + def bn_function(*inputs): + concated_features = torch.cat(inputs, 1) + bottleneck_output = conv(relu(norm(concated_features))) + return bottleneck_output + + return bn_function + + +class _DenseLayer(nn.Sequential): + def __init__(self, num_input_features, growth_rate, bn_size, drop_rate, memory_efficient=False): + super(_DenseLayer, self).__init__() + self.add_module('norm1', nn.BatchNorm2d(num_input_features)), + self.add_module('relu1', nn.ReLU(inplace=True)), + self.add_module('conv1', nn.Conv2d(num_input_features, bn_size * + growth_rate, kernel_size=1, stride=1, + bias=False)), + self.add_module('norm2', nn.BatchNorm2d(bn_size * growth_rate)), + self.add_module('relu2', nn.ReLU(inplace=True)), + self.add_module('conv2', nn.Conv2d(bn_size * growth_rate, growth_rate, + kernel_size=3, stride=1, padding=1, + bias=False)), + self.drop_rate = drop_rate + self.memory_efficient = memory_efficient + + def forward(self, *prev_features): + bn_function = _bn_function_factory(self.norm1, self.relu1, self.conv1) + if self.memory_efficient and any(prev_feature.requires_grad for prev_feature in prev_features): + bottleneck_output = cp.checkpoint(bn_function, *prev_features) + else: + bottleneck_output = bn_function(*prev_features) + new_features = self.conv2(self.relu2(self.norm2(bottleneck_output))) + if self.drop_rate > 0: + new_features = F.dropout(new_features, p=self.drop_rate, + training=self.training) + return new_features + + +class _DenseBlock(nn.Module): + def __init__(self, num_layers, num_input_features, bn_size, growth_rate, drop_rate, memory_efficient=False): + super(_DenseBlock, self).__init__() + for i in range(num_layers): + layer = _DenseLayer( + num_input_features + i * growth_rate, + growth_rate=growth_rate, + bn_size=bn_size, + 
drop_rate=drop_rate, + memory_efficient=memory_efficient, + ) + self.add_module('denselayer%d' % (i + 1), layer) + + def forward(self, init_features): + features = [init_features] + for name, layer in self.named_children(): + new_features = layer(*features) + features.append(new_features) + return torch.cat(features, 1) + + +class _Transition(nn.Sequential): + def __init__(self, num_input_features, num_output_features): + super(_Transition, self).__init__() + self.add_module('norm', nn.BatchNorm2d(num_input_features)) + self.add_module('relu', nn.ReLU(inplace=True)) + self.add_module('conv', nn.Conv2d(num_input_features, num_output_features, + kernel_size=1, stride=1, bias=False)) + self.add_module('pool', nn.AvgPool2d(kernel_size=2, stride=2)) + + +class DenseNet(nn.Module): + r"""Densenet-BC model class, based on + `"Densely Connected Convolutional Networks" `_ + + Args: + growth_rate (int) - how many filters to add each layer (`k` in paper) + block_config (list of 4 ints) - how many layers in each pooling block + num_init_features (int) - the number of filters to learn in the first convolution layer + bn_size (int) - multiplicative factor for number of bottle neck layers + (i.e. bn_size * k features in the bottleneck layer) + drop_rate (float) - dropout rate after each dense layer + num_classes (int) - number of classification classes + memory_efficient (bool) - If True, uses checkpointing. Much more memory efficient, + but slower. Default: *False*. See `"paper" `_ + """ + + def __init__(self, growth_rate=32, block_config=(6, 12, 24, 16), + num_init_features=64, bn_size=4, drop_rate=0, num_classes=1000, memory_efficient=False): + + super(DenseNet, self).__init__() + + # First convolution + self.features = nn.Sequential(OrderedDict([ + ('conv0', nn.Conv2d(3, num_init_features, kernel_size=7, stride=2, + padding=3, bias=False)), + ('norm0', nn.BatchNorm2d(num_init_features)), + ('relu0', nn.ReLU(inplace=True)), + ('pool0', nn.MaxPool2d(kernel_size=3, stride=2, padding=1)), + ])) + + # Each denseblock + num_features = num_init_features + for i, num_layers in enumerate(block_config): + block = _DenseBlock( + num_layers=num_layers, + num_input_features=num_features, + bn_size=bn_size, + growth_rate=growth_rate, + drop_rate=drop_rate, + memory_efficient=memory_efficient + ) + self.features.add_module('denseblock%d' % (i + 1), block) + num_features = num_features + num_layers * growth_rate + if i != len(block_config) - 1: + trans = _Transition(num_input_features=num_features, + num_output_features=num_features // 2) + self.features.add_module('transition%d' % (i + 1), trans) + num_features = num_features // 2 + + # Final batch norm + self.features.add_module('norm5', nn.BatchNorm2d(num_features)) + + # Linear layer + self.classifier = nn.Linear(num_features, num_classes) + + # Official init from torch repo. + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight) + elif isinstance(m, nn.BatchNorm2d): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.Linear): + nn.init.constant_(m.bias, 0) + + def forward(self, x): + features = self.features(x) + out = F.relu(features, inplace=True) + out = F.adaptive_avg_pool2d(out, (1, 1)) + out = torch.flatten(out, 1) + out = self.classifier(out) + return out + + +def _load_state_dict(model, model_url, progress): + # '.'s are no longer allowed in module names, but previous _DenseLayer + # has keys 'norm.1', 'relu.1', 'conv.1', 'norm.2', 'relu.2', 'conv.2'. 
+ # They are also in the checkpoints in model_urls. This pattern is used + # to find such keys. + pattern = re.compile( + r'^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$') + + state_dict = load_state_dict_from_url(model_url, progress=progress) + for key in list(state_dict.keys()): + res = pattern.match(key) + if res: + new_key = res.group(1) + res.group(2) + state_dict[new_key] = state_dict[key] + del state_dict[key] + model.load_state_dict(state_dict) + + +def _densenet(arch, growth_rate, block_config, num_init_features, pretrained, progress, + **kwargs): + model = DenseNet(growth_rate, block_config, num_init_features, **kwargs) + if pretrained: + _load_state_dict(model, model_urls[arch], progress) + return model + + +def densenet121(pretrained=False, progress=True, **kwargs): + r"""Densenet-121 model from + `"Densely Connected Convolutional Networks" `_ + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + memory_efficient (bool) - If True, uses checkpointing. Much more memory efficient, + but slower. Default: *False*. See `"paper" `_ + """ + return _densenet('densenet121', 32, (6, 12, 24, 16), 64, pretrained, progress, + **kwargs) + + +def densenet161(pretrained=False, progress=True, **kwargs): + r"""Densenet-161 model from + `"Densely Connected Convolutional Networks" `_ + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + memory_efficient (bool) - If True, uses checkpointing. Much more memory efficient, + but slower. Default: *False*. See `"paper" `_ + """ + return _densenet('densenet161', 48, (6, 12, 36, 24), 96, pretrained, progress, + **kwargs) + + +def densenet169(pretrained=False, progress=True, **kwargs): + r"""Densenet-169 model from + `"Densely Connected Convolutional Networks" `_ + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + memory_efficient (bool) - If True, uses checkpointing. Much more memory efficient, + but slower. Default: *False*. See `"paper" `_ + """ + return _densenet('densenet169', 32, (6, 12, 32, 32), 64, pretrained, progress, + **kwargs) + + +def densenet201(pretrained=False, progress=True, **kwargs): + r"""Densenet-201 model from + `"Densely Connected Convolutional Networks" `_ + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + memory_efficient (bool) - If True, uses checkpointing. Much more memory efficient, + but slower. Default: *False*. 
See `"paper" `_ + """ + return _densenet('densenet201', 32, (6, 12, 48, 32), 64, pretrained, progress, + **kwargs) + + +def get_model(): + return densenet121(pretrained=False).cuda() + +def get_scripted_model(): + from ._densenet_scripted import _get_scripted_model + return _get_scripted_model() + +def get_input(batch_size): + return (torch.randn((batch_size, 3, 224, 224)).cuda(),), {} + diff --git a/models/longformer.py b/models/longformer.py new file mode 100644 index 000000000000..5e47a558a511 --- /dev/null +++ b/models/longformer.py @@ -0,0 +1,1789 @@ +from transformers import AutoTokenizer, AutoConfig, AutoModel +import torch + +import math +from dataclasses import dataclass +from typing import Optional, Tuple, Union + +import torch +import torch.utils.checkpoint +from torch import nn +from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss + +from transformers.activations import ACT2FN, gelu +from transformers.modeling_utils import PreTrainedModel +from transformers.pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer +from transformers.utils import ( + ModelOutput, + add_code_sample_docstrings, + add_start_docstrings, + add_start_docstrings_to_model_forward, + logging, + replace_return_docstrings, +) +from transformers.models.longformer.configuration_longformer import LongformerConfig + +_CHECKPOINT_FOR_DOC = "allenai/longformer-base-4096" +_CONFIG_FOR_DOC = "LongformerConfig" + +LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [ + "allenai/longformer-base-4096", + "allenai/longformer-large-4096", + "allenai/longformer-large-4096-finetuned-triviaqa", + "allenai/longformer-base-4096-extra.pos.embd.only", + "allenai/longformer-large-4096-extra.pos.embd.only", + # See all Longformer models at https://huggingface.co/models?filter=longformer +] + + +@dataclass +class LongformerBaseModelOutput(ModelOutput): + """ + Base class for Longformer's outputs, with potential hidden states, local and global attentions. + + Args: + last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + Sequence of hidden-states at the output of the last layer of the model. + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of + shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the initial embedding outputs. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x + + attention_window + 1)`, where `x` is the number of tokens with global attention mask. + + Local attentions weights after the attention softmax, used to compute the weighted average in the + self-attention heads. Those are the attention weights from every token in the sequence to every token with + global attention (first `x` values) and to every token in the attention window (remaining `attention_window + + 1` values). 
Note that the first `x` values refer to tokens with fixed positions in the text, but the + remaining `attention_window + 1` values refer to tokens with relative positions: the attention weight of a + token to itself is located at index `x + attention_window / 2` and the `attention_window / 2` preceding + (succeeding) values are the attention weights to the `attention_window / 2` preceding (succeeding) tokens. + If the attention window contains a token with global attention, the attention weight at the corresponding + index is set to 0; the value should be accessed from the first `x` attention weights. If a token has global + attention, the attention weights to all other tokens in `attentions` is set to 0, the values should be + accessed from `global_attentions`. + global_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x)`, + where `x` is the number of tokens with global attention mask. + + Global attentions weights after the attention softmax, used to compute the weighted average in the + self-attention heads. Those are the attention weights from every token with global attention to every token + in the sequence. + """ + + last_hidden_state: torch.FloatTensor + hidden_states: Optional[Tuple[torch.FloatTensor]] = None + attentions: Optional[Tuple[torch.FloatTensor]] = None + global_attentions: Optional[Tuple[torch.FloatTensor]] = None + + +@dataclass +class LongformerBaseModelOutputWithPooling(ModelOutput): + """ + Base class for Longformer's outputs that also contains a pooling of the last hidden states. + + Args: + last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): + Sequence of hidden-states at the output of the last layer of the model. + pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`): + Last layer hidden-state of the first token of the sequence (classification token) further processed by a + Linear layer and a Tanh activation function. The Linear layer weights are trained from the next sentence + prediction (classification) objective during pretraining. + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of + shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the initial embedding outputs. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x + + attention_window + 1)`, where `x` is the number of tokens with global attention mask. + + Local attentions weights after the attention softmax, used to compute the weighted average in the + self-attention heads. Those are the attention weights from every token in the sequence to every token with + global attention (first `x` values) and to every token in the attention window (remaining `attention_window + + 1` values). 
Note that the first `x` values refer to tokens with fixed positions in the text, but the + remaining `attention_window + 1` values refer to tokens with relative positions: the attention weight of a + token to itself is located at index `x + attention_window / 2` and the `attention_window / 2` preceding + (succeeding) values are the attention weights to the `attention_window / 2` preceding (succeeding) tokens. + If the attention window contains a token with global attention, the attention weight at the corresponding + index is set to 0; the value should be accessed from the first `x` attention weights. If a token has global + attention, the attention weights to all other tokens in `attentions` is set to 0, the values should be + accessed from `global_attentions`. + global_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x)`, + where `x` is the number of tokens with global attention mask. + + Global attentions weights after the attention softmax, used to compute the weighted average in the + self-attention heads. Those are the attention weights from every token with global attention to every token + in the sequence. + """ + + last_hidden_state: torch.FloatTensor + pooler_output: torch.FloatTensor = None + hidden_states: Optional[Tuple[torch.FloatTensor]] = None + attentions: Optional[Tuple[torch.FloatTensor]] = None + global_attentions: Optional[Tuple[torch.FloatTensor]] = None + + +@dataclass +class LongformerMaskedLMOutput(ModelOutput): + """ + Base class for masked language models outputs. + + Args: + loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): + Masked language modeling (MLM) loss. + logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): + Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of + shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the initial embedding outputs. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x + + attention_window + 1)`, where `x` is the number of tokens with global attention mask. + + Local attentions weights after the attention softmax, used to compute the weighted average in the + self-attention heads. Those are the attention weights from every token in the sequence to every token with + global attention (first `x` values) and to every token in the attention window (remaining `attention_window + + 1` values). Note that the first `x` values refer to tokens with fixed positions in the text, but the + remaining `attention_window + 1` values refer to tokens with relative positions: the attention weight of a + token to itself is located at index `x + attention_window / 2` and the `attention_window / 2` preceding + (succeeding) values are the attention weights to the `attention_window / 2` preceding (succeeding) tokens. 
+ If the attention window contains a token with global attention, the attention weight at the corresponding + index is set to 0; the value should be accessed from the first `x` attention weights. If a token has global + attention, the attention weights to all other tokens in `attentions` is set to 0, the values should be + accessed from `global_attentions`. + global_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x)`, + where `x` is the number of tokens with global attention mask. + + Global attentions weights after the attention softmax, used to compute the weighted average in the + self-attention heads. Those are the attention weights from every token with global attention to every token + in the sequence. + """ + + loss: Optional[torch.FloatTensor] = None + logits: torch.FloatTensor = None + hidden_states: Optional[Tuple[torch.FloatTensor]] = None + attentions: Optional[Tuple[torch.FloatTensor]] = None + global_attentions: Optional[Tuple[torch.FloatTensor]] = None + + +@dataclass +class LongformerQuestionAnsweringModelOutput(ModelOutput): + """ + Base class for outputs of question answering Longformer models. + + Args: + loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): + Total span extraction loss is the sum of a Cross-Entropy for the start and end positions. + start_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): + Span-start scores (before SoftMax). + end_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): + Span-end scores (before SoftMax). + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of + shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the initial embedding outputs. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x + + attention_window + 1)`, where `x` is the number of tokens with global attention mask. + + Local attentions weights after the attention softmax, used to compute the weighted average in the + self-attention heads. Those are the attention weights from every token in the sequence to every token with + global attention (first `x` values) and to every token in the attention window (remaining `attention_window + + 1` values). Note that the first `x` values refer to tokens with fixed positions in the text, but the + remaining `attention_window + 1` values refer to tokens with relative positions: the attention weight of a + token to itself is located at index `x + attention_window / 2` and the `attention_window / 2` preceding + (succeeding) values are the attention weights to the `attention_window / 2` preceding (succeeding) tokens. + If the attention window contains a token with global attention, the attention weight at the corresponding + index is set to 0; the value should be accessed from the first `x` attention weights. 
If a token has global + attention, the attention weights to all other tokens in `attentions` is set to 0, the values should be + accessed from `global_attentions`. + global_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x)`, + where `x` is the number of tokens with global attention mask. + + Global attentions weights after the attention softmax, used to compute the weighted average in the + self-attention heads. Those are the attention weights from every token with global attention to every token + in the sequence. + """ + + loss: Optional[torch.FloatTensor] = None + start_logits: torch.FloatTensor = None + end_logits: torch.FloatTensor = None + hidden_states: Optional[Tuple[torch.FloatTensor]] = None + attentions: Optional[Tuple[torch.FloatTensor]] = None + global_attentions: Optional[Tuple[torch.FloatTensor]] = None + + +@dataclass +class LongformerSequenceClassifierOutput(ModelOutput): + """ + Base class for outputs of sentence classification models. + + Args: + loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): + Classification (or regression if config.num_labels==1) loss. + logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`): + Classification (or regression if config.num_labels==1) scores (before SoftMax). + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of + shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the initial embedding outputs. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x + + attention_window + 1)`, where `x` is the number of tokens with global attention mask. + + Local attentions weights after the attention softmax, used to compute the weighted average in the + self-attention heads. Those are the attention weights from every token in the sequence to every token with + global attention (first `x` values) and to every token in the attention window (remaining `attention_window + + 1` values). Note that the first `x` values refer to tokens with fixed positions in the text, but the + remaining `attention_window + 1` values refer to tokens with relative positions: the attention weight of a + token to itself is located at index `x + attention_window / 2` and the `attention_window / 2` preceding + (succeeding) values are the attention weights to the `attention_window / 2` preceding (succeeding) tokens. + If the attention window contains a token with global attention, the attention weight at the corresponding + index is set to 0; the value should be accessed from the first `x` attention weights. If a token has global + attention, the attention weights to all other tokens in `attentions` is set to 0, the values should be + accessed from `global_attentions`. 
+ global_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x)`, + where `x` is the number of tokens with global attention mask. + + Global attentions weights after the attention softmax, used to compute the weighted average in the + self-attention heads. Those are the attention weights from every token with global attention to every token + in the sequence. + """ + + loss: Optional[torch.FloatTensor] = None + logits: torch.FloatTensor = None + hidden_states: Optional[Tuple[torch.FloatTensor]] = None + attentions: Optional[Tuple[torch.FloatTensor]] = None + global_attentions: Optional[Tuple[torch.FloatTensor]] = None + + +@dataclass +class LongformerMultipleChoiceModelOutput(ModelOutput): + """ + Base class for outputs of multiple choice Longformer models. + + Args: + loss (`torch.FloatTensor` of shape *(1,)*, *optional*, returned when `labels` is provided): + Classification loss. + logits (`torch.FloatTensor` of shape `(batch_size, num_choices)`): + *num_choices* is the second dimension of the input tensors. (see *input_ids* above). + + Classification scores (before SoftMax). + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of + shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the initial embedding outputs. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x + + attention_window + 1)`, where `x` is the number of tokens with global attention mask. + + Local attentions weights after the attention softmax, used to compute the weighted average in the + self-attention heads. Those are the attention weights from every token in the sequence to every token with + global attention (first `x` values) and to every token in the attention window (remaining `attention_window + + 1` values). Note that the first `x` values refer to tokens with fixed positions in the text, but the + remaining `attention_window + 1` values refer to tokens with relative positions: the attention weight of a + token to itself is located at index `x + attention_window / 2` and the `attention_window / 2` preceding + (succeeding) values are the attention weights to the `attention_window / 2` preceding (succeeding) tokens. + If the attention window contains a token with global attention, the attention weight at the corresponding + index is set to 0; the value should be accessed from the first `x` attention weights. If a token has global + attention, the attention weights to all other tokens in `attentions` is set to 0, the values should be + accessed from `global_attentions`. + global_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x)`, + where `x` is the number of tokens with global attention mask. 
+ + Global attentions weights after the attention softmax, used to compute the weighted average in the + self-attention heads. Those are the attention weights from every token with global attention to every token + in the sequence. + """ + + loss: Optional[torch.FloatTensor] = None + logits: torch.FloatTensor = None + hidden_states: Optional[Tuple[torch.FloatTensor]] = None + attentions: Optional[Tuple[torch.FloatTensor]] = None + global_attentions: Optional[Tuple[torch.FloatTensor]] = None + + +@dataclass +class LongformerTokenClassifierOutput(ModelOutput): + """ + Base class for outputs of token classification models. + + Args: + loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided) : + Classification loss. + logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_labels)`): + Classification scores (before SoftMax). + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of + shape `(batch_size, sequence_length, hidden_size)`. + + Hidden-states of the model at the output of each layer plus the initial embedding outputs. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x + + attention_window + 1)`, where `x` is the number of tokens with global attention mask. + + Local attentions weights after the attention softmax, used to compute the weighted average in the + self-attention heads. Those are the attention weights from every token in the sequence to every token with + global attention (first `x` values) and to every token in the attention window (remaining `attention_window + + 1` values). Note that the first `x` values refer to tokens with fixed positions in the text, but the + remaining `attention_window + 1` values refer to tokens with relative positions: the attention weight of a + token to itself is located at index `x + attention_window / 2` and the `attention_window / 2` preceding + (succeeding) values are the attention weights to the `attention_window / 2` preceding (succeeding) tokens. + If the attention window contains a token with global attention, the attention weight at the corresponding + index is set to 0; the value should be accessed from the first `x` attention weights. If a token has global + attention, the attention weights to all other tokens in `attentions` is set to 0, the values should be + accessed from `global_attentions`. + global_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, x)`, + where `x` is the number of tokens with global attention mask. + + Global attentions weights after the attention softmax, used to compute the weighted average in the + self-attention heads. Those are the attention weights from every token with global attention to every token + in the sequence. 
+ """ + + loss: Optional[torch.FloatTensor] = None + logits: torch.FloatTensor = None + hidden_states: Optional[Tuple[torch.FloatTensor]] = None + attentions: Optional[Tuple[torch.FloatTensor]] = None + global_attentions: Optional[Tuple[torch.FloatTensor]] = None + + +def _get_question_end_index(input_ids, sep_token_id): + """ + Computes the index of the first occurrence of `sep_token_id`. + """ + + sep_token_indices = (input_ids == sep_token_id).nonzero() + batch_size = input_ids.shape[0] + + assert sep_token_indices.shape[1] == 2, "`input_ids` should have two dimensions" + assert sep_token_indices.shape[0] == 3 * batch_size, ( + f"There should be exactly three separator tokens: {sep_token_id} in every sample for questions answering. You" + " might also consider to set `global_attention_mask` manually in the forward function to avoid this error." + ) + return sep_token_indices.view(batch_size, 3, 2)[:, 0, 1] + + +def _compute_global_attention_mask(input_ids, sep_token_id, before_sep_token=True): + """ + Computes global attention mask by putting attention on all tokens before `sep_token_id` if `before_sep_token is + True` else after `sep_token_id`. + """ + question_end_index = _get_question_end_index(input_ids, sep_token_id) + question_end_index = question_end_index.unsqueeze(dim=1) # size: batch_size x 1 + # bool attention mask with True in locations of global attention + attention_mask = torch.arange(input_ids.shape[1], device=input_ids.device) + if before_sep_token is True: + attention_mask = (attention_mask.expand_as(input_ids) < question_end_index).to(torch.bool) + else: + # last token is separation token and should not be counted and in the middle are two separation tokens + attention_mask = (attention_mask.expand_as(input_ids) > (question_end_index + 1)).to(torch.bool) * ( + attention_mask.expand_as(input_ids) < input_ids.shape[-1] + ).to(torch.bool) + + return attention_mask + + +def create_position_ids_from_input_ids(input_ids, padding_idx): + """ + Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols + are ignored. This is modified from fairseq's `utils.make_positions`. + + Args: + x: torch.Tensor x: + + Returns: torch.Tensor + """ + # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA. + mask = input_ids.ne(padding_idx).int() + incremental_indices = torch.cumsum(mask, dim=1).type_as(mask) * mask + return incremental_indices.long() + padding_idx + + +class LongformerEmbeddings(nn.Module): + """ + Same as BertEmbeddings with a tiny tweak for positional embeddings indexing. 
+ """ + + def __init__(self, config): + super().__init__() + self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) + self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) + self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) + + # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load + # any TensorFlow checkpoint file + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") + + self.padding_idx = config.pad_token_id + self.position_embeddings = nn.Embedding( + config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx + ) + + def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None): + if position_ids is None: + if input_ids is not None: + # Create the position ids from the input token ids. Any padded tokens remain padded. + position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx).to(input_ids.device) + else: + position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds) + + if input_ids is not None: + input_shape = input_ids.size() + else: + input_shape = inputs_embeds.size()[:-1] + + if token_type_ids is None: + token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=position_ids.device) + + if inputs_embeds is None: + inputs_embeds = self.word_embeddings(input_ids) + position_embeddings = self.position_embeddings(position_ids) + token_type_embeddings = self.token_type_embeddings(token_type_ids) + + embeddings = inputs_embeds + position_embeddings + token_type_embeddings + embeddings = self.LayerNorm(embeddings) + embeddings = self.dropout(embeddings) + return embeddings + + def create_position_ids_from_inputs_embeds(self, inputs_embeds): + """ + We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids. 
+ + Args: + inputs_embeds: torch.Tensor inputs_embeds: + + Returns: torch.Tensor + """ + input_shape = inputs_embeds.size()[:-1] + sequence_length = input_shape[1] + + position_ids = torch.arange( + self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device + ) + return position_ids.unsqueeze(0).expand(input_shape) + + +class LongformerSelfAttention(nn.Module): + def __init__(self, config, layer_id): + super().__init__() + if config.hidden_size % config.num_attention_heads != 0: + raise ValueError( + f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " + f"heads ({config.num_attention_heads})" + ) + self.num_heads = config.num_attention_heads + self.head_dim = int(config.hidden_size / config.num_attention_heads) + self.embed_dim = config.hidden_size + + self.query = nn.Linear(config.hidden_size, self.embed_dim) + self.key = nn.Linear(config.hidden_size, self.embed_dim) + self.value = nn.Linear(config.hidden_size, self.embed_dim) + + # separate projection layers for tokens with global attention + self.query_global = nn.Linear(config.hidden_size, self.embed_dim) + self.key_global = nn.Linear(config.hidden_size, self.embed_dim) + self.value_global = nn.Linear(config.hidden_size, self.embed_dim) + + self.dropout = config.attention_probs_dropout_prob + + self.layer_id = layer_id + attention_window = config.attention_window[self.layer_id] + assert ( + attention_window % 2 == 0 + ), f"`attention_window` for layer {self.layer_id} has to be an even value. Given {attention_window}" + assert ( + attention_window > 0 + ), f"`attention_window` for layer {self.layer_id} has to be positive. Given {attention_window}" + + self.one_sided_attn_window_size = attention_window // 2 + + self.config = config + + def forward( + self, + hidden_states, + attention_mask=None, + layer_head_mask=None, + is_index_masked=None, + is_index_global_attn=None, + is_global_attn=None, + output_attentions=False, + ): + """ + [`LongformerSelfAttention`] expects *len(hidden_states)* to be multiple of *attention_window*. Padding to + *attention_window* happens in [`LongformerModel.forward`] to avoid redoing the padding on each layer. 
+ + The *attention_mask* is changed in [`LongformerModel.forward`] from 0, 1, 2 to: + + - -10000: no attention + - 0: local attention + - +10000: global attention + """ + hidden_states = hidden_states.transpose(0, 1) + + # project hidden states + query_vectors = self.query(hidden_states) + key_vectors = self.key(hidden_states) + value_vectors = self.value(hidden_states) + + seq_len, batch_size, embed_dim = hidden_states.size() + assert ( + embed_dim == self.embed_dim + ), f"hidden_states should have embed_dim = {self.embed_dim}, but has {embed_dim}" + + # normalize query + query_vectors /= math.sqrt(self.head_dim) + + query_vectors = query_vectors.view(seq_len, batch_size, self.num_heads, self.head_dim).transpose(0, 1) + key_vectors = key_vectors.view(seq_len, batch_size, self.num_heads, self.head_dim).transpose(0, 1) + + attn_scores = self._sliding_chunks_query_key_matmul( + query_vectors, key_vectors, self.one_sided_attn_window_size + ) + + # values to pad for attention probs + remove_from_windowed_attention_mask = (attention_mask != 0)[:, :, None, None] + + # cast to fp32/fp16 then replace 1's with -inf + float_mask = remove_from_windowed_attention_mask.type_as(query_vectors).masked_fill( + remove_from_windowed_attention_mask, torch.finfo(query_vectors.dtype).min + ) + # diagonal mask with zeros everywhere and -inf inplace of padding + diagonal_mask = self._sliding_chunks_query_key_matmul( + float_mask.new_ones(size=float_mask.size()), float_mask, self.one_sided_attn_window_size + ) + + # pad local attention probs + attn_scores += diagonal_mask + + assert list(attn_scores.size()) == [ + batch_size, + seq_len, + self.num_heads, + self.one_sided_attn_window_size * 2 + 1, + ], ( + f"local_attn_probs should be of size ({batch_size}, {seq_len}, {self.num_heads}," + f" {self.one_sided_attn_window_size * 2 + 1}), but is of size {attn_scores.size()}" + ) + + # compute local attention probs from global attention keys and contact over window dim + if is_global_attn: + # compute global attn indices required through out forward fn + ( + max_num_global_attn_indices, + is_index_global_attn_nonzero, + is_local_index_global_attn_nonzero, + is_local_index_no_global_attn_nonzero, + ) = self._get_global_attn_indices(is_index_global_attn) + # calculate global attn probs from global key + + global_key_attn_scores = self._concat_with_global_key_attn_probs( + query_vectors=query_vectors, + key_vectors=key_vectors, + max_num_global_attn_indices=max_num_global_attn_indices, + is_index_global_attn_nonzero=is_index_global_attn_nonzero, + is_local_index_global_attn_nonzero=is_local_index_global_attn_nonzero, + is_local_index_no_global_attn_nonzero=is_local_index_no_global_attn_nonzero, + ) + # concat to local_attn_probs + # (batch_size, seq_len, num_heads, extra attention count + 2*window+1) + attn_scores = torch.cat((global_key_attn_scores, attn_scores), dim=-1) + + # free memory + del global_key_attn_scores + + attn_probs = nn.functional.softmax( + attn_scores, dim=-1, dtype=torch.float32 + ) # use fp32 for numerical stability + + if layer_head_mask is not None: + assert layer_head_mask.size() == ( + self.num_heads, + ), f"Head mask for a single layer should be of size {(self.num_heads,)}, but is {layer_head_mask.size()}" + attn_probs = layer_head_mask.view(1, 1, -1, 1) * attn_probs + + # softmax sometimes inserts NaN if all positions are masked, replace them with 0 + attn_probs = torch.masked_fill(attn_probs, is_index_masked[:, :, None, None], 0.0) + attn_probs = attn_probs.type_as(attn_scores) + + # free 
memory + del attn_scores + + # apply dropout + attn_probs = nn.functional.dropout(attn_probs, p=self.dropout, training=self.training) + + value_vectors = value_vectors.view(seq_len, batch_size, self.num_heads, self.head_dim).transpose(0, 1) + + # compute local attention output with global attention value and add + if is_global_attn: + # compute sum of global and local attn + attn_output = self._compute_attn_output_with_global_indices( + value_vectors=value_vectors, + attn_probs=attn_probs, + max_num_global_attn_indices=max_num_global_attn_indices, + is_index_global_attn_nonzero=is_index_global_attn_nonzero, + is_local_index_global_attn_nonzero=is_local_index_global_attn_nonzero, + ) + else: + # compute local attn only + attn_output = self._sliding_chunks_matmul_attn_probs_value( + attn_probs, value_vectors, self.one_sided_attn_window_size + ) + + assert attn_output.size() == (batch_size, seq_len, self.num_heads, self.head_dim), "Unexpected size" + attn_output = attn_output.transpose(0, 1).reshape(seq_len, batch_size, embed_dim).contiguous() + + # compute value for global attention and overwrite to attention output + # TODO: remove the redundant computation + if is_global_attn: + global_attn_output, global_attn_probs = self._compute_global_attn_output_from_hidden( + hidden_states=hidden_states, + max_num_global_attn_indices=max_num_global_attn_indices, + layer_head_mask=layer_head_mask, + is_local_index_global_attn_nonzero=is_local_index_global_attn_nonzero, + is_index_global_attn_nonzero=is_index_global_attn_nonzero, + is_local_index_no_global_attn_nonzero=is_local_index_no_global_attn_nonzero, + is_index_masked=is_index_masked, + ) + + # get only non zero global attn output + nonzero_global_attn_output = global_attn_output[ + is_local_index_global_attn_nonzero[0], :, is_local_index_global_attn_nonzero[1] + ] + + # overwrite values with global attention + attn_output[is_index_global_attn_nonzero[::-1]] = nonzero_global_attn_output.view( + len(is_local_index_global_attn_nonzero[0]), -1 + ) + # The attention weights for tokens with global attention are + # just filler values, they were never used to compute the output. + # Fill with 0 now, the correct values are in 'global_attn_probs'. + attn_probs[is_index_global_attn_nonzero] = 0 + + outputs = (attn_output.transpose(0, 1),) + + if output_attentions: + outputs += (attn_probs,) + + return outputs + (global_attn_probs,) if (is_global_attn and output_attentions) else outputs + + @staticmethod + def _pad_and_transpose_last_two_dims(hidden_states_padded, padding): + """pads rows and then flips rows and columns""" + hidden_states_padded = nn.functional.pad( + hidden_states_padded, padding + ) # padding value is not important because it will be overwritten + hidden_states_padded = hidden_states_padded.view( + *hidden_states_padded.size()[:-2], hidden_states_padded.size(-1), hidden_states_padded.size(-2) + ) + return hidden_states_padded + + @staticmethod + def _pad_and_diagonalize(chunked_hidden_states): + """ + shift every row 1 step right, converting columns into diagonals. 
+ + Example: + + ```python + chunked_hidden_states: [ + 0.4983, + 2.6918, + -0.0071, + 1.0492, + -1.8348, + 0.7672, + 0.2986, + 0.0285, + -0.7584, + 0.4206, + -0.0405, + 0.1599, + 2.0514, + -1.1600, + 0.5372, + 0.2629, + ] + window_overlap = num_rows = 4 + ``` + + (pad & diagonalize) => [ 0.4983, 2.6918, -0.0071, 1.0492, 0.0000, 0.0000, 0.0000 + 0.0000, -1.8348, 0.7672, 0.2986, 0.0285, 0.0000, 0.0000 0.0000, 0.0000, -0.7584, 0.4206, + -0.0405, 0.1599, 0.0000 0.0000, 0.0000, 0.0000, 2.0514, -1.1600, 0.5372, 0.2629 ] + """ + total_num_heads, num_chunks, window_overlap, hidden_dim = chunked_hidden_states.size() + chunked_hidden_states = nn.functional.pad( + chunked_hidden_states, (0, window_overlap + 1) + ) # total_num_heads x num_chunks x window_overlap x (hidden_dim+window_overlap+1). Padding value is not important because it'll be overwritten + chunked_hidden_states = chunked_hidden_states.view( + total_num_heads, num_chunks, -1 + ) # total_num_heads x num_chunks x window_overlap*window_overlap+window_overlap + chunked_hidden_states = chunked_hidden_states[ + :, :, :-window_overlap + ] # total_num_heads x num_chunks x window_overlap*window_overlap + chunked_hidden_states = chunked_hidden_states.view( + total_num_heads, num_chunks, window_overlap, window_overlap + hidden_dim + ) + chunked_hidden_states = chunked_hidden_states[:, :, :, :-1] + return chunked_hidden_states + + @staticmethod + def _chunk(hidden_states, window_overlap, onnx_export: bool = False): + """convert into overlapping chunks. Chunk size = 2w, overlap size = w""" + if not onnx_export: + # non-overlapping chunks of size = 2w + hidden_states = hidden_states.view( + hidden_states.size(0), + torch.div(hidden_states.size(1), (window_overlap * 2), rounding_mode="trunc"), + window_overlap * 2, + hidden_states.size(2), + ) + # use `as_strided` to make the chunks overlap with an overlap size = window_overlap + chunk_size = list(hidden_states.size()) + chunk_size[1] = chunk_size[1] * 2 - 1 + + chunk_stride = list(hidden_states.stride()) + chunk_stride[1] = chunk_stride[1] // 2 + return hidden_states.as_strided(size=chunk_size, stride=chunk_stride) + + # When exporting to ONNX, use this separate logic + # have to use slow implementation since as_strided, unfold and 2d-tensor indexing aren't supported (yet) in ONNX export + + # TODO replace this with + # > return hidden_states.unfold(dimension=1, size=window_overlap * 2, step=window_overlap).transpose(2, 3) + # once `unfold` is supported + # the case hidden_states.size(1) == window_overlap * 2 can also simply return hidden_states.unsqueeze(1), but that's control flow + + chunk_size = [ + hidden_states.size(0), + torch.div(hidden_states.size(1), window_overlap, rounding_mode="trunc") - 1, + window_overlap * 2, + hidden_states.size(2), + ] + + overlapping_chunks = torch.empty(chunk_size, device=hidden_states.device) + for chunk in range(chunk_size[1]): + overlapping_chunks[:, chunk, :, :] = hidden_states[ + :, chunk * window_overlap : chunk * window_overlap + 2 * window_overlap, : + ] + return overlapping_chunks + + @staticmethod + def _mask_invalid_locations(input_tensor, affected_seq_len) -> torch.Tensor: + beginning_mask_2d = input_tensor.new_ones(affected_seq_len, affected_seq_len + 1).tril().flip(dims=[0]) + beginning_mask = beginning_mask_2d[None, :, None, :] + ending_mask = beginning_mask.flip(dims=(1, 3)) + beginning_input = input_tensor[:, :affected_seq_len, :, : affected_seq_len + 1] + beginning_mask = beginning_mask.expand(beginning_input.size()) + input_tensor[:, 
:affected_seq_len, :, : affected_seq_len + 1] = torch.full_like( + beginning_input, -float("inf") + ).where(beginning_mask.bool(), beginning_input) + ending_input = input_tensor[:, -affected_seq_len:, :, -(affected_seq_len + 1) :] + ending_mask = ending_mask.expand(ending_input.size()) + input_tensor[:, -affected_seq_len:, :, -(affected_seq_len + 1) :] = torch.full_like( + ending_input, -float("inf") + ).where(ending_mask.bool(), ending_input) + + def _sliding_chunks_query_key_matmul(self, query: torch.Tensor, key: torch.Tensor, window_overlap: int): + """ + Matrix multiplication of query and key tensors using with a sliding window attention pattern. This + implementation splits the input into overlapping chunks of size 2w (e.g. 512 for pretrained Longformer) with an + overlap of size window_overlap + """ + batch_size, seq_len, num_heads, head_dim = query.size() + assert ( + seq_len % (window_overlap * 2) == 0 + ), f"Sequence length should be multiple of {window_overlap * 2}. Given {seq_len}" + assert query.size() == key.size() + + chunks_count = torch.div(seq_len, window_overlap, rounding_mode="trunc") - 1 + + # group batch_size and num_heads dimensions into one, then chunk seq_len into chunks of size window_overlap * 2 + query = query.transpose(1, 2).reshape(batch_size * num_heads, seq_len, head_dim) + key = key.transpose(1, 2).reshape(batch_size * num_heads, seq_len, head_dim) + + query = self._chunk(query, window_overlap, getattr(self.config, "onnx_export", False)) + key = self._chunk(key, window_overlap, getattr(self.config, "onnx_export", False)) + # query = self._chunk(query, window_overlap, False) + # key = self._chunk(key, window_overlap, False) + + # matrix multiplication + # bcxd: batch_size * num_heads x chunks x 2window_overlap x head_dim + # bcyd: batch_size * num_heads x chunks x 2window_overlap x head_dim + # bcxy: batch_size * num_heads x chunks x 2window_overlap x 2window_overlap + diagonal_chunked_attention_scores = torch.einsum("bcxd,bcyd->bcxy", (query, key)) # multiply + + # convert diagonals into columns + diagonal_chunked_attention_scores = self._pad_and_transpose_last_two_dims( + diagonal_chunked_attention_scores, padding=(0, 0, 0, 1) + ) + + # allocate space for the overall attention matrix where the chunks are combined. The last dimension + # has (window_overlap * 2 + 1) columns. The first (window_overlap) columns are the window_overlap lower triangles (attention from a word to + # window_overlap previous words). The following column is attention score from each word to itself, then + # followed by window_overlap columns for the upper triangle. 
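+        # For intuition (illustrative example, not part of the upstream comment): with
+        # window_overlap = 2, every query position t ends up with 2 * 2 + 1 = 5 score
+        # columns, ordered [t-2, t-1, t, t+1, t+2]; columns that would fall outside the
+        # sequence are masked to -inf by _mask_invalid_locations at the end of this function.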
+ + diagonal_attention_scores = diagonal_chunked_attention_scores.new_zeros( + (batch_size * num_heads, chunks_count + 1, window_overlap, window_overlap * 2 + 1) + ) + + # copy parts from diagonal_chunked_attention_scores into the combined matrix of attentions + # - copying the main diagonal and the upper triangle + diagonal_attention_scores[:, :-1, :, window_overlap:] = diagonal_chunked_attention_scores[ + :, :, :window_overlap, : window_overlap + 1 + ] + diagonal_attention_scores[:, -1, :, window_overlap:] = diagonal_chunked_attention_scores[ + :, -1, window_overlap:, : window_overlap + 1 + ] + # - copying the lower triangle + diagonal_attention_scores[:, 1:, :, :window_overlap] = diagonal_chunked_attention_scores[ + :, :, -(window_overlap + 1) : -1, window_overlap + 1 : + ] + + diagonal_attention_scores[:, 0, 1:window_overlap, 1:window_overlap] = diagonal_chunked_attention_scores[ + :, 0, : window_overlap - 1, 1 - window_overlap : + ] + + # separate batch_size and num_heads dimensions again + diagonal_attention_scores = diagonal_attention_scores.view( + batch_size, num_heads, seq_len, 2 * window_overlap + 1 + ).transpose(2, 1) + + self._mask_invalid_locations(diagonal_attention_scores, window_overlap) + return diagonal_attention_scores + + def _sliding_chunks_matmul_attn_probs_value( + self, attn_probs: torch.Tensor, value: torch.Tensor, window_overlap: int + ): + """ + Same as _sliding_chunks_query_key_matmul but for attn_probs and value tensors. Returned tensor will be of the + same shape as `attn_probs` + """ + batch_size, seq_len, num_heads, head_dim = value.size() + + assert seq_len % (window_overlap * 2) == 0 + assert attn_probs.size()[:3] == value.size()[:3] + assert attn_probs.size(3) == 2 * window_overlap + 1 + chunks_count = torch.div(seq_len, window_overlap, rounding_mode="trunc") - 1 + # group batch_size and num_heads dimensions into one, then chunk seq_len into chunks of size 2 window overlap + + chunked_attn_probs = attn_probs.transpose(1, 2).reshape( + batch_size * num_heads, + torch.div(seq_len, window_overlap, rounding_mode="trunc"), + window_overlap, + 2 * window_overlap + 1, + ) + + # group batch_size and num_heads dimensions into one + value = value.transpose(1, 2).reshape(batch_size * num_heads, seq_len, head_dim) + + # pad seq_len with w at the beginning of the sequence and another window overlap at the end + padded_value = nn.functional.pad(value, (0, 0, window_overlap, window_overlap), value=-1) + + # chunk padded_value into chunks of size 3 window overlap and an overlap of size window overlap + chunked_value_size = (batch_size * num_heads, chunks_count + 1, 3 * window_overlap, head_dim) + chunked_value_stride = padded_value.stride() + chunked_value_stride = ( + chunked_value_stride[0], + window_overlap * chunked_value_stride[1], + chunked_value_stride[1], + chunked_value_stride[2], + ) + chunked_value = padded_value.as_strided(size=chunked_value_size, stride=chunked_value_stride) + + chunked_attn_probs = self._pad_and_diagonalize(chunked_attn_probs) + + context = torch.einsum("bcwd,bcdh->bcwh", (chunked_attn_probs, chunked_value)) + return context.view(batch_size, num_heads, seq_len, head_dim).transpose(1, 2) + + @staticmethod + def _get_global_attn_indices(is_index_global_attn): + """compute global attn indices required throughout forward pass""" + # helper variable + num_global_attn_indices = is_index_global_attn.long().sum(dim=1) + + # max number of global attn indices in batch + max_num_global_attn_indices = num_global_attn_indices.max() + + # indices of 
global attn + is_index_global_attn_nonzero = is_index_global_attn.nonzero(as_tuple=True) + + # helper variable + is_local_index_global_attn = torch.arange( + max_num_global_attn_indices, device=is_index_global_attn.device + ) < num_global_attn_indices.unsqueeze(dim=-1) + + # location of the non-padding values within global attention indices + is_local_index_global_attn_nonzero = is_local_index_global_attn.nonzero(as_tuple=True) + + # location of the padding values within global attention indices + is_local_index_no_global_attn_nonzero = (is_local_index_global_attn == 0).nonzero(as_tuple=True) + return ( + max_num_global_attn_indices, + is_index_global_attn_nonzero, + is_local_index_global_attn_nonzero, + is_local_index_no_global_attn_nonzero, + ) + + def _concat_with_global_key_attn_probs( + self, + key_vectors, + query_vectors, + max_num_global_attn_indices, + is_index_global_attn_nonzero, + is_local_index_global_attn_nonzero, + is_local_index_no_global_attn_nonzero, + ): + batch_size = key_vectors.shape[0] + + # create only global key vectors + key_vectors_only_global = key_vectors.new_zeros( + batch_size, max_num_global_attn_indices, self.num_heads, self.head_dim + ) + + key_vectors_only_global[is_local_index_global_attn_nonzero] = key_vectors[is_index_global_attn_nonzero] + + # (batch_size, seq_len, num_heads, max_num_global_attn_indices) + attn_probs_from_global_key = torch.einsum("blhd,bshd->blhs", (query_vectors, key_vectors_only_global)) + + # need to transpose since ONNX export only supports consecutive indexing: https://pytorch.org/docs/stable/onnx.html#writes-sets + attn_probs_from_global_key = attn_probs_from_global_key.transpose(1, 3) + attn_probs_from_global_key[ + is_local_index_no_global_attn_nonzero[0], is_local_index_no_global_attn_nonzero[1], :, : + ] = torch.finfo(attn_probs_from_global_key.dtype).min + attn_probs_from_global_key = attn_probs_from_global_key.transpose(1, 3) + + return attn_probs_from_global_key + + def _compute_attn_output_with_global_indices( + self, + value_vectors, + attn_probs, + max_num_global_attn_indices, + is_index_global_attn_nonzero, + is_local_index_global_attn_nonzero, + ): + batch_size = attn_probs.shape[0] + + # cut local attn probs to global only + attn_probs_only_global = attn_probs.narrow(-1, 0, max_num_global_attn_indices) + # get value vectors for global only + value_vectors_only_global = value_vectors.new_zeros( + batch_size, max_num_global_attn_indices, self.num_heads, self.head_dim + ) + value_vectors_only_global[is_local_index_global_attn_nonzero] = value_vectors[is_index_global_attn_nonzero] + + # use `matmul` because `einsum` crashes sometimes with fp16 + # attn = torch.einsum('blhs,bshd->blhd', (selected_attn_probs, selected_v)) + # compute attn output only global + attn_output_only_global = torch.matmul( + attn_probs_only_global.transpose(1, 2).clone(), value_vectors_only_global.transpose(1, 2).clone() + ).transpose(1, 2) + + # reshape attn probs + attn_probs_without_global = attn_probs.narrow( + -1, max_num_global_attn_indices, attn_probs.size(-1) - max_num_global_attn_indices + ).contiguous() + + # compute attn output with global + attn_output_without_global = self._sliding_chunks_matmul_attn_probs_value( + attn_probs_without_global, value_vectors, self.one_sided_attn_window_size + ) + return attn_output_only_global + attn_output_without_global + + def _compute_global_attn_output_from_hidden( + self, + hidden_states, + max_num_global_attn_indices, + layer_head_mask, + is_local_index_global_attn_nonzero, + 
is_index_global_attn_nonzero, + is_local_index_no_global_attn_nonzero, + is_index_masked, + ): + seq_len, batch_size = hidden_states.shape[:2] + + # prepare global hidden states + global_attn_hidden_states = hidden_states.new_zeros(max_num_global_attn_indices, batch_size, self.embed_dim) + global_attn_hidden_states[is_local_index_global_attn_nonzero[::-1]] = hidden_states[ + is_index_global_attn_nonzero[::-1] + ] + + # global key, query, value + global_query_vectors_only_global = self.query_global(global_attn_hidden_states) + global_key_vectors = self.key_global(hidden_states) + global_value_vectors = self.value_global(hidden_states) + + # normalize + global_query_vectors_only_global /= math.sqrt(self.head_dim) + + # reshape + global_query_vectors_only_global = ( + global_query_vectors_only_global.contiguous() + .view(max_num_global_attn_indices, batch_size * self.num_heads, self.head_dim) + .transpose(0, 1) + ) # (batch_size * self.num_heads, max_num_global_attn_indices, head_dim) + global_key_vectors = ( + global_key_vectors.contiguous().view(-1, batch_size * self.num_heads, self.head_dim).transpose(0, 1) + ) # batch_size * self.num_heads, seq_len, head_dim) + global_value_vectors = ( + global_value_vectors.contiguous().view(-1, batch_size * self.num_heads, self.head_dim).transpose(0, 1) + ) # batch_size * self.num_heads, seq_len, head_dim) + + # compute attn scores + global_attn_scores = torch.bmm(global_query_vectors_only_global, global_key_vectors.transpose(1, 2)) + + assert list(global_attn_scores.size()) == [ + batch_size * self.num_heads, + max_num_global_attn_indices, + seq_len, + ], ( + "global_attn_scores have the wrong size. Size should be" + f" {(batch_size * self.num_heads, max_num_global_attn_indices, seq_len)}, but is" + f" {global_attn_scores.size()}." 
+ ) + + global_attn_scores = global_attn_scores.view(batch_size, self.num_heads, max_num_global_attn_indices, seq_len) + + # need to transpose since ONNX export only supports consecutive indexing: https://pytorch.org/docs/stable/onnx.html#writes-sets + global_attn_scores = global_attn_scores.transpose(1, 2) + global_attn_scores[ + is_local_index_no_global_attn_nonzero[0], is_local_index_no_global_attn_nonzero[1], :, : + ] = torch.finfo(global_attn_scores.dtype).min + global_attn_scores = global_attn_scores.transpose(1, 2) + + global_attn_scores = global_attn_scores.masked_fill( + is_index_masked[:, None, None, :], + torch.finfo(global_attn_scores.dtype).min, + ) + + global_attn_scores = global_attn_scores.view(batch_size * self.num_heads, max_num_global_attn_indices, seq_len) + + # compute global attn probs + global_attn_probs_float = nn.functional.softmax( + global_attn_scores, dim=-1, dtype=torch.float32 + ) # use fp32 for numerical stability + + # apply layer head masking + if layer_head_mask is not None: + assert layer_head_mask.size() == ( + self.num_heads, + ), f"Head mask for a single layer should be of size {(self.num_heads,)}, but is {layer_head_mask.size()}" + global_attn_probs_float = layer_head_mask.view(1, -1, 1, 1) * global_attn_probs_float.view( + batch_size, self.num_heads, max_num_global_attn_indices, seq_len + ) + global_attn_probs_float = global_attn_probs_float.view( + batch_size * self.num_heads, max_num_global_attn_indices, seq_len + ) + + global_attn_probs = nn.functional.dropout( + global_attn_probs_float.type_as(global_attn_scores), p=self.dropout, training=self.training + ) + + # global attn output + global_attn_output = torch.bmm(global_attn_probs, global_value_vectors) + + assert list(global_attn_output.size()) == [ + batch_size * self.num_heads, + max_num_global_attn_indices, + self.head_dim, + ], ( + "global_attn_output tensor has the wrong size. Size should be" + f" {(batch_size * self.num_heads, max_num_global_attn_indices, self.head_dim)}, but is" + f" {global_attn_output.size()}." 
+ ) + + global_attn_probs = global_attn_probs.view(batch_size, self.num_heads, max_num_global_attn_indices, seq_len) + global_attn_output = global_attn_output.view( + batch_size, self.num_heads, max_num_global_attn_indices, self.head_dim + ) + return global_attn_output, global_attn_probs + + +# Copied from transformers.models.bert.modeling_bert.BertSelfOutput +class LongformerSelfOutput(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states) + hidden_states = self.LayerNorm(hidden_states + input_tensor) + return hidden_states + + +class LongformerAttention(nn.Module): + def __init__(self, config, layer_id=0): + super().__init__() + self.self = LongformerSelfAttention(config, layer_id) + self.output = LongformerSelfOutput(config) + self.pruned_heads = set() + + def prune_heads(self, heads): + if len(heads) == 0: + return + heads, index = find_pruneable_heads_and_indices( + heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads + ) + + # Prune linear layers + self.self.query = prune_linear_layer(self.self.query, index) + self.self.key = prune_linear_layer(self.self.key, index) + self.self.value = prune_linear_layer(self.self.value, index) + self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) + + # Update hyper params and store pruned heads + self.self.num_attention_heads = self.self.num_attention_heads - len(heads) + self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads + self.pruned_heads = self.pruned_heads.union(heads) + + def forward( + self, + hidden_states, + attention_mask=None, + layer_head_mask=None, + is_index_masked=None, + is_index_global_attn=None, + is_global_attn=None, + output_attentions=False, + ): + self_outputs = self.self( + hidden_states, + attention_mask=attention_mask, + layer_head_mask=layer_head_mask, + is_index_masked=is_index_masked, + is_index_global_attn=is_index_global_attn, + is_global_attn=is_global_attn, + output_attentions=output_attentions, + ) + attn_output = self.output(self_outputs[0], hidden_states) + outputs = (attn_output,) + self_outputs[1:] + return outputs + + +# Copied from transformers.models.bert.modeling_bert.BertIntermediate +class LongformerIntermediate(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.intermediate_size) + if isinstance(config.hidden_act, str): + self.intermediate_act_fn = ACT2FN[config.hidden_act] + else: + self.intermediate_act_fn = config.hidden_act + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + hidden_states = self.dense(hidden_states) + hidden_states = self.intermediate_act_fn(hidden_states) + return hidden_states + + +# Copied from transformers.models.bert.modeling_bert.BertOutput +class LongformerOutput(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.intermediate_size, config.hidden_size) + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: + 
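+        # Project the intermediate (FFN) activations back down to hidden_size, then apply
+        # dropout and a residual connection followed by LayerNorm, mirroring BertOutput.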
hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states) + hidden_states = self.LayerNorm(hidden_states + input_tensor) + return hidden_states + + +class LongformerLayer(nn.Module): + def __init__(self, config, layer_id=0): + super().__init__() + self.attention = LongformerAttention(config, layer_id) + self.intermediate = LongformerIntermediate(config) + self.output = LongformerOutput(config) + self.chunk_size_feed_forward = config.chunk_size_feed_forward + self.seq_len_dim = 1 + + def forward( + self, + hidden_states, + attention_mask=None, + layer_head_mask=None, + is_index_masked=None, + is_index_global_attn=None, + is_global_attn=None, + output_attentions=False, + ): + self_attn_outputs = self.attention( + hidden_states, + attention_mask=attention_mask, + layer_head_mask=layer_head_mask, + is_index_masked=is_index_masked, + is_index_global_attn=is_index_global_attn, + is_global_attn=is_global_attn, + output_attentions=output_attentions, + ) + attn_output = self_attn_outputs[0] + outputs = self_attn_outputs[1:] + + layer_output = apply_chunking_to_forward( + self.ff_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attn_output + ) + outputs = (layer_output,) + outputs + return outputs + + def ff_chunk(self, attn_output): + intermediate_output = self.intermediate(attn_output) + layer_output = self.output(intermediate_output, attn_output) + return layer_output + + +class LongformerEncoder(nn.Module): + def __init__(self, config): + super().__init__() + self.config = config + self.layer = nn.ModuleList([LongformerLayer(config, layer_id=i) for i in range(config.num_hidden_layers)]) + self.gradient_checkpointing = False + + def forward( + self, + hidden_states, + attention_mask=None, + head_mask=None, + padding_len=0, + output_attentions=False, + output_hidden_states=False, + return_dict=True, + ): + is_index_masked = attention_mask < 0 + is_index_global_attn = attention_mask > 0 + + # Record `is_global_attn == True` to enable ONNX export + is_global_attn = is_index_global_attn.flatten().any().item() + # is_global_attn = False + + all_hidden_states = () if output_hidden_states else None + all_attentions = () if output_attentions else None # All local attentions. + all_global_attentions = () if (output_attentions and is_global_attn) else None + + # check if head_mask has a correct number of layers specified if desired + if head_mask is not None: + assert head_mask.size()[0] == ( + len(self.layer) + ), f"The head_mask should be specified for {len(self.layer)} layers, but it is for {head_mask.size()[0]}." 
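+        # Run each Longformer layer in turn; when gradient checkpointing is enabled during
+        # training, layer activations are recomputed in the backward pass to save memory.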
+ for idx, layer_module in enumerate(self.layer): + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + if self.gradient_checkpointing and self.training: + + def create_custom_forward(module): + def custom_forward(*inputs): + return module(*inputs, is_global_attn, output_attentions) + + return custom_forward + + layer_outputs = torch.utils.checkpoint.checkpoint( + create_custom_forward(layer_module), + hidden_states, + attention_mask, + head_mask[idx] if head_mask is not None else None, + is_index_masked, + is_index_global_attn, + ) + else: + layer_outputs = layer_module( + hidden_states, + attention_mask=attention_mask, + layer_head_mask=head_mask[idx] if head_mask is not None else None, + is_index_masked=is_index_masked, + is_index_global_attn=is_index_global_attn, + is_global_attn=is_global_attn, + output_attentions=output_attentions, + ) + hidden_states = layer_outputs[0] + + if output_attentions: + # bzs x seq_len x num_attn_heads x (num_global_attn + attention_window_len + 1) => bzs x num_attn_heads x seq_len x (num_global_attn + attention_window_len + 1) + all_attentions = all_attentions + (layer_outputs[1].transpose(1, 2),) + + if is_global_attn: + # bzs x num_attn_heads x num_global_attn x seq_len => bzs x num_attn_heads x seq_len x num_global_attn + all_global_attentions = all_global_attentions + (layer_outputs[2].transpose(2, 3),) + + # Add last layer + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + # undo padding if necessary + # unpad `hidden_states` because the calling function is expecting a length == input_ids.size(1) + hidden_states = hidden_states[:, : hidden_states.shape[1] - padding_len] + if output_hidden_states: + all_hidden_states = tuple([state[:, : state.shape[1] - padding_len] for state in all_hidden_states]) + + if output_attentions: + all_attentions = tuple([state[:, :, : state.shape[2] - padding_len, :] for state in all_attentions]) + + if not return_dict: + return tuple( + v for v in [hidden_states, all_hidden_states, all_attentions, all_global_attentions] if v is not None + ) + return LongformerBaseModelOutput( + last_hidden_state=hidden_states, + hidden_states=all_hidden_states, + attentions=all_attentions, + global_attentions=all_global_attentions, + ) + + +# Copied from transformers.models.bert.modeling_bert.BertPooler +class LongformerPooler(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + self.activation = nn.Tanh() + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + # We "pool" the model by simply taking the hidden state corresponding + # to the first token. 
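+        # hidden_states has shape (batch_size, seq_len, hidden_size); taking index 0
+        # along the sequence axis yields a (batch_size, hidden_size) tensor for the
+        # first (<s>) token.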
+ first_token_tensor = hidden_states[:, 0] + pooled_output = self.dense(first_token_tensor) + pooled_output = self.activation(pooled_output) + return pooled_output + + +# Copied from transformers.models.roberta.modeling_roberta.RobertaLMHead with Roberta->Longformer +class LongformerLMHead(nn.Module): + """Longformer Head for masked language modeling.""" + + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + + self.decoder = nn.Linear(config.hidden_size, config.vocab_size) + self.bias = nn.Parameter(torch.zeros(config.vocab_size)) + self.decoder.bias = self.bias + + def forward(self, features, **kwargs): + x = self.dense(features) + x = gelu(x) + x = self.layer_norm(x) + + # project back to size of vocabulary with bias + x = self.decoder(x) + + return x + + def _tie_weights(self): + # To tie those two weights if they get disconnected (on TPU or when the bias is resized) + # For accelerate compatibility and to not break backward compatibility + if self.decoder.bias.device.type == "meta": + self.decoder.bias = self.bias + else: + self.bias = self.decoder.bias + + +class LongformerPreTrainedModel(PreTrainedModel): + """ + An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained + models. + """ + + config_class = LongformerConfig + base_model_prefix = "longformer" + supports_gradient_checkpointing = True + _keys_to_ignore_on_load_unexpected = [r"position_ids"] + _no_split_modules = ["LongformerSelfAttention"] + + def _init_weights(self, module): + """Initialize the weights""" + if isinstance(module, nn.Linear): + # Slightly different from the TF version which uses truncated_normal for initialization + # cf https://github.com/pytorch/pytorch/pull/5617 + module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) + if module.bias is not None: + module.bias.data.zero_() + elif isinstance(module, nn.Embedding): + module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) + if module.padding_idx is not None: + module.weight.data[module.padding_idx].zero_() + elif isinstance(module, nn.LayerNorm): + module.bias.data.zero_() + module.weight.data.fill_(1.0) + + def _set_gradient_checkpointing(self, module, value=False): + if isinstance(module, LongformerEncoder): + module.gradient_checkpointing = value + + +LONGFORMER_START_DOCSTRING = r""" + + This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the + library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads + etc.) + + This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. + Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage + and behavior. + + Parameters: + config ([`LongformerConfig`]): Model configuration class with all the parameters of the + model. Initializing with a config file does not load the weights associated with the model, only the + configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. +""" + +LONGFORMER_INPUTS_DOCSTRING = r""" + Args: + input_ids (`torch.LongTensor` of shape `({0})`): + Indices of input sequence tokens in the vocabulary. + + Indices can be obtained using [`AutoTokenizer`]. 
See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + global_attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*): + Mask to decide the attention given on each token, local attention or global attention. Tokens with global + attention attends to all other tokens, and all other tokens attend to them. This is important for + task-specific finetuning because it makes the model more flexible at representing the task. For example, + for classification, the token should be given global attention. For QA, all question tokens should also + have global attention. Please refer to the [Longformer paper](https://arxiv.org/abs/2004.05150) for more + details. Mask values selected in `[0, 1]`: + + - 0 for local attention (a sliding window attention), + - 1 for global attention (tokens that attend to all other tokens, and all other tokens attend to them). + + head_mask (`torch.Tensor` of shape `(num_layers, num_heads)`, *optional*): + Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + decoder_head_mask (`torch.Tensor` of shape `(num_layers, num_heads)`, *optional*): + Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*): + Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, + 1]`: + + - 0 corresponds to a *sentence A* token, + - 1 corresponds to a *sentence B* token. + + [What are token type IDs?](../glossary#token-type-ids) + position_ids (`torch.LongTensor` of shape `({0})`, *optional*): + Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, + config.max_position_embeddings - 1]`. + + [What are position IDs?](../glossary#position-ids) + inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*): + Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This + is useful if you want more control over how to convert `input_ids` indices into associated vectors than the + model's internal embedding lookup matrix. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
+""" + + +@add_start_docstrings( + "The bare Longformer Model outputting raw hidden-states without any specific head on top.", + LONGFORMER_START_DOCSTRING, +) +class LongformerModel(LongformerPreTrainedModel): + """ + This class copied code from [`RobertaModel`] and overwrote standard self-attention with longformer self-attention + to provide the ability to process long sequences following the self-attention approach described in [Longformer: + the Long-Document Transformer](https://arxiv.org/abs/2004.05150) by Iz Beltagy, Matthew E. Peters, and Arman Cohan. + Longformer self-attention combines a local (sliding window) and global attention to extend to long documents + without the O(n^2) increase in memory and compute. + + The self-attention module `LongformerSelfAttention` implemented here supports the combination of local and global + attention but it lacks support for autoregressive attention and dilated attention. Autoregressive and dilated + attention are more relevant for autoregressive language modeling than finetuning on downstream tasks. Future + release will add support for autoregressive attention, but the support for dilated attention requires a custom CUDA + kernel to be memory and compute efficient. + + """ + + def __init__(self, config, add_pooling_layer=True): + super().__init__(config) + self.config = config + + if isinstance(config.attention_window, int): + assert config.attention_window % 2 == 0, "`config.attention_window` has to be an even value" + assert config.attention_window > 0, "`config.attention_window` has to be positive" + config.attention_window = [config.attention_window] * config.num_hidden_layers # one value per layer + else: + assert len(config.attention_window) == config.num_hidden_layers, ( + "`len(config.attention_window)` should equal `config.num_hidden_layers`. " + f"Expected {config.num_hidden_layers}, given {len(config.attention_window)}" + ) + + self.embeddings = LongformerEmbeddings(config) + self.encoder = LongformerEncoder(config) + self.pooler = LongformerPooler(config) if add_pooling_layer else None + + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.embeddings.word_embeddings + + def set_input_embeddings(self, value): + self.embeddings.word_embeddings = value + + def _prune_heads(self, heads_to_prune): + """ + Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base + class PreTrainedModel + """ + for layer, heads in heads_to_prune.items(): + self.encoder.layer[layer].attention.prune_heads(heads) + + def _pad_to_window_size( + self, + input_ids: torch.Tensor, + attention_mask: torch.Tensor, + token_type_ids: torch.Tensor, + position_ids: torch.Tensor, + inputs_embeds: torch.Tensor, + pad_token_id: int, + ): + """A helper function to pad tokens and mask to work with implementation of Longformer self-attention.""" + # padding + attention_window = ( + self.config.attention_window + if isinstance(self.config.attention_window, int) + else max(self.config.attention_window) + ) + + assert attention_window % 2 == 0, f"`attention_window` should be an even value. 
Given {attention_window}" + input_shape = input_ids.shape if input_ids is not None else inputs_embeds.shape + batch_size, seq_len = input_shape[:2] + + padding_len = (attention_window - seq_len % attention_window) % attention_window + + # this path should be recorded in the ONNX export, it is fine with padding_len == 0 as well + if padding_len > 0: + # logger.info( + # f"Input ids are automatically padded from {seq_len} to {seq_len + padding_len} to be a multiple of " + # f"`config.attention_window`: {attention_window}" + # ) + if input_ids is not None: + input_ids = nn.functional.pad(input_ids, (0, padding_len), value=pad_token_id) + if position_ids is not None: + # pad with position_id = pad_token_id as in modeling_roberta.RobertaEmbeddings + position_ids = nn.functional.pad(position_ids, (0, padding_len), value=pad_token_id) + if inputs_embeds is not None: + input_ids_padding = inputs_embeds.new_full( + (batch_size, padding_len), + self.config.pad_token_id, + dtype=torch.long, + ) + inputs_embeds_padding = self.embeddings(input_ids_padding) + inputs_embeds = torch.cat([inputs_embeds, inputs_embeds_padding], dim=-2) + + attention_mask = nn.functional.pad( + attention_mask, (0, padding_len), value=0 + ) # no attention on the padding tokens + token_type_ids = nn.functional.pad(token_type_ids, (0, padding_len), value=0) # pad with token_type_id = 0 + + return padding_len, input_ids, attention_mask, token_type_ids, position_ids, inputs_embeds + + def _merge_to_attention_mask(self, attention_mask: torch.Tensor, global_attention_mask: torch.Tensor): + # longformer self attention expects attention mask to have 0 (no attn), 1 (local attn), 2 (global attn) + # (global_attention_mask + 1) => 1 for local attention, 2 for global attention + # => final attention_mask => 0 for no attention, 1 for local attention 2 for global attention + if attention_mask is not None: + attention_mask = attention_mask * (global_attention_mask + 1) + else: + # simply use `global_attention_mask` as `attention_mask` + # if no `attention_mask` is given + attention_mask = global_attention_mask + 1 + return attention_mask + + @add_start_docstrings_to_model_forward(LONGFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @replace_return_docstrings(output_type=LongformerBaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC) + def forward( + self, + input_ids: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + global_attention_mask: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + token_type_ids: Optional[torch.Tensor] = None, + position_ids: Optional[torch.Tensor] = None, + inputs_embeds: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, LongformerBaseModelOutputWithPooling]: + r""" + + Returns: + + Examples: + + ```python + >>> import torch + >>> from transformers import LongformerModel, AutoTokenizer + + >>> model = LongformerModel.from_pretrained("allenai/longformer-base-4096") + >>> tokenizer = AutoTokenizer.from_pretrained("allenai/longformer-base-4096") + + >>> SAMPLE_TEXT = " ".join(["Hello world! "] * 1000) # long input document + >>> input_ids = torch.tensor(tokenizer.encode(SAMPLE_TEXT)).unsqueeze(0) # batch of size 1 + + >>> attention_mask = torch.ones( + ... input_ids.shape, dtype=torch.long, device=input_ids.device + ... ) # initialize to local attention + >>> global_attention_mask = torch.zeros( + ... 
input_ids.shape, dtype=torch.long, device=input_ids.device + ... ) # initialize to global attention to be deactivated for all tokens + >>> global_attention_mask[ + ... :, + ... [ + ... 1, + ... 4, + ... 21, + ... ], + ... ] = 1 # Set global attention to random tokens for the sake of this example + >>> # Usually, set global attention based on the task. For example, + >>> # classification: the token + >>> # QA: question tokens + >>> # LM: potentially on the beginning of sentences and paragraphs + >>> outputs = model(input_ids, attention_mask=attention_mask, global_attention_mask=global_attention_mask) + >>> sequence_output = outputs.last_hidden_state + >>> pooled_output = outputs.pooler_output + ```""" + + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if input_ids is not None and inputs_embeds is not None: + raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") + elif input_ids is not None: + input_shape = input_ids.size() + elif inputs_embeds is not None: + input_shape = inputs_embeds.size()[:-1] + else: + raise ValueError("You have to specify either input_ids or inputs_embeds") + + device = input_ids.device if input_ids is not None else inputs_embeds.device + + if attention_mask is None: + attention_mask = torch.ones(input_shape, device=device) + if token_type_ids is None: + token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) + + # merge `global_attention_mask` and `attention_mask` + if global_attention_mask is not None: + attention_mask = self._merge_to_attention_mask(attention_mask, global_attention_mask) + + padding_len, input_ids, attention_mask, token_type_ids, position_ids, inputs_embeds = self._pad_to_window_size( + input_ids=input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + inputs_embeds=inputs_embeds, + pad_token_id=self.config.pad_token_id, + ) + + # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] + # ourselves in which case we just need to make it broadcastable to all heads. 
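+        # After `_merge_to_attention_mask`, mask values are 0 (padding), 1 (local
+        # attention) and 2 (global attention). `get_extended_attention_mask`
+        # (inherited from `PreTrainedModel`) maps these to float values whose sign is
+        # what matters downstream, e.g. roughly [0, 1, 2] -> [large negative, 0, positive];
+        # `LongformerEncoder.forward` recovers `is_index_masked` (< 0) and
+        # `is_index_global_attn` (> 0) from this slice.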
+ extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape)[ + :, 0, 0, : + ] + + embedding_output = self.embeddings( + input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds + ) + + encoder_outputs = self.encoder( + embedding_output, + attention_mask=extended_attention_mask, + head_mask=head_mask, + padding_len=padding_len, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + sequence_output = encoder_outputs[0] + pooled_output = self.pooler(sequence_output) if self.pooler is not None else None + + if not return_dict: + return (sequence_output, pooled_output) + encoder_outputs[1:] + + return LongformerBaseModelOutputWithPooling( + last_hidden_state=sequence_output, + pooler_output=pooled_output, + hidden_states=encoder_outputs.hidden_states, + attentions=encoder_outputs.attentions, + global_attentions=encoder_outputs.global_attentions, + ) + + +model_name = "allenai/longformer-base-4096" +device = "cuda:0" + +def get_model(): + config = AutoConfig.from_pretrained(model_name) + config.return_dict = True + model = AutoModel.from_config(config).to(device) + print(model) + return model + + +def get_input(batch_size): + # tokenizer = AutoTokenizer.from_pretrained(model_name) + # inputs = tokenizer("Hello world! Hello world! Hello world! Hello world! Hello world!", return_tensors="pt").to(device) + # return (inputs['input_ids'], inputs['attention_mask']), {} + + vocab_size = 50265 + seq_len = 256 + input_ids = torch.randint(0, vocab_size, (batch_size, seq_len), dtype=torch.int64).to(device) + attention_mask = torch.ones((batch_size, seq_len), dtype=torch.int64).to(device) + return (input_ids, attention_mask), {} + +if __name__ == "__main__": + model = get_model() + input_args, input_kwargs = get_input(batch_size=1) + print([x.shape for x in input_args]) + outputs = model(*input_args, **input_kwargs) + print(outputs) + diff --git a/models/lstm.py b/models/lstm.py new file mode 100644 index 000000000000..45f1159aa097 --- /dev/null +++ b/models/lstm.py @@ -0,0 +1,171 @@ +import torch +import torch.nn as nn +import random +import torch.nn as nn +from torch.nn import Parameter +from torch import Tensor +from typing import Tuple + +# https://github.com/pytorch/pytorch/blob/95a86ed9ca107329151e0dc172386d50dd3471c6/benchmarks/fastrnns/custom_lstms.py#L121 +class LSTMCell(nn.Module): + + def __init__(self, input_size, hidden_size): + super().__init__() + self.input_size = input_size + self.hidden_size = hidden_size + self.weight_ih = Parameter(torch.randn(4 * hidden_size, input_size)) + self.weight_hh = Parameter(torch.randn(4 * hidden_size, hidden_size)) + self.bias_ih = Parameter(torch.randn(4 * hidden_size)) + self.bias_hh = Parameter(torch.randn(4 * hidden_size)) + nn.init.xavier_uniform_(self.weight_ih) + nn.init.xavier_uniform_(self.weight_hh) + + + def forward( + self, input: Tensor, + state: Tuple[Tensor, + Tensor]) -> Tuple[Tensor, Tuple[Tensor, Tensor]]: + hx, cx = state + gates = (torch.mm(input, self.weight_ih.t()) + self.bias_ih + + torch.mm(hx, self.weight_hh.t()) + self.bias_hh) + ingate, forgetgate, cellgate, outgate = gates.chunk(4, 1) + + ingate = torch.sigmoid(ingate) + forgetgate = torch.sigmoid(forgetgate) + cellgate = torch.tanh(cellgate) + outgate = torch.sigmoid(outgate) + + cy = (forgetgate * cx) + (ingate * cellgate) + hy = outgate * torch.tanh(cy) + + return hy, (hy, cy) + + +class LSTMLayer(nn.Module): + + def 
__init__(self, batch_size, input_size, hidden_size, num_layers): + super().__init__() + self.layers = nn.ModuleList() + self.layers.append(LSTMCell(input_size, hidden_size)) + for i in range(num_layers): + self.layers.append(LSTMCell(hidden_size, hidden_size)) + self.num_layers = num_layers + self.batch_size = batch_size + self.hidden_size = hidden_size + + def forward(self, cur_input, state_c, state_h): + state_c_new = [] + state_h_new = [] + for j in range(self.num_layers): + c = state_c[j] + h = state_h[j] + _, (h, c) = self.layers[j](cur_input, (h, c)) + state_c_new.append(c) + state_h_new.append(h) + cur_input = h + return state_c_new, state_h_new + + +class LSTM(nn.Module): + + def __init__(self, batch_size, input_size, hidden_size, num_layers): + super().__init__() + self.layers = nn.ModuleList() + self.layers.append(LSTMCell(input_size, hidden_size)) + for i in range(num_layers): + self.layers.append(LSTMCell(hidden_size, hidden_size)) + self.num_layers = num_layers + self.batch_size = batch_size + self.hidden_size = hidden_size + + def forward(self, inputs): # seq_len, batch, input_size + state_c = [ + torch.zeros(self.batch_size, self.hidden_size, device='cuda') + for _ in range(self.num_layers) + ] + state_h = [ + torch.zeros(self.batch_size, self.hidden_size, device='cuda') + for _ in range(self.num_layers) + ] + for i in range(inputs.size()[0]): + cur_input = inputs[i] + for j in range(self.num_layers): + c = state_c[j] + h = state_h[j] + _, (h, c) = self.layers[j](cur_input, (h, c)) + state_c[j].copy_(c) + state_h[j].copy_(h) + cur_input = h + return state_h[self.num_layers - 1] + + +def forward_seq(model, inputs): + state_c = [ + torch.zeros(model.batch_size, model.hidden_size, device='cuda') + for _ in range(model.num_layers) + ] + state_h = [ + torch.zeros(model.batch_size, model.hidden_size, device='cuda') + for _ in range(model.num_layers) + ] + for i in range(inputs.size()[0]): + state_c, state_h = model.forward(inputs[i], state_c, state_h) + +num_layers = 10 +input_size = 256 +hidden_size = 256 +seq_len = 64 + +def get_layer_with_bs(batch_size): + model = LSTMLayer(batch_size, input_size, hidden_size, num_layers).cuda() + return model + + +def get_model_with_bs(batch_size): + model = LSTM(batch_size, input_size, hidden_size, num_layers).cuda() + return model + + +def get_input(batch_size): + inputs = torch.randn(seq_len, batch_size, input_size).cuda() + return (inputs,), {} + + +def get_dynamic_inputs(batch_size, num_inputs): + inputs = [(torch.randn(seq_len, batch_size, input_size).cuda(),) for i in range(num_inputs)] + random.shuffle(inputs) + return inputs, [{} for _ in range(num_inputs)] + + +def perf_test(batch_size): + import sys, os + sys.path.append(os.path.join(os.path.dirname(__file__), "..")) + from utils import perf_test_run, custom_backend, assert_equal, nnf_backend + model = get_layer_with_bs(batch_size).eval() + compiled = torch.compile(model, backend=nnf_backend) + # compiled = torch.compile(model) + input_args, input_kwargs = get_input(batch_size) + ref = forward_seq(model, input_args[0]) + out = forward_seq(compiled, input_args[0]) + assert_equal(ref, out) + perf_test_run(ref, forward_seq, "lstm+cellcompile", 100, (compiled,) + input_args, input_kwargs) + # perf_test_run(forward_seq, "lstm+cellcompile", 100, (model,) + input_args, input_kwargs) + + +if __name__ == '__main__': + import torch + torch.backends.cuda.matmul.allow_tf32 = False + torch.backends.cudnn.allow_tf32 = False + import argparse + from frontend.no_preload import NO_LD_PRELOAD_CTX + 
from frontend import config + + with NO_LD_PRELOAD_CTX(): + with torch.no_grad(): + parser = argparse.ArgumentParser() + parser.add_argument("--bs", type=int, default=1) + args = parser.parse_args() + + config.set_config('model_name', f'lstm_bs{args.bs}') + perf_test(args.bs) + diff --git a/models/monodepth.py b/models/monodepth.py new file mode 100644 index 000000000000..70dbf9fadf7d --- /dev/null +++ b/models/monodepth.py @@ -0,0 +1,323 @@ +from __future__ import absolute_import, division, print_function +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F +import importlib + + +class conv(nn.Module): + def __init__(self, num_in_layers, num_out_layers, kernel_size, stride): + super(conv, self).__init__() + self.kernel_size = kernel_size + self.conv_base = nn.Conv2d(num_in_layers, num_out_layers, kernel_size=kernel_size, stride=stride) + self.normalize = nn.BatchNorm2d(num_out_layers) + + def forward(self, x): + p = int(np.floor((self.kernel_size-1)/2)) + p2d = (p, p, p, p) + x = self.conv_base(F.pad(x, p2d)) + x = self.normalize(x) + return F.elu(x, inplace=True) + + +class convblock(nn.Module): + def __init__(self, num_in_layers, num_out_layers, kernel_size): + super(convblock, self).__init__() + self.conv1 = conv(num_in_layers, num_out_layers, kernel_size, 1) + self.conv2 = conv(num_out_layers, num_out_layers, kernel_size, 2) + + def forward(self, x): + x = self.conv1(x) + return self.conv2(x) + + +class maxpool(nn.Module): + def __init__(self, kernel_size): + super(maxpool, self).__init__() + self.kernel_size = kernel_size + + def forward(self, x): + p = int(np.floor((self.kernel_size-1) / 2)) + p2d = (p, p, p, p) + return F.max_pool2d(F.pad(x, p2d), self.kernel_size, stride=2) + + +class resconv(nn.Module): + def __init__(self, num_in_layers, num_out_layers, stride): + super(resconv, self).__init__() + self.num_out_layers = num_out_layers + self.stride = stride + self.conv1 = conv(num_in_layers, num_out_layers, 1, 1) + self.conv2 = conv(num_out_layers, num_out_layers, 3, stride) + self.conv3 = nn.Conv2d(num_out_layers, 4*num_out_layers, kernel_size=1, stride=1) + self.conv4 = nn.Conv2d(num_in_layers, 4*num_out_layers, kernel_size=1, stride=stride) + self.normalize = nn.BatchNorm2d(4*num_out_layers) + + def forward(self, x): + # do_proj = x.size()[1] != self.num_out_layers or self.stride == 2 + do_proj = True + shortcut = [] + x_out = self.conv1(x) + x_out = self.conv2(x_out) + x_out = self.conv3(x_out) + if do_proj: + shortcut = self.conv4(x) + else: + shortcut = x + return F.elu(self.normalize(x_out + shortcut), inplace=True) + + +class resconv_basic(nn.Module): + # for resnet18 + def __init__(self, num_in_layers, num_out_layers, stride): + super(resconv_basic, self).__init__() + self.num_out_layers = num_out_layers + self.stride = stride + self.conv1 = conv(num_in_layers, num_out_layers, 3, stride) + self.conv2 = conv(num_out_layers, num_out_layers, 3, 1) + self.conv3 = nn.Conv2d(num_in_layers, num_out_layers, kernel_size=1, stride=stride) + self.normalize = nn.BatchNorm2d(num_out_layers) + + def forward(self, x): + # do_proj = x.size()[1] != self.num_out_layers or self.stride == 2 + do_proj = True + shortcut = [] + x_out = self.conv1(x) + x_out = self.conv2(x_out) + if do_proj: + shortcut = self.conv3(x) + else: + shortcut = x + return F.elu(self.normalize(x_out + shortcut), inplace=True) + + +def resblock(num_in_layers, num_out_layers, num_blocks, stride): + layers = [] + layers.append(resconv(num_in_layers, num_out_layers, stride)) + for i in 
range(1, num_blocks - 1): + layers.append(resconv(4 * num_out_layers, num_out_layers, 1)) + layers.append(resconv(4 * num_out_layers, num_out_layers, 1)) + return nn.Sequential(*layers) + + +def resblock_basic(num_in_layers, num_out_layers, num_blocks, stride): + layers = [] + layers.append(resconv_basic(num_in_layers, num_out_layers, stride)) + for i in range(1, num_blocks): + layers.append(resconv_basic(num_out_layers, num_out_layers, 1)) + return nn.Sequential(*layers) + + +class upconv(nn.Module): + def __init__(self, num_in_layers, num_out_layers, kernel_size, scale): + super(upconv, self).__init__() + self.scale = scale + self.conv1 = conv(num_in_layers, num_out_layers, kernel_size, 1) + + def forward(self, x): + x = nn.functional.interpolate(x, scale_factor=self.scale, mode='bilinear', align_corners=True) + return self.conv1(x) + + +class get_disp(nn.Module): + def __init__(self, num_in_layers): + super(get_disp, self).__init__() + self.conv1 = nn.Conv2d(num_in_layers, 2, kernel_size=3, stride=1) + self.normalize = nn.BatchNorm2d(2) + self.sigmoid = torch.nn.Sigmoid() + + def forward(self, x): + p = 1 + p2d = (p, p, p, p) + x = self.conv1(F.pad(x, p2d)) + x = self.normalize(x) + return 0.3 * self.sigmoid(x) + + +class Resnet50_md(nn.Module): + def __init__(self, num_in_layers): + super(Resnet50_md, self).__init__() + # encoder + self.conv1 = conv(num_in_layers, 64, 7, 2) # H/2 - 64D + self.pool1 = maxpool(3) # H/4 - 64D + self.conv2 = resblock(64, 64, 3, 2) # H/8 - 256D + self.conv3 = resblock(256, 128, 4, 2) # H/16 - 512D + self.conv4 = resblock(512, 256, 6, 2) # H/32 - 1024D + self.conv5 = resblock(1024, 512, 3, 2) # H/64 - 2048D + + # decoder + self.upconv6 = upconv(2048, 512, 3, 2) + self.iconv6 = conv(1024 + 512, 512, 3, 1) + + self.upconv5 = upconv(512, 256, 3, 2) + self.iconv5 = conv(512+256, 256, 3, 1) + + self.upconv4 = upconv(256, 128, 3, 2) + self.iconv4 = conv(256+128, 128, 3, 1) + self.disp4_layer = get_disp(128) + + self.upconv3 = upconv(128, 64, 3, 2) + self.iconv3 = conv(64+64+2, 64, 3, 1) + self.disp3_layer = get_disp(64) + + self.upconv2 = upconv(64, 32, 3, 2) + self.iconv2 = conv(32+64+2, 32, 3, 1) + self.disp2_layer = get_disp(32) + + self.upconv1 = upconv(32, 16, 3, 2) + self.iconv1 = conv(16+2, 16, 3, 1) + self.disp1_layer = get_disp(16) + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.xavier_uniform_(m.weight) + + def forward(self, x): + # encoder + x1 = self.conv1(x) + x_pool1 = self.pool1(x1) + x2 = self.conv2(x_pool1) + x3 = self.conv3(x2) + x4 = self.conv4(x3) + x5 = self.conv5(x4) + + # skips + skip1 = x1 + skip2 = x_pool1 + skip3 = x2 + skip4 = x3 + skip5 = x4 + + # decoder + upconv6 = self.upconv6(x5) + concat6 = torch.cat((upconv6, skip5), 1) + iconv6 = self.iconv6(concat6) + + upconv5 = self.upconv5(iconv6) + concat5 = torch.cat((upconv5, skip4), 1) + iconv5 = self.iconv5(concat5) + + upconv4 = self.upconv4(iconv5) + concat4 = torch.cat((upconv4, skip3), 1) + iconv4 = self.iconv4(concat4) + self.disp4 = self.disp4_layer(iconv4) + self.udisp4 = nn.functional.interpolate(self.disp4, scale_factor=2, mode='bilinear', align_corners=True) + + upconv3 = self.upconv3(iconv4) + concat3 = torch.cat((upconv3, skip2, self.udisp4), 1) + iconv3 = self.iconv3(concat3) + self.disp3 = self.disp3_layer(iconv3) + self.udisp3 = nn.functional.interpolate(self.disp3, scale_factor=2, mode='bilinear', align_corners=True) + + upconv2 = self.upconv2(iconv3) + concat2 = torch.cat((upconv2, skip1, self.udisp3), 1) + iconv2 = self.iconv2(concat2) + self.disp2 
= self.disp2_layer(iconv2) + self.udisp2 = nn.functional.interpolate(self.disp2, scale_factor=2, mode='bilinear', align_corners=True) + + upconv1 = self.upconv1(iconv2) + concat1 = torch.cat((upconv1, self.udisp2), 1) + iconv1 = self.iconv1(concat1) + self.disp1 = self.disp1_layer(iconv1) + return self.disp1, self.disp2, self.disp3, self.disp4 + + +class Resnet18_md(nn.Module): + def __init__(self, num_in_layers): + super(Resnet18_md, self).__init__() + # encoder + self.conv1 = conv(num_in_layers, 64, 7, 2) # H/2 - 64D + self.pool1 = maxpool(3) # H/4 - 64D + self.conv2 = resblock_basic(64, 64, 2, 2) # H/8 - 64D + self.conv3 = resblock_basic(64, 128, 2, 2) # H/16 - 128D + self.conv4 = resblock_basic(128, 256, 2, 2) # H/32 - 256D + self.conv5 = resblock_basic(256, 512, 2, 2) # H/64 - 512D + + # decoder + self.upconv6 = upconv(512, 512, 3, 2) + self.iconv6 = conv(256+512, 512, 3, 1) + + self.upconv5 = upconv(512, 256, 3, 2) + self.iconv5 = conv(128+256, 256, 3, 1) + + self.upconv4 = upconv(256, 128, 3, 2) + self.iconv4 = conv(64+128, 128, 3, 1) + self.disp4_layer = get_disp(128) + + self.upconv3 = upconv(128, 64, 3, 2) + self.iconv3 = conv(64+64 + 2, 64, 3, 1) + self.disp3_layer = get_disp(64) + + self.upconv2 = upconv(64, 32, 3, 2) + self.iconv2 = conv(64+32 + 2, 32, 3, 1) + self.disp2_layer = get_disp(32) + + self.upconv1 = upconv(32, 16, 3, 2) + self.iconv1 = conv(16+2, 16, 3, 1) + self.disp1_layer = get_disp(16) + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.xavier_uniform_(m.weight) + + def forward(self, x): + # encoder + x1 = self.conv1(x) + x_pool1 = self.pool1(x1) + x2 = self.conv2(x_pool1) + x3 = self.conv3(x2) + x4 = self.conv4(x3) + x5 = self.conv5(x4) + + # skips + skip1 = x1 + skip2 = x_pool1 + skip3 = x2 + skip4 = x3 + skip5 = x4 + + # decoder + upconv6 = self.upconv6(x5) + concat6 = torch.cat((upconv6, skip5), 1) + iconv6 = self.iconv6(concat6) + + upconv5 = self.upconv5(iconv6) + concat5 = torch.cat((upconv5, skip4), 1) + iconv5 = self.iconv5(concat5) + + upconv4 = self.upconv4(iconv5) + concat4 = torch.cat((upconv4, skip3), 1) + iconv4 = self.iconv4(concat4) + self.disp4 = self.disp4_layer(iconv4) + self.udisp4 = nn.functional.interpolate(self.disp4, scale_factor=2, mode='bilinear', align_corners=True) + + upconv3 = self.upconv3(iconv4) + concat3 = torch.cat((upconv3, skip2, self.udisp4), 1) + iconv3 = self.iconv3(concat3) + self.disp3 = self.disp3_layer(iconv3) + self.udisp3 = nn.functional.interpolate(self.disp3, scale_factor=2, mode='bilinear', align_corners=True) + + upconv2 = self.upconv2(iconv3) + concat2 = torch.cat((upconv2, skip1, self.udisp3), 1) + iconv2 = self.iconv2(concat2) + self.disp2 = self.disp2_layer(iconv2) + self.udisp2 = nn.functional.interpolate(self.disp2, scale_factor=2, mode='bilinear', align_corners=True) + + upconv1 = self.upconv1(iconv2) + concat1 = torch.cat((upconv1, self.udisp2), 1) + iconv1 = self.iconv1(concat1) + self.disp1 = self.disp1_layer(iconv1) + return self.disp1, self.disp2, self.disp3, self.disp4 + + +def get_model(): + return Resnet18_md(3).cuda() + +def get_scripted_model(): + from ._monodepth_scripted import _get_scripted_model + return _get_scripted_model() + + +def get_input(batch_size): + return (torch.randn((batch_size, 3, 256, 256)).cuda(),), {} \ No newline at end of file diff --git a/models/quantized.py b/models/quantized.py new file mode 100644 index 000000000000..fa51f0051950 --- /dev/null +++ b/models/quantized.py @@ -0,0 +1,480 @@ +import math +import torch +import torch.nn as nn +import 
torch.nn.parallel +import torch.optim +import torch.utils.data +from collections import OrderedDict +import torchvision.transforms as transforms +import math +import torch.nn.functional as F +from torch.autograd import Variable +from torch.autograd.function import InplaceFunction + + +# models/modules/quantize.py + +class UniformQuantize(InplaceFunction): + + @classmethod + def forward(cls, ctx, input, num_bits=8, min_value=None, max_value=None, + stochastic=False, inplace=False, enforce_true_zero=False, num_chunks=None, out_half=False): + + num_chunks = num_chunks = input.shape[ + 0] if num_chunks is None else num_chunks + if min_value is None or max_value is None: + B = input.shape[0] + y = input.view(B // num_chunks, -1) + if min_value is None: + min_value = y.min(-1)[0].mean(-1) # C + #min_value = float(input.view(input.size(0), -1).min(-1)[0].mean()) + if max_value is None: + #max_value = float(input.view(input.size(0), -1).max(-1)[0].mean()) + max_value = y.max(-1)[0].mean(-1) # C + ctx.inplace = inplace + ctx.num_bits = num_bits + ctx.min_value = min_value + ctx.max_value = max_value + ctx.stochastic = stochastic + + if ctx.inplace: + ctx.mark_dirty(input) + output = input + else: + output = input.clone() + + qmin = 0. + qmax = 2.**num_bits - 1. + #import pdb; pdb.set_trace() + scale = (max_value - min_value) / (qmax - qmin) + + scale = max(scale, 1e-8) + + if enforce_true_zero: + initial_zero_point = qmin - min_value / scale + zero_point = 0. + # make zero exactly represented + if initial_zero_point < qmin: + zero_point = qmin + elif initial_zero_point > qmax: + zero_point = qmax + else: + zero_point = initial_zero_point + zero_point = int(zero_point) + output.div_(scale).add_(zero_point) + else: + output.add_(-min_value).div_(scale).add_(qmin) + + if ctx.stochastic: + noise = output.new(output.shape).uniform_(-0.5, 0.5) + output.add_(noise) + output.clamp_(qmin, qmax).round_() # quantize + + if enforce_true_zero: + output.add_(-zero_point).mul_(scale) # dequantize + else: + output.add_(-qmin).mul_(scale).add_(min_value) # dequantize + if out_half and num_bits <= 16: + output = output.half() + return output + + @staticmethod + def backward(ctx, grad_output): + # straight-through estimator + grad_input = grad_output + return grad_input, None, None, None, None, None, None + + +class UniformQuantizeGrad(InplaceFunction): + + @classmethod + def forward(cls, ctx, input, num_bits=8, min_value=None, max_value=None, stochastic=True, inplace=False): + ctx.inplace = inplace + ctx.num_bits = num_bits + ctx.min_value = min_value + ctx.max_value = max_value + ctx.stochastic = stochastic + return input + + @staticmethod + def backward(ctx, grad_output): + if ctx.min_value is None: + min_value = float(grad_output.min()) + # min_value = float(grad_output.view( + # grad_output.size(0), -1).min(-1)[0].mean()) + else: + min_value = ctx.min_value + if ctx.max_value is None: + max_value = float(grad_output.max()) + # max_value = float(grad_output.view( + # grad_output.size(0), -1).max(-1)[0].mean()) + else: + max_value = ctx.max_value + grad_input = UniformQuantize().apply(grad_output, ctx.num_bits, + min_value, max_value, ctx.stochastic, ctx.inplace) + return grad_input, None, None, None, None, None + + +def conv2d_biprec(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1, num_bits_grad=None): + out1 = F.conv2d(input.detach(), weight, bias, + stride, padding, dilation, groups) + out2 = F.conv2d(input, weight.detach(), bias.detach() if bias is not None else None, + stride, padding, 
dilation, groups) + out2 = quantize_grad(out2, num_bits=num_bits_grad) + return out1 + out2 - out1.detach() + + +def linear_biprec(input, weight, bias=None, num_bits_grad=None): + out1 = F.linear(input.detach(), weight, bias) + out2 = F.linear(input, weight.detach(), bias.detach() + if bias is not None else None) + out2 = quantize_grad(out2, num_bits=num_bits_grad) + return out1 + out2 - out1.detach() + + +def quantize(x, num_bits=8, min_value=None, max_value=None, num_chunks=None, stochastic=False, inplace=False): + return UniformQuantize().apply(x, num_bits, min_value, max_value, num_chunks, stochastic, inplace) + + +def quantize_grad(x, num_bits=8, min_value=None, max_value=None, stochastic=True, inplace=False): + return UniformQuantizeGrad().apply(x, num_bits, min_value, max_value, stochastic, inplace) + + +class QuantMeasure(nn.Module): + """docstring for QuantMeasure.""" + + def __init__(self, num_bits=8, momentum=0.1): + super(QuantMeasure, self).__init__() + self.register_buffer('running_min', torch.zeros(1)) + self.register_buffer('running_max', torch.zeros(1)) + self.momentum = momentum + self.num_bits = num_bits + + def forward(self, input): + if self.training: + min_value = input.detach().view( + input.size(0), -1).min(-1)[0].mean() + max_value = input.detach().view( + input.size(0), -1).max(-1)[0].mean() + self.running_min.mul_(self.momentum).add_( + min_value * (1 - self.momentum)) + self.running_max.mul_(self.momentum).add_( + max_value * (1 - self.momentum)) + else: + min_value = self.running_min + max_value = self.running_max + return quantize(input, self.num_bits, min_value=float(min_value), max_value=float(max_value), num_chunks=16) + + +class QConv2d(nn.Conv2d): + """docstring for QConv2d.""" + + def __init__(self, in_channels, out_channels, kernel_size, + stride=1, padding=0, dilation=1, groups=1, bias=True, num_bits=8, num_bits_weight=None, num_bits_grad=None, biprecision=False): + super(QConv2d, self).__init__(in_channels, out_channels, kernel_size, + stride, padding, dilation, groups, bias) + self.num_bits = num_bits + self.num_bits_weight = num_bits_weight or num_bits + self.num_bits_grad = num_bits_grad + self.quantize_input = QuantMeasure(self.num_bits) + self.biprecision = biprecision + + def forward(self, input): + qinput = self.quantize_input(input) + qweight = quantize(self.weight, num_bits=self.num_bits_weight, + min_value=float(self.weight.min()), + max_value=float(self.weight.max())) + if self.bias is not None: + qbias = quantize(self.bias, num_bits=self.num_bits_weight) + else: + qbias = None + if not self.biprecision or self.num_bits_grad is None: + output = F.conv2d(qinput, qweight, qbias, self.stride, + self.padding, self.dilation, self.groups) + if self.num_bits_grad is not None: + output = quantize_grad(output, num_bits=self.num_bits_grad) + else: + output = conv2d_biprec(qinput, qweight, qbias, self.stride, + self.padding, self.dilation, self.groups, num_bits_grad=self.num_bits_grad) + + return output + + +class QLinear(nn.Linear): + """docstring for QConv2d.""" + + def __init__(self, in_features, out_features, bias=True, num_bits=8, num_bits_weight=None, num_bits_grad=None, biprecision=False): + super(QLinear, self).__init__(in_features, out_features, bias) + self.num_bits = num_bits + self.num_bits_weight = num_bits_weight or num_bits + self.num_bits_grad = num_bits_grad + self.biprecision = biprecision + self.quantize_input = QuantMeasure(self.num_bits) + + def forward(self, input): + qinput = self.quantize_input(input) + qweight = 
quantize(self.weight, num_bits=self.num_bits_weight, + min_value=float(self.weight.min()), + max_value=float(self.weight.max())) + if self.bias is not None: + qbias = quantize(self.bias, num_bits=self.num_bits_weight) + else: + qbias = None + + if not self.biprecision or self.num_bits_grad is None: + output = F.linear(qinput, qweight, qbias) + if self.num_bits_grad is not None: + output = quantize_grad(output, num_bits=self.num_bits_grad) + else: + output = linear_biprec(qinput, qweight, qbias, self.num_bits_grad) + return output + + +class RangeBN(nn.Module): + # this is normalized RangeBN + + def __init__(self, num_features, dim=1, momentum=0.1, affine=True, num_chunks=16, eps=1e-5, num_bits=8, num_bits_grad=8): + super(RangeBN, self).__init__() + self.register_buffer('running_mean', torch.zeros(num_features)) + self.register_buffer('running_var', torch.zeros(num_features)) + + self.momentum = momentum + self.dim = dim + if affine: + self.bias = nn.Parameter(torch.Tensor(num_features)) + self.weight = nn.Parameter(torch.Tensor(num_features)) + self.num_bits = num_bits + self.num_bits_grad = num_bits_grad + self.quantize_input = QuantMeasure(self.num_bits) + self.eps = eps + self.num_chunks = num_chunks + self.reset_params() + + def reset_params(self): + if self.weight is not None: + self.weight.data.uniform_() + if self.bias is not None: + self.bias.data.zero_() + + def forward(self, x): + x = self.quantize_input(x) + if x.dim() == 2: # 1d + x = x.unsqueeze(-1,).unsqueeze(-1) + + if self.training: + B, C, H, W = x.shape + y = x.transpose(0, 1).contiguous() # C x B x H x W + y = y.view(C, self.num_chunks, B * H * W // self.num_chunks) + mean_max = y.max(-1)[0].mean(-1) # C + mean_min = y.min(-1)[0].mean(-1) # C + mean = y.view(C, -1).mean(-1) # C + scale_fix = (0.5 * 0.35) * (1 + (math.pi * math.log(4)) ** + 0.5) / ((2 * math.log(y.size(-1))) ** 0.5) + + scale = 1 / ((mean_max - mean_min) * scale_fix + self.eps) + + self.running_mean.detach().mul_(self.momentum).add_( + mean * (1 - self.momentum)) + + self.running_var.detach().mul_(self.momentum).add_( + scale * (1 - self.momentum)) + else: + mean = self.running_mean + scale = self.running_var + scale = quantize(scale, num_bits=self.num_bits, min_value=float( + scale.min()), max_value=float(scale.max())) + out = (x - mean.view(1, mean.size(0), 1, 1)) * \ + scale.view(1, scale.size(0), 1, 1) + + if self.weight is not None: + qweight = quantize(self.weight, num_bits=self.num_bits, + min_value=float(self.weight.min()), + max_value=float(self.weight.max())) + out = out * qweight.view(1, qweight.size(0), 1, 1) + + if self.bias is not None: + qbias = quantize(self.bias, num_bits=self.num_bits) + out = out + qbias.view(1, qbias.size(0), 1, 1) + if self.num_bits_grad is not None: + out = quantize_grad(out, num_bits=self.num_bits_grad) + + if out.size(3) == 1 and out.size(2) == 1: + out = out.squeeze(-1).squeeze(-1) + return out + + +# models/resnet_quantized_float_bn.py +NUM_BITS = 8 +NUM_BITS_WEIGHT = 8 +NUM_BITS_GRAD = 8 + + +def conv3x3(in_planes, out_planes, stride=1): + "3x3 convolution with padding" + return QConv2d(in_planes, out_planes, kernel_size=3, stride=stride, + padding=1, bias=False, num_bits=NUM_BITS, num_bits_weight=NUM_BITS_WEIGHT, num_bits_grad=NUM_BITS_GRAD) + + +def init_model(model): + for m in model.modules(): + if isinstance(m, QConv2d): + n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels + m.weight.data.normal_(0, math.sqrt(2. 
/ n)) + elif isinstance(m, nn.BatchNorm2d): + m.weight.data.fill_(1) + m.bias.data.zero_() + + +class BasicBlock(nn.Module): + expansion = 1 + + def __init__(self, inplanes, planes, stride=1, downsample=None): + super(BasicBlock, self).__init__() + self.conv1 = conv3x3(inplanes, planes, stride) + self.bn1 = nn.BatchNorm2d(planes) + self.relu = nn.ReLU(inplace=True) + self.conv2 = conv3x3(planes, planes) + self.bn2 = nn.BatchNorm2d(planes) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + residual = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + + if self.downsample is not None: + residual = self.downsample(x) + + out += residual + out = self.relu(out) + + return out + + +class Bottleneck(nn.Module): + expansion = 4 + + def __init__(self, inplanes, planes, stride=1, downsample=None): + super(Bottleneck, self).__init__() + self.conv1 = QConv2d(inplanes, planes, kernel_size=1, bias=False, + num_bits=NUM_BITS, num_bits_weight=NUM_BITS_WEIGHT, num_bits_grad=NUM_BITS_GRAD) + self.bn1 = nn.BatchNorm2d(planes) + self.conv2 = QConv2d(planes, planes, kernel_size=3, stride=stride, + padding=1, bias=False, num_bits=NUM_BITS, + num_bits_weight=NUM_BITS_WEIGHT, num_bits_grad=NUM_BITS_GRAD) + self.bn2 = nn.BatchNorm2d(planes) + self.conv3 = QConv2d(planes, planes * 4, kernel_size=1, bias=False, + num_bits=NUM_BITS, num_bits_weight=NUM_BITS_WEIGHT, num_bits_grad=NUM_BITS_GRAD) + self.bn3 = nn.BatchNorm2d(planes * 4) + self.relu = nn.ReLU(inplace=True) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + residual = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + out = self.relu(out) + + out = self.conv3(out) + out = self.bn3(out) + + if self.downsample is not None: + residual = self.downsample(x) + + out += residual + out = self.relu(out) + + return out + + +class ResNet(nn.Module): + + def __init__(self): + super(ResNet, self).__init__() + + def _make_layer(self, block, planes, blocks, stride=1): + downsample = None + if stride != 1 or self.inplanes != planes * block.expansion: + downsample = nn.Sequential( + QConv2d(self.inplanes, planes * block.expansion, + kernel_size=1, stride=stride, bias=False, + num_bits=NUM_BITS, num_bits_weight=NUM_BITS_WEIGHT, num_bits_grad=NUM_BITS_GRAD), + nn.BatchNorm2d(planes * block.expansion), + ) + + layers = [] + layers.append(block(self.inplanes, planes, stride, downsample)) + self.inplanes = planes * block.expansion + for i in range(1, blocks): + layers.append(block(self.inplanes, planes)) + + return nn.Sequential(*layers) + + def forward(self, x): + x = self.conv1(x) + x = self.bn1(x) + x = self.relu(x) + x = self.maxpool(x) + + x = self.layer1(x) + x = self.layer2(x) + x = self.layer3(x) + x = self.layer4(x) + + x = self.avgpool(x) + x = x.view(x.size(0), -1) + x = self.fc(x) + + return x + + +class ResNet_imagenet(ResNet): + + def __init__(self, num_classes=1000, + block=Bottleneck, layers=[3, 4, 23, 3]): + super(ResNet_imagenet, self).__init__() + self.inplanes = 64 + self.conv1 = QConv2d(3, 64, kernel_size=7, stride=2, padding=3, + bias=False, num_bits=NUM_BITS, num_bits_weight=NUM_BITS_WEIGHT, num_bits_grad=NUM_BITS_GRAD) + self.bn1 = nn.BatchNorm2d(64) + self.relu = nn.ReLU(inplace=True) + self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + self.layer1 = self._make_layer(block, 64, layers[0]) + self.layer2 = self._make_layer(block, 128, 
layers[1], stride=2) + self.layer3 = self._make_layer(block, 256, layers[2], stride=2) + self.layer4 = self._make_layer(block, 512, layers[3], stride=2) + self.avgpool = nn.AvgPool2d(7) + self.fc = QLinear(512 * block.expansion, num_classes, num_bits=NUM_BITS, num_bits_weight=NUM_BITS_WEIGHT, num_bits_grad=NUM_BITS_GRAD) + + init_model(self) + self.regime = [ + {'epoch': 0, 'optimizer': 'SGD', 'lr': 1e-1, + 'weight_decay': 1e-4, 'momentum': 0.9}, + {'epoch': 30, 'lr': 1e-2}, + {'epoch': 60, 'lr': 1e-3, 'weight_decay': 0}, + {'epoch': 90, 'lr': 1e-4} + ] + +def get_model(): + return ResNet_imagenet( block=BasicBlock, layers=[2, 2, 2, 2]).cuda() + +def get_scripted_model(): + from ._quantized_scripted import _get_scripted_model + return _get_scripted_model() + + +def get_input(batch_size): + return (torch.randn(batch_size, 3, 224, 224).cuda(),), {} + diff --git a/models/resnet.py b/models/resnet.py new file mode 100644 index 000000000000..dee2572114d4 --- /dev/null +++ b/models/resnet.py @@ -0,0 +1,289 @@ +from functools import partial +from typing import Any, Callable, List, Optional, Type, Union + +import torch +import torch.nn as nn +from torch import Tensor +from utils import script_with_log + +model_urls = { + 'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth', + 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth', + 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth', + 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth', + 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth', + 'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth', + 'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth', + 'wide_resnet50_2': 'https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth', + 'wide_resnet101_2': 'https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth', +} + + +def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1): + """3x3 convolution with padding""" + return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, + padding=dilation, groups=groups, bias=False, dilation=dilation) + + +def conv1x1(in_planes, out_planes, stride=1): + """1x1 convolution""" + return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False) + + +class BasicBlock(nn.Module): + expansion = 1 + + def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1, + base_width=64, dilation=1, norm_layer=None): + super(BasicBlock, self).__init__() + if norm_layer is None: + norm_layer = nn.BatchNorm2d + if groups != 1 or base_width != 64: + raise ValueError('BasicBlock only supports groups=1 and base_width=64') + if dilation > 1: + raise NotImplementedError("Dilation > 1 not supported in BasicBlock") + # Both self.conv1 and self.downsample layers downsample the input when stride != 1 + self.conv1 = conv3x3(inplanes, planes, stride) + self.bn1 = norm_layer(planes) + self.relu = nn.ReLU(inplace=True) + self.conv2 = conv3x3(planes, planes) + self.bn2 = norm_layer(planes) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + identity = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + out = self.relu(out) + + return out + + +class Bottleneck(nn.Module): + expansion = 4 + + def __init__(self, inplanes, planes, 
stride=1, downsample=None, groups=1, + base_width=64, dilation=1, norm_layer=None): + super(Bottleneck, self).__init__() + if norm_layer is None: + norm_layer = nn.BatchNorm2d + width = int(planes * (base_width / 64.)) * groups + # Both self.conv2 and self.downsample layers downsample the input when stride != 1 + self.conv1 = conv1x1(inplanes, width) + self.bn1 = norm_layer(width) + self.conv2 = conv3x3(width, width, stride, groups, dilation) + self.bn2 = norm_layer(width) + self.conv3 = conv1x1(width, planes * self.expansion) + self.bn3 = norm_layer(planes * self.expansion) + self.relu = nn.ReLU(inplace=True) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + identity = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + out = self.relu(out) + + out = self.conv3(out) + out = self.bn3(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + out = self.relu(out) + + return out + + +class ResNet(nn.Module): + + def __init__(self, block, layers, num_classes=1000, zero_init_residual=False, + groups=1, width_per_group=64, replace_stride_with_dilation=None, + norm_layer=None): + super(ResNet, self).__init__() + if norm_layer is None: + norm_layer = nn.BatchNorm2d + self._norm_layer = norm_layer + + self.inplanes = 64 + self.dilation = 1 + if replace_stride_with_dilation is None: + # each element in the tuple indicates if we should replace + # the 2x2 stride with a dilated convolution instead + replace_stride_with_dilation = [False, False, False] + if len(replace_stride_with_dilation) != 3: + raise ValueError("replace_stride_with_dilation should be None " + "or a 3-element tuple, got {}".format(replace_stride_with_dilation)) + self.groups = groups + self.base_width = width_per_group + self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3, + bias=False) + self.bn1 = norm_layer(self.inplanes) + self.relu = nn.ReLU(inplace=True) + self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + self.layer1 = self._make_layer(block, 64, layers[0]) + self.layer2 = self._make_layer(block, 128, layers[1], stride=2, + dilate=replace_stride_with_dilation[0]) + self.layer3 = self._make_layer(block, 256, layers[2], stride=2, + dilate=replace_stride_with_dilation[1]) + self.layer4 = self._make_layer(block, 512, layers[3], stride=2, + dilate=replace_stride_with_dilation[2]) + self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) + self.fc = nn.Linear(512 * block.expansion, num_classes) + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + + # Zero-initialize the last BN in each residual branch, + # so that the residual branch starts with zeros, and each residual block behaves like an identity. 
+ # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677 + if zero_init_residual: + for m in self.modules(): + if isinstance(m, Bottleneck): + nn.init.constant_(m.bn3.weight, 0) + elif isinstance(m, BasicBlock): + nn.init.constant_(m.bn2.weight, 0) + + def _make_layer(self, block, planes, blocks, stride=1, dilate=False): + norm_layer = self._norm_layer + downsample = None + previous_dilation = self.dilation + if dilate: + self.dilation *= stride + stride = 1 + if stride != 1 or self.inplanes != planes * block.expansion: + downsample = nn.Sequential( + conv1x1(self.inplanes, planes * block.expansion, stride), + norm_layer(planes * block.expansion), + ) + + layers = [] + layers.append(block(self.inplanes, planes, stride, downsample, self.groups, + self.base_width, previous_dilation, norm_layer)) + self.inplanes = planes * block.expansion + for _ in range(1, blocks): + layers.append(block(self.inplanes, planes, groups=self.groups, + base_width=self.base_width, dilation=self.dilation, + norm_layer=norm_layer)) + + return nn.Sequential(*layers) + + def forward(self, x): + x = self.conv1(x) + x = self.bn1(x) + x = self.relu(x) + x = self.maxpool(x) + + x = self.layer1(x) + x = self.layer2(x) + x = self.layer3(x) + x = self.layer4(x) + + x = self.avgpool(x) + x = torch.flatten(x, 1) + x = self.fc(x) + + return x + + +def _resnet(arch, block, layers, pretrained, progress, **kwargs): + model = ResNet(block, layers, **kwargs) + if pretrained: + state_dict = load_state_dict_from_url(model_urls[arch], + progress=progress) + model.load_state_dict(state_dict) + return model + + +def resnet18(pretrained=False, progress=True, **kwargs): + r"""ResNet-18 model from + `"Deep Residual Learning for Image Recognition" `_ + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress, + **kwargs) + + +def resnet34(pretrained=False, progress=True, **kwargs): + r"""ResNet-34 model from + `"Deep Residual Learning for Image Recognition" `_ + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + return _resnet('resnet34', BasicBlock, [3, 4, 6, 3], pretrained, progress, + **kwargs) + + +def resnet50(pretrained=False, progress=True, **kwargs): + r"""ResNet-50 model from + `"Deep Residual Learning for Image Recognition" `_ + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + return _resnet('resnet50', Bottleneck, [3, 4, 6, 3], pretrained, progress, + **kwargs) + + +def resnet101(pretrained=False, progress=True, **kwargs): + r"""ResNet-101 model from + `"Deep Residual Learning for Image Recognition" `_ + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + return _resnet('resnet101', Bottleneck, [3, 4, 23, 3], pretrained, progress, + **kwargs) + + +def resnet152(pretrained=False, progress=True, **kwargs): + r"""ResNet-152 model from + `"Deep Residual Learning for Image Recognition" `_ + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + return _resnet('resnet152', Bottleneck, [3, 
8, 36, 3], pretrained, progress, + **kwargs) + + +def get_model(): + return resnet101().cuda() + +def get_scripted_model(): + model = resnet101().cuda() + model = script_with_log(model) + return model + +def get_input(batch_size): + return (torch.normal(0.5, 0.25, (batch_size, 3, 224, 224), device='cuda'),), {} diff --git a/models/seq2seq.py b/models/seq2seq.py new file mode 100644 index 000000000000..85546132f468 --- /dev/null +++ b/models/seq2seq.py @@ -0,0 +1,152 @@ +import torch +import torch.nn as nn +import numpy as np +import random + +MAX_LENGTH = 50 +OUTPUT_SIZE = 3797 +HIDDEN_SIZE = 256 + +class LSTMCell(nn.Module): + def __init__(self, hidden_size, input_size): + super().__init__() + self.weight_ih_l0_t = nn.Parameter(torch.randn(4, input_size, hidden_size, dtype=torch.float32)) + self.weight_hh_l0_t = nn.Parameter(torch.randn(4, input_size, hidden_size, dtype=torch.float32)) + self.bias_ih_0 = nn.Parameter( + torch.randn(hidden_size, dtype=torch.float32)) + self.bias_hh_0 = nn.Parameter( + torch.randn(hidden_size, dtype=torch.float32)) + self.bias_ih_1 = nn.Parameter( + torch.randn(hidden_size, dtype=torch.float32)) + self.bias_hh_1 = nn.Parameter( + torch.randn(hidden_size, dtype=torch.float32)) + self.bias_ih_2 = nn.Parameter( + torch.randn(hidden_size, dtype=torch.float32)) + self.bias_hh_2 = nn.Parameter( + torch.randn(hidden_size, dtype=torch.float32)) + self.bias_ih_3 = nn.Parameter( + torch.randn(hidden_size, dtype=torch.float32)) + self.bias_hh_3 = nn.Parameter( + torch.randn(hidden_size, dtype=torch.float32)) + self.hidden_size = hidden_size + self.input_size = input_size + nn.init.xavier_uniform_(self.weight_ih_l0_t) + nn.init.xavier_uniform_(self.weight_hh_l0_t) + + def forward(self, x, h, c): + ih = torch.matmul(x, self.weight_ih_l0_t) + hh = torch.matmul(h, self.weight_hh_l0_t) + ih0 = ih[0] + self.bias_ih_0 + hh0 = hh[0] + self.bias_hh_0 + ih1 = ih[1] + self.bias_ih_1 + hh1 = hh[1] + self.bias_hh_1 + ih2 = ih[2] + self.bias_ih_2 + hh2 = hh[2] + self.bias_hh_2 + ih3 = ih[3] + self.bias_ih_3 + hh3 = hh[3] + self.bias_hh_3 + + ingate = torch.sigmoid(ih0 + hh0) + forgetgate = torch.sigmoid(ih1 + hh1) + cellgate = torch.tanh(ih2 + hh2) + outgate = torch.sigmoid(ih3 + hh3) + + c = (forgetgate * c) + (ingate * cellgate) + h = outgate * torch.tanh(c) + return h, c + +class AttnDecoderRNN(nn.Module): + def __init__(self, hidden_size, output_size, dropout_p=0.1, max_length=MAX_LENGTH): + super(AttnDecoderRNN, self).__init__() + self.hidden_size = hidden_size + self.output_size = output_size + self.dropout_p = dropout_p + self.max_length = max_length + + self.gru = LSTMCell(self.hidden_size, self.hidden_size) + self.out = nn.Linear(self.hidden_size, self.output_size) + self.embedding = nn.Embedding(self.output_size, self.hidden_size) + self.EOS_token = 0 + self.SOS_token = 1 + + def forward(self, encoder_output, std, h, c): + batch_size = encoder_output.size()[1] + output_all = torch.zeros(self.max_length, batch_size, dtype=torch.int64, device='cuda') + 0 + output = torch.full((batch_size,), self.SOS_token, dtype=torch.int64, device='cuda') + cond = True + id = 0 + while cond: + x = self.embedding(output) + h = torch.reshape(h, (batch_size, self.hidden_size)) + # lstm start + ih = torch.matmul(x, self.gru.weight_ih_l0_t) + hh = torch.matmul(h, self.gru.weight_hh_l0_t) + ih0 = ih[0] + self.gru.bias_ih_0 + hh0 = hh[0] + self.gru.bias_hh_0 + ih1 = ih[1] + self.gru.bias_ih_1 + hh1 = hh[1] + self.gru.bias_hh_1 + ih2 = ih[2] + self.gru.bias_ih_2 + hh2 = hh[2] + self.gru.bias_hh_2 
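+ # ih3/hh3 below feed the output gate; rows 0-2 above feed the input, forget
+ # and cell gates of this inlined LSTM cell.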
+ ih3 = ih[3] + self.gru.bias_ih_3 + hh3 = hh[3] + self.gru.bias_hh_3 + + ingate = torch.sigmoid(ih0 + hh0) + forgetgate = torch.sigmoid(ih1 + hh1) + cellgate = torch.tanh(ih2 + hh2) + outgate = torch.sigmoid(ih3 + hh3) + + c = (forgetgate * c) + (ingate * cellgate) + h = outgate * torch.tanh(c) + # lstm end + output = self.out(h) + std[id] + output = output.argmax(1) + output_all[id] = output + id = id + 1 + cond = (torch.max(output) > self.EOS_token) & (id < self.max_length) + return output_all, h + +def get_model(): + attn_decoder = AttnDecoderRNN(HIDDEN_SIZE, OUTPUT_SIZE, dropout_p=0.1).cuda() + return attn_decoder + +def gen_mask_from_sequence(std): + bs = std.shape[0] + padded_std = torch.zeros((bs, MAX_LENGTH), dtype=std.dtype, device='cuda') + padded_std[:, :std.shape[1]] = std + mask = torch.zeros(bs, MAX_LENGTH, OUTPUT_SIZE, device='cuda') + mask[torch.arange(bs).unsqueeze(1), torch.arange(MAX_LENGTH).unsqueeze(0), padded_std] = 1000000.0 + mask = mask.transpose(0, 1).contiguous().clone() + return mask + +def get_input(batch_size): + std = [] + MAX_LENGTH = 50 + for i in range(batch_size): + l = max(i, 10) + l = min(l, MAX_LENGTH) + lst = list(range(1, l)) + lst.append(0) + assert(len(lst) <= MAX_LENGTH) + # pad to MAX_LENGTH + lst = lst + [0] * (MAX_LENGTH - len(lst)) + std.append(lst) + std = torch.tensor(std, device='cuda') + mask = gen_mask_from_sequence(std) + encoder_output = torch.randn(MAX_LENGTH, batch_size, HIDDEN_SIZE, device='cuda') + h = torch.randn(batch_size, HIDDEN_SIZE, device='cuda') + c = torch.randn(batch_size, HIDDEN_SIZE, device='cuda') + return (encoder_output, mask, h, c), {} + +def get_dynamic_inputs(batch_size, num_inputs): + seq_len = [random.randint(1, MAX_LENGTH) for _ in range(num_inputs)] + all_masks = [] + for i in range(num_inputs): + std = np.random.randint(1, OUTPUT_SIZE, (batch_size, MAX_LENGTH)) + if seq_len[i] < MAX_LENGTH: + std[:, seq_len[i]:] = 0 + std = torch.tensor(std, device='cuda') + mask = gen_mask_from_sequence(std) + all_masks.append(mask) + encoder_output = torch.randn(MAX_LENGTH, batch_size, HIDDEN_SIZE, device='cuda') + h = torch.randn(batch_size, HIDDEN_SIZE, device='cuda') + c = torch.randn(batch_size, HIDDEN_SIZE, device='cuda') + return [(encoder_output, all_masks[i], h, c) for i in range(num_inputs)], [{} for _ in range(num_inputs)] diff --git a/models/tridentnet.py b/models/tridentnet.py new file mode 100644 index 000000000000..5d9e04153141 --- /dev/null +++ b/models/tridentnet.py @@ -0,0 +1,2072 @@ +# trident resnet in mmdetection, graph break due to numpy + +import warnings +import functools + +import torch.nn as nn +import torch.utils.checkpoint as cp +# from mmcv.cnn import build_conv_layer, build_norm_layer, build_plugin_layer +# from mmcv.runner import BaseModule +from torch.nn.modules.batchnorm import _BatchNorm + +import torch +from torch import nn as nn +from typing import Dict, Union, Tuple, Optional, List, Callable + +import math +import copy +import warnings +from abc import ABCMeta +from collections import defaultdict +from logging import FileHandler +from typing import Iterable, Optional + +# from mmcv.runner.dist_utils import master_only +# from mmcv.utils.logging import get_logger, logger_initialized, print_log +import inspect +from torch.nn.modules.utils import _pair +import numpy as np +import torch.nn.functional as F +from torch import distributed as dist +import logging + +# MODIFIED: a simplified operator registry +CONV_LAYERS = { + 'Conv1d': nn.Conv1d, + 'Conv2d': nn.Conv2d, + 'Conv3d': nn.Conv3d, + 
'Conv': nn.Conv2d, +} + +NORM_LAYERS = { + + 'BN': nn.BatchNorm2d, + 'BN1d': nn.BatchNorm1d, + 'BN2d': nn.BatchNorm2d, + 'BN3d': nn.BatchNorm3d, + # 'SyncBN': SyncBatchNorm, + 'GN': nn.GroupNorm, + 'LN': nn.LayerNorm, + 'IN': nn.InstanceNorm2d, + 'IN1d': nn.InstanceNorm1d, + 'IN2d': nn.InstanceNorm2d, + 'IN3d': nn.InstanceNorm3d, +} + +PLUGIN_LAYERS = {} + + +def get_dist_info() -> Tuple[int, int]: + if dist.is_available() and dist.is_initialized(): + rank = dist.get_rank() + world_size = dist.get_world_size() + else: + rank = 0 + world_size = 1 + return rank, world_size + + +def master_only(func: Callable) -> Callable: + + @functools.wraps(func) + def wrapper(*args, **kwargs): + rank, _ = get_dist_info() + if rank == 0: + return func(*args, **kwargs) + + return wrapper + + + +logger_initialized: dict = {} + + +def get_logger(name, log_file=None, log_level=logging.INFO, file_mode='w'): + """Initialize and get a logger by name. + + If the logger has not been initialized, this method will initialize the + logger by adding one or two handlers, otherwise the initialized logger will + be directly returned. During initialization, a StreamHandler will always be + added. If `log_file` is specified and the process rank is 0, a FileHandler + will also be added. + + Args: + name (str): Logger name. + log_file (str | None): The log filename. If specified, a FileHandler + will be added to the logger. + log_level (int): The logger level. Note that only the process of + rank 0 is affected, and other processes will set the level to + "Error" thus be silent most of the time. + file_mode (str): The file mode used in opening log file. + Defaults to 'w'. + + Returns: + logging.Logger: The expected logger. + """ + logger = logging.getLogger(name) + if name in logger_initialized: + return logger + # handle hierarchical names + # e.g., logger "a" is initialized, then logger "a.b" will skip the + # initialization since it is a child of "a". + for logger_name in logger_initialized: + if name.startswith(logger_name): + return logger + + # handle duplicate logs to the console + # Starting in 1.8.0, PyTorch DDP attaches a StreamHandler (NOTSET) + # to the root logger. As logger.propagate is True by default, this root + # level handler causes logging messages from rank>0 processes to + # unexpectedly show up on the console, creating much unwanted clutter. + # To fix this issue, we set the root logger's StreamHandler, if any, to log + # at the ERROR level. + for handler in logger.root.handlers: + if type(handler) is logging.StreamHandler: + handler.setLevel(logging.ERROR) + + stream_handler = logging.StreamHandler() + handlers = [stream_handler] + + if dist.is_available() and dist.is_initialized(): + rank = dist.get_rank() + else: + rank = 0 + + # only rank 0 will add a FileHandler + if rank == 0 and log_file is not None: + # Here, the default behaviour of the official logger is 'a'. Thus, we + # provide an interface to change the file mode to the default + # behaviour. + file_handler = logging.FileHandler(log_file, file_mode) + handlers.append(file_handler) + + formatter = logging.Formatter( + '%(asctime)s - %(name)s - %(levelname)s - %(message)s') + for handler in handlers: + handler.setFormatter(formatter) + handler.setLevel(log_level) + logger.addHandler(handler) + + if rank == 0: + logger.setLevel(log_level) + else: + logger.setLevel(logging.ERROR) + + logger_initialized[name] = True + + return logger + + +def print_log(msg, logger=None, level=logging.INFO): + """Print a log message. 
+ + Args: + msg (str): The message to be logged. + logger (logging.Logger | str | None): The logger to be used. + Some special loggers are: + + - "silent": no message will be printed. + - other str: the logger obtained with `get_root_logger(logger)`. + - None: The `print()` method will be used to print log messages. + level (int): Logging level. Only available when `logger` is a Logger + object or "root". + """ + if logger is None: + print(msg) + elif isinstance(logger, logging.Logger): + logger.log(level, msg) + elif logger == 'silent': + pass + elif isinstance(logger, str): + _logger = get_logger(logger) + _logger.log(level, msg) + else: + raise TypeError( + 'logger should be either a logging.Logger object, str, ' + f'"silent" or None, but got {type(logger)}') + + +# MODIFIED: rename infer_abbr in mmcv/cnn/bricks/norm.py to avoid name conflit +def infer_abbr_norm(class_type): + """Infer abbreviation from the class name. + + When we build a norm layer with `build_norm_layer()`, we want to preserve + the norm type in variable names, e.g, self.bn1, self.gn. This method will + infer the abbreviation to map class types to abbreviations. + + Rule 1: If the class has the property "_abbr_", return the property. + Rule 2: If the parent class is _BatchNorm, GroupNorm, LayerNorm or + InstanceNorm, the abbreviation of this layer will be "bn", "gn", "ln" and + "in" respectively. + Rule 3: If the class name contains "batch", "group", "layer" or "instance", + the abbreviation of this layer will be "bn", "gn", "ln" and "in" + respectively. + Rule 4: Otherwise, the abbreviation falls back to "norm". + + Args: + class_type (type): The norm layer type. + + Returns: + str: The inferred abbreviation. + """ + if not inspect.isclass(class_type): + raise TypeError( + f'class_type must be a type, but got {type(class_type)}') + if hasattr(class_type, '_abbr_'): + return class_type._abbr_ + # MODIFIED: remove _InstanceNorm to simplify dependency + # if issubclass(class_type, _InstanceNorm): # IN is a subclass of BN + # return 'in' + if issubclass(class_type, _BatchNorm): + return 'bn' + elif issubclass(class_type, nn.GroupNorm): + return 'gn' + elif issubclass(class_type, nn.LayerNorm): + return 'ln' + else: + class_name = class_type.__name__.lower() + if 'batch' in class_name: + return 'bn' + elif 'group' in class_name: + return 'gn' + elif 'layer' in class_name: + return 'ln' + elif 'instance' in class_name: + return 'in' + else: + return 'norm_layer' + +def build_norm_layer(cfg: Dict, + num_features: int, + postfix: Union[int, str] = '') -> Tuple[str, nn.Module]: + """Build normalization layer. + + Args: + cfg (dict): The norm layer config, which should contain: + + - type (str): Layer type. + - layer args: Args needed to instantiate a norm layer. + - requires_grad (bool, optional): Whether stop gradient updates. + num_features (int): Number of input channels. + postfix (int | str): The postfix to be appended into norm abbreviation + to create named layer. + + Returns: + tuple[str, nn.Module]: The first element is the layer name consisting + of abbreviation and postfix, e.g., bn1, gn. The second element is the + created norm layer. 
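+
+ Example:
+ >>> name, layer = build_norm_layer(dict(type='BN'), 64, postfix=1)
+ >>> name
+ 'bn1'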
+ """ + if not isinstance(cfg, dict): + raise TypeError('cfg must be a dict') + if 'type' not in cfg: + raise KeyError('the cfg dict must contain the key "type"') + cfg_ = cfg.copy() + + layer_type = cfg_.pop('type') + if layer_type not in NORM_LAYERS: + raise KeyError(f'Unrecognized norm type {layer_type}') + + norm_layer = NORM_LAYERS.get(layer_type) + abbr = infer_abbr_norm(norm_layer) # MODIFIED: rename infer_abbr to infer_abbr_norm + + assert isinstance(postfix, (int, str)) + name = abbr + str(postfix) + + requires_grad = cfg_.pop('requires_grad', True) + cfg_.setdefault('eps', 1e-5) + if layer_type != 'GN': + layer = norm_layer(num_features, **cfg_) + if layer_type == 'SyncBN' and hasattr(layer, '_specify_ddp_gpu_num'): + layer._specify_ddp_gpu_num(1) + else: + assert 'num_groups' in cfg_ + layer = norm_layer(num_channels=num_features, **cfg_) + + for param in layer.parameters(): + param.requires_grad = requires_grad + + return name, layer + +def build_conv_layer(cfg: Optional[Dict], *args, **kwargs) -> nn.Module: + """Build convolution layer. + + Args: + cfg (None or dict): The conv layer config, which should contain: + - type (str): Layer type. + - layer args: Args needed to instantiate an conv layer. + args (argument list): Arguments passed to the `__init__` + method of the corresponding conv layer. + kwargs (keyword arguments): Keyword arguments passed to the `__init__` + method of the corresponding conv layer. + + Returns: + nn.Module: Created conv layer. + """ + if cfg is None: + cfg_ = dict(type='Conv2d') + else: + if not isinstance(cfg, dict): + raise TypeError('cfg must be a dict') + if 'type' not in cfg: + raise KeyError('the cfg dict must contain the key "type"') + cfg_ = cfg.copy() + + layer_type = cfg_.pop('type') + if layer_type not in CONV_LAYERS: + raise KeyError(f'Unrecognized layer type {layer_type}') + else: + conv_layer = CONV_LAYERS.get(layer_type) + + layer = conv_layer(*args, **kwargs, **cfg_) + + return layer + + +# MODIFIED: rename infer_abbr in mmcv/cnn/bricks/plugin.py to avoid name conflit +def infer_abbr_plugin(class_type: type) -> str: + """Infer abbreviation from the class name. + + This method will infer the abbreviation to map class types to + abbreviations. + + Rule 1: If the class has the property "abbr", return the property. + Rule 2: Otherwise, the abbreviation falls back to snake case of class + name, e.g. the abbreviation of ``FancyBlock`` will be ``fancy_block``. + + Args: + class_type (type): The norm layer type. + + Returns: + str: The inferred abbreviation. + """ + + def camel2snack(word): + """Convert camel case word into snack case. + + Modified from `inflection lib + `_. + + Example:: + + >>> camel2snack("FancyBlock") + 'fancy_block' + """ + + word = re.sub(r'([A-Z]+)([A-Z][a-z])', r'\1_\2', word) + word = re.sub(r'([a-z\d])([A-Z])', r'\1_\2', word) + word = word.replace('-', '_') + return word.lower() + + if not inspect.isclass(class_type): + raise TypeError( + f'class_type must be a type, but got {type(class_type)}') + if hasattr(class_type, '_abbr_'): + return class_type._abbr_ # type: ignore + else: + return camel2snack(class_type.__name__) + + +def build_plugin_layer(cfg: Dict, + postfix: Union[int, str] = '', + **kwargs) -> Tuple[str, nn.Module]: + """Build plugin layer. + + Args: + cfg (dict): cfg should contain: + + - type (str): identify plugin layer type. + - layer args: args needed to instantiate a plugin layer. + postfix (int, str): appended into norm abbreviation to + create named layer. Default: ''. 
+ + Returns: + tuple[str, nn.Module]: The first one is the concatenation of + abbreviation and postfix. The second is the created plugin layer. + """ + if not isinstance(cfg, dict): + raise TypeError('cfg must be a dict') + if 'type' not in cfg: + raise KeyError('the cfg dict must contain the key "type"') + cfg_ = cfg.copy() + + layer_type = cfg_.pop('type') + if layer_type not in PLUGIN_LAYERS: + raise KeyError(f'Unrecognized plugin type {layer_type}') + + plugin_layer = PLUGIN_LAYERS.get(layer_type) + abbr = infer_abbr_plugin(plugin_layer) # MODIFIED: rename infer_abbr to infer_abbr_plugin + + assert isinstance(postfix, (int, str)) + name = abbr + str(postfix) + + layer = plugin_layer(**kwargs, **cfg_) + + return name, layer + + +class BaseModule(nn.Module, metaclass=ABCMeta): + """Base module for all modules in openmmlab. + + ``BaseModule`` is a wrapper of ``torch.nn.Module`` with additional + functionality of parameter initialization. Compared with + ``torch.nn.Module``, ``BaseModule`` mainly adds three attributes. + + - ``init_cfg``: the config to control the initialization. + - ``init_weights``: The function of parameter initialization and recording + initialization information. + - ``_params_init_info``: Used to track the parameter initialization + information. This attribute only exists during executing the + ``init_weights``. + + Args: + init_cfg (dict, optional): Initialization config dict. + """ + + def __init__(self, init_cfg: Optional[dict] = None): + """Initialize BaseModule, inherited from `torch.nn.Module`""" + + # NOTE init_cfg can be defined in different levels, but init_cfg + # in low levels has a higher priority. + + super().__init__() + # define default value of init_cfg instead of hard code + # in init_weights() function + self._is_init = False + + self.init_cfg = copy.deepcopy(init_cfg) + + # Backward compatibility in derived classes + # if pretrained is not None: + # warnings.warn('DeprecationWarning: pretrained is a deprecated \ + # key, please consider using init_cfg') + # self.init_cfg = dict(type='Pretrained', checkpoint=pretrained) + + @property + def is_init(self) -> bool: + return self._is_init + + def init_weights(self) -> None: + """Initialize the weights.""" + + is_top_level_module = False + # check if it is top-level module + if not hasattr(self, '_params_init_info'): + # The `_params_init_info` is used to record the initialization + # information of the parameters + # the key should be the obj:`nn.Parameter` of model and the value + # should be a dict containing + # - init_info (str): The string that describes the initialization. + # - tmp_mean_value (FloatTensor): The mean of the parameter, + # which indicates whether the parameter has been modified. + # this attribute would be deleted after all parameters + # is initialized. + self._params_init_info: defaultdict = defaultdict(dict) + is_top_level_module = True + + # Initialize the `_params_init_info`, + # When detecting the `tmp_mean_value` of + # the corresponding parameter is changed, update related + # initialization information + for name, param in self.named_parameters(): + self._params_init_info[param][ + 'init_info'] = f'The value is the same before and ' \ + f'after calling `init_weights` ' \ + f'of {self.__class__.__name__} ' + self._params_init_info[param][ + 'tmp_mean_value'] = param.data.mean() + + # pass `params_init_info` to all submodules + # All submodules share the same `params_init_info`, + # so it will be updated when parameters are + # modified at any level of the model. 
+ for sub_module in self.modules(): + sub_module._params_init_info = self._params_init_info + + # Get the initialized logger, if not exist, + # create a logger named `mmcv` + logger_names = list(logger_initialized.keys()) + logger_name = logger_names[0] if logger_names else 'mmcv' + + from ..cnn import initialize + from ..cnn.utils.weight_init import update_init_info + module_name = self.__class__.__name__ + if not self._is_init: + if self.init_cfg: + print_log( + f'initialize {module_name} with init_cfg {self.init_cfg}', + logger=logger_name) + initialize(self, self.init_cfg) + if isinstance(self.init_cfg, dict): + # prevent the parameters of + # the pre-trained model + # from being overwritten by + # the `init_weights` + if self.init_cfg['type'] == 'Pretrained': + return + + for m in self.children(): + if hasattr(m, 'init_weights'): + m.init_weights() + # users may overload the `init_weights` + update_init_info( + m, + init_info=f'Initialized by ' + f'user-defined `init_weights`' + f' in {m.__class__.__name__} ') + + self._is_init = True + else: + warnings.warn(f'init_weights of {self.__class__.__name__} has ' + f'been called more than once.') + + if is_top_level_module: + self._dump_init_info(logger_name) + + for sub_module in self.modules(): + del sub_module._params_init_info + + @master_only + def _dump_init_info(self, logger_name: str) -> None: + """Dump the initialization information to a file named + `initialization.log.json` in workdir. + + Args: + logger_name (str): The name of logger. + """ + + logger = get_logger(logger_name) + + with_file_handler = False + # dump the information to the logger file if there is a `FileHandler` + for handler in logger.handlers: + if isinstance(handler, FileHandler): + handler.stream.write( + 'Name of parameter - Initialization information\n') + for name, param in self.named_parameters(): + handler.stream.write( + f'\n{name} - {param.shape}: ' + f"\n{self._params_init_info[param]['init_info']} \n") + handler.stream.flush() + with_file_handler = True + if not with_file_handler: + for name, param in self.named_parameters(): + print_log( + f'\n{name} - {param.shape}: ' + f"\n{self._params_init_info[param]['init_info']} \n ", + logger=logger_name) + + def __repr__(self): + s = super().__repr__() + if self.init_cfg: + s += f'\ninit_cfg={self.init_cfg}' + return s + +class Sequential(BaseModule, nn.Sequential): + """Sequential module in openmmlab. + + Args: + init_cfg (dict, optional): Initialization config dict. + """ + + def __init__(self, *args, init_cfg: Optional[dict] = None): + BaseModule.__init__(self, init_cfg) + nn.Sequential.__init__(self, *args) + + +def kaiming_init(module: nn.Module, + a: float = 0, + mode: str = 'fan_out', + nonlinearity: str = 'relu', + bias: float = 0, + distribution: str = 'normal') -> None: + assert distribution in ['uniform', 'normal'] + if hasattr(module, 'weight') and module.weight is not None: + if distribution == 'uniform': + nn.init.kaiming_uniform_( + module.weight, a=a, mode=mode, nonlinearity=nonlinearity) + else: + nn.init.kaiming_normal_( + module.weight, a=a, mode=mode, nonlinearity=nonlinearity) + if hasattr(module, 'bias') and module.bias is not None: + nn.init.constant_(module.bias, bias) + + +class GeneralizedAttention(nn.Module): + """GeneralizedAttention module. + + See 'An Empirical Study of Spatial Attention Mechanisms in Deep Networks' + (https://arxiv.org/abs/1711.07971) for details. + + Args: + in_channels (int): Channels of the input feature map. + spatial_range (int): The spatial range. 
-1 indicates no spatial range + constraint. Default: -1. + num_heads (int): The head number of empirical_attention module. + Default: 9. + position_embedding_dim (int): The position embedding dimension. + Default: -1. + position_magnitude (int): A multiplier acting on coord difference. + Default: 1. + kv_stride (int): The feature stride acting on key/value feature map. + Default: 2. + q_stride (int): The feature stride acting on query feature map. + Default: 1. + attention_type (str): A binary indicator string for indicating which + items in generalized empirical_attention module are used. + Default: '1111'. + + - '1000' indicates 'query and key content' (appr - appr) item, + - '0100' indicates 'query content and relative position' + (appr - position) item, + - '0010' indicates 'key content only' (bias - appr) item, + - '0001' indicates 'relative position only' (bias - position) item. + """ + + _abbr_ = 'gen_attention_block' + + def __init__(self, + in_channels: int, + spatial_range: int = -1, + num_heads: int = 9, + position_embedding_dim: int = -1, + position_magnitude: int = 1, + kv_stride: int = 2, + q_stride: int = 1, + attention_type: str = '1111'): + + super().__init__() + + # hard range means local range for non-local operation + self.position_embedding_dim = ( + position_embedding_dim + if position_embedding_dim > 0 else in_channels) + + self.position_magnitude = position_magnitude + self.num_heads = num_heads + self.in_channels = in_channels + self.spatial_range = spatial_range + self.kv_stride = kv_stride + self.q_stride = q_stride + self.attention_type = [bool(int(_)) for _ in attention_type] + self.qk_embed_dim = in_channels // num_heads + out_c = self.qk_embed_dim * num_heads + + if self.attention_type[0] or self.attention_type[1]: + self.query_conv = nn.Conv2d( + in_channels=in_channels, + out_channels=out_c, + kernel_size=1, + bias=False) + self.query_conv.kaiming_init = True + + if self.attention_type[0] or self.attention_type[2]: + self.key_conv = nn.Conv2d( + in_channels=in_channels, + out_channels=out_c, + kernel_size=1, + bias=False) + self.key_conv.kaiming_init = True + + self.v_dim = in_channels // num_heads + self.value_conv = nn.Conv2d( + in_channels=in_channels, + out_channels=self.v_dim * num_heads, + kernel_size=1, + bias=False) + self.value_conv.kaiming_init = True + + if self.attention_type[1] or self.attention_type[3]: + self.appr_geom_fc_x = nn.Linear( + self.position_embedding_dim // 2, out_c, bias=False) + self.appr_geom_fc_x.kaiming_init = True + + self.appr_geom_fc_y = nn.Linear( + self.position_embedding_dim // 2, out_c, bias=False) + self.appr_geom_fc_y.kaiming_init = True + + if self.attention_type[2]: + stdv = 1.0 / math.sqrt(self.qk_embed_dim * 2) + appr_bias_value = -2 * stdv * torch.rand(out_c) + stdv + self.appr_bias = nn.Parameter(appr_bias_value) + + if self.attention_type[3]: + stdv = 1.0 / math.sqrt(self.qk_embed_dim * 2) + geom_bias_value = -2 * stdv * torch.rand(out_c) + stdv + self.geom_bias = nn.Parameter(geom_bias_value) + + self.proj_conv = nn.Conv2d( + in_channels=self.v_dim * num_heads, + out_channels=in_channels, + kernel_size=1, + bias=True) + self.proj_conv.kaiming_init = True + self.gamma = nn.Parameter(torch.zeros(1)) + + if self.spatial_range >= 0: + # only works when non local is after 3*3 conv + if in_channels == 256: + max_len = 84 + elif in_channels == 512: + max_len = 42 + + max_len_kv = int((max_len - 1.0) / self.kv_stride + 1) + local_constraint_map = np.ones( + (max_len, max_len, max_len_kv, max_len_kv), dtype=int) + for 
iy in range(max_len): + for ix in range(max_len): + local_constraint_map[ + iy, ix, + max((iy - self.spatial_range) // + self.kv_stride, 0):min((iy + self.spatial_range + + 1) // self.kv_stride + + 1, max_len), + max((ix - self.spatial_range) // + self.kv_stride, 0):min((ix + self.spatial_range + + 1) // self.kv_stride + + 1, max_len)] = 0 + + self.local_constraint_map = nn.Parameter( + torch.from_numpy(local_constraint_map).byte(), + requires_grad=False) + + if self.q_stride > 1: + self.q_downsample = nn.AvgPool2d( + kernel_size=1, stride=self.q_stride) + else: + self.q_downsample = None + + if self.kv_stride > 1: + self.kv_downsample = nn.AvgPool2d( + kernel_size=1, stride=self.kv_stride) + else: + self.kv_downsample = None + + self.init_weights() + + def get_position_embedding(self, + h, + w, + h_kv, + w_kv, + q_stride, + kv_stride, + device, + dtype, + feat_dim, + wave_length=1000): + # the default type of Tensor is float32, leading to type mismatch + # in fp16 mode. Cast it to support fp16 mode. + h_idxs = torch.linspace(0, h - 1, h).to(device=device, dtype=dtype) + h_idxs = h_idxs.view((h, 1)) * q_stride + + w_idxs = torch.linspace(0, w - 1, w).to(device=device, dtype=dtype) + w_idxs = w_idxs.view((w, 1)) * q_stride + + h_kv_idxs = torch.linspace(0, h_kv - 1, h_kv).to( + device=device, dtype=dtype) + h_kv_idxs = h_kv_idxs.view((h_kv, 1)) * kv_stride + + w_kv_idxs = torch.linspace(0, w_kv - 1, w_kv).to( + device=device, dtype=dtype) + w_kv_idxs = w_kv_idxs.view((w_kv, 1)) * kv_stride + + # (h, h_kv, 1) + h_diff = h_idxs.unsqueeze(1) - h_kv_idxs.unsqueeze(0) + h_diff *= self.position_magnitude + + # (w, w_kv, 1) + w_diff = w_idxs.unsqueeze(1) - w_kv_idxs.unsqueeze(0) + w_diff *= self.position_magnitude + + feat_range = torch.arange(0, feat_dim / 4).to( + device=device, dtype=dtype) + + dim_mat = torch.Tensor([wave_length]).to(device=device, dtype=dtype) + dim_mat = dim_mat**((4. 
/ feat_dim) * feat_range) + dim_mat = dim_mat.view((1, 1, -1)) + + embedding_x = torch.cat( + ((w_diff / dim_mat).sin(), (w_diff / dim_mat).cos()), dim=2) + + embedding_y = torch.cat( + ((h_diff / dim_mat).sin(), (h_diff / dim_mat).cos()), dim=2) + + return embedding_x, embedding_y + + def forward(self, x_input: torch.Tensor) -> torch.Tensor: + num_heads = self.num_heads + + # use empirical_attention + if self.q_downsample is not None: + x_q = self.q_downsample(x_input) + else: + x_q = x_input + n, _, h, w = x_q.shape + + if self.kv_downsample is not None: + x_kv = self.kv_downsample(x_input) + else: + x_kv = x_input + _, _, h_kv, w_kv = x_kv.shape + + if self.attention_type[0] or self.attention_type[1]: + proj_query = self.query_conv(x_q).view( + (n, num_heads, self.qk_embed_dim, h * w)) + proj_query = proj_query.permute(0, 1, 3, 2) + + if self.attention_type[0] or self.attention_type[2]: + proj_key = self.key_conv(x_kv).view( + (n, num_heads, self.qk_embed_dim, h_kv * w_kv)) + + if self.attention_type[1] or self.attention_type[3]: + position_embed_x, position_embed_y = self.get_position_embedding( + h, w, h_kv, w_kv, self.q_stride, self.kv_stride, + x_input.device, x_input.dtype, self.position_embedding_dim) + # (n, num_heads, w, w_kv, dim) + position_feat_x = self.appr_geom_fc_x(position_embed_x).\ + view(1, w, w_kv, num_heads, self.qk_embed_dim).\ + permute(0, 3, 1, 2, 4).\ + repeat(n, 1, 1, 1, 1) + + # (n, num_heads, h, h_kv, dim) + position_feat_y = self.appr_geom_fc_y(position_embed_y).\ + view(1, h, h_kv, num_heads, self.qk_embed_dim).\ + permute(0, 3, 1, 2, 4).\ + repeat(n, 1, 1, 1, 1) + + position_feat_x /= math.sqrt(2) + position_feat_y /= math.sqrt(2) + + # accelerate for saliency only + if (np.sum(self.attention_type) == 1) and self.attention_type[2]: + appr_bias = self.appr_bias.\ + view(1, num_heads, 1, self.qk_embed_dim).\ + repeat(n, 1, 1, 1) + + energy = torch.matmul(appr_bias, proj_key).\ + view(n, num_heads, 1, h_kv * w_kv) + + h = 1 + w = 1 + else: + # (n, num_heads, h*w, h_kv*w_kv), query before key, 540mb for + if not self.attention_type[0]: + energy = torch.zeros( + n, + num_heads, + h, + w, + h_kv, + w_kv, + dtype=x_input.dtype, + device=x_input.device) + + # attention_type[0]: appr - appr + # attention_type[1]: appr - position + # attention_type[2]: bias - appr + # attention_type[3]: bias - position + if self.attention_type[0] or self.attention_type[2]: + if self.attention_type[0] and self.attention_type[2]: + appr_bias = self.appr_bias.\ + view(1, num_heads, 1, self.qk_embed_dim) + energy = torch.matmul(proj_query + appr_bias, proj_key).\ + view(n, num_heads, h, w, h_kv, w_kv) + + elif self.attention_type[0]: + energy = torch.matmul(proj_query, proj_key).\ + view(n, num_heads, h, w, h_kv, w_kv) + + elif self.attention_type[2]: + appr_bias = self.appr_bias.\ + view(1, num_heads, 1, self.qk_embed_dim).\ + repeat(n, 1, 1, 1) + + energy += torch.matmul(appr_bias, proj_key).\ + view(n, num_heads, 1, 1, h_kv, w_kv) + + if self.attention_type[1] or self.attention_type[3]: + if self.attention_type[1] and self.attention_type[3]: + geom_bias = self.geom_bias.\ + view(1, num_heads, 1, self.qk_embed_dim) + + proj_query_reshape = (proj_query + geom_bias).\ + view(n, num_heads, h, w, self.qk_embed_dim) + + energy_x = torch.matmul( + proj_query_reshape.permute(0, 1, 3, 2, 4), + position_feat_x.permute(0, 1, 2, 4, 3)) + energy_x = energy_x.\ + permute(0, 1, 3, 2, 4).unsqueeze(4) + + energy_y = torch.matmul( + proj_query_reshape, + position_feat_y.permute(0, 1, 2, 4, 3)) + 
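# energy_x here has shape (n, num_heads, h, w, 1, w_kv); after the unsqueeze
+ # below, energy_y has shape (n, num_heads, h, w, h_kv, 1), so the two terms
+ # broadcast when added into the 6-D energy tensor. +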
energy_y = energy_y.unsqueeze(5) + + energy += energy_x + energy_y + + elif self.attention_type[1]: + proj_query_reshape = proj_query.\ + view(n, num_heads, h, w, self.qk_embed_dim) + proj_query_reshape = proj_query_reshape.\ + permute(0, 1, 3, 2, 4) + position_feat_x_reshape = position_feat_x.\ + permute(0, 1, 2, 4, 3) + position_feat_y_reshape = position_feat_y.\ + permute(0, 1, 2, 4, 3) + + energy_x = torch.matmul(proj_query_reshape, + position_feat_x_reshape) + energy_x = energy_x.permute(0, 1, 3, 2, 4).unsqueeze(4) + + energy_y = torch.matmul(proj_query_reshape, + position_feat_y_reshape) + energy_y = energy_y.unsqueeze(5) + + energy += energy_x + energy_y + + elif self.attention_type[3]: + geom_bias = self.geom_bias.\ + view(1, num_heads, self.qk_embed_dim, 1).\ + repeat(n, 1, 1, 1) + + position_feat_x_reshape = position_feat_x.\ + view(n, num_heads, w * w_kv, self.qk_embed_dim) + + position_feat_y_reshape = position_feat_y.\ + view(n, num_heads, h * h_kv, self.qk_embed_dim) + + energy_x = torch.matmul(position_feat_x_reshape, geom_bias) + energy_x = energy_x.view(n, num_heads, 1, w, 1, w_kv) + + energy_y = torch.matmul(position_feat_y_reshape, geom_bias) + energy_y = energy_y.view(n, num_heads, h, 1, h_kv, 1) + + energy += energy_x + energy_y + + energy = energy.view(n, num_heads, h * w, h_kv * w_kv) + + if self.spatial_range >= 0: + cur_local_constraint_map = \ + self.local_constraint_map[:h, :w, :h_kv, :w_kv].\ + contiguous().\ + view(1, 1, h*w, h_kv*w_kv) + + energy = energy.masked_fill_(cur_local_constraint_map, + float('-inf')) + + attention = F.softmax(energy, 3) + + proj_value = self.value_conv(x_kv) + proj_value_reshape = proj_value.\ + view((n, num_heads, self.v_dim, h_kv * w_kv)).\ + permute(0, 1, 3, 2) + + out = torch.matmul(attention, proj_value_reshape).\ + permute(0, 1, 3, 2).\ + contiguous().\ + view(n, self.v_dim * self.num_heads, h, w) + + out = self.proj_conv(out) + + # output is downsampled, upsample back to input size + if self.q_downsample is not None: + out = F.interpolate( + out, + size=x_input.shape[2:], + mode='bilinear', + align_corners=False) + + out = self.gamma * out + x_input + return out + + def init_weights(self): + for m in self.modules(): + if hasattr(m, 'kaiming_init') and m.kaiming_init: + kaiming_init( + m, + mode='fan_in', + nonlinearity='leaky_relu', + bias=0, + distribution='uniform', + a=1) + + +class ResLayer(Sequential): + """ResLayer to build ResNet style backbone. + + Args: + block (nn.Module): block used to build ResLayer. + inplanes (int): inplanes of block. + planes (int): planes of block. + num_blocks (int): number of blocks. + stride (int): stride of the first block. Default: 1 + avg_down (bool): Use AvgPool instead of stride conv when + downsampling in the bottleneck. Default: False + conv_cfg (dict): dictionary to construct and config conv layer. + Default: None + norm_cfg (dict): dictionary to construct and config norm layer. + Default: dict(type='BN') + downsample_first (bool): Downsample at the first block or last block. + False for Hourglass, True for ResNet. 
Default: True + """ + + def __init__(self, + block, + inplanes, + planes, + num_blocks, + stride=1, + avg_down=False, + conv_cfg=None, + norm_cfg=dict(type='BN'), + downsample_first=True, + **kwargs): + self.block = block + + downsample = None + if stride != 1 or inplanes != planes * block.expansion: + downsample = [] + conv_stride = stride + if avg_down: + conv_stride = 1 + downsample.append( + nn.AvgPool2d( + kernel_size=stride, + stride=stride, + ceil_mode=True, + count_include_pad=False)) + downsample.extend([ + build_conv_layer( + conv_cfg, + inplanes, + planes * block.expansion, + kernel_size=1, + stride=conv_stride, + bias=False), + build_norm_layer(norm_cfg, planes * block.expansion)[1] + ]) + downsample = nn.Sequential(*downsample) + + layers = [] + if downsample_first: + layers.append( + block( + inplanes=inplanes, + planes=planes, + stride=stride, + downsample=downsample, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + **kwargs)) + inplanes = planes * block.expansion + for _ in range(1, num_blocks): + layers.append( + block( + inplanes=inplanes, + planes=planes, + stride=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + **kwargs)) + + else: # downsample_first=False is for HourglassModule + for _ in range(num_blocks - 1): + layers.append( + block( + inplanes=inplanes, + planes=inplanes, + stride=1, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + **kwargs)) + layers.append( + block( + inplanes=inplanes, + planes=planes, + stride=stride, + downsample=downsample, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + **kwargs)) + super(ResLayer, self).__init__(*layers) + +class BasicBlock(BaseModule): + expansion = 1 + + def __init__(self, + inplanes, + planes, + stride=1, + dilation=1, + downsample=None, + style='pytorch', + with_cp=False, + conv_cfg=None, + norm_cfg=dict(type='BN'), + dcn=None, + plugins=None, + init_cfg=None): + super(BasicBlock, self).__init__(init_cfg) + assert dcn is None, 'Not implemented yet.' + assert plugins is None, 'Not implemented yet.' 
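+ # build_norm_layer returns a (name, module) pair; each norm module is
+ # registered under its generated name ('bn1'/'bn2' with the default BN config)
+ # via add_module and accessed through the norm1/norm2 properties below.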
+ + self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1) + self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2) + + self.conv1 = build_conv_layer( + conv_cfg, + inplanes, + planes, + 3, + stride=stride, + padding=dilation, + dilation=dilation, + bias=False) + self.add_module(self.norm1_name, norm1) + self.conv2 = build_conv_layer( + conv_cfg, planes, planes, 3, padding=1, bias=False) + self.add_module(self.norm2_name, norm2) + + self.relu = nn.ReLU(inplace=True) + self.downsample = downsample + self.stride = stride + self.dilation = dilation + self.with_cp = with_cp + + @property + def norm1(self): + """nn.Module: normalization layer after the first convolution layer""" + return getattr(self, self.norm1_name) + + @property + def norm2(self): + """nn.Module: normalization layer after the second convolution layer""" + return getattr(self, self.norm2_name) + + def forward(self, x): + """Forward function.""" + + def _inner_forward(x): + identity = x + + out = self.conv1(x) + out = self.norm1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.norm2(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + + return out + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(_inner_forward, x) + else: + out = _inner_forward(x) + + out = self.relu(out) + + return out + + +class Bottleneck(BaseModule): + expansion = 4 + + def __init__(self, + inplanes, + planes, + stride=1, + dilation=1, + downsample=None, + style='pytorch', + with_cp=False, + conv_cfg=None, + norm_cfg=dict(type='BN'), + dcn=None, + plugins=None, + init_cfg=None): + """Bottleneck block for ResNet. + + If style is "pytorch", the stride-two layer is the 3x3 conv layer, if + it is "caffe", the stride-two layer is the first 1x1 conv layer. 
+ """ + super(Bottleneck, self).__init__(init_cfg) + assert style in ['pytorch', 'caffe'] + assert dcn is None or isinstance(dcn, dict) + assert plugins is None or isinstance(plugins, list) + if plugins is not None: + allowed_position = ['after_conv1', 'after_conv2', 'after_conv3'] + assert all(p['position'] in allowed_position for p in plugins) + + self.inplanes = inplanes + self.planes = planes + self.stride = stride + self.dilation = dilation + self.style = style + self.with_cp = with_cp + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.dcn = dcn + self.with_dcn = dcn is not None + self.plugins = plugins + self.with_plugins = plugins is not None + + if self.with_plugins: + # collect plugins for conv1/conv2/conv3 + self.after_conv1_plugins = [ + plugin['cfg'] for plugin in plugins + if plugin['position'] == 'after_conv1' + ] + self.after_conv2_plugins = [ + plugin['cfg'] for plugin in plugins + if plugin['position'] == 'after_conv2' + ] + self.after_conv3_plugins = [ + plugin['cfg'] for plugin in plugins + if plugin['position'] == 'after_conv3' + ] + + if self.style == 'pytorch': + self.conv1_stride = 1 + self.conv2_stride = stride + else: + self.conv1_stride = stride + self.conv2_stride = 1 + + self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1) + self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2) + self.norm3_name, norm3 = build_norm_layer( + norm_cfg, planes * self.expansion, postfix=3) + + self.conv1 = build_conv_layer( + conv_cfg, + inplanes, + planes, + kernel_size=1, + stride=self.conv1_stride, + bias=False) + self.add_module(self.norm1_name, norm1) + fallback_on_stride = False + if self.with_dcn: + fallback_on_stride = dcn.pop('fallback_on_stride', False) + if not self.with_dcn or fallback_on_stride: + self.conv2 = build_conv_layer( + conv_cfg, + planes, + planes, + kernel_size=3, + stride=self.conv2_stride, + padding=dilation, + dilation=dilation, + bias=False) + else: + assert self.conv_cfg is None, 'conv_cfg must be None for DCN' + self.conv2 = build_conv_layer( + dcn, + planes, + planes, + kernel_size=3, + stride=self.conv2_stride, + padding=dilation, + dilation=dilation, + bias=False) + + self.add_module(self.norm2_name, norm2) + self.conv3 = build_conv_layer( + conv_cfg, + planes, + planes * self.expansion, + kernel_size=1, + bias=False) + self.add_module(self.norm3_name, norm3) + + self.relu = nn.ReLU(inplace=True) + self.downsample = downsample + + if self.with_plugins: + self.after_conv1_plugin_names = self.make_block_plugins( + planes, self.after_conv1_plugins) + self.after_conv2_plugin_names = self.make_block_plugins( + planes, self.after_conv2_plugins) + self.after_conv3_plugin_names = self.make_block_plugins( + planes * self.expansion, self.after_conv3_plugins) + + def make_block_plugins(self, in_channels, plugins): + """make plugins for block. + + Args: + in_channels (int): Input channels of plugin. + plugins (list[dict]): List of plugins cfg to build. + + Returns: + list[str]: List of the names of plugin. 
+ """ + assert isinstance(plugins, list) + plugin_names = [] + for plugin in plugins: + plugin = plugin.copy() + name, layer = build_plugin_layer( + plugin, + in_channels=in_channels, + postfix=plugin.pop('postfix', '')) + assert not hasattr(self, name), f'duplicate plugin {name}' + self.add_module(name, layer) + plugin_names.append(name) + return plugin_names + + def forward_plugin(self, x, plugin_names): + out = x + for name in plugin_names: + out = getattr(self, name)(out) + return out + + @property + def norm1(self): + """nn.Module: normalization layer after the first convolution layer""" + return getattr(self, self.norm1_name) + + @property + def norm2(self): + """nn.Module: normalization layer after the second convolution layer""" + return getattr(self, self.norm2_name) + + @property + def norm3(self): + """nn.Module: normalization layer after the third convolution layer""" + return getattr(self, self.norm3_name) + + def forward(self, x): + """Forward function.""" + + def _inner_forward(x): + identity = x + out = self.conv1(x) + out = self.norm1(out) + out = self.relu(out) + + if self.with_plugins: + out = self.forward_plugin(out, self.after_conv1_plugin_names) + + out = self.conv2(out) + out = self.norm2(out) + out = self.relu(out) + + if self.with_plugins: + out = self.forward_plugin(out, self.after_conv2_plugin_names) + + out = self.conv3(out) + out = self.norm3(out) + + if self.with_plugins: + out = self.forward_plugin(out, self.after_conv3_plugin_names) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + + return out + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(_inner_forward, x) + else: + out = _inner_forward(x) + + out = self.relu(out) + + return out + + +class ResNet(BaseModule): + """ResNet backbone. + + Args: + depth (int): Depth of resnet, from {18, 34, 50, 101, 152}. + stem_channels (int | None): Number of stem channels. If not specified, + it will be the same as `base_channels`. Default: None. + base_channels (int): Number of base channels of res layer. Default: 64. + in_channels (int): Number of input image channels. Default: 3. + num_stages (int): Resnet stages. Default: 4. + strides (Sequence[int]): Strides of the first block of each stage. + dilations (Sequence[int]): Dilation of each stage. + out_indices (Sequence[int]): Output from which stages. + style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two + layer is the 3x3 conv layer, otherwise the stride-two layer is + the first 1x1 conv layer. + deep_stem (bool): Replace 7x7 conv in input stem with 3 3x3 conv + avg_down (bool): Use AvgPool instead of stride conv when + downsampling in the bottleneck. + frozen_stages (int): Stages to be frozen (stop grad and set eval mode). + -1 means not freezing any parameters. + norm_cfg (dict): Dictionary to construct and config norm layer. + norm_eval (bool): Whether to set norm layers to eval mode, namely, + freeze running stats (mean and var). Note: Effect on Batch Norm + and its variants only. + plugins (list[dict]): List of plugins for stages, each dict contains: + + - cfg (dict, required): Cfg dict to build plugin. + - position (str, required): Position inside block to insert + plugin, options are 'after_conv1', 'after_conv2', 'after_conv3'. + - stages (tuple[bool], optional): Stages to apply plugin, length + should be same as 'num_stages'. + with_cp (bool): Use checkpoint or not. Using checkpoint will save some + memory while slowing down the training speed. 
+ zero_init_residual (bool): Whether to use zero init for last norm layer + in resblocks to let them behave as identity. + pretrained (str, optional): model pretrained path. Default: None + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None + + Example: + >>> from mmdet.models import ResNet + >>> import torch + >>> self = ResNet(depth=18) + >>> self.eval() + >>> inputs = torch.rand(1, 3, 32, 32) + >>> level_outputs = self.forward(inputs) + >>> for level_out in level_outputs: + ... print(tuple(level_out.shape)) + (1, 64, 8, 8) + (1, 128, 4, 4) + (1, 256, 2, 2) + (1, 512, 1, 1) + """ + + arch_settings = { + 18: (BasicBlock, (2, 2, 2, 2)), + 34: (BasicBlock, (3, 4, 6, 3)), + 50: (Bottleneck, (3, 4, 6, 3)), + 101: (Bottleneck, (3, 4, 23, 3)), + 152: (Bottleneck, (3, 8, 36, 3)) + } + + def __init__(self, + depth, + in_channels=3, + stem_channels=None, + base_channels=64, + num_stages=4, + strides=(1, 2, 2, 2), + dilations=(1, 1, 1, 1), + out_indices=(0, 1, 2, 3), + style='pytorch', + deep_stem=False, + avg_down=False, + frozen_stages=-1, + conv_cfg=None, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + dcn=None, + stage_with_dcn=(False, False, False, False), + plugins=None, + with_cp=False, + zero_init_residual=True, + pretrained=None, + init_cfg=None): + super(ResNet, self).__init__(init_cfg) + self.zero_init_residual = zero_init_residual + if depth not in self.arch_settings: + raise KeyError(f'invalid depth {depth} for resnet') + + block_init_cfg = None + assert not (init_cfg and pretrained), \ + 'init_cfg and pretrained cannot be specified at the same time' + if isinstance(pretrained, str): + warnings.warn('DeprecationWarning: pretrained is deprecated, ' + 'please use "init_cfg" instead') + self.init_cfg = dict(type='Pretrained', checkpoint=pretrained) + elif pretrained is None: + if init_cfg is None: + self.init_cfg = [ + dict(type='Kaiming', layer='Conv2d'), + dict( + type='Constant', + val=1, + layer=['_BatchNorm', 'GroupNorm']) + ] + block = self.arch_settings[depth][0] + if self.zero_init_residual: + if block is BasicBlock: + block_init_cfg = dict( + type='Constant', + val=0, + override=dict(name='norm2')) + elif block is Bottleneck: + block_init_cfg = dict( + type='Constant', + val=0, + override=dict(name='norm3')) + else: + raise TypeError('pretrained must be a str or None') + + self.depth = depth + if stem_channels is None: + stem_channels = base_channels + self.stem_channels = stem_channels + self.base_channels = base_channels + self.num_stages = num_stages + assert num_stages >= 1 and num_stages <= 4 + self.strides = strides + self.dilations = dilations + assert len(strides) == len(dilations) == num_stages + self.out_indices = out_indices + assert max(out_indices) < num_stages + self.style = style + self.deep_stem = deep_stem + self.avg_down = avg_down + self.frozen_stages = frozen_stages + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.with_cp = with_cp + self.norm_eval = norm_eval + self.dcn = dcn + self.stage_with_dcn = stage_with_dcn + if dcn is not None: + assert len(stage_with_dcn) == num_stages + self.plugins = plugins + self.block, stage_blocks = self.arch_settings[depth] + self.stage_blocks = stage_blocks[:num_stages] + self.inplanes = stem_channels + + self._make_stem_layer(in_channels, stem_channels) + + self.res_layers = [] + for i, num_blocks in enumerate(self.stage_blocks): + stride = strides[i] + dilation = dilations[i] + dcn = self.dcn if self.stage_with_dcn[i] else None + if plugins is not None: + 
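# make_stage_plugins keeps only the plugins whose 'stages' entry covers
+ # stage i; plugins without a 'stages' key are applied to every stage. +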
stage_plugins = self.make_stage_plugins(plugins, i) + else: + stage_plugins = None + planes = base_channels * 2**i + res_layer = self.make_res_layer( + block=self.block, + inplanes=self.inplanes, + planes=planes, + num_blocks=num_blocks, + stride=stride, + dilation=dilation, + style=self.style, + avg_down=self.avg_down, + with_cp=with_cp, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + dcn=dcn, + plugins=stage_plugins, + init_cfg=block_init_cfg) + self.inplanes = planes * self.block.expansion + layer_name = f'layer{i + 1}' + self.add_module(layer_name, res_layer) + self.res_layers.append(layer_name) + + self._freeze_stages() + + self.feat_dim = self.block.expansion * base_channels * 2**( + len(self.stage_blocks) - 1) + + def make_stage_plugins(self, plugins, stage_idx): + """Make plugins for ResNet ``stage_idx`` th stage. + + Currently we support to insert ``context_block``, + ``empirical_attention_block``, ``nonlocal_block`` into the backbone + like ResNet/ResNeXt. They could be inserted after conv1/conv2/conv3 of + Bottleneck. + + An example of plugins format could be: + + Examples: + >>> plugins=[ + ... dict(cfg=dict(type='xxx', arg1='xxx'), + ... stages=(False, True, True, True), + ... position='after_conv2'), + ... dict(cfg=dict(type='yyy'), + ... stages=(True, True, True, True), + ... position='after_conv3'), + ... dict(cfg=dict(type='zzz', postfix='1'), + ... stages=(True, True, True, True), + ... position='after_conv3'), + ... dict(cfg=dict(type='zzz', postfix='2'), + ... stages=(True, True, True, True), + ... position='after_conv3') + ... ] + >>> self = ResNet(depth=18) + >>> stage_plugins = self.make_stage_plugins(plugins, 0) + >>> assert len(stage_plugins) == 3 + + Suppose ``stage_idx=0``, the structure of blocks in the stage would be: + + .. code-block:: none + + conv1-> conv2->conv3->yyy->zzz1->zzz2 + + Suppose 'stage_idx=1', the structure of blocks in the stage would be: + + .. code-block:: none + + conv1-> conv2->xxx->conv3->yyy->zzz1->zzz2 + + If stages is missing, the plugin would be applied to all stages. + + Args: + plugins (list[dict]): List of plugins cfg to build. The postfix is + required if multiple same type plugins are inserted. 
+ stage_idx (int): Index of stage to build + + Returns: + list[dict]: Plugins for current stage + """ + stage_plugins = [] + for plugin in plugins: + plugin = plugin.copy() + stages = plugin.pop('stages', None) + assert stages is None or len(stages) == self.num_stages + # whether to insert plugin into current stage + if stages is None or stages[stage_idx]: + stage_plugins.append(plugin) + + return stage_plugins + + def make_res_layer(self, **kwargs): + """Pack all blocks in a stage into a ``ResLayer``.""" + return ResLayer(**kwargs) + + @property + def norm1(self): + """nn.Module: the normalization layer named "norm1" """ + return getattr(self, self.norm1_name) + + def _make_stem_layer(self, in_channels, stem_channels): + if self.deep_stem: + self.stem = nn.Sequential( + build_conv_layer( + self.conv_cfg, + in_channels, + stem_channels // 2, + kernel_size=3, + stride=2, + padding=1, + bias=False), + build_norm_layer(self.norm_cfg, stem_channels // 2)[1], + nn.ReLU(inplace=True), + build_conv_layer( + self.conv_cfg, + stem_channels // 2, + stem_channels // 2, + kernel_size=3, + stride=1, + padding=1, + bias=False), + build_norm_layer(self.norm_cfg, stem_channels // 2)[1], + nn.ReLU(inplace=True), + build_conv_layer( + self.conv_cfg, + stem_channels // 2, + stem_channels, + kernel_size=3, + stride=1, + padding=1, + bias=False), + build_norm_layer(self.norm_cfg, stem_channels)[1], + nn.ReLU(inplace=True)) + else: + self.conv1 = build_conv_layer( + self.conv_cfg, + in_channels, + stem_channels, + kernel_size=7, + stride=2, + padding=3, + bias=False) + self.norm1_name, norm1 = build_norm_layer( + self.norm_cfg, stem_channels, postfix=1) + self.add_module(self.norm1_name, norm1) + self.relu = nn.ReLU(inplace=True) + self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + + def _freeze_stages(self): + if self.frozen_stages >= 0: + if self.deep_stem: + self.stem.eval() + for param in self.stem.parameters(): + param.requires_grad = False + else: + self.norm1.eval() + for m in [self.conv1, self.norm1]: + for param in m.parameters(): + param.requires_grad = False + + for i in range(1, self.frozen_stages + 1): + m = getattr(self, f'layer{i}') + m.eval() + for param in m.parameters(): + param.requires_grad = False + + def forward(self, x): + """Forward function.""" + if self.deep_stem: + x = self.stem(x) + else: + x = self.conv1(x) + x = self.norm1(x) + x = self.relu(x) + x = self.maxpool(x) + outs = [] + for i, layer_name in enumerate(self.res_layers): + res_layer = getattr(self, layer_name) + x = res_layer(x) + if i in self.out_indices: + outs.append(x) + return tuple(outs) + + def train(self, mode=True): + """Convert the model into training mode while keep normalization layer + freezed.""" + super(ResNet, self).train(mode) + self._freeze_stages() + if mode and self.norm_eval: + for m in self.modules(): + # trick: eval have effect on BatchNorm only + if isinstance(m, _BatchNorm): + m.eval() + + +class ResNetV1d(ResNet): + r"""ResNetV1d variant described in `Bag of Tricks + `_. + + Compared with default ResNet(ResNetV1b), ResNetV1d replaces the 7x7 conv in + the input stem with three 3x3 convs. And in the downsampling block, a 2x2 + avg_pool with stride 2 is added before conv, whose stride is changed to 1. + """ + + def __init__(self, **kwargs): + super(ResNetV1d, self).__init__( + deep_stem=True, avg_down=True, **kwargs) + +class TridentConv(BaseModule): + """Trident Convolution Module. + + Args: + in_channels (int): Number of channels in input. 
+ out_channels (int): Number of channels in output. + kernel_size (int): Size of convolution kernel. + stride (int, optional): Convolution stride. Default: 1. + trident_dilations (tuple[int, int, int], optional): Dilations of + different trident branch. Default: (1, 2, 3). + test_branch_idx (int, optional): In inference, all 3 branches will + be used if `test_branch_idx==-1`, otherwise only branch with + index `test_branch_idx` will be used. Default: 1. + bias (bool, optional): Whether to use bias in convolution or not. + Default: False. + init_cfg (dict or list[dict], optional): Initialization config dict. + Default: None + """ + + def __init__(self, + in_channels, + out_channels, + kernel_size, + stride=1, + trident_dilations=(1, 2, 3), + test_branch_idx=1, + bias=False, + init_cfg=None): + super(TridentConv, self).__init__(init_cfg) + self.num_branch = len(trident_dilations) + self.with_bias = bias + self.test_branch_idx = test_branch_idx + self.stride = _pair(stride) + self.kernel_size = _pair(kernel_size) + self.paddings = _pair(trident_dilations) + self.dilations = trident_dilations + self.in_channels = in_channels + self.out_channels = out_channels + self.bias = bias + + self.weight = nn.Parameter( + torch.Tensor(out_channels, in_channels, *self.kernel_size)) + if bias: + self.bias = nn.Parameter(torch.Tensor(out_channels)) + else: + self.bias = None + + def extra_repr(self): + tmpstr = f'in_channels={self.in_channels}' + tmpstr += f', out_channels={self.out_channels}' + tmpstr += f', kernel_size={self.kernel_size}' + tmpstr += f', num_branch={self.num_branch}' + tmpstr += f', test_branch_idx={self.test_branch_idx}' + tmpstr += f', stride={self.stride}' + tmpstr += f', paddings={self.paddings}' + tmpstr += f', dilations={self.dilations}' + tmpstr += f', bias={self.bias}' + return tmpstr + + def forward(self, inputs): + if self.training or self.test_branch_idx == -1: + outputs = [ + F.conv2d(input, self.weight, self.bias, self.stride, padding, + dilation) for input, dilation, padding in zip( + inputs, self.dilations, self.paddings) + ] + else: + assert len(inputs) == 1 + outputs = [ + F.conv2d(inputs[0], self.weight, self.bias, self.stride, + self.paddings[self.test_branch_idx], + self.dilations[self.test_branch_idx]) + ] + + return outputs + + + +# Since TridentNet is defined over ResNet50 and ResNet101, here we +# only support TridentBottleneckBlock. +class TridentBottleneck(Bottleneck): + """BottleBlock for TridentResNet. + + Args: + trident_dilations (tuple[int, int, int]): Dilations of different + trident branch. + test_branch_idx (int): In inference, all 3 branches will be used + if `test_branch_idx==-1`, otherwise only branch with index + `test_branch_idx` will be used. + concat_output (bool): Whether to concat the output list to a Tensor. + `True` only in the last Block. 
+ """ + + def __init__(self, trident_dilations, test_branch_idx, concat_output, + **kwargs): + + super(TridentBottleneck, self).__init__(**kwargs) + self.trident_dilations = trident_dilations + self.num_branch = len(trident_dilations) + self.concat_output = concat_output + self.test_branch_idx = test_branch_idx + self.conv2 = TridentConv( + self.planes, + self.planes, + kernel_size=3, + stride=self.conv2_stride, + bias=False, + trident_dilations=self.trident_dilations, + test_branch_idx=test_branch_idx, + init_cfg=dict( + type='Kaiming', + distribution='uniform', + mode='fan_in', + override=dict(name='conv2'))) + + def forward(self, x): + + def _inner_forward(x): + num_branch = ( + self.num_branch + if self.training or self.test_branch_idx == -1 else 1) + identity = x + if not isinstance(x, list): + x = (x, ) * num_branch + identity = x + if self.downsample is not None: + identity = [self.downsample(b) for b in x] + + out = [self.conv1(b) for b in x] + out = [self.norm1(b) for b in out] + out = [self.relu(b) for b in out] + + if self.with_plugins: + for k in range(len(out)): + out[k] = self.forward_plugin(out[k], + self.after_conv1_plugin_names) + + out = self.conv2(out) + out = [self.norm2(b) for b in out] + out = [self.relu(b) for b in out] + if self.with_plugins: + for k in range(len(out)): + out[k] = self.forward_plugin(out[k], + self.after_conv2_plugin_names) + + out = [self.conv3(b) for b in out] + out = [self.norm3(b) for b in out] + + if self.with_plugins: + for k in range(len(out)): + out[k] = self.forward_plugin(out[k], + self.after_conv3_plugin_names) + + out = [ + out_b + identity_b for out_b, identity_b in zip(out, identity) + ] + return out + + if self.with_cp and x.requires_grad: + out = cp.checkpoint(_inner_forward, x) + else: + out = _inner_forward(x) + + out = [self.relu(b) for b in out] + if self.concat_output: + out = torch.cat(out, dim=0) + return out + +def make_trident_res_layer(block, + inplanes, + planes, + num_blocks, + stride=1, + trident_dilations=(1, 2, 3), + style='pytorch', + with_cp=False, + conv_cfg=None, + norm_cfg=dict(type='BN'), + dcn=None, + plugins=None, + test_branch_idx=-1): + """Build Trident Res Layers.""" + + downsample = None + if stride != 1 or inplanes != planes * block.expansion: + downsample = [] + conv_stride = stride + downsample.extend([ + build_conv_layer( + conv_cfg, + inplanes, + planes * block.expansion, + kernel_size=1, + stride=conv_stride, + bias=False), + build_norm_layer(norm_cfg, planes * block.expansion)[1] + ]) + downsample = nn.Sequential(*downsample) + + layers = [] + for i in range(num_blocks): + layers.append( + block( + inplanes=inplanes, + planes=planes, + stride=stride if i == 0 else 1, + trident_dilations=trident_dilations, + downsample=downsample if i == 0 else None, + style=style, + with_cp=with_cp, + conv_cfg=conv_cfg, + norm_cfg=norm_cfg, + dcn=dcn, + plugins=plugins, + test_branch_idx=test_branch_idx, + concat_output=True if i == num_blocks - 1 else False)) + inplanes = planes * block.expansion + return nn.Sequential(*layers) + + +class TridentResNet(ResNet): + """The stem layer, stage 1 and stage 2 in Trident ResNet are identical to + ResNet, while in stage 3, Trident BottleBlock is utilized to replace the + normal BottleBlock to yield trident output. Different branch shares the + convolution weight but uses different dilations to achieve multi-scale + output. + + / stage3(b0) \ + x - stem - stage1 - stage2 - stage3(b1) - output + \ stage3(b2) / + + Args: + depth (int): Depth of resnet, from {50, 101, 152}. 
+ num_branch (int): Number of branches in TridentNet. + test_branch_idx (int): In inference, all 3 branches will be used + if `test_branch_idx==-1`, otherwise only branch with index + `test_branch_idx` will be used. + trident_dilations (tuple[int]): Dilations of different trident branch. + len(trident_dilations) should be equal to num_branch. + """ # noqa + + def __init__(self, depth, num_branch, test_branch_idx, trident_dilations, + **kwargs): + + assert num_branch == len(trident_dilations) + assert depth in (50, 101, 152) + super(TridentResNet, self).__init__(depth, **kwargs) + assert self.num_stages == 3 + self.test_branch_idx = test_branch_idx + self.num_branch = num_branch + + last_stage_idx = self.num_stages - 1 + stride = self.strides[last_stage_idx] + dilation = trident_dilations + dcn = self.dcn if self.stage_with_dcn[last_stage_idx] else None + if self.plugins is not None: + stage_plugins = self.make_stage_plugins(self.plugins, + last_stage_idx) + else: + stage_plugins = None + planes = self.base_channels * 2**last_stage_idx + res_layer = make_trident_res_layer( + TridentBottleneck, + inplanes=(self.block.expansion * self.base_channels * + 2**(last_stage_idx - 1)), + planes=planes, + num_blocks=self.stage_blocks[last_stage_idx], + stride=stride, + trident_dilations=dilation, + style=self.style, + with_cp=self.with_cp, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + dcn=dcn, + plugins=stage_plugins, + test_branch_idx=self.test_branch_idx) + + layer_name = f'layer{last_stage_idx + 1}' + + self.__setattr__(layer_name, res_layer) + self.res_layers.pop(last_stage_idx) + self.res_layers.insert(last_stage_idx, layer_name) + + self._freeze_stages() + +# MODIFIED: register GeneralizedAttention +PLUGIN_LAYERS['GeneralizedAttention'] = GeneralizedAttention + +import torch + +def get_model(): + plugins = [ + dict( + cfg=dict( + type='GeneralizedAttention', + spatial_range=-1, + num_heads=8, + attention_type='0010', + kv_stride=2), + position='after_conv2') + ] + + tridentresnet_config = dict( + num_branch=3, + test_branch_idx=1, + strides=(1, 2, 2), + dilations=(1, 1, 1), + trident_dilations=(1, 2, 3), + out_indices=(2, ), + plugins=plugins, + ) + + model = TridentResNet(50, num_stages=3, **tridentresnet_config).cuda() + return model + +def get_scripted_model(): + from ._tridentnet_scripted import _get_scripted_model + return _get_scripted_model() + +def get_input(batch_size): + return (torch.randn((batch_size, 3, 224, 224)).cuda(),), {} \ No newline at end of file diff --git a/reports/15745_Project_Milestone_Report.pdf b/reports/15745_Project_Milestone_Report.pdf new file mode 100644 index 0000000000000000000000000000000000000000..fb1a4f2f663f9ab513792e6af3372e44dc4faba3 GIT binary patch literal 184303 zcmc$_W0WUPmo8eitIPV8ZQHhO+qPX@>auOyHoL4Y+jiCI|Ge{_HFM^CxF7DiH*@8R zh>V>(b0gw;QlttZVzi8OtT3d#&jnvF%m4;}y^$3R4-bG|#?sEz(84*h7yaAK%+Bz)CijoD%0JQ!jGO>IJ{VIwlYdkK z{=ZycSpJJ4DxMCe0D282Q?tKjnA(}USOD1m+976X<6`Rcx3n>IF%>a2wm11J^{-OF z(8<;r@Q)LO?QL9b?VJIu0D1){Qxi*L7keiFBhz2I0QBmA9l;1-{=5D4f~1{^smI?P z%m8MVf3?l*|MoBdlmPTf_VzA+HT*|i`L9C(%0`!obdEYGTB~$75__#K^|Mz{JU6WNK!}!OCf3%4}lD z#>imA%*t%a%FL(xkN=%roJsb|4FC6fmARwH!nKC952aP-DcuG)OAbM>wZY`_@13D4-*REn5 zJ3i6O_tPHuExvC)ko7j|wK_Z=>T|?*)laMSKXD-Mk45<}c{XGSB$}qptMv368$;oZ zUaQnN6#IkSzI%mtQpwF+DF~j<%SxDt9OD$h~%3&t*GvoS5( z`|A0qUe}^N@GS_j)35OA(?rE_>!sQ4D#&cq>~W7^fty}SGiS}foC|+!({;i0M!8Y zR(lT=#)G#A$fD<`32y<2HYgR$9^ow|f#v^qF}x*=v< zgy0B_p;ULeKSpqReu0LZhX(Ot2~0{$3^Z2JgB>kOYC;+Pl-ee<*w~~BVW5#&%8o4t 
z2R*FG*+i3ANc*1`e}_w`c{-peRC;5x;g$)Ov}(7jzh9TC^Dg=1vsq=a<|_02xbgTvuPJ%HIl2c9yc zH;1ITi-bCZu0l8e0`>3i=&KIGVbpg;>JqL_pGL1zXMM+A+MWGa^|38O4y{+9_*@^|g2T>Ov^xnIg4QWuztj|6pDoVAj)O)L zh8_!!pk^+&MoV4t4U7N+&m~L+c+avHML2prR+Do4c6G%-C4T!MBqK5M0{{1o=&eFT zK~jIe@@DJp5+RcNq$L6Y*K3CxW^#RU@7%K#u#^kFFGQazZ*kxgb5xv^jraD1Iw`8D3p zDeC4QeKnEa-mttiOSkw_5$S7+Z*XWToR3}4DqpjX!`XKkkn9C*Vr*p-?zJ-(&qOTN z3t@vW)v)HGn>Ol_H6tD`>q>seRH){wwhV_a$Y!MX-3aTvn>yQRp97Zm?rX8l=#o$x zsWOjGC<%@<2ZBr%4#Cq`*G68D2vQbvb*{6T2n_Soy8yd6=Mm9ro3i1=uJFuP`)$`? zw&vhNupBXSX;)&F57{E7TH|V@McC{~XcqmJ!U!KPnRxDRiv?VR>lynSk+|>D#5obR z%=ldW0_ObrZ3XJ5{l)xUf)#e9{srW+t0pnn(pDp8W=wx{r8oO3Tn?OvEEtZye|<1o zCx0iv$chqNf|c|kOS-WG$njE}983}APcf9kwa?|8i$PQt4R^I*7C86z^zS!9Q7Od| zq_&%1BDx@m_fII$)w=*25BaWH=z$OnfKO6ab)uuybowamUBi z>At+^T=6!*3h$@`vE|;76dwbCStBj1G&)oo1I&1*F~)LrED=FOlqQXRJJh~Zb|aLbhrEhY`l;U08sJx*iN6)2?GNad)FiB^Y8Jr6L*#rV0cwLd;@I6vgkF$V4le-M<8XhtutN8OO~9F z`zUeCg@Zo9_&v7Qs}^NTK;k(eREn*7EeYlKm`6R$Tz%BpmUwF5+trd}s%TQ|Y?g@G z7v1&vxt(OUI+?HoW=m~nvR8Er%|czoc>#7fKSkuP_8$iyImgDCdw3M6oOZ@$0f)=& zmtcM(xz;nBJK_j8Vte;QN(Y21L8$kS$_;*wcC%layZd5^p=dLn^H1~k8IKC z7yN1Du62T}zV*P5VUR-`qh8H*t&9CZAt#|k5*~W$6h&Uz- zD|^_6R_ak1;J5Ct;aW846HIr*Kb|3=*tCfXmK-f?srxR2Ss+4HQIE4ZnxgmieR(5B z_E&O@9_Qe^YwAv9?!)=HmHlGK{q9x{P?tuN5p^=e#xc}#%mZ7%Wh_tejrzUg`I`Rl zdD7)O0B}`B;hL zypyLHLpWQx;Za#5k)L;dAopPqmRcb`NRB5O!yD=@&R6mnRtJmW(IqStyo7B%)JDl0 zblGtZR6LaLSb-N8nsD^i+m2`Ek^orAPvz}Vkd!|)u3n_2=(6@wxWZ-I7#i|5XPpQi z#Z8eM>(Uxz|FQ_25{jonEz?YQFJoUWm9m1LpY*P6r}?3u{mIcP`85l&|EQbq9Pw-S zQz|)0WFW&ATW+~p&V&g7W`j%2tN+Jqy>KA7@gFaS6ahm1hmdW>2;*gaWtE5pW>R)g zZE88#aZ#+$0ARv-a$I^%^a4*9`BS?G9#tv7DFnu+&REB;q84;$*?)=!3W1P$p4HHu zp1)3PXXnX`Q~p_`!Zp~p_LuXO)wz|i>U+#vnt*xF1E*OZCakYlS6cf;qI5%sR&m|E z0+Mv6Rw3Yoqsp+YA5NRGu|GrNe+?uw!B# z!d~kvTbgUmoM=;m>)==AbzL+sWE7{%>6-XaGIvNX$HJxNi7(K((p1Wc6+Q`EA(3$||T(B?93coqm?;6^E z1^1uzuGV0vmlT78rDhB-EfKnea_|vQ2q*RGS&=sufRnAhij)UohGfni@xPITQczO( zJX`9^DCfE*Ibe;+pKSa!xKib+24s$gAZHxWVmm*eBs5@sh?veIvs6s!`j95$E1BWY zTOA}w%y_{mpF}hoBeZi`8LA~b-#^(wC-Cp0h&1hZbt19QMtgeV(j2UfdYQHY3~lZ} zgr4H=Al|o{D5$QbU2EhCKNEMD3Ygc@Dh3k(4js(!ozdEUo;g+}63ooTkL_iKX~wIb zTol$HEt`k}MYch(KXaF2NI=IEOoK=8p$M%ZsBvY|M0vIU3nx+bCQOLQy3cC-3T#+7 z+;_2Vqr~t&tCMwV@#=9Vq~OIFTEHlD{}i?SpOvO63+wW6D-R4a72Dd7I%oDC)P?+R zo}>+^5E`Zg%A`u4CaBuQD&52M3!8{&=YoaK!W70Y7LmkbI2x_mtf~)sM#%Zr_~gFw zGid@m-VFHXw`ex=s``?$$!P^Z(cU{%&aR~$1aw2Yc$AW+c+qhe+iV;I64>YVOAb!~ zMfLq;+Yvg$_exhn(G>ngC2T_~Oe0-3ylI3L1Z-7Y^B*e>yvJsTijw*g?S5DM4oK~KZP8eL|6T-mI(Dayl)ZdJ6M+$J65Z!*)X zDn;!2BOV?8>*bHLrb8XvlS&D1tU;x!sE{)_^P;M}f|8f;YTG18OOOSP?hw*JQ>|E6OUG+!(BTn(7|rd zVXCXt^blm8U+I*rlbrM!m{I>egLa{liRTW*a>ep&A-lFyq~OB;IMC53 zIxeFDC>H}KJY)Qjbq!oB1z3Nat zGYmKbmPtI)6`&>i4i*+g_5`7wV&CjBxc#f3b0r#;svOYIs*}qovf6?QWfgJlIA^t6 zs5+2a-s4pffzp^9)b3!!nSr-JO_Veb&$#x0*ylO;-^f;++Bg}}aJ&9CNXYnU7G#&G z6Cd+mxgJeY6eXEL zp-f*wIYPXMm(M%&Y93y5W_8ttQgKGvTWN}+y{IM;Nkf=2cxf<#ndH|3 ze)J#wtcJzeQ*j3{S(a|+x0LsRK}h*n;ViMIzw5*{o90mA7!n@fYSV3rV_Tg(xbJ`i8Rf4{3RqgT6=c67|0W@^@N<1mn7ymuCUy z5;oMoMSh|n!g>h9%E@h|%qB|z8SK$b3j(USTv+aE8>Ic>g9f$DV5jCBcLOqYLFnsJ z4PRGW+(}`;yx-gOZn>!zD3{zk$Z|fe=Wv?Fk-uz8+DG*{-m-0TXpZtjEF0yXZ`@M} zU%M#sM>fpt7HD)ze9$jPFL*flM^F7N-)2OTQ9c7tC9oMX^crcv*Ig!XC zC;2Swm>>9xf%F z(GP}@N7(OaKDmsW9AVfXX4Pzb(zBi2e>es^c|p)s)o*?NVZSs=g?&tascPrOKHo?d z({1VaD^O32a{!dAFXH~LHM9>#*8`Sth~1#AF6hy?``y-_>Uuh)cG)>e3*!=|p%`e)Y}mQ>u&H zO^t%AMkl#HhOx$v^A631w~AA$i}5pmf|0=PkQHijE7Lx|gS5mkc6jL)0=)1Jfg`%K1PP4oc;;lhLg zin~LPJlDHR=^}6;xEjA#)=+O)m(5s%cft-~sA6up3l)}L66YUlf^zYZ(lz?(|9#fWAxhlB$lp}^Z2AsOm z)3x|5tq*T~DL;08=y~3n;iAtGAGGy!{#|WMvb_ij@uf$hrK_oAzA=l#d+lZ^nW8@| 
z9r{q)N!3!y9;@R1?ELjEuQ+b(sc)JM#H@xX{<4_*v`*Q07S&jqwt+7~)ceUHop@Re zCpDOF_cGWvr21%{BlXa_T;C@_H2s_e>@$&o87X0S7Thlp2`d-$t>=L2P^N6TxK_6F zBk>dq)K~D0Z9GEXQ6nK9T%GaUNTP^LFjyQ@rFs$6_G5`4a2ZVcD~oj4JL%>;&~?(7HNhb zRK^ve>h${&Bpg5sv9STh02j2g1PJN#YCv%?1rcU3s*mk<*Q|~$0qQK*CZHHE$1G1{ z4qG$0{$QsyG6}KML4$i{O98c|%G5j}`$8dhVp!ggmYw5-eq3%yWzH}4v+_h1b%mv$ zhI)QpkAM>w=vbi-ISulh~rhy9Ro(56|Kv7M7`*Fl+^XV!BnBMw8a=*z{6 zR~6}0R1y=O6ZnVf0XirRyG3(JR9zgvGy0*Ji9!~Td&fJASvWF!LH84>S|!Uj{OD0P zta<3*KXHw~POcB&xNR87tx*o7h>Pa`e8aY%9M4*;Q(vls1N#GRz2|IFYvv|jHdXS= z*e?vP!k7i7z&_ zZIAe}(-QVdo^Z)&Zd=tgQwDf{At8Y|k2u+N4T_|5V3_0Z)AfPPsHfssgJg773+Izv z7xbEq&FmlmY$cDPe37uUUuKAL0g8@;c5i^5wtMw*|my40s9JP>nY%l?Ync>_$PV*4YYX`9C7ek58&1;gVMREFvVa zSSh=If8;6}o!7us=!CT#s=YY(VW&5pv$$wEk`d|&BR+>72be|NJEp)EcZ(%++wdE~ zA9D1vK9;mW0xXC~b#Z!vufp%y57a*Eloqw0)sU}=Io9@y6fVx?W#&-(;x;*dTLr?k zC#s_L(Wne$0ivGO=|&e`zq%Q)PZ!vPUx@1nW%n8vD8ZNYG>#!2HAo#9BT&0fKlP3f zpnnIrenEzV#$Ju<@8zwWzM3Qx-YFd9zP7{B@(IT)Y}ssD6e}8m>8_aaK;oxaw)cN2 zXaBDgPjqy4i>=QWd^+qkV#93!`bg*wn1i`UOXVJ!vAmtmwOK#MV3lY&Nw@dg*?h(@ zx{k;lF+$jmP>Rk3WSWM^1EU@K1!M9|O$(CMQ|=|K40CQT$Ia+71NrXI2fGvDEbdb) z;iBAG>ZJxhao`JMy5ANZku}n_GUx+rJF^kv`uW6zgg?7;ezMh#b*lE)$=*=CT?XU4 z!U$~{$nJ+*UCjP|_<&bn|3DKg8Hho~7nu$&*S#uxn2XMwFEJ8knv$feM2h>2nkNYn zmC%K5(%qTrkuKx7ATd!BT$ijY$Qb-nKy^EzAjY;U|0uj>@c<^=P(;|+j)*%vIjY36 zNsVN)G$MwLsMx??QkpuAM6CC@IO;k6Q65(V@H-L?Ky$5e%LT?;_bW!h)fcN91uFKR za+~a1U1sPV5x0_NQ1vbn2)V>1Q*)by8!dh>062ac4gC~a9u-yIAk+AJHcV>l={NFy zQ_w}jbMz+YV`~LX*7L4HINrehXAf(27lKogR4v376!?!a>=JUod^ zpQUT%(C)zGdiPr71pN4u-T8M2BWR;*@_uwM1L7BR#7?sGOkl!TjlmRejRmJ%8k#Mh zyA1-3?u*iFa?lHM*K(+}os)0Cr6OyVWWtjmQxCRCR}e#i;i$>shcsJ{HP2#7m%Z>g zbQ2-EW7mRpgbB%Q+0fm?`8@h=W$1j~X2Hq7mf~8a(jN+G)lxm~kG+2^-?54)jMkk^ z-PEFpf|F?A9oct4D9#pGh{?+aK|U}a^p|cZL{~!koMuaaDbR^jTwHkB)edL~XEoLq zVa8|4RLbx!Hfloz&TLOw)7c;^EpiwjU}#ehTh>A20RC*s_fYIoe7grqHJRNl znrWbUH(`nwLtg>=8IXQYw_pjb3b4W{{ zp7^|~8UvNzFgjd-@>)nd@{@9Cd6Bdt)CicfcQD!>%e6pQiEK~4!w;Y(@F&U;?i&8x zHUYST7R)0L$!*RI843#l3~kqzN2~#e6f4MK-i5XyfOqQ7cEIqxb1|&H<=k?{NO7-= zqH`^?NH!H&i)qfm9k#({`upk`8)t%(tKJPP_aO2vyZKiQhy?5^G1%au5x+H@IxN0Odu{_TS9T{AedWkLZsMT7?6zBR*!;f@8qlwzQNLs_s^Rkrcb;5fYwOSshp$K13oZ^Y4gkFtK;D9ZOvNObjBqVxo z%?0^+Sp~2+ASte4X11C`4o*6=@H_AFgghOGyEeH2a8XSs`P-)>I*wrzR<4ghqv_-* z#YyrH%O(xrejzRga9(3_2>C5YfHRVB+)CQ0P`H9>e2=eGtMxx|J^t5@ZxIkZg#<_0 z1Pb)GJ{B6U)od?(b@vJQ4}Y5p?kBYVwYDW|(Vn*K82gi_tuYIr9xu^OkyF#;0CcsZ zg%y|ii_RMSF^T&1dPuc6Fr%=s*6g*8A}1LE;ZTers%%&N0aKKio{lTS+MsYOw2 zN|ohc&}OfQ@ER|0aZiP}J-6=b#iu_$cs8zMdb% z`H>eYn;Ihv-vTn*-l%k9D%~&77IvtGKBX3vxiR=8mhNtN$v^>`>T&EzN1$mm8sK;G zi;~bwzoQyHTa`j-qa)}eeV<$(179xA&vg1Uze@#Qx((f$v7s*i5IFj=&_kYu3qUM`Ua{)m33iK3>SZdsoYdr*mg|jw0K(+HIZT#f~VqUrvp97MjAG+RLr8^WRs#-Xic6L)tP8HYt zCEeQVVo!}@BnE~5$)@Z?iw0XZF4ER@50S%;f8LQ!N5rjfN?1^ZF#C);e-baz+buT& z5Q4;9X58V7yjvVMH$=z`YHuoN^OCOO{ZrP7!Z1TasR%NhXG}Yj*UIJK_QHP{3NnmM zv?P}T_85)cyGhm*2v@6<%#FOUrDKnP%WqU9=C7rwXcQoU8#=SNlTVRTB27;mD7di52`rNhqb+7)$kC^+C*^pCl!_E(tAmQ@CFm}%Wsm&O2Illbw*CAO^;)rpL9M6+l>xdSWGy$j2c)6Fz-xR|dil&RD)W9yrheX`v`n9I zb{N|eabqMWL%l}Qw8X|aV#JKt2f~+<-Kwwnf}lC*=QA*L{>?&1l_XIh{3*v|Idpy6 z%(YKigkd(k{P$glQVjS;7Ce>M%icq1uU}?S30O_EUl4W3J;3_7FbwXM5JS4PGB?ke z#4!ba%XN`nMv$LGZMXj~O&4e73>NGOI)F|&>y{pm-Epy8rmhMrEoU71={A~X%L=#} zl~(K#t^rbltbIS=^-LL9;GI8!w)v5F&D?GaVF~JM4Ll-QdWVdTr_XHxTW1NX5213)G=sql>Pb1Z z8OQH5^+OkID?EJ$rb43IrsE&i4lm4^fju_WgC@RaJUKg#!}I)}zB~w`h-|dv zlZii7N0NeL$P%TKAOS<*tNV$}8Z= zgqEFy9HHXu+-cVNB6sg>M__>1IJztF&V;U68WPlTT3m&g$oS2ZopCiLb;R@aVIHu^ zGK_>xtBNJ^%z0EshY`dZ0I%Vwcn^-{*5zeaOX(h$)!)}{#aS4A(-XR-qbC?A*_hQo zpkJR*`E!W%71VxDHZBxR@{RGsF-x)ALw6Hkq}WzIO{#*e^9xMpCH6Gg&nmN!3cJ@Q 
zQ<>(9pL$k@hi5_(%}2MCxXe$y``*c?@QQ4c&x*KGK)otQFi$R(d}NvYuXy?_V%29_ zw%E-U%cX6Zdr%FfAhyd9qD&s{vCcaYl*^p@1vmw5)Y=kj@po@2e@nComgLuGRx%X5 zb7Y6BvOO7=UtE?+6b7>cKql_+P>m7m@lBalPM7rx=LxiB=Wqz%ae|3NDp7zbpMwgb z)!mih`;zFN(ah`HiTZOqGII@gY4R3zp~cocZE>+oF8hjJH}J=#!WUdc(iotKBc#(V z&l(a_ECYZ^jh)?l2+x*&$-Z-u2ZRHqs#jP^^OKE@cqxitJ^hnt572_IemFdb5AjG1 zE-as{)2zyLnAOD8KLP&54g+}x)uvjeZKO-_6)t|oIy0J|ifW7^p2gZtLGYw+A{dkK zC{St$3h$rA+2SF;h6*KZ7*}fNyjwLgSXj@I)LY1GWLY4I(CNF%p-Ke|;Q1{$(T9Zm1Zt5#J%;MY-Xcx^xPvrb_?%f(q_nK!4sU>~Q9CtA(? zEd=YKU4FXNElvPAd~uVrtE|DyIcs~u20G1mv^#4g&6{FTta5C*sFQ%lq_yrq8rN;4 z*m$Uyen;JAZV%X{{N)lV08v1$zZ3o_o(r+Ux%}oMBJs_M-iUl3WC0^1Sfx$3*h?F; zqwpCP@Ko~l%<#Uw?qp`5R;ipAPtBv+3l+WQ1bZoU@wU(tffhm%wwV@~?gxk3Ak4*d&WIjs#QZl&2 zQe8D?@)qa(wE_gTeCPzBY+*KMFA3KG0PusDGL_TL`landI4yAFX+)sb>su=8RjchC zhkHh$o9AO5dPvZOg94H2pZP2ug-1ryM}N@_EJ~;clMDgxJy($CCY4NyU{`?BAgkBe z_4hvBt;qU&)qq|(=^+}E#AWBR9*%E~+G!zB*6o>~gxu=z_&~Wu_Ket9W(?|BsYD5{k97s9>P?A9K{Q(2O z>N59O47i#ElY+*GG1j}lVF+!Z-(6*7e-?Zdj2BB#zLUQJ9a^jMx6poyOAiXo{GT}&LeL|BY& zZP;SCwX7ZM-eP86UA?8inCxikT-BN@TMO-^hM^qfq;rti9|px#Gs0nOKEnqp4x-2D z$_goRjrF_~u5Fk-*<}1MT_D|7KHS|n@26HOX2-SbN2CwZt1S>t(cAf-AD(L`6sdUk zTNz4~d&dGyBU5Fu&yLDD75vb;@j=|ZP*+uzSTwvD=l zPrtLr9;9?Sl|!t77Aaw>q&dtJ+18|h;_%jWAS6I2p&@=dj0NAAQ$y8n?k1RuXq(9? z?VT|k7f#j}FFsFrAx9?#HjVg7MvKs~cPB-?mpV;5E-B|>Cv^smNM63>#*0v4u zXh|3iH>e~UNvMM+0%OpV2;LR@#vGNV&NcpJz8pQ5t*A@;DU!2=*l~w$pGUS+otF1b znOYn?$4&Vz_X~|PjZi3Ei8C6fv}s|6%2*P4HltIR8+B%p;Qn-+nR|IijKXRiSS4C} z?@x$O;LS-EV0iom{@!RP@%D5ReUB9ZsrHf}K$VJHC3Rtm%1l*n>>R@|xELEpK4X|o zwExR?_evOf=4jpz(ktv!voOU0BxQg!zP;N(NN>${cFV$1M&x^eBrgcSb&YLYRSas~ z4zPv;d{2Yes3@Wf;T4U~xDMG^wAq8YTyeMgOa>GwhH4n0U=E$;qtQ60qTu#8u|M^f#g&+l3(bJ@`m>~lA7Vie)sh9g`P`hL0qR5zAH7`kqVFh4(C zD3}UM;x>jaPG`u%urQa)P_~=%JaDWv_6AuxW^1dsE$f$-s-X+jKYnXcczwJ6&Y*4 zpzOK=Lj^a-rH4(i|5I+GBs%NPx1istJqb%Bie27|Vz$dls;!DVbjapk(HAIC-)i(Mi($PpvB6Cy zwI`qLw5E%Q{$z%?b2D*0y^nY*ue z`Xqs2sKl&4Zf2PIm2%vF6Ks{zJc+vdYkExSsIeuW-gIXE6@%IwU%li;hb;^38w-Hj00;8R=%kulN}vK2 zN*(-HwQ5Y@y5=}Ko>kzUw$!Wf%e?JE>q>j{idWiFThX>Jh*^DoG6JE~0ofg^uj$FF+B8(CRn#R8==Ys#JxV&4nEs(QzGshCq=NL&1 zJ+S}z=P(^9(+nuyaW6?!l+ahdO|fM~*(c^(^9k&_rLW|J$cjafcZ}6;dX~^(EnL4h zpRdPKSpx%?-F9y9BqFfn$+vAeQFkeyQ*Uto1wSpFLzO|q-A@P$nqeGAyvy2IwZ1=+ ze^8J+_d;^HAK8_Tu}Me#CmfaD>Gt$3Kn@M?1=karhsA9$Pc&WnaS;0t)}=iUPIY?7 z^vSW%|BUAJpOOkyqz69hwf-$@n5=7C&7FVz$|IAsXL_oh@HY-?|56dM4YQnNaDvue5@$rYFX%rA)EGQo;@c^)QDdn+! 
z2P+V|S&%broMzwJve=y&U7bJ1{_3m0Av>k7bID)7lF$pJ_)E1f2%!4+7#D_5HbADF zeI$*A9Jw2`icfAOL_3?_P0D2W60wp{im;QoZ!Y6zRTSnH5ccpWLOA^tf2(J9Ri23UT27F}h~YXohzp;W zvhpZEh!x3{8+92~P!)1F5U&ZtO5LQZf;oIs^z_S<8N=IT>9E&AF zbE$><3V9*wYCoHUlVh;zS&e}hVcMr+v+myfC5wjRU_i*F(<~E&Rtk@V`F3G7S|Hg8 zusjJio=TsL!(+)8nDl`8Y|82}Cuj+t%tT%>aMXvMwdvA_d5I*U9*u6$sCLu1erRFT>5lBU;~zfb7mow6XITnn->RFI{T)p zSj!qP!RNn>!TSCidhs#^jqL6msT{lEM+mo}@GecQ&+tV}8kg_leEA|)viTq*@m93$ z(N~%A8t1<`dFr*IXh#a~2!1l5vvc%uzLV{H0j&?07Rjs{@DVse$O^9nf~o_i^qF_@ zcBgZo&C&tV?{KDpeoS@|evlSslafbE5(uI?PRj-(-G^l&?kE&20Fun4Mrt$~DqciE{MW-YA zc@<2A$}9A!28fK|Ektn3IWa+!GE&=Y25GYAZ?pY^`Zp=E4p8=~8r~;4{egDQV|PJn zMh9UcRh`jFUEy_eU-* zD&*gWfXg9`E#ci>T?<)uC2KIss2Q-8t4YX(O6ZRSE(Tx}E!MLqYyT9DZ#TW0g1+Xcz>p+X$P?&GVhOv_C_eUwW9F6&L08d;_*Y#^*grS+uvo=3EU4h*Kc$lnG)Sc z`V;E(8kGtWt&`+%6+d*zkr2HEG&MAYpiUPW_S?JGy@cDO`t(8F?i!I zubZ2k&|-p*ZdZ#&#AuMU=9V@Z(kDy%M357Y4Q9nBAMhD*oveqLMVcPU#!`3(=@|p5 z#C_k!wKJ|UX3K{BDM2*(#~gJ zY!x0|x3#34;voNrqW6HtylZ1+eF4{WxG_5J?#b*%vBJ-jKF%Ss4ceClrp({Z`P=TY z$yr_HNbOY!nO_?rU};wi|9(h4x|6zFF0-=g4-e`IBDP=Z+gYVkAB{Jp=b$vLeJ7XR zhpQ33Iq;|-dCS)=2UL2%b+9vU{-D$x7<*J_nJ8AE)_{@972h<>ao5m-!!tbZL5L=l zSc4%|j~uP~5_^WXzoH66-I-S_N4_~7uvA~)wb4)A8V9%CuSzCvf`EYP+MIoMPaWR% zXJOWMXTI$|tgagX=L-s=%BO=C4ye8r%&x#4-*|v!Br)PeV%(r-a_JOvHNW*^Ii}w! zMetrN8M3l>o!Z{V#APfQ{aOM+)d5Q9GE;=?@lzNSsIjbc#>g&BpX^B9p-Cwid`zoH z#JYw67P3;kXx1B-#4*H>3lpmpMl26G^wa&mu6k=8=C^ZN1lJCgN55U19yQ9tR_9qdSD1Vr3kBeM!_W2?GAbDD#!#rruQGR_{E|=d@2`VdO|HWiRbp z?w|(=8c`}j4K_H{!Ks+&tP(0;ls8J`;Pg;Idcbnz9Qk+u7|-|T&Mj?9)sk&n&)l1p z=1}$G@tuI!lg@DkS{~Ah==X@LYL#UkFsv9O-ED_h78d-E$hETbk8q<|7+#4;JrmmVpW z?WBm)%od%bLEiUh&APDxL2ST2&gJH`V<5=>VY5se14Lx-{fn5lZ0Pc%`rqt<46>q` z!}afeMtk|C7_L|56DI}OW-bjtS|MA+rBZ?QEieCCRsR#1=iMBV#|o2kkW@95|B8i- z5}D2S;Cqrds0ul}nW{$`J;F=z75Q?V-myv<`o zV6ixqquRq<+om{v!9}vMQ@|_c6H$?|&bdgFv{G~@T``kxIE>s#mS&zlB&X`Nh z?`XusL9Y?tj&5za`i!^P)Zov|{|oRj56<#zLYkd`Y%KVyVPJn}l*{yL39&gbi1)Xf zNx<`;8Shuj&L;Q{TRBmJ$YS@o?!JNFD_bSGxKt_k~9s9>)=U52`&6XTKKmMnSrMGR)i#n~FP)Lv4Ewft7|mkUX{@ z71Q}H1Au+F5IRR!LKuVqBsebk@Pr#|2c8|V;yJk}A=D+kGe<%al~05R{!UJkvrqeJ zQRC^@x39lhrNI?aW5D9NFyJ6~9g8*&TBO@)UMO@_+K!=UoY4u#mJVjV{XzE%0Rov8 zFh?V$T%#A$Vh`EfzkVh+9;1-Vmd}&3fKV$X6_tJ6qHTjkW2O=)rTc9^{1K5 z0UZD2&RoZe7XC~Xe_Myaun?v7!%sBowfAV#HmgOSfHhD$^bt)4a4jRHkW3o;4=x6d zteLWo6OJvlg~-9oJrIA6Mr~zr-9bvI6I_lR##<&k#kMQ|Kct@s95H0s$0<3Bu;41i z?`G&bS5Xl0YGC+;rHGnP%R`mqn95~@9`0#aSZPi4yB$#Cmd<+_%~&TmFSU`Q)x}wH zuDr;;0zbp8WQV;=h8;I%qCr3fW71G4W(Gf*2;70cNVw4acLlnwa>vUmnrdW48~F_S z^f6cZr0g*tV7o4kP|`Cfu2ze6m8UJM=+4KgW-_*88rNwGe(NNrgG|BbM=BwtSJ8ex zu%j)wK@0&=*?pP%CiaEOvPXKiOp#g`I*-4{rEh4tMJK9pNckw%ULd_aC_Cb>mlV?w zl3e3|$9a-EeEQ|8GR`VDOLzO@tV&X*hSYiQhd}uv@gS-{#zC9*Hy`JKbbdyQE{=9I z71XQ(%Hr0W7yKu#5xA6)#m6Elw>(h3c?f2+c}x#PbXSEY@{sl8=Ik+kxK1%j4$uXI z89QPD;ra2vGqb1Tg6iWW9307nYDV*# zdY~5!+f5y*lcM5(Yw_BCGP<*6GNoDRZi=fB~%h9prwm*(3y*72OkS zySfxtr@5<|R*mZ6Mj&bCSOP1 z4QYn_hJ|zQ2nkpRMKdQVgWBNcv}L!i;_-|Ua$;%<4pB77NlG6r9J14ZN}kf3DC+1%*l9qMq}I zgI(-Spk!Z1pqi*>vy>yhK>(=eBZg;et}Ph6n^_mRsInpw`VS9V*kGE9|y#@*D@*h3bN6y2Q$Bn&!&0<*hVF{#?ndPIl!r1@sO+rUtN z1bs;cMM}tL?1jp&98cT_i_AZIFPLxe|YSF6@D4M>LXYXgoqLm+fi!+eE z21TKVqh~0tnC2R6_Y_g(v-ITBzlL-jd$&+lM@eD77fB|Ded#_E$|X+CbVo~xkUGv% z1%dl`xNE8-r$F7gTkMWx(nycq3ttxThFx&fp4+j?FQhA2ceOkqJ*UEg6iqnL5hM*+ zRW8d!*BdOme{rAj1Ox~uQK3TXl-cqLWv82%s@54&tO~_ay}tjQlbos6c1+4Bq-Jv* z+s!8^0H72-C@R=+o+0b8(AGqGb5#)OIZmeURHWrhRuOw&CKV zVnkhRqiD&R6rNq<;A!g-R`BaYYb@1{4^gqW(ZYi`H)^*$b z6p?szTU`OM|J5GhmXhsc^FC<MpwCgM&yFiQJE zL9JZ`*a8FReq3Y1b3?;>>A5EE-Ayk{lg|ul*$;DwXYfrxNv8Zv*{+r88U_m>ypJVT zSo~7%NRmqawBsprH+U;_LO#L$zA{j}(kk}CN+C3n9QUslCp1d8w3iIv1^f2!m*p5+ 
zA%Y|Khi81;JTDx1i}@7a9nCl6(SyIc{{e!zX~?nNJ(OoC*1mghYtJ29Swd@Od~d|T z<1r~pcyTXHegh|uR-k+|B>j1&B}dFLtpiqvNT=}B-Mz$}WqxV3>sIi*Lkyvfee9k? z@W2{yOjD=-AxnZqmz9vqg9!(m-Z18m;AH7nSuV@+x~r2eg$0vLlZ{m4uIVESPEP!RW3fB4u)d;kKrq0~#{Z?();&}V>qUyYCxgY|DJplcF ziw%K~kzr8_J(42fGx+an|uyx*fTVlW{;e= zYjeA01aOHIbPkHb;pJr;om&aPc!ScGiG~L6lRqSW;YmmGA3QaQPkH;`bRAGNr9#Lv zKh(PNY!z0@CEO9OG9(NT?s090BMiN^w6x6qf5;HX@l}8I0B@8R(wukecAp@6 zKySc&GVB=CkLxMvXuG;MpV}H1F$)xIPDcW{>L#vzi^&3`;W*!R=!#~+3F|V>QDSfy zXPDL7govRz-k);@INEEGiZ~dUWH2g?Y9^IL)la70zWXQxh16byqWo3D2yKpfok-j!rZb-ZgiEvc==_{L{e@PlcMva0bQN(=%18zouea zia_YCz=Y;Hd{`4CQ_q=3@%?5ThtTPK^!iYnH(o@s8>2~T9bpz7(N;`M8SOIlN>h{2 z9OCz_t@y&voSpKpd%k4@?pI6{;`qbrSi?g_!8ViZO_PfXLmLa(5%JWI=qKN!#E1K? zz8#D)d^7|-ZJ0*cxK*kP#Fk9ENh18WH5b6@^S;HGM@UU*Rkx>xV9$wa2OlwQlXCB3QTo7H7wFeEnhX*iI==fqxdBU^+;KD&szK0|8f+ z`j^+grlpqt_QUSvPq)biRw#sUMwTZQb$+!Hv8Z?|B>DpG*FlMA# zl;|sSfwi?61a0B+%0*TM^@V!78=H7#4BwpCU3ViTcynpMd(pG zxfHjm)>T5^N4$Hw)Pnd`sm`@Grw^Y+oko0S;IiRfgyb%uX8xNw)zFc(ya(TmkSFCx;GcQGZ7`YEHa%n9Il*Pum)I;w&|xpn-UYx8B&iI1)C*enPPtUkk5d7s<;@ z(VNINn<{I7sazwu&fc{1ibTp-H|8mZ_!3$AxB$`U9E^y)9(hsGL^`-@ja3O>p{}VV z`sA^QM7*l@7lKoE$#)E4{JenZJuLOVpk>#|S(0FUo-$KDT#{9k7=+j^^Vj@2nM*k` zqS)o{9QRh9fWGFE2i63V;S0OA*bx$z1mICpe$0%#wghq*@+jSwNj~P%n zo4=xE`I5)F(aiBJfiz&S2QvGRN2}rN?ATy!vC>d53glsf)>ZvS6Ebs6hzC;EpOx`P$pw&}>#~YDq(hUoS zv625<1B(jTy5xNNCKsNlhVsg?wjX6I%;>p}orY0F{D+f8pPNapuxfTH)&;Rf-JL68 z7FQM9nqi1qh}ej9id#t*XkH{7Qq$$b2<7{TJO$8sV)le$9Y+$tWOf52maq5fa0lLP z|CVyI-mIzW>$+TYJf%L0jLbkbT*D%=iDA+m?DWdpK0kiaxWU;O6hRoA$D645Sz)zc z*hR>6_sYbaSS}e(Vq*TKIiyuOIM!t%O}px5-nAFNs%2_XL5%3UJ(VYWLeL zZIY{)^G9pd+$OYUic}FI5akW(6h+zO%gv?PHO!RP{r)?TlX~am&}oy#9$$xEylr{@&VFgyZ0pb$d9W5V zY?Og=+@q}0xVf(II&k!?Xd)rCBM0lZ#hOe-j*;i;WFDi9HvB2>0bM7#N0Avbdb|hc~p_(&^yO0rjESi~TNR@;J%3A@K zqJ$L-$-iP+6_%%?y}+W=PjQi1pVlg?H1xm^xE?si16j&}GpZg(pojgFfT3=?#}-U) zr*}_N3!e*v~Dht%#RjnR2Es6o7}Bmq|*^t_yH;n|x)1Yj35H91$t=f=Y>z*3s|Z zLZij_ny3Yp=9}#XyY&4M(xLHb_OQ8i{0Z2n>-7DoHyxuOUWACI%ElQANi}t7wCWkc2!vdsyVR37)REnI)|Jr?_dw;?N4XAFIjH#@Cn><64c4Q+kO>Jpp6 zwy?P67V1Zl4uc(THQ4+eoj{4nbe_MV4g5#fzYf? zV=K)~8T~C7^YLT%e)E|BQLRIgO&uW6-$lbMIFn95H!W6R=SDSgYDWk+X!RGW>8f5E zIHg=88gP{f-8?y#pHu?ZRZb3rM;d3b{cFV zlbvyu17k9Ca+_C??`%c2u!!XpK&-EB*B+u~jN8sIh*FV(%-{4XvxlFWRuSGMx?WM` zaJDBIKnHO%EASC}bBr1P%Uj;i$y2mPQMOnncefe!%Y+!YGFyN z$YxJHumvkVHxWxCS6!*0tYERn)&WIGvN<(!*{CLzFysczevFh7gjOGyNer+ELLC)? z>LE)uf1rt*VTCFDT1Ld0W;%$EiVyA+2AT!S7vjrx&d|dr1X1J^b_8amDAnfg9o>rjDZnP$&a&>43 z^p}PaU}QX)Wy1~Um~+_RKla_YEn-Houig)4jL~Bnf>yR}V2bUrw2+sOh@{Ll;JtE& z>PzHR{9pg4^A`+p#xiK7XV2V>YmR9=r-Fv*$+-c=-N&wOi=m(^7vbfjA^L2e^B1BR z9pGmICGQVAbmLWE(6jC`54$yA{t>?50$@tuU998EmSuPqq){`57l>gngDuAx;%1QY zG8guhf!MdBkKH_tA+zmZv}80u>Z+*GyvYZR_M9kvBs~}-0ZP07x=gNM0aZ@kZs#4z z?!glS0}yB2d4KI2w!brzjZC0=A`o9QM+48Ls2UB9ep!htW*%A$_XoQ)>H%D10+SF~ zq>Fe5dCQ+kZJpDx+|OkP=$o99z>{k5xS<$0EgbUJMlSb7oyyE@l&21iP1@jRL~4oL(sCw;6`RPN5^=qV*3OiY0X+%X+eqwsQNO^n_p%V7 zWatn_9eSpp;D(S?moE^s;Fa>ouU;Cx_|80KD^G~t%u^4jI}As}Gj4>mS$^3PCPXYl{;@4gGu8&BCI9)jCU&!+OF>v+NjhG*H6Xw4FA|Oxx~#-iFidZB|Gs7=4xtch+@kUsjdz4jUCu! 
zXId_+Z~mMq8RV?1V_06RYkGHVXiade)bIUO#1K|7p=2!;(NkAqL%!d4xInVL2&QKr zIJ&BUh#e_X6(ohG5Tc~c`j<^j+LHZ8fwu1d5br>5zsAs0!iTnrGM~p5ru0rHw!OB& z#GDm;#gMgUFa;jbAyV+;4LIVk^it=+S|r^WZcgc;6P@)zNF?Xgu(K!LR71D*8kcho zZR^~>g&2#MSjW4XgxN_}s$tD*9pRMQ3Fg^b09I757R;xcOIVEH-if~D;JjrW-C`@u zsRtbdAsDXlCj9NE6ORaO3O}KN9c|6tB)`&w>^OtI)tU zlao_un~?)q`iHoT^(Lz-J`IRnD!fI~RIu{-cGSn}FQ&nd*&$BN{ceiQ^d zg<_X1QGwBgrCSnsd@NbtDu%*V@eO9Yc-vhWeI+F2Z}as%@_i}{+ucm%9`Qi;HsUUWaIVbVTpPg3++S0?2hBD>Q~2u=@`J)L46kn9I5=tbIMJ@Zn53zID>;FlhWDGxLQ{Fx3`JW@0^*p zZOfCQDTbgv{_|vFYwPmFDH?HRF}hJ(6F(tYG;MO2CsFHaL3I4!YD%I2tSjwU121MF|P^wa+@(E-aD!v%29|GdVNo)=b4 z+CyP>-Sq1YfF(|tKfa<>A2U7<$)D2tdCG5R!QjT;P_IgdJykxjpi@gO*yJHF)?$-z zDUCzzF*&hn8FG!_+1+P^1&KxckD-nRD*r=M_ingd@%GRivjF8u4{*^W9o-Vly^e=w zC2gzWYY@U&eg7NsrdlYkPOprP+vgNQ=tmB zFi_)eq55A;pdzm;mayo*lrraHswvirSaVkCEPlW*s_L$yuD4f zv%=si^J#GhBO_8G_L+Te`86B}>o?aY0*!{cXGR)>{+cTcy&jr@40WXg7GH6*rGINc z)2YI#ki??n9ivz5aruFQ7vN(0fX9Rd0}g@n{YQ{@R*3k5Z*^t9-mzqXvNmsEYlpOY z1VWXHPSBXu)J4L_YVCK-c7kdiIrU-2?ul-C)C=o`XpedY{F%~bILkMGgL`D5&30Qf zVn?e2if0MN@Ye93Ib&GZ2~{!rB+HPIar{HzWEzLy$cBlK5_nNl$Q$&z>b%+;Wtlw#S{*tGc{=?Nn2Mofj&h9FjpEN_*6^iUm za@WR9_=k|iYRY*rvNI7@#1x!gXMP#!4*vZ>IObwk^J)qPD%j}QHzxj@X5&+Q7RPgN z4D&;d!Bs)q&jb7Cv*%*K2nv=35=OmtP}9y0caHWCecG<2FSu73Fw`I%phk+RTDI{S zxms*rx5GV}6KkyxX&NQ98Swy8D-p6@2))0ewDqwSTuV#1ID-z};rY$LMxnl0HfFFc zNe{LSAfAL>!skEe`Xy&{G)k{QnXx0Q)}>vr#ZutDgUgdP^)S_UNU6$}cl{Z1gQRC0 z0vs7$-HYEvTp#X3Fy^2?a(bWJa;-9A;%Rc&b=9S%Zp5*Wzl) zp8?S-eKoN;#k9RLHGoFXP=#oeoq&4zmmAWkxirpDpk!wtxub>e80jU62naT@$oeCI z&VY22|DtHj|1{lQC>0)WjY*Fx31CI11}4@^L7Wy5dp} zV8LIRfXG<3DW>?={GAk@pQOrGKIw9h+coIASNS!rzdvPK`=LGe{4^BgMfm^^Gfnv} z>$H1GFz|p-{JT>jF?k(uT6oloWb2B*H4uIWDP-ZAtD1(^9M2^(?2LfoPptjNt zi7ujtNAT9X=^MD9lY`-yejKaUe%Aa z>i#;Kg1L~T6FSw7yX zS{FeMnBK-=t|X4DHAkDZkpMqx#)lNq^py{k&pl);{Ea^k z!Fdk5{^d`t4|yZzSs_B!vbg15ai=OlbQCcEkDX13j$FE=CGm`B42C?PbQt@0y!#@s ze}5KAt{{=QN>u~9JMCMFa%~1bd^7#}sG@5-Hik5=T`Q1(I!>RDyfC53* zxA=PYm{TCdc-Zo({3wDNaliN)u?kZSg2Jse-W|y>4*1X9_{7Le1aK%Esk2$U3`NQ6 z{m6|ItCYb-lTNzViBJS!6O8wcDyH@GjF$t|2LGM&*cGtSW8-E$<+fwaV!d$z&H!U^ zb^PN^1&9&rX|_x%AADf|E%2hejyC9k5e&_0=W07F*54(z1eo%&E z5VR5XqaBifD*>xud~t>%3s6XVmUJ+92Ax;@9E6~1mEl`!R#iw2LzE4{uF{Zc9@fn@ z?v>2G)iuukPgZ#4%9PT7wC!>v@YHxmY>|+6-xra96AhkV%LY01xfnv?mN1vj*e3>S1$ikHZ#%%AS`O}Xf@A*9fvu-( zWt3j)DtyKNEQ?Cm*};nDP+;Gd)2(@OPZh38SyzBL!lZrpUTbi(IcyPUxl?w)Ykuu_}zt1xVcrux1+){(pxaAH+GEpm_ep(X4F>GB{U_Lg(H-oxpszM%698zE#} zZZ6|c){x#f5WYK)y%9NG#-ZsP-9PxhOC9hUe?^g@sT}-i(qMMCa&OadqV@4no&F{^-M*Un-Pr_)~gt^O(E*@n^_uA$Uf!|xqdp-?}YkH zGjeFwbO5>2+atfCwm&wV8j5;<=5iEA1};dvmrBX%_pitFbg1{QcB*+9u1brEpd+(` z6pE54QazJ-GA?NhH-pYfxq039hl2-dOBqDeo}wSP4x*i)qUlrpXylHWAL5VJN3sfu znd~sQF7@;X)3021$&&KJFPi1$W-n*Y_Q=)%?eeWEf|oZsZbdc$4K!w2$JeaZ&RctP zm*0^MT)jWn=E}g$z<2p&>Rw|eaEh8h#a2rmtI_FxIGUAt=AAV?6eaIU)8L)!wCH%* z>l~0uHBLov6NUbhLD@rnwWddGghv5uuaMdaN|{jEo<4$(UF>aEg=d78TSLzM&MjXl zfvI6yR5c1NefZ)**iNas-hwpE#j*gRXRMsAJn$|$K+Y}+M&^7n-cbYZ97_vnrOkp7 z)`Tm2Ow%NE$`lEI!PC3tY2)XD;tRGZMkVDaoA@7=2+J6{v0au5sv{^8nL$=GuN$GOG2US_-maFiv_P*UWWToq#BUm8hg5`k%RDkrE9% zA>YCi{Lx^LBrhEwdtQ?|q{m1+aN&&M{Mf|ns%fNri&qba>`M%T@u$y zFIosL(VZ~)CW5$$e(vdymSUi3%YlpMVvP|4P{36_BegxqcyX01VEjF*QPJ`G%xdoe zjFe|s#MLsSt>;ui)I*8s{)Q+MD^G)u>IBH(3h#m1u1tAV%Y)dr7N6CnFihn2PiQ^) zAd(zd+{>|huSK~Ud%DoYIS&>%UER3<{{S)^E8GflV?DxPxwlc^E>^pWM8 zyYBv5a`baX(AF90aRaEmDK4W)8k13zu07+z&7Y`CRr@(fZ-=&|YE(5!Mt^E?SL3Db zxQd6E^LHu^T{f4tkVl|5nkrs(YV-L}jaQOZ;hc&73i~Ck=uq)-cRK@n9nv0!;u@2Z zBR>MnAh?LPfu&s$s85Cu(7AykBE$8MS*SQo$gi6~>k>Pu=dnq^@sz!LK)FBw-#k${ z7ODN6wI?vWC}R6cZSMIS*{TyNviv4&-Q>E+z!{nW9V{0UM}ItiW0#kO6}>B)BmHcA 
zNj#N?4CS+?6-zE1zOd4}pWFZ%>01;6+%7N0Fn`y;Fena;uOhs(GL*lMGGzQ#Y11cymXRAn7EJB`qnJwj|J~V3 zPtM4$jLD6|94hn(h{DM@+I4K^zqet#VHPEOQBqcK{)Mt`lGwKQ0vohJNwKSk+?#W8 z&NuTmmm?P6MmpG+xmmMWtvJSxP3@aMWO0ZxTno}h6IZ~-9&h2MaXx3~WCq2DEZ_pr zvp_&t;NA-rm&uq_A)3=Zz)|AZ&Qi+ny&gvrw3nzhB~=UX zK~hH-$FcqNeBMX!XU5gCnO-)*ugJ~(ce)^4@-X;VALnY6I0JL?%fk<*N3; zjb0?Sgm0SjyGn)yIzMKq7>FN~cHMG@j$t6lH}u0oVSh~^4%)_|t7Gjn){~u{N zmzR~d-`lRWG4dRN$~lKIX%F?4?a*L9;R09`(;8En4+gzHbR*QV>Khy?T&!0QA z?qja4OdN2${$?vpCvo-s^oon^tw>{?Vbu-mmpq41gl>#eNLuvAyaYWzYU% zkV40XZXJjGjKWJ;D3%w5YxlZcYhJd&B0e;P>%&vSnL;6;zXFVOgzFUnpNwSG6QPN- zje0hy{;k=Wk^I!->dj`m0m$3cCw-A=YrjCKl|xZl;ao|f^0T?X=h?FU(tq$#8i}K2 zd?>L%r=`rkT!&bFCEVVP*ReIj|HEz)yA$c01xLxuza96CsEY)8#h6bRFkX`;U#eH@ zpIz;H9iIUgviHNSvhpAS!SFA$+^h)o*m%YKz|q|X^yH3Dh`~x82Ou$5=&oxpMqPYO zXD=>P=8Kxx#s|Len;rIR<%!n%qN%Oea--oqV2noox*8!qYRf>#eu-p6sEIOE+|((g zVYBV5_Fq3y(fnNVKSlziEEZf2e!CUrYPgp3a#tZ4Y{_(Z{mL7Hsb1jmI>E-KrlIgc zPnWJ1GG>{9V4HZ6x=q`r&r0n-CWwrXIw8Ga-Iku z)3*S7K!m?ae8Q?ee$H@whh@OKKG`PCv8|bp%GW_ng3;(}!{V1d zm^S4QE(QiIh$3f?#Mo@YLM^ThvQJ?ys5fQuE59WMf8H&fnv9?0=QHVMANomNZi{f; zIR^LRdEZZn5bxX(qIU>Nr@+4tt7NK8-q^pnt zztJuukmvJ~#+;=70f=UNLXPR?705H4x;~qoQ8hfQu#Fkn57%9%%QMYEQH4xVkTM}q zkpQV!YymfRqKIr~f`4PwBhUmPTWtJ{9d~zC1K)S*ti&)Fi*H6bn}<@7?UD;033%<(c!^dyUdr5Owlm4bF6}Sx*BGb>RxLxrk`~nss~Jv(V!V z-&HkeP$#YuP?A)ud`hBCU$^{>(u2>Q9%~Ry4PDWs$#~E)MNUMQ@Y*t#E8>#|{toJN zr2#pB`S46`*?V0fPco+!#7Rjjnd6tNOMRbHCn8Hp&i}nQM$Ja>nm1^a?}fQ-dXd5} zSli3kK)`UuA)tOJe`05K3aC|66X_nM$IrHMg89UdxNZ>;cWN%_>3NbMtFd%PY zY6?6&ATLH~Y;|N?E+qP|cmu=fNy34k0+je!CT{d5T-+BM}-#Pc48#n$NJ0fGPIY*9}Gjrs^USve_ zN;Cq-wuV4aTN@`D23mSffV8=dw7DVB!O7ePsH_QKp=F?Dgd-zUHg~cD{`)K(nJUo1 z(cIRC^Pe~&2cUt|pAlgLr$2BRTN{9^tqXvW6~IW($xP4r_W{E{fwm5u06|+rfHKg= z25931M0>@|CG)Vj)5LvY;NQPFa(;K+rZKNMWyU+2LvzxjDaTqIF~VSaxixTXwlQs z(=!0*|N8y&Nas&RjBRbK-2WRcYhVoo(5b0Qs;jG0{hKcT2@({vbpv?PFf+3PXqXt; z0jvyc3;+&#dVu%863QEx|Gxpz|F^S*jfpLQ;~#MwJKOzN>@NRMHGuqYRZ{@|l}Xmt z=}%1o=+kYjZ32|KRoSU^U=h*9QRke>i84Xhl2{}@;Kt3pzHl=K4Ze(c#baVtT|AP(qC*}XTqKJ)= zt?^%lRdV`M4+96|e-8XLZRG6W@aNj|cl-YF{jYNq^FQ$cfo?z}xYZ?FBd#Eex}c7y z8Ub8anuAFQp?P>K{@?&r^uov%J)L=wV~;y=1w+7d%07NLz386PY3M>y{2 zph$u`%XBg@wU=)n;*JNB5I_WUuoV&e-`X8b(@SZ;Xl;r|i|38A&L)#Dh)~l3FF<_K z#e)wbz_N9o<4>*A+ihAHk(8S|JnuW;| zR4QKaHXn=cZ0`H~T@$leWa_291hMZlK!SG$N%Li)x_UZj^S8kxf{blaNWVjZ+68@M z?hV!x+Ii%Jh@{I~6VqkRyn0^TwgO3gTa)s zBWqa-W-D*pWK*IPEsGp)XU+8tY)T|tE6F;zJZ`0$(%_}+ml<{I1@a{F1lu-S@Kp{( z0nXZs&mI17-+Ol1i4le)FspZKzgb$^chvdmsZ8Z6G>>23JU}f#jUle(ia{ugq%ug6LbeZPv{8= zBX~SP2a8JeeP--YN86OPKiYmRfcMqlei{iUZx$aIEm?Ia)6LcAcd%l>x^I}Pa7f(a zwc9Y^1L5TqWuQl(X47!*Bt3WtijR zrCWPCxD#YCdl#LehRE%Ic2A(9NEdN@z%k7-KWW$cRE!g;;iL*9^b4N*8&Hp(rx<)j z#dZ_&*_O5sdw#jwU}B}SSPk1g4&0!tD-kX7*m6e^M+)Ks{S|EzxEJ|}(Z*G=JN0CJ24KLMk=h%e44tml zH2W6b8D4kS-*%**ch`3LMWn}~XbFK;1k?B|4*hGNf+);|;^KaqU(>s%3}X-`dP_1f z96xerm49nBcm`;$ylmM#frO+S0G1_wxR; zan3amsb*$q+nDETQmYHv8r#n%5;|ZEZcSk!(SmI!E;|_lt>kR?2Wy`8lS|eFy2qCZ1&sR_22zI3=Jh ztInKxRN%C?CpNV%zu?0~q!{W62(%DGYiZfq#Ybzu3JxltmB2ur*L~rNCBPrx480qk z@;99x^`?o9D;B<+ z2s_kNS-jI^92cgR)HNdx^B!`e>yt_D`?^_bi1XSG43g1zAvNegIiABBg{FAC1mqK3 z()&h)(^%qUe;wP7H?YRM`BPFkbWBsN0@d8ls`qviuwV8V4}J3|lHH1%p}1UdDz1PO zgCLF|EU0b1J5fJGGx!2Nix`vX2N);rd|esFMr>o(l)YfeLs7knws-aTs{{EgZpW_= z#2nzoOYm;1g9RYb;e90{0fi~)sfRVOmy*ewYP_VeZ^;l;Nzm~#8#f_8M@r_tkxiS& z6&u19ct)a1;JATs#na&7CGWHq6wp&UFNEJ1nqfTg?c~IlI_^{Htp?sxn(jCTI^8c4Gr4}+J>k+Q1M8B#*iU>MScL`8V?lUkv3}}k9QC8 z3`}O$i+p2c^o+{=zRE@w%he}9kqp732sK!#yrpnocAS)Y)x+*trwP_Xq%T> z_#na!NexVV{kiX;u_f`PE=D`AgEVMsvia3vs%ZNx5eiUBRt0f}$ARKmLe`xhDdwGU zkMcvjzt~=Wu!nvhpa1QpfG35=z|9JY{H;`kP$i5Rd@2-DZ@>{5-zLk@l%FmFbc??( 
z{~TZahcP;{g@v0hF*RgjekV4%HKD&eh2tKZ`NSl7dDTSrlh++H(M2~*-RCD&|I0yH zfy)BZeW^>ipW@F&(?d#7|GYI2WQDMtT@S*W+RxX>cLtggotupWX%}@>tuGE7Sg?(i z#qSy+btBv%*O5`|i-r#7U03>)JX9*gEy76QN!aa*-f99~E|S8}xbx%6A3YirtG1`V z#~N*ra%|BPEh9`Lv%g;fwP=);qeeUS=b3rJTYvMYCl%{C*VJcM(3LB$Z zuJm>)M?pP3uBJ-Vcf9p#*YUe8fm8k5zi(04!7vZrS7GI9gl}5==k5YNx!0wnL zCyw(yX+VGLM*DNWp>_)X%8CoLn=7hVz*AN;p;!N8j@YK5)F@^UPS({Az4rV9rGb|X z908R^N)eT*4_d|JoL-eS6;Sk~eB@uRE}b;0_j5SRe?6+;5cNJZ?n{hQZtX(g*;s`E zCAZ$Hw!85qgbn1U*Vs|e-a(U@l9nLVyxaoT9$g{>6b=#ETMgZWUKU_xy{K;5) zkv!_wR7@Y=N7!`&kQg$jzo8AJ_r?5&cxtjE<%?f^SR zOmlBa=n3M%_5*nEJ#&-%9U~g$TP;=@_&~7Bk#PTt&+yeNM%(q~WGQ6@k%^ufhq=bIMghzrNnMsplmpNGYLp zh~-!AeQ0U07T(X4r=du5z`!{p`cZ+Z`R4(*ao#NkZB5PQgMvgx53|>4YS-yK&+b|P zQV34dIkmRJ^ez5UapRcv1>q3xuTh~-->t-OK#rML&=re{%}+5u7&&yCczB8i-IzI|n9rl!gY*;wY<(BlF_lM1hafU$IY{7`fH z`PNDq+X^ua#z3pJDtrJAq0!t7Yex;m{nd?!J4_@$;?=WFT@YK`YbBDV(C5OzX6PBK zaPa;&Kjqo&5%&VGIL%c@Jf>cBz0S|7DLL%=ZcBN9BBA?|DvL-IsgnL48O(vp^(%L~ zt{~fKDx!us%&=x+=Org%t4Z<C>RrDYhnVK zx$pc~(Q>V(yAmid^kH#CUJ(e<3e+e4%-F<+zc{nx=R-z6p0ioPT^v|6htJQuJl?`) z^>Hbwku6;9je5ABmR_kT2s%N($DVll;W=&jgQl5>yTtW7=AN7}UoNlHRIb{;uwnj0 z{FoPA@_UnlbFg$p^gt>?9mss= zN-4NUPtKz$R16SDoY9$`(%<nhO3Q~SP*l~Z3M=cdhMjmOVuVk+D|87rJDxj=-?}TprcDG*&8L-XNT!<-zJd~ zJ?sPU^BWRoz0)MqB}IVI-KOipT1E8kXDVUs089qdx+xtEB_xai1J>0ZC=Mxc)j z-K+!L3C!T8e_3}+^@9+q^5iBaR(h)3YbX@1vYLx^D$0vif5J}(q;BJ}pz*3rkD)pX(q_)Jbc zzCi|-qSF(^FK&}u&(vG9`?N7G=bz0YiE|-GU5_R+s#psbv;~MA`&I@Q;`O7P=ZZu# z;J#!)vA-vlLDIogchfs(iN$irY1=Frsy}6nr<{uL^;6fDGds2&S=oQoWjZ;)CZwIV zw(mDSptt(J!hR4q1P|-?n)OwE2ST-As$0n+2&7FBtPl?Bl6M)4lK**6-mN#&Y|WyG zAM7FsjO$_^|7>{+I&hEZAo9Z}{NdjwifM5*GTA+(Neu4{Hp-~WQ;l3)W(X!Q84QL_ zXJ8M1>YG)EuRZa7BobgZx4EefS}z@<7lGXU)t8J43lopTF*;ANt6qnVF63aN92Ph@ z_yJh@y6GdGLuTK#mhc^9wqDzjQ>a*Zjig+9M}g&ptERre2m{0;FzZ5RMZ~dDFSN!m^__vo_8sYH67` zBBA^6a~RM0Io!5Zq$-1_J+5sF_GT&hsqo|yc4NQ({aa@aDZ?)L5-9#GRF>UxPD4u; zhyOO7d06+wxR*#48y)Hz?zjmxn*!KSwjb7Z8{nkLHzrV#){3|=Hp+CmsbT~V{E4Xi ztSEc@k_*$O5plEUL^l)ltg*hVRC&1srfzEU(YD#f=Q1OG zxJm+$X;ynn)b3{!$L@u60CB`kz0*A;2kW7OhO4~QUb1!ZLRxfzsoC&Z2JP0D-A-Gb z;HO~eLl*PV6rz0$t_1MngxDCKK_%^nuaZ<2v`an1vCtL--^Y&S& zF4(WA;#|Tq!9nTEeUp3PR(Z*ov0?rL^R}jfIO~zGTH~6k16S5lECZ#l&mL`*Ua=+% zlS`_NheScK3(D_QejN5h4D1IIFADV6I`#53jaHq4-;Tp^ zmckXwx`(?Dm9PpZ&DJs^pEN#ieNxU8{;*iJwUMo(&J^n_k0#9;Gd;Xdev3f$pBpKo zaFW)!BZ+Z#cp@3x>cAp?SH>(kc8MbrY3$4V?BEWq?W$NF;*MXxzD(Pj#v#~7h8d7E zP_v`}uUL|k2b6G>!3A-&YB{9&YPR=|i?;{lL{?pNygz0?6$>kek(|YjCxiN0*YQ%W zEzYh9g80ZJntgJdUV{97w4Fq5A)te31_Wp%etdw=?kgZ=Xy$W-qCrUcHgvoYMBO_am6mou!e-lcGRW za;tqV&Arxql5}kRX6}*P*mo4#gVMDT&S*PERxvz-)s0&a6$g}Dy-i?Kszm>Q&DRKd z=K|xlFL;;P`3lQIoj@=Y7SSkq%GbbcN+P)hLWQ*-9BPL=leO>@0STcmlz3;&njqZp zymlntuj^7s){@~<96)&SY|oz9vo1F9NXn+EQv~pvK5~DeLt>)xFHwW+S3J|8kF1
  • O z=NDFyW_v&v<~!kz&t9sQoQ*!cqfSmKFiM{MwL7u)J+hkwRw_m2#5TNtcmyQzEWdE+ ze$WNVD`Tx`oHBgvsl_CtHz6k|qI*Srm=>l3!>~+rxJ{ty42&5A6&)fEeQIAfd<=y+ zDtg<|a)aETx<(t$>dQ&M{_65*hus|;0?yK;Tf6|!H4tD&uSS{jJQ&D!Yv~FvRqH=* z;b(*2)CLln-ahwwL$N)m7;?yJ1cxDK9)YSH*JL?u1Y0}fLBcD?3TCtj`HxtzzcT-X zHMojd2`W3Z(ARFwku%sv>5H8Vf*=h_-{ey_8qnPtDdwRd-Nx$sRdX^ij*|@f!Yf8` z2|O8GDJ@e_ivk`92#qyPwS97&in$Cgw7%w)ap#uD)%ad&WAr!G=G+CpcB68@`Pc2> zss!K@30&?N&-93@UnCHg3GIQf2O%~+>gFr;uIetS!OKbLfy2^0py<{aNwIUFij-Z) zM6NQZkXDg%thZ=q3p}5S%m?P$Ucjm|T^mKNoX>EzX2;%+S&$GmHFgIkXnVCSK$6QV zdyS*vnSQluIzA>&M?SV%`_T=}$iz(M3()drUGHHpDN#i0sIPh4&yHe~G+oPC{+m7{ zd6=y^DvS7YMG5P5k8Jz`qEf3nt^PE`?i*n0?DJ-MgL_$Z*25rB8t_3%N#E;mspGA7 zfSZQ(uYnir-O%(p-Q@^4=deKnHNVO+c=dDMwGYU>FWFsp?<7i|f7cdiPOH4iz6!T@ z+*7tT_Gh6!_YnlIubL7Cx{S(9a$%SRs1GQQH92|5njbN<=Hu}%e6_7t;33OLoFcc1 zPo2)fbSTeJvLD9?7~veLY>6Y($S=Ndh$+?OzB3H3_k(r~L8Qyd#DT9dAp=$)zZL{# zrPkl#jj+)5;p?0X)U=@{R6QqBceg^nG3x38nX*1hcHm6J+G&=EH4wqXBMD@eTIzk` z3o^-Wc!txMi`j(x?46x^JXCof$CXVPlBkSJIbDdQxtd{WjHldc-O4>N%;n4&F~-aY zktv>~UDOt$(3Hx=u-_mbLDc$YouoJ*s_P`#jI?KhJrc-#Opc@A5t0 z-}#;Q>-+iVyng8tBb|tIFE#u`GcUI8x6WJ=+<0-j3I?+2JU$nsgsfPh)F2od1E7Ev zgGdCBcC27N$c65~6h3GJ(n9GVfOG`eOnx9>VQvnPNU$%XBeT;zoX*Qo>f&LqOvyLT zx3@W8LArZzFWUJiZ>cF(H*Yh1tC8P6i|G%^T;W;l&S0mQ`LtQRykporm9#d=EiRnp82cdJ< zt~R5o&gQkkCI6^~)R$EkHsgL9%i~$8y-SZ0mCc@MYVr7GDWHE+~S1E zcui?|Y*R4ab=31i-!^f{!PrPr^V>-CiMoI?*CeF8KkDjLR^I*U0a;slzW&v=H&usb zjW=Y-Mt=reMgzp2IbDm+y?u*sTJxU@qGm2G&c|@S&MoUFye{Pp%kG8CPDDc6M2wgD z;F|Vwtmo>mn!EfO~;Ru)$cjWYuP3tmMlVN^A#rwjb7DAZhaf4;aan3CHvDKel!ik~@&4 z3mNeCtPY%ba%fJkX2<;IJv&c+)E>7ka>v%HUY<~xznEH8+cjEBQ58&|MK27YX>H9J z$>xs-ORoPmorWG8)}{>%60OVz2iD9OAR9F*2MuU}Qj^+AqY`bmQ&)O+b*_z)!1t=O z3$KM%FKAAxw7V?k1uQFn*i>kB%eP%fosFAe)-aci-ydZvHLV$MoFuEXYr7%FlZ`XN zbrv!^nMp?r=aaRI=Cc)&nrk)+Tc3a5+?nJVW9M%|lDL?VH0@(*Cp=>kVogZaqRm3| z!_C6}Ea-Qx%6Zjgs9Ne8W7p>zqg1s?SdDzj95H^%Y=C-28=ZqE!@v0OepT zr(JSgsw0^`6%^F>eRu21Qc9Y;{nA>|5Lr1U@T;A@C=q*bQ?A?EIy53HL-(54F||r> z#4h*FR#8ROIx?b27I|RH^2oi1WSRG=pg7@~XNpCCK`r+Gr#7kUpt^Y6i?61b8b83U zUx3xNTr`CrG~A9gbwE^KdVOd0v8jfsCle(uRRvRx>}T5voy2s15Aoct`|RGB*mWW1 zZ++oavazIn=QHxdDKlZwFgAOvRIJy7>f6^*{&r#fH}L{72a20v#rb8u_K8amZeTSs|R%@{V|=1ca$YPvlCAw>}{3Hx(3e$ zay48Yadx^o;v@OSRPanvu(%IM?t<=Ck;OF;ejUtc|q#fN%cOm&29%6^9D* zwE+DJN^8|4@%Sm(e%P zA>^p3Eu^WnAAeac_Wu=I*|#mnXNl~KY&ze(TePDBKXG&Gz6estG4qhb-CBir$^Y_@ z6)KwZt(F*6_NzA~-6_LkwF29{i1zA+Yp-nFQJZ1PFk>yM|5;TO%qMz6rM&KtDm2a zXzOSsLDOnj^Yo9|gh|(L9 z8I7!qjlOL7(eB!udd{##{qpF?se-*JyZNn&yRV$)wx`zL7khw}qN zxqy)|fV774F`pI6wtB? 
zWI)XhU{pjXm(Kg-%_)@r_l^#`+j=AH!h?gU6gC^m^hWN1EFJ)z96AH?YzlaSTpkn( zfJLCq&^Q7HkHhZ4An<$82s0D{g9m(#RswKjodAKAFnx`I;~WY?glyy?8{inxf&t?26eJA<=x@IGvRRf9=O8#RjUgB d_iF+E>wu&eh0BkGmK=sb<6#B{wk~$C-vL5C({}&> literal 0 HcmV?d00001 diff --git a/reports/15745_Project_Proposal.pdf b/reports/15745_Project_Proposal.pdf new file mode 100644 index 0000000000000000000000000000000000000000..127fbf3317a46852ca9c88ee6912f0cea1281d3d GIT binary patch literal 208886 zcmeFYQ*@7EI0skLp?Ti6*KNO8k{_Mlp#?;9Sz{Uum6E(MTGIsbgTIo9(3mY5S z8vU8|&nU0&VC@L_tAdcNm9w>tBY*`!C+}cvWNzqW>i}S2{If3ro!Va|{_K|FuM844 zM#gS`9&i9SIR3FsYyoV4R-p)>Q?#{p0x&TAV<`QR6u>~w{x63v|LDZ_ziUG$Aq>#u zW??X5Fkmz`VK!l5XJ%#MFkxadG-70CG~zI3V>dK3;N{k5W;JA|H=<``)Hh*bWi?@B zFl5zdXES0qFlN-(XJO#g{(AzBP7cQU*3fR5h6YA@+opO3dV1_ogzCHj;)9$u!|3#$ zWTE@YKyiwCNfj>q@eIKj!mC*jXRF|ZX+Ay6kzY=1{xW)cI3!}&P(Wb-7^c4``X4U; ze>mI!U8?_U|6c|EC;}2(a zaDONLM_|h9n;Ogf3j!*R#{c!2?(gG2@&6Z%e?s{mmI&!P>08;F{tvqSLBfBd^WTz1 z>93E!wEVNE|HKdn%l{gldU}6j^T9|@Uk@>J3eXP(rpL-8p6FK?rk@}`Hwy;~8xN<{ zOIbwB?=J2O6OM)H!U*|ICXu_sxts8GF&-bOqAL{#>nxpDU_Ok2UV9bt7Zw(#8#eII zV6?0Mchvsj_J6Ngz{bYb$?*@sn3?~zQ)ZTb2G)Oue}rUa|Ht}A43@u;RRpm7qu(C} z3fbB?8Qc8v!tjp|I+;Jj)EBh<%f-KQSvXi|8QE9??92?b?Ej+VAJG2!!hfl&XzXa~ z>|pp;@4tHfm)?KR;D3t!*ZqGU^}iMSKQPb8$od!6{|&$_%(3{j=9|mvY3tH5q!iGt zChm5Z%-eGtj)Y3(%AeixiGitvX#V#Vw+|;6?6Li6Mver2q6IToHUNzawVBh8jN`8N zulwu6H&^>IRePi|b}2Q4wj_Uhl`<6_RsF<+@{o(uCBS4jv!<;wIv5|q8RPNSN(GyU zd<^e-VD9Fr%g4b^wICaOIkl2F8%#p8hP5(QvdXwJpUh!y8_(Qx2^@k`lXEQ82`jGL z7dQ;MM0Gqdb=fyF68(5I>9b;wbNr&+&<&tpqV zzGI@PuXTL6qJ@GJky$b(JU6H6C%znaGou?|Z}aQ1k9^^7(Qo<6;Vbflqn^mdoD4w- z8&yLa_1^($>nNm?eab2+$Ujx>o$5!H__2yaps1>UJUS+dfK>nXDY5>6UmQE%mZeAG zom31rx781%`0~{^m(MlfR zhYWS){F5G+Q$8KSx*%jSR#iV(ddr9SP%j(o)yYCeM#l(CD507A^3@rM`^L?K`73>V zS*AgldXuK+FxxDZH872 z{u>IOx5Eb{{})E`w{8sBso#*lYZ&kXmke#iNQbZve&>}yEZA}CFGyDo^3x@3vr|El zX2cVRnZ%%#ga90!K|ncKUYcY z*=kGk%?hPjJ|Pa?q5htVxe`MqSB{A}r2%$MRjJ`;F$Zn?5%2V|S2D7I`UA*EqQ>@3 zY=iOG=+xc;uUuIE89(!g(v%vA7xz^)xPB9>u+Usiy2rw@bJAP0Y&*ts9>~yQFqi0J zKAOCVQl0Ai)Ut0qfS#Yin%fd}piatv3p_&DIt?r#%V{m!-GO=WR=k}N#-apnPF$gK z4Q@UXr9~MU-F{}3v8d{-$QoQt6lgYw-8Yu6OAuXLAg&sa?btM{R2o7t^sV(9W1gi2GBwxy zp}w2=8Sl^bO#h&H&#i zHuGd~gpffGu?S#$&TN9*6JM~cJk%ijj|scb!f)|KkcY1`(E(N393Y{0nhtWQN_AWg zajX^AzKZOQ(7$Dd%4(2pYsS?{;NVSsmbC_BTNUvA#edQo`hKGS+L`wjfg|jtKp4Up zP3W9QgFJWF-tDu7z&d{|UGuQci)hB`DP6}ExEw~N>(8}&vpX*4!Gqxl(_*5a^GE1z zxOBT_xAWSK^*poxwFdJ8yD5!mYIFZu85TzfgJ-rvXjd^3RFVb~% z$u1#|wEr_Kg|~C>B}DEWqXQk-iJOu1-elV#5alPRh|o}6Nw`yFP0^ZCz93SVl;E%D zo`8uNl_Y9~_*F2|HshoEXfeYyr=PxdK&^^KKgHtjhba%TLq;`KK?^Jg^^zS=fypsV z+Av^Gu_JNS(}1q22+tYB04|w^e)i9-3e;>4H6z*r&?8p^K`}u7XtIL=c<%EL?cXlx zZ*Z?UHOFl9{Rm<9uD(vvcBMA!TuOIsc2TcqlaJUUB z{PANwmlS^=zO?GiP_c~?@ zwKb7EN1S7p`#ypDEIjlbPL&u+oq@s}XDOt+fo4X{qMSrj;=Zrrn9~NMqSBe0c!Ny1 zV+cX%cMX&v@ThXOxKlSSL`x_V@7r6q+yTFJ>>mvB@`U3>qvxgb_P!4)Zd&mlO+u7e z+2GVz`moX^*(7N{Gurwt5s*sZ1F@TN6+aHN`nkb(+xKHB9R7qXSCTZ19m z(AL28E-SRRj~>kTB3-wak5<#w2a{m`XaCz+0x5 z!?<+zEL*?A)k+kT_dCMeom_&i+>+dw@u)Mtt5kL>sBEvIWxe|F_ZFB;*;C?svP|KU zC#;MQErC2kdwp2T_(i5035}0o-y@svrgu!#6W&wK4=|xwLl!puH?R{%*P|d9330O| zPCw9A&{X0l8_($=56Qr{qS$-7vC|Mf?>qz8wG}v8 z{5rNjxz6&mA0&Z3uTP?%c*6{5ON-tpQ4uDt+wFu~+y%IEc3u^KQjYdPZWSdkOYbgs zEWfeyaksx95yK_DFg24DdOZ*L%c z1pF_)JH1H+3PejTDSFggL(`LU$U)=e5DpJ^A*+jR1K+OFFM3BH-}JKPaUj4l@cBDG z+_xX@K%c`({v2!mUxWuc2RmRj%RM{M2J7pr2(c6Rp<@HVW5BRSCO?l$kAdPtR@MS= zw`rT(xq%S-vEggJtqJ1TwS38+c(D5ew*w(AAL5&`NHsHo0@EMv?7Hh6zTCZ^P23Cc zLLLG=JiE5G7V=)VC0V0Pbb!GF4D$5Wxcrr0ER zfqvmH&*FW1c=kco3HXwR?#>4X7tp%+l53g#>WpAqM~1FVcCHEfqyn{9@CS4QWFXg9 z;q?rmoPocHs*ZrZaS0K?>&c|T>`$=*v%9;yeZtH&1pMf725M^t0{sH^!{z@X;h*Wj 
z2KQ%Q{`L$S1e82EnT*%Wjev59f(q((^9_I?4GDyKGo6t!1`O^)4$R)#`x6KuBH(Sv zj2Eb>g9;Vi-fPEu zeJ~hHU@uDVgS$6J^i){CLMGO4avpuF_mD2%%m=^qDUtGLG;})@M$8W}+l)b}Qaj z9xGJ3cJg7a+*F|nF4BFmW0s!1-NAq!0mgTImLRqq4b>;zjUF&DfCsy(dRdfWLbw6D z{nKNa(|uorc#C^4@hO7onm+YAbtw$|*$@B52Rn7U-kYeO`Y7p9nM?09y2ER_~3q)lim*N9nDsZ3^8O;QJIRXQ1}X1HEBm(j=GdTEsl9wT7@z z5R6aq5wDou1-`7lS2{Qo&g2JM%L01x?nT!~6A$03XR`tZuj=TybX=|-6J&H;w9-wSe5p4!E%b_Cv*5C&^&t6Yu zQ>UVcCPrQO<+ilOT=~!MsD-qVe-87*~--xm|c=;so26C)4tW@4S zblZ^+NjdzTsFRW>HtdRWY6W{(HOw=kVog&BD^*dO&ZAgXdSxPQb1W6~u2wlTlv z+E8emylUNo(swW8YN~?Xuj*B=@d(=r2>yyXItA^O;&|Eo$?`?LDVb z>n+>7J?;)JFBmp6n-h7;pw?Al9q16?IMweu7s;oV8QjzyORe-XnVvS12kNN~axJ6d zn$cN`K-zTd#9#cfPK_f-=2(OMsA-^-jZ}2$GrJDtwOuiJM_vvy?wJg|93ydcJ!1nH zB@l{pVlKMK)fPV_kw4?0(TvB3T{Qyp<2K$weAH{ou&kStmzWN$HihuUdnWLQ=V1 z9II{gakv}bI>%jfvK_d6;kS}S=bnl3R(nx9R=p@ntr|8`7A4%buFNpbcCxIT_SD>{ zD3JUfqfo;E%#Dd}bG<~ak*S^L7X{%wA0m7+`uS!B%mHH9W-9H_P64y_`I0}TFDkuc^7VHU`$jZTv5e@B zs_l+`9rbzWf~s-HHiGKgDKaG()HdSjtdT>KYV`kF%4&SS^&;Zy)_e3wko!xlL8Ef7h-N zgnov9A76V&97nGqF{qB4kU>i$?`DGHuDX6vEsly6G&gcK!b+B#t&8JRsR7<&Y`pO2aTKH>YXu_pX z@b$oS2;@CZeP~L#b82yR3>u|=YQA&;6ga7ZOw`A2LD=^L@=CSpoHMGQb?T<=H<1!Y z6J@zFs+n?ph9TE*JP58ca_dsRrUzT4An#Wy7GD_=!P8q~k%BV@rGZhY`J(C^L>A0< z2?>wly#?b;-3Q`4hKE#~Fm8<4KMQ1f|be4;0jq z@tbVNae9%nAJWU;-!v3)=a{<01ul3F&=+Rw^)>t5>-we4#9L*)GEh1XnugFrYAo=< zml}fbie<9YL#L*UCio-B81@Fflc&1rqDdvWUk3}Omk^G&pAqx2jZ6&ePa_I1Vam>V z(w1HbQ>dsf<6lF2oho_EkgmS!o81Q`$)T<-$=>Y7tKESg{7Ad=Q%77)O=xha?s&SB z$)00`pA=3yy2Ys?{$2?2qHXF49q^!L@_}??TC(=7Ve4lyF+%IkoEP3E`|rDF_n+J4 zP-2y)P6{*2{VjVH94$XeB<;g5YzkMVjH8GcqhL$h5cmL(&JxP+nPSUxtM=l7Xg8oM zDVi&^DXlW>X7yusKXuuzF7&7{WSi6^SmBtsoo#J$e26D-?8nGt-4TsH8xujT6{2(E zd?NLykl#3eN_~Kzgx(JLBl2h1`j?q@`YFaijUO!E=kc;D(?Sng;SwQJNJOdj3eQfJ zZKCN}0pV0D^2e~wSOLt(h^)P|>z?IV-+sG@8Or>26186&(? zQL1OWFvQ{-Ia6(a5^;hyTkyn)&utY^KGKk_qypJ>xhf<6<`AQ>Rg$00MKb z@)Ttme6=kQAq(NhOyChR6ZD-$5tpTm9n|6Y3BD|oJ@9EfXa$;MEpO@!K2Xj{7u_*9 z**5s~3lTBj)Vi7KLV7Pg&4OX5HE-6$>@=PFl=zPSskTv5X>sw<_0xcR>|}m(;QHGH zguoXhW5DM~VTfi&#-6A#ph7&D_~}#XdYFyS>q|KX&&PDYT_X!G$_z2%VX`9b1#(CH zxtYEg7c>9PYU^$8;6C%_Kt}-6S3`{SNbz?-9tTeokU;$4Y9K-!UVG~V}+4R`|13h^ljU+7-v;=89S1;R+*8rxA+vu z(W;9T)R^0(D&;~JN0g_#BKsml3G)l;@{w@APkF|N82L%p)fF$93ddXV3O_alLs&kn zP7b#8b+oJDZ3=*V(~?e??0Ss_0k%|(8C*bA0!J0*zjznyMv1%iqCE_=%m3|K<^;KM ztmZxcbL+As2f8Nvuo()&z)*^YTR|*aRvTNu01Br3;dn{@aNm0avBLnG5_$lH=+Qn? 
z>}{rHEbi?5*C=IsmOu0k5A9ySG89x2MY6*Qi+*8>6GYf$Vw_F>LPnr!X$DWwu(m$G zp45_wID3O)E%H5mp7XlVcU@0cQ*0`&JSkxly5O&hf|!f@!;KINaA18gBG?(xpW%@X zSj?cJww-9@nyh;(b_aGP%I*BkC(U3jtNtuX#jC-q+hCd5@4*ZE?h|vrgs+2456d=e z%{(qRD14l(>bn=>sh4mDV4QFmra*JED%3NSH4ZX^KkHNX8~h`RyHaCjgjwb{hxyDg zF6GRkoCjXp^F`}MweG5lA~b_51?LXY3aTG69;R&x6g^E@^Xub(=p6(n7OjWQa(?#e z!zLy*bE!1e=Ma9hC%LjN<~$-8kBCjvMe%!|zxsvDGPkx!oQ)m#oi|M>PN-nbk?*Q| z?Z=ka`5s-aX3IBM8s&|I0?Wj!qTfTzN)6JS*mn6Y4sNLwd092;psXKR`7q9tCPy1j z)e$z_kpL)V-{^z~I6Q|G*vw^R;Xhg8-qeQGHNM{19;*-KoQPB6N3bwABPT|uA1 z`hR__{aDbx%xLhd0zY~_w6l|s4kD<$G1VG?wZa=p+?QqQd#FC02-So+ta47nWuc53Z^6Xa4)d+PFD;fk=}EO*5_(W z|M7b%sSIjbg80fJLIVqsYHOqzFO!SUB|Zjv$a6}kt?k2A*7EgDb#FOty^EoR>Nnbt zsL{SYG|0|y2_kH$+CQOgs6wzJz zO(|Y$W672zq$cjDK3QCR^f6b5ums=7=jz(_R!%3JcBpkyZ_7M2nXS@~#2kYk{Fy0f z1X<-Ac)T`Bm@G3Ia@|}mCRSr4!|r`~Q&@7ml?XYYy>#NDwFfual3reJ8u<8rZ_(Lc z7J5Y!-yxMN?s-R5#$1)SGBM6>(FkHg&sELcxZJPVDDF3#NbGBydOT0{dJDt!yXKLL zZ$ry0?$PR+3*{ijb>96TsC{@IFmdR<2;VZv=2^dEs(9s3_BwE^-a%r$kS!{XzZVvs zW#t)tZO|lWBu+YPt?Bb@{)WiICLB{^kv4FeHFK@)ug;LW2F}ydL!(5d7$n_UD!rF2 zO^#|!$P|-+em6S4V)_#^*fy;VFO#>wdNkm()oBm*(|-}IVSCBbg9}!mVCu@;e{>~U zCzm$WGN~`mu)~&ruudrAK2!|5xpG$rTmATo<5>-zr}K2j>#HbGs9TT+Epw#8=cevn z_Ub2u3u{_Tm8s1YPNmqS!Y!KYkz?(8gDNom*!)THD)m#B<$55kIc@#hz}9;&m^~X5=m{C0hN(K^y`%NzxVkdYnSI_Ot&axY&Ygjd z7A)le51J#VIAg1RgL`HhO7jKh7xoGPmNZ0Uj5vLE!2Hw_>2qToZ_|S%@6-%Ft86rb zp!$Aye#6vzjk~`#QWXe`0Iq&=sd3)s8A@Qxg_2)CTs)b_PTFsrhe2#wvosSJ;6WFz zX5$_*8vaagdUSKOtTK{l%o9CDxjdFfPZIyUY8)x>jv#A;$1b_Vm#uUN{u4{1Q})V! zWj9`gVl{3{dCtCDTdm={`h@!L)=u59H%Kd3I2lrt_86mHJm^sfERQ64#@cf`jdY$A zh7zMLwaMwgo)!HwCtr`ylJV-d0R`9~`TYEss?tJedB&riJv5dnfH`}%roM*H-M8E& z&IGv0X4y=}y|C|$-qSf&#*;j4nJn#QGW=sx0raJ13l)epNZX^wTs!OnzrjM&pFR$e zu;P1Ji!-Iq$AzKUbL}d5HP(y_Rwp-24e$B*>h&qUvWZ5VEkvRI&=gNP4*7n-QnFKr55G&BF{mgXZ_rL{rTF0bb{2{cP9rac zenvODbmVtNbJ!gwMs#)#;bHaQG?e|SLe_B*99||1Z<*G0zELsnQ1{zD;)*Zk>ueHU z_-_6t+j^^PmW?ODMqA4gJ^a#jZBnzk|C`srVaAQa0X-(V?$Mt>1>fAvQXWIwpGxM> zRXNJfe_Z`*|76~GW12DGe$~;K)T^syyI=KH2Jv11xr0D&-`sB<59=|^I;mP`dZ`J+ z;Bcr_{>{SjeFCQ#hsjr7kLHp#)<#@@3%gjdF8AJ}EJCs(<;d`TubI^yF}P!`-?GY5 zIelq&i<5oi?$V_zu|NITHN^5Z)x@TFm`bhUYE;bQ!PDdgWmFtdDmQ;LD|o3kcf4*S zGdbfk?fX%OeAmbf*TPVfpyZ8e4a&lOr`i2I*`Cpw=H(~ z&$$Ih<9n+=E;$q%_81{?`7PlmiW&IJClsFq^f2iIB^L{7Ck_XUhK=MlC|8+zfjyw- zuRz+{=fF_npH9vvyQ{USH?D3sJGFUc74k&6tNJTgL=@08s`I+ckAra^h?MFlLg)$G zPza?zI8a4>Q}<`%4{3O++?YREj~wCfd18hJ!wC(BdCl02xC%ZCjegJ<-*s@dQPVx% zDCp*#hG;I);L@tO9d6%+zlgc*?ER!)`G%%Vqt-wKBNdgWH`<-kBofAj*by^p)MbR_ z^vzImBZ`@1@8zW-rfIXxI?xOPo=k47BG`0}Na7jp8D0HJB|iuI>5`wflFRcuQ&p+Q zqgiwyBW^2OyXsWHz&Th^jh8jLOhfvT=EPxgbwK9xlR;34mRaa(niqYRxF&Z$Rob}< zixzJ;uU4{N5lXQ$PaQB`L&=`uwIjrlQsm63q5w?+A=6HFZta{LpVK1wv;$r8ctzd! 
z1>NZnlg=!M>_SbAi!!%r|H0aRf1z_?e8Q3~rWz4`DHHPwyj9Z_IxN2e8QjzH`ky3} z6VD=@EVy)XJM(h2B9<%#^1!nBvKm&Ipzi-%5l@?kQ~;VF5CtZT{cTk z*s%!KPv6VgB7lN|mt}jJ(@ig?be>wqPi2hqV3a zdGqH;5|n>gRXuhr$HSq_Vc>s9Vp2yx9HyQyukT99_6V@DX-+W2;;-4$!VBf;==Rq zXngb+U&0ZuPWRjub?OZoN|)}T*MQV45R6b}9x_5NwcE$IDqFK1eo#wV_ExRTD?wJm zw?}5Lv|S^CGgfflHeMp$0zLw^kk9kn1Ge!9TZVc>gcM?6jAi@B3Qop)Foivc_9qBx zE9E?bi0!H^@Zp*@$5}J?`CC>U=0xj_ZCTpDqvHjb-MixBMXyU<=3ET4h*(c8V$R%N zv*{m+g9V|~ZP90Q0~ZjMSB8G-5^MDIg;d(~u``c!A%v24zf=zP^8(3c#?N9YVEIZ% zF_`=~W{DY(18w-|))yF64l|f4N!(VbPBA%xaspSY^t0)}dCN(1QVo}Lji5{neJoo;PrIyW+hx0@fXd0Ww zg$HU)bB+l}0u|Wn?}9xayIpmpyCB$oh2n>ckV5m z%FsxNhA-S!2v6b(BE*onN=wH!PT+I~ert858es>!WYBIu#XyPI`1vx|sFn@e^@e#3 z&c_Ow?L?cGx(n0CsmXl!0O>~OZ~mKd(BB@O|ATVS-&UyqwOz(qbiXb!y%m7Dl+EK{?JbJe!^$#O05bP z??ciD{RY7Yd(v=E?cnRp(8*B=ZsKM8?WFvv(w0t6PIvceZ+iWWqLb#TtgSPH7wo95 z)19N~bN8WEX(y-6+g>2+`LX9qL&it8x_Du)#WStdn`HZKT86IYs9?yxd)M`cGy&g- z+iCEp6g3|V!8iFQYYv?0J_?N~?%ju=6{V%7J!w*J&<63c=8JUmPm!$&f>6Khx23s# z1lNx_L~GWYS1~uY^g%U)X9^9U@0xrrAC6vc(dupvKD>Ck+&}sWS8etd`GNxTo~qvh zVwUh(o)2%YJ??Asx8l`VgwEuKe(nwZ{5mg)LR@pJU$Z9KYWm_fDNRN7vdk!*~gr6bLATSq|$O0hTCykL1L)2=~ z5paYaoFFR#+P7`cLX*TbCZ+A=NkHk&nr!+`bfeXw?Kz$fpS&~0gek%l1r=w%g+qzB zh_KGZ8Ic8E4IipXW%|+M!03azQrrLVhu2j`v&v6I7OOSfFq2Fb`i4KEfw>=tH74IX zxdH)4)iVb?k!1AUJ<;HMZ8!%pvmzD(!po6WLYHC2J5@9on@W9F-yuc(c2Q|nuHno$ z)3{?zX;pshOa)q2Vo|Lu4EAcG^{9}Bxy)z366gHFdNq*T#0PZvL7&UCz?}<}Rmovt zY#@_Qm=5xySRa|OPY{-zdmGxTq&o%EHJg_UdS;aIcW$_)hGE-7?qTz0+71%frOM%| z5k3;1MH&G*muv>RnqL`nZraQ0Y~%3@WAMv?DX@;l?t{k*dQ6Lc?=abH$0lJh4Esv8 zGHw&B%`3QTzl)=rpl}#Z$0VMQOnMwWgQkuI*>hk{GIg^=baYnG=*V8`NtM~Q7)L|FZn;*uR0P&5A; z9F8`Qjbl{sWhwN$78|8l8Jb8wkA+Wm5NE0Qxl^ehVSgHPtOA-IsP<=|C@61-xvRC4F9jT2U?}#iOb#FQgckd+5Hv@62(hkq5cKsRsaP%Q&CO}4pX>AtUns}h z<8X*B^x!4wD{C1mZN0H!?Cc&Kn(9AVKeI+AP_C{{ zY~8@bm^{06i-DR~GSWG|!cMG?zS+@3q<;Bvy8T$u1nSS47zd(+KdizNoiyIzN=&Y*ie86>^(mzPvJBcB$lTl;GYSi*kCmr-DKAnO_x!`*RdLB~vDmy^v`BE9t zMHDqLIs9rQ4(4Nt3tPzuXJ20VFwcU4MIq{*AMOA1L#&CuF+$(98o3Ic7!wdm_x$)f z{*;4g)ud`R5U!|)s3HdPpR^f%6<-R(HEV{=f$f} z=nWm+YnT5Q8{_-fNtgMXZk3Pwr!DidZtEL@=*XvO-{+TQ|JPE$#}@3}mqFl{TEN>E zrYg8So90?q$nIBc_NkAB^7`=P=;E0VEmdt0YiP4$CV~le{SGvI)SwMb3#KIQptiAo`P;yyeQdm?+S_F*m1OCkT zXU+Q+*1)+8j`Y`QVfa}NIwnWPr{zwyH42;Q5#Lot7ujRX6<^Lo<>$Pq$)UN)xBAw7 zyOjQ^a)m^v)=Og7m)qNs(k}ajz zi*T7=a`aTZfXKcAZ=ECr%<4WwQui>Y6Y+V$L?In4kljXPXoB z2%X=NxN2t&Jyw1~Tb~_+Rn^u}^VC`_w;G*WkUez4tW274uh;C=rIO+3TbmZ#ZtT;{4u+{lRbpNM`&*ojLl zyDD<_S!&F4MoMxma6B~MMrRFPwBv1nBR#(Zg7?@yjTv>&#t|916qdc;BynUM zFAThVyOtc1wg}my>cjWkbdjgLNM(VEcqge~Ga}kcR@h$|Pejxz*^sXc?XwD@FgXF~ zk)w*1(7%$A?*%7XSZ!fsk4Ldz4CIISovxfn70E{u;478!8V495dYCh`Bg5lhQ%wCK z<^(dsb-fi*iGjK7eM>g)851)0UA!^94W&`m=`v=1tHcwU4znK=rV}KGV_rNa9psV3 zmX3m$akads@V_+N0gx-af)@aEm3+wKRdtXE{F1eLIm)fJrtqd!HPgLl#l!@%J{%e% z+P9X2n2ShmKn#Ww1vuUgX}z5;_Mfelucvhklt+D;_Lj$zkOvpW9vMfj5}Ee?ON+L` z(TFKBrpv=fTyPunX9|?l$m~DzE3fRP)b1`K_4PWJxg7Q9tzqIE?Ft_72OppX*Y63H=r6ghC9&B#oc=D_SNe^i zFCug$M|6eZzFk#j)Pn{bssC35IoNkDw;^slB3waKVP?kQG%auNTn!0> z-)b`RtwZ%ffS(vr*10OBsSECf(bSM1mkPr#p0(b48(|0sorFc#_PDorE^y+Kr*kWN z(Hg%UbN0k-Z3(Bd;`GMo+fq+n3rIR#eyaA#d17(Od+AUM*&0v@-^QTRMB(`-4tMLxz_)tr-trg4A;IimYr+vpB6AI;L?*F&d(?XDxEDdQtcRSYj+gaJ+`~jeN5x^u zirWYU>gp|)3cRcQhl*18SSIx_492x`g~bdt?QLq7ISGYAK9Ae>2S2tU@zBkomIIUh zXJ;A!(iOpiWx^31Y>&N|IaTmsgUX0i=~zr-U%Q`ukxh2%Yng`kC(Cc7c)lQBk4%4=I+c6Ok?LJ;&0wW|`ardd2B{oB>eN_f ztXs8lY|twXp_gIc%yKBx7#yF6Io+14x;368=Km5FyN^IX1P@4&R?I zV^x7W5~cIGAaO?y_IfsmaLhR8X0hiQmBZ*v%j!e*CnVZ-<6e+1Jr{T*f=l5O+-8k{ zsvZr<9@(x?)XsjO$N}~+9ie4G+4gg-@zL~ZLt=T{0}kf$x$9(5%5V>#cjYC7b7N=t z6ul_@FruqBfIcU1@rcF;4ssPZQfj%ggco@gWuz&`)p`F>J#mNXbem$msrjM 
zOEFZb)KSawyP{6v(V?U^{)uSvAz&s6qFUpj+yIU`%sRhJtRRk9!fNpdD zs2a$(p~Y(5tgsaP9UO2tM`beyv}=z0y1W3zju*rX0LFuKMz(U1GQaipJ_Q6}0Y35t z-0Uz9VhnCmtQ#EBRj}qy5~8H|P~pc9H546Nc}{Xl$y1`4->xweVFrdVecUZDzBF*) z#3m^Gq3Z#$@S}K)*aWK$!=qtAxY&*dT_T7^O_c5dN7z|fcdBno=mOjNYe2!-IoWg~ zBy4tBLlrMS7o}2h*Wg8%!tNTUnYKf^^R&7yg_9mNh(S`_uBa9!W2t35kO=~a-_oBN z3Y;mX!DJqY*;XUt$Iq>0!FIj{7r}5qlqX*xU#@R2Gk8FRMO22{V{46or(ae7=Hn-Ef5Ft>FvbE%WGT zXQmEGScKY;yY353MT?LOLl1sxaQWCv<}EQXY`c@6CIlaA%9CD3Cw|v0m^BAj+ZIu# z2ixKc10j2gh&v86?X308MK$V*)aR4oW<88XUXHXP4nDH4=2M`_F&yu_n4!&@1iGzq z;>6C7o`bkRwdVEMjj6K$?dmG!(-3;JMq5Q@Td^Do!Gi!NCPkv9y3XrQMyL+exG!6x zt-czCz{iD1U~Fj)FMtk?ZTQhb>(q5I#FriwM)l*pVl4H$D+#|4*Ik*CV6Y?KX&F=l z2DNtjTLaE%cWwL&Q`T-CJhyTZxgyi`%pS)Q)#LVkqN9Ywy`|x%zld4r7XYu6OWPou zAvC*6Y(;&T z4~gHCNuEk6^=mNE%gS>zB-?8&j$wjGvF~&u83ZMJ@H!*pcr*P%dxCS2dD-`;W+Yoj ze~io7`=I3kSOT3+P^V&S_&+2RP1j2+te39P^)=HL26JRA@k0tpytzL`n=m2hiQdWJ{cRm&65eVe1bAPZ7@W z2TOF1jkj+K5I~8~Gi|u>mTf4#fFaNSVzG5AW1(ER2H!(NTXO`1o;)i>w>+-cj7h#> z)d!a=lt!wUgYcVBbA-kEp56j8u>^=X9j*~=5=qjX@aMJmT^(&X!_YR)lY{s4?+cod zm7wr8&+|ft$HAUPYE7copO@ko_i&UNN{H1BgWPn^r-n?uhvKJqw(bPv`YSeHaIohPv)o?-P998PGCF=;z`LZpfNm^l&TQwRI~ zw1B;wDsRl?Cst!bkhNrUB05ir0FIL;vGs1q=v5yu4MD=BfUJIC zi-I&ls)eI0Q=kl+}c$C8+8LIZnup4Dn9t0`yOs>m{p zn}x&qqY)1oij-nOlr{J=g}8|SD4{o801OTNSUYDO6uv_{7P8^H3!ZZ=ZROUFXNzP1 z2j1tPWiu!0!0~}6i6*LmPU4JOR~M`y@?~-IZd-M^aGQ=KO4n-N$*HUmUetmF#EV7W z>FPxOM&N7Sh-^e9`QbDzydoSRDa6?+tV9;R_i9T(TB^Enl&hi#_Jd`y!r7qCtT&^IE)_Vq^ zdPY6O-<~y0tN@;Atzbz^QrhzeE^@<79q&>H9}`O&)@ZKGaV@Rn0*VEHg6I1Qr&{iB zq!wNu7VyJ*Va#t7Thv;Z?xPYyIPKzA#=mEaJ`%ElR(apQY4m!zh+mQW_*alQoc6d^ z=_sFShs9^Im*y3zG(?|zGFpA5NMXY;JZU5}Lsog$ICV)o<5k-CbfV6B)Ve#wxF=A2 zAfc-?%zEHQbl)q6bBxzrsNdAG-6S6UWCG0#)uf62&B-pwS+;sE;Hf$6%mSiG&mESq zjVe~P*4D?NpglfR`0m1d@uV)+XNuURI7Kv&zIF!_c8L|vn_bkArU&e%#lhNOWR-+M zS^W#QDyu%$Wxj0eV!~jSy%s}7lWdY2o)>5jMNq8d^@Ti1v;zB`l7_2N$wKDFM6>iR z?0X`BNo%4S)_QEwTm{EQT75*_zkIEI+sQMzB9axvOdk!z8?T^ZA3I@{@QsNLbYw-X z=A{Ou#Dnq@=cC=G(FFQAH#p{2XF=#`Cy>LctZeDY) zPBAuoTtd{9fgI%hc8zLrO>Xd}*Lam+9pM};yr-_IqMD;iw?T;gWqAxv|6_3(=^0(| zWcj!GxAbYErMiA_LBA=G0~LsY6Ar|yMU2Q$9`mr6J8;*4USCn8?1Gh(+;1-YB$&2@ zenf@-F+7^e*#U`0JBW01druWQq+HmoDNzRTl<_VCj#p<7kn7Ip`^00_k>?~`4qi+g z-kCQ=nD~azDFl>~D9U3HWbrnnd>p!({b9(j@>oh1NJCVY+~_-CY^6#$wT5+Sf;rp9e3WsB@}Wts;D2bUw2HJaK8nk( zu#(wt3@F^9*agH?UTueTrpq!~&?-m+lT}>QGmBW;H~nbyntrBYYpVZ-+8R{GeMT7H zeFyXXBq$Jx1kUxEU4(YsJi|L}+XTlK@}<*&^D+hd?Lea_!b)5GGhAAL`U{oZ3*#q? 
zFjI*W7ccPQwVtQgpTB{gFbqt4Dk0T3^{KW#n3NiXA|Qy~Hd3-m+p2*c#>*SSO)`6I zkxMYvvtpNidn1HmlB=3#ndm;+hO@0iN)mIH$vFI7?lVM)cHHb-^#ThhV+3k zbWZFD{{qP`gf?s*&y-D_L!*Z0+6#TN_O^+JB5=uVJkpg6bhhZQ4};+js2qN# zWLU1+;j!@`1octRB;3*^!@T!P!^~pdAa+uyVz*WK8p-mc*?{cQF|^g9HDQb54QEo~ zoLhS|PrS4|2$qcVGG1x+RVEtApozMfn>tfT&NpFgf+PAOu;l0Qer?+G|;P zi|jbID6k-j{pjyFu%!-4Y^ECEx8-CP;$rI}>qWLdIw(u}UtwvU@7g4)2UIZVR->Kt z@0EZqm6`Woql0GUt_N(BZhq*dRxKY-aHP3;eHAf!>~@6K9rpCZ6c2h|?*?8V{0wUNMzv$mbC&Axz{hm)p)O6|wsnD1>(<=%ZFgA(H z?EIzvq{52f7scB#ZG*@t zr}Bb5*eWo}Sim2<4)nb@iA3@4Gv3A00f@iO5K4W#oc0rv_k{3zwqDu3|EgK0S1O=E z5cUIw>7h*_8vzRxMb;e6VK?~JsLte3;^{4?g~|1$*~EgdG7RYrFY?C^jnTLk7(w!Z z%P*h&5t7#PClj^bme}d9s!i)ndyQ_w^KpZ#@BTJHj&s;@xTa^EB^HNCbZt1k5?2FQ z=BYO)0*)4y!J!{>A^BjN`yGIJ1ad!nzqFdXdHLN z%nP-QZ&>|^1uP{R4)YVMK`oRX4l*8#uMsDni^2fVwks>3W%s} z9-SB+iQ~iBu~!17_O*bt#NnBrCD7{2JH$kE(w_p3JK~=j4Y{NzwLIkl)a*1NB9!HI zB78*;o|q*AMop%7^Z658mFEz}b3R86W8#bXW(i#$G`^Io9}C8DHCE3TJMwVsw?S91QX>zrg&1T~CWk5SA5-&vnTX}z`7e12nEp14_(mVu^& zv`rCf6Zl-D$3Z+%A&7A-8n}ZZZU!zAK{wBW_C^v4SAAql;QkT~D|41;Tc3xpIbLiF zFXilUIM8)?ut!#e1MfW*rXHtjJ6ki_YwENgI3)j47}9BX zq{wihL-we2wR&~%%xMH#;bCVUN#w5NTQ@-JJ13*Cn4|U9B~T(`N*FS}Pru4$GOT!z zLM-`tsfJ(oajQ8e1xbfdqPamBr=!Zi#MoLLTC>W4mR;Y5+;F0kFr=J+*S{E@&5 z60WwCq)CMXUi(Slm6V1>%GYo^fD1T`nFp?9!>?-Er~uvASJ;HkgDaP51fDU@%*AxO zT|%5;w0Ac8t#4nfydD9l6{X)Vo{qVZu$)9(nXQ#CbD;Zo; zg4J$lLLg}NFC%*oTz)Ll5N|R`ij5p+){su%)M#x49-BwOB^c8_WAOY4=;HN@oV&?K z{XO}d`Jh}}lKT5;@ajOJZ45%4GSpE#;C!1pdE5fpLJ?V&oY_VX;G%a#(X!Jse>A0= zQ2u~e(PZY8EhIx?qcU4JGbjU=v=N{M6EO{I-ZWt)W|wz3j$GNPN~w2_0xIBV#6hWq zJ&{R7jvD5q*yJ&q7;8V(QhtHHeY(ovS@j4z^N5OxRC*zVQ6=j#T4cZu1~3v`5vL!| zbhA7Uh*92{od29O)4?BF0oPkLG{^yHqZw-+QWYHql%Mvjc(O2#48`1LECuF#V9s(| z5EKq9-7zaWvhNKIMm@Dkiu70Br(Xgvx{TxAhm`DL@e<&h7KXg zkY7h+(rG!vO&j#6o-C5$t_IR1MC@d*9i_orAa=KsunuOu3cp8Du^dSIP1xCx!HoSL zO3o)a0d40gbJekPl`N&QT7N7k+U%4$)XPz$u^sgE+(UQsH!5 zWcI^tKsmNXOQmg4MdXxRiY%KdZFJPYy}ZC92R3+g-ja~1NxCFYx5tQaeElM^u8m_< zXyZ#pl6NvX2oZd0fNn+;2t+)@Wl+pi-XxafL7X-u?1qIg;Gn@{GyNS-sn7@IF5u+f{*_FO3B$QMug9r4eoYZvRHb~#2|5_$h$Cgs7WDEs2QPwD z&TxPl;Z-F4i<`y=G#D7aonWuvX+sno^-=a-quv({aB2Q;8dS@_LgCsBMV_T%ub?6# z!pYzp*u_73LV(6He&sW!14~~TP{PZwc2$j+LP1MYs_IBV<70$t%8J%`ZFW|ygaleX zy(4N+XrOdFHq)F(UxN@8!XH_ZP2Wt#k1vadYlThL?!^3x1_9+MjVK^85dh~xY~qBIUC_uHgXkG-sb z13Qe~Kti4Za{F}_Yw`#0t^`KNHbD{*8uEs-HgzX_7!P74gkj+pD}5T@VBKlc`KJQs zXf&8-jvXzhWbi95mL%c*^SO&=CId1#Y#+3P&Y0FqL~O=Itu1iw%xVr@0)KS zRIz(2&EOlRmQlGX~;rYqElQ$wrU9crztkh3rRZw{x^cKvAm5BqHmqspi zdsvyTL9XiISL@92MWqouK_Y&b(2}&CVIj0vxDHDsCe^cv)-< zH=l#E9eq{o%gK|WA8b(=z&fnBrVEo5)rxxjbm}gbHd?RX8*{kyi5KmREK1Bkb6bb#C1A#Oug%Qi03=NM`oiiUN3gI~z_0*}5uxY=^wdaE5&~}})^|=r$a#@=7 zRkiZ_)Q@{(b^ogq;83nloSi0Ev*+NA7-(iVE)Ki+P5`G?-y+FdBX{|Q1?Yc8PmWfsrcy2<2Ji++%N#ag^AbPiu_*AnGW39gH7_W*1C8u4I0*e> z45H0|AD8K%UC&)>wKtOD&QI;W7A9nnz9QwyKe^!uULw!FmjGQ*|U zFWn&pPfC%Vv^o(p`Pn-Y0j?XQVv;f&$BOFq&7p#G)`67@i=2W*S0`2HxD08aDYU`7 zu+9#2&65`)bkfT7@}BX4hDm99E9ciFQ47aW>DbFldLY;rqr@-LxVw}Z6JuA&=I>z# zMnn+4ET_H|xPXOB>E^6zglKt|IKoJXOWgs?J>=K?{L?ogggw1leBE?O*AQ4VWYVn& zT;93%LYqb2voq}&exVPn0e#kIaHFS$4r3_1;p2?K|8)U({c;>c!K{*gJNKZ05Bg~3B+(x8q*DP+(mxqpe9 zca{n8J(e+*1Lo>Ena6C|Ft0U)sqDlSKF_=p3_%C%lVY+15{~mc!_RvlX#Q?@BZXzs zXR)mYvnF7aZSnXe2*CvXSTfn@1$WmQ3CH+_pYAR3+h56ylaXu6O61{Z z@09QSP#Xn-PK4_|F}(d6SU0hO5UrH7walnbZ@j|sE#QDEKdMtkdZ73N51 zSKnC{vK<`kB3)_DAw&=9!m@dI!2Iw&;i{?Y;z+T}ooD3kumJjay-hup6 z=+L*aduF9Lf%E(dtk7PDb-J4=ln$+Ba_xg0{1ynQbW~aT9x;!|c{tk~AL%(SRsfQr ziY&%tgZzq_{)su)Gca2(83%RiB)HX4A}AMI?pA#s~_y zx6Xn==AN$AwzfV&%_rSk|AGctzp6A2uO`W!A`QmGbqaxxD@%YRj=98@9>_j|hL(JH zoAUa$;*mieGfH$nMS}vwTSor_g^6S2c2X?6snw}rH}^2uTPJufzrr`;vgvI*5(Gxb 
zACiPnNeR+Mr7n;4YZI8o11IV!=&Imo70ZAlzY!t0d1$ihTlvgxYw;TS3Ov(AV+#B$ zg1sXWHik&&7Xt>!e)?i;KWdbR(ECVX>skAwdSroWe?$_9)A$wTSy-RKMC;Yf+{vnj z*3VXyzD%37ina+H&X5bq#3xD%Bt1DKYvQwtZmg_9R1WLdk2L4DT1n5$udn^7GYp|stm_y zw12))XzDA4i$inb$LkFfw*?eCoMgUF0ha|Qq!-+*1KIg)0+M{xv_^h5{ z$}K>=un8a@nVxEB(NElURwdDgfhWl(5S{a@a?_!4%=E5gz=E|t@019BRd!ePvZ@9e z>YFKM#x8lmaTtM2YY;P^OJP+mMYLqNhPTvgtf(X;su$9jRJVC5zGxOOT*A8}#j4!M zNw90OSzP<`%%SDqt5!f=6b@eHOkX{?r$$H zLTdA}Y_COXH6|fY_{N(MQJ8{rS%#J^xEA4qbzv3^o(;6j*Ud2Cgz=slJJ}-%1mbZK z&%Z8Qo9_!(Sxafp`pojHt|~;dO$l}wAB{QY+ zLdhLDiGkKer;^<=>Yjl$+A=)u9};k8rnj@v*p59*KzPYM^w0Mt0zZW8&nZobm2pdo~xfgZTi2iNTJh4y2PR%rtMD0M-x8LB5-j2kl1xZ_vg@XeDKkF>cp~ zemuWq`chbPc|1ZtT8wM2Y{Fv9pO94 z8yzBl?8IUL6_5Uu0M~|NrHhAE-B_vQkWzIQg-tg-7Jrtb2`ho)Fw2vFtMQ*;a+M0b zBfs!1Z}j;Ua}46c`z|tbt|ciH73Z%|??2qU7Ic&On?esw%k(3?s({ySpGmPI7{z7Z zgmFABI{;Pppg$@<`@mWkbFx}3AcT@mv)^BXjgH@@ZSPaF_`73b4sR|tYe%RM@szw( z1u@X;=?ol1Okpr_(o6Hm>V905!n93zZ+^*395U|NouYE8RLi9F%E^uA={aMJ*yZ%o zGcuCrn|1lGwl*X z_?;A7%k){s|Fpru+G1>Vl7;mELZfx-xgnczAK~%@_<;d)D$EG>*nHi9%}5=uTvk~w z9iqf0AjPW3BWEMXF1__jscFdwzGqp zSkB&RTI+W_34;(2Qe&qauEW72ssbCSs`>z*o*=ilauZKUT~_fnLoT0T{6q_e8%{x= zA@Crqvreh5yYu*%Oov3HWd#4DCXxs$R`h(J1^U+ReVK$q9MdWLY{ubMnlFLkJSaRS z)723wuBd3ene-u?FAvxruNIhevSpmh$ZKS7;-dNuW32V`i(b}?ftjDOPRe0h=iJ|y z{of%6*Bn5t=rJXhET>q^BLwKKOWC!%&||SnzEr}&)a{35+g2=4S2U=d+w@LmqUkZ&N7@PcUQ5;yw-A0HU=QrX zd=nn#n-pbvdMw`3dFk97C@Ld-k7CN-Om{6sC;7#cQUEE&%|ymm13I(K-i}@G0x>ez z!!y~KtZ2LrTAYvy?xk-(3Ry0a99Fq7@-0Rai)^Cy+$%rC7($m2?X3ivZ!Z#edR*R< z{V5}>?K9|fD|Vh|>SC;*hKtGqa|RFs#3}Xugdwm6NIv+1c-+5&DcXxQJS?@j9uv{(H?<3fwYkH=9=9eW-wT z+KaWAZ@DGVI^^GdmxyW3;pc%DO(hbbgz2^|S zFHK6MSi5JmO$r*8g39LS6dh-&B%<1pQ4&aTi}`ST7z$1JKobH}q>T?tPE=%Mf6LXU zy=g|TIwV}YjPylVJ1S*QD~4QB1Q#9g*P*r)6-iuwDN=;HG&Q+y0?b|7)D^#?-qD9- zuXDAiF0Z9{A8?h%xaz3&x*LS*2OM0rBED&f$dQ$%=t%q1}#H~L3f;NgH@ zaG24*XKFR&l}19Q(WDX)Gat@MU+v1PFsTM~78mo0CQ6Cw{i6yg(+F3VxQp|Lr0xYlT#siKxQha-Iw~eY#hnW5rD21qn8+2~WB?D8Wg!RG|F@`(WH7 zXGj-SaS)L_tNkT4>)@^TObpdu!#rU%GP#R_kSTao&k=yNUf(^(oyH z%cS&8G%yLqv9hFvi0>{H62z>y%r1D=;xE!xH=TAn7Ut$it2%zUGC>YdJ!0$fGPYvPkZFdNU{}Aw%rnvaAKsxM==-GX72%xj)pIgz$&oZtgjQ$* z74E3^yYcA2LW6dA*uWbc@!Gz-C=)7oz?w+{ksA1FQBs!g@N{|c>A?Xx94yquosmPMExU9Eg3yP*dN%T#USwhpv}9WG+&11J~%z>>#_d z+F!-?abP5<`xiL&e_Y4zgRId*SYW=c4G5h5W*Agwkqn?koQCa*BBfuFWe=lyMZFX) z#-Srn6~e0UKgZ|h=6eS@NFi;vruE5JhqUCTs-j!!zQP`A1N)j5yxD7U89DP`Rn^|e zLQ9&{m_lh!N>`(Rtp$VF2~0GP#N>^5=SJ4;rN#&pO8($H%CHFGg+ZtKMku%t32|S| zrn@?~3Fkm)VkD`&QHg|K(0J_rZOmRN@{X#<3tH=FlFWmsbw>{dmL75R9W_zU^n9HV zrKkkImT25TzrJ)wL?u5_OvBaEQbIY;a;x+;Yl|M$&U~Iwkjbf#w5oXCeM{lRgkq@& zkG2~2tsZ)o)R!PK^m@?gs|Pk?sI24Rg)cl$eT*hBEb%o*hQq~~d&c)tTxw$$)hMSN z(1USNa4at^t8puGds;$G-!x(x4zh$L64j^!;qx5k5DO=7!P-I7-)O+9uPtor={quQ znfd$P;~@TLxEJvScaI`Rc?hXmv;5}3!O6INUs@k}`PwcrBPjw+b9q6OU#5xB296!Ub;W41JGzt5&iTOp64fLKC4%CEWD-MgxPW z{0#z0iZb@_@Z@w$##~JLr%0uS7^gFA(9@zR(HBC4hWrKO;&gS6L7cKB10e_GrUHc4&#A=$ZjD~_FuaEGW9W1Rq z1NU$}vbknFh7y%Upoox83Ija5!MV+932%yoTjqgkGJG;9x@K#j4ZMRzGh$w1CNoY+ z0{0QXvAy#h5%lU7XJX@> z+Qgjoem}sj4j0{pEg}~q6-^CNGa@DWi}9D1@vSFp{ZunGZ6{&;|EmmDPZRy_)?*(drnE<9=3~Q!^G8V z#YArmTfZc@B@%c34(EO!t0Pi4aV;V1dCRjQynuZ#v}Z}F>ZpY0;kzxh3s9`1m@4{= zS&;wrr(G$*X<7x&AqgFMGR~DY-UB`2OhN$LvE$P3Vt35FPJK5x40!8>nu~ zQ9R*m^;?1YR;XAcb2PZbUUTt%I?og}pcAeOuT{JT70u})icO@tA$<23AX z3A)5c*4&4)=n@5uOmVJP~lwZndJ8hjp)-PMM>Oj#a`|VM8Rjc@^|YhQh2~2XuwT8+10!tT z3Ixo3P2??&@bFY?BA^|694uk^X%sDKmXE1}HY5+*)bWNbd3wiEnw#gsA+766JNie0 zgnw2gw!X^~U-?;Vgy?m;tblRzYJh(fd{NYuePLz#c)-rRv(Z#Dt$bKwS3fDfY%Qh3 z9>sA0r;|~Mq5U4+yJa^uM5DEtetW&H@ObG|U1e%1$E=V+vJ!u2RmV8ps~`@`*KPj^ z5_|8L8@g5xm$p0(JfVJ24o(2gG4D)31)qy|-=uWb6~Z)bz*a+DT}qr6BiN4Z_ 
z*OZC78k<7UUK2%3>W1GWIC(+0KHGYz{;`V$^y{nz0&6zTe|yI}QVnICby`(^+u0a7 zm+@SUc8J#LN9c1wKp6P*@Tq*K)JL+fFgyKg)t3?LxmUTrsl@`Eh#GMj*Sd9C@r5U_&1&wc}-_q{1c`&S8r%7Hai*$k;zbLH=oNXuv$rdpRCOOM56Sg?Gv+|({wq5Qcl2N1L$3j zxcZhDj!l;en_9MYWqvgMu*U?5AV6dUzD4i#qBCYsb&UNR%CffjyEfoE?aU?u7Pi zc@n>Kf!$nZdcIY#7ACKn-smuictI7j%mcPV)=MaV7Tx|Z#dz{FmgVAC0cQ4n*Ugl6 z3Vml#Wn7PpXLCB6DwN@)E@|)}F^?9r@SL9>%VaAwZ zvA(!)A3i);3C^=k%v=yF^!_v+#9zQR|2e-}42lzL!xAX}{_}vGQIN|@MS7n1oP7k* zE+AB^I&FpGCu8SIZO!G`KF-M~fSq`R3L)iO9!vLyvbrnUZGDyv0&R(@2Bp{jYHYE7 zE!~OEqs&+A44#>p}DjHt3?pHp#IykSp%!uSHx2MdY39cHDvh_5UFT? zAe|ghi7~P*YH|Co#TFbLPqSX-)5-O4vuY+fq*d2mQ$+q$Pr1ZW zwjk8oudAKO0(Rfq$v$l6f@Rag6r_eqxz)ihI>Z;eiHvugCrCAkR)w}teUB%o@n#B)i zN@q7)Ky8U68{EL7aBiYI_5*V(tV*@4`9R>d0JR=E$ApoQ->#7zC_T-(^ik*+?Ci47 zJE{?n;7mI^24+_n}Q>OL&cZ-fmkuw$Y08b=@X>$(z#H_n-x4{CTZL9Rj({U>1Zo zCF0|DDR`PpFT^jU*UNq~5RJheO*>O1zsyM5rDWb_#095@ir_HC`d3fNa>*a_sJsUKB#Smqk+XL`_|LmN<;h#8PiC_U*gM=!o9aky5~%`7dIi4{OD(`G2$a8OP~9b_ zmDzW$SYdIG93=~6O|;SE7Z!<8lr7Fhe2swXbVRPk(7&b;fS(X zYqSEfU3P~&2pm5|i!#RfT{h>Ov4oSJiC!UY@Kwr$(ov3G2nJ9ggKwr$(C zZQFJ-XD&|F%=ZtvsxO}IUaObvklT(426SrHkl2PRIduxc?HY&7|+2qu}_W&hC~nAL02Na zrFl*V<=wbh=gf%oD3B~wUvsUM;6|;*pX2+#7=?$|Tp$LqqY2!v7x(hJ2K;Q^KBw;^ z-lg_s3MW$#>BE2DnRRFFuHi>z#A# zCeMQ*e5atC@sJ%l!AC1)-xUA2XCA=$gqeky_OcsM1N-W+zy5{%CqAyX2KC%`20cM# z@u9HKR>tH39sBiOeDu$X!k+I5qfDulH=KT;jYBL3HfrSOi~bgH^%2LfI(YNN(U_6j zfvw#^RgaqVb$-To5a)@Yet|$~R+975?y6RMEs3vp9d>bM&0~Tml@#0I-YvhrWNrud zIuS6lT(lsB<3B^0iLPlgRIh7_hQ0A6qB$mYYmyid$yg+8V# z)yg@Npa)4(kElOWF0`0(bM}5U*j1XWFx7lB@c^{{mOxEMN^7bc6+iSo5Y6SwT!@)m z49BqYh}NX3-fL?2lMNz?SP|>f}Xr{J5&PAttQ)>>+&IlOdEj- zPDwP$bF>=@$y){M!sr}G5Fk`4@}$mvyTagqbku`+~B+tJeGlYl0dQ z;D3Jlprj(OaswTRAQL^m3{U6R7r>2pNgB$!ukOPXmCwC}5Jq!^WnSZ9zasoV6^ZVh z9n*p4Wo1okVNJ;)p2$Yt_uyGZQ%za(ONfR~a0T-nRJIoAC6s)WHem`NxUtmGu#|i1 z#C@cUvA}?;T>cj%e^{#qz8S^&w`e%bW0>IP?$`V~H#n_Y!Ev|VA*mg{PzzO+sWGeh zD&G2^PR43m7n)0aB??_tTGO+6J04~G6XKwV;NrhvS8^$)zn-BUhT0Chn9mde6KKF9 zHwoUyCy;|W7cd)$3QjoI&>IFoHF+m%v0qbIZiA;Ziv8{;pJVN`Zfg$VcX|EO_^dna z+AzB}6K_A8D^0x{2iDbl)y;L}vFgFEkLWK|`ITGhK5xI%V1pNKsu&WD`FTGcp&II` zcjc-2H;Is{xe+ho+Zuwv)4L}HR-v>%4GId1L(3wGvmEZw_@MaN6%f@ueg$hchiP|K zc5F-Ue)@`xjCfpqz>7(+%Gzx36X|cv(jljCYenCWIQstd)zw3VPMXJ~|NGYV!(3{Z?XM+FyM%vXcL=?(IS}9+E@#(4lr5vpo zMk}Hfwvn=XJOF^mh=wm*7JY68_Sqd=|A&^^?YsZIlXv(9Q0=?Y^39>|%;B9ierfh( zLM0N#JXEIYgOyC`__F#XDBi&}P(jkrd8SzG;i5y!%#n#>x@tqO}~-JkXD zyB)%TVe3i}B|<%1`|0fLw7>|Lc_kZPjAC=HEJRu}oDg-`KA;5Yz;9PVQxE0QCXVtm zH0KAW@mANK_B8)^w3o+Gm9VT)^=W8%j<=4YaWa?v;SYmwWcnuKlQ#(xH)3Jfv<@wF z62SbgC-^Q81YCc%$fkCYm8JT|s;@Q)C+ylo4F9#iabD@19HRk#0SwaA<(|nt7i0DV z2R&rfi+jez>=2{jhBeAkB$G??LUOhg$9)cqqcn{5l_G>s(NoikC>{>lini6e4`&Vg zxkd|x3PIkA_#N}gX~;wqDZS>%L}wl{rz@O#RtX@CKdg_$T{?`urWxaueW|LDh2RlT zCcl&rTX`GdFb$nrZtZJ!7f6=s-VUTSvWJzsuj|Z-6XbrFK0~57G;h6n-aU{(rmVVX zKCI|wD57+^ARS?tBkC=d+ALCZsHIB1#on+#$2x`cwc>1Sa`s%NGYvmqJYgb?PRq+W zfjYjoJvA9x@dJsIgDYyT!m&d1EjC#vZsT>~{1lr>)af+L2~0-bHl?o#m80Wqsm@JW zWnpyD;Q;JB`GE;_thr%`{7P?y(^EPM_b*$ig-#lqoFm4Jt0bK6A&R?kyp11HIKN^; zEA=G=r*im!ZVLW88kgDui^jX&_`1@UiLnlE>EF1T)RH3;z2t$)0dcr#W@UCkWrRB& zQFJNKO~55gjv^DXX7%vq=Zjgi7z9OhHb^}U&J`afjZ86OISv!W?)z)nrc)tt<~$_IyElg>kpYTG-0kYFO-S#?A(VAS0UbBJB@?{vR@ z_1Vrx9DZ0H@AF5&$PydHq<9ok(qx`hlxf4S!h%SJ628*fMzyXLZ8lgdX;R+{860tJ$tMTQ(;)l@|Z$#tiQPGz_6f|^an*9 z69PfF7dF);TS%+gDpqsI)psx-an%zS9-n4MxW@El#yOT&FB?RBJ=eAL0XJs1fxFHn zVbT&pAo#R7q5EcLXdU0&Zs%uArzmXROaUkqcol|1<8LeoA*V1o$6Q=3UMzJ~ejK9| zfmw?|ke3|zW1gX*c%)5Yz5#XFEnEv^vzI>(`?%m0FjlE+dK|NV0Tnl?#r7LT&Qum( zmwTOml(Q)g`!53;Ecd+=Z;V+O$^9?*N01vbWh@9OZn~+M?l3$9+|O|r{9?e6UMRWG zWdxx~>`|ZrrK#vQ>m!PNBf}T_0F+8b!C2-@k+%?yXPo;05s8nhhJO$P$r`0mfJZ|a 
zx?5g{6^Z9qjhVrWnGrghe7>zQ;tm$5h~I;$o8d{cN6!np3u|bEILg+%*$KMzhI4B0 znAm4^g~pT~=`~M)?RiohjpUM@w$z?VlESbI^K83f#|78{Ja$4X0|gk{E~!i!+Iwl| z(SjQ(G*6bzFvsnd!Et7(2?n|bkrArJwuk>UU>x1JHJ;)GGf6hdQa>>R&$47i$kjE5T2}sa&1e{Q*K9?cPbFE*|BU53tP6AF;(UyB4rS87601*M?Vk=o!?o$n-vcgX zhcG@`gVl#iDM^KEJfm8MkLCb@a3-mY=^zt)W4&t*VG8{K{TM4VUeteay)C$&O92bc zC_62sYn5d&cz^?|=kF;46RDs8oCDI?zvIET)X^R06sG$5d0L#~NaCI-N13P1_Xi4-K*hO`zuSy7HIjtM<}wo)TBLfVr(EZ~11tt3ywbTKie`#V)%S#L?O%spOtk0e zhbI6gWLsZ%+8t!}>@~f@7!A4ZP%UIO`o)4q zVYUoT4cFo9ih-J-IX32{RIe&msaOhp4nfo+t$VPJFOkr8Iic9P5bo z=sOI-ygnWl4kT72(vN0s;EP!x`T4Ye9VUC4tIxlO1f5w)hLQO6Ox5|$$#cW zCzUKGn1GkxA;kDHx;>HTd9x#!`y@#5>iU|_?$%>h++-P6O#hjd&0sQ6MU^v#=1A^! z(+bkS+1cK|mfAdB%CQgI3D^$ApOlN|{rrz48gas+)Zi;M(}mWL;3jbGF7+K}s|LiH zkHo>dyzvTx9q(dOc8Z$F)z3|@l5?14KZG`)lAphK{O3Q5jW2DXOZiPu%>N&6s_@}( znkf?tCcOo1ou80B)Fq#>J+g2q6$=quw7E{&Nz&FbUyn3_wzVgV1u^6|sXoHbiL%1h z+wYeC=R}vwCu+iR>30oVxk`Ic*zK#Is*4RTy%61-j+#dMqZVZTWlD|wqoz?che}KmapoXSv zzH1zn)-5!lPCQv48v^FVZ_~z~;p=GGIAr=kjw#6co=5$ugN*05Qv7m&`7i~6jIJBx8SHokN=6==aCojXZV4q*>)XgpT0w~@d)bX${9iK1=Ot}G4|#Cb>&_EZ+_l`KDu;*y*A+$tf!%I1El$ z-Z#heKJ#gR_LaXinlEJU;_WTfx0IHoQNpx}|F@M}UB%Y$E~O9or(cugXfV(*Az%QN z9k%HV9`>MMi$&kAId4i5~bU3#fRcj^pXzlrrz{Cw?XE24#_!oqc$ zd0FnrV=%1R2CL;Ag*Xr4*ycLn(2*g!-ybT0lz;rGi|#&=wnB@P`ytYWl_Wrd#blNn!V4<8AOK`HNks;ZXqR4oLuhXUIZ#9%gSn!? z_+lS7M#(3t$AORQ(7(htP&PU+j1|K=?DZCQSG#^$>Fj3b074wz$PTXxHEm3)q*ibq zlgC-OO6^9h#ATeLj}VOLXZN+*jR$8W z8a~1-seR{IqeBe3!)MkaUhBByMF0A`_wQ!N3O)%!i>=}?ZWcO~Hulgs)n*slcb;C$ z@$BB8k-*zD0uWP0-J}3JK*YbU0SRJ%=Y(zv+GKb%3bLc~iMA2CTot=5P!bv>5kx_s zE2v6Ut`?4Dp_fiP8i(nu;0qma!=&s{O6_qJQEgjV@3ZoJ&!$b%OA^vASmHr#pRLaX}{xKPC+O|N``DltK zH3|&GFble<_v{{t9^&s%@79qYi~aA1rZ>de@cJ-8pC=SNt|5kn^to9Y6BlZ5dFVXm zir)Xrybxkjy|UJ9gL!2-cg?Yb|7{;+6BvhJ=l(E{E-DBk#GXUbtdcT* z+%8&PfmV= zw4Je5CKh@LPVJnK&m%9l2l0!vf+a%*G)$N~+UlTDFeiuyGIREfp@3^}qJ=ti{M5%F zfDV7T5IHJzM@J8EHl4)3u%~{CSd}DXZiN4M#U#cVj$fswUF=vHo5g_9^Kv%v`gO1- zn;MGg2uwrB$6P7Fo%{=@wRC8&WJEq*j_SoJ##qA@M7!$Z_DSU zXPOt`iHUw&;Ax0XYj3Y!y218>o{ni)d}9z`R40pG3|b|*qKl_4!_?02MMvNi_%w=o zKNfRs>YGGisZO>Y1G@&(9ae5Qz5oY&K!iim(B-(cVmjp$!kGDMpSLZeS?k)JqjB8j&gm+Z*vC5 z5~FNpFcS&mL}ED3G0l#=$&B5@+sCfEQiivKS=e9=~331#`p`t6PU7TJ-QL z*gjO_Kd=@kOUDZ%xctl!am(YiE+Ip$X8wLJb&}RUMHdht;O+ZXRs^3r9fde5syJWf z&;JX#@g*+JE1^u1K=D;^R$G@&C$oEj$5Hgf?l~)i8YLmf61UIZDx*s}RRumucRGn@7Ya8*}+MN4?6f0d_ZvZ3vx?)c)`%^3CCWvgqtuB*7)yl$CK0L@um-V0&?F62G1tbviLx2H46}5EKcp(f~C4DS7(DLu*-t- z;Rlgl1*$CH%JwAgxpICF#UVK10d>uuA=KSA zyL&6~HnV~hCVrIXiNH1Fw_-kAi&$vbW}uB%WQrCcErf)ulfLhxAY1rVv0$wfy z@D~N#c?D&RyH$IEM@#i)c?%hn65$^UD>!q^f7ymhO7n1JW979!XV?RFK(YU^)BiAT z$r?B>(L^%+RtT5)P@uk|e}yYtu^mUg4CmgKH2ZcRWaw_dB6=tY>jGrG4hCUAf$K^= zu=VgCliRr+bW~m0;1nq3-_{&wOtzjxEB4CV=rg@zddC8)LSP_*Tv7fd~9zDeL#fvHg}$zq1n;zYGiu`wpZO1UYj5QZBh z{DVZ5Cgn6ZgzZQ%s%r9!v|Fy?5U;NoB=iQIk^Du_9icZsVD!o3RGaPSG z8t_Few}2>bqcgzZ`v4m_-0^@3%(mZIb(8f2ShLJ3q?uTn3+-E|_|`2W)p0;r1Jx3T zb=Ch)Y4QM;U=uu198EjZe-gQli1yeau*C+ACs2c%Q}&IF!+V!q!!?sepwv<`g-HL| zy;UmH5}YHxG>rqdP2B_cJ2XNwpfu*~7B^tVR(EtL)p?dwsRoSrIoCU149vf!5Oyx; zg?NdOt6>bo-j)J~CAVN+Cq_551b_x4Ji1s&dEU4*0SR7oiwK3CG_bN(zKg?9tH;}n zc7n~22R~;^9@YFflc8c3q+B|eLhzNqe4ld@b_gl36UHnsMqBy6VM}N_JC9&*rTrTq z`FtNI=Qt@=K?{^mOxOKKm9q#r%2W%CvU`1@#`Jg6nSm3ytk$nHaUPShP6lfg4TJ;)~P|C4f>z6S%)Nv&mttB|TgT$aMFAD@GbebjB_&={!K}K4~guzZ~bPs~E{r(BB}Q zzgA-CaIx*^9y8^=8gnim#Z0LRwjTDb0}pvCglu8uNvS59+_VF6sp`$UV=j~JgMrES zBxZv#VSk7FkRc~=w&1XCm|%KZgUHPO50fHenk18r3~9;Jx;hZroZpH(498lD=p?8g z*KU%S!=GQ#!B@}W6C)b&x_}&}Y&3as0hMJRAA~n_=n=OtnL6K7Vs!Q} zoU_*WdaI3$^79;}&LILLJe%`3u@>x2%r~b*22%>S>N`f@tO+SDxzruTOutXTT)lZ2 
zFoc>3GwUbCeEQ;?LC|u7gN_G>?SU5~d_RiUX5`{}$4*4DjB4^I0_K7andLujw7lAV zF^TkI#*Xbq{MxUh$F!?~;tFEI0{2#|FH<4sO?xz-C$b-QiEBw%CBxtVh=J)j!hP*7 zM)&ThWGl2p4WG{woQZ0ow_HiPX*I9c4_k3B@R#77f-UeHM*+gLn}0H<=>k`(@x>pa zLSRar9Gh9CdcJx1+=;h3-wND+ylLKukrB+U3@3*6R->jI#yif=5`12bF8y%rcNWEF zO_zuRYKM|EBfLSh4o&+U$CZiaIu3WzU9ONRupP;zAyeRXe`;8MhyxHg=@!!{N9xlH zTe{bLU$Y1N?6nmJ0%YBJa?=SCeDpE=A3CzMC{}W@=#PUzr)0oo! zHlq;BezAms%f$Nt)z2=v<9f~l#-kc)#!)odG#h|G{wo=dv`lcE;^-&~A#hk9eykJH zO6m4M&nOA)s;#tiC0dW|r6m zP4x|JlPM+3tyS>zWfG!GI|-4vBCsM8S#*MDe(#Rri)TUH48x@q zcQfUz@|jus&abKuZa+s)nXI&F4iWgpoLJ73~` z{5nH;R90tENSUA$)%X3-br4r@A%$2V_9bUj$tG^_FnX7p$<7a2#>q}u(I#J#@GxnuRL7xEayL` z-FV|!b`6bO^&q;nR9B8t_ZVB|Z;nTlU%E>MKtk4Q)9kJpd$L+rLTsMxgn z!bu}Lfbt`Bowb$t@KETkHey7SB;&u^*Xd4ylz9Hm)qTGE%JAvH;|sB%tJabz2S+jo zp78w=%7yfYee?>yuipuxnf;4`OZ^$;z)%H=hOo@61Kdd+G8RsP=_kXHyNMKN4qWix zB&}vdgIP3@F{=i$x^;nswSV5ZaMMdrX_GnSoE;p{9b=4K86a-7eORg zGfW_k^ZFX1moFXNc9W(p6@L=y!Q^R&3M>XW>R=I|U(hY$`Yv^&0aF_Dt}-njYNseH z{`c1HM435Qw-7z(W@`|aKXfwL=w~RcJU*+@l!a%nlczVq2ySXYKhU+F$woxwH;sO7 zY6NkMLZ$_Hz7RDhr*P-nv~=-~s=a&+mUg+)yrE~vS{614s0{X!3-kN{!4xAKu+Bv; zaB+d-2;T>Km2NjHc^Xg3mD5^!tYnvmmp8+*!Sp4noiJ17`*garBys{ofD@g60p(o= zz-0{@N?un6n|bDsXFlimW?EJ@x>&fmVWATQ@83N(a`JiQpssO@)(tzNh9)DnTKpDw zL1k`)Eote@Zd*=cti8nK3&&F)W_Sz-*C))q^se(=P;HMpeSCyCKaDgy2Wat>d#SfC zTAj$#Vtqgul+gKq(QxZC(G4}NL|`dvUsyEPW+vIca~ODjv%mMb&Mbd{i5hela%<<# zCTehAWev{hB^w}#NUvSos2eupGJ}acFY}o08@v`H$ble^oS8c*#S1_uE`kH-4-lco zzTv?jS#*C*M@xtXYLj`N!kxPeud+tlpI?oiq;^{WKV|^sdA33M0I?AUR=FiCeN4?` zprxhoVgAJFZF4xA1Fyqgu(Vt&phyN{6mp&Hv23%3ZvNaHydKk zd8*D)ve)RUr+1$0p?8pK6Uf%fpcv){0IgX}Lzw1@EANt}9?))j@{aIrDL2#qpatLZ z+!p>9rMt&?!4zo?~9639T6tA|5UdTacOAlQb!$fHLZ%b;}(wqjv)Z)swwS zvYoxx8ghvRd~6+}3uf|-QuCicN+D&-=(Md=Uw!u}9&sl`hOLF|wNl}*u2SAvIEd4J zw=<-#R``Is>4(cS`nsM?y3CO}d&)*4CnEUBt{@suf-tdE`=gYbBYzhsjN6wyKxMm);? zNHU1p4hBQdsGf*)n>QK>I>}WZ)M;HT7UN@ZBcrZV$RK?GSyoQ&qxLW`$HwyNVyByO z!Y%@P%{sa7iQE0FN$Zi3!Q93BNId^!U}H|Z@_X*)T){vn2vq3S7-L&AL>qTy6eri+{4FfM%{jlH@KO-P`&9eACbYy%guUzGt@n zC=rs_5-4@Ie)kv&G0(@z<%W*5>lVY1q`ad6E-ouCGF@fkXhQb6^JmxmDL{sflywkn zBgZI|q&xS6NOpU-VaW^|0jG7Zb-Bp!R*k3P%lLR>f8puXcMHQv3EM|Ud3_PrlkJwh zS)ppAFmZB9s8~Y}KllAdA2;xSOJ(CrHZcJ5oxt)@^dcfGSo!P3H7ljCC45O&yJ zapFUljYF7tl2H~%ue6$dy!1c0j@BEvTbd8vAQN6!yfokalvL=upHA~pZJBHK04qmH z!qG%Fy0_{qMx?;9-*1FJm!|>K^o$*yoQaLOSQI^~&Qw0WHP$FwatTGutzDp9sGdc? 
zIPvjp6{7hUTP{TvBy+<9=Sq&cb=fr3Mg-#_ijomwU$4_0uog1vc3X3y8~V!#R{FUC zJaQkIktNX|VJ0WNEFi{*W{=IHa|UCQeMG1Mw?q-9PEF=j0J| z)0}Q;yc4ZuI%dreA&CY>ZO%(6HyfrLQ`^fY?S^7+p~GIR2Dnq*hAL1!q-Qnup%9`a zVx*7KNE&j|TQrYF7vB@nf-kR$@Cfb%M8*&)Ba_Jzi2?^i5HL>qsr*S+UeoK&j1 zSV@ zpxRVa*ev1rUx5>Z>Qxi^=t?^+JX{tRW@kHJr2ISp@(!z^#d(W%TFqVmr7*AM?Qg;p z+uVc5k3l5Iyxu6iI|`#{x%3;LtP6vuY0vWK(rn;i3u$LH_46_g{MpI<0`5UnHA|H7 zBCD7~TvI};y*xcyn^EWCu0W}u{hBnugCVLb;eFSUW?s^hW!$t-7%s+IKm2bRsCJRE zh%}^;>|?o$zBN(gp}eVt8p0CmrYbAl%0}a*KxH|5v;u^(04m-la1E^F_@(FiAx?jP z&rxdK^gkjv@?$u171X~Iv*SSOX*vU&GNoybYhic1zS9Ja?7^Lu0|5AjGhgv$K3ah4 zyx#(p*k6vxZV2twqNR7(M}it|r>@Gp55!csOv-8_=EW7^Y@g_|X}ngXn*I>*p}680 zEYDEx6CaK4?oU!&Ev zo^>b_ipOBK;nQ2A*VnUK_ic;j0s?ioNLZbrPD`9LPvCEvVD76m;aI60iOkocon0^3 z#PS!sDKUHdg?YZ%H#B2gOdr$jkHR=Ehsr0&HDU<`zaKvTPM?3Oi)lHy%t3HnRQ7jh z30^63Q}3mn&|PgIEi<`E=En$vhNYULa5Ujnc2+TtvFe8W!Ot{tn?Ir1pFs1mPja*n zTH$3;8|}A$wocdrgkR$gJSL<65Ulb&PD0=qhYv2+0PJQd5|f~zt~g1Cj;(h{`0XjM zzDB77xmQ4=vtT*e*!pqEn3WAXgp4|59C8T;Otz81~lTe?(m% z-PspDP3|VF^(kq7PxO5omsccaERR%1dTP_T>k>a~LsZzQAy&Dkyzo)!(JQZHo)6lS zLba|HSY%V3|G0PI94(QHWRWsa5U#F8rvw$ox%z`^3+kgR%PJ)U!MTp*rjkKC^960DedxJX3?X9 z6J1?CS{QYt2wOGS1Re#Lmi3Fx3npI!9^HGwyPEwhg7&Zn%5boL#UVbK5fZp$Uf*-9 zP^QQW-Ke;jU>ZmGi#-U&m1W{Gm@P|MJK~yFdlztnaU#C}(K#O}TAG0Afx?Biu4l&m zb{aIdep%~bm<5h_P$3nabe74jU)RJ=Q9K!&1lz=1F7XFNR@a*XJ+fW8c-n=s-( zkBm!HrvtH(j&h*>r;SxrhH2J=wS4J@guJ&@^Fm|Hd{FRAdx-X;az@jd*#^em<`K@&Jvpym&=3Yo_ZY619`zKqG zK>NbLZ^oFIC~{<+dzwND11g@~|4(mex}9U4T{g|J65ZwN5TLjAai8;4m~0fUPJR(h z9pg0a^(GC`mxNVu?7o8;J!;0lj*lBy5c%iO!NBh)(P&@;`R<*?2+$Tgc?nJ06n+>w{b`J+n(C>pXf_Yj|-|dN7&^jpA8odaEGisvx_T3Kz~>+ z0q(Y;;nU`;C$@*!{^sP$gha^ZU4&>2z+>n*@7e`{QR#uvn@4O15{&u%!ZUap*4hnR zpaG;~dK#GjJzq&{`wjq5xNpMiT&O6PYD3%;5Zj1rwE_=8coY!imv7@BlM>ijh5Lb{ zv@r9LAmsQ}C2EipyGA!WL8}Udp_%~(-bePTY-J$JXs6P;F5wj9D5bQO-?H#aeV+c? zCv-o?wS=#6bHQ?!RvsY}@XvuwtuguqbW_a!={3vb0;Es{(}?3{m=Lb@e7($=2(M7p zfCz98xgW8O$q}o&Gc-~d$_p#O?=cvZhcrr7$zr?K66KfQM?JL1I-`EwrPX_9Mi8jh z6R{}hK5E#5sxK~AHc$h0iAHmq8~VF3Pc^rT`2kGPX15UiHqiuzK*D^mM3BOl#;ry} zNI@m2T2@wdF~je`sGfJlov7&=n5!(b!-NfeD6pxvV06xW+rvrz>zdFS0cHmG`>*?jP>KAgzxmed700#0 zI9C87Gq{G4B{{&?xj={%-aT}FRFOT}sEdmhZdaok+h0xC?B=^H<;A>2HzjVu{2ORb zK}Kwf=D+txwZ%9MO0wmUalT_n#f($_g3D*!27sPoehhN&pn1~D#k@nrjkM}MrxTlo zrN$_n+XUGgT^2^Dz*<(#Wajj4zAI8i>#PKAl*H7ia2Kduw(0ghpUac`##pQ53z(~w zzR{wEQ#$VhhEh`dOTKw(Y8wb9uM1>@KR*%4S*M7pi(~G6R17zx{?1tp+f@K>=3C>~Y3QKA->xa~;LHX&bK)YM8k!ee&9o*4QX1{H& zEXj&&o2AqkJ68B&T_(C3q9$|LKK5G}3t78Nf#n0&f9i`wq0?60zHWCCqTSQpe?kQG z-KUbMD6uG4MGtvZ;TPgL>2QJFdnzK^ifa})pOWh_fpT(+zSkx1QcYCQI|+;Rl(_8l^7z04N{XJLn-Cky-zaRVIe4O8U7t{rG1@U$Unj)jn3k& z4J{o}etUNOXt@*xF-V_zKeSeBt^hw7iPzP#X3(iBB}zv)m~8Wc3@e5UX)5}@lSL|X#`sR1 z(KZ2<9A#Bqp$hJAnc+;p=N4}udIfh1M&9-72uHDK`uB>kr`zSlY=7wKXH*^gyF{AI z_43@L?;onD0u8EogjkU8F9*sbYuJ*St$lcdi-+X{LD(!y;F?_wcc$7{xkE1B2v1J9 zsAtqRj;?v)feDmUi}X+$Pu1Khp5{w_Dv@|<)%6Q#LyRKFLt2^xI=$Lk(T8NhmxGuH zC>7X7vY@{c>PKvur8A^Io$t~?#HN^?FuG@2o<32fiB+Wg=+R)Cl^qy;MR#&5Le%gb zbyb>5qEWBk!;QamwM@fUmW&p27knno5g_|SxKs19Da6^y=ZH;ziz6L*p2)7tRCn=| zg;HPlA%TDk61hw^?B9)b7nKBH?P=n~7|r9*et(6I(B{?xS^)^oF2fg4Rx3>H7tY=! 
zY`fP=(-U`Et)%(&go@@jJ9UP*ETU*>MxrtfASbee#wGvp4^;-pUf;CA-~ELrP9;-_ z%d=#Q-ntfIcJWIENUZCscT6&B0-b!z+MqAa`Y==DA zqgk6`{l`#3KaYUzCZ^Yq=OTWoJm*>MHx8{2^j zqxwz@5MxL4YxZh5hNhNJsV3a<7DIZK0H8LFiFQ_)$=fVdxeB$m0isjN#lvrBD~F5G zA^5#AVzsOTgI(`eX6%tH1B=6}w_i(c-0BO1i>tQeRYH(fm6ek6my1np3Wg6Wr!e@S z@pw=s?~9_Ipfyb-9qthmwG_U4W>~QE3j#ipqKAm%y`H|c7QdU)T=_ITq!*P>>blfZLsd1y}S_$!iO=2)x2oOCqrPP+*9 z4(FaOQwsd|g)_0!{mCh4@sb|`j`5LeY7Hn`%)sm|9Y%Juo09CO47AK@_S#+tuixw& zy=W3s<67P}puaD&aKaZaf|ncu_~iFMMV%4yBO57clCNnI_I-fwgxMuQJV+Rk)sL?R zHQ5Axoo|T6GF^f-?2zz^^rco8DSnA0xJRIX`)_4yDp>wobZxLLNcYlAzCJp;Msx z-4*=q_~D}wj~#tH>FWoHMr7~Q=a#BbnhG($Swdc*uDpxU{3WgfxAAa9I;#rox3pu< zF90Wbia)F*BalxXTVrxzVcf{HtOg^FntkDArnm0=s2*uQ!Ox33TD_?Uq^6rarTi3+ zSyRKSj?-NcA=#QqTp2)30S2ofISIqZXJI$s}@R$D!%PTKq{#pe0 z{S3sRxdyyZeja7QUiA!tLHayQSB@}0{yz9<_jMR}tgKQIf3}Xj;*dvWrHX6SZU|MoC}L_+VXPn(CI91;Vs2M%pJiDO zz?bUf_b&ymeUnKT`ggDrC%3i&f{y}40II?+D6toCEQTn#7f7RAoiHo`8c_0?7jhs$G&E8`MuHJ;HEAy%{z$L(opemW^Z!gB&tF)NjDoPUwI~96=%$>fBXfIq z$+zX?bgGj@3#+>YlrVSkWM-{&go&ne55Q&_(_3<&#J^>X%f6{eAQge4mMVk*ia;ts z!sx9aF!cPJ_Q9-vUfgNqYdem$2p6zD{Zu{2?9+cEQviQBi_0R__wm`NNQSGZggZ#5jr#{0l`d!Zj`k5Ijs|P z`L>iD^b6K))c-7u{iCo|Ek)I=aJN^{q`p%Sev4DYJ;ihle! zxU?*tWJ?l$wNzGwF#u}y%05G=`DE2DXjo`t4H=0mdHboXcG{XRACA4aVRjWicn^Z= zDT<{{2A<%0-fuOHr=5R22K)!1uw24?LeG%YsIPKjBQRTs@Z%EDvrV#}@OrvC=&Q-# z6iiG&UM&RRh#Z8}Ir=eNqs2a=(LD;-8PbR{Sc#cWp^e4k+VQT>@h#zYLb}pz)19H+ zh%C4Ipb!K+w7iFDElQ6@ZW)5J#>AIpy{c!nOml(~OBdU-gYzrCvB{8v4VP=KQGcqI z@ai(2a|rLXsMh7r%cHoZZp2=HH~QN?fkX!a1>(d$y*CYQt)y)jS=JBEmP^3KHbY(U z*>)zM@zc@wk77~070~xQxVs!c<-~{O{a)MIK-PDlnU=Gc3tau~9Mx#7N0@M}3BxfAg5so{TwGEd*|8}Mh@#$H zX4SaCFRX2My_+40b=a4GU4{F?`z;jVwJuEbr*CU2Ie(roTp5?&5Ao;pCV`oeIZlCb|NR1e-IvpYkd}iWoDI+u{A|k@ zet$K*>yy^ofMIpE>!yU2W_P+9xS04fUS^Ilum6`UALW=A!4C|v-FDF8&Js8s)$-i> zHMW=`c={kt=_MrXt!t|P&H;6~cu5Pu`ZPRk>qb8N)T-#5#lMF+%9B7jDghp!yM8G_5zq%*Lf0gE%=?16L9M6qDa&c6DaHnn<0OwE|F?(r`3-+$gTcGVz}C4)O`}grhtT z*%@j_eX@BvG;HJ`B~8%<4$w24XctI*ZH^OE@CLe>4T>xI!Hx6Z)9cn!<5_z$e?CaO z_UOxz4ly3;1(5cT)-4k~;|=1?HN|7sOsqKRkLL}uV@hRBldfE=;1{-;e<9XcmA*V2 zV%f#6Ai6R*p5Ry=X0crgt;?c~d6J+Y+thlcNi1X9e%a=jUl1fC@!N*G9HhPrD3<+3 zhfNm1a=hda%*JM_SIjX>s<SNy?q3{fE7%bZT0SSa`QjL6Ysxo2w!oZBx2y=qj8 zYZI=l_@AJ;-F4Qshvxg=@p{ehc%8BIZ*j2Wnv^%oy&cnwP z<}?b55!O}Ma~cYC^49c+^d}~{6E)a77BOjLo8D7)EdMIZa{wfmt4tuL*2zAkCLW{$W3@nS z7k)sKGjNl6%3R~G|9HBqz>DzO9aqd(I<)9=!`!39)Y^~EdSuD!a7*e27ZzHxk=>EUMB-HHtJZ zkbZOdoY5SNQ{zeM%3+tVe`g7P@JepALXEmLT#n^Nd*+#x5QlzST-`>kB91*T_Z)`D zY!yPeuY@4#?)mYHyFy(bu98%35E?qZnkn1*DP_p^!M`A*s?kW+adPu&G%o1l1nHTJ zx$f>nH!c1YstHG__`-sRbG`tq37!6PukO*h7%*U*b9!D$V-H0V1PCq=IxaOGEA+MO zSa>q%(HH*I&H$xEhupb-j9e7YPpp$Pq7C6g?Uq%s>l<)hN?EnS>KQA6_;Ul2pA2?E zw>+gGidZ=$TdG|1Tm7rOX@CAeobKoTTT<=-zC6VX{=XW9+Te;nTPkKKTsdgu^Z_h~ z5jX4;Oz1VXdsXS05_OLeI{bmLW8Ne9U2Wo>V^&KufRoaCghkQQ-ZN^G>h;mb>8gdg zXrq{@BBKRfgy0W$F0S8YtwuNp4fun?WapvWu>khQv~TQ8l<(8@i9i~|5p|zE`35Z0 z>tetch?s>TtJ!!K5E9G6e>E~1!TpAuKDonfw#F25qvGH9q1la3j5EH6y4lXwZywJo z4BDJEZ17?jXL4bZHPjjUqM>5QWB%??;vjLmLySC8*}ZFnjXRb=OfZQmn}_@9k33|} z_Qub?dK6AcJ!k$r9RI-?$0f&c9*gZ-&TjFBa8O12AXG|Sgs47U&2q*#st+!1dVanu zD?!E502d3CFl;or{jWYknO3<5BzB`*8zFU-XIX~{^1{=90*Eq#-Q*pOqmVV=V)v@^ zr;4n(GG%PdNPQop`kPiWZ5Dj!guV4?xG40&%CdeF+q~W&{N5pTh42&FFLT9`q@2o! 
zA9-m&Y!?{Nz_>Ej#;En=2u%+9c5a0(w780I=~nn>=i|nU;JVVXfO1PIU6EqM&uCE3 zcvni#1xqvuKN^MPsY1Unq`2)}#AyV5+P8Y#hIqqucSg#T^jTaiJDI=I;JvUm+-+j* z?s>Cie>|c6^#zH24>8Tpi+S?waH7QJtX@M&d`DZNF!5P;xy{kkQm>>R*tc7lt}QO= z)=HV`opH?SOX)E7=|Mv%;0;%2ktg%6J88c*CDWYFYkR|9{@m;)(>&FZQg}&un~p)> zc_9h~JDOhL5ArxUK+auaP=eO64+5ZWo%MmNn=#ddu)7!HA(5RC@IjS>RaCQq9#!BZ{fTXxmJwmnF7SoiYQ&7N-QNv8*`eI;gJSov zy!w3a3ri&5u~mZWl}m0?UjGs&=SzJ%BH2;pk1k?V;x`m+LuQ!+z9%wkIU%Whv>ctc zG?Oki@4A-_iR6GMpD+0XtZ0MMhz!Zw`55#*d_h}XCE>$@{h5cU=KF2o=~|p12q3Y( zCU5({_&DMR=%)8rhvV0A>RkeO-r72?jjfYC5O}gGiy}Z{WhnMY^D#;T#=25gOgWTI z6n?w#>Mx!tFSe@jJMc9HAq6`5?b6w=?Ka5r#+ZboRwD#Tbw)8X5Lx`xQTPAedeBhJ zH9?h9RrBk;EQzfT%^>xN`0eI}FW}UxO->}OmkN9E^-*HdZSh;zzOE@on&O32entxY zlH@AMSu*i3CxNdnmMWIgtn$g*h$l#z$5sJF#2Y#OIc}7bB{Lj@*t%`9(^aaT*hT2n z=zvXxpgceh*D8ON=4ZPEY&8!=W_qL@9$4NtDUpVLcs^1RC}Jc=iz_R*&%*Jr`a;Lv zN(R~~oLuSn)Q|Aj(k{F_S4h_rY``$&qe#p@-EFNIate~s12fVW zgb{xL_UT!f6HENb8&OKI=H(1S%--vzc8r2>X-xcgpp~i#5{DhYFUj#LzDwE&fg^Jo zQ%vAf^@}i?xdO4yh0bGnN9tUQ8sWe_ZQS>-?z#WMj;RWwJ!%IbPwx!+k~)g3x@>3Y zX>vF$C41R5jK!!G6@|LfLESOWN>OA;5!o5>1?K0RECvrwzRtd~o$+NT-Cr;{n$$aG ztFcrCeWE31CF zV@%_8s=(l)PwxUTnuZ2(m_WoIpwii`H;pDyH~r($d8tMFcTT$E#Y3(N$t@NQ%$j$v zk$-1^u9HzL+ree{R->D&LvuuVtk7;nYy-46_UgjZi$qdpcP;u?0O-KVrzH(Kq!_A2 z-ijhL_E$#86SmuwAiUVERRRc$D%!HqvVn?r;ye|X?AQZ4MvVCAZ0 zj}BAaMu`E$P~z{qPax8x+yc6j(d@^BixTy3eVxC5D}q$b>Gd=&_Qyv>)l|38=~eVo zY$%kH1B}SfZ+4!6>Azd+$jU1jd2ll*viSq$_IO4&wpVroDrBP;&8esdoL?=nf=Bae zc9Bil;hYw z;2v*P>8!RN-AIA&u!HZC@g&}L`c5bl*nq_#PS$&jR))Ms&d$q;r4jq0Jgn;;3UZ`V zsC}@D?&(+`2LNYp_ho+xgc~oD5ddRS_1u^dqDHTTb|)GUlb%lo5K$`VM3LFf#MO*3 z_a;{zL7W5~Cvy8qYMe$!*cHJw;wv1m4)L3Xzz?hfWQtJ=D00?3e@?k4#I+cQFU9t= z`!D7{AIiil5bXdZ6~Ph&S{#itkJ%}+Gzn$mELax0hg?9@~Hyqh*}e{ zRI}uo{#1cFR30C3z0_p~h*3kr!torLHoAZVoU#-qoJS!7Q@Ohf6L+~5?ahb;|hv}oTt-?2P)Z`jYx!OhWk}}yC^X5Nb{N=LHUJ`1Z zW5ZS;_c$N~vl4a}DxkAzEimh5&Rk!tw@0)@QD3uNzG0H|R|Hwu+%CeRS$6&s>-7Nu z-#^nTN=vLpOHdK+KVxxukcAuPT znFM$f8=`4o=H?ty2w~oIYm(4rLD1i5&?n1;!k{0OGI?#M?2nWJK3l}+N5vZBR}2&5 zm%RbStpYdM=)X4zyF`{B8fz}PJo8NL4u?4ERe~xOF_#l?9gqLtXZWqs&JHt(d_Q7P zAh^_na#O?+uP&z1L=F(XOk>*b>zJVK8-3GHJLjz{x;(uG-ZJa9X5ypC4>JOBEv1Wl zYVt=Xvhp}v+-p;2 z0)QZPVo)<^K)P_X+7bZ(&(i=*K(oIhr)TROqKs3ok2~wZ_?AwR zBpqvd)?#sAvaUqt<(a!7d&@5RK^xq8AECWX0R*0QO~8%D zaxBxxVv22^yXDeRsx5`=X32!t)iMfLC6T%HJEk(p-hF`?o2HG-WHSC&J-5k=Ig?ClV)p7 zvGB4R_8D`N$Yz%EN3#Fni#jvTK9pNW6nj!_h;@L_F9+OU_L7^3Z$1rfY8`*dFHX62 zDKIboec56E)&aa+4&;u~+We@ZVC>B=BOFsd2Y39m=kJWj?bRykPx`4)(N9P*iEJi{ z9yevnIks$O$|~7=RycKlfY50)iY3^)f1S|!Lc%~`VPJA{4xoP9)g9I{E_gnCsOAvH zi?qZHB5%+oxsD1kpVWv_P->*v*0E7N_VE9neC3-*YX=^RiXaZ8RNgKd z6yjv)2pf5e0*MBos9Vjp>$}a79_S`_*46>%aksY!o9KYG`H$xMOI6)2u%;;X#vV|{n{v@<9SLv_*y^9FUoq4oiJOcwXzjuocW z*1AU0ieF?v-+aB%?|oZzgV~xH9};mrbQwZGc0U0zIV}@=lwyOA8?CbAAtnD|_@X1C zvG-uETqHx-pd7A6#Uf2?mx=_U|DwOmW|Hc>A8iNs`+piJx$TLHmS+X_izl8K!-~4X ze6XY#H-o7X_OO!xd`?`!Cld?Af@hYtP@(~5x9c1*6KxzTwuGl5$An#OJhtb3N}XmA zF(kntr$4fYrl5RZn24dFv*)mofQ2pQ=5)}zg9jJ??=4G8CP>+zE2kn2H6+)8*{;_D zw8(d;TRo~*7Ns7EtTD%%vIN?elhp5qO^j^DaCQ|o*K(ZBf7kHww#fhGAI0@T8gt_A zAMib|`$Krz;yM9APot7<(Lmci_&~J#d zO*tnP0aFiVJBK~XXNrTMQ!Q*eYU5Qf~^{yq1@pbC%7FY+nGfL;&3>yYWdq+zG#TIfX2)t1EreK$;-`((?qVl>AO>jb`%De_TG?6k zkOpS+fPa$n|JmhN_!GE%mb{|$zx-JUVxxXt{pEa8yTW58ptb%rcPO%S$e|H=!$@^T z^B6GJyWDdUTvcwWyt^rPs1BX&z`zD&0bAA*5=+X}Ydl+ke(d|E{fwpDT6uG7uet-! 
z5Tta#%C^z>ge5nhvq(_o4bA3&Q61wbCJk>>udZcH5M1|orzK4kkU3$nrZw?`PTFTFn)yqCKnB@9X;aXFE&*D% zVNYz;eJEaZj)J^nD^#5`{=>oea<(L=;IS_rkx+KCD*i1@{mZAf8A>2nuGa^v&F&H7p#30%N)V0Lv5QI+~F?Q zT|+|Rq0Ca3u#XMT$>X3Jd(#8<UK&jk6r9 z3{hwFtw%uu-{D6_qB)xsN@XDiw`o#qgF~)7sQJgbUzQQni->;{Sy^bDqKoh(M4kvQ=0MT~Tj_noP#*H5 zhdou956PttizOth`1%J&qZbsR7yYy?H2JXK9NA@8JCKjgh_V3|B=C&N}K zU@o^g-D$KO9`j)~DGb9V4}elygn8pGp)PZA#3PDw?UsyuGr=O_Avdwbhuy}V;gpR; zioXbbA`}jKs2DspS$Z4@zZx%2O+xi$`1`;kU&2gv1+MGY-6PsXSA_a0;m91&N((pORxc?C9CdQuNi6#-3+pe z6nPqTLNo`e_Y-k8-FpTqju>sABds{$iQxi3erW)A#T~*kLdcwV2ejAEk-bxvU&_Y@ zLm(_xu3))|$($oDKmvJ)Ms@b+1I0p{&n;vg#u1Y8IDOGixBF0FGjXkhCqUQ9Fm5}l zXxn!?40{S>|81O?R}GZ@lSqVBdJ@f;lhxb&-wd1fuD_k4=1v($nHH{DOwO0z^1p0y zKuzy^di!Z#r#fWGn>^73aq5f893Wjd@dag zowrlA9Tcl$J0$1`Hiw6LOmH^m;R6^CR>D9sAPcs*uoeAzEaijVw|a4CS29^eT(u$M z*qmhWCKZ{)GnIa%^q5*7)^G zq=VNGL^{UNz4eRh`s=?|Az_#gMb^J9U=7ldZIl{CY0QBJyI8>ZSF3!U$jPS!d=bfv zdR>zqFhQ>%xfE4MA46aq<52;R#r`c==>#?3yWk?*F6UV-;ou$-#45fkwrf)^*rbus z0$m+g+_|OF3g*~2!3HH8*9i={m}(9F-Rp`UUusg{`N&=9(R9TG!|A9{=5=xbmY`POL4;)j4MoY>5CQNPmHXg~syl&6 zIs@dxa!TjR`z32&YmCn3AxQ-YY_&N5@MKoEwA2dhRZliYWAb_eAhDgFnF8XlPG1=- zY4!iY+G|f)`M4)2Kocixa_GR`dnGlUJOU9^88X$`>(u~@Cd+g^%S`u$!XLyYN)(m# z6sc1RHUeIuc{Xpix?aKTIu9e8+R5lTU%7~d6Nr2_o@7@|0af*6j=tm^8w77hu`pL+ zG+c{~#C1eQpYMzf@)u?R$F53rmi6k262~xe*1xHe_SckVX*%#SKC-X=0tD@tW+#z- zDw0J)s?{~tEBgi`x+-9#O#zai%&aXr#^+k2*)MXDwR(q57uz(>_v?db!B!vqgkukxhK z{1uko^Ft-eg8@aCqMnKmtC3V-Q)-NJnL6iQ8{4hJ0QwLqRS0Mq_B(itHVC*O?}zfC)`Ra9Y<5obh@E4r%2g($RqCa9Y--fnNjjM=Smf zM|_fSGogY(tz}C~Hlmy(Te^f$Rx?hBD9I1s*xu(GVD@KkH&@OSen;^S2`AlFZuC)7 zmq1ffuoUs-?mNfhj(tG$E(q-j3UQ$wnhD74EEru6@irRCtKxKBe_3JTSc}{ECh8Ub zMp4H%c826f3T#$ovlY_~x+17SA>7-ENmJne4jhnyc(NwO7=0{5?Y07*+_~?uq~gYn z{)-Ago-yx6gJYmhqnP_;U*=*kWOigszTsv1xMNzNmtbsaeC^aX-Yb# zO9D7)SDe;&-(!Kbv#wjh2hF6W z`a<$vSyEqvWonjzrcEs_zK47nQ21BsrRWbKF(3o{vW-a}LptOr?3GJfvHgBPHN*Lb zusKU@12(K8?4hUttT~Ko-xRY1%2>!f-*M|-r@W0XMAq1WIp*TGa>Zn~^YrKK_!>w( zYm&j9Yy-CM*2L%W+2E#z2wX_v+{rzul8(HdNpgkG(adcJ93!|s^&%j+ar47OxH}MF zB7mYzg+qqb#&bm-YC2M3#udl+-QT!X8@;#weRZexwC1>P=~#qf(^lm?!~N0SV0@*^ zJ|W~LoF5BwB%Mrtdu%cUh`F1o60V3^ztM$j{F>h~@>HktoaA}tmkG;jh>ToSVGZI& zW~Z#bxq7_l@Iq2D?*}9#okfqBWN|wWT`yj{FCz5mEWl;haejVA#jPa%zW!$5PCWVv|En^F7-I#0U9pOE;g|| z2=y`+Fe!BQ#Ec0-K(5Eh!6Y>GOfQ=Z+FPJ`ikL3T;|u<&B}dRk}q(_j5~Z5uDqQiSh~o^mqgMThluVFE6dDHC+Kq`pTD zn~n9g2#6{aoy5DDx`5|-q8+PAWLePOPSshav>K|_0d3li2cB;C{)51z@T5Z{?~Zf2 zAr31zWkM?cpqm@)DTMfeeJGJ9q2l1g?tQCVdyglf%Is|MRRGLw{+%~6!XO45r7xyd zcWdD$;3*pnM3G@%{DdaE_IfaUmEEa2esd7IS{VQ5rYiWCCZC0Vt zeFy#ioTzNdDLfIKQ&F-l>^`H6r}E5mI-cGgof51)$}T$cF{!Z2aZdg$j=F^zNg(vB z;$MV5rC-xqV(mDFu5W!T1W>4%ET{_@`!{kyHYYh{Qx02;p=n)U&pnZ7^9B7f6uW=S z)m4Bz4je*1G)PYjvMxF81nPdKBs`GrxsD&8qk80&uw~)NU)u;cd{H{B2qMGU`_jv8jFs(=*Y%(0-VJ_I ztf_S3U_BCuB4kd~PIyf(?nqc>z+)C|l+koZ_v`-FL zTXJddyI-P86Tl%HNpLUFFH0a&)j0xV<~_Yc-l%Mux;ZY*xd+~6GVe#`n|ZUY{0&^-{bIT0 z<3nuna%I;K->3&Wk#1nzi)I!w@12#sm!2|b#X)Qn0Tjn0`BTn)*qS#Ox#_Lp4M^cR z>#UyT-kAd*-BVOwI7d&o%Y>I>Y-p2-q}F|7Ot9Qg%<_ru(5henk13CgU`PrzJg1MG zbsaTAiB9>1mytNG?fExnI?jF4RYNa_|GpugaTMm0&8g#l%DU~6kJ7F20Z2;r1>MYEKB|FatNuDEVAjcCI$9)0>9+{q z3^EIL#`<8?l7ettHEbNIsyEQ0EcmZa^b6xKFiq5*4^qaGNXorNT0_!*u{dnUNXS8$ z@UelfYDVwJ?J_o=vSDQZtzPr9>UAU89(FH)!W(be!sw2ra{4#UEP5F5J$w=RvQdOT z;DPKPYBN@B&U*@ny%m3$_H6ab`>M=gNoxWV#!|*T%@IUuEWh-ezs_qDFj*fSA z`~lQDeh31|+_?p|^c?Ikj9gj2hC=upQ1hZNTtJAsu3HBMRcMY*s-7LxKh4YCKW7TI zy}Is~t5sno;<1J9*m|!t>KKSC05=DZ25lrp$ZQM{rOvDAln-xa>l{~V8VowvjRWiM ztDrP>iYC9qBY8>cHpMcIz0deoOp}~B*G+9iW-`j{5IzSc@$qq?=i(o64@6@Cr;wp) z_RTM1t@6Gfr7}pIin>mcg4EoODf!9$otY9Q(U5zLvz?&JK60owaE}S`DqJ&9^pU&j zQBo9QY=@)5^@Sagw4OMnXLw*v=aa-k##j|YNBWzDe6{P 
zdAeDf%h*>h-~Ffzqrtu<{%51>PczRGD0XT5aZnnng6)D*w}(cP49i61_T~(P?Ac73 z2~^Lja=s4zIq^jnWLF8eyCULjYy#zn7X@XqD858vduH;nt)Kz!1FVR#uP2f1PXkuV zN*=J>-%zi_^t<%DZ7xz5Z+QV9M>)6lQP)Z>HDsecIHNJ4MT49a7*FL`;5o>goHdWE z_83%aQ?jO5Ng3)j7qzk0#hw%)$)f>KD&47!$csSLjsf;U(E>1hQ15*y;$}wA_;xc0Wbm_ z;>Jud#*%|%q=gwCKOz6;0oE-DHB!;u^8OR9r%c;TCT)NhNk_N`Wic?o=JXPRz{ER= z9SVO(9E}FuG~;EPE*kB&ExQ^`z7y;~XgV6jc-T%spu|TCfdL+sHm}LmS0-X;#6|9uv2)r|Uf9aM0&g*ej+r z`~1h2{_7Ky?gk7|DfhZ)Zw&Ic4WjEveh3(8OHI;%xyCD332s9e^xR)evNE8%Wg>0Q zPr0{41NA_^^+Jf)gR20pu>9$UzLZOB#&kPSKR2=G6A+v@ma2KepQnEgoa%fEw^8-Q z&~vm6o^8c=0$u$d!}yA!?j>}TRs#JD7j7%h3Yvg=Ubwyc{}lakbuRp_fo7C?o-(*q)z;zTOyiz&u?T zETD{Z84Z&HI0y(7KA(tR&+zv&ME^-F=9`Bi;J$7gs{hGS1Ww^xO59VNd0ER7 zltXN&N61HrCs=I}nj4%(CfSV7KQrJ5478S<1E8rMzM5w2r~nhkPl`jFn`&Dtsl-OIu_|M5%hJjWYNaE?dPLHYopAdhbw@BnM~bu#M6 z7gpL-fA@(n>~|52&gpFAW!pp{tTR2f{@xCt_9RH4AbEMs;5vW#B>M6ko~>91gJUFK zK9{S<>Rr8B&^NK1;h>}YGHc>2{^8Z?{ZC~32(hc%hfn;>1?vn()ITAxQ+Oh# zkk){V<*KDjAN+BVLZ+P4 z0!%9mMunc}Gt@*)Ei3BA-jL0u42EJcQu%JhDi-C|1LNw%0o}C0{rB_Maq!_WRsmGn z5$%~`g;UD~wJ|zZGnjHMgQzTM=Ujk|Hr2X1Ro6XmW}O43Tf6atPgq0-Y9fqux|g!F zAhy=_P*4Ek75+~%=BNqb$U|}zm+)I^{mKi6_N1ogmT1}7SO z<&2Pg5D@uB3xPMb=Cz`o(a4@uP{IC0Rm6dyOYBEsKF(2F7t)Vyjnw}M5Ar<9)* zMXpHIjH+vlm@(A{zOi-7eay?EK!mZv_nBNI$H(Kg&TozzD>qZtLCoGY!seF0hU;o1 zsN;AY2IB1=NTA}hTNd??GD?=CeJ}&0s@VO-|T3Y9(5Fc z870lo9O%**i_8Pm%vH0hKZsaq%FOKGfW5y#VLi#$LUVUG_`H_l(b|?iy|k-FMrFv} zM8_5C3h$duX+>b(z(RJ+09XGg{=_NBB3sp&F{6P~;!RI<3*pxf z12!+Jgp&xQ4by~iixhCDUx#pHsaz*n-vp{zxF4Jy|7Rowy8Tevfa)vX^L*!$xAk%k1zkgjd=j4>-S2 z4`M@10t81EL9f#E`J3b=yn^N)B)3`o?*~SYHYz^3{McL6jC?V1hTJeUqeanZaXlGq z6;08Mr&rQjq&|n{$+&zOh;TV2(4Z9d{LPgZXR}=C-{Z9^u`Odr{xC7;tl(t&lXxkc z#ah}~SA9rZrxFdE6>c+%)D633vE@l?(Yp`PPM8ATi?8%E2Zg@AVwMVKho%{rT?M;` zni8SAYg}8L8K`83m-uDFD5EXRh!$2%JP*F1 zQpDXI)z$nD|GvaJzdp!xbta3a1$NG%k)Z!|{;Ey*)^AnO{zP-d>@XEd0ai41l#D#H z*t6pRfj16~iH{CE@8oCsaz23>sropLfT{Q zlHToXj0$F?0_1{eJ()0~83eBU2b!Q-_>eR8+O{KmAQDGSCqDJUFlI-+5&sds%?~65 z2`W(noYVL?+adDs52CL}KhrPG_i2G3?#H87(jK9LwQSjV9gx*Slt?)J)m8D}`TE15 z)D7!9#W>l*{vN53qT3SNUYVTIIdImEYJ>>ezGt$J$^fJ%i`Lu9S% z8>C*0Q(z{17ho^C%1i65jiJ7X*V3T6)hIHy{D_WrU6iNZax6mMC>L@w-KZ%|ip$@% z2!FmNNCK=yIEUJzF79wpd4c9k`qRuTEyin}b)r|KdUct}T`tKxD>z5jZ-v#NhhCOT z!GkbK@nJ60+bH%XDfd-3TvdYXr1W!$d@4karG*}%V8@5SF!?f5jAj?CAE~{&ZpDBB zydz2JwOJlMGGW$)IQ|g$9dfrR)`%?ELCj&O7J#_;x^x`CZ9l7f^2>aa1^8zR4F7|F z+1b%X5QuPkyg{YhbG6}K21n^>&tK1F)+PCjwCQ*9;U)+}@Y&oXnD}LF@@YT)SZJJE z(VTwBrHT~xO66P<0qw5Cr3jc)K2a7F7n{~nOAvo9kQbC0Bm^xx>Q{PQ2v&|aF?Kk@ z{W%>47BeXR5e?={OZD3-lpxXmh6c!NW%zGf!^H{@kCD24mA%DWzI$XF0NmtvR1(6= z7&HmlNw3(Gc2)?BA{zx*Bq_(PuF7Lz>yv<~JEc*6XW6@Gf6*}O{%!6G!aJ}`%^u?% z6&w&%=OdNfi8sC#=mHk*0k)9`_(>}72yiyFJI#N|ka!Cl+G)o=_ zi}O$Hud~jsPmnzkLHkx=Wu2&pKh?Z4%wAfGFfPk)*|=VGyCSlu<}%|g)%;m7v61h2 zG>H=bUI!9`9vKUw8H%%2;IL&4Do%9i+FGQgKKulQzq-zL)zZzmCX`>7_TIQ*2B4aR zzWydjFh#TZ6UaA5NjbB52x&1zW^r}R_=MFyOZm!#WRi3@6g(&IN{$`)SckDsfcel$ zI=$eJ&I-0;Hdd!S0VxbLoJADCOtMIDAB_0I(Kq=jgspUIyS4;5vvm1vV(0Q2$T>FR zR?sZaSoi!`bjSirse!G316zq(t(msAzZk3$M!DAJ{fFKFlKZ3A=iXIOt-z{GnNT)0 zF->&0(G4Ig6g1w&@@hwv6SKzq0bjU;X581 zbPbpl2?+aB1z%LF93*l$M%2^sx5TW?&ijInQ0V6SG4;tO$*vJ~lAQnyNJrHZZiAbR zR0#l3C*$$RsNERF%h!?w!DQ#Ma%oDu>Q|3;tbv?beQz|Jtyr(p7B_1bcMpL5d%%W; zgA_;~9GYV2U^Sf)SsDNrI!v&0Aota&rPI3`eCz-yWcz<>p=}vgkjc`{`uXQ<5PepXFNAC7xNTI_OHz#MKDC_ z+v~HuL}zDp*g0lu%Y%WihLn`rYAb!VQh@hHt+fUz4j`hcTV`$}+a%%?4dDNEdR#?C zL)X93$&}eRiTH5Em_C_~*JUnwbtSOLhk`4EmN3Gt@${kVYu1q6u zEU2=kOq)IoY>VgU5ewj&Vkl`jA)7zB1Hw0geGArLbCvxKXSP8WJ4sBK zLKGNGijTiR_x&FujAt;{=z+986p9GsI=cs+tNw4x%GJ7Rux>Buvtvq_)oYN?700yC zC=TMJO7&a%l@$;*w?HNr!#baHtE13!@ooorPA91@Qo`fV{z~G5@E9$=!1H#t3o~6?aqQKtG 
zOUxwXq#%6Ux_(sl`jQ$1e;o4g!8gyOt}m>>&EB#2e~PFb$ocR&Ns?J?T ziPWIv`LW@RoCd&IkAmX&qVi)+OrQ@issYo-{08^LZyw}Ad=1-r0Ai-0#9jUxO2v*Q zpI`Un=i1d3SbI$;K&K_eWJ_9~I%tdK$+N zkLrU^RU-XClYyaWpH1rMcdp6{g*Y{k35W4oCuTe6QJ~G|W$qDi@>+_tt~9#k)UIM!I)n z7JUmZJkW=nQ2XpsH_s^4B>MIu-E&TeBPKsDZ*f9CB0ILT%u)!^aPxt^HOrcmiyxKy z@F;31O)VPA&Xq@vYn6E5lM1%L@+gDsPUw(Br|mWhwpPFzcC5+a-mgS}7bB~sbk`PD zwMZwn{zUb&Q$~rS{});Bz?};htL@gdZQHhO+qP}nww~H{SKC!%)wXTp+v8xIao+tG zl8nq`Cb{m=QimPyfi&f*MhZ`HW$R5k?lOi8miP2c7F0Oy_O8_u83$3~zSf%`cS7vc zga-)D|0PTJq|jGDTJtn%;x5En1XqH*WF_kcb2)vm^&sx~nHu)0_>Z*-ze?$0ll=juqvxu>Z)QymJX zl%<6c8J19~cU5U+LpF+jWDhn5N1hu~;bS}rJ5qSrudH_?FGS;Hbh?n=oa_liFI z8x8X^_t>00LC1P?K$6|L69T?`F(cJg7NbVIjIml0LsjqNUsAmn$ogPvY zo;)|ie20c910v4dDjE1Zaed=vqil*;0YxLXF*yR$iU7t!o+M%r{ckEtxIBTovlngF zuDvk|5vUlRA}j9xy2d^OmO1~i=KvNYhybR677zcsN};daxG2QT!b)qKIC|&*3X|UW zoL9S7T79-kivrc`bSfs{&isavhIbbqQ*t1_KO{r2_Ft8pv)9%R)R|}4Rw7mk{f$s< zv{j4c%EfMcpeGr#Mw{fFh(&)fKy?Xh0fB#c zrVL9IglA_74#n4rT@~9$9#~1*o}20lEiex)wd2pE>Zq0e;9=Nbe3AW*tAF@%7v|lO zSCZZlaY#J#ZC?>&^-3K)65RP`%Kiby4WkT0Mj``Y7YGd83xi{S%20>Km|rZxd&M-~ zk>EKrm>Dtrrl22tokxG_GD!hMM5Lp5V8yGy-Q$&VOiFs;QYf!s1H{3JcuQujd5#yht)@RU2zNwuV9H&j!?D1T9Omdn z`z7jt$`j{Z=h!D^*1y(VwK@cR{wLQbjZk7emSCez-%vsXlIY`@-hTgU=BdK+k^2G?vg7_xkh3-!VNSF~=5G_j>XuwH-Ihn%7)`EY8%mWmCGxg5i5& zFA~jEDJ8RDf&A~$^Qiri5)1i$Wf619`bj9rtL&#zDmw*##Q&vdD5i9kaZhkQ8gxLl z^X3!VNr{7R#;@mg&*Z7ySLX~kn9Ar6JT`v9DyxVHhJCwFg{F1dvVTHvBKjNU z@^Wqo!~2ilV^J{^Voj) z_i+^dElm><eE3?3Q{59^JEUu<$95DpbZ6Nobx`F-V)Z^Wh0+A`t5^jp74)}&{>b*pto zfxo(K4Z-7}Typv7Oj?dtbl89MbIL4|x^5Sw2nq|f`4d_n)->qLYqk?J1(DK%R9m!$ z@)+|7f#qf7{2amd0TdV=TvxhsdBNn1SNcM9xY2tRRvr~w1+hE5WLy;J7A?r{jxJ!t zbALR4x_sr48?pBky-H4**=@J7$$Q03l+&c?T}2=St;bwt%MYnBOyGtWX>~!+iZGV_ zvL?)v$A{!Hj?J3}P+eKH1_n;?eON%pK~svph1Q!Pd;^zeRkRNMO~knNGCHV3r+;VA z-i*zKk7!4Bc{w$0A(N2ui z%Pbsrux=q?S}3y|xqYm&XE;dV1~YI+BCl~9#N}di&RyBHDvzVY(Tr!k{I;acR$x!f_FehuVHrMi{Bbst@5K)H8e1-C*T|2$_v6del3{; z*ef4zF?ysEtlVa}V;kc-s?=%MSt*pYn)OlAyM7Zyixtgto{12&!VFNf>h?5zhC{5>A4UdP$H9z9T4A9awtUh& z*I1Qc6;DCg5m$*Tkb9NZsR~jJ&_{_DRGDcQX~C>~lvm*Ar)9M00CFL;uB3{0-8MjD z(_EQDnpA|fz`ylIyU1x)+uX*5(PashLZAYfU%OCN4|l#rn_HWjG=7+ccRM8XVddgs))D1sy) z*}VUf9s|5xS6%5D+;~|~2A*N5fF}mFeq!<7=X?gpUCP40IMkat70v5gW_*wLsCXT0 zZONQ-zZ|#dWB)@%Iq|3C~}H zPxN*_yt5x~%Vth*6&{BFAuIyY)C=I{yF@M42{Yq5cgEz^`3ne#D+ky72Rzc4u&<_1o~k(!zX(5$v53J>I1hL zVeS?3!CuFWRNGg%U9dVpc6vEhyQ%c4_r&51g$R_5&IdDZO5X7J-Ic4&XMLU3Inkl> z3L)iW$l-FksG<|6yS*dm7lWXY+o8GeIRR-kecGc@7K!&Jjx{|f6&0}e4Oy_(s%FW^ zE9s|9TWNo9$I@cey()aPZL?>F+7IF{g-{gE1+=pyBFY7Qr^6C5yBE-Nt5 z*N~5bRH6E};wi8gS5CP28oqGFY0SQO9ZutbG*f$6lJpEQ7vcLH!R`EmP zUs7kp=;Hc+&6?Q_S3;Nfz%bnz898dko%nUm%#n11f=)VV$Nu)-YdSV(4;w^K*rkE*KHt{mNJeJ=zAM)aBUtGwrHr~vs z87B=D4Nus@WMrP(6hkD5KJ{s4m3CQpAJ;;EBM7mUbD;k&2$D9}KmLjnKlw}Qt`EeH z85g=J5eLeN7g435`I|Kf-usPJ`rdcmu1ro3A*YMF*4T65@@q-az)i#}(yS|SVP#>3 zkT+?69M*Q3F=JFi0PuJb&9bE!4vDn4SUKXqR=P8-%hxl*cDD$RIOp9Uz^5m$c^&-e zaaq}_%YkUO2vQXA9I3Y7Gc?Pq)TMnB=dN3OwK$ap(?!C=HCQYjKE+(*&$YQpDE_QB z#BbH#*MP99F${s~=Ou`;UT)QUlKrdLB1G*U~bz){2$Qm-f>)9N&xxSg+!Q( z4`-=Xla*jU_voPI{D8i95k{;;3GJw2zHf=4+m^iMIJTKGV;C}{K9Aw*yF+OIQ3sHE`ptT4*x;<5w`M&Q9h41*Px&O>O9SX&3<&PYa)l>yJ)eZPL9Yv#tNwukZ z-epQVyDc{rDZ-BUpYLWZ8QZg5tS1m!?R&2+x$MVC*B8oaI!K)ym6kO?*V~6{KQO$X z^qZ!1!w^HrLD=9PrlSwN*YBty~vVNm5+uQ4lTQe`YJ?zH|PP4Lett51(Y?@Y~7r-ZEs zgV+m7v53Oti!KJq72KrV)Q~bN54F0P(J;IkbTW=J~?6uIs&DdxV#Sr6?1Ys zPh7iy5D&`+4vkqs zsQqGG;USl?XS8KT*?03VGm(ynI}^#Drw1f+R>wVO|J50o#h5uizH|T1EJ$w%lBF2V z7n!YfS-0XuC!Wg>XthX<=G{N_J`+7r4jNP=k^0gp_AUXd~ z_*PH2F~F11X|C8-lF7AM-LAW_ntrSI7P_0rEJQPJmpxyH>|5Lhpr**Hw^K z533+sKowPB!zY&D>NZwx%}hzLmv?ZI|}i3*GDZ>fjzZ~UEY9^`&Cs+ 
zt`X5~5OOP{x=Xo)l1X@74+U1_<0jGiq0HPqPz60x4ashbUILHjDh7)t8)1shsER?f zEn1yn+xS}=Ih~TCgQETO5}u0{YfLkA2GNz<+v_`4lpH(#-ITCSmMbX;G~Cm}ak2NU zKNJuuStWM{pxB$Nka&pJC}~V}YxE0Sf?c}93kKn;9*|By+?@@30ZGV@qNf*jMkX3k zmc%g_nhRM&Tmsv?C>0!Ln|Ch`e#?<~5ns%M!D2y=(?Xue<(<4tce}E@-;;FXU*i;)_{%+b+`Ns*q%z zsKW&EBqy(?Ale#!aJkoq-g5fyqb$G>wZ;V16gNeUXSd9(azr)Wxs-2nb1G7Da;a1Q=s7{Y1sivAU zmNA~-FBE|27>e{fsllbZzSTkhVu48#O#a0f+^P8Am3E1KMS#d4_c~UQdu<@x6mAH) z!XH-OO9QvAk!UH6m3+qpJ9aM*?st-|^WpYNI4*6_Rv8f~3>@oO6NZ7exdQW3)>x6> zqF&?u#IPTg8rG0Gb z2i%B>M-|*}81|q}WBAGgp!@Z0a8(ijQsbGXDy@)dj3&1fR!&2OmCJ-*Kj?^ju0k|F zi~y6t@%GZ~^n;8U`O;WJZvpdvq|GNnF(`90`u~-%a43o}1g5JFk$+#Y28c~UG9Q2` zjc-jbk-d|zE!;TtHe?+vqMd1`VFd{8igOf29elJaknl!)P^}=w#x2>_I4~mhS|-Ne9xhzBqhV2I%Mv`unXd4J24vdho?v z(LOOfEDH&^8y*7lpJ+gyCGv%n59&?)L=>@pQlBE?V}93+&jpTPf)#fpJijma z)Ys+RcEgX~-s1VR!0l^c5XLEKZ4f{}nEx}0tFxZZhqrAbQ5B^QJyir5jg zk;3bFR11xUZj7A_)-Wx0w4-DY4<=aRZ3k8CKXr*?ffxCxwlb1{(%sXZq@5np?$5JT4fevjS9n4o)sf1HKLlb{4`8{&GnAtt^$s zf8*E{C*4ir(ObwbuOpgfxdwnO1GAD%GPPcLhg!Q;skF6VyS%4%5MUR+2(^8-arbE;&kTHfKT4!uDT1<@04X zd-Fuox+OH=-u?smWLJ`rMz%7@s}k~inHe&;+&kh z#1oU=gd)AYA}yiYiNHhhTuvAX$Q%506sS2f(qK$Asz535rmDrr!O<6d)lEbWnF_gG zUXi~N15*Ek{0;7+oo*Z;tTLuwgGB}_f7e4*8-^ycRabh7!rJ!VZ`}cF4M8*HSw2|) z_Echw?`AU^V)^z2QS^b%`Vd`A)YO23ihq9NaLniU^eB7%O^;zDkPI!&m0wb->ZOFb z!^sE@TOi8W`SId-B{v@t1fh=JG&Ka_{M1pJz6JIo9MWGLUVa*-3q4hbwEoBDd4Ol8 zEPFm1L}5$(q*t1tCEpjRgUm+1Skj`hOAxPmB?UIxZO4AMv8gw&ps6LEXHd4JEJh5o zP;DL!yln(wQjH=$wbK@)6|Vxde|CcVl5DK<&Xs4W`s9FzmgEGYEQ=+EFP6FQ^(KFK zfo$C{{wq*zn0M?U@xHMmW;5P?|JhfsUPDm2{6@Zl- z?#6`HO4_NETV4r51or`x*sBQWuC9aW$+7%SokYYGKNo*IVE)$wF=PWCW3*qQt5|c2 zV@LDhllOHLES+-U#gi%WHk&9*F0f-{_1Akqa4xJt&TM;J$i8=hQk+Oxf|_XgE8Xjm z)_ewU^e@u?6RU3~WqZ?MHzi^d(ni$$&J=`*syxr}lF%8TaMDqEQe_@NXY}x9zcB4^ zil@DbO2j?E7(IW#@EpsTXC5G?%K|zbOu^k1?)s) zQH<;8uSd5|;P!P6^&$4mf_{odb1I*v4Q`>NiaG{NdzFtRbJf$E+mQpboqc zz8ngOo?8G4B3er{s+=RShD($HsWt0B8YE{VBB(Hu%|Q%*M(I@}98Gv_MV$5$U*7Ua z(u#T2AFU7C3Enq2!lWgb_i(gZGP4QN=|pazK3v|7#2bF-%PRRR=9%C zt4X3#9ii?R4k;!0TGaa2N?~r6fZi+XST25A>2$z!#wau5`NT zo?SU)sWD!4;A#USMf04RzGjVVp76bmM60BN7Sc$T_gJK?q7N`N%DjcJ3dIuzDrFQ> zhKY!$3_@mXI7(VM6PJ4ow!QNwlQ$tNOmyAnS{*iJ#}mWVno6oN5$q!v=$`88@B9Lb z|Ds8puV#n)TXffKA+HCg(|+t!?w%Z5ZMd80xpJ{cn)XUl8o4K5K>gAN3LY?wP|0!! 
z>W>+Y(_-ctYOmwL+XBMQn#I!3RAG+isn?tVX(W zHD?0Rr>p~HT3$N&hWf>MTCf0By>-3++cq?#pvm)1LJf6!*TUCy7>9`l`{sm43hVWZGE+u#6p{k2Ht*v#O4ZBrFGS`EZIH-oFh*sM>(^y zo2#2AWT)h2m=Y3oeh*|xymv#L=?BqGE)BV# zKMfZHE%28Su3KtHn-9W;=Hkdx2ra9V_n!edk;)sy#El2gho?nfb>Yol(Fp2r5Mr7e zGX7p*ju`~$vVzkKA}=_q{8yKq=(I_qix&TC%SF>6R@7r63agUScsF0{V7AFcTX?|a z^hwl}%!D3WTPC=gQH z*qjt3XylR{agVNQRK?Edkb}a#Q+^^zY6jYz=_R-z+@V`|WzkR_xCj|0#=ZOJf;&*Fq!i@`&aypGfccSJNM0;W|!{ zDyi@`;gEM7Xh(7d&C$X)=^Y{JNL)g7!H#6YB@b)Vex$xNoLgxlm12!-IjE{l&2_WU zeF1_Wu@VcOFlZdZo{M!l=LP9c9Apr^*7(+leGO5@e(75$?2F1pLJ3eC+RNig!w@M9 zD&R%nD}s#}^}kjBv?%-6u>@k_@;aa3x&qZ~=Va@?ZlG3tmI7(L(Lm)m53*a{!_1|D zOgN)PO+j3M!F|hndmyLZ;>ywY$x)h7CQzboo%y&KH?t$&WS2Dd#K)E!(g#keNpdn1 zKCB1Q%v%N(@V@^XWSZ%OxMRdEM)8mkRA&ABMlwJj2yzT$J@fv^`QN5w24x!F%iuJ~OHmB`MCt?9fyj6PffCGG&fgx4HU2+Ynw)nsE81ZyGtoBFWGuN;{GHnGMRonYjS`-44Qe| z768L+_f;%B=9}H*5C@pjUvZKWw?`{5n;skJjJa@{P>Pq>g50L5cmi$dtPp_mLvfJ* zPW5t#u&q(1Emc)^xRfF+iuZC-*3)2X#}<7$ktO67L; zNS}Z1NnCOrvJ6Z6_2}D#;Y0aGr3$`{GzWgwPQNJ%AYBz=mx^lV+xc!mPme$o(bMLEkJ!e{vOkB{Oc9h^&xR!pCTZ|PqXYr) z`$Kf#DUA97bJeSO&Iit0Gf8kLPT&kfg*7TO!BOOr>d)!8NEBP(n_D(g29c{Unze}x z6Q1X>pIK%(e2c>c5S{AL{narjA#+>G3>S+3yiI}hzd2`V205xhN0jB(a8&T#TWLGNA-&EtBJ^I)KmW2#LkUV)lMJFAQnkMD7UQx;F^!Oaab*dCf; z)>+#svO5Rlx{0pokr%sPR6It)d^d7tKxVdtI&ky*sWKvWg~*eXSCB9~g0fFKxAcXn zd;aqP{SA_#+ewQwLSCq%B9K2AzOB?pb+C7|fm-qWaBjJxW@UBEs-e)uPW#8)x zYT=I}Xz|r(0`ePiUCO$~EzhTrF0fFCqcES1oyVH*JJ%Gq@d=9C4Y1>dvxtAL>50Rh zM)QgywAcpaBel~DQ&dy3_0ql9_jnffGFR)VdShW<)eTZ&Q84}USJ#va0!tg^Rfuzq z#?d@rUP>R$?}#q zGAG)SViaSEj>_y5Oz#v8KqgmmFb%1Wg6Ne^X;$*1=3A2woB`6}f|W^Fr{PR%(48yD z47#H_O76P|?J@XbKET+5HnBhI4FWpKL8=G!)aa3Y1CgWpw#l-4s4=Je>CcjMxy_d) zuR4_S$X3a_>ZyN<%t&n-#)^50%-jDA%f`_J)o!)7)&5<_+(`xcq$Nv{_A}RZ3fHF1 z@A?lA`+_uP$1#_FN}LtjpQIPlqC(O=ERpqt{G8UE-2oKsWz9-LEOl2DPXFkx_Dsws z3OrG{R6b#bktz$Mtxb%>BV|Jn(LNhonoYlt_On~!;@kIkqCo;9>@bVNjnm>VyrLO| zgaZwcEA>eF^?)`sa`o)))nA>cAD?_;ldLR4i>!~`p=uKl!5f+6iU*Eb<{)Y-r|H!FL!`hSd3VfIAQ2|}K&*Y!31ZD^Qvj63mJ8uwiLgD?)d&VESDmUkQv ziEJM2O{n|zP&yC&acE5UMFExX4TXm2pon?r-kWk_-cb|%W^H0j)f1l}tx8uHVobKm zbLu0C=bWzi@n2J?2#$6~Y|@D7+&nUWMoF;AOgu3}xS*BMyBAhV=m2E8kCpO^DE=k* ztXx&MjehOwdcM2?|ha7 zO|nchsl5x<#W|N<5pPCdM8|P;i9hGsCt;zgNcwH%ctKMH+cmP}ow!x{T+S_b4fR0I zb>3B*tD0awU?erSdPz8?)gxOz=43e?_@{fkG|3)Z_-X}n0Qzi&GcwKdsiqrdNOwFR z#X`fblPu()+hJ~ZtMVAPWw&_s&c&FNuN6sD;M#O(aq@Kcv7rdAmDplvgRxY=N$eX! 
z@~KY*MM*Bn?p2wk$N9rzZ#=_bj480A_t^X~>ghVNCY-Kl0G$ZsHFa0z9Q8)(J$sGz zYcxx@%gRS^w|Sx4vTSs|PSXeI*t%=D&TY)0C}Gu?qQ1llYBAfd(NZBEZMQZb!YoVa!hL8|DrgW zt&{0qGqvkgUtl7aR@R&Cje+&-K<-?1-84Im&;Cxu7mY^j7Oq-}6|Uf92HnbbRk6P?p({}{hHHBhw1 z_sA^wf|Gqx7c>ntXoI~~&Y@y`5GXRGi!Q>vYM$mBE&K##5sQhX3s*2X!?AtMwSzG@ zY%1<9JvmzAN|zlxo_rJ}?Rar2B;)c&8!d2@$ZUo{5#_A1$rPtfQt>+mmbOm&zZV6p za(s0{B~CbL;m%|21mtg&i~EXiLPhQOKw_EXSyJDG_4UVS5mxQYw5*}6c9YwJgtI_| zX#W;-#Rflas5*^f1c68Km4DZF0~RwHDVKVGn;BLUia!#Q&@Na(&vkfK?Q1b+3)9u` zN4|C#a|-^!c&BkQyga46t?|PfWf#8T%s;?=wTW?7*RM|gZY)_kRLY*x@+kKs`+$&t zH0{(_w;31EqW^5d9g~vku0GT0dD4%&`Tk=tC4mJ%0|gs4A>bt2{*3d5*rffvc-KEl z34-P1E-E|z@Z_4*p>XiPABGaaPAJGSYRvejBS(PWebcODySo^G6o;Uk&!qIN1#RK+ zD{K|>I8XW?EjFP0Zqybxmz3ktx{V30pz~rU#LlGJsctxVla+6C1zBx41RP(;xL5x) z@pJfI1b4Jn>?VLD63as!hF~%iJRk7*&)rN%a`9xohfaJ(Dn(!U0U4+$M=4(TYm?7)}N?PAcaACy?^SH8m1cmi-N-FcZjarx!effOTYZ}-JP8MDZ=2tJF9$6 z$B@oaPg19puAcsN-Z8&zAnMD8tjftsix_ASHe&NE5ozWd^6&lkZ=xSr z&2$&qvn_-v5fZ(Oz4RTCii%9}%Vyw!FPU)>gD`nE(EHTm>SzVS=)B&aDg!HnHb>!R z$;bOtEQFMOb*Ndwu0%HxN>qRKlc_m?4v8R~85CmeePj*)x^NZEsNc~}H{-}?!hR52dZt2RoCd(jle*6B(0Hs{FP2pE4ml}gUIcsLm=&PSi8VE{rN{r@Eh+v znteFX#HXEv&s3)Df0}4ckfY8z;*!jn<2f zvXvz|7?%PDhX+(tl~_e<@eMo6_a6o8JTb~uz)tAwd=Rr&AT%(p-I*n)VRD`ZKen7dIKBUYVbCX1>q1;o(a_-j&2cJ-n| zeU9cSTdYB&&TJ8G}6a=L*eLz;15XHQlxXXZyB0W#p0RM1!}8o zSMuxYQ6;S8oe5?x0!a3^^>EgH68d*uZMHn{;{)<94?I=W`7b9o0?M zC^<)M7Co1dh93w73PcvH=)niGOxMgM3@b_8YdGp4Qmf)#S9k?P#2j#N6dQKZ?EwL) zkDexS*nrfb*cl{7zRG<@Pbt7aESim20&WMNWb|sA=_h@EZ75fXGoj#J!_2PAiRBfX+S3}&eInOgmM6%7+%112ArXFF&J_lOq(8%@lpXlw~4i-%dAa@ttgXdyA+Q7QA#347o0DuZhGgb#>fnP0i!P0wVEq|05L%sltQ69 z@U}dB>|g;1S=Qe;CNdX%jHuob45?oRWqt8QWJ4orDTw8GAxxcyAxnZKDq1pZwP);Y^ zlt~)x$6c)g_zE-fYmMergX^K=#0?9^BZ79?$m>NXtPUJ%uOP4Qm0*36<7&k4ncN^$ zMuQF>1j@w=Z(CL=d)vU65~lyE9+!>OpaRo$I8q96H{f*+{Li?BHx=Lox=J%s8a61P zK!e$7QA>#}Q**LSK^0vw`W2@pa+ZU%T%uk5sh1le6l?pR@R;28BwX)qW40m<*PuP* z?4IfBBZJa~MnH%LP6l57Gu?Xk-cX%?2A#qe&BHsatO*m}^2LN&zX5gP`aA8IJd@oP ze*Ut%Hw7Bfa7@OQQN5C^WeBGkt2&Iwr)Z5S7dOQev--57;AyoIOSwUgSSh%t%eNo? zXVnfzv>YdM)RQty^K00Byh&MfYGW{-$BR^Fz1rsMx1vP|%T2h)`i2SF8h*N+oME6F z2OTBOThWhZ(3&$fWmU!w1+d){s2|XC4@z6>H4UA%nBu#bG<3(>OJrIQ#s(=-uLomS z$Y-&~qxls)6Y#`(LgS~z&h>uj;90u08Mb`Tv_6~*NY2E@O5(eM@Xv2HRW7&6wp&5+ zR$)H;$vu1*f|Cb&e=rP){nC0BoxvEt0~icues{KY34q80m6StKA>j}gY+BCEXuKmG#KJ3#ZeLsTx3_P>*3HUlX1xa9Gj zI@pHtkf6>+jUSu@V~`3Q);G@_nQKF99}OJj!Rs@KS%X+@ijpbFjMaevKvywj_>w~lS1f^1ux@W>FuLmt|~_% zESxVlq@a+Fo6g2H1`u8B+Uc%!8+RJTcv6{UzF5f2BvfEw@RL}%N+UyR#L7)v#b>=( zXi*dq6vE|-L*SB}mCmeSY~vXvIi$!3H`A zja2E>X%C)t61o3{sn%s#39C?TZ}pt$PQjiJnMH@=-f*4!(FN}NZQEN8w|+E0Br6SH zPQq_YDaP5=o}=H(IzoJch`j`zf&0&$4#nkmnco5e&uhEOnkQt&>wc?+dNpCqa|mr} z4eN~!b5{tQPlKOdnw!eL2>I46Q_;o0D+4>0W0@?xYqv;`FoG5u4RU@&zu zRyIq+_BWqog4-RaLC;j7n+4Tz14<0`@KZgjLEERwtD2dW%bt^?qPN7bAF-)tJ#K@s z?KUK8+9umooyQ8=NTSr88=Xt;PZ|b!KI7;do;?zq<=j&MHnhat0|*&TGfP=kEQYJ_ zV@bz`D+?=VdBRFnQ$~Hh_ABioG;@f9mRDK-d+1F_h&KuGr`xbSKHI_BhW|^&Zk`#bH2F6en_2DTdac$ZM8yQKE_~mXJsv@oh^(xvo+?!Q z%v%(|eLxOx8aIRgwYx6ErC32}rP4a(TZauA?LJ(}MTN8aDKgaz$iBhNE&T#FtqUzF z`QmAb?xk{#UbKU$b%Og+oZ%!9k8YK!H@WxZ^q$$l2EU7#vwgES=Bi1kZ(ALX7!UMj zLmo*w0(7XZ6tL}5+RgfN0RFjgx0GNh@KqS$_o4T*Wz)qlBh~*_>vy1gDO^GWeGg(eV+F^g0=q-1Uea5Fxhj0gayOz&N!tKg<($P9+IbL#Y79+? 
zpy;{jN_s6f*1gSefcTl`DSlH3FrYH#XK(2yW(AV%)ul}5av!n& z0#Q!>``$_6EVD@M*?c7q^2$H+Ciu`Z))er~1Z3mCFxX&B*TSwRFQaHafaAnle5(8# zL!a@3PebC#qb#-S)eO2Rx>iOE-D&o0GbuT2tQa5{L9EcZ`0`>xP6t=e#U@z@hJC99 zTu8pqf`CS3GHXBvc_$uE#m$adPsaX_F|orMw4525_GnH4ZRt}n7E-O4SfU?zW;2-GP91*p*ij;|SL4??gn?i0E z#FIck(24kKeW`b;>`xKF@NUZ{=Ek=}ekixdoiBzXzJ|NRop##mAK%J0<_R=+-`TOh zRUO&2uQUA&seC=4L}oNuuS z&|))(i>lG-j&uy-poNQ2zBeXvD*-wcVYYMjfzojNau=-D6)~y@g8jHt^mL$U@VKmR zX%UxmsFzkbgFgH|I0haMuykLjlbkLfjnU6ML~P+3@f*5`#%ek1XcO=Pihs@s;K~^2 zi9s4cj_l5_)?(|zw0Ua2N~kX+f~ZJtl7|Kur);T4DTis$=D6M);7!5d%T$u_h{p)- zX+uEg)OZ2FAQlmnrvKA1iq9=ciS0I&wIoz{uD|4>l5A|CN?(oC$IYe6ei}IuDtg|p zM8y6Srt>~j$m6!S@OCUh8(%5yF0I3A1S=b*YW(*phmOY;FMQk(;fu}P{f=c5KQs6PWLih@O2{H zH5Q7NV+s^pVGCmi(2R){;jmsZK)g!nGMM6Z*ciz4CH{NnCVBCsLobv-EgR?lX@aws zk$RRQ33c0H`GeoBE&&Zj-^D5&gvL8Kj<4f-8s@>Ivf#@=k zOuGGBklI;+>&i@E#W($pgFD9&g9d#NdlvLZv`+KTjhM3}VILQ=ikBcr9*>lhZ#2sA zW^B&4X*XJ75eN=b5@4zLq9kg$7-pRtR+~sm-EYc*NSjGvTmEO8dTo64F$=P-&$`uCX9p~f-A?u%pR6imno zQHM#S2ZIvzW$pQPwg37d;5x}LkbDWWp=5R7Ph|E@?k;N>*O+ zO#N6CcJKe;>YR3jVW1`3wr$(CZQHi{YumPM+qP}nwms)u%w+Pv#Ln8O)T+74c%LeQ zMlGBikVxP#(;yMO_G8hz-B^%y^`E8YA?n$Dewg`wcurni@TKr1{XN&<6OYLpA@yOa^iY=$c0S7k`dmg6S&hB3O211Vy@X1BU>LO z1>ocbtcd~9ENHHQZA8zmKj#1FN;f$2W})eZAo6ImoT3kh&xTHUUTB9U*gnnaiOSYK zyzNd*iI!kqv%C6K%vFCDhUV&GHsw7a5jt&}6g|51m85LIZB6H_aa(KoyYx(~e{ZVU z=V%>XmT~5$^3u;tDDvIk?=E(BJP#}01&`5$;iE)=NMu;UBY|Wc<0AoaWioH%kZ&u~ zBN1+KJiQmF5tsVng1Y&5hxGe}%pv(3oxOZ-28Ui1@TK#|DUckrJvT!siOQd1k|Az(l-uwE7fRUB23^%JaktYCqqfn1u_PAZ$_FKV{t@|)vrXp6 zcYeaNb2@oE7R>U280JbM#6wjDlfyoUw!5^?FT=&aMS4O}R zd6)^+C%AX6e9I*uFZ12StnafCy;`wM_k{oIpsj$DO|=x6RE3%^Fq}<@l?z zBo2wH8H?aGv2xX@0Mx@bjkCTPIQimhQG?ss`PM;WX@|#okcXij1HQ4bH@r*3Uijs( zvyleKbiB|TNQT$TU_)FlJ_XXyC-^NSg?2Gsx;b(6B51KK=&hpQyaIE+tL|A4MYp(V z8oh^mv}l+EK?sv?bk>?8#ydDdW@LNshKx(>u;uQPN-}u|e!#i*Zz-G+2IGq9P6&+=#t^zDz zjI^3xmZIUrE~KCuDF(*v!}iPzJrhO4$MacK08-Rcved3Ef0nZD+X5>Lu2f)|q@by? zLyibGXQJz=OiPe3ds^GzQPy}x(hoXV+~E*HC|^phDrow-mtc1PH~3H@s#oYpFs39l zMaQF8kJ9q(zD%#D#=Vz@P?~oNH4n(IDWNgDE@u^=?b6c8`(t`o*Hs{j^#xA{7F(Us z7>gWB+Y&N9tQfIdGF+H;>|A{*mZ2gqyP+HAdH^$~0! z8iw^lD<%TP0l}T!$o_AI*+W9D{1?^M+gVXlCRg-M8BW9 za@uYixwnt!$0c9tG)Sc4L6yco`~rmBx8Htn_Y6@p$gRC#+=1N`{9+$+Icr%YcmG2~ zTgKnWf4na!Y8ee5gkpRQ@-Wb7z#IM${fU544V=YvJU97pCu2^w#-5+CF&nC9YLvQa zdE?zbXD4lpi`ix$JF}o{Hll5Zg%^IhJccsfuCO`pmU%pDAMq2js$Z&YOX*BcRuMDx zbR)DgbM=W%HA)c%hCqomd=s3{Q(OCk>eU5nkj)sVG!!usDOQBM9?;%8M!vZCuu6q~ zb-H~10Bdl>xCtmq9glON!e%B~m+z!#w(laHo<_EF@LKPWB0N*%_K&8id0+iB7TA97 zKD~8O^RDF`KdwI1=WHPST%0>mH4&S&-SGyFmr9Z~sN!jS@{vR}YJTkaJq&NG44CczL#^pP_8=bw?}fTBg@2vP#%bC9SH8V?c@nE%TBT zsHP5^JCF<=G&MTyr}+~klBh;!wm#xGRW3?N}O zlLtpA4wFrrWR1nKy+O(j(FwUtfwtC@8mXf0KV#dNnoIZSZX*bj6!{Kcj3Kbq9JL7M z8bt+A{fP#sRO>7Rizeb}viee#Ak^iTb7-Wk0Uo2YL0PODiNSBlK?K3?$ywxfm=xj+ zo!4wf@cBxH+C~c@_gc+gS!~t_8HD?@0`E!BqgD<*p@Kx6iMF&If{pnE)#kbo80G?l z9&&g{zxL5S!kp=Q=8cMA2LhGouB9hGw-n2WW3ZImXZ7wyGScjs4V2_+dk!W;+amQG zvl7tUk@MDfETFfS_jl%5+_71Dsu(_B_ zLzQLw4=ccg$b6?1XG7bU%Z>g&5UMo@n~>tI`_*~4+UMjTeBamv?sy*0%P=L zrErdzcb}_dE}z4;ug#ROiyVO0yhPCs6%J}g8{funxjb)0=kdP3dN3fh4P%ML+qrQ@ zmz<>fAe&A)K!4n^VJq3*1*aLEMAyp}r>~j-< z9fIu|0F`CeN_(nMsY%?j2Yz%aH|)}XB6v-;oLg3%E}!_t@u@eB>B^+E9k>I};%E#5 z7edkv@2Vk)16@T+Z&J2-{f0JVNHf$P!m11vhfsyiS6(IY@x+L!MGv9#daOqUNw^uq z5`V1XEc$hN;m{d!aMF>B9%*|m)bNAi9qZ&dM$4*sAC2#BjYU+9*KS>C0IIY;4^cU2 zMMr~@>VyHq3xURUg6PuN^@wuPgZxoY8Rv*8rv{kVN?02-kb9wY4tSkgONV{QVWM?! 
z3#$qB+Xb4+>JVEfwSRk*Ov!#%R3qK_S4Mko4vJK(RX`UVG2%Cx)wjVOTK#)5@{<7X z_ZJe(xvv{QZ?o~FPD#^##9;rg(6j{U{Tw1oo66p(+EkW__Q)-lSNd?Z9xTTgL8hOL zJA48s6iN2tM5ie?F<(lUGu0H`4DKt>ZC`+8*1eKLYpo?3P$m*A&N$%!-K6vP8;PRt z0U=e{m6alkD1fk0$0LzzV`8u9vm-aG!H2V+mlg%u6vO}yfyq~##7Y12Y#+Ut${q}P z&9~kpRm7trlT578BE>PBqY&!&(<5U0Rvb~ZJxGIoBea|RTe0bo=6gJKxLym?Xx6VX z7C7~Ey1#x&0Zx;wpag}*WG_p=w&1jGzH9G!5po8T~Lj&j$OPQ+9 zL;SMfTxs}dlcA0W*SSz0iPd-!@0d)JcHZ~sZ~Hc2jD{5xyt;plYdC`6?5@Dag|#i1 zu;SNE!AGWVljZMoNO?73>J5J3jIrT?Nokno^$as274H;X)XSLwm{N`fkSP)qEfZ+slmi#ob-L4 z`1w8{yV;$UW#Pn34)9RL2{gnnl!Mcr+VGu&?2KS^pS$nIRoH8|dBG*jgK0}}UzA;q z)URoz8wUS+tA^EIV{MlvzpB2^ZAK>#g6sdCBH_TNWDXT1qfg$-6gLKrA7KVi80l(~ zLtxL|F9$yW0=dQmD%WQv*#tOOcd7OzqA}oM_LCj~6PzJ(5f#;M8)0mvU-zxX9caV-Ow-tN{LVlxS&q#FrKI{=-^^|@eZp}CSS8k#reuX`45 zlX0>nQdQ^6lm*W&wy<0&mqWsx7&FZtAp!N%p8ma;A&f$dNiMW4C$iwxE(yN?$tsQl zP2^Z}Tnc4Np@o!~=&bIyUKxhg`&*^vtrtlIM*PCMVLn~sxZ^Q6bx0S~aAgE}I!uA& z41A?)(*{RyxDx=?w%}C^?@31md(eHWQ)U82=x)c*AvHV7JQ$1MD4xe#12wgkQ6z`u z1s4tKx;9!9TLiGx^z5%`!}$H2%jLh-Vq)z*hR~1I;g%(@O-4a6%UJI9$&6mb-J>Gg zid4BcyluH5l`JKcIfYu@Wt3(%$M?H)MBKInJz3fOc;4qtPCT&)S#xf_cGP7U>xXZ( z9lzhXpPeYabk_Ieh&IQDwy;+@@;u%V!a~2yJ`_Q_{sOt{5AR~Kq9J`j<2kHbj__Z{ zlMMzl85nZ^4n_OUAjUsO^)E`>rA4{x&k4A%0N9)CeCU!b$K{`JPy@p92rn3_W{Hr@#j&R>Ph%swwd4|*50fF4j4%im(<&|3+2+>;T~*MQw2e_IB4q!d!8KnWR9`` zAoca0`<@Cvj}0kxu<~kQu1j=1T09n@x?n7(NySkFd;UgdOfzN=12HRAC{#aDT3jm& zfXF!Xy+Y++7b^9?c^Fe>ig1)obAYqnQ`eR_?JcAi^NP1MHVh3Gkftd){9NzDqgB@2 z*>Ohca>>wa!R)qT=-bQdxHAeM4V?I?NI=qFPvyO|q6~4%I(;!S=g;ueL zO>DhdUpgRt^y_uFOMRMdfCsEX1p zm4ZaFgd|#g$j7m@_0lrTgS4pTe@i-ro8P=|FfByl@VZ(H2kXq~O?Y=$Loza4Gpeg< zpx9M0O3_G~-RIV3)A-+Z=qoP`i4k&?=d4S}N+ZhRh;pNqshN^y%6GdpXP*V~@{*42 zAhJSyt*OHqo*z&W8n8Y@OlJ`^+dw zAv-wxRGe{nbrE+v?_q{P>Q5>fTmdW;i5H&<#4oyUC$``j%?b?a`dyNE22FFK+vPcg zUd#Fdd1(Eu+0h|uiNsRJbeAu5kuGM;`IDv80VN&nz#D@w=0vIuGlS*^rGdX^G1=n( z&tD#v?J=I(lnji}9|{^jN3_uRUJY!*_SSjN%Y0Eyb8B?3zq~>ckn`I8u|&e{+rKFL2_9uVYB;sQ5wc+IIM3O1LxVEjGMC_H!UZydPqOs zlF_$iXN>${j$5BG+=)Jl#cE+3V*EQ|4KU4xVc9Fep!3Cr+cqtmAmtdH7MCA>mb;G1 zlH|itf@FHzLVs@~l8sKmbV+kc3$$#vkl!im3?lhd8L3t?O;$V;4Ml3E`U4Cko6bK- zT%zA+2x*fb=-RE5QF7r=avX?CDAajoalJ{m#rL9{*Msp57O?19)()VCN6%a0jhJ5FNP~FT3`2cPxG-DwaK;-rfQCB03#)Y}G+3EHdnV)@ZrGq|Qt52KHPLso#UxTaQf;zX%~ z#=|eGA&_OWgRfmIW&0^D+Cu=W7Wphkcf9L!7ojzYC;;~N8tLxl$>j6)9b&r({I9w% z|DLg2rXLEbz>|;#z#VgxP|{`a?f6ia*;i^p3C6+SI=74EVZdeD2l!x@q54?F5=5ya z+4mIzuXD7i7-G*V&lMuqQ;{>ZoJ0iEhwwN9g zugDZLg1?kbODn`WzFkA{orEO5{*XC{@~c23{L6uDsv=2>^e|2(Dl;f~qo5;bzp*Xtqpd<71L&myh za^qee7HSr^Te&hzkAu_(^hm^t zh}oOS7Oxj^;ggF(lBsRzS6}9}kej@9tnBfagm&MI-`WJ0F4_376<4$;Qx^z}nn;|E zT}P0`vTY`(=hMk3yh{#`k=~~NeuuJbnU@6-K?ul}&^m6&7xZvfo9lLqb4572j~kc} z4GU0fXam0e8^+oD%x;W^i=jFwe_(I6G`}tS9ZKu2nS|khr55g-gU3C_5ZB^+~aruFLahRZBWFW7j)8?78} zyzLU;KhpE{zbkj{=_ylCAl|5?xuY8}ALT!R01r&q7N~tUDL;aeOMa)_KM$h-W1&BM zq2Ohr2~ES%DX0#E?8j32WQXHP{$!hk3s$P=f}1hbgN(%tibn+NzoL4(#`4YGKp*xx z1>5d6zEsNsj_Nc$0?l7}qKTBMksm9$Vc%$2mn5g7+vZX<-cm`+&KVWQ?%F>RxrW0~ zY)jqk-GilGvGCP{vyAKRtLJO`7HGDaRGKa=-@1^2O3k57?Blp$sSzC5#!G(>h{Wq0rb5Qva<#*GX7 zXj*i3pZ;PrxI`stO~!Q?6=864x{&M4+HskyPdT=a_AtXYrVCDS@sbTd+qO+|w z*w{Ex+j1g7DFp22B|?}Ra)yI@T%OkEENEw!Qg9?U&cn$@iAk>i+w0btm!)-0*tfsJE?Ca9IF9I73h8riS-H=cSF8 zb909X9JKXJxDexXzGqJTjHuL`X}Gk)(k2!#8K2_XjRb!@Lr#j&{$mb%D*{`(LY)3c zS~zs)P1H$iR`5Ub#tosu)#i9-1F&!3kcO|V)Bc(6V4f2*oPpQo_p(}I9nC$ho!9Aw z1Uf#SPi7r~4QDyFDRT1^l=189pJF&nC!OE`7DX?)0U%i(ESjpYLJ%Uz*%T2RM5j-| zLuZ0`PL)Ce)qwgc3g0Xlt`^tjN9Hjb=<~F_lGEzvg~sZMZ~P3BWd82CTOMRWW0v)A-Z4tv!Fp^U3l&zOtvAmO6kCdaj=U=r`@O+=drZo4iY zOhp;sttkH`3m&k6N#=!Xrhy<6|3=Ar7x7fEYV{FmxF(!S6TVqHm_~qLb9T}}?Y=K} 
zaB9%z+GL9sr8sTf@!o2z1)keW*XofY-FSjUytr2v#IIJ~qyFj*I-zvc^ckxVjrQTd zcQimXvL?~SZVleG@10*`w*!k?uG0R{B1E}2MlCqsA*Op)V$Ktd9r?kaG!KM8kcZx4 z451%m3~1{O%qJCWBtU&k{jN*ZyWWc~L&Scghz?DuY9L??i^Kj{cz85JiW7AR&0?yClAUU5g*P~|i1lv3>KAQqVyJopoHZ~OL1gE2}SbH^L``G2tO(scT!J;bUxcJJaqt(eWbUtM)Vu~sMNv5&ntqSr>;9vsPmCg zyfq^7MC$5*#&(y3dEd(|$O%|%p~;|T74Q6{&^JIwm(CB2mABoZMQe1RO;}<3aBMhx z21U{U^-qhD3Vmwlds#>&YXyaj)C&*>h=|&8tg4t7s_z=&zHG z(phTA+0l2bt@ibal;JK-qIu=n$1MeR__^O^CGxi*?~nQl9=rDpyH0Y$Lvgd71bi_t zzqVW>>8nZIVsYb!{SuE~Nb3IF+x=E$g*Wfoe#jJRE!^bzp?>$5 zy^K$Unc2et5fcjRr6W|Q>Cqz4%5f9TNDH)t13h|xnv9RyUF)aLIA%f#HwU5A;u`*t zc&?Z7v3JE7km(Mx8pNNc`Pa|sx%68BcB#Ndu&3u<_i1)1>F|P>Oa+Kc?i^|I)0Aju zH26*IOvA?lx?F+q-j1GO_R#wCe^-ge+frKZ;?y9LIcaw3CNgyt1ecU*$`kj8UvZe- zxMwhe5{68uc3wDol(-#9j-8nZy)erlUTKIt`-SgkWnD&T!A>sCSgz<3!m7gteQd5z zxhBswLH?rhtT0m8KOCt*YzOnMGMcK{*5R{x7ULn9;mK35xL;8TYu$C+UJ`9O_cjJ6 zKIWx1IN#E>@9d)^ey0Ezn>BADYDCtcYd%}9?jEav9|`2wlvk`$tp!t z6YF83Y3P)+rV-H^StimMM(*9@`bP;hDh+GC!3>(q3EK5O1zfPZ?Do>{XUOQBF+y)|)1ur|?oI&wtXC)aLJOH2R*&W-$Z@-P=I~S@;Xaj;DH`@=PWO86luH_duo)&EkGh#7@&Y-w%~UI}`LDOC`O3c#%qtO+$v0 z$*I+4Z`%goJ8sgE=hmtOc4I%sJ!E-57UC|(q0L()NIy*Ajl{6Vd(me$D+5;8Z4a%w zkMd%Fa!#VPi6@%8G*B4b#GpO50qh5WM}H~2$KdU6XWL=+|6Zyyh2n@MidREPO^AH( z_4$nzYHO|!EUlhnNKO&2DXH|JF^;4SkRgwW?o!}+G- zt(XPJiIOp^r1sDPB#m08qtyL|EkFNEVUZ}vXOWd~Rs&yMJ(6*{-N<)~Cs743LmmfF zKjb>1s5ygLS7`0TDc5+#(vN|}aV;@Y01-|QTKx3FW>Dw-_8tGh#wtmn{@9iUT+hmE z1ek+XQ2;2vM>k!&BWA?RRJ?8NsGap@yyWau6E02P&Ayp4o|LmG@3-ZNP$gKlj*1*3@QihvF06n`Z4yJ~Q%Qs&WYt4KMhyyt?q-j1$=H~lTD=qh zkm2CW?GTWt;x(`QS^+!R8GbrQ7bAgp89%}0Et49#p~@j8JNkz}o)h8?$?Oy%M<)g5tGfx07(oFsm!g8Qg5bB%u~mJLwr|TKV@lALOM5vKu^yt z35=}X;S4wYPiQfjTV8M^m0%Fwa+Zrsd2tsDAmonfZ#e2 zS~d!-`$w)}5&ot(Kv(jf$;C%zA{6QxF1aZve(p@!4X#h2sTsoC)C+oLl|}3m~e3-1G1oi6wTK8=dW!Gu_SXkZyfL zfmv5K0l($6eZc~S460-N=9z5owNd%TR39{!muEDRIc4 zo)yhpv^MMVXQN3|g#fhrWMwxcpJx1g(sT#jN;=xp;2O|*>7lyPSv^|iJ5JKH+eTg+ zo=~wiU4+lhc(zy3b-DVvR1Ep)(8c(}o=l^ygV zVF%YC(eJo)$@~gQiKR|$aoZlL(OawZh9ey@A|?7v+rpSL9)dj|$TGC|tyX{3Mqa6F z|0{b3OVMJ4=M;(dE7Qke%ZWix$0n1`HmV~Y9|h}Wb1r2*uyi%|zK*MLmF8yw1wNQhFv|)vosi$J zO!e0{dHff|+~Dc>FQv44@9hgH`U7i)I`D|r`#Qk1)V6?3>l?-yIiQt^c-vdy6Ua3* zsUV&@zIHcaWG;)uqxo#86`_QL9LG0)A_xDeXo3nw3tjew;^oM|2u#rWoz*uONO0^M zN9uyk>@{n*q+?{@ZRO+#{`Ve@Z&sbJDrbz%VU_Ujx>1PurFT!t(OhI{AxGJsTweJO z#H4O*0%2upa%0w@Ic2Jiw6Xk;HSQnn3kpmFmJRHlB!8Ha;E4I}kT|*oxTUb1jieCs zEJ~p^!I46_NF%KX6qR!gK1TXGtOWQ0fHBfjZzrokC<=ou2kyB}?JPJ>p}GIDja&z| z-6giZ3W#W0v4=c@I$mv~a1Gwq`ujbiX;8%o`T+On`d)w)RWLfjMFOIB!(C;nE>igi|KZF-kIiS#YTLrZDobygl z`~I&5!+_m~A~a`FAVj7@vl1Nxn;3Sw zl?2zPanAIRFqq}%E90E>z=-7|3DH7oO4)st#%pv(xs;zI*m&~0LD$cMe9A`qn*0Vj zQ6(p94cE-+M{Uh1Vf(zX#+L1()TG1N>O^7C`wJ zfILBemN@R-?X-?D$;5lS>c)A{qbrYwwI6(V@N~Di zt8X@-L+^w|KBg{JO56o!$>R-vx#h~Y0?Q{?&I)igR?Fwdj(3oj{vX#alz6AX1>F?uvtOwvhM}C`Ccfn#id*ntD`Mzjs5&d`v)?k#{5_)5s8I8mduqZ zEY{`809^S#gro?!nI$cw(-N*PPSda6s=7D=lN^7fYPBMiFAuX)j-hstT&tG@HHkk* zvJ^BVz0L8x$=@hpBOwRw{J|JWJ_M^w>T?-MY5B894((P~%O1(KhgdgkBl8GH*)T0LqW2C{SlA zqy1NWNL&U7Bs#fTuHDD0Dvp)Z(^`p3speUHc6Ns)D%vb2P7}<56b;^*m&5WD`yf^M zkD;2t7fB#DdSR+Hlx+G~0E$8Y*C6r>0+!g6nP{gm9|6{u2YJpUUDsf} zEo-k8|9Jf8YCJ(tkD44AS99x~_`ZI?;-C$QR5mtnk}XmZ^@SIyIoGiGH#Ec&@hBc( za&%DbNsrH*2zlHng1!AX-0McS`TTAiHQ_D!SNotxfEPq71bU4t>A#i%9duqTye=x| zPYTrP-^L#Jk!JTlf6eg!5mtY1DV4g2bskUNCqb7 zUMTvUk5BmHDD37jyrFngJ@u2ZKZ2~Gv-|t12s$>rs~^#Xrxdz*`0pf9ZtO;VFwqkl zFpqNBkZdkSDM!31GIl}_N7+a~t)!mhk}exnCD|51?%&fnfdWTM4ZGM>-NlOpO8Cay z6H7u9C`a#S=evdR%}eI&9Hm6*@~=ZbAEQ$xfEK0jVc^S1fGC}GOHVJsi?+MtG69h% z2z}qTNYhgwyAc=Q92msEhJo4Yq#nvvk^qL~UTJtdTrn@7qKaH7YLf1ma8L$Xe7rQC zOxdaLiun`+w8Oq38$+jRjE$2gKCfymUsj80z%iRYx#`?{nRHvCGP* 
zv&1V*rW)az`?T{fi^Kqjfm$R=t`i9ARheYgR$0)U28kWJvt-Ks`@a9Naoip5w zI&xpuj{|rr91(TNY_|;nINIbLW|u`AjoADDu>uO?r7H0=ZwB$2JDM14U9mvI!py5@e;=&^W}SYHv@ltYNrOyD}mnj zFOxEpQ{VO?P62E3mvPNNQo9Saq;bYv?&%zt}ITqW`dC{4zII7zYdKH{lAx8v^c6Q-Y=`7qrwOF##2LcD9;?cI)Ef^F+|RMPe!@cjc_ zUnW*;Im%@(3A~&v@shJum?e%~DypsU!^vSdErpkMUlj%61%IV)f}VgEVy9bbx`?xv zDy;X`xjenWQr+~@7AG2pW&OTMwZ}MeQrTB_ACke5vR_2$&K$Z9w#wCoJsU{^{xv52 zoGyCdq4xp88ZBB^K;n zIy}-Yeam%~J8>qI{`C~AT~!Xrthr;iO(eWw8oET3*k zBJ4-MUNU~02+L#@h*nhqZO8)+!jffYKQBk-ulDy0m#ftd+c>fg-`3~_n30}~;5tRT zkzeNANsI*6dOjqv%rc?-zLh7yN}|Hn9bzQNNMRrd*+K*)<}V^ktY3sqpvh3s?(C$& zKyKQK8KFqac0gD5hl32zOW9BqHT%R+m>CieyM$ClbEHNsU%o^$d%oMeRsH3%T^oQj z^T{*HAsGdU~10{10|aE2$L%?U-Z(2CYDW8DV$WJeB=Mu zc-4?AStT!$0$0DY;nL^f^_A2ny8q>Q)D_y8>0Hqi75(;&>J&SP`TJ|Qz;W{5o#FQb?OJ?_zowBLSL{*Y49}V9c25Cb?;%2`na8($0_!HfUcARo?bHrfdYZ*vH$0SL5&m-e!>45aqWk zRK%d&5F|E(@9TfV)by&BT4LID+aq%;d#Tlf`h_tAlFCFl;C7BK7g$b*Z>5g;T^$Nsc_S72SOZU@`EG~1Yj}6*$E=^nTGL1Q* zibQ6G%b6{9m`E{fy;G|QsWrT6z2Z5*{Qc4~&@64moNlV<2tz$;EOka75o2+$``)I_ z%-KhZ2j{jBbPeCTi};qny_*i@z^=N*e}@~#|8`@U^N@jKjtw9Fv)HJt9U^~bV$TC$ zOn)iyrjkZTlY`C^;0YHOlSXTl5AjbFza#GUn}5Qhx2!dh4PaWZocr;haWBB}Je2^8e$TUKO(H;Yyp^zF)wr||F zV4Bjpf9RP2WDZklMH^Pdnt^TrDW)i-!MBk2%svDE)p(R&puf^Fvz$7T1b4}7C{MK* z*5A0f30X4!u9Biho9N-eQxNOLuNZX9D!quUQ`mm;7M)n#k-yta4E-r24iVRM`$vVK zrj=13sG2>JhCTc)P*WsTGq=rjtvy@*<~q&M@V-NrBUT&PGYH~ z%)>yG*VjfjqKj%M+Tve?exo=%qU&Pt84{HVkMfG>?u*Xe*#7enyeG=@jda3xt9PFt zejSIn9}pcG+qW_gcp*-|&?&L7X$Or!r%qOWL`Xh)WN%g%wHl>>`(D>zASxIjQ)=d2 z7BAP47G$#7gsIaK&Oc2@xfBehQx7E?G2o&YC|)ib zG8FCO0iZ=r1Y@x(rH`O0!1d)Tmax*#GUyt}Va~@s5NWfllXfA2)GMl0+V1fzgb5a^ zLVFmO)3EnpKI?@%~^qAA6U_Rlax)?w(hS^1Mfr<_G7pe zRUUR5II1NGqpITAxPyD?-Ue{A{OV;A7u9O;mXC`W$7!_De^EI|spx-UbaC9YRQQC0 zFz;}7gRN==^w2};aT@D~{-2Yo^5LLW?zm*+FQ&wz(*bS?6FmbUvsP-h(DyG!mZHK7 z&*nBCHE_f@fP>t6j+M7>jM8Ky`>UPz6ByCbFsuddV%=(67=+*=-NL?o4zQj5R zEP1FbKQrdFMGgUL8wiL2Qfh&pO}qy2AliRZY9uA}abPPFd#dxfw6;9C5LeL!G0n|4 zJ)^6{ihW)U9tK1@JghEQX+ z3fzP&KrqqZXsw_>$4w{^>+e~9q?&YeqU{5=6fiTFByPJY9a_b?z0&g4%1WkiUL>%+ zs070FHuOvu+XHV;kPpPV)Mt5Nw~33Uhx`5&#n4LBOXV_HX54-FGzkCD*xdq6@cdHu z=eoJ{7z6y*fPsZeKYY9UViZu<;i9V_B#~hLd==eMGT}_8UPVAlFU8E?o5~kQt1Wr( zhov`iTPA;S0#*D3mFg*Pn{?BBMS(;JlY^$IJ46{SPO0X&gW1thnT@dDKB$Ye47Gr~ z94+_CR^kT%V4C2-7u^NQtv_Q^pKdW?k)?$S`|5!gQ-sY@g~|An4`ElHUc1>c5Se1A zX*h{4`{-&R^~rMJi1gOTSD-WqdQ5LX5D*p4@)9n5CU7EP;WTT`>;QIghAGG&>mn=X z**-;D^d)Kl*X=e-+Y1@C6Xvoi%nZ&4Z6xOr*i+-0>sRhZ97T+-0|>lM zOZZ+=cUzJ5%ed67=n8L#HWBU3JZqybN%|$s!c}%@LDzW9ZGw}#Q0=V*VRjY^=TKR| zd0HuRNH$|wMeY-l{;~pk=`pRZ?XJJmZHd&D=N8=El4EIK_WPQ4kt8f==CVad=I^oB zg25+_CVinh(@B(dT>kGEpuqCV6PZgl721qeHAEc_{O1biyb9l$cw%u4hrRp*%;Fi0 zNaUp)(HLbGbhC(%3cDwuge198x?b~9`RvaZ164)=kg%mCRngMNrKuOV)4ze6rLBnN)+99Ub>~#minSJ*E|seBl4-A!(r@uT8&M_Y;O4eY+#n_9PJ;H= z{dEb^&lT~#!}`S%;E4ETCEHZXR{B-*mxc1-4D7(R~?*RVWR8k*H$OuzR7Q@sx6@4HkA^<#NTk2 z_pULlF4qbwmQ%N=G!8wj2X{Hq~vq^r%40gZTf^KaD3q{o-elMqNt_PV%XW}vQ4o3{CI zFJ+r1Y-R6s5*JdGSoEr|a(XC2#w@xx|H^*!hW>4qzS{d&-lP-K`d6MyrL*?mO$O z&ut$f-eg+wSh!-_1n2ica#7*!f#Qv1m8oc(9OyBF-k~-!;#$X)?Nep1#$;YXD*9hd z3j^DBf4&EwMylsUWw*XB&QLTgXB!l%b7uuSOtBPodkgS1$F!PF^jrC3F;O=uzj^G9R^77(!!kQa+q>)0VV3?U!ke7THrNFau$ ze%{L^74YJDMc&iFBrSZ2d4RD;$T++O5Y2`@$vTs>*kNKO%89nCzW;MG`OH*0XF%<$ zU3es0oh&gwIo;=|kWRY$wWxddHtH2%P~=>@$h)qmDc3hE1Q`#;S1oqxY=qnsi@6U{ zdAzYWHrvv1VG`>P(qbms!B4GNc}9Io?CII8xK+CedQdXETx#tsG9{Z-p^_RW7R%q1 z(7}wyyb6Bh6(qLTI;n?IP5^@`)z5u(T-(CJDfoc6)J$x=Yk@UOF1`$1e>6*A7A7A~ime8xaG%ZAp-=&QnNG$^)o*QygodroILx~>kyE#x`T}P3g@^%tP*QZ}9?K&%D^962vj~#xa6Y#<&?j!{x z&+w`@-Pb5IfMqTsQP}`ks^*>w^RFumJ^hs(70j=4bB5B2{`D4%vx{$9_(SFfu9D(4 zIT4QTL>UW7ucJ!#LX1NL`Vz0UZl*$&+`yym*4?z(KJt#0IeASWjSwc6 
zT%c-szP~?KV;G#JIT8G!N>o7}q`YX)@HXfZ8OX-U&TO|4MXY&b^k&k^mNIjbJ=*~M z9%n`a)&H_8RJ_f3iN2ahvNt_oC^?*$C}fXa>W`Zb8~PU5 zRLbFrSe6ac%G7fT&a!lAi0@~XM0>J{Z!W=K^JR^%RuTD{znU-I^w*f*$*bZ5`s+mV zAd8qp^+b0k_x9q6&spC7^S#4$KH$b=oSe0D_;tV8&LU?F<409feLx|wVkHD`AFSED zSbQF=Fo1?ROkYtcsphxbVm+XKoKCrFW3f2NF%^FBF?O)lM;z zxhT~#TaS>$_q7f&^pmp$-KWJcQ=-ku(8=|~2dxxLb8Y+Nd}!*ZXdwYrY?ObFo;`0aO`Mh>wX`!^QKBgkFcVB0wt_m0NxAH^%7}^~Z37w13 zRom2@!DI4Ng9Qo*ECgDEr^{;`x4VU3pE1neMpAq4l+k_T>48*E)m!5De*(K0MCb0k z*nJOVA=3aeqTfJ=M_9EGnThxoF!lL`gxm?@$|2ee4dq(Z@=JBm9TYgYOj~fL8C3V; zN?;w7SQ=IFH@tw0LsM_Ga+G)oN3f_|&+MYcegLq5i&+#luoFeXHFItN!tKMT{AAsR z#AqoIw+_N`ZhqWuXIv^L_IkhGn);1fhu+JSdWBSXQt-Pp@M_7m4G; zjiUSOv-Wm;Hwnajsi|&1+VoMlnPgdHkP&tawYjSbX?F{03K{H{e9dUBN-YO&w!y+hi)b?TL;==PB)-uW0`>1*E7wKrFJLd3 zO-NgxcXRKiB3X6S6{yRinJp^asBJKewh|pCA%s#KlXdN6M$;isW}+bX*B0!VVmiRk z7(i5v&%Om5G2#)i3??guC-(gPi<+?P1Q-Xad2IvuDWD@$9jRM42=gK69i?^qQ05*- zQj+&+Cfodq=%Doef@BHBpu#>AdFjy_=4|5HITEsTbz^%G$AHroQ{^NY@R?o4-Try4 zL*r&w)LOaK|9W6`d+WcvTd2txMvFntFx>sf%?o+dbz4PrNdo@@Z#iWcLl!bls2Qph zd`Q8a6hOp2TF_QGp?zU8ksj!$!^%7(CoxP>*Wg2~rk4-E4~VKBHo%5%Nz}K^`r9HG z(g?Y50^ge0_?VJ6{WOZBLW$y&Ia?zoJj9rtGQXBL(eq#H6MMK3Qrdbb`P3$-_m7%% z;BG-bGvW3MPNSM*l1$$7j`TZ8WUO(j0Jq^`g$>%RfH^cJub_>T&l1M7daac;EN!$S zeC?R@Q0)YPFGPvZ&FsBROoT)h(ctQ$1-biO=3Gh`!yc$2iqQw}%T;`NUU_vps;Lw# ze#c4mt)Z4D)be&r^SIHP#=fo%YrgAV^TB6z>%?O!y^VTCU6U7HMSUXVkPdN4TfbBl z&Ov$T1b%8zNCW1c_UIp$HjD1FUpX4=^Z)B2GabZijz58-6eL2q<>fqm_rBofNTk%z||<|RIU$9FIAhv{VywhdG9 zMFfW#J$IDH?t=$JG!7_Q+n9*&BG>%`>uE)BdjOlX*d;`Hcl)rgPcNE_yXnX<$7F6S zF_(>u^6Jb)#<#jIe)$Pw(zH~NU0tpk8C1pcC`>Z#8de%w6D7-YNi*1urqBr5?X(z3 z`KjHAn`nYBk1aZgnMX6E)~6+AoMj5nqscz(D-KSK#6|~Oc9+*_Kc}43JqOtw`48^ zC}ME{0R8S8^$9ozKr7VDs2{z>V@)xRA;8*M~1D7cT4x|J8yO>TfQ z8sQ`CQD@V~DXrFq%GP6ECR1;ImN7el!If$_#~QNs16kE;_eq6e&2oA?LubPjp|@id zdkz8}d}K$j;~g;ctz0t-6Oa_6c?5Bmi@}Y*Rd#)NCV2SZHK96I=ViHlB*j65&jROr zfT{gJhT0_AEpM^P_XR0TkChk7zFs*buf|}k8>T9&p}EmfLo7)dnR9FssVsotOl(8k zc-RV@w(uRLB+M(Mh**PqJU`tQK($Ni2c(%*KPnzXFk@Sav^CU;qQMKOi z$zH!J-r($Q82AB(R<^~9^_SbKq%xhc^sAHGOJdS;40#G8F-sU#5_s2@W#afl)=7`n z=4Sb_65*UH$HKUy2YKt{SWDafP?cl_{DBte@4JgO2lw{AobigmV{zY~hxOIf1vKb- z!@96zd$!h@yO|jg`zQ%F#-R>@!J4$(cK#9ZFhu=v+?~(v16zO82~ew8DnFhXvRO2xXRyBv>~IAZjUm{3;o_wjOJ~oG zK~O4%sFd>sL;Xxff=&kDQ3;tZ7mN?D5(`Vcz2?equP)X>oMN17^Us7v23uSb`>NCxM*|AUU3Cyy9x&LEcFSsOA`6PV%4MLi)1oOJF9$V zZsswS)6x=ljXzL}yA#+X;VC}xi~lI8V^nLc@U76bSt5%rrL8He6_ZB^4!Ar5LRG#n zE-!twjFx2MGD{AAw*sQ;3;I)0F$!O|$Bl7$R!%z3;@Y6{(Kz?-uRs(CBo{0$9>}>0 zG^S5FByeFg>Tv~|&e(sotpV3{3-SMb%;#@qV zBY-7+RP(By*#3Yk*bk&AO**RzG$)v6NogCXFmx%@N{NwSqI%4r6zINdRu%4q3(kh4 z&(GrI^j=v1Y+>ud-5wi6L!n)}JTJ5t%@|FsWBJNPfwf<;jSXPji>UINJFL74gD~+@xa=4n zENEHB>8x?wsjDrYU_4uqvw-6V?rJ>@qv#j8+DVOtGHhs54M)I|NG!QV!pMCnZq1z0a1eJag;~~LW zqzY(TWje(Z_(WBM!R#MGVsDvF3fW`PmEe_@bOKDM3vro8lfiT0Lg-#H6rS3f?M45c z00afpwC0fGH&8>~cY*kIyHt+YqPuu@dD&;lTECT{Zd5eP=aT zw_9wqUIhW7E74(*z~TC=+Q~smViIfDJp%woh5*z_eX@a9V`tVA_m>}XcJXHP4wuTH zJxrK1NaPV@sBEE67TDPbW)I^$?&iJMg-RO?Bm*6&kp`yqG-^Ew%fEF3iZoIyAAN(( z2Rws<*cd_rvQOhA3ivnWIZdJJ)nZz4L~vmCP`Is(=%&qXTa;L`?RggGP;X!*JoWjv zMwmUtV1t#ZHBaA>OPsTPmq>qr#~qj-c!lJAxjQB$zr=JC8AulxNA({ZRd>~{QC)74x+YFY_%S;2ykkEk z4{&JzF39hAAFwDZmEC<3B2(EdL_6gn--a5kD^4w5;*9Df&Gv^1(3`m}4L2b(Q)?vz z*edQjvtZqjd-&V#b)q(ff|Rp>(<01B?4l)#NlWmZ5=(7&)m~rWxsR7ke7CO~7Ahd7 z1XDDOH%+mR7KL-&ul(3aLn1XZp2CzFcz$PpDhuWHYjIu-s>DQ2W3I&Zbg}=Ev z{#w@tx@vv{@8J)$9a*ud=(|2VWN52#oN{Kl{TYd#81P`2Lf{pahw6UevY#BrKDU!_ zB)=c}m3@wD5R|>=jH@2SXgj`gspI%`wYsm|st+N`wg{633yeay4muIQZJ=PY7=0C7 z>?MmqDb;3ae~w&xq3hbJd~ik1xMfOah}bSUQ0d9Vg3ykaf$oM`&yD?N`U!Q$Eo5s= z8&3prA_bsL3pFuT`LEgv`!cB)uQE~5BKMQ4V+0prnbmAJZm1FMu 
[GIT binary patch payload omitted: base85-encoded binary data, most likely the bundled PDF report files (reports/15745_Project_Proposal.pdf and reports/15745_Project_Milestone_Report.pdf); not reproducible as readable text.]
zkyZlG+y^4D4#3Ae<0R98v%#hojFlOUj1lDr8AuP5Pvr<~Fm0rvdLRtUq7hg8k}nPJ z5Ckzt6mAdpkkdaW3x@kJ9>aS==a1E5(B@U_SkH-!`-*@XNimp#se_uZWhU>m50CyN z-V*{M_!@WwH1@ZT;wkUyvQw|w?@NmUX<4_?E8WpE&1Q$wgH`mT_WdVuiVwZJG z+1)FDXcPbZN#s-YOCx~D7$FEju-u%FL9pq@Nc|E6kWG^)`2M8tS2!^s1@M5f=fpHT z0}5CEN=Ug1o`5#hi*jYP_*i_T1G;qVvr~a6woQOhhFC!(6O!_Qd-kn7dcUaCb_QE#s+eOAwlg7EDh6Z6v&^)VB{6M_*3T<^;UAbekq49orm8UHiXCC~vFtzpb zFxdW?3h)oi(0=x(9Trwfjo3^%#i)pLU6J#yeN46FRZb?x}5>uw|ojPKy0?Uy|#eA zb8B(o4X#-i4i4KcfUMD_fEKWii~Z%thQ_;1x3hVNg^;ZlZ)(UbRa>JT8vQDKt1bxY zj;%=h7zqiTZGCi^00s=XrvUVnSJxFVLH>_`tudYiw%slWmDW4^(ng z*jkAKI=0U)lvl&#;9m`x)3Espf)O<5dwzNR+SOq@SHwzp(v52G(<7cdZ?sXWZ#}E@ z?o}0fUns4pj{aniJDvhR26f^~q<~ZMK4HTM50WW@^2fWxl*E@czLObc|0OH@0wt?6 ze}0A{O=21!O1(vd`83oFtWy)!Bk|C*=pNPaQ8e+)DW(_q$<~~i5Ns>_!K4|$h+stm zYW3(DRt#ax?!SPh+G}JBjT(_B>cfZmdY113q5f3t!{VvqL4m~g@$C&M30~!M(uAjj zDk_V!RXJKam?{pMsa06r^Z9dZxHS^S0@;zy%Fm6;4HZE~j0ZSLxV>va+AcVkY zj=(yHMMu5`dUL`Qj})rLM||S@SO?ad+yqn-j8?`$jYErG?$IvCq%cl+V3*U7jhnsk zP*P_T7?NFdX9=ZBreV+?USXdBex4B6e22+-m005SOLZ265UwCH9H>Pe$l>wkAnxtC zaz-g$<0%2W3okUf$$*ZA%7D__^A-E@8jXn!IsOqAg}O$z#IN>ry-BhEj6>b`K)R-~ zsKc@ZoMB5czjloRz={FJg^($28`G`u$m>qxz^AED=nrMtGC(NQ<{w;Rjoohua=eb? zCAm46moI}cjMs9K=ICjuQU3dtzFDL_hOrJ0;P(7oEJCcP=WOz1Ub}f?P|}LR zKE!Hwt-sk}zuh&IVyX^1fWTLA&{wE|sj}qb2<6^{@K;7 z#%rNcqs8R!tjf!d**JyZDb0Vn{3-c`s$ z?KeVkFdVg&)LEJlPxkFy`_Acl2E69U$s9Ju*_=yE2A3GNq1A;~tDE<5?>m$x#4VJq zmA(^*Th#8g$WnXf{Q(})1M*Y!xGM= zm~Je{1#VdjeJ~pKV27)MCC>spDOBJ#5Urp2Ji4Hp7{6%=h}l7KAMyPgH* z2fgf(7SI`*m}MQ>f3`T%^^m^0D#6}0p#A+O#}fE`K)d0suDBm*GjZ46fh*{oRk_n1 zBjY%%q*U;bJ{zoE40upH^9pF&4o29j7dtP=J>lPQu7OXWG*na_I);A&&a^$v~$KDf)EAk~H&eJ}M zASSvx3Oo6JU-gQ6nN|LGNsZ}8qWWLNBQW4I(lgNiyZM+ApMjp8)hVRqd-^Y+yGbV^%3n*29icnVv(PZ9R1M!IT}pVvRSyCSGNDu`wj<-lgB zzZkN`z7doISl6g3TU=TBOtq$E=bIKPLK5lX!thXk=2@+$jS2nWt%C^A(Wj)MGXM}0 zpALWu+&?8NE+*>Qr60`r@RMn-KLajb-^3CEXdP&gT3|X>pP14CT#U|)3Q9_iw@*(B zpqgTn3So5UVcgURq&yIt2JlYbjjHam7r}h0I$KBkQovQ;RBILw3IU2MK82>SA_xa( z|64Ua(wBM(Rt9{E0AH!;fuTuM0^0ytzmpd{*bc`Fkn+^O--yfHFdAYQLoS>?a}0uz8Y%BGOE;CRs`3|YQUwAPP}>m027sx znm@$Kz&LKip&Nxuty)YK08`y7o;u*cne`#WEHEQ*Ko!8wbzZM3SZy`EQ(f!h8<);) zb2|X44TlYl2}S7lu!tW?R0M}Rkhebosj96l09ia*-{3UF&HHoVIR5YNpE&g_awEGd zfET5%jI8}UxSg-CTY#o-MqHeoZ7B-Qww@<={`Y6C1#m|o89l%HFE-XUwSn)0-0cm& zwVgcB@20uDrB}5Jy62C$$?v*hvpDp#}Kt%s^9*#kr!W~Rkg=lA5(rXoGU@!2);bqx7YN`R6tx>9O<7#`zn2W z2S0TckM3GOwtroG!Es>MPC0tyU{CnT>;CTe0`4(w`vmTJn*6jIeX3kO*Sp!?YN^O- zyg#S;s^z-*3+ZR*^jobrpg&h%sW^oMT&%R!wFr#zp8F>jO!T5ylgYQ_w_Tz-*8oNSYrC;2Bfit z#Dov~fdOdhUj)DmS@8)no`Kfiv^b+LAXfd49>}X0D4s8MornwT(}R5KLi$`{Z^`|% z5mm-Q@jb~s@2P6$=gWng7|`YO_)W%rx!&OhuAH&skPQG&(YmwnB@q-ViPdfH*92C< zl)h&x67>uo8sO>lnihTlgHmGF@z8$MnQ@$1-s`OIulErFGg=wija}B-hK#cP$o zzzR^88n3BPmmE$dwVwcWC9g`EBGS~vbsKz1_nhW8Cd-^&#kTXWxCb3A&PIljmv?GX z+8$dI_wNRj^IZi92_y8Gd~&bdH4&w7}D9nJT^1_K5=vA@@Q63oj409B&gkRKc zj=pG;@@w15W<0R4+x4pMQh*YwE5Pzp4?d`hilJ4vz!q-;(D@jxGB^oFMXldl?Q){8 zw-w?Ddm33;ou4i|*v6*$vDWa(6<6!hsX65J)NUFEwi5oP^2EYz=Y`>G^M22hDbop} zr;GivMA`$gs0&OcbtP_qvc`5P>a98O(M5<`X~Cehn!mEvmRXAb3D;MUkJ*a-_U-C> zDR&&25bf+wrI&j!E=u4KUK(jntRR`(Pm+?=tS;k@IV<@!@=i{rcmOm_T!Dh&r4!C6 z^U@WTupMfpPCNq2dz zdsd1u2=Ky*ETfp)!kTjU;84$zTvbfGZ1|zxrMd4}A0W%fa&XidYK9U0a z27#}DVQV1|7l?$;PR+`xvsz2+jud3$tJ#3{s7FKK89HK;o(Q8#_7~qNMoi5FY0uX? 
zcSWn0!aOA77C6)YbkZqAsET>hufSr&qDk^PWwTjf!>7 z^D)Hd7~`Bbt}s(1p62$=K{Cr6%~$Ng=*47fR1_v9+C~Sxk7(_Y8FA1XyA)}Cqk$PG$rZ?RtJE!IMIx9@7?E5%3Q{2j>+G2ad zG(qQ)9Q$CO&Tc)*)AV=J$3w+r190G2QzK6(4=Nk49ow`@njPj}7oR1}JLJ*gUL>fx zv(Yo3VCQf9B_b z-fH_hkNQ>ks}YpOvCBm^*^BB2$3zt*f1iB1H$zw}REN9_ow}1cSPipZw>Q`jqiE%4 zEywsvobut6+|(SIho6_+VZsjXB8T%&Xd*{M5wg7SzuW$8#Xw`eaG5-Pr+;!d-D6JKYGm8kzn3}$ey-aFvidxh zYI)b}0bDb+^&3q1#lXox#h$t`!m2ALUZ8WRtkz?2ezdVT6jt)$jKBAow*Ilf@ z*p-s)`GC0sXZ+&650V!6JIKQmf0*@A-ye!$ZV*lV3~M zkg_?b_V=VNjkJ&L;)hVAdww93aOhN9@kSr;coJJH?t+GW%kX`ux(T=!@pf5z(bZ>9 z020F@4dtRVzejN>-t@V!3@@C@BI>l`7m$U{zR17H8Mo6?=wv}FuyHHB10bUkHU!VU z+rO<3cGC1;8lMfNzHcTU zH}oFq3!NFn@$kAHZPzGb zYcr=(>h-qojb>(e(v$sDz9fa4@E|kXE_`hVmlzB6c zgd2FU=gH6K7Q0T%jni-AZ8TNKQO`Q7crvNDO!9lRQB6ME6_gf^vq8&tTreX_6QQ&% zqkr7TT=^{VFRLQ((t-C#ND%KzL3%5Hy3Tc4Rk4l1ieW|{8j~Ysq%1Zv4m^lDO`qtp zNpRGqtE|^Cd}%=PcYJ--Eq&Or5RS-=S(xBB_e^ci@l_{+Jw>WN_WCR#w8^$-#UP}6 zDS^h+nOF!%L8r^bp^G;yf!n&WnY>k+hVAe~O=^I|-in`!7ZQ2IpAMz?#+_?tih`)MSsGJ zGMu2&J}ij;l$n>l7j0^YM;H+Oji&TlcIrZ*6dpw|#u>=xLtEc#k*Aj*CRkPzgMOl^ z70D-UOhzUVbw7Xziy=xZBN^!Z{F#2nkeJ^3WUhe<7Mn_;dO2CC&IIR`n~)1-F7(S zpujwL;C5O~LC z3?Uyl5I0GqW{2-a4xU5dt*TuQukJbuD5x#OETYM5)GIrp$AK~^5fk*Xf2&n!Ms`ct z8>E>x6GOELVY|{!1@I)Bz@w!!#`uRBmt3Jz%3ACy;D#*XF9tfU^dBzhuYZwjtfvIx z++NX9tL9XR=eV)cV*AAKX$_kiUe?=xc4tNLGRRxkpDxy&-|%$`lf(*S&!Lb?yzCi+ zu69EhnToSt`rME8C7Gq`iJM$#WQSn)VXFxIGE!n}4*vFQ0~xqem*M=$waGp84uwMCS9mNIErJKLJP5mH116p@(sryxzkU95_Dz42*HX{#>Duo(H6oJXqVT zX~`vH@+{_@t?iE}bq#Xu2ithUE%by*X*I~s5_1T00Q<@ZH}0)>WI_GDKJetle19;x`nrYtGu6aU=< zM(I*~IeG7gvjch`INQGP_c7G*69ug%;~RS_p|R5}ZDj$}6*j+T=?f^B&Vv{ZG#be8%M?M} z<(A@#k2ZZpY{2US?S7AO=D6$x{V!M7kOV%guBK?@SxI(fC3k{j>6%j4A)hI^LUPZy z2U!u8eXobg0DR5+qjR>RiJi=hlY;>Mex7>8nXH#Pk+aE}vOIHun5`={INSs(49@{hCGAW-ZDD>(*qT|=xj;DPB^n#eK-Nsc7G29t^ic;wbdAM z_MY@zuR)vNz^mwK`!x76E4;3Qd9N|tzNZqQbVJjt;~W=L659-~ zN^8=KP{978fgpujP`UChL^VoxeuOlJ@~!I8a54o*F z;bu*$E-LOyF7meHl|V!@g~^DTKFsCum;k!INWSdDT3&jxl>xj9;vLSnwWN}7+>y37 za$VFJ*iWUgy&2Evdo)Af-rXhp2A6ngt3VNVUj+c$iwBDI&ES$lai|tLQusp?lSV-E zo7*}l@0i;tHU|2Ml58W>Iyn@e99yW_0Nw`k* zsjq_q2B{{$4RJzP!c4w{h<~C8^-Cx#Za_{?8r!Z4(_gZVEl1_lsZKA4D?cb1{s_ME zv1AW)$Gv2?Z@ErQ&Z+AR#*w{K5DR-2w@irrjx&XA)Rh){7(nA5zE{pOwjG&d&0J_b z;e4ueeW_hDP_cE8;bZOOLZZJ901rJE8!wZjD$}eBc7};hxy5TiD;HXw1Q2Glsk~S@ z%cGhB@n2YwoXDGa@|-oz-ePpQrbMCsBMOz33rO}X{;)=fMFdg~bY4n8LN=_^j9AMX zURdMW?(Oz0?Ll_Fz3YF*xfan#3e=nv2z+liqBdM!9F;aGQiw9zonp~o_WoK-nH)S# zv}r~fwm$jFS%fQ72sk!yPH@ zi$5}sB)vIb`xcwbNFayQuzGml18;5ki4IeaRp)5$2PO@FqScSJE-9=VC|?!$8Orh! 
z0Vog?CZ81l0*Pr;i1q3<#%aVO=S8ljzJ1#Q_O!z#KIT~fZ~|3haT3fjW~69*4y3dg zf#A?>RW6(i;vLK0U#~9gBZpdB+C=!~62JhrjtzULzaTFXPUlapydASzCFPYe>C$k6 zSI_e#!0QuJdh0)R-D>11S3HNe9_iHK+KYkAZ*V7AAqhYIM%r<4BRoFfUQy0h=2*W)wDGkPZza8aJ&wA$;h)h6HU{YH>Lxl z(_jscLw!QG9J)Y0npKregRE@(aTF9=jfOl16MDD_xdxE)_I1Sh$>zPMKIri3O!G<4 z-yVOBNctN|=A~qFD_&lP6Q-{J4!)!(S#0IfY3=UCJWg#_tD!s4r6|u_&X&gO}`!vs4*g1lb>ot0c5`G+I-uz@R362V_2t_ z##saSv{E1xG3c|bVCo{Wn16WDi;Q6#R5BlH##REsH=DCQ__V<(YAF7Ov* z)Yi!5fF1X#o)B$UNk$gJuW$t5wO}KNhd*)-jN11w<8T^$Hu767=1d;u8X%v{jTQVG zEyDs0uPrN~nFmN~LK+Bs3}VcB!x#7#UnQS_q2fxBo+7b^6)9Rlq-xv^ImRWP?PE=M zg1yElJd_{7JJ8o7{y~&0P5F2rU3S!f!>U|X>Z-p@7G{>H9(C%-D@-qGE=j@P<;%2y zx>F`OW@a$tH>AojPoLZdUxF6w=EW7k6Av<{u5_hOr>ZAuxtq{|FwxqW1D`Po&l*2Y zvlzLm`f>fajhLEUtUj;IJY07l5#63)wTyZToN>@Xo{4qR#J{w?04^}&s5O$*(qlQV63wSTnMp?2V4DJ8;^`t-F_0Xld_Q8eD=346q!Vz=+1en|) zwhQYPNWb(VZk^^bJE3sSjk?t(IdQeW{n`(O#; z6rf&ME|Z>EimEcHVp^Js2_Hpd2iPz4HbHu?949i6$q8Rbt(6}R zX!9BanT!Kvn7 z-oahHo8p!6bwc-0R{^;>c(QHD2JH!ML&F#^GChhTpkQzczKG#aMjBdR?Kp3wpizU^ zuBzS0PBrnvDLW#`^;{J$zSlz@jI7@Aso?SYgn9m zDtVhhRCTLk0prhMap9hT8n2Rod23vG1b2i+mL}1vUyL||o!0mQqOY8QUyg#Eme(d= zbZqo}mGqqEcMWNUUC}i$#Re^Pk5&`5MF^LYB&4!qlB$IzwK0Tg|mWLZ8B+lWI zdAB9c4p_g~tHu5k%(#D)XwE^ff_4pGvD$!Avhk=+E$MqEsb}^Y|C|{714O?kWnxsD zeHGntqiGd3#)rWS8%sPUA-Hh_nF)^fVu!{%Omk<5FDNo$dHdd<0qy!^BGb(yS7#6h zvXz4(I?PUj@Y`dAVPlbB;ytGA*uL7<56Nm0-34*aVRIunlb9M=q_@|S_h!neKinkU zj$QEFnX38FlL=W6ugjGP1#Z_cAg^zufLNHrdCeT^N{@vyt;tmmyC%O{7R~|cC`B#~le66j_^}?%zoLp?80zwPUxGtb zC_0kz`pv%Z_z$w2ouNLd7ssy!HpbQ=ECE`jh&a*k)VHH8sv^tTsl9f?Zz?ti32cc9 zFZGahm2BQQb&v1+T5LvS?S89e@h{gFlf-i4&FQ3J8VZL?!xiIeL$+wahwJEcnwPwD zY-GPn^XFb)_IK^50hIZNQ6FZ9V_sJFk-FoQ<#mO_cI|4Zj<_<8l4kSx%};^lLyye3#6X;>-LH0>Z^ok>7{$P@F?bwmf!$5yiwF zHHq8P*}t>U^oTz(2otmAsS?9uC^q&ZD5~#)0W4%Jr5EXmr(ZSdaNi+6L9;9EEv${G zb&tU0NdXgaYH=-Y%T3%glGvXJB5^oBVL1 z^tVS$jSddcSf|ODdu4m63XD>3#o@ojqj*z+;a(*XMI&Ta3JfCE@wk1b726wLWQSl| zP2Z)*#${D|rOsaKD?&ItcuKFPr}w;}O>MHCi;g?v)B z$`wYUnZfAAqC|;SB>^~}hZ*M6;q7*Ty{KXLi>`DGE&3Z0nuTKaM+PY_UK=pxygdzMtr5OfxCr;B~@ln}Kn5C~IjlKdAI<0%l@b39gKZ zqmo3uyzJWKbq$K^xO5H$r|3D>D7S;S)(go1so&I|VF8hqH<(zE{Jj$*7L{czF7>c3 zhk(9Hl>GM$7)-FPfCcDtJoGOljnU#?HJ+nX7STKz4pA9)ce9z-_tq^P`{AO=^byYD z0)sM14w{AWNO-cpv>+vmwALUlqr(veP`}VqusFlyjhC+W&ja_l(4?_zK)M;UI43dE zlMAu~xZiUR~Y-u<6gn;rBuL&6?eG06n8%v z$m5*Ww^DR%T+noo$WDo6VcN>zb`ET#S7~i2qzWYw#a}yU8_bjY{Z6#RIa$T1eq7Y= zszGXG6%XjXZCv?zhN2FTB+xgcadNnA+4|C^8;Yqz`4lWt2S7oR@)3;^*-eZi2HklB zdz}+POdKBQ;Z=<)bYOUqu0iaBBn%azme&F5U5Z|a zQM?964W0WlI2%IV4TFWK(+mQyAd1ouyDS=vSI})=l`GskOq){J>Un4{0#R`p>r z>hl2IzQfrvWrz3pxIMl_<^>L8Uq2=CN*@=)P6S!Q&~;qgW- zz2tHqA@a>lc5x>SHOY`f=LJAV;vyS1NwIp0HqT_elfiznNtD(DCavP1^Lx~3?M_N-y z3!qPdlJNVR+E{XXccO8sK&oHN&g8)|yB;Pxg)oME^&AV>)z-rGHGshp< zW0Fu3tE?^whpX^}A7CG)eL3V}M+z;Ue1!70Wmku~;+HJqK{&dyZH1GigQ4YjI~#ob z_02R*?9x;LE>9Lj@#hz)6^#&Gw$9G?c$Sm0EPci!K=`|!*3{Chb7NT!yNyX`XFPE} z8r==eXX?T+9G$rL(7w7RRskLM0Xq?xz^#P z6GnxINjjUH7UB}a+U0*~2Bt&+pO>}h*ZQL2pF(rx2>L>H7D+75etDlPw|{;tz07OR zZJqoYV1&jXG#k zBof9-ToTowmJASOU{9AjsQfN)diQU!K1*vlk%=pBy~b(zy3?d$TDO3wJXCZtec5I& zqJNXFh?cb-Mt^S1r*#Z2XLRv^)`7z(+iKf1qVeo78FW&2d4Kf7WoxD9ly&f4g{8I) zoxYLRU;d-`0(#QwpS-)`N^jS`qJ6SMGP&Gct6MP)yWoD>;)u8mMLKvLpQC<>aYrw~ z@*sEbTBS+SeWn~5Pj)3)&1Vvf$Pqep>LrR#v%u9oJnQuO%q_G{4Zz896M05(=j6n( zAUN3|zHGwJ(m8u5DRU5Db<(3wW4@C9bGSt)D1-B`z$)V-O;37G93rmDon6~eBC z({JdtRwOK0laAPhKffriRpV*@Sj<1%8M>R+uZvvZD@|JY7F!b3^*dvXdp0d`cI5f? 
zxEy1HMNDjGAJgum!YS1nYB`cle);=F^Q|J4p()>5`xlv;P3=7Yb9*7|23qDr=Y z`M^muWR-6+KWIqy-z694UlsDf7gO1JQGBSp#SyyeMd)rOrpVsw+X9jwepXG}0m$2_V-J&V4<-s)6F=iD}0`Q^>ie?M5;?NQnM7NBH8!#RMyn;{a}qXIwQ7zYLH40h z=Bde0EviMEaN<&vR9ZE`2<4&!1;j3OyV)Ijzc(kAD6CkrezWFyz{z*=Q;i7IKDxoq z_!(}MC#7ghOOiFYQ<2N|#sb|&Ll8O&hwm?6P9RliYR89fcEI#dLfdmGVP#uWM6}=q z!5nXO$6=5Fr)*YNJoz*_Om*pJqY@lujl!Wu)OXfY5xPfeA5@uhuSf1 zF>SW4d7AB78I+mA%I?XS1mX0nhLt3F$!OiPgPY#fqL|b=nnsRXlI) z9E%Wd+IZ?3hL30p>c;6=y|}}^EnA)Xyp9Gv5%l-kON1cIZmNKDGW&JdGBO-79UYaH zwS@Cv&L|Z3$10w@0d{|R2{o<9(}e2z=r_`?s#p``)ty!cm6BupcWH=5*e@gCNMHD@ zTQDJLRK~Tsqv#c<&-9OHPl@ymk0VxH^k{;9C~sqA;m9Yh?T?A+1j?Sl1VU^_{)jW>X1HHf5S5~POUqb)nRcZ?O?>FHZATgJf#p+AlCb$<~TKNEO! zSSR9#62l3ys0?nuVicFcER}a4#Bb4mPjH)zH>`#)hfT+!B5`Yp3V}V7NbG*-;MeGP zy8SuEW06NJe~+T5F;w#{Y;!W#N&Qi1F`lgaeGhp@TRSaIk&sBl_LSmKPus^2VA%N? zuWL6W0{O3l`dNP1mJ3ur>V(|m!l?*i^$WnA#(KvKUq!+>r^@{Iv)ZT`ckmN>!_F%J zMU6?PBPj3hG7yHOlf!|zbZ8InO=-dm@;)-iY7A5K0A>DOVCW_~eAiT_fP?oe!L9 zLq3ah(b;Ur*50}jyvp$FQtk?mYTwqKrmf|^s#~+>lFEVHK*M%KG4bfVwJ@h7pd8?k zG4=jfWt@Oxe%q-W1Bu_N2Dk0=sovv2RyePPi7(5hs0jgv_jhcW-q_{ zo-87O;a$?N!F_STY!$V3mxC?wQ{A@*0Ky!jyE%$a9v$|VZnArV_pNpwnkzQ%?@k|$ zo?W&pXjt#rZ3HVdWzG;w55LR+#6F(HNJLYLB(H3A{6JW8JD=zZ1%iG;p2b057h^{) zo-a=l2(x0m;Q}`jLb1;S5>lzc8Mc=|&C9Z%i|`2nF0GEBA@$mwA<~qmMZYn~HGA)DapS z57xKJi-WaS$`Ps!U^aC7;FIyZV{E9}Z%($RI2A+~w(*L5b0PVK4;He&#iNW*j6pf6 zpzM6XK@&ri=}`kk&?pFTCK?Xyh%%#*|0W9e)MILmCoC$er1{bVkw&&us1N*zR;UI! zn=vMQl0MtF2emEt0^`Td(3xnJs!)mQZY^KiN_%2>qz;_lXqC#viKdnhlninMk7MiT zJJNHF>fYd_EA&$lc|JlrT05{9OGfrI9dhEbLf01ik`$f}`MUjCfKqOHc z|1TDw8~`>yEc=4dMB&Bcvs~fDpv08?nR#&Wif32JHY~J6l$@o|1~O76MJw_wCL1;r z;N^&t*zs_vEv+zD$qXy}e$OzDdpCD;CW;nrm!X1|6~$WL#iUR8<7>PSPZ%+jd$DOVg+XIn1N8DL1t*Liv+|c}8G)2+quj z#ALfw#(kOTW};X|rq3uV2y2X@)@(-!+E;>E+?r7atqqsg;<&&p#o0V2)0%got?E8N zIvIg6=xp0z0s-3rfnuBjn#wPp56uN@^sFsYGsFsR)f~}Fl3`a zic%77;`0)1q{xl1BiQz1(=q&PXzrnK7>hyU6JfC^X|z^`CVhK}&U!rM=>n_=nGzJE#c{7t>rF=B z)0fc}pXmSF=VLhcpdk9{9lP8neQ|yExJG8h>xSST^t)|hwVS-R_-jFesx$Bb25DS( z;eQ=<9RF$5{U7BoH>6Y<01<}#WA#@Z4B7gp_?P>ydIYldcep}UfA{})jWFjwO8!4I z!rcE6;lI@g|E>EU=wATrtemX>CTl6u(eYm4obH;azF3psR~V%j7d_Xd8q4GHFmYcg zK7})3!RNx>jZoIUU+_DBmrK8l84_)0gZvJGjeE4i+0fC+Svj-ALpk_u-GKjb0Yml* z2dp!8J3P0Kqke!x7SYjgw`1jXrE^zYx(qH!R5AK>;yTBm!$&J%@u$(!-b@XLmox8{ z?;cHX&vz(w^4hmk`?;xku0mffY0OI+$EaTJdA<1V^}X7$m#mwB4#yf{qH@?zM$URQ zcYc&C`|?|_dDXR@*zyI;lcd^J+TF)DMfVXTItzF^^3bZT4XKrrI&HaMHm#4Hn8uFl zkxj>-{V)%=F7}ZNaR|xeSPKWfyJFv!$Sc!k)`MrI!MY;M{*C*(e8*yYt}OeW>4epl zg$Kk3=E%D)OP@Htok?TwwgJ?}iMCJyl18(+3|4<3#=r7SiQ(Ugm!1qEG1X-hmi76^ z*6&ifdR*_EUE zpo1k#Z?!Vjj?4Tj{TupQ&nSdhF&Nm=Bo=aNWBq2LuTkD~NToaRa@m4Zt#B@H!rCyG z-&%|$=N2i}qBF6IW(2xBw9}ix)_)GE@ICfYYh2skwNk1i5}fvM{C;hwwl?mwa_i){ zGP(x%S%8w&X=V9`YN9&(}M0W<+J>IkUvvWwM!g=(hC<^ z>t%%Z;hJg4f9Wo)CQVNR{-c)L8}LE{`x`PrO1rNAdBg>fhD zydLT1(1{NoTR~PPHfKLZ|~$p%e~VCSU}9SR0SvcY)WsKgp4!_E%$$(1FDV;-|>( zU`UK~e~eQ*qi*MqrpxB%vbPZK5;cfghWdD0V-VD7Vq$vH2PX3A|D;;V}mD*Z^t zDOm9TCUq?o%Jr@kd{i6*g**I?a0lx|rnII?3O5ZJiO22$FDc?grg!%yB0ufar6o#F$;Ry%Sb}62@qLVI z(b<0&7Ms@G26qdK!TL1LPd+diBWWzMe7oW?1r&F5^}#P zkLF&1@zT5^cQLO9r7-Uq)cLv0CZ+xL;<%-a=MsZNpO5G9RQ1Ovv#NFvOtl_ZP4kmW zn!dJ6<|+G^*dB{GMKLIYZmYM#$#4KdliMb`nwds@vA*rA@sZDQ=2F{oFct||>(tjd zvGXj5#iHZ~YEtw0gQSV@(DmVLh}z^!`TefQkPNcb>}K?cH}UUzu8V=SXL8B*=YUVh&pg(bnj%xEKk?yS^!suEhpr7au zMh|n4rUwrQeq;QmDV6=oxQ#Fw(yzvP_kAGs-IXpSLszcQ61Olki`G#X!q2=U#Cv2- zB?^)^>U&%QNu75OF(4k|A<}JmtXjRZoH;#b93Fu>)Y@vYv}U+*-|1RG)Uq!l0*3q% zr7!?B3RMid>7Rw#U*t$&3x7esd79pMH+0UF(;4y2&~d+!R~$A5&73wImd2FR&H>p=1WnA4BG^!Y zzpLF!Hd>cP%O_nq-(aI8k+K{8&V3Fq!N)cwo1Nx!3&kjv)#b;6 zT&^3N`K2pm%e$K**U3W5R!k?)ld>h(#KqylUaIO!R#nhQGBnK9#Ef2 
zvTK+>+cicDqXEt5G^KI#wLovd>TZ{Z#;a{X=%i)?-7b_C8J3FK4lQ4aRza|yzJ_z` z2Fn$3A69yCG_Y%;yiBK>6Bj_HRO?T0d_l}F3q^?}z=qPj#DyewKp%@2Q2ppC;N7;P*Knk6 zAL^%CBv?{*h2@|ij}!~2XjF3UcjgrINwX42@4`fr=nr2=MC(OmojGwy&9p#BN|A-G zD}V13eKV4iVjs*u_E)S7eHxs2&b-i9h5k0Y^j$1z*{DI}yXbe_NJBJ(*-DW~CDD9C zPAF__v)_@D4yp8fxvW5Zib+46C5fi~Ka zua2u8URx;c`oqp1KI`WCvDIb~*V28t)stmCju5H{u|^wpy~?35lwjG+rANOhY){HF z>0+!Z$xd8&kjAPh$#F=m2}{}Nk5Vs;pB=TH8Tp~SB zpm>oa%-{-d_7r~LXA>vX>q%#zln+v&K=u@+_Ade5>*s`O3#`-uapFF|QpdE!6d~g} z7_GSkTEQK~z?CfCnySd54N)kPkWXY`!I$|J%E(CBV8;iMesY(6p_r-(++GoSww|>h ziu?7mM)|_Qk3|7bPdN#Kv{n^fBTx*_kcq2jmJZ%@8d*y^QOf2MN8y3bs};=y1q63a ztz{i|s!JnYCEFFJdyF*O&tH$)o&rF>jn0r%ZVP-fKC2f50E@#_CaI zo9#ASpYLCKxP5IZdIMEhpwtutC0*NHTMym8Z~5rrNZF2)l!Sc>`hfrs)t$lYq2qZd zEvTnG*eD2gsKY+vNxHM!b9FyGmTaA14E^J5us|m|k?M#yIy<+JEpDSp9oA1PmN
    FYPIqGN){;osHXwc`u^6*m$<2~68bp++{wLU{PA$Nj~qSO9GhnBQ881~ld=Ga z0cPLV%$zE&mHVX=OQeQ8z-P)=7(?doFP>h2pJlS0EDsDAt{_mL4%F1DA-%VM(!rCW z?PW1KqP|1>m!^N_MtTNM50qz*m!!mfV-Jhp}>KgR8%o4l5{=wz6lXM0p_&| zzW+7pb62w8bnigL?fhw=%s}^4(N;*Wl&3|J%dE6*x4ebjvj;WPS+_FxZpEm+n`)o~ z?_vGT+TfuD>a*i^x4&U$uLRLRL0>^R@Zs})=+Ngclf?{kM?|zY_XUIzO$>of&aGe3 zKXYLu>wg_hdE!LunDD!5^)0G*Wjh~%KQMon=E3Qq|F+vv!_;L@wX>phWsu=uV#<)G@c5~i$t>WT2l9~eQ1?BX&24BziavEf{Uf<3h~&Ec>m5s!PlO|lW_bW!boyzZ92 ze{Vnjd!S62L7algH_3az+13L&yzdbmX!e1`$?u>YmZS;)VkN4f0O-)HhR{0?Jr52s zY<&(!jc5Tg*qgv*3&FxEqsxzSYIXIWT_T&-+|sV_$X1X1H)|(|YQm#ju7j zJKLcWilV{zTx0p>_65iA81k6b^y+#|(c$oWA@<h%1vQl#-ZLX_64*vRK<lcSrB9vQE zU0U-{c?-c49Fgz$r9-!4RnWejJ3S4}nqo3|1t1H0F8Q4zwtPg1rloE#wA_?B^LeR} z3H)`ZiX9#_(5kFyzKl?DPB-rA`O*-6e<>d_Fijp$4bPsR{I2Cs(TA(A^G#qDU7+L0 z(nE>W{N}nyORyD#f9YDG1`dO;>tuy1))XRdd@Bnu1S&hVRvEvSOO{+o^1(QN{6r(!e>-fGp`HxNppMN8xE3Zq$J?awmT-@-)Ys61#h(UsurXP zL1hF*TcPI77X90w1^E ztKNj_o2u4cto-F^3#4DEo2|Ea@^VsY@6(yS{#bI6g~1;h?K<(wCAe8nEWFx3_Twtp zKA>xin@w5}$~?C9D@x|4}!rlo$O#=b4K|D%a zEbZ+?Ax0zsYEfQRW>$6yYnHV#$}4()%n_}g06NEM_BaJPhblvqH>(oRkwGf1@z z$kW~4$<+BD(d6t+|L=hmRmC(J#a(S|jE(H}t9-t0#a)xvT zz{beJ&ceaN%E`gTz{<$Uz{1GR%*e_K(53#{0a;5o5QIs>%C1WTuy!zVv2X?%0o?v$ zS58KDMrKH_6rJo%T}}Re$v=btD+Z)W#nspfWa9FV{;FEK*gy^&QmX1?tE;XpLrxrWsuqbrk=3>gP8r_xVik()dhmDvvmgiwR{n=w{f+#a|S@# zD>^}_cP1{7bIS5Jvjkw$fZ*>~0Bn#2LcCM4^a25R08CP%03ALK7E=~uR*)IH83#8z zCmW9$8<&YGD?6(x4~UD~#Kc&D&xoDVgqzuvnU&SZjE$4ijFrWN(}oPA(ph%L18|g%g>ALQFv%`F{aIM?>xa literal 0 HcmV?d00001 diff --git a/run.py b/run.py new file mode 100755 index 000000000000..1574a5fefc8a --- /dev/null +++ b/run.py @@ -0,0 +1,122 @@ +import logging + +logging.basicConfig(level=logging.WARNING) +from frontend.no_preload import NO_LD_PRELOAD_CTX + +with NO_LD_PRELOAD_CTX(): + import argparse + import importlib + import random + + import numpy as np + import torch + from utils import perf_test + + torch.backends.cuda.matmul.allow_tf32 = False + torch.backends.cudnn.allow_tf32 = False + parser = argparse.ArgumentParser() + parser.add_argument("--compile", type=str, default="sys") + parser.add_argument("--model", type=str, required=True) + parser.add_argument("--bs", type=int, default=1) + parser.add_argument("--dyn_cf", action="store_true") + parser.add_argument("--dyn_bs", action="store_true") + parser.add_argument("--dyn_len", action="store_true") + parser.add_argument("--repeat", type=int, default=100) + parser.add_argument("--no_check", dest="check", action="store_false") + + args = parser.parse_args() + print(args) + + random_seed = 23333 + torch.manual_seed(random_seed) + random.seed(random_seed) + np.random.seed(random_seed) + + def main(): + module = importlib.import_module("." 
+ args.model, package="models") + if args.compile == "script": + assert hasattr(module, "get_scripted_model") + model = module.get_scripted_model() + elif hasattr(module, "get_model"): + model = module.get_model() + elif hasattr(module, "get_model_with_bs"): + model = module.get_model_with_bs(args.bs) + else: + raise ValueError("lack of get_model in {}".format(args.model)) + model.eval() + if args.dyn_cf + args.dyn_bs + args.dyn_len == 0: + input_args, input_kwargs = module.get_input(batch_size=args.bs) + expected_output = model(*input_args, **input_kwargs) + else: + expected_output = None + assert args.dyn_cf + args.dyn_bs + args.dyn_len <= 1 + import frontend + + frontend.config.set_config("model_name", f"{args.model}_bs{args.bs}") + if args.dyn_cf: + assert hasattr(module, "get_dynamic_inputs") + input_args, input_kwargs = module.get_dynamic_inputs( + args.bs, 2 * args.repeat + ) + if args.model == "blockdrop": + frontend.dynamic.add_branch_rewrite_pc( + frontend.c_api.get_next_frame_id(), 51 + ) + if args.model == "lstm": + for_iter_pc = 32 + frontend.dynamic.mark_dynamic_pc( + frontend.c_api.get_next_frame_id(), + for_iter_pc, + frontend.dynamic.DynamicControlFlow(for_iter_pc, "FOR_ITER"), + ) + perf_test( + model, + args.compile, + input_args, + input_kwargs, + None, + module.get_input, + args.repeat, + "cf", + args.check, + ) + elif args.dyn_bs: + perf_test( + model, + args.compile, + None, + None, + None, + module.get_input, + args.repeat, + "bs", + args.check, + ) + elif args.dyn_len: + perf_test( + model, + args.compile, + None, + None, + None, + module.get_input, + args.repeat, + "len", + args.check, + ) + else: + perf_test( + model, + args.compile, + input_args, + input_kwargs, + expected_output, + module.get_input, + args.repeat, + None, + args.check, + ) + + if __name__ == "__main__": + with torch.no_grad(): + main() diff --git a/scripts/compile_longobj.sh b/scripts/compile_longobj.sh index a68dabb64daa..21e751e4f970 100755 --- a/scripts/compile_longobj.sh +++ b/scripts/compile_longobj.sh @@ -1,9 +1,9 @@ #!/bin/bash set -e -source /opt/spack/share/spack/setup-env.sh -spack unload -spack load gcc@11.3.0 /ohhhwxj -spack load python@3.9.12%gcc@=11.3.0 +# source /opt/spack/share/spack/setup-env.sh +# spack unload +# spack load gcc@11.3.0 /ohhhwxj +# spack load python@3.9.12%gcc@=11.3.0 set -x BUILD_DIR="${BUILD_DIR:-`pwd`/../build}" @@ -12,7 +12,7 @@ if [ ! -d $BUILD_DIR ]; then mkdir $BUILD_DIR fi if [ ! -d $CPYTHON_DIR ]; then - git clone git@github.com:python/cpython.git $CPYTHON_DIR + git clone https://github.com/python/cpython.git $CPYTHON_DIR fi TAG=v3.9.12 if [ ! 
-f $BUILD_DIR/ldlong.${TAG}.so ]; then diff --git a/test/example.py b/test/example.py index eb7dafa45ef1..951d3365eacc 100644 --- a/test/example.py +++ b/test/example.py @@ -1,7 +1,201 @@ +from pathlib import Path +from typing import Optional + +# isort: off import torch +from torch.utils import dlpack from frontend.compile import compile from frontend.utils import SetConfig +from mlc_llm.compiler_pass.blas_dispatch import BLASDispatch +from mlc_llm.compiler_pass.clean_up_tir_attrs import CleanUpTIRAttrs +from mlc_llm.compiler_pass.fuse_transpose_matmul import FuseTransposeMatmul +from mlc_llm.compiler_pass.lift_global_buffer_alloc import LiftTIRGlobalBufferAlloc + +# isort: on +import tvm +from tvm import IRModule +from tvm import dlight as dl + +from fx_translator import from_fx + + +@tvm.transform.module_pass(opt_level=0, name="DebugDump") +class _DebugDump: # pylint: disable=too-few-public-methods + """A dummy compiler pass that does nothing but logging. + Only enabled when debug_dump is not None""" + + def __init__(self, file_name: str, show_meta: bool = False): + self.file_name = file_name + self.file_path = Path("debug") + self.show_meta = show_meta + + def transform_module( + self, mod: IRModule, _ctx: tvm.transform.PassContext + ) -> IRModule: + """A dummy transformation that dumps the module to file""" + if self.file_path is not None: + # NOTE: We use debug level here to avoid spamming the console + print(f"Dumping IR to {self.file_path / self.file_name}") + with open(self.file_path / self.file_name, "w", encoding="utf-8") as f: + f.write(mod.script(show_meta=self.show_meta)) + return mod + + +def magpy_pipeline(target: tvm.target.Target): + # variable_bounds = {} + + print(f"target = {target}") + + @tvm.transform.module_pass(opt_level=0) + def _pipeline( + mod: tvm.ir.IRModule, _ctx: tvm.transform.PassContext + ) -> tvm.ir.IRModule: + seq = tvm.transform.Sequential( + [ + # Phase 0. Add additional information for compilation and remove unused Relax func + # AttachMemoryPlanAttr(), + # tvm.tir.transform.BindTarget( + # tvm.target.Target.current(allow_none=False) + # ), + _DebugDump("debug-phase0.py", show_meta=False), + # Phase 1. Passes on high-level operator graph + BLASDispatch(target), + FuseTransposeMatmul(), + _DebugDump("debug-phase1.py", show_meta=False), + # Phase 2. Lowering to TIR, inherited TVM Relax's official "zero" pipeline + tvm.relax.transform.LegalizeOps(), + tvm.relax.transform.AnnotateTIROpPattern(), + tvm.relax.transform.FoldConstant(), + tvm.relax.transform.FuseOps(), + tvm.relax.transform.FuseTIR(), + _DebugDump("debug-phase2.py", show_meta=False), + # Phase 3. Passes on TIR + tvm.relax.transform.DeadCodeElimination(), + CleanUpTIRAttrs(["op_pattern"]), + _DebugDump("debug-phase3.py", show_meta=False), + # Phase 4. 
Low-level Optimizations + dl.ApplyDefaultSchedule( + dl.gpu.Matmul(), + dl.gpu.GEMV(), + dl.gpu.Reduction(), + dl.gpu.GeneralReduction(), + dl.gpu.Fallback(), + ), + LiftTIRGlobalBufferAlloc(), + _DebugDump("debug-phase4.py", show_meta=False), + ] + ) + mod = seq(mod) + return mod + + return _pipeline + + +def relax_dynamo(pipeline: Optional[tvm.transform.Pass] = None): + + def _relax_backend(graph_module, example_inputs): + print("start relax backend") + target = tvm.target.Target.current() + dev = tvm.device(str(target.kind)) + # device = device_from_inputs(example_inputs) + + import torch # type: ignore[import] + + assert isinstance(graph_module, torch.fx.GraphModule) + + def to_torch_tensor(nd_tensor): + """A helper function to transfer a NDArray to torch.tensor.""" + if isinstance(nd_tensor, tvm.nd.NDArray): + return torch.from_numpy(nd_tensor.numpy()) + elif isinstance(nd_tensor, tvm.ir.Array): + return tuple(to_torch_tensor(x) for x in nd_tensor) + else: + raise ValueError(f"Unsupported type {type(nd_tensor)}") + + def to_tvm_tensor(torch_tensor: torch.Tensor): + """A helper function to transfer a torch.tensor to NDArray.""" + if not isinstance(torch_tensor, torch._subclasses.fake_tensor.FakeTensor): + return ( + tvm.nd.from_dlpack(dlpack.to_dlpack(torch_tensor)) + if torch_tensor.device.type == "cuda" + else tvm.nd.array(torch_tensor.numpy(), device=dev) + ) + # Fake Tensor + real_tensor = torch.randn(torch_tensor.shape, dtype=torch_tensor.dtype) + return tvm.nd.array(real_tensor.numpy()) + + graph_module.graph.eliminate_dead_code() + + assert len(example_inputs) + + fake_inputs = [] + if isinstance(example_inputs[0], torch._subclasses.fake_tensor.FakeTensor): + # Fake tensors + fake_inputs = example_inputs + else: + # Real tensors + for node in graph_module.graph.nodes: + if node.op != "placeholder": + continue + if "grapharg" not in node.meta: + continue + fake_tensor = node.meta["grapharg"].fake_tensor + if fake_tensor is None: + continue + fake_inputs.append(fake_tensor) + + input_info = [] + shape_vars = {} + for tensor in fake_inputs: + shape = [] + for s in tensor.shape: + if isinstance(s, torch.SymInt): + if str(s) not in shape_vars: + shape_vars[str(s)] = tvm.tir.Var(str(s), "int64") + shape.append(shape_vars[str(s)]) + else: + shape.append(s) + input_info.append((shape, tensor.dtype)) + + mod = from_fx(graph_module, input_info) + print(f"init mod = {mod}") + + # invoke optimization pipeline. + if pipeline is None: + # get default pipeline + seq = tvm.relax.get_pipeline() + elif isinstance(pipeline, str): + # lookup by name + seq = tvm.relax.get_pipeline(pipeline) + else: + seq = pipeline + + mod = mod.with_attr("target", target) + mod = seq(mod) + + ex = tvm.relax.build(mod, target=target) + + vm = tvm.relax.VirtualMachine(ex.mod, device=dev) + + def exec_tvm(*i_args): + print(f"start relax run") + args = [a.contiguous() for a in i_args if isinstance(a, torch.Tensor)] + vm_args = list() + for arg in args: + if arg.dim() != 0: + if arg.requires_grad: + arg = arg.detach() + vm_args.append(to_tvm_tensor(arg)) + outputs = vm["main"](*vm_args) + print(f"finish relax run") + return to_torch_tensor(outputs) + + print("finish relax backend") + return exec_tvm + + return _relax_backend + class Example(torch.nn.Module): @@ -23,14 +217,21 @@ def forward(self, x): print("expect:", expect_output) # set the graph compiler to inductor - with SetConfig({'backend': 'inductor'}): - compiled = compile(model) - # run the python code to compile the model. 
The fx graph and the guards will be printed out - output1 = compiled(x) - print("output1:", output1) - - # run the compiled model. "guard cache hit" means we find the compiled record and use it directly - output2 = compiled(x) - print("output2", output2) - assert torch.allclose(expect_output, output1) - assert torch.allclose(expect_output, output2) + # backend = "inductor" + # backend = relax_dynamo() + device = tvm.cuda() + target = tvm.target.Target.from_device(device) + print(target) + backend = relax_dynamo(magpy_pipeline(target=target)) + with target: + with SetConfig({"backend": backend}): + compiled = compile(model) + # run the python code to compile the model. The fx graph and the guards will be printed out + output1 = compiled(x) + print("output1:", output1) + + # run the compiled model. "guard cache hit" means we find the compiled record and use it directly + output2 = compiled(x) + print("output2", output2) + assert torch.allclose(expect_output, output1) + assert torch.allclose(expect_output, output2) diff --git a/timer.py b/timer.py new file mode 100644 index 000000000000..1b028c17c4bb --- /dev/null +++ b/timer.py @@ -0,0 +1,86 @@ +from time import time + + +class Timer: + def __init__(self, unit="s", color=True): + self.clear() + self.unit = unit + self.color = color + + def clear(self): + self.min = 1e9 + self.max = 0 + self.sum = 0 + self.cnt = 0 + + def start(self): + self.start_time = time() + + def end(self): + end = time() + duration = end - self.start_time + return self.convert_unit(duration) + + def log(self): + end = time() + duration = end - self.start_time + self.min = min(self.min, duration) + self.max = max(self.max, duration) + self.sum += duration + self.cnt += 1 + # print("iter {} time: {:.4f} {}".format(self.cnt, self.convert_unit(duration), self.unit)) + + def report(self, color=None, text=None): + if text is None: + text = "" + else: + text += " " + if color is None: + color = self.color + if color: + print( + "\033[31m{}{} iters, min = {:.4f} {}, max = {:.4f} {}, avg = {:.4f} {}\033[m".format( + text, + self.cnt, + self.convert_unit(self.min), + self.unit, + self.convert_unit(self.max), + self.unit, + self.convert_unit(self.sum / self.cnt), + self.unit, + ), + flush=True, + ) + else: + print( + "{}{} iters, min = {:.4f} {}, max = {:.4f} {}, avg = {:.4f} {}".format( + text, + self.cnt, + self.convert_unit(self.min), + self.unit, + self.convert_unit(self.max), + self.unit, + self.convert_unit(self.sum / self.cnt), + self.unit, + ), + flush=True, + ) + ret = { + "cnt": self.cnt, + "min": self.convert_unit(self.min), + "max": self.convert_unit(self.max), + "avg": self.convert_unit(self.sum / self.cnt), + } + self.clear() + + def convert_unit(self, t): + if self.unit == "s": + return t + elif self.unit == "ms": + return t * 1000 + elif self.unit == "us": + return t * 1e6 + elif self.unit == "ns": + return t * 1e9 + else: + raise NotImplementedError diff --git a/utils.py b/utils.py new file mode 100644 index 000000000000..4f90a7417729 --- /dev/null +++ b/utils.py @@ -0,0 +1,843 @@ +# isort: off +import ctypes +import logging +import os +import sys +import time +import traceback +from pathlib import Path +from typing import Iterable, List, Optional + +import numpy as np +import torch +import torch._inductor.compile_fx +from torch import _dynamo + +from frontend import config as sys_config +from frontend.compile import compile as sys_compile +from frontend.utils import enable_dyn_shape +from timer import Timer + +from mlc_llm.compiler_pass.blas_dispatch import 
BLASDispatch +from mlc_llm.compiler_pass.clean_up_tir_attrs import CleanUpTIRAttrs +from mlc_llm.compiler_pass.fuse_transpose_matmul import FuseTransposeMatmul +from mlc_llm.compiler_pass.lift_global_buffer_alloc import LiftTIRGlobalBufferAlloc + +# isort: on + +import tvm +from torch.utils import dlpack +from tvm import IRModule +from tvm import dlight as dl + +from fx_translator import from_fx + +_cudart = ctypes.CDLL("libcudart.so") + + +def profile_start(): + ret = _cudart.cudaProfilerStart() + if ret != 0: + raise Exception("cudaProfilerStart() returned %d" % ret) + + +def profile_stop(): + ret = _cudart.cudaProfilerStop() + if ret != 0: + raise Exception("cudaProfilerStop() returned %d" % ret) + + +# torch._dynamo.config.suppress_errors = True +# torch._dynamo.config.verbose=True +# # torch._dynamo.config.output_code=True +# import logging +# logging.basicConfig(level=logging.INFO) + +num_graph = 0 + + +def custom_backend(gm: torch.fx.GraphModule, example_inputs: List[torch.Tensor]): + global num_graph + # logging.info("graph break!") + # print(gm.graph) + # print(dir(gm.graph)) + # for node in gm.graph.nodes: + # print(node, node.meta) + # print("example_inputs:", example_inputs) + num_graph += 1 + return gm.forward + + +def get_inductor_with_profile(timer: Timer): + import torch._inductor + + def inductor_with_profile( + gm: torch.fx.GraphModule, example_inputs: List[torch.Tensor] + ): + start_time = time.time() + compiled = torch._inductor.compile_fx.compile_fx(gm, example_inputs) + end_time = time.time() + + def run(*args): + torch.cuda.synchronize() + timer.start() + o = compiled(*args) + torch.cuda.synchronize() + timer.log() + return o + + print(f"compile time: {end_time - start_time} s") + return run + + return inductor_with_profile + + +def onnx_backend(gm: torch.fx.GraphModule, example_inputs: List[torch.Tensor]): + global num_graph + real_inputs = tuple( + [ + torch.rand(x.shape, dtype=x.dtype, layout=x.layout, device=x.device) + for x in example_inputs + ] + ) + input_names = tuple([f"input_{i}" for i in range(len(real_inputs))]) + model_path = f"tmp/onnx_graph_{num_graph}.onnx" + import onnx + import onnxruntime as ort + + def load_model(model_path): + onnx_model = onnx.load(model_path) + print(onnx.helper.printable_graph(onnx_model.graph)) + # print(onnx_model.graph.value_info) + onnx.checker.check_model(onnx_model) + print(f"{model_path}: check passed!") + onnx_model = onnx.shape_inference.infer_shapes(onnx_model) + session = ort.InferenceSession(model_path) + + inputs_name = [item.name for item in onnx_model.graph.input] + outputs_name = [item.name for item in onnx_model.graph.output] + return session, inputs_name, outputs_name + + torch.onnx.export( + gm, + real_inputs, + model_path, + verbose=True, + opset_version=12, + input_names=input_names, + training=torch.onnx.TrainingMode.TRAINING, + do_constant_folding=False, + ) + session, onnx_input_names, outputs_name = load_model(model_path) + + def fn(*args): + ort_inputs = { + onnx_input_names[i]: args[i].contiguous().cpu().detach().numpy() + for i in range(len(args)) + } + ort_outputs = session.run(outputs_name, ort_inputs) + output_gm = list(gm.forward(*args)) + output_ort = list([torch.from_numpy(item).cuda() for item in ort_outputs]) + assert_equal(output_gm, output_ort) + return output_ort + + num_graph += 1 + return fn + + +def nnf_backend(gm: torch.fx.GraphModule, example_inputs: List[torch.Tensor]): + global num_graph + num_graph += 1 + real_inputs = tuple( + [ + torch.rand(x.shape, dtype=x.dtype, 
layout=x.layout, device=x.device) + for x in example_inputs + ] + ) + input_names = tuple([f"input_{i}" for i in range(len(real_inputs))]) + import os + + os.makedirs("tmp", exist_ok=True) + model_name = sys_config.get_config("model_name") + model_path = f"tmp/{model_name}_onnx_graph_{num_graph}.onnx" + import onnx + from fx2onnx import to_onnx + + onnx_graph = to_onnx(gm, *real_inputs) + onnx.save(onnx_graph, model_path) + + # run with onnx + # import onnxruntime as ort + # ort_session = ort.InferenceSession(model_path) + # input_names = [inp.name for inp in ort_session.get_inputs()] + + # def run_with_onnx(*args): + # print("-- run with onnx") + # import numpy as np + # inputs = [x.cpu().numpy() for x in args] + # input_names = [inp.name for inp in ort_session.get_inputs()] + # ort_inputs = dict(zip(input_names, inputs)) + # # print("ort_inputs", ort_inputs) + # outputs = ort_session.run(None, ort_inputs) + # outputs = [torch.tensor(x).cuda() for x in outputs] + # expect_outputs = gm.forward(*args) + # for i in range(len(expect_outputs)): + # # print("evaluate", i, flush=True) + # assert_equal(expect_outputs[i], outputs[i]) + # return outputs + # return run_with_onnx + + import onnx + import onnxruntime as ort + + def load_ort_session(model_path): + onnx_model = onnx.load(model_path) + print(onnx.helper.printable_graph(onnx_model.graph)) + # print(onnx_model.graph.value_info) + onnx.checker.check_model(onnx_model) + print(f"{model_path}: check passed!") + onnx_model = onnx.shape_inference.infer_shapes(onnx_model) + session = ort.InferenceSession(model_path) + + inputs_name = [item.name for item in onnx_model.graph.input] + outputs_name = [item.name for item in onnx_model.graph.output] + return session, inputs_name, outputs_name + + # NNFUSION_ROOT = os.path.expanduser("~/frontend/nnfusion") + # os.environ["PATH"] = os.path.abspath(NNFUSION_ROOT) + ":" + os.environ["PATH"] + # sys.path.insert(1, os.path.abspath(NNFUSION_ROOT + "/src/python")) + from nnfusion.data_format import cast_pytorch_tensor + from nnfusion.executor import Executor + from nnfusion.session import build, codegen, modify_nnfusion_rt + + def build_nnfusion(onnx_model_path, codegen_flags, workdir, rt_dir): + flags_str = "-f onnx " + flags_str += " ".join( + ["-f{}={}".format(k, v) for k, v in codegen_flags.items()] + ) + # print("work dir:", workdir,) + os.system(f"rm -r {workdir}") + os.system(f"mkdir -p {workdir}") + os.system(f"cp -r tmp/bin {workdir}") + codegen(onnx_model_path, flags_str, workdir) + # os.system(f"cat {workdir}/codegen.log ") + modify_nnfusion_rt(rt_dir) + build(rt_dir) + + def load_executor(model_path: str): + assert model_path.endswith(".onnx") + workdir = os.path.abspath(model_path[:-5]) + codegen_flags = { + "autodiff": False, # add backward graph + "training_mode": False, # move weight external + "extern_result_memory": True, # move result external + "codegen_unexist_kernel": True, # generate kernel for unexist op + "product_name": "A100", + "default_device": "CUDA", + "kernel_cache_path": f'/tmp/{os.environ.get("USER")}/nnfusion/kernel_cache.db', + "biasadd_fix": True, + "check_result": True, + "conv_cnhw": True, + "max_grid_dim": 256, + "cf_level": 2, + "branch_fine_grained": False, + "branch_split": False, + "log_kerneldb_request": False, + } + rt_dir = os.path.join(workdir, "nnfusion_rt/cuda_codegen") + build_nnfusion(model_path, codegen_flags, workdir, rt_dir) + executor = Executor(rt_dir) + return executor + + executor = load_executor(model_path) + + def fn(*args): + inputs = 
[cast_pytorch_tensor(item) for item in args] + input_signatures = [x.pointer_type for x in inputs] + input_pointers = [x.pointer for x in inputs] + output_tensors = executor.alloc_output_buffer() + output_casted = [cast_pytorch_tensor(x) for x in output_tensors] + output_signatures = [x.pointer_type for x in output_casted] + output_pointers = [x.pointer for x in output_casted] + signatures = input_signatures + output_signatures + pointers = input_pointers + output_pointers + executor.feed_pointers(signatures, pointers) + return output_tensors + + return fn + + +def explain(compiled_func, *args, **kwargs): + if torch.__version__ >= "2.1.0": + torch._logging.set_logs(bytecode=True) + torch._dynamo.reset() + explain_output = torch._dynamo.explain(compiled_func)(*args, **kwargs) + print(explain_output) + return + ( + explanation, + out_guards, + graphs, + ops_per_graph, + break_reasons, + explanation_verbose, + ) = torch._dynamo.explain(compiled_func, *args, **kwargs) + print(explanation_verbose) + for i, (graph_guard, graph, ops, break_reason) in enumerate( + zip(out_guards, graphs, ops_per_graph, break_reasons) + ): + print("GRAPH", i) + print("++graph_guard:", len(graph_guard)) + for guard in graph_guard: + print(guard) + print("++graph:") + print(graph.print_readable(print_output=False)) + print("++ops:", len(ops)) + for op in ops: + print(op) + print("++break_reason:", break_reason.reason) + print("".join(traceback.format_list(break_reason.user_stack))) + print("finish") + + +def assert_equal(ref, out): + precision = 5e-3 + assert type(ref) == type(out), f"wrong type: expect {type(ref)}, got {type(out)}" + if isinstance(ref, torch.Tensor): + assert isinstance(out, torch.Tensor) + r = ref.cpu() + o = out.cpu() + if r.dtype == torch.bool and o.dtype == torch.int8: + o = o.bool() + all_close = torch.allclose(r, o, atol=precision, rtol=precision) + if not all_close: + close = torch.isclose(r, o, rtol=precision, atol=precision) + print("ref:", torch.masked_select(r, ~close)) + print("out:", torch.masked_select(o, ~close)) + print(torch.sum(~close)) + print("wrong answer !!!!!!!!!!!!!!!!!!!!!!!!!!") + assert False + elif isinstance(ref, Iterable): + assert isinstance(out, Iterable) + if isinstance(ref, dict): + assert len(ref) == len(out) + for k, v in ref.items(): + assert_equal(v, out[k]) + else: + for r, o in zip(ref, out): + assert_equal(r, o) + else: + assert ref == out, f"wrong answer: expect {ref}, got {out}" + + +def perf(repeat=100, sync=True, nvprof=True): + def wrapper1(func): + def wrapper(*args, **kwargs): + for _ in range(repeat): + o = func(*args, **kwargs) + if nvprof: + profile_start() + if sync: + torch.cuda.synchronize() + timer = Timer() + timer.start() + for _ in range(repeat): + o = func(*args, **kwargs) + if sync: + torch.cuda.synchronize() + timer.log() + if nvprof: + profile_stop() + timer.report() + return o + + return wrapper + + return wrapper1 + + +def perf_test_run(expect_output, f, compile_mode, repeat, args, kwargs): + for idx in range(repeat): + torch.cuda.synchronize() + o = f(*args, **kwargs) + if idx == 0: + assert_equal(expect_output, o) + torch.cuda.synchronize() + + profile_start() + timer = Timer() + + for idx in range(repeat): + torch.cuda.synchronize() + timer.start() + o = f(*args, **kwargs) + torch.cuda.synchronize() + timer.log() + print("compile_mode:", compile_mode) + timer.report() + # nsys will kill proc after profile_stop + profile_stop() + + +def perf_test_with_profile(f, graph_timer, compile_mode, repeat, args, kwargs): + 
torch.cuda.synchronize() + start_time = time.time() + o = f(*args, **kwargs) + torch.cuda.synchronize() + end_time = time.time() + print("first run", end_time - start_time, "s") + + for idx in range(repeat - 1): + torch.cuda.synchronize() + o = f(*args, **kwargs) + torch.cuda.synchronize() + graph_timer.clear() + timer = Timer("ms") + for idx in range(repeat): + torch.cuda.synchronize() + timer.start() + o = f(*args, **kwargs) + torch.cuda.synchronize() + timer.log() + print("compile_mode:", compile_mode) + timer.report(text="e2e") + graph_timer.report(text="graph profile") + + +def perf_test_run_cf(f, compiled, compile_mode, repeat, args_all, kwargs_all): + for idx in range(repeat): + o1 = f(*args_all[idx], **kwargs_all[idx]) + torch.cuda.synchronize() + o2 = compiled(*args_all[idx], **kwargs_all[idx]) + torch.cuda.synchronize() + assert_equal(o1, o2) + + profile_start() + timer = Timer("ms") + for idx in range(repeat): + # print("run:", idx) + torch.cuda.synchronize() + timer.start() + o = compiled(*args_all[idx], **kwargs_all[idx]) + torch.cuda.synchronize() + timer.log() + profile_stop() + print("compile_mode:", compile_mode) + timer.report() + + +def perf_test_run_bs(orignal, f, compile_mode, num_repeat, get_input_fn): + bs_list = list(range(2, 17)) + assert num_repeat % len(bs_list) == 0 + num_repeat_per_bs = num_repeat // len(bs_list) + # compile with bs=5 to avoid specialization + args, kwargs = get_input_fn(5) + o = f(*args, **kwargs) + + for i in range(num_repeat_per_bs): + for bs in bs_list: + args, kwargs = get_input_fn(bs) + if i == 0: + expect = orignal(*args, **kwargs) + torch.cuda.synchronize() + o = f(*args, **kwargs) + torch.cuda.synchronize() + if i == 0: + assert_equal(expect, o) + + profile_start() + timer = Timer() + for i in range(num_repeat_per_bs): + for bs in bs_list: + # print("run:", i, bs, flush=True) + args, kwargs = get_input_fn(bs) + torch.cuda.synchronize() + timer.start() + o = f(*args, **kwargs) + torch.cuda.synchronize() + timer.log() + profile_stop() + print("compile_mode:", compile_mode) + timer.report() + + +@tvm.transform.module_pass(opt_level=0, name="DebugDump") +class _DebugDump: # pylint: disable=too-few-public-methods + """A dummy compiler pass that does nothing but logging. + Only enabled when debug_dump is not None""" + + def __init__(self, file_name: str, show_meta: bool = False): + self.file_name = file_name + self.file_path = Path("debug") + self.show_meta = show_meta + + def transform_module( + self, mod: IRModule, _ctx: tvm.transform.PassContext + ) -> IRModule: + """A dummy transformation that dumps the module to file""" + if self.file_path is not None: + # NOTE: We use debug level here to avoid spamming the console + print(f"Dumping IR to {self.file_path / self.file_name}") + with open(self.file_path / self.file_name, "w", encoding="utf-8") as f: + f.write(mod.script(show_meta=self.show_meta)) + return mod + + +def magpy_pipeline(target: tvm.target.Target): + # variable_bounds = {} + + print(f"target = {target}") + + @tvm.transform.module_pass(opt_level=0) + def _pipeline( + mod: tvm.ir.IRModule, _ctx: tvm.transform.PassContext + ) -> tvm.ir.IRModule: + seq = tvm.transform.Sequential( + [ + # Phase 0. Add additional information for compilation and remove unused Relax func + # AttachMemoryPlanAttr(), + # tvm.tir.transform.BindTarget( + # tvm.target.Target.current(allow_none=False) + # ), + _DebugDump("debug-phase0.py", show_meta=False), + # Phase 1. 
Passes on high-level operator graph + BLASDispatch(target), + FuseTransposeMatmul(), + _DebugDump("debug-phase1.py", show_meta=False), + # Phase 2. Lowering to TIR, inherited TVM Relax's official "zero" pipeline + tvm.relax.transform.LegalizeOps(), + tvm.relax.transform.AnnotateTIROpPattern(), + tvm.relax.transform.FoldConstant(), + tvm.relax.transform.FuseOps(), + tvm.relax.transform.FuseTIR(), + _DebugDump("debug-phase2.py", show_meta=False), + # Phase 3. Passes on TIR + tvm.relax.transform.DeadCodeElimination(), + CleanUpTIRAttrs(["op_pattern"]), + _DebugDump("debug-phase3.py", show_meta=False), + # Phase 4. Low-level Optimizations + dl.ApplyDefaultSchedule( + dl.gpu.Matmul(), + dl.gpu.GEMV(), + dl.gpu.Reduction(), + dl.gpu.GeneralReduction(), + dl.gpu.Fallback(), + ), + LiftTIRGlobalBufferAlloc(), + _DebugDump("debug-phase4.py", show_meta=False), + ] + ) + mod = seq(mod) + return mod + + return _pipeline + + +def relax_dynamo(pipeline: Optional[tvm.transform.Pass] = None): + + def _relax_backend(graph_module, example_inputs): + print("start relax backend") + dev = tvm.cuda(0) + target = tvm.target.Target.from_device(dev) + # device = device_from_inputs(example_inputs) + + import torch # type: ignore[import] + + assert isinstance(graph_module, torch.fx.GraphModule) + + def to_torch_tensor(nd_tensor): + """A helper function to transfer a NDArray to torch.tensor.""" + if isinstance(nd_tensor, tvm.nd.NDArray): + return dlpack.from_dlpack(nd_tensor.to_dlpack()) + # return torch.from_numpy(nd_tensor.numpy()) + elif isinstance(nd_tensor, tvm.ir.Array): + return tuple(to_torch_tensor(x) for x in nd_tensor) + else: + raise ValueError(f"Unsupported type {type(nd_tensor)}") + + def to_tvm_tensor(torch_tensor: torch.Tensor): + """A helper function to transfer a torch.tensor to NDArray.""" + if not isinstance(torch_tensor, torch._subclasses.fake_tensor.FakeTensor): + return ( + tvm.nd.from_dlpack(dlpack.to_dlpack(torch_tensor)) + if torch_tensor.device.type == "cuda" + else tvm.nd.array(torch_tensor.numpy(), device=dev) + ) + # Fake Tensor + real_tensor = torch.randn(torch_tensor.shape, dtype=torch_tensor.dtype) + return tvm.nd.array(real_tensor.numpy()) + + graph_module.graph.eliminate_dead_code() + + assert len(example_inputs) + + fake_inputs = [] + if isinstance(example_inputs[0], torch._subclasses.fake_tensor.FakeTensor): + # Fake tensors + fake_inputs = example_inputs + else: + # Real tensors + for node in graph_module.graph.nodes: + if node.op != "placeholder": + continue + if "grapharg" not in node.meta: + continue + fake_tensor = node.meta["grapharg"].fake_tensor + if fake_tensor is None: + continue + fake_inputs.append(fake_tensor) + + input_info = [] + shape_vars = {} + for tensor in fake_inputs: + shape = [] + for s in tensor.shape: + if isinstance(s, torch.SymInt): + if str(s) not in shape_vars: + shape_vars[str(s)] = tvm.tir.Var(str(s), "int64") + shape.append(shape_vars[str(s)]) + else: + shape.append(s) + input_info.append((shape, tensor.dtype)) + + mod = from_fx(graph_module, input_info) + # print(f"init mod = {mod}") + + # invoke optimization pipeline. 
+        # Select the Relax optimization pipeline: default, by name, or a user-provided pass.
+        if pipeline is None:
+            # get default pipeline
+            seq = tvm.relax.get_pipeline()
+        elif isinstance(pipeline, str):
+            # lookup by name
+            seq = tvm.relax.get_pipeline(pipeline)
+        else:
+            seq = pipeline
+
+        with target:
+            mod = mod.with_attr("target", target)
+            mod = seq(mod)
+
+        ex = tvm.relax.build(mod, target=target)
+
+        vm = tvm.relax.VirtualMachine(ex.mod, device=dev)
+
+        def exec_tvm(*i_args):
+            # Entry point handed back to Dynamo: convert torch tensors to TVM
+            # arrays, run the compiled Relax VM, and convert the outputs back.
+            print("start relax run")
+            args = [a.contiguous() for a in i_args if isinstance(a, torch.Tensor)]
+            vm_args = list()
+            for arg in args:
+                if arg.dim() != 0:
+                    if arg.requires_grad:
+                        arg = arg.detach()
+                    vm_args.append(to_tvm_tensor(arg))
+            outputs = vm["main"](*vm_args)
+            print("finish relax run")
+            return to_torch_tensor(outputs)
+
+        print("finish relax backend")
+        return exec_tvm
+
+    return _relax_backend
+
+
+def perf_test_run_seq_len(original, f, compile_mode, num_repeat, get_input_fn):
+    # Sweep sequence lengths 32..256 (step 16) at a fixed batch size.
+    len_list = [x * 16 for x in range(2, 17)]
+    assert num_repeat % len(len_list) == 0
+    num_repeat_per_bs = num_repeat // len(len_list)
+    # compile with batch_size=8, seq_len=80 to avoid specialization
+    batch_size = 8
+    args, kwargs = get_input_fn(batch_size, 80)
+    o = f(*args, **kwargs)
+    # print("end!!!!!!!!!!!!!!!!!")
+    # exit(0)
+
+    # Warm-up passes; on the first iteration, check against the original model.
+    for i in range(num_repeat_per_bs):
+        for seq_len in len_list:
+            args, kwargs = get_input_fn(batch_size, seq_len)
+            if i == 0:
+                expect = original(*args, **kwargs)
+                torch.cuda.synchronize()
+            o = f(*args, **kwargs)
+            torch.cuda.synchronize()
+            if i == 0:
+                assert_equal(expect, o)
+
+    # Timed passes.
+    profile_start()
+    timer = Timer()
+    for i in range(num_repeat_per_bs):
+        for seq_len in len_list:
+            # print("run:", i, seq_len, flush=True)
+            args, kwargs = get_input_fn(batch_size, seq_len)
+            torch.cuda.synchronize()
+            timer.start()
+            o = f(*args, **kwargs)
+            torch.cuda.synchronize()
+            timer.log()
+    profile_stop()
+    print("compile_mode:", compile_mode)
+    timer.report()
+
+
+# import torch._dynamo.config
+# import logging
+# torch._dynamo.config.verbose=True
+# torch._dynamo.config.output_code=True
+
+
+def perf_test(
+    f,
+    compile_mode,
+    args,
+    kwargs,
+    expect_output,
+    get_input_fn,
+    num_repeat,
+    dynamic_mode,
+    check,
+):
+    """Compile `f` with the requested backend and run the matching benchmark loop."""
+    # logging.basicConfig(level=logging.INFO, force=True)
+    if compile_mode == "trace":
+        # only when kwargs is empty
+        if len(kwargs) > 0:
+            raise ValueError("kwargs must be empty when compile_mode is trace")
+        compiled = torch.jit.trace(f, args, strict=False)
+    elif compile_mode == "script":
+        compiled = f
+    elif compile_mode == "dynamo":
+        torch._dynamo.reset()
+        compiled = torch.compile(f)
+    elif compile_mode == "dynamo-tensorrt":
+        import torch_tensorrt
+
+        torch._dynamo.reset()
+        compiled = torch_tensorrt.dynamo.compile(f, args)
+    elif compile_mode == "dynamo-dynamic":
+        torch._dynamo.reset()
+        compiled = torch.compile(f, dynamic=True)
+    elif compile_mode == "dynamo-graph":
+        torch._dynamo.reset()
+        # explain(f, *args, **kwargs)
+        # torch._dynamo.reset()
+        compiled = torch.compile(f, backend=custom_backend)
+    elif compile_mode == "dynamo-onnx":
+        torch._dynamo.reset()
+        compiled = torch.compile(f, backend=onnx_backend)
+    elif compile_mode == "dynamo-nnf":
+        torch._dynamo.reset()
+        compiled = torch.compile(f, backend=nnf_backend)
+    elif compile_mode == "eager":
+        compiled = f
+    elif compile_mode == "fxtrace":
+        if len(kwargs) > 0:
+            raise ValueError("kwargs must be empty when compile_mode is fxtrace")
+        fx_graph = torch.fx.symbolic_trace(f)
+        compiled = torch._inductor.compile_fx.compile_fx(fx_graph, args)
+    elif compile_mode == "trace+fx":  # measure trace time + fx compile time
+        if len(kwargs) > 0:
+            raise ValueError("kwargs must be empty when compile_mode is trace+fx")
+        fx_graph = torch.fx.symbolic_trace(f)
+        compiled_func = torch._inductor.compile_fx.compile_fx(fx_graph, args)
+
+        def fn(*args):
+            fx_graph = torch.fx.symbolic_trace(
+                f
+            )  # intentionally retrace to include trace time in the measurement
+            return compiled_func(*args)
+
+        compiled = fn
+    elif compile_mode == "sys":
+        sys_config.set_config("debug", False)
+        compiled = sys_compile(f)
+    elif compile_mode == "sys-profile":
+        graph_timer = Timer("ms")
+        compiler = get_inductor_with_profile(graph_timer)
+        sys_config.set_config("backend", compiler)
+        sys_config.set_config("debug", False)
+        # print("compiler:", compiler, flush=True)
+        # print("is_debug", sys_config.get_config('debug'))
+        compiled = sys_compile(f)
+    elif compile_mode == "sys-dynamic":
+        sys_config.set_config("debug", False)
+        compiled = sys_compile(f)
+    elif compile_mode == "sys-nnf":
+        sys_config.set_config("debug", False)
+
+        def compile_fn(gm, example_inputs):
+            model_name = sys_config.get_config("model_name")
+            from fx2onnx import compile_with_nnf  # type: ignore[import]
+
+            from frontend.fx_graph import generate_real_tensors
+
+            real_inputs = generate_real_tensors(example_inputs)
+            return compile_with_nnf(model_name, gm, real_inputs)
+
+        sys_config.set_config("backend", compile_fn)
+        compiled = sys_compile(f)
+    elif compile_mode == "sys-torchscript":
+        sys_config.set_config("debug", False)
+        sys_config.set_config("backend", "script")
+        compiled = sys_compile(f)
+    elif compile_mode == "sys-tvm":
+        sys_config.set_config("debug", False)
+        sys_config.set_config("backend", "script")
+        compiled = sys_compile(f)
+        # device = tvm.cuda()
+        # target = tvm.target.Target.from_device(device)
+        # with target:
+        #     backend = relax_dynamo(magpy_pipeline(target=target))
+
+        # sys_config.set_config("debug", False)
+        # sys_config.set_config("backend", backend)
+        # compiled = sys_compile(f)
+    else:
+        raise NotImplementedError
+    global num_graph
+    if compile_mode == "dynamo-graph" or compile_mode == "dynamo-onnx":
+        num_graph = 0
+    if not check:
+        f = compiled
+    # Dispatch to the benchmark loop that matches the requested dynamism.
+    if dynamic_mode == "cf":
+        perf_test_run_cf(f, compiled, compile_mode, num_repeat, args, kwargs)
+    elif dynamic_mode == "bs":
+        if compile_mode == "sys-dynamic":
+            with enable_dyn_shape():
+                perf_test_run_bs(f, compiled, compile_mode, num_repeat, get_input_fn)
+        else:
+            perf_test_run_bs(f, compiled, compile_mode, num_repeat, get_input_fn)
+    elif dynamic_mode == "len":
+        if compile_mode == "sys-dynamic":
+            with enable_dyn_shape():
+                perf_test_run_seq_len(
+                    f, compiled, compile_mode, num_repeat, get_input_fn
+                )
+        else:
+            perf_test_run_seq_len(f, compiled, compile_mode, num_repeat, get_input_fn)
+    elif compile_mode == "sys-profile":
+        perf_test_with_profile(
+            compiled, graph_timer, compile_mode, num_repeat, args, kwargs
+        )
+    else:
+        perf_test_run(expect_output, compiled, compile_mode, num_repeat, args, kwargs)
+
+    if compile_mode == "dynamo-graph":
+        print("num_graph:", num_graph)
+        num_graph = 0
+
+
+def read_bin(s, dtype=np.float32):
+    with open(s + ".shape") as f:
+        shape = tuple(int(x) for x in f.read().strip().split(" "))
+    tensor = torch.from_numpy(np.fromfile(s + ".bin", dtype=dtype)).reshape(shape)
+    return tensor
+
+
+def save_bin(data, path):
+    data = data.clone().detach().cpu().numpy()
+    with open(path + ".shape", "w") as f:
+        f.write(" ".join(str(x) for x in data.shape))
+    data.tofile(path + ".bin")
+
+
+def script_with_log(*args, **kwargs):
+    print("run torch.jit.script")
+    return torch.jit.script(*args, **kwargs)
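The commented-out `sys-tvm` branch above hints at how the Relax backend is meant to be attached through `relax_dynamo`. Below is a minimal, hypothetical usage sketch, not part of the patch: it assumes the upstream `tvm.relax.frontend.torch.relax_dynamo` factory (same interface as the variant in this file), a CUDA-capable machine, and a placeholder `toy` model.

```python
import torch
import tvm
from tvm.relax.frontend.torch import relax_dynamo  # assumed upstream factory

# Build a Relax-backed Dynamo backend and hand it to torch.compile,
# mirroring the commented-out "sys-tvm" wiring above.
device = tvm.cuda()
target = tvm.target.Target.from_device(device)
with target:
    backend = relax_dynamo()  # default pipeline; a custom pass sequence could be passed instead

def toy(x):
    return torch.nn.functional.relu(x) + 1.0

compiled = torch.compile(toy, backend=backend)
y = compiled(torch.randn(16, 64, device="cuda"))
```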
[GIT binary patch data omitted]