From 9067cab86b79ff3a4deb5099624851e55253488f Mon Sep 17 00:00:00 2001
From: Brant Burnett
Date: Sun, 22 Dec 2024 16:36:42 -0500
Subject: [PATCH] Add a documentation site (#120)

---
 .github/workflows/pages.yml |  56 ++++++++++++++++
 .gitignore                  |   1 +
 COPYING.txt                 |   2 +-
 README.md                   |  99 +--------------------------
 Snappier/Snappier.csproj    |   3 +-
 docfx.json                  |  54 +++++++++++++++
 docs/block.md               | 129 ++++++++++++++++++++++++++++++++++++
 docs/getting-started.md     |  15 +++++
 docs/stream.md              |  49 ++++++++++++++
 docs/toc.yml                |   6 ++
 images/icon-48.png          | Bin 0 -> 6379 bytes
 icon.png => images/icon.png | Bin
 index.md                    |  58 ++++++++++++++++
 toc.yml                     |   8 +++
 14 files changed, 381 insertions(+), 99 deletions(-)
 create mode 100644 .github/workflows/pages.yml
 create mode 100644 docfx.json
 create mode 100644 docs/block.md
 create mode 100644 docs/getting-started.md
 create mode 100644 docs/stream.md
 create mode 100644 docs/toc.yml
 create mode 100644 images/icon-48.png
 rename icon.png => images/icon.png (100%)
 create mode 100644 index.md
 create mode 100644 toc.yml

diff --git a/.github/workflows/pages.yml b/.github/workflows/pages.yml
new file mode 100644
index 0000000..4f8a68e
--- /dev/null
+++ b/.github/workflows/pages.yml
@@ -0,0 +1,56 @@
+name: GitHub Pages
+
+on:
+  push:
+    branches:
+      - main
+
+# Allow only one concurrent deployment, skipping runs queued between the in-progress run and the latest queued run.
+# However, do NOT cancel in-progress runs, as we want to allow these production deployments to complete.
+concurrency:
+  group: pages
+  cancel-in-progress: false
+
+jobs:
+  build-docs:
+    name: Build Documentation
+
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+      - name: Setup .NET
+        uses: actions/setup-dotnet@v4
+        with:
+          dotnet-version: 9.0.x
+
+      - name: Install docfx
+        run: dotnet tool update -g docfx
+      - name: Build documentation
+        run: docfx docfx.json
+
+      - name: Upload artifact
+        uses: actions/upload-pages-artifact@v3
+        with:
+          path: 'artifacts/_site'
+
+  publish-docs:
+    name: Publish Documentation
+    needs: build-docs
+
+    # Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages
+    permissions:
+      actions: read
+      pages: write
+      id-token: write
+
+    # Deploy to the github-pages environment
+    environment:
+      name: github-pages
+      url: ${{ steps.deployment.outputs.page_url }}
+
+    runs-on: ubuntu-latest
+    steps:
+      - name: Deploy to GitHub Pages
+        id: deployment
+        uses: actions/deploy-pages@v4
\ No newline at end of file
diff --git a/.gitignore b/.gitignore
index 8fec8cd..645b2fb 100644
--- a/.gitignore
+++ b/.gitignore
@@ -24,3 +24,4 @@ BenchmarkDotNet.Artifacts/
 test-results/
 TestResults/
 .DS_Store
+/api/
\ No newline at end of file
diff --git a/COPYING.txt b/COPYING.txt
index f9c2870..9a87994 100644
--- a/COPYING.txt
+++ b/COPYING.txt
@@ -1,4 +1,4 @@
-Copyright 2011-2020, Google, Inc. and Snappier Authors
+Copyright 2011-2024, Google, Inc. and Snappier Authors.
 All rights reserved.
 
 Redistribution and use in source and binary forms, with or without
diff --git a/README.md b/README.md
index 1995f6c..28f5488 100644
--- a/README.md
+++ b/README.md
@@ -4,6 +4,8 @@
 Snappier is a pure C# port of Google's [Snappy](https://github.com/google/snappy) compression algorithm. It is designed with speed as the primary goal, rather than compression ratio, and is ideal for compressing network traffic. Please see [the Snappy README file](https://github.com/google/snappy/blob/master/README.md) for more details on Snappy.
 
+Complete documentation is available at https://brantburnett.github.io/Snappier/.
+
 ## Project Goals
 
 The Snappier project aims to meet the following needs of the .NET community.
@@ -31,44 +33,6 @@ or
 dotnet add package Snappier
 ```
 
-## Block compression/decompression using a buffer you already own
-
-```cs
-using Snappier;
-
-public class Program
-{
-    private static byte[] Data = {0, 1, 2}; // Wherever you get the data from
-
-    public static void Main()
-    {
-        // This option assumes that you are managing buffers yourself in an efficient way.
-        // In this example, we're using heap-allocated byte arrays; however, in most cases
-        // you would get these buffers from a buffer pool like ArrayPool<byte> or MemoryPool<byte>.
-
-        // If the output buffer is too small, an ArgumentException is thrown. This will not
-        // occur in this example because a sufficient buffer is always allocated via
-        // Snappy.GetMaxCompressedLength or Snappy.GetUncompressedLength. There are TryCompress
-        // and TryDecompress overloads that return false if the output buffer is too small
-        // rather than throwing an exception.
-
-        // Compression
-        byte[] buffer = new byte[Snappy.GetMaxCompressedLength(Data)];
-        int compressedLength = Snappy.Compress(Data, buffer);
-        Span<byte> compressed = buffer.AsSpan(0, compressedLength);
-
-        // Decompression
-        byte[] outputBuffer = new byte[Snappy.GetUncompressedLength(compressed)];
-        int decompressedLength = Snappy.Decompress(compressed, outputBuffer);
-
-        for (var i = 0; i < decompressedLength; i++)
-        {
-            // Do something with the data
-        }
-    }
-}
-```
-
 ## Block compression/decompression using a memory pool buffer
 
 ```cs
@@ -97,65 +61,6 @@ public class Program
 }
 ```
 
-## Block compression/decompression using a buffer writer
-
-```cs
-using Snappier;
-using System.Buffers;
-
-public class Program
-{
-    private static byte[] Data = {0, 1, 2}; // Wherever you get the data from
-
-    public static void Main()
-    {
-        // This option uses `IBufferWriter<byte>`. In .NET 6 you can get a simple
-        // implementation such as `ArrayBufferWriter<byte>`, but it may also be a `PipeWriter`
-        // or any other more advanced implementation of `IBufferWriter<byte>`.
-
-        // These overloads also accept a `ReadOnlySequence<byte>`, which allows the source data
-        // to be made up of buffer segments rather than one large buffer. However, segment size
-        // may be a factor in performance. For compression, segments that are some multiple of
-        // 64KB are recommended. For decompression, simply avoid small segments.
-
-        // Compression
-        var compressedBufferWriter = new ArrayBufferWriter<byte>();
-        Snappy.Compress(new ReadOnlySequence<byte>(Data), compressedBufferWriter);
-        var compressedData = compressedBufferWriter.WrittenMemory;
-
-        // Decompression
-        var decompressedBufferWriter = new ArrayBufferWriter<byte>();
-        Snappy.Decompress(new ReadOnlySequence<byte>(compressedData), decompressedBufferWriter);
-        var decompressedData = decompressedBufferWriter.WrittenMemory;
-
-        // Do something with the data
-    }
-}
-```
-
-## Block compression/decompression using heap allocated byte[]
-
-```cs
-using Snappier;
-
-public class Program
-{
-    private static byte[] Data = {0, 1, 2}; // Wherever you get the data from
-
-    public static void Main()
-    {
-        // This is generally the least efficient option,
-        // but in some cases may be the simplest to implement.
-
-        // Compression
-        byte[] compressed = Snappy.CompressToArray(Data);
-
-        // Decompression
-        byte[] decompressed = Snappy.DecompressToArray(compressed);
-    }
-}
-```
-
 ## Stream compression/decompression
 
 Compressing or decompressing a stream follows the same paradigm as other compression streams in .NET. `SnappyStream` wraps an inner stream. If decompressing, you read from the `SnappyStream`; if compressing, you write to the `SnappyStream`.
diff --git a/Snappier/Snappier.csproj b/Snappier/Snappier.csproj
index 298cbdf..1503edb 100644
--- a/Snappier/Snappier.csproj
+++ b/Snappier/Snappier.csproj
@@ -26,6 +26,7 @@
     BSD-3-Clause
    README.md
    icon.png
+    https://brantburnett.github.io/Snappier/
    true
    true
    true
@@ -44,7 +45,7 @@
-
+
diff --git a/docfx.json b/docfx.json
new file mode 100644
index 0000000..e38746a
--- /dev/null
+++ b/docfx.json
@@ -0,0 +1,54 @@
+{
+  "$schema": "https://raw.githubusercontent.com/dotnet/docfx/main/schemas/docfx.schema.json",
+  "metadata": [
+    {
+      "src": [
+        {
+          "src": "./Snappier",
+          "files": [
+            "**/*.csproj"
+          ]
+        }
+      ],
+      "dest": "api",
+      "properties": {
+        "TargetFramework": "net8.0"
+      }
+    }
+  ],
+  "build": {
+    "content": [
+      {
+        "files": [
+          "**/*.{md,yml}"
+        ],
+        "exclude": [
+          "_site/**",
+          "artifacts/**",
+          "**/BenchmarkDotNet.Artifacts/**"
+        ]
+      }
+    ],
+    "resource": [
+      {
+        "files": [
+          "images/**"
+        ]
+      }
+    ],
+    "output": "artifacts/_site",
+    "template": [
+      "default",
+      "material/material"
+    ],
+    "globalMetadata": {
+      "_appName": "Snappier",
+      "_appTitle": "Snappier",
+      "_appLogoPath": "images/icon-48.png",
+      "_disableContribution": true,
+      "_enableSearch": true,
+      "pdf": false
+    },
+    "postProcessors": ["ExtractSearchIndex"]
+  }
+}
\ No newline at end of file
diff --git a/docs/block.md b/docs/block.md
new file mode 100644
index 0000000..a4fd12d
--- /dev/null
+++ b/docs/block.md
@@ -0,0 +1,129 @@
+# Block Compression
+
+Block compression is ideal for data up to 64KB, though it may be used for data of any size. It does not include any stream
+framing or CRC validation. It also doesn't automatically fall back to storing the data uncompressed if compression grows the data.
+
+## Block compression/decompression using a buffer you already own
+
+```cs
+using Snappier;
+
+public class Program
+{
+    private static byte[] Data = {0, 1, 2}; // Wherever you get the data from
+
+    public static void Main()
+    {
+        // This option assumes that you are managing buffers yourself in an efficient way.
+        // In this example, we're using heap-allocated byte arrays; however, in most cases
+        // you would get these buffers from a buffer pool like ArrayPool<byte> or MemoryPool<byte>.
+
+        // If the output buffer is too small, an ArgumentException is thrown. This will not
+        // occur in this example because a sufficient buffer is always allocated via
+        // Snappy.GetMaxCompressedLength or Snappy.GetUncompressedLength. There are TryCompress
+        // and TryDecompress overloads that return false if the output buffer is too small
+        // rather than throwing an exception.
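+
+        // Hypothetical aside (not part of the original example; the exact Try*
+        // signatures are assumed here): a Try-based call site checks a bool
+        // instead of catching an exception, along these lines:
+        //
+        //     if (!Snappy.TryCompress(Data, buffer, out int written))
+        //     {
+        //         // Output buffer was too small; rent a larger one and retry.
+        //     }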
+
+        // Compression
+        byte[] buffer = new byte[Snappy.GetMaxCompressedLength(Data)];
+        int compressedLength = Snappy.Compress(Data, buffer);
+        Span<byte> compressed = buffer.AsSpan(0, compressedLength);
+
+        // Decompression
+        byte[] outputBuffer = new byte[Snappy.GetUncompressedLength(compressed)];
+        int decompressedLength = Snappy.Decompress(compressed, outputBuffer);
+
+        for (var i = 0; i < decompressedLength; i++)
+        {
+            // Do something with the data
+        }
+    }
+}
+```
+
+## Block compression/decompression using a memory pool buffer
+
+```cs
+using Snappier;
+
+public class Program
+{
+    private static byte[] Data = {0, 1, 2}; // Wherever you get the data from
+
+    public static void Main()
+    {
+        // This option uses `MemoryPool<byte>.Shared`. However, if you fail to
+        // dispose of the returned buffers correctly it can result in inefficient garbage collection.
+        // It is important to either call .Dispose() or use a using statement.
+
+        // Compression
+        using (IMemoryOwner<byte> compressed = Snappy.CompressToMemory(Data))
+        {
+            // Decompression
+            using (IMemoryOwner<byte> decompressed = Snappy.DecompressToMemory(compressed.Memory.Span))
+            {
+                // Do something with the data
+            }
+        }
+    }
+}
+```
+
+## Block compression/decompression using a buffer writer
+
+```cs
+using Snappier;
+using System.Buffers;
+
+public class Program
+{
+    private static byte[] Data = {0, 1, 2}; // Wherever you get the data from
+
+    public static void Main()
+    {
+        // This option uses `IBufferWriter<byte>`. In .NET 6 you can get a simple
+        // implementation such as `ArrayBufferWriter<byte>`, but it may also be a `PipeWriter`
+        // or any other more advanced implementation of `IBufferWriter<byte>`.
+
+        // These overloads also accept a `ReadOnlySequence<byte>`, which allows the source data
+        // to be made up of buffer segments rather than one large buffer. However, segment size
+        // may be a factor in performance. For compression, segments that are some multiple of
+        // 64KB are recommended. For decompression, simply avoid small segments.
+
+        // Compression
+        var compressedBufferWriter = new ArrayBufferWriter<byte>();
+        Snappy.Compress(new ReadOnlySequence<byte>(Data), compressedBufferWriter);
+        var compressedData = compressedBufferWriter.WrittenMemory;
+
+        // Decompression
+        var decompressedBufferWriter = new ArrayBufferWriter<byte>();
+        Snappy.Decompress(new ReadOnlySequence<byte>(compressedData), decompressedBufferWriter);
+        var decompressedData = decompressedBufferWriter.WrittenMemory;
+
+        // Do something with the data
+    }
+}
+```
+
+## Block compression/decompression using heap allocated byte[]
+
+```cs
+using Snappier;
+
+public class Program
+{
+    private static byte[] Data = {0, 1, 2}; // Wherever you get the data from
+
+    public static void Main()
+    {
+        // This is generally the least efficient option,
+        // but in some cases may be the simplest to implement.
+
+        // Compression
+        byte[] compressed = Snappy.CompressToArray(Data);
+
+        // Decompression
+        byte[] decompressed = Snappy.DecompressToArray(compressed);
+    }
+}
+```
\ No newline at end of file
diff --git a/docs/getting-started.md b/docs/getting-started.md
new file mode 100644
index 0000000..3be18ca
--- /dev/null
+++ b/docs/getting-started.md
@@ -0,0 +1,15 @@
+# Getting Started
+
+## Installing
+
+Simply add a NuGet package reference to the latest version of Snappier.
+
+```xml
+<PackageReference Include="Snappier" Version="1.1.6" />
+```
+
+or
+
+```sh
+dotnet add package Snappier
+```
diff --git a/docs/stream.md b/docs/stream.md
new file mode 100644
index 0000000..a8213c0
--- /dev/null
+++ b/docs/stream.md
@@ -0,0 +1,49 @@
+# Stream Compression
+
+Stream compression reads or writes the [Snappy framing format](https://github.com/google/snappy/blob/master/framing_format.txt) designed for streaming.
+It is ideal for data being sent over a network stream, and includes additional framing data and CRC validation.
+It also recognizes when an individual block in the stream compresses poorly and will include it in uncompressed form.
+
+## Stream compression/decompression
+
+Compressing or decompressing a stream follows the same paradigm as other compression streams in .NET. `SnappyStream` wraps an inner stream. If decompressing, you read from the `SnappyStream`; if compressing, you write to the `SnappyStream`.
+
+```cs
+using System.IO;
+using System.IO.Compression;
+using Snappier;
+
+public class Program
+{
+    public static async Task Main()
+    {
+        using var fileStream = File.OpenRead("somefile.txt");
+
+        // First, compression
+        using var compressed = new MemoryStream();
+
+        using (var compressor = new SnappyStream(compressed, CompressionMode.Compress, leaveOpen: true))
+        {
+            await fileStream.CopyToAsync(compressor);
+
+            // Disposing the compressor also flushes the buffers to the inner stream.
+            // We pass leaveOpen: true above so that it doesn't close/dispose the inner stream.
+            // Alternatively, we could call compressor.Flush()
+        }
+
+        // Then, decompression
+
+        compressed.Position = 0; // Reset to beginning of the stream so we can read
+        using var decompressor = new SnappyStream(compressed, CompressionMode.Decompress);
+
+        var buffer = new byte[65536];
+        var bytesRead = decompressor.Read(buffer, 0, buffer.Length);
+        while (bytesRead > 0)
+        {
+            // Do something with the data
+
+            bytesRead = decompressor.Read(buffer, 0, buffer.Length);
+        }
+    }
+}
+```
\ No newline at end of file
diff --git a/docs/toc.yml b/docs/toc.yml
new file mode 100644
index 0000000..2cc1ff7
--- /dev/null
+++ b/docs/toc.yml
@@ -0,0 +1,6 @@
+- name: Getting Started
+  href: getting-started.md
+- name: Block Compression
+  href: block.md
+- name: Stream Compression
+  href: stream.md
diff --git a/images/icon-48.png b/images/icon-48.png
new file mode 100644
index 0000000000000000000000000000000000000000..addb001405ffea1f1527d3ab1c29606e47db2dfd
GIT binary patch
literal 6379
[base85-encoded PNG data omitted]