+
+
+
+
+
+
diff --git a/packages/browser-integration-tests/suites/replay/compressionDisabled/test.ts b/packages/browser-integration-tests/suites/replay/compressionDisabled/test.ts
new file mode 100644
index 000000000000..31ad7fc22991
--- /dev/null
+++ b/packages/browser-integration-tests/suites/replay/compressionDisabled/test.ts
@@ -0,0 +1,52 @@
+import { expect } from '@playwright/test';
+
+import { sentryTest } from '../../../utils/fixtures';
+import { getExpectedReplayEvent } from '../../../utils/replayEventTemplates';
+import {
+ getFullRecordingSnapshots,
+ getReplayEvent,
+ replayEnvelopeIsCompressed,
+ shouldSkipReplayTest,
+ waitForReplayRequest,
+} from '../../../utils/replayHelpers';
+
+sentryTest(
+ 'replay recording should allow disabling compression',
+ async ({ getLocalTestPath, page, forceFlushReplay }) => {
+ if (shouldSkipReplayTest()) {
+ sentryTest.skip();
+ }
+
+ const reqPromise0 = waitForReplayRequest(page, 0);
+
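+ // Mock the Sentry ingest endpoint so that no actual requests leave the test environment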
+ await page.route('https://dsn.ingest.sentry.io/**/*', route => {
+ return route.fulfill({
+ status: 200,
+ contentType: 'application/json',
+ body: JSON.stringify({ id: 'test-id' }),
+ });
+ });
+
+ const url = await getLocalTestPath({ testDir: __dirname });
+
+ await page.goto(url);
+ await forceFlushReplay();
+
+ const req0 = await reqPromise0;
+
+ const replayEvent0 = getReplayEvent(req0);
+ expect(replayEvent0).toEqual(getExpectedReplayEvent());
+
+ expect(replayEnvelopeIsCompressed(req0)).toEqual(false);
+
+ const snapshots = getFullRecordingSnapshots(req0);
+ expect(snapshots.length).toEqual(1);
+
+ const stringifiedSnapshot = JSON.stringify(snapshots[0]);
+ expect(stringifiedSnapshot).toContain('"tagName":"body"');
+ expect(stringifiedSnapshot).toContain('"tagName":"html"');
+ expect(stringifiedSnapshot).toContain('"tagName":"button"');
+ expect(stringifiedSnapshot).toContain('"textContent":"*** ***"');
+ expect(stringifiedSnapshot).toContain('"id":"go-background"');
+ },
+);
diff --git a/packages/browser-integration-tests/suites/replay/compressionWorkerUrl/init.js b/packages/browser-integration-tests/suites/replay/compressionWorkerUrl/init.js
new file mode 100644
index 000000000000..0d0e7ac0e71e
--- /dev/null
+++ b/packages/browser-integration-tests/suites/replay/compressionWorkerUrl/init.js
@@ -0,0 +1,19 @@
+import * as Sentry from '@sentry/browser';
+
+window.Sentry = Sentry;
+window.Replay = new Sentry.Replay({
+ flushMinDelay: 200,
+ flushMaxDelay: 200,
+ minReplayDuration: 0,
+ useCompression: true,
+ workerUrl: `${window.location.origin}/my-test-worker.js`,
+});
+
+Sentry.init({
+ dsn: 'https://public@dsn.ingest.sentry.io/1337',
+ sampleRate: 0,
+ replaysSessionSampleRate: 1.0,
+ replaysOnErrorSampleRate: 0.0,
+
+ integrations: [window.Replay],
+});
diff --git a/packages/browser-integration-tests/suites/replay/compressionWorkerUrl/template.html b/packages/browser-integration-tests/suites/replay/compressionWorkerUrl/template.html
new file mode 100644
index 000000000000..31cfc73ec3c3
--- /dev/null
+++ b/packages/browser-integration-tests/suites/replay/compressionWorkerUrl/template.html
@@ -0,0 +1,9 @@
+
+
+
+
+
+
+
+
+
diff --git a/packages/browser-integration-tests/suites/replay/compressionWorkerUrl/test.ts b/packages/browser-integration-tests/suites/replay/compressionWorkerUrl/test.ts
new file mode 100644
index 000000000000..006e70bed4ee
--- /dev/null
+++ b/packages/browser-integration-tests/suites/replay/compressionWorkerUrl/test.ts
@@ -0,0 +1,66 @@
+import { expect } from '@playwright/test';
+import fs from 'fs';
+import path from 'path';
+
+import { sentryTest, TEST_HOST } from '../../../utils/fixtures';
+import { getExpectedReplayEvent } from '../../../utils/replayEventTemplates';
+import {
+ getFullRecordingSnapshots,
+ getReplayEvent,
+ replayEnvelopeIsCompressed,
+ shouldSkipReplayTest,
+ waitForReplayRequest,
+} from '../../../utils/replayHelpers';
+
+sentryTest(
+ 'replay recording should be compressed if using custom workerUrl',
+ async ({ getLocalTestUrl, page, forceFlushReplay }) => {
+ if (shouldSkipReplayTest()) {
+ sentryTest.skip();
+ }
+
+ const reqPromise0 = waitForReplayRequest(page, 0);
+
+ await page.route('https://dsn.ingest.sentry.io/**/*', route => {
+ return route.fulfill({
+ status: 200,
+ contentType: 'application/json',
+ body: JSON.stringify({ id: 'test-id' }),
+ });
+ });
+
+ const url = await getLocalTestUrl({ testDir: __dirname });
+
+ let customCompressCalled = 0;
+
+ // Make sure to register this _after_ getLocalTestUrl is called, as that also registers a default route for TEST_HOST
+ await page.route(`${TEST_HOST}/my-test-worker.js`, route => {
+ const filePath = path.resolve(__dirname, '../../../../replay-worker/examples/worker.min.js');
+
+ customCompressCalled++;
+
+ return fs.existsSync(filePath) ? route.fulfill({ path: filePath }) : route.continue();
+ });
+
+ await page.goto(url);
+ await forceFlushReplay();
+
+ const req0 = await reqPromise0;
+
+ const replayEvent0 = getReplayEvent(req0);
+ expect(replayEvent0).toEqual(getExpectedReplayEvent());
+
+ expect(replayEnvelopeIsCompressed(req0)).toEqual(true);
+ expect(customCompressCalled).toBe(1);
+
+ const snapshots = getFullRecordingSnapshots(req0);
+ expect(snapshots.length).toEqual(1);
+
+ const stringifiedSnapshot = JSON.stringify(snapshots[0]);
+ expect(stringifiedSnapshot).toContain('"tagName":"body"');
+ expect(stringifiedSnapshot).toContain('"tagName":"html"');
+ expect(stringifiedSnapshot).toContain('"tagName":"button"');
+ expect(stringifiedSnapshot).toContain('"textContent":"*** ***"');
+ expect(stringifiedSnapshot).toContain('"id":"go-background"');
+ },
+);
diff --git a/packages/browser-integration-tests/utils/replayEventTemplates.ts b/packages/browser-integration-tests/utils/replayEventTemplates.ts
index 46208b39af00..8824d8ab1812 100644
--- a/packages/browser-integration-tests/utils/replayEventTemplates.ts
+++ b/packages/browser-integration-tests/utils/replayEventTemplates.ts
@@ -8,7 +8,7 @@ const DEFAULT_REPLAY_EVENT = {
timestamp: expect.any(Number),
error_ids: [],
trace_ids: [],
- urls: [expect.stringContaining('/dist/index.html')],
+ urls: [expect.stringContaining('/index.html')],
replay_id: expect.stringMatching(/\w{32}/),
replay_start_timestamp: expect.any(Number),
segment_id: 0,
@@ -31,7 +31,7 @@ const DEFAULT_REPLAY_EVENT = {
name: 'sentry.javascript.browser',
},
request: {
- url: expect.stringContaining('/dist/index.html'),
+ url: expect.stringContaining('/index.html'),
headers: {
'User-Agent': expect.stringContaining(''),
},
diff --git a/packages/browser-integration-tests/utils/replayHelpers.ts b/packages/browser-integration-tests/utils/replayHelpers.ts
index 7ad63a734067..b487342f5593 100644
--- a/packages/browser-integration-tests/utils/replayHelpers.ts
+++ b/packages/browser-integration-tests/utils/replayHelpers.ts
@@ -302,6 +302,38 @@ const replayEnvelopeRequestParser = (request: Request | null, envelopeIndex = 2)
return envelope[envelopeIndex] as Event;
};
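+/**
+ * Checks whether the replay recording payload in the given request envelope is compressed.
+ * Non-JSON envelope items are scanned for the zlib header bytes (0x78 0x9c).
+ */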
+export function replayEnvelopeIsCompressed(resOrReq: Request | Response): boolean {
+ const request = getRequest(resOrReq);
+
+ // https://develop.sentry.dev/sdk/envelopes/
+ const envelopeBytes = request.postDataBuffer() || '';
+
+ // first, we convert the buffer to a string so we can split it and go through the uncompressed lines
+ const envelopeString = envelopeBytes.toString();
+
+ const lines: boolean[] = envelopeString.split('\n').map(line => {
+ try {
+ JSON.parse(line);
+ } catch (error) {
+ // If we fail to parse a line, we _might_ have found a compressed payload,
+ // so let's check if this is actually the case.
+ // This is quite hacky, but we can't search `line` directly because the string conversion above
+ // seems to have altered its binary content. Hence, we take the raw envelope and
+ // look for the position where the zlib compression header (0x78 0x9c) starts
+ for (let i = 0; i < envelopeBytes.length; i++) {
+ if (envelopeBytes[i] === 0x78 && envelopeBytes[i + 1] === 0x9c) {
+ // We found a zlib-compressed payload
+ return true;
+ }
+ }
+ }
+
+ return false;
+ });
+
+ return lines.some(line => line);
+}
+
export const replayEnvelopeParser = (request: Request | null): unknown[] => {
// https://develop.sentry.dev/sdk/envelopes/
const envelopeBytes = request?.postDataBuffer() || '';
diff --git a/packages/replay-worker/examples/worker.js b/packages/replay-worker/examples/worker.js
new file mode 100644
index 000000000000..4c815fe97862
--- /dev/null
+++ b/packages/replay-worker/examples/worker.js
@@ -0,0 +1,4165 @@
+/*! Sentry Replay Worker 7.76.0 (de39290bc) | https://github.com/getsentry/sentry-javascript */
+var deflate$3 = {};
+
+var trees = {};
+
+// (C) 1995-2013 Jean-loup Gailly and Mark Adler
+// (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+/* eslint-disable space-unary-ops */
+
+/* Public constants ==========================================================*/
+/* ===========================================================================*/
+
+
+//const Z_FILTERED = 1;
+//const Z_HUFFMAN_ONLY = 2;
+//const Z_RLE = 3;
+const Z_FIXED$1 = 4;
+//const Z_DEFAULT_STRATEGY = 0;
+
+/* Possible values of the data_type field (though see inflate()) */
+const Z_BINARY = 0;
+const Z_TEXT = 1;
+//const Z_ASCII = 1; // = Z_TEXT
+const Z_UNKNOWN$1 = 2;
+
+/*============================================================================*/
+
+
+function zero$1(buf) { let len = buf.length; while (--len >= 0) { buf[len] = 0; } }
+
+// From zutil.h
+
+const STORED_BLOCK = 0;
+const STATIC_TREES = 1;
+const DYN_TREES = 2;
+/* The three kinds of block type */
+
+const MIN_MATCH$1 = 3;
+const MAX_MATCH$1 = 258;
+/* The minimum and maximum match lengths */
+
+// From deflate.h
+/* ===========================================================================
+ * Internal compression state.
+ */
+
+const LENGTH_CODES$1 = 29;
+/* number of length codes, not counting the special END_BLOCK code */
+
+const LITERALS$1 = 256;
+/* number of literal bytes 0..255 */
+
+const L_CODES$1 = LITERALS$1 + 1 + LENGTH_CODES$1;
+/* number of Literal or Length codes, including the END_BLOCK code */
+
+const D_CODES$1 = 30;
+/* number of distance codes */
+
+const BL_CODES$1 = 19;
+/* number of codes used to transfer the bit lengths */
+
+const HEAP_SIZE$1 = 2 * L_CODES$1 + 1;
+/* maximum heap size */
+
+const MAX_BITS$1 = 15;
+/* All codes must not exceed MAX_BITS bits */
+
+const Buf_size = 16;
+/* size of bit buffer in bi_buf */
+
+
+/* ===========================================================================
+ * Constants
+ */
+
+const MAX_BL_BITS = 7;
+/* Bit length codes must not exceed MAX_BL_BITS bits */
+
+const END_BLOCK = 256;
+/* end of block literal code */
+
+const REP_3_6 = 16;
+/* repeat previous bit length 3-6 times (2 bits of repeat count) */
+
+const REPZ_3_10 = 17;
+/* repeat a zero length 3-10 times (3 bits of repeat count) */
+
+const REPZ_11_138 = 18;
+/* repeat a zero length 11-138 times (7 bits of repeat count) */
+
+/* eslint-disable comma-spacing,array-bracket-spacing */
+const extra_lbits = /* extra bits for each length code */
+ new Uint8Array([0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,0]);
+
+const extra_dbits = /* extra bits for each distance code */
+ new Uint8Array([0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13]);
+
+const extra_blbits = /* extra bits for each bit length code */
+ new Uint8Array([0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,3,7]);
+
+const bl_order =
+ new Uint8Array([16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15]);
+/* eslint-enable comma-spacing,array-bracket-spacing */
+
+/* The lengths of the bit length codes are sent in order of decreasing
+ * probability, to avoid transmitting the lengths for unused bit length codes.
+ */
+
+/* ===========================================================================
+ * Local data. These are initialized only once.
+ */
+
+// We pre-fill arrays with 0 to avoid uninitialized gaps
+
+const DIST_CODE_LEN = 512; /* see definition of array dist_code below */
+
+// !!!! Use flat array instead of structure, Freq = i*2, Len = i*2+1
+const static_ltree = new Array((L_CODES$1 + 2) * 2);
+zero$1(static_ltree);
+/* The static literal tree. Since the bit lengths are imposed, there is no
+ * need for the L_CODES extra codes used during heap construction. However
+ * The codes 286 and 287 are needed to build a canonical tree (see _tr_init
+ * below).
+ */
+
+const static_dtree = new Array(D_CODES$1 * 2);
+zero$1(static_dtree);
+/* The static distance tree. (Actually a trivial tree since all codes use
+ * 5 bits.)
+ */
+
+const _dist_code = new Array(DIST_CODE_LEN);
+zero$1(_dist_code);
+/* Distance codes. The first 256 values correspond to the distances
+ * 3 .. 258, the last 256 values correspond to the top 8 bits of
+ * the 15 bit distances.
+ */
+
+const _length_code = new Array(MAX_MATCH$1 - MIN_MATCH$1 + 1);
+zero$1(_length_code);
+/* length code for each normalized match length (0 == MIN_MATCH) */
+
+const base_length = new Array(LENGTH_CODES$1);
+zero$1(base_length);
+/* First normalized length for each code (0 = MIN_MATCH) */
+
+const base_dist = new Array(D_CODES$1);
+zero$1(base_dist);
+/* First normalized distance for each code (0 = distance of 1) */
+
+
+function StaticTreeDesc(static_tree, extra_bits, extra_base, elems, max_length) {
+
+ this.static_tree = static_tree; /* static tree or NULL */
+ this.extra_bits = extra_bits; /* extra bits for each code or NULL */
+ this.extra_base = extra_base; /* base index for extra_bits */
+ this.elems = elems; /* max number of elements in the tree */
+ this.max_length = max_length; /* max bit length for the codes */
+
+ // show if `static_tree` has data or dummy - needed for monomorphic objects
+ this.has_stree = static_tree && static_tree.length;
+}
+
+
+let static_l_desc;
+let static_d_desc;
+let static_bl_desc;
+
+
+function TreeDesc(dyn_tree, stat_desc) {
+ this.dyn_tree = dyn_tree; /* the dynamic tree */
+ this.max_code = 0; /* largest code with non zero frequency */
+ this.stat_desc = stat_desc; /* the corresponding static tree */
+}
+
+
+
+const d_code = (dist) => {
+
+ return dist < 256 ? _dist_code[dist] : _dist_code[256 + (dist >>> 7)];
+};
+
+
+/* ===========================================================================
+ * Output a short LSB first on the stream.
+ * IN assertion: there is enough room in pendingBuf.
+ */
+const put_short = (s, w) => {
+// put_byte(s, (uch)((w) & 0xff));
+// put_byte(s, (uch)((ush)(w) >> 8));
+ s.pending_buf[s.pending++] = (w) & 0xff;
+ s.pending_buf[s.pending++] = (w >>> 8) & 0xff;
+};
+
+
+/* ===========================================================================
+ * Send a value on a given number of bits.
+ * IN assertion: length <= 16 and value fits in length bits.
+ */
+const send_bits = (s, value, length) => {
+
+ if (s.bi_valid > (Buf_size - length)) {
+ s.bi_buf |= (value << s.bi_valid) & 0xffff;
+ put_short(s, s.bi_buf);
+ s.bi_buf = value >> (Buf_size - s.bi_valid);
+ s.bi_valid += length - Buf_size;
+ } else {
+ s.bi_buf |= (value << s.bi_valid) & 0xffff;
+ s.bi_valid += length;
+ }
+};
+
+
+const send_code = (s, c, tree) => {
+
+ send_bits(s, tree[c * 2]/*.Code*/, tree[c * 2 + 1]/*.Len*/);
+};
+
+
+/* ===========================================================================
+ * Reverse the first len bits of a code, using straightforward code (a faster
+ * method would use a table)
+ * IN assertion: 1 <= len <= 15
+ */
+const bi_reverse = (code, len) => {
+
+ let res = 0;
+ do {
+ res |= code & 1;
+ code >>>= 1;
+ res <<= 1;
+ } while (--len > 0);
+ return res >>> 1;
+};
+
+
+/* ===========================================================================
+ * Flush the bit buffer, keeping at most 7 bits in it.
+ */
+const bi_flush = (s) => {
+
+ if (s.bi_valid === 16) {
+ put_short(s, s.bi_buf);
+ s.bi_buf = 0;
+ s.bi_valid = 0;
+
+ } else if (s.bi_valid >= 8) {
+ s.pending_buf[s.pending++] = s.bi_buf & 0xff;
+ s.bi_buf >>= 8;
+ s.bi_valid -= 8;
+ }
+};
+
+
+/* ===========================================================================
+ * Compute the optimal bit lengths for a tree and update the total bit length
+ * for the current block.
+ * IN assertion: the fields freq and dad are set, heap[heap_max] and
+ * above are the tree nodes sorted by increasing frequency.
+ * OUT assertions: the field len is set to the optimal bit length, the
+ * array bl_count contains the frequencies for each bit length.
+ * The length opt_len is updated; static_len is also updated if stree is
+ * not null.
+ */
+const gen_bitlen = (s, desc) => {
+// deflate_state *s;
+// tree_desc *desc; /* the tree descriptor */
+
+ const tree = desc.dyn_tree;
+ const max_code = desc.max_code;
+ const stree = desc.stat_desc.static_tree;
+ const has_stree = desc.stat_desc.has_stree;
+ const extra = desc.stat_desc.extra_bits;
+ const base = desc.stat_desc.extra_base;
+ const max_length = desc.stat_desc.max_length;
+ let h; /* heap index */
+ let n, m; /* iterate over the tree elements */
+ let bits; /* bit length */
+ let xbits; /* extra bits */
+ let f; /* frequency */
+ let overflow = 0; /* number of elements with bit length too large */
+
+ for (bits = 0; bits <= MAX_BITS$1; bits++) {
+ s.bl_count[bits] = 0;
+ }
+
+ /* In a first pass, compute the optimal bit lengths (which may
+ * overflow in the case of the bit length tree).
+ */
+ tree[s.heap[s.heap_max] * 2 + 1]/*.Len*/ = 0; /* root of the heap */
+
+ for (h = s.heap_max + 1; h < HEAP_SIZE$1; h++) {
+ n = s.heap[h];
+ bits = tree[tree[n * 2 + 1]/*.Dad*/ * 2 + 1]/*.Len*/ + 1;
+ if (bits > max_length) {
+ bits = max_length;
+ overflow++;
+ }
+ tree[n * 2 + 1]/*.Len*/ = bits;
+ /* We overwrite tree[n].Dad which is no longer needed */
+
+ if (n > max_code) { continue; } /* not a leaf node */
+
+ s.bl_count[bits]++;
+ xbits = 0;
+ if (n >= base) {
+ xbits = extra[n - base];
+ }
+ f = tree[n * 2]/*.Freq*/;
+ s.opt_len += f * (bits + xbits);
+ if (has_stree) {
+ s.static_len += f * (stree[n * 2 + 1]/*.Len*/ + xbits);
+ }
+ }
+ if (overflow === 0) { return; }
+
+ // Tracev((stderr,"\nbit length overflow\n"));
+ /* This happens for example on obj2 and pic of the Calgary corpus */
+
+ /* Find the first bit length which could increase: */
+ do {
+ bits = max_length - 1;
+ while (s.bl_count[bits] === 0) { bits--; }
+ s.bl_count[bits]--; /* move one leaf down the tree */
+ s.bl_count[bits + 1] += 2; /* move one overflow item as its brother */
+ s.bl_count[max_length]--;
+ /* The brother of the overflow item also moves one step up,
+ * but this does not affect bl_count[max_length]
+ */
+ overflow -= 2;
+ } while (overflow > 0);
+
+ /* Now recompute all bit lengths, scanning in increasing frequency.
+ * h is still equal to HEAP_SIZE. (It is simpler to reconstruct all
+ * lengths instead of fixing only the wrong ones. This idea is taken
+ * from 'ar' written by Haruhiko Okumura.)
+ */
+ for (bits = max_length; bits !== 0; bits--) {
+ n = s.bl_count[bits];
+ while (n !== 0) {
+ m = s.heap[--h];
+ if (m > max_code) { continue; }
+ if (tree[m * 2 + 1]/*.Len*/ !== bits) {
+ // Tracev((stderr,"code %d bits %d->%d\n", m, tree[m].Len, bits));
+ s.opt_len += (bits - tree[m * 2 + 1]/*.Len*/) * tree[m * 2]/*.Freq*/;
+ tree[m * 2 + 1]/*.Len*/ = bits;
+ }
+ n--;
+ }
+ }
+};
+
+
+/* ===========================================================================
+ * Generate the codes for a given tree and bit counts (which need not be
+ * optimal).
+ * IN assertion: the array bl_count contains the bit length statistics for
+ * the given tree and the field len is set for all tree elements.
+ * OUT assertion: the field code is set for all tree elements of non
+ * zero code length.
+ */
+const gen_codes = (tree, max_code, bl_count) => {
+// ct_data *tree; /* the tree to decorate */
+// int max_code; /* largest code with non zero frequency */
+// ushf *bl_count; /* number of codes at each bit length */
+
+ const next_code = new Array(MAX_BITS$1 + 1); /* next code value for each bit length */
+ let code = 0; /* running code value */
+ let bits; /* bit index */
+ let n; /* code index */
+
+ /* The distribution counts are first used to generate the code values
+ * without bit reversal.
+ */
+ for (bits = 1; bits <= MAX_BITS$1; bits++) {
+ code = (code + bl_count[bits - 1]) << 1;
+ next_code[bits] = code;
+ }
+ /* Check that the bit counts in bl_count are consistent. The last code
+ * must be all ones.
+ */
+ //Assert (code + bl_count[MAX_BITS]-1 == (1<<MAX_BITS)-1,
+ // "inconsistent bit counts");
+ //Tracev((stderr,"\ngen_codes: max_code %d ", max_code));
+
+ for (n = 0; n <= max_code; n++) {
+ let len = tree[n * 2 + 1]/*.Len*/;
+ if (len === 0) { continue; }
+ /* Now reverse the bits */
+ tree[n * 2]/*.Code*/ = bi_reverse(next_code[len]++, len);
+
+ //Tracecv(tree != static_ltree, (stderr,"\nn %3d %c l %2d c %4x (%x) ",
+ // n, (isgraph(n) ? n : ' '), len, tree[n*2]/*.Code*/, next_code[len]-1));
+ }
+};
+
+
+/* ===========================================================================
+ * Initialize the various 'constant' tables.
+ */
+const tr_static_init = () => {
+
+ let n; /* iterates over tree elements */
+ let bits; /* bit counter */
+ let length; /* length value */
+ let code; /* code value */
+ let dist; /* distance index */
+ const bl_count = new Array(MAX_BITS$1 + 1);
+ /* number of codes at each bit length for an optimal tree */
+
+ // do check in _tr_init()
+ //if (static_init_done) return;
+
+ /* For some embedded targets, global variables are not initialized: */
+/*#ifdef NO_INIT_GLOBAL_POINTERS
+ static_l_desc.static_tree = static_ltree;
+ static_l_desc.extra_bits = extra_lbits;
+ static_d_desc.static_tree = static_dtree;
+ static_d_desc.extra_bits = extra_dbits;
+ static_bl_desc.extra_bits = extra_blbits;
+#endif*/
+
+ /* Initialize the mapping length (0..255) -> length code (0..28) */
+ length = 0;
+ for (code = 0; code < LENGTH_CODES$1 - 1; code++) {
+ base_length[code] = length;
+ for (n = 0; n < (1 << extra_lbits[code]); n++) {
+ _length_code[length++] = code;
+ }
+ }
+ //Assert (length == 256, "tr_static_init: length != 256");
+ /* Note that the length 255 (match length 258) can be represented
+ * in two different ways: code 284 + 5 bits or code 285, so we
+ * overwrite length_code[255] to use the best encoding:
+ */
+ _length_code[length - 1] = code;
+
+ /* Initialize the mapping dist (0..32K) -> dist code (0..29) */
+ dist = 0;
+ for (code = 0; code < 16; code++) {
+ base_dist[code] = dist;
+ for (n = 0; n < (1 << extra_dbits[code]); n++) {
+ _dist_code[dist++] = code;
+ }
+ }
+ //Assert (dist == 256, "tr_static_init: dist != 256");
+ dist >>= 7; /* from now on, all distances are divided by 128 */
+ for (; code < D_CODES$1; code++) {
+ base_dist[code] = dist << 7;
+ for (n = 0; n < (1 << (extra_dbits[code] - 7)); n++) {
+ _dist_code[256 + dist++] = code;
+ }
+ }
+ //Assert (dist == 256, "tr_static_init: 256+dist != 512");
+
+ /* Construct the codes of the static literal tree */
+ for (bits = 0; bits <= MAX_BITS$1; bits++) {
+ bl_count[bits] = 0;
+ }
+
+ n = 0;
+ while (n <= 143) {
+ static_ltree[n * 2 + 1]/*.Len*/ = 8;
+ n++;
+ bl_count[8]++;
+ }
+ while (n <= 255) {
+ static_ltree[n * 2 + 1]/*.Len*/ = 9;
+ n++;
+ bl_count[9]++;
+ }
+ while (n <= 279) {
+ static_ltree[n * 2 + 1]/*.Len*/ = 7;
+ n++;
+ bl_count[7]++;
+ }
+ while (n <= 287) {
+ static_ltree[n * 2 + 1]/*.Len*/ = 8;
+ n++;
+ bl_count[8]++;
+ }
+ /* Codes 286 and 287 do not exist, but we must include them in the
+ * tree construction to get a canonical Huffman tree (longest code
+ * all ones)
+ */
+ gen_codes(static_ltree, L_CODES$1 + 1, bl_count);
+
+ /* The static distance tree is trivial: */
+ for (n = 0; n < D_CODES$1; n++) {
+ static_dtree[n * 2 + 1]/*.Len*/ = 5;
+ static_dtree[n * 2]/*.Code*/ = bi_reverse(n, 5);
+ }
+
+ // Now data ready and we can init static trees
+ static_l_desc = new StaticTreeDesc(static_ltree, extra_lbits, LITERALS$1 + 1, L_CODES$1, MAX_BITS$1);
+ static_d_desc = new StaticTreeDesc(static_dtree, extra_dbits, 0, D_CODES$1, MAX_BITS$1);
+ static_bl_desc = new StaticTreeDesc(new Array(0), extra_blbits, 0, BL_CODES$1, MAX_BL_BITS);
+
+ //static_init_done = true;
+};
+
+
+/* ===========================================================================
+ * Initialize a new block.
+ */
+const init_block = (s) => {
+
+ let n; /* iterates over tree elements */
+
+ /* Initialize the trees. */
+ for (n = 0; n < L_CODES$1; n++) { s.dyn_ltree[n * 2]/*.Freq*/ = 0; }
+ for (n = 0; n < D_CODES$1; n++) { s.dyn_dtree[n * 2]/*.Freq*/ = 0; }
+ for (n = 0; n < BL_CODES$1; n++) { s.bl_tree[n * 2]/*.Freq*/ = 0; }
+
+ s.dyn_ltree[END_BLOCK * 2]/*.Freq*/ = 1;
+ s.opt_len = s.static_len = 0;
+ s.sym_next = s.matches = 0;
+};
+
+
+/* ===========================================================================
+ * Flush the bit buffer and align the output on a byte boundary
+ */
+const bi_windup = (s) =>
+{
+ if (s.bi_valid > 8) {
+ put_short(s, s.bi_buf);
+ } else if (s.bi_valid > 0) {
+ //put_byte(s, (Byte)s->bi_buf);
+ s.pending_buf[s.pending++] = s.bi_buf;
+ }
+ s.bi_buf = 0;
+ s.bi_valid = 0;
+};
+
+/* ===========================================================================
+ * Compares to subtrees, using the tree depth as tie breaker when
+ * the subtrees have equal frequency. This minimizes the worst case length.
+ */
+const smaller = (tree, n, m, depth) => {
+
+ const _n2 = n * 2;
+ const _m2 = m * 2;
+ return (tree[_n2]/*.Freq*/ < tree[_m2]/*.Freq*/ ||
+ (tree[_n2]/*.Freq*/ === tree[_m2]/*.Freq*/ && depth[n] <= depth[m]));
+};
+
+/* ===========================================================================
+ * Restore the heap property by moving down the tree starting at node k,
+ * exchanging a node with the smallest of its two sons if necessary, stopping
+ * when the heap property is re-established (each father smaller than its
+ * two sons).
+ */
+const pqdownheap = (s, tree, k) => {
+// deflate_state *s;
+// ct_data *tree; /* the tree to restore */
+// int k; /* node to move down */
+
+ const v = s.heap[k];
+ let j = k << 1; /* left son of k */
+ while (j <= s.heap_len) {
+ /* Set j to the smallest of the two sons: */
+ if (j < s.heap_len &&
+ smaller(tree, s.heap[j + 1], s.heap[j], s.depth)) {
+ j++;
+ }
+ /* Exit if v is smaller than both sons */
+ if (smaller(tree, v, s.heap[j], s.depth)) { break; }
+
+ /* Exchange v with the smallest son */
+ s.heap[k] = s.heap[j];
+ k = j;
+
+ /* And continue down the tree, setting j to the left son of k */
+ j <<= 1;
+ }
+ s.heap[k] = v;
+};
+
+
+// inlined manually
+// const SMALLEST = 1;
+
+/* ===========================================================================
+ * Send the block data compressed using the given Huffman trees
+ */
+const compress_block = (s, ltree, dtree) => {
+// deflate_state *s;
+// const ct_data *ltree; /* literal tree */
+// const ct_data *dtree; /* distance tree */
+
+ let dist; /* distance of matched string */
+ let lc; /* match length or unmatched char (if dist == 0) */
+ let sx = 0; /* running index in sym_buf */
+ let code; /* the code to send */
+ let extra; /* number of extra bits to send */
+
+ if (s.sym_next !== 0) {
+ do {
+ dist = s.pending_buf[s.sym_buf + sx++] & 0xff;
+ dist += (s.pending_buf[s.sym_buf + sx++] & 0xff) << 8;
+ lc = s.pending_buf[s.sym_buf + sx++];
+ if (dist === 0) {
+ send_code(s, lc, ltree); /* send a literal byte */
+ //Tracecv(isgraph(lc), (stderr," '%c' ", lc));
+ } else {
+ /* Here, lc is the match length - MIN_MATCH */
+ code = _length_code[lc];
+ send_code(s, code + LITERALS$1 + 1, ltree); /* send the length code */
+ extra = extra_lbits[code];
+ if (extra !== 0) {
+ lc -= base_length[code];
+ send_bits(s, lc, extra); /* send the extra length bits */
+ }
+ dist--; /* dist is now the match distance - 1 */
+ code = d_code(dist);
+ //Assert (code < D_CODES, "bad d_code");
+
+ send_code(s, code, dtree); /* send the distance code */
+ extra = extra_dbits[code];
+ if (extra !== 0) {
+ dist -= base_dist[code];
+ send_bits(s, dist, extra); /* send the extra distance bits */
+ }
+ } /* literal or match pair ? */
+
+ /* Check that the overlay between pending_buf and sym_buf is ok: */
+ //Assert(s->pending < s->lit_bufsize + sx, "pendingBuf overflow");
+
+ } while (sx < s.sym_next);
+ }
+
+ send_code(s, END_BLOCK, ltree);
+};
+
+
+/* ===========================================================================
+ * Construct one Huffman tree and assigns the code bit strings and lengths.
+ * Update the total bit length for the current block.
+ * IN assertion: the field freq is set for all tree elements.
+ * OUT assertions: the fields len and code are set to the optimal bit length
+ * and corresponding code. The length opt_len is updated; static_len is
+ * also updated if stree is not null. The field max_code is set.
+ */
+const build_tree = (s, desc) => {
+// deflate_state *s;
+// tree_desc *desc; /* the tree descriptor */
+
+ const tree = desc.dyn_tree;
+ const stree = desc.stat_desc.static_tree;
+ const has_stree = desc.stat_desc.has_stree;
+ const elems = desc.stat_desc.elems;
+ let n, m; /* iterate over heap elements */
+ let max_code = -1; /* largest code with non zero frequency */
+ let node; /* new node being created */
+
+ /* Construct the initial heap, with least frequent element in
+ * heap[SMALLEST]. The sons of heap[n] are heap[2*n] and heap[2*n+1].
+ * heap[0] is not used.
+ */
+ s.heap_len = 0;
+ s.heap_max = HEAP_SIZE$1;
+
+ for (n = 0; n < elems; n++) {
+ if (tree[n * 2]/*.Freq*/ !== 0) {
+ s.heap[++s.heap_len] = max_code = n;
+ s.depth[n] = 0;
+
+ } else {
+ tree[n * 2 + 1]/*.Len*/ = 0;
+ }
+ }
+
+ /* The pkzip format requires that at least one distance code exists,
+ * and that at least one bit should be sent even if there is only one
+ * possible code. So to avoid special checks later on we force at least
+ * two codes of non zero frequency.
+ */
+ while (s.heap_len < 2) {
+ node = s.heap[++s.heap_len] = (max_code < 2 ? ++max_code : 0);
+ tree[node * 2]/*.Freq*/ = 1;
+ s.depth[node] = 0;
+ s.opt_len--;
+
+ if (has_stree) {
+ s.static_len -= stree[node * 2 + 1]/*.Len*/;
+ }
+ /* node is 0 or 1 so it does not have extra bits */
+ }
+ desc.max_code = max_code;
+
+ /* The elements heap[heap_len/2+1 .. heap_len] are leaves of the tree,
+ * establish sub-heaps of increasing lengths:
+ */
+ for (n = (s.heap_len >> 1/*int /2*/); n >= 1; n--) { pqdownheap(s, tree, n); }
+
+ /* Construct the Huffman tree by repeatedly combining the least two
+ * frequent nodes.
+ */
+ node = elems; /* next internal node of the tree */
+ do {
+ //pqremove(s, tree, n); /* n = node of least frequency */
+ /*** pqremove ***/
+ n = s.heap[1/*SMALLEST*/];
+ s.heap[1/*SMALLEST*/] = s.heap[s.heap_len--];
+ pqdownheap(s, tree, 1/*SMALLEST*/);
+ /***/
+
+ m = s.heap[1/*SMALLEST*/]; /* m = node of next least frequency */
+
+ s.heap[--s.heap_max] = n; /* keep the nodes sorted by frequency */
+ s.heap[--s.heap_max] = m;
+
+ /* Create a new node father of n and m */
+ tree[node * 2]/*.Freq*/ = tree[n * 2]/*.Freq*/ + tree[m * 2]/*.Freq*/;
+ s.depth[node] = (s.depth[n] >= s.depth[m] ? s.depth[n] : s.depth[m]) + 1;
+ tree[n * 2 + 1]/*.Dad*/ = tree[m * 2 + 1]/*.Dad*/ = node;
+
+ /* and insert the new node in the heap */
+ s.heap[1/*SMALLEST*/] = node++;
+ pqdownheap(s, tree, 1/*SMALLEST*/);
+
+ } while (s.heap_len >= 2);
+
+ s.heap[--s.heap_max] = s.heap[1/*SMALLEST*/];
+
+ /* At this point, the fields freq and dad are set. We can now
+ * generate the bit lengths.
+ */
+ gen_bitlen(s, desc);
+
+ /* The field len is now set, we can generate the bit codes */
+ gen_codes(tree, max_code, s.bl_count);
+};
+
+
+/* ===========================================================================
+ * Scan a literal or distance tree to determine the frequencies of the codes
+ * in the bit length tree.
+ */
+const scan_tree = (s, tree, max_code) => {
+// deflate_state *s;
+// ct_data *tree; /* the tree to be scanned */
+// int max_code; /* and its largest code of non zero frequency */
+
+ let n; /* iterates over all tree elements */
+ let prevlen = -1; /* last emitted length */
+ let curlen; /* length of current code */
+
+ let nextlen = tree[0 * 2 + 1]/*.Len*/; /* length of next code */
+
+ let count = 0; /* repeat count of the current code */
+ let max_count = 7; /* max repeat count */
+ let min_count = 4; /* min repeat count */
+
+ if (nextlen === 0) {
+ max_count = 138;
+ min_count = 3;
+ }
+ tree[(max_code + 1) * 2 + 1]/*.Len*/ = 0xffff; /* guard */
+
+ for (n = 0; n <= max_code; n++) {
+ curlen = nextlen;
+ nextlen = tree[(n + 1) * 2 + 1]/*.Len*/;
+
+ if (++count < max_count && curlen === nextlen) {
+ continue;
+
+ } else if (count < min_count) {
+ s.bl_tree[curlen * 2]/*.Freq*/ += count;
+
+ } else if (curlen !== 0) {
+
+ if (curlen !== prevlen) { s.bl_tree[curlen * 2]/*.Freq*/++; }
+ s.bl_tree[REP_3_6 * 2]/*.Freq*/++;
+
+ } else if (count <= 10) {
+ s.bl_tree[REPZ_3_10 * 2]/*.Freq*/++;
+
+ } else {
+ s.bl_tree[REPZ_11_138 * 2]/*.Freq*/++;
+ }
+
+ count = 0;
+ prevlen = curlen;
+
+ if (nextlen === 0) {
+ max_count = 138;
+ min_count = 3;
+
+ } else if (curlen === nextlen) {
+ max_count = 6;
+ min_count = 3;
+
+ } else {
+ max_count = 7;
+ min_count = 4;
+ }
+ }
+};
+
+
+/* ===========================================================================
+ * Send a literal or distance tree in compressed form, using the codes in
+ * bl_tree.
+ */
+const send_tree = (s, tree, max_code) => {
+// deflate_state *s;
+// ct_data *tree; /* the tree to be scanned */
+// int max_code; /* and its largest code of non zero frequency */
+
+ let n; /* iterates over all tree elements */
+ let prevlen = -1; /* last emitted length */
+ let curlen; /* length of current code */
+
+ let nextlen = tree[0 * 2 + 1]/*.Len*/; /* length of next code */
+
+ let count = 0; /* repeat count of the current code */
+ let max_count = 7; /* max repeat count */
+ let min_count = 4; /* min repeat count */
+
+ /* tree[max_code+1].Len = -1; */ /* guard already set */
+ if (nextlen === 0) {
+ max_count = 138;
+ min_count = 3;
+ }
+
+ for (n = 0; n <= max_code; n++) {
+ curlen = nextlen;
+ nextlen = tree[(n + 1) * 2 + 1]/*.Len*/;
+
+ if (++count < max_count && curlen === nextlen) {
+ continue;
+
+ } else if (count < min_count) {
+ do { send_code(s, curlen, s.bl_tree); } while (--count !== 0);
+
+ } else if (curlen !== 0) {
+ if (curlen !== prevlen) {
+ send_code(s, curlen, s.bl_tree);
+ count--;
+ }
+ //Assert(count >= 3 && count <= 6, " 3_6?");
+ send_code(s, REP_3_6, s.bl_tree);
+ send_bits(s, count - 3, 2);
+
+ } else if (count <= 10) {
+ send_code(s, REPZ_3_10, s.bl_tree);
+ send_bits(s, count - 3, 3);
+
+ } else {
+ send_code(s, REPZ_11_138, s.bl_tree);
+ send_bits(s, count - 11, 7);
+ }
+
+ count = 0;
+ prevlen = curlen;
+ if (nextlen === 0) {
+ max_count = 138;
+ min_count = 3;
+
+ } else if (curlen === nextlen) {
+ max_count = 6;
+ min_count = 3;
+
+ } else {
+ max_count = 7;
+ min_count = 4;
+ }
+ }
+};
+
+
+/* ===========================================================================
+ * Construct the Huffman tree for the bit lengths and return the index in
+ * bl_order of the last bit length code to send.
+ */
+const build_bl_tree = (s) => {
+
+ let max_blindex; /* index of last bit length code of non zero freq */
+
+ /* Determine the bit length frequencies for literal and distance trees */
+ scan_tree(s, s.dyn_ltree, s.l_desc.max_code);
+ scan_tree(s, s.dyn_dtree, s.d_desc.max_code);
+
+ /* Build the bit length tree: */
+ build_tree(s, s.bl_desc);
+ /* opt_len now includes the length of the tree representations, except
+ * the lengths of the bit lengths codes and the 5+5+4 bits for the counts.
+ */
+
+ /* Determine the number of bit length codes to send. The pkzip format
+ * requires that at least 4 bit length codes be sent. (appnote.txt says
+ * 3 but the actual value used is 4.)
+ */
+ for (max_blindex = BL_CODES$1 - 1; max_blindex >= 3; max_blindex--) {
+ if (s.bl_tree[bl_order[max_blindex] * 2 + 1]/*.Len*/ !== 0) {
+ break;
+ }
+ }
+ /* Update opt_len to include the bit length tree and counts */
+ s.opt_len += 3 * (max_blindex + 1) + 5 + 5 + 4;
+ //Tracev((stderr, "\ndyn trees: dyn %ld, stat %ld",
+ // s->opt_len, s->static_len));
+
+ return max_blindex;
+};
+
+
+/* ===========================================================================
+ * Send the header for a block using dynamic Huffman trees: the counts, the
+ * lengths of the bit length codes, the literal tree and the distance tree.
+ * IN assertion: lcodes >= 257, dcodes >= 1, blcodes >= 4.
+ */
+const send_all_trees = (s, lcodes, dcodes, blcodes) => {
+// deflate_state *s;
+// int lcodes, dcodes, blcodes; /* number of codes for each tree */
+
+ let rank; /* index in bl_order */
+
+ //Assert (lcodes >= 257 && dcodes >= 1 && blcodes >= 4, "not enough codes");
+ //Assert (lcodes <= L_CODES && dcodes <= D_CODES && blcodes <= BL_CODES,
+ // "too many codes");
+ //Tracev((stderr, "\nbl counts: "));
+ send_bits(s, lcodes - 257, 5); /* not +255 as stated in appnote.txt */
+ send_bits(s, dcodes - 1, 5);
+ send_bits(s, blcodes - 4, 4); /* not -3 as stated in appnote.txt */
+ for (rank = 0; rank < blcodes; rank++) {
+ //Tracev((stderr, "\nbl code %2d ", bl_order[rank]));
+ send_bits(s, s.bl_tree[bl_order[rank] * 2 + 1]/*.Len*/, 3);
+ }
+ //Tracev((stderr, "\nbl tree: sent %ld", s->bits_sent));
+
+ send_tree(s, s.dyn_ltree, lcodes - 1); /* literal tree */
+ //Tracev((stderr, "\nlit tree: sent %ld", s->bits_sent));
+
+ send_tree(s, s.dyn_dtree, dcodes - 1); /* distance tree */
+ //Tracev((stderr, "\ndist tree: sent %ld", s->bits_sent));
+};
+
+
+/* ===========================================================================
+ * Check if the data type is TEXT or BINARY, using the following algorithm:
+ * - TEXT if the two conditions below are satisfied:
+ * a) There are no non-portable control characters belonging to the
+ * "block list" (0..6, 14..25, 28..31).
+ * b) There is at least one printable character belonging to the
+ * "allow list" (9 {TAB}, 10 {LF}, 13 {CR}, 32..255).
+ * - BINARY otherwise.
+ * - The following partially-portable control characters form a
+ * "gray list" that is ignored in this detection algorithm:
+ * (7 {BEL}, 8 {BS}, 11 {VT}, 12 {FF}, 26 {SUB}, 27 {ESC}).
+ * IN assertion: the fields Freq of dyn_ltree are set.
+ */
+const detect_data_type = (s) => {
+ /* block_mask is the bit mask of block-listed bytes
+ * set bits 0..6, 14..25, and 28..31
+ * 0xf3ffc07f = binary 11110011111111111100000001111111
+ */
+ let block_mask = 0xf3ffc07f;
+ let n;
+
+ /* Check for non-textual ("block-listed") bytes. */
+ for (n = 0; n <= 31; n++, block_mask >>>= 1) {
+ if ((block_mask & 1) && (s.dyn_ltree[n * 2]/*.Freq*/ !== 0)) {
+ return Z_BINARY;
+ }
+ }
+
+ /* Check for textual ("allow-listed") bytes. */
+ if (s.dyn_ltree[9 * 2]/*.Freq*/ !== 0 || s.dyn_ltree[10 * 2]/*.Freq*/ !== 0 ||
+ s.dyn_ltree[13 * 2]/*.Freq*/ !== 0) {
+ return Z_TEXT;
+ }
+ for (n = 32; n < LITERALS$1; n++) {
+ if (s.dyn_ltree[n * 2]/*.Freq*/ !== 0) {
+ return Z_TEXT;
+ }
+ }
+
+ /* There are no "block-listed" or "allow-listed" bytes:
+ * this stream either is empty or has tolerated ("gray-listed") bytes only.
+ */
+ return Z_BINARY;
+};
+
+
+let static_init_done = false;
+
+/* ===========================================================================
+ * Initialize the tree data structures for a new zlib stream.
+ */
+const _tr_init$1 = (s) =>
+{
+
+ if (!static_init_done) {
+ tr_static_init();
+ static_init_done = true;
+ }
+
+ s.l_desc = new TreeDesc(s.dyn_ltree, static_l_desc);
+ s.d_desc = new TreeDesc(s.dyn_dtree, static_d_desc);
+ s.bl_desc = new TreeDesc(s.bl_tree, static_bl_desc);
+
+ s.bi_buf = 0;
+ s.bi_valid = 0;
+
+ /* Initialize the first block of the first file: */
+ init_block(s);
+};
+
+
+/* ===========================================================================
+ * Send a stored block
+ */
+const _tr_stored_block$1 = (s, buf, stored_len, last) => {
+//DeflateState *s;
+//charf *buf; /* input block */
+//ulg stored_len; /* length of input block */
+//int last; /* one if this is the last block for a file */
+
+ send_bits(s, (STORED_BLOCK << 1) + (last ? 1 : 0), 3); /* send block type */
+ bi_windup(s); /* align on byte boundary */
+ put_short(s, stored_len);
+ put_short(s, ~stored_len);
+ if (stored_len) {
+ s.pending_buf.set(s.window.subarray(buf, buf + stored_len), s.pending);
+ }
+ s.pending += stored_len;
+};
+
+
+/* ===========================================================================
+ * Send one empty static block to give enough lookahead for inflate.
+ * This takes 10 bits, of which 7 may remain in the bit buffer.
+ */
+const _tr_align$1 = (s) => {
+ send_bits(s, STATIC_TREES << 1, 3);
+ send_code(s, END_BLOCK, static_ltree);
+ bi_flush(s);
+};
+
+
+/* ===========================================================================
+ * Determine the best encoding for the current block: dynamic trees, static
+ * trees or store, and write out the encoded block.
+ */
+const _tr_flush_block$1 = (s, buf, stored_len, last) => {
+//DeflateState *s;
+//charf *buf; /* input block, or NULL if too old */
+//ulg stored_len; /* length of input block */
+//int last; /* one if this is the last block for a file */
+
+ let opt_lenb, static_lenb; /* opt_len and static_len in bytes */
+ let max_blindex = 0; /* index of last bit length code of non zero freq */
+
+ /* Build the Huffman trees unless a stored block is forced */
+ if (s.level > 0) {
+
+ /* Check if the file is binary or text */
+ if (s.strm.data_type === Z_UNKNOWN$1) {
+ s.strm.data_type = detect_data_type(s);
+ }
+
+ /* Construct the literal and distance trees */
+ build_tree(s, s.l_desc);
+ // Tracev((stderr, "\nlit data: dyn %ld, stat %ld", s->opt_len,
+ // s->static_len));
+
+ build_tree(s, s.d_desc);
+ // Tracev((stderr, "\ndist data: dyn %ld, stat %ld", s->opt_len,
+ // s->static_len));
+ /* At this point, opt_len and static_len are the total bit lengths of
+ * the compressed block data, excluding the tree representations.
+ */
+
+ /* Build the bit length tree for the above two trees, and get the index
+ * in bl_order of the last bit length code to send.
+ */
+ max_blindex = build_bl_tree(s);
+
+ /* Determine the best encoding. Compute the block lengths in bytes. */
+ opt_lenb = (s.opt_len + 3 + 7) >>> 3;
+ static_lenb = (s.static_len + 3 + 7) >>> 3;
+
+ // Tracev((stderr, "\nopt %lu(%lu) stat %lu(%lu) stored %lu lit %u ",
+ // opt_lenb, s->opt_len, static_lenb, s->static_len, stored_len,
+ // s->sym_next / 3));
+
+ if (static_lenb <= opt_lenb) { opt_lenb = static_lenb; }
+
+ } else {
+ // Assert(buf != (char*)0, "lost buf");
+ opt_lenb = static_lenb = stored_len + 5; /* force a stored block */
+ }
+
+ if ((stored_len + 4 <= opt_lenb) && (buf !== -1)) {
+ /* 4: two words for the lengths */
+
+ /* The test buf != NULL is only necessary if LIT_BUFSIZE > WSIZE.
+ * Otherwise we can't have processed more than WSIZE input bytes since
+ * the last block flush, because compression would have been
+ * successful. If LIT_BUFSIZE <= WSIZE, it is never too late to
+ * transform a block into a stored block.
+ */
+ _tr_stored_block$1(s, buf, stored_len, last);
+
+ } else if (s.strategy === Z_FIXED$1 || static_lenb === opt_lenb) {
+
+ send_bits(s, (STATIC_TREES << 1) + (last ? 1 : 0), 3);
+ compress_block(s, static_ltree, static_dtree);
+
+ } else {
+ send_bits(s, (DYN_TREES << 1) + (last ? 1 : 0), 3);
+ send_all_trees(s, s.l_desc.max_code + 1, s.d_desc.max_code + 1, max_blindex + 1);
+ compress_block(s, s.dyn_ltree, s.dyn_dtree);
+ }
+ // Assert (s->compressed_len == s->bits_sent, "bad compressed size");
+ /* The above check is made mod 2^32, for files larger than 512 MB
+ * and uLong implemented on 32 bits.
+ */
+ init_block(s);
+
+ if (last) {
+ bi_windup(s);
+ }
+ // Tracev((stderr,"\ncomprlen %lu(%lu) ", s->compressed_len>>3,
+ // s->compressed_len-7*last));
+};
+
+/* ===========================================================================
+ * Save the match info and tally the frequency counts. Return true if
+ * the current block must be flushed.
+ */
+const _tr_tally$1 = (s, dist, lc) => {
+// deflate_state *s;
+// unsigned dist; /* distance of matched string */
+// unsigned lc; /* match length-MIN_MATCH or unmatched char (if dist==0) */
+
+ s.pending_buf[s.sym_buf + s.sym_next++] = dist;
+ s.pending_buf[s.sym_buf + s.sym_next++] = dist >> 8;
+ s.pending_buf[s.sym_buf + s.sym_next++] = lc;
+ if (dist === 0) {
+ /* lc is the unmatched char */
+ s.dyn_ltree[lc * 2]/*.Freq*/++;
+ } else {
+ s.matches++;
+ /* Here, lc is the match length - MIN_MATCH */
+ dist--; /* dist = match distance - 1 */
+ //Assert((ush)dist < (ush)MAX_DIST(s) &&
+ // (ush)lc <= (ush)(MAX_MATCH-MIN_MATCH) &&
+ // (ush)d_code(dist) < (ush)D_CODES, "_tr_tally: bad match");
+
+ s.dyn_ltree[(_length_code[lc] + LITERALS$1 + 1) * 2]/*.Freq*/++;
+ s.dyn_dtree[d_code(dist) * 2]/*.Freq*/++;
+ }
+
+ return (s.sym_next === s.sym_end);
+};
+
+trees._tr_init = _tr_init$1;
+trees._tr_stored_block = _tr_stored_block$1;
+trees._tr_flush_block = _tr_flush_block$1;
+trees._tr_tally = _tr_tally$1;
+trees._tr_align = _tr_align$1;
+
+// Note: adler32 takes 12% for level 0 and 2% for level 6.
+// It isn't worth it to make additional optimizations as in original.
+// Small size is preferable.
+
+// (C) 1995-2013 Jean-loup Gailly and Mark Adler
+// (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+const adler32$1 = (adler, buf, len, pos) => {
+ let s1 = (adler & 0xffff) |0,
+ s2 = ((adler >>> 16) & 0xffff) |0,
+ n = 0;
+
+ while (len !== 0) {
+ // Set limit ~ twice less than 5552, to keep
+ // s2 in 31-bits, because we force signed ints.
+ // in other case %= will fail.
+ n = len > 2000 ? 2000 : len;
+ len -= n;
+
+ do {
+ s1 = (s1 + buf[pos++]) |0;
+ s2 = (s2 + s1) |0;
+ } while (--n);
+
+ s1 %= 65521;
+ s2 %= 65521;
+ }
+
+ return (s1 | (s2 << 16)) |0;
+};
+
+
+var adler32_1 = adler32$1;
+
+// Note: we can't get significant speed boost here.
+// So write code to minimize size - no pregenerated tables
+// and array tools dependencies.
+
+// (C) 1995-2013 Jean-loup Gailly and Mark Adler
+// (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+// Use ordinary array, since untyped makes no boost here
+const makeTable = () => {
+ let c, table = [];
+
+ for (var n = 0; n < 256; n++) {
+ c = n;
+ for (var k = 0; k < 8; k++) {
+ c = ((c & 1) ? (0xEDB88320 ^ (c >>> 1)) : (c >>> 1));
+ }
+ table[n] = c;
+ }
+
+ return table;
+};
+
+// Create table on load. Just 255 signed longs. Not a problem.
+const crcTable = new Uint32Array(makeTable());
+
+
+const crc32$1 = (crc, buf, len, pos) => {
+ const t = crcTable;
+ const end = pos + len;
+
+ crc ^= -1;
+
+ for (let i = pos; i < end; i++) {
+ crc = (crc >>> 8) ^ t[(crc ^ buf[i]) & 0xFF];
+ }
+
+ return (crc ^ (-1)); // >>> 0;
+};
+
+
+var crc32_1 = crc32$1;
+
+// (C) 1995-2013 Jean-loup Gailly and Mark Adler
+// (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+var messages = {
+ 2: 'need dictionary', /* Z_NEED_DICT 2 */
+ 1: 'stream end', /* Z_STREAM_END 1 */
+ 0: '', /* Z_OK 0 */
+ '-1': 'file error', /* Z_ERRNO (-1) */
+ '-2': 'stream error', /* Z_STREAM_ERROR (-2) */
+ '-3': 'data error', /* Z_DATA_ERROR (-3) */
+ '-4': 'insufficient memory', /* Z_MEM_ERROR (-4) */
+ '-5': 'buffer error', /* Z_BUF_ERROR (-5) */
+ '-6': 'incompatible version' /* Z_VERSION_ERROR (-6) */
+};
+
+// (C) 1995-2013 Jean-loup Gailly and Mark Adler
+// (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+var constants$2 = {
+
+ /* Allowed flush values; see deflate() and inflate() below for details */
+ Z_NO_FLUSH: 0,
+ Z_PARTIAL_FLUSH: 1,
+ Z_SYNC_FLUSH: 2,
+ Z_FULL_FLUSH: 3,
+ Z_FINISH: 4,
+ Z_BLOCK: 5,
+ Z_TREES: 6,
+
+ /* Return codes for the compression/decompression functions. Negative values
+ * are errors, positive values are used for special but normal events.
+ */
+ Z_OK: 0,
+ Z_STREAM_END: 1,
+ Z_NEED_DICT: 2,
+ Z_ERRNO: -1,
+ Z_STREAM_ERROR: -2,
+ Z_DATA_ERROR: -3,
+ Z_MEM_ERROR: -4,
+ Z_BUF_ERROR: -5,
+ //Z_VERSION_ERROR: -6,
+
+ /* compression levels */
+ Z_NO_COMPRESSION: 0,
+ Z_BEST_SPEED: 1,
+ Z_BEST_COMPRESSION: 9,
+ Z_DEFAULT_COMPRESSION: -1,
+
+
+ Z_FILTERED: 1,
+ Z_HUFFMAN_ONLY: 2,
+ Z_RLE: 3,
+ Z_FIXED: 4,
+ Z_DEFAULT_STRATEGY: 0,
+
+ /* Possible values of the data_type field (though see inflate()) */
+ Z_BINARY: 0,
+ Z_TEXT: 1,
+ //Z_ASCII: 1, // = Z_TEXT (deprecated)
+ Z_UNKNOWN: 2,
+
+ /* The deflate compression method */
+ Z_DEFLATED: 8
+ //Z_NULL: null // Use -1 or null inline, depending on var type
+};
+
+// (C) 1995-2013 Jean-loup Gailly and Mark Adler
+// (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+const { _tr_init, _tr_stored_block, _tr_flush_block, _tr_tally, _tr_align } = trees;
+const adler32 = adler32_1;
+const crc32 = crc32_1;
+const msg$1 = messages;
+
+/* Public constants ==========================================================*/
+/* ===========================================================================*/
+
+const {
+ Z_NO_FLUSH: Z_NO_FLUSH$1, Z_PARTIAL_FLUSH, Z_FULL_FLUSH: Z_FULL_FLUSH$1, Z_FINISH: Z_FINISH$1, Z_BLOCK,
+ Z_OK: Z_OK$1, Z_STREAM_END: Z_STREAM_END$1, Z_STREAM_ERROR, Z_DATA_ERROR, Z_BUF_ERROR,
+ Z_DEFAULT_COMPRESSION: Z_DEFAULT_COMPRESSION$1,
+ Z_FILTERED, Z_HUFFMAN_ONLY, Z_RLE, Z_FIXED, Z_DEFAULT_STRATEGY: Z_DEFAULT_STRATEGY$1,
+ Z_UNKNOWN,
+ Z_DEFLATED: Z_DEFLATED$1
+} = constants$2;
+
+/*============================================================================*/
+
+
+const MAX_MEM_LEVEL = 9;
+/* Maximum value for memLevel in deflateInit2 */
+const MAX_WBITS = 15;
+/* 32K LZ77 window */
+const DEF_MEM_LEVEL = 8;
+
+
+const LENGTH_CODES = 29;
+/* number of length codes, not counting the special END_BLOCK code */
+const LITERALS = 256;
+/* number of literal bytes 0..255 */
+const L_CODES = LITERALS + 1 + LENGTH_CODES;
+/* number of Literal or Length codes, including the END_BLOCK code */
+const D_CODES = 30;
+/* number of distance codes */
+const BL_CODES = 19;
+/* number of codes used to transfer the bit lengths */
+const HEAP_SIZE = 2 * L_CODES + 1;
+/* maximum heap size */
+const MAX_BITS = 15;
+/* All codes must not exceed MAX_BITS bits */
+
+const MIN_MATCH = 3;
+const MAX_MATCH = 258;
+const MIN_LOOKAHEAD = (MAX_MATCH + MIN_MATCH + 1);
+
+const PRESET_DICT = 0x20;
+
+const INIT_STATE = 42; /* zlib header -> BUSY_STATE */
+//#ifdef GZIP
+const GZIP_STATE = 57; /* gzip header -> BUSY_STATE | EXTRA_STATE */
+//#endif
+const EXTRA_STATE = 69; /* gzip extra block -> NAME_STATE */
+const NAME_STATE = 73; /* gzip file name -> COMMENT_STATE */
+const COMMENT_STATE = 91; /* gzip comment -> HCRC_STATE */
+const HCRC_STATE = 103; /* gzip header CRC -> BUSY_STATE */
+const BUSY_STATE = 113; /* deflate -> FINISH_STATE */
+const FINISH_STATE = 666; /* stream complete */
+
+const BS_NEED_MORE = 1; /* block not completed, need more input or more output */
+const BS_BLOCK_DONE = 2; /* block flush performed */
+const BS_FINISH_STARTED = 3; /* finish started, need only more output at next deflate */
+const BS_FINISH_DONE = 4; /* finish done, accept no more input or output */
+
+const OS_CODE = 0x03; // Unix :) . Don't detect, use this default.
+
+const err = (strm, errorCode) => {
+ strm.msg = msg$1[errorCode];
+ return errorCode;
+};
+
+const rank = (f) => {
+ return ((f) * 2) - ((f) > 4 ? 9 : 0);
+};
+
+const zero = (buf) => {
+ let len = buf.length; while (--len >= 0) { buf[len] = 0; }
+};
+
+/* ===========================================================================
+ * Slide the hash table when sliding the window down (could be avoided with 32
+ * bit values at the expense of memory usage). We slide even when level == 0 to
+ * keep the hash table consistent if we switch back to level > 0 later.
+ */
+const slide_hash = (s) => {
+ let n, m;
+ let p;
+ let wsize = s.w_size;
+
+ n = s.hash_size;
+ p = n;
+ do {
+ m = s.head[--p];
+ s.head[p] = (m >= wsize ? m - wsize : 0);
+ } while (--n);
+ n = wsize;
+//#ifndef FASTEST
+ p = n;
+ do {
+ m = s.prev[--p];
+ s.prev[p] = (m >= wsize ? m - wsize : 0);
+ /* If n is not on any hash chain, prev[n] is garbage but
+ * its value will never be used.
+ */
+ } while (--n);
+//#endif
+};
+
+/* eslint-disable new-cap */
+let HASH_ZLIB = (s, prev, data) => ((prev << s.hash_shift) ^ data) & s.hash_mask;
+// This hash causes less collisions, https://github.com/nodeca/pako/issues/135
+// But breaks binary compatibility
+//let HASH_FAST = (s, prev, data) => ((prev << 8) + (prev >> 8) + (data << 4)) & s.hash_mask;
+let HASH = HASH_ZLIB;
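+
+// A minimal sketch of the rolling hash above, assuming the default
+// memLevel of 8 (hash_bits = 15, hash_shift = 5, hash_mask = 0x7fff):
+// the key for a window position folds in MIN_MATCH = 3 consecutive bytes,
+// and after three further updates a byte has been shifted left by 15 bits
+// and masked out of the key entirely (hash_shift * MIN_MATCH >= hash_bits):
+//
+//   let h = 0;
+//   h = ((h << 5) ^ win[i])     & 0x7fff;
+//   h = ((h << 5) ^ win[i + 1]) & 0x7fff;
+//   h = ((h << 5) ^ win[i + 2]) & 0x7fff;  // hash key for win[i .. i+2]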
+
+
+/* =========================================================================
+ * Flush as much pending output as possible. All deflate() output, except for
+ * some deflate_stored() output, goes through this function so some
+ * applications may wish to modify it to avoid allocating a large
+ * strm->next_out buffer and copying into it. (See also read_buf()).
+ */
+const flush_pending = (strm) => {
+ const s = strm.state;
+
+ //_tr_flush_bits(s);
+ let len = s.pending;
+ if (len > strm.avail_out) {
+ len = strm.avail_out;
+ }
+ if (len === 0) { return; }
+
+ strm.output.set(s.pending_buf.subarray(s.pending_out, s.pending_out + len), strm.next_out);
+ strm.next_out += len;
+ s.pending_out += len;
+ strm.total_out += len;
+ strm.avail_out -= len;
+ s.pending -= len;
+ if (s.pending === 0) {
+ s.pending_out = 0;
+ }
+};
+
+
+const flush_block_only = (s, last) => {
+ _tr_flush_block(s, (s.block_start >= 0 ? s.block_start : -1), s.strstart - s.block_start, last);
+ s.block_start = s.strstart;
+ flush_pending(s.strm);
+};
+
+
+const put_byte = (s, b) => {
+ s.pending_buf[s.pending++] = b;
+};
+
+
+/* =========================================================================
+ * Put a short in the pending buffer. The 16-bit value is put in MSB order.
+ * IN assertion: the stream state is correct and there is enough room in
+ * pending_buf.
+ */
+const putShortMSB = (s, b) => {
+
+ // put_byte(s, (Byte)(b >> 8));
+  // put_byte(s, (Byte)(b & 0xff));
+ s.pending_buf[s.pending++] = (b >>> 8) & 0xff;
+ s.pending_buf[s.pending++] = b & 0xff;
+};
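+
+// For illustration, the MSB order used here means putShortMSB(s, 0x789c)
+// appends 0x78 and then 0x9c -- precisely the familiar big-endian zlib
+// header bytes that deflate() below produces for the default level 6 /
+// 32K-window configuration.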
+
+
+/* ===========================================================================
+ * Read a new buffer from the current input stream, update the adler32
+ * and total number of bytes read. All deflate() input goes through
+ * this function so some applications may wish to modify it to avoid
+ * allocating a large strm->input buffer and copying from it.
+ * (See also flush_pending()).
+ */
+const read_buf = (strm, buf, start, size) => {
+
+ let len = strm.avail_in;
+
+ if (len > size) { len = size; }
+ if (len === 0) { return 0; }
+
+ strm.avail_in -= len;
+
+ // zmemcpy(buf, strm->next_in, len);
+ buf.set(strm.input.subarray(strm.next_in, strm.next_in + len), start);
+ if (strm.state.wrap === 1) {
+ strm.adler = adler32(strm.adler, buf, len, start);
+ }
+
+ else if (strm.state.wrap === 2) {
+ strm.adler = crc32(strm.adler, buf, len, start);
+ }
+
+ strm.next_in += len;
+ strm.total_in += len;
+
+ return len;
+};
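+
+// A short note on the checksum split above: with a zlib wrapper
+// (state.wrap === 1) the running checksum is Adler-32, with a gzip wrapper
+// (state.wrap === 2) it is CRC-32, and raw deflate (wrap === 0) keeps no
+// checksum at all. For instance, feeding the three bytes "abc" through a
+// zlib-wrapped stream advances the checksum to
+//   adler32(1, buf, 3, 0) === 0x024d0127   // s2 = 0x024d, s1 = 0x0127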
+
+
+/* ===========================================================================
+ * Set match_start to the longest match starting at the given string and
+ * return its length. Matches shorter or equal to prev_length are discarded,
+ * in which case the result is equal to prev_length and match_start is
+ * garbage.
+ * IN assertions: cur_match is the head of the hash chain for the current
+ * string (strstart) and its distance is <= MAX_DIST, and prev_length >= 1
+ * OUT assertion: the match length is not greater than s->lookahead.
+ */
+const longest_match = (s, cur_match) => {
+
+ let chain_length = s.max_chain_length; /* max hash chain length */
+ let scan = s.strstart; /* current string */
+ let match; /* matched string */
+ let len; /* length of current match */
+ let best_len = s.prev_length; /* best match length so far */
+ let nice_match = s.nice_match; /* stop if match long enough */
+ const limit = (s.strstart > (s.w_size - MIN_LOOKAHEAD)) ?
+ s.strstart - (s.w_size - MIN_LOOKAHEAD) : 0/*NIL*/;
+
+ const _win = s.window; // shortcut
+
+ const wmask = s.w_mask;
+ const prev = s.prev;
+
+ /* Stop when cur_match becomes <= limit. To simplify the code,
+ * we prevent matches with the string of window index 0.
+ */
+
+ const strend = s.strstart + MAX_MATCH;
+ let scan_end1 = _win[scan + best_len - 1];
+ let scan_end = _win[scan + best_len];
+
+ /* The code is optimized for HASH_BITS >= 8 and MAX_MATCH-2 multiple of 16.
+ * It is easy to get rid of this optimization if necessary.
+ */
+ // Assert(s->hash_bits >= 8 && MAX_MATCH == 258, "Code too clever");
+
+ /* Do not waste too much time if we already have a good match: */
+ if (s.prev_length >= s.good_match) {
+ chain_length >>= 2;
+ }
+ /* Do not look for matches beyond the end of the input. This is necessary
+ * to make deflate deterministic.
+ */
+ if (nice_match > s.lookahead) { nice_match = s.lookahead; }
+
+ // Assert((ulg)s->strstart <= s->window_size-MIN_LOOKAHEAD, "need lookahead");
+
+ do {
+ // Assert(cur_match < s->strstart, "no future");
+ match = cur_match;
+
+ /* Skip to next match if the match length cannot increase
+ * or if the match length is less than 2. Note that the checks below
+ * for insufficient lookahead only occur occasionally for performance
+ * reasons. Therefore uninitialized memory will be accessed, and
+ * conditional jumps will be made that depend on those values.
+ * However the length of the match is limited to the lookahead, so
+ * the output of deflate is not affected by the uninitialized values.
+ */
+
+ if (_win[match + best_len] !== scan_end ||
+ _win[match + best_len - 1] !== scan_end1 ||
+ _win[match] !== _win[scan] ||
+ _win[++match] !== _win[scan + 1]) {
+ continue;
+ }
+
+ /* The check at best_len-1 can be removed because it will be made
+ * again later. (This heuristic is not always a win.)
+ * It is not necessary to compare scan[2] and match[2] since they
+ * are always equal when the other bytes match, given that
+ * the hash keys are equal and that HASH_BITS >= 8.
+ */
+ scan += 2;
+ match++;
+ // Assert(*scan == *match, "match[2]?");
+
+ /* We check for insufficient lookahead only every 8th comparison;
+ * the 256th check will be made at strstart+258.
+ */
+ do {
+ /*jshint noempty:false*/
+ } while (_win[++scan] === _win[++match] && _win[++scan] === _win[++match] &&
+ _win[++scan] === _win[++match] && _win[++scan] === _win[++match] &&
+ _win[++scan] === _win[++match] && _win[++scan] === _win[++match] &&
+ _win[++scan] === _win[++match] && _win[++scan] === _win[++match] &&
+ scan < strend);
+
+ // Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan");
+
+ len = MAX_MATCH - (strend - scan);
+ scan = strend - MAX_MATCH;
+
+ if (len > best_len) {
+ s.match_start = cur_match;
+ best_len = len;
+ if (len >= nice_match) {
+ break;
+ }
+ scan_end1 = _win[scan + best_len - 1];
+ scan_end = _win[scan + best_len];
+ }
+ } while ((cur_match = prev[cur_match & wmask]) > limit && --chain_length !== 0);
+
+ if (best_len <= s.lookahead) {
+ return best_len;
+ }
+ return s.lookahead;
+};
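+
+// A worked example of the search above (purely illustrative): if the window
+// holds an earlier "abcde" and strstart points at a fresh "abcdex", with the
+// hash chain for "abc" leading back to that earlier copy, the unrolled scan
+// loop extends the comparison byte by byte, longest_match() returns 5
+// (assuming prev_length < 5 and at least 5 bytes of lookahead) and sets
+// s.match_start to the earlier occurrence, which the deflate_* functions
+// then emit as a <length, distance> pair instead of five literals.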
+
+
+/* ===========================================================================
+ * Fill the window when the lookahead becomes insufficient.
+ * Updates strstart and lookahead.
+ *
+ * IN assertion: lookahead < MIN_LOOKAHEAD
+ * OUT assertions: strstart <= window_size-MIN_LOOKAHEAD
+ * At least one byte has been read, or avail_in == 0; reads are
+ * performed for at least two bytes (required for the zip translate_eol
+ * option -- not supported here).
+ */
+const fill_window = (s) => {
+
+ const _w_size = s.w_size;
+ let n, more, str;
+
+ //Assert(s->lookahead < MIN_LOOKAHEAD, "already enough lookahead");
+
+ do {
+ more = s.window_size - s.lookahead - s.strstart;
+
+ // JS ints have 32 bit, block below not needed
+ /* Deal with !@#$% 64K limit: */
+ //if (sizeof(int) <= 2) {
+ // if (more == 0 && s->strstart == 0 && s->lookahead == 0) {
+ // more = wsize;
+ //
+ // } else if (more == (unsigned)(-1)) {
+ // /* Very unlikely, but possible on 16 bit machine if
+ // * strstart == 0 && lookahead == 1 (input done a byte at time)
+ // */
+ // more--;
+ // }
+ //}
+
+
+ /* If the window is almost full and there is insufficient lookahead,
+ * move the upper half to the lower one to make room in the upper half.
+ */
+ if (s.strstart >= _w_size + (_w_size - MIN_LOOKAHEAD)) {
+
+ s.window.set(s.window.subarray(_w_size, _w_size + _w_size - more), 0);
+ s.match_start -= _w_size;
+ s.strstart -= _w_size;
+ /* we now have strstart >= MAX_DIST */
+ s.block_start -= _w_size;
+ if (s.insert > s.strstart) {
+ s.insert = s.strstart;
+ }
+ slide_hash(s);
+ more += _w_size;
+ }
+ if (s.strm.avail_in === 0) {
+ break;
+ }
+
+ /* If there was no sliding:
+ * strstart <= WSIZE+MAX_DIST-1 && lookahead <= MIN_LOOKAHEAD - 1 &&
+ * more == window_size - lookahead - strstart
+ * => more >= window_size - (MIN_LOOKAHEAD-1 + WSIZE + MAX_DIST-1)
+ * => more >= window_size - 2*WSIZE + 2
+ * In the BIG_MEM or MMAP case (not yet supported),
+ * window_size == input_size + MIN_LOOKAHEAD &&
+ * strstart + s->lookahead <= input_size => more >= MIN_LOOKAHEAD.
+ * Otherwise, window_size == 2*WSIZE so more >= 2.
+ * If there was sliding, more >= WSIZE. So in all cases, more >= 2.
+ */
+ //Assert(more >= 2, "more < 2");
+ n = read_buf(s.strm, s.window, s.strstart + s.lookahead, more);
+ s.lookahead += n;
+
+ /* Initialize the hash value now that we have some input: */
+ if (s.lookahead + s.insert >= MIN_MATCH) {
+ str = s.strstart - s.insert;
+ s.ins_h = s.window[str];
+
+ /* UPDATE_HASH(s, s->ins_h, s->window[str + 1]); */
+ s.ins_h = HASH(s, s.ins_h, s.window[str + 1]);
+//#if MIN_MATCH != 3
+// Call update_hash() MIN_MATCH-3 more times
+//#endif
+ while (s.insert) {
+ /* UPDATE_HASH(s, s->ins_h, s->window[str + MIN_MATCH-1]); */
+ s.ins_h = HASH(s, s.ins_h, s.window[str + MIN_MATCH - 1]);
+
+ s.prev[str & s.w_mask] = s.head[s.ins_h];
+ s.head[s.ins_h] = str;
+ str++;
+ s.insert--;
+ if (s.lookahead + s.insert < MIN_MATCH) {
+ break;
+ }
+ }
+ }
+ /* If the whole input has less than MIN_MATCH bytes, ins_h is garbage,
+ * but this is not important since only literal bytes will be emitted.
+ */
+
+ } while (s.lookahead < MIN_LOOKAHEAD && s.strm.avail_in !== 0);
+
+ /* If the WIN_INIT bytes after the end of the current data have never been
+ * written, then zero those bytes in order to avoid memory check reports of
+ * the use of uninitialized (or uninitialised as Julian writes) bytes by
+ * the longest match routines. Update the high water mark for the next
+ * time through here. WIN_INIT is set to MAX_MATCH since the longest match
+ * routines allow scanning to strstart + MAX_MATCH, ignoring lookahead.
+ */
+// if (s.high_water < s.window_size) {
+// const curr = s.strstart + s.lookahead;
+// let init = 0;
+//
+// if (s.high_water < curr) {
+// /* Previous high water mark below current data -- zero WIN_INIT
+// * bytes or up to end of window, whichever is less.
+// */
+// init = s.window_size - curr;
+// if (init > WIN_INIT)
+// init = WIN_INIT;
+// zmemzero(s->window + curr, (unsigned)init);
+// s->high_water = curr + init;
+// }
+// else if (s->high_water < (ulg)curr + WIN_INIT) {
+// /* High water mark at or above current data, but below current data
+// * plus WIN_INIT -- zero out to current data plus WIN_INIT, or up
+// * to end of window, whichever is less.
+// */
+// init = (ulg)curr + WIN_INIT - s->high_water;
+// if (init > s->window_size - s->high_water)
+// init = s->window_size - s->high_water;
+// zmemzero(s->window + s->high_water, (unsigned)init);
+// s->high_water += init;
+// }
+// }
+//
+// Assert((ulg)s->strstart <= s->window_size - MIN_LOOKAHEAD,
+// "not enough room for search");
+};
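+
+// A numeric sketch of the slide above, assuming the default 32K window
+// (w_size = 0x8000, window_size = 0x10000): once strstart reaches
+// 0x8000 + (0x8000 - MIN_LOOKAHEAD) = 0xfefa with too little lookahead,
+// the upper half of the window is copied down over the lower half,
+// strstart / match_start / block_start all drop by 0x8000, and
+// slide_hash() rebases (or zeroes) every head[] and prev[] entry by the
+// same amount before more input is read with read_buf().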
+
+/* ===========================================================================
+ * Copy without compression as much as possible from the input stream, return
+ * the current block state.
+ *
+ * In case deflateParams() is used to later switch to a non-zero compression
+ * level, s->matches (otherwise unused when storing) keeps track of the number
+ * of hash table slides to perform. If s->matches is 1, then one hash table
+ * slide will be done when switching. If s->matches is 2, the maximum value
+ * allowed here, then the hash table will be cleared, since two or more slides
+ * is the same as a clear.
+ *
+ * deflate_stored() is written to minimize the number of times an input byte is
+ * copied. It is most efficient with large input and output buffers, which
+ * maximizes the opportunities to have a single copy from next_in to next_out.
+ */
+const deflate_stored = (s, flush) => {
+
+ /* Smallest worthy block size when not flushing or finishing. By default
+ * this is 32K. This can be as small as 507 bytes for memLevel == 1. For
+ * large input and output buffers, the stored block size will be larger.
+ */
+ let min_block = s.pending_buf_size - 5 > s.w_size ? s.w_size : s.pending_buf_size - 5;
+
+ /* Copy as many min_block or larger stored blocks directly to next_out as
+ * possible. If flushing, copy the remaining available input to next_out as
+ * stored blocks, if there is enough space.
+ */
+ let len, left, have, last = 0;
+ let used = s.strm.avail_in;
+ do {
+ /* Set len to the maximum size block that we can copy directly with the
+ * available input data and output space. Set left to how much of that
+ * would be copied from what's left in the window.
+ */
+ len = 65535/* MAX_STORED */; /* maximum deflate stored block length */
+ have = (s.bi_valid + 42) >> 3; /* number of header bytes */
+ if (s.strm.avail_out < have) { /* need room for header */
+ break;
+ }
+ /* maximum stored block length that will fit in avail_out: */
+ have = s.strm.avail_out - have;
+ left = s.strstart - s.block_start; /* bytes left in window */
+ if (len > left + s.strm.avail_in) {
+ len = left + s.strm.avail_in; /* limit len to the input */
+ }
+ if (len > have) {
+ len = have; /* limit len to the output */
+ }
+
+ /* If the stored block would be less than min_block in length, or if
+ * unable to copy all of the available input when flushing, then try
+ * copying to the window and the pending buffer instead. Also don't
+ * write an empty block when flushing -- deflate() does that.
+ */
+ if (len < min_block && ((len === 0 && flush !== Z_FINISH$1) ||
+ flush === Z_NO_FLUSH$1 ||
+ len !== left + s.strm.avail_in)) {
+ break;
+ }
+
+ /* Make a dummy stored block in pending to get the header bytes,
+ * including any pending bits. This also updates the debugging counts.
+ */
+ last = flush === Z_FINISH$1 && len === left + s.strm.avail_in ? 1 : 0;
+ _tr_stored_block(s, 0, 0, last);
+
+ /* Replace the lengths in the dummy stored block with len. */
+ s.pending_buf[s.pending - 4] = len;
+ s.pending_buf[s.pending - 3] = len >> 8;
+ s.pending_buf[s.pending - 2] = ~len;
+ s.pending_buf[s.pending - 1] = ~len >> 8;
+
+ /* Write the stored block header bytes. */
+ flush_pending(s.strm);
+
+//#ifdef ZLIB_DEBUG
+// /* Update debugging counts for the data about to be copied. */
+// s->compressed_len += len << 3;
+// s->bits_sent += len << 3;
+//#endif
+
+ /* Copy uncompressed bytes from the window to next_out. */
+ if (left) {
+ if (left > len) {
+ left = len;
+ }
+ //zmemcpy(s->strm->next_out, s->window + s->block_start, left);
+ s.strm.output.set(s.window.subarray(s.block_start, s.block_start + left), s.strm.next_out);
+ s.strm.next_out += left;
+ s.strm.avail_out -= left;
+ s.strm.total_out += left;
+ s.block_start += left;
+ len -= left;
+ }
+
+ /* Copy uncompressed bytes directly from next_in to next_out, updating
+ * the check value.
+ */
+ if (len) {
+ read_buf(s.strm, s.strm.output, s.strm.next_out, len);
+ s.strm.next_out += len;
+ s.strm.avail_out -= len;
+ s.strm.total_out += len;
+ }
+ } while (last === 0);
+
+ /* Update the sliding window with the last s->w_size bytes of the copied
+ * data, or append all of the copied data to the existing window if less
+ * than s->w_size bytes were copied. Also update the number of bytes to
+ * insert in the hash tables, in the event that deflateParams() switches to
+ * a non-zero compression level.
+ */
+ used -= s.strm.avail_in; /* number of input bytes directly copied */
+ if (used) {
+ /* If any input was used, then no unused input remains in the window,
+ * therefore s->block_start == s->strstart.
+ */
+ if (used >= s.w_size) { /* supplant the previous history */
+ s.matches = 2; /* clear hash */
+ //zmemcpy(s->window, s->strm->next_in - s->w_size, s->w_size);
+ s.window.set(s.strm.input.subarray(s.strm.next_in - s.w_size, s.strm.next_in), 0);
+ s.strstart = s.w_size;
+ s.insert = s.strstart;
+ }
+ else {
+ if (s.window_size - s.strstart <= used) {
+ /* Slide the window down. */
+ s.strstart -= s.w_size;
+ //zmemcpy(s->window, s->window + s->w_size, s->strstart);
+ s.window.set(s.window.subarray(s.w_size, s.w_size + s.strstart), 0);
+ if (s.matches < 2) {
+ s.matches++; /* add a pending slide_hash() */
+ }
+ if (s.insert > s.strstart) {
+ s.insert = s.strstart;
+ }
+ }
+ //zmemcpy(s->window + s->strstart, s->strm->next_in - used, used);
+ s.window.set(s.strm.input.subarray(s.strm.next_in - used, s.strm.next_in), s.strstart);
+ s.strstart += used;
+ s.insert += used > s.w_size - s.insert ? s.w_size - s.insert : used;
+ }
+ s.block_start = s.strstart;
+ }
+ if (s.high_water < s.strstart) {
+ s.high_water = s.strstart;
+ }
+
+ /* If the last block was written to next_out, then done. */
+ if (last) {
+ return BS_FINISH_DONE;
+ }
+
+ /* If flushing and all input has been consumed, then done. */
+ if (flush !== Z_NO_FLUSH$1 && flush !== Z_FINISH$1 &&
+ s.strm.avail_in === 0 && s.strstart === s.block_start) {
+ return BS_BLOCK_DONE;
+ }
+
+ /* Fill the window with any remaining input. */
+ have = s.window_size - s.strstart;
+ if (s.strm.avail_in > have && s.block_start >= s.w_size) {
+ /* Slide the window down. */
+ s.block_start -= s.w_size;
+ s.strstart -= s.w_size;
+ //zmemcpy(s->window, s->window + s->w_size, s->strstart);
+ s.window.set(s.window.subarray(s.w_size, s.w_size + s.strstart), 0);
+ if (s.matches < 2) {
+ s.matches++; /* add a pending slide_hash() */
+ }
+ have += s.w_size; /* more space now */
+ if (s.insert > s.strstart) {
+ s.insert = s.strstart;
+ }
+ }
+ if (have > s.strm.avail_in) {
+ have = s.strm.avail_in;
+ }
+ if (have) {
+ read_buf(s.strm, s.window, s.strstart, have);
+ s.strstart += have;
+ s.insert += have > s.w_size - s.insert ? s.w_size - s.insert : have;
+ }
+ if (s.high_water < s.strstart) {
+ s.high_water = s.strstart;
+ }
+
+ /* There was not enough avail_out to write a complete worthy or flushed
+ * stored block to next_out. Write a stored block to pending instead, if we
+ * have enough input for a worthy block, or if flushing and there is enough
+ * room for the remaining input as a stored block in the pending buffer.
+ */
+ have = (s.bi_valid + 42) >> 3; /* number of header bytes */
+ /* maximum stored block length that will fit in pending: */
+ have = s.pending_buf_size - have > 65535/* MAX_STORED */ ? 65535/* MAX_STORED */ : s.pending_buf_size - have;
+ min_block = have > s.w_size ? s.w_size : have;
+ left = s.strstart - s.block_start;
+ if (left >= min_block ||
+ ((left || flush === Z_FINISH$1) && flush !== Z_NO_FLUSH$1 &&
+ s.strm.avail_in === 0 && left <= have)) {
+ len = left > have ? have : left;
+ last = flush === Z_FINISH$1 && s.strm.avail_in === 0 &&
+ len === left ? 1 : 0;
+ _tr_stored_block(s, s.block_start, len, last);
+ s.block_start += len;
+ flush_pending(s.strm);
+ }
+
+ /* We've done all we can with the available input and output. */
+ return last ? BS_FINISH_STARTED : BS_NEED_MORE;
+};
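+
+// A note on the stored-block framing relied on above: every stored block
+// costs about 5 bytes of overhead -- the 3 BFINAL/BTYPE header bits padded
+// to a byte boundary, then LEN and its one's complement NLEN as two
+// little-endian 16-bit fields. That is where the `pending_buf_size - 5`
+// bound for min_block comes from, and why the four bytes at
+// pending_buf[pending - 4 .. pending - 1] are patched with len and ~len
+// right after _tr_stored_block() writes the dummy header.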
+
+
+/* ===========================================================================
+ * Compress as much as possible from the input stream, return the current
+ * block state.
+ * This function does not perform lazy evaluation of matches and inserts
+ * new strings in the dictionary only for unmatched strings or for short
+ * matches. It is used only for the fast compression options.
+ */
+const deflate_fast = (s, flush) => {
+
+ let hash_head; /* head of the hash chain */
+ let bflush; /* set if current block must be flushed */
+
+ for (;;) {
+ /* Make sure that we always have enough lookahead, except
+ * at the end of the input file. We need MAX_MATCH bytes
+ * for the next match, plus MIN_MATCH bytes to insert the
+ * string following the next match.
+ */
+ if (s.lookahead < MIN_LOOKAHEAD) {
+ fill_window(s);
+ if (s.lookahead < MIN_LOOKAHEAD && flush === Z_NO_FLUSH$1) {
+ return BS_NEED_MORE;
+ }
+ if (s.lookahead === 0) {
+ break; /* flush the current block */
+ }
+ }
+
+ /* Insert the string window[strstart .. strstart+2] in the
+ * dictionary, and set hash_head to the head of the hash chain:
+ */
+ hash_head = 0/*NIL*/;
+ if (s.lookahead >= MIN_MATCH) {
+ /*** INSERT_STRING(s, s.strstart, hash_head); ***/
+ s.ins_h = HASH(s, s.ins_h, s.window[s.strstart + MIN_MATCH - 1]);
+ hash_head = s.prev[s.strstart & s.w_mask] = s.head[s.ins_h];
+ s.head[s.ins_h] = s.strstart;
+ /***/
+ }
+
+ /* Find the longest match, discarding those <= prev_length.
+ * At this point we have always match_length < MIN_MATCH
+ */
+ if (hash_head !== 0/*NIL*/ && ((s.strstart - hash_head) <= (s.w_size - MIN_LOOKAHEAD))) {
+ /* To simplify the code, we prevent matches with the string
+ * of window index 0 (in particular we have to avoid a match
+ * of the string with itself at the start of the input file).
+ */
+ s.match_length = longest_match(s, hash_head);
+ /* longest_match() sets match_start */
+ }
+ if (s.match_length >= MIN_MATCH) {
+ // check_match(s, s.strstart, s.match_start, s.match_length); // for debug only
+
+ /*** _tr_tally_dist(s, s.strstart - s.match_start,
+ s.match_length - MIN_MATCH, bflush); ***/
+ bflush = _tr_tally(s, s.strstart - s.match_start, s.match_length - MIN_MATCH);
+
+ s.lookahead -= s.match_length;
+
+ /* Insert new strings in the hash table only if the match length
+ * is not too large. This saves time but degrades compression.
+ */
+ if (s.match_length <= s.max_lazy_match/*max_insert_length*/ && s.lookahead >= MIN_MATCH) {
+ s.match_length--; /* string at strstart already in table */
+ do {
+ s.strstart++;
+ /*** INSERT_STRING(s, s.strstart, hash_head); ***/
+ s.ins_h = HASH(s, s.ins_h, s.window[s.strstart + MIN_MATCH - 1]);
+ hash_head = s.prev[s.strstart & s.w_mask] = s.head[s.ins_h];
+ s.head[s.ins_h] = s.strstart;
+ /***/
+ /* strstart never exceeds WSIZE-MAX_MATCH, so there are
+ * always MIN_MATCH bytes ahead.
+ */
+ } while (--s.match_length !== 0);
+ s.strstart++;
+      } else {
+ s.strstart += s.match_length;
+ s.match_length = 0;
+ s.ins_h = s.window[s.strstart];
+ /* UPDATE_HASH(s, s.ins_h, s.window[s.strstart+1]); */
+ s.ins_h = HASH(s, s.ins_h, s.window[s.strstart + 1]);
+
+//#if MIN_MATCH != 3
+// Call UPDATE_HASH() MIN_MATCH-3 more times
+//#endif
+ /* If lookahead < MIN_MATCH, ins_h is garbage, but it does not
+ * matter since it will be recomputed at next deflate call.
+ */
+ }
+ } else {
+ /* No match, output a literal byte */
+ //Tracevv((stderr,"%c", s.window[s.strstart]));
+ /*** _tr_tally_lit(s, s.window[s.strstart], bflush); ***/
+ bflush = _tr_tally(s, 0, s.window[s.strstart]);
+
+ s.lookahead--;
+ s.strstart++;
+ }
+ if (bflush) {
+ /*** FLUSH_BLOCK(s, 0); ***/
+ flush_block_only(s, false);
+ if (s.strm.avail_out === 0) {
+ return BS_NEED_MORE;
+ }
+ /***/
+ }
+ }
+ s.insert = ((s.strstart < (MIN_MATCH - 1)) ? s.strstart : MIN_MATCH - 1);
+ if (flush === Z_FINISH$1) {
+ /*** FLUSH_BLOCK(s, 1); ***/
+ flush_block_only(s, true);
+ if (s.strm.avail_out === 0) {
+ return BS_FINISH_STARTED;
+ }
+ /***/
+ return BS_FINISH_DONE;
+ }
+ if (s.sym_next) {
+ /*** FLUSH_BLOCK(s, 0); ***/
+ flush_block_only(s, false);
+ if (s.strm.avail_out === 0) {
+ return BS_NEED_MORE;
+ }
+ /***/
+ }
+ return BS_BLOCK_DONE;
+};
+
+/* ===========================================================================
+ * Same as above, but achieves better compression. We use a lazy
+ * evaluation for matches: a match is finally adopted only if there is
+ * no better match at the next window position.
+ */
+const deflate_slow = (s, flush) => {
+
+ let hash_head; /* head of hash chain */
+ let bflush; /* set if current block must be flushed */
+
+ let max_insert;
+
+ /* Process the input block. */
+ for (;;) {
+ /* Make sure that we always have enough lookahead, except
+ * at the end of the input file. We need MAX_MATCH bytes
+ * for the next match, plus MIN_MATCH bytes to insert the
+ * string following the next match.
+ */
+ if (s.lookahead < MIN_LOOKAHEAD) {
+ fill_window(s);
+ if (s.lookahead < MIN_LOOKAHEAD && flush === Z_NO_FLUSH$1) {
+ return BS_NEED_MORE;
+ }
+ if (s.lookahead === 0) { break; } /* flush the current block */
+ }
+
+ /* Insert the string window[strstart .. strstart+2] in the
+ * dictionary, and set hash_head to the head of the hash chain:
+ */
+ hash_head = 0/*NIL*/;
+ if (s.lookahead >= MIN_MATCH) {
+ /*** INSERT_STRING(s, s.strstart, hash_head); ***/
+ s.ins_h = HASH(s, s.ins_h, s.window[s.strstart + MIN_MATCH - 1]);
+ hash_head = s.prev[s.strstart & s.w_mask] = s.head[s.ins_h];
+ s.head[s.ins_h] = s.strstart;
+ /***/
+ }
+
+ /* Find the longest match, discarding those <= prev_length.
+ */
+ s.prev_length = s.match_length;
+ s.prev_match = s.match_start;
+ s.match_length = MIN_MATCH - 1;
+
+ if (hash_head !== 0/*NIL*/ && s.prev_length < s.max_lazy_match &&
+ s.strstart - hash_head <= (s.w_size - MIN_LOOKAHEAD)/*MAX_DIST(s)*/) {
+ /* To simplify the code, we prevent matches with the string
+ * of window index 0 (in particular we have to avoid a match
+ * of the string with itself at the start of the input file).
+ */
+ s.match_length = longest_match(s, hash_head);
+ /* longest_match() sets match_start */
+
+ if (s.match_length <= 5 &&
+ (s.strategy === Z_FILTERED || (s.match_length === MIN_MATCH && s.strstart - s.match_start > 4096/*TOO_FAR*/))) {
+
+ /* If prev_match is also MIN_MATCH, match_start is garbage
+ * but we will ignore the current match anyway.
+ */
+ s.match_length = MIN_MATCH - 1;
+ }
+ }
+ /* If there was a match at the previous step and the current
+ * match is not better, output the previous match:
+ */
+ if (s.prev_length >= MIN_MATCH && s.match_length <= s.prev_length) {
+ max_insert = s.strstart + s.lookahead - MIN_MATCH;
+ /* Do not insert strings in hash table beyond this. */
+
+ //check_match(s, s.strstart-1, s.prev_match, s.prev_length);
+
+ /***_tr_tally_dist(s, s.strstart - 1 - s.prev_match,
+ s.prev_length - MIN_MATCH, bflush);***/
+ bflush = _tr_tally(s, s.strstart - 1 - s.prev_match, s.prev_length - MIN_MATCH);
+ /* Insert in hash table all strings up to the end of the match.
+ * strstart-1 and strstart are already inserted. If there is not
+ * enough lookahead, the last two strings are not inserted in
+ * the hash table.
+ */
+ s.lookahead -= s.prev_length - 1;
+ s.prev_length -= 2;
+ do {
+ if (++s.strstart <= max_insert) {
+ /*** INSERT_STRING(s, s.strstart, hash_head); ***/
+ s.ins_h = HASH(s, s.ins_h, s.window[s.strstart + MIN_MATCH - 1]);
+ hash_head = s.prev[s.strstart & s.w_mask] = s.head[s.ins_h];
+ s.head[s.ins_h] = s.strstart;
+ /***/
+ }
+ } while (--s.prev_length !== 0);
+ s.match_available = 0;
+ s.match_length = MIN_MATCH - 1;
+ s.strstart++;
+
+ if (bflush) {
+ /*** FLUSH_BLOCK(s, 0); ***/
+ flush_block_only(s, false);
+ if (s.strm.avail_out === 0) {
+ return BS_NEED_MORE;
+ }
+ /***/
+ }
+
+ } else if (s.match_available) {
+ /* If there was no match at the previous position, output a
+ * single literal. If there was a match but the current match
+ * is longer, truncate the previous match to a single literal.
+ */
+ //Tracevv((stderr,"%c", s->window[s->strstart-1]));
+ /*** _tr_tally_lit(s, s.window[s.strstart-1], bflush); ***/
+ bflush = _tr_tally(s, 0, s.window[s.strstart - 1]);
+
+ if (bflush) {
+ /*** FLUSH_BLOCK_ONLY(s, 0) ***/
+ flush_block_only(s, false);
+ /***/
+ }
+ s.strstart++;
+ s.lookahead--;
+ if (s.strm.avail_out === 0) {
+ return BS_NEED_MORE;
+ }
+ } else {
+ /* There is no previous match to compare with, wait for
+ * the next step to decide.
+ */
+ s.match_available = 1;
+ s.strstart++;
+ s.lookahead--;
+ }
+ }
+ //Assert (flush != Z_NO_FLUSH, "no flush?");
+ if (s.match_available) {
+ //Tracevv((stderr,"%c", s->window[s->strstart-1]));
+ /*** _tr_tally_lit(s, s.window[s.strstart-1], bflush); ***/
+ bflush = _tr_tally(s, 0, s.window[s.strstart - 1]);
+
+ s.match_available = 0;
+ }
+ s.insert = s.strstart < MIN_MATCH - 1 ? s.strstart : MIN_MATCH - 1;
+ if (flush === Z_FINISH$1) {
+ /*** FLUSH_BLOCK(s, 1); ***/
+ flush_block_only(s, true);
+ if (s.strm.avail_out === 0) {
+ return BS_FINISH_STARTED;
+ }
+ /***/
+ return BS_FINISH_DONE;
+ }
+ if (s.sym_next) {
+ /*** FLUSH_BLOCK(s, 0); ***/
+ flush_block_only(s, false);
+ if (s.strm.avail_out === 0) {
+ return BS_NEED_MORE;
+ }
+ /***/
+ }
+
+ return BS_BLOCK_DONE;
+};
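+
+// Lazy matching in a nutshell (an illustrative sketch): suppose the window
+// already contains "abc" and, elsewhere, "bcde", and the next input is
+// "abcde". At 'a' the best match is the 3-byte "abc"; deflate_slow() defers
+// one position, finds the 4-byte "bcde" starting at 'b', emits 'a' as a
+// single literal and keeps the longer match -- whereas deflate_fast() above
+// would have committed to the 3-byte match immediately.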
+
+
+/* ===========================================================================
+ * For Z_RLE, simply look for runs of bytes, generate matches only of distance
+ * one. Do not maintain a hash table. (It will be regenerated if this run of
+ * deflate switches away from Z_RLE.)
+ */
+const deflate_rle = (s, flush) => {
+
+ let bflush; /* set if current block must be flushed */
+ let prev; /* byte at distance one to match */
+ let scan, strend; /* scan goes up to strend for length of run */
+
+ const _win = s.window;
+
+ for (;;) {
+ /* Make sure that we always have enough lookahead, except
+ * at the end of the input file. We need MAX_MATCH bytes
+ * for the longest run, plus one for the unrolled loop.
+ */
+ if (s.lookahead <= MAX_MATCH) {
+ fill_window(s);
+ if (s.lookahead <= MAX_MATCH && flush === Z_NO_FLUSH$1) {
+ return BS_NEED_MORE;
+ }
+ if (s.lookahead === 0) { break; } /* flush the current block */
+ }
+
+ /* See how many times the previous byte repeats */
+ s.match_length = 0;
+ if (s.lookahead >= MIN_MATCH && s.strstart > 0) {
+ scan = s.strstart - 1;
+ prev = _win[scan];
+ if (prev === _win[++scan] && prev === _win[++scan] && prev === _win[++scan]) {
+ strend = s.strstart + MAX_MATCH;
+ do {
+ /*jshint noempty:false*/
+ } while (prev === _win[++scan] && prev === _win[++scan] &&
+ prev === _win[++scan] && prev === _win[++scan] &&
+ prev === _win[++scan] && prev === _win[++scan] &&
+ prev === _win[++scan] && prev === _win[++scan] &&
+ scan < strend);
+ s.match_length = MAX_MATCH - (strend - scan);
+ if (s.match_length > s.lookahead) {
+ s.match_length = s.lookahead;
+ }
+ }
+ //Assert(scan <= s->window+(uInt)(s->window_size-1), "wild scan");
+ }
+
+ /* Emit match if have run of MIN_MATCH or longer, else emit literal */
+ if (s.match_length >= MIN_MATCH) {
+ //check_match(s, s.strstart, s.strstart - 1, s.match_length);
+
+ /*** _tr_tally_dist(s, 1, s.match_length - MIN_MATCH, bflush); ***/
+ bflush = _tr_tally(s, 1, s.match_length - MIN_MATCH);
+
+ s.lookahead -= s.match_length;
+ s.strstart += s.match_length;
+ s.match_length = 0;
+ } else {
+ /* No match, output a literal byte */
+ //Tracevv((stderr,"%c", s->window[s->strstart]));
+ /*** _tr_tally_lit(s, s.window[s.strstart], bflush); ***/
+ bflush = _tr_tally(s, 0, s.window[s.strstart]);
+
+ s.lookahead--;
+ s.strstart++;
+ }
+ if (bflush) {
+ /*** FLUSH_BLOCK(s, 0); ***/
+ flush_block_only(s, false);
+ if (s.strm.avail_out === 0) {
+ return BS_NEED_MORE;
+ }
+ /***/
+ }
+ }
+ s.insert = 0;
+ if (flush === Z_FINISH$1) {
+ /*** FLUSH_BLOCK(s, 1); ***/
+ flush_block_only(s, true);
+ if (s.strm.avail_out === 0) {
+ return BS_FINISH_STARTED;
+ }
+ /***/
+ return BS_FINISH_DONE;
+ }
+ if (s.sym_next) {
+ /*** FLUSH_BLOCK(s, 0); ***/
+ flush_block_only(s, false);
+ if (s.strm.avail_out === 0) {
+ return BS_NEED_MORE;
+ }
+ /***/
+ }
+ return BS_BLOCK_DONE;
+};
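+
+// Z_RLE sketch: a run of eight repeated 'a' bytes at the start of the
+// stream is emitted as one literal 'a' followed by a <length 7, distance 1>
+// match -- every matched byte simply repeats the previous output byte,
+// which is why no hash table is needed and the distance is always one.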
+
+/* ===========================================================================
+ * For Z_HUFFMAN_ONLY, do not look for matches. Do not maintain a hash table.
+ * (It will be regenerated if this run of deflate switches away from Huffman.)
+ */
+const deflate_huff = (s, flush) => {
+
+ let bflush; /* set if current block must be flushed */
+
+ for (;;) {
+ /* Make sure that we have a literal to write. */
+ if (s.lookahead === 0) {
+ fill_window(s);
+ if (s.lookahead === 0) {
+ if (flush === Z_NO_FLUSH$1) {
+ return BS_NEED_MORE;
+ }
+ break; /* flush the current block */
+ }
+ }
+
+ /* Output a literal byte */
+ s.match_length = 0;
+ //Tracevv((stderr,"%c", s->window[s->strstart]));
+ /*** _tr_tally_lit(s, s.window[s.strstart], bflush); ***/
+ bflush = _tr_tally(s, 0, s.window[s.strstart]);
+ s.lookahead--;
+ s.strstart++;
+ if (bflush) {
+ /*** FLUSH_BLOCK(s, 0); ***/
+ flush_block_only(s, false);
+ if (s.strm.avail_out === 0) {
+ return BS_NEED_MORE;
+ }
+ /***/
+ }
+ }
+ s.insert = 0;
+ if (flush === Z_FINISH$1) {
+ /*** FLUSH_BLOCK(s, 1); ***/
+ flush_block_only(s, true);
+ if (s.strm.avail_out === 0) {
+ return BS_FINISH_STARTED;
+ }
+ /***/
+ return BS_FINISH_DONE;
+ }
+ if (s.sym_next) {
+ /*** FLUSH_BLOCK(s, 0); ***/
+ flush_block_only(s, false);
+ if (s.strm.avail_out === 0) {
+ return BS_NEED_MORE;
+ }
+ /***/
+ }
+ return BS_BLOCK_DONE;
+};
+
+/* Values for max_lazy_match, good_match and max_chain_length, depending on
+ * the desired pack level (0..9). The values given below have been tuned to
+ * exclude worst case performance for pathological files. Better values may be
+ * found for specific files.
+ */
+function Config(good_length, max_lazy, nice_length, max_chain, func) {
+
+ this.good_length = good_length;
+ this.max_lazy = max_lazy;
+ this.nice_length = nice_length;
+ this.max_chain = max_chain;
+ this.func = func;
+}
+
+const configuration_table = [
+ /* good lazy nice chain */
+ new Config(0, 0, 0, 0, deflate_stored), /* 0 store only */
+ new Config(4, 4, 8, 4, deflate_fast), /* 1 max speed, no lazy matches */
+ new Config(4, 5, 16, 8, deflate_fast), /* 2 */
+ new Config(4, 6, 32, 32, deflate_fast), /* 3 */
+
+ new Config(4, 4, 16, 16, deflate_slow), /* 4 lazy matches */
+ new Config(8, 16, 32, 32, deflate_slow), /* 5 */
+ new Config(8, 16, 128, 128, deflate_slow), /* 6 */
+ new Config(8, 32, 128, 256, deflate_slow), /* 7 */
+ new Config(32, 128, 258, 1024, deflate_slow), /* 8 */
+ new Config(32, 258, 258, 4096, deflate_slow) /* 9 max compression */
+];
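+
+// Reading the table: level 1 pairs deflate_fast with very short hash chains
+// (max_chain = 4), level 6 -- the value Z_DEFAULT_COMPRESSION is mapped to
+// in deflateInit2() below -- uses deflate_slow with good_length 8,
+// max_lazy 16, nice_length 128 and max_chain 128, and level 9 is willing to
+// walk chains up to 4096 entries deep in exchange for the best ratio.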
+
+
+/* ===========================================================================
+ * Initialize the "longest match" routines for a new zlib stream
+ */
+const lm_init = (s) => {
+
+ s.window_size = 2 * s.w_size;
+
+ /*** CLEAR_HASH(s); ***/
+ zero(s.head); // Fill with NIL (= 0);
+
+ /* Set the default configuration parameters:
+ */
+ s.max_lazy_match = configuration_table[s.level].max_lazy;
+ s.good_match = configuration_table[s.level].good_length;
+ s.nice_match = configuration_table[s.level].nice_length;
+ s.max_chain_length = configuration_table[s.level].max_chain;
+
+ s.strstart = 0;
+ s.block_start = 0;
+ s.lookahead = 0;
+ s.insert = 0;
+ s.match_length = s.prev_length = MIN_MATCH - 1;
+ s.match_available = 0;
+ s.ins_h = 0;
+};
+
+
+function DeflateState() {
+ this.strm = null; /* pointer back to this zlib stream */
+ this.status = 0; /* as the name implies */
+ this.pending_buf = null; /* output still pending */
+ this.pending_buf_size = 0; /* size of pending_buf */
+ this.pending_out = 0; /* next pending byte to output to the stream */
+ this.pending = 0; /* nb of bytes in the pending buffer */
+ this.wrap = 0; /* bit 0 true for zlib, bit 1 true for gzip */
+ this.gzhead = null; /* gzip header information to write */
+ this.gzindex = 0; /* where in extra, name, or comment */
+ this.method = Z_DEFLATED$1; /* can only be DEFLATED */
+ this.last_flush = -1; /* value of flush param for previous deflate call */
+
+ this.w_size = 0; /* LZ77 window size (32K by default) */
+ this.w_bits = 0; /* log2(w_size) (8..16) */
+ this.w_mask = 0; /* w_size - 1 */
+
+ this.window = null;
+ /* Sliding window. Input bytes are read into the second half of the window,
+ * and move to the first half later to keep a dictionary of at least wSize
+ * bytes. With this organization, matches are limited to a distance of
+ * wSize-MAX_MATCH bytes, but this ensures that IO is always
+ * performed with a length multiple of the block size.
+ */
+
+ this.window_size = 0;
+ /* Actual size of window: 2*wSize, except when the user input buffer
+ * is directly used as sliding window.
+ */
+
+ this.prev = null;
+ /* Link to older string with same hash index. To limit the size of this
+ * array to 64K, this link is maintained only for the last 32K strings.
+ * An index in this array is thus a window index modulo 32K.
+ */
+
+ this.head = null; /* Heads of the hash chains or NIL. */
+
+ this.ins_h = 0; /* hash index of string to be inserted */
+ this.hash_size = 0; /* number of elements in hash table */
+ this.hash_bits = 0; /* log2(hash_size) */
+ this.hash_mask = 0; /* hash_size-1 */
+
+ this.hash_shift = 0;
+ /* Number of bits by which ins_h must be shifted at each input
+ * step. It must be such that after MIN_MATCH steps, the oldest
+ * byte no longer takes part in the hash key, that is:
+ * hash_shift * MIN_MATCH >= hash_bits
+ */
+
+ this.block_start = 0;
+ /* Window position at the beginning of the current output block. Gets
+ * negative when the window is moved backwards.
+ */
+
+ this.match_length = 0; /* length of best match */
+ this.prev_match = 0; /* previous match */
+ this.match_available = 0; /* set if previous match exists */
+ this.strstart = 0; /* start of string to insert */
+ this.match_start = 0; /* start of matching string */
+ this.lookahead = 0; /* number of valid bytes ahead in window */
+
+ this.prev_length = 0;
+ /* Length of the best match at previous step. Matches not greater than this
+ * are discarded. This is used in the lazy match evaluation.
+ */
+
+ this.max_chain_length = 0;
+ /* To speed up deflation, hash chains are never searched beyond this
+ * length. A higher limit improves compression ratio but degrades the
+ * speed.
+ */
+
+ this.max_lazy_match = 0;
+ /* Attempt to find a better match only when the current match is strictly
+ * smaller than this value. This mechanism is used only for compression
+ * levels >= 4.
+ */
+  // That's an alias for max_lazy_match, don't use it directly
+ //this.max_insert_length = 0;
+ /* Insert new strings in the hash table only if the match length is not
+ * greater than this length. This saves time but degrades compression.
+ * max_insert_length is used only for compression levels <= 3.
+ */
+
+ this.level = 0; /* compression level (1..9) */
+ this.strategy = 0; /* favor or force Huffman coding*/
+
+ this.good_match = 0;
+ /* Use a faster search when the previous match is longer than this */
+
+ this.nice_match = 0; /* Stop searching when current match exceeds this */
+
+ /* used by trees.c: */
+
+ /* Didn't use ct_data typedef below to suppress compiler warning */
+
+ // struct ct_data_s dyn_ltree[HEAP_SIZE]; /* literal and length tree */
+ // struct ct_data_s dyn_dtree[2*D_CODES+1]; /* distance tree */
+ // struct ct_data_s bl_tree[2*BL_CODES+1]; /* Huffman tree for bit lengths */
+
+  // Use a flat array of DOUBLE size, with interleaved data,
+  // because JS does not support structs efficiently.
+ this.dyn_ltree = new Uint16Array(HEAP_SIZE * 2);
+ this.dyn_dtree = new Uint16Array((2 * D_CODES + 1) * 2);
+ this.bl_tree = new Uint16Array((2 * BL_CODES + 1) * 2);
+ zero(this.dyn_ltree);
+ zero(this.dyn_dtree);
+ zero(this.bl_tree);
+
+ this.l_desc = null; /* desc. for literal tree */
+ this.d_desc = null; /* desc. for distance tree */
+ this.bl_desc = null; /* desc. for bit length tree */
+
+ //ush bl_count[MAX_BITS+1];
+ this.bl_count = new Uint16Array(MAX_BITS + 1);
+ /* number of codes at each bit length for an optimal tree */
+
+ //int heap[2*L_CODES+1]; /* heap used to build the Huffman trees */
+ this.heap = new Uint16Array(2 * L_CODES + 1); /* heap used to build the Huffman trees */
+ zero(this.heap);
+
+ this.heap_len = 0; /* number of elements in the heap */
+ this.heap_max = 0; /* element of largest frequency */
+ /* The sons of heap[n] are heap[2*n] and heap[2*n+1]. heap[0] is not used.
+ * The same heap array is used to build all trees.
+ */
+
+ this.depth = new Uint16Array(2 * L_CODES + 1); //uch depth[2*L_CODES+1];
+ zero(this.depth);
+ /* Depth of each subtree used as tie breaker for trees of equal frequency
+ */
+
+ this.sym_buf = 0; /* buffer for distances and literals/lengths */
+
+ this.lit_bufsize = 0;
+ /* Size of match buffer for literals/lengths. There are 4 reasons for
+ * limiting lit_bufsize to 64K:
+ * - frequencies can be kept in 16 bit counters
+ * - if compression is not successful for the first block, all input
+ * data is still in the window so we can still emit a stored block even
+ * when input comes from standard input. (This can also be done for
+ * all blocks if lit_bufsize is not greater than 32K.)
+ * - if compression is not successful for a file smaller than 64K, we can
+ * even emit a stored file instead of a stored block (saving 5 bytes).
+ * This is applicable only for zip (not gzip or zlib).
+ * - creating new Huffman trees less frequently may not provide fast
+ * adaptation to changes in the input data statistics. (Take for
+ * example a binary file with poorly compressible code followed by
+ * a highly compressible string table.) Smaller buffer sizes give
+ * fast adaptation but have of course the overhead of transmitting
+ * trees more frequently.
+ * - I can't count above 4
+ */
+
+ this.sym_next = 0; /* running index in sym_buf */
+ this.sym_end = 0; /* symbol table full when sym_next reaches this */
+
+ this.opt_len = 0; /* bit length of current block with optimal trees */
+ this.static_len = 0; /* bit length of current block with static trees */
+ this.matches = 0; /* number of string matches in current block */
+ this.insert = 0; /* bytes at end of window left to insert */
+
+
+ this.bi_buf = 0;
+ /* Output buffer. bits are inserted starting at the bottom (least
+ * significant bits).
+ */
+ this.bi_valid = 0;
+ /* Number of valid bits in bi_buf. All bits above the last valid bit
+ * are always zero.
+ */
+
+ // Used for window memory init. We safely ignore it for JS. That makes
+ // sense only for pointers and memory check tools.
+ //this.high_water = 0;
+ /* High water mark offset in window for initialized bytes -- bytes above
+ * this are set to zero in order to avoid memory check warnings when
+ * longest match routines access bytes past the input. This is then
+ * updated to the new high water mark.
+ */
+}
+
+
+/* =========================================================================
+ * Check for a valid deflate stream state. Return 0 if ok, 1 if not.
+ */
+const deflateStateCheck = (strm) => {
+
+ if (!strm) {
+ return 1;
+ }
+ const s = strm.state;
+ if (!s || s.strm !== strm || (s.status !== INIT_STATE &&
+//#ifdef GZIP
+ s.status !== GZIP_STATE &&
+//#endif
+ s.status !== EXTRA_STATE &&
+ s.status !== NAME_STATE &&
+ s.status !== COMMENT_STATE &&
+ s.status !== HCRC_STATE &&
+ s.status !== BUSY_STATE &&
+ s.status !== FINISH_STATE)) {
+ return 1;
+ }
+ return 0;
+};
+
+
+const deflateResetKeep = (strm) => {
+
+ if (deflateStateCheck(strm)) {
+ return err(strm, Z_STREAM_ERROR);
+ }
+
+ strm.total_in = strm.total_out = 0;
+ strm.data_type = Z_UNKNOWN;
+
+ const s = strm.state;
+ s.pending = 0;
+ s.pending_out = 0;
+
+ if (s.wrap < 0) {
+ s.wrap = -s.wrap;
+ /* was made negative by deflate(..., Z_FINISH); */
+ }
+ s.status =
+//#ifdef GZIP
+ s.wrap === 2 ? GZIP_STATE :
+//#endif
+ s.wrap ? INIT_STATE : BUSY_STATE;
+ strm.adler = (s.wrap === 2) ?
+ 0 // crc32(0, Z_NULL, 0)
+ :
+ 1; // adler32(0, Z_NULL, 0)
+ s.last_flush = -2;
+ _tr_init(s);
+ return Z_OK$1;
+};
+
+
+const deflateReset = (strm) => {
+
+ const ret = deflateResetKeep(strm);
+ if (ret === Z_OK$1) {
+ lm_init(strm.state);
+ }
+ return ret;
+};
+
+
+const deflateSetHeader = (strm, head) => {
+
+ if (deflateStateCheck(strm) || strm.state.wrap !== 2) {
+ return Z_STREAM_ERROR;
+ }
+ strm.state.gzhead = head;
+ return Z_OK$1;
+};
+
+
+const deflateInit2 = (strm, level, method, windowBits, memLevel, strategy) => {
+
+ if (!strm) { // === Z_NULL
+ return Z_STREAM_ERROR;
+ }
+ let wrap = 1;
+
+ if (level === Z_DEFAULT_COMPRESSION$1) {
+ level = 6;
+ }
+
+ if (windowBits < 0) { /* suppress zlib wrapper */
+ wrap = 0;
+ windowBits = -windowBits;
+ }
+
+ else if (windowBits > 15) {
+ wrap = 2; /* write gzip wrapper instead */
+ windowBits -= 16;
+ }
+
+
+ if (memLevel < 1 || memLevel > MAX_MEM_LEVEL || method !== Z_DEFLATED$1 ||
+ windowBits < 8 || windowBits > 15 || level < 0 || level > 9 ||
+ strategy < 0 || strategy > Z_FIXED || (windowBits === 8 && wrap !== 1)) {
+ return err(strm, Z_STREAM_ERROR);
+ }
+
+
+ if (windowBits === 8) {
+ windowBits = 9;
+ }
+ /* until 256-byte window bug fixed */
+
+ const s = new DeflateState();
+
+ strm.state = s;
+ s.strm = strm;
+ s.status = INIT_STATE; /* to pass state test in deflateReset() */
+
+ s.wrap = wrap;
+ s.gzhead = null;
+ s.w_bits = windowBits;
+ s.w_size = 1 << s.w_bits;
+ s.w_mask = s.w_size - 1;
+
+ s.hash_bits = memLevel + 7;
+ s.hash_size = 1 << s.hash_bits;
+ s.hash_mask = s.hash_size - 1;
+ s.hash_shift = ~~((s.hash_bits + MIN_MATCH - 1) / MIN_MATCH);
+
+ s.window = new Uint8Array(s.w_size * 2);
+ s.head = new Uint16Array(s.hash_size);
+ s.prev = new Uint16Array(s.w_size);
+
+ // Don't need mem init magic for JS.
+ //s.high_water = 0; /* nothing written to s->window yet */
+
+ s.lit_bufsize = 1 << (memLevel + 6); /* 16K elements by default */
+
+ /* We overlay pending_buf and sym_buf. This works since the average size
+ * for length/distance pairs over any compressed block is assured to be 31
+ * bits or less.
+ *
+ * Analysis: The longest fixed codes are a length code of 8 bits plus 5
+ * extra bits, for lengths 131 to 257. The longest fixed distance codes are
+ * 5 bits plus 13 extra bits, for distances 16385 to 32768. The longest
+ * possible fixed-codes length/distance pair is then 31 bits total.
+ *
+ * sym_buf starts one-fourth of the way into pending_buf. So there are
+ * three bytes in sym_buf for every four bytes in pending_buf. Each symbol
+ * in sym_buf is three bytes -- two for the distance and one for the
+ * literal/length. As each symbol is consumed, the pointer to the next
+ * sym_buf value to read moves forward three bytes. From that symbol, up to
+ * 31 bits are written to pending_buf. The closest the written pending_buf
+ * bits gets to the next sym_buf symbol to read is just before the last
+ * code is written. At that time, 31*(n-2) bits have been written, just
+ * after 24*(n-2) bits have been consumed from sym_buf. sym_buf starts at
+ * 8*n bits into pending_buf. (Note that the symbol buffer fills when n-1
+ * symbols are written.) The closest the writing gets to what is unread is
+ * then n+14 bits. Here n is lit_bufsize, which is 16384 by default, and
+ * can range from 128 to 32768.
+ *
+ * Therefore, at a minimum, there are 142 bits of space between what is
+ * written and what is read in the overlain buffers, so the symbols cannot
+ * be overwritten by the compressed data. That space is actually 139 bits,
+ * due to the three-bit fixed-code block header.
+ *
+ * That covers the case where either Z_FIXED is specified, forcing fixed
+ * codes, or when the use of fixed codes is chosen, because that choice
+ * results in a smaller compressed block than dynamic codes. That latter
+ * condition then assures that the above analysis also covers all dynamic
+ * blocks. A dynamic-code block will only be chosen to be emitted if it has
+ * fewer bits than a fixed-code block would for the same set of symbols.
+ * Therefore its average symbol length is assured to be less than 31. So
+ * the compressed data for a dynamic block also cannot overwrite the
+ * symbols from which it is being constructed.
+ */
+
+ s.pending_buf_size = s.lit_bufsize * 4;
+ s.pending_buf = new Uint8Array(s.pending_buf_size);
+
+ // It is offset from `s.pending_buf` (size is `s.lit_bufsize * 2`)
+ //s->sym_buf = s->pending_buf + s->lit_bufsize;
+ s.sym_buf = s.lit_bufsize;
+
+ //s->sym_end = (s->lit_bufsize - 1) * 3;
+ s.sym_end = (s.lit_bufsize - 1) * 3;
+ /* We avoid equality with lit_bufsize*3 because of wraparound at 64K
+ * on 16 bit machines and because stored blocks are restricted to
+ * 64K-1 bytes.
+ */
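+  /* Concretely, with the default memLevel of 8: lit_bufsize = 16384,
+   * pending_buf is 65536 bytes, sym_buf starts at byte offset 16384, and
+   * up to sym_end / 3 = 16383 three-byte symbols are buffered before
+   * _tr_tally() signals that the block must be flushed.
+   */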
+
+ s.level = level;
+ s.strategy = strategy;
+ s.method = method;
+
+ return deflateReset(strm);
+};
+
+const deflateInit = (strm, level) => {
+
+ return deflateInit2(strm, level, Z_DEFLATED$1, MAX_WBITS, DEF_MEM_LEVEL, Z_DEFAULT_STRATEGY$1);
+};
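+
+// How the windowBits convention handled above is typically exercised
+// (a sketch using the constants defined in this file):
+//
+//   deflateInit2(strm, 6, Z_DEFLATED$1,  MAX_WBITS,      DEF_MEM_LEVEL, Z_DEFAULT_STRATEGY$1); // zlib wrapper
+//   deflateInit2(strm, 6, Z_DEFLATED$1, -MAX_WBITS,      DEF_MEM_LEVEL, Z_DEFAULT_STRATEGY$1); // raw deflate
+//   deflateInit2(strm, 6, Z_DEFLATED$1,  MAX_WBITS + 16, DEF_MEM_LEVEL, Z_DEFAULT_STRATEGY$1); // gzip wrapper
+//
+// A negative windowBits suppresses the zlib wrapper entirely, and a value
+// above 15 selects the gzip wrapper with (windowBits - 16) real window bits.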
+
+
+/* ========================================================================= */
+const deflate$2 = (strm, flush) => {
+
+ if (deflateStateCheck(strm) || flush > Z_BLOCK || flush < 0) {
+ return strm ? err(strm, Z_STREAM_ERROR) : Z_STREAM_ERROR;
+ }
+
+ const s = strm.state;
+
+ if (!strm.output ||
+ (strm.avail_in !== 0 && !strm.input) ||
+ (s.status === FINISH_STATE && flush !== Z_FINISH$1)) {
+ return err(strm, (strm.avail_out === 0) ? Z_BUF_ERROR : Z_STREAM_ERROR);
+ }
+
+ const old_flush = s.last_flush;
+ s.last_flush = flush;
+
+ /* Flush as much pending output as possible */
+ if (s.pending !== 0) {
+ flush_pending(strm);
+ if (strm.avail_out === 0) {
+ /* Since avail_out is 0, deflate will be called again with
+ * more output space, but possibly with both pending and
+ * avail_in equal to zero. There won't be anything to do,
+ * but this is not an error situation so make sure we
+ * return OK instead of BUF_ERROR at next call of deflate:
+ */
+ s.last_flush = -1;
+ return Z_OK$1;
+ }
+
+ /* Make sure there is something to do and avoid duplicate consecutive
+ * flushes. For repeated and useless calls with Z_FINISH, we keep
+ * returning Z_STREAM_END instead of Z_BUF_ERROR.
+ */
+ } else if (strm.avail_in === 0 && rank(flush) <= rank(old_flush) &&
+ flush !== Z_FINISH$1) {
+ return err(strm, Z_BUF_ERROR);
+ }
+
+ /* User must not provide more input after the first FINISH: */
+ if (s.status === FINISH_STATE && strm.avail_in !== 0) {
+ return err(strm, Z_BUF_ERROR);
+ }
+
+ /* Write the header */
+ if (s.status === INIT_STATE && s.wrap === 0) {
+ s.status = BUSY_STATE;
+ }
+ if (s.status === INIT_STATE) {
+ /* zlib header */
+ let header = (Z_DEFLATED$1 + ((s.w_bits - 8) << 4)) << 8;
+ let level_flags = -1;
+
+ if (s.strategy >= Z_HUFFMAN_ONLY || s.level < 2) {
+ level_flags = 0;
+ } else if (s.level < 6) {
+ level_flags = 1;
+ } else if (s.level === 6) {
+ level_flags = 2;
+ } else {
+ level_flags = 3;
+ }
+ header |= (level_flags << 6);
+ if (s.strstart !== 0) { header |= PRESET_DICT; }
+ header += 31 - (header % 31);
+
+ putShortMSB(s, header);
+
+ /* Save the adler32 of the preset dictionary: */
+ if (s.strstart !== 0) {
+ putShortMSB(s, strm.adler >>> 16);
+ putShortMSB(s, strm.adler & 0xffff);
+ }
+ strm.adler = 1; // adler32(0L, Z_NULL, 0);
+ s.status = BUSY_STATE;
+
+ /* Compression must start with an empty pending buffer */
+ flush_pending(strm);
+ if (s.pending !== 0) {
+ s.last_flush = -1;
+ return Z_OK$1;
+ }
+ }
+//#ifdef GZIP
+ if (s.status === GZIP_STATE) {
+ /* gzip header */
+ strm.adler = 0; //crc32(0L, Z_NULL, 0);
+ put_byte(s, 31);
+ put_byte(s, 139);
+ put_byte(s, 8);
+ if (!s.gzhead) { // s->gzhead == Z_NULL
+ put_byte(s, 0);
+ put_byte(s, 0);
+ put_byte(s, 0);
+ put_byte(s, 0);
+ put_byte(s, 0);
+ put_byte(s, s.level === 9 ? 2 :
+ (s.strategy >= Z_HUFFMAN_ONLY || s.level < 2 ?
+ 4 : 0));
+ put_byte(s, OS_CODE);
+ s.status = BUSY_STATE;
+
+ /* Compression must start with an empty pending buffer */
+ flush_pending(strm);
+ if (s.pending !== 0) {
+ s.last_flush = -1;
+ return Z_OK$1;
+ }
+ }
+ else {
+ put_byte(s, (s.gzhead.text ? 1 : 0) +
+ (s.gzhead.hcrc ? 2 : 0) +
+ (!s.gzhead.extra ? 0 : 4) +
+ (!s.gzhead.name ? 0 : 8) +
+ (!s.gzhead.comment ? 0 : 16)
+ );
+ put_byte(s, s.gzhead.time & 0xff);
+ put_byte(s, (s.gzhead.time >> 8) & 0xff);
+ put_byte(s, (s.gzhead.time >> 16) & 0xff);
+ put_byte(s, (s.gzhead.time >> 24) & 0xff);
+ put_byte(s, s.level === 9 ? 2 :
+ (s.strategy >= Z_HUFFMAN_ONLY || s.level < 2 ?
+ 4 : 0));
+ put_byte(s, s.gzhead.os & 0xff);
+ if (s.gzhead.extra && s.gzhead.extra.length) {
+ put_byte(s, s.gzhead.extra.length & 0xff);
+ put_byte(s, (s.gzhead.extra.length >> 8) & 0xff);
+ }
+ if (s.gzhead.hcrc) {
+ strm.adler = crc32(strm.adler, s.pending_buf, s.pending, 0);
+ }
+ s.gzindex = 0;
+ s.status = EXTRA_STATE;
+ }
+ }
+ if (s.status === EXTRA_STATE) {
+ if (s.gzhead.extra/* != Z_NULL*/) {
+ let beg = s.pending; /* start of bytes to update crc */
+ let left = (s.gzhead.extra.length & 0xffff) - s.gzindex;
+ while (s.pending + left > s.pending_buf_size) {
+ let copy = s.pending_buf_size - s.pending;
+ // zmemcpy(s.pending_buf + s.pending,
+ // s.gzhead.extra + s.gzindex, copy);
+ s.pending_buf.set(s.gzhead.extra.subarray(s.gzindex, s.gzindex + copy), s.pending);
+ s.pending = s.pending_buf_size;
+ //--- HCRC_UPDATE(beg) ---//
+ if (s.gzhead.hcrc && s.pending > beg) {
+ strm.adler = crc32(strm.adler, s.pending_buf, s.pending - beg, beg);
+ }
+ //---//
+ s.gzindex += copy;
+ flush_pending(strm);
+ if (s.pending !== 0) {
+ s.last_flush = -1;
+ return Z_OK$1;
+ }
+ beg = 0;
+ left -= copy;
+ }
+ // JS specific: s.gzhead.extra may be TypedArray or Array for backward compatibility
+ // TypedArray.slice and TypedArray.from don't exist in IE10-IE11
+ let gzhead_extra = new Uint8Array(s.gzhead.extra);
+ // zmemcpy(s->pending_buf + s->pending,
+ // s->gzhead->extra + s->gzindex, left);
+ s.pending_buf.set(gzhead_extra.subarray(s.gzindex, s.gzindex + left), s.pending);
+ s.pending += left;
+ //--- HCRC_UPDATE(beg) ---//
+ if (s.gzhead.hcrc && s.pending > beg) {
+ strm.adler = crc32(strm.adler, s.pending_buf, s.pending - beg, beg);
+ }
+ //---//
+ s.gzindex = 0;
+ }
+ s.status = NAME_STATE;
+ }
+ if (s.status === NAME_STATE) {
+ if (s.gzhead.name/* != Z_NULL*/) {
+ let beg = s.pending; /* start of bytes to update crc */
+ let val;
+ do {
+ if (s.pending === s.pending_buf_size) {
+ //--- HCRC_UPDATE(beg) ---//
+ if (s.gzhead.hcrc && s.pending > beg) {
+ strm.adler = crc32(strm.adler, s.pending_buf, s.pending - beg, beg);
+ }
+ //---//
+ flush_pending(strm);
+ if (s.pending !== 0) {
+ s.last_flush = -1;
+ return Z_OK$1;
+ }
+ beg = 0;
+ }
+ // JS specific: little magic to add zero terminator to end of string
+ if (s.gzindex < s.gzhead.name.length) {
+ val = s.gzhead.name.charCodeAt(s.gzindex++) & 0xff;
+ } else {
+ val = 0;
+ }
+ put_byte(s, val);
+ } while (val !== 0);
+ //--- HCRC_UPDATE(beg) ---//
+ if (s.gzhead.hcrc && s.pending > beg) {
+ strm.adler = crc32(strm.adler, s.pending_buf, s.pending - beg, beg);
+ }
+ //---//
+ s.gzindex = 0;
+ }
+ s.status = COMMENT_STATE;
+ }
+ if (s.status === COMMENT_STATE) {
+ if (s.gzhead.comment/* != Z_NULL*/) {
+ let beg = s.pending; /* start of bytes to update crc */
+ let val;
+ do {
+ if (s.pending === s.pending_buf_size) {
+ //--- HCRC_UPDATE(beg) ---//
+ if (s.gzhead.hcrc && s.pending > beg) {
+ strm.adler = crc32(strm.adler, s.pending_buf, s.pending - beg, beg);
+ }
+ //---//
+ flush_pending(strm);
+ if (s.pending !== 0) {
+ s.last_flush = -1;
+ return Z_OK$1;
+ }
+ beg = 0;
+ }
+ // JS specific: little magic to add zero terminator to end of string
+ if (s.gzindex < s.gzhead.comment.length) {
+ val = s.gzhead.comment.charCodeAt(s.gzindex++) & 0xff;
+ } else {
+ val = 0;
+ }
+ put_byte(s, val);
+ } while (val !== 0);
+ //--- HCRC_UPDATE(beg) ---//
+ if (s.gzhead.hcrc && s.pending > beg) {
+ strm.adler = crc32(strm.adler, s.pending_buf, s.pending - beg, beg);
+ }
+ //---//
+ }
+ s.status = HCRC_STATE;
+ }
+ if (s.status === HCRC_STATE) {
+ if (s.gzhead.hcrc) {
+ if (s.pending + 2 > s.pending_buf_size) {
+ flush_pending(strm);
+ if (s.pending !== 0) {
+ s.last_flush = -1;
+ return Z_OK$1;
+ }
+ }
+ put_byte(s, strm.adler & 0xff);
+ put_byte(s, (strm.adler >> 8) & 0xff);
+ strm.adler = 0; //crc32(0L, Z_NULL, 0);
+ }
+ s.status = BUSY_STATE;
+
+ /* Compression must start with an empty pending buffer */
+ flush_pending(strm);
+ if (s.pending !== 0) {
+ s.last_flush = -1;
+ return Z_OK$1;
+ }
+ }
+//#endif
+
+ /* Start a new block or continue the current one.
+ */
+ if (strm.avail_in !== 0 || s.lookahead !== 0 ||
+ (flush !== Z_NO_FLUSH$1 && s.status !== FINISH_STATE)) {
+ let bstate = s.level === 0 ? deflate_stored(s, flush) :
+ s.strategy === Z_HUFFMAN_ONLY ? deflate_huff(s, flush) :
+ s.strategy === Z_RLE ? deflate_rle(s, flush) :
+ configuration_table[s.level].func(s, flush);
+
+ if (bstate === BS_FINISH_STARTED || bstate === BS_FINISH_DONE) {
+ s.status = FINISH_STATE;
+ }
+ if (bstate === BS_NEED_MORE || bstate === BS_FINISH_STARTED) {
+ if (strm.avail_out === 0) {
+ s.last_flush = -1;
+ /* avoid BUF_ERROR next call, see above */
+ }
+ return Z_OK$1;
+ /* If flush != Z_NO_FLUSH && avail_out == 0, the next call
+ * of deflate should use the same flush parameter to make sure
+ * that the flush is complete. So we don't have to output an
+ * empty block here, this will be done at next call. This also
+ * ensures that for a very small output buffer, we emit at most
+ * one empty block.
+ */
+ }
+ if (bstate === BS_BLOCK_DONE) {
+ if (flush === Z_PARTIAL_FLUSH) {
+ _tr_align(s);
+ }
+ else if (flush !== Z_BLOCK) { /* FULL_FLUSH or SYNC_FLUSH */
+
+ _tr_stored_block(s, 0, 0, false);
+ /* For a full flush, this empty block will be recognized
+ * as a special marker by inflate_sync().
+ */
+ if (flush === Z_FULL_FLUSH$1) {
+ /*** CLEAR_HASH(s); ***/ /* forget history */
+ zero(s.head); // Fill with NIL (= 0);
+
+ if (s.lookahead === 0) {
+ s.strstart = 0;
+ s.block_start = 0;
+ s.insert = 0;
+ }
+ }
+ }
+ flush_pending(strm);
+ if (strm.avail_out === 0) {
+ s.last_flush = -1; /* avoid BUF_ERROR at next call, see above */
+ return Z_OK$1;
+ }
+ }
+ }
+
+ if (flush !== Z_FINISH$1) { return Z_OK$1; }
+ if (s.wrap <= 0) { return Z_STREAM_END$1; }
+
+ /* Write the trailer */
+ if (s.wrap === 2) {
+ put_byte(s, strm.adler & 0xff);
+ put_byte(s, (strm.adler >> 8) & 0xff);
+ put_byte(s, (strm.adler >> 16) & 0xff);
+ put_byte(s, (strm.adler >> 24) & 0xff);
+ put_byte(s, strm.total_in & 0xff);
+ put_byte(s, (strm.total_in >> 8) & 0xff);
+ put_byte(s, (strm.total_in >> 16) & 0xff);
+ put_byte(s, (strm.total_in >> 24) & 0xff);
+ }
+ else
+ {
+ putShortMSB(s, strm.adler >>> 16);
+ putShortMSB(s, strm.adler & 0xffff);
+ }
+
+ flush_pending(strm);
+ /* If avail_out is zero, the application will call deflate again
+ * to flush the rest.
+ */
+ if (s.wrap > 0) { s.wrap = -s.wrap; }
+ /* write the trailer only once! */
+ return s.pending !== 0 ? Z_OK$1 : Z_STREAM_END$1;
+};
+
+
+const deflateEnd = (strm) => {
+
+ if (deflateStateCheck(strm)) {
+ return Z_STREAM_ERROR;
+ }
+
+ const status = strm.state.status;
+
+ strm.state = null;
+
+ return status === BUSY_STATE ? err(strm, Z_DATA_ERROR) : Z_OK$1;
+};
+
+
+/* =========================================================================
+ * Initializes the compression dictionary from the given byte
+ * sequence without producing any compressed output.
+ */
+const deflateSetDictionary = (strm, dictionary) => {
+
+ let dictLength = dictionary.length;
+
+ if (deflateStateCheck(strm)) {
+ return Z_STREAM_ERROR;
+ }
+
+ const s = strm.state;
+ const wrap = s.wrap;
+
+ if (wrap === 2 || (wrap === 1 && s.status !== INIT_STATE) || s.lookahead) {
+ return Z_STREAM_ERROR;
+ }
+
+ /* when using zlib wrappers, compute Adler-32 for provided dictionary */
+ if (wrap === 1) {
+ /* adler32(strm->adler, dictionary, dictLength); */
+ strm.adler = adler32(strm.adler, dictionary, dictLength, 0);
+ }
+
+ s.wrap = 0; /* avoid computing Adler-32 in read_buf */
+
+ /* if dictionary would fill window, just replace the history */
+ if (dictLength >= s.w_size) {
+ if (wrap === 0) { /* already empty otherwise */
+ /*** CLEAR_HASH(s); ***/
+ zero(s.head); // Fill with NIL (= 0);
+ s.strstart = 0;
+ s.block_start = 0;
+ s.insert = 0;
+ }
+ /* use the tail */
+ // dictionary = dictionary.slice(dictLength - s.w_size);
+ let tmpDict = new Uint8Array(s.w_size);
+ tmpDict.set(dictionary.subarray(dictLength - s.w_size, dictLength), 0);
+ dictionary = tmpDict;
+ dictLength = s.w_size;
+ }
+ /* insert dictionary into window and hash */
+ const avail = strm.avail_in;
+ const next = strm.next_in;
+ const input = strm.input;
+ strm.avail_in = dictLength;
+ strm.next_in = 0;
+ strm.input = dictionary;
+ fill_window(s);
+ while (s.lookahead >= MIN_MATCH) {
+ let str = s.strstart;
+ let n = s.lookahead - (MIN_MATCH - 1);
+ do {
+ /* UPDATE_HASH(s, s->ins_h, s->window[str + MIN_MATCH-1]); */
+ s.ins_h = HASH(s, s.ins_h, s.window[str + MIN_MATCH - 1]);
+
+ s.prev[str & s.w_mask] = s.head[s.ins_h];
+
+ s.head[s.ins_h] = str;
+ str++;
+ } while (--n);
+ s.strstart = str;
+ s.lookahead = MIN_MATCH - 1;
+ fill_window(s);
+ }
+ s.strstart += s.lookahead;
+ s.block_start = s.strstart;
+ s.insert = s.lookahead;
+ s.lookahead = 0;
+ s.match_length = s.prev_length = MIN_MATCH - 1;
+ s.match_available = 0;
+ strm.next_in = next;
+ strm.input = input;
+ strm.avail_in = avail;
+ s.wrap = wrap;
+ return Z_OK$1;
+};
+
+
+deflate$3.deflateInit = deflateInit;
+deflate$3.deflateInit2 = deflateInit2;
+deflate$3.deflateReset = deflateReset;
+deflate$3.deflateResetKeep = deflateResetKeep;
+deflate$3.deflateSetHeader = deflateSetHeader;
+deflate$3.deflate = deflate$2;
+deflate$3.deflateEnd = deflateEnd;
+deflate$3.deflateSetDictionary = deflateSetDictionary;
+deflate$3.deflateInfo = 'pako deflate (from Nodeca project)';
+
+var common = {};
+
+const _has = (obj, key) => {
+ return Object.prototype.hasOwnProperty.call(obj, key);
+};
+
+common.assign = function (obj /*from1, from2, from3, ...*/) {
+ const sources = Array.prototype.slice.call(arguments, 1);
+ while (sources.length) {
+ const source = sources.shift();
+ if (!source) { continue; }
+
+ if (typeof source !== 'object') {
+ throw new TypeError(source + ' must be non-object');
+ }
+
+ for (const p in source) {
+ if (_has(source, p)) {
+ obj[p] = source[p];
+ }
+ }
+ }
+
+ return obj;
+};
+
+
+// Join array of chunks to single array.
+common.flattenChunks = (chunks) => {
+ // calculate data length
+ let len = 0;
+
+ for (let i = 0, l = chunks.length; i < l; i++) {
+ len += chunks[i].length;
+ }
+
+ // join chunks
+ const result = new Uint8Array(len);
+
+ for (let i = 0, pos = 0, l = chunks.length; i < l; i++) {
+ let chunk = chunks[i];
+ result.set(chunk, pos);
+ pos += chunk.length;
+ }
+
+ return result;
+};
+
+var strings$1 = {};
+
+// Quick check if we can use fast array to bin string conversion
+//
+// - apply(Array) can fail on Android 2.2
+// - apply(Uint8Array) can fail on iOS 5.1 Safari
+//
+let STR_APPLY_UIA_OK = true;
+
+try { String.fromCharCode.apply(null, new Uint8Array(1)); } catch (__) { STR_APPLY_UIA_OK = false; }
+
+
+// Table with utf8 lengths (calculated by first byte of sequence)
+// Note that 5- and 6-byte values and some 4-byte values cannot be represented in JS,
+// because the max possible codepoint is 0x10ffff
+const _utf8len = new Uint8Array(256);
+for (let q = 0; q < 256; q++) {
+ _utf8len[q] = (q >= 252 ? 6 : q >= 248 ? 5 : q >= 240 ? 4 : q >= 224 ? 3 : q >= 192 ? 2 : 1);
+}
+_utf8len[254] = _utf8len[254] = 1; // Invalid sequence start
+
+
+// convert string to array (typed, when possible)
+strings$1.string2buf = (str) => {
+ if (typeof TextEncoder === 'function' && TextEncoder.prototype.encode) {
+ return new TextEncoder().encode(str);
+ }
+
+ let buf, c, c2, m_pos, i, str_len = str.length, buf_len = 0;
+
+ // count binary size
+ for (m_pos = 0; m_pos < str_len; m_pos++) {
+ c = str.charCodeAt(m_pos);
+ if ((c & 0xfc00) === 0xd800 && (m_pos + 1 < str_len)) {
+ c2 = str.charCodeAt(m_pos + 1);
+ if ((c2 & 0xfc00) === 0xdc00) {
+ c = 0x10000 + ((c - 0xd800) << 10) + (c2 - 0xdc00);
+ m_pos++;
+ }
+ }
+ buf_len += c < 0x80 ? 1 : c < 0x800 ? 2 : c < 0x10000 ? 3 : 4;
+ }
+
+ // allocate buffer
+ buf = new Uint8Array(buf_len);
+
+ // convert
+ for (i = 0, m_pos = 0; i < buf_len; m_pos++) {
+ c = str.charCodeAt(m_pos);
+ if ((c & 0xfc00) === 0xd800 && (m_pos + 1 < str_len)) {
+ c2 = str.charCodeAt(m_pos + 1);
+ if ((c2 & 0xfc00) === 0xdc00) {
+ c = 0x10000 + ((c - 0xd800) << 10) + (c2 - 0xdc00);
+ m_pos++;
+ }
+ }
+ if (c < 0x80) {
+ /* one byte */
+ buf[i++] = c;
+ } else if (c < 0x800) {
+ /* two bytes */
+ buf[i++] = 0xC0 | (c >>> 6);
+ buf[i++] = 0x80 | (c & 0x3f);
+ } else if (c < 0x10000) {
+ /* three bytes */
+ buf[i++] = 0xE0 | (c >>> 12);
+ buf[i++] = 0x80 | (c >>> 6 & 0x3f);
+ buf[i++] = 0x80 | (c & 0x3f);
+ } else {
+ /* four bytes */
+ buf[i++] = 0xf0 | (c >>> 18);
+ buf[i++] = 0x80 | (c >>> 12 & 0x3f);
+ buf[i++] = 0x80 | (c >>> 6 & 0x3f);
+ buf[i++] = 0x80 | (c & 0x3f);
+ }
+ }
+
+ return buf;
+};
+
+// Helper
+const buf2binstring = (buf, len) => {
+ // On Chrome, the maximum number of arguments allowed in a function call is 65534.
+ // If the buffer is shorter than that, we can use this optimization;
+ // otherwise we take the slower path.
+ if (len < 65534) {
+ if (buf.subarray && STR_APPLY_UIA_OK) {
+ return String.fromCharCode.apply(null, buf.length === len ? buf : buf.subarray(0, len));
+ }
+ }
+
+ let result = '';
+ for (let i = 0; i < len; i++) {
+ result += String.fromCharCode(buf[i]);
+ }
+ return result;
+};
+
+
+// convert array to string
+strings$1.buf2string = (buf, max) => {
+ const len = max || buf.length;
+
+ if (typeof TextDecoder === 'function' && TextDecoder.prototype.decode) {
+ return new TextDecoder().decode(buf.subarray(0, max));
+ }
+
+ let i, out;
+
+ // Reserve max possible length (2 words per char)
+ // NB: for unknown reasons, Array is significantly faster for
+ // String.fromCharCode.apply than Uint16Array.
+ const utf16buf = new Array(len * 2);
+
+ for (out = 0, i = 0; i < len;) {
+ let c = buf[i++];
+ // quick process ascii
+ if (c < 0x80) { utf16buf[out++] = c; continue; }
+
+ let c_len = _utf8len[c];
+ // skip 5 & 6 byte codes
+ if (c_len > 4) { utf16buf[out++] = 0xfffd; i += c_len - 1; continue; }
+
+ // apply mask on first byte
+ c &= c_len === 2 ? 0x1f : c_len === 3 ? 0x0f : 0x07;
+ // join the rest
+ while (c_len > 1 && i < len) {
+ c = (c << 6) | (buf[i++] & 0x3f);
+ c_len--;
+ }
+
+ // terminated by end of string?
+ if (c_len > 1) { utf16buf[out++] = 0xfffd; continue; }
+
+ if (c < 0x10000) {
+ utf16buf[out++] = c;
+ } else {
+ c -= 0x10000;
+ utf16buf[out++] = 0xd800 | ((c >> 10) & 0x3ff);
+ utf16buf[out++] = 0xdc00 | (c & 0x3ff);
+ }
+ }
+
+ return buf2binstring(utf16buf, out);
+};
+
+
+// Calculate the max possible position in a utf8 buffer
+// that will not break a sequence. If that's not possible
+// (very small limits), return the max size as is.
+//
+// buf[] - utf8 bytes array
+// max - length limit (mandatory);
+strings$1.utf8border = (buf, max) => {
+
+ max = max || buf.length;
+ if (max > buf.length) { max = buf.length; }
+
+ // go back from last position, until start of sequence found
+ let pos = max - 1;
+ while (pos >= 0 && (buf[pos] & 0xC0) === 0x80) { pos--; }
+
+ // Very small and broken sequence,
+ // return max, because we should return something anyway.
+ if (pos < 0) { return max; }
+
+ // If we came to start of buffer - that means buffer is too small,
+ // return max too.
+ if (pos === 0) { return max; }
+
+ return (pos + _utf8len[buf[pos]] > max) ? pos : max;
+};
+
+// (C) 1995-2013 Jean-loup Gailly and Mark Adler
+// (C) 2014-2017 Vitaly Puzrin and Andrey Tupitsin
+//
+// This software is provided 'as-is', without any express or implied
+// warranty. In no event will the authors be held liable for any damages
+// arising from the use of this software.
+//
+// Permission is granted to anyone to use this software for any purpose,
+// including commercial applications, and to alter it and redistribute it
+// freely, subject to the following restrictions:
+//
+// 1. The origin of this software must not be misrepresented; you must not
+// claim that you wrote the original software. If you use this software
+// in a product, an acknowledgment in the product documentation would be
+// appreciated but is not required.
+// 2. Altered source versions must be plainly marked as such, and must not be
+// misrepresented as being the original software.
+// 3. This notice may not be removed or altered from any source distribution.
+
+function ZStream$1() {
+ /* next input byte */
+ this.input = null; // JS specific, because we have no pointers
+ this.next_in = 0;
+ /* number of bytes available at input */
+ this.avail_in = 0;
+ /* total number of input bytes read so far */
+ this.total_in = 0;
+ /* next output byte should be put there */
+ this.output = null; // JS specific, because we have no pointers
+ this.next_out = 0;
+ /* remaining free space at output */
+ this.avail_out = 0;
+ /* total number of bytes output so far */
+ this.total_out = 0;
+ /* last error message, NULL if no error */
+ this.msg = ''/*Z_NULL*/;
+ /* not visible by applications */
+ this.state = null;
+ /* best guess about the data type: binary or text */
+ this.data_type = 2/*Z_UNKNOWN*/;
+ /* adler32 value of the uncompressed data */
+ this.adler = 0;
+}
+
+var zstream = ZStream$1;
+
+const zlib_deflate = deflate$3;
+const utils = common;
+const strings = strings$1;
+const msg = messages;
+const ZStream = zstream;
+
+const toString = Object.prototype.toString;
+
+/* Public constants ==========================================================*/
+/* ===========================================================================*/
+
+const {
+ Z_NO_FLUSH, Z_SYNC_FLUSH, Z_FULL_FLUSH, Z_FINISH,
+ Z_OK, Z_STREAM_END,
+ Z_DEFAULT_COMPRESSION,
+ Z_DEFAULT_STRATEGY,
+ Z_DEFLATED
+} = constants$2;
+
+/* ===========================================================================*/
+
+
+/**
+ * class Deflate
+ *
+ * Generic JS-style wrapper for zlib calls. If you don't need
+ * streaming behaviour - use more simple functions: [[deflate]],
+ * [[deflateRaw]] and [[gzip]].
+ **/
+
+/* internal
+ * Deflate.chunks -> Array
+ *
+ * Chunks of output data, if [[Deflate#onData]] not overridden.
+ **/
+
+/**
+ * Deflate.result -> Uint8Array
+ *
+ * Compressed result, generated by default [[Deflate#onData]]
+ * and [[Deflate#onEnd]] handlers. Filled after you push last chunk
+ * (call [[Deflate#push]] with `Z_FINISH` / `true` param).
+ **/
+
+/**
+ * Deflate.err -> Number
+ *
+ * Error code after deflate finished. 0 (Z_OK) on success.
+ * You will not need it in real life, because deflate errors
+ * are possible only on wrong options or bad `onData` / `onEnd`
+ * custom handlers.
+ **/
+
+/**
+ * Deflate.msg -> String
+ *
+ * Error message, if [[Deflate.err]] != 0
+ **/
+
+
+/**
+ * new Deflate(options)
+ * - options (Object): zlib deflate options.
+ *
+ * Creates new deflator instance with specified params. Throws exception
+ * on bad params. Supported options:
+ *
+ * - `level`
+ * - `windowBits`
+ * - `memLevel`
+ * - `strategy`
+ * - `dictionary`
+ *
+ * [http://zlib.net/manual.html#Advanced](http://zlib.net/manual.html#Advanced)
+ * for more information on these.
+ *
+ * Additional options, for internal needs:
+ *
+ * - `chunkSize` - size of generated data chunks (16K by default)
+ * - `raw` (Boolean) - do raw deflate
+ * - `gzip` (Boolean) - create gzip wrapper
+ * - `header` (Object) - custom header for gzip
+ * - `text` (Boolean) - true if compressed data believed to be text
+ * - `time` (Number) - modification time, unix timestamp
+ * - `os` (Number) - operating system code
+ * - `extra` (Array) - array of bytes with extra data (max 65536)
+ * - `name` (String) - file name (binary string)
+ * - `comment` (String) - comment (binary string)
+ * - `hcrc` (Boolean) - true if header crc should be added
+ *
+ * ##### Example:
+ *
+ * ```javascript
+ * const pako = require('pako')
+ * , chunk1 = new Uint8Array([1,2,3,4,5,6,7,8,9])
+ * , chunk2 = new Uint8Array([10,11,12,13,14,15,16,17,18,19]);
+ *
+ * const deflate = new pako.Deflate({ level: 3});
+ *
+ * deflate.push(chunk1, false);
+ * deflate.push(chunk2, true); // true -> last chunk
+ *
+ * if (deflate.err) { throw new Error(deflate.err); }
+ *
+ * console.log(deflate.result);
+ * ```
+ **/
+function Deflate$1(options) {
+ this.options = utils.assign({
+ level: Z_DEFAULT_COMPRESSION,
+ method: Z_DEFLATED,
+ chunkSize: 16384,
+ windowBits: 15,
+ memLevel: 8,
+ strategy: Z_DEFAULT_STRATEGY
+ }, options || {});
+
+ let opt = this.options;
+
+ if (opt.raw && (opt.windowBits > 0)) {
+ opt.windowBits = -opt.windowBits;
+ }
+
+ else if (opt.gzip && (opt.windowBits > 0) && (opt.windowBits < 16)) {
+ opt.windowBits += 16;
+ }
+
+ this.err = 0; // error code, if happens (0 = Z_OK)
+ this.msg = ''; // error message
+ this.ended = false; // used to avoid multiple onEnd() calls
+ this.chunks = []; // chunks of compressed data
+
+ this.strm = new ZStream();
+ this.strm.avail_out = 0;
+
+ let status = zlib_deflate.deflateInit2(
+ this.strm,
+ opt.level,
+ opt.method,
+ opt.windowBits,
+ opt.memLevel,
+ opt.strategy
+ );
+
+ if (status !== Z_OK) {
+ throw new Error(msg[status]);
+ }
+
+ if (opt.header) {
+ zlib_deflate.deflateSetHeader(this.strm, opt.header);
+ }
+
+ if (opt.dictionary) {
+ let dict;
+ // Convert data if needed
+ if (typeof opt.dictionary === 'string') {
+ // If we need to compress text, change encoding to utf8.
+ dict = strings.string2buf(opt.dictionary);
+ } else if (toString.call(opt.dictionary) === '[object ArrayBuffer]') {
+ dict = new Uint8Array(opt.dictionary);
+ } else {
+ dict = opt.dictionary;
+ }
+
+ status = zlib_deflate.deflateSetDictionary(this.strm, dict);
+
+ if (status !== Z_OK) {
+ throw new Error(msg[status]);
+ }
+
+ this._dict_set = true;
+ }
+}
+
+/**
+ * Deflate#push(data[, flush_mode]) -> Boolean
+ * - data (Uint8Array|ArrayBuffer|String): input data. Strings will be
+ * converted to utf8 byte sequence.
+ * - flush_mode (Number|Boolean): 0..6 for corresponding Z_NO_FLUSH..Z_TREE modes.
+ * See constants. Skipped or `false` means Z_NO_FLUSH, `true` means Z_FINISH.
+ *
+ * Sends input data to deflate pipe, generating [[Deflate#onData]] calls with
+ * new compressed chunks. Returns `true` on success. The last data block must
+ * have `flush_mode` Z_FINISH (or `true`). That will flush internal pending
+ * buffers and call [[Deflate#onEnd]].
+ *
+ * On failure, calls [[Deflate#onEnd]] with an error code and returns false.
+ *
+ * ##### Example
+ *
+ * ```javascript
+ * push(chunk, false); // push one of data chunks
+ * ...
+ * push(chunk, true); // push last chunk
+ * ```
+ **/
+Deflate$1.prototype.push = function (data, flush_mode) {
+ const strm = this.strm;
+ const chunkSize = this.options.chunkSize;
+ let status, _flush_mode;
+
+ if (this.ended) { return false; }
+
+ if (flush_mode === ~~flush_mode) _flush_mode = flush_mode;
+ else _flush_mode = flush_mode === true ? Z_FINISH : Z_NO_FLUSH;
+
+ // Convert data if needed
+ if (typeof data === 'string') {
+ // If we need to compress text, change encoding to utf8.
+ strm.input = strings.string2buf(data);
+ } else if (toString.call(data) === '[object ArrayBuffer]') {
+ strm.input = new Uint8Array(data);
+ } else {
+ strm.input = data;
+ }
+
+ strm.next_in = 0;
+ strm.avail_in = strm.input.length;
+
+ for (;;) {
+ if (strm.avail_out === 0) {
+ strm.output = new Uint8Array(chunkSize);
+ strm.next_out = 0;
+ strm.avail_out = chunkSize;
+ }
+
+ // Make sure avail_out > 6 to avoid repeating markers
+ if ((_flush_mode === Z_SYNC_FLUSH || _flush_mode === Z_FULL_FLUSH) && strm.avail_out <= 6) {
+ this.onData(strm.output.subarray(0, strm.next_out));
+ strm.avail_out = 0;
+ continue;
+ }
+
+ status = zlib_deflate.deflate(strm, _flush_mode);
+
+ // Ended => flush and finish
+ if (status === Z_STREAM_END) {
+ if (strm.next_out > 0) {
+ this.onData(strm.output.subarray(0, strm.next_out));
+ }
+ status = zlib_deflate.deflateEnd(this.strm);
+ this.onEnd(status);
+ this.ended = true;
+ return status === Z_OK;
+ }
+
+ // Flush if out buffer full
+ if (strm.avail_out === 0) {
+ this.onData(strm.output);
+ continue;
+ }
+
+ // Flush if requested and has data
+ if (_flush_mode > 0 && strm.next_out > 0) {
+ this.onData(strm.output.subarray(0, strm.next_out));
+ strm.avail_out = 0;
+ continue;
+ }
+
+ if (strm.avail_in === 0) break;
+ }
+
+ return true;
+};
+
+
+/**
+ * Deflate#onData(chunk) -> Void
+ * - chunk (Uint8Array): output data.
+ *
+ * By default, stores data blocks in `chunks[]` property and glue
+ * those in `onEnd`. Override this handler, if you need another behaviour.
+ **/
+Deflate$1.prototype.onData = function (chunk) {
+ this.chunks.push(chunk);
+};
+
+
+/**
+ * Deflate#onEnd(status) -> Void
+ * - status (Number): deflate status. 0 (Z_OK) on success,
+ * other if not.
+ *
+ * Called once after you tell deflate that the input stream is
+ * complete (Z_FINISH). By default, joins the collected chunks,
+ * frees memory and fills the `result` / `err` properties.
+ **/
+Deflate$1.prototype.onEnd = function (status) {
+ // On success - join
+ if (status === Z_OK) {
+ this.result = utils.flattenChunks(this.chunks);
+ }
+ this.chunks = [];
+ this.err = status;
+ this.msg = this.strm.msg;
+};
+
+
+/**
+ * deflate(data[, options]) -> Uint8Array
+ * - data (Uint8Array|ArrayBuffer|String): input data to compress.
+ * - options (Object): zlib deflate options.
+ *
+ * Compress `data` with deflate algorithm and `options`.
+ *
+ * Supported options are:
+ *
+ * - level
+ * - windowBits
+ * - memLevel
+ * - strategy
+ * - dictionary
+ *
+ * [http://zlib.net/manual.html#Advanced](http://zlib.net/manual.html#Advanced)
+ * for more information on these.
+ *
+ * Sugar (options):
+ *
+ * - `raw` (Boolean) - say that we work with raw stream, if you don't wish to specify
+ * negative windowBits implicitly.
+ *
+ * ##### Example:
+ *
+ * ```javascript
+ * const pako = require('pako')
+ * const data = new Uint8Array([1,2,3,4,5,6,7,8,9]);
+ *
+ * console.log(pako.deflate(data));
+ * ```
+ **/
+function deflate$1(input, options) {
+ const deflator = new Deflate$1(options);
+
+ deflator.push(input, true);
+
+ // That will never happen, if you don't cheat with options :)
+ if (deflator.err) { throw deflator.msg || msg[deflator.err]; }
+
+ return deflator.result;
+}
+
+
+var Deflate_1 = Deflate$1;
+var deflate_2 = deflate$1;
+var constants$1 = constants$2;
+
+// @ts-expect-error no types here
+// NOTE: We have to do this workaround because, by default,
+// pako does not tree-shake when importing from 'pako'.
+// To get proper tree shaking, we have to import from 'pako/lib/deflate.js',
+// which is not great, but works.
+// Types come from @types/pako, so we can safely use them by casting.
+const Deflate = Deflate_1;
+const deflate = deflate_2;
+const constants = constants$1;
+/**
+ * A stateful compressor that can be used to batch compress events.
+ */
+class Compressor {
+ constructor() {
+ this._init();
+ }
+ /**
+ * Clear the compressor buffer.
+ */
+ clear() {
+ this._init();
+ }
+ /**
+ * Add an event to the compressor buffer.
+ */
+ addEvent(data) {
+ if (!data) {
+ throw new Error('Adding invalid event');
+ }
+ // If the event is not the first event, we need to prefix it with a `,` so
+ // that we end up with a list of events
+ const prefix = this._hasEvents ? ',' : '';
+ // TODO: We may want Z_FULL_FLUSH instead (not sure of the difference).
+ // Using Z_SYNC_FLUSH here for now, as it keeps the compressed buffer complete after
+ // each event while we batch everything into a single attachment (creating many
+ // attachments would get our web UI API rate limited).
+ this.deflate.push(prefix + data, constants.Z_SYNC_FLUSH);
+ this._hasEvents = true;
+ }
+ /**
+ * Finish compression of the current buffer.
+ */
+ finish() {
+ // We should always have a list, it can be empty
+ this.deflate.push(']', constants.Z_FINISH);
+ if (this.deflate.err) {
+ throw this.deflate.err;
+ }
+ // Copy result before we create a new deflator and return the compressed
+ // result
+ const result = this.deflate.result;
+ this._init();
+ return result;
+ }
+ /**
+ * Re-initialize the compressor buffer.
+ */
+ _init() {
+ this._hasEvents = false;
+ this.deflate = new Deflate();
+ // Fake an array by adding a `[`
+ this.deflate.push('[', constants.Z_NO_FLUSH);
+ }
+}
+/**
+ * Compress a string.
+ */
+function compress(data) {
+ return deflate(data);
+}
+
+/* eslint-disable @typescript-eslint/no-unsafe-member-access */
+const compressor = new Compressor();
+const handlers = {
+ clear: () => {
+ compressor.clear();
+ },
+ addEvent: (data) => {
+ return compressor.addEvent(data);
+ },
+ finish: () => {
+ return compressor.finish();
+ },
+ compress: (data) => {
+ return compress(data);
+ },
+};
+/**
+ * Handler for worker messages.
+ */
+function handleMessage(e) {
+ const method = e.data.method;
+ const id = e.data.id;
+ const data = e.data.arg;
+ // @ts-expect-error this syntax is actually fine
+ if (method in handlers && typeof handlers[method] === 'function') {
+ try {
+ // @ts-expect-error this syntax is actually fine
+ const response = handlers[method](data);
+ postMessage({
+ id,
+ method,
+ success: true,
+ response,
+ });
+ }
+ catch (err) {
+ postMessage({
+ id,
+ method,
+ success: false,
+ response: err.message,
+ });
+ // eslint-disable-next-line no-console
+ console.error(err);
+ }
+ }
+}
+
+addEventListener('message', handleMessage);
+// Immediately send a message when worker loads, so we know the worker is ready
+postMessage({
+ id: undefined,
+ method: 'init',
+ success: true,
+ response: undefined,
+});
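
For reference, the worker above speaks a small request/response protocol over postMessage: each incoming message carries an `id`, a `method` (`clear`, `addEvent`, `finish` or `compress`) and an optional `arg`, and the worker replies with `{ id, method, success, response }`, plus one unsolicited `init` message on load. A minimal sketch of how a page could drive it directly; the worker path, ids and event payload below are illustrative only, not part of this change:

const worker = new Worker('/assets/replay-worker.js'); // hypothetical self-hosted path

worker.addEventListener('message', ({ data }) => {
  if (data.method === 'init') {
    // Worker is ready: queue an event, then finish to get the compressed payload.
    worker.postMessage({ id: 1, method: 'addEvent', arg: JSON.stringify({ type: 3, data: {} }) });
    worker.postMessage({ id: 2, method: 'finish' });
  } else if (data.id === 2 && data.success) {
    // `response` is a deflate-compressed Uint8Array that inflates to a JSON array of events.
    console.log('compressed size:', (data.response as Uint8Array).byteLength);
  }
});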
diff --git a/packages/replay-worker/examples/worker.min.js b/packages/replay-worker/examples/worker.min.js
new file mode 100644
index 000000000000..29d33d15d3bb
--- /dev/null
+++ b/packages/replay-worker/examples/worker.min.js
@@ -0,0 +1,2 @@
+/*! Sentry Replay Worker 7.76.0 (de39290bc) | https://github.com/getsentry/sentry-javascript */
+var t={},e={};function a(t){let e=t.length;for(;--e>=0;)t[e]=0}const s=new Uint8Array([0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,0]),r=new Uint8Array([0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13]),n=new Uint8Array([0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,3,7]),i=new Uint8Array([16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15]),_=new Array(576);a(_);const l=new Array(60);a(l);const h=new Array(512);a(h);const o=new Array(256);a(o);const d=new Array(29);a(d);const u=new Array(30);function f(t,e,a,s,r){this.static_tree=t,this.extra_bits=e,this.extra_base=a,this.elems=s,this.max_length=r,this.has_stree=t&&t.length}let c,p,g;function w(t,e){this.dyn_tree=t,this.max_code=0,this.stat_desc=e}a(u);const m=t=>t<256?h[t]:h[256+(t>>>7)],b=(t,e)=>{t.pending_buf[t.pending++]=255&e,t.pending_buf[t.pending++]=e>>>8&255},y=(t,e,a)=>{t.bi_valid>16-a?(t.bi_buf|=e<>16-t.bi_valid,t.bi_valid+=a-16):(t.bi_buf|=e<{y(t,a[2*e],a[2*e+1])},z=(t,e)=>{let a=0;do{a|=1&t,t>>>=1,a<<=1}while(--e>0);return a>>>1},k=(t,e,a)=>{const s=new Array(16);let r,n,i=0;for(r=1;r<=15;r++)i=i+a[r-1]<<1,s[r]=i;for(n=0;n<=e;n++){let e=t[2*n+1];0!==e&&(t[2*n]=z(s[e]++,e))}},x=t=>{let e;for(e=0;e<286;e++)t.dyn_ltree[2*e]=0;for(e=0;e<30;e++)t.dyn_dtree[2*e]=0;for(e=0;e<19;e++)t.bl_tree[2*e]=0;t.dyn_ltree[512]=1,t.opt_len=t.static_len=0,t.sym_next=t.matches=0},A=t=>{t.bi_valid>8?b(t,t.bi_buf):t.bi_valid>0&&(t.pending_buf[t.pending++]=t.bi_buf),t.bi_buf=0,t.bi_valid=0},E=(t,e,a,s)=>{const r=2*e,n=2*a;return t[r]{const s=t.heap[a];let r=a<<1;for(;r<=t.heap_len&&(r{let n,i,_,l,h=0;if(0!==t.sym_next)do{n=255&t.pending_buf[t.sym_buf+h++],n+=(255&t.pending_buf[t.sym_buf+h++])<<8,i=t.pending_buf[t.sym_buf+h++],0===n?v(t,i,e):(_=o[i],v(t,_+256+1,e),l=s[_],0!==l&&(i-=d[_],y(t,i,l)),n--,_=m(n),v(t,_,a),l=r[_],0!==l&&(n-=u[_],y(t,n,l)))}while(h{const a=e.dyn_tree,s=e.stat_desc.static_tree,r=e.stat_desc.has_stree,n=e.stat_desc.elems;let i,_,l,h=-1;for(t.heap_len=0,t.heap_max=573,i=0;i>1;i>=1;i--)Z(t,a,i);l=n;do{i=t.heap[1],t.heap[1]=t.heap[t.heap_len--],Z(t,a,1),_=t.heap[1],t.heap[--t.heap_max]=i,t.heap[--t.heap_max]=_,a[2*l]=a[2*i]+a[2*_],t.depth[l]=(t.depth[i]>=t.depth[_]?t.depth[i]:t.depth[_])+1,a[2*i+1]=a[2*_+1]=l,t.heap[1]=l++,Z(t,a,1)}while(t.heap_len>=2);t.heap[--t.heap_max]=t.heap[1],((t,e)=>{const a=e.dyn_tree,s=e.max_code,r=e.stat_desc.static_tree,n=e.stat_desc.has_stree,i=e.stat_desc.extra_bits,_=e.stat_desc.extra_base,l=e.stat_desc.max_length;let h,o,d,u,f,c,p=0;for(u=0;u<=15;u++)t.bl_count[u]=0;for(a[2*t.heap[t.heap_max]+1]=0,h=t.heap_max+1;h<573;h++)o=t.heap[h],u=a[2*a[2*o+1]+1]+1,u>l&&(u=l,p++),a[2*o+1]=u,o>s||(t.bl_count[u]++,f=0,o>=_&&(f=i[o-_]),c=a[2*o],t.opt_len+=c*(u+f),n&&(t.static_len+=c*(r[2*o+1]+f)));if(0!==p){do{for(u=l-1;0===t.bl_count[u];)u--;t.bl_count[u]--,t.bl_count[u+1]+=2,t.bl_count[l]--,p-=2}while(p>0);for(u=l;0!==u;u--)for(o=t.bl_count[u];0!==o;)d=t.heap[--h],d>s||(a[2*d+1]!==u&&(t.opt_len+=(u-a[2*d+1])*a[2*d],a[2*d+1]=u),o--)}})(t,e),k(a,h,t.bl_count)},R=(t,e,a)=>{let s,r,n=-1,i=e[1],_=0,l=7,h=4;for(0===i&&(l=138,h=3),e[2*(a+1)+1]=65535,s=0;s<=a;s++)r=i,i=e[2*(s+1)+1],++_{let s,r,n=-1,i=e[1],_=0,l=7,h=4;for(0===i&&(l=138,h=3),s=0;s<=a;s++)if(r=i,i=e[2*(s+1)+1],!(++_{y(t,0+(s?1:0),3),A(t),b(t,a),b(t,~a),a&&t.pending_buf.set(t.window.subarray(e,e+a),t.pending),t.pending+=a};e._tr_init=t=>{T||((()=>{let t,e,a,i,w;const m=new Array(16);for(a=0,i=0;i<28;i++)for(d[i]=a,t=0;t<1<>=7;i<30;i++)for(u[i]=w<<7,t=0;t<1<{let r,n,h=0;t.level>0?(2===t.strm.data_type&&(t.strm.data_type=(t=>{let 
e,a=4093624447;for(e=0;e<=31;e++,a>>>=1)if(1&a&&0!==t.dyn_ltree[2*e])return 0;if(0!==t.dyn_ltree[18]||0!==t.dyn_ltree[20]||0!==t.dyn_ltree[26])return 1;for(e=32;e<256;e++)if(0!==t.dyn_ltree[2*e])return 1;return 0})(t)),U(t,t.l_desc),U(t,t.d_desc),h=(t=>{let e;for(R(t,t.dyn_ltree,t.l_desc.max_code),R(t,t.dyn_dtree,t.d_desc.max_code),U(t,t.bl_desc),e=18;e>=3&&0===t.bl_tree[2*i[e]+1];e--);return t.opt_len+=3*(e+1)+5+5+4,e})(t),r=t.opt_len+3+7>>>3,n=t.static_len+3+7>>>3,n<=r&&(r=n)):r=n=a+5,a+4<=r&&-1!==e?F(t,e,a,s):4===t.strategy||n===r?(y(t,2+(s?1:0),3),S(t,_,l)):(y(t,4+(s?1:0),3),((t,e,a,s)=>{let r;for(y(t,e-257,5),y(t,a-1,5),y(t,s-4,4),r=0;r(t.pending_buf[t.sym_buf+t.sym_next++]=e,t.pending_buf[t.sym_buf+t.sym_next++]=e>>8,t.pending_buf[t.sym_buf+t.sym_next++]=a,0===e?t.dyn_ltree[2*a]++:(t.matches++,e--,t.dyn_ltree[2*(o[a]+256+1)]++,t.dyn_dtree[2*m(e)]++),t.sym_next===t.sym_end),e._tr_align=t=>{y(t,2,3),v(t,256,_),(t=>{16===t.bi_valid?(b(t,t.bi_buf),t.bi_buf=0,t.bi_valid=0):t.bi_valid>=8&&(t.pending_buf[t.pending++]=255&t.bi_buf,t.bi_buf>>=8,t.bi_valid-=8)})(t)};var O=(t,e,a,s)=>{let r=65535&t|0,n=t>>>16&65535|0,i=0;for(;0!==a;){i=a>2e3?2e3:a,a-=i;do{r=r+e[s++]|0,n=n+r|0}while(--i);r%=65521,n%=65521}return r|n<<16|0};const D=new Uint32Array((()=>{let t,e=[];for(var a=0;a<256;a++){t=a;for(var s=0;s<8;s++)t=1&t?3988292384^t>>>1:t>>>1;e[a]=t}return e})());var N=(t,e,a,s)=>{const r=D,n=s+a;t^=-1;for(let a=s;a>>8^r[255&(t^e[a])];return-1^t},C={2:"need dictionary",1:"stream end",0:"","-1":"file error","-2":"stream error","-3":"data error","-4":"insufficient memory","-5":"buffer error","-6":"incompatible version"},I={Z_NO_FLUSH:0,Z_PARTIAL_FLUSH:1,Z_SYNC_FLUSH:2,Z_FULL_FLUSH:3,Z_FINISH:4,Z_BLOCK:5,Z_TREES:6,Z_OK:0,Z_STREAM_END:1,Z_NEED_DICT:2,Z_ERRNO:-1,Z_STREAM_ERROR:-2,Z_DATA_ERROR:-3,Z_MEM_ERROR:-4,Z_BUF_ERROR:-5,Z_NO_COMPRESSION:0,Z_BEST_SPEED:1,Z_BEST_COMPRESSION:9,Z_DEFAULT_COMPRESSION:-1,Z_FILTERED:1,Z_HUFFMAN_ONLY:2,Z_RLE:3,Z_FIXED:4,Z_DEFAULT_STRATEGY:0,Z_BINARY:0,Z_TEXT:1,Z_UNKNOWN:2,Z_DEFLATED:8};const{_tr_init:H,_tr_stored_block:B,_tr_flush_block:M,_tr_tally:P,_tr_align:Y}=e,K=O,j=N,G=C,{Z_NO_FLUSH:X,Z_PARTIAL_FLUSH:W,Z_FULL_FLUSH:q,Z_FINISH:J,Z_BLOCK:Q,Z_OK:V,Z_STREAM_END:$,Z_STREAM_ERROR:tt,Z_DATA_ERROR:et,Z_BUF_ERROR:at,Z_DEFAULT_COMPRESSION:st,Z_FILTERED:rt,Z_HUFFMAN_ONLY:nt,Z_RLE:it,Z_FIXED:_t,Z_DEFAULT_STRATEGY:lt,Z_UNKNOWN:ht,Z_DEFLATED:ot}=I,dt=(t,e)=>(t.msg=G[e],e),ut=t=>2*t-(t>4?9:0),ft=t=>{let e=t.length;for(;--e>=0;)t[e]=0},ct=t=>{let e,a,s,r=t.w_size;e=t.hash_size,s=e;do{a=t.head[--s],t.head[s]=a>=r?a-r:0}while(--e);e=r,s=e;do{a=t.prev[--s],t.prev[s]=a>=r?a-r:0}while(--e)};let pt=(t,e,a)=>(e<{const e=t.state;let a=e.pending;a>t.avail_out&&(a=t.avail_out),0!==a&&(t.output.set(e.pending_buf.subarray(e.pending_out,e.pending_out+a),t.next_out),t.next_out+=a,e.pending_out+=a,t.total_out+=a,t.avail_out-=a,e.pending-=a,0===e.pending&&(e.pending_out=0))},wt=(t,e)=>{M(t,t.block_start>=0?t.block_start:-1,t.strstart-t.block_start,e),t.block_start=t.strstart,gt(t.strm)},mt=(t,e)=>{t.pending_buf[t.pending++]=e},bt=(t,e)=>{t.pending_buf[t.pending++]=e>>>8&255,t.pending_buf[t.pending++]=255&e},yt=(t,e,a,s)=>{let r=t.avail_in;return r>s&&(r=s),0===r?0:(t.avail_in-=r,e.set(t.input.subarray(t.next_in,t.next_in+r),a),1===t.state.wrap?t.adler=K(t.adler,e,r,a):2===t.state.wrap&&(t.adler=j(t.adler,e,r,a)),t.next_in+=r,t.total_in+=r,r)},vt=(t,e)=>{let a,s,r=t.max_chain_length,n=t.strstart,i=t.prev_length,_=t.nice_match;const 
l=t.strstart>t.w_size-262?t.strstart-(t.w_size-262):0,h=t.window,o=t.w_mask,d=t.prev,u=t.strstart+258;let f=h[n+i-1],c=h[n+i];t.prev_length>=t.good_match&&(r>>=2),_>t.lookahead&&(_=t.lookahead);do{if(a=e,h[a+i]===c&&h[a+i-1]===f&&h[a]===h[n]&&h[++a]===h[n+1]){n+=2,a++;do{}while(h[++n]===h[++a]&&h[++n]===h[++a]&&h[++n]===h[++a]&&h[++n]===h[++a]&&h[++n]===h[++a]&&h[++n]===h[++a]&&h[++n]===h[++a]&&h[++n]===h[++a]&&ni){if(t.match_start=e,i=s,s>=_)break;f=h[n+i-1],c=h[n+i]}}}while((e=d[e&o])>l&&0!=--r);return i<=t.lookahead?i:t.lookahead},zt=t=>{const e=t.w_size;let a,s,r;do{if(s=t.window_size-t.lookahead-t.strstart,t.strstart>=e+(e-262)&&(t.window.set(t.window.subarray(e,e+e-s),0),t.match_start-=e,t.strstart-=e,t.block_start-=e,t.insert>t.strstart&&(t.insert=t.strstart),ct(t),s+=e),0===t.strm.avail_in)break;if(a=yt(t.strm,t.window,t.strstart+t.lookahead,s),t.lookahead+=a,t.lookahead+t.insert>=3)for(r=t.strstart-t.insert,t.ins_h=t.window[r],t.ins_h=pt(t,t.ins_h,t.window[r+1]);t.insert&&(t.ins_h=pt(t,t.ins_h,t.window[r+3-1]),t.prev[r&t.w_mask]=t.head[t.ins_h],t.head[t.ins_h]=r,r++,t.insert--,!(t.lookahead+t.insert<3)););}while(t.lookahead<262&&0!==t.strm.avail_in)},kt=(t,e)=>{let a,s,r,n=t.pending_buf_size-5>t.w_size?t.w_size:t.pending_buf_size-5,i=0,_=t.strm.avail_in;do{if(a=65535,r=t.bi_valid+42>>3,t.strm.avail_outs+t.strm.avail_in&&(a=s+t.strm.avail_in),a>r&&(a=r),a>8,t.pending_buf[t.pending-2]=~a,t.pending_buf[t.pending-1]=~a>>8,gt(t.strm),s&&(s>a&&(s=a),t.strm.output.set(t.window.subarray(t.block_start,t.block_start+s),t.strm.next_out),t.strm.next_out+=s,t.strm.avail_out-=s,t.strm.total_out+=s,t.block_start+=s,a-=s),a&&(yt(t.strm,t.strm.output,t.strm.next_out,a),t.strm.next_out+=a,t.strm.avail_out-=a,t.strm.total_out+=a)}while(0===i);return _-=t.strm.avail_in,_&&(_>=t.w_size?(t.matches=2,t.window.set(t.strm.input.subarray(t.strm.next_in-t.w_size,t.strm.next_in),0),t.strstart=t.w_size,t.insert=t.strstart):(t.window_size-t.strstart<=_&&(t.strstart-=t.w_size,t.window.set(t.window.subarray(t.w_size,t.w_size+t.strstart),0),t.matches<2&&t.matches++,t.insert>t.strstart&&(t.insert=t.strstart)),t.window.set(t.strm.input.subarray(t.strm.next_in-_,t.strm.next_in),t.strstart),t.strstart+=_,t.insert+=_>t.w_size-t.insert?t.w_size-t.insert:_),t.block_start=t.strstart),t.high_waterr&&t.block_start>=t.w_size&&(t.block_start-=t.w_size,t.strstart-=t.w_size,t.window.set(t.window.subarray(t.w_size,t.w_size+t.strstart),0),t.matches<2&&t.matches++,r+=t.w_size,t.insert>t.strstart&&(t.insert=t.strstart)),r>t.strm.avail_in&&(r=t.strm.avail_in),r&&(yt(t.strm,t.window,t.strstart,r),t.strstart+=r,t.insert+=r>t.w_size-t.insert?t.w_size-t.insert:r),t.high_water>3,r=t.pending_buf_size-r>65535?65535:t.pending_buf_size-r,n=r>t.w_size?t.w_size:r,s=t.strstart-t.block_start,(s>=n||(s||e===J)&&e!==X&&0===t.strm.avail_in&&s<=r)&&(a=s>r?r:s,i=e===J&&0===t.strm.avail_in&&a===s?1:0,B(t,t.block_start,a,i),t.block_start+=a,gt(t.strm)),i?3:1)},xt=(t,e)=>{let a,s;for(;;){if(t.lookahead<262){if(zt(t),t.lookahead<262&&e===X)return 
1;if(0===t.lookahead)break}if(a=0,t.lookahead>=3&&(t.ins_h=pt(t,t.ins_h,t.window[t.strstart+3-1]),a=t.prev[t.strstart&t.w_mask]=t.head[t.ins_h],t.head[t.ins_h]=t.strstart),0!==a&&t.strstart-a<=t.w_size-262&&(t.match_length=vt(t,a)),t.match_length>=3)if(s=P(t,t.strstart-t.match_start,t.match_length-3),t.lookahead-=t.match_length,t.match_length<=t.max_lazy_match&&t.lookahead>=3){t.match_length--;do{t.strstart++,t.ins_h=pt(t,t.ins_h,t.window[t.strstart+3-1]),a=t.prev[t.strstart&t.w_mask]=t.head[t.ins_h],t.head[t.ins_h]=t.strstart}while(0!=--t.match_length);t.strstart++}else t.strstart+=t.match_length,t.match_length=0,t.ins_h=t.window[t.strstart],t.ins_h=pt(t,t.ins_h,t.window[t.strstart+1]);else s=P(t,0,t.window[t.strstart]),t.lookahead--,t.strstart++;if(s&&(wt(t,!1),0===t.strm.avail_out))return 1}return t.insert=t.strstart<2?t.strstart:2,e===J?(wt(t,!0),0===t.strm.avail_out?3:4):t.sym_next&&(wt(t,!1),0===t.strm.avail_out)?1:2},At=(t,e)=>{let a,s,r;for(;;){if(t.lookahead<262){if(zt(t),t.lookahead<262&&e===X)return 1;if(0===t.lookahead)break}if(a=0,t.lookahead>=3&&(t.ins_h=pt(t,t.ins_h,t.window[t.strstart+3-1]),a=t.prev[t.strstart&t.w_mask]=t.head[t.ins_h],t.head[t.ins_h]=t.strstart),t.prev_length=t.match_length,t.prev_match=t.match_start,t.match_length=2,0!==a&&t.prev_length4096)&&(t.match_length=2)),t.prev_length>=3&&t.match_length<=t.prev_length){r=t.strstart+t.lookahead-3,s=P(t,t.strstart-1-t.prev_match,t.prev_length-3),t.lookahead-=t.prev_length-1,t.prev_length-=2;do{++t.strstart<=r&&(t.ins_h=pt(t,t.ins_h,t.window[t.strstart+3-1]),a=t.prev[t.strstart&t.w_mask]=t.head[t.ins_h],t.head[t.ins_h]=t.strstart)}while(0!=--t.prev_length);if(t.match_available=0,t.match_length=2,t.strstart++,s&&(wt(t,!1),0===t.strm.avail_out))return 1}else if(t.match_available){if(s=P(t,0,t.window[t.strstart-1]),s&&wt(t,!1),t.strstart++,t.lookahead--,0===t.strm.avail_out)return 1}else t.match_available=1,t.strstart++,t.lookahead--}return t.match_available&&(s=P(t,0,t.window[t.strstart-1]),t.match_available=0),t.insert=t.strstart<2?t.strstart:2,e===J?(wt(t,!0),0===t.strm.avail_out?3:4):t.sym_next&&(wt(t,!1),0===t.strm.avail_out)?1:2};function Et(t,e,a,s,r){this.good_length=t,this.max_lazy=e,this.nice_length=a,this.max_chain=s,this.func=r}const Zt=[new Et(0,0,0,0,kt),new Et(4,4,8,4,xt),new Et(4,5,16,8,xt),new Et(4,6,32,32,xt),new Et(4,4,16,16,At),new Et(8,16,32,32,At),new Et(8,16,128,128,At),new Et(8,32,128,256,At),new Et(32,128,258,1024,At),new Et(32,258,258,4096,At)];function St(){this.strm=null,this.status=0,this.pending_buf=null,this.pending_buf_size=0,this.pending_out=0,this.pending=0,this.wrap=0,this.gzhead=null,this.gzindex=0,this.method=ot,this.last_flush=-1,this.w_size=0,this.w_bits=0,this.w_mask=0,this.window=null,this.window_size=0,this.prev=null,this.head=null,this.ins_h=0,this.hash_size=0,this.hash_bits=0,this.hash_mask=0,this.hash_shift=0,this.block_start=0,this.match_length=0,this.prev_match=0,this.match_available=0,this.strstart=0,this.match_start=0,this.lookahead=0,this.prev_length=0,this.max_chain_length=0,this.max_lazy_match=0,this.level=0,this.strategy=0,this.good_match=0,this.nice_match=0,this.dyn_ltree=new Uint16Array(1146),this.dyn_dtree=new Uint16Array(122),this.bl_tree=new Uint16Array(78),ft(this.dyn_ltree),ft(this.dyn_dtree),ft(this.bl_tree),this.l_desc=null,this.d_desc=null,this.bl_desc=null,this.bl_count=new Uint16Array(16),this.heap=new Uint16Array(573),ft(this.heap),this.heap_len=0,this.heap_max=0,this.depth=new 
Uint16Array(573),ft(this.depth),this.sym_buf=0,this.lit_bufsize=0,this.sym_next=0,this.sym_end=0,this.opt_len=0,this.static_len=0,this.matches=0,this.insert=0,this.bi_buf=0,this.bi_valid=0}const Ut=t=>{if(!t)return 1;const e=t.state;return!e||e.strm!==t||42!==e.status&&57!==e.status&&69!==e.status&&73!==e.status&&91!==e.status&&103!==e.status&&113!==e.status&&666!==e.status?1:0},Rt=t=>{if(Ut(t))return dt(t,tt);t.total_in=t.total_out=0,t.data_type=ht;const e=t.state;return e.pending=0,e.pending_out=0,e.wrap<0&&(e.wrap=-e.wrap),e.status=2===e.wrap?57:e.wrap?42:113,t.adler=2===e.wrap?0:1,e.last_flush=-2,H(e),V},Lt=t=>{const e=Rt(t);var a;return e===V&&((a=t.state).window_size=2*a.w_size,ft(a.head),a.max_lazy_match=Zt[a.level].max_lazy,a.good_match=Zt[a.level].good_length,a.nice_match=Zt[a.level].nice_length,a.max_chain_length=Zt[a.level].max_chain,a.strstart=0,a.block_start=0,a.lookahead=0,a.insert=0,a.match_length=a.prev_length=2,a.match_available=0,a.ins_h=0),e},Tt=(t,e,a,s,r,n)=>{if(!t)return tt;let i=1;if(e===st&&(e=6),s<0?(i=0,s=-s):s>15&&(i=2,s-=16),r<1||r>9||a!==ot||s<8||s>15||e<0||e>9||n<0||n>_t||8===s&&1!==i)return dt(t,tt);8===s&&(s=9);const _=new St;return t.state=_,_.strm=t,_.status=42,_.wrap=i,_.gzhead=null,_.w_bits=s,_.w_size=1<<_.w_bits,_.w_mask=_.w_size-1,_.hash_bits=r+7,_.hash_size=1<<_.hash_bits,_.hash_mask=_.hash_size-1,_.hash_shift=~~((_.hash_bits+3-1)/3),_.window=new Uint8Array(2*_.w_size),_.head=new Uint16Array(_.hash_size),_.prev=new Uint16Array(_.w_size),_.lit_bufsize=1<Tt(t,e,ot,15,8,lt),t.deflateInit2=Tt,t.deflateReset=Lt,t.deflateResetKeep=Rt,t.deflateSetHeader=(t,e)=>Ut(t)||2!==t.state.wrap?tt:(t.state.gzhead=e,V),t.deflate=(t,e)=>{if(Ut(t)||e>Q||e<0)return t?dt(t,tt):tt;const a=t.state;if(!t.output||0!==t.avail_in&&!t.input||666===a.status&&e!==J)return dt(t,0===t.avail_out?at:tt);const s=a.last_flush;if(a.last_flush=e,0!==a.pending){if(gt(t),0===t.avail_out)return a.last_flush=-1,V}else if(0===t.avail_in&&ut(e)<=ut(s)&&e!==J)return dt(t,at);if(666===a.status&&0!==t.avail_in)return dt(t,at);if(42===a.status&&0===a.wrap&&(a.status=113),42===a.status){let e=ot+(a.w_bits-8<<4)<<8,s=-1;if(s=a.strategy>=nt||a.level<2?0:a.level<6?1:6===a.level?2:3,e|=s<<6,0!==a.strstart&&(e|=32),e+=31-e%31,bt(a,e),0!==a.strstart&&(bt(a,t.adler>>>16),bt(a,65535&t.adler)),t.adler=1,a.status=113,gt(t),0!==a.pending)return a.last_flush=-1,V}if(57===a.status)if(t.adler=0,mt(a,31),mt(a,139),mt(a,8),a.gzhead)mt(a,(a.gzhead.text?1:0)+(a.gzhead.hcrc?2:0)+(a.gzhead.extra?4:0)+(a.gzhead.name?8:0)+(a.gzhead.comment?16:0)),mt(a,255&a.gzhead.time),mt(a,a.gzhead.time>>8&255),mt(a,a.gzhead.time>>16&255),mt(a,a.gzhead.time>>24&255),mt(a,9===a.level?2:a.strategy>=nt||a.level<2?4:0),mt(a,255&a.gzhead.os),a.gzhead.extra&&a.gzhead.extra.length&&(mt(a,255&a.gzhead.extra.length),mt(a,a.gzhead.extra.length>>8&255)),a.gzhead.hcrc&&(t.adler=j(t.adler,a.pending_buf,a.pending,0)),a.gzindex=0,a.status=69;else if(mt(a,0),mt(a,0),mt(a,0),mt(a,0),mt(a,0),mt(a,9===a.level?2:a.strategy>=nt||a.level<2?4:0),mt(a,3),a.status=113,gt(t),0!==a.pending)return a.last_flush=-1,V;if(69===a.status){if(a.gzhead.extra){let e=a.pending,s=(65535&a.gzhead.extra.length)-a.gzindex;for(;a.pending+s>a.pending_buf_size;){let r=a.pending_buf_size-a.pending;if(a.pending_buf.set(a.gzhead.extra.subarray(a.gzindex,a.gzindex+r),a.pending),a.pending=a.pending_buf_size,a.gzhead.hcrc&&a.pending>e&&(t.adler=j(t.adler,a.pending_buf,a.pending-e,e)),a.gzindex+=r,gt(t),0!==a.pending)return a.last_flush=-1,V;e=0,s-=r}let r=new 
Uint8Array(a.gzhead.extra);a.pending_buf.set(r.subarray(a.gzindex,a.gzindex+s),a.pending),a.pending+=s,a.gzhead.hcrc&&a.pending>e&&(t.adler=j(t.adler,a.pending_buf,a.pending-e,e)),a.gzindex=0}a.status=73}if(73===a.status){if(a.gzhead.name){let e,s=a.pending;do{if(a.pending===a.pending_buf_size){if(a.gzhead.hcrc&&a.pending>s&&(t.adler=j(t.adler,a.pending_buf,a.pending-s,s)),gt(t),0!==a.pending)return a.last_flush=-1,V;s=0}e=a.gzindexs&&(t.adler=j(t.adler,a.pending_buf,a.pending-s,s)),a.gzindex=0}a.status=91}if(91===a.status){if(a.gzhead.comment){let e,s=a.pending;do{if(a.pending===a.pending_buf_size){if(a.gzhead.hcrc&&a.pending>s&&(t.adler=j(t.adler,a.pending_buf,a.pending-s,s)),gt(t),0!==a.pending)return a.last_flush=-1,V;s=0}e=a.gzindexs&&(t.adler=j(t.adler,a.pending_buf,a.pending-s,s))}a.status=103}if(103===a.status){if(a.gzhead.hcrc){if(a.pending+2>a.pending_buf_size&&(gt(t),0!==a.pending))return a.last_flush=-1,V;mt(a,255&t.adler),mt(a,t.adler>>8&255),t.adler=0}if(a.status=113,gt(t),0!==a.pending)return a.last_flush=-1,V}if(0!==t.avail_in||0!==a.lookahead||e!==X&&666!==a.status){let s=0===a.level?kt(a,e):a.strategy===nt?((t,e)=>{let a;for(;;){if(0===t.lookahead&&(zt(t),0===t.lookahead)){if(e===X)return 1;break}if(t.match_length=0,a=P(t,0,t.window[t.strstart]),t.lookahead--,t.strstart++,a&&(wt(t,!1),0===t.strm.avail_out))return 1}return t.insert=0,e===J?(wt(t,!0),0===t.strm.avail_out?3:4):t.sym_next&&(wt(t,!1),0===t.strm.avail_out)?1:2})(a,e):a.strategy===it?((t,e)=>{let a,s,r,n;const i=t.window;for(;;){if(t.lookahead<=258){if(zt(t),t.lookahead<=258&&e===X)return 1;if(0===t.lookahead)break}if(t.match_length=0,t.lookahead>=3&&t.strstart>0&&(r=t.strstart-1,s=i[r],s===i[++r]&&s===i[++r]&&s===i[++r])){n=t.strstart+258;do{}while(s===i[++r]&&s===i[++r]&&s===i[++r]&&s===i[++r]&&s===i[++r]&&s===i[++r]&&s===i[++r]&&s===i[++r]&&rt.lookahead&&(t.match_length=t.lookahead)}if(t.match_length>=3?(a=P(t,1,t.match_length-3),t.lookahead-=t.match_length,t.strstart+=t.match_length,t.match_length=0):(a=P(t,0,t.window[t.strstart]),t.lookahead--,t.strstart++),a&&(wt(t,!1),0===t.strm.avail_out))return 1}return t.insert=0,e===J?(wt(t,!0),0===t.strm.avail_out?3:4):t.sym_next&&(wt(t,!1),0===t.strm.avail_out)?1:2})(a,e):Zt[a.level].func(a,e);if(3!==s&&4!==s||(a.status=666),1===s||3===s)return 0===t.avail_out&&(a.last_flush=-1),V;if(2===s&&(e===W?Y(a):e!==Q&&(B(a,0,0,!1),e===q&&(ft(a.head),0===a.lookahead&&(a.strstart=0,a.block_start=0,a.insert=0))),gt(t),0===t.avail_out))return a.last_flush=-1,V}return e!==J?V:a.wrap<=0?$:(2===a.wrap?(mt(a,255&t.adler),mt(a,t.adler>>8&255),mt(a,t.adler>>16&255),mt(a,t.adler>>24&255),mt(a,255&t.total_in),mt(a,t.total_in>>8&255),mt(a,t.total_in>>16&255),mt(a,t.total_in>>24&255)):(bt(a,t.adler>>>16),bt(a,65535&t.adler)),gt(t),a.wrap>0&&(a.wrap=-a.wrap),0!==a.pending?V:$)},t.deflateEnd=t=>{if(Ut(t))return tt;const e=t.state.status;return t.state=null,113===e?dt(t,et):V},t.deflateSetDictionary=(t,e)=>{let a=e.length;if(Ut(t))return tt;const s=t.state,r=s.wrap;if(2===r||1===r&&42!==s.status||s.lookahead)return tt;if(1===r&&(t.adler=K(t.adler,e,a,0)),s.wrap=0,a>=s.w_size){0===r&&(ft(s.head),s.strstart=0,s.block_start=0,s.insert=0);let t=new Uint8Array(s.w_size);t.set(e.subarray(a-s.w_size,a),0),e=t,a=s.w_size}const n=t.avail_in,i=t.next_in,_=t.input;for(t.avail_in=a,t.next_in=0,t.input=e,zt(s);s.lookahead>=3;){let 
t=s.strstart,e=s.lookahead-2;do{s.ins_h=pt(s,s.ins_h,s.window[t+3-1]),s.prev[t&s.w_mask]=s.head[s.ins_h],s.head[s.ins_h]=t,t++}while(--e);s.strstart=t,s.lookahead=2,zt(s)}return s.strstart+=s.lookahead,s.block_start=s.strstart,s.insert=s.lookahead,s.lookahead=0,s.match_length=s.prev_length=2,s.match_available=0,t.next_in=i,t.input=_,t.avail_in=n,s.wrap=r,V},t.deflateInfo="pako deflate (from Nodeca project)";var Ft={};const Ot=(t,e)=>Object.prototype.hasOwnProperty.call(t,e);Ft.assign=function(t){const e=Array.prototype.slice.call(arguments,1);for(;e.length;){const a=e.shift();if(a){if("object"!=typeof a)throw new TypeError(a+"must be non-object");for(const e in a)Ot(a,e)&&(t[e]=a[e])}}return t},Ft.flattenChunks=t=>{let e=0;for(let a=0,s=t.length;a=252?6:t>=248?5:t>=240?4:t>=224?3:t>=192?2:1;Ct[254]=Ct[254]=1,Dt.string2buf=t=>{if("function"==typeof TextEncoder&&TextEncoder.prototype.encode)return(new TextEncoder).encode(t);let e,a,s,r,n,i=t.length,_=0;for(r=0;r>>6,e[n++]=128|63&a):a<65536?(e[n++]=224|a>>>12,e[n++]=128|a>>>6&63,e[n++]=128|63&a):(e[n++]=240|a>>>18,e[n++]=128|a>>>12&63,e[n++]=128|a>>>6&63,e[n++]=128|63&a);return e};Dt.buf2string=(t,e)=>{const a=e||t.length;if("function"==typeof TextDecoder&&TextDecoder.prototype.decode)return(new TextDecoder).decode(t.subarray(0,e));let s,r;const n=new Array(2*a);for(r=0,s=0;s4)n[r++]=65533,s+=i-1;else{for(e&=2===i?31:3===i?15:7;i>1&&s1?n[r++]=65533:e<65536?n[r++]=e:(e-=65536,n[r++]=55296|e>>10&1023,n[r++]=56320|1023&e)}}return((t,e)=>{if(e<65534&&t.subarray&&Nt)return String.fromCharCode.apply(null,t.length===e?t:t.subarray(0,e));let a="";for(let s=0;s{(e=e||t.length)>t.length&&(e=t.length);let a=e-1;for(;a>=0&&128==(192&t[a]);)a--;return a<0||0===a?e:a+Ct[t[a]]>e?a:e};const It=t,Ht=Ft,Bt=Dt,Mt=C,Pt=function(){this.input=null,this.next_in=0,this.avail_in=0,this.total_in=0,this.output=null,this.next_out=0,this.avail_out=0,this.total_out=0,this.msg="",this.state=null,this.data_type=2,this.adler=0},Yt=Object.prototype.toString,{Z_NO_FLUSH:Kt,Z_SYNC_FLUSH:jt,Z_FULL_FLUSH:Gt,Z_FINISH:Xt,Z_OK:Wt,Z_STREAM_END:qt,Z_DEFAULT_COMPRESSION:Jt,Z_DEFAULT_STRATEGY:Qt,Z_DEFLATED:Vt}=I;function $t(t){this.options=Ht.assign({level:Jt,method:Vt,chunkSize:16384,windowBits:15,memLevel:8,strategy:Qt},t||{});let e=this.options;e.raw&&e.windowBits>0?e.windowBits=-e.windowBits:e.gzip&&e.windowBits>0&&e.windowBits<16&&(e.windowBits+=16),this.err=0,this.msg="",this.ended=!1,this.chunks=[],this.strm=new Pt,this.strm.avail_out=0;let a=It.deflateInit2(this.strm,e.level,e.method,e.windowBits,e.memLevel,e.strategy);if(a!==Wt)throw new Error(Mt[a]);if(e.header&&It.deflateSetHeader(this.strm,e.header),e.dictionary){let t;if(t="string"==typeof e.dictionary?Bt.string2buf(e.dictionary):"[object ArrayBuffer]"===Yt.call(e.dictionary)?new Uint8Array(e.dictionary):e.dictionary,a=It.deflateSetDictionary(this.strm,t),a!==Wt)throw new Error(Mt[a]);this._dict_set=!0}}$t.prototype.push=function(t,e){const a=this.strm,s=this.options.chunkSize;let r,n;if(this.ended)return!1;for(n=e===~~e?e:!0===e?Xt:Kt,"string"==typeof t?a.input=Bt.string2buf(t):"[object ArrayBuffer]"===Yt.call(t)?a.input=new Uint8Array(t):a.input=t,a.next_in=0,a.avail_in=a.input.length;;)if(0===a.avail_out&&(a.output=new Uint8Array(s),a.next_out=0,a.avail_out=s),(n===jt||n===Gt)&&a.avail_out<=6)this.onData(a.output.subarray(0,a.next_out)),a.avail_out=0;else{if(r=It.deflate(a,n),r===qt)return 
a.next_out>0&&this.onData(a.output.subarray(0,a.next_out)),r=It.deflateEnd(this.strm),this.onEnd(r),this.ended=!0,r===Wt;if(0!==a.avail_out){if(n>0&&a.next_out>0)this.onData(a.output.subarray(0,a.next_out)),a.avail_out=0;else if(0===a.avail_in)break}else this.onData(a.output)}return!0},$t.prototype.onData=function(t){this.chunks.push(t)},$t.prototype.onEnd=function(t){t===Wt&&(this.result=Ht.flattenChunks(this.chunks)),this.chunks=[],this.err=t,this.msg=this.strm.msg};const te=$t,ee=function(t,e){const a=new $t(e);if(a.push(t,!0),a.err)throw a.msg||Mt[a.err];return a.result},ae=I;const se=new class{constructor(){this._init()}clear(){this._init()}addEvent(t){if(!t)throw new Error("Adding invalid event");const e=this._hasEvents?",":"";this.deflate.push(e+t,ae.Z_SYNC_FLUSH),this._hasEvents=!0}finish(){if(this.deflate.push("]",ae.Z_FINISH),this.deflate.err)throw this.deflate.err;const t=this.deflate.result;return this._init(),t}_init(){this._hasEvents=!1,this.deflate=new te,this.deflate.push("[",ae.Z_NO_FLUSH)}},re={clear:()=>{se.clear()},addEvent:t=>se.addEvent(t),finish:()=>se.finish(),compress:t=>function(t){return ee(t)}(t)};addEventListener("message",(function(t){const e=t.data.method,a=t.data.id,s=t.data.arg;if(e in re&&"function"==typeof re[e])try{const t=re[e](s);postMessage({id:a,method:e,success:!0,response:t})}catch(t){postMessage({id:a,method:e,success:!1,response:t.message}),console.error(t)}})),postMessage({id:void 0,method:"init",success:!0,response:void 0});
diff --git a/packages/replay-worker/package.json b/packages/replay-worker/package.json
index d39cfa8877ff..d10bd7c14b4f 100644
--- a/packages/replay-worker/package.json
+++ b/packages/replay-worker/package.json
@@ -17,6 +17,7 @@
"scripts": {
"build": "run-p build:transpile build:types",
"build:transpile": "rollup -c rollup.worker.config.js",
+ "build:examples": "rollup -c rollup.examples.config.js",
"build:types": "run-s build:types:core build:types:downlevel",
"build:types:core": "tsc -p tsconfig.types.json",
"build:types:downlevel": "yarn downlevel-dts build/npm/types build/npm/types-ts3.8 --to ts3.8",
diff --git a/packages/replay-worker/rollup.examples.config.js b/packages/replay-worker/rollup.examples.config.js
new file mode 100644
index 000000000000..f3439b8650d2
--- /dev/null
+++ b/packages/replay-worker/rollup.examples.config.js
@@ -0,0 +1,47 @@
+import resolve from '@rollup/plugin-node-resolve';
+import commonjs from '@rollup/plugin-commonjs';
+import typescript from '@rollup/plugin-typescript';
+import { defineConfig } from 'rollup';
+import { terser } from 'rollup-plugin-terser';
+
+import { makeLicensePlugin } from '../../rollup/plugins/index.js';
+
+const licensePlugin = makeLicensePlugin('Sentry Replay Worker');
+
+const config = defineConfig([
+ {
+ input: ['./src/_worker.ts'],
+ output: {
+ file: './examples/worker.js',
+ format: 'esm',
+ },
+ treeshake: 'smallest',
+ plugins: [
+ commonjs(),
+ typescript({ tsconfig: './tsconfig.json', inlineSourceMap: false, sourceMap: false, inlineSources: false }),
+ resolve(),
+ licensePlugin,
+ ],
+ },
+ {
+ input: ['./src/_worker.ts'],
+ output: {
+ file: './examples/worker.min.js',
+ format: 'esm',
+ },
+ treeshake: 'smallest',
+ plugins: [
+ commonjs(),
+ typescript({ tsconfig: './tsconfig.json', inlineSourceMap: false, sourceMap: false, inlineSources: false }),
+ resolve(),
+ terser({
+ mangle: {
+ module: true,
+ },
+ }),
+ licensePlugin,
+ ],
+ },
+]);
+
+export default config;
diff --git a/packages/replay/src/eventBuffer/index.ts b/packages/replay/src/eventBuffer/index.ts
index 07af24ae22a2..741cb5dedc91 100644
--- a/packages/replay/src/eventBuffer/index.ts
+++ b/packages/replay/src/eventBuffer/index.ts
@@ -7,26 +7,56 @@ import { EventBufferProxy } from './EventBufferProxy';
interface CreateEventBufferParams {
useCompression: boolean;
+ workerUrl?: string;
}
+// Treeshakable guard to remove the code of the included compression worker
+declare const __SENTRY_EXCLUDE_REPLAY_WORKER__: boolean;
+
/**
* Create an event buffer for replays.
*/
-export function createEventBuffer({ useCompression }: CreateEventBufferParams): EventBuffer {
- // eslint-disable-next-line no-restricted-globals
- if (useCompression && window.Worker) {
- try {
- const workerUrl = getWorkerURL();
-
- logInfo('[Replay] Using compression worker');
- const worker = new Worker(workerUrl);
- return new EventBufferProxy(worker);
- } catch (error) {
- logInfo('[Replay] Failed to create compression worker');
- // Fall back to use simple event buffer array
+export function createEventBuffer({
+ useCompression,
+ workerUrl: customWorkerUrl,
+}: CreateEventBufferParams): EventBuffer {
+ if (
+ useCompression &&
+ // eslint-disable-next-line no-restricted-globals
+ window.Worker
+ ) {
+ const worker = _loadWorker(customWorkerUrl);
+
+ if (worker) {
+ return worker;
}
}
logInfo('[Replay] Using simple buffer');
return new EventBufferArray();
}
+
+function _loadWorker(customWorkerUrl?: string): EventBufferProxy | void {
+ try {
+ const workerUrl = customWorkerUrl || _getWorkerUrl();
+
+ if (!workerUrl) {
+ return;
+ }
+
+ logInfo(`[Replay] Using compression worker${customWorkerUrl ? ` from ${customWorkerUrl}` : ''}`);
+ const worker = new Worker(workerUrl);
+ return new EventBufferProxy(worker);
+ } catch (error) {
+ logInfo('[Replay] Failed to create compression worker');
+ // Fall back to use simple event buffer array
+ }
+}
+
+function _getWorkerUrl(): string {
+ if (typeof __SENTRY_EXCLUDE_REPLAY_WORKER__ === 'undefined' || !__SENTRY_EXCLUDE_REPLAY_WORKER__) {
+ return getWorkerURL();
+ }
+
+ return '';
+}
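
The `__SENTRY_EXCLUDE_REPLAY_WORKER__` declaration above is a build-time flag: when a bundler statically replaces it with `true`, `_getWorkerUrl` returns an empty string, so the inlined worker from `getWorkerURL()` becomes unreachable and can be tree-shaken away, while an explicitly passed `workerUrl` still takes precedence. A sketch of how an application could define that flag; the webpack setup is an assumed example (`@rollup/plugin-replace` or esbuild's `define` work the same way) and is not introduced by this diff:

// webpack.config.ts (assumed application config)
import webpack from 'webpack';

const config: webpack.Configuration = {
  plugins: [
    new webpack.DefinePlugin({
      // Statically replace the guard so bundlers can drop the inlined worker code.
      __SENTRY_EXCLUDE_REPLAY_WORKER__: JSON.stringify(true),
    }),
  ],
};

export default config;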
diff --git a/packages/replay/src/integration.ts b/packages/replay/src/integration.ts
index f0b07582ce61..bfa236d87dec 100644
--- a/packages/replay/src/integration.ts
+++ b/packages/replay/src/integration.ts
@@ -61,6 +61,7 @@ export class Replay implements Integration {
maxReplayDuration = MAX_REPLAY_DURATION,
stickySession = true,
useCompression = true,
+ workerUrl,
_experiments = {},
sessionSampleRate,
errorSampleRate,
@@ -163,6 +164,7 @@ export class Replay implements Integration {
sessionSampleRate,
errorSampleRate,
useCompression,
+ workerUrl,
blockAllMedia,
maskAllInputs,
maskAllText,
diff --git a/packages/replay/src/replay.ts b/packages/replay/src/replay.ts
index 9c0721232e43..8f185d5133ca 100644
--- a/packages/replay/src/replay.ts
+++ b/packages/replay/src/replay.ts
@@ -711,6 +711,7 @@ export class ReplayContainer implements ReplayContainerInterface {
this.eventBuffer = createEventBuffer({
useCompression: this._options.useCompression,
+ workerUrl: this._options.workerUrl,
});
this._removeListeners();
diff --git a/packages/replay/src/types/replay.ts b/packages/replay/src/types/replay.ts
index ad39a54990cd..96e4a4fbf36f 100644
--- a/packages/replay/src/types/replay.ts
+++ b/packages/replay/src/types/replay.ts
@@ -136,6 +136,12 @@ export interface ReplayPluginOptions extends ReplayNetworkOptions {
*/
useCompression: boolean;
+ /**
+ * If defined, use this worker URL instead of the default included one for compression.
+ * This will only be used if `useCompression` is not false.
+ */
+ workerUrl?: string;
+
/**
* Block all media (e.g. images, svg, video) in recordings.
*/
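
Taken together, these changes let an application self-host the compression worker instead of relying on the copy inlined into the replay bundle: the new `build:examples` script produces ready-made bundles (`examples/worker.js` and `examples/worker.min.js`) that can be served from the app's own origin and referenced via the new `workerUrl` option. A minimal configuration sketch; the DSN, sample rates and asset path are placeholders, not values taken from this change:

import * as Sentry from '@sentry/browser';

Sentry.init({
  dsn: '__YOUR_DSN_HERE__',
  replaysSessionSampleRate: 0.1,
  replaysOnErrorSampleRate: 1.0,
  integrations: [
    new Sentry.Replay({
      useCompression: true,
      // Self-hosted copy of the compression worker (hypothetical path).
      // If the worker fails to load, replay falls back to the simple, uncompressed buffer.
      workerUrl: '/assets/replay-worker.min.js',
    }),
  ],
});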