From 55f5b7b6c7254e83df732f83a8f7a378645bdce6 Mon Sep 17 00:00:00 2001 From: Alexandre Bury Date: Wed, 27 Mar 2024 10:46:33 -0400 Subject: [PATCH] Bump zstd-sys to 2.0.10 --- zstd-safe/zstd-sys/Cargo.toml | 2 +- zstd-safe/zstd-sys/src/bindings_zdict.rs | 2 +- .../src/bindings_zdict_experimental.rs | 2 +- zstd-safe/zstd-sys/src/bindings_zstd.rs | 19 +++---- .../src/bindings_zstd_experimental.rs | 49 +++++++++++-------- zstd-safe/zstd-sys/update_zstd.sh | 2 +- zstd-safe/zstd-sys/zstd | 2 +- 7 files changed, 44 insertions(+), 34 deletions(-) diff --git a/zstd-safe/zstd-sys/Cargo.toml b/zstd-safe/zstd-sys/Cargo.toml index 29a22cb3..fa7cc855 100644 --- a/zstd-safe/zstd-sys/Cargo.toml +++ b/zstd-safe/zstd-sys/Cargo.toml @@ -16,7 +16,7 @@ links = "zstd" name = "zstd-sys" readme = "Readme.md" repository = "https://github.com/gyscos/zstd-rs" -version = "2.0.9+zstd.1.5.5" +version = "2.0.10+zstd.1.5.6" edition = "2018" rust-version = "1.64" diff --git a/zstd-safe/zstd-sys/src/bindings_zdict.rs b/zstd-safe/zstd-sys/src/bindings_zdict.rs index bfc049ea..3bef2cb9 100644 --- a/zstd-safe/zstd-sys/src/bindings_zdict.rs +++ b/zstd-safe/zstd-sys/src/bindings_zdict.rs @@ -33,7 +33,7 @@ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -/* automatically generated by rust-bindgen 0.66.1 */ +/* automatically generated by rust-bindgen 0.69.4 */ extern "C" { #[doc = " ZDICT_trainFromBuffer():\n Train a dictionary from an array of samples.\n Redirect towards ZDICT_optimizeTrainFromBuffer_fastCover() single-threaded, with d=8, steps=4,\n f=20, and accel=1.\n Samples must be stored concatenated in a single flat buffer `samplesBuffer`,\n supplied with an array of sizes `samplesSizes`, providing the size of each sample, in order.\n The resulting dictionary will be saved into `dictBuffer`.\n @return: size of dictionary stored into `dictBuffer` (<= `dictBufferCapacity`)\n or an error code, which can be tested with ZDICT_isError().\n Note: Dictionary training will fail if there are not enough samples to construct a\n dictionary, or if most of the samples are too small (< 8 bytes being the lower limit).\n If dictionary training fails, you should use zstd without a dictionary, as the dictionary\n would've been ineffective anyways. If you believe your samples would benefit from a dictionary\n please open an issue with details, and we can look into it.\n Note: ZDICT_trainFromBuffer()'s memory usage is about 6 MB.\n Tips: In general, a reasonable dictionary has a size of ~ 100 KB.\n It's possible to select smaller or larger size, just by specifying `dictBufferCapacity`.\n In general, it's recommended to provide a few thousands samples, though this can vary a lot.\n It's recommended that total size of all samples be about ~x100 times the target size of dictionary."] diff --git a/zstd-safe/zstd-sys/src/bindings_zdict_experimental.rs b/zstd-safe/zstd-sys/src/bindings_zdict_experimental.rs index 7eb782c5..c721751b 100644 --- a/zstd-safe/zstd-sys/src/bindings_zdict_experimental.rs +++ b/zstd-safe/zstd-sys/src/bindings_zdict_experimental.rs @@ -33,7 +33,7 @@ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ -/* automatically generated by rust-bindgen 0.66.1 */ +/* automatically generated by rust-bindgen 0.69.4 */ pub const ZDICT_DICTSIZE_MIN: u32 = 256; pub const ZDICT_CONTENTSIZE_MIN: u32 = 128; diff --git a/zstd-safe/zstd-sys/src/bindings_zstd.rs b/zstd-safe/zstd-sys/src/bindings_zstd.rs index 29ccc913..5d827cd6 100644 --- a/zstd-safe/zstd-sys/src/bindings_zstd.rs +++ b/zstd-safe/zstd-sys/src/bindings_zstd.rs @@ -33,12 +33,12 @@ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -/* automatically generated by rust-bindgen 0.66.1 */ +/* automatically generated by rust-bindgen 0.69.4 */ pub const ZSTD_VERSION_MAJOR: u32 = 1; pub const ZSTD_VERSION_MINOR: u32 = 5; -pub const ZSTD_VERSION_RELEASE: u32 = 5; -pub const ZSTD_VERSION_NUMBER: u32 = 10505; +pub const ZSTD_VERSION_RELEASE: u32 = 6; +pub const ZSTD_VERSION_NUMBER: u32 = 10506; pub const ZSTD_CLEVEL_DEFAULT: u32 = 3; pub const ZSTD_MAGICNUMBER: u32 = 4247762216; pub const ZSTD_MAGIC_DICTIONARY: u32 = 3962610743; @@ -127,7 +127,7 @@ extern "C" { pub fn ZSTD_freeCCtx(cctx: *mut ZSTD_CCtx) -> usize; } extern "C" { - #[doc = " ZSTD_compressCCtx() :\n Same as ZSTD_compress(), using an explicit ZSTD_CCtx.\n Important : in order to behave similarly to `ZSTD_compress()`,\n this function compresses at requested compression level,\n __ignoring any other parameter__ .\n If any advanced parameter was set using the advanced API,\n they will all be reset. Only `compressionLevel` remains."] + #[doc = " ZSTD_compressCCtx() :\n Same as ZSTD_compress(), using an explicit ZSTD_CCtx.\n Important : in order to mirror `ZSTD_compress()` behavior,\n this function compresses at the requested compression level,\n __ignoring any other advanced parameter__ .\n If any advanced parameter was set using the advanced API,\n they will all be reset. 
Only `compressionLevel` remains."] pub fn ZSTD_compressCCtx( cctx: *mut ZSTD_CCtx, dst: *mut ::core::ffi::c_void, @@ -150,7 +150,7 @@ extern "C" { pub fn ZSTD_freeDCtx(dctx: *mut ZSTD_DCtx) -> usize; } extern "C" { - #[doc = " ZSTD_decompressDCtx() :\n Same as ZSTD_decompress(),\n requires an allocated ZSTD_DCtx.\n Compatible with sticky parameters."] + #[doc = " ZSTD_decompressDCtx() :\n Same as ZSTD_decompress(),\n requires an allocated ZSTD_DCtx.\n Compatible with sticky parameters (see below)."] pub fn ZSTD_decompressDCtx( dctx: *mut ZSTD_DCtx, dst: *mut ::core::ffi::c_void, @@ -184,6 +184,7 @@ pub enum ZSTD_cParameter { ZSTD_c_minMatch = 105, ZSTD_c_targetLength = 106, ZSTD_c_strategy = 107, + ZSTD_c_targetCBlockSize = 130, ZSTD_c_enableLongDistanceMatching = 160, ZSTD_c_ldmHashLog = 161, ZSTD_c_ldmMinMatch = 162, @@ -200,7 +201,6 @@ pub enum ZSTD_cParameter { ZSTD_c_experimentalParam3 = 1000, ZSTD_c_experimentalParam4 = 1001, ZSTD_c_experimentalParam5 = 1002, - ZSTD_c_experimentalParam6 = 1003, ZSTD_c_experimentalParam7 = 1004, ZSTD_c_experimentalParam8 = 1005, ZSTD_c_experimentalParam9 = 1006, @@ -256,7 +256,7 @@ extern "C" { ) -> usize; } extern "C" { - #[doc = " ZSTD_compress2() :\n Behave the same as ZSTD_compressCCtx(), but compression parameters are set using the advanced API.\n ZSTD_compress2() always starts a new frame.\n Should cctx hold data from a previously unfinished frame, everything about it is forgotten.\n - Compression parameters are pushed into CCtx before starting compression, using ZSTD_CCtx_set*()\n - The function is always blocking, returns when compression is completed.\n NOTE: Providing `dstCapacity >= ZSTD_compressBound(srcSize)` guarantees that zstd will have\n enough space to successfully compress the data, though it is possible it fails for other reasons.\n @return : compressed size written into `dst` (<= `dstCapacity),\n or an error code if it fails (which can be tested using ZSTD_isError())."] + #[doc = " ZSTD_compress2() :\n Behave the same as ZSTD_compressCCtx(), but compression parameters are set using the advanced API.\n (note that this entry point doesn't even expose a compression level parameter).\n ZSTD_compress2() always starts a new frame.\n Should cctx hold data from a previously unfinished frame, everything about it is forgotten.\n - Compression parameters are pushed into CCtx before starting compression, using ZSTD_CCtx_set*()\n - The function is always blocking, returns when compression is completed.\n NOTE: Providing `dstCapacity >= ZSTD_compressBound(srcSize)` guarantees that zstd will have\n enough space to successfully compress the data, though it is possible it fails for other reasons.\n @return : compressed size written into `dst` (<= `dstCapacity),\n or an error code if it fails (which can be tested using ZSTD_isError())."] pub fn ZSTD_compress2( cctx: *mut ZSTD_CCtx, dst: *mut ::core::ffi::c_void, @@ -275,6 +275,7 @@ pub enum ZSTD_dParameter { ZSTD_d_experimentalParam3 = 1002, ZSTD_d_experimentalParam4 = 1003, ZSTD_d_experimentalParam5 = 1004, + ZSTD_d_experimentalParam6 = 1005, } extern "C" { #[doc = " ZSTD_dParam_getBounds() :\n All parameters must belong to an interval with lower and upper bounds,\n otherwise they will either trigger an error or be automatically clamped.\n @return : a structure, ZSTD_bounds, which contains\n - an error status field, which must be tested using ZSTD_isError()\n - both lower and upper bounds, inclusive"] @@ -334,7 +335,7 @@ pub enum ZSTD_EndDirective { ZSTD_e_end = 2, } extern "C" { - #[doc = " 
ZSTD_compressStream2() : Requires v1.4.0+\n Behaves about the same as ZSTD_compressStream, with additional control on end directive.\n - Compression parameters are pushed into CCtx before starting compression, using ZSTD_CCtx_set*()\n - Compression parameters cannot be changed once compression is started (save a list of exceptions in multi-threading mode)\n - output->pos must be <= dstCapacity, input->pos must be <= srcSize\n - output->pos and input->pos will be updated. They are guaranteed to remain below their respective limit.\n - endOp must be a valid directive\n - When nbWorkers==0 (default), function is blocking : it completes its job before returning to caller.\n - When nbWorkers>=1, function is non-blocking : it copies a portion of input, distributes jobs to internal worker threads, flush to output whatever is available,\n and then immediately returns, just indicating that there is some data remaining to be flushed.\n The function nonetheless guarantees forward progress : it will return only after it reads or write at least 1+ byte.\n - Exception : if the first call requests a ZSTD_e_end directive and provides enough dstCapacity, the function delegates to ZSTD_compress2() which is always blocking.\n - @return provides a minimum amount of data remaining to be flushed from internal buffers\n or an error code, which can be tested using ZSTD_isError().\n if @return != 0, flush is not fully completed, there is still some data left within internal buffers.\n This is useful for ZSTD_e_flush, since in this case more flushes are necessary to empty all buffers.\n For ZSTD_e_end, @return == 0 when internal buffers are fully flushed and frame is completed.\n - after a ZSTD_e_end directive, if internal buffer is not fully flushed (@return != 0),\n only ZSTD_e_end or ZSTD_e_flush operations are allowed.\n Before starting a new compression job, or changing compression parameters,\n it is required to fully flush internal buffers."] + #[doc = " ZSTD_compressStream2() : Requires v1.4.0+\n Behaves about the same as ZSTD_compressStream, with additional control on end directive.\n - Compression parameters are pushed into CCtx before starting compression, using ZSTD_CCtx_set*()\n - Compression parameters cannot be changed once compression is started (save a list of exceptions in multi-threading mode)\n - output->pos must be <= dstCapacity, input->pos must be <= srcSize\n - output->pos and input->pos will be updated. 
They are guaranteed to remain below their respective limit.\n - endOp must be a valid directive\n - When nbWorkers==0 (default), function is blocking : it completes its job before returning to caller.\n - When nbWorkers>=1, function is non-blocking : it copies a portion of input, distributes jobs to internal worker threads, flush to output whatever is available,\n and then immediately returns, just indicating that there is some data remaining to be flushed.\n The function nonetheless guarantees forward progress : it will return only after it reads or write at least 1+ byte.\n - Exception : if the first call requests a ZSTD_e_end directive and provides enough dstCapacity, the function delegates to ZSTD_compress2() which is always blocking.\n - @return provides a minimum amount of data remaining to be flushed from internal buffers\n or an error code, which can be tested using ZSTD_isError().\n if @return != 0, flush is not fully completed, there is still some data left within internal buffers.\n This is useful for ZSTD_e_flush, since in this case more flushes are necessary to empty all buffers.\n For ZSTD_e_end, @return == 0 when internal buffers are fully flushed and frame is completed.\n - after a ZSTD_e_end directive, if internal buffer is not fully flushed (@return != 0),\n only ZSTD_e_end or ZSTD_e_flush operations are allowed.\n Before starting a new compression job, or changing compression parameters,\n it is required to fully flush internal buffers.\n - note: if an operation ends with an error, it may leave @cctx in an undefined state.\n Therefore, it's UB to invoke ZSTD_compressStream2() of ZSTD_compressStream() on such a state.\n In order to be re-employed after an error, a state must be reset,\n which can be done explicitly (ZSTD_CCtx_reset()),\n or is sometimes implied by methods starting a new compression job (ZSTD_initCStream(), ZSTD_compressCCtx())"] pub fn ZSTD_compressStream2( cctx: *mut ZSTD_CCtx, output: *mut ZSTD_outBuffer, @@ -389,7 +390,7 @@ extern "C" { pub fn ZSTD_initDStream(zds: *mut ZSTD_DStream) -> usize; } extern "C" { - #[doc = " ZSTD_decompressStream() :\n Streaming decompression function.\n Call repetitively to consume full input updating it as necessary.\n Function will update both input and output `pos` fields exposing current state via these fields:\n - `input.pos < input.size`, some input remaining and caller should provide remaining input\n on the next call.\n - `output.pos < output.size`, decoder finished and flushed all remaining buffers.\n - `output.pos == output.size`, potentially uncflushed data present in the internal buffers,\n call ZSTD_decompressStream() again to flush remaining data to output.\n Note : with no additional input, amount of data flushed <= ZSTD_BLOCKSIZE_MAX.\n\n @return : 0 when a frame is completely decoded and fully flushed,\n or an error code, which can be tested using ZSTD_isError(),\n or any other value > 0, which means there is some decoding or flushing to do to complete current frame."] + #[doc = " ZSTD_decompressStream() :\n Streaming decompression function.\n Call repetitively to consume full input updating it as necessary.\n Function will update both input and output `pos` fields exposing current state via these fields:\n - `input.pos < input.size`, some input remaining and caller should provide remaining input\n on the next call.\n - `output.pos < output.size`, decoder finished and flushed all remaining buffers.\n - `output.pos == output.size`, potentially uncflushed data present in the internal buffers,\n call 
ZSTD_decompressStream() again to flush remaining data to output.\n Note : with no additional input, amount of data flushed <= ZSTD_BLOCKSIZE_MAX.\n\n @return : 0 when a frame is completely decoded and fully flushed,\n or an error code, which can be tested using ZSTD_isError(),\n or any other value > 0, which means there is some decoding or flushing to do to complete current frame.\n\n Note: when an operation returns with an error code, the @zds state may be left in undefined state.\n It's UB to invoke `ZSTD_decompressStream()` on such a state.\n In order to re-use such a state, it must be first reset,\n which can be done explicitly (`ZSTD_DCtx_reset()`),\n or is implied for operations starting some new decompression job (`ZSTD_initDStream`, `ZSTD_decompressDCtx()`, `ZSTD_decompress_usingDict()`)"] pub fn ZSTD_decompressStream( zds: *mut ZSTD_DStream, output: *mut ZSTD_outBuffer, diff --git a/zstd-safe/zstd-sys/src/bindings_zstd_experimental.rs b/zstd-safe/zstd-sys/src/bindings_zstd_experimental.rs index 7161922b..9958fb0f 100644 --- a/zstd-safe/zstd-sys/src/bindings_zstd_experimental.rs +++ b/zstd-safe/zstd-sys/src/bindings_zstd_experimental.rs @@ -33,12 +33,12 @@ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -/* automatically generated by rust-bindgen 0.66.1 */ +/* automatically generated by rust-bindgen 0.69.4 */ pub const ZSTD_VERSION_MAJOR: u32 = 1; pub const ZSTD_VERSION_MINOR: u32 = 5; -pub const ZSTD_VERSION_RELEASE: u32 = 5; -pub const ZSTD_VERSION_NUMBER: u32 = 10505; +pub const ZSTD_VERSION_RELEASE: u32 = 6; +pub const ZSTD_VERSION_NUMBER: u32 = 10506; pub const ZSTD_CLEVEL_DEFAULT: u32 = 3; pub const ZSTD_MAGICNUMBER: u32 = 4247762216; pub const ZSTD_MAGIC_DICTIONARY: u32 = 3962610743; @@ -72,7 +72,7 @@ pub const ZSTD_LDM_MINMATCH_MAX: u32 = 4096; pub const ZSTD_LDM_BUCKETSIZELOG_MIN: u32 = 1; pub const ZSTD_LDM_BUCKETSIZELOG_MAX: u32 = 8; pub const ZSTD_LDM_HASHRATELOG_MIN: u32 = 0; -pub const ZSTD_TARGETCBLOCKSIZE_MIN: u32 = 64; +pub const ZSTD_TARGETCBLOCKSIZE_MIN: u32 = 1340; pub const ZSTD_TARGETCBLOCKSIZE_MAX: u32 = 131072; pub const ZSTD_SRCSIZEHINT_MIN: u32 = 0; extern "C" { @@ -154,7 +154,7 @@ extern "C" { pub fn ZSTD_freeCCtx(cctx: *mut ZSTD_CCtx) -> usize; } extern "C" { - #[doc = " ZSTD_compressCCtx() :\n Same as ZSTD_compress(), using an explicit ZSTD_CCtx.\n Important : in order to behave similarly to `ZSTD_compress()`,\n this function compresses at requested compression level,\n __ignoring any other parameter__ .\n If any advanced parameter was set using the advanced API,\n they will all be reset. Only `compressionLevel` remains."] + #[doc = " ZSTD_compressCCtx() :\n Same as ZSTD_compress(), using an explicit ZSTD_CCtx.\n Important : in order to mirror `ZSTD_compress()` behavior,\n this function compresses at the requested compression level,\n __ignoring any other advanced parameter__ .\n If any advanced parameter was set using the advanced API,\n they will all be reset. 
Only `compressionLevel` remains."] pub fn ZSTD_compressCCtx( cctx: *mut ZSTD_CCtx, dst: *mut ::core::ffi::c_void, @@ -177,7 +177,7 @@ extern "C" { pub fn ZSTD_freeDCtx(dctx: *mut ZSTD_DCtx) -> usize; } extern "C" { - #[doc = " ZSTD_decompressDCtx() :\n Same as ZSTD_decompress(),\n requires an allocated ZSTD_DCtx.\n Compatible with sticky parameters."] + #[doc = " ZSTD_decompressDCtx() :\n Same as ZSTD_decompress(),\n requires an allocated ZSTD_DCtx.\n Compatible with sticky parameters (see below)."] pub fn ZSTD_decompressDCtx( dctx: *mut ZSTD_DCtx, dst: *mut ::core::ffi::c_void, @@ -211,6 +211,7 @@ pub enum ZSTD_cParameter { ZSTD_c_minMatch = 105, ZSTD_c_targetLength = 106, ZSTD_c_strategy = 107, + ZSTD_c_targetCBlockSize = 130, ZSTD_c_enableLongDistanceMatching = 160, ZSTD_c_ldmHashLog = 161, ZSTD_c_ldmMinMatch = 162, @@ -227,7 +228,6 @@ pub enum ZSTD_cParameter { ZSTD_c_experimentalParam3 = 1000, ZSTD_c_experimentalParam4 = 1001, ZSTD_c_experimentalParam5 = 1002, - ZSTD_c_experimentalParam6 = 1003, ZSTD_c_experimentalParam7 = 1004, ZSTD_c_experimentalParam8 = 1005, ZSTD_c_experimentalParam9 = 1006, @@ -283,7 +283,7 @@ extern "C" { ) -> usize; } extern "C" { - #[doc = " ZSTD_compress2() :\n Behave the same as ZSTD_compressCCtx(), but compression parameters are set using the advanced API.\n ZSTD_compress2() always starts a new frame.\n Should cctx hold data from a previously unfinished frame, everything about it is forgotten.\n - Compression parameters are pushed into CCtx before starting compression, using ZSTD_CCtx_set*()\n - The function is always blocking, returns when compression is completed.\n NOTE: Providing `dstCapacity >= ZSTD_compressBound(srcSize)` guarantees that zstd will have\n enough space to successfully compress the data, though it is possible it fails for other reasons.\n @return : compressed size written into `dst` (<= `dstCapacity),\n or an error code if it fails (which can be tested using ZSTD_isError())."] + #[doc = " ZSTD_compress2() :\n Behave the same as ZSTD_compressCCtx(), but compression parameters are set using the advanced API.\n (note that this entry point doesn't even expose a compression level parameter).\n ZSTD_compress2() always starts a new frame.\n Should cctx hold data from a previously unfinished frame, everything about it is forgotten.\n - Compression parameters are pushed into CCtx before starting compression, using ZSTD_CCtx_set*()\n - The function is always blocking, returns when compression is completed.\n NOTE: Providing `dstCapacity >= ZSTD_compressBound(srcSize)` guarantees that zstd will have\n enough space to successfully compress the data, though it is possible it fails for other reasons.\n @return : compressed size written into `dst` (<= `dstCapacity),\n or an error code if it fails (which can be tested using ZSTD_isError())."] pub fn ZSTD_compress2( cctx: *mut ZSTD_CCtx, dst: *mut ::core::ffi::c_void, @@ -302,6 +302,7 @@ pub enum ZSTD_dParameter { ZSTD_d_experimentalParam3 = 1002, ZSTD_d_experimentalParam4 = 1003, ZSTD_d_experimentalParam5 = 1004, + ZSTD_d_experimentalParam6 = 1005, } extern "C" { #[doc = " ZSTD_dParam_getBounds() :\n All parameters must belong to an interval with lower and upper bounds,\n otherwise they will either trigger an error or be automatically clamped.\n @return : a structure, ZSTD_bounds, which contains\n - an error status field, which must be tested using ZSTD_isError()\n - both lower and upper bounds, inclusive"] @@ -361,7 +362,7 @@ pub enum ZSTD_EndDirective { ZSTD_e_end = 2, } extern "C" { - #[doc = " 
ZSTD_compressStream2() : Requires v1.4.0+\n Behaves about the same as ZSTD_compressStream, with additional control on end directive.\n - Compression parameters are pushed into CCtx before starting compression, using ZSTD_CCtx_set*()\n - Compression parameters cannot be changed once compression is started (save a list of exceptions in multi-threading mode)\n - output->pos must be <= dstCapacity, input->pos must be <= srcSize\n - output->pos and input->pos will be updated. They are guaranteed to remain below their respective limit.\n - endOp must be a valid directive\n - When nbWorkers==0 (default), function is blocking : it completes its job before returning to caller.\n - When nbWorkers>=1, function is non-blocking : it copies a portion of input, distributes jobs to internal worker threads, flush to output whatever is available,\n and then immediately returns, just indicating that there is some data remaining to be flushed.\n The function nonetheless guarantees forward progress : it will return only after it reads or write at least 1+ byte.\n - Exception : if the first call requests a ZSTD_e_end directive and provides enough dstCapacity, the function delegates to ZSTD_compress2() which is always blocking.\n - @return provides a minimum amount of data remaining to be flushed from internal buffers\n or an error code, which can be tested using ZSTD_isError().\n if @return != 0, flush is not fully completed, there is still some data left within internal buffers.\n This is useful for ZSTD_e_flush, since in this case more flushes are necessary to empty all buffers.\n For ZSTD_e_end, @return == 0 when internal buffers are fully flushed and frame is completed.\n - after a ZSTD_e_end directive, if internal buffer is not fully flushed (@return != 0),\n only ZSTD_e_end or ZSTD_e_flush operations are allowed.\n Before starting a new compression job, or changing compression parameters,\n it is required to fully flush internal buffers."] + #[doc = " ZSTD_compressStream2() : Requires v1.4.0+\n Behaves about the same as ZSTD_compressStream, with additional control on end directive.\n - Compression parameters are pushed into CCtx before starting compression, using ZSTD_CCtx_set*()\n - Compression parameters cannot be changed once compression is started (save a list of exceptions in multi-threading mode)\n - output->pos must be <= dstCapacity, input->pos must be <= srcSize\n - output->pos and input->pos will be updated. 
They are guaranteed to remain below their respective limit.\n - endOp must be a valid directive\n - When nbWorkers==0 (default), function is blocking : it completes its job before returning to caller.\n - When nbWorkers>=1, function is non-blocking : it copies a portion of input, distributes jobs to internal worker threads, flush to output whatever is available,\n and then immediately returns, just indicating that there is some data remaining to be flushed.\n The function nonetheless guarantees forward progress : it will return only after it reads or write at least 1+ byte.\n - Exception : if the first call requests a ZSTD_e_end directive and provides enough dstCapacity, the function delegates to ZSTD_compress2() which is always blocking.\n - @return provides a minimum amount of data remaining to be flushed from internal buffers\n or an error code, which can be tested using ZSTD_isError().\n if @return != 0, flush is not fully completed, there is still some data left within internal buffers.\n This is useful for ZSTD_e_flush, since in this case more flushes are necessary to empty all buffers.\n For ZSTD_e_end, @return == 0 when internal buffers are fully flushed and frame is completed.\n - after a ZSTD_e_end directive, if internal buffer is not fully flushed (@return != 0),\n only ZSTD_e_end or ZSTD_e_flush operations are allowed.\n Before starting a new compression job, or changing compression parameters,\n it is required to fully flush internal buffers.\n - note: if an operation ends with an error, it may leave @cctx in an undefined state.\n Therefore, it's UB to invoke ZSTD_compressStream2() of ZSTD_compressStream() on such a state.\n In order to be re-employed after an error, a state must be reset,\n which can be done explicitly (ZSTD_CCtx_reset()),\n or is sometimes implied by methods starting a new compression job (ZSTD_initCStream(), ZSTD_compressCCtx())"] pub fn ZSTD_compressStream2( cctx: *mut ZSTD_CCtx, output: *mut ZSTD_outBuffer, @@ -416,7 +417,7 @@ extern "C" { pub fn ZSTD_initDStream(zds: *mut ZSTD_DStream) -> usize; } extern "C" { - #[doc = " ZSTD_decompressStream() :\n Streaming decompression function.\n Call repetitively to consume full input updating it as necessary.\n Function will update both input and output `pos` fields exposing current state via these fields:\n - `input.pos < input.size`, some input remaining and caller should provide remaining input\n on the next call.\n - `output.pos < output.size`, decoder finished and flushed all remaining buffers.\n - `output.pos == output.size`, potentially uncflushed data present in the internal buffers,\n call ZSTD_decompressStream() again to flush remaining data to output.\n Note : with no additional input, amount of data flushed <= ZSTD_BLOCKSIZE_MAX.\n\n @return : 0 when a frame is completely decoded and fully flushed,\n or an error code, which can be tested using ZSTD_isError(),\n or any other value > 0, which means there is some decoding or flushing to do to complete current frame."] + #[doc = " ZSTD_decompressStream() :\n Streaming decompression function.\n Call repetitively to consume full input updating it as necessary.\n Function will update both input and output `pos` fields exposing current state via these fields:\n - `input.pos < input.size`, some input remaining and caller should provide remaining input\n on the next call.\n - `output.pos < output.size`, decoder finished and flushed all remaining buffers.\n - `output.pos == output.size`, potentially uncflushed data present in the internal buffers,\n call 
ZSTD_decompressStream() again to flush remaining data to output.\n Note : with no additional input, amount of data flushed <= ZSTD_BLOCKSIZE_MAX.\n\n @return : 0 when a frame is completely decoded and fully flushed,\n or an error code, which can be tested using ZSTD_isError(),\n or any other value > 0, which means there is some decoding or flushing to do to complete current frame.\n\n Note: when an operation returns with an error code, the @zds state may be left in undefined state.\n It's UB to invoke `ZSTD_decompressStream()` on such a state.\n In order to re-use such a state, it must be first reset,\n which can be done explicitly (`ZSTD_DCtx_reset()`),\n or is implied for operations starting some new decompression job (`ZSTD_initDStream`, `ZSTD_decompressDCtx()`, `ZSTD_decompress_usingDict()`)"] pub fn ZSTD_decompressStream( zds: *mut ZSTD_DStream, output: *mut ZSTD_outBuffer, @@ -784,7 +785,7 @@ extern "C" { pub fn ZSTD_sequenceBound(srcSize: usize) -> usize; } extern "C" { - #[doc = " ZSTD_generateSequences() :\n Generate sequences using ZSTD_compress2(), given a source buffer.\n\n Each block will end with a dummy sequence\n with offset == 0, matchLength == 0, and litLength == length of last literals.\n litLength may be == 0, and if so, then the sequence of (of: 0 ml: 0 ll: 0)\n simply acts as a block delimiter.\n\n @zc can be used to insert custom compression params.\n This function invokes ZSTD_compress2().\n\n The output of this function can be fed into ZSTD_compressSequences() with CCtx\n setting of ZSTD_c_blockDelimiters as ZSTD_sf_explicitBlockDelimiters\n @return : number of sequences generated"] + #[doc = " ZSTD_generateSequences() :\n WARNING: This function is meant for debugging and informational purposes ONLY!\n Its implementation is flawed, and it will be deleted in a future version.\n It is not guaranteed to succeed, as there are several cases where it will give\n up and fail. You should NOT use this function in production code.\n\n This function is deprecated, and will be removed in a future version.\n\n Generate sequences using ZSTD_compress2(), given a source buffer.\n\n @param zc The compression context to be used for ZSTD_compress2(). 
Set any\n compression parameters you need on this context.\n @param outSeqs The output sequences buffer of size @p outSeqsSize\n @param outSeqsSize The size of the output sequences buffer.\n ZSTD_sequenceBound(srcSize) is an upper bound on the number\n of sequences that can be generated.\n @param src The source buffer to generate sequences from of size @p srcSize.\n @param srcSize The size of the source buffer.\n\n Each block will end with a dummy sequence\n with offset == 0, matchLength == 0, and litLength == length of last literals.\n litLength may be == 0, and if so, then the sequence of (of: 0 ml: 0 ll: 0)\n simply acts as a block delimiter.\n\n @returns The number of sequences generated, necessarily less than\n ZSTD_sequenceBound(srcSize), or an error code that can be checked\n with ZSTD_isError()."] pub fn ZSTD_generateSequences( zc: *mut ZSTD_CCtx, outSeqs: *mut ZSTD_Sequence, @@ -840,9 +841,9 @@ extern "C" { ) -> ::core::ffi::c_uint; } extern "C" { - #[doc = " ZSTD_estimate*() :\n These functions make it possible to estimate memory usage\n of a future {D,C}Ctx, before its creation.\n\n ZSTD_estimateCCtxSize() will provide a memory budget large enough\n for any compression level up to selected one.\n Note : Unlike ZSTD_estimateCStreamSize*(), this estimate\n does not include space for a window buffer.\n Therefore, the estimation is only guaranteed for single-shot compressions, not streaming.\n The estimate will assume the input may be arbitrarily large,\n which is the worst case.\n\n When srcSize can be bound by a known and rather \"small\" value,\n this fact can be used to provide a tighter estimation\n because the CCtx compression context will need less memory.\n This tighter estimation can be provided by more advanced functions\n ZSTD_estimateCCtxSize_usingCParams(), which can be used in tandem with ZSTD_getCParams(),\n and ZSTD_estimateCCtxSize_usingCCtxParams(), which can be used in tandem with ZSTD_CCtxParams_setParameter().\n Both can be used to estimate memory using custom compression parameters and arbitrary srcSize limits.\n\n Note : only single-threaded compression is supported.\n ZSTD_estimateCCtxSize_usingCCtxParams() will return an error code if ZSTD_c_nbWorkers is >= 1.\n\n Note 2 : ZSTD_estimateCCtxSize* functions are not compatible with the Block-Level Sequence Producer API at this time.\n Size estimates assume that no external sequence producer is registered."] + #[doc = " ZSTD_estimate*() :\n These functions make it possible to estimate memory usage\n of a future {D,C}Ctx, before its creation.\n This is useful in combination with ZSTD_initStatic(),\n which makes it possible to employ a static buffer for ZSTD_CCtx* state.\n\n ZSTD_estimateCCtxSize() will provide a memory budget large enough\n to compress data of any size using one-shot compression ZSTD_compressCCtx() or ZSTD_compress2()\n associated with any compression level up to max specified one.\n The estimate will assume the input may be arbitrarily large,\n which is the worst case.\n\n Note that the size estimation is specific for one-shot compression,\n it is not valid for streaming (see ZSTD_estimateCStreamSize*())\n nor other potential ways of using a ZSTD_CCtx* state.\n\n When srcSize can be bound by a known and rather \"small\" value,\n this knowledge can be used to provide a tighter budget estimation\n because the ZSTD_CCtx* state will need less memory for small inputs.\n This tighter estimation can be provided by employing more advanced functions\n ZSTD_estimateCCtxSize_usingCParams(), which can be 
used in tandem with ZSTD_getCParams(),\n and ZSTD_estimateCCtxSize_usingCCtxParams(), which can be used in tandem with ZSTD_CCtxParams_setParameter().\n Both can be used to estimate memory using custom compression parameters and arbitrary srcSize limits.\n\n Note : only single-threaded compression is supported.\n ZSTD_estimateCCtxSize_usingCCtxParams() will return an error code if ZSTD_c_nbWorkers is >= 1."] pub fn ZSTD_estimateCCtxSize( - compressionLevel: ::core::ffi::c_int, + maxCompressionLevel: ::core::ffi::c_int, ) -> usize; } extern "C" { @@ -859,9 +860,9 @@ extern "C" { pub fn ZSTD_estimateDCtxSize() -> usize; } extern "C" { - #[doc = " ZSTD_estimateCStreamSize() :\n ZSTD_estimateCStreamSize() will provide a budget large enough for any compression level up to selected one.\n It will also consider src size to be arbitrarily \"large\", which is worst case.\n If srcSize is known to always be small, ZSTD_estimateCStreamSize_usingCParams() can provide a tighter estimation.\n ZSTD_estimateCStreamSize_usingCParams() can be used in tandem with ZSTD_getCParams() to create cParams from compressionLevel.\n ZSTD_estimateCStreamSize_usingCCtxParams() can be used in tandem with ZSTD_CCtxParams_setParameter(). Only single-threaded compression is supported. This function will return an error code if ZSTD_c_nbWorkers is >= 1.\n Note : CStream size estimation is only correct for single-threaded compression.\n ZSTD_DStream memory budget depends on window Size.\n This information can be passed manually, using ZSTD_estimateDStreamSize,\n or deducted from a valid frame Header, using ZSTD_estimateDStreamSize_fromFrame();\n Note : if streaming is init with function ZSTD_init?Stream_usingDict(),\n an internal ?Dict will be created, which additional size is not estimated here.\n In this case, get total size by adding ZSTD_estimate?DictSize\n Note 2 : only single-threaded compression is supported.\n ZSTD_estimateCStreamSize_usingCCtxParams() will return an error code if ZSTD_c_nbWorkers is >= 1.\n Note 3 : ZSTD_estimateCStreamSize* functions are not compatible with the Block-Level Sequence Producer API at this time.\n Size estimates assume that no external sequence producer is registered."] + #[doc = " ZSTD_estimateCStreamSize() :\n ZSTD_estimateCStreamSize() will provide a memory budget large enough for streaming compression\n using any compression level up to the max specified one.\n It will also consider src size to be arbitrarily \"large\", which is a worst case scenario.\n If srcSize is known to always be small, ZSTD_estimateCStreamSize_usingCParams() can provide a tighter estimation.\n ZSTD_estimateCStreamSize_usingCParams() can be used in tandem with ZSTD_getCParams() to create cParams from compressionLevel.\n ZSTD_estimateCStreamSize_usingCCtxParams() can be used in tandem with ZSTD_CCtxParams_setParameter(). Only single-threaded compression is supported. 
This function will return an error code if ZSTD_c_nbWorkers is >= 1.\n Note : CStream size estimation is only correct for single-threaded compression.\n ZSTD_estimateCStreamSize_usingCCtxParams() will return an error code if ZSTD_c_nbWorkers is >= 1.\n Note 2 : ZSTD_estimateCStreamSize* functions are not compatible with the Block-Level Sequence Producer API at this time.\n Size estimates assume that no external sequence producer is registered.\n\n ZSTD_DStream memory budget depends on frame's window Size.\n This information can be passed manually, using ZSTD_estimateDStreamSize,\n or deducted from a valid frame Header, using ZSTD_estimateDStreamSize_fromFrame();\n Any frame requesting a window size larger than max specified one will be rejected.\n Note : if streaming is init with function ZSTD_init?Stream_usingDict(),\n an internal ?Dict will be created, which additional size is not estimated here.\n In this case, get total size by adding ZSTD_estimate?DictSize"] pub fn ZSTD_estimateCStreamSize( - compressionLevel: ::core::ffi::c_int, + maxCompressionLevel: ::core::ffi::c_int, ) -> usize; } extern "C" { @@ -875,7 +876,7 @@ extern "C" { ) -> usize; } extern "C" { - pub fn ZSTD_estimateDStreamSize(windowSize: usize) -> usize; + pub fn ZSTD_estimateDStreamSize(maxWindowSize: usize) -> usize; } extern "C" { pub fn ZSTD_estimateDStreamSize_fromFrame( @@ -1338,7 +1339,7 @@ extern "C" { ) -> usize; } extern "C" { - #[doc = " ZSTD_resetCStream() :\n This function is DEPRECATED, and is equivalent to:\n ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);\n ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);\n Note: ZSTD_resetCStream() interprets pledgedSrcSize == 0 as ZSTD_CONTENTSIZE_UNKNOWN, but\n ZSTD_CCtx_setPledgedSrcSize() does not do the same, so ZSTD_CONTENTSIZE_UNKNOWN must be\n explicitly specified.\n\n start a new frame, using same parameters from previous frame.\n This is typically useful to skip dictionary loading stage, since it will re-use it in-place.\n Note that zcs must be init at least once before using ZSTD_resetCStream().\n If pledgedSrcSize is not known at reset time, use macro ZSTD_CONTENTSIZE_UNKNOWN.\n If pledgedSrcSize > 0, its value must be correct, as it will be written in header, and controlled at the end.\n For the time being, pledgedSrcSize==0 is interpreted as \"srcSize unknown\" for compatibility with older programs,\n but it will change to mean \"empty\" in future version, so use macro ZSTD_CONTENTSIZE_UNKNOWN instead.\n @return : 0, or an error code (which can be tested using ZSTD_isError())\n This prototype will generate compilation warnings."] + #[doc = " ZSTD_resetCStream() :\n This function is DEPRECATED, and is equivalent to:\n ZSTD_CCtx_reset(zcs, ZSTD_reset_session_only);\n ZSTD_CCtx_setPledgedSrcSize(zcs, pledgedSrcSize);\n Note: ZSTD_resetCStream() interprets pledgedSrcSize == 0 as ZSTD_CONTENTSIZE_UNKNOWN, but\n ZSTD_CCtx_setPledgedSrcSize() does not do the same, so ZSTD_CONTENTSIZE_UNKNOWN must be\n explicitly specified.\n\n start a new frame, using same parameters from previous frame.\n This is typically useful to skip dictionary loading stage, since it will reuse it in-place.\n Note that zcs must be init at least once before using ZSTD_resetCStream().\n If pledgedSrcSize is not known at reset time, use macro ZSTD_CONTENTSIZE_UNKNOWN.\n If pledgedSrcSize > 0, its value must be correct, as it will be written in header, and controlled at the end.\n For the time being, pledgedSrcSize==0 is interpreted as \"srcSize unknown\" for compatibility with older 
programs,\n but it will change to mean \"empty\" in future version, so use macro ZSTD_CONTENTSIZE_UNKNOWN instead.\n @return : 0, or an error code (which can be tested using ZSTD_isError())\n This prototype will generate compilation warnings."] pub fn ZSTD_resetCStream( zcs: *mut ZSTD_CStream, pledgedSrcSize: ::core::ffi::c_ulonglong, @@ -1379,7 +1380,7 @@ extern "C" { ) -> usize; } extern "C" { - #[doc = " This function is deprecated, and is equivalent to:\n\n ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);\n\n re-use decompression parameters from previous init; saves dictionary loading"] + #[doc = " This function is deprecated, and is equivalent to:\n\n ZSTD_DCtx_reset(zds, ZSTD_reset_session_only);\n\n reuse decompression parameters from previous init; saves dictionary loading"] pub fn ZSTD_resetDStream(zds: *mut ZSTD_DStream) -> usize; } pub type ZSTD_sequenceProducer_F = ::core::option::Option< @@ -1404,7 +1405,15 @@ extern "C" { ); } extern "C" { - #[doc = "Buffer-less streaming compression (synchronous mode)\n\nA ZSTD_CCtx object is required to track streaming operations.\nUse ZSTD_createCCtx() / ZSTD_freeCCtx() to manage resource.\nZSTD_CCtx object can be re-used multiple times within successive compression operations.\n\nStart by initializing a context.\nUse ZSTD_compressBegin(), or ZSTD_compressBegin_usingDict() for dictionary compression.\n\nThen, consume your input using ZSTD_compressContinue().\nThere are some important considerations to keep in mind when using this advanced function :\n- ZSTD_compressContinue() has no internal buffer. It uses externally provided buffers only.\n- Interface is synchronous : input is consumed entirely and produces 1+ compressed blocks.\n- Caller must ensure there is enough space in `dst` to store compressed data under worst case scenario.\nWorst case evaluation is provided by ZSTD_compressBound().\nZSTD_compressContinue() doesn't guarantee recover after a failed compression.\n- ZSTD_compressContinue() presumes prior input ***is still accessible and unmodified*** (up to maximum distance size, see WindowLog).\nIt remembers all previous contiguous blocks, plus one separated memory segment (which can itself consists of multiple contiguous blocks)\n- ZSTD_compressContinue() detects that prior input has been overwritten when `src` buffer overlaps.\nIn which case, it will \"discard\" the relevant memory section from its history.\n\nFinish a frame with ZSTD_compressEnd(), which will write the last block(s) and optional checksum.\nIt's possible to use srcSize==0, in which case, it will write a final empty block to end the frame.\nWithout last block mark, frames are considered unfinished (hence corrupted) by compliant decoders.\n\n`ZSTD_CCtx` object can be re-used (ZSTD_compressBegin()) to compress again."] + #[doc = " ZSTD_CCtxParams_registerSequenceProducer() :\n Same as ZSTD_registerSequenceProducer(), but operates on ZSTD_CCtx_params.\n This is used for accurate size estimation with ZSTD_estimateCCtxSize_usingCCtxParams(),\n which is needed when creating a ZSTD_CCtx with ZSTD_initStaticCCtx().\n\n If you are using the external sequence producer API in a scenario where ZSTD_initStaticCCtx()\n is required, then this function is for you. 
Otherwise, you probably don't need it.\n\n See tests/zstreamtest.c for example usage."] + pub fn ZSTD_CCtxParams_registerSequenceProducer( + params: *mut ZSTD_CCtx_params, + sequenceProducerState: *mut ::core::ffi::c_void, + sequenceProducer: ZSTD_sequenceProducer_F, + ); +} +extern "C" { + #[doc = "Buffer-less streaming compression (synchronous mode)\n\nA ZSTD_CCtx object is required to track streaming operations.\nUse ZSTD_createCCtx() / ZSTD_freeCCtx() to manage resource.\nZSTD_CCtx object can be reused multiple times within successive compression operations.\n\nStart by initializing a context.\nUse ZSTD_compressBegin(), or ZSTD_compressBegin_usingDict() for dictionary compression.\n\nThen, consume your input using ZSTD_compressContinue().\nThere are some important considerations to keep in mind when using this advanced function :\n- ZSTD_compressContinue() has no internal buffer. It uses externally provided buffers only.\n- Interface is synchronous : input is consumed entirely and produces 1+ compressed blocks.\n- Caller must ensure there is enough space in `dst` to store compressed data under worst case scenario.\nWorst case evaluation is provided by ZSTD_compressBound().\nZSTD_compressContinue() doesn't guarantee recover after a failed compression.\n- ZSTD_compressContinue() presumes prior input ***is still accessible and unmodified*** (up to maximum distance size, see WindowLog).\nIt remembers all previous contiguous blocks, plus one separated memory segment (which can itself consists of multiple contiguous blocks)\n- ZSTD_compressContinue() detects that prior input has been overwritten when `src` buffer overlaps.\nIn which case, it will \"discard\" the relevant memory section from its history.\n\nFinish a frame with ZSTD_compressEnd(), which will write the last block(s) and optional checksum.\nIt's possible to use srcSize==0, in which case, it will write a final empty block to end the frame.\nWithout last block mark, frames are considered unfinished (hence corrupted) by compliant decoders.\n\n`ZSTD_CCtx` object can be reused (ZSTD_compressBegin()) to compress again."] pub fn ZSTD_compressBegin( cctx: *mut ZSTD_CCtx, compressionLevel: ::core::ffi::c_int, @@ -1467,7 +1476,7 @@ extern "C" { ) -> usize; } extern "C" { - #[doc = "Buffer-less streaming decompression (synchronous mode)\n\nA ZSTD_DCtx object is required to track streaming operations.\nUse ZSTD_createDCtx() / ZSTD_freeDCtx() to manage it.\nA ZSTD_DCtx object can be re-used multiple times.\n\nFirst typical operation is to retrieve frame parameters, using ZSTD_getFrameHeader().\nFrame header is extracted from the beginning of compressed frame, so providing only the frame's beginning is enough.\nData fragment must be large enough to ensure successful decoding.\n`ZSTD_frameHeaderSize_max` bytes is guaranteed to always be large enough.\nresult : 0 : successful decoding, the `ZSTD_frameHeader` structure is correctly filled.\n>0 : `srcSize` is too small, please provide at least result bytes on next attempt.\nerrorCode, which can be tested using ZSTD_isError().\n\nIt fills a ZSTD_frameHeader structure with important information to correctly decode the frame,\nsuch as the dictionary ID, content size, or maximum back-reference distance (`windowSize`).\nNote that these values could be wrong, either because of data corruption, or because a 3rd party deliberately spoofs false information.\nAs a consequence, check that values remain within valid application range.\nFor example, do not allocate memory blindly, check that `windowSize` is 
within expectation.\nEach application can set its own limits, depending on local restrictions.\nFor extended interoperability, it is recommended to support `windowSize` of at least 8 MB.\n\nZSTD_decompressContinue() needs previous data blocks during decompression, up to `windowSize` bytes.\nZSTD_decompressContinue() is very sensitive to contiguity,\nif 2 blocks don't follow each other, make sure that either the compressor breaks contiguity at the same place,\nor that previous contiguous segment is large enough to properly handle maximum back-reference distance.\nThere are multiple ways to guarantee this condition.\n\nThe most memory efficient way is to use a round buffer of sufficient size.\nSufficient size is determined by invoking ZSTD_decodingBufferSize_min(),\nwhich can return an error code if required value is too large for current system (in 32-bits mode).\nIn a round buffer methodology, ZSTD_decompressContinue() decompresses each block next to previous one,\nup to the moment there is not enough room left in the buffer to guarantee decoding another full block,\nwhich maximum size is provided in `ZSTD_frameHeader` structure, field `blockSizeMax`.\nAt which point, decoding can resume from the beginning of the buffer.\nNote that already decoded data stored in the buffer should be flushed before being overwritten.\n\nThere are alternatives possible, for example using two or more buffers of size `windowSize` each, though they consume more memory.\n\nFinally, if you control the compression process, you can also ignore all buffer size rules,\nas long as the encoder and decoder progress in \"lock-step\",\naka use exactly the same buffer sizes, break contiguity at the same place, etc.\n\nOnce buffers are setup, start decompression, with ZSTD_decompressBegin().\nIf decompression requires a dictionary, use ZSTD_decompressBegin_usingDict() or ZSTD_decompressBegin_usingDDict().\n\nThen use ZSTD_nextSrcSizeToDecompress() and ZSTD_decompressContinue() alternatively.\nZSTD_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize' to ZSTD_decompressContinue().\nZSTD_decompressContinue() requires this _exact_ amount of bytes, or it will fail.\n\nresult of ZSTD_decompressContinue() is the number of bytes regenerated within 'dst' (necessarily <= dstCapacity).\nIt can be zero : it just means ZSTD_decompressContinue() has decoded some metadata item.\nIt can also be an error code, which can be tested with ZSTD_isError().\n\nA frame is fully decoded when ZSTD_nextSrcSizeToDecompress() returns zero.\nContext can then be reset to start a new decompression.\n\nNote : it's possible to know if next input to present is a header or a block, using ZSTD_nextInputType().\nThis information is not required to properly decode a frame.\n\n== Special case : skippable frames ==\n\nSkippable frames allow integration of user-defined data into a flow of concatenated frames.\nSkippable frames will be ignored (skipped) by decompressor.\nThe format of skippable frames is as follows :\na) Skippable frame ID - 4 Bytes, Little endian format, any value from 0x184D2A50 to 0x184D2A5F\nb) Frame Size - 4 Bytes, Little endian format, unsigned 32-bits\nc) Frame Content - any content (User Data) of length equal to Frame Size\nFor skippable frames ZSTD_getFrameHeader() returns zfhPtr->frameType==ZSTD_skippableFrame.\nFor skippable frames ZSTD_decompressContinue() always returns 0 : it only skips the content."] + #[doc = "Buffer-less streaming decompression (synchronous mode)\n\nA ZSTD_DCtx object is required to track streaming 
operations.\nUse ZSTD_createDCtx() / ZSTD_freeDCtx() to manage it.\nA ZSTD_DCtx object can be reused multiple times.\n\nFirst typical operation is to retrieve frame parameters, using ZSTD_getFrameHeader().\nFrame header is extracted from the beginning of compressed frame, so providing only the frame's beginning is enough.\nData fragment must be large enough to ensure successful decoding.\n`ZSTD_frameHeaderSize_max` bytes is guaranteed to always be large enough.\nresult : 0 : successful decoding, the `ZSTD_frameHeader` structure is correctly filled.\n>0 : `srcSize` is too small, please provide at least result bytes on next attempt.\nerrorCode, which can be tested using ZSTD_isError().\n\nIt fills a ZSTD_frameHeader structure with important information to correctly decode the frame,\nsuch as the dictionary ID, content size, or maximum back-reference distance (`windowSize`).\nNote that these values could be wrong, either because of data corruption, or because a 3rd party deliberately spoofs false information.\nAs a consequence, check that values remain within valid application range.\nFor example, do not allocate memory blindly, check that `windowSize` is within expectation.\nEach application can set its own limits, depending on local restrictions.\nFor extended interoperability, it is recommended to support `windowSize` of at least 8 MB.\n\nZSTD_decompressContinue() needs previous data blocks during decompression, up to `windowSize` bytes.\nZSTD_decompressContinue() is very sensitive to contiguity,\nif 2 blocks don't follow each other, make sure that either the compressor breaks contiguity at the same place,\nor that previous contiguous segment is large enough to properly handle maximum back-reference distance.\nThere are multiple ways to guarantee this condition.\n\nThe most memory efficient way is to use a round buffer of sufficient size.\nSufficient size is determined by invoking ZSTD_decodingBufferSize_min(),\nwhich can return an error code if required value is too large for current system (in 32-bits mode).\nIn a round buffer methodology, ZSTD_decompressContinue() decompresses each block next to previous one,\nup to the moment there is not enough room left in the buffer to guarantee decoding another full block,\nwhich maximum size is provided in `ZSTD_frameHeader` structure, field `blockSizeMax`.\nAt which point, decoding can resume from the beginning of the buffer.\nNote that already decoded data stored in the buffer should be flushed before being overwritten.\n\nThere are alternatives possible, for example using two or more buffers of size `windowSize` each, though they consume more memory.\n\nFinally, if you control the compression process, you can also ignore all buffer size rules,\nas long as the encoder and decoder progress in \"lock-step\",\naka use exactly the same buffer sizes, break contiguity at the same place, etc.\n\nOnce buffers are setup, start decompression, with ZSTD_decompressBegin().\nIf decompression requires a dictionary, use ZSTD_decompressBegin_usingDict() or ZSTD_decompressBegin_usingDDict().\n\nThen use ZSTD_nextSrcSizeToDecompress() and ZSTD_decompressContinue() alternatively.\nZSTD_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize' to ZSTD_decompressContinue().\nZSTD_decompressContinue() requires this _exact_ amount of bytes, or it will fail.\n\nresult of ZSTD_decompressContinue() is the number of bytes regenerated within 'dst' (necessarily <= dstCapacity).\nIt can be zero : it just means ZSTD_decompressContinue() has decoded some metadata 
item.\nIt can also be an error code, which can be tested with ZSTD_isError().\n\nA frame is fully decoded when ZSTD_nextSrcSizeToDecompress() returns zero.\nContext can then be reset to start a new decompression.\n\nNote : it's possible to know if next input to present is a header or a block, using ZSTD_nextInputType().\nThis information is not required to properly decode a frame.\n\n== Special case : skippable frames ==\n\nSkippable frames allow integration of user-defined data into a flow of concatenated frames.\nSkippable frames will be ignored (skipped) by decompressor.\nThe format of skippable frames is as follows :\na) Skippable frame ID - 4 Bytes, Little endian format, any value from 0x184D2A50 to 0x184D2A5F\nb) Frame Size - 4 Bytes, Little endian format, unsigned 32-bits\nc) Frame Content - any content (User Data) of length equal to Frame Size\nFor skippable frames ZSTD_getFrameHeader() returns zfhPtr->frameType==ZSTD_skippableFrame.\nFor skippable frames ZSTD_decompressContinue() always returns 0 : it only skips the content."] pub fn ZSTD_decodingBufferSize_min( windowSize: ::core::ffi::c_ulonglong, frameContentSize: ::core::ffi::c_ulonglong, diff --git a/zstd-safe/zstd-sys/update_zstd.sh b/zstd-safe/zstd-sys/update_zstd.sh index 414258e4..b72ce563 100755 --- a/zstd-safe/zstd-sys/update_zstd.sh +++ b/zstd-safe/zstd-sys/update_zstd.sh @@ -5,7 +5,7 @@ set -o pipefail cd zstd CURRENT=$(git describe --tags) git fetch -q -TAG=$(git tag -l | grep '^v' | sort | tail -n 1) +TAG=$(git tag -l | grep '^v1' | sort | tail -n 1) if [ $CURRENT != $TAG ] then diff --git a/zstd-safe/zstd-sys/zstd b/zstd-safe/zstd-sys/zstd index 63779c79..794ea1b0 160000 --- a/zstd-safe/zstd-sys/zstd +++ b/zstd-safe/zstd-sys/zstd @@ -1 +1 @@ -Subproject commit 63779c798237346c2b245c546c40b72a5a5913fe +Subproject commit 794ea1b0afca0f020f4e57b6732332231fb23c70
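
Reviewer note: the one user-visible enum change in this regeneration is that `ZSTD_c_targetCBlockSize` (130) is now part of the stable `ZSTD_cParameter` list, replacing the old `ZSTD_c_experimentalParam6` slot, with `ZSTD_TARGETCBLOCKSIZE_MIN` raised to 1340. A minimal sketch of setting it through the advanced API, assuming the regenerated bindings are in scope as `zstd_sys`; the value 4096 and the helper name are illustrative only:

```rust
use std::ffi::c_void;

/// Illustrative helper (not part of this crate): one-shot compression with
/// a target compressed-block size set through the advanced API.
fn compress_with_target_block_size(src: &[u8]) -> Result<Vec<u8>, usize> {
    unsafe {
        let cctx = zstd_sys::ZSTD_createCCtx();
        assert!(!cctx.is_null());

        // ZSTD_c_targetCBlockSize is stable as of zstd 1.5.6 / zstd-sys 2.0.10;
        // legal values are ZSTD_TARGETCBLOCKSIZE_MIN..=ZSTD_TARGETCBLOCKSIZE_MAX (1340..=131072).
        let rc = zstd_sys::ZSTD_CCtx_setParameter(
            cctx,
            zstd_sys::ZSTD_cParameter::ZSTD_c_targetCBlockSize,
            4096,
        );
        assert_eq!(zstd_sys::ZSTD_isError(rc), 0);

        // ZSTD_compress2() exposes no compression level argument, so the level
        // also goes through the advanced API.
        let rc = zstd_sys::ZSTD_CCtx_setParameter(
            cctx,
            zstd_sys::ZSTD_cParameter::ZSTD_c_compressionLevel,
            3,
        );
        assert_eq!(zstd_sys::ZSTD_isError(rc), 0);

        let mut dst = vec![0u8; zstd_sys::ZSTD_compressBound(src.len())];
        let written = zstd_sys::ZSTD_compress2(
            cctx,
            dst.as_mut_ptr() as *mut c_void,
            dst.len(),
            src.as_ptr() as *const c_void,
            src.len(),
        );
        let _ = zstd_sys::ZSTD_freeCCtx(cctx);

        if zstd_sys::ZSTD_isError(written) != 0 {
            return Err(written);
        }
        dst.truncate(written);
        Ok(dst)
    }
}
```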
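
The updated `ZSTD_compressStream2()` doc comment adds a note that an error may leave the `cctx` in an undefined state and that it must be reset before being reused. A hedged sketch of a single-frame streaming loop that follows that rule, under the same `zstd_sys` assumption; `stream_compress` and its buffer sizing are illustrative, not part of this crate:

```rust
use std::ffi::c_void;

/// Compress `src` as one frame into `out`, driving ZSTD_compressStream2() with ZSTD_e_end.
unsafe fn stream_compress(
    cctx: *mut zstd_sys::ZSTD_CCtx,
    src: &[u8],
    out: &mut Vec<u8>,
) -> Result<(), usize> {
    let mut input = zstd_sys::ZSTD_inBuffer_s {
        src: src.as_ptr() as *const c_void,
        size: src.len(),
        pos: 0,
    };
    let mut chunk = vec![0u8; zstd_sys::ZSTD_CStreamOutSize()];
    loop {
        let mut output = zstd_sys::ZSTD_outBuffer_s {
            dst: chunk.as_mut_ptr() as *mut c_void,
            size: chunk.len(),
            pos: 0,
        };
        let remaining = zstd_sys::ZSTD_compressStream2(
            cctx,
            &mut output,
            &mut input,
            zstd_sys::ZSTD_EndDirective::ZSTD_e_end,
        );
        if zstd_sys::ZSTD_isError(remaining) != 0 {
            // Per the new 1.5.6 note: after an error the cctx state is undefined,
            // so reset it before reusing it for another frame.
            let _ = zstd_sys::ZSTD_CCtx_reset(
                cctx,
                zstd_sys::ZSTD_ResetDirective::ZSTD_reset_session_and_parameters,
            );
            return Err(remaining);
        }
        out.extend_from_slice(&chunk[..output.pos]);
        if remaining == 0 {
            return Ok(()); // frame fully flushed
        }
    }
}
```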
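
`ZSTD_decompressStream()` gains the matching note on the decompression side (undefined `zds` state after an error, reset with `ZSTD_DCtx_reset()` before reuse). A corresponding sketch under the same assumptions; a production loop would also keep calling until the return value reaches 0 to be sure the frame is complete and fully flushed:

```rust
use std::ffi::c_void;

/// Decompress `src` into `out`, consuming all provided input.
unsafe fn stream_decompress(
    dctx: *mut zstd_sys::ZSTD_DCtx,
    src: &[u8],
    out: &mut Vec<u8>,
) -> Result<(), usize> {
    let mut input = zstd_sys::ZSTD_inBuffer_s {
        src: src.as_ptr() as *const c_void,
        size: src.len(),
        pos: 0,
    };
    let mut chunk = vec![0u8; zstd_sys::ZSTD_DStreamOutSize()];
    while input.pos < input.size {
        let mut output = zstd_sys::ZSTD_outBuffer_s {
            dst: chunk.as_mut_ptr() as *mut c_void,
            size: chunk.len(),
            pos: 0,
        };
        let ret = zstd_sys::ZSTD_decompressStream(dctx, &mut output, &mut input);
        if zstd_sys::ZSTD_isError(ret) != 0 {
            // New 1.5.6 note: after an error the DCtx state is undefined; reset before reuse.
            let _ = zstd_sys::ZSTD_DCtx_reset(
                dctx,
                zstd_sys::ZSTD_ResetDirective::ZSTD_reset_session_and_parameters,
            );
            return Err(ret);
        }
        out.extend_from_slice(&chunk[..output.pos]);
    }
    Ok(())
}
```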
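
The experimental estimation helpers are also renamed to take `maxCompressionLevel` / `maxWindowSize`, clarifying that they budget for a worst case up to that bound rather than for one exact setting. A small sketch, assuming zstd-sys is built with its `experimental` feature so these symbols are generated; the bounds 19 and 8 MiB are arbitrary illustrations:

```rust
fn print_memory_budgets() {
    unsafe {
        // Worst-case one-shot CCtx budget for any compression level up to 19.
        let cctx_budget = zstd_sys::ZSTD_estimateCCtxSize(19);
        // Streaming compression budget for any level up to 19, arbitrarily large input.
        let cstream_budget = zstd_sys::ZSTD_estimateCStreamSize(19);
        // Streaming decompression budget for frames whose window is at most 8 MiB.
        let dstream_budget = zstd_sys::ZSTD_estimateDStreamSize(8usize << 20);
        println!(
            "CCtx: {} bytes, CStream: {} bytes, DStream: {} bytes",
            cctx_budget, cstream_budget, dstream_budget
        );
    }
}
```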